| repo_name (string, 5-100 chars) | path (string, 4-231 chars) | language (string, 1 class) | license (string, 15 classes) | size (int64, 6-947k) | score (float64, 0-0.34) | prefix (string, 0-8.16k chars) | middle (string, 3-512 chars) | suffix (string, 0-8.17k chars) |
|---|---|---|---|---|---|---|---|---|
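The columns above describe a fill-in-the-middle (FIM) style code corpus: each record pairs repository metadata (`repo_name`, `path`, `language`, `license`, `size`, `score`) with a Python source file split into `prefix`, `middle`, and `suffix`. As a minimal sketch, assuming the records have been exported to a local JSON Lines file (the file name and export format are assumptions, not part of the dataset), the three text columns concatenate back into the complete file:

```python
import json

# Assumed: one JSON object per line, keyed by the column names in the header above.
with open("rows.jsonl") as fh:  # hypothetical local export of the records below
    for line in fh:
        row = json.loads(line)
        # prefix + middle + suffix reassembles the original source file
        full_source = row["prefix"] + row["middle"] + row["suffix"]
        print(row["repo_name"], row["path"], row["license"], len(full_source))
```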
epage/nqaap | support/builddeb.py | Python | lgpl-2.1 | 4,263 | 0.026742
#!/usr/bin/env python
import os
import sys
try:
import py2deb
except ImportError:
import fake_py2deb as py2deb
import constants
__app_name__ = constants.__app_name__
__description__ = """Very simple Audiobook player.
Supports playing, pausing, seeking (sort of) and saving state when changing book/closing.
Plays books arranged as dirs under myDocs/Audiobooks.
Homepage: http://wiki.maemo.org/Nqaap"""
__author__ = "Soeren 'Pengman' Pedersen"
__email__ = "pengmeister@gmail.com"
__version__ = constants.__version__
__build__ = constants.__build__
__changelog__ = """
* More unicode improvements
""".strip()
__postinstall__ = """#!/bin/sh -e
gtk-update-icon-cache -f /usr/share/icons/hicolor
rm -f ~/.%(name)s/%(name)s.log
""" % {"name": constants.__app_name__}
def find_files(prefix, path):
for root, dirs, files in os.walk(path):
for file in files:
if file.startswith(prefix+"-"):
fileParts = file.split("-")
unused, relPathParts, newName = fileParts[0], fileParts[1:-1], fileParts[-1]
assert unused == prefix
relPath = os.sep.join(relPathParts)
yield relPath, file, newName
def unflatten_files(files):
d = {}
for relPath, oldName, newName in files:
if relPath not in d:
d[relPath] = []
d[relPath].append((oldName, newName))
return d
def build_package(distribution):
try:
os.chdir(os.path.dirname(sys.argv[0]))
except:
pass
py2deb.Py2deb.SECTIONS = py2deb.SECTIONS_BY_POLICY[distribution]
p = py2deb.Py2deb(__app_name__)
p.prettyName = constants.__pretty_app_name__
p.description = __description__
p.bugTracker="https://bugs.maemo.org/enter_bug.cgi?product=nQa%%20Audiobook%%20Player"
p.author = __author__
p.mail = __email__
p.license = "lgpl"
p.depends = ", ".join([
"python2.6 | python2.5",
"python-gtk2 | python2.5-gtk2",
"python-dbus | python2.5-dbus",
"python-telepathy | python2.5-telepathy",
"python-gobject | python2.5-gobject",
"python-simplejson",
])
maemoSpecificDepends = ", python-osso | python2.5-osso, python-hildon | python2.5-hildon"
p.depends += {
"debian": ", python-gst0.10",
"diablo": maemoSpecificDepends,
"fremantle": maemoSpecificDepends + ", python-gst0.10",
}[distribution]
p.section = {
"debian": "sound",
"diablo": "user/multimedia",
"fremantle": "user/multimedia",
}[distribution]
p.arch="all"
p.urgency="low"
p.distribution=distribution
p.repository="extras"
p.changelog = __changelog__
p.postinstall = __postinstall__
p.icon = {
"debian": "26x26-%s.png" % constants.__app_name__,
"diablo": "26x26-%s.png" % constants.__app_name__,
"fremantle": "48x48-%s.png" % constants.__app_name__,
}[distribution]
p["/opt/%s/bin" % constants.__app_name__] = [ "%s.py" % constants.__app_name__ ]
for relPath, files in unflatten_files(find_files("src", ".")).iteritems():
fullPath = "/opt/%s/lib" % constants.__app_name__
if relPath:
fullPath += os.sep+relPath
p[fullPath] = list(
"|".join((oldName, newName))
for (oldName, newName) in files
)
p["/usr/share/applications/hildon"] = ["%s.desktop" % constants.__app_name__]
p["/usr/share/icons/hicolor/26x26/hildon"] = ["26x26-%s.png|%s.png" % (constants.__app_name__, constants.__app_name__)]
p["/usr/share/icons/hicolor/48x48/hildon"] = ["48x48-%s.png|%s.png" % (constants.__app_name__, constants.__app_name__)]
p["/usr/share/icons/hicolor/64x64/hildon"] = ["64x64-%s.png|%s.png" % (constants.__app_name__, constants.__app_name__)]
p["/usr/share/icons/hicolor/scalable/hildon"] = ["scale-%s.png|%s.png" % (constants.__app_name__, constants.__app_name__)]
print p
if distribution == "debian":
print p.generate(
version="%s-%s" % (__version__, __build__),
changelog=__changelog__,
build=True,
tar=False,
changes=False,
dsc=False,
)
else:
print p.generate(
version="%s-%s" % (__version__, __build__),
changelog=__changelog__,
build=False,
tar=True,
changes=True,
dsc=True,
)
print "Building for %s finished" % distribution
if __name__ == "__main__":
if len(sys.argv) == 1:
distribution = "fremantle"
else:
distribution = sys.argv[1]
build_package(distribution)
neilLasrado/erpnext | erpnext/projects/utils.py | Python | gpl-3.0 | 2,678 | 0.020164
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
@frappe.whitelist()
def query_task(doctype, txt, searchfield, start, page_len, filters):
from frappe.desk.reportview import build_match_conditions
search_string = "%%%s%%" % txt
order_by_string = "%s%%" % txt
match_conditions = build_match_conditions("Task")
match_conditions = ("and" + match_conditions) if match_conditions else ""
return frappe.db.sql("""select name, subject from `tabTask`
where (`%s` like %s or `subject` like %s) %s
order by
case when `subject` like %s then 0 else 1 end,
case when `%s` like %s then 0 else 1 end,
`%s`,
subject
limit %s, %s""" %
(searchfield, "%s", "%s", match_conditions, "%s",
searchfield, "%s", searchfield, "%s", "%s"),
(search_string, search_string, order_by_string, order_by_string, start, page_len))
@frappe.whitelist()
def update_timesheet_logs(ref_dt, ref_dn, billable):
time_logs = []
if ref_dt in ["Project", "Task"]:
if ref_dt == "Project":
tasks = update_linked_tasks(ref_dn, billable)
time_logs = [get_task_time_logs(task) for task in tasks]
# flatten the list of time log lists
time_logs = [log for time_log in time_logs for log in time_log]
else:
time_logs = frappe.get_all("Timesheet Detail", filters={frappe.scrub(ref_dt): ref_dn})
elif ref_dt in ["Project Type", "Project Template"]:
projects = update_linked_projects(frappe.scrub(ref_dt), ref_dn, billable)
time_logs = [get_project_time_logs(project) for project in projects]
# flatten the list of time log lists
time_logs = [log for time_log in time_logs for log in time_log]
for log in time_logs:
frappe.db.set_value("Timesheet Detail", log.name, "billable", billable)
def update_linked_projects(ref_field, ref_value, billable):
projects = frappe.get_all("Project", filters={ref_field: ref_value})
for project in projects:
project_doc = frappe.get_doc("Project", project.name)
project_doc.billable = billable
project_doc.save()
update_linked_tasks(project.name, billable)
return projects
def update_linked_tasks(project, billable):
tasks = frappe.get_all("Task", filters={"project": project})
for task in tasks:
task_doc = frappe.get_doc("Task", task.name)
task_doc.billable = billable
task_doc.save()
return tasks
def get_project_time_logs(project):
return frappe.get_all("Timesheet Detail", filters={"project": project.name})
def get_task_time_logs(task):
return frappe.get_all("Timesheet Detail", filters={"task": task.name})
GunadarmaC0d3/Gilang-Aditya-Rahman | Python/Penggunaan else if.py | Python | gpl-2.0 | 141 | 0
kunci = "Python"
password = raw_input("Masukan password : ")
if password == kunci:
print"Passwor
|
d Benar"
else:
print"Password Salah"
tehmaze/natural | natural/data.py | Python | mit | 3,460 | 0.000291
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from __future__ import print_function
import datetime
import fcntl
import os
import struct
import sys
import termios
try:
from io import BytesIO
except ImportError:
from cStringIO import StringIO as BytesIO
import six
from natural.constant import PRINTABLE, SPARKCHAR
from natural.language import _
from natural.file import filesize
def _termsize():
'''
Get the current terminal size, returns a ``(height, width)`` tuple.
'''
try:
return struct.unpack(
'hh',
fcntl.ioctl(sys.stdout.fileno(), termios.TIOCGWINSZ, '1234')
)
except:
return (
int(os.environ.get('LINES', 25)),
int(os.environ.get('COLUMNS', 80)),
)
def hexdump(stream):
'''
Display stream contents in hexadecimal and ASCII format. The ``stream``
specified must either be a file-like object that supports the ``read``
method to receive bytes, or it can be a string.
To dump a file::
>>> hexdump(file(filename)) # doctest: +SKIP
Or to dump stdin::
>>> import sys
>>> hexdump(sys.stdin) # doctest: +SKIP
:param stream: stream input
'''
if isinstance(stream, six.string_types):
stream = BytesIO(stream)
row = 0
while True:
data = stream.read(16)
if not data:
break
hextets = data.encode('hex').ljust(32)
canonical = printable(data)
print('%08x %s %s |%s|' % (
row * 16,
' '.join(hextets[x:x + 2] for x in range(0x00, 0x10, 2)),
' '.join(hextets[x:x + 2] for x in range(0x10, 0x20, 2)),
canonical,
))
row += 1
def printable(sequence):
'''
Return a printable string from the input ``sequence``
:param sequence: byte or string sequence
>>> print(printable('\\x1b[1;34mtest\\x1b[0m'))
.[1;34mtest.[0m
>>> printable('\\x00\\x01\\x02\\x03\\x04\\x05\\x06\\x06') == '........'
True
>>> print(printable('12345678'))
12345678
>>> print(printable('testing\\n'))
testing.
'''
return ''.join(list(
map(lambda c: c if c in PRINTABLE else '.', sequence)
))
def sparkline(data):
'''
Return a spark line for the given data set.
:value data: sequence of numeric values
>>> print sparkline([1, 2, 3, 4, 5, 6, 5, 4, 3, 1, 5, 6]) # doctest: +SKIP
▁▂▃▄▅▆▅▄▃▁▅▆
'''
min_value = float(min(data))
max_value = float(max(data))
steps = (max_value - min_value) / float(len(SPARKCHAR) - 1)
return ''.join([
SPARKCHAR[int((float(value) - min_value) / steps)]
for value in data
])
def throughput(sample, window=1, format='binary'):
'''
Return the throughput in (intelli)bytes per second.
:param sample: number of samples sent
:param window: default 1, sample window in seconds or
:class:`datetime.timedelta` object
:param format: default 'decimal', see :func:`natural.size.filesize`
>>> print(throughput(123456, 42))
2.87 KiB/s
'''
if isinstance(window, datetime.timedelta):
window = float(window.days * 86400 + window.seconds)
elif isinstance(window, six.string_types):
window = float(window)
per_second = sample / float(window)
return _('%s/s') % (filesize(per_second, format=format),)
andreasf/django-notify | notify/migrations/0002_add_honeypot.py | Python | mit | 553 | 0
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations
def add_honeypot(apps, schema_editor):
Destination = apps.get_model("notify", "Destination")
try:
obj = Destination.objects.get(name="honeypot")
except Destination.DoesNotExist:
obj = Destination(name="honeypot", email="honeypot")
obj.save()
class Migration(migrations.Migration):
dependencies = [
('notify', '0001_initial'),
]
operations = [
migrations.RunPython(add_honeypot),
]
stefanseefeld/synopsis | Synopsis/Formatters/HTML/Views/RawFile.py | Python | lgpl-2.1 | 3,392 | 0.003243
#
# Copyright (C) 2000 Stephen Davies
# Copyright (C) 2000 Stefan Seefeld
# All rights reserved.
# Licensed to the public under the terms of the GNU LGPL (>= 2),
# see the file COPYING for details.
#
from Synopsis.Processor import Parameter
from Synopsis.Formatters.HTML.View import View
from Synopsis.Formatters.HTML.Tags import *
from Directory import compile_glob
import time, os, stat, os.path
class RawFile(View):
"""A module for creating a view for each file with hyperlinked source"""
src_dir = Parameter('', 'starting point for directory listing')
base_path = Parameter('', 'path prefix to strip off of the file names')
exclude = Parameter([], 'TODO: define an exclusion mechanism (glob based ?)')
def register(self, frame):
super(RawFile, self).register(frame)
self._exclude = [compile_glob(e) for e in self.exclude]
self.__files = None
def filename(self):
return self.__filename
def title(self):
return self.__title
def _get_files(self):
"""Returns a list of (path, output_filename) for each file."""
if self.__files is not None: return self.__files
self.__files = []
dirs = [self.src_dir]
while dirs:
dir = dirs.pop(0)
for entry in os.listdir(os.path.abspath(dir)):
exclude = 0
for re in self._exclude:
if re.match(entry):
exclude = 1
break
if exclude:
continue
entry_path = os.path.join(dir, entry)
info = os.stat(entry_path)
if stat.S_ISDIR(info[stat.ST_MODE]):
dirs.append(entry_path)
else:
# strip of base_path
path = entry_path[len(self.base_path):]
if path[0] == '/': path = path[1:]
filename = self.directory_layout.file_source(path)
self.__files.append((entry_path, filename))
return self.__files
def process(self):
"""Creates a view for every file."""
for path, filename in self._get_files():
self.process_file(path, filename)
def register_filenames(self):
for path, filename in self._get_files():
self.processor.register_filename(filename, self, path)
def process_file(self, original, filename):
"""Creates a view for the given filename."""
# Check that we got the rego
reg_view, reg_scope = self.processor.filename_info(filename)
if reg_view is not self: return
self.__filename = filename
self.__title = original[len(self.base_path):]
self.start_file()
self.write_navigation_bar()
self.write('File: '+element('b', self.__title))
try:
lines = open(original, 'rt').readlines()
lineno_template = '%%%ds' % len(`len(lines)`)
lines = ['<span class="lineno">%s</span><span class="line">%s</span>\n'
%(lineno_template % (i + 1), escape(l[:-1]))
for i, l in enumerate(lines)]
self.write('<pre class="sxr">')
self.write(''.join(lines))
self.write('</pre>')
except:
self.write('An error occurred')
self.end_file()
isb-cgc/ISB-CGC-data-proc | tcga_etl_pipeline/maf/part1/extract2.py | Python | apache-2.0 | 3,767 | 0.005575
#!/usr/bin/env python
# Copyright 2015, Institute for Systems Biology.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Script to parse MAF file
"""
from bigquery_etl.utils.gcutils import read_mysql_query
import sys
import json
import re
from pandas import ExcelWriter
writer = ExcelWriter('maf.xlsx')
def identify_data(config):
"""Gets the metadata info from database
"""
# cloudSql connection params
host = config['cloudsql']['host']
database = config['cloudsql']['db']
user = config['cloudsql']['user']
passwd = config['cloudsql']['passwd']
# sqlquery = """
# SELECT ParticipantBarcode, SampleBarcode, AliquotBarcode, Pipeline, Platform,
# SampleType, SampleTypeCode, Study, DatafileName, DatafileNameKey, Datatype,
# DatafileUploaded, IncludeForAnalysis, DataCenterName
# FROM metadata_data
# WHERE DatafileUploaded='true'
# AND DatafileNameKey LIKE '%.maf'
# AND IncludeForAnalysis='yes'
# """
sqlquery = """
SELECT ParticipantBarcode, SampleBarcode, AliquotBarcode, Pipeline, Platform,
SampleType, SampleTypeCode, Study, DatafileName, DatafileNameKey, Datatype,
DatafileUploaded, IncludeForAnalysis, DataCenterName
FROM metadata_data
WHERE Datatype='Mutations'
AND DatafileNameKey LIKE '%.maf'
AND DatafileUploaded='true'
AND IncludeForAnalysis='yes'
"""
#sqlquery = """ select datafilename, datafilenamekey from metadata_data where 0 < instr(datafilename, 'maf') and 'true' = datafileuploaded and 0 = instr(datafilename, 'protected') group by datafilename, dataarchivename;
#"""
# connect to db and get results in a dataframe
metadata_df = read_mysql_query(host, database, user, passwd, sqlquery)
# print metadata_df
for i, x in metadata_df.iterrows():
print x.to_dict()
# print metadata_df
sys.exit()
# rename platforms in rows
for i, row in metadata_df.iterrows():
metadata = row.to_dict()
metadata_df.loc[i, 'OutDatafileNameKey'] = config['mirna']['mirna']['output_dir']\
+ metadata['DatafileName'] + '.json'
#metadata_df.loc[:, 'SampleTypeLetterCode'] = metadata_df['SampleTypeCode']\
# .map(lambda code: config['sample_code2letter'][code])
metadata_df.loc[:, 'DatafileNameKey'] = metadata_df['DatafileNameKey']\
.map(lambda inputfile: re.sub(r"^/", "", inputfile))
# tag CELLC samples
metadata_df['transform_function'] = 'mirna.mirna.transform.parse_mirna'
print "Found {0} rows, columns." .format(str(metadata_df.shape))
# Filter - check all "is_" fields - remember all 'is_' fields must be boolean
all_flag_columns = [key for key in metadata_df.columns.values if key.startswith("is_")]
flag_df = metadata_df[all_flag_columns]
metadata_df = metadata_df[flag_df.all(axis=1)]
metadata_df.to_excel(writer, 'maf_files_metadata_table')
writer.save()
print "After filtering: Found {0} rows, columns." .format(str(metadata_df.shape))
return metadata_df
if __name__ == '__main__':
print identify_data(json.load(open(sys.argv[1])))
Chittr/Chittr | chittr/api.py | Python | mit | 1,106 | 0.012658
from . import auth, users, rooms
from .config import config
import time
from flask import session
from pprint import pprint
import emoji
import re
escape_chars = (
("&", "&amp;"),
("<", "&lt;"),
(">", "&gt;"),
)
def parse_markdown(message):
message = re.sub('\*{1}([^\*]+)\*{1}', '<b>\\1</b>', message)
message = re.sub('\_{1}([^\_]+)\_{1}', '<i>\\1</i>', message)
message = re.sub('\~{1}([^\~]+)\~{1}', '<s>\\1</s>', message)
message = re.sub('\`{1}([^\`]+)\`{1}', '<code>\\1</code>', message)
return message
def escape_message(message):
for a, b in escape_chars:
message = message.replace(a, b)
return message
def parse_message(user, room, message):
user_data = users.safe_user(user)
user_data["tag"] = rooms.get_tag(room, user)
user_data["rank"] = None # not implemented
message = escape_message(message)
message = emoji.emojize(message, use_aliases=True)
# Markdown
message = parse_markdown(message)
return {
"user": user_data,
"text": message,
"timestamp": time.time()
}
dnr2/fml-twitter | tweepy-master/tests/test_streaming.py | Python | mit | 4,635 | 0.002589
from time import sleep
import unittest2 as unittest
from tweepy.api import API
from tweepy.auth import OAuthHandler
from tweepy.models import Status
from tweepy.streaming import Stream, StreamListener
from config import create_auth
from test_utils import mock_tweet
from mock import MagicMock, patch
class MockStreamListener(StreamListener):
def __init__(self, test_case):
super(MockStreamListener, self).__init__()
self.test_case = test_case
self.status_count = 0
self.status_stop_count = 0
self.connect_cb = None
def on_connect(self):
if self.connect_cb:
self.connect_cb()
def on_timeout(self):
self.test_case.fail('timeout')
return False
def on_error(self, code):
print("response: %s" % code)
return True
def on_status(self, status):
self.status_count += 1
self.test_case.assertIsInstance(status, Status)
if self.status_stop_count == self.status_count:
return False
class TweepyStreamTests(unittest.TestCase):
def setUp(self):
self.auth = create_auth()
self.listener = MockStreamListener(self)
self.stream = Stream(self.auth, self.listener, timeout=3.0)
def tearDown(self):
self.stream.disconnect()
def test_userstream(self):
# Generate random tweet which should show up in the stream.
def on_connect():
API(self.auth).update_status(mock_tweet())
self.listener.connect_cb = on_connect
self.listener.status_stop_count = 1
self.stream.userstream()
self.assertEqual(self.listener.status_count, 1)
def test_userstream_with_params(self):
# Generate random tweet which should show up in the stream.
def on_connect():
API(self.auth).update_status(mock_tweet())
self.listener.connect_cb = on_connect
self.listener.status_stop_count = 1
self.stream.userstream(_with='user', replies='all', stall_warnings=True)
self.assertEqual(self.listener.status_count, 1)
def test_sample(self):
self.listener.status_stop_count = 10
self.stream.sample()
self.assertEquals(self.listener.status_count,
self.listener.status_stop_count)
def test_filter_track(self):
self.listener.status_stop_count = 5
phrases = ['twitter']
self.stream.filter(track=phrases)
self.assertEquals(self.listener.status_count,
self.listener.status_stop_count)
def test_track_encoding(self):
s = Stream(None, None)
s._start = lambda async: None
s.filter(track=[u'Caf\xe9'])
# Should be UTF-8 encoded
self.assertEqual(u'Caf\xe9'.encode('utf8'), s.parameters['track'])
def test_follow_encoding(self):
s = Stream(None, None)
s._start = lambda async: None
s.filter(follow=[u'Caf\xe9'])
# Should be UTF-8 encoded
self.assertEqual(u'Caf\xe9'.encode('utf8'), s.parameters['follow'])
class TweepyStreamBackoffTests(unittest.TestCase):
def setUp(self):
#bad auth causes twitter to return 401 errors
self.auth = OAuthHandler("bad-key", "bad-secret")
self.auth.set_access_token("bad-token", "bad-token-secret")
self.listener = MockStreamListener(self)
self.stream = Stream(self.auth, self.listener)
def tearDown(self):
self.stream.disconnect()
def test_exp_backoff(self):
self.stream = Stream(self.auth, self.listener, timeout=3.0,
retry_count=1, retry_time=1.0, retry_time_cap=100.0)
self.stream.sample()
# 1 retry, should be 4x the retry_time
self.assertEqual(self.stream.retry_time, 4.0)
def test_exp_backoff_cap(self):
self.stream = Stream(self.auth, self.listener, timeout=3.0,
retry_count=1, retry_time=1.0, retry_time_cap=3.0)
self.stream.sample()
# 1 retry, but 4x the retry_time exceeds the cap, so should be capped
self.assertEqual(self.stream.retry_time, 3.0)
mock_resp = MagicMock()
mock_resp.return_value.status = 420
@patch('httplib.HTTPConnection.getresponse', mock_resp)
def test_420(self):
self.stream = Stream(self.auth, self.listener, timeout=3.0, retry_count=0,
retry_time=1.0, retry_420=1.5, retry_time_cap=20.0)
self.stream.sample()
# no retries, but error 420, should be double the retry_420, not double the retry_time
self.assertEqual(self.stream.retry_time, 3.0)
arunhotra/tensorflow | tensorflow/g3doc/how_tos/adding_an_op/zero_out_3_test.py | Python | apache-2.0 | 1,175 | 0.011064
"""Test for version 3 of the zero_out op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.python.platform
import tensorflow as tf
from tensorflow.g3doc.how_tos.adding_an_op import gen_zero_out_op_3
class ZeroOut3Test(tf.test.TestCase):
def test(self):
with self.test_session():
result = gen_zero_out_op_3.zero_out([5, 4, 3, 2, 1])
self.assertAllEqual(result.eval(), [5, 0, 0, 0, 0])
def testAttr(self):
with self.test_session():
result = gen_zero_out_op_3.zero_out([5, 4, 3, 2, 1], preserve_index=3)
self.assertAllEqual(result.eval(), [0, 0, 0, 2, 0])
def testNegative(self):
with self.test_session():
result = gen_zero_out_op_3.zero_out([5, 4, 3, 2, 1], preserve_index=-1)
with self.assertRaisesOpError("Need preserve_index >= 0, got -1"):
result.eval()
def testLarge(self):
with self.test_session():
result = gen_zero_out_op_3.zero_out([5, 4, 3, 2, 1], preserve_index=17)
with self.assertRaisesOpError("preserve_index out of range"):
result.eval()
if __name__ == '__main__':
tf.test.main()
mzdaniel/oh-mainline | vendor/packages/scrapy/scrapy/tests/test_downloadermiddleware_redirect.py | Python | agpl-3.0 | 7,244 | 0.002761
import unittest
from scrapy.contrib.downloadermiddleware.redirect import RedirectMiddleware
from scrapy.spider import BaseSpider
from scrapy.exceptions import IgnoreRequest
from scrapy.http import Request, Response, HtmlResponse, Headers
class RedirectMiddlewareTest(unittest.TestCase):
def setUp(self):
self.spider = BaseSpider('foo')
self.mw = RedirectMiddleware()
def test_priority_adjust(self):
req = Request('http://a.com')
rsp = Response('http://a.com', headers={'Location': 'http://a.com/redirected'}, status=301)
req2 = self.mw.process_response(req, rsp, self.spider)
assert req2.priority > req.priority
def test_redirect_301(self):
def _test(method):
url = 'http://www.example.com/301'
url2 = 'http://www.example.com/redirected'
req = Request(url, method=method)
rsp = Response(url, headers={'Location': url2}, status=301)
req2 = self.mw.process_response(req, rsp, self.spider)
assert isinstance(req2, Request)
self.assertEqual(req2.url, url2)
self.assertEqual(req2.method, method)
# response without Location header but with status code is 3XX should be ignored
del rsp.headers['Location']
assert self.mw.process_response(req, rsp, self.spider) is rsp
_test('GET')
_test('POST')
_test('HEAD')
def test_dont_redirect(self):
url = 'http://www.example.com/301'
url2 = 'http://www.example.com/redirected'
req = Request(url, meta={'dont_redirect': True})
rsp = Response(url, headers={'Location': url2}, status=301)
r = self.mw.process_response(req, rsp, self.spider)
assert isinstance(r, Response)
assert r is rsp
def test_redirect_302(self):
url = 'http://www.example.com/302'
url2 = 'http://www.example.com/redirected2'
req = Request(url, method='POST', body='test',
headers={'Content-Type': 'text/plain', 'Content-length': '4'})
rsp = Response(url, headers={'Location': url2}, status=302)
req2 = self.mw.process_response(req, rsp, self.spider)
assert isinstance(req2, Request)
self.assertEqual(req2.url, url2)
self.assertEqual(req2.method, 'GET')
assert 'Content-Type' not in req2.headers, \
"Content-Type header must not be present in redirected request"
assert 'Content-Length' not in req2.headers, \
"Content-Length header must not be present in redirected request"
assert not req2.body, \
"Redirected body must be empty, not '%s'" % req2.body
# response without Location header but with status code is 3XX should be ignored
del rsp.headers['Location']
assert self.mw.process_response(req, rsp, self.spider) is rsp
def test_redirect_302_head(self):
url = 'http://www.example.com/302'
url2 = 'http://www.example.com/redirected2'
req = Request(url, method='HEAD')
rsp = Response(url, headers={'Location': url2}, status=302)
req2 = self.mw.process_response(req, rsp, self.spider)
assert isinstance(req2, Request)
self.assertEqual(req2.url, url2)
self.assertEqual(req2.method, 'HEAD')
# response without Location header but with status code is 3XX should be ignored
del rsp.headers['Location']
assert self.mw.process_response(req, rsp, self.spider) is rsp
def test_meta_refresh(self):
body = """<html>
<head><meta http-equiv="refresh" content="5;url=http://example.org/newpage" /></head>
</html>"""
req = Request(url='http://example.org')
rsp = HtmlResponse(url='http://example.org', body=body)
req2 = self.mw.process_response(req, rsp, self.spider)
assert isinstance(req2, Request)
self.assertEqual(req2.url, 'http://example.org/newpage')
def test_meta_refresh_with_high_interval(self):
# meta-refresh with high intervals don't trigger redirects
body = """<html>
<head><meta http-equiv="refresh" content="1000;url=http://example.org/newpage" /></head>
</html>"""
req = Request(url='http://example.org')
rsp = HtmlResponse(url='http://example.org', body=body)
rsp2 = self.mw.process_response(req, rsp, self.spider)
assert rsp is rsp2
def test_meta_refresh_trough_posted_request(self):
body = """<html>
<head><meta http-equiv="refresh" content="5;url=http://example.org/newpage" /></head>
</html>"""
req = Request(url='http://example.org', method='POST', body='test',
headers={'Content-Type': 'text/plain', 'Content-length': '4'})
rsp = HtmlResponse(url='http://example.org', body=body)
req2 = self.mw.process_response(req, rsp, self.spider)
assert isinstance(req2, Request)
self.assertEqual(req2.url, 'http://example.org/newpage')
self.assertEqual(req2.method, 'GET')
assert 'Content-Type' not in req2.headers, \
"Content-Type header must not be present in redirected request"
assert 'Content-Length' not in req2.headers, \
"Content-Length header must not be present in redirected request"
assert not req2.body, \
"Redirected body must be empty, not '%s'" % req2.body
def test_max_redirect_times(self):
self.mw.max_redirect_times = 1
req = Request('http://scrapytest.org/302')
rsp = Response('http://scrapytest.org/302', headers={'Location': '/redirected'}, status=302)
req = self.mw.process_response(req, rsp, self.spider)
assert isinstance(req, Request)
assert 'redirect_times' in req.meta
self.assertEqual(req.meta['redirect_times'], 1)
self.assertRaises(IgnoreRequest, self.mw.process_response, req, rsp, self.spider)
def test_ttl(self):
self.mw.max_redirect_times = 100
req = Request('http://scrapytest.org/302', meta={'redirect_ttl': 1})
rsp = Response('http://www.scrapytest.org/302', headers={'Location': '/redirected'}, status=302)
req = self.mw.process_response(req, rsp, self.spider)
assert isinstance(req, Request)
self.assertRaises(IgnoreRequest, self.mw.process_response, req, rsp, self.spider)
def test_redirect_urls(self):
req1 = Request('http://scrapytest.org/first')
rsp1 = Response('http://scrapytest.org/first', headers={'Location': '/redirected'}, status=302)
req2 = self.mw.process_response(req1, rsp1, self.spider)
rsp2 = Response('http://scrapytest.org/redirected', headers={'Location': '/redirected2'}, status=302)
req3 = self.mw.process_response(req2, rsp2, self.spider)
self.assertEqual(req2.url, 'http://scrapytest.org/redirected')
self.assertEqual(req2.meta['redirect_urls'], ['http://scrapytest.org/first'])
self.assertEqual(req3.url, 'http://scrapytest.org/redirected2')
self.assertEqual(req3.meta['redirect_urls'], ['http://scrapytest.org/first', 'http://scrapytest.org/redirected'])
if __name__ == "__main__":
unittest.main()
rohitranjan1991/home-assistant | homeassistant/components/blockchain/sensor.py | Python | mit | 1,990 | 0
"""Support for Blockchain.com sensors."""
from __future__ import annotations
from datetime import timedelta
import logging
from pyblockchain import get_balance, validate_address
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA, SensorEntity
from homeassistant.const import ATTR_ATTRIBUTION, CONF_NAME
from homeassistant.core import HomeAssistant
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.helpers.typing import ConfigType, DiscoveryInfoType
_LOGGER = logging.getLogger(__name__)
ATTRIBUTION = "Data provided by blockchain.com"
CONF_ADDRESSES = "addresses"
DEFAULT_NAME = "Bitcoin Balance"
ICON = "mdi:currency-btc"
SCAN_INTERVAL = timedelta(minutes=5)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_ADDRESSES): [cv.string],
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
}
)
def setup_platform(
hass: HomeAssistant,
config: ConfigType,
add_entities: AddEntitiesCallback,
discovery_info: DiscoveryInfoType | None = None,
) -> None:
"""Set up the Blockchain.com sensors."""
addresses = config[CONF_ADDRESSES]
name = config[CONF_NAME]
for address in addresses:
if not validate_address(address):
_LOGGER.error("Bitcoin address is not valid: %s", address)
return
add_entities([BlockchainSensor(name, addresses)], True)
class BlockchainSensor(SensorEntity):
"""Representation of a Blockchain.com sensor."""
_attr_extra_state_attributes = {ATTR_ATTRIBUTION: ATTRIBUTION}
_attr_icon = ICON
_attr_native_unit_of_measurement = "BTC"
def __init__(self, name, addresses):
"""Initialize the sensor."""
self._attr_name = name
self.addresses = addresses
def update(self):
"""Get the latest state of the sensor."""
self._attr_native_value = get_balance(self.addresses)
barak/autograd | examples/hmm_em.py | Python | mit | 2,862 | 0.003494
from __future__ import division, print_function
import autograd.numpy as np
import autograd.numpy.random as npr
from autograd.scipy.misc import logsumexp
from autograd.convenience_wrappers import value_and_grad as vgrad
from functools import partial
from os.path import join, dirname
import string
import sys
def EM(init_params, data, callback=None):
def EM_update(params):
natural_params = map(np.log, params)
loglike, E_stats = vgrad(log_partition_function)(natural_params, data) # E step
if callback: callback(loglike, params)
return map(normalize, E_stats) # M step
def fixed_point(f, x0):
x1 = f(x0)
while different(x0, x1):
x0, x1 = x1, f(x1)
return x1
def different(params1, params2):
allclose = partial(np.allclose, atol=1e-3, rtol=1e-3)
return not all(map(allclose, params1, params2))
return fixed_point(EM_update, init_params)
def normalize(a):
def replace_zeros(a):
return np.where(a > 0., a, 1.)
return a / replace_zeros(a.sum(-1, keepdims=True))
def log_partition_function(natural_params, data):
if isinstance(data, list):
return sum(map(partial(log_partition_function, natural_params), data))
log_pi, log_A, log_B = natural_params
log_alpha = log_pi
for y in data:
log_alpha = logsumexp(log_alpha[:,None] + log_A, axis=0) + log_B[:,y]
return logsumexp(log_alpha)
def initialize_hmm_parameters(num_states, num_outputs):
init_pi = normalize(npr.rand(num_states))
init_A = normalize(npr.rand(num_states, num_states))
init_B = normalize(npr.rand(num_states, num_outputs))
return init_pi, init_A, init_B
def build_dataset(filename, max_lines=-1):
"""Loads a text file, and turns each line into an encoded sequence."""
encodings = dict(map(reversed, enumerate(string.printable)))
digitize = lambda char: encodings[char] if char in encodings else len(encodings)
encode_line = lambda line: np.array(list(map(digitize, line)))
nonblank_line = lambda line: len(line) > 2
with open(filename) as f:
lines = f.readlines()
encoded_lines = map(encode_line, filter(nonblank_line, lines)[:max_lines])
num_outputs = len(encodings) + 1
return encoded_lines, num_outputs
if __name__ == '__main__':
np.random.seed(0)
np.seterr(divide='ignore')
# callback to print log likelihoods during training
print_loglike = lambda loglike, params: print(loglike)
# load training data
lstm_filename = join(dirname(__file__), 'lstm.py')
train_inputs, num_outputs = build_dataset(lstm_filename, max_lines=60)
# train with EM
num_states = 20
init_params = initialize_hmm_parameters(num_states, num_outputs)
pi, A, B = EM(init_params, train_inputs, print_loglike)
PyQuake/earthquakemodels | code/gaModel/gamodel_bbob.py | Python | bsd-3-clause | 6,592 | 0.003337
import sys
from deap import base, creator, tools
import numpy as np
from csep.loglikelihood import calcLogLikelihood
# from models.mathUtil import calcNumberBins
import models.model
import random
import array
import time
from operator import attrgetter
# from scoop import futures
import fgeneric
import bbobbenchmarks as bn
sys.path.insert(0, '../code')
toolbox = base.Toolbox()
creator.create("FitnessFunction", base.Fitness, weights=(-1.0,))
creator.create("Individual", array.array, typecode='d', fitness=creator.FitnessFunction)
# pool = Pool()
# toolbox.register("map", futures.map)
def tupleize(func):
"""A decorator that tuple-ize the result of a function. This is useful
when the evaluation function returns a single value.
"""
def wrapper(*args, **kargs):
return func(*args, **kargs),
return wrapper
def gaModel(func,NGEN,CXPB,MUTPB,modelOmega,year,region, mean, n_aval, tournsize, ftarget):
"""The main function. It evolves models, namely modelLamba or individual.
"""
# start = time.clock()
# Attribute generator
toolbox.register("attr_float", random.random)
toolbox.register("mate", tools.cxOnePoint)
toolbox.register("select", tools.selTournament, tournsize=2)
toolbox.register("mutate", tools.mutPolynomialBounded,indpb=0.1, eta = 1, low = 0, up = 1)
stats = tools.Statistics(key=lambda ind: ind.fitness.values)
stats.register("avg", np.mean)
stats.register("std", np.std)
stats.register("min", np.min)
stats.register("max", np.max)
# calculating the number of individuals of the populations based on the number of executions
y = int(n_aval / NGEN)
x = n_aval - y * NGEN
n = x + y
toolbox.register("evaluate", func, modelOmega=modelOmega, mean=mean)
toolbox.decorate("evaluate", tupleize)
toolbox.register("individual", tools.initRepeat, creator.Individual, toolbox.attr_float, len(modelOmega[0].bins))
toolbox.register("population", tools.initRepeat, list, toolbox.individual)
# logbook = tools.Logbook()
# logbook.header = "gen","min","avg","max","std"
pop = toolbox.population(n)
# Evaluate the entire population
# 2 model.bins: real data, generated model
fitnesses = list(toolbox.map(toolbox.evaluate, pop))
# numero_avaliacoes = len(pop)
# normalize fitnesses
# fitnesses = normalizeFitness(fitnesses)
for ind, fit in zip(pop, fitnesses):
ind.fitness.values = fit
for g in range(NGEN):
# Select the next generation individuals
offspring = toolbox.select(pop, len(pop))
# create offspring
offspring = list(toolbox.map(toolbox.clone, pop))
# Apply crossover and mutation on the offspring
for child1, child2 in zip(offspring[::2], offspring[1::2]):
if random.random() < CXPB:
toolbox.mate(child1, child2)
del child1.fitness.values
del child2.fitness.values
for mutant in offspring:
if random.random() < MUTPB:
toolbox.mutate(mutant)
del mutant.fitness.values
# Evaluate the individuals with an invalid fitness
invalid_ind = [ind for ind in offspring if not ind.fitness.valid]
fitnesses = list(toolbox.map(toolbox.evaluate, invalid_ind))
for ind, fit in zip(invalid_ind, fitnesses):
ind.fitness.values = fit
# The population is entirely replaced by the offspring, but the last ind replaced by best_pop
# Elitism
best_pop = tools.selBest(pop, 1)[0]
offspring = sorted(offspring, key=attrgetter("fitness"), reverse=True)
offspring[0] = best_pop
random.shuffle(offspring)
pop[:] = offspring
record = stats.compile(pop)
# logbook.record(gen=g, **record)
if (abs(record["min"]) - abs(ftarget)) < 10e-8:
return best_pop
if record["std"] < 10e-12:
sortedPop = sorted(pop, key=attrgetter("fitness"), reverse=True)
pop = toolbox.population(n)
pop[0] = sortedPop[0]
pop = toolbox.population(n)
fitnesses = list(toolbox.map(toolbox.evaluate, pop))
for ind, fit in zip(pop, fitnesses):
ind.fitness.values = fit
g += 1
record = stats.compile(pop)
# logbook.record(gen=g, **record)
return best_pop
if __name__ == "__main__":
output = sys.argv[1]
gaParams = sys.argv[2]
region = sys.argv[3]
year = sys.argv[4]
f = open(gaParams, "r")
keys = ['key', 'NGEN', 'n_aval', 'qntYears', 'CXPB', 'MUTPB', 'tournsize']
params = dict()
for line in f:
if line[0] == '#':
continue
tokens = line.split()
for key, value in zip(keys, tokens):
if key == 'key':
params[key] = value
elif key == 'CXPB' or key == 'MUTPB':
params[key] = float(value)
elif key == 'region':
params[key] = value
else:
params[key] = int(value)
f.close()
# Create a COCO experiment that will log the results under the
# ./output directory
e = fgeneric.LoggingFunction(output)
observations = list()
means = list()
for i in range(params['qntYears']):
observation = models.model.loadModelDB(region + 'jmaData', year + i)
observation.bins = observation.bins.tolist()
observations.append(observation)
means.append(observation.bins)
# del observation
mean = np.mean(means, axis=0)
param = (params['region'], params['year'], params['qntYears'])
func, opt = bn.instantiate(2, iinstance=1, param=param)
observation = models.model.loadModelDB(region + 'jmaData', year + params['qntYears'] + 1)
ftarget = calcLogLikelihood(observation, observation)
del observation
e.setfun(func, opt=ftarget)
gaModel(e.evalfun,
NGEN=params['NGEN'],
CXPB=params['CXPB'],
MUTPB=params['MUTPB'],
modelOmega=observations,
year=params['year'] +
params['qntYears'],
region=params['region'],
mean=mean,
n_aval=params['n_aval'],
tournsize=params['tournsize'],
ftarget=e.ftarget)
print('ftarget=%.e4 FEs=%d fbest-ftarget=%.4e and fbest = %.4e' % (e.ftarget, e.evaluations, e.fbest - e.ftarget, e.fbest))
e.finalizerun()
print('date and time: %s' % time.asctime())
# output = generatedModel.loglikelihood
# return((-1)*output[0])
seap-udea/tQuakes | util/Legacy/allprd.py | Python | gpl-2.0 | 391 | 0.038363
from rutinas import *
inputdir="ETERNA-INI/"
outputdir="ETERNA-OUT/"
inputdb=argv[1]
f=open(inputdir+"%s-INI/numeracionsismos.dat"%inputdb,"r")
for line in f:
if "#" in line:continue
line=line.strip()
numsismo=line.split()[0]
print "Generando datos de sismo '%s'..."%numsismo
cmd="python prd2dat.py %s/%s-OUT/ %s"%(outputdir,inputdb,numsismo)
system(cmd)
f.close()
arielrossanigo/fades | tests/test_pipmanager.py | Python | gpl-3.0 | 4,315 | 0.001391
# Copyright 2015 Facundo Batista, Nicolás Demarchi
#
# This program is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 3, as published
# by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranties of
# MERCHANTABILITY, SATISFACTORY QUALITY, or FITNESS FOR A PARTICULAR
# PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program. If not, see <http://www.gnu.org/licenses/>.
#
# For further info, check https://github.com/PyAr/fades
""" Tests for pip related code. """
import unittest
from unittest.mock import patch
import logassert
from fades.pipmanager import PipManager
from fades import helpers
class PipManagerTestCase(unittest.TestCase):
""" Check parsing for `pip show`. """
def setUp(self):
logassert.setup(self, 'fades.pipmanager')
def test_get_parsing_ok(self):
mocked_stdout = ['Name: foo',
'Version: 2.0.0',
'Location: ~/.local/share/fades/86cc492/lib/python3.4/site-packages',
'Requires: ']
mgr = PipManager('/usr/bin', pip_installed=True)
with patch.object(helpers, 'logged_exec') as mock:
mock.return_value = mocked_stdout
version = mgr.get_version('foo')
self.assertEqual(version, '2.0.0')
def test_get_parsing_error(self):
mocked_stdout = ['Name: foo',
'Release: 2.0.0',
'Location: ~/.local/share/fades/86cc492/lib/python3.4/site-packages',
'Requires: ']
mgr = PipManager('/usr/bin', pip_installed=True)
with patch.object(helpers, 'logged_exec') as mock:
version = mgr.get_version('foo')
mock.return_value = mocked_stdout
self.assertEqual(version, '')
self.assertLoggedError('Fades is having problems getting the installed version. '
'Run with -v or check the logs for details')
def test_real_case_levenshtein(self):
mocked_stdout = [
'Metadata-Version: 1.1',
'Name: python-Levenshtein',
'Version: 0.12.0',
'License: GPL',
]
mgr = PipManager('/usr/bin', pip_installed=True)
with patch.object(helpers, 'logged_exec') as mock:
mock.return_value = mocked_stdout
version = mgr.get_version('foo')
self.assertEqual(version, '0.12.0')
def test_install(self):
mgr = PipManager('/usr/bin', pip_installed=True)
with patch.object(helpers, 'logged_exec') as mock:
mgr.install('foo')
mock.assert_called_with(['/usr/bin/pip', 'install', 'foo'])
def test_install_with_options(self):
mgr = PipManager('/usr/bin', pip_installed=True, options=['--bar baz'])
with patch.object(helpers, 'logged_exec') as mock:
mgr.install('foo')
mock.assert_called_with(['/usr/bin/pip', 'install', 'foo', '--bar', 'baz'])
def test_install_with_options_using_equal(self):
mgr = PipManager('/usr/bin', pip_installed=True, options=['--bar=baz'])
with patch.object(helpers, 'logged_exec') as mock:
mgr.install('foo')
mock.assert_called_with(['/usr/bin/pip', 'install', 'foo', '--bar=baz'])
def test_install_raise_error(self):
mgr = PipManager('/usr/bin', pip_installed=True)
with patch.object(helpers, 'logged_exec') as mock:
mock.side_effect = Exception("Kapow!")
with self.assertRaises(Exception):
mgr.install('foo')
self.assertLoggedError("Error installing foo: Kapow!")
def test_install_without_pip(self):
mgr = PipManager('/usr/bin', pip_installed=False)
with patch.object(helpers, 'logged_exec') as mocked_exec:
with patch.object(mgr, '_brute_force_install_pip') as mocked_install_pip:
mgr.install('foo')
self.assertEqual(mocked_install_pip.call_count, 1)
mocked_exec.assert_called_with(['/usr/bin/pip', 'install', 'foo'])
tos-kamiya/pyrem_torq | src/pyrem_torq/treeseq/__init__.py | Python | mit | 29 | 0
from treeseq_funcs import *
dataxu/ansible | lib/ansible/modules/cloud/vmware/vmware_local_role_manager.py | Python | gpl-3.0 | 10,915 | 0.003207
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Author(s): Abhijeet Kasurde <akasurde@redhat.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'
}
DOCUMENTATION = '''
---
module: vmware_local_role_manager
short_description: Manage local roles on an ESXi host
description:
- Manage local roles on an ESXi host
version_added: "2.5"
author: Abhijeet Kasurde (@akasurde) <akasurde@redhat.com>
notes:
- Tested on ESXi 6.5
- Be sure that the ESXi user used for login, has the appropriate rights to create / delete / edit roles
requirements:
- "python >= 2.6"
- PyVmomi
options:
local_role_name:
description:
- The local role name to be managed.
required: True
local_privilege_ids:
description:
- The list of privileges that role needs to have.
- Please see U(https://docs.vmware.com/en/VMware-vSphere/6.0/com.vmware.vsphere.security.doc/GUID-ED56F3C4-77D0-49E3-88B6-B99B8B437B62.html)
default: []
state:
description:
- Indicate desired state of the role.
- If the role already exists when C(state=present), the role info is updated.
choices: ['present', 'absent']
default: present
force_remove:
description:
- If set to C(False) then prevents the role from being removed if any permissions are using it.
default: False
type: bool
extends_documentation_fragment: vmware.documentation
'''
EXAMPLES = '''
# Example vmware_local_role_manager command from Ansible Playbooks
- name: Add local role to ESXi
vmware_local_role_manager:
hostname: esxi_hostname
username: root
password: vmware
local_role_name: vmware_qa
state: present
- name: Add local role with privileges to ESXi
vmware_local_role_manager:
hostname: esxi_hostname
username: root
password: vmware
local_role_name: vmware_qa
local_privilege_ids: [ 'Folder.Create', 'Folder.Delete']
state: present
- name: Remove local role from ESXi
vmware_local_role_manager:
hostname: esxi_hostname
username: root
password: vmware
local_role_name: vmware_qa
state: absent
'''
RETURN = r'''
local_role_name:
description: Name of local role
returned: always
type: string
role_id:
description: ESXi generated local role id
returned: always
type: int
old_privileges:
description: List of privileges of role before update
returned: on update
type: list
new_privileges:
description: List of privileges of role after update
returned: on update
type: list
'''
try:
from pyVmomi import vim, vmodl
except ImportError:
pass
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.vmware import PyVmomi, vmware_argument_spec
class VMwareLocalRoleManager(PyVmomi):
def __init__(self, module):
super(VMwareLocalRoleManager, self).__init__(module)
self.module = module
self.params = module.params
self.role_name = self.params['local_role_name']
self.state = self.params['state']
self.priv_ids = self.params['local_privilege_ids']
self.force = not self.params['force_remove']
self.current_role = None
if self.content.authorizationManager is None:
self.module.fail_json(msg="Failed to get local authorization manager settings.",
details="It seems that %s is a vCenter server "
"instead of an ESXi server" % self.params['hostname'])
def process_state(self):
local_role_manager_states = {
'absent': {
'present': self.state_remove_role,
'absent': self.state_exit_unchanged,
},
'present': {
'present': self.state_update_role,
'absent': self.state_create_role,
}
}
try:
local_role_manager_states[self.state][self.check_local_role_manager_state()]()
except vmodl.RuntimeFault as runtime_fault:
self.module.fail_json(msg=runtime_fault.msg)
except vmodl.MethodFault as method_fault:
self.module.fail_json(msg=method_fault.msg)
except Exception as e:
self.module.fail_json(msg=str(e))
def check_local_role_manager_state(self):
auth_role = self.find_authorization_role()
if auth_role:
self.current_role = auth_role
return 'present'
else:
return 'absent'
def find_authorization_role(self):
desired_role = None
for role in self.content.authorizationManager.roleList:
if role.name == self.role_name:
desired_role = role
return desired_role
def state_create_role(self):
try:
role_id = self.content.authorizationManager.AddAuthorizationRole(name=self.role_name,
privIds=self.priv_ids)
except vim.fault.AlreadyExists as e:
self.module.fail_json(msg="Failed to create a role %s as the user specified role name "
"already exists." % self.role_name,
details=e.msg)
except vim.fault.InvalidName as e:
self.module.fail_json(msg="Failed to create a role %s as the user specified role name "
"is empty" % self.role_name,
details=e.msg)
except vmodl.fault.InvalidArgument as e:
self.module.fail_json(msg="Failed to create a role %s as the user specified privileges "
"are unknown" % self.role_name,
details=e.msg)
result = {
'changed': True,
'role_id': role_id,
'privileges': self.priv_ids,
'local_role_name': self.role_name,
}
self.module.exit_json(**result)
def state_remove_role(self):
try:
self.content.authorizationManager.RemoveAuthorizationRole(roleId=self.current_role.roleId,
failIfUsed=self.force)
except vim.fault.NotFound as e:
self.module.fail_json(msg="Failed to remove a role %s as the user specified role name "
"does not exist." % self.role_name,
details=e.msg)
except vim.fault.RemoveFailed as e:
msg = "Failed to remove a role %s as the user specified role name." % self.role_name
if self.force:
msg += " Use force_remove as True."
self.module.fail_json(msg=msg, details=e.msg)
except vmodl.fault.InvalidArgument as e:
self.module.fail_json(msg="Failed to remove a role %s as the user specified "
"role is a system role" % self.role_name,
details=e.msg)
result = {
'changed': True,
'role_id': self.current_role.roleId,
'local_role_name': self.role_name,
}
self.module.exit_json(**result)
def state_exit_unchanged(self):
self.module.exit_json(changed=False)
def state_update_role(self):
current_privileges = set(self.current_role.privilege)
# Add system-defined privileges, "System.Anonymous", "System.View", and "System.Read".
self.params['local_privilege_ids'].extend(['System.Anonymous', 'System.Read', 'System.View'])
desired_privileges = set(self.params['local_privilege_ids'])
changed_privileges = current_privileges ^ desired_privileges
changed_privileges = list(changed_privileges)
if not changed_privileges:
blassphemy/mqtt-ble-gateway | main.py | Python | mit | 208 | 0.009615
import ble
import mqtt
from config import *
try:
ble.start()
mqtt.start()
finally:
#notify MQTT subscribers that gateway is offline
mqtt.publish(TOPIC_PREFIX, "offline")
ble.stop()
3WiseMen/python | pystock/pystock_xingAPI/abstract_component.py | Python | mit | 1,088 | 0.029412
#abstract_component.py
import threading
class AbstractComponent:
logger = None
def init(self):
pass
def finalize(self):
pass
class AbstractQueryProviderComponent(AbstractComponent):
async_query_availabale = False
def getAvailableQueryCodeSet(self):
raise NotImplementedError('You have to override AbstractQueryProviderComponent.getAvailableQuerySet method')
def query(self, query_code, arg_set):
raise NotImplementedError('You have to override AbstractQueryProviderComponent.query method')
class AbstractSubscriptionProviderComponent(AbstractComponent):
def getAvailableSubscriptionCodeSet(self):
raise NotImplementedError('You have to override AbstractSubscriptionProviderComponent.getAvailableSubscriptionSet method')
def subscribe(self, subscribe_code, arg_set, queue):
raise NotImplementedError('You have to override AbstractSubscriptionProviderComponent.subscribe method')
def unsubscribe(self, subscribe_code, arg_set, queue):
raise NotImplementedError('You have to override AbstractSubscriptionProviderComponent.unsubscribe method')
betoesquivel/PLYpractice | testingLexer.py | Python | mit | 126 | 0
import lexer
s = "program id; var beto: int; { id = 1234; }"
lexer.lexer.input(s)
for token in lexer.lexer:
print token
henriquebastos/django-test-without-migrations | tests/nose_settings.py | Python | mit | 509 | 0.005894
# coding: utf-8
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': ':memory:',
}
}
INSTALLED_APPS = (
'tests.myapp',
'test_without_migrations',
'django_nose'
)
SITE_ID=1,
SECRET_KEY='secret'
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
)
TEST_RUNNER = 'django_nose.NoseTestSuiteRunner'
TEST_WITHOUT_MIGRATIONS_COMMAND = 'django_nose.management.commands.test.Command'
our-city-app/oca-backend | src/shop/handlers.py | Python | apache-2.0 | 26,782 | 0.002576
# -*- coding: utf-8 -*-
# Copyright 2020 Green Valley Belgium NV
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# @@license_version:1.7@@
import binascii
import datetime
import json
import logging
import os
import time
import urllib
import webapp2
from babel import Locale
from google.appengine.api import search, users as gusers
from google.appengine.ext import db
from google.appengine.ext.deferred import deferred
from google.appengine.ext.webapp import template
from markdown import Markdown
from mcfw.cache import cached
from mcfw.consts import MISSING
from mcfw.exceptions import HttpNotFoundException
from mcfw.restapi import rest, GenericRESTRequestHandler
from mcfw.rpc import serialize_complex_value, arguments, returns
from rogerthat.bizz.communities.communities import get_communities_by_country, get_community, get_community_countries
from rogerthat.bizz.friends import user_code_by_hash, makeFriends, ORIGIN_USER_INVITE
from rogerthat.bizz.registration import get_headers_for_consent
from rogerthat.bizz.service import SERVICE_LOCATION_INDEX, re_index_map_only
from rogerthat.bizz.session import create_session
from rogerthat.dal.app import get_app_by_id
from rogerthat.exceptions.login import AlreadyUsedUrlException, InvalidUrlException, ExpiredUrlException
from rogerthat.models import ProfilePointer, ServiceProfile
from rogerthat.pages.legal import DOC_TERMS_SERVICE, get_current_document_version, get_version_content, \
get_legal_language, LANGUAGES as LEGAL_LANGUAGES
from rogerthat.pages.login import SetPasswordHandler
from rogerthat.rpc import users
from rogerthat.rpc.service import BusinessException
from rogerthat.settings import get_server_settings
from rogerthat.templates import get_languages_from_request
from rogerthat.to import ReturnStatusTO, RETURNSTATUS_TO_SUCCESS, WarningReturnStatusTO
from rogerthat.translations import DEFAULT_LANGUAGE
from rogerthat.utils import bizz_check, try_or_defer, get_country_code_by_ipaddress
from rogerthat.utils.app import get_app_id_from_app_user
from rogerthat.utils.cookie import set_cookie
from rogerthat.utils.service import create_service_identity_user
from shop import SHOP_JINJA_ENVIRONMENT
from shop.bizz import create_customer_signup, complete_customer_signup, get_organization_types, \
update_customer_consents, get_customer_signup, validate_customer_url_data, \
get_customer_consents
from shop.business.permissions import is_admin
from shop.constants import OFFICIALLY_SUPPORTED_LANGUAGES
from shop.models import Customer
from shop.to import CompanyTO, CustomerTO, CustomerLocationTO
from shop.view import get_shop_context, get_current_http_host
from solution_server_settings import get_solution_server_settings
from solutions import translate
from solutions.common.bizz.grecaptcha import recaptcha_verify
from solutions.common.bizz.settings import get_consents_for_community
from solutions.common.integrations.cirklo.cirklo import get_whitelisted_merchant
from solutions.common.integrations.cirklo.models import CirkloMerchant, CirkloCity
from solutions.common.markdown_newtab import NewTabExtension
from solutions.common.models import SolutionServiceConsent
from solutions.common.restapi.services import do_create_service
from solutions.common.to.settings import PrivacySettingsGroupTO
class StaticFileHandler(webapp2.RequestHandler):
def get(self, filename):
cur_path = os.path.dirname(__file__)
path = os.path.join(cur_path, u'html', filename)
with open(path, 'r') as f:
self.response.write(f.read())
class GenerateQRCodesHandler(webapp2.RequestHandler):
def get(self):
current_user = gusers.get_current_user()
if not is_admin(current_user):
self.abort(403)
path = os.path.join(os.path.dirname(__file__), 'html', 'generate_qr_codes.html')
context = get_shop_context()
self.response.out.write(template.render(path, context))
class CustomerMapHandler(webapp2.RequestHandler):
def get(self, app_id):
path = os.path.join(os.path.dirname(__file__), 'html', 'customer_map.html')
settings = get_server_settings()
lang = get_languages_from_request(self.request)[0]
translations = {
'merchants': translate(lang, 'merchants'),
'merchants_with_terminal': translate(lang, 'merchants_with_terminal'),
'community_services': translate(lang, 'community_services'),
'care': translate(lang, 'care'),
'associations': translate(lang, 'associations'),
}
params = {
'maps_key': settings.googleMapsKey,
'app_id': app_id,
'translations': json.dumps(translations)
}
self.response.out.write(template.render(path, params))
@cached(2, 21600)
@returns(unicode)
@arguments(app_id=unicode)
def get_customer_locations_for_app(app_id):
query_string = (u'app_ids:"%s"' % app_id)
query = search.Query(query_string=query_string,
options=search.QueryOptions(returned_fields=['service', 'name', 'location', 'description'],
limit=1000))
search_result = search.Index(name=SERVICE_LOCATION_INDEX).search(query)
customers = {customer.service_email: customer for customer in Customer.list_by_app_id(app_id)}
def map_result(service_search_result):
customer_location = CustomerLocationTO()
for field in service_search_result.fields:
if field.name == 'service':
customer = customers.get(field.value.split('/')[0])
if customer:
customer_location.address = customer.address1
customer_location.type = customer.organization_type
if customer.address2:
customer_location.address += '\n%s' % customer.address2
if customer.zip_code or customer.city:
customer_location.address += '\n'
if customer.zip_code:
customer_location.address += customer.zip_code
if customer.zip_code and customer.city:
customer_location.address += ' '
if customer.city:
customer_location.address += customer.city
else:
customer_location.type = ServiceProfile.ORGANIZATION_TYPE_PROFIT
continue
if field.name == 'name':
customer_location.name = field.value
continue
if field.name == 'location':
customer_location.lat = field.value.latitude
customer_location.lon = field.value.longitude
continue
if field.name == 'description':
customer_location.description = field.value
continue
return customer_location
return json.dumps(serialize_complex_value([map_result(r) for r in search_result.results], CustomerLocationTO, True))
class CustomerMapServicesHandler(webapp2.RequestHandler):
def get(self, app_id):
customer_locations = get_customer_locations_for_app(app_id)
self.response.write(customer_locations)
@rest('/unauthenticated/loyalty/scanned', 'get', read_only_access=True, authenticated=False)
@returns(ReturnStatusTO)
@arguments(user_email_hash=unicode, merchant_email=unicode, app_id=unicode)
def rest_loyalty_scanned(user_email_hash, merchant_email, app_id):
try:
bizz_check(user_email_hash is not MISSING, 'user_email_hash is required')
bizz_check(merchant_email is not MISSING, 'me
|
cozy-labs/cozy-fuse
|
cozyfuse/dbutils.py
|
Python
|
bsd-3-clause
| 10,338 | 0.000193 |
import json
import string
import random
import requests
import logging
import local_config
from couchdb import Server, http
from couchdb.http import PreconditionFailed, ResourceConflict
logger = logging.getLogger(__name__)
local_config.configure_logger(logger)
def create_db(database):
server = Server('http://localhost:5984/')
try:
db = server.create(database)
logger.info('[DB] Database %s created' % database)
except PreconditionFailed:
db = server[database]
logger.info('[DB] Database %s already exists.' % database)
return db
def get_db(database, credentials=True):
'''
Get or create given database from/in CouchDB.
'''
try:
server = Server('http://localhost:5984/')
if credentials:
server.resource.credentials = \
local_config.get_db_credentials(database)
return server[database]
except Exception:
logging.exception('[DB] Cannot connect to the database')
return None
def get_db_and_server(database):
'''
Get or create given database from/in CouchDB.
'''
try:
server = Server('http://localhost:5984/')
server.resource.credentials = local_config.get_db_credentials(database)
db = server[database]
return (db, server)
except Exception:
logging.exception('[DB] Cannot connect to the database %s' % database)
return (None, None)
def init_db(database):
'''
Create all required views to make Cozy FUSE working properly.
'''
create_db(database)
init_database_views(database)
password = get_random_key()
create_db_user(database, database, password)
logger.info('[DB] Local database %s initialized' % database)
return (database, password)
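# Illustrative usage sketch (not part of the original module): a typical
# bootstrap for a local database using the helpers above. The database name
# 'cozy-files' is an assumed placeholder.
#
#     db_name, db_password = init_db('cozy-files')
#     db = get_db(db_name)
#     if db is not None:
#         logger.info('connected to %s' % db_name)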
def remove_db(database):
'''
Destroy given database.
'''
server = Server('http://localhost:5984/')
try:
server.delete(database)
except http.ResourceNotFound:
logger.info('[DB] Local database %s already removed' % database)
logger.info('[DB] Local database %s removed' % database)
def get_device(name):
'''
Get device corresponding to given name. Device is returned as a dict.
'''
try:
device = list(get_db(name).view("device/all", key=name))[0].value
except IndexError:
device = None
return device
def get_folders(db):
return db.view("folder/all")
def get_files(db):
return db.view("file/all")
def get_folder(db, path):
if len(path) > 0 and path[0] != '/':
path = '/' + path
try:
folder = list(db.view("folder/byFullPath", key=path))[0].value
except IndexError:
folder = None
return folder
def get_file(db, path):
if len(path) > 0 and path[0] != '/':
path = '/' + path
try:
file_doc = list(db.view("file/byFullPath", key=path))[0].value
except IndexError:
file_doc = None
return file_doc
def get_random_key():
'''
Generate a random key of 20 chars. The first character is not a digit
because CouchDB does not like strings that start with a digit.
'''
chars = string.ascii_lowercase + string.digits
random_val = ''.join(random.choice(chars) for x in range(19))
return random.choice(string.ascii_lowercase) + random_val
def create_db_user(database, login, password, protocol="http"):
'''
Create a user for given *database*. User credentials are *login* and
*password*.
'''
headers = {'content-type': 'application/json'}
data = {
"_id": "org.couchdb.user:%s" % login,
"name": login,
"type": "user",
"roles": [],
"password": password
}
requests.post('%s://localhost:5984/_users' % (protocol),
data=json.dumps(data),
headers=headers,
verify=False)
headers = {'content-type': 'application/json'}
data = {
"admins": {
"names": [login],
"roles": []
},
"members": {
"names": [login],
"roles": []
},
}
requests.put('%s://localhost:5984/%s/_security' % (protocol, database),
data=json.dumps(data),
headers=headers,
verify=False)
logger.info('[DB] Db user created')
def remove_db_user(database):
'''
Delete user created for this database.
'''
response = requests.get(
'http://localhost:5984/_users/org.couchdb.user:%s' % database)
rev = response.json().get("_rev", "")
response = requests.delete(
'http://localhost:5984/_users/org.couchdb.user:%s?rev=%s' %
(database, rev)
)
logger.info('[DB] Db user %s deleted' % database)
def init_database_view(docType, db):
'''
Add view in database for given docType.
'''
db["_design/%s" % docType.lower()] = {
"views": {
"all": {
"map": """function (doc) {
if (doc.docType === \"%s\") {
emit(doc._id, doc)
}
}""" % docType
},
"byFolder": {
"map": """function (doc) {
if (doc.docType === \"%s\") {
emit(doc.path, doc)
}
}""" % docType
},
"byFullPath": {
"map": """function (doc) {
if (doc.docType === \"%s\") {
emit(doc.path + '/' + doc.name, doc);
}
}""" % docType
}
},
"filters": {
"all": """function (doc, req) {
return doc.docType === \"%s\"
}""" % docType
}
}
def init_database_views(database):
'''
Initialize database:
* Create database
* Initialize folder, file, binary and device views
'''
db = get_db(database, credentials=False)
try:
init_database_view('Folder', db)
logger.info('[DB] Folder design document created')
except ResourceConflict:
logger.warn('[DB] Folder design document already exists')
try:
init_database_view('File', db)
logger.info('[DB] File design document created')
except ResourceConflict:
logger.warn('[DB] File design document already exists')
try:
db["_design/device"] = {
"views": {
"all": {
"map": """function (doc) {
if (doc.docType === \"Device\") {
emit(doc.login, doc)
}
}"""
},
"byUrl": {
"map": """function (doc) {
if (doc.docType === \"Device\") {
emit(doc.url, doc)
}
}"""
}
}
}
logger.info('[DB] Device design document created')
except ResourceConflict:
logger.warn('[DB] Device design document already exists')
try:
db["_design/binary"] = {
"views": {
"all": {
"map": """function (doc) {
if (doc.docType === \"Binary\") {
emit(doc._id, doc)
}
}"""
}
}
}
logger.info('[DB] Binary design document created')
except ResourceConflict:
logger.warn('[DB] Binary design document already exists')
def init_device(database, url, path, device_pwd, device_id):
'''
Create device objects with the filter to apply when synchronizing them.
'''
db = get_db(database)
device = get_device(database)
# Update device
device['password'] = device_pwd
device['change'] = 0
device['url'] = url
device['fol
|
mwarkentin/ansible
|
plugins/inventory/vmware.py
|
Python
|
gpl-3.0
| 6,210 | 0.007085 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
VMWARE external inventory script
=================================
shamelessly copied from existing inventory scripts.
This script and its ini can be used more than once,
i.e. vmware.py/vmware_colo.ini vmware_idf.py/vmware_idf.ini
(the script can be a symlink),
so if you don't have a clustered vCenter but multiple ESX machines or
just different clusters, you can have an inventory per each and automatically
group hosts based on file name or specify a group in the ini.
'''
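# Illustrative sketch (assumption, not shipped with this script): a minimal
# companion ini covering the options read below; host, credentials and cache
# settings are hypothetical placeholders.
#
#     [defaults]
#     guests_only = True
#     hw_group = esx_hosts
#     vm_group = esx_guests
#     cache_dir = /tmp/ansible_vmware_cache
#     cache_max_age = 3600
#
#     [auth]
#     host = vcenter.example.com
#     user = inventory
#     password = secret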
import os
import sys
import time
import ConfigParser
from psphere.client import Client
from psphere.managedobjects import HostSystem
try:
import json
except ImportError:
import simplejson as json
def save_cache(cache_item, data, config):
''' saves item to cache '''
dpath = config.get('defaults', 'cache_dir')
try:
cache = open('/'.join([dpath,cache_item]), 'w')
cache.write(json.dumps(data))
cache.close()
except IOError, e:
pass # not really sure what to do here
def get_cache(cache_item, config):
''' returns cached item '''
dpath = config.get('defaults', 'cache_dir')
inv = {}
try:
cache = open('/'.join([dpath,cache_item]), 'r')
inv = json.loads(cache.read())
cache.close()
except IOError, e:
pass # not really sure what to do here
return inv
def cache_available(cache_item, config):
''' checks if we have a 'fresh' cache available for item requested '''
if config.has_option('defaults', 'cache_dir'):
dpath = config.get('defaults', 'cache_dir')
try:
existing = os.stat( '/'.join([dpath,cache_item]))
except:
# cache doesn't exist or isn't accessible
return False
if config.has_option('defaults', 'cache_max_age'):
maxage = config.get('defaults', 'cache_max_age')
if (existing.st_mtime - int(time.time())) <= maxage:
return True
return False
def get_host_info(host):
''' Get variables about a specific host '''
hostinfo = {
'vmware_name' : host.name,
'vmware_tag' : host.tag,
'vmware_parent': host.parent.name,
}
for k in host.capability.__dict__.keys():
if k.startswith('_'):
continue
try:
hostinfo['vmware_' + k] = str(host.capability[k])
except:
continue
return hostinfo
def get_inventory(client, config):
''' Reads the inventory from cache or vmware api '''
if cache_available('inventory', config):
inv = get_cache('inventory',config)
else:
inv= { 'all': {'hosts': []}, '_meta': { 'hostvars': {} } }
default_group = os.path.basename(sys.argv[0]).rstrip('.py')
if config.has_option('defaults', 'guests_only'):
guests_only = config.get('defaults', 'guests_only')
else:
guests_only = True
if not guests_only:
if config.has_option('defaults','hw_group'):
hw_group = config.get('defaults','hw_group')
else:
hw_group = default_group + '_hw'
inv[hw_group] = []
if config.has_option('defaults','vm_group'):
vm_group = config.get('defaults','vm_group')
else:
vm_group = default_group + '_vm'
inv[vm_group] = []
# Loop through physical hosts:
hosts = HostSystem.all(client)
for host in hosts:
if not guests_only:
inv['all']['hosts'].append(host.name)
inv[hw_group].append(host.name)
if host.tag:
taggroup = 'vmware_' + host.tag
if taggroup in inv:
inv[taggroup].append(host.name)
else:
inv[taggroup] = [ host.name ]
inv['_meta']['hostvars'][host.name] = get_host_info(host)
save_cache(host.name, inv['_meta']['hostvars'][host.name], config)
for vm in host.vm:
inv['all']['hosts'].append(vm.name)
inv[vm_group].append(vm.name)
if vm.tag:
taggroup = 'vmware_' + vm.tag
if taggroup in inv:
inv[taggroup].append(vm.name)
else:
inv[taggroup] = [ vm.name ]
inv['_meta']['hostvars'][vm.name] = get_host_info(host)
save_cache(vm.name, inv['_meta']['hostvars'][vm.name], config)
save_cache('inventory', inv, config)
return json.dumps(inv)
def get_single_host(client, config, hostname):
inv = {}
if cache_available(hostname, config):
inv = get_cache(hostname,config)
else:
hosts = HostSystem.all(client) #TODO: figure out single host getter
for host in hosts:
if hostname == host.name:
inv = get_host_info(host)
break
for vm in host.vm:
if hostname == vm.name:
inv = get_host_info(host)
break
save_cache(hostname,inv,config)
return json.dumps(inv)
if __name__ == '__main__':
inventory = {}
hostname = None
if len(sys.argv) > 1:
if sys.argv[1] == "--host":
hostname = sys.argv[2]
# Read config
config = ConfigParser.SafeConfigParser()
for configfilename in [os.path.abspath(sys.argv[0]).rstrip('.py') + '.ini', 'vmware.ini']:
if os.path.exists(configfilename):
config.read(configfilename)
break
try:
client = Client( config.get('auth','host'),
config.get('auth','user'),
config.get('auth','password'),
)
except Exception, e:
client = None
#print >> STDERR "Unable to login (only cache avilable): %s", str(e)
# acitually do the work
if hostname is None:
inventory = get_inventory(client, config)
else:
inventory = get_single_host(client, config, hostname)
# return to ansible
print inventory
|
egineering-llc/egat_example_project
|
tests/test_helpers/selenium_helper.py
|
Python
|
mit
| 338 | 0.002959 |
from selenium.webdriver.support.select import Select
def get_selected_option(browser, css_selector):
# Takes a css selector for a <select> element and returns the value of
# the selected option
select = Select(browser.find_element_by_css_selector(css_selector))
return select.first_selected_option.get_attribute('value')
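# Illustrative usage sketch (assumption): 'browser' is a selenium WebDriver
# and the CSS selector is a hypothetical example.
#
#     current_country = get_selected_option(browser, 'select#country')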
|
WarwickAnimeSoc/aniMango
|
archive/migrations/0002_auto_20181215_1934.py
|
Python
|
mit
| 1,365 | 0.003663 |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.5 on 2018-12-15 19:34
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('archive', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='item',
name='date',
field=models.DateField(help_text='Date of creation or last known time'),
),
migrations.AlterField(
model_name='item',
name='details',
field=models.TextField(blank=True, help_text='Any details about the item', null=True),
),
migrations.AlterField(
model_name='item',
name='file',
field=models.FileField(help_text='The file that should be uploaded', upload_to='archive/'),
),
migrations.AlterField(
model_name='item',
name='name',
field=models.CharField(blank=True, help_text='Displayed name rather than file name (Note. filename will still be shown)', max_length=40, null=True),
),
migrations.AlterField(
model_name='item',
name='type',
field=models.CharField(choices=[('im', 'Image'), ('tx', 'Text File'), ('we', 'Website File')], default='tx', max_length=2),
),
]
|
darcyliu/storyboard
|
boto/emr/connection.py
|
Python
|
mit
| 19,314 | 0.002537 |
# Copyright (c) 2010 Spotify AB
# Copyright (c) 2010-2011 Yelp
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
"""
Represents a connection to the EMR service
"""
import types
import boto
import boto.utils
from boto.ec2.regioninfo import RegionInfo
from boto.emr.emrobject import JobFlow, RunJobFlowResponse
from boto.emr.emrobject import AddInstanceGroupsResponse, ModifyInstanceGroupsResponse
from boto.emr.step import JarStep
from boto.connection import AWSQueryConnection
from boto.exception import EmrResponseError
class EmrConnection(AWSQueryConnection):
APIVersion = boto.config.get('Boto', 'emr_version', '2009-03-31')
DefaultRegionName = boto.config.get('Boto', 'emr_region_name', 'us-east-1')
DefaultRegionEndpoint = boto.config.get('Boto', 'emr_region_endpoint',
'elasticmapreduce.amazonaws.com')
ResponseError = EmrResponseError
# Constants for AWS Console debugging
DebuggingJar = 's3n://us-east-1.elasticmapreduce/libs/script-runner/script-runner.jar'
DebuggingArgs = 's3n://us-east-1.elasticmapreduce/libs/state-pusher/0.1/fetch'
def __init__(self, aws_access_key_id=None, aws_secret_access_key=None,
is_secure=True, port=None, proxy=None, proxy_port=None,
proxy_user=None, proxy_pass=None, debug=0,
https_connection_factory=None, region=None, path='/'):
if not region:
region = RegionInfo(self, self.DefaultRegionName,
self.DefaultRegionEndpoint)
self.region = region
AWSQueryConnection.__init__(self, aws_access_key_id,
aws_secret_access_key,
is_secure, port, proxy, proxy_port,
proxy_user, proxy_pass,
self.region.endpoint, debug,
https_connection_factory, path)
def _required_auth_capability(self):
return ['emr']
def describe_jobflow(self, jobflow_id):
"""
Describes a single Elastic MapReduce job flow
:type jobflow_id: str
:param jobflow_id: The job flow id of interest
"""
jobflows = self.describe_jobflows(jobflow_ids=[jobflow_id])
if jobflows:
return jobflows[0]
def describe_jobflows(self, states=None, jobflow_ids=None,
created_after=None, created_before=None):
"""
Retrieve all the Elastic MapReduce job flows on your account
:type states: list
:param states: A list of strings with job flow states wanted
:type jobflow_ids: list
:param jobflow_ids: A list of job flow IDs
:type created_after: datetime
:param created_after: Bound on job flow creation time
:type created_before: datetime
:param created_before: Bound on job flow creation time
"""
params = {}
if states:
self.build_list_params(params, states, 'JobFlowStates.member')
if jobflow_ids:
self.build_list_params(params, jobflow_ids, 'JobFlowIds.member')
if created_after:
params['CreatedAfter'] = created_after.strftime(
boto.utils.ISO8601)
if created_before:
params['CreatedBefore'] = created_before.strftime(
boto.utils.ISO8601)
return self.get_list('DescribeJobFlows', params, [('member', JobFlow)])
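# Illustrative usage sketch (assumption, Python 2 as in the rest of this
# module): the credentials and state filter are hypothetical placeholders.
#
#     conn = EmrConnection('<aws access key>', '<aws secret key>')
#     for flow in conn.describe_jobflows(states=['RUNNING', 'WAITING']):
#         print flow.jobflowid, flow.name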
def terminate_jobflow(self, jobflow_id):
"""
Terminate an Elastic MapReduce job flow
:type jobflow_id: str
:param jobflow_id: A jobflow id
"""
self.terminate_jobflows([jobflow_id])
def terminate_jobflows(self, jobflow_ids):
"""
Terminate an Elastic MapReduce job flow
:type jobflow_ids: list
:param jobflow_ids: A list of job flow IDs
"""
params = {}
self.build_list_params(params, jobflow_ids, 'JobFlowIds.member')
return self.get_status('TerminateJobFlows', params, verb='POST')
def add_jobflow_steps(self, jobflow_id, steps):
"""
Adds steps to a jobflow
:type jobflow_id: str
:param jobflow_id: The job flow id
:type steps: list(boto.emr.Step)
:param steps: A list of steps to add to the job
"""
if type(steps) != types.ListType:
steps = [steps]
params = {}
params['JobFlowId'] = jobflow_id
# Step args
step_args = [self._build_step_args(step) for step in steps]
params.update(self._build_step_list(step_args))
return self.get_object(
'AddJobFlowSteps', params, RunJobFlowResponse, verb='POST')
def add_instance_groups(self, jobflow_id, instance_groups):
"""
Adds instance groups to a running cluster.
:type jobflow_id: str
:param jobflow_id: The id of the jobflow which will take the
new instance groups
:type instance_groups: list(boto.emr.InstanceGroup)
:param instance_groups: A list of instance groups to add to the job
"""
if type(instance_groups) != types.ListType:
instance_groups = [instance_groups]
params = {}
params['JobFlowId'] = jobflow_id
params.update(self._build_instance_group_list_args(instance_groups))
return self.get_object('AddInstanceGroups', params,
AddInstanceGroupsResponse, verb='POST')
def modify_instance_groups(self, instance_group_ids, new_sizes):
"""
Modify the number of nodes and configuration settings in an
instance group.
:type instance_group_ids: list(str)
:param instance_group_ids: A list of the ID's of the instance
groups to be modified
:type new_sizes: list(int)
:param new_sizes: A list of the new sizes for each instance group
"""
if type(instance_group_ids) != types.ListType:
instance_group_ids = [instance_group_ids]
if type(new_sizes) != types.ListType:
new_sizes = [new_sizes]
instance_groups = zip(instance_group_ids, new_sizes)
params = {}
for k, ig in enumerate(instance_groups):
# could be wrong - the example amazon gives uses
# InstanceRequestCount, while the api documentation
# says InstanceCount
params['InstanceGroups.member.%d.InstanceGroupId' % (k+1) ] = ig[0]
params['InstanceGroups.member.%d.InstanceCount' % (k+1) ] = ig[1]
return self.get_object('ModifyInstanceGroups', params,
ModifyInstanceGroupsResponse, verb='POST')
def run_jobflow(self, name, log_uri=None, ec2_keyname=None,
availability_zone=None,
master_instance_type='m1.small',
slave_instance_type='m1.small', num_instances=1,
action_on_failure='TERMINATE_JOB_FLOW', keep_alive=False,
enable_debugging=False,
hadoop_ver
|
mfwarren/FreeCoding
|
2014/12/fc_30/app/models.py
|
Python
|
mit
| 596 | 0 |
from . import db
class Role(db.Model):
__tablename__ = 'roles'
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(64), unique=True)
users = db.relationship('User', backref='role', lazy='dynamic')
def __repr__(self):
return '<Role %s>' % self.name
class User(db.Model):
__tablename__ = 'users'
id = db.Column(db.Integer, primary_key=True)
username = db.Column(db.String(64), unique=True, index=True)
role_id = db.Column(db.Integer, db.ForeignKey('roles.id'))
def __repr__(self):
return '<User %s>' % self.username
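# Illustrative usage sketch (assumption): 'db' is the Flask-SQLAlchemy handle
# imported above; the role and user names are hypothetical.
#
#     admin = Role(name='admin')
#     user = User(username='matt', role=admin)
#     db.session.add_all([admin, user])
#     db.session.commit()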
|
skosukhin/spack
|
var/spack/repos/builtin/packages/jags/package.py
|
Python
|
lgpl-2.1
| 1,927 | 0.000519 |
##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class Jags(AutotoolsPackage):
"""JAGS is Just Another Gibbs Sampler. It is a program for analysis of
Bayesian hierarchical models using Markov Chain Monte Carlo (MCMC)
simulation not wholly unlike BUGS"""
homepage = "http://mcmc-jags.sourceforge.net/"
url = "https://downloads.sourceforge.net/project/mcmc-jags/JAGS/4.x/Source/JAGS-4.2.0.tar.gz"
version('4.2.0', '9e521b3cfb23d3290a8c6bc0b79bf426')
depends_on('blas')
depends_on('lapack')
def configure_args(self):
args = ['--with-blas=-L%s' % self.spec['blas'].prefix.lib,
'--with-lapack=-L%s' % self.spec['lapack'].prefix.lib]
return args
|
kingosticks/mopidy
|
mopidy/config/types.py
|
Python
|
apache-2.0
| 9,146 | 0 |
import logging
import re
import socket
from mopidy.config import validators
from mopidy.internal import log, path
def decode(value):
if isinstance(value, bytes):
value = value.decode(errors="surrogateescape")
for char in ("\\", "\n", "\t"):
value = value.replace(
char.encode(encoding="unicode-escape").decode(), char
)
return value
def encode(value):
if isinstance(value, bytes):
value = value.decode(errors="surrogateescape")
for char in ("\\", "\n", "\t"):
value = value.replace(
char, char.encode(encoding="unicode-escape").decode()
)
return value
class DeprecatedValue:
pass
class ConfigValue:
"""Represents a config key's value and how to handle it.
Normally you will only be interacting with sub-classes for config values
that encode either deserialization behavior and/or validation.
Each config value should be used for the following actions:
1. Deserializing from a raw string and validating, raising ValueError on
failure.
2. Serializing a value back to a string that can be stored in a config.
3. Formatting a value to a printable form (useful for masking secrets).
:class:`None` values should not be deserialized, serialized or formatted,
the code interacting with the config should simply skip None config values.
"""
def deserialize(self, value):
"""Cast raw string to appropriate type."""
return decode(value)
def serialize(self, value, display=False):
"""Convert value back to string for saving."""
if value is None:
return ""
return str(value)
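# Illustrative sketch (not part of upstream Mopidy): a minimal custom value
# type following the deserialize/serialize contract described above; the class
# name and its comma-separated-integer semantics are assumptions made for this
# example only.
class _ExampleCommaSeparatedInts(ConfigValue):
    """Comma-separated integers, e.g. "8080, 8081" -> (8080, 8081)."""
    def deserialize(self, value):
        value = decode(value).strip()
        if not value:
            return None
        # int() raises ValueError on bad input, as the contract requires
        return tuple(int(v.strip()) for v in value.split(","))
    def serialize(self, value, display=False):
        if value is None:
            return ""
        return ", ".join(str(v) for v in value)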
class Deprecated(ConfigValue):
"""Deprecated value.
Used for ignoring old config values that are no longer in use, but should
not cause the config parser to crash.
"""
def deserialize(self, value):
return DeprecatedValue()
def serialize(self, value, display=False):
return DeprecatedValue()
class String(ConfigValue):
"""String value.
Is decoded as utf-8 and \\n \\t escapes should work and be preserved.
"""
def __init__(self, optional=False, choices=None):
self._required = not optional
self._choices = choices
def deserialize(self, value):
value = decode(value).strip()
validators.validate_required(value, self._required)
if not value:
return None
validators.validate_choice(value, self._choices)
return value
def serialize(self, value, display=False):
if value is None:
return ""
return encode(value)
class Secret(String):
"""Secret string value.
Is decoded as utf-8 and \\n \\t escapes should work and be preserved.
Should be used for passwords, auth tokens etc. Will mask value when being
displayed.
"""
def __init__(self, optional=False, choices=None):
self._required = not optional
self._choices = None # Choices doesn't make sense for secrets
def serialize(self, value, display=False):
if value is not None and display:
return "********"
return super().serialize(value, display)
class Integer(ConfigValue):
"""Integer value."""
def __init__(
self, minimum=None, maximum=None, choices=None, optional=False
):
self._required = not optional
self._minimum = minimum
self._maximum = maximum
self._choices = choices
def deserialize(self, value):
value = decode(value)
validators.validate_required(value, self._required)
if not value:
return None
value = int(value)
validators.validate_choice(value, self._choices)
validators.validate_minimum(value, self._minimum)
validators.validate_maximum(value, self._maximum)
return value
class Boolean(ConfigValue):
"""Boolean value.
Accepts ``1``, ``yes``, ``true``, and ``on`` with any casing as
:class:`True`.
Accepts ``0``, ``no``, ``false``, and ``off`` with any casing as
:class:`False`.
"""
true_values = ("1", "yes", "true", "on")
false_values = ("0", "no", "false", "off")
def __init__(self, optional=False):
self._required = not optional
def deserialize(self, value):
value = decode(value)
validators.validate_required(value, self._required)
if not value:
return None
if value.lower() in self.true_values:
return True
elif value.lower() in self.false_values:
return False
raise ValueError(f"invalid value for boolean: {value!r}")
def serialize(self, value, display=False):
if value is True:
return "true"
elif value in (False, None):
return "false"
else:
raise ValueError(f"{value!r} is not a boolean")
class List(ConfigValue):
"""List value.
Supports elements split by commas or newlines. Newlines take precedence and
empty list items will be filtered out.
"""
def __init__(self, optional=False):
self._required = not optional
def deserialize(self, value):
value = decode(value)
if "\n" in value:
values = re.split(r"\s*\n\s*", value)
else:
values = re.split(r"\s*,\s*", value)
values = tuple(v.strip() for v in values if v.strip())
validators.validate_required(values, self._required)
return tuple(values)
def serialize(self, value, display=False):
if not value:
return ""
return "\n " + "\n ".join(encode(v) for v in value if v)
class LogColor(ConfigValue):
def deserialize(self, value):
value = decode(value)
validators.validate_choice(value.lower(), log.COLORS)
return value.lower()
def serialize(self, value, display=False):
if value.lower() in log.COLORS:
return encode(value.lower())
return ""
class LogLevel(ConfigValue):
"""Log level value.
Expects one of ``critical``, ``error``, ``warning``, ``info``, ``debug``,
``trace``, or ``all``, with any casing.
"""
levels = {
"critical": logging.CRITICAL,
"error": logging.ERROR,
"warning": logging.WARNING,
"info": logging.INFO,
"debug": logging.DEBUG,
"trace": log.TRACE_LOG_LEVEL,
"all": logging.NOTSET,
}
def deserialize(self, value):
value = decode(value)
validators.validate_choice(value.lower(), self.levels.keys())
return self.levels.get(value.lower())
def serialize(self, value, display=False):
lookup = {v: k for k, v in self.levels.items()}
if value in lookup:
return encode(lookup[value])
return ""
class Hostname(ConfigValue):
"""Network hostname value."""
def __init__(self, optional=False):
self._required = not optional
def deserialize(self, value, display=False):
value = decode(value).strip()
validators.validate_required(value, self._required)
if not value:
return None
socket_path = path.get_unix_socket_path(value)
if socket_path is not None:
path_str = Path(not self._required).deserialize(socket_path)
return f"unix:{path_str}"
try:
socket.getaddrinfo(value, None)
except OSError:
raise ValueError("must be a resolveable hostname or valid IP")
return value
class Port(Integer):
"""Network port value.
Expects integer in the range 0-65535, zero tells the kernel to simply
allocate a port for us.
"""
def __init__(self, choices=None, optional=False):
super().__init__(
minimum=0, maximum=2 ** 16 - 1, choices=choices, optional=optional
)
class _ExpandedPath(str):
def __new__(cls, original, expanded):
return super().__new__(cls, expanded)
def __init__(self, original, expanded):
self.original = original
class Path(ConfigValue):
"""File system path.
The following expansio
|
SVladkov/Numismatic
|
database/idatabase.py
|
Python
|
gpl-3.0
| 94 | 0.042553 |
#import coin
class IDatabase:
def enter_coin(coin):
raise Exception('NotImplementedError')
|
davidgillies/ships
|
ships_proj/ships/tests/test_models.py
|
Python
|
gpl-2.0
| 109 | 0.018349 |
from django.test import TestCase
from ships.models import *
class ShipModelTest(TestCase):
pass
|
bikash/kaggleCompetition
|
microsoft malware/Malware_Say_No_To_Overfitting/kaggle_Microsoft_malware_small/find_2g.py
|
Python
|
apache-2.0
| 1,327 | 0.027882 |
import sys
import pickle
##########################################################
# usage
# pypy find_2g.py xid_train.p ../../data/train
# xid_train.p is a list like ['loIP1tiwELF9YNZQjSUO',''....] to specify
# the order of samples in traing data
# ../../data/train is the path of original train data
##########################################################
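# Illustrative sketch (assumption): xid_train.p is simply a pickled list of
# sample ids in the desired order, e.g. produced along these lines:
#
#     import pickle
#     pickle.dump(['loIP1tiwELF9YNZQjSUO', ...], open('xid_train.p', 'w'))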
xid_name=sys.argv[1]
data_path=sys.argv[2]
xid=pickle.load(open(xid_name)) #xid_train.p or xid_test.p
newc=pickle.load(open('newc.p'))
cmd2g={}
for i in newc:
for j in newc:
cmd2g[(i,j)]=0
print newc
for c,f in enumerate(xid):#(files[len(files)/10*a1:len(files)/10*a2]):
count={}
for i in cmd2g:
count[i]=0
fo=open(data_path+'/'+f+'.asm')
tot=0
a=-1
b=-1
for line in fo:
xx=line.split()
for x in xx:
if x in newc:
a=b
b=x
if (a,b) in cmd2g:
count[(a,b)]+=1
tot+=1
# print (b,a)
fo.close()
if c%10==0:
print c*1.0/len(xid),tot
for i in cmd2g:
cmd2g[i]=count[i]+cmd2g[i]
del count
import pickle
cmd2gx={}
for i in cmd2g:
if cmd2g[i]>10:
cmd2gx[i]=cmd2g[i]
print len(cmd2gx)
pickle.dump(cmd2gx,open('cmd2g.p','w'))
|
mlabru/ptracks
|
view/piloto/dlg_subida.py
|
Python
|
gpl-3.0
| 7,178 | 0.001123 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
---------------------------------------------------------------------------------------------------
dlg_subida
holds the state of the climb (subida) procedure dialog
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
revision 0.2 2015/nov mlabru
pep8 style conventions
revision 0.1 2014/nov mlabru
initial release (Linux/Python)
---------------------------------------------------------------------------------------------------
"""
__version__ = "$revision: 0.2$"
__author__ = "mlabru, sophosoft"
__date__ = "2015/12"
# < imports >--------------------------------------------------------------------------------------
# python library
import json
import logging
import os
# PyQt library
from PyQt4 import QtCore
from PyQt4 import QtGui
# view
import view.piloto.dlg_subida_ui as dlg
# < class CDlgSubida >-----------------------------------------------------------------------------
class CDlgSubida(QtGui.QDialog, dlg.Ui_CDlgSubida):
"""
holds the state of the climb (subida) procedure dialog
"""
# ---------------------------------------------------------------------------------------------
def __init__(self, fsck_http, fdct_config, f_strip_cur, fdct_sub, f_parent=None):
"""
@param fsck_http: socket used to communicate with the server
@param fdct_config: configuration dictionary
@param f_strip_cur: currently selected strip
@param fdct_sub: dictionary of climb (subida) procedures
@param f_parent: parent window
"""
# init super class
super(CDlgSubida, self).__init__(f_parent)
# socket de comunicação
self.__sck_http = fsck_http
assert self.__sck_http
# dicionário de configuração
self.__dct_config = fdct_config
assert self.__dct_config is not None
# dicionário de subidas
self.__dct_sub = fdct_sub
assert self.__dct_sub is not None
# monta a dialog
self.setupUi(self)
# configura título da dialog
self.setWindowTitle(u"Procedimento de Subida")
# configurações de conexões slot/signal
self.__config_connects()
# configurações de títulos e mensagens da janela de edição
self.__config_texts()
# restaura as configurações da janela de edição
self.__restore_settings()
# dicionário de subidas vazio ?
if not self.__dct_sub:
# carrega o dicionário
self.__load_sub()
# inicia valores
self.cbx_sub.addItems(sorted(self.__dct_sub.values()))
# configura botões
self.bbx_subida.button(QtGui.QDialogButtonBox.Cancel).setText("&Cancela")
self.bbx_subida.button(QtGui.QDialogButtonBox.Ok).setFocus()
# inicia os parâmetros da subida
self.__update_command()
# ---------------------------------------------------------------------------------------------
def __config_connects(self):
"""
configure the slot/signal connections
"""
# connect the combo box selection signal
self.cbx_sub.currentIndexChanged.connect(self.__on_cbx_currentIndexChanged)
# ---------------------------------------------------------------------------------------------
def __config_texts(self):
"""
DOCUMENT ME!
"""
# configura títulos e mensagens
self.__txt_settings = "CDlgSubida"
# ---------------------------------------------------------------------------------------------
def get_data(self):
"""
DOCUMENT ME!
"""
# return command line
return self.lbl_comando.text()
# ---------------------------------------------------------------------------------------------
def __load_sub(self):
"""
load the dictionary of climb procedures
"""
# check for requirements
assert self.__sck_http is not None
assert self.__dct_config is not None
assert self.__dct_sub is not None
# monta o request das subidas
ls_req = "data/sub.json"
# get server address
l_srv = self.__dct_config.get("srv.addr", None)
if l_srv is not None:
# obtém os dados de subidas do servidor
l_dict = self.__sck_http.get_data(l_srv, ls_req)
if l_dict is not None:
# coloca a subidas no dicionário
self.__dct_sub.update(json.loads(l_dict))
# senão, não achou no servidor...
else:
# logger
l_log = logging.getLogger("CDlgSubida::__load_sub")
l_log.setLevel(logging.ERROR)
l_log.error(u"<E01: tabela de subidas não existe no servidor.")
# senão, não achou endereço do servidor
else:
# logger
l_log = logging.getLogger("CDlgSubida::__load_sub")
l_log.setLevel(logging.WARNING)
l_log.warning(u"<E02: srv.addr não existe na configuração.")
# ---------------------------------------------------------------------------------------------
def __restore_settings(self):
"""
restore the settings saved for this window
"""
# obtém os settings
l_set = QtCore.QSettings("sophosoft", "piloto")
assert l_set
# restaura geometria da janela
self.restoreGeometry(l_set.value("%s/Geometry" % (self.__txt_settings)).toByteArray())
# ---------------------------------------------------------------------------------------------
def __update_command(self):
"""
DOCUMENT ME!
"""
# para todas as subidas...
for l_key, l_sub in self.__dct_sub.iteritems():
# é a subida selecionada ?
if self.cbx_sub.currentText() == l_sub:
break
# inicia o comando
ls_cmd = "SUB {}".format(l_key)
# coloca o comando no label
self.lbl_comando.setText(ls_cmd)
# =============================================================================================
# field editing
# =============================================================================================
# ---------------------------------------------------------------------------------------------
@QtCore.pyqtSignature("int")
def __on_cbx_currentIndexChanged(self, f_val):
"""
DOCUMENT ME!
"""
# atualiza comando
self.__update_command()
# < the end >--------------------------------------------------------------------------------------
|
dougwig/a10-neutron-lbaas
|
a10_neutron_lbaas/neutron_ext/extensions/a10DeviceInstance.py
|
Python
|
apache-2.0
| 4,037 | 0.000991 |
# Copyright 2015, A10 Networks
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
import six
import a10_openstack_lib.resources.a10_device_instance as a10_device_instance
from neutron.api import extensions
from neutron.api.v2 import attributes
from neutron.api.v2 import resource_helper
from neutron.common import exceptions
from neutron.services import service_base
import a10_neutron_lbaas.neutron_ext.common.constants as constants
from a10_neutron_lbaas.neutron_ext.common import resources
RESOURCE_ATTRIBUTE_MAP = resources.apply_template(a10_device_instance.RESOURCE_ATTRIBUTE_MAP,
attributes)
# TODO(rename this to *Extension to avoid config file confusion)
class A10DeviceInstance(extensions.ExtensionDescriptor):
@classmethod
def get_name(cls):
return "A10 Device Instances"
@classmethod
def get_alias(cls):
return constants.A10_DEVICE_INSTANCE_EXT
@classmethod
def get_namespace(cls):
return "http://docs.openstack.org/ext/neutron/a10_device_instance/api/v1.0"
@classmethod
def get_updated(cls):
return "2015-11-18T16:17:00-07:00"
@classmethod
def get_description(cls):
return ("A10 Device Instances")
@classmethod
def get_resources(cls):
"""Returns external resources."""
my_plurals = resource_helper.build_plural_mappings(
{}, RESOURCE_ATTRIBUTE_MAP)
attributes.PLURALS.update(my_plurals)
attr_map = RESOURCE_ATTRIBUTE_MAP
resources = resource_helper.build_resource_info(my_plurals,
attr_map,
constants.A10_DEVICE_INSTANCE)
return resources
def update_attributes_map(self, attributes):
super(A10DeviceInstance, self).update_attributes_map(
attributes,
extension_attrs_map=RESOURCE_ATTRIBUTE_MAP)
def get_extended_resources(self, version):
if version == "2.0":
return RESOURCE_ATTRIBUTE_MAP
else:
return {}
class A10DeviceInstanceNotFoundError(exceptions.NotFound):
def __init__(self, a10_device_instance_id):
self.msg = _("A10 Device Instance {} could not be found.")
super(A10DeviceInstanceNotFoundError, self).__init__()
class A10DeviceInstanceInUseError(exceptions.InUse):
def __init__(self, a10_device_instance_id):
self.message = _("A10 Device Instance is in use and cannot be deleted.")
self.msg = self.message
super(A10DeviceInstanceInUseError, self).__init__()
@six.add_metaclass(abc.ABCMeta)
class A10DeviceInstancePluginBase(service_base.ServicePluginBase):
def get_plugin_name(self):
return constants.A10_DEVICE_INSTANCE
def get_plugin_description(self):
return constants.A10_DEVICE_INSTANCE
def get_plugin_type(self):
return constants.A10_DEVICE_INSTANCE
def __init__(self):
super(A10DeviceInstancePluginBase, self).__init__()
@abc.abstractmethod
def get_a10_device_instances(self, context, filters=None, fields=None):
pass
@abc.abstractmethod
def create_a10_device_instance(self, context, device_instance):
pass
@abc.abstractmethod
def get_a10_device_instance(self, context, id, fields=None):
pass
|
AutorestCI/azure-sdk-for-python
|
azure-mgmt-monitor/azure/mgmt/monitor/models/diagnostic_settings_category_resource.py
|
Python
|
mit
| 1,650 | 0.000606 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .proxy_only_resource import ProxyOnlyResource
class DiagnosticSettingsCategoryResource(ProxyOnlyResource):
"""The diagnostic settings category resource.
Variables are only populated by the server, and will be ignored when
sending a request.
:ivar id: Azure resource Id
:vartype id: str
:ivar name: Azure resource name
:vartype name: str
:ivar type: Azure resource type
:vartype type: str
:param category_type: The type of the diagnostic settings category.
Possible values include: 'Metrics', 'Logs'
:type category_type: str or ~azure.mgmt.monitor.models.CategoryType
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'category_type': {'key': 'properties.categoryType', 'type': 'CategoryType'},
}
def __init__(self, category_type=None):
super(DiagnosticSettingsCategoryResource, self).__init__()
self.category_type = category_type
|
sghai/robottelo
|
tests/foreman/ui/test_satellitesync.py
|
Python
|
gpl-3.0
| 3,555 | 0 |
# -*- encoding: utf-8 -*-
"""Test class for InterSatellite Sync feature
:Requirement: Satellitesync
:CaseAutomation: Automated
:CaseLevel: Acceptance
:CaseComponent: UI
:TestType: Functional
:CaseImportance: High
:Upstream: No
"""
from robottelo.decorators import (
run_only_on,
stubbed,
tier1,
tier3,
upgrade
)
from robottelo.test import UITestCase
class InterSatelliteSyncTestCase(UITestCase):
"""Implements InterSatellite Sync tests in UI"""
@run_only_on('sat')
@stubbed()
@tier3
@upgrade
def test_positive_show_repo_export_history(self):
"""Product history shows repo export history on export.
:id: 01d82253-081b-4d11-9a5b-e6052173fe47
:steps: Export a repo to a specified location in settings.
:expectedresults: Repo/Product history should reflect the export
history with user and time.
:caseautomation: notautomated
:CaseLevel: System
"""
@run_only_on('sat')
@stubbed()
@tier3
@upgrade
def test_positive_show_cv_export_history(self):
"""CV history shows CV version export history on export.
:id: 06e26cca-e262-4eff-b8d7-fbca504a8acb
:steps: Export a CV to a specified location in settings.
:expectedresults: CV history should reflect the export history with
user, version, action and time.
:caseautomation: notautomated
:CaseLevel: System
"""
@run_only_on('sat')
@stubbed()
@tier1
def test_positive_update_cdn_url(self):
"""Update CDN URL to import from upstream.
:id: 5ff30764-a1b1-48df-a6a1-0f1d23f883b9
:steps:
1. In upstream, Export Redhat repo/CV to a directory.
2. Copy exported contents to /var/www/html.
3. In downstream, Update CDN URL with step 2 location to import the
Redhat contents.
4. Enable and sync the imported repo from Redhat Repositories page.
:expectedresults:
1. The CDN URL is is updated successfully.
2. The imported repo is enabled and sync.
:caseautomation: notautomated
:CaseImportance: Critical
"""
@run_only_on('sat')
@stubbed()
@tier1
def test_negative_update_cdn_url(self):
"""Update non existing CDN URL to import from upstream.
:id: 4bf74712-dac8-447b-9c9f-227a41cdec4d
:steps:
1. In downstream, Update CDN URL with some non existing url.
2. Attempt to Enable and sync some repo from Redhat Repositories
page.
:expectedresults:
1. The CDN URL is not allowed to update any non existing url.
2. None of the repo is allowed to enable and sync.
:caseautomation: notautomated
:CaseImportance: Critical
"""
@run_only_on('sat')
@stubbed()
@tier3
@upgrade
def test_positive_restrict_other_redhat_repo_import(self):
"""Restrict the import/sync of non exported repos.
:id: 7091ca13-7f58-4733-87d5-1fa3670bfcee
:steps:
1. Export Red Hat YUM repo to path which will be accessible over
HTTP.
2. Define the CDN URL the same as the exported HTTP URL.
3. Attempt to Import/Enable non exported repos from Redhat
Repositories page.
:expectedresults: The import of non exported repos is restricted.
:caseautomation: notautomated
:CaseLevel: System
"""
|
djtaylor/cloudscape-DEPRECATED
|
python/cloudscape/common/remote.py
|
Python
|
gpl-3.0
| 25,284 | 0.009294 |
import os
import re
import copy
import paramiko
import StringIO
import unicodedata
from paramiko import SSHException, BadHostKeyException
# CloudScape Libraries
from cloudscape.common import config
from cloudscape.common import logger
from cloudscape.common.scp import SCPClient
from cloudscape.common.utils import valid, invalid
class RemoteConnect(object):
"""
Wrapper class designed to handle remote SSH connections to both Linux
and Windows hosts. Provides methods to open a connection, run remote
commands, as well as basic SCP functionality.
The connection object can use either password or SSH key authentication
to connect to the remote host.
Setting up a connection::
# Import the class
from cloudscape.common.remote import RemoteConnect
# SSH password connection parameters. Note that 'port' is optional for
# both password and key authentication and defaults to 22.
pass_params = {
'host': '192.168.0.12',
'user': 'admin',
'password': 'secret',
'port': 22
}
# SSH key connection parameters. The key parameter can either be a path
# to a file, or a private key you have read into a string.
key_params = {
'host': '192.168.0.12',
'user': 'admin',
'key': '/home/user/.ssh/private_key'
}
# Setup the connection
remote = RemoteConnect('linux', key_params)
"""
def __init__(self, sys_type='linux', conn={}):
"""
Initialize a Paramiko SSH connection.
:param sys_type: The type of machine to connection, either 'linux' or 'windows'. Defaults to 'linux'
:type sys_type: str
:param conn: SSH connection parameters.
:type conn: dict
"""
self.conf = config.parse()
self.log = logger.create(__name__, self.conf.server.log)
# Valid system types
self.sys_types = ['linux', 'windows']
# Required connection parameters
self.conn_attr = {
'linux': ['host', 'user'],
'windows': ['host', 'user']
}
# Remote system type and connection parameters
self.sys_type = sys_type
self.sys_conn = copy.copy(conn)
# Make sure connection parameters are valid
self.params_valid = self._validate()
# Set any default connection parameters
if self.params_valid['valid'] == True:
self._set_defaults()
""" ERROR MESSAGES
Method to define string messages for internal error codes.
"""
def _error(self, code):
# Error code numbers and messages
error_codes = {
000: "Missing required 'type' parameter",
001: "Unsupported 'type' parameter '%s' - must be one of '%s'" % (self.sys_type, str(self.sys_types)),
002: "Missing required 'conn' paremeter for remote connection details",
003: "Remote commands must be in list format",
004: "Files argument must be in list format",
005: "Cannot use sudo on host '%s', no password provided'" % self.sys_conn['host'],
100: "Must supply a 'passwd' or 'key' connection parameter for system type '%s'" % (self.sys_type),
101: "Missing a required connection parameter for system type '%s', must supply '%s'" % (self.sys_type, str(self.conn_attr[self.sys_type])),
999: "An unknown error occured when creating the remote connection"
}
# Return the error message
if not code or not code in error_codes:
return error_codes[999]
else:
return error_codes[code]
""" SET DEFAULTS
Set any defaults for unspecific, optional connection parameters depending
on the system type.
"""
def _set_defaults(self):
# System Defaults
if not 'port' in self.sys_conn or not self.sys_conn['port']:
self.sys_conn['port'] = 22
""" LOAD SSH KEY
Method to load an SSH key for a Linux connection into a Parmiko object.
"""
def _load_ssh_key(self):
if os.path.isfile(self.sys_conn['key']):
key_obj = paramiko.RSAKey.from_private_key_file(self.sys_conn['key'])
else:
key_str = unicodedata.normalize('NFKD', self.sys_conn['key']).encode('ascii', 'ignore')
key_fo = StringIO.StringIO(key_str)
key_obj = paramiko.RSAKey.from_private_key(key_fo)
return key_obj
""" VALIDATE PARAMETERS
Make sure the system type and connection parameters are valid. Check the
connection parameters based on the system type.
"""
def _validate(self):
# Require a system type parameter
if not self.sys_type:
return invalid(self._error(000))
# Make sure the system type is supported
if not self.sys_type in self.sys_types:
return invalid(self._error(001))
# Require system connection parameters
if not self.sys_conn or not isinstance(self.sys_conn, dict):
return invalid(self._error(002))
# Windows system type validation
if self.sys_type == 'windows':
# Require an SSH key
if not 'key' in self.sys_conn:
return invalid(self._error(100))
# Linux system type validation
if self.sys_type == 'linux':
# Make sure either a key or password are set
if not ('passwd' in self.sys_conn) and not ('key' in self.sys_conn):
return invalid(self._error(100))
# Make sure the required parameters are set
for param in self.conn_attr[self.sys_type]:
if not param in self.sys_conn:
return invalid(self._error(101))
# If a key is specified, read into an RSA object
if 'key' in self.sys_conn and self.sys_conn['key']:
self.auth_type = 'key'
self.sys_conn['key'] = self._load_ssh_key()
else:
self.auth_type = 'passwd'
self.sys_conn['key'] = None
# Connection parameters OK
return valid()
""" Connection Handler """
def _connect(self):
try:
if self.auth_type == 'key':
self.client_exec.connect(self.sys_conn['host'],
username = self.sys_conn['user'],
port = int(self.sys_conn['port']),
pkey = self.sys_conn['key'])
else:
self.client_exec.connect(self.sys_conn['host'],
username = self.sys_conn['user'],
port = int(self.sys_conn['port']),
password = self.sys_conn['passwd'])
except BadHostKeyException as e:
self.log.exception('Failed to establish SSH connection: %s' % str(e))
return invalid(str(e))
# Open the SCP client
self.client_copy = SCPClient(self.client_exec.get_transport())
return valid()
def open(self):
"""
Open the connection to the remote host with the constructed connection
object. This class is called internally to the API, so the return object
is constructed accordingly.
The open method returns a dictionary with two
elements, 'valid' and 'content'. If the connection failed, 'valid' is set
to False and 'content' contains the error. Otherwise, 'valid' is set to True.
:rtype: dictionary
Opening a connection::
# Attempt to connect
status = remote.open()
# If the connection failed to open
if not status['valid']:
# You can return the object to the calling module or raise your own Exception
|
mzdaniel/oh-mainline
|
vendor/packages/Django/tests/modeltests/order_with_respect_to/tests.py
|
Python
|
agpl-3.0
| 2,870 | 0.002787 |
from operator import attrgetter
from django.test import TestCase
from models import Post, Question, Answer
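# Illustrative sketch (assumption, not part of this test module): the imported
# models presumably look roughly like this, with Answer ordered with respect
# to its Question and Post ordered with respect to its parent Post.
#
#     class Question(models.Model):
#         text = models.CharField(max_length=200)
#
#     class Answer(models.Model):
#         question = models.ForeignKey(Question)
#         text = models.CharField(max_length=200)
#         class Meta:
#             order_with_respect_to = 'question'
#
#     class Post(models.Model):
#         title = models.CharField(max_length=200)
#         parent = models.ForeignKey('self', related_name='children', null=True)
#         class Meta:
#             order_with_respect_to = 'parent'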
class OrderWithRespectToTests(TestCase):
def test_basic(self):
q1 = Question.objects.create(text="Which Beatle starts with the letter 'R'?")
q2 = Question.objects.create(text="What is your name?")
Answer.objects.create(text="John", question=q1)
Answer.objects.create(text="Jonno", question=q2)
Answer.objects.create(text="Paul", question=q1)
Answer.objects.create(text="Paulo", question=q2)
Answer.objects.create(text="George", question=q1)
Answer.objects.create(text="Ringo", question=q1)
# The answers will always be ordered in the order they were inserted.
self.assertQuerysetEqual(
q1.answer_set.all(), [
"John", "Paul", "George", "Ri
|
ngo",
],
attrgetter("text"),
)
# We can retrieve the answers related to a particular object, in the
# order they were created, once we have a particular object.
a1 = Answer.objects.filter(question=q1)[0]
self.assertEqual(a1.text, "John")
a2 = a1.get_next_in_order()
self.assertEqual(a2.text, "Paul")
a4 = list(Answer.objects.filter(question=q1))[-1]
self.assertEqual(a4.text, "Ringo")
self.assertEqual(a4.get_previous_in_order().text, "George")
# Determining (and setting) the ordering for a particular item is also
# possible.
id_list = [o.pk for o in q1.answer_set.all()]
self.assertEqual(a2.question.get_answer_order(), id_list)
a5 = Answer.objects.create(text="Number five", question=q1)
# It doesn't matter which answer we use to check the order, it will
# always be the same.
self.assertEqual(
a2.question.get_answer_order(), a5.question.get_answer_order()
)
# The ordering can be altered:
id_list = [o.pk for o in q1.answer_set.all()]
x = id_list.pop()
id_list.insert(-1, x)
self.assertNotEqual(a5.question.get_answer_order(), id_list)
a5.question.set_answer_order(id_list)
self.assertQuerysetEqual(
q1.answer_set.all(), [
"John", "Paul", "George", "Number five", "Ringo"
],
attrgetter("text")
)
def test_recursive_ordering(self):
p1 = Post.objects.create(title='1')
p2 = Post.objects.create(title='2')
p1_1 = Post.objects.create(title="1.1", parent=p1)
p1_2 = Post.objects.create(title="1.2", parent=p1)
p2_1 = Post.objects.create(title="2.1", parent=p2)
p1_3 = Post.objects.create(title="1.3", parent=p1)
self.assertEqual(p1.get_post_order(), [p1_1.pk, p1_2.pk, p1_3.pk])
|
alphagov/stagecraft
|
stagecraft/libs/mass_update/test_data_set_mass_update.py
|
Python
|
mit
| 3,422 | 0 |
from stagecraft.apps.datasets.models import DataGroup, DataSet, DataType
from django.test import TestCase
from stagecraft.libs.mass_update import DataSetMassUpdate
from nose.tools import assert_equal
class TestDataSetMassUpdate(TestCase):
@classmethod
def setUpClass(cls):
cls.data_group1 = DataGroup.objects.create(name='datagroup1')
cls.data_group2 = DataGroup.objects.create(name='datagroup2')
cls.data_type1 = DataType.objects.create(name='datatype1')
cls.data_type2 = DataType.objects.create(name='datatype2')
cls.dataset_a = DataSet.objects.create(
name='foo',
data_group=cls.data_group1,
bearer_token="abc123",
data_type=cls.data_type1)
cls.dataset_b = DataSet.objects.create(
|
name='bar',
data_group=cls.data_group2,
bearer_token="def456",
data_type=cls.data_type1)
cls.dataset_c = DataSet.objects.create(
name='baz',
data_group=cls.data_group2,
bearer_token="999999",
data_type=cls.data_type2)
@classmethod
def tearDownClass(cls):
pass
def test_update_bearer_token_by_date_type(self):
new_bearer_token = "ghi789"
query = {u'data_type': self.data_type1.name}
number_updated = DataSetMassUpdate \
.update_bearer_token_for_data_type_or_group_name(
query, new_bearer_token)
dataset_a = DataSet.objects.get(id=self.dataset_a.id)
dataset_b = DataSet.objects.get(id=self.dataset_b.id)
dataset_c = DataSet.objects.get(id=self.dataset_c.id)
assert_equal(number_updated, 2)
assert_equal(dataset_a.bearer_token, new_bearer_token)
assert_equal(dataset_b.bearer_token, new_bearer_token)
assert_equal(dataset_c.bearer_token == new_bearer_token, False)
def test_update_bearer_token_by_data_group(self):
new_bearer_token = "ghi789"
query = {u'data_group': self.data_group2.name}
number_updated = DataSetMassUpdate \
.update_bearer_token_for_data_type_or_group_name(
query, new_bearer_token)
dataset_a = DataSet.objects.get(id=self.dataset_a.id)
dataset_b = DataSet.objects.get(id=self.dataset_b.id)
dataset_c = DataSet.objects.get(id=self.dataset_c.id)
assert_equal(number_updated, 2)
assert_equal(dataset_a.bearer_token == new_bearer_token, False)
assert_equal(dataset_b.bearer_token, new_bearer_token)
assert_equal(dataset_c.bearer_token, new_bearer_token)
def test_update_bearer_token_by_data_group_and_data_type(self):
new_bearer_token = "ghi789"
query = {
u'data_type': self.data_type1.name,
u'data_group': self.data_group2.name}
number_updated = DataSetMassUpdate \
.update_bearer_token_for_data_type_or_group_name(
query, new_bearer_token)
dataset_a = DataSet.objects.get(id=self.dataset_a.id)
dataset_b = DataSet.objects.get(id=self.dataset_b.id)
dataset_c = DataSet.objects.get(id=self.dataset_c.id)
assert_equal(number_updated, 1)
assert_equal(dataset_a.bearer_token == new_bearer_token, False)
assert_equal(dataset_b.bearer_token, new_bearer_token)
assert_equal(dataset_c.bearer_token == new_bearer_token, False)
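The DataSetMassUpdate helper exercised above is not included in this excerpt; a minimal sketch that would satisfy these tests (an assumption, not the real stagecraft code) is:

# Illustrative only -- the actual stagecraft implementation may differ.
from stagecraft.apps.datasets.models import DataSet

class DataSetMassUpdate(object):

    @classmethod
    def update_bearer_token_for_data_type_or_group_name(cls, query, token):
        filters = {}
        if 'data_type' in query:
            filters['data_type__name'] = query['data_type']
        if 'data_group' in query:
            filters['data_group__name'] = query['data_group']
        # QuerySet.update() returns the number of rows changed, which is
        # the value the tests compare against.
        return DataSet.objects.filter(**filters).update(bearer_token=token)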
|
jo-soft/jadfr
|
jadfr/feedreader/conf/my_conf.py
|
Python
|
gpl-3.0
| 238 | 0 |
try:
from settings_local import
|
MyConf
except ImportError:
from feedreader.conf.base import Dev as MyConf
try:
from settings_local import MyTestConf
except ImportError:
from feedreader.conf.base import Test as MyTestConf
| |
dungeonsnd/test-code
|
dev_examples/pyserver/src/util/pys_define.py
|
Python
|
gpl-3.0
| 184 | 0.033784 |
#!/bin/env python
# -*-
|
coding: utf-8 -*-
PYS_SERVICE_MOD_PRE='pys_' # prefix used for service module names
PYS_HEAD_LEN=12 # length of the message header
PYS_MAX_BODY_LE
|
N=10485760 # maximum message length
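These constants imply a framed protocol: a fixed 12-byte header followed by a body of at most PYS_MAX_BODY_LEN bytes. The header layout is not shown anywhere in this file, so the packing sketch below is purely illustrative:

import struct

def pack_frame(cmd_id, seq_no, body):
    # Assumed layout (not from the original project): three unsigned
    # 32-bit big-endian integers -- body length, command id, sequence number.
    if len(body) > PYS_MAX_BODY_LEN:
        raise ValueError('body exceeds PYS_MAX_BODY_LEN')
    header = struct.pack('!III', len(body), cmd_id, seq_no)
    assert len(header) == PYS_HEAD_LEN  # 12 bytes
    return header + body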
|
pycook/cmdb
|
cmdb-api/api/lib/decorator.py
|
Python
|
gpl-2.0
| 838 | 0 |
# -*- coding:utf-8 -*-
from functools import wraps
from flask import abort
from flask import request
def kwargs_required(
|
*required_args):
def decorate(func):
@wraps(func)
def wrapper(*args, **kwargs):
for arg in required_args:
if arg not in kwargs:
return abort(400, "Argument <{0}> is required".format(
|
arg))
return func(*args, **kwargs)
return wrapper
return decorate
def args_required(*required_args):
def decorate(func):
@wraps(func)
def wrapper(*args, **kwargs):
for arg in required_args:
if arg not in request.values:
return abort(400, "Argument <{0}> is required".format(arg))
return func(*args, **kwargs)
return wrapper
return decorate
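A small usage sketch for the decorators above in a Flask view (the route and argument names are invented for illustration):

from flask import Flask, jsonify, request

app = Flask(__name__)

@app.route('/ci')
@args_required('type')  # responds with HTTP 400 if the 'type' parameter is absent
def list_ci():
    return jsonify(ci_type=request.values['type'])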
|
froschdesign/zf2-documentation
|
docs/src/conf.py
|
Python
|
bsd-3-clause
| 8,058 | 0.007322 |
# -*- coding: utf-8 -*-
#
# Zend Framework 2 documentation build configuration file, created by
# sphinx-quickstart on Fri Jul 6 18:55:07 2012.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Zend Framework 2'
copyright = u'2015, Zend Technologies Ltd.'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '2.4'
# The full version, including alpha/beta/rc tags.
release = '2.4.10dev'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
language = 'en'
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build', '*snippets.rst']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, t
|
he current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and mod
|
uleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = "../zf2_logo.png"
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'ZendFramework2doc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'ZendFramework2.tex', u'Zend Framework 2 Documentation',
u'Zend Technologies Ltd.', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'zendframework2', u'Zend Framework 2 Documentation',
[u'Zend Technologies Ltd.'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'ZendFramework2', u'Zend Framework 2 Documentation',
u'Zend Technologies Ltd.', 'ZendFramework2', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# Hack to render the php source code without the <?php tag
from sphinx.highlighting import lexers
from pygments.lexers.web import PhpLexer
lexers['php'] = PhpLexer(startinline=True)
|
libsh-archive/sh
|
test/regress/frac.cpp.py
|
Python
|
lgpl-2.1
| 1,097 | 0.003646 |
#!/usr/bin/python
import shtest, sys, common
from common import *
from math import *
def frac_test(p, types=[]):
if is_array(p):
result = [a - floor(a) for a in p]
else:
result = [a - floor(a)]
return shtest.make_test(result, [p], types)
def insert_into(test):
test.add_test(frac_test((-8.0, -8.9, -8.5, -8.1)))
test.add_test(frac_test((-0.0, -0.5, 0.0, 0.5)))
test.add_test(frac_test((8.0, 8.9, 8.5, 8.1)))
test.add_test(frac_test((18.0, 18.9, -18.1)))
test.add_make_test((0, 0), [(1098908.975123, -12131318.123456)])
test.add_test(frac_test((1234567890123456789.0, )))
test.add_test(frac_test((-1234567890123456789.0, )))
# Test frac in stream programs
test = shtest.S
|
treamTest('frac', 1)
test.add_call(shtest.Call(shtest.Call.call, 'frac', 1))
insert_into(test)
test.output_header(sys.stdout)
test.output(sys.stdout, False)
# Test frac in immediate mode
test = shtest.ImmediateTest('frac_im', 1)
test.add_call(shtest.Call(shtest.Call.call, 'frac', 1))
insert_into(t
|
est)
test.output(sys.stdout, False)
test.output_footer(sys.stdout)
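As a quick sanity check of the expected values generated above: frac(x) is computed as x - floor(x), so the result always lies in [0, 1), including for negative inputs:

from math import floor

def frac(x):
    return x - floor(x)

print(frac(8.9))   # ~0.9
print(frac(-8.9))  # ~0.1, since floor(-8.9) == -9
print(frac(-8.0))  # 0.0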
|
googleads/google-ads-python
|
google/ads/googleads/v9/errors/types/ad_parameter_error.py
|
Python
|
apache-2.0
| 1,192 | 0.000839 |
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific langu
|
age governing permissions and
# limitations under the License.
#
import proto # type: ignore
__protobuf__ = proto.module(
package="google.ads.googleads.v9.errors",
marshal="google.ads.googleads.v9",
manifest={"AdParameterErrorEnum",},
)
class AdParameterErrorEnum(proto.Message):
r"""Container for enum describing possible ad parameter errors.
"""
class AdParameterE
|
rror(proto.Enum):
r"""Enum describing possible ad parameter errors."""
UNSPECIFIED = 0
UNKNOWN = 1
AD_GROUP_CRITERION_MUST_BE_KEYWORD = 2
INVALID_INSERTION_TEXT_FORMAT = 3
__all__ = tuple(sorted(__protobuf__.manifest))
|
klen/django-netauth
|
example_project/settings/test.py
|
Python
|
lgpl-3.0
| 347 | 0.005764 |
" Settings for tests. "
from settings.project import *
#
|
Da
|
tabases
DATABASES= {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': ':memory:',
'USER': '',
'PASSWORD': '',
'TEST_CHARSET': 'utf8',
}
}
|
AndrewSallans/osf.io
|
framework/analytics/tasks.py
|
Python
|
apache-2.0
| 822 | 0 |
# -*- coding: utf-8 -*-
from framework.tasks import app
from framework.tasks.handlers import enqueue_task
from website import settings
from . import piwik
@app.task(bind=True, max_retries=5, default_retry_delay=60)
def _update_node(self, node_id, updated_fields=None):
# Avoid circular imports
from framework.tr
|
ansactions.context import TokuTransaction
from website import models
node = models.Node.load(node_id)
try:
with TokuTransaction():
piwik._update_node_object(node, updated_fields)
except Exception as error:
raise self.retry(exc=error)
def update_node(node_id, updated_fields):
if settings.USE_CELERY:
signature = _update_node.s(node_id, updated_fields
|
)
enqueue_task(signature)
else:
_update_node(node_id, updated_fields)
|
mozilla/inventory
|
systems/migrations/0002_auto__add_field_userprofile_is_mysqldba_oncall__add_field_userprofile_.py
|
Python
|
bsd-3-clause
| 21,325 | 0.008159 |
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'UserProfile.is_mysqldba_oncall'
db.add_column(u'user_profiles', 'is_mysqldba_oncall',
self.gf('django.db.models.fields.BooleanField')(default=False),
keep_default=False)
# Adding field 'UserProfile.is_pgsqldba_oncall'
db.add_column(u'user_profiles', 'is_pgsqldba_oncall',
self.gf('django.db.models.fields.BooleanField')(default=False),
keep_default=False)
# Adding field 'UserProfile.current_mysqldba_oncall'
db.add_column(u'user_profiles', 'current_mysqldba_oncall',
self.gf('django.db.models.fields.BooleanField')(default=False),
keep_default=False)
# Adding field 'UserProfile.current_pgsqldba_oncall'
db.add_column(u'user_profiles', 'current_pgsqldba_oncall',
self.gf('django.db.models.fields.BooleanField')(default=False),
keep_default=False)
def backwards(self, orm):
# Deleting field 'UserProfile.is_mysqldba_oncall'
db.delete_column(u'user_profiles', 'is_mysqldba_oncall')
# Deleting field 'UserProfile.is_pgsqldba_oncall'
db.delete_column(u'user_profiles', 'is_pgsqldba_oncall')
# Deleting field 'UserProfile.current_mysqldba_oncall'
db.delete_column(u'user_profiles', 'current_mysqldba_oncall')
# Deleting field 'UserProfile.current_pgsqldba_oncall'
db.delete_column(u'user_profiles', 'current_pgsqldba_oncall')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField',
|
[], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
|
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'dhcp.dhcp': {
'Meta': {'object_name': 'DHCP', 'db_table': "u'dhcp_scopes'"},
'allow_booting': ('django.db.models.fields.IntegerField', [], {'max_length': '32'}),
'allow_bootp': ('django.db.models.fields.IntegerField', [], {'max_length': '32'}),
'filename': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'option_domain_name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'option_domain_name_servers': ('django.db.models.fields.CharField', [], {'max_length': '48', 'null': 'True', 'blank': 'True'}),
'option_ntp_servers': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'blank': 'True'}),
'option_routers': ('django.db.models.fields.CharField', [], {'max_length': '16', 'null': 'True', 'blank': 'True'}),
'option_subnet_mask': ('django.db.models.fields.CharField', [], {'max_length': '16'}),
'pool_deny_dynamic_bootp_agents': ('django.db.models.fields.IntegerField', [], {'max_length': '32'}),
'pool_range_end': ('django.db.models.fields.CharField', [], {'max_length': '16', 'null': 'True', 'blank': 'True'}),
'pool_range_start': ('django.db.models.fields.CharField', [], {'max_length': '16', 'null': 'True', 'blank': 'True'}),
'scope_name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'scope_netmask': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'scope_notes': ('django.db.models.fields.TextField', [], {'max_length': '512', 'null': 'True', 'blank': 'True'}),
'scope_start': ('django.db.models.fields.CharField', [], {'max_length': '16', 'null': 'True', 'blank': 'True'})
},
'systems.advisorydata': {
'Meta': {'object_name': 'AdvisoryData', 'db_table': "u'advisory_data'"},
'advisory': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ip_address': ('django.db.models.fields.CharField', [], {'max_length': '15', 'blank': 'True'}),
'references': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'severity': ('django.db.models.fields.FloatField', [], {'blank': 'True'}),
'title': ('django.db.models.fields.TextField', [], {'blank': 'True'})
},
'systems.allocation': {
'Meta': {'ordering': "['name']", 'object_name': 'Allocation', 'db_table': "u'allocations'"},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'systems.contract': {
'Meta': {'object_name': 'Contract', 'db_table': "u'contracts'"},
'contract_link': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'contract_number': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'created_on': ('django.db.models.fields.DateT
|
BryanCutler/spark
|
python/pyspark/ml/classification.py
|
Python
|
apache-2.0
| 126,641 | 0.002953 |
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import operator
import sys
import uuid
import warnings
from abc import ABCMeta, abstractmethod, abstractproperty
from multiprocessing.pool import ThreadPool
from pyspark import keyword_only, since, SparkContext
from pyspark.ml import Estimator, Predictor, PredictionModel, Model
from pyspark.ml.param.shared import HasRawPredictionCol, HasProbabilityCol, HasThresholds, \
HasRegParam, HasMaxIter, HasFitIntercept, HasTol, HasStandardization, HasWeightCol, \
HasAggregationDepth, HasThreshold, HasBlockSize, HasMaxBlockSizeInMB, Param, Params, \
TypeConverters, HasElasticNetParam, HasSeed, HasStepSize, HasSolver, HasParallelism
from pyspark.ml.tree import _DecisionTreeModel, _DecisionTreeParams, \
_TreeEnsembleModel, _RandomForestParams, _GBTParams, \
_HasVarianceImpurity, _TreeClassifierParams
from pyspark.ml.regression import _FactorizationMachinesParams, DecisionTreeRegressionModel
from pyspark.ml.base import _PredictorParams
from pyspark.ml.util import DefaultParamsReader, DefaultParamsWriter, \
JavaMLReadable, JavaMLReader, JavaMLWritable, JavaMLWriter, \
MLReader, MLReadable, MLWriter, MLWritable, HasTrainingSummary
from pyspark.ml.wrapper import JavaParams, \
JavaPredictor, JavaPredictionModel, JavaWrapper
from pyspark.ml.common import inherit_doc
from pyspark.ml.linalg import Vectors, VectorUDT
from pyspark.sql import DataFrame
|
from pyspark.sql.functions import udf, when
from pyspark.sql.types import ArrayType, DoubleType
from pyspark.storagelevel import StorageLevel
__all__ = ['LinearSVC', 'LinearSVCModel',
'LinearSVCSummary', 'LinearSVCTrainingSummary',
|
'LogisticRegression', 'LogisticRegressionModel',
'LogisticRegressionSummary', 'LogisticRegressionTrainingSummary',
'BinaryLogisticRegressionSummary', 'BinaryLogisticRegressionTrainingSummary',
'DecisionTreeClassifier', 'DecisionTreeClassificationModel',
'GBTClassifier', 'GBTClassificationModel',
'RandomForestClassifier', 'RandomForestClassificationModel',
'RandomForestClassificationSummary', 'RandomForestClassificationTrainingSummary',
'BinaryRandomForestClassificationSummary',
'BinaryRandomForestClassificationTrainingSummary',
'NaiveBayes', 'NaiveBayesModel',
'MultilayerPerceptronClassifier', 'MultilayerPerceptronClassificationModel',
'MultilayerPerceptronClassificationSummary',
'MultilayerPerceptronClassificationTrainingSummary',
'OneVsRest', 'OneVsRestModel',
'FMClassifier', 'FMClassificationModel', 'FMClassificationSummary',
'FMClassificationTrainingSummary']
class _ClassifierParams(HasRawPredictionCol, _PredictorParams):
"""
Classifier Params for classification tasks.
.. versionadded:: 3.0.0
"""
pass
@inherit_doc
class Classifier(Predictor, _ClassifierParams, metaclass=ABCMeta):
"""
Classifier for classification tasks.
Classes are indexed {0, 1, ..., numClasses - 1}.
"""
@since("3.0.0")
def setRawPredictionCol(self, value):
"""
Sets the value of :py:attr:`rawPredictionCol`.
"""
return self._set(rawPredictionCol=value)
@inherit_doc
class ClassificationModel(PredictionModel, _ClassifierParams, metaclass=ABCMeta):
"""
Model produced by a ``Classifier``.
Classes are indexed {0, 1, ..., numClasses - 1}.
"""
@since("3.0.0")
def setRawPredictionCol(self, value):
"""
Sets the value of :py:attr:`rawPredictionCol`.
"""
return self._set(rawPredictionCol=value)
@abstractproperty
@since("2.1.0")
def numClasses(self):
"""
Number of classes (values which the label can take).
"""
raise NotImplementedError()
@abstractmethod
@since("3.0.0")
def predictRaw(self, value):
"""
Raw prediction for each possible label.
"""
raise NotImplementedError()
class _ProbabilisticClassifierParams(HasProbabilityCol, HasThresholds, _ClassifierParams):
"""
Params for :py:class:`ProbabilisticClassifier` and
:py:class:`ProbabilisticClassificationModel`.
.. versionadded:: 3.0.0
"""
pass
@inherit_doc
class ProbabilisticClassifier(Classifier, _ProbabilisticClassifierParams,
metaclass=ABCMeta):
"""
Probabilistic Classifier for classification tasks.
"""
@since("3.0.0")
def setProbabilityCol(self, value):
"""
Sets the value of :py:attr:`probabilityCol`.
"""
return self._set(probabilityCol=value)
@since("3.0.0")
def setThresholds(self, value):
"""
Sets the value of :py:attr:`thresholds`.
"""
return self._set(thresholds=value)
@inherit_doc
class ProbabilisticClassificationModel(ClassificationModel,
_ProbabilisticClassifierParams,
metaclass=ABCMeta):
"""
Model produced by a ``ProbabilisticClassifier``.
"""
@since("3.0.0")
def setProbabilityCol(self, value):
"""
Sets the value of :py:attr:`probabilityCol`.
"""
return self._set(probabilityCol=value)
@since("3.0.0")
def setThresholds(self, value):
"""
Sets the value of :py:attr:`thresholds`.
"""
return self._set(thresholds=value)
@abstractmethod
@since("3.0.0")
def predictProbability(self, value):
"""
Predict the probability of each class given the features.
"""
raise NotImplementedError()
@inherit_doc
class _JavaClassifier(Classifier, JavaPredictor, metaclass=ABCMeta):
"""
Java Classifier for classification tasks.
Classes are indexed {0, 1, ..., numClasses - 1}.
"""
@since("3.0.0")
def setRawPredictionCol(self, value):
"""
Sets the value of :py:attr:`rawPredictionCol`.
"""
return self._set(rawPredictionCol=value)
@inherit_doc
class _JavaClassificationModel(ClassificationModel, JavaPredictionModel):
"""
Java Model produced by a ``Classifier``.
Classes are indexed {0, 1, ..., numClasses - 1}.
To be mixed in with :class:`pyspark.ml.JavaModel`
"""
@property
@since("2.1.0")
def numClasses(self):
"""
Number of classes (values which the label can take).
"""
return self._call_java("numClasses")
@since("3.0.0")
def predictRaw(self, value):
"""
Raw prediction for each possible label.
"""
return self._call_java("predictRaw", value)
@inherit_doc
class _JavaProbabilisticClassifier(ProbabilisticClassifier, _JavaClassifier,
metaclass=ABCMeta):
"""
Java Probabilistic Classifier for classification tasks.
"""
pass
@inherit_doc
class _JavaProbabilisticClassificationModel(ProbabilisticClassificationModel,
_JavaClassificationModel):
"""
Java Model produced by a ``ProbabilisticClassifier``.
"""
@since("3.0.0")
def predictProbability(self, value):
"""
Predict the probability of each class given the features.
"""
return self._call_java("predictProbability", value)
@inherit_doc
class _Class
|
okuta/chainer
|
tests/chainerx_tests/unit_tests/test_backend.py
|
Python
|
mit
| 1,149 | 0 |
import pytest
import chainerx
def test_name_native():
backend = chainerx.get_global_default_context().get_backend('native')
assert 'native' == backend.name
def test_get_device_native():
backend = chainerx.get_global_default_context().get_backend('native')
device = backend.get_device(0)
assert 0 == device.index
assert 'native:0' == device.name
assert device is backend.get_device(0)
def test_get_device_count_native():
backend = chainerx.get_global_default_context().get_backend('native')
assert backend.get_device_count() > 0
@pytest.mark.cuda
def test_name_cuda():
backend = chainerx.get_global_default_context().get_backend('cuda')
assert 'cuda' == backend.name
@pytest.mark.cuda
de
|
f test_get_device_cuda():
backend = chainerx.get_global_default_context().get_backend('cuda')
device = backend.get_device(0)
assert 0 == device.index
assert 'cuda:0' == device.name
assert device is backend.get_device(0)
@pytest.mark.cuda
def test_get_device_count_cuda():
backend = chainerx.get_global_default_context().get_backend('cuda')
assert backend.g
|
et_device_count() > 0
|
jnmclarty/pysia
|
docs/conf.py
|
Python
|
mit
| 8,373 | 0.005374 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# pysia documentation build configuration file, created by
# sphinx-quickstart on Tue Jul 9 22:26:36 2013.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another
# directory, add these directories to sys.path here. If the directory is
# relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# Get the project root dir, which is the parent dir of this
cwd = os.getcwd()
project_root = os.path.dirname(cwd)
# Insert the project root dir as the first element in the PYTHONPATH.
# This lets us ensure that the source package is imported, and that its
# version is used.
sys.path.insert(0, project_root)
import pysia
# -- General configuration ---------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'PySia'
copyright = u"2017, Jeffrey McLarty"
# The version info for the project you're documenting, acts as replacement
# for |version| and |release|, also used in various other places throughout
# the built documents.
#
# The short X.Y version.
version = pysia.__version__
# The full version, including alpha/beta/rc tags.
release = pysia.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to
# some non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built
# documents.
#keep_warnings = False
# -- Options for HTML output -------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a
# theme further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as
# html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the
# top of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon
# of the docs. This file should be a Windows icon file (.ico) being
# 16x16 or 32x32 pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets)
# here, relative to this directory. They are copied after the builtin
# static files, so a file named "default.css" will overwrite the builtin
# "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names
# to template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer.
# Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer.
# Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages
# will contain a <link> tag referring to it. The value of this option
# must be the base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'pysiadoc'
# -- Options for LaTeX output ------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [
('index', 'pysia.tex',
u'PySia Documentation',
u'Jeffrey McLarty', 'manual'),
]
# The name of
|
an image file (relative to this directory) to place at
# the top of the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings
# are parts, not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
|
#latex_domain_indices = True
# -- Options for manual page output ------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'pysia',
u'PySia Documentation',
[u'Jeffrey McLarty'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ----------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'pysia',
u'PySia Documentation',
u'Jeffrey McLarty',
'pysia',
'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
#
|
shirishagaddi/django-seo
|
rollyourown/seo/base.py
|
Python
|
bsd-3-clause
| 14,784 | 0.004532 |
# -*- coding: utf-8 -*-
# TODO:
# * Move/rename namespace polluting attributes
# * Documentation
# * Make backends optional: Meta.backends = (path, modelinstance/model, view)
import hashlib
from django.db import models
from django.utils.translation import ugettext_lazy as _
from django.utils.datastructures import SortedDict
from django.utils.functional import curry
from django.contrib.sites.models import Site
from django.contrib.contenttypes.models import ContentType
from django.conf import settings
from django.utils.safestring import mark_safe
from django.core.cache import cache
from django.utils.encoding import iri_to_uri
from rollyourown.seo.utils import NotSet, Literal
from rollyourown.seo.options import Options
from rollyourown.seo.fields import MetadataField, Tag, MetaTag, KeywordTag, Raw
from rollyourown.seo.backends import backend_registry, RESERVED_FIELD_NAMES
registry = SortedDict()
class FormattedMetadata(object):
""" Allows convenient access to selected metadata.
Metadata for each field may be sourced from any one of the relevant instances passed.
"""
def __init__(self, metadata, instances, path, site=None, language=None):
self.__metadata = metadata
if metadata._meta.use_cache:
if metadata._meta.use_sites and site:
hexpath = hashlib.md5(iri_to_uri(site.domain+path)).hexdigest()
else:
hexpath = hashlib.md5(iri_to_uri(path)).hexdigest()
if metadata._meta.use_i18n:
self.__cache_prefix = 'rollyourown.seo.%s.%s.%s' % (self.__metadata.__class__.__name__, hexpath, language)
else:
self.__cache_prefix = 'rollyourown.
|
seo.%s.%s' % (self.__metadata.__class__.__name__, hexpath)
else:
self.__cache_prefix = None
self.__instances_original = instances
self.__instances_cache = []
def __instances(self):
""" Cache instances, allowing generators to be us
|
ed and reused.
This fills a cache as the generator gets emptied, eventually
reading exclusively from the cache.
"""
for instance in self.__instances_cache:
yield instance
for instance in self.__instances_original:
self.__instances_cache.append(instance)
yield instance
def _resolve_value(self, name):
""" Returns an appropriate value for the given name.
This simply asks each of the instances for a value.
"""
for instance in self.__instances():
value = instance._resolve_value(name)
if value:
return value
# Otherwise, return an appropriate default value (populate_from)
# TODO: This is duplicated in meta_models. Move this to a common home.
if name in self.__metadata._meta.elements:
populate_from = self.__metadata._meta.elements[name].populate_from
if callable(populate_from):
return populate_from(None)
elif isinstance(populate_from, Literal):
return populate_from.value
elif populate_from is not NotSet:
return self._resolve_value(populate_from)
def __getattr__(self, name):
# If caching is enabled, work out a key
if self.__cache_prefix:
cache_key = '%s.%s' % (self.__cache_prefix, name)
value = cache.get(cache_key)
else:
cache_key = None
value = None
# Look for a group called "name"
if name in self.__metadata._meta.groups:
if value is not None:
return value or None
value = '\n'.join(unicode(BoundMetadataField(self.__metadata._meta.elements[f], self._resolve_value(f))) for f in self.__metadata._meta.groups[name]).strip()
# Look for an element called "name"
elif name in self.__metadata._meta.elements:
if value is not None:
return BoundMetadataField(self.__metadata._meta.elements[name], value or None)
value = self._resolve_value(name)
if cache_key is not None:
cache.set(cache_key, value or '')
return BoundMetadataField(self.__metadata._meta.elements[name], value)
else:
raise AttributeError
if cache_key is not None:
cache.set(cache_key, value or '')
return value or None
def __unicode__(self):
""" String version of this object is the html output of head elements. """
if self.__cache_prefix is not None:
value = cache.get(self.__cache_prefix)
else:
value = None
if value is None:
value = mark_safe(u'\n'.join(unicode(getattr(self, f)) for f,e in self.__metadata._meta.elements.items() if e.head))
if self.__cache_prefix is not None:
cache.set(self.__cache_prefix, value or '')
return value
class BoundMetadataField(object):
""" An object to help provide templates with access to a "bound" metadata field. """
def __init__(self, field, value):
self.field = field
if value:
self.value = field.clean(value)
else:
self.value = None
def __unicode__(self):
if self.value:
return mark_safe(self.field.render(self.value))
else:
return u""
def __str__(self):
return self.__unicode__().encode("ascii", "ignore")
class MetadataBase(type):
def __new__(cls, name, bases, attrs):
# TODO: Think of a better test to avoid processing Metadata parent class
if bases == (object,):
return type.__new__(cls, name, bases, attrs)
# Save options as a dict for now (we will be editing them)
# TODO: Is this necessary, should we bother relaying Django Meta options?
Meta = attrs.pop('Meta', {})
if Meta:
Meta = Meta.__dict__.copy()
# Remove our options from Meta, so Django won't complain
help_text = attrs.pop('HelpText', {})
# TODO: Is this necessary
if help_text:
help_text = help_text.__dict__.copy()
options = Options(Meta, help_text)
# Collect and sort our elements
elements = [(key, attrs.pop(key)) for key, obj in attrs.items()
if isinstance(obj, MetadataField)]
elements.sort(lambda x, y: cmp(x[1].creation_counter,
y[1].creation_counter))
elements = SortedDict(elements)
# Validation:
# TODO: Write a test framework for seo.Metadata validation
# Check that no group names clash with element names
for key,members in options.groups.items():
assert key not in elements, "Group name '%s' clashes with field name" % key
for member in members:
assert member in elements, "Group member '%s' is not a valid field" % member
# Check that the names of the elements are not going to clash with a model field
for key in elements:
assert key not in RESERVED_FIELD_NAMES, "Field name '%s' is not allowed" % key
# Preprocessing complete, here is the new class
new_class = type.__new__(cls, name, bases, attrs)
options.metadata = new_class
new_class._meta = options
# Some useful attributes
options._update_from_name(name)
options._register_elements(elements)
try:
for backend_name in options.backends:
new_class._meta._add_backend(backend_registry[backend_name])
for backend_name in options.backends:
backend_registry[backend_name].validate(options)
except KeyError:
raise Exception('Metadata backend "%s" is not installed.' % backend_name)
#new_class._meta._add_backend(PathBackend)
#new_class._meta._add_backend(ModelInstanceBackend)
#new_class._meta._add_backend(ModelBackend)
#new_class._meta._add_backend(ViewBackend)
registry[name] = new_class
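The __instances() helper above illustrates a reusable pattern: lazily caching a generator so it can be iterated more than once while the underlying source is consumed only once. A standalone sketch of the same idea (not part of django-seo) is:

class ReplayableIterable(object):
    """Iterate an underlying iterable repeatedly, consuming it only once.

    Like the __instances() helper above, this assumes iterations are not
    interleaved with each other.
    """
    def __init__(self, iterable):
        self._source = iter(iterable)
        self._cache = []

    def __iter__(self):
        for item in self._cache:
            yield item
        for item in self._source:
            self._cache.append(item)
            yield item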
|
wdm0006/myflaskapp
|
tests/test_functional.py
|
Python
|
bsd-3-clause
| 3,662 | 0.000273 |
# -*- coding: utf-8 -*-
"""Functional tests using WebTest.
See: http://webtest.readthedocs.org/
"""
import pytest
from flask import url_for
from myflaskapp.models.user import User
from .factories import UserFactory
class TestLoggingIn:
def test_can_log_in_returns_200(self, user, testapp):
# Goes to homepage
res = testapp.get("/")
# Fills
|
out login form in navbar
form = res.forms['loginForm']
form['username'] = user.username
form['password'] = 'myprecious'
# Submits
res = form.submit().follow()
assert res.status_code == 200
def test_sees_alert_on_log_
|
out(self, user, testapp):
res = testapp.get("/")
# Fills out login form in navbar
form = res.forms['loginForm']
form['username'] = user.username
form['password'] = 'myprecious'
# Submits
res = form.submit().follow()
res = testapp.get(url_for('public.logout')).follow()
# sees alert
assert 'You are logged out.' in res
def test_sees_error_message_if_password_is_incorrect(self, user, testapp):
# Goes to homepage
res = testapp.get("/")
# Fills out login form, password incorrect
form = res.forms['loginForm']
form['username'] = user.username
form['password'] = 'wrong'
# Submits
res = form.submit()
# sees error
assert "Invalid password" in res
def test_sees_error_message_if_username_doesnt_exist(self, user, testapp):
# Goes to homepage
res = testapp.get("/")
# Fills out login form, password incorrect
form = res.forms['loginForm']
form['username'] = 'unknown'
form['password'] = 'myprecious'
# Submits
res = form.submit()
# sees error
assert "Unknown user" in res
class TestRegistering:
def test_can_register(self, user, testapp):
old_count = len(User.query.all())
# Goes to homepage
res = testapp.get("/")
# Clicks Create Account button
res = res.click("Create account")
# Fills out the form
form = res.forms["registerForm"]
form['username'] = 'foobar'
form['email'] = 'foo@bar.com'
form['password'] = 'secret'
form['confirm'] = 'secret'
# Submits
res = form.submit().follow()
assert res.status_code == 200
# A new user was created
assert len(User.query.all()) == old_count + 1
def test_sees_error_message_if_passwords_dont_match(self, user, testapp):
# Goes to registration page
res = testapp.get(url_for("public.register"))
# Fills out form, but passwords don't match
form = res.forms["registerForm"]
form['username'] = 'foobar'
form['email'] = 'foo@bar.com'
form['password'] = 'secret'
form['confirm'] = 'secrets'
# Submits
res = form.submit()
# sees error message
assert "Passwords must match" in res
def test_sees_error_message_if_user_already_registered(self, user, testapp):
user = UserFactory(active=True) # A registered user
user.save()
# Goes to registration page
res = testapp.get(url_for("public.register"))
# Fills out form, but username is already registered
form = res.forms["registerForm"]
form['username'] = user.username
form['email'] = 'foo@bar.com'
form['password'] = 'secret'
form['confirm'] = 'secret'
# Submits
res = form.submit()
# sees error
assert "Username already registered" in res
|
chriskiehl/Gooey
|
gooey/gui/events.py
|
Python
|
mit
| 840 | 0.014286 |
"""
|
App wide event registry
Everything in the application is communicated via pubsub. These are the events
that tie everything together.
"""
import wx # type: ignore
WINDOW_STOP = wx.Window.NewControlId()
WINDOW_CANCEL = wx.Window.NewControlId()
WINDOW_CLO
|
SE = wx.Window.NewControlId()
WINDOW_START = wx.Window.NewControlId()
WINDOW_RESTART = wx.Window.NewControlId()
WINDOW_EDIT = wx.Window.NewControlId()
WINDOW_CHANGE = wx.Window.NewControlId()
PANEL_CHANGE = wx.Window.NewControlId()
LIST_BOX = wx.Window.NewControlId()
CONSOLE_UPDATE = wx.Window.NewControlId()
EXECUTION_COMPLETE = wx.Window.NewControlId()
PROGRESS_UPDATE = wx.Window.NewControlId()
TIME_UPDATE = wx.Window.NewControlId()
USER_INPUT = wx.Window.NewControlId()
LEFT_DOWN = wx.Window.NewControlId()
|
andrewchambers/pixie
|
pixie/vm/test/test_reader.py
|
Python
|
gpl-3.0
| 1,607 | 0.001245 |
from pixie.vm.reader import read, StringReader
from pixie.vm.object import Object
from pixie.vm.cons import Cons
from pixie.vm.numbers import Integer
from pixie.vm.symbol import symbol, Symbol
from pixie.vm.persistent_vector import PersistentVector
import pixie.vm.rt as rt
import unittest
data = {u"(1 2)": (1, 2,),
u"(foo)": (symbol(u"foo"),),
u"foo": sy
|
mbol(u"foo"),
u"1": 1,
u"2": 2,
u"((42))": ((42,),),
u"(platform+ 1 2)": (symbol(u"platform+"), 1, 2),
u"[42 43 44]": [42, 43, 44],
u"(1 2 ; 7 8 9\n3)": (1, 2, 3,),
u"(1 2
|
; 7 8 9\r\n3)": (1, 2, 3,)}
class TestReader(unittest.TestCase):
def _compare(self, frm, to):
if isinstance(to, tuple):
assert isinstance(frm, Cons)
for x in to:
self._compare(frm.first(), x)
frm = frm.next()
elif isinstance(to, int):
assert isinstance(frm, Integer)
assert frm._int_val == to
elif isinstance(to, Symbol):
assert isinstance(frm, Symbol)
assert frm._str == to._str
elif isinstance(to, list):
assert isinstance(frm, PersistentVector)
for x in range(len(to)):
self._compare(rt.nth(frm, rt.wrap(x)), to[x])
else:
raise Exception("Don't know how to handle " + str(type(to)))
def test_forms(self):
for s in data:
tst = data[s]
result = read(StringReader(s), True)
assert isinstance(result, Object)
self._compare(result, tst)
|
tensorflow/profiler-ui
|
server/server.py
|
Python
|
apache-2.0
| 2,149 | 0.011633 |
# Copyright 2018 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Serves the TensorFlow Profiler UI."""
import os
import threading
import webbrowser
import flask
from .route_handlers import handle_home_page
from .route_handlers import handle_loading_page
from .route_handlers import handle_profile_api
from .utils import prepare_tmp_dir
def start_server(port, open_browser):
"""Starts Flask web server."""
# Define and prepare directories.
resources_dir = os.path.dirname(os.path.realpath(__file__))
static_dir = os.path.join(resources_dir, 'static')
templates_dir = os.path.join(resources_dir, 'templates')
prepare_tmp_dir()
# Create Flask app.
app = flask.Flask(
__name__, static_folder=static_dir, template_folder=templates_dir)
# Enable verbose error messages.
app.config['PROPAGATE_EXCEPTIONS'] = True
# Disable HTML caching.
app.config['TEMPLATES_AUTO_RELOAD'] = True
#
|
Define routes.
@app.route('/')
def home():
"""Responds to request for home page."""
return handle_home_page()
@app.route('/profile')
def profile():
"""Responds to request for profile API."""
# Build options.
return handle_profile_api()
@app.route('/loading')
def loa
|
ding():
"""Responds to request for loading page."""
return handle_loading_page()
# Define URL.
host = '0.0.0.0'
url = 'http://localhost:{}'.format(port)
if open_browser:
# Open new browser window after short delay.
threading.Timer(1, lambda: webbrowser.open(url)).start()
  # Start the server; if requested, the browser opens after the short delay scheduled above.
app.run(host, port, threaded=True)
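A minimal launch sketch, assuming the package layout shown above (the port number is arbitrary):

from server.server import start_server

# Serve the UI on localhost:7007 and open a browser tab after a short delay.
start_server(7007, True)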
|
pombredanne/django-bulbs
|
docs/conf.py
|
Python
|
mit
| 8,285 | 0.007483 |
# -*- coding: utf-8 -*-
#
# django-bulbs documentation build configuration file, created by
# sphinx-quickstart on Wed Sep 18 16:55:34 2013.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('../'))
os.environ['DJANGO_SETTINGS_MODULE'] = 'tests.settings'
print(sys.path)
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.todo', 'sphinx.ext.intersphinx']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'django-bulbs'
copyright = u'2013, Onion Tech Team'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ["_build"]
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'django-bulbsdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'django-bulbs.tex', u'django-bulbs Documentation',
u'Onion Tech Team', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'django-bulbs', u'django-bulbs Documentation',
[u'Onion Tech Team'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'django-bulbs', u'django-bulbs Documentation',
u'Onion Tech Team', 'django-bulbs', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
intersphinx_mapping = {
#'http://docs.python.org/'
|
dimonaks/siman
|
siman/set_functions.py
|
Python
|
gpl-2.0
| 28,264 | 0.022053 |
# -*- coding: utf-8 -*-
from __future__ import division, unicode_literals, absolute_import, print_function
import json
import copy
"""
Limitations of the sequence_set mode:
1) OCCMATRIX is not copied for child sets
2) The U-ramping mode is switched off for child sets
3) There are more: the afm_ordering mode, and possibly neb
4) The kpoints file is used only for the first set
5) u-ramping inherit_xred - there may be problems with more than two sets
TODO:
ngkpt_dict_for_kspacings - there could be problems when ngkpt is used; please test.
"""
from siman import header
from siman.header import print_and_log, printlog;
from siman.small_functions import is_list_like, red_prec
from siman.functions import invert
#Vasp keys
vasp_electronic_keys = [
'ALGO',
'PREC',
'LREAL',
'ENCUT',
'ENAUG',
'ISMEAR',
'SIGMA',
'EDIFF',
'NELM',
'NELMIN',
'NELMDL',
'MAXMIX',
'NELECT'
]
vasp_ionic_keys = [
'IBRION',
'ISIF',
'NSW',
'EDIFFG',
'POTIM',
'POMASS',
'ZVAL',
'SMASS'
]
vasp_other_keys = [
'AGGAC',
'LUSE_VDW',
'PARAM1',
'PARAM2',
'LVDW',
'LVHAR',
'LCALCPOL',
'EFIELD',
'VDW_RADIUS',
'VDW_SCALING',
'VDW_CNRADIUS',
'LWANNIER90_RUN',
'IVDW',
'VDW_D',
'MDALGO',
'TEBEG',
'TEEND',
'SYSTEM',
'ISTART',
'ICHARG',
'KGAMMA',
'KSPACING',
'EFIELD_PEAD',
'LPLANE',
'LSEPC',
'LSEPB',
'OMEGAMAX',
'ENCUTGW',
'NBANDSGW',
'NBANDSO',
'NBANDSV',
'ANTIRES',
'NOMEGA',
'OMEGATL',
'NCORE',
'NPAR',
'LSCALU',
'NSIM',
'ISYM',
'SYMPREC',
'LORBIT',
'EMIN',
'EMAX',
'NEDOS',
'LAECHG',
'LSORBIT',
'SAXIS',
'ISPIN',
'NBANDS',
'PSTRESS',
'ADDGRID',
'MAGMOM',
'GGA_COMPAT',
'IMAGES',
'LDAU',
'LDAUTYPE',
'LDAUL',
'LDAUU',
'LDAUJ',
'LDAUPRINT',
'LASPH',
'LMAXMIX',
'NFREE',
'AMIX',
'BMIX',
'AMIX_MAG',
'BMIX_MAG',
'WC',
'MAXMIX',
'OCCDIR1',
'OCCEXT',
'LHFCALC',
'HFSCREEN',
'TIME',
'PRECFOCK',
'NKRED',
'NGX',
'NGY',
'NGZ',
'NBMOD',
'LPARD',
'EINT',
'LWAVE',
'GGA',
'IALGO',
'LSCALAPACK',
'AMIN',
'IDIPOL',
'LDIPOL',
'DIPOL',
'LVTOT',
'AEXX',
'LDIAG',
'METAGGA',
'CMBJB',
'CMBJA',
'IMIX',
'LPEAD',
'LEPSILON',
'LCALCEPS',
'CSHIFT',
'LOPTICS',
'LRPA',
'LSPECTRAL',
'LCHARG',
'LELF',
'RWIGS',
'NUPDOWN',
'ALDAC',
'LAMBDA',
'SUBATOM',
'KPPRA',
'LAMBDA_D_K',
'USEPOT',
'M_CONSTR',
'I_CONSTRAINED_M',
'CORE_C',
'EB_K',
'LVDW_EWALD',
]
vasp_keys = vasp_electronic_keys+vasp_ionic_keys+vasp_other_keys
siman_keys = [
'universal', # universal paramater with any content
'u_ramping_region', #deprecated
'u_ramping_nstep', #number of u ramping steps
'magnetic_moments',
'afm_ordering',
'set_sequence',# sequence of sets
'savefile', #additional keys pointing which files should be saved
'k_band_structure', # list, first position is number of points, then high-symmetry k-points in the form ['G', 0, 0, 0] in reciprocal space for calculating band structure
'path2pot', # path to folder with potentials - used with potdir; if not provided that header.path2potentials is used
'path_to_potcar', # explicit path to potential - depreacated
'periodic', # 1 or 0, periodic boundary conditions or not; by default considered periodic
]
aims_keys = [
'k_grid',
'default_initial_moment',
'spin',
]
def read_vasp_sets(user_vasp_sets, override_global = False):
"""
Read user sets and add them to project database
Now for VASP
###INPUT:
- varset (dict) - database dict with all sets of a project
- user_vasp_sets (list) - list of user sets that describes creation of new sets based on inheritance
- override - allows recreating all sets; can be useful when you want to add some new property to all your sets - very dangerous to do!
###RETURN:
- user_vasp_sets (list)
"""
varset = header.varset
bfolder = '' #by default no blockfolder
for l in user_vasp_sets:
if override_global or 'over' in l[-1]:
override = True
else:
override = False
if override or l[0] not in varset:
# print override, 'override'
param = l[2]
if 'bfolder' in param:
bfolder = param['bfolder']
else:
bfolder = None
s = inherit_iset(l[0], l[1], varset, override = override, newblockfolder = bfolder)
# print ('param', param,)
s.load(param, inplace = True)
header.varset = varset
return varset
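# Illustrative sketch (hypothetical set names and INCAR values, not taken from the
# project): each entry of user_vasp_sets is expected to look like
# (new_set_name, parent_set_name, params_dict, flags), where the last element is
# checked for the substring 'over' to force re-creation of an existing set.
#
#     user_vasp_sets = [
#         ('opt',  'static', {'ISIF': 3, 'NSW': 25, 'bfolder': 'relax'}, 'over'),
#         ('band', 'static', {'ICHARG': 11, 'LORBIT': 11}, ''),
#     ]
#     varset = read_vasp_sets(user_vasp_sets)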
class InputSet():
"""docstring for InputSet
The second important class which is used to store
parameters of calculation
For VASP parameters *self.vasp_params* dict is used;
usually it contains the parameters in the same format as INCAR file.
However, several exceptions are:
for 'LDAUU', 'LDAUJ', 'LDAUL' you should provide
dictionaries with corresponding values for each element in the form: {'Co':3.4,}.
self.potdir (dict) - name of POTCAR folder for each element, for example {3:'Li', 8:'O'}
self.blockfolder (str) - additional subfolder will be created calculation with this set
self.save_last_wave (bool) - set True to save last WAVECAR in u-ramping mode
self.kpoints_file - if True, a k-points file is created; if a string, it is treated as the path to an external kpoints file
self.path_to_potcar (str) - explicit path to potcar, can be used instead of self.potdir
self.set_sequence (list) - list of InputSet() objects to make multiset runs. The current set is used as a first one.
TODO
Describe the difference between update() and load() methods !
"""
def __init__(self, ise = None, path_to_potcar = None):
#super(InputSet, self).__init__()
self.ise = ise
self.name = ise
self.des = "" # description
self.potdir = {}
self.units = "vasp"
self.vasp_params = {}
self.params = self.vasp_params # params for any code!
self.mul_enaug = 1
self.history = "Here is my uneasy history( :\n"
self.tsmear = None
self.tolmxf = None
self.ngkpt = None
self.blockfolder = ''
self.set_sequence = None
self.kpoints_file = None # can be path to external file
self.save_last_wave = None #if True than do no remove last wavefunction
self.periodic = 1 # PBC
# self.use_ngkpt = False
if path_to_potcar:
self.path_to_potcar = path_to_potcar
else:
self.path_to_potcar = None
#Code scpecific parameters, now only for Vasp
for key in vasp_electronic_keys:
self.vasp_params[key] = None
for key in vasp_ionic_keys:
self.vasp_params[key] = None
for key in vasp_other_keys:
self.vasp_params[key] = None
for key in aims_keys:
self.params[key] = None
#add to varset
# if ise not in header.varset:
header.varset[ise] = self
def printme(self):
for key in self.vasp_params:
if self.vasp_params[key] == None: continue
print_and_log( "{:30s} = {:s} ".format("s.vasp_params['"+key+"']", str(self.vasp_params[key]) ), imp = 'Y', end = '\n' )
printlog('ngkpt:', self.ngkpt, imp = 'Y')
printlog('POTDIR:', self.potdir, imp = 'Y', end = '\n' )
def update(self):
#deprecated, but still can be useful
# print_and_log('Updating set ...\n')
# c1 = 1; c2 = 1
# if self.units == "abinit":
# c1 = to_eV
# c2 = Ha_Bohr_to_eV_A
# #Update Vasp parameters
# if self.units == "vasp":
# c1 = 1
# c2 = 1
# if self.ecut == None:
# self.vasp_params['ENCUT'] = None
# self.vasp_params['ENAUG'] = None
# else:
# self.vasp_params['ENCUT'] = self.ecut * c1* self.dilatmx * self.dilatmx
# self.vasp_params['ENAUG'] = self.mul_enaug * self.vasp_params['ENCUT']
# self.vasp_params['SIGMA'] = self.tsmear * c1
vp = self.vasp_params
self.tsmear = vp.get('SIGMA')
self.tolmxf = vp.get('EDIFFG')
if self.tolmxf and self.tolmxf < 0:
self.tolmxf*=-1
self.toldfe = vp.get('EDIFF')
# self.vasp_params['EDIFF'] = self.toldfe * c1
# self.vasp_params['N
|
Storj/metacore
|
metacore/tests/test_upload.py
|
Python
|
agpl-3.0
| 12,336 | 0 |
import sys
import copy
import json
import os.path
import unittest
from hashlib import sha256
from io import BytesIO
from metacore import storj
from metacore.database import files
from metacore.error_codes import *
from metacore.tests import *
if sys.version_info.major == 3:
from unittest.mock import patch, Mock
else:
from mock import patch, Mock
__author__ = 'karatel'
class UploadFileCase(unittest.TestCase):
"""
Test uploading files to the Node.
"""
url = '/api/files/'
def setUp(self):
"""
Switch to test config.
Remember initial records in the 'files' table.
Remember initial files set in the Upload Dir.
Remember initial blacklist content.
"""
self.app = storj.app
self.app.config['TESTING'] = True
self.files = set(tuple(_) for _ in files.select().execute())
self.stored_files = set(os.listdir(self.app.config['UPLOAD_FOLDER']))
self.file_data = b'some data'
valid_hash = sha256(self.file_data).hexdigest()
valid_signature = test_btctx_api.sign_unicode(test_owner_wif,
valid_hash)
self.blocked_data = b'blocked_data'
self.blocked_hash = sha256(self.blocked_data).hexdigest()
with open(self.app.config['BLACKLIST_FILE'], 'r+') as fp:
self.initial_blacklist = fp.read()
fp.writelines((self.blocked_hash + '\n',))
self.send_data = {
'data_hash': valid_hash,
'file_data': (BytesIO(self.file_data), 'test_file'),
'file_role': '000'
}
self.headers = {
'sender_address': test_owner_address,
'signature': valid_signature
}
self.patcher = patch('metacore.processor.BTCTX_API', test_btctx_api)
self.patcher.start()
def tearDown(self):
"""
Switch off some test configs.
Remove new records from the 'files' table.
Remove new files from Upload Dir.
Return initial blacklist content.
"""
self.patcher.stop()
files.delete().where(
files.c.hash not in (_[0] for _ in self.files)
).execute()
added_files = set(
os.listdir(self.app.config['UPLOAD_FOLDER'])
) - self.stored_files
for filename in added_files:
os.unlink(os.path.join(self.app.config['UPLOAD_FOLDER'], filename))
with open(self.app.config['BLACKLIST_FILE'], 'w') as fp:
fp.write(self.initial_blacklist)
def _get_saved_file_path(self):
"""
Generate path for file which is expected to be saved.
:return: file path
:rtype: str
"""
return os.path.join(self.app.config['UPLOAD_FOLDER'],
self.send_data['data_hash'])
def _make_request(self, data, headers=None):
"""
Make a common request for this Test Case. Get a response.
:return: Response
"""
if headers is None:
headers = self.headers
with self.app.test_client() as c:
response = c.post(
path=self.url,
data=data,
content_type='multipart/form-data',
headers=headers
)
return response
def test_success_upload(self):
"""
Upload file with all valid data.
"""
response = self._make_request(self.send_data)
self.assertEqual(201, response.status_code,
"'Created' status code is expected.")
self.assertEqual('application/json', response.content_type,
"Has to be a JSON-response.")
self.assertDictEqual(
{'data_hash': self.send_data['data_hash'],
'file_role': self.send_data['file_role']},
json.loads(response.data.decode()),
"Unexpected response data."
)
uploaded_file_record = files.select(
files.c.hash == self.send_data['data_hash']
).execute().first()
self.assertIsNotNone(uploaded_file_record,
"File record does not exist in the table.")
self.assertEqual(self.headers['sender_address'],
uploaded_file_record.owner,
"Sender address has to be stored into 'owner' field.")
self.assertSetEqual(
self.files | {tuple(uploaded_file_record)},
set(tuple(_) for _ in files.select().execute()),
"Only new record has to be inserted in the database. "
"No other changes."
)
try:
with open(self._get_saved_file_path(), 'rb') as stored_file:
self.assertEqual(
self.file_data, stored_file.read(),
"Stored file data does not match with uploaded one."
)
except OSError:
self.assertTrue(False, 'Uploaded file is not saved.')
def test_invalid_hash(self):
"""
Try to upload file with invalid SHA-256 hash.
"""
self.send_data['data_hash'] = 'invalid hash'
self.headers['signature'] = test_btctx_api.sign_unicode(
test_owner_wif,
self.send_data['data_hash']
)
response = self._make_request(self.send_data)
self.assertEqual(400, response.status_code,
"Response has to be marked as 'Bad Request'.")
self.assertEqual('application/json', response.content_type,
"Has to be a JSON-response.")
self.assertDictEqual(
{'error_code': ERR_TRANSFER['INVALID_HASH']},
json.loads(response.data.decode()),
"Unexpected response data."
)
self.assertSetEqual(
self.files,
set(tuple(_) for _ in files.select().execute()),
"Database has to be unchanged."
)
self.assertFalse(os.path.exists(self._get_saved_file_path()),
"File should not be saved.")
def test_blocked_hash(self):
"""
Try to upload file with blacklisted SHA-256 hash.
"""
self.send_data.update({
'file_data': (BytesIO(self.blocked_data), 'test_file'),
'data_hash': self.blocked_hash
})
self.headers['signature'] = test_btctx_api.sign_unicode(
test_owner_wif,
self.send_data['data_hash']
)
response = self._make_request(self.send_data)
self.assertEqual(404, response.status_code,
"'Not Found' status code is expected.")
self.assertSetEqual(
self.files,
set(tuple(_) for _ in files.select().execute()),
"Database has to be unchanged."
)
self.assertFalse(os.path.exists(self._get_saved_file_path()),
"File should not be saved.")
def test_invalid_signature(self):
"""
Try to upload file with invalid signature.
"""
self.headers['signature'] = self.headers['signature'].swapcase()
response = self._make_request(self.send_data)
self.assertEqual(400, response.status_code,
"Response has to be marked as 'Bad Request'.")
self.assertEqual('application/json', response.content_type,
"Has to be a JSON-response.")
self.assertDictEqual(
{'error_code': ERR_TRANSFER['INVALID_SIGNATURE']},
json.loads(response.data.decode()),
"Unexpected response data."
)
self.assertSetEqual(
self.files,
set(tuple(_) for _ in files.select().execute()),
"Database has to be unchanged."
)
self.assertFalse(os.path.exists(self._get_saved_file_path()),
"File should not be saved.")
def test_mismatched_hash(self):
"""
Try to upload file with mismatched SHA-256 hash.
"""
self.send_data['data_hash'] = sha256(self.file_data + b'_').hexdige
|
netdingo/cbplayer
|
disk.py
|
Python
|
gpl-2.0
| 882 | 0.037415 |
## encoding=utf-8
#!/usr/bin/env python
"""
cubieboard ip module, which shows the ip
"""
__author__ = "Dingo"
__copyright__ = "Copyright 2013, Cubieboard Player Project"
__credits__ = ["PySUNXI project"]
__license__ = "GPL"
__version__ = "0.0.2"
__maintainer__= "Dingo"
__email__ = "btrfs@sina.com"
import os, sys, time
import cbtask
import pdb
class IPTask(cbtask.CbPlayerTask):
"""
do IP playing
"""
def __init__(self, name):
cbtask.CbPlayerTask.__init__(self, name)
def main(self): ## task entry
ret = 0
return ret
pass
def handle_enter_key(self): ##
#TODO
pass
def handle_exit_key(self): ##
#TODO
pass
def handle_left_key(self): ##
#TODO
pass
def handle_right_key(self): ##
#TODO
pass
if __name__ == "__main__" :
pass
|
npuichigo/ttsflow
|
third_party/tensorflow/tensorflow/contrib/learn/python/learn/dataframe/transforms/example_parser.py
|
Python
|
apache-2.0
| 2,407 | 0.005401 |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A Transform that parses serialized tensorflow.Example protos."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
from tensorflow.contrib.learn.python.learn.dataframe import transform
from tensorflow.python.ops import parsing_ops
class ExampleParser(transform.TensorFlowTransform):
"""A Transform that parses serialized `tensorflow.Example` protos."""
def __init__(self, features):
"""Initialize `ExampleParser`.
The `features` argument must be an object that can be converted to an
`OrderedDict`. The keys should be strings and will be used to name the
output. Values should be either `VarLenFeature` or `FixedLenFeature`. If
`features` is a dict, it will be sorted by key.
Args:
features: An object that can be converted to an `OrderedDict` mapping
column names to feature definitions.
"""
super(ExampleParser, self).__init__()
if isinstance(features, dict):
self._ordered_features = collections.OrderedDict(sorted(features.items(
), key=lambda f: f[0]))
else:
self._ordered_features = collections.OrderedDict(features)
@property
def name(self):
return "ExampleParser"
@property
def input_valency(self):
return 1
@property
def _output_names(self):
return list(self._ordered_features.keys())
@transform._parameter # pylint: disable=protected-access
def feature_definitions(self):
return self._ordered_features
def _apply_transform(self, input_tensors, **kwargs):
parsed_values = parsing_ops.parse_example(input_tensors[0],
                                          features=self._ordered_features)
# pylint: disable=not-callable
return self.return_type(**parsed_values)
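# A minimal usage sketch (hypothetical feature spec, assuming the usual
# ``import tensorflow as tf``; the exact pipeline wiring depends on the
# surrounding tf.contrib.learn dataframe code):
#
#     parser = ExampleParser(features={
#         "age":   tf.FixedLenFeature([1], dtype=tf.int64),
#         "label": tf.VarLenFeature(dtype=tf.string),
#     })
#     # maps a column of serialized Example protos to output columns
#     # "age" and "label", sorted by key as described in __init__ above.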
|
JeffreyBLewis/WebVoteView
|
model/searchMeta.py
|
Python
|
apache-2.0
| 568 | 0.03169 |
import pymongo
import json
cache = {}
client = pymongo.MongoClient()
try:
dbConf = json.load(open("./model/db.json","r"))
except:
try:
dbConf = json.load(open("./db.json","r"))
except:
dbConf = {"dbname": "voteview"}
db = client[dbConf["dbname"]]
def metaLookup(api = ""):
if not api:
returnDict = {"loyalty_counts": 0}
elif api == "Web_Members":
returnDict = {"nominate": 0}
for m in db.voteview_metadata.find({}, returnDict).sort('time', -1).limit(1):
meta = m
return meta
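# Usage sketch (assumes the voteview_metadata collection is populated):
#   latest = metaLookup()                     # newest document, 'loyalty_counts' projected out
#   members = metaLookup(api="Web_Members")   # newest document, 'nominate' projected out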
|
Yhzhtk/pytool
|
game/aixiaochu/xiaochu.py
|
Python
|
gpl-2.0
| 2,786 | 0.013943 |
# encoding=utf-8
'''
Created on 2013-9-8
@author: gudh
'''
start_pos = (5, 222) # starting position
block_size = (67, 67) # block size
rel_pos = (33, 28) # sampling position relative to the block origin
colors = (
(255, 255, 255), # white
(164, 130, 213), # purple
(247, 214, 82), # yellow
(244, 160, 90), # earth (brown)
(90, 186, 238), # blue
(247, 69, 95), # red
(173, 235, 82) # green
)
colornames = (u'ba', u'zh', u'hu', u'tu', u'la', u'ho', u'lv')
ax = (35, 35, 35) # allowed per-channel tolerance
def get_pix(img):
'''Get the test start position and block size'''
m = 5
n = 222 + 67
x = 67
for i in range(7):
print "c%d = %s" % (i + 1, get_color(img, m + i * x + 33, n + 20)[0:3])
def get_pos(i, j):
'''Get the sampling point inside block (i, j)'''
x = start_pos[0] + i * block_size[0] + rel_pos[0]
y = start_pos[1] + j * block_size[1] + rel_pos[1]
return (x, y)
def get_rc_pos(rc):
'''Get the point for rc; note that row and column are swapped'''
x = start_pos[0] + rc[1] * block_size[0] + rel_pos[0]
y = start_pos[1] + rc[0] * block_size[1] + rel_pos[1]
return (x, y)
def get_block(i, j):
'''Get the bounding box of block (i, j)'''
x = start_pos[0] + i * block_size[0]
y = start_pos[1] + j * block_size[1]
w = x + block_size[0]
h = y + block_size[1]
return (x, y, w, h)
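# Worked example of the coordinate maths above (values follow directly from
# start_pos, block_size and rel_pos defined at the top of this module):
#   get_pos(1, 2)   -> (5 + 1*67 + 33, 222 + 2*67 + 28) = (105, 384)
#   get_block(0, 0) -> (5, 222, 72, 289)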
def similar_color(p, color):
'''Check whether two colors are similar within the allowed tolerance'''
#print p, color
for i in range(3):
if abs(p[i] - color[i]) >= ax[i]:
return False
return True
def get_color(img, i, j):
'''Get the color index of the pixel sampled for block (i, j)'''
p = get_pos(i, j)
#print p
index = 0
color = img.getRawPixel(p[0], p[1])[1:]
#color = img.getpixel(p)
for index in range(len(colors)):
if similar_color(color, colors[index]):
return index
return -1
def get_pic_info(img):
'''Get the color matrix of the whole board'''
mat = []
blank_c = 0
for j in range(7):
mx = []
for i in range(7):
c = get_color(img, i, j)
mx.append(c)
if c == -1:
blank_c += 1
mat.append(mx)
print_mat(mat)
if(blank_c > 7):
print "blank is %d, return None" % blank_c
mat = None
return mat
def cut_all(img):
'''Crop every block and save it as a separate image'''
for j in range(7):
for i in range(7):
b = get_block(i, j)
im = img.crop(b)
im.save("c:/m/%d%d.jpg" % (i, j), "JPEG")
def print_mat(mat):
'''Print the result matrix'''
print ".", "|", "0 1 2 3 4 5 6"
i = 0
for m in mat:
print i,"|",
i += 1
for n in m:
if n < 0:
print "No",
else:
print colornames[n],
print
if __name__ == "main":
import Image
img = Image.open(r"c:/m.png")
mat = get_pic_info(img)
|
bsmedberg/socorro
|
webapp-django/crashstats/symbols/tests/test_views.py
|
Python
|
mpl-2.0
| 15,401 | 0 |
import os
import shutil
import tempfile
from nose.tools import eq_, ok_
from django.core.urlresolvers import reverse
from django.contrib.auth.models import User, Permission
from django.core.files import File
from crashstats.tokens.models import Token
from crashstats.crashstats.tests.test_views import BaseTestViews
from crashstats.symbols import models
from crashstats.symbols.views import check_symbols_archive_content
from .base import ZIP_FILE, TARGZ_FILE, TGZ_FILE, TAR_FILE
class EmptyFile(object):
def __init__(self, name):
self.name = name
def read(self):
return ''
def size(self):
return 0
class TestViews(BaseTestViews):
def setUp(self):
super(TestViews, self).setUp()
self.tmp_dir = tempfile.mkdtemp()
def tearDown(self):
super(TestViews, self).tearDown()
shutil.rmtree(self.tmp_dir)
def _login(self):
user = User.objects.create_user('test', 'test@mozilla.com', 'secret')
assert self.client.login(username='test', password='secret')
return user
def test_check_symbols_archive_content(self):
content = """
HEADER 1
HEADER 2
Line 1
Line Two
Line Three
"""
# check that the header is not checked
disallowed = ('HEADER',)
with self.settings(DISALLOWED_SYMBOLS_SNIPPETS=disallowed):
error = check_symbols_archive_content(content.strip())
ok_(not error)
# match something
disallowed = ('Two', '2')
with self.settings(DISALLOWED_SYMBOLS_SNIPPETS=disallowed):
error = check_symbols_archive_content(content.strip())
ok_(error)
ok_('Two' in error)
# match nothing
disallowed = ('evil', 'Bad')
with self.settings(DISALLOWED_SYMBOLS_SNIPPETS=disallowed):
error = check_symbols_archive_content(content.strip())
ok_(not error)
def test_home(self):
self._create_group_with_permission('upload_symbols')
url = reverse('symbols:home')
response = self.client.get(url)
eq_(response.status_code, 302)
self.assertRedirects(
response,
reverse('crashstats:login') + '?next=%s' % url
)
self._login()
with self.settings(SYMBOLS_PERMISSION_HINT_LINK=None):
response = self.client.get(url)
eq_(response.status_code, 200)
link = {
'url': 'https://bugzilla.mozilla.org',
'label': 'Bugzilla'
}
with self.settings(SYMBOLS_PERMISSION_HINT_LINK=link):
response = self.client.get(url)
eq_(response.status_code, 200)
ok_(link['url'] in response.content)
ok_(link['label'] in response.content)
def test_home_with_previous_uploads(self):
url = reverse('symbols:home')
user = self._login()
self._add_permission(user, 'upload_symbols')
upload1 = models.SymbolsUpload.objects.create(
user=user,
content='file1\nfile2',
filename='file1.zip',
size=12345
)
upload2 = models.SymbolsUpload.objects.create(
user=user,
content='file1\nfile2',
filename='sample.zip',
size=10000
)
with open(ZIP_FILE) as f:
upload2.file.save('sample.zip', File(f))
response = self.client.get(url)
eq_(response.status_code, 200)
# note that the file for upload1 does not exist
ok_(
reverse('symbols:download', args=(upload1.pk,))
not in response.content
)
# but you can for upload 2
ok_(
reverse('symbols:download', args=(upload2.pk,))
in response.content
)
# but you can preview both
ok_(
reverse('symbols:preview', args=(upload1.pk,))
in response.content
)
ok_(
reverse('symbols:preview', args=(upload2.pk,))
in response.content
)
def test_web_upload(self):
url = reverse('symbols:web_upload')
response = self.client.get(url)
eq_(response.status_code, 302)
self.assertRedirects(
response,
reverse('crashstats:login') + '?next=%s' % url
)
user = self._login()
response = self.client.get(url)
eq_(response.status_code, 302)
self.assertRedirects(
response,
reverse('crashstats:login') + '?next=%s' % url
)
# you need to have the permission
self._add_permission(user, 'upload_symbols')
response = self.client.get(url)
eq_(response.status_code, 200)
# now we can post
with self.settings(MEDIA_ROOT=self.tmp_dir):
with open(ZIP_FILE) as file_object:
response = self.client.post(
url,
{'file': file_object}
)
eq_(response.status_code, 302)
symbol_upload = models.SymbolsUpload.objects.get(user=user)
eq_(symbol_upload.filename, os.path.basename(ZIP_FILE))
ok_(symbol_upload.size)
ok_(symbol_upload.file)
ok_(symbol_upload.file_exists)
ok_(symbol_upload.content)
def test_web_upload_disallowed_content(self):
url = reverse('symbols:web_upload')
user = self._login()
self._add_permission(user, 'upload_symbols')
# because the file ZIP_FILE contains the word `south-africa-flag.jpeg`
# it should not be allowed to be uploaded
disallowed = ('flag',)
with self.settings(MEDIA_ROOT=self.tmp_dir,
DISALLOWED_SYMBOLS_SNIPPETS=disallowed):
with open(ZIP_FILE) as file_object:
response = self.client.post(
url,
{'file': file_object}
)
eq_(response.status_code, 400)
ok_('flag' in response.content)
def test_web_upload_tar_gz_file(self):
url = reverse('symbols:web_upload')
user = self._login()
self._add_permission(user, 'upload_symbols')
# now we can post
with self.settings(MEDIA_ROOT=self.tmp_dir):
with open(TARGZ_FILE) as file_object:
response = self.client.post(
url,
{'file': file_object}
)
eq_(response.status_code, 302)
symbol_upload = models.SymbolsUpload.objects.get(user=user)
eq_(symbol_upload.filename, os.path.basename(TARGZ_FILE))
ok_(symbol_upload.size)
ok_(symbol_upload.file)
ok_(symbol_upload.file_exists)
ok_(symbol_upload.content)
def test_web_upload_tgz_file(self):
url = reverse('symbols:web_upload')
user = self._login()
self._add_permission(user, 'upload_symbols')
# now we can post
with self.settings(MEDIA_ROOT=self.tmp_dir):
with open(TGZ_FILE) as file_object:
response = self.client.post(
url,
{'file': file_object}
)
eq_(response.status_code, 302)
symbol_upload = models.SymbolsUpload.objects.get(user=user)
eq_(symbol_upload.filename, os.path.basename(TGZ_FILE))
ok_(symbol_upload.size)
ok_(symbol_upload.file)
ok_(symbol_upload.file_exists)
ok_(symbol_upload.content)
def test_web_upload_tar_file(self):
url = reverse('symbols:web_upload')
user = self._login()
self._add_permission(user, 'upload_symbols')
# now we can post
with self.settings(MEDIA_ROOT=self.tmp_dir):
with open(TAR_FILE) as file_object:
response = self.client.post(
url,
{'file': file_object}
)
eq_(response.status_code, 302)
symbol_upload = models.SymbolsUpload
|
zhkzyth/storm_maker
|
template/src/app.py
|
Python
|
mit
| 1,540 | 0 |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
import signal
import tornado.ioloop
import tornado.web
import tornado.httpserver
import tornadoredis
import torndb
from tornado.options import options, define
import log
import handlers.test
import handlers.common  # assumed module providing the Better404Handler referenced in the handler table below
from settings import (
DEBUG, PORT, HOST,
MYSQL_CONFIG, REDIS_CONFIG
)
class Application(tornado.web.Application):
def __init__(self, *args, **kwargs):
_handlers = [
(r"/test/hello", handlers.test.TestHandler),
(r".*", handlers.common.Better404Handler),
]
_settings = {
"debug": options.debug,
}
self.db = torndb.Connection(**MYSQL_CONFIG)
self.redis_conn = tornadoredis.Client(**REDIS_CONFIG)
tornado.web.Application.__init__(self, _handlers, **_settings)
def sig_handler(sig, frame):
log.warning('Caught signal: %s', sig)
tornado.ioloop.IOLoop.instance().stop()
def main():
# Tricks enable some log features of tornado
options.parse_command_line()
log.info("server start")
signal.signal(signal.SIGTERM, sig_handler)
signal.signal(signal.SIGINT, sig_handler)
app = Application()
http_server = tornado.httpserver.HTTPServer(app, xheaders=True)
http_server.listen(options.port, options.host)
tornado.ioloop.IOLoop.instance().start()
define("port", default=PORT, he
|
lp="port", type=int)
def
|
ine("host", default=HOST, help="host", type=str)
define("debug", default=DEBUG, help="debug mode", type=bool)
if __name__ == '__main__':
main()
|
Shirling-VT/davitpy_sam
|
davitpy/gme/sat/poes.py
|
Python
|
gpl-3.0
| 33,273 | 0.033751 |
"""
.. module:: poes
:synopsis: A module for reading, writing, and storing poes Data
.. moduleauthor:: AJ, 20130129
*********************
**Module**: gme.sat.poes
*********************
**Classes**:
* :class:`poesRec`
**Functions**:
* :func:`readPoes`
* :func:`readPoesFtp`
* :func:`mapPoesMongo`
* :func:`overlayPoesTed`
"""
from davitpy.gme.base.gmeBase import gmeData
class poesRec(gmeData):
"""a class to represent a record of poes data. Extends :class:`gmeBase.gmeData`. Insight on the class members can be obtained from `the NOAA NGDC site <ftp://satdat.ngdc.noaa.gov/sem/poes/data/readme.txt>`_. Note that Poes data is available from 1998-present day (or whatever the latest NOAA has uploaded is). **The data are the 16-second averages**
**Members**:
* **time** (`datetime <http://tinyurl.com/bl352yx>`_): an object identifying which time these data are for
* **info** (str): information about where the data come from. *Please be courteous and give credit to data providers when credit is due.*
* **dataSet** (str): the name of the data set
* **satnum** (ind): the noaa satellite number
* **sslat** (float): Geographic Latitude of sub-satellite point, degrees
* **sslon** (float): Geographic Longitude of sub-satellite point, degrees
* **folat** (float): Geographic Latitude of foot-of-field-line, degrees
* **folon** (float): Geographic Longitude of foot-of-field-line, degrees
* **lval** (float): L-value
* **mlt** (float): Magnetic local time of foot-of-field-line, degrees
* **pas0** (float): MEPED-0 pitch angle at satellite, degrees
* **pas90** (float): MEPED-90 pitch angle at satellite, degrees
* **mep0e1** (float): MEPED-0 > 30 keV electrons, counts/sec
* **mep0e2** (float): MEPED-0 > 100 keV electrons, counts/sec
* **mep0e3** (float): MEPED-0 > 300 keV electrons, counts/sec
* **mep0p1** (float):MEPED-0 30 keV to 80 keV protons, counts/sec
* **mep0p2** (float): MEPED-0 80 keV to 240 keV protons, counts/sec
* **mep0p3** (float): 240 kev to 800 keV protons, counts/sec
* **mep0p4** (float): MEPED-0 800 keV to 2500 keV protons, counts/sec
* **mep0p5** (float): MEPED-0 2500 keV to 6900 keV protons, counts/sec
* **mep0p6** (float): MEPED-0 > 6900 keV protons, counts/sec,
* **mep90e1** (float): MEPED-90 > 30 keV electrons, counts/sec,
* **mep90e2** (float): MEPED-90 > 100 keV electrons, counts/sec
* **mep90e3** (float): MEPED-90 > 300 keV electrons, counts/sec
* **mep90p1** (float): MEPED-90 30 keV to 80 keV protons, counts/sec
* **mep90p2** (float): MEPED-90 80 keV to 240 keV protons, counts/sec
* **mep90p3** (float): MEPED-90 240 kev to 800 keV protons, counts/sec,
* **mep90p4** (float): MEPED-90 800 keV to 2500 keV protons, counts/sec
* **mep90p5** (float): MEPED-90 2500 keV to 6900 keV protons, counts/sec
* **mep90p6** (float):MEPED-90 > 6900 keV protons, counts/sec
* **mepomp6** (float): MEPED omni-directional > 16 MeV protons, counts/sec
* **mepomp7** (float): MEPED omni-directional > 36 Mev protons, counts/sec
* **mepomp8** (float): MEPED omni-directional > 70 MeV protons, counts/sec
* **mepomp9** (float): MEPED omni-directional >= 140 MeV protons
* **ted** (float): TED, Total Energy Detector Average, ergs/cm2/sec
* **echar** (float): TED characteristic energy of electrons, eV
* **pchar** (float): TED characteristic energy of protons, eV
* **econtr** (float): TED electron contribution, Electron Energy/Total Energy
.. note::
If any of the members have a value of None, this means that they could not be read for that specific time
**Methods**:
* :func:`parseFtp`
**Example**:
::
emptyPoesObj = gme.sat.poesRec()
written by AJ, 20130131
"""
def parseFtp(self,line, header):
"""This method is
|
used to convert a line of poes data read from the NOAA NGDC FTP site into a :class:`poesRec` object.
.. note::
In general, users will not need to worry about this.
**Belongs to**: :class:`poesRec`
**Args**:
* **line** (str): the ASCII line from the FTP server
**Returns**:
* Nothing.
**Example**:
::
myPoesObj.parseFtp(ftpLine)
written by AJ, 20130131
"""
import datetime as dt
#split the line into cols
cols = line.split()
head = header.split()
self.time = dt.datetime(int(cols[0]), int(cols[1]), int(cols[2]), int(cols[3]),int(cols[4]), \
int(float(cols[5])),int(round((float(cols[5])-int(float(cols[5])))*1e6)))
for key in self.__dict__.iterkeys():
if(key == 'dataSet' or key == 'info' or key == 'satnum' or key == 'time'): continue
try: ind = head.index(key)
except Exception,e:
print e
print 'problem setting attribute',key
#check for a good value
if(float(cols[ind]) != -999.): setattr(self,key,float(cols[ind]))
def __init__(self, ftpLine=None, dbDict=None, satnum=None, header=None):
"""the intialization fucntion for a :class:`omniRec` object.
.. note::
In general, users will not need to worry about this.
**Belongs to**: :class:`omniRec`
**Args**:
* [**ftpLine**] (str): an ASCII line from the FTP server. if this is provided, the object is initialized from it. header must be provided in conjunction with this. default=None
* [**header**] (str): the header from the ASCII FTP file. default=None
* [**dbDict**] (dict): a dictionary read from the mongodb. if this is provided, the object is initialized from it. default = None
* [**satnum**] (int): the satellite number. default=None
**Returns**:
* Nothing.
**Example**:
::
myPoesObj = poesRec(ftpLine=aftpLine)
written by AJ, 20130131
"""
#note about where data came from
self.dataSet = 'Poes'
self.info = 'These data were downloaded from NASA SPDF. *Please be courteous and give credit to data providers when credit is due.*'
self.satnum = satnum
self.sslat = None
self.sslon = None
self.folat = None
self.folon = None
self.lval = None
self.mlt = None
self.pas0 = None
self.pas90 = None
self.mep0e1 = None
self.mep0e2 = None
self.mep0e3 = None
self.mep0p1 = None
self.mep0p2 = None
self.mep0p3 = None
self.mep0p4 = None
self.mep0p5 = None
self.mep0p6 = None
self.mep90e1 = None
self.mep90e2 = None
self.mep90e3 = None
self.mep90p1 = None
self.mep90p2 = None
self.mep90p3 = None
self.mep90p4 = None
self.mep90p5 = None
self.mep90p6 = None
self.mepomp6 = None
self.mepomp7 = None
self.mepomp8 = None
self.mepomp9 = None
self.ted = None
self.echar = None
self.pchar = None
self.econtr = None
#if we're initializing from an object, do it!
if(ftpLine != None): self.parseFtp(ftpLine,header)
if(dbDict != None): self.parseDb(dbDict)
def readPoes(sTime,eTime=None,satnum=None,folat=None,folon=None,ted=None,echar=None,pchar=None):
"""This function reads poes data. First, it will try to get it from the mongodb, and if it can't find it, it will look on the NOAA NGDC FTP server using :func:`readPoesFtp`. The data are 16-second averages
**Args**:
* **sTime** (`datetime <http://tinyurl.com/bl352yx>`_ or None): the earliest time you want data for
* [**eTime**] (`datetime <http://tinyurl.com/bl352yx>`_ or None): the latest time you want data for. if this is None, end Time will be 1 day after sTime. default = None
* [**satnum**] (int): the satellite you want data for. eg 17 for noaa17. if this is None, data for all satellites will be returned. default = None
* [**satnum**] (list or None): if this is not None, it must be a 2-element list of numbers, [a,b]. In this case, only data with bx values in the range [a,b] will be returned. default = None
* [**folat**] (list or None): if this is not None, it must be a 2-element list of numbers, [a,b]. In this case, only data with bx values in the range [a,b]
|
BijoySingh/HomePage
|
homepage/admin.py
|
Python
|
gpl-2.0
| 1,571 | 0.00191 |
from django.contrib import admin
from .models import *
class CategoryAdmin(admin.ModelAdmin):
list_display = ('id', 'title')
class ReviewCategoryAdmin(admin.ModelAdmin):
list_display = ('id', 'title')
class BlogCategoryAdmin(admin.ModelAdmin):
list_display = ('id', 'title')
class CardAdmin(admin.ModelAdmin):
model = Card
list_display = ['title', 'description', 'get_category', ]
def get_category(self, obj):
return obj.category.title
get_category.admin_order_field = 'category__title'
get_category.short_description = 'Category'
class AccessAdmin(admin.ModelAdmin):
list_display = ['ip', 'visit_count']
class ReviewsAdmin(admin.ModelAdmin):
list_display = ['title', 'description', 'get_category', 'score', 'created']
def get_category(self, obj):
return obj.category.title
get_category.admin_order_field = 'category__title'
get_category.short_description = 'Category'
class BlogAdmin(admin.ModelAdmin):
model = Card
list_display = ['title', 'description', 'get_category', 'position',]
def get_category(self, obj):
return obj.category.title
get_category.admin_order_field = 'category__title'
get_category.short_description = 'Category'
admin.site.register(Category, CategoryAdmin)
admin.site.register(Card, CardAdmin)
admin.site.register(AccessCount, AccessAdmin)
admin.site.register(ReviewCategory, ReviewCategoryAdmin)
admin.site.register(Reviews, ReviewsAdmin)
admin.site.register(Blog, BlogAdmin)
admin.site.register(BlogCategory, BlogCategoryAdmin)
|
grycap/clues
|
cluesplugins/nomad.py
|
Python
|
gpl-3.0
| 29,940 | 0.012792 |
#!/usr/bin/env python
#
# CLUES - Cluster Energy Saving System
# Copyright (C) 2015 - GRyCAP - Universitat Politecnica de Valencia
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import collections
import requests
import cpyutils.config
import clueslib.helpers as Helpers
import json, time
import os
from cpyutils.evaluate import TypedClass, TypedList
from cpyutils.log import Log
from clueslib.node import NodeInfo
from clueslib.platform import LRMS
from clueslib.request import Request, ResourcesNeeded, JobInfo
_LOGGER = Log("PLUGIN-NOMAD")
def open_file(file_path):
try:
file_read = open(file_path, 'r')
except:
message = "Could not open file with path '%s'" % file_path
_LOGGER.error(message)
raise Exception(message)
return file_read
def _get_memory_in_bytes(str_memory):
if str_memory.strip()[-2:] in ['Mi', 'Gi', 'Ki', 'Ti']:
unit = str_memory.strip()[-2:][1]
memory = int(str_memory.strip()[:-2])
elif str_memory.strip()[-1:] in ['M', 'G', 'K', 'T']:
unit = str_memory.strip()[-1:]
memory = int(str_memory.strip()[:-1])
else:
return int(str_memory)
if unit == 'K':
memory *= 1024
elif unit == 'M':
memory *= 1024 * 1024
elif unit == 'G':
memory *= 1024 * 1024 * 1024
elif unit == 'T':
memory *= 1024 * 1024 * 1024 * 1024
return memory
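# Illustrative conversions performed by _get_memory_in_bytes (binary multiples):
#   _get_memory_in_bytes('512M') -> 536870912
#   _get_memory_in_bytes('8Gi')  -> 8589934592
#   _get_memory_in_bytes('2048') -> 2048  (plain numbers are returned unchanged)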
class lrms(LRMS):
def _create_request(self, method, url, headers=None, body=None, auth_data=None):
if body is None:
body = {}
if headers is None:
headers = {}
if self._acl_token is not None:
headers.update({ 'X-Nomad-Token': self._acl_token})
auth = None
if auth_data is not None:
if 'user' in auth_data and 'passwd' in auth_data:
auth=requests.auth.HTTPBasicAuth( auth_data['user'], auth_data['passwd'])
response = {}
retries = 0
ok = False
while (self._max_retries > retries) and (not ok) :
retries += 1
try:
r = requests.request(method, url, verify=self._verify, cert=self._certs, headers=headers, data=body, auth=auth)
response[ 'status_code' ] = r.status_code
response[ 'text' ] = r.text
response[ 'json' ] = r.json()
ok=True
except requests.exceptions.ConnectionError:
_LOGGER.error("Cannot connect to %s, waiting 5 seconds..." % (url))
time.sleep(5)
except ValueError as e:
_LOGGER.error("JSON cannot be decoded: %s" %(r.text))
response[ 'json' ]={}
if not ok:
_LOGGER.error("Cannot connect to %s . Retries: %s" % (url, retries))
response[ 'status_code' ] = -1
response[ 'text' ] = 'No response text'
response[ 'json' ] = {}
return response
def __init__(self, NOMAD_SERVER=None, NOMAD_HEADERS=None, NOMAD_API_VERSION=None, NOMAD_API_URL_GET_ALLOCATIONS=None, NOMAD_API_URL_GET_SERVERS=None, NOMAD_API_URL_GET_CLIENTS=None, NOMAD_API_URL_GET_CLIENT_INFO=None, MAX_RETRIES=None, NOMAD_ACL_TOKEN=None, NOMAD_AUTH_DATA=None, NOMAD_API_URL_GET_CLIENT_STATUS=None, NOMAD_STATE_OFF=None, NOMAD_STATE_ON=None, NOMAD_PRIVATE_HTTP_PORT=None, NOMAD_API_URL_GET_JOBS=None, NOMAD_API_URL_GET_JOBS_INFO=None, NOMAD_API_URL_GET_ALLOCATION_INFO=None, NOMAD_NODES_LIST_CLUES=None, NOMAD_QUEUES=None, NOMAD_QUEUES_OJPN=None, NOMAD_API_URL_GET_CLIENT_ALLOCATIONS=None, NOMAD_DEFAULT_CPUS_PER_NODE=None, NOMAD_DEFAULT_MEMORY_PER_NODE=None, NOMAD_DEFAULT_CPU_GHZ=None, NOMAD_CA_CERT=None, NOMAD_SERVER_CERT=None, NOMAD_SERVER_KEY=None):
config_nomad = cpyutils.config.Configuration(
"NOMAD",
{
"NOMAD_SERVER": "http://localhost:4646",
"NOMAD_HEADERS": "{}",
"NOMAD_API_VERSION": "/v1",
"NOMAD_API_URL_GET_SERVERS": "/agent/members", # Server node
"NOMAD_API_URL_GET_CLIENTS": "/nodes", # Server node
"NOMAD_API_URL_GET_CLIENT_INFO": "/node/$CLIENT_ID$", # Server node
"NOMAD_API_URL_GET_CLIENT_STATUS": "/client/stats", # Client node
"NOMAD_API_URL_GET_CLIENT_ALLOCATIONS": "/node/$CLIENT_ID$/allocations", # Server node
"NOMAD_API_URL_GET_ALLOCATIONS": "/allocations", # Server node
"NOMAD_API_URL_GET_JOBS": "/jobs", # Server node
"NOMAD_API_URL_GET_JOBS_INFO": "/job/$JOB_ID$", # Server node
"NOMAD_API_URL_GET_ALLOCATION_INFO": "/allocation", # Server node
"NOMAD_ACL_TOKEN": None,
"MAX_RETRIES": 10,
"NOMAD_AUTH_DATA": None,
"NOMAD_STATE_OFF": "down",
"NOMAD_STATE_ON": "ready",
"NOMAD_PRIVATE_HTTP_PORT": "4646",
"NOMAD_NODES_LIST_CLUES": "/etc/clues2/nomad_vnodes.info",
"NOMAD_QUEUES": "default",
"NOMAD_QUEUES_OJPN": "", # Queues One Job Per Node
"NOMAD_DEFAULT_CPUS_PER_NODE": 2.0,
"NOMAD_DEFAULT_MEMORY_PER_NODE": "8Gi",
"NOMAD_DEFAULT_CPU_GHZ": 2.6, # Nomad use MHz to manage the jobs assigned CPU
"NOMAD_SERVER_CERT": None,
"NOMAD_SERVER_KEY": None,
"NOMAD_CA_CERT": None,
"NOMAD_TOKEN": None
}
)
self._server_url = Helpers.val_default(NOMAD_SERVER, config_nomad.NOMAD_SERVER).replace('"','')
self._api_version = Helpers.val_default(NOMAD_API_VERSION, config_nomad.NOMAD_API_VERSION).replace('"','')
self._api_url_get_allocations = Helpers.val_default(NOMAD_API_URL_GET_ALLOCATIONS, config_nomad.NOMAD_API_URL_GET_ALLOCATIONS).replace('"','')
self._api_url_get_allocation_info = Helpers.val_default(NOMAD_API_URL_GET_ALLOCATION_INFO, config_nomad.NOMAD_API_URL_GET_ALLOCATION_INFO).replace('"','')
self._api_url_get_jobs = Helpers.val_default(NOMAD_API_URL_GET_JOBS, config_nomad.NOMAD_API_URL_GET_JOBS).replace('"','')
self._api_url_get_jobs_info = Helpers.val_default(NOMAD_API_URL_GET_JOBS_INFO, config_nomad.NOMAD_API_URL_GET_JOBS_INFO).replace('"','')
self._api_url_get_servers = Helpers.val_default(NOMAD_API_URL_GET_SERVERS, config_nomad.NOMAD_API_URL_GET_SERVERS).replace('"','')
self._api_url_get_clients = Helpers.val_default(NOMAD_API_URL_GET_CLIENTS, config_nomad.NOMAD_API_URL_GET_CLIENTS).replace('"','')
self._api_url_get_clients_info = Helpers.val_default(NOMAD_API_URL_GET_CLIENT_INFO, config_nomad.NOMAD_API_URL_GET_CLIENT_INFO).replace('"','')
self._api_url_get_clients_status = Helpers.val_default(NOMAD_API_URL_GET_CLIENT_STATUS, config_nomad.NOMAD_API_URL_GET_CLIENT_STATUS).replace('"','')
self._api_url_get_clients_allocations = Helpers.val_default(NOMAD_API_URL_GET_CLIENT_ALLOCATIONS, config_nomad.NOMAD_API_URL_GET_CLIENT_ALLOCATIONS).replace('"','')
self._max_retries = Helpers.val_default(MAX_RETRIES, config_nomad.MAX_RETRIES)
self._acl_token = Helpers.val_default(NOMAD_ACL_TOKEN, config_nomad.NOMAD_ACL_TOKEN)
self._auth_data = Helpers.val_default(NOMAD_AUTH_DATA, config_nomad.NOMAD_AUTH_DATA)
self._state_off = Helpers.val_default(NOMAD_STATE_OFF, config_nomad.NOMAD_STATE_OFF).replace('"','')
self._state_on = Helpers.val_default(NOMAD_STATE_ON, config_nomad.NOMAD_STATE_ON).replace('"','')
self._http_port
|
respawner/peering-manager
|
utils/testing/functions.py
|
Python
|
apache-2.0
| 1,327 | 0.000754 |
import json
import logging
import re
from contextlib import contextmanager
@contextmanager
def disable_warnings(logger_name):
"""
Suppresses expected warning messages to keep the test output clean.
"""
logger = logging.getLogger(logger_name)
current_level = logger.level
logger.setLevel(logging.ERROR)
yield
logger.setLevel(current_level)
def extract_form_failures(html):
"""
Given raw HTML content from an HTTP response, returns a list of form errors.
"""
FORM_ERROR_REGEX = r"<!-- FORM-ERROR (.*) -->"
return re.findall(FORM_ERROR_REGEX, str(html))
def json_file_to_python_type(filename):
with open(filename, mode="r") as f:
return json.load(f)
def post_data(data):
"""
Takes a dictionary of test data and returns a dict suitable for POSTing.
"""
r = {}
for key, value in data.items():
if value is None:
r[key] = ""
elif type(value) in (list, tuple):
if value and hasattr(value[0], "pk"):
# Value is a list of instances
r[key] = [v.pk for v in value]
else:
r[key] = value
elif hasattr(value, "pk"):
# Value is an instance
r[key] = value.pk
else:
r[key] = str(value)
return r
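# Illustrative example (hypothetical field names and model instances): instances
# are flattened to their primary keys and None becomes an empty string, e.g.
#   post_data({"name": "AS65000", "asn": 65000, "tags": [tag1, tag2], "slug": None})
#   -> {"name": "AS65000", "asn": "65000", "tags": [tag1.pk, tag2.pk], "slug": ""}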
|
haup/totoro
|
totoro/app/auth/views.py
|
Python
|
gpl-3.0
| 6,038 | 0 |
from flask import render_template, redirect, request, url_for, flash
from flask_login import login_user, logout_user, login_required, current_user
from . import auth
from .. import db
from ..models import User
from ..email import send_email
from .forms import LoginForm, RegistrationForm, ChangePasswordForm,\
PasswordResetRequestForm, PasswordResetForm, ChangeEmailForm
@auth.before_app_request
def before_request():
if current_user.is_authenticated:
current_user.ping()
if not current_user.confirmed \
and request.endpoint[:5] != 'auth.' \
and request.endpoint != 'static':
return redirect(url_for('auth.unconfirmed'))
@auth.route('/unconfirmed')
def unconfirmed():
if current_user.is_anonymous or current_user.confirmed:
return redirect(url_for('main.index'))
return render_template('auth/unconfirmed.html')
@auth.route('/login', methods=['GET', 'POST'])
def login():
form = LoginForm()
if form.validate_on_submit():
user = User.query.filter_by(email=form.email.data).first()
if user is not None and user.verify_password(form.password.data):
login_user(user, form.remember_me.data)
return redirect(request.args.get('next') or url_for('main.index'))
flash('Invalid username or password.')
return render_template('auth/login.html', form=form)
@auth.route('/logout')
@login_required
def logout():
logout_user()
flash('You have been logged out.')
return redirect(url_for('main.index'))
@auth.route('/register', methods=['GET', 'POST'])
def register():
form = RegistrationForm()
if form.validate_on_submit():
user = User(email=form.email.data,
username=form.username.data,
password=form.password.data)
db.session.add(user)
db.session.commit()
token = user.generate_confirmation_token()
send_email(user.email, 'Confirm Your Account',
'auth/email/confirm', user=user, token=token)
flash('A confirmation email has been sent to you by email.')
return redirect(url_for('auth.login'))
return render_template('auth/register.html', form=form)
@auth.route('/confirm/<token>')
@login_required
def confirm(token):
if current_user.confirmed:
return redirect(url_for('main.index'))
if current_user.confirm(token):
flash('You have confirmed your account. Thanks!')
else:
flash('The confirmation link is invalid or has expired.')
return redirect(url_for('main.index'))
@auth.route('/confirm')
@login_required
def resend_confirmation():
token = current_user.generate_confirmation_token()
send_email(current_user.email, 'Confirm Your Account',
'auth/email/confirm', user=current_user, token=token)
flash('A new confirmation email has been sent to you by email.')
return redirect(url_for('main.index'))
@auth.route('/change-password', methods=['GET', 'POST'])
@login_required
def change_password():
form = ChangePasswordForm()
if form.validate_on_submit():
if current_user.verify_password(form.old_password.data):
current_user.password = form.password.data
db.session.add(current_user)
flash('Your password has been updated.')
return redirect(url_for('main.index'))
else:
flash('Invalid password.')
return render_template('auth/change_password.html', form=form)
@auth.route('/reset', methods=['GET', 'POST'])
def password_reset_request():
if not current_user.is_anonymous:
return redirect(url_for('main.index'))
form = PasswordResetRequestForm()
if form.validate_on_submit():
user = User.query.filter_by(email=form.email.data).first()
if user:
token = user.generate_reset_token()
send_email(user.email, 'Reset Your Password',
'auth/email/reset_password',
user=user, token=token,
next=request.args.get('next'))
flash('An email with instructions to reset your password has been '
'sent to you.')
return redirect(url_for('auth.login'))
return render_template('auth/reset_password.html', form=form)
@auth.route('/reset/<token>', methods=['GET', 'POST'])
def password_reset(token):
if not current_user.is_anonymous:
return redirect(url_for('main.index'))
form = PasswordResetForm()
if form.validate_on_submit():
user = User.query.filter_by(email=form.email.data).first()
if user is None:
return redirect(url_for('main.index'))
if user.reset_password(token, form.password.data):
flash('Your password has been updated.')
return redirect(url_for('auth.login'))
else:
return redirect(url_for('main.index'))
return render_template('auth/reset_password.html', form=form)
@auth.route('/change-email', methods=['GET', 'POST'])
@login_required
def change_email_request():
form = ChangeEmailForm()
if form.validate_on_submit():
if current_user.verify_password(form.password.data):
new_email = form.email.data
token = current_user.generate_email_change_token(new_email)
send_email(new_email, 'Confirm your email address',
'auth/email/change_email',
user=current_user, token=token)
flash('An email with instructions to confirm your new email '
'address has been sent to you.')
return redirect(url_for('main.index'))
else:
flash('Invalid email or password.')
return render_template('auth/change_email.html', form=form)
@auth.route('/change-email/<token>')
@login_required
def change_email(token):
if current_user.change_email(token):
flash('Your email address has been updated.')
else:
flash('Invalid request.')
return redirect(url_for('main.index'))
|
spezifanta/Paste-It
|
api/v01/views.py
|
Python
|
mit
| 749 | 0.006676 |
from django.shortcuts import redirect
from django.views.decorators.csrf import csrf_exempt
from django.http import HttpResponse
from paste.models import Paste, Language
@csrf_exempt
def add(request):
print "jojo"
if request.method == 'POST':
language = request.POST['language']
content = request.POST['content']
try:
lang = Language.objects.get(pk=language)
except:
print "lang not avalible", language
lang
|
= Language.objects.get(pk='txt')
paste = Paste(content=content, language=lang)
paste.save()
paste = Paste.objects.latest()
return HttpResponse(paste.pk, content_type='text/plain')
else:
return redirect('/api')
|
B-UMMI/INNUca
|
modules/mlst.py
|
Python
|
gpl-3.0
| 7,916 | 0.003284 |
import utils
import os
from functools import partial
import sys
from itertools import groupby as itertools_groupby
mlst_timer = partial(utils.timer, name='MLST')
def get_species_scheme_map_version(mlst_folder):
species_scheme_map_version = 1
mlst_db_path = os.path.join(os.path.dirname(os.path.dirname(mlst_folder)), 'db', 'species_scheme_map.tab')
if not os.path.isfile(mlst_db_path):
mlst_db_path = os.path.join(os.path.dirname(os.path.dirname(mlst_folder)), 'db', 'scheme_species_map.tab')
if not os.path.isfile(mlst_db_path):
sys.exit('ERROR: species_scheme_map not found. Contact the developers. In the meantime try running INNUca'
' with --skipMLST option')
else:
species_scheme_map_version = 2
return mlst_db_path, species_scheme_map_version
def set_species_scheme_map_variables(list_values, species_scheme_map_version):
if species_scheme_map_version == 1:
val_genus = list_values[0]
val_species = list_values[1]
val_scheme = list_values[2]
elif species_scheme_map_version == 2:
val_genus = list_values[1]
val_species = list_values[2]
val_scheme = list_values[0]
return val_genus, val_species, val_scheme
def parse_species_scheme_map(species_splited, mlst_db_path, species_scheme_map_version):
scheme = 'unknown'
genus_mlst_scheme = None
with open(mlst_db_path, 'rtU') as reader:
for line in reader:
line = line.splitlines()[0]
if len(line) > 0:
if not line.startswith('#'):
line = line.lower().split('\t')
line = [line[i].split(' ')[0] for i in range(0, len(line))]
val_genus, val_species, val_scheme = set_species_scheme_map_variables(line,
species_scheme_map_version)
if val_genus == species_splited[0]:
if val_species == '':
genus_mlst_scheme = val_scheme
elif val_species == species_splited[1]:
scheme = val_scheme
if scheme == 'unknown' and genus_mlst_scheme is not None:
scheme = genus_mlst_scheme
return scheme, genus_mlst_scheme
def getScheme(species):
command = ['which', 'mlst']
run_successfully, stdout, stderr = utils.runCommandPopenCommunicate(command, False, None, False)
mlst_folder = os.path.abspath(os.path.realpath(stdout.splitlines()[0]))
mlst_db_path, species_scheme_map_new = get_species_scheme_map_version(mlst_folder)
scheme, genus_mlst_scheme = parse_species_scheme_map(species.lower().split(' '), mlst_db_path,
species_scheme_map_new)
print('\n' + 'MLST scheme found for {species}: {scheme}'.format(species=species, scheme=scheme))
return scheme, species.lower().split(' ')[0], genus_mlst_scheme
def getBlastPath():
print('\n' + 'The following blastn will be used')
command = ['which', 'blastn']
run_successfully, stdout, stderr = utils.runCommandPopenCommunicate(command, False, None, True)
print(stdout)
def clean_novel_alleles(novel_alleles, scheme_mlst, profile):
"""
Clean the fasta file with the novel alleles produced by mlst
Parameters
----------
novel_alleles : str
Path for fasta file containing the novel alleles
scheme_mlst : str
MLST schema found by mlst
profile : list
List of strings with the profile found
Returns
-------
"""
unknown_genes = []
for gene_allele in profile:
gene = gene_allele.split('(')[0]
try:
allele = gene_allele.split('(')[1].rstrip(')')
if allele.startswith('~'):
unknown_genes.append(gene)
except IndexError as e:
print('WARNING: {}'.format(e))
novel_alleles_keep = {}
if len(unknown_genes) > 0:
reader = open(novel_alleles, mode='rt') # TODO: newline=None in Python3
fasta_iter = (g for k, g in itertools_groupby(reader, lambda x: x.startswith('>')))
for header in fasta_iter:
# header = header.__next__()[1:].rstrip('\r\n') # TODO: Python3
header = header.next()[1:].rstrip('\r\n')
# seq = ''.join(s.rstrip('\r\n') for s in fasta_iter.__next__()) # TODO: Python3
seq = ''.join(s.rstrip('\r\n') for s in fasta_iter.next())
if header.startswith(scheme_mlst):
gene = header.split('.')[1].split('~')[0]
if gene in unknown_genes:
novel_alleles_keep[header] = seq
reader.close()
os.remove(novel_alleles)
if len(novel_alleles_keep) > 0:
with open(novel_alleles, 'wt') as writer:
for header, seq in novel_alleles_keep.items():
writer.write('>{}\n'.format(header))
writer.write('\n'.join(utils.chunkstring(seq, 80)) + '\n')
@mlst_timer
def runMlst(contigs, scheme, outdir, species_genus, mlst_scheme_genus):
pass_qc = False
failing = {}
failing['sample'] = False
warnings = {}
novel_alleles = os.path.join(outdir, 'mlst_novel_alleles.fasta')
command = ['mlst', '--novel', novel_alleles, contigs]
run_successfully, stdout, _ = utils.runCommandPopenCommunicate(command, False, None, True)
if run_successfully:
scheme_mlst = stdout.splitlines()[0].split('\t')[1].split('_')[0]
st = stdout.splitlines()[0].split('\t')[2]
profile = stdout.splitlines()[0].split('\t')[3:]
if st == '-' and os.path.isfile(novel_alleles):
clean_novel_alleles(novel_alleles=novel_alleles, scheme_mlst=scheme_mlst, profile=profile)
else:
if os.path.isfile(novel_alleles):
os.remove(novel_alleles)
report = 'MLST found ST ' + str(st) + ' from scheme ' + scheme_mlst
print(report)
with open(os.path.join(outdir, 'mlst_report.txt'), 'wt') as writer:
writer.write('#scheme' + '\n' + scheme_mlst + '\n' + '#ST' + '\n' + st + '\n')
writer.write('#profile' + '\n' + ' '.join(profile) + '\n')
writer.flush()
if scheme_mlst.split('_', 1)[0] == scheme.split('_', 1)[0]:
pass_qc = True
else:
if scheme == 'unknown' and scheme_mlst != '-':
pass_qc = True
warnings['sample'] = 'Found {scheme_mlst} scheme for a species with unknown' \
' scheme'.format(scheme_mlst=scheme_mlst)
elif scheme == 'unknown' and scheme_mlst == '-':
pass_qc = True
elif scheme != 'unknown' and scheme_mlst == '-':
pass_qc = True
warnings['sample'] = 'Could not find a scheme for a species with known scheme ({})'.format(scheme)
elif species_genus == 'yersinia' and mlst_scheme_genus == 'yersinia':
pass_qc = True
warnings['sample'] = 'Found a Yersinia scheme ({scheme_mlst}), but it is different from what it was' \
' expected ({scheme})'.format(scheme_mlst=scheme_mlst, scheme=scheme)
else:
if mlst_scheme_genus is not None and scheme_mlst == scheme == mlst_scheme_genus:
pass_qc = True
else:
failing['sample'] = 'MLST scheme found ({scheme_mlst}) and provided ({scheme}) are not the' \
' same'.format(scheme_mlst=scheme_mlst, scheme=scheme)
print(failing['sample'])
else:
failing['sample'] = 'Did not run'
if len(warnings) > 0:
print(warnings['sample'])
return run_successfully, pass_qc, failing, warnings
msimacek/koschei | alembic/versions/42ad047fd7ff_add_user_table.py | Python | gpl-2.0 | 903 | 0.014396
"""Add user table
Revision ID: 42ad047fd7ff
Revises: 5cd786f3176
Create Date: 2014-10-03 12:14:11.091123
"""
# revision identifiers, used by Alembic.
revision = '42ad047fd7ff'
down_revision = '5cd786f3176'
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.create_table('user',
sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('name', sa.String(), nullable=False),
sa.Column('email', sa.String(), nullable=False),
sa.Column('timezone', sa.String(), nullable=True),
sa.Column('admin', sa.Boolean(), server_default='false', nullable=False),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('name')
)
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_table('user')
### end Alembic commands ###
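# Added note (not part of the generated revision): assuming an alembic.ini pointing at
# the target database, the revision would typically be applied or rolled back with:
#
#   alembic upgrade 42ad047fd7ff     # creates the "user" table
#   alembic downgrade 5cd786f3176    # drops it again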
mitdbg/modeldb | client/verta/verta/_swagger/_public/uac/model/UacGetOrganizationByIdResponse.py | Python | mit | 653 | 0.01072
# THIS FILE IS AUTO-GENERATED. DO NOT EDIT
from verta._swagger.base_type import BaseType
class UacGetOrganizationByIdResponse(BaseType):
def __init__(self, organization=None):
required = {
"organization": False,
}
self.organization = organization
for k, v in required.items():
if self[k] is None and v:
raise ValueError('attribute {} is required'.format(k))
@staticmethod
def from_json(d):
from .UacOrganization import UacOrganization
tmp = d.get('organization', None)
if tmp is not None:
d['organization'] = UacOrganization.from_json(tmp)
return UacGetOrganizationByIdResponse(**d)
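# Added usage sketch (not part of the auto-generated client; the payload shape is an
# assumption, not taken from the swagger definition):
#
#   payload = {"organization": {"id": "org-123", "name": "example"}}
#   response = UacGetOrganizationByIdResponse.from_json(payload)
#   response.organization   # -> UacOrganization built by UacOrganization.from_json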
turnerm/podpy | podpy/__init__.py | Python | mit | 271 | 0.01845
"""
podpy is an implementation of the pixel optical depth method as described in
Turner et al. 2014, MNRAS, 445, 794, and Aguirre et al. 2002, ApJ, 576, 1.
Please contact the author (Monica Turner) at turnerm@mit.edu if you have
any questions, comments or issues.
"""
toobaz/pandas | pandas/plotting/_matplotlib/compat.py | Python | bsd-3-clause | 554 | 0
# being a bit too dynamic
from distutils.version import LooseVersion
import operator
def _mpl_version(version, op):
def inner():
try:
import matplotlib as mpl
except ImportError:
return False
return (
op(LooseVersion(mpl.__version__), LooseVersion(version))
and str(mpl.__version__)[0] != "0"
)
return inner
_mpl_ge_2_2_3 = _mpl_version("2.2.3", operator.ge)
_mpl_ge_3_0_0 = _mpl_version("3.0.0", operator.ge)
_mpl_ge_3_1_0 = _mpl_version("3.1.0", operator.ge)
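# Added usage sketch (not part of the original module): _mpl_version() returns a
# callable, so these helpers are meant to be called, not compared. A hypothetical caller:
#
#   if _mpl_ge_3_0_0():
#       pass    # take the matplotlib >= 3.0 code path
#   else:
#       pass    # fall back to the older behaviour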
redarmy30/Eurobot-2017 | old year/RESET-master/Testing/server_small.py | Python | mit | 805 | 0.034783
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import socket, time
import sys
import multiprocessing
def readlines(sock, recv_buffer=4096, delim='\n'):
buffer = ''
data = True
while data:
data = sock.recv(recv_buffer)
buffer += data
while buffer.find(delim) != -1:
line, buffer = buffer.split('\n', 1)
yield line
return
def main(data_queue):
    HOST = "192.168.1.146" # Symbolic name meaning all available interfaces
PORT = 9090 # Arbitrary non-privileged port
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind((HOST, PORT))
s.listen(1)
conn, addr = s.accept()
print 'Connected by', addr
message = readlines(conn)
while 1:
try:
data_queue.put(eval(message.next()))
except Exception as err:
print 'Error on server: ', err
conn.close()
sys.exit()
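# Added client-side sketch (not in the original file): the server above expects
# newline-terminated Python literals, because readlines() splits on '\n' and main()
# eval()s every line. Host and port mirror the constants defined above.
#
#   import socket
#   client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#   client.connect(("192.168.1.146", 9090))
#   client.send("{'x': 1, 'y': 2}\n")    # one message per line
#   client.close()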
gitfred/fuel-extension-volume-manager | volume_manager/tests/test_migration_volume_manager_extension_001_add_volumes_table.py | Python | apache-2.0 | 3,073 | 0
# Copyright 2015 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import alembic
from oslo_serialization import jsonutils
import sqlalchemy as sa
from nailgun.db import db
from nailgun.db import dropdb
from nailgun.db.migration import ALEMBIC_CONFIG
from nailgun.db.migration import make_alembic_config_from_extension
from nailgun.extensions.consts import extensions_migration_buffer_table_name
from nailgun.test import base
from volume_manager.extension import VolumeManagerExtension
_core_test_revision = '1e50a4903910'
_extension_test_revision = '086cde3de7cf'
def setup_module():
dropdb()
# Run core migration in order to create buffer table
alembic.command.upgrade(ALEMBIC_CONFIG, _core_test_revision)
prepare()
# Run extension migrations
ext_alembic_config = make_alembic_config_from_extension(
VolumeManagerExtension)
alembic.command.upgrade(ext_alembic_config, _extension_test_revision)
def prepare():
    meta = base.reflect_db_metadata()
# Fill in migration table with data
db.execute(
meta.tables[extensions_migration_buffer_table_name].insert(),
[{'extension_name': 'volume_manager',
'data': jsonutils.dumps({'node_id': 1, 'volumes': [{'volume': 1}]})},
{'extension_name': 'volume_manager',
'data': jsonutils.dumps({'node_id': 2, 'volumes': [{'volume': 2}]})},
{'extension_name': 'some_different_extension',
'data': 'some_data'}])
db.commit()
class TestVolumeManagerExtensionAddVolumesTable(base.BaseAlembicMigrationTest):
@classmethod
def setUpClass(cls):
setup_module()
def test_add_volumes_table(self):
result = db.execute(
sa.select([
self.meta.tables['volume_manager_node_volumes'].c.node_id,
self.meta.tables['volume_manager_node_volumes'].c.volumes]))
records = list(result)
node_ids = [r[0] for r in records]
self.assertItemsEqual(node_ids, [1, 2])
volumes = [jsonutils.loads(r[1]) for r in records]
self.assertItemsEqual(
[[{'volume': 1}], [{'volume': 2}]],
volumes)
result = db.execute(
sa.select([
self.meta.tables[
extensions_migration_buffer_table_name].c.extension_name,
self.meta.tables[
extensions_migration_buffer_table_name].c.data]))
self.assertEqual(
list(result),
[('some_different_extension', 'some_data')])
F-Secure/distci | src/distci/frontend/tests/test-frontend-tasks.py | Python | apache-2.0 | 4,501 | 0.004666
"""
Tests for DistCI task management interfaces
Copyright (c) 2012-2013 Heikki Nousiainen, F-Secure
See LICENSE for details
"""
from nose.plugins.skip import SkipTest
from webtest import TestApp, TestRequest
import json
import tempfile
import os
import shutil
from distci import frontend
class TestTasks:
app = None
config_file = None
data_directory = None
test_state = {}
@classmethod
def setUpClass(cls):
cls.data_directory = '/Users/noushe/CI-proto'
cls.data_directory = tempfile.mkdtemp()
config_file = os.path.join(cls.data_directory, 'frontend.conf')
os.mkdir(os.path.join(cls.data_directory, 'tasks'))
config = { "data_directory": cls.data_directory }
json.dump(config, file(config_file, 'wb'))
frontend_app = frontend.Frontend(config)
cls.app = TestApp(frontend_app)
@classmethod
def tearDownClass(cls):
cls.app = None
shutil.rmtree(cls.data_directory)
def test_01_list_tasks_empty(self):
response = self.app.request('/tasks')
result = json.loads(response.body)
assert result.has_key('tasks'), "Tasks entry went missing"
assert len(result['tasks']) == 0, "Tasks entry was not empty"
def test_02_post_task(self):
task_data = json.dumps({ 'command': 'something' })
request = TestRequest.blank('/tasks', content_type='application/json')
request.method = 'POST'
request.body = task_data
response = self.app.do_request(request, 201, False)
result = json.loads(response.body)
        assert result.has_key('id'), "ID entry went missing"
assert result.has_key('data'), "data entry went missing"
        self.test_state['id'] = str(result['id'])
def test_03_check_single_task(self):
task_id = self.test_state.get('id')
if task_id is None:
raise SkipTest("Skipping test for single task status, no recorded state")
response = self.app.request('/tasks/%s' % task_id)
result = json.loads(response.body)
assert result['id'] == task_id, "ID mismatch"
assert result['data']['command'] == 'something', "Wrong data"
def test_04_update(self):
task_id = self.test_state.get('id')
if task_id is None:
raise SkipTest("Skipping test for single task update, no recorded state")
new_task_data = json.dumps({'command': 'something_else', 'assignee': 'my-id'})
request = TestRequest.blank('/tasks/%s' % task_id, content_type='application/json')
request.method = 'PUT'
request.body = new_task_data
response = self.app.do_request(request, 200, False)
result = json.loads(response.body)
assert result.has_key('id'), "ID entry went missing"
assert result.has_key('data'), "data entry went missing"
assert result['data']['command'] == 'something_else', "Wrong command"
assert result['data']['assignee'] == 'my-id', "Wrong assignee"
def test_05_update_with_wrong_assignee(self):
task_id = self.test_state.get('id')
if task_id is None:
raise SkipTest("Skipping test for single task update, no recorded state")
new_task_data = json.dumps({'command': 'something_else', 'assignee': 'my-id-not-same'})
request = TestRequest.blank('/tasks/%s' % task_id, content_type='application/json')
request.method = 'PUT'
request.body = new_task_data
response = self.app.do_request(request, 409, False)
def test_06_list_tasks(self):
response = self.app.request('/tasks')
result = json.loads(response.body)
assert result.has_key('tasks'), "Tasks entry went missing"
assert len(result['tasks']) == 1, "Invalid task count"
task_id = self.test_state.get('id')
if task_id is not None:
assert task_id in result['tasks'], "Task not included in the list"
def test_07_delete(self):
task_id = self.test_state.get('id')
if task_id is None:
raise SkipTest("Skipping test for single task status, no recorded state")
request = TestRequest.blank('/tasks/%s' % task_id)
request.method = 'DELETE'
response = self.app.do_request(request, 204, False)
response = self.app.request('/tasks')
result = json.loads(response.body)
assert result.has_key('tasks'), "Tasks entry went missing"
assert len(result['tasks']) == 0, "Invalid task count"
zlorb/mitmproxy | pathod/language/generators.py | Python | mit | 2,469 | 0.00081
import os
import string
import random
import mmap
import sys
DATATYPES = dict(
ascii_letters=string.ascii_letters.encode(),
ascii_lowercase=string.ascii_lowercase.encode(),
ascii_uppercase=string.ascii_uppercase.encode(),
digits=string.digits.encode(),
hexdigits=string.hexdigits.encode(),
octdigits=string.octdigits.encode(),
punctuation=string.punctuation.encode(),
whitespace=string.whitespace.encode(),
ascii=string.printable.encode(),
bytes=bytes(range(256))
)
class TransformGenerator:
"""
Perform a byte-by-byte transform another generator - that is, for each
input byte, the transformation must produce one output byte.
gen: A generator to wrap
transform: A function (offset, data) -> transformed
"""
def __init__(self, gen, transform):
self.gen = gen
self.transform = transform
def __len__(self):
return len(self.gen)
def __getitem__(self, x):
d = self.gen.__getitem__(x)
if isinstance(x, slice):
return self.transform(x.start, d)
return self.transform(x, d)
def __repr__(self):
return "'transform(%s)'" % self.gen
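# Added usage sketch (not part of the original module): wrapping the RandomGenerator
# defined below and XOR-ing every byte; the lambda honours the documented contract of
# one output byte per input byte.
#
#   gen = TransformGenerator(
#       RandomGenerator("ascii", 16),
#       lambda offset, data: bytes(b ^ 0xFF for b in data),
#   )
#   gen[0:4]    # -> four transformed bytes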
def rand_byte(chars):
"""
Return a random character as byte from a charset.
"""
# bytearray has consistent behaviour on both Python 2 and 3
# while bytes does not
return bytes([random.choice(chars)])
class RandomGenerator:
def __init__(self, dtype, length):
self.dtype = dtype
self.length = length
def __len__(self):
return self.length
def __getitem__(self, x):
chars = DATATYPES[self.dtype]
if isinstance(x, slice):
            return b"".join(rand_byte(chars) for _ in range(*x.indices(min(self.length, sys.maxsize))))
return rand_byte(chars)
def __repr__(self):
return "%s random from %s" % (self.length, self.dtype)
class FileGenerator:
def __init__(self, path):
self.path = os.path.expanduser(path)
def __len__(self):
return os.path.getsize(self.path)
def __getitem__(self, x):
with open(self.path, mode="rb") as f:
if isinstance(x, slice):
with mmap.mmap(f.fileno(), 0, access=mmap.ACCESS_READ) as mapped:
return mapped.__getitem__(x)
else:
f.seek(x)
return f.read(1)
def __repr__(self):
return "<%s" % self.path
EHRI/rspub-core | rspub/core/rs_enum.py | Python | apache-2.0 | 3,776 | 0.001324
#! /usr/bin/env python3
# -*- coding: utf-8 -*-
import gettext
from enum import Enum, unique
_ = gettext.gettext
strategy_descriptions = [_("New resourcelist strategy"),
_("New changelist strategy"),
_("Incremental changelist strategy")]
@unique
class Strategy(Enum):
"""
:samp:`Strategy for ResourceSync Publishing`
"""
resourcelist = 0
"""
``0`` :samp:`New resourcelist {strategy}`
Create new resourcelist(s) every run.
"""
new_changelist = 1
"""
``1`` :samp:`New changelist {strategy}`
Create a new changelist every run.
If no resourcelist was found in the metadata directory switch to new resourcelist strategy.
"""
inc_changelist = 2
"""
``2`` :samp:`Incremental changelist {strategy}`
Add changes to an existing changelist. If no changelist exists, create a new one.
If no resourcelist was found in the metadata directory switch to new resourcelist strategy.
"""
# resourcedump = 3 # not implemented
# changedump = 4 # not implemented
@staticmethod
def names():
"""
:samp:`Get Strategy names`
:return: List<str> of names
"""
names = dir(Strategy)
return [x for x in names if not x.startswith("_")]
@staticmethod
def sanitize(name):
"""
:samp:`Verify a {Strategy} name`
:param str name: string to test
:return: name if it is the name of a strategy
:raises: :exc:`ValueError` if the given name is not the name of a strategy
"""
try:
strategy = Strategy[name]
return strategy.name
except KeyError as err:
raise ValueError(err)
@staticmethod
def strategy_for(value):
"""
:samp:`Get a Strategy for the given value`
:param value: may be :class:`Strategy`, str or int
:return: :class:`Strategy`
:raises: :exc:`ValueError` if the given value could not be converted to a :class:`Strategy`
"""
try:
if isinstance(value, Strategy):
return value
elif isinstance(value, int):
return Strategy(value)
else:
return Strategy[value]
except KeyError as err:
raise ValueError(err)
def describe(self):
return strategy_descriptions[self.value]
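# Added usage sketch (not part of the original module): strategy_for() accepts an
# existing Strategy, its integer value, or its name, as documented above.
#
#   Strategy.strategy_for(Strategy.resourcelist)   # -> Strategy.resourcelist
#   Strategy.strategy_for(2)                       # -> Strategy.inc_changelist
#   Strategy.strategy_for("new_changelist")        # -> Strategy.new_changelist
#   Strategy.strategy_for("bogus")                 # raises ValueError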
class Capability(Enum):
"""
:samp:`Capabilities as defined in the ResourceSync Framework`
"""
resourcelist = 0
"""
``0`` :samp:`resourcelist`
"""
changelist = 1
"""
``1`` :samp:`changelist`
"""
resourcedump = 2
"""
``2`` :samp:`resourcedump`
"""
changedump = 3
"""
``3`` :samp:`changedump`
"""
resourcedump_manifest = 4
"""
``4`` :samp:`resourcedump_manifest`
"""
changedump_manifest = 5
"""
``5`` :samp:`changedump_manifest`
"""
capabilitylist = 6
"""
``6`` :samp:`capabilitylist`
"""
description = 7
"""
``7`` :samp:`description`
"""
class SelectMode(Enum):
"""
:samp:`Mode of selection`
"""
simple = 0
selector = 1
@staticmethod
def names():
"""
:samp:`Get SelectMode names`
:return: List<str> of names
"""
names = dir(SelectMode)
        return [x for x in names if not x.startswith("_")]
@staticmethod
def select_mode_for(mode):
try:
if isinstance(mode, SelectMode):
return mode
elif isinstance(mode, int):
return SelectMode(mode)
else:
return SelectMode[mode]
except KeyError as err:
raise ValueError(err)
elyezer/robottelo | robottelo/ui/architecture.py | Python | gpl-3.0 | 1,561 | 0
# -*- encoding: utf-8 -*-
"""Implements Architecture UI"""
from robottelo.constants import FILTER
from robottelo.ui.base import Base
from robottelo.ui.locators import common_locators, locators
from robottelo.ui.navigator import Navigator
class Architecture(Base):
"""Manipulates architecture from UI"""
def navigate_to_entity(self):
"""Navigate to Architecture entity page"""
Navigator(self.browser).go_to_architectures()
def _search_locator(self):
"""Specify locator for Architecture entity search procedure"""
return locators['arch.arch_name']
def create(self, name, os_names=None):
"""Creates new architecture from UI with existing OS"""
self.click(locators['arch.new'])
self.assign_value(locators['arch.name'], name)
self.configure_entity(os_names, FILTER['arch_os'])
self.click(common_locators['submit'])
def delete(self, name, really=True):
"""Delete existing architecture from UI"""
self.delete_entity(
name,
really,
locators['arch.delete'],
)
    def update(self, old_name, new_name=None, os_names=None,
new_os_names=None):
"""Update existing arch's name and OS"""
self.search_and_click(old_name)
if new_name:
self.assign_value(locators['arch.name'], new_name)
self.configure_entity(
os_names,
FILTER['arch_os'],
new_entity_list=new_os_names
)
        self.click(common_locators['submit'])
JayVora-SerpentCS/vertical-hotel | hotel/wizard/__init__.py | Python | agpl-3.0 | 153 | 0
# -*- coding: utf-8 -*-
# See LICENSE file for full copyright and licensing details.
from . import hotel_wizard
from . import sale_make_invoice_advance
SM2015/orchid | core/migrations/0002_auto__add_field_score_created_at__add_field_score_updated_at__add_fiel.py | Python | mit | 11,751 | 0.007404
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Score.created_at'
db.add_column(u'core_score', 'created_at',
self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, default=datetime.datetime(2015, 1, 22, 0, 0), blank=True),
keep_default=False)
# Adding field 'Score.updated_at'
db.add_column(u'core_score', 'updated_at',
self.gf('django.db.models.fields.DateTimeField')(auto_now=True, default=datetime.datetime(2015, 1, 22, 0, 0), blank=True),
keep_default=False)
# Adding field 'Score.changed_by'
db.add_column(u'core_score', 'changed_by',
self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name=u'core_score_related', null=True, to=orm['auth.User']),
keep_default=False)
def backwards(self, orm):
# Deleting field 'Score.created_at'
db.delete_column(u'core_score', 'created_at')
# Deleting field 'Score.updated_at'
db.delete_column(u'core_score', 'updated_at')
# Deleting field 'Score.changed_by'
db.delete_column(u'core_score', 'changed_by_id')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'core.image': {
'Meta': {'object_name': 'Image'},
'changed_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'core_image_related'", 'null': 'True', 'to': u"orm['auth.User']"}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'internal_file': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'original_file': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'core.indicator': {
'Meta': {'object_name': 'Indicator'},
'changed_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'core_indicator_related'", 'null': 'True', 'to': u"orm['auth.User']"}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'form': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['forms.Form']", 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'form_number': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'maximum_monthly_records': ('django.db.models.fields.IntegerField', [], {'default': '20'}),
'passing_percentage': ('django.db.models.fields.FloatField', [], {'default': '85'}),
'title': ('django.db.models.fields.TextField', [], {}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'core.location': {
'Meta': {'object_name': 'Location'},
'changed_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'core_location_related'", 'null': 'True', 'to': u"orm['auth.User']"}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'images': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['core.Image']", 'null': 'True', 'blank': 'True'}),
'indicators': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['core.Indicator']", 'null': 'True', 'blank': 'True'}),
'members': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'position': ('geoposition.fields.GeopositionField', [], {'max_length': '42'}),
'title': ('django.db.models.fields.TextField', [], {}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'core.score': {
'Meta': {'object_name': 'Score'},
'changed_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'core_score_related'", 'null': 'True', 'to': u"orm['auth.User']"}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank':
asgeirrr/django-is-core | example/issues/cores.py | Python | lgpl-3.0 | 228 | 0.004386
from django.contrib.auth.models import User
from is_core.main import UIRestModelISCore
from .models import Issue
class IssueIsCore(UIRestModelISCore):
model = Issue
class UserIsCore(UIRestModelISCore):
model = User
BFriedland/TerrainGenerators | PerlinGeneratorDemo.py | Python | mit | 17,655 | 0.020164
'''
This version of the terrain generator library interpreter is set up to produce terrain maps using Perlin noise.
Important settings and inits specific to the Perlin generator can be found in the code by searching for cfref:perlin-gen
Additional display options may be viewed by swapping WHICH_COLOR_SCHEME constant is commented. 'terrain' and 'grayscale' are the only interesting options for this particular generator's current arrangement.
Note that the dependencies do NOT include math, even though the generator library does.
'''
import NoiseMapGenerators_14 as NoiseMapGenerators
import pygame
import sys
import random
#### Constants ####
WINDOW_CAPTION = "NoiseMapGenerator Library Demonstration"
WHICH_COLOR_SCHEME = 'terrain'
#WHICH_COLOR_SCHEME = 'grayscale'
## Starfield will require extensive parameters testing with each generator.
#WHICH_COLOR_SCHEME = 'starfield'
#WHICH_COLOR_SCHEME = 'dungeon'
## For display purposes.
MAPTILE_SIZE_IN_PIXELS = MAPTILE_WIDTH_IN_PIXELS, MAPTILE_HEIGHT_IN_PIXELS = 4, 4
print("MAPTILE_SIZE_IN_PIXELS == %s" % str(MAPTILE_SIZE_IN_PIXELS))
## The NOISE_WIDTH (and NOISE_HEIGHT) options adjust the size of the map that is created. Or at least I'd be awfully surprised to learn that isn't what they do.
NOISE_WIDTH = 120
## Adjusted this to make it a perfect square for testing purposes.
NOISE_HEIGHT = NOISE_WIDTH
#NOISE_HEIGHT = 16
#### Dungeon map testing observations ####
## Does not need to be commented when not using a dungeon map generator.
ROOM_MAX_SIZE = 18
ROOM_MIN_SIZE = 6
## Note: Room max count and room min count are not currently used by the RoomFilledMapGenerator.
ROOM_MAX_COUNT = 14444
ROOM_MIN_COUNT = 12
#### Simplex noise testing observations ####
#NOISE_FREQUENCY = 0.01
#NOISE_OCTAVES = 32
#NOISE_PERSISTENCE = 0.5
## IMPORTANT!!! Frequency MUST BE VERY LOW! Beneath 1.0 and above 0.0, possibly always beneath 0.1
## Otherwise the "width of tiles on the planet's surface" is going to be SMALLER THAN A MAPTILE.
## This makes it seem hugely spikey, when simplex noise should be smooth and cloudlike.
## I got decent results at 0.01 frequency with all sorts of octaves and 0.5 persistence.
## 0.01f 32o 0.5p looks fairly zoomed in, though.
## Below data is from before I discovered the above.
## octaves seems to smoothen it out as it increases, as with before
## f2 o2 p2-4-8-16 seemed to create more land area with more p, though p0 created plenty of land and was extremely spikey
## no visible difference between f2o2p64 and f2o2p512, but f2o2p2 had more water and seemed less spikey
## f32o2p2 made visible lleft-->uright diagonal repetition
## f256o2p2 made a series of lleft-->uright streaks.
## I think scaling up frequency too far makes it more obviously repeat itself.
## f1o2p1 had visible lleft-->uright diagonal repetition
## ...
## heh oops. Persistence should be between 0 and 1.
#### Perlin noise testing observations #### cfref:perlin-gen
# width of the tiles on the planet's surface??
#NOISE_FREQUENCY = 64
# how close we are to the tiles?? <-- this seems to be a decent interpretation of its effects
#NOISE_OCTAVES = 1024
## Lower octaves makes it calculate faster. Higher frequency:octaves ratio makes it spikier and more repetitive.
#NOISE_FREQUENCY = 32
#NOISE_OCTAVES = 512
#NOISE_FREQUENCY = 8
#NOISE_OCTAVES = 128
## Nice smooth islands. Probably.
NOISE_FREQUENCY = 3
NOISE_OCTAVES = 64
## Could this algorithm be used to zoom in to a specific map by creating one particular randseed and using it for every pass of the algorithm? Would need to modify the generator.
MAP_SIZE_IN_TILES = MAP_WIDTH_IN_TILES, MAP_HEIGHT_IN_TILES = NOISE_WIDTH, NOISE_HEIGHT
print("MAP_SIZE_IN_TILES == %s" % str(MAP_SIZE_IN_TILES))
## Size the screen so the maptiles fit in it neatly.
SCREEN_SIZE = SCREEN_WIDTH, SCREEN_HEIGHT = (MAP_WIDTH_IN_TILES * MAPTILE_WIDTH_IN_PIXELS), (MAP_HEIGHT_IN_TILES * MAPTILE_HEIGHT_IN_PIXELS)
print("SCREEN_SIZE == %s" % str(SCREEN_SIZE))
print("\n")
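## Added worked example (not in the original script): with the settings above,
## NOISE_WIDTH == NOISE_HEIGHT == 120 tiles and 4x4-pixel tiles, so the window is
## 120 * 4 = 480 pixels on a side, i.e. SCREEN_SIZE == (480, 480).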
## In this program, BLACK is used to clean the screen when redrawing.
BLACK = [0, 0, 0]
WHITE = [255, 255, 255]
LIGHT_GRAY = [120, 120, 120]
DARK_GRAY = [50, 50, 50]
RED = [255, 0, 0]
GREEN = [0, 255, 0]
BLUE = [0, 0, 255]
DARK_BLUE = [0, 0, 150]
DEEP_BLUE = [0, 0, 75]
BRAUN = [95, 45, 0]
LIGHT_BRAUN = [115, 65, 20]
SANDY_TAN = [245, 165, 95]
DARK_GREEN = [0, 155, 0]
LIGHT_GREEN = [50, 255, 50]
DEBUG_PINK = [224, 176, 255]
#### Classes ####
class MapTile:
def __init__(self, supplied_x_in_maptiles, supplied_y_in_maptiles, supplied_z):
''' Make a MapTile object with coordinates on the screen and in the map (meansured in pixels and tiles, respectively). Each MapTile has a magnitude, called: z '''
self.x = supplied_x_in_maptiles
self.pixel_x = self.x * MAPTILE_WIDTH_IN_PIXELS
self.y = supplied_y_in_maptiles
self.pixel_y = self.y * MAPTILE_HEIGHT_IN_PIXELS
self.z = supplied_z
if self.z != None:
if self.z > 255:
self.z = 255
elif self.z < 0:
self.z = 0
elif self.z == None:
#print("NONE DETECTED in self.z!! " + str(self.z))
pass
def draw_maptile(self):
## Regardless of the color scheme, pixels with value of None type will be set to DEBUG_PINK.
if type(self.z) == None:
_color_of_this_pixel = DEBUG_PINK
            pygame.draw.rect(screen, _color_of_this_pixel, [self.pixel_x, self.pixel_y, MAPTILE_WIDTH_IN_PIXELS, MAPTILE_HEIGHT_IN_PIXELS])
return
if WHICH_COLOR_SCHEME == 'terrain':
if self.z < 90:
_color_of_this_pixel = DEEP_BLUE
elif self.z < 120:
_color_of_this_pixel = DARK_BLUE
elif self.z < 160:
_color_of_this_pixel = BLUE
elif self.z < 170:
_color_of_this_pixel = GREEN
elif self.z < 180:
_color_of_this_pixel = DARK_GREEN
elif self.z < 190:
_color_of_this_pixel = GREEN
elif self.z < 200:
_color_of_this_pixel = BRAUN
elif self.z < 210:
_color_of_this_pixel = LIGHT_BRAUN
else:
_color_of_this_pixel = WHITE
elif WHICH_COLOR_SCHEME == 'starfield':
if self.z == 1:
_color_of_this_pixel = WHITE
elif self.z == 2:
_color_of_this_pixel = LIGHT_GRAY
elif self.z == 3:
_color_of_this_pixel = DARK_GRAY
else:
_color_of_this_pixel = BLACK
elif WHICH_COLOR_SCHEME == 'dungeon':
if self.z == 0:
_color_of_this_pixel = BLACK
elif self.z == 1:
_color_of_this_pixel = LIGHT_GRAY
else:
_color_of_this_pixel = DEBUG_PINK
#print("\n OMG DEBUG PINK OMG OMG\n x: %d y: %d" % (self.x, self.y))
elif WHICH_COLOR_SCHEME == 'grayscale':
_color_of_this_pixel = [self.z, self.z, self.z] # I summon thee
pygame.draw.rect(screen, _color_of_this_pixel, [self.pixel_x, self.pixel_y, MAPTILE_WIDTH_IN_PIXELS, MAPTILE_HEIGHT_IN_PIXELS])
#### Functions ####
def convert_noise_map_to_maptile_map(supplied_map):
''' Return a list full of MapTiles with x, y and z values corresponding to the output of a TerrainGenerator. '''
## Note: This whole script is an example of how to interpret the more "pure" results of a noise generator as something useful to another program.
## This program interprets the noise as "terrain maps" and therefore
|
csakatoku/uamobile
|
uamobile/factory/ezweb.py
|
Python
|
mit
| 321 | 0.009346 |
# -*- coding: utf-8 -*-
from uamobile.factory.base import AbstractUserAgentFactory
from uamobile.ezweb import EZwebUserAgent
from uamobile.parser import CachingEZwebUserAgentParser
class EZwebUserAgentFactory(AbstractUserAgentFactory):
    device_class = EZwebUserAgent
parser = CachingEZwebUserAgentParser()
ZhangXFeng/hadoop | src/hadoop-mapreduce1-project/src/contrib/cloud/src/py/hadoop/cloud/util.py | Python | apache-2.0 | 2,562 | 0.016393
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Utility functions.
"""
import ConfigParser
import socket
import urllib2
def bash_quote(text):
"""Quotes a string for bash, by using single quotes."""
if text == None:
return ""
return "'%s'" % text.replace("'", "'\\''")
def bash_quote_env(env):
"""Quotes the value in an environment variable assignment."""
if env.find("=") == -1:
return env
(var, value) = env.split("=")
return "%s=%s" % (var, bash_quote(value))
def build_env_string(env_strings=[], pairs={}):
"""Build a bash environment variable assignment"""
env = ''
if env_strings:
for env_string in env_strings:
env += "%s " % bash_quote_env(env_string)
if pairs:
for key, val in pairs.items():
env += "%s=%s " % (key, bash_quote(val))
return env[:-1]
def merge_config_with_options(section_name, config, options):
"""
Merge configuration options with a dictionary of options.
Keys in the options dictionary take precedence.
"""
res = {}
try:
for (key, value) in config.items(section_name):
if value.find("\n") != -1:
res[key] = value.split("\n")
else:
res[key] = value
except ConfigParser.NoSectionError:
pass
for key in options:
if options[key] != None:
res[key] = options[key]
return res
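# Added usage sketch (not part of the original module; the section name and keys are
# invented): options win over config-file values, and multi-line values become lists.
#
#   config = ConfigParser.ConfigParser()
#   config.add_section('cluster')
#   config.set('cluster', 'instance_type', 'm1.large')
#   config.set('cluster', 'env', 'A=1\nB=2')
#   merge_config_with_options('cluster', config, {'instance_type': 'c1.medium'})
#   # -> {'instance_type': 'c1.medium', 'env': ['A=1', 'B=2']}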
def url_get(url, timeout=10, retries=0):
"""
Retrieve content from the given URL.
"""
# in Python 2.6 we can pass timeout to urllib2.urlopen
  socket.setdefaulttimeout(timeout)
attempts = 0
while True:
try:
return urllib2.urlopen(url).read()
except urllib2.URLError:
attempts = attempts + 1
if attempts > retries:
raise
def xstr(string):
"""Sane string conversion: return an empty string if string is None."""
  return '' if string is None else str(string)
taigaio/taiga-back | taiga/webhooks/signal_handlers.py | Python | agpl-3.0 | 2,526 | 0.000396
# -*- coding: utf-8 -*-
# Copyright (C) 2014-present Taiga Agile LLC
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from django.db import connection
from django.conf import settings
from django.utils import timezone
from taiga.projects.history import services as history_service
from taiga.projects.history.choices import HistoryType
from . import tasks
def _get_project_webhooks(project):
webhooks = []
for webhook in project.webhooks.all():
webhooks.append({
"id": webhook.pk,
"url": webhook.url,
"key": webhook.key,
})
return webhooks
def on_new_history_entry(sender, instance, created, **kwargs):
if not settings.WEBHOOKS_ENABLED:
return None
if instance.is_hidden:
return None
    model = history_service.get_model_from_key(instance.key)
pk = history_service.get_pk_from_key(instance.key)
try:
obj = model.objects.get(pk=pk)
except model.DoesNotExist:
# Catch simultaneous DELETE request
return None
webhooks = _get_project_webhooks(obj.project)
if instance.type == HistoryType.create:
task = tasks.create_webhook
extra_args = []
elif instance.type == HistoryType.change:
task = tasks.change_webhook
        extra_args = [instance]
elif instance.type == HistoryType.delete:
task = tasks.delete_webhook
extra_args = []
by = instance.owner
date = timezone.now()
webhooks_args = []
for webhook in webhooks:
args = [webhook["id"], webhook["url"], webhook["key"], by, date, obj] + extra_args
webhooks_args.append(args)
connection.on_commit(lambda: _execute_task(task, webhooks_args))
def _execute_task(task, webhooks_args):
for webhook_args in webhooks_args:
if settings.CELERY_ENABLED:
task.delay(*webhook_args)
else:
task(*webhook_args)
jccaicedo/localization-agent | learn/cnn/convertProtobinToNumpy.py | Python | mit | 447 | 0.017897
from caffe import io as c
import numpy as np
import os,sys
if len(sys.argv) < 3:
    print 'Use: convertProtobinToNumpy protobinFile numpyOutput'
sys.exit()
protoData = c.caffe_pb2.BlobProto()
f = open(sys.argv[1],'rb')
protoData.ParseFromString(f.read())
f.close()
array = c.blobproto_to_array(protoData)
np.save(sys.argv[2],array[0].swapaxes(1, 0).swapaxes(2,1)[:, :, ::-1])
A = np.load(sys.argv[2]+'.npy')
print 'Final matrix shape:',A.shape
mbuesch/toprammer | libtoprammer/chips/at89c2051dip20.py | Python | gpl-2.0 | 6,509 | 0.036104
"""
# TOP2049 Open Source programming suite
#
# Atmel AT89C2051 DIP20 Support
#
# Copyright (c) 2010 Guido
# Copyright (c) 2010 Michael Buesch <m@bues.ch>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published
|
by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""
from libtoprammer.chip import *
class Chip_AT89C2051dip20(Chip):
STAT_BUSY = 0x01 # Programmer is running a command
STAT_ERR = 0x02 # Error during write
def __init__(self):
Chip.__init__(self,
chipPackage = "DIP20",
chipPinVCC = 20,
chipPinsVPP = 1,
chipPinGND = 10)
def __initChip(self):
self.applyVCC(False)
self.applyVPP(False)
self.applyGND(True)
self.top.cmdSetVCCVoltage(5)
self.top.cmdSetVPPVoltage(5)
def readSignature(self):
self.__initChip()
self.applyGND(True)
self.applyVCC(True)
self.top.cmdSetVPPVoltage(5)
self.__loadCommand(5) # VPP on
self.__loadCommand(1) # set P3.2
self.__setP3x(P33=0, P34=0, P35=0, IA=0)
data = b""
self.top.cmdFPGARead(0x10)
self.__setP3x(P33=0, P34=0, P35=0, IA=1)
self.__setP3x(P33=0, P34=0, P35=0, IA=0)
self.top.cmdFPGARead(0x10)
self.__setP3x(P33=0, P34=0, P35=0, IA=1)
self.__setP3x(P33=0, P34=0, P35=0, IA=0)
self.top.cmdFPGARead(0x10)
data += self.top.cmdReadBufferReg()
self.__setP3x(P33=0, P34=1, P35=0, IA=0)
self.__loadCommand(6) # VPP off
signature = b""
signature += int2byte(data[0])
signature += int2byte(data[1])
self.top.printInfo("Signature: %X, %X" % (byte2int(signature[0]), byte2int(signature[1])))
return signature
def erase(self):
self.__initChip()
self.applyGND(True)
self.applyVCC(True)
self.__loadCommand(1) # set P3.2
self.top.cmdSetVPPVoltage(5)
self.applyVPP(True)
self.__loadCommand(5) # VPP on
self.__setP3x(P33=1, P34=0, P35=0, IA=0)
self.top.cmdSetVPPVoltage(12)
self.__runCommandSync(4)
self.applyVPP(False)
self.top.cmdSetVPPVoltage(5)
self.__setP3x(P33=0, P34=1, P35=0, IA=0)
self.__loadCommand(5) # VPP off
self.top.flushCommands()
self.top.printInfo("at89c2051dip20: Erasing flash, verifying ...")
ok = self.__verifyErase()
if ok == 0:
self.top.printInfo("at89c2051dip20: Erase done.")
else:
self.top.printInfo("at89c2051dip20: Erase failed!")
def readProgmem(self):
self.__initChip()
self.applyGND(True)
self.applyVCC(True)
self.__loadCommand(1) # set P3.2
self.top.cmdSetVPPVoltage(5)
self.applyVPP(True)
self.__loadCommand(5) # VPP on
self.__setP3x(P33=0, P34=0, P35=1, IA=0)
image = b""
byteCount = 0
self.progressMeterInit("Reading Flash", 0x800)
for addr in range(0, 0x800):
self.progressMeter(addr)
self.top.cmdFPGARead(0x10)
self.__setP3x(P33=0, P34=0, P35=1, IA=1)
self.__setP3x(P33=0, P34=0, P35=1, IA=0)
byteCount += 1
if byteCount == self.top.getBufferRegSize():
image += self.top.cmdReadBufferReg(byteCount)
byteCount = 0
image += self.top.cmdReadBufferReg(byteCount)
self.applyVPP(False)
self.__setP3x(P33=0, P34=1, P35=0, IA=0)
self.__loadCommand(5) # VPP off
self.top.flushCommands()
self.progressMeterFinish()
return image
def writeProgmem(self, image):
if len(image) > 0x800:
self.throwError("Invalid EPROM image size %d (expected <=%d)" %\
(len(image), 0x800))
self.__initChip()
self.applyGND(True)
self.applyVCC(True)
self.__loadCommand(1) # set P3.2
self.top.cmdSetVPPVoltage(5)
self.applyVPP(True)
self.__loadCommand(5) # VPP on
self.__setP3x(P33=0, P34=1, P35=1, IA=0)
self.top.cmdSetVPPVoltage(12)
self.progressMeterInit("Writing Flash", len(image))
for addr in range(0, len(image)):
self.progressMeter(addr)
data = byte2int(image[addr])
if data != 0xFF:
self.__loadData(data)
self.__loadCommand(3)
ok = self.__progWait()
if (ok & self.STAT_ERR) != 0:
self.throwError("Write byte failed.")
self.__setP3x(P33=0, P34=1, P35=1, IA=1)
self.__setP3x(P33=0, P34=1, P35=1, IA=0)
self.applyVPP(False)
self.top.cmdSetVPPVoltage(5)
self.__setP3x(P33=0, P34=1, P35=0, IA=0)
self.__loadCommand(5) # VPP off
self.top.flushCommands()
self.progressMeterFinish()
ok = self.__verifyProgmem(image)
if ok == 0:
self.top.printInfo("at89c2051dip20: Write flash done.")
else:
self.top.printInfo("at89c2051dip20: Write flash failed!")
def __verifyErase(self):
ok = 0
image = self.readProgmem()
for addr in range(0, 0x800):
if byte2int(image[addr]) != 0xFF:
ok = 1
return ok
def __verifyProgmem(self,image):
data = self.readProgmem()
ok = 0
for addr in range(0, 0x800):
if byte2int(image[addr]) != byte2int(data[addr]):
ok = 1
return ok
def __loadData(self, data):
self.top.cmdFPGAWrite(0x10, data)
def __loadCommand(self, command):
self.top.cmdFPGAWrite(0x12, command & 0xFF)
def __runCommandSync(self, command):
self.__loadCommand(command)
self.__busyWait()
def __setP3x(self, P33, P34, P35, IA):
data = 0
if P33:
data |= 1
if P34:
data |= 2
if P35:
data |= 4
if IA:
data |= 8
self.top.cmdFPGAWrite(0x16, data)
def __getStatusFlags(self):
self.top.cmdFPGARead(0x12)
stat = self.top.cmdReadBufferReg()
return byte2int(stat[0])
def __busy(self):
return bool(self.__getStatusFlags() & self.STAT_BUSY)
def __busyWait(self):
for i in range(0, 26):
if not self.__busy():
return
self.top.hostDelay(0.001)
self.throwError("Timeout in busywait.")
def __progWait(self):
for i in range(0,4):
self.top.cmdFPGARead(0x12)
stat = self.top.cmdReadBufferReg()
if (byte2int(stat[0]) & self.STAT_BUSY) == 0:
return byte2int(stat[0])
self.top.hostDelay(0.001)
self.throwError("Timeout in busywait.")
ChipDescription(
Chip_AT89C2051dip20,
bitfile = "at89c2051dip20",
runtimeID = (0x0005, 0x01),
chipVendors = "Atmel",
description = "AT89C2051",
maintainer = None,
packages = ( ("DIP20", ""), )
)
victorywang80/Maintenance | saltstack/src/salt/modules/kmod.py | Python | apache-2.0 | 6,443 | 0.000466
# -*- coding: utf-8 -*-
'''
Module to manage Linux kernel modules
'''
# Import python libs
import os
import re
# Import salt libs
import salt.utils
def __virtual__():
'''
Only runs on Linux systems
'''
return 'kmod' if __grains__['kernel'] == 'Linux' else False
def _new_mods(pre_mods, post_mods):
'''
Return a list of the new modules, pass an lsmod dict before running
modprobe and one after modprobe has run
'''
pre = set()
post = set()
for mod in pre_mods:
pre.add(mod['module'])
for mod in post_mods:
post.add(mod['module'])
return post - pre
def _rm_mods(pre_mods, post_mods):
'''
Return a list of the new modules, pass an lsmod dict before running
modprobe and one after modprobe has run
'''
pre = set()
post = set()
for mod in pre_mods:
pre.add(mod['module'])
for mod in post_mods:
post.add(mod['module'])
return pre - post
def _union_module(a, b):
'''
Return union of two list where duplicated items are only once
'''
return list(set(a) | set(b))
def _get_modules_conf():
'''
Return location of modules config file.
Default: /etc/modules
'''
if __grains__['os'] == 'Arch':
return '/etc/modules-load.d/salt_managed.conf'
return '/etc/modules'
def _strip_module_name(mod):
'''
Return module name and strip configuration. It is possible insert modules
in this format:
bonding mode=4 miimon=1000
This method return only 'bonding'
'''
if mod.strip() == '':
return False
return mod.split()[0]
def _set_persistent_module(mod):
'''
Add module to configuration file to make it persistent. If module is
commented uncomment it.
'''
conf = _get_modules_conf()
if not os.path.exists(conf):
__salt__['file.touch'](conf)
mod_name = _strip_module_name(mod)
if not mod_name or mod_name in mod_list(True) or mod_name not in available():
return set()
escape_mod = re.escape(mod)
## If module is commented only uncomment it
if __salt__['file.contains_regex_multiline'](conf, "^#[\t ]*{}[\t ]*$".format(escape_mod)):
__salt__['file.uncomment'](conf, escape_mod)
else:
__salt__['file.append'](conf, mod)
return set([mod_name])
def _remove_persistent_module(mod, comment):
'''
Remove module from configuration file. If comment is true only comment line
where module is.
'''
conf = _get_modules_conf()
mod_name = _strip_module_name(mod)
if not mod_name or mod_name not in mod_list(True):
return set()
escape_mod = re.escape(mod)
if comment:
__salt__['file.comment'](conf, "^[\t ]*{}[\t ]?".format(escape_mod))
else:
__salt__['file.sed'](conf, "^[\t ]*{}[\t ]?".format(escape_mod), '')
return set([mod_name])
def available():
'''
Return a list of all available kernel modules
CLI Example:
.. code-block:: bash
salt '*' kmod.available
'''
ret = []
mod_dir = os.path.join('/lib/modules/', os.uname()[2])
for root, dirs, files in os.walk(mod_dir):
for fn_ in files:
if '.ko' in fn_:
ret.append(fn_[:fn_.index('.ko')])
return sorted(list(ret))
def check_available(mod):
'''
Check to see if the specified kernel module is available
CLI Example:
.. code-block:: bash
salt '*' kmod.check_available kvm
'''
return mod in available()
def lsmod():
'''
Return a dict containing information about currently loaded modules
CLI Example:
.. code-block:: bash
salt '*' kmod.lsmod
'''
ret = []
for line in __salt__['cmd.run']('lsmod').splitlines():
comps = line.split()
if not len(comps) > 2:
continue
if comps[0] == 'Module':
continue
mdat = {
'size': comps[1],
'module': comps[0],
'depcount': comps[2],
}
if len(comps) > 3:
mdat['deps'] = comps[3].split(',')
else:
mdat['deps'] = []
ret.append(mdat)
return ret
def mod_list(only_persist=False):
'''
Return a list of the loaded module names
CLI Example:
.. code-block:: bash
salt '*' kmod.mod_list
'''
    mods = set()
if only_persist:
conf = _get_modules_conf()
if os.path.exists(conf):
with salt.utils.fopen(conf, 'r') as modules_file:
for line in modules_file:
line = line.strip()
mod_name = _strip_module_name(line)
if not line.startswith('#') and mod_name:
mods.add(mod_name)
else:
for mod in lsmod():
mods.add(mod['module'])
return sorted(list(mods))
def load(mod, persist=False):
'''
Load the specified kernel module
mod
Name of module to add
persist
Write module to /etc/modules to make it load on system reboot
CLI Example:
.. code-block:: bash
salt '*' kmod.load kvm
'''
pre_mods = lsmod()
response = __salt__['cmd.run_all']('modprobe {0}'.format(mod))
if response['retcode'] == 0:
post_mods = lsmod()
mods = _new_mods(pre_mods, post_mods)
persist_mods = set()
if persist:
persist_mods = _set_persistent_module(mod)
return sorted(list(mods | persist_mods))
else:
return 'Module {0} not found'.format(mod)
def is_loaded(mod):
'''
Check to see if the specified kernel module is loaded
CLI Example:
.. code-block:: bash
salt '*' kmod.is_loaded kvm
'''
return mod in mod_list()
def remove(mod, persist=False, comment=True):
'''
Remove the specified kernel module
mod
Name of module to remove
persist
Also remove module from /etc/modules
comment
If persist is set don't remove line from /etc/modules but only
comment it
CLI Example:
.. code-block:: bash
salt '*' kmod.remove kvm
'''
pre_mods = lsmod()
__salt__['cmd.run_all']('modprobe -r {0}'.format(mod))
post_mods = lsmod()
mods = _rm_mods(pre_mods, post_mods)
persist_mods = set()
if persist:
persist_mods = _remove_persistent_module(mod, comment)
return sorted(list(mods | persist_mods))
ethz-asl/segmatch | segmappy/segmappy/tools/hull.py | Python | bsd-3-clause | 583 | 0.001715
import numpy as np
def point_in_hull(point, hull, tolerance=1e-12):
return all((np.dot(eq[:-1], point) + eq[-1] <= tolerance) for eq in hull.equations)
def n_points_in_hull(points, hull):
n_points = 0
for i in range(points.shape[0]):
if point_in_hull(points[i, :], hull):
n_points = n_points + 1
return n_points
def are_in_hull(points, hull):
ins = []
    outs = []
for i in range(points.shape[0]):
if point_in_hull(points[i, :], hull):
ins.append(i)
else:
outs.append(i)
return ins, outs
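# Added usage sketch (not part of the original module): a minimal, self-contained check,
# assuming scipy is available, since these helpers expect an object with a qhull-style
# `equations` attribute such as scipy.spatial.ConvexHull.
if __name__ == "__main__":
    from scipy.spatial import ConvexHull
    corners = np.array([[0.0, 0.0], [1.0, 0.0], [1.0, 1.0], [0.0, 1.0]])
    unit_square = ConvexHull(corners)
    queries = np.array([[0.5, 0.5], [2.0, 2.0]])
    ins, outs = are_in_hull(queries, unit_square)
    print(ins, outs)  # expected: [0] [1]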
shinken-monitoring/mod-perfdata-service | module/module.py | Python | agpl-3.0 | 5,056 | 0.001978
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (C) 2009-2012:
# Gabes Jean, naparuba@gmail.com
# Gerhard Lausser, Gerhard.Lausser@consol.de
# Gregory Starck, g.starck@gmail.com
# Hartmut Goebel, h.goebel@goebel-consult.de
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
# This Class is a plugin for the Shinken Broker. It is in charge
# to brok information of the service perfdata into the file
# var/service-perfdata
# So it just manage the service_check_return
# Maybe one day host data will be useful too
# It will need just a new file, and a new manager :)
import codecs
from shinken.basemodule import BaseModule
properties = {
'daemons': ['broker'],
'type': 'service_perfdata',
'phases': ['running'],
}
# called by the plugin manager to get a broker
def get_instance(plugin):
print "Get a Service Perfdata broker for plugin %s" % plugin.get_name()
# Catch errors
path = plugin.path
if hasattr(plugin, 'mode'):
mode = plugin.mode
else:
mode = 'a'
if hasattr(plugin, 'template'):
template = plugin.template
else:
template = "$LASTSERVICECHECK$\t$HOSTNAME$\t$SERVICEDESC$\t$SERVICEOUTPUT$\t$SERVICESTATE$\t$SERVICEPERFDATA$\n"
# int(data['last_chk']),data['host_name'], data['service_description'], data['output'], current_state, data['perf_data']
instance = Service_perfdata_broker(plugin, path, mode, template)
return instance
# Class for the Merlindb Broker
# Get broks and puts them in merlin database
class Service_perfdata_broker(BaseModule):
def __init__(self, modconf, path, mode, template):
BaseModule.__init__(self, modconf)
self.path = path
self.mode = mode
self.template = template
# Make some raw change
self.template = self.template.replace(r'\t', '\t')
self.template = self.template.replace(r'\n', '\n')
# In Nagios it's said to force a return in line
if not self.template.endswith('\n'):
self.template += '\n'
self.buffer = []
# Called by Broker so we can do init stuff
# TODO: add conf param to get pass with init
# Conf from arbiter!
def init(self):
print "[%s] I open the service-perfdata file '%s'" % (self.name, self.path)
# Try to open the file to be sure we can
self.file = codecs.open(self.path, self.mode, "utf-8")
self.file.close()
# We've got a 0, 1, 2 or 3 (or something else? ->3
# And want a real OK, WARNING, CRITICAL, etc...
def resolve_service_state(self, state):
states = {0: 'OK', 1: 'WARNING', 2: 'CRITICAL', 3: 'UNKNOWN'}
if state in states:
return states[state]
else:
return 'UNKNOWN'
    # A service check has just arrived, we UPDATE data info with this
def manage_service_check_result_brok(self, b):
data = b.data
# The original model
# "$TIMET\t$HOSTNAME\t$SERVICEDESC\t$OUTPUT\t$SERVICESTATE\t$PERFDATA\n"
current_state = self.resolve_service_state(data['state_id'])
macros = {
'$LASTSERVICECHECK$': int(data['last_chk']),
'$HOSTNAME$': data['host_name'],
'$SERVICEDESC$': data['service_description'],
'$SERVICEOUTPUT$': data['output'],
'$SERVICESTATE$': current_state,
'$SERVICEPERFDATA$': data['perf_data'],
'$LASTSERVICESTATE$': data['last_state'],
}
s = self.template
for m in macros:
#print "Replacing in %s %s by %s" % (s, m, str(macros[m]))
s = s.replace(m, unicode(macros[m]))
#s = "%s\t%s\t%s\t%s\t%s\t%s\n" % (int(data['last_chk']),data['host_name'], \
# data['service_description'], data['output'], \
# current_state, data['perf_data'] )
self.buffer.append(s)
    # Each second the broker signals a new tick. Let's use this to
    # dump the buffer to the file
def hook_tick(self, brok):
# Go to write it :)
buf = self.buffer
self.buffer = []
try:
self.file = codecs.open(self.path, self.mode, "utf-8")
for s in buf:
self.file.write(s)
self.file.flush()
self.file.close()
except IOError, exp: # Maybe another tool is just getting it, pass
pass
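# Minimal stand-alone sketch of the macro expansion performed in
# manage_service_check_result_brok() above. The brok values below are invented
# for illustration; in Shinken they come from the scheduler, not a hand-written
# dict. Kept in the module's Python 2 style.
if __name__ == '__main__':
    sample = {
        'last_chk': 1325376000,
        'host_name': 'srv-web-1',
        'service_description': 'HTTP',
        'output': 'HTTP OK: 200 in 0.042s',
        'state_id': 0,
        'perf_data': 'time=0.042s;;;0',
    }
    line = "$LASTSERVICECHECK$\t$HOSTNAME$\t$SERVICEDESC$\t$SERVICEOUTPUT$\t$SERVICESTATE$\t$SERVICEPERFDATA$\n"
    macros = {
        '$LASTSERVICECHECK$': int(sample['last_chk']),
        '$HOSTNAME$': sample['host_name'],
        '$SERVICEDESC$': sample['service_description'],
        '$SERVICEOUTPUT$': sample['output'],
        '$SERVICESTATE$': 'OK',  # what resolve_service_state(0) returns
        '$SERVICEPERFDATA$': sample['perf_data'],
    }
    for m in macros:
        line = line.replace(m, unicode(macros[m]))
    # Prints one tab-separated perfdata line, as it would be written to the file
    print line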
|
project-hopkins/Westworld
|
hopkin/routes/items.py
|
Python
|
gpl-3.0
| 8,707 | 0.001034 |
import json
from bson.errors import InvalidId
from flask import Blueprint, jsonify, request, g
item_api = Blueprint('itemApi', __name__)
def get_item_as_object(item) -> dict:
return_item = {
"_id": str(item['_id']),
"name": item['name'],
"description": item['description'],
"imageURL": item['imageURL'],
"price": item['price'],
"calories": item['calories'],
"category": item['category'],
"tags": item['tags']
}
if 'isRecommended' in item:
return_item['isRecommended'] = item['isRecommended']
return return_item
@item_api.route('/item', methods=['GET'])
def get_all_items() -> tuple:
"""
swagger_from_file: ../swagger/item/getItems.yml
returns all the items as a json array
:return:
"""
from hopkin.models.items import Item
# get all items
items = Item.get_all()
# create items list
items_list = []
# create response
for item in items:
items_list.append(get_item_as_object(item))
return jsonify({'data': {'items': items_list}})
@item_api.route('/item/id/<item_id>', methods=['GET'])
def get_item_by_id(item_id) -> tuple:
"""
swagger_from_file: ../swagger/item/getItem.yml
returns one item as a json array
:return:
"""
from hopkin.models.items import Item
# find specific item
item = Item.get_by_id(item_id)
return jsonify({'data': {'item': get_item_as_object(item)}})
@item_api.route('/item/category/<category>', methods=['GET'])
def get_item_by_category(category) -> tuple:
"""
swagger_from_file: ../swagger/item/getItemsByCategory.yml
returns all the items in a category as a json array
:return:
"""
from hopkin.models.items import Item
# find items by category
items = Item.get_by_category(category)
# create items list
items_list = []
# create response
for item in items:
items_list.append(get_item_as_object(item))
return jsonify({'data': {'items': items_list}})
@item_api.route('/item/category/<category>/count', methods=['GET'])
def get_category_count(category) -> tuple:
"""
swagger_from_file: ../swagger/item/getNumItemsInCat.yml
Returns the number items in that category
:param category:
:return:
"""
json_response = get_item_by_category(category)
return jsonify({'data': {'count': len(json.loads(json_response.data)['data']['items'])}})
@item_api.route('/item/search', methods=['GET'])
def search_item() -> tuple:
"""
swagger_from_file: ../swagger/item/searchItem.yml
    Searches items. Queries of 3 characters or fewer
    only search item names; longer queries search
    both names and tags.
:return:
"""
    from hopkin.models.items import Item
items_list = []
query: str = request.args['q']
    if not query:
        return jsonify({'error': 'no search query provided'})
query = query.title()
items = list(Item.get_by_name_search(query.lower()))
if len(query) > 3:
items = items + list(Item.get_by_tag_starts_with(query.lower()))
    unique_ids = []
for item in items:
if str(item['_id']) not in unique_ids:
items_list.append({
"_id": str(item['_id']),
"name": item['name'],
"description": item['description'],
"imageURL": item['imageURL'],
"price": item['price'],
"calories": item['calories'],
"category": item['category'],
"tags": item['tags'],
"isRecommended": item['isRecommended']
})
unique_ids.append(str(item['_id']))
return jsonify({'data': {'items': items_list}})
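# Illustrative client call (sketch only; the host/port and any auth the real
# deployment uses are assumptions, not part of this module):
#
#   import requests
#   resp = requests.get('http://localhost:5000/item/search', params={'q': 'tea'})
#   items = resp.json()['data']['items']
#
# A query longer than 3 characters, e.g. 'lemon', would also pull in items
# whose tags start with the query.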
@item_api.route('/rate/item/<itemid>', methods=['GET'])
def get_rating(itemid: str) -> tuple:
"""
swagger_from_file: ../swagger/item/getItemRating.yml
Gets a user rating of an item
:param itemid:
:return:
"""
from hopkin.models.ratings import Rating
user_id = str(g.user_id)
rating = Rating.get_rating(itemid, user_id)
if rating is None:
return jsonify({
'error': {'error': {'message': 'No Rating for item'}}
})
return jsonify({'data': {'rating': {
'item_id': rating['item_id'],
'rating': rating['rating'],
}}})
@item_api.route('/rate/item', methods=['POST'])
def rate_item() -> tuple:
"""
Adds a user rating of an item
:return:
"""
from hopkin.models.items import Item
from hopkin.models.ratings import Rating
if request.json is None:
return jsonify({'error': 'invalid request'})
try:
item_id = Item.get_by_id(request.json['itemid'])
if item_id is None:
return jsonify({'error': f"No item with id: {request.json['itemid']} found"}), 400
elif request.json['rating'] > 5:
            return jsonify({'error': 'rating can\'t be greater than 5'}), 400
except InvalidId:
return jsonify({'error': 'Invalid item id format'}), 400
user_id = str(g.user_id)
rating = Rating.get_rating(request.json['itemid'], user_id)
if rating is None:
Rating.save({
'item_id': request.json['itemid'],
'user_id': user_id,
'rating': request.json['rating']
})
return jsonify({'data': {'success': True, 'message': 'new rating added'}})
rating['item_id'] = request.json['itemid']
rating['user_id'] = user_id
Rating.update(rating)
return jsonify({'data': {'success': True, 'message': 'rating updated'}})
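# Example request body the endpoint above expects (sketch; field names are
# taken from the code, the item id is invented, and the URL prefix/auth depend
# on the deployment):
#
#   POST /rate/item
#   {"itemid": "5c9a1f2e8e4b0c2a6f000001", "rating": 4}
#
# The first call for a given user/item pair inserts the rating; later calls
# update it.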
@item_api.route('/admin/item/add', methods=['POST'])
def add_new_item() -> tuple:
"""
swagger_from_file: ../swagger/item/itemAdd.yml
adds an item to the database and returns it in a JSON object
:return:
"""
from hopkin.models.items import Item
if request.json is not None and g.is_admin:
new_item = {
'name': request.json['name'],
'description': request.json['description'],
'imageURL': request.json['imageURL'],
'price': request.json['price'],
'calories': request.json['calories'],
'category': request.json['category'],
'tags': request.json['tags'],
"isRecommended": request.json['isRecommended']
}
new_item_id = Item.insert(new_item)
return jsonify({'data': {'item': request.json, 'itemId': str(new_item_id)}})
    return jsonify({'error': 'invalid item: ' + str(request.json)}), 403
@item_api.route('/admin/item/delete/<item_id>', methods=['POST'])
def delete_item(item_id):
"""
swagger_from_file: ../swagger/item/deleteItem.yml
deletes the selected item from the database
:return:
"""
from hopkin.models.items import Item
# search for item by id
item = Item.get_by_id(str(item_id))
if item is not None and g.is_admin:
# remove item
Item.remove(item_id)
return jsonify({'data': {'success': True}})
return jsonify({'error': 'No item found with id ' + item_id})
@item_api.route('/admin/item/update', methods=['POST'])
def update_item():
"""
swagger_from_file: ../swagger/item/updateItem.yml
updated the selected item in the database
:return:
"""
from hopkin.models.items import Item
if request.json is not None:
item_update = Item.get_by_id(request.json['_id'])
item_update['calories'] = request.json['calories']
item_update['category'] = request.json['category']
item_update['description'] = request.json['description']
# will be updated to get base64 image
item_update['imageURL'] = request.json['imageURL']
item_update['name'] = request.json['name']
item_update['price'] = request.json['price']
item_update['tags'] = request.json['tags']
item_update['isRecommended'] = request.json['isRecommended']
Item.save(item_update)
return jsonify({'data': {'message': 'Updated with item id: ' + str(item_update['_id']),
'mongo_id': str(item_update['_id'])}
})
return jsonify({'error': 'item not
|
anhstudios/swganh
|
data/scripts/templates/object/mobile/shared_dressed_diplomat_zabrak_male_01.py
|
Python
|
mit
| 457 | 0.045952 |
#### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Creature()
result.template = "object/mobile/shared_dressed_diplomat_zabrak_male_01.iff"
	result.attribute_template_id = 9
result.stfName("npc_name","zabrak_base_male")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result
|
kstaniek/pampio
|
pampio/version.py
|
Python
|
apache-2.0
| 50 | 0 |
"""Version in
|
formation."""
__version__
|
= '0.0.2'
|
samstav/fastfood
|
fastfood/book.py
|
Python
|
apache-2.0
| 10,125 | 0 |
# Copyright 2015 Rackspace US, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Fastfood Chef Cookbook manager."""
from __future__ import print_function
import os
from fastfood import utils
class CookBook(object):
"""Chef Cookbook object.
Understands metadata.rb, Berksfile and how to parse them.
"""
def __init__(self, path):
"""Initialize CookBook wrapper at 'path'."""
self.path = utils.normalize_path(path)
self._metadata = None
if not os.path.isdir(path):
raise ValueError("Cookbook dir %s does not exist."
% self.path)
self._berksfile = None
@property
def name(self):
"""Cookbook name property."""
try:
return self.metadata.to_dict()['name']
except KeyError:
            raise LookupError("%s is missing a 'name' attribute."
                              % self.metadata)
@property
def metadata(self):
"""Return dict representation of this cookbook's metadata.rb ."""
self.metadata_path = os.path.join(self.path, 'metadata.rb')
if not os.path.isfile(self.metadata_path):
raise ValueError("Cookbook needs metadata.rb, %s"
% self.metadata_path)
if not self._metadata:
self._metadata = MetadataRb(open(self.metadata_path, 'r+'))
return self._metadata
@property
def berksfile(self):
"""Return this cookbook's Berksfile instance."""
self.berks_path = os.path.join(self.path, 'Berksfile')
if not self._berksfile:
if not os.path.isfile(self.berks_path):
raise ValueError("No Berksfile found at %s"
% self.berks_path)
self._berksfile = Berksfile(open(self.berks_path, 'r+'))
return self._berksfile
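# Typical use of the wrapper above (sketch; the cookbook path is an
# assumption and must contain a metadata.rb and a Berksfile):
#
#   cb = CookBook('/path/to/mycookbook')
#   cb.name                  # 'name' attribute from metadata.rb
#   cb.metadata.to_dict()    # e.g. {'name': ..., 'depends': {...}}
#   cb.berksfile.to_dict()   # e.g. {'source': [...], 'cookbook': {...}}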
class MetadataRb(utils.FileWrapper):
"""Wrapper for a metadata.rb file."""
@classmethod
def from_dict(cls, dictionary):
"""Create a MetadataRb instance from a dict."""
cookbooks = set()
# put these in order
groups = [cookbooks]
for key, val in dictionary.items():
if key == 'depends':
cookbooks.update({cls.depends_statement(cbn, meta)
for cbn, meta in val.items()})
body = ''
for group in groups:
if group:
body += '\n'
body += '\n'.join(group)
return cls.from_string(body)
@staticmethod
def depends_statement(cookbook_name, metadata=None):
"""Return a valid Ruby 'depends' statement for the metadata.rb file."""
line = "depends '%s'" % cookbook_name
if metadata:
if not isinstance(metadata, dict):
raise TypeError("Stencil dependency options for %s "
"should be a dict of options, not %s."
% (cookbook_name, metadata))
if metadata:
line = "%s '%s'" % (line, "', '".join(metadata))
return line
def to_dict(self):
"""Return a dictionary representation of this metadata.rb file."""
return self.parse()
def parse(self):
"""Parse the metadata.rb into a dict."""
data = utils.ruby_lines(self.readlines())
data = [tuple(j.strip() for j in line.split(None, 1))
for line in data]
depends = {}
for line in data:
if not len(line) == 2:
continue
key, value = line
if key == 'depends':
value = value.split(',')
lib = utils.ruby_strip(value[0])
detail = [utils.ruby_strip(j) for j in value[1:]]
depends[lib] = detail
datamap = {key: utils.ruby_strip(val) for key, val in data}
if depends:
datamap['depends'] = depends
self.seek(0)
return datamap
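    # Sketch of what parse() yields for a small metadata.rb (file content
    # invented for illustration; quote handling depends on utils.ruby_strip):
    #
    #   name     'mycookbook'
    #   version  '0.1.0'
    #   depends  'apt'
    #   depends  'mysql', '>= 6.0.0'
    #
    # parses to roughly:
    #
    #   {'name': 'mycookbook', 'version': '0.1.0',
    #    'depends': {'apt': [], 'mysql': ['>= 6.0.0']}}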
def merge(self, other):
"""Add requirements from 'other' metadata.rb into this one."""
if not isinstance(other, MetadataRb):
            raise TypeError("MetadataRb to merge should be a 'MetadataRb' "
                            "instance, not %s." % type(other))
current = self.to_dict()
new = other.to_dict()
# compare and gather cookbook dependencies
meta_writelines = ['%s\n' % self.depends_statement(cbn, meta)
for cbn, meta in new.get('depends', {}).items()
if cbn not in current.get('depends', {})]
self.write_statements(meta_writelines)
return self.to_dict()
class Berksfile(utils.FileWrapper):
"""Wrapper for a Berksfile."""
berks_options = [
'branch',
'git',
'path',
'ref',
'revision',
'tag',
]
def to_dict(self):
"""Return a dictionary representation of this Berksfile."""
return self.parse()
def parse(self):
"""Parse this Berksfile into a dict."""
self.flush()
self.seek(0)
data = utils.ruby_lines(self.readlines())
data = [tuple(j.strip() for j in line.split(None, 1))
for line in data]
datamap = {}
for line in data:
if len(line) == 1:
datamap[line[0]] = True
elif len(line) == 2:
key, value = line
if key == 'cookbook':
datamap.setdefault('cookbook', {})
value = [utils.ruby_strip(v) for v in value.split(',')]
lib, detail = value[0], value[1:]
datamap['cookbook'].setdefault(lib, {})
                # if there is additional dependency data but it's
                # not the ruby hash, it's the version constraint
if detail and not any("".join(detail).startswith(o)
for o in self.berks_options):
constraint, detail = detail[0], detail[1:]
datamap['cookbook'][lib]['constraint'] = constraint
if detail:
for deet in detail:
opt, val = [
utils.ruby_strip(i)
for i in deet.split(':', 1)
]
if not any(opt == o for o in self.berks_options):
raise ValueError(
"Cookbook detail '%s' does not specify "
"one of '%s'" % (opt, self.berks_options))
else:
datamap['cookbook'][lib][opt.strip(':')] = (
utils.ruby_strip(val))
elif key == 'source':
datamap.setdefault(key, [])
datamap[key].append(utils.ruby_strip(value))
elif key:
datamap[key] = utils.ruby_strip(value)
self.seek(0)
return datamap
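    # Sketch of what parse() yields for a small Berksfile (file content
    # invented for illustration):
    #
    #   source 'https://supermarket.chef.io'
    #   metadata
    #   cookbook 'apt', '~> 2.0'
    #   cookbook 'mycb', git: 'https://example.com/mycb.git'
    #
    # parses to roughly:
    #
    #   {'source': ['https://supermarket.chef.io'], 'metadata': True,
    #    'cookbook': {'apt': {'constraint': '~> 2.0'},
    #                 'mycb': {'git': 'https://example.com/mycb.git'}}}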
@classmethod
def from_dict(cls, dictionary):
"""Create a Berksfile instance from a dict."""
cookbooks = set()
sources = set()
other = set()
# put these in order
groups = [sources, cookbooks, other]
for key, val in dictionary.items():
if key == 'cookbook':
cookbooks.update({cls.cookbook_statement(cbn, meta)
for cbn, meta in val.items()})
elif key == 'source':
sources.update({"sou