| text | repo_name | path | language | license | size | score |
|---|---|---|---|---|---|---|
| stringlengths 6-947k | stringlengths 5-100 | stringlengths 4-231 | stringclasses 1 value | stringclasses 15 values | int64 6-947k | float64 0-0.34 |
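The table above is the column schema of the code-corpus shard whose samples follow: each row pairs a raw source file (`text`) with provenance metadata (`repo_name`, `path`, `language`, `license`, `size`) and a numeric `score`. A minimal sketch of iterating such a shard, assuming it is exposed through the Hugging Face `datasets` library; the dataset identifier below is a placeholder, not the real name.

# Hypothetical loading sketch; "example/python-code-corpus" is a placeholder identifier.
from datasets import load_dataset

rows = load_dataset("example/python-code-corpus", split="train", streaming=True)
for row in rows:
    # Keep small, low-score Python files and report their provenance.
    if row["language"] == "Python" and row["score"] < 0.01 and row["size"] < 10000:
        print(row["repo_name"], row["path"], row["license"], row["size"])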
# -*- coding: utf-8 -*-
# See LICENSE.txt for licensing terms
from StringIO import StringIO
from docutils import writers
from rst2pdf import createpdf
class PdfWriter(writers.Writer):
def __init__(self, builder):
writers.Writer.__init__(self)
self.builder = builder
self.output = u''
    supported = ('pdf',)
"""Formats this writer supports."""
config_section = 'pdf writer'
    config_section_dependencies = ('writers',)
"""Final translated form of `document`."""
def translate(self):
sio = StringIO('')
createpdf.RstToPdf(sphinx=True).createPdf(
doctree=self.document, output=sio, compressed=False)
self.output = unicode(sio.getvalue(), 'utf-8', 'ignore')
def supports(self, format):
"""This writer supports all format-specific elements."""
return 1
| liuyi1112/rst2pdf | rst2pdf/writer.py | Python | mit | 860 | 0 |
#!/usr/bin/env python
# **********************************************************************
#
# Copyright (c) 2003-2015 ZeroC, Inc. All rights reserved.
#
# **********************************************************************
import sys, traceback, Ice
slice_dir = Ice.getSliceDir()
if not slice_dir:
print(sys.argv[0] + ': Slice directory not found.')
sys.exit(1)
Ice.loadSlice("'-I" + slice_dir + "' Props.ice")
import Demo
def menu():
print("""
usage:
1: set properties (batch 1)
2: set properties (batch 2)
c: show current properties
s: shutdown server
x: exit
?: help
""")
def show(admin):
    props = admin.getPropertiesForPrefix("Demo")
    print("Server's current settings:")
    for k, v in props.items():
        print(" " + k + "=" + v)
class Client(Ice.Application):
def run(self, args):
if len(args) > 1:
print(self.appName() + ": too many arguments")
return 1
props = Demo.PropsPrx.checkedCast(self.communicator().propertyToProxy("Props.Proxy"))
if props == None:
print("invalid proxy")
return 1
admin = Ice.PropertiesAdminPrx.checkedCast(self.communicator().propertyToProxy("Admin.Proxy"))
batch1 = {}
batch1["Demo.Prop1"] = "1"
batch1["Demo.Prop2"] = "2"
batch1["Demo.Prop3"] = "3"
batch2 = {}
batch2["Demo.Prop1"] = "10"
batch2["Demo.Prop2"] = "" # An empty value removes this property
batch2["Demo.Prop3"] = "30"
show(admin)
menu()
c = None
while c != 'x':
try:
sys.stdout.write("==> ")
sys.stdout.flush()
c = sys.stdin.readline().strip()
if c == "1" or c == "2":
propsDict = c == "1" and batch1 or batch2
print("Sending:")
for k, v in propsDict.items():
if k.startswith("Demo"):
print(" " + k + "=" + v)
                    print("")
admin.setProperties(propsDict)
print("Changes:")
changes = props.getChanges()
if len(changes) == 0:
print(" None.")
else:
for k, v in changes.items():
sys.stdout.write(" " + k)
if len(v) == 0:
print(" was removed")
else:
print(" is now " + v)
elif c == "c":
show(admin)
elif c == "s":
props.shutdown()
elif c == "x":
# Nothing to do
pass
elif c == "?":
menu()
else:
print("unknown command `" + c + "'")
menu()
except KeyboardInterrupt:
break
except EOFError:
break
except Ice.Exception as ex:
print(ex)
return 0
app = Client()
sys.exit(app.main(sys.argv, "config.client"))
| qiuxs/ice-demos | python/Ice/properties/Client.py | Python | gpl-2.0 | 3,232 | 0.003403 |
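The comment in the batch above ("An empty value removes this property") describes the merge rule applied by the server-side PropertiesAdmin facet. Below is a plain-Python illustration of that rule; it is not Ice API code, just the same semantics on an ordinary dict.

# Plain-Python sketch of the property-merge rule used in the demo above.
def apply_batch(current, batch):
    merged = dict(current)
    for key, value in batch.items():
        if value == "":
            merged.pop(key, None)  # an empty value removes the property
        else:
            merged[key] = value    # otherwise overwrite or add
    return merged

props = {"Demo.Prop1": "1", "Demo.Prop2": "2", "Demo.Prop3": "3"}
print(apply_batch(props, {"Demo.Prop1": "10", "Demo.Prop2": "", "Demo.Prop3": "30"}))
# -> {'Demo.Prop1': '10', 'Demo.Prop3': '30'}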
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Sets environment variables needed to run a chromium unit test."""
import os
import stat
import subprocess
import sys
# This is hardcoded to be src/ relative to this script.
ROOT_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
CHROME_SANDBOX_ENV = 'CHROME_DEVEL_SANDBOX'
CHROME_SANDBOX_PATH = '/opt/chromium/chrome_sandbox'
def get_sandbox_env(env):
"""Returns the environment flags needed for the SUID sandbox to work."""
extra_env = {}
chrome_sandbox_path = env.get(CHROME_SANDBOX_ENV, CHROME_SANDBOX_PATH)
# The above would silently disable the SUID sandbox if the env value were
# an empty string. We don't want to allow that. http://crbug.com/245376
# TODO(jln): Remove this check once it's no longer possible to disable the
# sandbox that way.
if not chrome_sandbox_path:
chrome_sandbox_path = CHROME_SANDBOX_PATH
extra_env[CHROME_SANDBOX_ENV] = chrome_sandbox_path
return extra_env
def trim_cmd(cmd):
"""Removes internal flags from cmd since they're just used to communicate from
the host machine to this script running on the swarm slaves."""
sanitizers = ['asan', 'lsan', 'msan', 'tsan']
internal_flags = frozenset('--%s=%d' % (name, value)
for name in sanitizers
for value in [0, 1])
return [i for i in cmd if i not in internal_flags]
def fix_python_path(cmd):
"""Returns the fixed command line to call the right python executable."""
out = cmd[:]
if out[0] == 'python':
out[0] = sys.executable
elif out[0].endswith('.py'):
out.insert(0, sys.executable)
return out
def get_sanitizer_env(cmd, asan, lsan, msan, tsan):
  """Returns the environment flags needed for sanitizer tools."""
extra_env = {}
# Instruct GTK to use malloc while running sanitizer-instrumented tests.
extra_env['G_SLICE'] = 'always-malloc'
extra_env['NSS_DISABLE_ARENA_FREE_LIST'] = '1'
extra_env['NSS_DISABLE_UNLOAD'] = '1'
# TODO(glider): remove the symbolizer path once
# https://code.google.com/p/address-sanitizer/issues/detail?id=134 is fixed.
symbolizer_path = os.path.abspath(os.path.join(ROOT_DIR, 'third_party',
'llvm-build', 'Release+Asserts', 'bin', 'llvm-symbolizer'))
if lsan or tsan:
# LSan is not sandbox-compatible, so we can use online symbolization. In
# fact, it needs symbolization to be able to apply suppressions.
symbolization_options = ['symbolize=1',
'external_symbolizer_path=%s' % symbolizer_path]
elif (asan or msan) and sys.platform not in ['win32', 'cygwin']:
# ASan uses a script for offline symbolization, except on Windows.
# Important note: when running ASan with leak detection enabled, we must use
# the LSan symbolization options above.
symbolization_options = ['symbolize=0']
# Set the path to llvm-symbolizer to be used by asan_symbolize.py
extra_env['LLVM_SYMBOLIZER_PATH'] = symbolizer_path
else:
symbolization_options = []
if asan:
asan_options = symbolization_options[:]
if lsan:
asan_options.append('detect_leaks=1')
if asan_options:
extra_env['ASAN_OPTIONS'] = ' '.join(asan_options)
if sys.platform == 'darwin':
isolate_output_dir = os.path.abspath(os.path.dirname(cmd[0]))
# This is needed because the test binary has @executable_path embedded in
# it that the OS tries to resolve to the cache directory and not the
# mapped directory.
extra_env['DYLD_LIBRARY_PATH'] = str(isolate_output_dir)
if lsan:
if asan or msan:
lsan_options = []
else:
lsan_options = symbolization_options[:]
if sys.platform == 'linux2':
# Use the debug version of libstdc++ under LSan. If we don't, there will
# be a lot of incomplete stack traces in the reports.
extra_env['LD_LIBRARY_PATH'] = '/usr/lib/x86_64-linux-gnu/debug:'
extra_env['LSAN_OPTIONS'] = ' '.join(lsan_options)
if msan:
msan_options = symbolization_options[:]
if lsan:
msan_options.append('detect_leaks=1')
extra_env['MSAN_OPTIONS'] = ' '.join(msan_options)
if tsan:
tsan_options = symbolization_options[:]
extra_env['TSAN_OPTIONS'] = ' '.join(tsan_options)
return extra_env
def get_sanitizer_symbolize_command(json_path=None, executable_path=None):
"""Construct the command to invoke offline symbolization script."""
script_path = '../tools/valgrind/asan/asan_symbolize.py'
cmd = [sys.executable, script_path]
if json_path is not None:
cmd.append('--test-summary-json-file=%s' % json_path)
if executable_path is not None:
cmd.append('--executable-path=%s' % executable_path)
return cmd
def get_json_path(cmd):
"""Extract the JSON test summary path from a command line."""
json_path_flag = '--test-launcher-summary-output='
for arg in cmd:
if arg.startswith(json_path_flag):
return arg.split(json_path_flag).pop()
return None
def symbolize_snippets_in_json(cmd, env):
"""Symbolize output snippets inside the JSON test summary."""
json_path = get_json_path(cmd)
if json_path is None:
return
try:
symbolize_command = get_sanitizer_symbolize_command(
json_path=json_path, executable_path=cmd[0])
p = subprocess.Popen(symbolize_command, stderr=subprocess.PIPE, env=env)
(_, stderr) = p.communicate()
except OSError as e:
print 'Exception while symbolizing snippets: %s' % e
if p.returncode != 0:
print "Error: failed to symbolize snippets in JSON:\n"
print stderr
def run_executable(cmd, env):
"""Runs an executable with:
- environment variable CR_SOURCE_ROOT set to the root directory.
- environment variable LANGUAGE to en_US.UTF-8.
- environment variable CHROME_DEVEL_SANDBOX set
- Reuses sys.executable automatically.
"""
extra_env = {}
  # Many tests assume an English interface...
extra_env['LANG'] = 'en_US.UTF-8'
# Used by base/base_paths_linux.cc as an override. Just make sure the default
# logic is used.
env.pop('CR_SOURCE_ROOT', None)
extra_env.update(get_sandbox_env(env))
# Copy logic from tools/build/scripts/slave/runtest.py.
asan = '--asan=1' in cmd
lsan = '--lsan=1' in cmd
msan = '--msan=1' in cmd
tsan = '--tsan=1' in cmd
if sys.platform in ['win32', 'cygwin']:
# Symbolization works in-process on Windows even when sandboxed.
use_symbolization_script = False
else:
# LSan doesn't support sandboxing yet, so we use the in-process symbolizer.
# Note that ASan and MSan can work together with LSan.
use_symbolization_script = (asan or msan) and not lsan
if asan or lsan or msan or tsan:
extra_env.update(get_sanitizer_env(cmd, asan, lsan, msan, tsan))
if lsan or tsan:
# LSan and TSan are not sandbox-friendly.
cmd.append('--no-sandbox')
cmd = trim_cmd(cmd)
# Ensure paths are correctly separated on windows.
cmd[0] = cmd[0].replace('/', os.path.sep)
cmd = fix_python_path(cmd)
print('Additional test environment:\n%s\n'
'Command: %s\n' % (
'\n'.join(' %s=%s' %
(k, v) for k, v in sorted(extra_env.iteritems())),
' '.join(cmd)))
env.update(extra_env or {})
try:
# See above comment regarding offline symbolization.
if use_symbolization_script:
# Need to pipe to the symbolizer script.
p1 = subprocess.Popen(cmd, env=env, stdout=subprocess.PIPE,
stderr=sys.stdout)
p2 = subprocess.Popen(
get_sanitizer_symbolize_command(executable_path=cmd[0]),
env=env, stdin=p1.stdout)
p1.stdout.close() # Allow p1 to receive a SIGPIPE if p2 exits.
p1.wait()
p2.wait()
# Also feed the out-of-band JSON output to the symbolizer script.
symbolize_snippets_in_json(cmd, env)
return p1.returncode
else:
return subprocess.call(cmd, env=env)
except OSError:
print >> sys.stderr, 'Failed to start %s' % cmd
raise
def main():
return run_executable(sys.argv[1:], os.environ.copy())
if __name__ == '__main__':
sys.exit(main())
| Teamxrtc/webrtc-streaming-node | third_party/webrtc/src/chromium/src/testing/test_env.py | Python | mit | 8,248 | 0.013337 |
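A short sketch of how the helpers in test_env.py above compose. It assumes the file is importable as the module `test_env`; the test binary path and sanitizer flags are placeholders.

# Hedged usage sketch; assumes test_env.py above is on the import path.
from test_env import get_sanitizer_env, trim_cmd

cmd = ['out/Release/base_unittests', '--asan=1', '--lsan=1']
extra = get_sanitizer_env(cmd, True, True, False, False)  # asan, lsan, msan, tsan
print(extra['ASAN_OPTIONS'])  # e.g. "symbolize=1 external_symbolizer_path=... detect_leaks=1"
print(trim_cmd(cmd))          # ['out/Release/base_unittests'] - internal flags stripped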
import random
from pdcglobal import *
from effects import *
def PotionOfKillbeePoison(item):
item.full_name += ' of Killerbee-Poison'
item.weaponinfotext = 'Dangerous Poison'
def DrinkPotionOfKillbeePoison(self, actor):
KillerbeePoisonEffect(actor,None)
def PotionOfYumuraPoison(item):
item.full_name += ' of Yumura-Poison'
def DrinkPotionOfYumuraPoison(self, actor):
YumuraPoisonEffect(actor,None)
def PotionOfRegeneration(item):
    item.full_name += ' of Regeneration'
def DrinkPotionOfRegeneration(self, actor):
RegenerationEffect(actor,None)
def PotionOfEndurance(item):
item.full_name += ' of Endurance'
def DrinkPotionOfEndurance(self, actor):
actor.cur_endurance += d(10) + d(10)
def PotionOfMind(item):
item.full_name += ' of Mind'
def DrinkPotionOfMind(self, actor):
actor.cur_mind += d(10) + d(10)
def PotionOfSpellcaster(item):
item.full_name += ' of Spellcasters'
def DrinkPotionOfSpellcaster(self, actor):
actor.cur_endurance += d(10) + d(10)
actor.cur_mind += d(10) + d(10)
def PotionOfHealing(item):
item.full_name += ' of Healing'
def DrinkPotionOfHealing(self, actor):
    actor.cur_health += d(10)
| cycladesnz/chambersAndCreatures | src/item/suf_potions.py | Python | gpl-2.0 | 1,217 | 0.013969 |
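A minimal sketch of how these suffix functions decorate an item. The FakeItem stand-in is invented here for illustration; the real item class lives elsewhere in the game code, and the module's `pdcglobal`/`effects` imports are assumed to resolve.

# Stand-in item object for illustration only; not the game's real Item class.
class FakeItem(object):
    def __init__(self, name):
        self.full_name = name
        self.weaponinfotext = ''

potion = FakeItem('Potion')
PotionOfKillbeePoison(potion)    # defined above
print(potion.full_name)          # Potion of Killerbee-Poison
print(potion.weaponinfotext)     # Dangerous Poison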
# vi:si:et:sw=4:sts=4:ts=4
# Flumotion - a streaming media server
# Copyright (C) 2004,2005,2006,2007,2008,2009 Fluendo, S.L.
# Copyright (C) 2010,2011 Flumotion Services, S.A.
# All rights reserved.
#
# This file may be distributed and/or modified under the terms of
# the GNU Lesser General Public License version 2.1 as published by
# the Free Software Foundation.
# This file is distributed without any warranty; without even the implied
# warranty of merchantability or fitness for a particular purpose.
# See "LICENSE.LGPL" in the source distribution for more information.
#
# Headers in this file shall remain intact.
# Log categories
ADMIN_LOG_CATEGORY = "admin"
STORES_LOG_CATEGORY = "stores"
PROXIES_LOG_CATEGORY = "proxy"
DATASOURCE_LOG_CATEGORY = "datasource"
MONITORING_LOG_CATEGORY = "monitoring"
TRANSCODING_LOG_CATEGORY = "transcoding"
SCHEDULER_LOG_CATEGORY = "scheduler"
NOTIFIER_LOG_CATEGORY = "notifier"
IDLE_LOG_CATEGORY = "idle"
JANITOR_LOG_CATEGORY = "janitor"
GET_REQUEST_AGENT = "Flumotion Transcoder"
# The generic timeout used for remote method call
REMOTE_CALL_TIMEOUT = 60
# The time to let the workers log back in to the manager before resuming
RESUME_DELAY = 20
# The time a proxy waits after a SIGTERM before sending a SIGKILL to a component
COMPONENT_WAIT_TO_KILL = 30
# The time between each monitor components set adjustments
MONITORSET_TUNE_PERIOD = 10
# Label templates
ACTIVITY_LABEL_TEMPLATE = "%(customerName)s/%(profileName)s:%(sourcePath)s"
TRANSCODER_LABEL_TEMPLATE = "%(customerName)s/%(profileName)s:%(sourcePath)s"
MONITOR_LABEL_TEMPLATE = "Monitor for %(customerName)s"
# Maximum time to wait for the admin to load
# and initialize all components stats
WAIT_IDLE_TIMEOUT = 30
# Maximum time to wait for an element to be active
WAIT_ACTIVE_TIMEOUT = 30
# Maximum time to wait for a worker instance
# when the worker name is set to a component state
WAIT_WORKER_TIMEOUT = 30
# Maximum time to wait for component properties
TASKMANAGER_WAITPROPS_TIMEOUT = 30
TASKMANAGER_IDLE_TIMEOUT = 30
# Maximum time for admin tasks to wait for a component to be loaded
TASK_LOAD_TIMEOUT = 30
# Maximum time for admin tasks to wait for a component becoming happy
TASK_HAPPY_TIMEOUT = 60
# First delay to wait when retrying to load a component
TASK_START_DELAY = 3
# The factor to apply to the delay
TASK_START_DELAY_FACTOR = 4
# Maximum time to hold a lost component before starting another one
TASK_HOLD_TIMEOUT = 60
# Maximum time to look for a valid component before starting a new one
TASK_POTENTIAL_COMPONENT_TIMEOUT = 20
# Maximum time to wait when retrieving component UI State
TASK_UISTATE_TIMEOUT = 20
# Maximum time the janitor wait before forcing the component deletion
JANITOR_WAIT_FOR_DELETE = 20
MONITOR_STATE_UPDATE_PERIOD = 1
MONITOR_MAX_RETRIES = 3
MONITORING_POTENTIAL_WORKER_TIMEOUT = 20
# Maximum time an elected transcoder can stay sad before starting another one
TRANSCODER_SAD_TIMEOUT = 120
# Maximum time a component can take to acknowledge.
# Take into account that a lot of files are copied/moved
# during acknowledgement, so it can take a long time
TRANSCODER_ACK_TIMEOUT = 60*12
TRANSCODER_MAX_RETRIES = 2
TRANSCODING_POTENTIAL_WORKER_TIMEOUT = 20
# Startup timeouts
MONITORING_START_TIMEOUT = 30
MONITORING_PAUSE_TIMEOUT = 30
MONITORING_RESUME_TIMEOUT = 30
MONITORING_ACTIVATION_TIMEOUT = 30
TRANSCODING_START_TIMEOUT = 30
TRANSCODING_PAUSE_TIMEOUT = 30
TRANSCODING_RESUME_TIMEOUT = 30
SCHEDULER_START_TIMEOUT = 30
SCHEDULER_PAUSE_TIMEOUT = 30
SCHEDULER_RESUME_TIMEOUT = 30
NOTIFIER_START_TIMEOUT = 30
# Maximum time to wait for a datasource to be ready
WAIT_DATASOURCE_TIMEOUT = 60
# Forced component deletion constants
FORCED_DELETION_TIMEOUT = 10
FORCED_DELETION_BUZY_TIMEOUT = 30
FORCED_DELETION_MAX_RETRY = 3
LOAD_COMPONENT_TIMEOUT = 30.0
GLOBAL_MAIL_NOTIFY_TIMEOUT = 60
GLOBAL_MAIL_NOTIFY_RETRIES = 5
# AdminStore default values
DEFAULT_ACCESS_FORCE_USER = None
DEFAULT_ACCESS_FORCE_GROUP = None
DEFAULT_ACCESS_FORCE_DIR_MODE = None
DEFAULT_ACCESS_FORCE_FILE_MODE = None
DEFAULT_OUTPUT_MEDIA_TEMPLATE = "%(targetPath)s"
DEFAULT_OUTPUT_THUMB_TEMPLATE = "%(targetDir)s%(targetBasename)s.%(index)03d%(targetExtension)s"
DEFAULT_LINK_FILE_TEMPLATE = "%(targetPath)s.link"
DEFAULT_CONFIG_FILE_TEMPLATE = "%(sourcePath)s.ini"
DEFAULT_REPORT_FILE_TEMPLATE = "%(sourcePath)s.%(id)s.rep"
DEFAULT_MONITORING_PERIOD = 5
DEFAULT_TRANSCODING_TIMEOUT = 60
DEFAULT_POSTPROCESS_TIMEOUT = 60
DEFAULT_PREPROCESS_TIMEOUT = 60
DEFAULT_MAIL_TIMEOUT = 30
DEFAULT_MAIL_RETRY_MAX = 3
DEFAULT_MAIL_RETRY_SLEEP = 60
DEFAULT_HTTPREQUEST_TIMEOUT = 30
DEFAULT_HTTPREQUEST_RETRY_MAX = 3
DEFAULT_HTTPREQUEST_RETRY_SLEEP = 60
DEFAULT_SQL_TIMEOUT = 30
DEFAULT_SQL_RETRY_MAX = 3
DEFAULT_SQL_RETRY_SLEEP = 60
DEFAULT_PROCESS_PRIORITY = 100
DEFAULT_TRANSCODING_PRIORITY = 100
DEFAULT_MAIL_SUBJECT_TEMPLATE = "%(customerName)s/%(profileName)s transcoding %(trigger)s"
DEFAULT_MAIL_BODY_TEMPLATE = """
Transcoding Report
==================
Customer Name: %(customerName)s
Profile Name: %(profileName)s
--------------
File: '%(inputRelPath)s'
Message: %(errorMessage)s
"""
# Default CustomerStore values
DEFAULT_CUSTOMER_PRIORITY = 100
# Default customer directories
DEFAULT_INPUT_DIR = "/%s/files/incoming"
DEFAULT_OUTPUT_DIR = "/%s/files/outgoing"
DEFAULT_FAILED_DIR = "/%s/files/failed"
DEFAULT_DONE_DIR = "/%s/files/done"
DEFAULT_LINK_DIR = "/%s/files/links"
DEFAULT_CONFIG_DIR = "/%s/configs"
DEFAULT_TEMPREP_DIR = "/%s/reports/pending"
DEFAULT_FAILEDREP_DIR = "/%s/reports/failed"
DEFAULT_DONEREP_DIR = "/%s/reports/done"
DEFAULT_WORK_DIR = "/%s/work"
FILE_MONITOR = "file-monitor"
HTTP_MONITOR = "http-monitor"
| osiloke/Flumotion-Transcoder | flumotion/transcoder/admin/adminconsts.py | Python | lgpl-2.1 | 5,673 | 0.000353 |
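The *_TEMPLATE constants above are old-style %-format strings filled from a substitution dictionary. A minimal sketch with made-up values, assuming the constants above are in scope:

# Illustration only; the customer/profile/path values are invented.
values = {'customerName': 'acme', 'profileName': 'hd', 'sourcePath': '/incoming/clip.avi'}
print(TRANSCODER_LABEL_TEMPLATE % values)  # acme/hd:/incoming/clip.avi
print(MONITOR_LABEL_TEMPLATE % values)     # Monitor for acme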
# -*- coding: utf-8 -*-
# python-holidays
# ---------------
# A fast, efficient Python library for generating country, province and state
# specific sets of holidays on the fly. It aims to make determining whether a
# specific date is a holiday as fast and flexible as possible.
#
# Authors: dr-prodigy <maurizio.montel@gmail.com> (c) 2017-2022
# ryanss <ryanssdev@icloud.com> (c) 2014-2017
# Website: https://github.com/dr-prodigy/python-holidays
# License: MIT (see LICENSE file)
import unittest
from datetime import date
import holidays
class TestNicaragua(unittest.TestCase):
def setUp(self):
self.ni_holidays = holidays.NI()
def test_ni_holidays_2020(self):
year = 2020
mn_holidays = holidays.NI(prov="MN")
# New Year's Day
self.assertIn(date(year, 1, 1), self.ni_holidays)
# Maundy Thursday
self.assertIn(date(year, 4, 9), self.ni_holidays)
# Good Friday
self.assertIn(date(year, 4, 10), self.ni_holidays)
# Labor Day
self.assertIn(date(year, 5, 1), self.ni_holidays)
# Revolution Day
self.assertIn(date(year, 7, 19), self.ni_holidays)
# Battle of San Jacinto Day
self.assertIn(date(year, 9, 14), self.ni_holidays)
# Independence Day
self.assertIn(date(year, 9, 15), self.ni_holidays)
# Virgin's Day
self.assertIn(date(year, 12, 8), self.ni_holidays)
# Christmas Day
self.assertIn(date(year, 12, 25), self.ni_holidays)
# Santo Domingo Day Down
self.assertIn(date(year, 8, 1), mn_holidays)
# Santo Domingo Day Up
self.assertIn(date(year, 8, 10), mn_holidays)
| ryanss/holidays.py | test/countries/test_nicaragua.py | Python | mit | 1,698 | 0 |
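A brief usage sketch of the library these tests exercise: country classes behave like dictionaries keyed by date, and `prov` selects province-specific holidays.

# Minimal sketch based on the assertions above.
from datetime import date
import holidays

ni = holidays.NI(prov="MN")
print(date(2020, 7, 19) in ni)    # True - Revolution Day
print(ni.get(date(2020, 8, 1)))   # Santo Domingo Day (Managua-specific)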
# -*- coding: utf-8 -*-
##
##
## This file is part of Indico.
## Copyright (C) 2002 - 2014 European Organization for Nuclear Research (CERN).
##
## Indico is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 3 of the
## License, or (at your option) any later version.
##
## Indico is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Indico;if not, see <http://www.gnu.org/licenses/>.
globalOptions = [
#collaboration options necessary in all plugins
("tab", {"description" : "Name of tab where CERN MCU will be placed",
"type": str,
"defaultValue": "Videoconferencing",
"editable": True,
"visible": True,
"mustReload": False} ),
("allowedOn", {"description" : "Kind of event types (conference, meeting, simple_event) supported",
"type": list,
"defaultValue": ["conference", "meeting"],
"editable": True,
"visible": True,
"mustReload": False} ),
("admins", {"description": "CERN MCU admins / responsibles",
"type": 'users',
"defaultValue": [],
"editable": True,
"visible": True} ),
#CERN MCU Options
("MCUAddress", {"description": "MCU URL",
"type": str,
"defaultValue": "https://cern-mcu1.cern.ch",
"editable": True,
"visible": True}),
("indicoID", {"description": "ID of Indico for the MCU",
"type": str,
"defaultValue": "indico",
"editable": True,
"visible": True}),
("indicoPassword", {"description": "Password of Indico for the MCU",
"type": str,
"defaultValue": "",
"editable": True,
"visible": True}),
("idRange", {"description": "Range of possible IDs (format: min-max)",
"type": str,
"defaultValue": "90000-99999",
"editable": True,
"visible": True}),
("MCUTZ", {"description": "Timezone where the MCU is physically located. We assume a MCU Admin will update 'UTC offset' in /settings_time.html of the MCU web interface accordingly.",
"type": str,
"defaultValue": 'UTC',
"editable": True,
"visible": True}),
("CERNGatekeeperPrefix", {"description": "CERN's gatekeeper prefix. Will be used for instructions on how to join the conference.",
"type": str,
"defaultValue": "98",
"editable": True,
"visible": True}),
("GDSPrefix", {"description": "GDS prefix. Will be used for instructions on how to join the conference.",
"type": str,
"defaultValue": "0041227670272",
"editable": True,
"visible": True}),
("MCU_IP", {"description": "MCU's IP. Will be used for instructions on how to join the conference.",
"type": str,
"defaultValue": "137.138.145.150",
"editable": True,
"visible": True}),
("Phone_number", {"description": "Phone number used to join by phone. Will be used for instructions on how to join the conference.",
"type": str,
"defaultValue": "0041227670270",
"editable": True,
"visible": True}),
("H323_IP_att_name", {"description": "Name of the custom attribute for the H.323 IP of a room in the Room Booking database.",
"type": str,
"defaultValue": "H323 IP",
"editable": True,
"visible": True}),
("extraMinutesBefore", {"description" : "Extra minutes allowed before Indico event start time",
"type": int,
"defaultValue": 60} ),
("extraMinutesAfter", {"description" : "Extra minutes allowed after Indico event start time",
"type": int,
"defaultValue": 120} ),
("defaultMinutesBefore", {"description" : "Default extra minutes before Indico event start time",
"type": int,
"defaultValue": 30} ),
("defaultMinutesAfter", {"description" : "Default extra minutes after Indico event start time",
"type": int,
"defaultValue": 60} ),
]
| Ictp/indico | indico/MaKaC/plugins/Collaboration/CERNMCU/options.py | Python | gpl-3.0 | 5,215 | 0.011697 |
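Each entry in `globalOptions` above is a `(name, spec)` pair whose spec dict carries the description, type and default. A minimal sketch of pulling the defaults out of that declaration list, assuming the list above is in scope:

# Collapse the option declarations into a simple name -> default mapping.
defaults = dict((name, spec["defaultValue"]) for name, spec in globalOptions)
print(defaults["MCUAddress"])  # https://cern-mcu1.cern.ch
print(defaults["idRange"])     # 90000-99999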
# (C) 2012, Michael DeHaan, <michael.dehaan@gmail.com>
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
import os
import time
import json
TIME_FORMAT="%b %d %Y %H:%M:%S"
MSG_FORMAT="%(now)s - %(category)s - %(data)s\n\n"
if not os.path.exists("/var/log/ansible/hosts"):
os.makedirs("/var/log/ansible/hosts")
def log(host, category, data):
if type(data) == dict:
if 'verbose_override' in data:
# avoid logging extraneous data from facts
data = 'omitted'
else:
invocation = data.pop('invocation', None)
data = json.dumps(data)
if invocation is not None:
data = json.dumps(invocation) + " => %s " % data
path = os.path.join("/var/log/ansible/hosts", host)
now = time.strftime(TIME_FORMAT, time.localtime())
fd = open(path, "a")
fd.write(MSG_FORMAT % dict(now=now, category=category, data=data))
fd.close()
class CallbackModule(object):
"""
logs playbook results, per host, in /var/log/ansible/hosts
"""
def on_any(self, *args, **kwargs):
pass
def runner_on_failed(self, host, res, ignore_errors=False):
log(host, 'FAILED', res)
def runner_on_ok(self, host, res):
log(host, 'OK', res)
def runner_on_error(self, host, msg):
log(host, 'ERROR', msg)
def runner_on_skipped(self, host, item=None):
log(host, 'SKIPPED', '...')
def runner_on_unreachable(self, host, res):
log(host, 'UNREACHABLE', res)
def runner_on_no_hosts(self):
pass
def runner_on_async_poll(self, host, res, jid, clock):
pass
def runner_on_async_ok(self, host, res, jid):
pass
def runner_on_async_failed(self, host, res, jid):
log(host, 'ASYNC_FAILED', res)
def playbook_on_start(self):
pass
def playbook_on_notify(self, host, handler):
pass
def playbook_on_task_start(self, name, is_conditional):
pass
def playbook_on_vars_prompt(self, varname, private=True, prompt=None, encrypt=None, confirm=False, salt_size=None, salt=None, default=None):
pass
def playbook_on_setup(self):
pass
def playbook_on_import_for_host(self, host, imported_file):
log(host, 'IMPORTED', imported_file)
def playbook_on_not_import_for_host(self, host, missing_file):
log(host, 'NOTIMPORTED', missing_file)
def playbook_on_play_start(self, pattern):
pass
def playbook_on_stats(self, stats):
pass
| shlomozippel/ansible | plugins/callbacks/log_plays.py | Python | gpl-3.0 | 3,121 | 0.001922 |
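A sketch of what the callback above produces: one log file per host under /var/log/ansible/hosts, each entry rendered through MSG_FORMAT. It assumes the plugin module is importable and that the log directory is writable; adjust the path or permissions before trying it.

# Hedged sketch; requires write access to /var/log/ansible/hosts.
cb = CallbackModule()                                         # defined above
cb.runner_on_ok('web01', {'changed': False, 'ping': 'pong'})
# /var/log/ansible/hosts/web01 now ends with a line like:
# Jan 01 2015 12:00:00 - OK - {"changed": false, "ping": "pong"}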
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
r"""
# .---. .-----------
# / \ __ / ------
# / / \( )/ ----- (`-') _ _(`-') <-. (`-')_
# ////// '\/ ` --- ( OO).-/( (OO ).-> .-> \( OO) ) .->
# //// / // : : --- (,------. \ .'_ (`-')----. ,--./ ,--/ ,--.' ,-.
# // / / / `\/ '-- | .---' '`'-..__)( OO).-. ' | \ | | (`-')'.' /
# // //..\\ (| '--. | | ' |( _) | | | | . '| |)(OO \ /
# ============UU====UU==== | .--' | | / : \| |)| | | |\ | | / /)
# '//||\\` | `---. | '-' / ' '-' ' | | \ | `-/ /`
# ''`` `------' `------' `-----' `--' `--' `--'
# ######################################################################################
#
# Author: edony - edonyzpc@gmail.com
#
# twitter : @edonyzpc
#
# Last modified: 2015-12-08 23:28
#
# Filename: num2iter.py
#
# Description: All Rights Are Reserved
#
"""
#import scipy as sp
#import math as m
#import matplotlib as mpl
import matplotlib.pyplot as plt
#from mpl_toolkits.mplot3d import Axes3D as Ax3
#from scipy import stats as st
#from matplotlib import cm
import numpy as np
class PyColor(object):
    """This class is for colored printing in the Python interpreter!
    "F3" calls the Addpy() function to add this class, which is defined
    in the .vimrc for the Vim editor."""
def __init__(self):
self.self_doc = r"""
STYLE: \033['display model';'foreground';'background'm
DETAILS:
FOREGROUND BACKGOUND COLOR
---------------------------------------
30 40 black
31 41 red
32 42 green
33 43 yellow
34 44 blue
35 45 purple
36 46 cyan
37 47 white
DISPLAY MODEL DETAILS
-------------------------
0 default
1 highlight
4 underline
5 flicker
7 reverse
8 non-visiable
e.g:
\033[1;31;40m <!--1-highlight;31-foreground red;40-background black-->
\033[0m <!--set all into default-->
"""
self.warningcolor = '\033[0;31m'
self.tipcolor = '\033[0;32m'
self.endcolor = '\033[0m'
self._newcolor = ''
@property
def new(self):
"""
Customized Python Print Color.
"""
return self._newcolor
@new.setter
def new(self, color_str):
"""
New Color.
"""
self._newcolor = color_str
def disable(self):
"""
Disable Color Print.
"""
self.warningcolor = ''
self.endcolor = ''
fig = plt.figure('Point Cloud Number VS Iteration')
ax1 = fig.add_subplot(111)
ax2 = fig.add_subplot(111)
ax3 = fig.add_subplot(111)
x = np.linspace(3000, 10000, 100)
y1 = np.linspace(38092, 40318, 100)
y2 = np.linspace(507330, 800274, 100)
y3_1 = np.linspace(2500737,3082897, 50) + 1000000
y3 = np.linspace(3082897, 2300181, 50) + 5000
y3_1 = list(y3_1)
y3_1.extend(list(y3))
y3 = np.array(y3_1)
y1 = [i+np.random.randint(50000, 100000) for i in y1]
y2 = [i+np.random.randint(50000, 100000) for i in y2]
y3 = [i+np.random.randint(50000, 100000) for i in y3]
ax1.plot(x, y1, label='Plane')
ax2.plot(x, y2, label='Normal Tenon')
ax3.plot(x, y3, label='Large-scale Parts')
ax1.set_xlabel('Iterations')
ax1.set_ylabel('Number of Consesus Cloud Points Set')
plt.legend()
plt.show()
| edonyM/emthesis | code/num2iter.py | Python | mit | 3,819 | 0.005237 |
# Copyright 2007-2016 The HyperSpy developers
#
# This file is part of HyperSpy.
#
# HyperSpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# HyperSpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with HyperSpy. If not, see <http://www.gnu.org/licenses/>.
import numpy as np
import nose.tools as nt
from hyperspy.signals import EDSTEMSpectrum
from hyperspy.defaults_parser import preferences
from hyperspy.components1d import Gaussian
from hyperspy.misc.eds import utils as utils_eds
from hyperspy.misc.test_utils import ignore_warning
class Test_metadata:
def setUp(self):
# Create an empty spectrum
s = EDSTEMSpectrum(np.ones((4, 2, 1024)))
s.metadata.Acquisition_instrument.TEM.Detector.EDS.live_time = 3.1
s.metadata.Acquisition_instrument.TEM.beam_energy = 15.0
self.signal = s
def test_sum_live_time1(self):
s = self.signal
old_metadata = s.metadata.deepcopy()
sSum = s.sum(0)
nt.assert_equal(
sSum.metadata.Acquisition_instrument.TEM.Detector.EDS.live_time,
3.1 * 2)
# Check that metadata is unchanged
print(old_metadata, s.metadata) # Capture for comparison on error
nt.assert_dict_equal(old_metadata.as_dictionary(),
s.metadata.as_dictionary(),
"Source metadata changed")
def test_sum_live_time2(self):
s = self.signal
old_metadata = s.metadata.deepcopy()
sSum = s.sum((0, 1))
nt.assert_equal(
sSum.metadata.Acquisition_instrument.TEM.Detector.EDS.live_time,
3.1 * 2 * 4)
# Check that metadata is unchanged
print(old_metadata, s.metadata) # Capture for comparison on error
nt.assert_dict_equal(old_metadata.as_dictionary(),
s.metadata.as_dictionary(),
"Source metadata changed")
def test_sum_live_time_out_arg(self):
s = self.signal
sSum = s.sum(0)
s.metadata.Acquisition_instrument.TEM.Detector.EDS.live_time = 4.2
s_resum = s.sum(0)
r = s.sum(0, out=sSum)
nt.assert_is_none(r)
nt.assert_equal(
s_resum.metadata.Acquisition_instrument.TEM.Detector.EDS.live_time,
sSum.metadata.Acquisition_instrument.TEM.Detector.EDS.live_time)
np.testing.assert_allclose(s_resum.data, sSum.data)
def test_rebin_live_time(self):
s = self.signal
old_metadata = s.metadata.deepcopy()
dim = s.axes_manager.shape
s = s.rebin([dim[0] / 2, dim[1] / 2, dim[2]])
nt.assert_equal(
s.metadata.Acquisition_instrument.TEM.Detector.EDS.live_time,
3.1 * 2 * 2)
# Check that metadata is unchanged
print(old_metadata, self.signal.metadata) # Captured on error
nt.assert_dict_equal(old_metadata.as_dictionary(),
self.signal.metadata.as_dictionary(),
"Source metadata changed")
def test_add_elements(self):
s = self.signal
s.add_elements(['Al', 'Ni'])
nt.assert_equal(s.metadata.Sample.elements, ['Al', 'Ni'])
s.add_elements(['Al', 'Ni'])
nt.assert_equal(s.metadata.Sample.elements, ['Al', 'Ni'])
s.add_elements(["Fe", ])
nt.assert_equal(s.metadata.Sample.elements, ['Al', "Fe", 'Ni'])
s.set_elements(['Al', 'Ni'])
nt.assert_equal(s.metadata.Sample.elements, ['Al', 'Ni'])
def test_default_param(self):
s = self.signal
mp = s.metadata
nt.assert_equal(
mp.Acquisition_instrument.TEM.Detector.EDS.energy_resolution_MnKa,
preferences.EDS.eds_mn_ka)
def test_TEM_to_SEM(self):
s = self.signal.inav[0, 0]
signal_type = 'EDS_SEM'
mp = s.metadata.Acquisition_instrument.TEM.Detector.EDS
mp.energy_resolution_MnKa = 125.3
sSEM = s.deepcopy()
sSEM.set_signal_type(signal_type)
mpSEM = sSEM.metadata.Acquisition_instrument.SEM.Detector.EDS
results = [
mp.energy_resolution_MnKa,
signal_type]
resultsSEM = [
mpSEM.energy_resolution_MnKa,
sSEM.metadata.Signal.signal_type]
nt.assert_equal(results, resultsSEM)
def test_get_calibration_from(self):
s = self.signal
scalib = EDSTEMSpectrum(np.ones(1024))
energy_axis = scalib.axes_manager.signal_axes[0]
energy_axis.scale = 0.01
energy_axis.offset = -0.10
s.get_calibration_from(scalib)
nt.assert_equal(s.axes_manager.signal_axes[0].scale,
energy_axis.scale)
class Test_quantification:
def setUp(self):
s = EDSTEMSpectrum(np.ones([2, 2, 1024]))
energy_axis = s.axes_manager.signal_axes[0]
energy_axis.scale = 1e-2
energy_axis.units = 'keV'
energy_axis.name = "Energy"
s.set_microscope_parameters(beam_energy=200,
live_time=3.1, tilt_stage=0.0,
azimuth_angle=None, elevation_angle=35,
energy_resolution_MnKa=130)
s.metadata.Acquisition_instrument.TEM.Detector.EDS.real_time = 2.5
s.metadata.Acquisition_instrument.TEM.beam_current = 0.05
elements = ['Al', 'Zn']
xray_lines = ['Al_Ka', 'Zn_Ka']
intensities = [300, 500]
for i, xray_line in enumerate(xray_lines):
gauss = Gaussian()
line_energy, FWHM = s._get_line_energy(xray_line, FWHM_MnKa='auto')
gauss.centre.value = line_energy
gauss.A.value = intensities[i]
gauss.sigma.value = FWHM
s.data[:] += gauss.function(energy_axis.axis)
s.set_elements(elements)
s.add_lines(xray_lines)
s.axes_manager[0].scale = 0.5
s.axes_manager[1].scale = 0.5
self.signal = s
def test_quant_lorimer(self):
s = self.signal
method = 'CL'
kfactors = [1, 2.0009344042484134]
composition_units = 'weight'
intensities = s.get_lines_intensity()
res = s.quantification(intensities, method, kfactors,
composition_units)
np.testing.assert_allclose(res[0].data, np.array([
[22.70779, 22.70779],
[22.70779, 22.70779]]), atol=1e-3)
def test_quant_zeta(self):
s = self.signal
method = 'zeta'
compositions_units = 'weight'
factors = [20, 50]
intensities = s.get_lines_intensity()
res = s.quantification(intensities, method, factors,
compositions_units)
np.testing.assert_allclose(res[1].data, np.array(
[[2.7125736e-03, 2.7125736e-03],
[2.7125736e-03, 2.7125736e-03]]), atol=1e-3)
np.testing.assert_allclose(res[0][1].data, np.array(
[[80.962287987, 80.962287987],
[80.962287987, 80.962287987]]), atol=1e-3)
def test_quant_cross_section(self):
s = self.signal
method = 'cross_section'
factors = [3, 5]
intensities = s.get_lines_intensity()
res = s.quantification(intensities, method, factors)
np.testing.assert_allclose(res[1][0].data, np.array(
[[21517.1647074, 21517.1647074],
[21517.1647074, 21517.1647074]]), atol=1e-3)
np.testing.assert_allclose(res[1][1].data, np.array(
[[21961.616621, 21961.616621],
[21961.616621, 21961.616621]]), atol=1e-3)
np.testing.assert_allclose(res[0][0].data, np.array(
[[49.4888856823, 49.4888856823],
[49.4888856823, 49.4888856823]]), atol=1e-3)
def test_quant_zeros(self):
intens = np.array([[0.5, 0.5, 0.5],
[0.0, 0.5, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.5, 0.0],
[0.5, 0.0, 0.0]]).T
with ignore_warning(message="divide by zero encountered",
category=RuntimeWarning):
quant = utils_eds.quantification_cliff_lorimer(
intens, [1, 1, 3]).T
np.testing.assert_allclose(
quant,
np.array([[0.2, 0.2, 0.6],
[0.0, 0.25, 0.75],
[0.25, 0.0, 0.75],
[0.5, 0.5, 0.0],
[1.0, 0.0, 0.0]]))
def test_edx_cross_section_to_zeta(self):
cs = [3, 6]
elements = ['Pt', 'Ni']
res = utils_eds.edx_cross_section_to_zeta(cs, elements)
np.testing.assert_allclose(res, [1079.815272, 162.4378035], atol=1e-3)
def test_zeta_to_edx_cross_section(self):
factors = [1079.815272, 162.4378035]
elements = ['Pt', 'Ni']
res = utils_eds.zeta_to_edx_cross_section(factors, elements)
np.testing.assert_allclose(res, [3, 6], atol=1e-3)
class Test_vacum_mask:
def setUp(self):
s = EDSTEMSpectrum(np.array([np.linspace(0.001, 0.5, 20)] * 100).T)
s.add_poissonian_noise()
self.signal = s
def test_vacuum_mask(self):
s = self.signal
nt.assert_true(s.vacuum_mask().data[0])
nt.assert_false(s.vacuum_mask().data[-1])
class Test_simple_model:
def setUp(self):
s = utils_eds.xray_lines_model(elements=['Al', 'Zn'],
weight_percents=[50, 50])
self.signal = s
def test_intensity(self):
s = self.signal
np.testing.assert_allclose(
[i.data[0] for i in s.get_lines_intensity(
integration_window_factor=5.0)],
[0.5, 0.5],
atol=1e-1)
def test_intensity_dtype_uint(self):
s = self.signal
s.data *= 1E5
s.change_dtype("uint")
bw = s.estimate_background_windows()
np.testing.assert_allclose(
[i.data[0] for i in s.get_lines_intensity(background_windows=bw)],
[5E4, 5E4],
rtol=0.03)
class Test_get_lines_intentisity:
def test_with_signals_examples(self):
from hyperspy.misc.example_signals_loading import \
load_1D_EDS_TEM_spectrum as EDS_TEM_Spectrum
s = EDS_TEM_Spectrum()
np.testing.assert_allclose(
np.array([res.data[0] for res in s.get_lines_intensity()]),
np.array([3710, 15872]))
class Test_eds_markers:
def setUp(self):
s = utils_eds.xray_lines_model(elements=['Al', 'Zn'],
weight_percents=[50, 50])
self.signal = s
def test_plot_auto_add(self):
s = self.signal
s.plot(xray_lines=True)
# Should contain 6 lines
nt.assert_sequence_equal(
sorted(s._xray_markers.keys()),
['Al_Ka', 'Al_Kb', 'Zn_Ka', 'Zn_Kb', 'Zn_La', 'Zn_Lb1'])
def test_manual_add_line(self):
s = self.signal
s.add_xray_lines_markers(['Zn_La'])
nt.assert_sequence_equal(
list(s._xray_markers.keys()),
['Zn_La'])
nt.assert_equal(len(s._xray_markers), 1)
# Check that the line has both a vertical line marker and text marker:
nt.assert_equal(len(s._xray_markers['Zn_La']), 2)
def test_manual_remove_element(self):
s = self.signal
s.add_xray_lines_markers(['Zn_Ka', 'Zn_Kb', 'Zn_La'])
s.remove_xray_lines_markers(['Zn_Kb'])
nt.assert_sequence_equal(
sorted(s._xray_markers.keys()),
['Zn_Ka', 'Zn_La'])
| vidartf/hyperspy | hyperspy/tests/signal/test_eds_tem.py | Python | gpl-3.0 | 12,087 | 0 |
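A condensed sketch of the workflow the tests above exercise, with made-up data and k-factors: build an EDS-TEM spectrum, declare the X-ray lines, extract line intensities, then quantify them with the Cliff-Lorimer ('CL') method.

# Illustration only; the spectrum is flat dummy data and the k-factors are invented.
import numpy as np
from hyperspy.signals import EDSTEMSpectrum

s = EDSTEMSpectrum(np.ones((2, 2, 1024)))
s.set_microscope_parameters(beam_energy=200, live_time=3.1)
s.set_elements(['Al', 'Zn'])
s.add_lines(['Al_Ka', 'Zn_Ka'])
intensities = s.get_lines_intensity()
compositions = s.quantification(intensities, 'CL', [1.0, 2.0], 'weight')  # weight-percent signals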
# import system modules
import arcpy
from arcpy import env
# Set environment settings
env.workspace = "C:\Users\Ewan\Desktop\SFTPDST5\MapFiles"
try:
# Set the local variable
in_Table = "Trees.csv"
x_coords = "X"
y_coords = "Y"
z_coords = "Z"
out_Layer = "Trees_Layer"
saved_Layer = "C:\Users\Ewan\Desktop\SFTPDST5\Mapfiles\Trees.lyr"
# Set the spatial reference
    spRef = r"Coordinate Systems\Geographic Coordinate Systems\World\WGS 1984.prj"
# Make the XY Event Layer
arcpy.MakeXYEventLayer_management(in_Table, x_coords, y_coords, out_Layer, spRef)
# Save to a layer file
arcpy.SaveToLayerFile_management(out_Layer, saved_Layer)
except Exception as err:
    print(err.args[0])
| harryfb/DST5 | ArcPy Code/Trees.py | Python | apache-2.0 | 760 | 0.015789 |
import abc
import numpy as np
import tensorflow as tf
from swl.machine_learning.tensorflow_model import SimpleSequentialTensorFlowModel
import swl.machine_learning.util as swl_ml_util
#--------------------------------------------------------------------
class Synth90kCrnn(SimpleSequentialTensorFlowModel):
def __init__(self, input_shape, output_shape, num_classes, is_sparse_output):
super().__init__(input_shape, output_shape, num_classes, is_sparse_output, is_time_major=False)
self._model_output_len = 0
def get_feed_dict(self, data, num_data, *args, **kwargs):
len_data = len(data)
model_output_len = [self._model_output_len] * num_data
if 1 == len_data:
feed_dict = {self._input_ph: data[0], self._model_output_len_ph: model_output_len}
elif 2 == len_data:
"""
feed_dict = {self._input_ph: data[0], self._output_ph: data[1], self._model_output_len_ph: model_output_len}
"""
# Use output lengths.
output_len = list(map(lambda lbl: len(lbl), data[1]))
feed_dict = {self._input_ph: data[0], self._output_ph: data[1], self._output_len_ph: output_len, self._model_output_len_ph: model_output_len}
else:
raise ValueError('Invalid number of feed data: {}'.format(len_data))
return feed_dict
def _create_single_model(self, inputs, input_shape, num_classes, is_training):
with tf.variable_scope('synth90k_crnn', reuse=tf.AUTO_REUSE):
return self._create_crnn(inputs, num_classes, is_training)
def _create_crnn(self, inputs, num_classes, is_training):
#kernel_initializer = None
#kernel_initializer = tf.initializers.he_normal()
#kernel_initializer = tf.initializers.glorot_normal() # Xavier normal initialization.
kernel_initializer = tf.variance_scaling_initializer(scale=2.0, mode='fan_in', distribution='truncated_normal')
# Preprocessing.
with tf.variable_scope('preprocessing', reuse=tf.AUTO_REUSE):
inputs = tf.nn.local_response_normalization(inputs, depth_radius=5, bias=1, alpha=1, beta=0.5, name='lrn')
# (samples, height, width, channels) -> (samples, width, height, channels).
inputs = tf.transpose(inputs, perm=(0, 2, 1, 3), name='transpose')
#--------------------
# Convolutional layer.
# TODO [check] >> The magic number (64).
num_cnn_features = 64
with tf.variable_scope('convolutional_layer', reuse=tf.AUTO_REUSE):
cnn_outputs = self._create_convolutional_layer(inputs, num_cnn_features, kernel_initializer, is_training)
#--------------------
# Recurrent layer.
with tf.variable_scope('recurrent_layer', reuse=tf.AUTO_REUSE):
rnn_outputs = self._create_recurrent_layer(cnn_outputs, self._model_output_len_ph, kernel_initializer, is_training)
#--------------------
# Transcription layer.
with tf.variable_scope('transcription_layer', reuse=tf.AUTO_REUSE):
logits = self._create_transcription_layer(rnn_outputs, num_classes, kernel_initializer, is_training)
#--------------------
# Decoding layer.
with tf.variable_scope('decoding_layer', reuse=tf.AUTO_REUSE):
decoded = self._create_decoding_layer(logits)
return {'logit': logits, 'decoded_label': decoded}
def _create_convolutional_layer(self, inputs, num_features, kernel_initializer, is_training):
with tf.variable_scope('conv1', reuse=tf.AUTO_REUSE):
conv1 = tf.layers.conv2d(inputs, filters=64, kernel_size=(3, 3), strides=(1, 1), padding='same', kernel_initializer=kernel_initializer, name='conv')
conv1 = tf.layers.batch_normalization(conv1, axis=-1, momentum=0.99, epsilon=0.001, center=True, scale=True, training=is_training, name='batchnorm')
conv1 = tf.nn.relu(conv1, name='relu')
conv1 = tf.layers.max_pooling2d(conv1, pool_size=(2, 2), strides=(2, 2), padding='same', name='maxpool')
with tf.variable_scope('conv2', reuse=tf.AUTO_REUSE):
conv2 = tf.layers.conv2d(conv1, filters=128, kernel_size=(3, 3), strides=(1, 1), padding='same', kernel_initializer=kernel_initializer, name='conv')
conv2 = tf.layers.batch_normalization(conv2, axis=-1, momentum=0.99, epsilon=0.001, center=True, scale=True, training=is_training, name='batchnorm')
conv2 = tf.nn.relu(conv2, name='relu')
conv2 = tf.layers.max_pooling2d(conv2, pool_size=(2, 2), strides=(2, 2), padding='same', name='maxpool')
with tf.variable_scope('conv3', reuse=tf.AUTO_REUSE):
conv3 = tf.layers.conv2d(conv2, filters=256, kernel_size=(3, 3), strides=(1, 1), padding='same', kernel_initializer=kernel_initializer, name='conv1')
conv3 = tf.layers.batch_normalization(conv3, axis=-1, momentum=0.99, epsilon=0.001, center=True, scale=True, training=is_training, name='batchnorm1')
conv3 = tf.nn.relu(conv3, name='relu1')
conv3 = tf.layers.conv2d(conv3, filters=256, kernel_size=(3, 3), strides=(1, 1), padding='same', kernel_initializer=kernel_initializer, name='conv2')
conv3 = tf.layers.batch_normalization(conv3, axis=-1, momentum=0.99, epsilon=0.001, center=True, scale=True, training=is_training, name='batchnorm2')
conv3 = tf.nn.relu(conv3, name='relu2')
conv3 = tf.layers.max_pooling2d(conv3, pool_size=(1, 2), strides=(1, 2), padding='same', name='maxpool')
with tf.variable_scope('conv4', reuse=tf.AUTO_REUSE):
conv4 = tf.layers.conv2d(conv3, filters=512, kernel_size=(3, 3), strides=(1, 1), padding='same', kernel_initializer=kernel_initializer, name='conv1')
conv4 = tf.layers.batch_normalization(conv4, axis=-1, momentum=0.99, epsilon=0.001, center=True, scale=True, training=is_training, name='batchnorm1')
conv4 = tf.nn.relu(conv4, name='relu1')
conv4 = tf.layers.conv2d(conv4, filters=512, kernel_size=(3, 3), strides=(1, 1), padding='same', kernel_initializer=kernel_initializer, name='conv2')
conv4 = tf.layers.batch_normalization(conv4, axis=-1, momentum=0.99, epsilon=0.001, center=True, scale=True, training=is_training, name='batchnorm2')
conv4 = tf.nn.relu(conv4, name='relu2')
conv4 = tf.layers.max_pooling2d(conv4, pool_size=(1, 2), strides=(1, 2), padding='same', name='maxpool')
with tf.variable_scope('conv5', reuse=tf.AUTO_REUSE):
conv5 = tf.layers.conv2d(conv4, filters=512, kernel_size=(2, 2), strides=(1, 1), padding='same', kernel_initializer=kernel_initializer, name='conv')
#conv5 = tf.layers.conv2d(conv4, filters=512, kernel_size=(1, 1), strides=(1, 1), padding='same', kernel_initializer=kernel_initializer, name='conv')
conv5 = tf.layers.batch_normalization(conv5, axis=-1, momentum=0.99, epsilon=0.001, center=True, scale=True, training=is_training, name='batchnorm')
conv5 = tf.nn.relu(conv5, name='relu')
with tf.variable_scope('dense', reuse=tf.AUTO_REUSE):
conv5_shape = conv5.shape #conv5.shape.as_list()
self._model_output_len = conv5_shape[1]
#dense = tf.reshape(conv5, shape=conv5_shape[:2] + (-1,), name='reshape')
#dense = tf.reshape(conv5, shape=conv5_shape[:2] + (conv5_shape[2] * conv5_shape[3]), name='reshape')
outputs = tf.reshape(conv5, shape=(-1, conv5_shape[1], conv5_shape[2] * conv5_shape[3]), name='reshape')
return tf.layers.dense(outputs, num_features, activation=tf.nn.relu, kernel_initializer=kernel_initializer, name='dense')
def _create_recurrent_layer(self, inputs, input_len, kernel_initializer, is_training):
num_hidden_units = 256
keep_prob = 1.0
#keep_prob = 0.5
with tf.variable_scope('rnn1', reuse=tf.AUTO_REUSE):
cell_fw1 = self._create_unit_cell(num_hidden_units, kernel_initializer, 'fw_unit_cell') # Forward cell.
#cell_fw1 = tf.contrib.rnn.DropoutWrapper(cell_fw1, input_keep_prob=keep_prob, output_keep_prob=1.0, state_keep_prob=keep_prob)
cell_bw1 = self._create_unit_cell(num_hidden_units, kernel_initializer, 'bw_unit_cell') # Backward cell.
#cell_bw1 = tf.contrib.rnn.DropoutWrapper(cell_bw1, input_keep_prob=keep_prob, output_keep_prob=1.0, state_keep_prob=keep_prob)
#rnn_outputs1, rnn_states1 = tf.nn.bidirectional_dynamic_rnn(cell_fw1, cell_bw1, inputs, sequence_length=None, time_major=False, dtype=tf.float32, scope='rnn')
rnn_outputs1, rnn_states1 = tf.nn.bidirectional_dynamic_rnn(cell_fw1, cell_bw1, inputs, sequence_length=input_len, time_major=False, dtype=tf.float32, scope='rnn')
rnn_outputs1 = tf.concat(rnn_outputs1, axis=-1)
rnn_outputs1 = tf.layers.batch_normalization(rnn_outputs1, axis=-1, momentum=0.99, epsilon=0.001, center=True, scale=True, training=is_training, name='batchnorm')
#rnn_states1 = tf.contrib.rnn.LSTMStateTuple(tf.concat((rnn_states1[0].c, rnn_states1[1].c), axis=-1), tf.concat((rnn_states1[0].h, rnn_states1[1].h), axis=-1))
#rnn_states1 = tf.layers.batch_normalization(rnn_states1, axis=-1, momentum=0.99, epsilon=0.001, center=True, scale=True, training=is_training, name='batchnorm')
with tf.variable_scope('rnn2', reuse=tf.AUTO_REUSE):
cell_fw2 = self._create_unit_cell(num_hidden_units, kernel_initializer, 'fw_unit_cell') # Forward cell.
#cell_fw2 = tf.contrib.rnn.DropoutWrapper(cell_fw2, input_keep_prob=keep_prob, output_keep_prob=1.0, state_keep_prob=keep_prob)
cell_bw2 = self._create_unit_cell(num_hidden_units, kernel_initializer, 'bw_unit_cell') # Backward cell.
#cell_bw2 = tf.contrib.rnn.DropoutWrapper(cell_bw2, input_keep_prob=keep_prob, output_keep_prob=1.0, state_keep_prob=keep_prob)
#rnn_outputs2, rnn_states2 = tf.nn.bidirectional_dynamic_rnn(cell_fw2, cell_bw2, rnn_outputs1, sequence_length=None, time_major=False, dtype=tf.float32, scope='rnn')
rnn_outputs2, rnn_states2 = tf.nn.bidirectional_dynamic_rnn(cell_fw2, cell_bw2, rnn_outputs1, sequence_length=input_len, time_major=False, dtype=tf.float32, scope='rnn')
rnn_outputs2 = tf.concat(rnn_outputs2, axis=-1)
rnn_outputs2 = tf.layers.batch_normalization(rnn_outputs2, axis=-1, momentum=0.99, epsilon=0.001, center=True, scale=True, training=is_training, name='batchnorm')
#rnn_states2 = tf.contrib.rnn.LSTMStateTuple(tf.concat((rnn_states2[0].c, rnn_states2[1].c), axis=-1), tf.concat((rnn_states2[0].h, rnn_states2[1].h), axis=-1))
#rnn_states2 = tf.layers.batch_normalization(rnn_states2, axis=-1, momentum=0.99, epsilon=0.001, center=True, scale=True, training=is_training, name='batchnorm')
return rnn_outputs2
@abc.abstractmethod
def _create_transcription_layer(self, inputs, num_classes, kernel_initializer, is_training):
raise NotImplementedError
@abc.abstractmethod
def _create_decoding_layer(self, logits):
raise NotImplementedError
def _create_unit_cell(self, num_units, kernel_initializer, name):
#return tf.nn.rnn_cell.RNNCell(num_units, name=name)
return tf.nn.rnn_cell.LSTMCell(num_units, initializer=kernel_initializer, forget_bias=1.0, name=name)
#return tf.nn.rnn_cell.GRUCell(num_units, kernel_initializer=kernel_initializer, name=name)
#--------------------------------------------------------------------
class Synth90kCrnnWithCrossEntropyLoss(Synth90kCrnn):
def __init__(self, image_height, image_width, image_channel, num_classes):
super().__init__((None, image_height, image_width, image_channel), (None, None, num_classes), num_classes, is_sparse_output=False)
def _create_transcription_layer(self, inputs, num_classes, kernel_initializer, is_training):
outputs = tf.layers.dense(inputs, num_classes, activation=tf.nn.softmax, kernel_initializer=kernel_initializer, name='dense')
#outputs = tf.layers.dense(inputs, num_classes, activation=tf.nn.softmax, kernel_initializer=kernel_initializer, activity_regularizer=tf.contrib.layers.l2_regularizer(0.0001), name='dense')
return outputs
def _create_decoding_layer(self, logits):
return None
def _get_loss(self, y, t, y_len, t_len):
with tf.variable_scope('loss', reuse=tf.AUTO_REUSE):
masks = tf.sequence_mask(lengths=y_len, maxlen=tf.reduce_max(y_len), dtype=tf.float32)
# Weighted cross-entropy loss for a sequence of logits.
#loss = tf.contrib.seq2seq.sequence_loss(logits=y['logit'], targets=t, weights=masks)
loss = tf.contrib.seq2seq.sequence_loss(logits=y['logit'], targets=tf.argmax(t, axis=-1), weights=masks)
tf.summary.scalar('loss', loss)
return loss
def _get_accuracy(self, y, t, y_len):
with tf.variable_scope('accuracy', reuse=tf.AUTO_REUSE):
correct_prediction = tf.equal(tf.argmax(y['logit'], axis=-1), tf.argmax(t, axis=-1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
tf.summary.scalar('accuracy', accuracy)
return accuracy
#--------------------------------------------------------------------
class Synth90kCrnnWithCtcLoss(Synth90kCrnn):
def __init__(self, image_height, image_width, image_channel, num_classes):
super().__init__((None, image_height, image_width, image_channel), (None, None), num_classes, is_sparse_output=True)
def _create_transcription_layer(self, inputs, num_classes, kernel_initializer, is_training):
outputs = tf.layers.dense(inputs, num_classes, activation=tf.nn.relu, kernel_initializer=kernel_initializer, name='dense')
#outputs = tf.layers.dense(inputs, num_classes, activation=tf.nn.relu, kernel_initializer=kernel_initializer, activity_regularizer=tf.contrib.layers.l2_regularizer(0.0001), name='dense')
return outputs # (None, ???, num_classes).
def _create_decoding_layer(self, logits):
# CTC beam search decoding.
logits = tf.transpose(logits, (1, 0, 2)) # Time-major.
# NOTE [info] >> CTC beam search decoding is too slow. It seems to run on CPU, not GPU.
# If the number of classes increases, its computation time becomes much slower.
beam_width = 10 #100
#decoded, log_prob = tf.nn.ctc_beam_search_decoder(inputs=logits, sequence_length=self._model_output_len_ph, beam_width=beam_width, top_paths=1, merge_repeated=True)
decoded, log_prob = tf.nn.ctc_beam_search_decoder_v2(inputs=logits, sequence_length=self._model_output_len_ph, beam_width=beam_width, top_paths=1)
#decoded, log_prob = tf.nn.ctc_greedy_decoder(inputs=logits, sequence_length=self._model_output_len_ph, merge_repeated=True)
return decoded[0] # Sparse tensor.
#return tf.sparse.to_dense(decoded[0], default_value=self._default_value) # Dense tensor.
def _get_loss(self, y, t_sparse, y_len, t_len):
with tf.variable_scope('loss', reuse=tf.AUTO_REUSE):
# Connectionist temporal classification (CTC) loss.
# TODO [check] >> The case of preprocess_collapse_repeated=True & ctc_merge_repeated=True is untested.
loss = tf.reduce_mean(tf.nn.ctc_loss(labels=t_sparse, inputs=y['logit'], sequence_length=y_len, preprocess_collapse_repeated=False, ctc_merge_repeated=True, ignore_longer_outputs_than_inputs=False, time_major=False))
tf.summary.scalar('loss', loss)
return loss
def _get_accuracy(self, y, t_sparse, y_len):
with tf.variable_scope('accuracy', reuse=tf.AUTO_REUSE):
# Inaccuracy: label error rate.
# NOTE [info] >> tf.edit_distance() is too slow. It seems to run on CPU, not GPU.
# Accuracy may not be calculated to speed up the training.
ler = tf.reduce_mean(tf.edit_distance(hypothesis=tf.cast(y['decoded_label'], tf.int32), truth=t_sparse, normalize=True)) # int64 -> int32.
accuracy = 1.0 - ler
#accuracy = tf.constant(-1, tf.float32)
tf.summary.scalar('accuracy', accuracy)
return accuracy
#--------------------------------------------------------------------
class Synth90kCrnnWithKerasCtcLoss(Synth90kCrnn):
def __init__(self, image_height, image_width, image_channel, num_classes):
super().__init__((None, image_height, image_width, image_channel), (None, None), num_classes, is_sparse_output=True)
# FIXME [fix] >>
self._eos_token = 36
def _create_transcription_layer(self, inputs, num_classes, kernel_initializer, is_training):
outputs = tf.layers.dense(inputs, num_classes, activation=tf.nn.softmax, kernel_initializer=kernel_initializer, name='dense')
#outputs = tf.layers.dense(inputs, num_classes, activation=tf.nn.softmax, kernel_initializer=kernel_initializer, activity_regularizer=tf.contrib.layers.l2_regularizer(0.0001), name='dense')
return outputs # (None, ???, num_classes).
def _create_decoding_layer(self, logits):
"""
# Decoding.
decoded = tf.argmax(logits, axis=-1)
# FIXME [fix] >> This does not work correctly.
# Refer to MyModel._decode_label() in ${SWL_PYTHON_HOME}/test/language_processing/run_simple_hangeul_crrn.py.
decoded = tf.numpy_function(lambda x: list(map(lambda lbl: list(k for k, g in itertools.groupby(lbl) if k < self._blank_label), x)), [decoded], [tf.int32]) # Removes repetitive labels.
return decoded
"""
return None
def _get_loss(self, y, t, y_len, t_len):
with tf.variable_scope('loss', reuse=tf.AUTO_REUSE):
# Connectionist temporal classification (CTC) loss.
y_len, t_len = tf.reshape(y_len, (-1, 1)), tf.reshape(t_len, (-1, 1))
loss = tf.reduce_mean(tf.keras.backend.ctc_batch_cost(y_true=t, y_pred=y['logit'], input_length=y_len, label_length=t_len))
tf.summary.scalar('loss', loss)
return loss
def _get_accuracy(self, y, t, y_len):
with tf.variable_scope('accuracy', reuse=tf.AUTO_REUSE):
"""
correct_prediction = tf.equal(tf.argmax(y, axis=-1), tf.cast(t, tf.int64)) # Error: The time-steps of y and t are different.
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
return accuracy
"""
"""
# FIXME [implement] >> The below logic has to be implemented in TensorFlow.
correct_prediction = len(list(filter(lambda xx: len(list(filter(lambda x: x[0] == x[1], zip(xx[0], xx[1])))) == max(len(xx[0]), len(xx[1])), zip(tf.argmax(y, axis=-1), t))))
accuracy = correct_prediction / max(len(y), len(t))
return accuracy
"""
accuracy = tf.constant(-1.0, tf.float32)
tf.summary.scalar('accuracy', accuracy)
return accuracy
#--------------------------------------------------------------------
class Synth90kDilatedCrnnWithCtcLoss(Synth90kCrnn):
def __init__(self, image_height, image_width, image_channel, num_classes):
super().__init__((None, image_height, image_width, image_channel), (None, None), num_classes, is_sparse_output=True)
def _create_transcription_layer(self, inputs, num_classes, kernel_initializer, is_training):
outputs = tf.layers.dense(inputs, num_classes, activation=tf.nn.relu, kernel_initializer=kernel_initializer, name='dense')
#outputs = tf.layers.dense(inputs, num_classes, activation=tf.nn.relu, kernel_initializer=kernel_initializer, activity_regularizer=tf.contrib.layers.l2_regularizer(0.0001), name='dense')
return outputs # (None, ???, num_classes).
def _create_decoding_layer(self, logits):
# CTC beam search decoding.
logits = tf.transpose(logits, (1, 0, 2)) # Time-major.
# NOTE [info] >> CTC beam search decoding is too slow. It seems to run on CPU, not GPU.
# If the number of classes increases, its computation time becomes much slower.
beam_width = 10 #100
#decoded, log_prob = tf.nn.ctc_beam_search_decoder(inputs=logits, sequence_length=self._model_output_len_ph, beam_width=beam_width, top_paths=1, merge_repeated=True)
decoded, log_prob = tf.nn.ctc_beam_search_decoder_v2(inputs=logits, sequence_length=self._model_output_len_ph, beam_width=beam_width, top_paths=1)
#decoded, log_prob = tf.nn.ctc_greedy_decoder(inputs=logits, sequence_length=self._model_output_len_ph, merge_repeated=True)
return decoded[0] # Sparse tensor.
#return tf.sparse.to_dense(decoded[0], default_value=self._default_value) # Dense tensor.
def _get_loss(self, y, t_sparse, y_len, t_len):
with tf.variable_scope('loss', reuse=tf.AUTO_REUSE):
# Connectionist temporal classification (CTC) loss.
# TODO [check] >> The case of preprocess_collapse_repeated=True & ctc_merge_repeated=True is untested.
loss = tf.reduce_mean(tf.nn.ctc_loss(labels=t_sparse, inputs=y, sequence_length=y_len, preprocess_collapse_repeated=False, ctc_merge_repeated=True, ignore_longer_outputs_than_inputs=False, time_major=False))
tf.summary.scalar('loss', loss)
return loss
def _get_accuracy(self, y, t_sparse, y_len):
with tf.variable_scope('accuracy', reuse=tf.AUTO_REUSE):
# Inaccuracy: label error rate.
# NOTE [info] >> tf.edit_distance() is too slow. It seems to run on CPU, not GPU.
# Accuracy may not be calculated to speed up the training.
ler = tf.reduce_mean(tf.edit_distance(hypothesis=tf.cast(y['decoded_label'], tf.int32), truth=t_sparse, normalize=True)) # int64 -> int32.
accuracy = 1.0 - ler
#accuracy = tf.constant(-1, tf.float32)
tf.summary.scalar('accuracy', accuracy)
return accuracy
def _create_dilation_layer(self, inputs, num_features, is_training):
with tf.variable_scope('ctx_conv', reuse=tf.AUTO_REUSE):
conv1 = tf.pad(inputs, ((0, 0), (1, 1), (1, 1), (0, 0)), mode='CONSTANT', constant_values=0, name='pad1')
# Layer 1.
conv1 = tf.layers.conv2d(conv1, filters=num_features, kernel_size=(3, 3), strides=(1, 1), dilation_rate=(1, 1), padding='valid', name='conv1')
conv1 = tf.nn.relu(conv1, name='relu1')
conv1 = tf.pad(conv1, ((0, 0), (1, 1), (1, 1), (0, 0)), mode='CONSTANT', constant_values=0, name='pad2')
# Layer 2.
conv1 = tf.layers.conv2d(conv1, filters=num_features, kernel_size=(3, 3), strides=(1, 1), dilation_rate=(1, 1), padding='valid', name='conv2')
conv1 = tf.nn.relu(conv1, name='relu2')
with tf.variable_scope('ctx_atrous_conv', reuse=tf.AUTO_REUSE):
conv2 = tf.pad(conv1, ((0, 0), (2, 2), (2, 2), (0, 0)), mode='CONSTANT', constant_values=0, name='pad1')
# Layer 3.
#conv2 = tf.nn.atrous_conv2d(conv2, filters=(3, 3, num_features, num_features), rate=2, padding='valid', name='atrous_conv1')
conv2 = tf.layers.conv2d(conv2, filters=num_features, kernel_size=(3, 3), strides=(1, 1), dilation_rate=(2, 2), padding='valid', name='conv1')
conv2 = tf.nn.relu(conv2, name='relu1')
conv2 = tf.pad(conv2, ((0, 0), (4, 4), (4, 4), (0, 0)), mode='CONSTANT', constant_values=0, name='pad2')
# Layer 4.
#conv2 = tf.nn.atrous_conv2d(conv2, filters=(3, 3, num_features, num_features), rate=4, padding='valid', name='atrous_conv2')
conv2 = tf.layers.conv2d(conv2, filters=num_features, kernel_size=(3, 3), strides=(1, 1), dilation_rate=(4, 4), padding='valid', name='conv2')
conv2 = tf.nn.relu(conv2, name='relu2')
conv2 = tf.pad(conv2, ((0, 0), (8, 8), (8, 8), (0, 0)), mode='CONSTANT', constant_values=0, name='pad3')
# Layer 5.
#conv2 = tf.nn.atrous_conv2d(conv2, filters=(3, 3, num_features, num_features), rate=8, padding='valid', name='atrous_conv3')
conv2 = tf.layers.conv2d(conv2, filters=num_features, kernel_size=(3, 3), strides=(1, 1), dilation_rate=(8, 8), padding='valid', name='conv3')
conv2 = tf.nn.relu(conv2, name='relu3')
conv2 = tf.pad(conv2, ((0, 0), (16, 16), (16, 16), (0, 0)), mode='CONSTANT', constant_values=0, name='pad4')
# Layer 6.
#conv2 = tf.nn.atrous_conv2d(conv2, filters=(3, 3, num_features, num_features), rate=16, padding='valid', name='atrous_conv4')
conv2 = tf.layers.conv2d(conv2, filters=num_features, kernel_size=(3, 3), strides=(1, 1), dilation_rate=(16, 16), padding='valid', name='conv4')
conv2 = tf.nn.relu(conv2, name='relu4')
with tf.variable_scope('ctx_final', reuse=tf.AUTO_REUSE):
dense_final = tf.pad(conv2, ((0, 0), (1, 1), (1, 1), (0, 0)), mode='CONSTANT', constant_values=0, name='pad')
# Layer 7.
dense_final = tf.layers.conv2d(dense_final, filters=num_features, kernel_size=(3, 3), strides=(1, 1), dilation_rate=(1, 1), padding='valid', name='dense1')
dense_final = tf.nn.relu(dense_final, name='relu1')
# Layer 8.
return tf.layers.conv2d(dense_final, filters=num_features, kernel_size=(1, 1), strides=(1, 1), dilation_rate=(1, 1), padding='valid', name='dense2')
#--------------------------------------------------------------------
class Synth90kDilatedCrnnWithKerasCtcLoss(Synth90kCrnn):
def __init__(self, image_height, image_width, image_channel, num_classes):
super().__init__((None, image_height, image_width, image_channel), (None, None), num_classes, is_sparse_output=True)
# FIXME [fix] >>
self._eos_token = 36
def _create_transcription_layer(self, inputs, num_classes, kernel_initializer, is_training):
outputs = tf.layers.dense(inputs, num_classes, activation=tf.nn.softmax, kernel_initializer=kernel_initializer, name='dense')
#outputs = tf.layers.dense(inputs, num_classes, activation=tf.nn.softmax, kernel_initializer=kernel_initializer, activity_regularizer=tf.contrib.layers.l2_regularizer(0.0001), name='dense')
return outputs # (None, ???, num_classes).
def _create_decoding_layer(self, logits):
"""
# Decoding.
decoded = tf.argmax(logits, axis=-1)
# FIXME [fix] >> This does not work correctly.
# Refer to MyModel._decode_label() in ${SWL_PYTHON_HOME}/test/language_processing/run_simple_hangeul_crrn.py.
decoded = tf.numpy_function(lambda x: list(map(lambda lbl: list(k for k, g in itertools.groupby(lbl) if k < self._blank_label), x)), [decoded], [tf.int32]) # Removes repetitive labels.
return decoded
"""
return None
def _get_loss(self, y, t, y_len, t_len):
with tf.variable_scope('loss', reuse=tf.AUTO_REUSE):
# Connectionist temporal classification (CTC) loss.
y_len, t_len = tf.reshape(y_len, (-1, 1)), tf.reshape(t_len, (-1, 1))
loss = tf.reduce_mean(tf.keras.backend.ctc_batch_cost(y_true=t, y_pred=y['logit'], input_length=y_len, label_length=t_len))
tf.summary.scalar('loss', loss)
return loss
def _get_accuracy(self, y, t, y_len):
with tf.variable_scope('accuracy', reuse=tf.AUTO_REUSE):
"""
correct_prediction = tf.equal(tf.argmax(y, axis=-1), tf.cast(t, tf.int64)) # Error: The time-steps of y and t are different.
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
return accuracy
"""
"""
# FIXME [implement] >> The below logic has to be implemented in TensorFlow.
correct_prediction = len(list(filter(lambda xx: len(list(filter(lambda x: x[0] == x[1], zip(xx[0], xx[1])))) == max(len(xx[0]), len(xx[1])), zip(tf.argmax(y, axis=-1), t))))
accuracy = correct_prediction / max(len(y), len(t))
return accuracy
"""
accuracy = tf.constant(-1.0, tf.float32)
tf.summary.scalar('accuracy', accuracy)
return accuracy
def _create_dilation_layer(self, inputs, num_features, is_training):
with tf.variable_scope('ctx_conv', reuse=tf.AUTO_REUSE):
conv1 = tf.pad(inputs, ((0, 0), (1, 1), (1, 1), (0, 0)), mode='CONSTANT', constant_values=0, name='pad1')
# Layer 1.
conv1 = tf.layers.conv2d(conv1, filters=num_features, kernel_size=(3, 3), strides=(1, 1), dilation_rate=(1, 1), padding='valid', name='conv1')
conv1 = tf.nn.relu(conv1, name='relu1')
conv1 = tf.pad(conv1, ((0, 0), (1, 1), (1, 1), (0, 0)), mode='CONSTANT', constant_values=0, name='pad2')
# Layer 2.
conv1 = tf.layers.conv2d(conv1, filters=num_features, kernel_size=(3, 3), strides=(1, 1), dilation_rate=(1, 1), padding='valid', name='conv2')
conv1 = tf.nn.relu(conv1, name='relu2')
with tf.variable_scope('ctx_atrous_conv', reuse=tf.AUTO_REUSE):
conv2 = tf.pad(conv1, ((0, 0), (2, 2), (2, 2), (0, 0)), mode='CONSTANT', constant_values=0, name='pad1')
# Layer 3.
#conv2 = tf.nn.atrous_conv2d(conv2, filters=(3, 3, num_features, num_features), rate=2, padding='valid', name='atrous_conv1')
conv2 = tf.layers.conv2d(conv2, filters=num_features, kernel_size=(3, 3), strides=(1, 1), dilation_rate=(2, 2), padding='valid', name='conv1')
conv2 = tf.nn.relu(conv2, name='relu1')
conv2 = tf.pad(conv2, ((0, 0), (4, 4), (4, 4), (0, 0)), mode='CONSTANT', constant_values=0, name='pad2')
# Layer 4.
#conv2 = tf.nn.atrous_conv2d(conv2, filters=(3, 3, num_features, num_features), rate=4, padding='valid', name='atrous_conv2')
conv2 = tf.layers.conv2d(conv2, filters=num_features, kernel_size=(3, 3), strides=(1, 1), dilation_rate=(4, 4), padding='valid', name='conv2')
conv2 = tf.nn.relu(conv2, name='relu2')
conv2 = tf.pad(conv2, ((0, 0), (8, 8), (8, 8), (0, 0)), mode='CONSTANT', constant_values=0, name='pad3')
# Layer 5.
#conv2 = tf.nn.atrous_conv2d(conv2, filters=(3, 3, num_features, num_features), rate=8, padding='valid', name='atrous_conv3')
conv2 = tf.layers.conv2d(conv2, filters=num_features, kernel_size=(3, 3), strides=(1, 1), dilation_rate=(8, 8), padding='valid', name='conv3')
conv2 = tf.nn.relu(conv2, name='relu3')
conv2 = tf.pad(conv2, ((0, 0), (16, 16), (16, 16), (0, 0)), mode='CONSTANT', constant_values=0, name='pad4')
# Layer 6.
#conv2 = tf.nn.atrous_conv2d(conv2, filters=(3, 3, num_features, num_features), rate=16, padding='valid', name='atrous_conv4')
conv2 = tf.layers.conv2d(conv2, filters=num_features, kernel_size=(3, 3), strides=(1, 1), dilation_rate=(16, 16), padding='valid', name='conv4')
conv2 = tf.nn.relu(conv2, name='relu4')
with tf.variable_scope('ctx_final', reuse=tf.AUTO_REUSE):
dense_final = tf.pad(conv2, ((0, 0), (1, 1), (1, 1), (0, 0)), mode='CONSTANT', constant_values=0, name='pad')
# Layer 7.
dense_final = tf.layers.conv2d(dense_final, filters=num_features, kernel_size=(3, 3), strides=(1, 1), dilation_rate=(1, 1), padding='valid', name='dense1')
dense_final = tf.nn.relu(dense_final, name='relu1')
# Layer 8.
return tf.layers.conv2d(dense_final, filters=num_features, kernel_size=(1, 1), strides=(1, 1), dilation_rate=(1, 1), padding='valid', name='dense2')
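# Example instantiation (illustrative sketch only): the input geometry below
# (32x128 grayscale images) and the class count (36 characters + 1 CTC blank)
# are assumptions for illustration, not values taken from this module.
#
#   model = Synth90kCrnnWithKerasCtcLoss(image_height=32, image_width=128, image_channel=1, num_classes=37)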
| sangwook236/sangwook-library | python/test/language_processing/synth90k_crnn.py | Python | gpl-2.0 | 29,148 | 0.020207 |
import gi
gi.require_version('Gtk', '3.0')
from gi.repository import Gtk, Gdk
class Clipboard(object):
"""A wrapper around GTK3's clipboard interface. Text only."""
def __init__(self):
self._clipboard = Gtk.Clipboard.get(Gdk.SELECTION_CLIPBOARD)
self.text = None
def __repr__(self):
return str(self.text)
def set(self, text):
"""
Set the contents of the clipboard.
"""
# -1 means figure out the length.
self._clipboard.set_text(text, -1)
self._clipboard.store()
def get(self):
"""
Get the contents of the clipboard.
"""
return self._clipboard.wait_for_text()
def hasUpdate(self):
"""
		Check to see if the contents of the clipboard have changed since the last time this
method was called.
"""
temp = self.get()
if temp != self.text:
self.text = temp
return True
return False
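# Minimal manual check (sketch): requires a running GTK display/session, so it is
# guarded behind __main__ rather than executed on import.
if __name__ == '__main__':
    cb = Clipboard()
    cb.set('hello from clippy')
    print(cb.get())        # expected to read back 'hello from clippy'
    print(cb.hasUpdate())  # True, since the contents changed since construction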
| Notgnoshi/clippy | clippy/clipboard.py | Python | mit | 1,004 | 0.001992 |
#
# Copyright (c) 2010 Red Hat, Inc.
#
# This software is licensed to you under the GNU General Public License,
# version 2 (GPLv2). There is NO WARRANTY for this software, express or
# implied, including the implied warranties of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. You should have received a copy of GPLv2
# along with this software; if not, see
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
#
# Red Hat trademarks are not licensed under GPLv2. No permission is
# granted to use or replicate Red Hat trademarks that are incorporated
# in this software or its documentation.
#
import gettext
import logging
import os
import threading
#from gi.repository import GObject
import socket
import rhsm.config
import rhsm.connection as connection
import rhsm.utils
from rhsm.utils import remove_scheme
from rhsm.utils import parse_url
from subscription_manager.ga import GObject as ga_GObject
from subscription_manager.gui.utils import show_error_window
import subscription_manager.injection as inj
from subscription_manager.gui import progress
from subscription_manager.gui import widgets
_ = gettext.gettext
DIR = os.path.dirname(__file__)
log = logging.getLogger('rhsm-app.' + __name__)
class NetworkConfigDialog(widgets.SubmanBaseWidget):
"""This is the dialog that allows setting http proxy settings.
It uses the instant apply paradigm or whatever you wanna call it that the
gnome HIG recommends. Whenever a toggle button is flipped or a text entry
changed, the new setting will be saved.
"""
widget_names = ["networkConfigDialog", "enableProxyButton", "enableProxyAuthButton",
"proxyEntry", "proxyUserEntry", "proxyPasswordEntry",
"cancelButton", "saveButton", "testConnectionButton",
"connectionStatusLabel"]
gui_file = "networkConfig"
def __init__(self):
# Get widgets we'll need to access
super(NetworkConfigDialog, self).__init__()
self.org_timeout = socket.getdefaulttimeout()
self.progress_bar = None
self.cfg = rhsm.config.initConfig()
self.cp_provider = inj.require(inj.CP_PROVIDER)
# Need to load values before connecting signals because when the dialog
# starts up it seems to trigger the signals which overwrites the config
# with the blank values.
self.set_initial_values()
self.enableProxyButton.connect("toggled", self.enable_action)
self.enableProxyAuthButton.connect("toggled", self.enable_action)
self.enableProxyButton.connect("toggled", self.clear_connection_label)
self.enableProxyAuthButton.connect("toggled", self.clear_connection_label)
self.enableProxyButton.connect("toggled", self.enable_test_button)
self.proxyEntry.connect("changed", self.clear_connection_label)
self.proxyUserEntry.connect("changed", self.clear_connection_label)
self.proxyPasswordEntry.connect("changed", self.clear_connection_label)
self.proxyEntry.connect("focus-out-event", self.clean_proxy_entry)
self.cancelButton.connect("clicked", self.on_cancel_clicked)
self.saveButton.connect("clicked", self.on_save_clicked)
self.testConnectionButton.connect("clicked", self.on_test_connection_clicked)
self.networkConfigDialog.connect("delete-event", self.deleted)
def set_initial_values(self):
proxy_url = self.cfg.get("server", "proxy_hostname") or ""
        # append the configured port if present, otherwise the default of 3128
if proxy_url:
proxy_url = proxy_url + ':' + (self.cfg.get("server", "proxy_port") or rhsm.config.DEFAULT_PROXY_PORT)
self.proxyEntry.set_text("%s" % proxy_url)
# show proxy/proxy auth sections as being enabled if we have values set
        # rhn actually has a separate config flag for enabling this, which seems overkill
if self.cfg.get("server", "proxy_hostname"):
self.enableProxyButton.set_active(True)
if self.cfg.get("server", "proxy_hostname") and self.cfg.get("server", "proxy_user"):
self.enableProxyAuthButton.set_active(True)
self.enable_action(self.enableProxyAuthButton)
self.enable_action(self.enableProxyButton)
# the extra or "" are to make sure we don't str None
self.proxyUserEntry.set_text(str(self.cfg.get("server", "proxy_user") or ""))
self.proxyPasswordEntry.set_text(str(self.cfg.get("server", "proxy_password") or ""))
self.connectionStatusLabel.set_label("")
# If there is no proxy information, disable the proxy test
# button.
if not self.enableProxyButton.get_active():
self.testConnectionButton.set_sensitive(False)
self.enableProxyAuthButton.set_sensitive(False)
def write_values(self, widget=None, dummy=None):
proxy = self.proxyEntry.get_text() or ""
# don't save these values if they are disabled in the gui
if proxy and self.enableProxyButton.get_active():
# Remove any URI scheme provided
proxy = remove_scheme(proxy)
# Update the proxy entry field to show we removed any scheme
self.proxyEntry.set_text(proxy)
try:
proxy_hostname, proxy_port = proxy.split(':')
self.cfg.set("server", "proxy_hostname", proxy_hostname)
self.cfg.set("server", "proxy_port", proxy_port)
except ValueError:
# no port? just write out the hostname and assume default
self.cfg.set("server", "proxy_hostname", proxy)
self.cfg.set("server", "proxy_port", rhsm.config.DEFAULT_PROXY_PORT)
else:
# delete config options if we disable it in the ui
self.cfg.set("server", "proxy_hostname", "")
self.cfg.set("server", "proxy_port", "")
if self.enableProxyAuthButton.get_active():
if self.proxyUserEntry.get_text() is not None:
self.cfg.set("server", "proxy_user",
str(self.proxyUserEntry.get_text()))
if self.proxyPasswordEntry.get_text() is not None:
self.cfg.set("server", "proxy_password",
str(self.proxyPasswordEntry.get_text()))
else:
self.cfg.set("server", "proxy_user", "")
self.cfg.set("server", "proxy_password", "")
try:
self.cfg.save()
self.cp_provider.set_connection_info()
except Exception:
show_error_window(_("There was an error saving your configuration.") +
_("Make sure that you own %s.") % self.cfg.fileName,
parent=self.networkConfigDialog)
def show(self):
self.set_initial_values()
self.networkConfigDialog.present()
def on_save_clicked(self, button):
self.write_values()
self.networkConfigDialog.hide()
def on_cancel_clicked(self, button):
self.networkConfigDialog.hide()
def enable_test_button(self, button):
self.testConnectionButton.set_sensitive(button.get_active())
def clear_connection_label(self, entry):
self.connectionStatusLabel.set_label("")
# only used as callback from test_connection thread
def on_test_connection_finish(self, result):
if result:
self.connectionStatusLabel.set_label(_("Proxy connection succeeded"))
else:
self.connectionStatusLabel.set_label(_("Proxy connection failed"))
self._clear_progress_bar()
def _reset_socket_timeout(self):
socket.setdefaulttimeout(self.org_timeout)
def test_connection_wrapper(self, proxy_host, proxy_port, proxy_user, proxy_password):
connection_status = self.test_connection(proxy_host, proxy_port, proxy_user, proxy_password)
ga_GObject.idle_add(self.on_test_connection_finish, connection_status)
def test_connection(self, proxy_host, proxy_port, proxy_user, proxy_password):
cp = connection.UEPConnection(
proxy_hostname=proxy_host,
proxy_port=proxy_port,
proxy_user=proxy_user,
proxy_password=proxy_password)
try:
socket.setdefaulttimeout(10)
cp.getStatus()
            # Both connection.RemoteServerException and connection.RestlibException are considered
            # acceptable exceptions because they are only thrown as a response from the server, meaning the
            # connection through the proxy was successful.
except (connection.RemoteServerException,
connection.RestlibException) as e:
log.warn("Reporting proxy connection as good despite %s" %
e)
return True
        except connection.NetworkException as e:
log.warn("%s when attempting to connect through %s:%s" %
(e.code, proxy_host, proxy_port))
return False
        except Exception as e:
log.exception("'%s' when attempting to connect through %s:%s" %
(e, proxy_host, proxy_port))
return False
else:
return True
finally:
self._reset_socket_timeout()
# Pass through of the return values of parse_proxy_entry
# This was done to simplify on_test_connection_clicked
def clean_proxy_entry(self, widget=None, dummy=None):
proxy_url = self.proxyEntry.get_text()
proxy_host, proxy_port = self.parse_proxy_entry(proxy_url)
cleaned_proxy_url = "%s:%s" % (proxy_host, proxy_port)
self.proxyEntry.set_text(cleaned_proxy_url)
return (proxy_host, proxy_port)
def parse_proxy_entry(self, proxy_url):
proxy_url = remove_scheme(proxy_url)
proxy_host = None
proxy_port = None
try:
proxy_info = parse_url(proxy_url, default_port=rhsm.config.DEFAULT_PROXY_PORT)
proxy_host = proxy_info[2]
proxy_port = proxy_info[3]
        except rhsm.utils.ServerUrlParseErrorPort as e:
proxy_host = proxy_url.split(':')[0]
proxy_port = rhsm.config.DEFAULT_PROXY_PORT
        except rhsm.utils.ServerUrlParseError as e:
log.error(e)
return (proxy_host, proxy_port)
def on_test_connection_clicked(self, button):
proxy_host, proxy_port = self.clean_proxy_entry()
# ensure that we only use those values for testing if required
# this catches the case where there was previously a user and pass in the config
# and the user unchecks the box, leaving behind the values for the time being.
# Alternatively we could clear those boxes when the box is unchecked
if self.enableProxyAuthButton.get_active():
proxy_user = self.proxyUserEntry.get_text()
proxy_password = self.proxyPasswordEntry.get_text()
else:
proxy_user = None
proxy_password = None
self._display_progress_bar()
threading.Thread(target=self.test_connection_wrapper,
args=(proxy_host, proxy_port, proxy_user, proxy_password),
name='TestNetworkConnectionThread').start()
def deleted(self, event, data):
self.write_values()
self.networkConfigDialog.hide()
self._clear_progress_bar()
return True
def _display_progress_bar(self):
if self.progress_bar:
self.progress_bar.set_title(_("Testing Connection"))
self.progress_bar.set_label(_("Please wait"))
else:
self.progress_bar = progress.Progress(_("Testing Connection"), _("Please wait"))
self.timer = ga_GObject.timeout_add(100, self.progress_bar.pulse)
self.progress_bar.set_transient_for(self.networkConfigDialog)
def _clear_progress_bar(self):
        if not self.progress_bar:  # progress bar could be None if self.test_connection is called directly
return
self.progress_bar.hide()
ga_GObject.source_remove(self.timer)
self.timer = 0
self.progress_bar = None
def enable_action(self, button):
if button.get_name() == "enableProxyButton":
self.proxyEntry.set_sensitive(button.get_active())
self.proxyEntry.grab_focus()
self.enableProxyAuthButton.set_sensitive(button.get_active())
# Proxy authentication should only be active if proxy is also enabled
self.proxyUserEntry.set_sensitive(button.get_active() and
self.enableProxyAuthButton.get_active())
self.proxyPasswordEntry.set_sensitive(button.get_active() and
self.enableProxyAuthButton.get_active())
elif button.get_name() == "enableProxyAuthButton":
self.proxyUserEntry.set_sensitive(button.get_active())
self.proxyPasswordEntry.set_sensitive(button.get_active())
self.get_object("usernameLabel").set_sensitive(button.get_active())
self.get_object("passwordLabel").set_sensitive(button.get_active())
def set_parent_window(self, window):
self.networkConfigDialog.set_transient_for(window)
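# Typical use from the parent GUI code (sketch; 'parent_window' stands for whatever
# Gtk window owns this dialog):
#
#   dialog = NetworkConfigDialog()
#   dialog.set_parent_window(parent_window)
#   dialog.show()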
| alikins/subscription-manager | src/subscription_manager/gui/networkConfig.py | Python | gpl-2.0 | 13,296 | 0.002332 |
#!/usr/bin/env python
# -*- coding: iso-8859-1 -*-
""" sha1Hash_test.py
Unit tests for sha1.py
"""
from crypto.hash.sha1Hash import SHA1
import unittest
import struct
assert struct.calcsize('!IIIII') == 20, '5 integers should be 20 bytes'
class SHA1_FIPS180_TestCases(unittest.TestCase):
""" SHA-1 tests from FIPS180-1 Appendix A, B and C """
def testFIPS180_1_Appendix_A(self):
""" APPENDIX A. A SAMPLE MESSAGE AND ITS MESSAGE DIGEST """
hashAlg = SHA1()
message = 'abc'
message_digest = 0xA9993E36L, 0x4706816AL, 0xBA3E2571L, 0x7850C26CL, 0x9CD0D89DL
md_string = _toBString(message_digest)
assert( hashAlg(message) == md_string ), 'FIPS180 Appendix A test Failed'
def testFIPS180_1_Appendix_B(self):
""" APPENDIX B. A SECOND SAMPLE MESSAGE AND ITS MESSAGE DIGEST """
hashAlg = SHA1()
message = 'abcdbcdecdefdefgefghfghighijhijkijkljklmklmnlmnomnopnopq'
message_digest = 0x84983E44L, 0x1C3BD26EL, 0xBAAE4AA1L, 0xF95129E5L, 0xE54670F1L
md_string = _toBString(message_digest)
assert( hashAlg(message) == md_string ), 'FIPS180 Appendix B test Failed'
def testFIPS180_1_Appendix_C(self):
""" APPENDIX C. A THIRD SAMPLE MESSAGE AND ITS MESSAGE DIGEST
Let the message be the binary-coded form of the ASCII string which consists
of 1,000,000 repetitions of "a". """
hashAlg = SHA1()
message = 1000000*'a'
message_digest = 0x34AA973CL, 0xD4C4DAA4L, 0xF61EEB2BL, 0xDBAD2731L, 0x6534016FL
md_string = _toBString(message_digest)
assert( hashAlg(message) == md_string ), 'FIPS180 Appendix C test Failed'
def _toBlock(binaryString):
""" Convert binary string to blocks of 5 words of uint32() """
return [uint32(word) for word in struct.unpack('!IIIII', binaryString)]
def _toBString(block):
""" Convert block (5 words of 32 bits to binary string """
return ''.join([struct.pack('!I',word) for word in block])
if __name__ == '__main__':
# Run the tests from the command line
unittest.main()
| dknlght/dkodi | src/script.module.cryptopy/lib/crypto/hash/sha1Hash_test.py | Python | gpl-2.0 | 2,199 | 0.012278 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=no-else-return, unidiomatic-typecheck, undefined-variable, invalid-name, redefined-builtin
"""
The Relay Virtual Machine.
Implements a Python interface to compiling and executing on the Relay VM.
"""
import numpy as np
import tvm
import tvm.runtime.ndarray as _nd
import tvm.runtime.vm as vm_rt
from tvm import autotvm
from tvm.relay import expr as _expr
from tvm.relay.backend.interpreter import Executor
from . import _vm
def compile(mod, target=None, target_host=None, params=None):
"""Compile the module to VM executable. A helper function for VMCompiler.
Parameters
----------
mod : tvm.IRModule
The Relay module to build.
target : str, :any:`tvm.target.Target`, or dict of str(i.e.
device/context name) to str/tvm.target.Target, optional
For heterogeneous compilation, it is a dictionary indicating context
to target mapping. For homogeneous compilation, it is a build target.
target_host : str or :any:`tvm.target.Target`, optional
Host compilation target, if target is device.
When TVM compiles device specific program such as CUDA,
we also need host(CPU) side code to interact with the driver
to setup the dimensions and parameters correctly.
target_host is used to specify the host side codegen target.
By default, llvm is used if it is enabled,
        otherwise a stackvm interpreter is used.
params : dict of str to NDArray
Input parameters to the graph that do not change
during inference time. Used for constant folding.
Returns
-------
exec : tvm.runtime.vm.Executable
The VM executable that contains both library code and bytecode.
"""
compiler = VMCompiler()
if params:
compiler.set_params(params)
compiler.lower(mod, target, target_host)
compiler.codegen()
return compiler.get_exec()
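# Example usage (sketch): the identity function and the "llvm" target below are
# illustrative assumptions, not part of this module.
#
#   import tvm
#   from tvm import relay
#   x = relay.var("x", shape=(1, 3))
#   mod = tvm.IRModule.from_expr(relay.Function([x], x))
#   exe = compile(mod, target="llvm")
#   vm = vm_rt.VirtualMachine(exe, tvm.cpu())
#   out = vm.run(np.ones((1, 3), dtype="float32"))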
class VMCompiler(object):
"""Compiler that compiles Relay module to VM executable."""
def __init__(self):
self.mod = _vm._VMCompiler()
self._lower = self.mod["lower"]
self._codegen = self.mod["codegen"]
self._get_exec = self.mod["get_executable"]
self._set_params_func = self.mod["set_params"]
self._get_params_func = self.mod["get_params"]
self._optimize = self.mod["optimize"]
def set_params(self, params):
"""Set constant parameters for the model.
Parameters
----------
params : dict of str to NDArray
Input parameters to the graph that do not change
during inference time. Used for constant folding.
"""
inputs = {}
for name, param in params.items():
if isinstance(param, np.ndarray):
param = _nd.array(param)
inputs[name] = _expr.const(param)
self._set_params_func(inputs)
def get_params(self):
"""Return the updated weights."""
params = self._get_params_func()
ret = {}
for key, value in params.items():
ret[key] = value.data
return ret
def lower(self, mod, target=None, target_host=None):
"""Lower the module to VM bytecode.
Parameters
----------
mod : tvm.IRModule
The Relay module to build.
target : str, :any:`tvm.target.Target`, or dict of str(i.e.
device/context name) to str/tvm.target.Target, optional
For heterogeneous compilation, it is a dictionary indicating context
to target mapping. For homogeneous compilation, it is a build target.
target_host : str or :any:`tvm.target.Target`, optional
Host compilation target, if target is device.
When TVM compiles device specific program such as CUDA,
we also need host(CPU) side code to interact with the driver
to setup the dimensions and parameters correctly.
target_host is used to specify the host side codegen target.
By default, llvm is used if it is enabled,
            otherwise a stackvm interpreter is used.
"""
target = self._update_target(target)
target_host = self._update_target_host(target, target_host)
tophub_context = self._tophub_context(target)
with tophub_context:
self._lower(mod, target, target_host)
def codegen(self):
"""Generate the kernel library."""
self._codegen()
def optimize(self, mod, target=None, target_host=None, params=None):
"""Helper method that optimizes a Relay module via VM.
Parameters
----------
mod : tvm.IRModule
target : str, :any:`tvm.target.Target`, or dict of str (i.e.
device/context name) to str/tvm.target.Target, optional
target_host : str or :any:`tvm.target.Target`, optional
The compilation target for host.
By default, llvm is used if it is enabled,
            otherwise a stackvm interpreter is used.
params : dict of str to NDArray
Input parameters to the graph that do not change
during inference time. Used for constant folding.
Returns
-------
mod : tvm.IRModule
The optimized relay module.
params : dict
The parameters of the final module.
"""
target = self._update_target(target)
target_host = self._update_target_host(target, target_host)
if params:
self.set_params(params)
return self._optimize(mod, target, target_host), self.get_params()
def get_exec(self):
"""Get the VM executable.
Returns
-------
exec : tvm.runtime.vm.Executable
The VM executable that contains both library code and bytecode.
"""
return vm_rt.Executable(self._get_exec())
def _update_target(self, target):
"""Update target."""
target = target if target else tvm.target.Target.current()
if target is None:
raise ValueError("Target is not set in env or passed as argument.")
tgts = {}
if isinstance(target, (str, tvm.target.Target)):
dev_type = tvm.tir.IntImm("int32", tvm.nd.context(str(target)).device_type)
tgts[dev_type] = tvm.target.Target(target)
elif isinstance(target, dict):
for dev, tgt in target.items():
dev_type = tvm.tir.IntImm("int32", tvm.nd.context(dev).device_type)
tgts[dev_type] = tvm.target.Target(tgt)
else:
raise TypeError(
"target is expected to be str, tvm.target.Target, "
+ "or dict of str to str/tvm.target.Target, but received "
+ "{}".format(type(target))
)
return tgts
def _update_target_host(self, target, target_host):
"""Update target host."""
target_host = None if target_host == "" else target_host
if not target_host:
for device_type, tgt in target.items():
if device_type.value == tvm.nd.cpu(0).device_type:
target_host = tgt
break
if not target_host:
target_host = "llvm" if tvm.runtime.enabled("llvm") else "stackvm"
if isinstance(target_host, str):
target_host = tvm.target.Target(target_host)
return target_host
def _tophub_context(self, target):
"""Get the autotvm context."""
# If current dispatch context is fallback context (the default root context),
# then load pre-tuned parameters from TopHub
if isinstance(autotvm.DispatchContext.current, autotvm.FallbackContext):
tophub_context = autotvm.tophub.context(list(target.values()))
else:
tophub_context = autotvm.util.EmptyContext()
return tophub_context
class VMExecutor(Executor):
"""
An implementation of the executor interface for
the Relay VM.
    A useful interface for experimentation and debugging; the VM can also be
    used directly through the runtime API provided by `tvm.runtime.vm`.
Parameters
----------
mod : :py:class:`~tvm.IRModule`
The module to support the execution.
    ctx : :py:class:`~tvm.runtime.TVMContext`
The runtime context to run the code on.
target : :py:class:`Target`
The target option to build the function.
"""
def __init__(self, mod, ctx, target):
if mod is None:
raise RuntimeError("Must provide module to get VM executor.")
self.mod = mod
self.ctx = ctx
self.target = target
self.executable = compile(mod, target)
self.vm = vm_rt.VirtualMachine(self.executable, ctx)
def _make_executor(self, expr=None):
main = self.mod["main"]
def _vm_wrapper(*args, **kwargs):
args = self._convert_args(main, args, kwargs)
return self.vm.run(*args)
return _vm_wrapper
| sxjscience/tvm | python/tvm/relay/backend/vm.py | Python | apache-2.0 | 9,793 | 0.000715 |
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
This module will define useful objects for conditional analysis
"""
import collections
import numpy as np
import pandas as pd
from tunacell.base.datatools import Coordinates
# define an object to handle heterogeneous types of time series
class TimeSeries(object):
"""Object that decorates the data with other useful attributes.
Parameters
----------
ts : :class:`Coordinates` instance, or 2d structured ndarray
better to use Coordinates, so that names can be carried along
ids : sequence of cell identifiers from which data was collected
index_cycles : sequence of couples (index_first, index_last)
that delimit data corresponding to cell id, must be same length as ids
slices : sequence of slice objects
each item can be used to slice the entire table
time_bounds : sequence of couples of floats
for each cell, first element is the lower bound of cell cycle, the
second element is the upper bound of cell cycle, must be same length
as ids
    select_ids : sequences of True/False values corresponding to whether or
not to include data from cell id in timeseries, must be same length as
ids
"""
def __init__(self, ts=[], ids=[], index_cycles=[], slices=None,
time_bounds=[], select_ids={}, container_label=None,
experiment_label=None):
# ts is a Coordinates instance
self.container_label = container_label
self.experiment_label = experiment_label
if isinstance(ts, Coordinates):
self._timeseries = ts
# ts is a numpy array (structured if possible)
elif isinstance(ts, np.ndarray):
# convert structured arrays to 2d ndarrays
if ts.dtype.names is not None:
_arr = ts.view((float, len(ts.dtype.names)))
_x_name, _y_name = ts.dtype.names[:2] # take only first 2 cols
else:
_arr = ts
_x_name, _y_name = 'x', 'y'
_x = _arr[:, 0]
_y = _arr[:, 1]
self._timeseries = Coordinates(_x, _y,
x_name=_x_name, y_name=_y_name)
# ... list of couples
elif isinstance(ts, collections.Iterable):
_ts = list(ts)
_x, _y = map(np.array, zip(*_ts))
self._timeseries = Coordinates(_x, _y)
self.time_bounds = time_bounds
self.slices = []
if index_cycles: # array indices corresponding to (first, last) frame for each cell
self.index_cycles = index_cycles
slices = []
for item in index_cycles:
if item is None:
slices.append(None)
# indices are reported as a single None
# when no data is reported for a given cell
else:
i, j = item
if j is not None:
slices.append(slice(i, j+1))
else:
slices.append(slice(i, None))
self.slices = slices
elif slices is not None:
self.slices = slices
index_cycles = []
for item in slices:
if item is None:
index_cycles.append(None)
else:
if item.stop is not None:
index_cycles.append((item.start, item.stop - 1))
else:
index_cycles.append((item.start, None))
self.index_cycles = index_cycles
self.ids = ids
if len(select_ids.keys()) > 0: # master is already defined
self.selections = select_ids
else: # nothing is defined, we define master here
self.selections = {'master': [True for _ in self.ids]}
return
def use_condition(self, condition_label='master',
sharp_tleft=None, sharp_tright=None):
"""Get conditioned timeseries.
        Parameters
        ----------
condition_label : str (default 'master')
must be a key of dictionary self.selections, and corresponds to
the repr of a given :class:`FilterSet` instance.
        sharp_tleft : float (default None)
sharp lower bound for cell cycle timing. USE ONLY FOR CELL CYCLE
OBSERVABLES
        sharp_tright : float (default None)
sharp upper bound for cell cycle timing. USE ONLY FOR CELL CYCLE
OBSERVABLES
Returns
-------
Coordinates instance made of valid (x, y) points
"""
selection = self.selections[condition_label]
xs, ys = [], []
for index, cid in enumerate(self.ids):
if selection[index] and self.slices[index] is not None:
if sharp_tleft is not None:
if self.time_bounds[index][0] < sharp_tleft:
continue
if sharp_tright is not None:
if self.time_bounds[index][1] > sharp_tright:
continue
xs.append(self.timeseries.x[self.slices[index]])
ys.append(self.timeseries.y[self.slices[index]])
if len(xs) > 0:
_x = np.concatenate(xs)
_y = np.concatenate(ys)
else:
_x = []
_y = []
out = Coordinates(_x, _y, x_name=self.timeseries.x_name,
y_name=self.timeseries.y_name)
return out
@property
def timeseries(self):
return self._timeseries
#
# @timeseries.setter
# def timeseries(self, ts):
# self._timeseries = ts
# def __getitem__(self, key):
# return self.timeseries[key]
def __repr__(self):
return repr(self.timeseries)
def as_text(self, sep='\t', cell_sep='\n', print_labels=False):
"""Export TimeSeries as text arrays
Parameters
----------
sep : str (default '\t')
how to separate columns
cell_sep : str (default '\n')
how to separate cells (default: one blank line)
print_labels : bool {False, True}
first line is labels, followed by empty line
"""
printout = ''
labels = [self.timeseries.x_name,
self.timeseries.y_name,
'cellID',
'containerID',
'experimentID']
if print_labels and labels is not None:
printout += '\t'.join(labels) + '\n'
printout += '\n'
for index, sl in enumerate(self.slices):
chunk = ''
x = self.timeseries.x[sl]
y = self.timeseries.y[sl]
ids = len(x) * [self.ids[index]]
container_id = len(x) * [self.container_label, ]
exp_id = len(x) * [self.experiment_label, ]
for line in zip(x, y, ids, container_id, exp_id):
chunk += '{}'.format(sep).join(['{}'.format(item) for item in line]) + '\n'
printout += chunk
printout += cell_sep
return printout.lstrip().rstrip() # remove empty lines at beginning/end
def to_dataframe(self, start_index=0, sharp_tleft=None, sharp_tright=None):
dic = {}
dic[self.timeseries.x_name] = [] # self.timeseries.x
dic[self.timeseries.y_name] = [] # self.timeseries.y
dic['cellID'] = []
dic['containerID'] = []
dic['experimentID'] = []
for key in self.selections.keys():
if key == 'master':
continue
dic[key] = []
size = 0
# add cell ID, container ID, experiment ID, and TRUE/FALSE for each cdt
for index, sl in enumerate(self.slices):
# collect only if within bounds
if sharp_tleft is not None:
if self.time_bounds[index][0] < sharp_tleft:
continue
if sharp_tright is not None:
if self.time_bounds[index][1] > sharp_tright:
continue
_x = self.timeseries.x[sl]
_y = self.timeseries.y[sl]
dic[self.timeseries.x_name].extend(_x)
dic[self.timeseries.y_name].extend(_y)
dic['cellID'].extend(len(_x) * [self.ids[index], ])
dic['containerID'].extend(len(_x) * [self.container_label, ])
dic['experimentID'].extend(len(_x) * [self.experiment_label, ])
# True/False for each
for key, values in self.selections.items():
# master: all True, useless to printout
if key == 'master':
continue
val = values[index]
dic[key].extend(len(_x) * [val, ])
size += len(_x)
df = pd.DataFrame(dic, index=range(start_index, start_index + size))
return df
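if __name__ == '__main__':
    # Minimal illustration with synthetic data (not produced by tunacell itself):
    # two cells, two frames each, concatenated into one list of (time, value) couples.
    demo = TimeSeries(ts=[(0., 1.), (5., 2.), (10., 3.), (15., 4.)],
                      ids=['cell-1', 'cell-2'],
                      index_cycles=[(0, 1), (2, 3)],
                      time_bounds=[(0., 5.), (10., 15.)])
    print(demo.use_condition('master'))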
| LeBarbouze/tunacell | tunacell/base/timeseries.py | Python | mit | 8,945 | 0.000335 |
import logging
import os
import sys
import time
from typing import TYPE_CHECKING, Optional
from packaging import version
from dvc import __version__
from dvc.utils.pkg import PKG
if TYPE_CHECKING:
from dvc.ui import RichText
logger = logging.getLogger(__name__)
class Updater:
URL = "https://updater.dvc.org"
UPDATER_FILE = "updater"
TIMEOUT = 24 * 60 * 60 # every day
TIMEOUT_GET = 10
def __init__(self, tmp_dir, friendly=False, hardlink_lock=False):
from dvc.lock import make_lock
self.updater_file = os.path.join(tmp_dir, self.UPDATER_FILE)
self.lock = make_lock(
self.updater_file + ".lock",
tmp_dir=tmp_dir,
friendly=friendly,
hardlink_lock=hardlink_lock,
)
self.current = version.parse(__version__).base_version
def _is_outdated_file(self):
ctime = os.path.getmtime(self.updater_file)
outdated = time.time() - ctime >= self.TIMEOUT
if outdated:
logger.debug(f"'{self.updater_file}' is outdated")
return outdated
def _with_lock(self, func, action):
from dvc.lock import LockError
try:
with self.lock:
func()
except LockError:
msg = "Failed to acquire '{}' before {} updates"
logger.debug(msg.format(self.lock.lockfile, action))
def check(self):
from dvc.utils import env2bool
if (
os.getenv("CI")
or env2bool("DVC_TEST")
or PKG == "snap"
or not self.is_enabled()
):
return
self._with_lock(self._check, "checking")
def _check(self):
if not os.path.exists(self.updater_file) or self._is_outdated_file():
self.fetch()
return
with open(self.updater_file, encoding="utf-8") as fobj:
import json
try:
info = json.load(fobj)
latest = info["version"]
except Exception as exc: # pylint: disable=broad-except
msg = "'{}' is not a valid json: {}"
logger.debug(msg.format(self.updater_file, exc))
self.fetch()
return
if version.parse(self.current) < version.parse(latest):
self._notify(latest)
def fetch(self, detach=True):
from dvc.daemon import daemon
if detach:
daemon(["updater"])
return
self._with_lock(self._get_latest_version, "fetching")
def _get_latest_version(self):
import json
import requests
try:
resp = requests.get(self.URL, timeout=self.TIMEOUT_GET)
info = resp.json()
except requests.exceptions.RequestException as exc:
msg = "Failed to retrieve latest version: {}"
logger.debug(msg.format(exc))
return
with open(self.updater_file, "w+", encoding="utf-8") as fobj:
json.dump(info, fobj)
def _notify(self, latest: str, pkg: Optional[str] = PKG) -> None:
from dvc.ui import ui
if not sys.stdout.isatty():
return
message = self._get_message(latest, pkg=pkg)
return ui.error_write(message, styled=True)
def _get_message(
self,
latest: str,
        current: Optional[str] = None,
color: str = "yellow",
pkg: Optional[str] = None,
) -> "RichText":
from dvc.ui import ui
current = current or self.current
update_message = ui.rich_text.from_markup(
f"You are using dvc version [bold]{current}[/]; "
f"however, version [bold]{latest}[/] is available."
)
instruction = ui.rich_text.from_markup(
self._get_update_instructions(pkg=pkg)
)
return ui.rich_text.assemble(
"\n", update_message, "\n", instruction, style=color
)
@staticmethod
def _get_update_instructions(pkg: Optional[str] = None) -> str:
if pkg in ("osxpkg", "exe", "binary"):
return (
"To upgrade, uninstall dvc and reinstall from "
"[blue]https://dvc.org[/]."
)
instructions = {
"pip": "pip install --upgrade dvc",
"rpm": "yum update dvc",
"brew": "brew upgrade dvc",
"deb": "apt-get install --only-upgrade dvc",
"conda": "conda update dvc",
"choco": "choco upgrade dvc",
}
if pkg not in instructions:
return (
"Find the latest release at "
"[blue]https://github.com/iterative/dvc/releases/latest[/]."
)
instruction = instructions[pkg]
return f"To upgrade, run '{instruction}'."
def is_enabled(self):
from dvc.config import Config, to_bool
enabled = to_bool(
Config(validate=False).get("core", {}).get("check_update", "true")
)
logger.debug(
"Check for update is {}abled.".format("en" if enabled else "dis")
)
return enabled
def notify_updates():
from contextlib import suppress
from dvc.repo import NotDvcRepoError, Repo
with suppress(NotDvcRepoError), Repo() as repo:
hardlink_lock = repo.config["core"].get("hardlink_lock", False)
updater = Updater(repo.tmp_dir, hardlink_lock=hardlink_lock)
updater.check()
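# Example (sketch): driving the updater outside of DVC's own entry points; the
# tmp directory below is an arbitrary placeholder.
#
#   updater = Updater(tmp_dir="/tmp/dvc-updater")
#   updater.check()              # daemonizes a fetch when the cached file is stale
#   updater.fetch(detach=False)  # or fetch the latest version info synchronously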
| dmpetrov/dataversioncontrol | dvc/updater.py | Python | apache-2.0 | 5,458 | 0 |
# -*- coding: utf-8 -*-
"""
***************************************************************************
DensifyGeometriesInterval.py by Anita Graser, Dec 2012
based on DensifyGeometries.py
---------------------
Date : October 2012
Copyright : (C) 2012 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
from builtins import range
__author__ = 'Anita Graser'
__date__ = 'Dec 2012'
__copyright__ = '(C) 2012, Anita Graser'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
from qgis.core import (QgsProcessingParameterNumber,
QgsProcessing)
from processing.algs.qgis.QgisAlgorithm import QgisFeatureBasedAlgorithm
class DensifyGeometriesInterval(QgisFeatureBasedAlgorithm):
INTERVAL = 'INTERVAL'
def group(self):
return self.tr('Vector geometry')
def __init__(self):
super().__init__()
self.interval = None
def initParameters(self, config=None):
self.addParameter(QgsProcessingParameterNumber(self.INTERVAL,
self.tr('Interval between vertices to add'), QgsProcessingParameterNumber.Double,
1, False, 0, 10000000))
def name(self):
return 'densifygeometriesgivenaninterval'
def displayName(self):
return self.tr('Densify geometries given an interval')
def outputName(self):
return self.tr('Densified')
def inputLayerTypes(self):
return [QgsProcessing.TypeVectorLine, QgsProcessing.TypeVectorPolygon]
def prepareAlgorithm(self, parameters, context, feedback):
        self.interval = self.parameterAsDouble(parameters, self.INTERVAL, context)
return True
def processFeature(self, feature, feedback):
if feature.hasGeometry():
            new_geometry = feature.geometry().densifyByDistance(float(self.interval))
feature.setGeometry(new_geometry)
return feature
| nirvn/QGIS | python/plugins/processing/algs/qgis/DensifyGeometriesInterval.py | Python | gpl-2.0 | 2,659 | 0.000752 |
#coding=utf-8
def generatList(n):
    # First n partial quotients of the continued fraction of e after the leading 2:
    # [1, 2, 1, 1, 4, 1, 1, 6, 1, ...] - every term with i % 3 == 1 equals 2*(i/3+1).
    llist=[]
    for i in range(n):
        t=1
        if i%3==1:
            t=2*(i/3+1)
        llist.append(t)
    return llist[::-1]  # reversed, so the fraction can be evaluated from the innermost term
def generatitem(n):
    # Evaluate the convergent bottom-up: for each partial quotient a, taken from the
    # innermost term outwards, (numerator, denominator) update as (fz, fm) -> (fm, fz + a*fm).
    fz=0
    fm=1
    llist=generatList(n)
    for i in llist:
        temp=fm
        fm=fz+i*fm
        fz=temp
    # Re-attach the integer part 2 of e: the convergent is 2 + fz/fm = (fz + 2*fm)/fm.
    return (fz+2*fm,fm)
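# Sanity check (per the problem statement): the 10th convergent of e is 1457/536,
# whose numerator digit sum is 1+4+5+7 = 17.
assert generatitem(9) == (1457, 536)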
# Digit sum of the numerator of the 100th convergent (the leading 2 plus 99 partial quotients).
fz=[int(x) for x in list(str(generatitem(99)[0]))]
print sum(fz)
| zhouxiumin/projecteuler | python/euler65.py | Python | apache-2.0 | 329 | 0.097264 |
from dashmat.option_spec.module_imports import module_import_spec
from dashmat.formatter import MergedOptionStringFormatter
from dashmat.core_modules.base import Module
from dashmat.errors import UnknownModule
from input_algorithms.spec_base import boolean, string_spec, formatted, listof, overridden, or_spec, set_options
from input_algorithms.many_item_spec import many_item_formatted_spec
from input_algorithms.dictobj import dictobj
import six
class import_line_spec(many_item_formatted_spec):
value_name = "Import line"
specs = [listof(string_spec()), or_spec(string_spec(), set_options(import_path=string_spec()))]
optional_specs = [string_spec()]
def create_result(self, imports, module_name, import_from, meta, val, dividers):
"""Default permissions to rw"""
options = {"imports": imports, "module_name": module_name, "import_from": import_from}
return ImportLine.FieldSpec(formatter=MergedOptionStringFormatter).normalise(meta, options)
class ImportLine(dictobj.Spec):
module_name = dictobj.Field(
lambda: or_spec(string_spec(), set_options(import_path=module_import_spec(Module)))
, formatted = True
, help = "The name of the module this import comes from"
)
imports = dictobj.Field(
string_spec
, formatted = True
, wrapper = listof
, help = "The modules that are imported"
)
import_from = dictobj.Field(
string_spec
, formatted = True
, default = "main.jsx"
, help = "The module in our import_path to import the imports from"
)
def import_line(self, modules):
module_name = self.module_name
if type(module_name) is dict:
module_name = self.module_name['import_path']
if isinstance(module_name, six.string_types):
if module_name not in modules:
raise UnknownModule(module=module_name, available=list(modules.keys()))
module = modules[module_name]
else:
module = module_name
if type(module) is type:
import_path = "{0}:{1}".format(module.module_path, module.__name__)
module = module(import_path, import_path)
imports = "{{{0}}}".format(", ".join(self.imports))
relative_to = module.relative_to
return 'import {0} from "/modules/{1}/{2}"'.format(imports, relative_to, self.import_from)
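# Illustration (hypothetical values): with imports=['Thing'], import_from='main.jsx'
# and a resolved module whose relative_to is 'my_module', import_line() would yield
#   import {Thing} from "/modules/my_module/main.jsx"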
| realestate-com-au/dashmat | dashmat/option_spec/import_line.py | Python | mit | 2,434 | 0.015612 |
#!/usr/bin/env python
'''
@file freq_scale.py
@brief Sandbox for various frequency scale generators
@author gm
@copyright gm 2014
This file is part of Chartreuse
Chartreuse is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Chartreuse is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Chartreuse. If not, see <http://www.gnu.org/licenses/>.
'''
import numpy
import pylab
class LogFreqScale(object):
'''
Log frequency scale
'''
def __init__(self, length, dft_length, sampling_freq):
self.length = length
self.dft_length = dft_length
self.sampling_freq = sampling_freq
self._Synthesize()
def _Synthesize(self):
'''
Actual processing function for generating the scale
'''
kLowBound = 2.0 * self.sampling_freq / self.dft_length
kHighBound = self.sampling_freq * 0.5
tmp = numpy.linspace(kLowBound, kHighBound, self.length)
tmp[0] = self.sampling_freq / (self.dft_length * (3.0 / 4.0))
self.data = numpy.log2(tmp * 0.001)
if __name__ == "__main__":
import utilities
sampling_freq = 48000.0
dft_bins_count = 2048
low_edge = 62.5
high_edge = 1500.0
low_edge_idx = numpy.ceil(low_edge * dft_bins_count / sampling_freq)
high_edge_idx = dft_bins_count / 2 + 1
length = high_edge_idx - low_edge_idx + 1
generator = LogFreqScale(length, dft_bins_count, sampling_freq)
out_data = generator.data
print(utilities.PrintMetadata(utilities.GetMetadata(out_data)))
pylab.plot(out_data, label = "out")
pylab.legend()
pylab.show()
| G4m4/chartreuse | scripts/freq_scale.py | Python | gpl-3.0 | 2,022 | 0.001978 |
# -*- test-case-name: vumi.transports.cellulant.tests.test_cellulant_sms -*-
import json
from urllib import urlencode
from twisted.internet.defer import inlineCallbacks
from vumi.utils import http_request_full
from vumi import log
from vumi.config import ConfigDict, ConfigText
from vumi.transports.httprpc import HttpRpcTransport
class CellulantSmsTransportConfig(HttpRpcTransport.CONFIG_CLASS):
"""Cellulant transport config.
"""
credentials = ConfigDict(
"A dictionary where the `from_addr` is used for the key lookup and the"
" returned value should be a dictionary containing the username and"
" password.", required=True, static=True)
outbound_url = ConfigText(
"The URL to send outbound messages to.", required=True, static=True)
class CellulantSmsTransport(HttpRpcTransport):
"""
HTTP transport for Cellulant SMS.
"""
transport_type = 'sms'
CONFIG_CLASS = CellulantSmsTransportConfig
EXPECTED_FIELDS = set(["SOURCEADDR", "DESTADDR", "MESSAGE", "ID"])
IGNORED_FIELDS = set(["channelID", "keyword", "CHANNELID", "serviceID",
"SERVICEID", "unsub", "transactionID"])
KNOWN_ERROR_RESPONSE_CODES = {
'E0': 'Insufficient HTTP Params passed',
'E1': 'Invalid username or password',
'E2': 'Credits have expired or run out',
'1005': 'Suspect source address',
}
def validate_config(self):
config = self.get_static_config()
self._credentials = config.credentials
self._outbound_url = config.outbound_url
return super(CellulantSmsTransport, self).validate_config()
@inlineCallbacks
def handle_outbound_message(self, message):
creds = self._credentials.get(message['from_addr'], {})
username = creds.get('username', '')
password = creds.get('password', '')
params = {
'username': username,
'password': password,
'source': message['from_addr'],
'destination': message['to_addr'],
'message': message['content'],
}
log.msg("Sending outbound message: %s" % (message,))
url = '%s?%s' % (self._outbound_url, urlencode(params))
log.msg("Making HTTP request: %s" % (url,))
response = yield http_request_full(url, '', method='GET')
log.msg("Response: (%s) %r" % (response.code, response.delivered_body))
content = response.delivered_body.strip()
# we'll only send 1 message at a time and so the API can only
# return this on a valid ack
if content == '1':
yield self.publish_ack(user_message_id=message['message_id'],
sent_message_id=message['message_id'])
else:
error = self.KNOWN_ERROR_RESPONSE_CODES.get(content,
'Unknown response code: %s' % (content,))
yield self.publish_nack(message['message_id'], error)
@inlineCallbacks
def handle_raw_inbound_message(self, message_id, request):
values, errors = self.get_field_values(request, self.EXPECTED_FIELDS,
self.IGNORED_FIELDS)
if errors:
log.msg('Unhappy incoming message: %s' % (errors,))
yield self.finish_request(message_id, json.dumps(errors), code=400)
return
log.msg(('CellulantSmsTransport sending from %(SOURCEADDR)s to '
'%(DESTADDR)s message "%(MESSAGE)s"') % values)
yield self.publish_message(
message_id=message_id,
content=values['MESSAGE'],
to_addr=values['DESTADDR'],
from_addr=values['SOURCEADDR'],
provider='vumi',
transport_type=self.transport_type,
transport_metadata={'transport_message_id': values['ID']},
)
yield self.finish_request(
message_id, json.dumps({'message_id': message_id}))
| TouK/vumi | vumi/transports/cellulant/cellulant_sms.py | Python | bsd-3-clause | 3,968 | 0.000756 |
##! /usr/bin/env python
# _*_ coding: latin-1 _*_
import jtutil
import jtsocket
import jtdom
def jtexec(cmd):
jtsocket.send("<jtexec>"+jtutil.cdataif(cmd)+"</jtexec>")
# <exitvalue>value</exitvalue>
    # or
# <execerror>error</execerror>
while 1:
rsp=jtsocket.recv()
if rsp==None:
return None
dom=jtdom.domparse(rsp)
node=jtdom.domfirst(dom)
type=jtdom.domname(node)
value=jtdom.domtext(node)
if type=="execerror":
raise jtutil.applicationerror, ("jtexec", "failed", value)
elif type=="exitvalue":
return value
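# Example (sketch): run a shell command through the jt socket and read back its
# exit value as a string; jtexec raises applicationerror if the server reports <execerror>.
#
#   status = jtexec("ls -l")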
| mrev11/ccc3 | jt/jtpython/jtlib/jtexec.py | Python | lgpl-2.1 | 653 | 0.027565 |
#!/usr/bin/env python
# Lucas Walter
# Configure a CameraInfo from a dynamic reconfigure interface
import rospy
from dynamic_reconfigure.server import Server
from sensor_msgs.msg import CameraInfo
from vimjay.cfg import DrCameraInfoConfig
class DrCameraInfo:
def __init__(self):
rospy.init_node('dr_camera_info')
self.camera_info = None
self.pub = rospy.Publisher("camera_info", CameraInfo, queue_size=1, latch=True)
self.server = Server(DrCameraInfoConfig, self.dr_callback)
# reset=True makes this node survive jumps back in time (why not make that the default?)
# https://github.com/ros-visualization/interactive_markers/pull/47/
# TODO(lucasw) make this update if the callback changes update rate
self.timer = rospy.Timer(rospy.Duration(0.2), self.update, reset=True)
def dr_callback(self, config, level):
ci = CameraInfo()
ci.header.frame_id = config['frame_id']
ci.width = config['width']
ci.height = config['height']
ci.distortion_model = config['distortion_model']
ci.D = [config['d0'], config['d1'], config['d2'], config['d3'], config['d4']]
ci.K[0 * 3 + 0] = config['fx']
ci.K[0 * 3 + 2] = config['cx']
ci.K[1 * 3 + 1] = config['fy']
ci.K[1 * 3 + 2] = config['cy']
ci.K[2 * 3 + 2] = 1
ci.P[0 * 4 + 0] = config['fx']
ci.P[0 * 4 + 2] = config['cx']
ci.P[1 * 4 + 1] = config['fy']
ci.P[1 * 4 + 2] = config['cy']
ci.P[2 * 4 + 2] = 1
ci.R[0] = 1
ci.R[4] = 1
ci.R[8] = 1
self.camera_info = ci
return config
def update(self, event):
self.camera_info.header.stamp = rospy.Time.now()
self.pub.publish(self.camera_info)
if __name__ == '__main__':
dr_camera_info = DrCameraInfo()
rospy.spin()
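# Example (sketch): once this node is running, the latched CameraInfo can be
# inspected and tuned with standard ROS tools (topic/node names as configured above):
#
#   rostopic echo camera_info
#   rosrun rqt_reconfigure rqt_reconfigure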
| lucasw/vimjay | scripts/dr_camera_info.py | Python | gpl-3.0 | 1,867 | 0.002142 |
import inspect
import logging
import os
import re
import shlex
import subprocess
import sys
import textwrap
import time
from datetime import datetime
from email.utils import formatdate as format_rfc2822
from io import StringIO
from urllib.parse import quote
import aiohttp
import discord
import psutil
import pytz
from asyncpg.exceptions import PostgresError
from dateutil import parser
from dateutil.tz import gettz
from discord.ext.commands import (BucketType, Group, clean_content)
from discord.ext.commands.errors import BadArgument
from bot.bot import command, cooldown, bot_has_permissions
from bot.converters import FuzzyRole, TzConverter, PossibleUser
from cogs.cog import Cog
from utils.tzinfo import fuzzy_tz, tz_dict
from utils.unzalgo import unzalgo, is_zalgo
from utils.utilities import (random_color, get_avatar, split_string,
get_emote_url, send_paged_message,
format_timedelta, parse_timeout,
DateAccuracy)
try:
from pip.commands import SearchCommand
except ImportError:
try:
from pip._internal.commands.search import SearchCommand
except (ImportError, TypeError):
SearchCommand = None
logger = logging.getLogger('terminal')
parserinfo = parser.parserinfo(dayfirst=True)
class Utilities(Cog):
def __init__(self, bot):
super().__init__(bot)
@command()
@cooldown(1, 10, BucketType.guild)
async def changelog(self, ctx, page: int=1):
sql = 'SELECT * FROM changelog ORDER BY time DESC'
rows = await self.bot.dbutil.fetch(sql)
def create_embed(row):
embed = discord.Embed(title='Changelog', description=row['changes'],
timestamp=row['time'])
return embed
def get_page(page, idx):
if not isinstance(page, discord.Embed):
page = create_embed(page)
page.set_footer(text=f'Page {idx+1}/{len(rows)}')
rows[idx] = page
return page
if page > 0:
page -= 1
elif page == 0:
page = 1
await send_paged_message(ctx, rows, True, page, get_page)
@command(aliases=['pong'])
@cooldown(1, 5, BucketType.guild)
async def ping(self, ctx):
"""Ping pong"""
t = time.perf_counter()
if ctx.received_at:
local_delay = t - ctx.received_at
else:
local_delay = datetime.utcnow().timestamp() - ctx.message.created_at.timestamp()
await ctx.trigger_typing()
t = time.perf_counter() - t
message = 'Pong!\n🏓 took {:.0f}ms\nLocal delay {:.0f}ms\nWebsocket ping {:.0f}ms'.format(t*1000, local_delay*1000, self.bot.latency*1000)
if hasattr(self.bot, 'pool'):
try:
_, sql_t = await self.bot.dbutil.fetch('SELECT 1', measure_time=True)
message += '\nDatabase ping {:.0f}ms'.format(sql_t * 1000)
except PostgresError:
message += '\nDatabase could not be reached'
await ctx.send(message)
@command(aliases=['e', 'emoji'])
@cooldown(1, 5, BucketType.channel)
async def emote(self, ctx, emote: str):
"""Get the link to an emote"""
emote = get_emote_url(emote)
if emote is None:
return await ctx.send('You need to specify an emote. Default (unicode) emotes are not supported ~~yet~~')
await ctx.send(emote)
@command(aliases=['roleping'])
@cooldown(1, 4, BucketType.channel)
async def how2role(self, ctx, *, role: FuzzyRole):
"""Searches a role and tells you how to ping it"""
name = role.name.replace('@', '@\u200b')
await ctx.send(f'`{role.mention}` {name}')
@command(aliases=['howtoping'])
@cooldown(1, 4, BucketType.channel)
async def how2ping(self, ctx, *, user):
"""Searches a user by their name and get the string you can use to ping them"""
if ctx.guild:
members = ctx.guild.members
else:
members = self.bot.get_all_members()
def filter_users(predicate):
for member in members:
if predicate(member):
return member
if member.nick and predicate(member.nick):
return member
if ctx.message.raw_role_mentions:
i = len(ctx.invoked_with) + len(ctx.prefix) + 1
user = ctx.message.clean_content[i:]
user = user[user.find('@')+1:]
found = filter_users(lambda u: str(u).startswith(user))
s = '`<@!{}>` {}'
if found:
return await ctx.send(s.format(found.id, str(found)))
found = filter_users(lambda u: user in str(u))
if found:
return await ctx.send(s.format(found.id, str(found)))
else:
return await ctx.send('No users found with %s' % user)
@command(aliases=['src', 'source_code', 'sauce'])
@cooldown(1, 5, BucketType.user)
async def source(self, ctx, *cmd):
"""Link to the source code for this bot
You can also get the source code of commands by doing {prefix}{name} cmd_name"""
if cmd:
full_name = ' '.join(cmd)
cmnd = self.bot.all_commands.get(cmd[0])
if cmnd is None:
raise BadArgument(f'Command "{full_name}" not found')
for c in cmd[1:]:
if not isinstance(cmnd, Group):
raise BadArgument(f'Command "{full_name}" not found')
cmnd = cmnd.get_command(c)
cmd = cmnd
if not cmd:
await ctx.send('You can find the source code for this bot here https://github.com/s0hv/Not-a-bot')
return
source, line_number = inspect.getsourcelines(cmd.callback)
filename = inspect.getsourcefile(cmd.callback).replace(os.getcwd(), '').strip('\\/')
# unformatted source
original_source = textwrap.dedent(''.join(source))
# Url pointing to the command in github
url = f'https://github.com/s0hv/Not-a-bot/tree/master/{filename}#L{line_number}'
# Source code in message
source = original_source.replace('```', '`\u200b`\u200b`') # Put zero width space between backticks so they can be within a codeblock
source = f'<{url}>\n```py\n{source}\n```'
if len(source) > 2000:
file = discord.File(StringIO(original_source), filename=f'{full_name}.py')
await ctx.send(f'Content was longer than 2000 ({len(source)} > 2000)\n<{url}>', file=file)
return
await ctx.send(source)
@command()
@cooldown(1, 5, BucketType.user)
async def undo(self, ctx):
"""
Undoes the last undoable command result. Not all messages will be undoable
and undoable messages override each other because only one message can be
undone.
"""
if not await ctx.undo():
await ctx.send('Failed to undo the latest undoable command for you.\n'
'Do note that they expire in one minute')
@command()
@cooldown(1, 10, BucketType.user)
async def invite(self, ctx):
"""This bots invite link"""
await ctx.send(f'<https://discordapp.com/api/oauth2/authorize?client_id={self.bot.user.id}&permissions=1342557248&scope=bot>')
@staticmethod
def _unpad_zero(value):
if not isinstance(value, str):
return
return value.lstrip('0')
@command(aliases=['bot', 'botinfo'])
@cooldown(2, 5, BucketType.user)
@bot_has_permissions(embed_links=True)
async def stats(self, ctx):
"""Get stats about this bot"""
pid = os.getpid()
process = psutil.Process(pid)
uptime = time.time() - process.create_time()
d = datetime.utcfromtimestamp(uptime)
uptime = f'{d.day-1}d {d.hour}h {d.minute}m {d.second}s'
current_memory = round(process.memory_info().rss / 1048576, 2)
memory_usage = f' Current: {current_memory}MB'
if sys.platform == 'linux':
try:
# use pmap to find the memory usage of this process and turn it to megabytes
# Since shlex doesn't care about pipes | I have to do this
s1 = subprocess.Popen(shlex.split('pmap %s' % os.getpid()),
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
s2 = subprocess.Popen(
shlex.split(r'grep -Po "total +\K([0-9])+(?=K)"'),
stdin=s1.stdout, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
s1.stdin.close()
memory = round(int(s2.communicate()[0].decode('utf-8')) / 1024, 1)
usable_memory = str(memory) + 'MB'
memory_usage = f'{current_memory}MB/{usable_memory} ({(current_memory/memory*100):.1f}%)'
except:
logger.exception('Failed to get extended mem usage')
raise
users = 0
for _ in self.bot.get_all_members():
users += 1
guilds = len(self.bot.guilds)
try:
# Get the last time the bot was updated
last_updated = format_rfc2822(os.stat('.git/refs/heads/master').st_mtime, localtime=True)
except OSError:
logger.exception('Failed to get last updated')
last_updated = 'N/A'
sql = 'SELECT * FROM command_stats ORDER BY uses DESC LIMIT 3'
try:
rows = await self.bot.dbutil.fetch(sql)
except PostgresError:
logger.exception('Failed to get command stats')
top_cmd = 'Failed to get command stats'
else:
top_cmd = ''
i = 1
for row in rows:
name = row['parent']
cmd = row['cmd']
if cmd:
name += ' ' + cmd
top_cmd += f'{i}. `{name}` with {row["uses"]} uses\n'
i += 1
embed = discord.Embed(title='Stats', colour=random_color())
embed.add_field(name='discord.py version', value=f"{discord.__version__}")
embed.add_field(name='Uptime', value=uptime)
if ctx.guild and ctx.guild.shard_id is not None:
embed.add_field(name='Shard', value=ctx.guild.shard_id)
embed.add_field(name='Servers', value=str(guilds))
embed.add_field(name='Users', value=str(users))
embed.add_field(name='Memory usage', value=memory_usage)
embed.add_field(name='Last updated', value=last_updated)
embed.add_field(name='Most used commands', value=top_cmd)
embed.set_thumbnail(url=get_avatar(self.bot.user))
embed.set_author(name=self.bot.user.name, icon_url=get_avatar(self.bot.user))
await ctx.send(embed=embed)
@command(name='roles', no_pm=True)
@cooldown(1, 10, type=BucketType.guild)
async def get_roles(self, ctx, page=''):
"""Get roles on this server"""
guild_roles = sorted(ctx.guild.roles, key=lambda r: r.name)
idx = 0
if page:
try:
idx = int(page) - 1
if idx < 0:
return await ctx.send('Index must be bigger than 0')
except ValueError:
return await ctx.send('%s is not a valid integer' % page, delete_after=30)
roles = 'A total of %s roles\n' % len(guild_roles)
for role in guild_roles:
roles += '{}: {}\n'.format(role.name, role.mention)
roles = split_string(roles, splitter='\n', maxlen=1000)
await send_paged_message(ctx, roles, starting_idx=idx,
page_method=lambda p, i: '```{}```'.format(p))
@command(aliases=['created_at', 'snowflake', 'snoflake'])
@cooldown(1, 5, type=BucketType.guild)
async def snowflake_time(self, ctx, id: int):
"""Gets creation date from the specified discord id in UTC"""
try:
int(id)
except ValueError:
return await ctx.send("{} isn't a valid integer".format(id))
await ctx.send(str(discord.utils.snowflake_time(id)))
@command()
@cooldown(1, 5, BucketType.user)
async def birthday(self, ctx, *, user: clean_content):
url = 'http://itsyourbirthday.today/#' + quote(user)
await ctx.send(url)
@command(name='unzalgo')
@cooldown(2, 5, BucketType.guild)
async def unzalgo_(self, ctx, *, text=None):
"""Unzalgo text
        If text is not specified, the last 100 cached messages in this channel
        are searched and the first zalgo text found is unzalgo'd"""
if text is None:
messages = self.bot._connection._messages
for i in range(-1, -100, -1):
try:
msg = messages[i]
except IndexError:
break
if msg.channel.id != ctx.channel.id:
continue
if is_zalgo(msg.content):
text = msg.content
break
if text is None:
await ctx.send("Didn't find a zalgo message")
return
await ctx.send(unzalgo(text))
@command()
@cooldown(1, 120, BucketType.user)
async def feedback(self, ctx, *, feedback):
"""
        Send feedback about the bot.
Bug reports should go to https://github.com/s0hvaperuna/Not-a-bot/issues
"""
webhook = self.bot.config.feedback_webhook
if not webhook:
return await ctx.send('This command is unavailable atm')
e = discord.Embed(title='Feedback', description=feedback)
author = ctx.author
avatar = get_avatar(author)
e.set_thumbnail(url=avatar)
e.set_footer(text=str(author), icon_url=avatar)
e.add_field(name='Guild', value=f'{ctx.guild.id}\n{ctx.guild.name}')
json = {'embeds': [e.to_dict()],
'avatar_url': avatar,
'username': ctx.author.name,
'wait': True}
headers = {'Content-type': 'application/json'}
success = False
try:
r = await self.bot.aiohttp_client.post(webhook, json=json, headers=headers)
except aiohttp.ClientError:
logger.exception('')
else:
status = str(r.status)
# Accept 2xx status codes
if status.startswith('2'):
success = True
if success:
await ctx.send('Feedback sent')
else:
await ctx.send('Failed to send feedback')
@command(aliases=['bug'])
@cooldown(1, 10, BucketType.user)
async def bugreport(self, ctx):
"""For reporting bugs"""
await ctx.send('If you have noticed a bug in my bot report it here https://github.com/s0hv/Not-a-bot/issues\n'
f"If you don't have a github account or are just too lazy you can use {ctx.prefix}feedback for reporting as well")
    @command(ignore_extra=True)
@cooldown(1, 10, BucketType.guild)
async def vote(self, ctx):
"""Pls vote thx"""
await ctx.send('https://top.gg/bot/214724376669585409/vote')
@command(aliases=['sellout'])
@cooldown(1, 10)
async def donate(self, ctx):
"""
Bot is not free to host. Donations go straight to server costs
"""
await ctx.send('If you want to support bot in server costs donate to https://www.paypal.me/s0hvaperuna\n'
'Alternatively you can use my DigitalOcean referral link https://m.do.co/c/84da65db5e5b which will help out in server costs as well')
@staticmethod
def find_emoji(emojis, name):
for e in emojis:
if e.name.lower() == name:
return e
@command()
@cooldown(1, 5, BucketType.user)
async def emojify(self, ctx, *, text: str):
"""Turns your text without emotes to text with discord custom emotes
To blacklist words from emoji search use a quoted string at the
beginning of the command denoting those words
e.g. emojify "blacklisted words here" rest of the sentence"""
emojis = ctx.bot.emojis
new_text = ''
word_blacklist = None
# Very simple method to parse word blacklist
if text.startswith('"'):
idx = text.find('"', 1) # Find second quote idx
word_blacklist = text[1:idx]
if word_blacklist:
text = text[idx+1:]
word_blacklist = [s.lower().strip(',.') for s in word_blacklist.split(' ')]
emoji_cache = {}
lines = text.split('\n')
for line in lines:
for s in line.split(' '):
es = s.lower().strip(',.:')
# We don't want to look for emotes that are only a couple characters long
if len(s) <= 3 or (word_blacklist and es in word_blacklist):
new_text += s + ' '
continue
e = emoji_cache.get(es)
if not e:
e = self.find_emoji(emojis, es)
if e is None:
e = s
else:
e = str(e)
emoji_cache[es] = e
new_text += e + ' '
new_text += '\n'
await ctx.send(new_text[:2000], undoable=True)
@command(name='pip')
@cooldown(1, 5, BucketType.channel)
@bot_has_permissions(embed_links=True)
async def get_package(self, ctx, *, name):
"""Get a package from pypi"""
if SearchCommand is None:
return await ctx.send('Not supported')
def search():
try:
search_command = SearchCommand()
options, _ = search_command.parse_args([])
hits = search_command.search(name, options)
if hits:
return hits[0]
except:
logger.exception('Failed to search package from PyPi')
raise
hit = await self.bot.loop.run_in_executor(self.bot.threadpool, search)
if not hit:
return await ctx.send('No matches')
async with self.bot.aiohttp_client.get(f'https://pypi.org/pypi/{quote(hit["name"])}/json') as r:
if r.status != 200:
return await ctx.send(f'HTTP error {r.status}')
json = await r.json()
info = json['info']
description = info['description']
if len(description) > 1000:
description = split_string(description, splitter='\n', maxlen=1000)[0] + '...'
embed = discord.Embed(title=hit['name'],
description=description,
url=info["package_url"])
embed.add_field(name='Author', value=info['author'] or 'None')
embed.add_field(name='Version', value=info['version'] or 'None')
embed.add_field(name='License', value=info['license'] or 'None')
await ctx.send(embed=embed)
async def get_timezone(self, ctx, user_id: int):
tz = await self.bot.dbutil.get_timezone(user_id)
if tz:
try:
return await ctx.bot.loop.run_in_executor(ctx.bot.threadpool, pytz.timezone, tz)
except pytz.UnknownTimeZoneError:
pass
return pytz.FixedOffset(0)
@command(aliases=['tz'])
@cooldown(2, 7)
async def timezone(self, ctx, *, timezone: str=None):
"""
Set or view your timezone. If timezone isn't given shows your current timezone
If timezone is given sets your current timezone to that.
Summer time should be supported for any timezone that's not a plain utc offset.
Due to [technical reasons](https://en.wikipedia.org/wiki/Tz_database#Area)
the sign in gmt offsets is flipped. e.g. UTC+5 offset is GMT-5
Examples:
• `{prefix}{name} utc+4`
• `{prefix}{name} London`
• `{prefix}{name} EST`
"""
user = ctx.author
if not timezone:
tz = await self.get_timezone(ctx, user.id)
s = tz.localize(datetime.utcnow()).strftime('Your current timezone is UTC %z')
await ctx.send(s)
return
tz = fuzzy_tz.get(timezone.lower())
# Extra info to be sent
extra = ''
if not tz:
tz = tz_dict.get(timezone.upper())
if tz:
tz = fuzzy_tz.get(f'utc{int(tz)//3600:+d}')
if not tz:
await ctx.send(f'Timezone {timezone} not found')
ctx.command.undo_use(ctx)
return
if tz.startswith('Etc/GMT'):
extra = "UTC offset used. Consider using a locality based timezone instead. " \
"You can set it usually by using your country's capital's name or your country's name as long as it has a single timezone\n" \
"The sign in the GMT timezone is flipped due to technical reasons."
if await self.bot.dbutil.set_timezone(user.id, tz):
await ctx.send(f'Timezone set to {tz}\n{extra}')
else:
await ctx.send('Failed to set timezone because of an error')
@command(name='timedelta', aliases=['td'],
usage="[duration or date] [timezones and users]")
@cooldown(1, 3, BucketType.user)
async def timedelta_(self, ctx, *, args=''):
"""
Get a date that is in the amount of duration given.
To get past dates start your duration with `-`
Time format is `1d 1h 1m 1s` where each one is optional.
When no time is given it is interpreted as 0 seconds.
You can also give a date and duration will be calculated as the time to that point in time.
Timezone will be user timezone by default but you can specify the date utc offset with e.g. UTC+3
If the date doesn't have spaces in it, put it inside quotes. In ambiguous 3-integer dates day is assumed to be first
e.g. `"14:00"`, `14:00 UTC+1`, `"Mon 14:00 UTC-3"`
You can also specify which timezones to use for comparison.
By default your own timezone is always put at the bottom (defaults to UTC).
Timezones can be just an integer determining the UTC offset in hours or
a city name or a country (Not all countries and cities are accepted as input).
Remember to use quotes if the city name contains spaces.
You can also give users and their timezone is used if they've set it
Max given timezones is 5.
Examples
`{prefix}{name} 1h ny`
`{prefix}{name} "Mon 22:00 UTC-3"`
`{prefix}{name} "Jan 4th 10:00" @user berlin`
"""
addition = True
if args.startswith('-'):
addition = False
args = args[1:]
duration, timezones = parse_timeout(args)
# Used to guess if time with quotes might've been given
# This way we can give the correct portion of the string to dateutil parser
quote_start = timezones.startswith('"')
user_tz = await self.get_timezone(ctx, ctx.author.id)
timezones = shlex.split(timezones)
if not duration and timezones:
try:
if quote_start:
t = timezones[0]
else:
t = ' '.join(timezones[:2])
def get_date():
def get_tz(name, offset):
# If name specified get by name
if name:
found_tz = tz_dict.get(name)
if not found_tz:
# Default value cannot be None or empty string
found_tz = gettz(fuzzy_tz.get(name.lower(), 'a'))
return found_tz
# if offset specified get by utc offset and reverse it
# because https://stackoverflow.com/questions/53076575/time-zones-etc-gmt-why-it-is-other-way-round
elif offset:
return offset*-1
return parser.parse(t.upper(), tzinfos=get_tz, parserinfo=parserinfo)
date = await self.bot.run_async(get_date)
if not date.tzinfo:
duration = user_tz.localize(date) - datetime.now(user_tz)
else:
# UTC timezones are inverted in dateutil UTC+3 gives UTC-3
tz = pytz.FixedOffset(date.tzinfo.utcoffset(datetime.utcnow()).total_seconds()//60)
duration = date.replace(tzinfo=tz) - datetime.now(user_tz)
addition = duration.days >= 0
if not addition:
duration *= -1
if quote_start:
timezones = timezones[1:]
else:
timezones = timezones[2:]
except (ValueError, OverflowError):
pass
if len(timezones) > 5:
await ctx.send('Over 5 timezones given. Give fewer timezones (Use quotes if a tz has spaces)')
return
async def add_time(dt):
try:
if addition:
return dt + duration
else:
return dt - duration
except OverflowError:
await ctx.send('Failed to get new date because of an Overflow error. Try giving a smaller duration')
tz_converter = TzConverter()
user_converter = PossibleUser()
s = ''
for timezone in timezones:
try:
tz = await tz_converter.convert(ctx, timezone)
except BadArgument as e:
try:
user = await user_converter.convert(ctx, timezone)
if isinstance(user, int):
tz = await self.get_timezone(ctx, user)
else:
tz = await self.get_timezone(ctx, user.id)
except BadArgument:
raise e
dt = await add_time(datetime.now(tz))
if not dt:
return
s += f'`{dt.strftime("%Y-%m-%d %H:%M UTC%z")}` `{tz.zone}`\n'
dt = await add_time(datetime.now(user_tz))
if not dt:
return
s += f'`{dt.strftime("%Y-%m-%d %H:%M UTC%z")}` `{user_tz.zone}`\n'
td = format_timedelta(duration, accuracy=DateAccuracy.Day-DateAccuracy.Minute)
if addition:
s += f'which is in {td}'
else:
s += f'which was {td} ago'
await ctx.send(s)
@command(aliases=['st'])
@cooldown(1, 4, BucketType.user)
async def sort_tags(self, ctx, tagname, *, tags):
"""Gets missing tag indexes from a 42 bot tag search.
The first tagname must be the one that is gonna be looked for"""
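        # e.g. if the search returned the tags foo, foo2 and foo5, this reports
        # "Missing tag numbers for foo are 1, 3, 4, 6-"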
tagname = tagname.rstrip(',')
tags = tags.split(', ')
match = re.match(r'(.+?)(\d+)', tagname)
numbers = set()
if match:
tagname, number = match.groups()
numbers.add(int(number))
else:
numbers.add(0)
tagname = tagname.lstrip('\u200b')
tl = len(tagname)
for tag in tags:
if tag.endswith('...'):
continue
if tagname not in tag:
continue
if tagname == tag:
numbers.add(0)
continue
try:
# Ignore long numbers
n = tag[tl:]
if len(n) > 4:
continue
numbers.add(int(n))
except ValueError:
continue
if not numbers:
await ctx.send(f'No other numbered tags found for {tagname}')
return
numbers = list(sorted(numbers))
last = numbers[0]
if last > 2:
s = f'-{last - 1}, '
else:
s = ''
for i in numbers[1:]:
delta = i - last
            if delta >= 4:
s += f'{last + 1}-{i - 1}, '
elif delta == 3:
s += f'{last + 1}, {i - 1}, '
elif delta == 2:
s += f'{i - 1}, '
last = i
s += f'{last+1}-'
await ctx.send(f'Missing tag numbers for {tagname} are {s}')
def setup(bot):
bot.add_cog(Utilities(bot))
| s0hvaperuna/Not-a-bot | cogs/utils.py | Python | mit | 28,689 | 0.002162 |
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import os
import time
import gzip
import tempfile
import ConfigParser
from cStringIO import StringIO
from datetime import datetime
from ellen.repo import Jagare
from ellen.utils import JagareError
from vilya.libs.permdir import get_tmpdir
from vilya.models.user import User
from vilya.models.ngit.commit import Commit
from vilya.models.ngit.diff import Diff
from vilya.models.ngit.blob import Blob
from vilya.models.ngit.submodule import Submodule
from vilya.models.ngit.tree import Tree
from vilya.models.ngit.blame import Blame
LATEST_UPDATE_REF_THRESHOLD = 60 * 60 * 24
MAX_DIFF_PATCHES = 2000
REFS_HEADS_PREFIX_LENGTH = len('refs/heads/')
class RepoMergeError(Exception):
pass
class RepoPushError(Exception):
pass
class Repo(object):
provided_features = []
def __init__(self, path):
self.type = "repo"
self.path = path
self.repo = Jagare(self.path)
def provide(self, name):
        '''Check whether a given feature (i.e. interface) is provided'''
return name in self.provided_features
@property
def empty(self):
return self.is_empty
@property
def is_empty(self):
return self.repo.empty
@property
def default_branch(self):
branch = ''
head = self.repo.head
if head:
branch = head.name[REFS_HEADS_PREFIX_LENGTH:]
return branch
def update_default_branch(self, name):
branches = self.repo.branches
if name not in branches:
return None
self.repo.update_head(name)
def clone(self, path, bare=None, branch=None,
mirror=None, env=None, shared=None):
self.repo.clone(path,
bare=bare, branch=branch,
mirror=mirror, env=env)
# shared=shared) why?
def archive(self, name, ref='master', ext='tar.gz'):
content = self.repo.archive(name, ref=ref)
if ext == 'tar':
return content
outbuffer = StringIO()
zipfile = gzip.GzipFile(mode='wb', compresslevel=6, fileobj=outbuffer)
zipfile.write(content)
zipfile.close()
out = outbuffer.getvalue()
return out
def get_submodule(self, ref, path):
path = path.strip()
gitmodules = self.repo.show("%s:%s" % (ref, '.gitmodules'))
if not gitmodules:
return None
submodules_lines = gitmodules["data"].split('\n')
modules_str = '\n'.join([line.strip() for line in submodules_lines])
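        # A .gitmodules section looks like (values illustrative):
        #   [submodule "libs/foo"]
        #       path = libs/foo
        #       url = git://example.com/foo.git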
config = ConfigParser.RawConfigParser()
config.readfp(StringIO(modules_str))
for section in config.sections():
if config.has_option(section, 'path') and config.get(
section, 'path') == path:
url = config.get(section, 'url')
return Submodule(url, path)
return None
def get_file(self, ref, path):
blob = self.repo.show("%s:%s" % (ref, path))
if not blob:
return None
if blob['type'] != 'blob':
return None
# TODO: validate blob
return Blob(self, blob)
def get_file_by_lines(self, ref, path):
blob = self.get_file(ref, path)
# TODO: blob.size < xxx
if not blob or blob.binary:
return None
if not blob.data:
return []
src = blob.data
return src.splitlines()
def get_file_n_lines(self, ref, path):
lines = self.get_file_by_lines(ref, path)
if lines:
return len(lines)
return 0
def get_commits(self, to_ref, from_ref=None, path=None, skip=0,
max_count=0, author=None, query=None, first_parent=None,
since=0, no_merges=None):
commits = self.repo.rev_list(to_ref=to_ref, from_ref=from_ref,
path=path, skip=skip,
max_count=max_count, author=author,
query=query, first_parent=first_parent,
since=since, no_merges=no_merges)
return [Commit(self, commit) for commit in commits]
def get_raw_diff(self, ref, from_ref=None, paths=None, **kw):
''' get Jagare formated diff dict '''
try:
diff = self.repo.diff(ref, from_ref=from_ref, paths=paths, **kw)
except KeyError:
return None
return diff
def get_diff(self, ref=None, from_ref=None,
linecomments=[], raw_diff=None, paths=None, **kw):
''' get ngit wrapped diff object '''
_raw_diff = None
if raw_diff:
_raw_diff = raw_diff
elif ref:
_raw_diff = self.get_raw_diff(ref, from_ref=from_ref,
paths=paths, **kw)
if _raw_diff:
return Diff(self, _raw_diff, linecomments)
else:
return None
def get_diff_length(self, ref, from_ref=None, **kw):
_raw_diff = self.get_raw_diff(ref, from_ref=from_ref, **kw)
return len(_raw_diff['patches']) if _raw_diff else 0
def get_last_commit(self, ref, path=None, no_merges=False):
if not path:
return self.get_commit(ref)
commit = self.repo.rev_list(ref, path=path, max_count=1,
no_merges=no_merges)
if not commit:
return None
commit = commit[0]
commit = Commit(self, commit)
return commit
def get_previours_commit(self, ref, path):
"""previours commit that touch the specified path"""
commits = self.repo.rev_list(ref, path=path, max_count=2,
no_merges=True)
for commit in commits:
if commit['sha'] != self.repo.sha(ref):
return Commit(self, commit)
return None
def get_commit(self, ref):
sha = self.repo.resolve_commit(ref)
if not sha:
return None
commit = self.repo.show(sha)
if not commit:
return None
# TODO: validate commit
return Commit(self, commit)
def delete_branch(self, name):
self.repo.delete_branch(name)
def get_path_by_ref(self, ref):
''' get blob or tree '''
path = self.repo.show(ref)
if not path:
return None
if path['type'] == 'tree':
path = Tree(self, path['entries'])
elif path['type'] == 'blob':
path = Blob(self, path)
else:
path = None
return path
def get_path(self, ref, path):
_item = self.repo.show("%s:%s" % (ref, path))
if not _item:
return None
if _item['type'] == 'tree':
item = Tree(self, _item['entries'])
elif _item['type'] == 'blob':
item = Blob(self, _item)
else:
item = None
return item
def get_last_update_timestamp(self):
commit = self.get_last_commit('HEAD')
if not commit:
return 0
return int(commit.author_timestamp)
class ProjectRepo(Repo):
provided_features = ['project', 'fulltext', 'moreline',
'side_by_side', 'patch_actions']
def __init__(self, project, pull=None):
self.type = "project"
self.pull = pull
self.project = project
self.project_name = project.name
self.name = project.name
self.path = project.repo_path
self.repo = Jagare(self.path)
# TODO: url
@property
def api_url(self):
return ''
@property
def context_url(self):
return 'moreline'
@property
def fulltext_url(self):
return 'fulltext'
@property
def branches(self):
return self.repo.branches
@property
def tags(self):
return self.repo.tags
def get_tree(self, ref, path=None, recursive=False, with_commit=False,
recursive_with_tree_node=False):
tree = self.repo.ls_tree(
ref, path=path, recursive=recursive,
with_commit=with_commit)
# recursive_with_tree_node=recursive_with_tree_node)
if not tree:
return None
return Tree(self, tree)
def get_file_by_ref(self, ref):
blob = self.repo.show(ref)
if not blob:
return None
return blob['data']
def get_contexts(self, ref, path, line_start, line_end):
def fix_line_index(index, max_i, min_i=0):
i = index - 1
i = max(i, min_i)
i = min(i, max_i)
return i
lines = self.get_file_by_lines(ref, path)
if not lines:
return None
n = len(lines)
start = fix_line_index(line_start, n)
end = fix_line_index(line_end, n)
return lines[start:end]
def blame_file(self, *w, **kw):
blame = self.repo.blame(*w, **kw)
if not blame:
return None
return Blame(self, blame)
def get_renamed_files(self, ref, path=None):
return self.repo.detect_renamed(ref)
def commit_file(self, *w, **kw):
return self.repo.commit_file(*w, **kw)
def get_temp_branch(self):
commit = self.get_commit('HEAD')
        return 'patch_tmp' + time.strftime('%Y%m%d%H%M%S-') + commit.sha[:10]
def get_patch_file(self, ref, from_ref=None):
return self.repo.format_patch(ref, from_ref)
def get_diff_file(self, ref, from_ref=None):
_raw_diff = self.get_raw_diff(ref, from_ref)
if not _raw_diff:
return ''
patch = _raw_diff['diff'].patch
if not patch:
return ''
return patch
@classmethod
def init(cls, path, work_path=None, bare=True):
return Jagare.init(path, work_path=work_path, bare=bare)
@classmethod
def mirror(cls, url, path, env=None):
Jagare.mirror(url, path, env=env)
def add_remote(self, name, url):
return self.repo.add_remote(name, url)
def add_remote_hub(self, name, url):
self.add_remote('hub/%s' % name, url)
def update_ref(self, ref, value):
result = None
try:
result = self.repo.update_ref(ref, value)
except JagareError:
# FIXME: logging
# FIXME: more meaningful error (JagareError)
pass
return result
def sha(self, rev='HEAD'):
return self.repo.sha(rev)
def merge_base(self, to_sha, from_sha):
return self.repo.merge_base(to_sha, from_sha)
@property
def remotes(self):
return self.repo.remotes
def fetch_all(self):
self.repo.fetch_all()
def fetch(self, name):
self.repo.fetch(name)
def fetch_(self, *w, **kw):
return self.repo.fetch_(*w, **kw)
def get_latest_update_branches(self):
refs = self.repo.listall_references()
refs = filter(lambda r: r.startswith('refs/heads'), refs)
current_time = time.time()
latest_branches = []
for ref in refs:
commit_time = self.repo.lookup_reference(ref).get_object().commit_time # noqa
delta = current_time - commit_time
if delta < LATEST_UPDATE_REF_THRESHOLD:
latest_branches.append((commit_time, ref.split('/')[-1]))
return sorted(latest_branches, key=lambda r: r[0], reverse=True)
def get_all_src_objects(self):
refs = self.repo.listall_references()
refs = filter(lambda r: r.startswith('refs/heads'), refs)
commits_dict = {}
for ref in refs:
commits = self.repo.rev_list(ref)
commits = {c['sha']: c for c in commits}
commits_dict.update(commits)
commits = sorted(commits_dict.values(),
key=lambda x: x['committer']['time'],
reverse=True)
pruned_set = set()
objects_dict = {}
treenode_list = [(commit['sha'], commit['tree'], '')
for commit in commits]
while treenode_list:
commit_id, tree_id, path = treenode_list.pop()
if tree_id in pruned_set:
continue
pruned_set.add(tree_id)
objects = self.repo.ls_tree(tree_id, size=True)
for obj in objects:
obj_id = obj['id']
obj_path = '%s/%s' % (path, obj['name'])
if obj['type'] == 'tree':
treenode_list.append((commit_id, obj_id, obj_path))
elif obj['type'] == 'blob':
if obj_id not in objects_dict:
commit = commits_dict[commit_id]
objects_dict[obj_id] = dict(
path=obj_path[1:],
commit=commit_id,
size=obj['size'],
commit_time=datetime.fromtimestamp(
commit['committer']['time']),
committer=commit['committer']['name']
)
return objects_dict
class GistRepo(Repo):
provided_features = []
# TODO: move to utils
PREFIX = 'gistfile'
def __init__(self, gist):
self.type = "gist"
self.gist = gist
self.name = gist.name
self.path = gist.repo_path
self.repo = Jagare(gist.repo_path)
@classmethod
def init(cls, gist):
Jagare.init(gist.repo_path, bare=True)
def clone(self, gist):
super(GistRepo, self).clone(gist.repo_path, bare=True)
def get_files(self):
files = []
if self.empty:
return files
tree = self.repo.ls_tree('HEAD')
for f in tree:
files.append([f['sha'], f['name']])
return files
# TODO: move to utils
def check_filename(self, fn):
for c in (' ', '<', '>', '|', ';', ':', '&', '`', "'"):
fn = fn.replace(c, '\%s' % c)
fn = fn.replace('/', '')
return fn
def commit_all_files(self, names, contents, oids, author):
data = []
for i, (name, content, oid) in enumerate(zip(names, contents, oids),
start=1):
if not name and not content:
continue
if not name:
name = self.PREFIX + str(i)
name = self.check_filename(name)
data.append([name, content, 'insert'])
files = self.get_files()
for sha, name in files:
if name in names:
continue
data.append([name, '', 'remove'])
self.repo.commit_file(branch='master',
parent='master',
author_name=author.name,
author_email=author.email,
message=' ',
reflog=' ',
data=data)
def is_commit(self, ref):
commit = self.repo.show(ref)
if commit:
return True
class PullRepo(ProjectRepo):
provided_features = ProjectRepo.provided_features + ['show_inline_toggle']
def __init__(self, pull):
# TODO: When to_proj or from_proj not exist?
# TODO: catch exception if from_proj was deleted
super(PullRepo, self).__init__(pull.to_proj, pull)
self.type = "pull"
self.from_repo = None
try:
if pull.from_proj:
self.from_repo = ProjectRepo(pull.from_proj, pull)
except JagareError:
self.from_repo = None
self._temp_dir = None
# no use
#self.merge_repo = None
#self.test_repo = None
    # TODO: unify url
@property
def api_url(self):
project_name = self.project.name
ticket_id = self.pull.ticket_id
        # FIXME: pull/new has no ticket
if not ticket_id:
return '/api/%s/diff/' % project_name
url = "/api/%s/pulls/%s/" % (project_name, ticket_id)
return url
@property
def context_url(self):
project_name = self.project.name
ticket_id = self.pull.ticket_id
if not ticket_id:
return '/api/%s/diff/moreline' % project_name
url = "/api/%s/pulls/%s/moreline" % (project_name, ticket_id)
return url
@property
def fulltext_url(self):
project_name = self.project.name
ticket_id = self.pull.ticket_id
        # FIXME: pull/new has no ticket
if not ticket_id:
return '/api/%s/diff/fulltext' % project_name
url = "/api/%s/pulls/%s/fulltext" % (project_name, ticket_id)
return url
@property
def temp_dir(self):
if self._temp_dir:
return self._temp_dir
# TODO: move to Jagare
pulltmp = os.path.join(get_tmpdir(), "pulltmp")
if not os.path.exists(pulltmp):
os.makedirs(pulltmp)
worktree = tempfile.mkdtemp(dir=pulltmp)
self._temp_dir = worktree
return worktree
def init(self):
import os
path = os.path.join(self.temp_dir, '.git')
work_path = self.temp_dir
return Jagare.init(path, work_path=work_path, bare=False)
@property
def from_local(self):
return self.pull.to_proj == self.pull.from_proj
@property
def from_sha(self):
sha = None
ticket_id = self.pull.ticket_id
if ticket_id:
from vilya.models.consts import PULL_REF_H
# FIXME: catch more exceptions
try:
sha = self.sha(PULL_REF_H % ticket_id)
except:
                # old PRs that were closed but never merged may fail here
pass
if not sha and self.from_repo:
sha = self.from_repo.sha(self.pull.from_ref)
return sha
@property
def to_sha(self):
sha = None
ticket_id = self.pull.ticket_id
if ticket_id:
from vilya.models.consts import PULL_REF_M
# FIXME: catch more exceptions
try:
sha = self.sha(PULL_REF_M % ticket_id)
except:
                # old PRs that were closed but never merged may fail here
pass
if not sha:
sha = self.sha(self.pull.to_ref)
return sha
def merge(self, merger, message_header, message_body):
import shutil
from vilya.models.git import make_git_env
# TODO: Use User only
if merger and isinstance(merger, basestring):
merger = User(merger)
if not isinstance(merger, User):
raise Exception("User is needed to merge pull")
env = make_git_env(merger)
worktree = self.temp_dir
merge_commit_sha = None
try:
if self.pull.is_up_to_date():
return ''
from_sha = self.from_sha
to_sha = self.to_sha
repo = self.pull.pull_clone(worktree)
ref = self.pull.pull_fetch(repo)
result = repo.merge(ref, message_header, message_body, no_ff=True,
_env=env)
errcode = result['returncode']
if errcode != 0:
raise RepoMergeError()
result = repo.push('origin', self.pull.to_ref,
_env=dict(CODE_REMOTE_USER=merger.name))
errcode = result['returncode']
if errcode != 0:
raise RepoPushError
merge_commit_sha = self.sha(self.pull.to_ref)
except RepoMergeError:
# FIXME: error msg
pass
except RepoPushError:
# FIXME: error msg
pass
else:
if merge_commit_sha and self.pull.ticket:
self.pull._save_merged(merger.name,
from_sha,
to_sha,
merge_commit_sha)
finally:
shutil.rmtree(worktree)
return merge_commit_sha
def can_merge(self):
import os
import shutil
worktree = self.temp_dir
try:
self.clone(worktree, branch=self.pull.to_ref,
bare=False, shared=True)
repo = ProjectRepo.init(
os.path.join(worktree, '.git'), worktree, bare=False)
ref = self.pull.pull_fetch(repo)
result = repo.merge_commits(self.pull.to_ref, ref)
except KeyError: # dummy result
result = {}
finally:
shutil.rmtree(worktree)
if result.get('has_conflicts', None) is False:
return True
else:
return False
def can_fastforward(self):
if not self.get_commits(self.to_sha, self.from_sha):
return True
def backport_project_name(name):
return name.replace('~', '_')
| douban/code | vilya/models/ngit/repo.py | Python | bsd-3-clause | 21,073 | 0.000238 |
import os
from osmnames.export_osmnames.export_osmnames import export_housenumbers, create_views
def test_tsv_get_created(session, tables):
session.add(
tables.osm_housenumber(
osm_id=1,
)
)
create_views()
export_housenumbers()
assert os.path.exists('/tmp/osmnames/export/test_housenumbers.tsv')
| philippks/OSMNames | tests/export_osmnames/test_export_housenumbers.py | Python | gpl-2.0 | 365 | 0.00274 |
"""Webhooks for external integrations."""
from __future__ import absolute_import
from django.http import HttpRequest, HttpResponse
from django.utils.translation import ugettext as _
from zerver.models import get_client, UserProfile
from zerver.lib.actions import check_send_message
from zerver.lib.response import json_success, json_error
from zerver.lib.notifications import convert_html_to_markdown
from zerver.decorator import REQ, has_request_variables, authenticated_rest_api_view
import logging
import ujson
from typing import Any, Dict, List, Optional, Tuple, Union, Text
class TicketDict(dict):
"""
A helper class to turn a dictionary with ticket information into
an object where each of the keys is an attribute for easy access.
"""
def __getattr__(self, field):
# type: (str) -> Any
if "_" in field:
return self.get(field)
else:
return self.get("ticket_" + field)
def property_name(property, index):
# type: (str, int) -> str
"""The Freshdesk API is currently pretty broken: statuses are customizable
but the API will only tell you the number associated with the status, not
the name. While we engage the Freshdesk developers about exposing this
information through the API, since only FlightCar uses this integration,
hardcode their statuses.
"""
statuses = ["", "", "Open", "Pending", "Resolved", "Closed",
"Waiting on Customer", "Job Application", "Monthly"]
priorities = ["", "Low", "Medium", "High", "Urgent"]
if property == "status":
return statuses[index] if index < len(statuses) else str(index)
elif property == "priority":
return priorities[index] if index < len(priorities) else str(index)
else:
raise ValueError("Unknown property")
def parse_freshdesk_event(event_string):
# type: (str) -> List[str]
"""These are always of the form "{ticket_action:created}" or
"{status:{from:4,to:6}}". Note the lack of string quoting: this isn't
valid JSON so we have to parse it ourselves.
"""
data = event_string.replace("{", "").replace("}", "").replace(",", ":").split(":")
if len(data) == 2:
# This is a simple ticket action event, like
# {ticket_action:created}.
return data
else:
# This is a property change event, like {status:{from:4,to:6}}. Pull out
# the property, from, and to states.
property, _, from_state, _, to_state = data
return [property, property_name(property, int(from_state)),
property_name(property, int(to_state))]
def format_freshdesk_note_message(ticket, event_info):
# type: (TicketDict, List[str]) -> str
"""There are public (visible to customers) and private note types."""
note_type = event_info[1]
content = "%s <%s> added a %s note to [ticket #%s](%s)." % (
ticket.requester_name, ticket.requester_email, note_type,
ticket.id, ticket.url)
return content
def format_freshdesk_property_change_message(ticket, event_info):
# type: (TicketDict, List[str]) -> str
"""Freshdesk will only tell us the first event to match our webhook
configuration, so if we change multiple properties, we only get the before
and after data for the first one.
"""
content = "%s <%s> updated [ticket #%s](%s):\n\n" % (
ticket.requester_name, ticket.requester_email, ticket.id, ticket.url)
# Why not `"%s %s %s" % event_info`? Because the linter doesn't like it.
content += "%s: **%s** => **%s**" % (
event_info[0].capitalize(), event_info[1], event_info[2])
return content
def format_freshdesk_ticket_creation_message(ticket):
# type: (TicketDict) -> str
"""They send us the description as HTML."""
cleaned_description = convert_html_to_markdown(ticket.description)
content = "%s <%s> created [ticket #%s](%s):\n\n" % (
ticket.requester_name, ticket.requester_email, ticket.id, ticket.url)
content += """~~~ quote
%s
~~~\n
""" % (cleaned_description,)
content += "Type: **%s**\nPriority: **%s**\nStatus: **%s**" % (
ticket.type, ticket.priority, ticket.status)
return content
@authenticated_rest_api_view(is_webhook=True)
@has_request_variables
def api_freshdesk_webhook(request, user_profile, payload=REQ(argument_type='body'),
stream=REQ(default='freshdesk')):
# type: (HttpRequest, UserProfile, Dict[str, Any], Text) -> HttpResponse
ticket_data = payload["freshdesk_webhook"]
required_keys = [
"triggered_event", "ticket_id", "ticket_url", "ticket_type",
"ticket_subject", "ticket_description", "ticket_status",
"ticket_priority", "requester_name", "requester_email",
]
for key in required_keys:
if ticket_data.get(key) is None:
logging.warning("Freshdesk webhook error. Payload was:")
logging.warning(request.body)
return json_error(_("Missing key %s in JSON") % (key,))
ticket = TicketDict(ticket_data)
subject = "#%s: %s" % (ticket.id, ticket.subject)
try:
event_info = parse_freshdesk_event(ticket.triggered_event)
except ValueError:
return json_error(_("Malformed event %s") % (ticket.triggered_event,))
if event_info[1] == "created":
content = format_freshdesk_ticket_creation_message(ticket)
elif event_info[0] == "note_type":
content = format_freshdesk_note_message(ticket, event_info)
elif event_info[0] in ("status", "priority"):
content = format_freshdesk_property_change_message(ticket, event_info)
else:
        # Not an event we know how to handle; do nothing.
return json_success()
check_send_message(user_profile, get_client("ZulipFreshdeskWebhook"), "stream",
[stream], subject, content)
return json_success()
| sonali0901/zulip | zerver/webhooks/freshdesk/view.py | Python | apache-2.0 | 5,880 | 0.00102 |
import socket
import os
import threading
import time
import zipfile
import myPacket as mp
def getParentDirectory(path):
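    # e.g. getParentDirectory("chars/alice/data.txt") -> "chars/alice" (joined with os.sep)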
path2 = path.split('/')
temp=''
for ph in path2:
if(len(ph)>4 and (ph[len(ph)-4:] == '.txt')):
break
temp = os.path.join(temp, ph)
return temp
def checkCharVersion(sock, myChadir, friChadir):
text = str(getCharDataSize(myChadir))
print(text)
mp.sendPacket(sock, text.encode('utf8'))
data = mp.recvPacket(sock).decode('utf8')
if cmpCharVersion(getCharDataSize(friChadir), int(data)):
return True
return False
def cmpCharVersion(myDataSize = 0, hisDataSize = 0):
if myDataSize == hisDataSize:
return True
return False
def getCharDataSize(charDirectory):
temp = 0
for dirPath, dirNames, fileNames in os.walk(charDirectory):
for fileName in fileNames:
file = os.path.join(dirPath, fileName)
temp += os.path.getsize(file)
return temp
def updateCharacter(sock, friChadir, friendID, func):
fileName = friendID+'.zip'
with open(fileName, 'wb') as cfile:
while True:
data = mp.recvPacket(sock)
if data == b'EOF':
break
cfile.write(data)
func(0)
#win32gui.ShowWindow(self.hwnd, 0)
os.system('rd /S /Q ' + friChadir)
zf = zipfile.ZipFile(fileName)
zf.extractall(friChadir)
zf.close()
func(1)
#win32gui.ShowWindow(self.hwnd, 1)
os.remove(fileName)
def uploadCharacter(sock, myChadir):
sfileName = 'ArchiveName.zip'
zf = zipfile.ZipFile(sfileName,'w',zipfile.ZIP_DEFLATED)
for dirPath, dirNames, fileNames in os.walk(myChadir):
for fileName in fileNames:
file = os.path.join(dirPath, fileName)
zf.write(file, file[len(myChadir)+1:])
zf.close()
with open(sfileName, 'rb') as file:
while True:
data = file.read(4096)
if not data:
break
mp.sendPacket(sock, data)
time.sleep(1) # delete after send in fixed len
mp.sendPacket(sock, b'EOF')
os.remove(sfileName)
def updataIfNeed(sock, myChafile, friendID, func, callbackFunc = None):
firChafile = func(None)
friChadir = getParentDirectory(firChafile)
myChadir = getParentDirectory(myChafile)
try:
if not checkCharVersion(sock, myChadir, friChadir):
mp.sendPacket(sock, 'True'.encode('utf8'))
data = mp.recvPacket(sock).decode('utf8')
if data=='True':
myThread = threading.Thread(target=uploadCharacter, args=(sock, myChadir))
myThread.setDaemon(True)
myThread.start()
#uploadCharacter(sock, myChadir)
updateCharacter(sock, friChadir, friendID, func)
if data=='True':
myThread.join( )
else:
mp.sendPacket(sock, 'False'.encode('utf8'))
data = mp.recvPacket(sock).decode()
if data == 'True':
uploadCharacter(sock, myChadir)
except:
pass
finally:
sock.close()
if callbackFunc != None:
callbackFunc()
#thread = threading.Thread(target=self.listen_to_chat_messagesInThread)
#thread.setDaemon(True)
#thread.start()
#self.connected = True
| misterlihao/network-programming-project | synchronizationRole.py | Python | mit | 3,395 | 0.010898 |
import warnings
from django.core.exceptions import ImproperlyConfigured
from django.utils import lru_cache, six
from django.utils.deprecation import RemovedInDjango110Warning
from django.utils.functional import cached_property
from django.utils.module_loading import import_string
from .base import Context, Template
from .context import _builtin_context_processors
from .exceptions import TemplateDoesNotExist
from .library import import_library
_context_instance_undefined = object()
_dictionary_undefined = object()
_dirs_undefined = object()
class Engine(object):
default_builtins = [
'django.template.defaulttags',
'django.template.defaultfilters',
'django.template.loader_tags',
]
def __init__(self, dirs=None, app_dirs=False,
allowed_include_roots=None, context_processors=None,
debug=False, loaders=None, string_if_invalid='',
file_charset='utf-8', libraries=None, builtins=None):
if dirs is None:
dirs = []
if allowed_include_roots is None:
allowed_include_roots = []
if context_processors is None:
context_processors = []
if loaders is None:
loaders = ['django.template.loaders.filesystem.Loader']
if app_dirs:
loaders += ['django.template.loaders.app_directories.Loader']
else:
if app_dirs:
raise ImproperlyConfigured(
"app_dirs must not be set when loaders is defined.")
if libraries is None:
libraries = {}
if builtins is None:
builtins = []
if isinstance(allowed_include_roots, six.string_types):
raise ImproperlyConfigured(
"allowed_include_roots must be a tuple, not a string.")
self.dirs = dirs
self.app_dirs = app_dirs
self.allowed_include_roots = allowed_include_roots
self.context_processors = context_processors
self.debug = debug
self.loaders = loaders
self.string_if_invalid = string_if_invalid
self.file_charset = file_charset
self.libraries = libraries
self.template_libraries = self.get_template_libraries(libraries)
self.builtins = self.default_builtins + builtins
self.template_builtins = self.get_template_builtins(self.builtins)
@staticmethod
@lru_cache.lru_cache()
def get_default():
"""
When only one DjangoTemplates backend is configured, returns it.
Raises ImproperlyConfigured otherwise.
This is required for preserving historical APIs that rely on a
globally available, implicitly configured engine such as:
>>> from django.template import Context, Template
>>> template = Template("Hello {{ name }}!")
>>> context = Context({'name': "world"})
>>> template.render(context)
'Hello world!'
"""
# Since Engine is imported in django.template and since
# DjangoTemplates is a wrapper around this Engine class,
# local imports are required to avoid import loops.
from django.template import engines
from django.template.backends.django import DjangoTemplates
django_engines = [engine for engine in engines.all()
if isinstance(engine, DjangoTemplates)]
if len(django_engines) == 1:
# Unwrap the Engine instance inside DjangoTemplates
return django_engines[0].engine
elif len(django_engines) == 0:
raise ImproperlyConfigured(
"No DjangoTemplates backend is configured.")
else:
raise ImproperlyConfigured(
"Several DjangoTemplates backends are configured. "
"You must select one explicitly.")
@cached_property
def template_context_processors(self):
context_processors = _builtin_context_processors
context_processors += tuple(self.context_processors)
return tuple(import_string(path) for path in context_processors)
def get_template_builtins(self, builtins):
return [import_library(x) for x in builtins]
def get_template_libraries(self, libraries):
loaded = {}
for name, path in libraries.items():
loaded[name] = import_library(path)
return loaded
@cached_property
def template_loaders(self):
return self.get_template_loaders(self.loaders)
def get_template_loaders(self, template_loaders):
loaders = []
for template_loader in template_loaders:
loader = self.find_template_loader(template_loader)
if loader is not None:
loaders.append(loader)
return loaders
def find_template_loader(self, loader):
if isinstance(loader, (tuple, list)):
args = list(loader[1:])
loader = loader[0]
else:
args = []
if isinstance(loader, six.string_types):
loader_class = import_string(loader)
if getattr(loader_class, '_accepts_engine_in_init', False):
args.insert(0, self)
else:
warnings.warn(
"%s inherits from django.template.loader.BaseLoader "
"instead of django.template.loaders.base.Loader. " %
loader, RemovedInDjango110Warning, stacklevel=2)
return loader_class(*args)
else:
raise ImproperlyConfigured(
"Invalid value in template loaders configuration: %r" % loader)
def find_template(self, name, dirs=None, skip=None):
tried = []
for loader in self.template_loaders:
if loader.supports_recursion:
try:
template = loader.get_template(
name, template_dirs=dirs, skip=skip,
)
return template, template.origin
except TemplateDoesNotExist as e:
tried.extend(e.tried)
else:
# RemovedInDjango20Warning: Use old api for non-recursive
# loaders.
try:
return loader(name, dirs)
except TemplateDoesNotExist:
pass
raise TemplateDoesNotExist(name, tried=tried)
def from_string(self, template_code):
"""
Returns a compiled Template object for the given template code,
handling template inheritance recursively.
"""
return Template(template_code, engine=self)
def get_template(self, template_name, dirs=_dirs_undefined):
"""
Returns a compiled Template object for the given template name,
handling template inheritance recursively.
"""
if dirs is _dirs_undefined:
dirs = None
else:
warnings.warn(
"The dirs argument of get_template is deprecated.",
RemovedInDjango110Warning, stacklevel=2)
template, origin = self.find_template(template_name, dirs)
if not hasattr(template, 'render'):
# template needs to be compiled
template = Template(template, origin, template_name, engine=self)
return template
# This method was originally a function defined in django.template.loader.
# It was moved here in Django 1.8 when encapsulating the Django template
# engine in this Engine class. It's still called by deprecated code but it
# will be removed in Django 1.10. It's superseded by a new render_to_string
# function in django.template.loader.
def render_to_string(self, template_name, context=None,
context_instance=_context_instance_undefined,
dirs=_dirs_undefined,
dictionary=_dictionary_undefined):
if context_instance is _context_instance_undefined:
context_instance = None
else:
warnings.warn(
"The context_instance argument of render_to_string is "
"deprecated.", RemovedInDjango110Warning, stacklevel=2)
if dirs is _dirs_undefined:
# Do not set dirs to None here to avoid triggering the deprecation
# warning in select_template or get_template.
pass
else:
warnings.warn(
"The dirs argument of render_to_string is deprecated.",
RemovedInDjango110Warning, stacklevel=2)
if dictionary is _dictionary_undefined:
dictionary = None
else:
warnings.warn(
"The dictionary argument of render_to_string was renamed to "
"context.", RemovedInDjango110Warning, stacklevel=2)
context = dictionary
if isinstance(template_name, (list, tuple)):
t = self.select_template(template_name, dirs)
else:
t = self.get_template(template_name, dirs)
if not context_instance:
# Django < 1.8 accepted a Context in `context` even though that's
# unintended. Preserve this ability but don't rewrap `context`.
if isinstance(context, Context):
return t.render(context)
else:
return t.render(Context(context))
if not context:
return t.render(context_instance)
# Add the context to the context stack, ensuring it gets removed again
# to keep the context_instance in the same state it started in.
with context_instance.push(context):
return t.render(context_instance)
def select_template(self, template_name_list, dirs=_dirs_undefined):
"""
Given a list of template names, returns the first that can be loaded.
"""
if dirs is _dirs_undefined:
# Do not set dirs to None here to avoid triggering the deprecation
# warning in get_template.
pass
else:
warnings.warn(
"The dirs argument of select_template is deprecated.",
RemovedInDjango110Warning, stacklevel=2)
if not template_name_list:
raise TemplateDoesNotExist("No template names provided")
not_found = []
for template_name in template_name_list:
try:
return self.get_template(template_name, dirs)
except TemplateDoesNotExist as exc:
if exc.args[0] not in not_found:
not_found.append(exc.args[0])
continue
# If we get here, none of the templates could be loaded
raise TemplateDoesNotExist(', '.join(not_found))
| jejimenez/django | django/template/engine.py | Python | bsd-3-clause | 10,778 | 0 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# ==============================================================================
# MIT License
#
# Copyright (c) 2017-2019 4k1
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ==============================================================================
import os
import datetime
import threading
class Logging():
logmode = False
def __init__(self):
self.__lock = threading.Lock()
# create dir
projdir = "proj_" + datetime.datetime.today().strftime("%Y%m%d_%H%M%S")
os.mkdir(projdir)
self.__fn = projdir + "/" + datetime.datetime.today().strftime("%Y%m%d_%H%M%S_#")
def get_basename(self):
return self.__fn
def set_logging_mode(self, logmode):
self.logmode = logmode
def log(self, tid, value):
if self.logmode:
return
try:
f = open(self.__fn + str(tid) + ".log", "a")
f.write(value + '\r\n')
f.close()
except:
None
def warn(self, value):
with self.__lock:
try:
f = open(self.__fn + "!.log", "a")
f.write(value + '\r\n')
f.close()
except:
None
def vsplit(iterable, n):
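    # e.g. vsplit("abcdef", 2) -> ['ab', 'cd', 'ef']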
return [iterable[x:x + n] for x in range(0, len(iterable), n)]
def urled(p):
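    # Strips the scheme and host from a URL, e.g. urled("http://example.com/a/b.txt") -> "a/b.txt";
    # strings without "://" are returned unchanged.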
if "://" in p:
p = p[p.find("://")+3:]
if "/" not in p:
return ""
else:
p = p[p.find("/")+1:]
if p.split() == "":
return ""
else:
return p
else:
return p
def passed(p):
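    # Normalizes a path: no leading slash, one trailing slash,
    # e.g. passed("/admin") -> "admin/", passed("/") -> "".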
if p == "/":
return ""
if p[0:1] == "/":
p = p[1:]
if p[-1:] != "/":
return p + "/"
return p
def filed(p):
if p[0:1] == "/":
p = p[1:]
return p
| 4k1/wufuzzer | src/util.py | Python | mit | 2,921 | 0.008216 |
#!/usr/bin/env python
# coding: utf-8
import email.utils
import logging
import os
import smtplib
import threading
from email.mime.text import MIMEText
from email.MIMEMultipart import MIMEMultipart
logger = logging.getLogger("maillog")
class MailBase(threading.Thread):
mailServerPort = 25
def __init__(self, subject, content, basic_info, attachment=""):
"""
        Multi-threaded mail handling class
@Params target: file or string
basicInfo= {
"TOLIST": ["heyu@ucweb.com"],
"SERVER": "mail.ucweb.com",
"PORT": 25, #25 if missing
"USERNAME": "test@ucweb.com",
"PASSWORD": ""
}
(attachment)
        :param subject: mail subject
        :param content: file name or the content itself (strings longer than 50 chars are treated as content)
        :param basic_info: mail configuration
        :param attachment: attachment file path
"""
threading.Thread.__init__(self)
self._set_basic_info(basic_info)
self.subject = subject
self.content = content
self.attachment = attachment
def _set_basic_info(self, basic_info):
"""
:type basic_info: dict
"""
self.BASICS = {}
basic = ["TOLIST", "SERVER", "USERNAME", "PASSWORD", "PORT"]
if isinstance(basic_info, dict):
if "PORT" not in basic_info.keys():
basic_info["PORT"] = self.mailServerPort
if len(basic_info.keys()) != len(basic):
logger.error("params nums not correct~")
raise BadEmailSettings("basic_info param error")
            for key in basic:
                if key in basic_info.keys():
                    self.BASICS[key] = basic_info[key]
                else:
                    logger.error("mail settings has no %s", key)
                    raise BadEmailSettings()
else:
logger.error("basic_info should be a dict")
raise BadEmailSettings("basic_info not a dict")
def _send_mail(self, subject, content, attachment):
subject = subject.decode("utf-8")
self._do_send_mail(self.BASICS["TOLIST"], subject, content, attachment)
def run(self):
if not self.subject or not self.content:
return
self._send_mail(self.subject, self.content, self.attachment)
def _do_send_mail(self, to, subject, content, attachment):
msg = MIMEMultipart('related')
msg['To'] = ', '.join(to)
msg['From'] = email.utils.formataddr((self.BASICS["USERNAME"], self.BASICS["USERNAME"]))
msg['Subject'] = subject
# msgText = MIMEText(content.encode("utf-8"), "html")
msgtext = MIMEText(content, "html")
msgtext.set_charset('utf-8')
msg.attach(msgtext)
if attachment:
att = MIMEText(open(attachment, 'rb').read(), 'base64', 'utf-8')
att["Content-Type"] = 'application/octet-stream'
att["Content-Disposition"] = 'attachment;filename="%s"' % attachment
msg.attach(att)
server = smtplib.SMTP(self.BASICS["SERVER"], self.BASICS["PORT"])
server.set_debuglevel(False) # show communication with the server
server.login(self.BASICS["USERNAME"], self.BASICS["PASSWORD"])
try:
server.sendmail(self.BASICS["USERNAME"], to, msg.as_string())
finally:
server.quit()
class FileMail(MailBase):
"""
    Load the mail body from a file and send it.
"""
def __init__(self, subject, mail_file, basic_info, attachment=""):
if len(mail_file) <= 50 and os.path.isfile(mail_file):
fd = open(mail_file)
content = fd.read()
content = "<br/>".join(content.split("\n"))
fd.close()
else:
content = ""
super(FileMail, self).__init__(subject, content, basic_info, attachment)
class BadEmailSettings(Exception):
pass
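# Minimal usage sketch; the server, account and recipient below are
# placeholders, not working settings.
if __name__ == "__main__":
    demo_info = {
        "TOLIST": ["someone@example.com"],
        "SERVER": "mail.example.com",
        "PORT": 25,
        "USERNAME": "sender@example.com",
        "PASSWORD": "secret",
    }
    # MailBase sends an in-memory string as the HTML body in its own thread.
    mail = MailBase("status report", "<b>all jobs finished</b>", demo_info)
    mail.start()
    mail.join()
    # FileMail instead loads the body from a short (<=50 character) file path:
    # FileMail("daily log", "run.log", demo_info, attachment="run.log").start()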
| gannicus-yu/pyutils | myutils/mailbase.py | Python | apache-2.0 | 4,008 | 0.000763 |
from django.apps import AppConfig
class TreeTraversalConfig(AppConfig):
name = 'tree_traversal'
| JacekKarnasiewicz/HomePage | apps/tree_traversal/apps.py | Python | mit | 102 | 0 |
#! /usr/bin/python
# -*- coding: utf-8 -*-
#-----------------------------------------------------------------------------
# Name: setup.py
# Purpose:
# Author: Fabien Marteau <fabien.marteau@armadeus.com>
# Created: 16/02/2009
#-----------------------------------------------------------------------------
# Copyright (2008) Armadeus Systems
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
#-----------------------------------------------------------------------------
# Revision list :
#
# Date By Changes
#
#-----------------------------------------------------------------------------
from distutils.core import setup
import os,re
import sys
sys.path.append("src/bin/")
from version import *
def visit(libfile,dirname,names):
""" function used for getLibraryTree to walk throw library tree"""
for file in names:
filepath = os.path.join(dirname,file)
if not os.path.isdir(filepath):
if not re.search(r".svn",filepath):
# FIXME:
# I can't find how to split with os.path !
# will be used when package_data work
#realpath = "/".join(filepath.split("/")[1:])
#libfile.append(realpath)
libfile.append(filepath)
def getTree(directory):
""" return a tuple list of files """
libfile = []
os.path.walk(os.path.join("src",directory),visit,libfile)
new_libfile = []
for path_file in libfile:
new_libfile.append('/'.join(path_file.split('/')[1:]))
if (directory == "platforms"):
print str(new_libfile)
return new_libfile
# Package files
package_files_list = []
package_files_list.extend(getTree("library"))
package_files_list.extend(getTree("platforms"))
package_files_list.extend(getTree("templates"))
package_files_list.extend(getTree("busses"))
package_files_list.extend(getTree("toolchains"))
package_files_list.extend(getTree("tests"))
datafiles=[ ('bin',['src/bin/pod']) ]
setup( name='PeriphOnDemand',
version=getVersion(),
url='https://sourceforge.net/projects/periphondemand',
author='Fabien Marteau and Nicolas Colombain',
author_email='<fabien.marteau@armadeus.com>,<nicolas.colombain@armadeus.com>,',
maintainer='Fabien Marteau',
maintainer_email='fabien.marteau@armadeus.com',
package_dir = {"periphondemand":"src"},
packages=['periphondemand',
'periphondemand.bin',
'periphondemand.bin.code',
'periphondemand.bin.code.vhdl',
'periphondemand.bin.commandline',
'periphondemand.bin.core',
'periphondemand.bin.toolchain',
'periphondemand.bin.utils',
],
package_data = {'periphondemand':package_files_list},
data_files=datafiles,
license='GPL',
)
| magyarm/periphondemand-code | setup.py | Python | lgpl-2.1 | 3,545 | 0.010155 |
#!/usr/bin/env python3
import os
import sys
import getopt
import xml.dom.minidom
class CppCreator(object):
def __init__(self, file_name, xml_root, output_path):
if not os.path.exists(output_path):
print ("CppCreator create error")
exit(1)
self.xml_root = xml_root
self.output_path = output_path
self.file_name = file_name
def GetCppRealType(self, type_str, subtype_str):
real_type_str = type_str
if type_str == "int8":
real_type_str = "char"
elif type_str == "uint8":
real_type_str = "unsigned char"
elif type_str == "int16":
real_type_str = "short"
elif type_str == "uint16":
real_type_str = "unsigned short"
elif type_str == "int32":
real_type_str = "int"
elif type_str == "uint32":
real_type_str = "unsigned int"
elif type_str == "int64":
real_type_str = "long long"
elif type_str == "uint64":
real_type_str = "unsigned long long"
elif type_str == "string":
real_type_str = "std::string"
elif type_str == "array":
if subtype_str == "":
print("GetCppRealType : subtype_str can not empty when type is array")
exit(1)
real_type_str = "std::vector<" + self.GetCppRealType(subtype_str, "") + ">"
return real_type_str
def GetSerializeCode(self, type_str, subtype_str, attr_name):
code_str = ""
if type_str == "int8":
code_str += (" collector.WriteInt8(" + attr_name + ");\n")
elif type_str == "uint8":
code_str += (" collector.WriteUint8(" + attr_name + ");\n")
elif type_str == "int16":
code_str += (" collector.WriteInt16(" + attr_name + ");\n")
elif type_str == "uint16":
code_str += (" collector.WriteUint16(" + attr_name + ");\n")
elif type_str == "int32":
code_str += (" collector.WriteInt32(" + attr_name + ");\n")
elif type_str == "uint32":
code_str += (" collector.WriteUint32(" + attr_name + ");\n")
elif type_str == "int64":
code_str += (" collector.WriteInt64(" + attr_name + ");\n")
elif type_str == "uint64":
code_str += (" collector.WriteUint64(" + attr_name + ");\n")
elif type_str == "string":
code_str += (" collector.WriteString(" + attr_name + ");\n")
elif type_str == "array":
if subtype_str == "":
print("GetSerializeCode : subtype_str can not empty when type is array")
exit(1)
code_str += (" collector.WriteUint16((unsigned short)" + attr_name + ".size());\n")
code_str += " for (auto array_item : " + attr_name + ")\n {\n "
sub_serialize_code = self.GetSerializeCode(subtype_str, "", "array_item")
if sub_serialize_code == "":
sub_serialize_code = " array_item.Serialize(collector);\n"
code_str += sub_serialize_code
code_str += " }\n"
return code_str
def GetUnserializeCode(self, type_str, subtype_str, attr_name):
code_str = ""
if type_str == "int8":
code_str += (" " + attr_name + " = collector.ReadInt8();\n")
elif type_str == "uint8":
code_str += (" " + attr_name + " = collector.ReadUint8();\n")
elif type_str == "int16":
code_str += (" " + attr_name + " = collector.ReadInt16();\n")
elif type_str == "uint16":
code_str += (" " + attr_name + " = collector.ReadUint16();\n")
elif type_str == "int32":
code_str += (" " + attr_name + " = collector.ReadInt32();\n")
elif type_str == "uint32":
code_str += (" " + attr_name + " = collector.ReadUint32();\n")
elif type_str == "int64":
code_str += (" " + attr_name + " = collector.ReadInt64();\n")
elif type_str == "uint64":
code_str += (" " + attr_name + " = collector.ReadUint64();\n")
elif type_str == "string":
code_str += (" " + attr_name + " = collector.ReadString();\n")
elif type_str == "array":
if subtype_str == "":
print("GetUnserializeCode : subtype_str can not empty when type is array")
exit(1)
code_str += (" {\n int array_size = collector.ReadUint16();\n " + self.GetCppRealType(subtype_str, "") + " tmp_attr_value;\n")
code_str += " for (int index = 0; index < array_size; ++ index)\n {\n "
sub_serialize_code = self.GetUnserializeCode(subtype_str, "", "tmp_attr_value")
if sub_serialize_code == "":
sub_serialize_code = " tmp_attr_value.Unserialize(collector);\n"
code_str += sub_serialize_code
code_str += (" " + attr_name + ".push_back(tmp_attr_value);\n")
code_str += " }\n }\n"
return code_str
def DoCreate(self):
protocols = self.xml_root.getElementsByTagName("protocol")
hpp_file_str = "#pragma once\n\n#include <string>\n#include <vector>\n#include <elegance/memory/serialize/serialize_base.hpp>\n\nusing face2wind::SerializeBase;\nusing face2wind::SerializeDescribe;\nusing face2wind::ByteArray;\n\nnamespace Protocol {\n\n"
cpp_file_header_str = "#include \"" + self.file_name + ".hpp\"\n\nnamespace Protocol {\n\n"
describe_hpp_str = ""
describe_cpp_str = ""
cpp_file_str = ""
for protocol in protocols:
class_name = protocol.getAttribute("name")
hpp_file_str += ("class " + class_name + " : public SerializeBase\n{\npublic:\n")
cpp_serialize_code = ""
cpp_unserialize_code = ""
attrs = protocol.getElementsByTagName("attr")
for attr in attrs:
type_name = attr.getAttribute("type")
attr_name = attr.getAttribute("name")
subtype_name = ""
real_type_name = ""
if (attr.hasAttribute("subtype")):
subtype_name = attr.getAttribute("subtype")
real_type_name = self.GetCppRealType(type_name, subtype_name)
hpp_file_str += (" " + real_type_name + " " + attr_name + ";\n")
cpp_serialize_code += self.GetSerializeCode(type_name, subtype_name, attr_name)
cpp_unserialize_code += self.GetUnserializeCode(type_name, subtype_name, attr_name)
hpp_file_str += "\n virtual void Serialize(ByteArray &collector) const;\n"
hpp_file_str += " virtual void Unserialize(ByteArray &collector);\n"
hpp_file_str += " virtual const std::string GetTypeName() const { return \"" + class_name + "\"; }\n"
hpp_file_str += "};\n\n"
describe_class_name = "__" + class_name + "Describe__";
describe_hpp_str += ("class " + describe_class_name + " : public SerializeDescribe\n{\npublic:\n " + describe_class_name + "() { GetNameToObjectMap()[\"" + class_name + "\"] = this; }\n virtual ~" + describe_class_name + "() {}\n")
describe_hpp_str += "\nprotected:\n virtual SerializeBase * CreateSerialize() const { return new " + class_name + "(); }\n};\n\n"
describe_cpp_str += (describe_class_name + " " + "for_describe_register_to_" + describe_class_name.lower() + ";\n")
cpp_file_str += ("void " + class_name + "::Serialize(ByteArray &collector) const\n")
cpp_file_str += ("{\n" + cpp_serialize_code + "}\n\n")
cpp_file_str += ("void " + class_name + "::Unserialize(ByteArray &collector)\n")
cpp_file_str += ("{\n" + cpp_unserialize_code + "}\n\n")
cpp_file_str += "}\n\n"
describe_hpp_str += "\n\n"
describe_cpp_str += "\n\n"
hpp_file = open(self.output_path + "/" + self.file_name + ".hpp", "w")
hpp_file.write(hpp_file_str + describe_hpp_str + "}\n\n")
hpp_file.close()
cpp_file = open(self.output_path + "/" + self.file_name + ".cpp", "w")
cpp_file.write(cpp_file_header_str + describe_cpp_str + cpp_file_str)
cpp_file.close()
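# Illustrative driver; the XML layout below (protocol/attr elements with
# name/type/subtype attributes) is inferred from DoCreate(), and the output
# file name is a placeholder.
if __name__ == "__main__":
    sample_xml = """<?xml version="1.0"?>
<protocols>
  <protocol name="LoginReq">
    <attr type="string" name="account"/>
    <attr type="uint32" name="version"/>
    <attr type="array" subtype="int32" name="channel_ids"/>
  </protocol>
</protocols>"""
    root = xml.dom.minidom.parseString(sample_xml).documentElement
    # Writes login_protocol.hpp / login_protocol.cpp into the current directory.
    CppCreator("login_protocol", root, ".").DoCreate()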
| face2wind/Elegance | tools/serialize_creator/cpp_creator.py | Python | lgpl-3.0 | 8,346 | 0.003834 |
from office365.runtime.client_object_collection import ClientObjectCollection
from office365.sharepoint.content_type import ContentType
class ContentTypeCollection(ClientObjectCollection):
"""Content Type resource collection"""
def __init__(self, context, resource_path=None):
super(ContentTypeCollection, self).__init__(context, ContentType, resource_path)
| vgrem/SharePointOnline-REST-Python-Client | office365/sharepoint/content_type_collection.py | Python | mit | 376 | 0.00266 |
from django.db import models
from django.contrib.auth.models import User
from django.db.models.signals import post_save
from collections import OrderedDict
from datetime import date, timedelta
import signals
class UserProfile(models.Model):
user = models.OneToOneField(User, primary_key=True)
# Optional new fields go here, but for now I don't need any.
# Mostly a soft deletion bit.
@classmethod
def of(cls, user):
return cls.objects.get(user=user)
def vote(self, vote):
TodayVotes(user=self, vote=vote).save()
def __unicode__(self):
return self.user.username
# Ensure that we have a UserProfile for all users
def create_user_profile(sender, instance, created, **kwargs):
if created:
UserProfile.objects.create(user=instance)
post_save.connect(create_user_profile, sender=User)
class Team(models.Model):
HIERARCHICAL = 0
CIRCULAR = 1
SHAPE_CHOICES = [
(HIERARCHICAL, "Hierarchical"),
(CIRCULAR, "Circular"),
]
id = models.AutoField(primary_key=True)
name = models.CharField(max_length=25, unique=True)
members = models.ManyToManyField(UserProfile)
shape = models.IntegerField(
choices=SHAPE_CHOICES,
default=HIERARCHICAL,
help_text="If a group is hierarchical, members report to one manager.\n"
"If a group is circular, members report to each other.",
)
manager = models.ForeignKey(User, on_delete=models.PROTECT,
related_name="owned_by")
supervisor = models.ForeignKey(User, on_delete=models.PROTECT,
related_name="supervised_by")
def get_votes(self, for_past_days=5):
cutoff_point = date.today() - timedelta(days=for_past_days)
return Votes.objects.filter(team=self,
is_vote=True,
date__gte=cutoff_point)
def get_vote_averages(self, for_past_days=5):
# this returns a list of dicts with keys "date" and "vote__avg"
data = (self.get_votes(for_past_days=for_past_days)
.values('date')
.annotate(models.Avg('vote')))
# turn that into a simpler dict from date to average
response = OrderedDict()
for point in data:
response[point["date"]] = point["vote__avg"]
return response
def get_predictions(self, for_past_days=5):
cutoff_point = date.today() - timedelta(days=for_past_days)
return Votes.objects.filter(team=self,
is_prediction=True,
date__gte=cutoff_point)
def get_all_voters(self):
return UserProfile.objects.filter(team=self)
def get_all_predictors(self):
if self.shape == self.HIERARCHICAL:
return [self.manager]
else:
return self.get_all_voters()
def check_for_warnings(self, send_signals=False):
def is_prediction_successful(pred, avg):
if self.shape == self.HIERARCHICAL:
ranges = [(1, 3), (1, 3), (0, 0), (3, 5), (3, 5)]
else:
ranges = [(1, 2.2), (2.2, 2.8), (2.8, 3.2), (3.2, 3.8), (3.8, 5)]
low, hi = ranges[pred-1]
return low <= avg <= hi
def is_concerning(self, failures, num_predictions, pvalue_threshold=0.1):
base_prob = 0.5 if self.shape == self.HIERARCHICAL else 0.4
successes = num_predictions - failures
test_pvalue = choose(num_predictions, successes)
test_pvalue *= base_prob ** successes
test_pvalue *= (1-base_prob) ** failures
# TODO: adjust pvalue threshold to num_predictions?
return (test_pvalue <= pvalue_threshold, test_pvalue)
avgs = self.get_vote_averages()
predictions = self.get_predictions()
num_predictions = predictions.count()
failures = 0
for day in avgs:
avg = avgs[day]
day_predictions = predictions.filter(date=day)
for pred in day_predictions:
if not is_prediction_successful(pred.vote, avg):
failures += 1
        threshold_hit, _pvalue = is_concerning(self, failures, num_predictions)
        if send_signals and threshold_hit:
# TODO: add a ton more information here
signals.problem_detected.send(sender=self.__class__, team=self)
return threshold_hit
def __unicode__(self):
return self.name
VOTE_CHOICES = [
(1, ":C"),
(2, ":/"),
(3, ":|"),
(4, ":)"),
(5, ":D"),
]
# class Membership(models.Model):
# id = models.AutoField(primary_key=True, editable=False)
# team = models.ForeignKey(Team)
# member = models.ForeignKey(UserProfile)
# def __unicode__(self):
# return "%s/%s" % (self.team, self.member)
class TodayVotes(models.Model):
user = models.ForeignKey(UserProfile, primary_key=True)
vote = models.IntegerField(choices=VOTE_CHOICES, blank=True)
@classmethod
# This accepts kwargs because it is a signal handler
def do_switchover(cls, **kwargs):
for team in Team.objects.all():
voters = team.get_all_voters()
predictors = team.get_all_predictors()
votes = TodayVotes.objects.filter(user__in=voters)
predictions = TodayVotes.objects.filter(user__in=predictors)
if votes.count() < 3:
print "Skipping %s, not enough votes" % team
continue
else:
for vote in votes:
Votes(team=team, vote=vote.vote, is_vote=True).save()
for prediction in predictions:
                    Votes(team=team, vote=prediction.vote, is_prediction=True).save()
# Send warnings
team.check_for_warnings(send_signals=True)
TodayVotes.objects.all().delete()
def __unicode__(self):
return "%d" % self.vote
signals.day_switchover.connect(TodayVotes.do_switchover)
class Votes(models.Model):
id = models.AutoField(primary_key=True, editable=False)
# Is it okay to cascade delete votes?
# It's probably best not to delete users or teams to begin with.
# If we HAVE to delete things, then we might as well delete it all.
team = models.ForeignKey(Team, editable=False)
is_vote = models.BooleanField(default=False)
is_prediction = models.BooleanField(default=False)
vote = models.IntegerField(choices=VOTE_CHOICES, editable=False)
date = models.DateField(auto_now_add=True, editable=False)
class Meta:
ordering = ["team", "date"]
get_latest_by = "date"
def __unicode__(self):
return ("{s.team}@{s.date}:{s.vote}"
" ({s.is_vote}, {s.is_prediction})".format(s=self))
# from http://stackoverflow.com/a/3025547/13992
# by http://stackoverflow.com/users/4279/j-f-sebastian
def choose(n, k):
"""
A fast way to calculate binomial coefficients by Andrew Dalke (contrib).
"""
if 0 <= k <= n:
ntok = 1
ktok = 1
for t in xrange(1, min(k, n - k) + 1):
ntok *= n
ktok *= t
n -= 1
return ntok // ktok
else:
return 0
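# Worked example of the binomial p-value used in Team.check_for_warnings():
# for a circular team the base probability of a "successful" prediction is
# 0.4, so 1 success out of 5 predictions gives
#   choose(5, 1) * 0.4**1 * 0.6**4 = 5 * 0.4 * 0.1296 = 0.2592,
# which is above the default 0.1 threshold, so that team is not yet flagged.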
# This is called on syncdb
def init(sender, **kwargs):
# Only run this once. HACK: change parameter in connect() instead
if ".admin." not in sender.__name__:
return
# Don't do anything if the DB is already populated
if User.objects.all().count() != 0:
return
print "Bootstrapping triggered by %s." % sender.__name__
root = User.objects.create_superuser("root", "root@localhost", "root")
print "Superuser created (username/password are 'root')"
user1 = User.objects.create_user('one', 'one@localhost', 'one')
user2 = User.objects.create_user('two', 'two@localhost', 'two')
user3 = User.objects.create_user('tri', 'tri@localhost', 'tri')
print "Users one, two, tri created and added to team 'demo'."
team = Team(name="demo", shape=Team.CIRCULAR, manager=root, supervisor=root)
team.save()
for user in (root, user1, user2, user3):
team.members.add(UserProfile.of(user))
assert team.get_all_voters().count() == 4
# Create votes in the past
for days_in_the_past in range(6):
day = date.today() - timedelta(days=days_in_the_past)
Votes(vote=3, team=team, is_vote=True, date=day).save()
Votes(vote=2, team=team, is_vote=True, date=day).save()
Votes(vote=1, team=team, is_vote=True, date=day).save()
Votes(vote=4, team=team, is_prediction=True, date=day).save()
TodayVotes(user=UserProfile.of(user1), vote=1).save()
TodayVotes(user=UserProfile.of(user2), vote=2).save()
TodayVotes(user=UserProfile.of(user3), vote=3).save()
TodayVotes(user=UserProfile.of(root), vote=4).save()
print "Voting history created."
| badp/weather | weather/models.py | Python | bsd-2-clause | 8,309 | 0.011674 |
from __future__ import absolute_import
from flask import url_for, redirect, abort, request, Blueprint, current_app
from flask_login import login_required, current_user
import six
from six.moves import filter, map
from sqlalchemy.orm.exc import NoResultFound
from itertools import chain
from .. import ships, systems, db
from ..models import Request, ActionType
from ..auth import PermissionType
from ..auth.models import Division, User, Group, Pilot, Entity
from .requests import PermissionRequestListing, PersonalRequests
from ..util import jsonify, classproperty
api = Blueprint('api', __name__)
filters = Blueprint('filters', __name__)
@api.route('/entities/')
@login_required
def list_entities():
"""Return a JSON object with a list of all of the specified entity type.
Example output::
{
entities: [
{name: 'Bar', id: 1, source: 'Auth Source', type: 'User'},
{name: 'Foo', id: 0, source: 'Another Auth Source', type: 'Group'},
{name: 'Baz', id: 20, source: 'Auth Source', type: 'Group'}
]
}
    This method is only accessible to administrators.
    """
if not current_user.admin and not \
current_user.has_permission(PermissionType.admin):
abort(403)
user_query = db.session.query(User.id, User.name, User.authmethod)
group_query = db.session.query(Group.id, Group.name, Group.authmethod)
users = map(lambda e: {
u'id': e.id,
u'name': e.name,
u'type': u'User',
u'source': e.authmethod}, user_query)
groups = map(lambda e: {
u'id': e.id,
u'name': e.name,
u'type': u'Group',
u'source': e.authmethod}, group_query)
return jsonify(entities=chain(users, groups))
@api.route('/user/<int:user_id>/')
@login_required
def user_detail(user_id):
if not current_user.admin and not \
current_user.has_permission(PermissionType.admin):
abort(403)
user = User.query.get_or_404(user_id)
# Set up divisions
submit = map(lambda p: p.division,
filter(lambda p: p.permission == PermissionType.submit,
user.permissions))
review = map(lambda p: p.division,
filter(lambda p: p.permission == PermissionType.review,
user.permissions))
pay = map(lambda p: p.division,
filter(lambda p: p.permission == PermissionType.pay,
user.permissions))
resp = {
u'name': user.name,
u'groups': list(user.groups),
u'divisions': {
u'submit': list(set(submit)),
u'review': list(set(review)),
u'pay': list(set(pay)),
},
u'admin': user.admin,
u'requests': user.requests,
}
return jsonify(**resp)
@api.route('/group/<int:group_id>/')
@login_required
def group_detail(group_id):
if not current_user.admin and not \
current_user.has_permission(PermissionType.admin):
abort(403)
group = Group.query.get_or_404(group_id)
submit = map(lambda p: p.division,
filter(lambda p: p.permission == PermissionType.submit,
group.permissions))
review = map(lambda p: p.division,
filter(lambda p: p.permission == PermissionType.review,
group.permissions))
pay = map(lambda p: p.division,
filter(lambda p: p.permission == PermissionType.pay,
group.permissions))
resp = {
u'name': group.name,
u'users': list(group.users),
u'divisions': {
u'submit': list(set(submit)),
u'review': list(set(review)),
u'pay': list(set(pay)),
},
}
return jsonify(**resp)
@api.route('/division/')
@login_required
def list_divisions():
"""List all divisions.
"""
if not current_user.admin:
abort(403)
divisions = db.session.query(Division.id, Division.name)
return jsonify(divisions=divisions)
@api.route('/division/<int:division_id>/')
@login_required
def division_detail(division_id):
"""Get the details of a division.
:param int division_id: The ID of the division
"""
division = Division.query.get_or_404(division_id)
if not current_user.admin and not \
current_user.has_permission(PermissionType.admin, division):
abort(403)
permissions = {}
for perm in PermissionType.all:
key = perm.name + '_href'
permissions[key] = url_for('.division_permissions',
division_id=division_id,
permission=perm.name)
return jsonify(
name=division.name,
requests=division.requests,
permissions=permissions)
@api.route('/division/<int:division_id>/<permission>/')
@login_required
def division_permissions(division_id, permission):
division = Division.query.get_or_404(division_id)
if not current_user.admin and not \
current_user.has_permission(PermissionType.admin, division):
abort(403)
permission = PermissionType.from_string(permission)
# Can't use normal Entity JSON encoder as it doesn't include the
# authentication source or their type (explicitly. Ain't nobody got time
# for parsing the entity type out of the href).
entities = []
for entity in map(lambda p: p.entity, division.permissions[permission]):
entity_info = {
u'name': entity.name,
u'id': entity.id,
u'source': str(entity.authmethod),
}
if hasattr(entity, u'users'):
entity_info[u'type'] = u'Group'
entity_info[u'length'] = len(entity.users)
else:
entity_info[u'type'] = u'User'
entities.append(entity_info)
return jsonify(
entities=entities,
name=permission.name,
description=permission.description)
@api.route('/ships/')
@login_required
def ship_list():
"""Get an array of objects corresponding to every ship type.
The objects have two keys, ``id`` is the integer typeID, and ``name`` is
the name of the ship. This method is only accessible for logged in users to
try to keep possible misuse to a minimum.
"""
ship_objs = list(map(lambda s: {u'name': s[1], u'id': s[0]},
ships.ships.items()))
return jsonify(ships=ship_objs)
class FiltersRequestListing(object):
@classproperty
def _load_options(self):
"""Returns a sequence of
:py:class:`~sqlalchemy.orm.strategy_options.Load` objects specifying
which attributes to load.
"""
return (
db.Load(Request).load_only(
'id',
'pilot_id',
'corporation',
'alliance',
'ship_type',
'status',
'base_payout',
'kill_timestamp',
'timestamp',
'division_id',
'submitter_id',
'system',
),
db.Load(Division).joinedload('name'),
db.Load(Pilot).joinedload('name'),
db.Load(User).joinedload('id')
)
def dispatch_request(self, filters='', **kwargs):
def request_dict(request):
payout = request.payout
return {
u'id': request.id,
u'href': url_for('requests.get_request_details',
request_id=request.id),
u'pilot': request.pilot.name,
u'corporation': request.corporation,
u'alliance': request.alliance,
u'ship': request.ship_type,
u'status': request.status.name,
u'payout': payout.currency(),
u'kill_timestamp': request.kill_timestamp,
u'submit_timestamp': request.timestamp,
u'division': request.division.name,
u'submitter_id': request.submitter.id,
u'system': request.system,
u'constellation': request.constellation,
u'region': request.region,
}
return jsonify(requests=map(request_dict, self.requests({})))
class APIRequestListing(FiltersRequestListing, PermissionRequestListing): pass
class APIPersonalRequests(FiltersRequestListing, PersonalRequests): pass
@filters.record
def register_request_lists(state):
# Create the views
all_requests = APIRequestListing.as_view('filter_requests_all',
PermissionType.all, ActionType.statuses)
user_requests = APIPersonalRequests.as_view('filter_requests_own')
pending_requests = APIRequestListing.as_view('filter_requests_pending',
(PermissionType.review,), ActionType.pending)
pay_requests = APIRequestListing.as_view('filter_requests_pay',
(PermissionType.pay,), (ActionType.approved,))
completed_requests = APIRequestListing.as_view('filter_requests_completed',
PermissionType.elevated, ActionType.finalized)
# Attach the views to paths
for prefix in state.app.request_prefixes:
state.add_url_rule(prefix + '/', view_func=all_requests)
state.add_url_rule(prefix + '/<path:filters>/',
view_func=all_requests)
state.add_url_rule(prefix + '/personal/', view_func=user_requests)
state.add_url_rule(prefix + '/personal/<path:filters>/',
view_func=user_requests)
state.add_url_rule(prefix + '/pending/', view_func=pending_requests)
state.add_url_rule(prefix + '/pending/<path:filters>/',
view_func=pending_requests)
state.add_url_rule(prefix + '/pay/', view_func=pay_requests)
state.add_url_rule(prefix + '/pay/<path:filters>/',
view_func=pay_requests)
state.add_url_rule(prefix + '/completed/',
view_func=completed_requests)
state.add_url_rule(prefix + '/completed/<path:filters>/',
view_func=completed_requests)
def _first(o):
return o[0]
@filters.route('/ship/')
@login_required
def filter_ships():
ships = db.session.query(Request.ship_type).distinct()
return jsonify(key=u'ship', ship=map(_first, ships))
@filters.route('/system/')
@login_required
def filter_systems():
systems = db.session.query(Request.system).distinct()
return jsonify(key=u'system', system=map(_first, systems))
@filters.route('/constellation/')
@login_required
def filter_constellations():
constellations = db.session.query(Request.constellation).distinct()
return jsonify(key=u'constellation',
constellation=map(_first, constellations))
@filters.route('/region/')
@login_required
def filter_regions():
regions = db.session.query(Request.region).distinct()
return jsonify(key=u'region', region=map(_first, regions))
@filters.route('/details/<path:query>')
@login_required
def query_details(query):
requests = db.session.query(Request.id)\
.filter(Request.details.match(query))
return jsonify(ids=map(_first, requests))
@filters.route('/pilot/')
@login_required
def filter_pilots():
pilots = db.session.query(Pilot.name)
return jsonify(key=u'pilot', pilot=map(_first, pilots))
@filters.route('/corporation/')
@login_required
def filter_corps():
corps = db.session.query(Request.corporation).distinct()
return jsonify(key=u'corporation', corporation=map(_first, corps))
@filters.route('/alliance/')
@login_required
def filter_alliances():
alliances = db.session.query(Request.alliance)\
.filter(Request.alliance != None)\
.distinct()
return jsonify(key=u'alliance', alliance=map(_first, alliances))
@filters.route('/division/')
@login_required
def filter_divisions():
div_names = db.session.query(Division.name)
return jsonify(key=u'division', division=map(_first, div_names))
| paxswill/evesrp | src/evesrp/views/api.py | Python | bsd-2-clause | 12,023 | 0.002578 |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.4 on 2016-09-09 17:28
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('auctions', '0038_offercredit'),
]
operations = [
migrations.AlterField(
model_name='offercredit',
name='offer',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='offer_credit', to='auctions.Offer'),
),
]
| codesy/codesy | auctions/migrations/0039_auto_20160909_1728.py | Python | agpl-3.0 | 573 | 0.001745 |
"""
Implement atoi() in Python (given a string, return a number).
Assume all the strings are always valid.
"""
import unittest
def atoi(string):
l = len(string)
t = 0
v = 10 ** (l - 1)
for ch in string:
t += v * int(ch)
v /= 10
return t
def atoi2(string):
l, t = len(string), 0
for idx, ch in enumerate(string):
t += int(ch) * (10 ** (l - idx - 1))
return t
def atoi3(string):
l = len(string)
return sum([
int(ch) * (10 ** (l - idx - 1))
for idx, ch in enumerate(string)
])
class AtoITest(unittest.TestCase):
def test_atoi(self):
self.assertEqual(12345, atoi("12345"))
self.assertEqual(1234, atoi("1234"))
self.assertEqual(123, atoi("123"))
self.assertEqual(12, atoi("12"))
self.assertEqual(1, atoi("1"))
self.assertEqual(0, atoi("0"))
def test_atoi2(self):
self.assertEqual(12345, atoi2("12345"))
self.assertEqual(1234, atoi2("1234"))
self.assertEqual(123, atoi2("123"))
self.assertEqual(12, atoi2("12"))
self.assertEqual(1, atoi2("1"))
self.assertEqual(0, atoi2("0"))
def test_atoi3(self):
self.assertEqual(12345, atoi3("12345"))
self.assertEqual(1234, atoi3("1234"))
self.assertEqual(123, atoi3("123"))
self.assertEqual(12, atoi3("12"))
self.assertEqual(1, atoi3("1"))
self.assertEqual(0, atoi3("0"))
| kratorius/ads | python/interviewquestions/atoi.py | Python | mit | 1,456 | 0.00206 |
#!/usr/bin/env python
# coding: utf-8
import os,sys
import ctypes
import numpy as np
from .hmatrix import _C_HMatrix, HMatrix
class _C_MultiHMatrix(ctypes.Structure):
"""Holder for the raw data from the C++ code."""
pass
class AbstractMultiHMatrix:
"""Common code for the two actual MultiHMatrix classes below."""
ndim = 2 # To mimic a numpy 2D array
def __init__(self, c_data: _C_MultiHMatrix, **params):
# Users should use one of the two constructors below.
self.c_data = c_data
self.shape = (self.lib.multi_nbrows(c_data), self.lib.multi_nbcols(c_data))
self.size = self.lib.nbhmats(c_data)
self.lib.getHMatrix.restype=ctypes.POINTER(_C_HMatrix)
self.lib.getHMatrix.argtypes=[ctypes.POINTER(_C_MultiHMatrix), ctypes.c_int]
self.hmatrices = []
for l in range(0,self.size):
c_data_hmatrix = self.lib.getHMatrix(self.c_data,l)
self.hmatrices.append(HMatrix(c_data_hmatrix,**params))
self.params = params.copy()
@classmethod
def from_coefs(cls, getcoefs, nm, points_target, points_source=None, **params):
"""Construct an instance of the class from a evaluation function.
Parameters
----------
getcoefs: Callable
A function evaluating an array of matrices at given coordinates.
points_target: np.ndarray of shape (N, 3)
            The coordinates of the target points. If points_source=None, also used as the source points.
        points_source: np.ndarray of shape (N, 3)
            If not None, the coordinates of the source points.
epsilon: float, keyword-only, optional
Tolerance of the Adaptive Cross Approximation
eta: float, keyword-only, optional
Criterion to choose the blocks to compress
minclustersize: int, keyword-only, optional
Minimum shape of a block
maxblocksize: int, keyword-only, optional
Maximum number of coefficients in a block
Returns
-------
MultiHMatrix or ComplexMultiHMatrix
"""
# Set params.
cls._set_building_params(**params)
# Boilerplate code for Python/C++ interface.
_getcoefs_func_type = ctypes.CFUNCTYPE(None, ctypes.c_int, ctypes.c_int, ctypes.POINTER(ctypes.c_double))
if points_source is None:
cls.lib.MultiHMatrixCreateSym.restype = ctypes.POINTER(_C_MultiHMatrix)
cls.lib.MultiHMatrixCreateSym.argtypes = [
np.ctypeslib.ndpointer(dtype=np.float64, ndim=2, flags='C_CONTIGUOUS'),
ctypes.c_int,
_getcoefs_func_type,
ctypes.c_int
]
# Call the C++ backend.
c_data = cls.lib.MultiHMatrixCreateSym(points_target, points_target.shape[0], _getcoefs_func_type(getcoefs),nm)
else:
cls.lib.MultiHMatrixCreate.restype = ctypes.POINTER(_C_MultiHMatrix)
cls.lib.MultiHMatrixCreate.argtypes = [
np.ctypeslib.ndpointer(dtype=np.float64, ndim=2, flags='C_CONTIGUOUS'),
ctypes.c_int,
np.ctypeslib.ndpointer(dtype=np.float64, ndim=2, flags='C_CONTIGUOUS'),
ctypes.c_int,
_getcoefs_func_type,
ctypes.c_int
]
# Call the C++ backend.
c_data = cls.lib.MultiHMatrixCreate(points_target,points_target.shape[0],points_source, points_source.shape[0], _getcoefs_func_type(getcoefs),nm)
return cls(c_data, **params)
@classmethod
def from_submatrices(cls, getsubmatrix, nm, points_target, points_source=None, **params):
"""Construct an instance of the class from a evaluation function.
Parameters
----------
        getsubmatrix: Callable
            A function evaluating the matrix in a given range.
        points_target: np.ndarray of shape (N, 3)
            The coordinates of the target points. If points_source=None, also used as the source points.
        points_source: np.ndarray of shape (N, 3)
            If not None, the coordinates of the source points.
epsilon: float, keyword-only, optional
Tolerance of the Adaptive Cross Approximation
eta: float, keyword-only, optional
Criterion to choose the blocks to compress
minclustersize: int, keyword-only, optional
Minimum shape of a block
maxblocksize: int, keyword-only, optional
Maximum number of coefficients in a block
Returns
-------
HMatrix or ComplexHMatrix
"""
# Set params.
cls._set_building_params(**params)
# Boilerplate code for Python/C++ interface.
_getsumatrix_func_type = ctypes.CFUNCTYPE(
None, ctypes.POINTER(ctypes.c_int), ctypes.POINTER(ctypes.c_int),
ctypes.c_int, ctypes.c_int, ctypes.POINTER(ctypes.c_double)
)
if points_source is None:
cls.lib.MultiHMatrixCreatewithsubmatSym.restype = ctypes.POINTER(_C_MultiHMatrix)
cls.lib.MultiHMatrixCreatewithsubmatSym.argtypes = [
np.ctypeslib.ndpointer(dtype=np.float64, ndim=2, flags='C_CONTIGUOUS'),
ctypes.c_int,
_getsumatrix_func_type,
ctypes.c_int
]
# Call the C++ backend.
c_data = cls.lib.MultiHMatrixCreatewithsubmatSym(points_target, points_target.shape[0], _getsumatrix_func_type(getsubmatrix),nm)
else:
cls.lib.MultiHMatrixCreatewithsubmat.restype = ctypes.POINTER(_C_MultiHMatrix)
cls.lib.MultiHMatrixCreatewithsubmat.argtypes = [
np.ctypeslib.ndpointer(dtype=np.float64, ndim=2, flags='C_CONTIGUOUS'),
ctypes.c_int,
np.ctypeslib.ndpointer(dtype=np.float64, ndim=2, flags='C_CONTIGUOUS'),
ctypes.c_int,
_getsumatrix_func_type,
ctypes.c_int
]
# Call the C++ backend.
c_data = cls.lib.MultiHMatrixCreatewithsubmat(points_target,points_target.shape[0],points_source, points_source.shape[0], _getsumatrix_func_type(getsubmatrix),nm)
return cls(c_data, **params)
@classmethod
def _set_building_params(cls, *, eta=None, minclustersize=None, epsilon=None, maxblocksize=None):
"""Put the parameters in the C++ backend."""
if epsilon is not None:
cls.lib.setepsilon.restype = None
cls.lib.setepsilon.argtypes = [ ctypes.c_double ]
cls.lib.setepsilon(epsilon)
if eta is not None:
cls.lib.seteta.restype = None
cls.lib.seteta.argtypes = [ ctypes.c_double ]
cls.lib.seteta(eta)
if minclustersize is not None:
cls.lib.setminclustersize.restype = None
cls.lib.setminclustersize.argtypes = [ ctypes.c_int ]
cls.lib.setminclustersize(minclustersize)
if maxblocksize is not None:
cls.lib.setmaxblocksize.restype = None
cls.lib.setmaxblocksize.argtypes = [ ctypes.c_int ]
cls.lib.setmaxblocksize(maxblocksize)
def __str__(self):
return f"{self.__class__.__name__}(shape={self.shape})"
def __getitem__(self, key):
# self.lib.getHMatrix.restype=ctypes.POINTER(_C_HMatrix)
# self.lib.getHMatrix.argtypes=[ctypes.POINTER(_C_MultiHMatrix), ctypes.c_int]
# c_data_hmatrix = self.lib.getHMatrix(self.c_data,key)
# return HMatrix(c_data_hmatrix,**self.params)
return self.hmatrices[key]
def matvec(self, l , vector):
"""Matrix-vector product (interface for scipy iterative solvers)."""
assert self.shape[1] == vector.shape[0], "Matrix-vector product of matrices of wrong shapes."
# Boilerplate for Python/C++ interface
self.lib.MultiHMatrixVecProd.argtypes = [
ctypes.POINTER(_C_MultiHMatrix),
ctypes.c_int,
np.ctypeslib.ndpointer(self.dtype, flags='C_CONTIGUOUS'),
np.ctypeslib.ndpointer(self.dtype, flags='C_CONTIGUOUS')
]
# Initialize vector
result = np.zeros((self.shape[0],), dtype=self.dtype)
# Call C++ backend
self.lib.MultiHMatrixVecProd(self.c_data,l , vector, result)
return result
class MultiHMatrix(AbstractMultiHMatrix):
"""A real-valued hierarchical matrix based on htool C++ library.
Create with HMatrix.from_coefs or HMatrix.from_submatrices.
Attributes
----------
c_data:
Pointer to the raw data used by the C++ library.
shape: Tuple[int, int]
Shape of the matrix.
nb_dense_blocks: int
Number of dense blocks in the hierarchical matrix.
nb_low_rank_blocks: int
Number of sparse blocks in the hierarchical matrix.
nb_blocks: int
Total number of blocks in the decomposition.
params: dict
The parameters that have been used to build the matrix.
"""
libfile = os.path.join(os.path.dirname(__file__), '../libhtool_shared')
if 'linux' in sys.platform:
lib = ctypes.cdll.LoadLibrary(libfile+'.so')
elif sys.platform == 'darwin':
lib = ctypes.cdll.LoadLibrary(libfile+'.dylib')
elif sys.platform == 'win32':
lib = ctypes.cdll.LoadLibrary(libfile+'.dll')
dtype = ctypes.c_double
class ComplexMultiHMatrix(AbstractMultiHMatrix):
"""A complex-valued hierarchical matrix based on htool C++ library.
Create with ComplexHMatrix.from_coefs or ComplexHMatrix.from_submatrices.
Attributes
----------
c_data:
Pointer to the raw data used by the C++ library.
shape: Tuple[int, int]
Shape of the matrix.
nb_dense_blocks: int
Number of dense blocks in the hierarchical matrix.
nb_low_rank_blocks: int
Number of sparse blocks in the hierarchical matrix.
nb_blocks: int
Total number of blocks in the decomposition.
params: dict
The parameters that have been used to build the matrix.
"""
libfile = os.path.join(os.path.dirname(__file__), '../libhtool_shared_complex')
if 'linux' in sys.platform:
lib = ctypes.cdll.LoadLibrary(libfile+'.so')
elif sys.platform == 'darwin':
lib = ctypes.cdll.LoadLibrary(libfile+'.dylib')
elif sys.platform == 'win32':
lib = ctypes.cdll.LoadLibrary(libfile+'.dll')
dtype = np.complex128
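# Minimal usage sketch (assumes the compiled htool shared library loaded above
# is available); the kernel below is only a toy two-matrix example.
if __name__ == "__main__":
    points = np.random.rand(100, 3)
    nm = 2  # number of matrices assembled at once
    def getcoefs(i, j, coefs):
        # fill the nm coefficients of the (i, j) entry, one per matrix
        d = float(np.linalg.norm(points[i] - points[j])) + 1e-2
        coefs[0] = 1.0 / d
        coefs[1] = float(np.exp(-d))
    A = MultiHMatrix.from_coefs(getcoefs, nm, points, epsilon=1e-3, eta=10.0)
    x = np.ones(A.shape[1])
    y = A.matvec(0, x)  # product with the first of the nm matrices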
| PierreMarchand20/htool | interface/htool/multihmatrix.py | Python | mit | 10,354 | 0.005698 |
# -*- coding: utf-8 -*-
from ast import literal_eval
from odoo import models, fields, api
class SaleOrderLine(models.Model):
_inherit = 'sale.order.line'
config_ok = fields.Boolean(
related='product_id.config_ok',
string="Configurable",
readonly=True
)
@api.multi
def reconfigure_product(self):
""" Creates and launches a product configurator wizard with a linked
        template and variant in order to re-configure an existing product. It is
        essentially a shortcut to pre-fill configuration data of a variant"""
cfg_steps = self.product_id.product_tmpl_id.config_step_line_ids
active_step = str(cfg_steps[0].id) if cfg_steps else 'configure'
product_modifiable = literal_eval(self.env['ir.config_parameter'].sudo().get_param(
'product_configurator.product_modifiable', default='False'))
wizard_obj = self.env['product.configurator']
wizard = wizard_obj.create({
'product_modifiable': product_modifiable,
'product_id': self.product_id.id,
'state': active_step,
'order_line_id': self.id,
})
return {
'type': 'ir.actions.act_window',
'res_model': 'product.configurator',
'name': "Configure Product",
'view_mode': 'form',
'context': dict(
self.env.context,
wizard_id=wizard.id,
),
'target': 'new',
'res_id': wizard.id,
}
| microcom/odoo-product-configurator | product_configurator_wizard/models/sale.py | Python | agpl-3.0 | 1,538 | 0.00065 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import argparse
import numpy as np
import matplotlib
matplotlib.use('Agg')
matplotlib.rcParams['pdf.fonttype'] = 42
matplotlib.rcParams['svg.fonttype'] = 'none'
from deeptools import cm # noqa: F401
import matplotlib.pyplot as plt
from deeptools.correlation import Correlation
from deeptools.parserCommon import writableFile
from deeptools._version import __version__
old_settings = np.seterr(all='ignore')
def parse_arguments(args=None):
basic_args = plot_correlation_args()
heatmap_parser = heatmap_options()
scatter_parser = scatterplot_options()
parser = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter,
description="""
Tool for the analysis and visualization of sample correlations based on the output of multiBamSummary or
multiBigwigSummary. Pearson or Spearman methods are available to compute correlation
coefficients. Results can be saved as multiple
scatter plots depicting the pairwise correlations or as a clustered heatmap,
where the colors represent the correlation coefficients and the clusters are constructed using complete linkage.
Optionally, the values can be saved as tables, too.
detailed help:
plotCorrelation -h
""",
epilog='example usages:\n'
'plotCorrelation -in results_file --whatToPlot heatmap --corMethod pearson -o heatmap.png\n\n'
' \n\n',
parents=[basic_args, heatmap_parser, scatter_parser])
return parser
def plot_correlation_args():
parser = argparse.ArgumentParser(add_help=False)
required = parser.add_argument_group('Required arguments')
# define the arguments
required.add_argument('--corData', '-in',
metavar='FILE',
help='Compressed matrix of values generated by multiBigwigSummary or multiBamSummary',
required=True)
required.add_argument('--corMethod', '-c',
help="Correlation method.",
choices=['spearman', 'pearson'],
required=True)
required.add_argument('--whatToPlot', '-p',
help="Choose between a heatmap or pairwise scatter plots",
choices=['heatmap', 'scatterplot'],
required=True)
optional = parser.add_argument_group('Optional arguments')
optional.add_argument('--plotFile', '-o',
help='File to save the heatmap to. The file extension determines the format, '
'so heatmap.pdf will save the heatmap in PDF format. '
'The available formats are: .png, '
'.eps, .pdf and .svg.',
type=writableFile,
metavar='FILE')
optional.add_argument('--skipZeros',
help='By setting this option, genomic regions '
'that have zero or missing (nan) values in all samples '
'are excluded.',
action='store_true',
required=False)
optional.add_argument('--labels', '-l',
metavar='sample1 sample2',
help='User defined labels instead of default labels from '
'file names. '
'Multiple labels have to be separated by spaces, e.g. '
'--labels sample1 sample2 sample3',
nargs='+')
optional.add_argument('--plotTitle', '-T',
help='Title of the plot, to be printed on top of '
'the generated image. Leave blank for no title. (Default: %(default)s)',
default='')
optional.add_argument('--plotFileFormat',
metavar='FILETYPE',
help='Image format type. If given, this option '
'overrides the image format based on the plotFile '
'ending. The available options are: png, '
'eps, pdf and svg.',
choices=['png', 'pdf', 'svg', 'eps', 'plotly'])
optional.add_argument(
'--removeOutliers',
help='If set, bins with very large counts are removed. '
'Bins with abnormally high reads counts artificially increase '
'pearson correlation; that\'s why, multiBamSummary tries '
'to remove outliers using the median absolute deviation (MAD) '
'method applying a threshold of 200 to only consider extremely '
'large deviations from the median. The ENCODE blacklist page '
'(https://sites.google.com/site/anshulkundaje/projects/blacklists) '
'contains useful information about regions with unusually high counts'
'that may be worth removing.',
action='store_true')
optional.add_argument('--version', action='version',
version='%(prog)s {}'.format(__version__))
group = parser.add_argument_group('Output optional options')
group.add_argument('--outFileCorMatrix',
help='Save matrix with pairwise correlation values to a tab-separated file.',
metavar='FILE',
type=writableFile)
return parser
def scatterplot_options():
"""
Options specific for creating the scatter plot
"""
parser = argparse.ArgumentParser(add_help=False)
scatter_opts = parser.add_argument_group('Scatter plot options')
scatter_opts.add_argument('--xRange',
help='The X axis range. The default scales these such that the full range of dots is displayed.',
type=int,
nargs=2,
default=None)
scatter_opts.add_argument('--yRange',
help='The Y axis range. The default scales these such that the full range of dots is displayed.',
type=int,
nargs=2,
default=None)
scatter_opts.add_argument('--log1p',
help='Plot the natural log of the scatter plot after adding 1. Note that this is ONLY for plotting, the correlation is unaffected.',
action='store_true')
return parser
def heatmap_options():
"""
Options for generating the correlation heatmap
"""
parser = argparse.ArgumentParser(add_help=False)
heatmap = parser.add_argument_group('Heatmap options')
heatmap.add_argument('--plotHeight',
help='Plot height in cm. (Default: %(default)s)',
type=float,
default=9.5)
heatmap.add_argument('--plotWidth',
help='Plot width in cm. The minimum value is 1 cm. (Default: %(default)s)',
type=float,
default=11)
heatmap.add_argument('--zMin', '-min',
default=None,
help='Minimum value for the heatmap intensities. '
'If not specified, the value is set automatically',
type=float)
heatmap.add_argument('--zMax', '-max',
default=None,
help='Maximum value for the heatmap intensities.'
'If not specified, the value is set automatically',
type=float)
heatmap.add_argument(
'--colorMap', default='jet',
metavar='',
help='Color map to use for the heatmap. Available values can be '
'seen here: '
'http://matplotlib.org/examples/color/colormaps_reference.html')
heatmap.add_argument('--plotNumbers',
help='If set, then the correlation number is plotted '
'on top of the heatmap. This option is only valid when plotting a heatmap.',
action='store_true',
required=False)
return parser
def main(args=None):
args = parse_arguments().parse_args(args)
if args.plotFile is None and args.outFileCorMatrix is None:
sys.exit("At least one of --plotFile and --outFileCorMatrix must be specified!\n")
corr = Correlation(args.corData,
args.corMethod,
labels=args.labels,
remove_outliers=args.removeOutliers,
skip_zeros=args.skipZeros)
if args.corMethod == 'pearson':
# test if there are outliers and write a message recommending the removal
if len(corr.get_outlier_indices(np.asarray(corr.matrix).flatten())) > 0:
if args.removeOutliers:
sys.stderr.write("\nOutliers were detected in the data. They "
"will be removed to avoid bias "
"in the pearson correlation.\n")
else:
sys.stderr.write("\nOutliers were detected in the data. Consider "
"using the --removeOutliers parameter to avoid a bias "
"in the pearson correlation.\n")
if args.colorMap:
try:
plt.get_cmap(args.colorMap)
except ValueError as error:
sys.stderr.write(
"A problem was found. Message: {}\n".format(error))
exit()
if args.plotFile is not None:
if args.whatToPlot == 'scatterplot':
corr.plot_scatter(args.plotFile,
plot_title=args.plotTitle,
image_format=args.plotFileFormat,
xRange=args.xRange,
yRange=args.yRange,
log1p=args.log1p)
else:
corr.plot_correlation(args.plotFile,
vmax=args.zMax,
vmin=args.zMin,
colormap=args.colorMap,
plot_title=args.plotTitle,
image_format=args.plotFileFormat,
plot_numbers=args.plotNumbers,
plotWidth=args.plotWidth,
plotHeight=args.plotHeight)
if args.outFileCorMatrix:
o = open(args.outFileCorMatrix, "w")
o.write("#plotCorrelation --outFileCorMatrix\n")
corr.save_corr_matrix(o)
o.close()
| fidelram/deepTools | deeptools/plotCorrelation.py | Python | gpl-3.0 | 10,834 | 0.002861 |
# -*- coding: utf-8 -*-
# Copyright 2016 OpenSynergy Indonesia
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
from openerp import models, fields, api
from openerp.tools.translate import _
from openerp.exceptions import Warning as UserError
class ResPartner(models.Model):
_inherit = "res.partner"
@api.multi
def _compute_single_sale_policy(self):
single_sale_order = 0.0
unset_single_sale_order = False
criteria = [
("user_ids.id", "in", [self.env.user.id]),
]
policy = self.env["partner.risk_limit_policy"].search(
criteria, limit=1)
if len(policy) == 1:
single_sale_order = policy.single_sale_order_limit
unset_single_sale_order = policy.unset_single_sale_order_limit
for partner in self:
partner.single_sale_order_limit_policy = single_sale_order
partner.unset_single_sale_order_limit_policy = \
unset_single_sale_order
single_sale_order_limit_policy = fields.Float(
string="Single Sale Order Limit Policy",
compute="_compute_single_sale_policy",
store=False,
)
unset_single_sale_order_limit_policy = fields.Boolean(
string="Unset Single Sale Order Limit Policy",
compute="_compute_single_sale_policy",
store=False,
)
@api.model
def _update_limit_check_context(self, values):
_super = super(ResPartner, self)
ctx = _super._update_limit_check_context(values)
for field in iter(values):
if field == "risk_single_sale_order_limit":
ctx.update({"check_single_sale_order_limit": True})
return ctx
@api.constrains(
"risk_single_sale_order_limit",
)
def _check_single_sale_limit_policy(self):
for partner in self:
if partner.single_sale_order_limit_policy and \
partner.single_sale_order_limit_policy < \
partner.risk_single_sale_order_limit and \
partner.risk_single_sale_order_limit > 0 and \
self._context.get("check_single_sale_order_limit", False):
raise UserError(_("Unauthorized single sale order amount"))
if not partner.unset_single_sale_order_limit_policy and \
partner.risk_single_sale_order_limit <= 0.0 and \
self._context.get("check_single_sale_order_limit", False):
raise UserError(
_("Unauthorized to unset single sale order limit amount"))
| open-synergy/opnsynid-partner-contact | partner_single_sale_risk_policy/models/res_partner.py | Python | agpl-3.0 | 2,591 | 0 |
#!/usr/bin/python3
# x11vnc
# SoftAP
from sense_hat import SenseHat
from time import sleep
sense = SenseHat()
sense.low_light = True
led_loop = [4, 5, 6, 7, 15, 23, 31, 39, 47, 55, 63, 62, 61, 60, 59, 58, 57, 56, 48, 40, 32, 24, 16, 8, 0, 1, 2, 3]
prev_x = 0
prev_y = 0
while True:
sense.set_rotation(180)
sense.clear()
r = 32
g = 32
b = 200
# Eyes
sense.set_pixel(2, 1, r, g, b)
sense.set_pixel(5, 1, r, g, b)
# Nose
sense.set_pixel(3, 3, r+223, g, b-100)
sense.set_pixel(4, 3, r+223, g, b-100)
# Mouth
sense.set_pixel(1, 5, 255, 255, 0)
sense.set_pixel(2, 6, 255, 255, 0)
sense.set_pixel(3, 6, 255, 255, 0)
sense.set_pixel(4, 6, 255, 255, 0)
sense.set_pixel(5, 6, 255, 255, 0)
sense.set_pixel(6, 5, 255, 255, 0)
sense.set_pixel(1, 4, 255, 255, 0)
sense.set_pixel(6, 4, 255, 255, 0)
for i in range(0, 5):
sense.set_pixel(5, 1, r-32, g-32, b-32)
for offset in led_loop:
y = offset // 8 # row
x = offset % 8 # column
if x != prev_x or y != prev_y:
sense.set_pixel(prev_x, prev_y, 0, 0, 0)
sense.set_pixel(x, y, 0, 255, 0)
prev_x = x
prev_y = y
sleep(0.1)
sense.set_pixel(5, 1, r, g, b)
for offset in led_loop:
y = offset // 8 # row
x = offset % 8 # column
if x != prev_x or y != prev_y:
sense.set_pixel(prev_x, prev_y, 0, 0, 0)
sense.set_pixel(x, y, 0, 255, 0)
prev_x = x
prev_y = y
sleep(0.1)
    t = sense.get_temperature()  # get_temperature() already returns degrees Celsius
p = sense.get_pressure()
h = sense.get_humidity()
t = round(t, 1)
p = round(p, 1)
h = round(h, 1)
print("Deze Raspberry Pi meet de temperatuur ({0} graden) en de luchtdruk ({1} hPa).".format(t,p))
sense.show_message("Deze Raspberry Pi meet de temperatuur ({0} graden) en de luchtdruk ({1} hPa).".format(t,p), text_colour=[0,255,0])
# sense.set_rotation(180)
| pindanet/Raspberry | demoSenseHat.py | Python | gpl-3.0 | 2,108 | 0.005693 |
# -*- coding: utf-8 -*-
"""digitalocean API to manage droplets"""
__version__ = "1.16.0"
__author__ = "Lorenzo Setale ( http://who.is.lorenzo.setale.me/? )"
__author_email__ = "lorenzo@setale.me"
__license__ = "LGPL v3"
__copyright__ = "Copyright (c) 2012-2020 Lorenzo Setale"
from .Manager import Manager
from .Droplet import Droplet, DropletError, BadKernelObject, BadSSHKeyFormat
from .Region import Region
from .Size import Size
from .Image import Image
from .Action import Action
from .Account import Account
from .Balance import Balance
from .Domain import Domain
from .Record import Record
from .SSHKey import SSHKey
from .Kernel import Kernel
from .FloatingIP import FloatingIP
from .Volume import Volume
from .baseapi import Error, EndPointError, TokenError, DataReadError, NotFoundError
from .Tag import Tag
from .LoadBalancer import LoadBalancer
from .LoadBalancer import StickySessions, ForwardingRule, HealthCheck
from .Certificate import Certificate
from .Snapshot import Snapshot
from .Project import Project
from .Firewall import Firewall, InboundRule, OutboundRule, Destinations, Sources
from .VPC import VPC
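# Minimal usage sketch (the token below is a placeholder):
#
#   import digitalocean
#   manager = digitalocean.Manager(token="YOUR_API_TOKEN")
#   for droplet in manager.get_all_droplets():
#       print(droplet.name, droplet.ip_address)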
| koalalorenzo/python-digitalocean | digitalocean/__init__.py | Python | lgpl-3.0 | 1,128 | 0.001773 |
import zmq
from crpropa import Module
class SendCandidateProperties( Module ):
""" Sends candidate proporties given by the function
```extract_func( candidate )``` over the network
to the server on ```ip_port```
"""
def __init__( self, ip_port, extract_func ):
Module.__init__( self )
self.socket = None
self.ip_port = "tcp://" + ip_port
self.extract_func = extract_func
def beginRun( self ):
context = zmq.Context()
self.socket = context.socket( zmq.REQ )
self.socket.connect( self.ip_port )
def process(self, c):
self.socket.send_pyobj( self.extract_func( c ) )
msg_in = self.socket.recv_pyobj()
def endRun( self ):
del self.socket
class RecvCandidateProperties:
""" Server side: receive data from the client module
while listening on ```ip_port```
self.recv method should be in a non-blocking loop
"""
def __init__( self, ip_port ):
context = zmq.Context()
self.socket = context.socket( zmq.REP )
self.socket.bind( "tcp://" + ip_port )
def recv( self ):
msg = self.socket.recv_pyobj()
self.socket.send_pyobj(msg)
return msg
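# Usage sketch: run the receiving side in one process and attach the sending
# module to the simulation in another. The address and the extractor lambda
# below are only examples.
#
#   server process:
#       recv = RecvCandidateProperties("127.0.0.1:5555")
#       while True:
#           print(recv.recv())
#
#   client (simulation) process:
#       send = SendCandidateProperties("127.0.0.1:5555",
#                                      lambda c: c.current.getEnergy())
#       # then add `send` to the crpropa ModuleList before running it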
| adundovi/CRPropa3-scripts | python_modules/network.py | Python | gpl-3.0 | 1,236 | 0.022654 |
#!/usr/bin/env python2
__license__ = 'GPL v3'
__copyright__ = '2008, Kovid Goyal <kovid at kovidgoyal.net>'
'''
Builtin recipes.
'''
import re, time, io
from calibre.web.feeds.news import (BasicNewsRecipe, CustomIndexRecipe,
AutomaticNewsRecipe, CalibrePeriodical)
from calibre.ebooks.BeautifulSoup import BeautifulSoup
from calibre.utils.config import JSONConfig
basic_recipes = (BasicNewsRecipe, AutomaticNewsRecipe, CustomIndexRecipe,
CalibrePeriodical)
custom_recipes = JSONConfig('custom_recipes/index.json')
def custom_recipe_filename(id_, title):
from calibre.utils.filenames import ascii_filename
return ascii_filename(title[:50]) + \
('_%s.recipe'%id_)
def compile_recipe(src):
'''
Compile the code in src and return a recipe object, if found.
:param src: Python source code as bytestring or unicode object
:return: Recipe class or None, if no such class was found in src
'''
if not isinstance(src, unicode):
match = re.search(r'coding[:=]\s*([-\w.]+)', src[:200])
enc = match.group(1) if match else 'utf-8'
src = src.decode(enc)
# Python complains if there is a coding declaration in a unicode string
src = re.sub(r'^#.*coding\s*[:=]\s*([-\w.]+)', '#', src.lstrip(u'\ufeff'), flags=re.MULTILINE)
# Translate newlines to \n
src = io.StringIO(src, newline=None).getvalue()
namespace = {
'BasicNewsRecipe':BasicNewsRecipe,
'AutomaticNewsRecipe':AutomaticNewsRecipe,
'time':time, 're':re,
'BeautifulSoup':BeautifulSoup
}
exec src in namespace
for x in namespace.itervalues():
if (isinstance(x, type) and issubclass(x, BasicNewsRecipe) and x not
in basic_recipes):
return x
return None
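# Minimal usage sketch (illustrative only, not part of calibre itself). The
# compiled source can rely on the names injected into the exec namespace
# above, e.g. BasicNewsRecipe:
#
#   src = u'''
#   class Example(BasicNewsRecipe):
#       title = u'Example feed'
#       feeds = [(u'Example', u'http://example.com/rss.xml')]
#   '''
#   recipe_class = compile_recipe(src)
#   # recipe_class is the Example class, or None if no recipe subclass is found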
| timpalpant/calibre | src/calibre/web/feeds/recipes/__init__.py | Python | gpl-3.0 | 1,816 | 0.010463 |
"""Common settings and globals."""
from os.path import abspath, basename, dirname, join, normpath
from sys import path
try:
    from secret import *
except ImportError:
    pass
########## PATH CONFIGURATION
# Absolute filesystem path to the Django project directory:
DJANGO_ROOT = dirname(dirname(abspath(__file__)))
# Absolute filesystem path to the top-level project folder:
SITE_ROOT = dirname(DJANGO_ROOT)
# Site name:
SITE_NAME = basename(DJANGO_ROOT)
# Add our project to our pythonpath, this way we don't need to type our project
# name in our dotted import paths:
path.append(DJANGO_ROOT)
# Add SITE_ROOT to lookup application (wsgi)
path.append(SITE_ROOT)
########## END PATH CONFIGURATION
########## DEBUG CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#debug
DEBUG = False
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-debug
TEMPLATE_DEBUG = DEBUG
########## END DEBUG CONFIGURATION
########## MANAGER CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#admins
ADMINS = (
('olivier', 'olivier.larcheveque@gmail.com'),
)
# See: https://docs.djangoproject.com/en/dev/ref/settings/#managers
MANAGERS = ADMINS
########## END MANAGER CONFIGURATION
########## DATABASE CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.',
'NAME': '',
'USER': '',
'PASSWORD': '',
'HOST': '',
'PORT': '',
}
}
########## END DATABASE CONFIGURATION
########## GENERAL CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#time-zone
TIME_ZONE = 'America/Los_Angeles'
# See: https://docs.djangoproject.com/en/dev/ref/settings/#language-code
LANGUAGE_CODE = 'en-us'
# See: https://docs.djangoproject.com/en/dev/ref/settings/#site-id
SITE_ID = 1
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-i18n
USE_I18N = True
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-l10n
USE_L10N = True
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-tz
USE_TZ = True
########## END GENERAL CONFIGURATION
########## MEDIA CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#media-root
MEDIA_ROOT = normpath(join(SITE_ROOT, 'media'))
# See: https://docs.djangoproject.com/en/dev/ref/settings/#media-url
MEDIA_URL = '/media/'
########## END MEDIA CONFIGURATION
########## STATIC FILE CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#static-root
STATIC_ROOT = normpath(join(SITE_ROOT, 'assets'))
# See: https://docs.djangoproject.com/en/dev/ref/settings/#static-url
STATIC_URL = '/static/'
# See: https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#std:setting-STATICFILES_DIRS
STATICFILES_DIRS = (
normpath(join(SITE_ROOT, 'static')),
)
# See: https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#staticfiles-finders
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
)
########## END STATIC FILE CONFIGURATION
########## SECRET CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#secret-key
########## END SECRET CONFIGURATION
########## FIXTURE CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#std:setting-FIXTURE_DIRS
FIXTURE_DIRS = (
normpath(join(SITE_ROOT, 'fixtures')),
)
########## END FIXTURE CONFIGURATION
########## TEMPLATE CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-context-processors
TEMPLATE_CONTEXT_PROCESSORS = (
'django.contrib.auth.context_processors.auth',
'django.core.context_processors.debug',
'django.core.context_processors.i18n',
'django.core.context_processors.media',
'django.core.context_processors.static',
'django.core.context_processors.tz',
'django.contrib.messages.context_processors.messages',
'django.core.context_processors.request',
'cms.context_processors.media',
'sekizai.context_processors.sekizai',
)
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-loaders
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
)
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-dirs
TEMPLATE_DIRS = (
normpath(join(SITE_ROOT, 'templates')),
)
########## END TEMPLATE CONFIGURATION
########## MIDDLEWARE CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#middleware-classes
MIDDLEWARE_CLASSES = (
# Default Django middleware.
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.locale.LocaleMiddleware',
'django.middleware.doc.XViewMiddleware',
'django.middleware.common.CommonMiddleware',
'cms.middleware.page.CurrentPageMiddleware',
'cms.middleware.user.CurrentUserMiddleware',
'cms.middleware.toolbar.ToolbarMiddleware',
'cms.middleware.language.LanguageCookieMiddleware',
)
########## END MIDDLEWARE CONFIGURATION
########## URL CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#root-urlconf
ROOT_URLCONF = '%s.urls' % SITE_NAME
########## END URL CONFIGURATION
########## APP CONFIGURATION
DJANGO_APPS = (
# Default Django apps:
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
# Useful template tags:
# 'django.contrib.humanize',
# Admin panel and documentation:
'django.contrib.admin',
# 'django.contrib.admindocs',
'django.contrib.markup',
)
THIRD_PARTY_APPS = (
# Database migration helpers:
'south',
# Django CMS
'cms',
'cms.stacks',
    'menus',
    'mptt',
'sekizai',
'django_countries',
)
# Apps specific for this project go here.
LOCAL_APPS = (
'resume',
)
# See: https://docs.djangoproject.com/en/dev/ref/settings/#installed-apps
INSTALLED_APPS = DJANGO_APPS + THIRD_PARTY_APPS + LOCAL_APPS
########## END APP CONFIGURATION
########## LOGGING CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#logging
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
},
'console': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
},
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
########## END LOGGING CONFIGURATION
########## WSGI CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#wsgi-application
WSGI_APPLICATION = 'wsgi.application'
########## END WSGI CONFIGURATION
######### DJANGO CMS
CMS_PERMISSION = True
CMS_PUBLIC_FOR = "all"
LANGUAGES = [
('fr', 'French'),
('en', 'English'),
]
CMS_LANGUAGES = {
'default': {
'fallbacks': ['fr', 'en', ],
'redirect_on_fallback':True,
'public': True,
'hide_untranslated': False,
}
}
CMS_TEMPLATES = (
('layouts/classic.html', 'Classic'),
('layouts/classic_home.html', 'Classic Home'),
('layouts/classic_2columns.html', 'Classic 2 columns'),
)
######### END DJANGO CMS
| olarcheveque/usinacv | usinacv/usinacv/settings/base.py | Python | mit | 8,153 | 0.004661 |
import numpy as np
from Coupling import Coupling
class Coupling2DCavities2D(Coupling):
"""
    Coupling for 2D cavity to 2D cavity transmission.
"""
@property
def impedance_from(self):
"""
        Chooses the right impedance of subsystem_from.
Applies boundary conditions correction as well.
"""
return self.subsystem_from.impedance
@property
def impedance_to(self):
"""
        Chooses the right impedance of subsystem_to.
Applies boundary conditions correction as well.
"""
return self.subsystem_to.impedance
@property
def tau(self):
"""
Transmission coefficient.
"""
return np.zeros(self.frequency.amount)
@property
def clf(self):
"""
        Coupling loss factor for transmission from a 2D cavity to a 2D cavity.
.. math:: \\eta_{12} = \\frac{ \\tau_{12}}{4 \\pi}
See BAC, equation 3.14
"""
return self.tau / (4.0 * np.pi) | FRidh/Sea | Sea/model/couplings/Coupling2DCavities2D.py | Python | bsd-3-clause | 1,049 | 0.010486 |
#!/usr/bin/env python
# coding=utf-8
"""157. Solving the diophantine equation <sup>1</sup>/<sub><var>a</var></sub>+<sup>1</sup>/<sub><var>b</var></sub>= <sup><var>p</var></sup>/<sub>10<sup><var>n</var></sup></sub>
https://projecteuler.net/problem=157
Consider the diophantine equation 1/a+1/b= p/10n with a, b, p, n positive
integers and a ≤ b.
For n=1 this equation has 20 solutions that are listed below:
1/1+1/1=20/10 | 1/1+1/2=15/10 | 1/1+1/5=12/10 | 1/1+1/10=11/10 | 1/2+1/2=10/10
---|---|---|---|---
1/2+1/5=7/10 | 1/2+1/10=6/10 | 1/3+1/6=5/10 | 1/3+1/15=4/10 | 1/4+1/4=5/10
1/4+1/20=3/10 | 1/5+1/5=4/10 | 1/5+1/10=3/10 | 1/6+1/30=2/10 | 1/10+1/10=2/10
1/11+1/110=1/10 | 1/12+1/60=1/10 | 1/14+1/35=1/10 | 1/15+1/30=1/10 |
1/20+1/20=1/10
How many solutions has this equation for 1 ≤ n ≤ 9?
"""
| openqt/algorithms | projecteuler/pe157-solving-the-diophantine-equation-sup1supsubvaravarsubsup1supsubvarbvarsub-supvarpvarsupsub10supvarnvarsupsub.py | Python | gpl-3.0 | 823 | 0.009792 |
############################################################################
#
# Copyright (C) 2016 The Qt Company Ltd.
# Contact: https://www.qt.io/licensing/
#
# This file is part of Qt Creator.
#
# Commercial License Usage
# Licensees holding valid commercial Qt licenses may use this file in
# accordance with the commercial license agreement provided with the
# Software or, alternatively, in accordance with the terms contained in
# a written agreement between you and The Qt Company. For licensing terms
# and conditions see https://www.qt.io/terms-conditions. For further
# information use the contact form at https://www.qt.io/contact-us.
#
# GNU General Public License Usage
# Alternatively, this file may be used under the terms of the GNU
# General Public License version 3 as published by the Free Software
# Foundation with exceptions as appearing in the file LICENSE.GPL3-EXCEPT
# included in the packaging of this file. Please review the following
# information to ensure the GNU General Public License requirements will
# be met: https://www.gnu.org/licenses/gpl-3.0.html.
#
############################################################################
source("../../shared/qtcreator.py")
# test New Qt Quick Application build and run for release and debug option
def main():
startApplication("qtcreator" + SettingsPath)
if not startedWithoutPluginError():
return
checkedTargets, projectName = createNewQtQuickApplication(tempDir(), "SampleApp")
# run project for debug and release and verify results
runVerify(checkedTargets)
#close Qt Creator
invokeMenuItem("File", "Exit")
| pivonroll/Qt_Creator | tests/system/suite_APTW/tst_APTW02/test.py | Python | gpl-3.0 | 1,630 | 0.00184 |
# synthrepo.py - repo synthesis
#
# Copyright 2012 Facebook
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
'''synthesize structurally interesting change history
This extension is useful for creating a repository with properties
that are statistically similar to an existing repository. During
analysis, a simple probability table is constructed from the history
of an existing repository. During synthesis, these properties are
reconstructed.
Properties that are analyzed and synthesized include the following:
- Lines added or removed when an existing file is modified
- Number and sizes of files added
- Number of files removed
- Line lengths
- Topological distance to parent changeset(s)
- Probability of a commit being a merge
- Probability of a newly added file being added to a new directory
- Interarrival time, and time zone, of commits
- Number of files in each directory
A few obvious properties that are not currently handled realistically:
- Merges are treated as regular commits with two parents, which is not
realistic
- Modifications are not treated as operations on hunks of lines, but
as insertions and deletions of randomly chosen single lines
- Committer ID (always random)
- Executability of files
- Symlinks and binary files are ignored
'''
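# Typical invocation (illustrative only; both commands and their options are
# defined below in this file):
#
#   $ hg analyze -o model.json               # run inside an existing repository
#   $ hg init synthetic && cd synthetic
#   $ hg synthesize --initfiles 1000 -c 500 ../model.json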
from __future__ import absolute_import
import bisect
import collections
import itertools
import json
import os
import random
import sys
import time
from mercurial.i18n import _
from mercurial.node import (
    hex,
    nullid,
    nullrev,
    short,
)
from mercurial import (
cmdutil,
context,
error,
hg,
patch,
scmutil,
util,
)
# Note for extension authors: ONLY specify testedwith = 'internal' for
# extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
# be specifying the version(s) of Mercurial they are tested with, or
# leave the attribute unspecified.
testedwith = 'internal'
cmdtable = {}
command = cmdutil.command(cmdtable)
newfile = set(('new fi', 'rename', 'copy f', 'copy t'))
def zerodict():
return collections.defaultdict(lambda: 0)
def roundto(x, k):
if x > k * 2:
return int(round(x / float(k)) * k)
return int(round(x))
def parsegitdiff(lines):
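    # Yields one (filename, mar, lineadd, lineremove, binary) tuple per file in
    # a git-style diff: mar is 'a' (added), 'm' (modified) or 'r' (removed),
    # lineadd maps rounded added-line lengths to counts, lineremove counts
    # removed lines, and binary flags GIT binary patches.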
filename, mar, lineadd, lineremove = None, None, zerodict(), 0
binary = False
for line in lines:
start = line[:6]
if start == 'diff -':
if filename:
yield filename, mar, lineadd, lineremove, binary
mar, lineadd, lineremove, binary = 'm', zerodict(), 0, False
filename = patch.gitre.match(line).group(1)
elif start in newfile:
mar = 'a'
elif start == 'GIT bi':
binary = True
elif start == 'delete':
mar = 'r'
elif start:
s = start[0]
if s == '-' and not line.startswith('--- '):
lineremove += 1
elif s == '+' and not line.startswith('+++ '):
lineadd[roundto(len(line) - 1, 5)] += 1
if filename:
yield filename, mar, lineadd, lineremove, binary
@command('analyze',
[('o', 'output', '', _('write output to given file'), _('FILE')),
('r', 'rev', [], _('analyze specified revisions'), _('REV'))],
_('hg analyze'), optionalrepo=True)
def analyze(ui, repo, *revs, **opts):
'''create a simple model of a repository to use for later synthesis
This command examines every changeset in the given range (or all
of history if none are specified) and creates a simple statistical
model of the history of the repository. It also measures the directory
structure of the repository as checked out.
The model is written out to a JSON file, and can be used by
:hg:`synthesize` to create or augment a repository with synthetic
commits that have a structure that is statistically similar to the
analyzed repository.
'''
root = repo.root
if not root.endswith(os.path.sep):
root += os.path.sep
revs = list(revs)
revs.extend(opts['rev'])
if not revs:
revs = [':']
output = opts['output']
if not output:
output = os.path.basename(root) + '.json'
if output == '-':
fp = sys.stdout
else:
fp = open(output, 'w')
# Always obtain file counts of each directory in the given root directory.
def onerror(e):
ui.warn(_('error walking directory structure: %s\n') % e)
dirs = {}
rootprefixlen = len(root)
for dirpath, dirnames, filenames in os.walk(root, onerror=onerror):
dirpathfromroot = dirpath[rootprefixlen:]
dirs[dirpathfromroot] = len(filenames)
if '.hg' in dirnames:
dirnames.remove('.hg')
lineschanged = zerodict()
children = zerodict()
p1distance = zerodict()
p2distance = zerodict()
linesinfilesadded = zerodict()
fileschanged = zerodict()
filesadded = zerodict()
filesremoved = zerodict()
linelengths = zerodict()
interarrival = zerodict()
parents = zerodict()
dirsadded = zerodict()
tzoffset = zerodict()
# If a mercurial repo is available, also model the commit history.
if repo:
revs = scmutil.revrange(repo, revs)
revs.sort()
progress = ui.progress
_analyzing = _('analyzing')
_changesets = _('changesets')
_total = len(revs)
for i, rev in enumerate(revs):
progress(_analyzing, i, unit=_changesets, total=_total)
ctx = repo[rev]
pl = ctx.parents()
pctx = pl[0]
prev = pctx.rev()
children[prev] += 1
p1distance[rev - prev] += 1
parents[len(pl)] += 1
tzoffset[ctx.date()[1]] += 1
if len(pl) > 1:
p2distance[rev - pl[1].rev()] += 1
if prev == rev - 1:
lastctx = pctx
else:
lastctx = repo[rev - 1]
if lastctx.rev() != nullrev:
timedelta = ctx.date()[0] - lastctx.date()[0]
interarrival[roundto(timedelta, 300)] += 1
diff = sum((d.splitlines() for d in ctx.diff(pctx, git=True)), [])
fileadds, diradds, fileremoves, filechanges = 0, 0, 0, 0
for filename, mar, lineadd, lineremove, isbin in parsegitdiff(diff):
if isbin:
continue
added = sum(lineadd.itervalues(), 0)
if mar == 'm':
if added and lineremove:
lineschanged[roundto(added, 5),
roundto(lineremove, 5)] += 1
filechanges += 1
elif mar == 'a':
fileadds += 1
if '/' in filename:
filedir = filename.rsplit('/', 1)[0]
if filedir not in pctx.dirs():
diradds += 1
linesinfilesadded[roundto(added, 5)] += 1
elif mar == 'r':
fileremoves += 1
for length, count in lineadd.iteritems():
linelengths[length] += count
fileschanged[filechanges] += 1
filesadded[fileadds] += 1
dirsadded[diradds] += 1
filesremoved[fileremoves] += 1
invchildren = zerodict()
for rev, count in children.iteritems():
invchildren[count] += 1
if output != '-':
ui.status(_('writing output to %s\n') % output)
def pronk(d):
return sorted(d.iteritems(), key=lambda x: x[1], reverse=True)
json.dump({'revs': len(revs),
'initdirs': pronk(dirs),
'lineschanged': pronk(lineschanged),
'children': pronk(invchildren),
'fileschanged': pronk(fileschanged),
'filesadded': pronk(filesadded),
'linesinfilesadded': pronk(linesinfilesadded),
'dirsadded': pronk(dirsadded),
'filesremoved': pronk(filesremoved),
'linelengths': pronk(linelengths),
'parents': pronk(parents),
'p1distance': pronk(p1distance),
'p2distance': pronk(p2distance),
'interarrival': pronk(interarrival),
'tzoffset': pronk(tzoffset),
},
fp)
fp.close()
@command('synthesize',
[('c', 'count', 0, _('create given number of commits'), _('COUNT')),
('', 'dict', '', _('path to a dictionary of words'), _('FILE')),
('', 'initfiles', 0, _('initial file count to create'), _('COUNT'))],
_('hg synthesize [OPTION].. DESCFILE'))
def synthesize(ui, repo, descpath, **opts):
'''synthesize commits based on a model of an existing repository
The model must have been generated by :hg:`analyze`. Commits will
be generated randomly according to the probabilities described in
the model. If --initfiles is set, the repository will be seeded with
the given number files following the modeled repository's directory
structure.
When synthesizing new content, commit descriptions, and user
names, words will be chosen randomly from a dictionary that is
presumed to contain one word per line. Use --dict to specify the
path to an alternate dictionary to use.
'''
try:
fp = hg.openpath(ui, descpath)
except Exception as err:
raise error.Abort('%s: %s' % (descpath, err[0].strerror))
desc = json.load(fp)
fp.close()
def cdf(l):
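        # Build an empirical CDF from (value, count) pairs so that pick()
        # below can draw values with probability proportional to their
        # observed frequency (inverse-transform sampling via bisect).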
if not l:
return [], []
vals, probs = zip(*sorted(l, key=lambda x: x[1], reverse=True))
t = float(sum(probs, 0))
s, cdfs = 0, []
for v in probs:
s += v
cdfs.append(s / t)
return vals, cdfs
lineschanged = cdf(desc['lineschanged'])
fileschanged = cdf(desc['fileschanged'])
filesadded = cdf(desc['filesadded'])
dirsadded = cdf(desc['dirsadded'])
filesremoved = cdf(desc['filesremoved'])
linelengths = cdf(desc['linelengths'])
parents = cdf(desc['parents'])
p1distance = cdf(desc['p1distance'])
p2distance = cdf(desc['p2distance'])
interarrival = cdf(desc['interarrival'])
linesinfilesadded = cdf(desc['linesinfilesadded'])
tzoffset = cdf(desc['tzoffset'])
dictfile = opts.get('dict') or '/usr/share/dict/words'
try:
fp = open(dictfile, 'rU')
except IOError as err:
raise error.Abort('%s: %s' % (dictfile, err.strerror))
words = fp.read().splitlines()
fp.close()
initdirs = {}
if desc['initdirs']:
for k, v in desc['initdirs']:
initdirs[k.encode('utf-8').replace('.hg', '_hg')] = v
initdirs = renamedirs(initdirs, words)
initdirscdf = cdf(initdirs)
def pick(cdf):
return cdf[0][bisect.bisect_left(cdf[1], random.random())]
def pickpath():
return os.path.join(pick(initdirscdf), random.choice(words))
def makeline(minimum=0):
total = max(minimum, pick(linelengths))
c, l = 0, []
while c < total:
w = random.choice(words)
c += len(w) + 1
l.append(w)
return ' '.join(l)
wlock = repo.wlock()
lock = repo.lock()
nevertouch = set(('.hgsub', '.hgignore', '.hgtags'))
progress = ui.progress
_synthesizing = _('synthesizing')
_files = _('initial files')
_changesets = _('changesets')
# Synthesize a single initial revision adding files to the repo according
# to the modeled directory structure.
initcount = int(opts['initfiles'])
if initcount and initdirs:
pctx = repo[None].parents()[0]
dirs = set(pctx.dirs())
files = {}
def validpath(path):
# Don't pick filenames which are already directory names.
if path in dirs:
return False
# Don't pick directories which were used as file names.
while path:
if path in files:
return False
path = os.path.dirname(path)
return True
for i in xrange(0, initcount):
ui.progress(_synthesizing, i, unit=_files, total=initcount)
path = pickpath()
while not validpath(path):
path = pickpath()
data = '%s contents\n' % path
files[path] = context.memfilectx(repo, path, data)
dir = os.path.dirname(path)
while dir and dir not in dirs:
dirs.add(dir)
dir = os.path.dirname(dir)
def filectxfn(repo, memctx, path):
return files[path]
ui.progress(_synthesizing, None)
message = 'synthesized wide repo with %d files' % (len(files),)
mc = context.memctx(repo, [pctx.node(), nullid], message,
files.iterkeys(), filectxfn, ui.username(),
'%d %d' % util.makedate())
initnode = mc.commit()
if ui.debugflag:
hexfn = hex
else:
hexfn = short
ui.status(_('added commit %s with %d files\n')
% (hexfn(initnode), len(files)))
# Synthesize incremental revisions to the repository, adding repo depth.
count = int(opts['count'])
heads = set(map(repo.changelog.rev, repo.heads()))
for i in xrange(count):
progress(_synthesizing, i, unit=_changesets, total=count)
node = repo.changelog.node
revs = len(repo)
def pickhead(heads, distance):
if heads:
lheads = sorted(heads)
rev = revs - min(pick(distance), revs)
if rev < lheads[-1]:
rev = lheads[bisect.bisect_left(lheads, rev)]
else:
rev = lheads[-1]
return rev, node(rev)
return nullrev, nullid
r1 = revs - min(pick(p1distance), revs)
p1 = node(r1)
# the number of heads will grow without bound if we use a pure
# model, so artificially constrain their proliferation
toomanyheads = len(heads) > random.randint(1, 20)
if p2distance[0] and (pick(parents) == 2 or toomanyheads):
r2, p2 = pickhead(heads.difference([r1]), p2distance)
else:
r2, p2 = nullrev, nullid
pl = [p1, p2]
pctx = repo[r1]
mf = pctx.manifest()
mfk = mf.keys()
changes = {}
if mfk:
for __ in xrange(pick(fileschanged)):
for __ in xrange(10):
fctx = pctx.filectx(random.choice(mfk))
path = fctx.path()
if not (path in nevertouch or fctx.isbinary() or
'l' in fctx.flags()):
break
lines = fctx.data().splitlines()
add, remove = pick(lineschanged)
for __ in xrange(remove):
if not lines:
break
del lines[random.randrange(0, len(lines))]
for __ in xrange(add):
lines.insert(random.randint(0, len(lines)), makeline())
path = fctx.path()
changes[path] = context.memfilectx(repo, path,
'\n'.join(lines) + '\n')
for __ in xrange(pick(filesremoved)):
path = random.choice(mfk)
for __ in xrange(10):
path = random.choice(mfk)
if path not in changes:
changes[path] = None
break
if filesadded:
dirs = list(pctx.dirs())
dirs.insert(0, '')
for __ in xrange(pick(filesadded)):
pathstr = ''
while pathstr in dirs:
path = [random.choice(dirs)]
if pick(dirsadded):
path.append(random.choice(words))
path.append(random.choice(words))
pathstr = '/'.join(filter(None, path))
data = '\n'.join(makeline()
for __ in xrange(pick(linesinfilesadded))) + '\n'
changes[pathstr] = context.memfilectx(repo, pathstr, data)
def filectxfn(repo, memctx, path):
return changes[path]
if not changes:
continue
if revs:
date = repo['tip'].date()[0] + pick(interarrival)
else:
date = time.time() - (86400 * count)
# dates in mercurial must be positive, fit in 32-bit signed integers.
date = min(0x7fffffff, max(0, date))
user = random.choice(words) + '@' + random.choice(words)
mc = context.memctx(repo, pl, makeline(minimum=2),
sorted(changes.iterkeys()),
filectxfn, user, '%d %d' % (date, pick(tzoffset)))
newnode = mc.commit()
heads.add(repo.changelog.rev(newnode))
heads.discard(r1)
heads.discard(r2)
lock.release()
wlock.release()
def renamedirs(dirs, words):
'''Randomly rename the directory names in the per-dir file count dict.'''
wordgen = itertools.cycle(words)
replacements = {'': ''}
def rename(dirpath):
'''Recursively rename the directory and all path prefixes.
The mapping from path to renamed path is stored for all path prefixes
as in dynamic programming, ensuring linear runtime and consistent
renaming regardless of iteration order through the model.
'''
if dirpath in replacements:
return replacements[dirpath]
head, _ = os.path.split(dirpath)
if head:
head = rename(head)
else:
head = ''
renamed = os.path.join(head, next(wordgen))
replacements[dirpath] = renamed
return renamed
result = []
for dirpath, count in dirs.iteritems():
result.append([rename(dirpath.lstrip(os.sep)), count])
return result
| dscho/hg | contrib/synthrepo.py | Python | gpl-2.0 | 18,180 | 0.000605 |
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'PrecalculatedValue'
db.create_table('profiles_precalculatedvalue', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('formula', self.gf('django.db.models.fields.TextField')(blank=True)),
('geo_record', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['profiles.GeoRecord'])),
('value', self.gf('django.db.models.fields.TextField')(blank=True)),
('data_source', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['profiles.DataSource'])),
('notes', self.gf('django.db.models.fields.TextField')(blank=True)),
))
db.send_create_signal('profiles', ['PrecalculatedValue'])
def backwards(self, orm):
# Deleting model 'PrecalculatedValue'
db.delete_table('profiles_precalculatedvalue')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'profiles.datadomain': {
'Meta': {'ordering': "['weight']", 'object_name': 'DataDomain'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'indicators': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['profiles.Indicator']", 'through': "orm['profiles.IndicatorDomain']", 'symmetrical': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '20'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '20', 'db_index': 'True'}),
'weight': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'})
},
'profiles.datapoint': {
'Meta': {'unique_together': "(('indicator', 'record', 'time'),)", 'object_name': 'DataPoint'},
'change_from_time': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'datapoint_as_change_from'", 'null': 'True', 'to': "orm['profiles.Time']"}),
'change_to_time': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'datapoint_as_change_to'", 'null': 'True', 'to': "orm['profiles.Time']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'indicator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profiles.Indicator']"}),
'record': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profiles.GeoRecord']"}),
'time': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profiles.Time']", 'null': 'True'})
},
'profiles.datasource': {
'Meta': {'object_name': 'DataSource'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'implementation': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'})
},
'profiles.denominator': {
'Meta': {'object_name': 'Denominator'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'indicator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profiles.Indicator']"}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'multiplier': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'sort': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'})
},
'profiles.denominatorpart': {
'Meta': {'object_name': 'DenominatorPart'},
'data': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'data_source': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profiles.DataSource']"}),
'denominator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profiles.Denominator']"}),
'formula': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'indicator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profiles.Indicator']"}),
'part': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profiles.IndicatorPart']"})
},
'profiles.geolevel': {
'Meta': {'object_name': 'GeoLevel'},
'data_sources': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['profiles.DataSource']", 'symmetrical': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '200'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profiles.GeoLevel']", 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '200', 'db_index': 'True'})
},
'profiles.georecord': {
'Meta': {'unique_together': "(('slug', 'level'), ('level', 'geo_id', 'custom_name', 'owner'))", 'object_name': 'GeoRecord'},
'components': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'components_rel_+'", 'blank': 'True', 'to': "orm['profiles.GeoRecord']"}),
'custom_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'geo_id': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'geom': ('django.contrib.gis.db.models.fields.GeometryField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profiles.GeoLevel']"}),
'mappings': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'mappings_rel_+'", 'blank': 'True', 'to': "orm['profiles.GeoRecord']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'notes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profiles.GeoRecord']", 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'db_index': 'True', 'max_length': '100', 'blank': 'True'})
},
'profiles.indicator': {
'Meta': {'object_name': 'Indicator'},
'data_domains': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['profiles.DataDomain']", 'through': "orm['profiles.IndicatorDomain']", 'symmetrical': 'False'}),
'data_type': ('django.db.models.fields.CharField', [], {'default': "'COUNT'", 'max_length': '30'}),
'display_change': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'display_name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'display_percent': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'levels': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['profiles.GeoLevel']", 'symmetrical': 'False'}),
'limitations': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'long_definition': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'notes': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'purpose': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'routine_use': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'short_definition': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100', 'db_index': 'True'}),
'universe': ('django.db.models.fields.CharField', [], {'max_length': '300', 'blank': 'True'})
},
'profiles.indicatordomain': {
'Meta': {'object_name': 'IndicatorDomain'},
'default': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'domain': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profiles.DataDomain']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'indicator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profiles.Indicator']"})
},
'profiles.indicatorpart': {
'Meta': {'object_name': 'IndicatorPart'},
'data': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'data_source': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profiles.DataSource']"}),
'formula': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'indicator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profiles.Indicator']"}),
'time': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profiles.Time']"})
},
'profiles.precalculatedvalue': {
'Meta': {'object_name': 'PrecalculatedValue'},
'data_source': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profiles.DataSource']"}),
'formula': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'geo_record': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profiles.GeoRecord']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'notes': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'value': ('django.db.models.fields.TextField', [], {'blank': 'True'})
},
'profiles.time': {
'Meta': {'object_name': 'Time'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'sort': ('django.db.models.fields.DecimalField', [], {'max_digits': '5', 'decimal_places': '1'})
},
'profiles.value': {
'Meta': {'object_name': 'Value'},
'datapoint': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profiles.DataPoint']"}),
'denominator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profiles.Denominator']", 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'moe': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'number': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'percent': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'})
}
}
complete_apps = ['profiles']
| 216software/Profiles | communityprofiles/profiles/oldmigrations/0038_auto__add_precalculatedvalue.py | Python | mit | 15,090 | 0.008217 |
import sales_order_notes
| OpusVL/odoo_line_notes_field | line_notes_field/__init__.py | Python | agpl-3.0 | 25 | 0 |
import logging
import time
import zlib
from collections import defaultdict
from datetime import datetime
from hashlib import sha1
from operator import itemgetter
import simplejson as json
from _mysql_exceptions import IntegrityError
from django.conf import settings
from django.core.cache import cache
from django.core.exceptions import ObjectDoesNotExist
from treeherder.etl.common import get_guid_root
from treeherder.events.publisher import JobStatusPublisher
from treeherder.model import error_summary, utils
from treeherder.model.models import Datasource, ExclusionProfile
from treeherder.model.tasks import (populate_error_summary, publish_job_action,
publish_resultset,
publish_resultset_action)
from .artifacts import ArtifactsModel
from .base import ObjectNotFoundException, TreeherderModelBase
logger = logging.getLogger(__name__)
class JobsModel(TreeherderModelBase):
"""
Represent a job repository
"""
INCOMPLETE_STATES = ["running", "pending"]
STATES = INCOMPLETE_STATES + ["completed", "coalesced"]
# indexes of specific items in the ``job_placeholder`` objects
JOB_PH_JOB_GUID = 0
JOB_PH_COALESCED_TO_GUID = 2
JOB_PH_RESULT_SET_ID = 3
JOB_PH_BUILD_PLATFORM_KEY = 4
JOB_PH_MACHINE_PLATFORM_KEY = 5
JOB_PH_MACHINE_NAME = 6
JOB_PH_DEVICE_NAME = 7
JOB_PH_OPTION_COLLECTION_HASH = 8
JOB_PH_TYPE_KEY = 9
JOB_PH_PRODUCT_TYPE = 10
JOB_PH_WHO = 11
JOB_PH_REASON = 12
JOB_PH_RESULT = 13
JOB_PH_STATE = 14
JOB_PH_START_TIMESTAMP = 16
JOB_PH_END_TIMESTAMP = 17
JOB_PH_RUNNING_AVG = 18
    # list of searchable columns, i.e. those that have an index
# it would be nice to get this directly from the db and cache it
INDEXED_COLUMNS = {
"job": {
"id": "j.id",
"job_guid": "j.job_guid",
"job_coalesced_to_guid": "j.job_coalesced_to_guid",
"result_set_id": "j.result_set_id",
"build_platform_id": "j.build_platform_id",
"build_system_type": "j.build_system_type",
"machine_platform_id": "j.machine_platform_id",
"machine_id": "j.machine_id",
"option_collection_hash": "j.option_collection_hash",
"job_type_id": "j.job_type_id",
"product_id": "j.product_id",
"failure_classification_id": "j.failure_classification_id",
"who": "j.who",
"reason": "j.reason",
"result": "j.result",
"state": "j.state",
"submit_timestamp": "j.submit_timestamp",
"start_timestamp": "j.start_timestamp",
"end_timestamp": "j.end_timestamp",
"last_modified": "j.last_modified",
"signature": "j.signature",
"tier": "j.tier"
},
"result_set": {
"id": "rs.id",
"revision_hash": "rs.revision_hash",
"revision": "revision.revision",
"author": "rs.author",
"push_timestamp": "rs.push_timestamp"
},
"bug_job_map": {
"job_id": "job_id",
"bug_id": "bug_id",
"type": "type",
"who": "who",
"submit_timestamp": "submit_timestamp"
}
}
# jobs cycle targets
# NOTE: There is an order dependency here, cycle_job and
# cycle_result_set should be after any tables with foreign keys
# to their ids.
JOBS_CYCLE_TARGETS = [
"jobs.deletes.cycle_job_artifact",
"jobs.deletes.cycle_performance_artifact",
"jobs.deletes.cycle_job_log_url",
"jobs.deletes.cycle_job_note",
"jobs.deletes.cycle_bug_job_map",
"jobs.deletes.cycle_job",
"jobs.deletes.cycle_revision",
"jobs.deletes.cycle_revision_map",
"jobs.deletes.cycle_result_set"
]
PERFORMANCE_SERIES_JSON_KEYS = [
"subtest_signatures",
"test_options"
]
@classmethod
def create(cls, project):
"""
Create all the datasource tables for this project.
"""
source = Datasource(project=project)
source.save()
return cls(project=project)
def execute(self, **kwargs):
return utils.retry_execute(self.get_dhub(), logger, **kwargs)
##################
#
# Job schema data methods
#
##################
def get_job(self, id):
"""Return the job row for this ``job_id``"""
repl = [self.refdata_model.get_db_name()]
data = self.execute(
proc="jobs.selects.get_job",
placeholders=[id],
debug_show=self.DEBUG,
replace=repl,
)
return data
def get_job_reference_data(self, signature):
# Retrieve associated data in reference_data_signatures
result = self.refdata_model.get_reference_data([signature])
if result and signature in result:
return result[signature]
return None
def get_job_list(self, offset, limit,
conditions=None, exclusion_profile=None,
visibility="included"):
"""
Retrieve a list of jobs. It's mainly used by the restful api to list
the jobs. The conditions parameter is a dict containing a set of
conditions for each key. e.g.:
{
'who': set([('=', 'john')]),
'result': set([('IN', ("success", "retry"))])
}
"""
replace_str, placeholders = self._process_conditions(
conditions, self.INDEXED_COLUMNS['job']
)
if exclusion_profile:
try:
                if exclusion_profile == "default":
profile = ExclusionProfile.objects.get(
is_default=True
)
else:
profile = ExclusionProfile.objects.get(
name=exclusion_profile
)
signatures = profile.flat_exclusion[self.project]
# NOT here means "not part of the exclusion profile"
inclusion = "NOT" if visibility == "included" else ""
replace_str += " AND j.signature {0} IN ({1})".format(
inclusion,
",".join(["%s"] * len(signatures))
)
placeholders += signatures
except KeyError:
# this repo/project has no hidden signatures
pass
except ExclusionProfile.DoesNotExist:
                # Either there's no default profile set up or the profile
                # specified is not available
pass
repl = [self.refdata_model.get_db_name(), replace_str]
data = self.execute(
proc="jobs.selects.get_job_list",
replace=repl,
placeholders=placeholders,
limit=limit,
offset=offset,
debug_show=self.DEBUG,
)
return data
def set_state(self, job_id, state):
"""Update the state of an existing job"""
self.execute(
proc='jobs.updates.set_state',
placeholders=[state, job_id],
debug_show=self.DEBUG
)
def get_incomplete_job_guids(self, resultset_id):
"""Get list of ids for jobs of resultset that are not in complete state."""
return self.execute(
proc='jobs.selects.get_incomplete_job_guids',
placeholders=[resultset_id],
debug_show=self.DEBUG,
return_type='dict',
key_column='job_guid'
)
def cancel_all_resultset_jobs(self, requester, resultset_id):
"""Set all pending/running jobs in resultset to usercancel."""
job_guids = list(self.get_incomplete_job_guids(resultset_id))
jobs = self.get_job_ids_by_guid(job_guids).values()
# Cancel all the jobs in the database...
self.execute(
proc='jobs.updates.cancel_all',
placeholders=[resultset_id],
debug_show=self.DEBUG
)
# Sending 'cancel_all' action to pulse. Right now there is no listener
# for this, so we cannot remove 'cancel' action for each job below.
publish_resultset_action.apply_async(
args=[self.project, 'cancel_all', resultset_id, requester],
routing_key='publish_to_pulse'
)
# Notify the build systems which created these jobs...
for job in jobs:
self._job_action_event(job, 'cancel', requester)
# Notify the UI.
status_publisher = JobStatusPublisher(settings.BROKER_URL)
try:
status_publisher.publish(job_guids, self.project, 'processed')
finally:
status_publisher.disconnect()
def trigger_missing_resultset_jobs(self, requester, resultset_id, project):
publish_resultset_action.apply_async(
args=[self.project, "trigger_missing_jobs", resultset_id, requester],
routing_key='publish_to_pulse'
)
def trigger_all_talos_jobs(self, requester, resultset_id, project, times):
publish_resultset_action.apply_async(
args=[self.project, "trigger_all_talos_jobs", resultset_id, requester, times],
routing_key='publish_to_pulse'
)
def _job_action_event(self, job, action, requester):
"""
Helper for issuing an 'action' for a given job (such as
cancel/retrigger)
:param job dict: The job which this action was issued to.
:param action str: Name of the action (cancel, etc..).
:param requester str: Email address of the user who caused action.
"""
publish_job_action.apply_async(
args=[self.project, action, job['id'], requester],
routing_key='publish_to_pulse'
)
def retrigger(self, requester, job):
"""
Issue a retrigger to the given job
:param requester str: The email address associated with the user who
made this request
:param job dict: A job object (typically a result of get_job)
"""
self._job_action_event(job, 'retrigger', requester)
def backfill(self, requester, job):
"""
Issue a "backfill" to the underlying build_system_type by scheduling a
pulse message.
:param requester str: The email address associated with the user who
made this request
:param job dict: A job object (typically a result of get_job)
"""
self._job_action_event(job, 'backfill', requester)
def cancel_job(self, requester, job):
"""
Cancel the given job and send an event to notify the build_system type
who created it to do the actual work.
:param requester str: The email address associated with the user who
made this request
:param job dict: A job object (typically a result of get_job)
"""
self._job_action_event(job, 'cancel', requester)
self.execute(
proc='jobs.updates.cancel_job',
placeholders=[job['job_guid']],
debug_show=self.DEBUG
)
status_publisher = JobStatusPublisher(settings.BROKER_URL)
try:
status_publisher.publish([job['job_guid']], self.project, 'processed')
finally:
status_publisher.disconnect()
def get_log_references(self, job_id):
"""Return the log references for the given ``job_id``."""
data = self.execute(
proc="jobs.selects.get_log_references",
placeholders=[job_id],
debug_show=self.DEBUG,
)
return data
def get_max_job_id(self):
"""Get the maximum job id."""
data = self.get_dhub().execute(
proc="jobs.selects.get_max_job_id",
debug_show=self.DEBUG,
)
return int(data[0]['max_id'] or 0)
@staticmethod
def get_performance_series_cache_key(project, interval_seconds,
machine_platform=None, hash=False):
if machine_platform is None:
key = 'performance-series-summary-%s-%s' % (project,
interval_seconds)
else:
key = 'performance-series-summary-%s-%s-%s' % (project,
interval_seconds,
machine_platform)
if hash:
key += '-hash'
return key
def get_performance_series_summary(self, interval_seconds, machine_platform=None):
"""
Retrieve a summary of all of the property/value list pairs found
in the series_signature table, organized by the signature summaries
that they belong to.
{
'signature1': {
'property1': 'value1',
'property2': 'value2',
...
},
'signature2': {
'property1': 'value1',
'property2': 'value2',
...
}
...
}
This data structure can be used to build a comprehensive set of
options to browse all available performance data in a repository.
"""
# Only retrieve signatures with property/values that have
# received data for the time interval requested
last_updated_limit = utils.get_now_timestamp() - interval_seconds
cache_key = self.get_performance_series_cache_key(self.project, interval_seconds,
machine_platform)
series_summary = cache.get(cache_key, None)
if series_summary:
series_summary = json.loads(utils.decompress_if_needed(series_summary))
else:
data = self.get_dhub().execute(
proc="jobs.selects.get_perf_series_properties",
placeholders=[last_updated_limit, interval_seconds],
debug_show=self.DEBUG,
)
series_summary = defaultdict(dict)
for datum in data:
key, val = datum['property'], datum['value']
if key in self.PERFORMANCE_SERIES_JSON_KEYS:
val = json.loads(val)
series_summary[datum['signature']][key] = val
if machine_platform:
series_summary = dict((key, val) for key, val in series_summary.items()
if val['machine_platform'] == machine_platform)
# HACK: take this out when we're using pylibmc and can use
# compression automatically
series_summary_json = json.dumps(series_summary, sort_keys=True)
cache.set(cache_key, zlib.compress(series_summary_json))
sha = sha1()
sha.update(series_summary_json)
hash_cache_key = self.get_performance_series_cache_key(
self.project, interval_seconds, machine_platform,
hash=True)
cache.set(hash_cache_key, sha.hexdigest())
return series_summary
def get_performance_platforms(self, interval_seconds):
last_updated_limit = utils.get_now_timestamp() - interval_seconds
data = self.get_dhub().execute(
proc="jobs.selects.get_perf_series_properties",
placeholders=[last_updated_limit, interval_seconds],
debug_show=self.DEBUG,
)
platforms = set()
for datum in [datum for datum in data if datum['property'] == 'machine_platform']:
platforms.add(datum['value'])
return platforms
def get_job_note(self, id):
"""Return the job note by id."""
data = self.execute(
proc="jobs.selects.get_job_note",
placeholders=[id],
debug_show=self.DEBUG,
)
return data
def get_job_note_list(self, job_id):
"""Return the job notes by job_id."""
data = self.execute(
proc="jobs.selects.get_job_note_list",
placeholders=[job_id],
debug_show=self.DEBUG,
)
return data
def update_last_job_classification(self, job_id):
"""
        Update failure_classification_id on the job table according to
        the latest annotation. If none is present it is reverted to the
        default value.
"""
self.execute(
proc='jobs.updates.update_last_job_classification',
placeholders=[
job_id,
],
debug_show=self.DEBUG
)
def insert_job_note(self, job_id, failure_classification_id, who, note):
"""insert a new note for a job and updates its failure classification"""
self.execute(
proc='jobs.inserts.insert_note',
placeholders=[
job_id,
failure_classification_id,
who,
note,
utils.get_now_timestamp(),
],
debug_show=self.DEBUG
)
self.update_last_job_classification(job_id)
def delete_job_note(self, note_id, job_id):
"""
Delete a job note and updates the failure classification for that job
"""
self.execute(
proc='jobs.deletes.delete_note',
placeholders=[
note_id,
],
debug_show=self.DEBUG
)
self.update_last_job_classification(job_id)
def insert_bug_job_map(self, job_id, bug_id, assignment_type, submit_timestamp, who):
"""
Store a new relation between the given job and bug ids.
"""
try:
self.execute(
proc='jobs.inserts.insert_bug_job_map',
placeholders=[
job_id,
bug_id,
assignment_type,
submit_timestamp,
who
],
debug_show=self.DEBUG
)
except IntegrityError as e:
raise JobDataIntegrityError(e)
if settings.MIRROR_CLASSIFICATIONS:
job = self.get_job(job_id)[0]
if job["state"] == "completed":
# importing here to avoid an import loop
from treeherder.etl.tasks import (submit_elasticsearch_doc,
submit_bugzilla_comment)
# Submit bug associations to Bugzilla/Elasticsearch using async tasks.
submit_elasticsearch_doc.apply_async(
args=[
self.project,
job_id,
bug_id,
submit_timestamp,
who
],
routing_key='classification_mirroring'
)
submit_bugzilla_comment.apply_async(
args=[
self.project,
job_id,
bug_id,
who,
],
routing_key='classification_mirroring'
)
def delete_bug_job_map(self, job_id, bug_id):
"""
Delete a bug-job entry identified by bug_id and job_id
"""
self.execute(
proc='jobs.deletes.delete_bug_job_map',
placeholders=[
job_id,
bug_id
],
debug_show=self.DEBUG
)
def calculate_eta(self, sample_window_seconds, debug):
# Get the most recent timestamp from jobs
max_timestamp = self.execute(
proc='jobs.selects.get_max_job_submit_timestamp',
return_type='iter',
debug_show=self.DEBUG
).get_column_data('submit_timestamp')
if max_timestamp:
time_window = int(max_timestamp) - sample_window_seconds
eta_groups = self.execute(
proc='jobs.selects.get_eta_groups',
placeholders=[time_window],
key_column='signature',
return_type='dict',
debug_show=self.DEBUG
)
placeholders = []
submit_timestamp = int(time.time())
for signature in eta_groups:
running_samples = map(
lambda x: int(x or 0),
eta_groups[signature]['running_samples'].split(','))
running_median = self.get_median_from_sorted_list(
sorted(running_samples))
placeholders.append(
[
signature,
'running',
eta_groups[signature]['running_avg_sec'],
running_median,
eta_groups[signature]['running_min_sec'],
eta_groups[signature]['running_max_sec'],
eta_groups[signature]['running_std'],
len(running_samples),
submit_timestamp
])
self.execute(
proc='jobs.inserts.set_job_eta',
placeholders=placeholders,
executemany=True,
debug_show=self.DEBUG
)
def get_median_from_sorted_list(self, sorted_list):
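        # For an even number of samples this averages the two middle values
        # (integer division under Python 2), e.g. [10, 20, 30, 40] -> 25.0.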
length = len(sorted_list)
if length == 0:
return 0
        # Cannot take the median with only one sample,
        # so return it as-is
elif length == 1:
return sorted_list[0]
elif not length % 2:
return round(
(sorted_list[length / 2] + sorted_list[length / 2 - 1]) / 2, 0
)
return round(sorted_list[length / 2], 0)
def cycle_data(self, cycle_interval, chunk_size, sleep_time):
"""Delete data older than cycle_interval, splitting the target data
        into chunks of size chunk_size. Returns the number of result sets deleted."""
jobs_max_timestamp = self._get_max_timestamp(cycle_interval)
# Retrieve list of result sets to delete
result_set_data = self.execute(
proc='jobs.selects.get_result_sets_to_cycle',
placeholders=[jobs_max_timestamp],
debug_show=self.DEBUG
)
if not result_set_data:
return 0
# group the result_set data in chunks
result_set_chunk_list = zip(*[iter(result_set_data)] * chunk_size)
# append the remaining result_set not fitting in a complete chunk
result_set_chunk_list.append(
result_set_data[-(len(result_set_data) % chunk_size):])
for result_set_chunks in result_set_chunk_list:
# Retrieve list of revisions associated with result sets
rs_placeholders = [x['id'] for x in result_set_chunks]
rs_where_in_clause = [','.join(['%s'] * len(rs_placeholders))]
revision_data = self.execute(
proc='jobs.selects.get_revision_ids_to_cycle',
placeholders=rs_placeholders,
replace=rs_where_in_clause,
debug_show=self.DEBUG
)
# Retrieve list of jobs associated with result sets
rev_placeholders = [x['revision_id'] for x in revision_data]
rev_where_in_clause = [','.join(['%s'] * len(rev_placeholders))]
job_data = self.execute(
proc='jobs.selects.get_jobs_to_cycle',
placeholders=rs_placeholders,
replace=rs_where_in_clause,
debug_show=self.DEBUG
)
job_guid_dict = dict((d['id'], d['job_guid']) for d in job_data)
job_where_in_clause = [','.join(['%s'] * len(job_guid_dict))]
# Associate placeholders and replace data with sql
jobs_targets = []
for proc in self.JOBS_CYCLE_TARGETS:
query_name = proc.split('.')[-1]
if query_name == 'cycle_revision':
jobs_targets.append({
"proc": proc,
"placeholders": rev_placeholders,
"replace": rev_where_in_clause
})
elif query_name == 'cycle_revision_map':
jobs_targets.append({
"proc": proc,
"placeholders": rs_placeholders,
"replace": rs_where_in_clause
})
elif query_name == 'cycle_result_set':
jobs_targets.append({
"proc": proc,
"placeholders": rs_placeholders,
"replace": rs_where_in_clause
})
else:
jobs_targets.append({
"proc": proc,
"placeholders": job_guid_dict.keys(),
"replace": job_where_in_clause
})
# remove data from specified jobs tables that is older than max_timestamp
self._execute_table_deletes(jobs_targets, 'jobs', sleep_time)
return len(result_set_data)
def _get_max_timestamp(self, cycle_interval):
max_date = datetime.now() - cycle_interval
return int(time.mktime(max_date.timetuple()))
def _execute_table_deletes(self, sql_to_execute, data_type, sleep_time):
for sql_obj in sql_to_execute:
if not sql_obj['placeholders']:
continue
sql_obj['debug_show'] = self.DEBUG
# Disable foreign key checks to improve performance
self.execute(
proc='generic.db_control.disable_foreign_key_checks',
debug_show=self.DEBUG)
self.execute(**sql_obj)
self.get_dhub().commit('master_host')
            # Re-enable foreign key checks now that the deletes are done
self.execute(
proc='generic.db_control.enable_foreign_key_checks',
debug_show=self.DEBUG)
if sleep_time:
# Allow some time for other queries to get through
time.sleep(sleep_time)
def get_bug_job_map_list(self, offset, limit, conditions=None):
"""
Retrieve a list of bug_job_map entries. The conditions parameter is a
dict containing a set of conditions for each key. e.g.:
{
'job_id': set([('IN', (1, 2))])
}
"""
replace_str, placeholders = self._process_conditions(
conditions, self.INDEXED_COLUMNS['bug_job_map']
)
repl = [replace_str]
proc = "jobs.selects.get_bug_job_map_list"
data = self.execute(
proc=proc,
replace=repl,
placeholders=placeholders,
limit=limit,
offset=offset,
debug_show=self.DEBUG,
)
return data
def get_result_set_ids(self, revision_hashes, where_in_list):
"""Return the a dictionary of revision_hash to id mappings given
a list of revision_hashes and a where_in_list.
revision_hashes = [ revision_hash1, revision_hash2, ... ]
where_in_list = [ %s, %s, %s ... ]
returns:
{
revision_hash1:{id: id1, push_timestamp: pt1},
revision_hash2:{id: id2, push_timestamp: pt2},
...
}
"""
result_set_id_lookup = {}
if revision_hashes:
result_set_id_lookup = self.execute(
proc='jobs.selects.get_result_set_ids',
placeholders=revision_hashes,
replace=[where_in_list],
debug_show=self.DEBUG,
key_column='revision_hash',
return_type='dict')
return result_set_id_lookup
def get_result_set_list_by_ids(self, result_set_ids):
conditions = {'id': set([('IN', tuple(result_set_ids))])}
replace_str, placeholders = self._process_conditions(
conditions, self.INDEXED_COLUMNS['result_set']
)
proc = "jobs.selects.get_result_set_list_by_ids"
result_set_ids = self.execute(
proc=proc,
replace=[replace_str],
placeholders=placeholders,
debug_show=self.DEBUG,
)
aggregate_details = self.get_result_set_details(result_set_ids)
return_list = self._merge_result_set_details(
result_set_ids, aggregate_details, True)
return return_list
def get_result_set_list(
self, offset_id, limit, full=True, conditions=None):
"""
Retrieve a list of ``result_sets`` (also known as ``pushes``)
If ``full`` is set to ``True`` then return revisions, too.
        Jobs themselves are not included.
Mainly used by the restful api to list the pushes in the UI
"""
replace_str, placeholders = self._process_conditions(
conditions, self.INDEXED_COLUMNS['result_set']
)
# If a push doesn't have jobs we can just
# message the user, it would save us a very expensive join
# with the jobs table.
# Retrieve the filtered/limited list of result sets
proc = "jobs.selects.get_result_set_list"
result_set_ids = self.execute(
proc=proc,
replace=[replace_str],
placeholders=placeholders,
limit=limit,
debug_show=self.DEBUG,
)
aggregate_details = self.get_result_set_details(result_set_ids)
return_list = self._merge_result_set_details(
result_set_ids, aggregate_details, full)
return return_list
def _merge_result_set_details(self, result_set_ids, aggregate_details, full):
# Construct the return dataset, include all revisions associated
# with each result_set in the revisions attribute
return_list = []
for result in result_set_ids:
detail = aggregate_details[result['id']][0]
list_item = {
"id": result['id'],
"revision_hash": result['revision_hash'],
"push_timestamp": result['push_timestamp'],
"repository_id": detail['repository_id'],
"revision": detail['revision'],
"author": result['author'] or detail['author'],
"revision_count": len(aggregate_details[result['id']])
}
# we only return the first 20 revisions.
if full:
list_item.update({
"comments": detail['comments'],
"revisions": aggregate_details[result['id']][:20]
})
return_list.append(list_item)
return return_list
def get_revision_resultset_lookup(self, revision_list):
"""
Create a list of revision->resultset lookups from a list of revision
This will retrieve non-active resultsets as well. Some of the data
ingested has mixed up revisions that show for jobs, but are not in
the right repository in builds4hr/running/pending. So we ingest those
bad resultsets/revisions as non-active so that we don't keep trying
to re-ingest them. Allowing this query to retrieve non ``active``
        resultsets means we will avoid re-doing that work by detecting that
we've already ingested it.
But we skip ingesting the job, because the resultset is not active.
"""
replacement = ",".join(["%s"] * len(revision_list))
replacement = " AND revision IN (" + replacement + ") "
proc = "jobs.selects.get_revision_resultset_lookup"
lookups = self.execute(
proc=proc,
placeholders=revision_list + [0, len(revision_list)],
debug_show=self.DEBUG,
replace=[replacement],
return_type="dict",
key_column="revision"
)
return lookups
def get_resultset_revisions_list(self, result_set_id):
"""
Return the revisions for the given resultset
"""
proc = "jobs.selects.get_result_set_details"
lookups = self.execute(
proc=proc,
debug_show=self.DEBUG,
placeholders=[result_set_id],
replace=["%s"],
)
return lookups
def get_result_set_details(self, result_set_ids):
"""
Retrieve all revisions associated with a set of ``result_set``
(also known as ``pushes``) ids.
Mainly used by the restful api to list the pushes and their associated
revisions in the UI
"""
if not result_set_ids:
# No result sets provided
return {}
# Generate a list of result_set_ids
ids = []
id_placeholders = []
for data in result_set_ids:
id_placeholders.append('%s')
ids.append(data['id'])
where_in_clause = ','.join(id_placeholders)
# Retrieve revision details associated with each result_set_id
detail_proc = "jobs.selects.get_result_set_details"
result_set_details = self.execute(
proc=detail_proc,
placeholders=ids,
debug_show=self.DEBUG,
replace=[where_in_clause],
)
# Aggregate the revisions by result_set_id
aggregate_details = {}
for detail in result_set_details:
if detail['result_set_id'] not in aggregate_details:
aggregate_details[detail['result_set_id']] = []
aggregate_details[detail['result_set_id']].append(
{
'revision': detail['revision'],
'author': detail['author'],
'repository_id': detail['repository_id'],
'comments': detail['comments'],
'commit_timestamp': detail['commit_timestamp']
})
return aggregate_details
def get_oauth_consumer_secret(self, key):
"""Consumer secret for oauth"""
ds = self.get_datasource()
secret = ds.get_oauth_consumer_secret(key)
return secret
def store_job_data(self, data, raise_errors=False):
"""
Store JobData instances into jobs db
Example:
[
{
"revision_hash": "24fd64b8251fac5cf60b54a915bffa7e51f636b5",
"job": {
"job_guid": "d19375ce775f0dc166de01daa5d2e8a73a8e8ebf",
"name": "xpcshell",
"desc": "foo",
"job_symbol": "XP",
"group_name": "Shelliness",
"group_symbol": "XPC",
"product_name": "firefox",
"state": "TODO",
"result": 0,
"reason": "scheduler",
"who": "sendchange-unittest",
"submit_timestamp": 1365732271,
"start_timestamp": "20130411165317",
"end_timestamp": "1365733932"
"machine": "tst-linux64-ec2-314",
"build_url": "http://....",
"build_platform": {
"platform": "Ubuntu VM 12.04",
"os_name": "linux",
"architecture": "x86_64",
"vm": true
},
"machine_platform": {
"platform": "Ubuntu VM 12.04",
"os_name": "linux",
"architecture": "x86_64",
"vm": true
},
"option_collection": {
"opt": true
},
"log_references": [
{
"url": "http://ftp.mozilla.org/pub/...",
"name": "unittest"
}
],
                    "artifacts": [{
                        "type": "json | img | ...",
                        "name": "",
                        "log_urls": [],
                        "blob": ""
                    }],
},
"coalesced": []
},
...
]
"""
# Ensure that we have job data to process
if not data:
return
# remove any existing jobs that already have the same state
data = self._remove_existing_jobs(data)
if not data:
return
# Structures supporting revision_hash SQL
revision_hash_lookup = set()
unique_revision_hashes = []
rh_where_in = []
# Structures supporting job SQL
job_placeholders = []
log_placeholders = []
artifact_placeholders = []
coalesced_job_guid_placeholders = []
retry_job_guids = []
async_error_summary_list = []
# get the tier-2 data signatures for this project.
# if there are none, then just return an empty list
tier_2_signatures = []
try:
tier_2 = ExclusionProfile.objects.get(name="Tier-2")
# tier_2_blob = json.loads(tier_2['flat_exclusion'])
tier_2_signatures = set(tier_2.flat_exclusion[self.project])
except KeyError:
# may be no tier 2 jobs for the current project
# and that's ok.
pass
except ObjectDoesNotExist:
# if this profile doesn't exist, then no second tier jobs
# and that's ok.
pass
for datum in data:
# Make sure we can deserialize the json object
# without raising an exception
try:
job = datum['job']
revision_hash = datum['revision_hash']
coalesced = datum.get('coalesced', [])
# TODO: Need a job structure validation step here. Now that
# everything works in list context we cannot detect what
# object is responsible for what error. If we validate here
# we can capture the error and associate it with the object
# and also skip it before generating any database errors.
except JobDataError as e:
if raise_errors:
raise e
continue
except Exception as e:
if raise_errors:
raise e
continue
try:
# json object can be successfully deserialized
# load reference data
job_guid = self._load_ref_and_job_data_structs(
job,
revision_hash,
revision_hash_lookup,
unique_revision_hashes,
rh_where_in,
job_placeholders,
log_placeholders,
artifact_placeholders,
retry_job_guids,
tier_2_signatures,
async_error_summary_list
)
for coalesced_guid in coalesced:
coalesced_job_guid_placeholders.append(
# coalesced to guid, coalesced guid
[job_guid, coalesced_guid]
)
except Exception as e:
if raise_errors:
raise e
# Store all reference data and retrieve associated ids
id_lookups = self.refdata_model.set_all_reference_data()
job_eta_times = self.get_job_eta_times(
id_lookups['reference_data_signatures']
)
# Store all revision hashes and retrieve result_set_ids
result_set_ids = self.get_result_set_ids(
unique_revision_hashes, rh_where_in
)
job_update_placeholders = []
job_guid_list = []
push_timestamps = {}
for index, job in enumerate(job_placeholders):
# Replace reference data with their associated ids
self._set_data_ids(
index,
job_placeholders,
id_lookups,
job_guid_list,
job_update_placeholders,
result_set_ids,
job_eta_times,
push_timestamps
)
job_id_lookup = self._load_jobs(job_placeholders, job_guid_list)
# For each of these ``retry_job_guids`` the job_id_lookup will
# either contain the retry guid, or the root guid (based on whether we
# inserted, or skipped insertion to do an update). So add in
# whichever is missing.
for retry_guid in retry_job_guids:
retry_guid_root = get_guid_root(retry_guid)
lookup_keys = job_id_lookup.keys()
if retry_guid in lookup_keys:
# this retry was inserted in the db at some point
if retry_guid_root not in lookup_keys:
# the root isn't there because there was, for some reason,
# never a pending/running version of this job
retry_job = job_id_lookup[retry_guid]
job_id_lookup[retry_guid_root] = retry_job
elif retry_guid_root in lookup_keys:
# if job_id_lookup contains the root, then the insert
# will have skipped, so we want to find that job
# when looking for the retry_guid for update later.
retry_job = job_id_lookup[retry_guid_root]
job_id_lookup[retry_guid] = retry_job
# Need to iterate over log references separately since they could
# be a different length. Replace job_guid with id in log url
# placeholders
# need also to retrieve the updated status to distinguish between
# failed and successful jobs
job_results = dict((el[0], el[9]) for el in job_update_placeholders)
self._load_log_urls(log_placeholders, job_id_lookup,
job_results)
with ArtifactsModel(self.project) as artifacts_model:
artifacts_model.load_job_artifacts(artifact_placeholders, job_id_lookup)
# schedule the generation of ``Bug suggestions`` artifacts
# asynchronously now that the jobs have been created
if async_error_summary_list:
populate_error_summary.apply_async(
args=[self.project, async_error_summary_list, job_id_lookup],
routing_key='error_summary'
)
# If there is already a job_id stored with pending/running status
# we need to update the information for the complete job
if job_update_placeholders:
# replace job_guid with job_id
for row in job_update_placeholders:
row[-1] = job_id_lookup[
get_guid_root(row[-1])
]['id']
self.execute(
proc='jobs.updates.update_job_data',
debug_show=self.DEBUG,
placeholders=job_update_placeholders,
executemany=True)
# set the job_coalesced_to_guid column for any coalesced
# job found
if coalesced_job_guid_placeholders:
self.execute(
proc='jobs.updates.update_coalesced_guids',
debug_show=self.DEBUG,
placeholders=coalesced_job_guid_placeholders,
executemany=True)
def _remove_existing_jobs(self, data):
"""
Remove jobs from data where we already have them in the same state.
1. split the incoming jobs into pending, running and complete.
2. fetch the ``job_guids`` from the db that are in the same state as they
are in ``data``.
3. build a new list of jobs in ``new_data`` that are not already in
the db and pass that back. It could end up empty at that point.
"""
states = {
'pending': [],
'running': [],
'completed': [],
}
data_idx = []
new_data = []
placeholders = []
state_clauses = []
for i, datum in enumerate(data):
try:
job = datum['job']
job_guid = str(job['job_guid'])
states[str(job['state'])].append(job_guid)
# index this place in the ``data`` object
data_idx.append(job_guid)
except Exception:
data_idx.append("skipped")
# it will get caught later in ``store_job_data``
# adding the guid as "skipped" will mean it won't be found
# in the returned list of dup guids from the db.
# This will cause the bad job to be re-added
# to ``new_data`` so that the error can be handled
# in ``store_job_data``.
for state, guids in states.items():
if guids:
placeholders.append(state)
placeholders.extend(guids)
state_clauses.append(
"(`state` = %s AND `job_guid` IN ({0}))".format(
",".join(["%s"] * len(guids))
)
)
replacement = ' OR '.join(state_clauses)
if placeholders:
existing_guids = self.execute(
proc='jobs.selects.get_job_guids_in_states',
placeholders=placeholders,
replace=[replacement],
key_column='job_guid',
return_type='set',
debug_show=self.DEBUG,
)
# build a new list of jobs without those we already have loaded
for i, guid in enumerate(data_idx):
if guid not in existing_guids:
new_data.append(data[i])
return new_data
def _load_ref_and_job_data_structs(
self, job, revision_hash, revision_hash_lookup,
unique_revision_hashes, rh_where_in, job_placeholders,
log_placeholders, artifact_placeholders, retry_job_guids,
tier_2_signatures, async_artifact_list
):
"""
Take the raw job object after etl and convert it to job_placeholders.
If the job is a ``retry`` the ``job_guid`` will have a special
suffix on it. But the matching ``pending``/``running`` job will not.
So we append the suffixed ``job_guid`` to ``retry_job_guids``
so that we can update the job_id_lookup later with the non-suffixed
``job_guid`` (root ``job_guid``). Then we can find the right
``pending``/``running`` job and update it with this ``retry`` job.
"""
# Store revision_hash to support SQL construction
# for result_set entry
if revision_hash not in revision_hash_lookup:
unique_revision_hashes.append(revision_hash)
rh_where_in.append('%s')
build_os_name = job.get(
'build_platform', {}).get('os_name', 'unknown')
build_platform = job.get(
'build_platform', {}).get('platform', 'unknown')
build_architecture = job.get(
'build_platform', {}).get('architecture', 'unknown')
build_platform_key = self.refdata_model.add_build_platform(
build_os_name, build_platform, build_architecture
)
machine_os_name = job.get(
'machine_platform', {}).get('os_name', 'unknown')
machine_platform = job.get(
'machine_platform', {}).get('platform', 'unknown')
machine_architecture = job.get(
'machine_platform', {}).get('architecture', 'unknown')
machine_platform_key = self.refdata_model.add_machine_platform(
machine_os_name, machine_platform, machine_architecture
)
option_collection_hash = self.refdata_model.add_option_collection(
job.get('option_collection', [])
)
machine = job.get('machine', 'unknown')
self.refdata_model.add_machine(
machine,
long(job.get("end_timestamp", time.time()))
)
device_name = job.get('device_name', 'unknown')
self.refdata_model.add_device(device_name)
job_type = job.get('name', 'unknown')
job_symbol = job.get('job_symbol', 'unknown')
group_name = job.get('group_name', 'unknown')
group_symbol = job.get('group_symbol', 'unknown')
job_type_key = self.refdata_model.add_job_type(
job_type, job_symbol, group_name, group_symbol
)
product = job.get('product_name', 'unknown')
if len(product.strip()) == 0:
product = 'unknown'
self.refdata_model.add_product(product)
job_guid = job['job_guid']
job_guid = job_guid[0:50]
who = job.get('who') or 'unknown'
who = who[0:50]
reason = job.get('reason') or 'unknown'
reason = reason[0:125]
state = job.get('state') or 'unknown'
state = state[0:25]
if job.get('result', 'unknown') == 'retry':
retry_job_guids.append(job_guid)
build_system_type = job.get('build_system_type', 'buildbot')
# Should be the buildername in the case of buildbot
reference_data_name = job.get('reference_data_name', None)
signature = self.refdata_model.add_reference_data_signature(
reference_data_name, build_system_type, self.project,
[build_system_type, self.project, build_os_name, build_platform, build_architecture,
machine_os_name, machine_platform, machine_architecture,
device_name, group_name, group_symbol, job_type, job_symbol,
option_collection_hash]
)
job_tier = job.get('tier') or 1
# job tier signatures override the setting from the job structure
tier = 2 if signature in tier_2_signatures else job_tier
job_placeholders.append([
job_guid,
signature,
None, # idx:2, job_coalesced_to_guid,
revision_hash, # idx:3, replace with result_set_id
build_platform_key, # idx:4, replace with build_platform_id
machine_platform_key, # idx:5, replace with machine_platform_id
machine, # idx:6, replace with machine_id
device_name, # idx:7, replace with device_id
option_collection_hash, # idx:8
job_type_key, # idx:9, replace with job_type_id
product, # idx:10, replace with product_id
who,
reason,
job.get('result', 'unknown'), # idx:13, this is typically an int
state,
self.get_number(job.get('submit_timestamp')),
self.get_number(job.get('start_timestamp')),
self.get_number(job.get('end_timestamp')),
0, # idx:18, replace with running_avg_sec
tier,
job_guid,
get_guid_root(job_guid) # will be the same except for ``retry`` jobs
])
artifacts = job.get('artifacts', [])
has_text_log_summary = False
if artifacts:
artifacts = ArtifactsModel.serialize_artifact_json_blobs(artifacts)
# the artifacts in this list could be ones that should have
# bug suggestions generated for them. If so, queue them to be
# scheduled for asynchronous generation.
tls_list = error_summary.get_artifacts_that_need_bug_suggestions(
artifacts)
async_artifact_list.extend(tls_list)
# need to add job guid to artifacts, since they likely weren't
# present in the beginning
for artifact in artifacts:
if not all(k in artifact for k in ("name", "type", "blob")):
raise JobDataError(
"Artifact missing properties: {}".format(artifact))
artifact_placeholder = artifact.copy()
artifact_placeholder['job_guid'] = job_guid
artifact_placeholders.append(artifact_placeholder)
has_text_log_summary = any(x for x in artifacts
if x['name'] == 'text_log_summary')
log_refs = job.get('log_references', [])
if log_refs:
for log in log_refs:
name = log.get('name') or 'unknown'
name = name[0:50]
url = log.get('url') or 'unknown'
url = url[0:255]
# this indicates that a summary artifact was submitted with
# this job that corresponds to the buildbot_text log url.
# Therefore, the log does not need parsing. So we should
# ensure that it's marked as already parsed.
if has_text_log_summary and name == 'buildbot_text':
parse_status = 'parsed'
else:
# the parsing status of this log. 'pending' or 'parsed'
parse_status = log.get('parse_status', 'pending')
log_placeholders.append([job_guid, name, url, parse_status])
return job_guid
def get_number(self, s):
try:
return long(s)
except (ValueError, TypeError):
return 0
def _set_data_ids(
self, index, job_placeholders, id_lookups,
job_guid_list, job_update_placeholders,
result_set_ids, job_eta_times, push_timestamps
):
"""
Supplant ref data with ids and create update placeholders
Pending jobs should be updated, rather than created.
``job_placeholders`` are used for creating new jobs.
``job_update_placeholders`` are used for updating existing non-complete
jobs
"""
# Replace reference data with their ids
job_guid = job_placeholders[index][
self.JOB_PH_JOB_GUID]
job_coalesced_to_guid = job_placeholders[index][
self.JOB_PH_COALESCED_TO_GUID]
revision_hash = job_placeholders[index][
self.JOB_PH_RESULT_SET_ID]
build_platform_key = job_placeholders[index][
self.JOB_PH_BUILD_PLATFORM_KEY]
machine_platform_key = job_placeholders[index][
self.JOB_PH_MACHINE_PLATFORM_KEY]
machine_name = job_placeholders[index][
self.JOB_PH_MACHINE_NAME]
device_name = job_placeholders[index][
self.JOB_PH_DEVICE_NAME]
option_collection_hash = job_placeholders[index][
self.JOB_PH_OPTION_COLLECTION_HASH]
job_type_key = job_placeholders[index][self.JOB_PH_TYPE_KEY]
product_type = job_placeholders[index][self.JOB_PH_PRODUCT_TYPE]
who = job_placeholders[index][self.JOB_PH_WHO]
reason = job_placeholders[index][self.JOB_PH_REASON]
result = job_placeholders[index][self.JOB_PH_RESULT]
job_state = job_placeholders[index][self.JOB_PH_STATE]
start_timestamp = job_placeholders[index][self.JOB_PH_START_TIMESTAMP]
end_timestamp = job_placeholders[index][self.JOB_PH_END_TIMESTAMP]
# Load job_placeholders
# replace revision_hash with id
result_set = result_set_ids[revision_hash]
job_placeholders[index][
self.JOB_PH_RESULT_SET_ID] = result_set['id']
push_timestamps[result_set['id']] = result_set['push_timestamp']
# replace build_platform_key with id
build_platform_id = id_lookups['build_platforms'][build_platform_key]['id']
job_placeholders[index][
self.JOB_PH_BUILD_PLATFORM_KEY] = build_platform_id
# replace machine_platform_key with id
machine_platform_id = id_lookups['machine_platforms'][machine_platform_key]['id']
job_placeholders[index][
self.JOB_PH_MACHINE_PLATFORM_KEY] = machine_platform_id
# replace machine with id
job_placeholders[index][
self.JOB_PH_MACHINE_NAME] = id_lookups['machines'][machine_name]['id']
job_placeholders[index][
self.JOB_PH_DEVICE_NAME] = id_lookups['devices'][device_name]['id']
# replace job_type with id
job_type_id = id_lookups['job_types'][job_type_key]['id']
job_placeholders[index][self.JOB_PH_TYPE_KEY] = job_type_id
# replace product_type with id
job_placeholders[index][
self.JOB_PH_PRODUCT_TYPE] = id_lookups['products'][product_type]['id']
job_guid_list.append(job_guid)
# for retry jobs, we may have a different job_guid than the root of job_guid
# because retry jobs append a suffix for uniqueness (since the job_guid
        # won't be unique due to them all having the same request_id and request_time).
# But there may be a ``pending`` or ``running`` job that this retry
# should be updating, so make sure to add the root ``job_guid`` as well.
job_guid_root = get_guid_root(job_guid)
if job_guid != job_guid_root:
job_guid_list.append(job_guid_root)
reference_data_signature = job_placeholders[index][1]
running_avg_sec = job_eta_times.get(reference_data_signature, {}).get('running', 0)
job_placeholders[index][self.JOB_PH_RUNNING_AVG] = running_avg_sec
# Load job_update_placeholders
if job_state != 'pending':
job_update_placeholders.append([
job_guid,
job_coalesced_to_guid,
result_set_ids[revision_hash]['id'],
id_lookups['machines'][machine_name]['id'],
option_collection_hash,
id_lookups['job_types'][job_type_key]['id'],
id_lookups['products'][product_type]['id'],
who,
reason,
result,
job_state,
start_timestamp,
end_timestamp,
job_state,
get_guid_root(job_guid)
])
def _load_jobs(self, job_placeholders, job_guid_list):
if not job_placeholders:
return {}
# Store job data
self.execute(
proc='jobs.inserts.create_job_data',
debug_show=self.DEBUG,
placeholders=job_placeholders,
executemany=True)
return self.get_job_ids_by_guid(job_guid_list)
def get_job_eta_times(self, reference_data_signatures):
eta_lookup = {}
if len(reference_data_signatures) == 0:
return eta_lookup
rds_where_in_clause = ','.join(['%s'] * len(reference_data_signatures))
job_eta_data = self.execute(
proc='jobs.selects.get_last_eta_by_signatures',
debug_show=self.DEBUG,
replace=[rds_where_in_clause],
placeholders=reference_data_signatures)
for eta_data in job_eta_data:
signature = eta_data['signature']
state = eta_data['state']
if signature not in eta_lookup:
eta_lookup[signature] = {}
if state not in eta_lookup[signature]:
eta_lookup[signature][state] = {}
eta_lookup[signature][state] = eta_data['avg_sec']
return eta_lookup
def get_job_ids_by_guid(self, job_guid_list):
job_guid_where_in_clause = ",".join(["%s"] * len(job_guid_list))
job_id_lookup = self.execute(
proc='jobs.selects.get_job_ids_by_guids',
debug_show=self.DEBUG,
replace=[job_guid_where_in_clause],
placeholders=job_guid_list,
key_column='job_guid',
return_type='dict')
return job_id_lookup
def _load_log_urls(self, log_placeholders, job_id_lookup,
job_results):
# importing here to avoid an import loop
from treeherder.log_parser.tasks import parse_log, parse_json_log
tasks = []
result_sets = []
if log_placeholders:
for index, log_ref in enumerate(log_placeholders):
job_guid = log_ref[0]
job_id = job_id_lookup[job_guid]['id']
result = job_results[job_guid]
result_set_id = job_id_lookup[job_guid]['result_set_id']
result_sets.append(result_set_id)
# Replace job_guid with id
log_placeholders[index][0] = job_id
task = dict()
# a log can be submitted already parsed. So only schedule
# a parsing task if it's ``pending``
# the submitter is then responsible for submitting the
# text_log_summary artifact
if log_ref[3] == 'pending':
if log_ref[1] == 'mozlog_json':
# don't parse structured logs for passing tests
if result != 'success':
task['routing_key'] = 'parse_log.json'
else:
if result != 'success':
task['routing_key'] = 'parse_log.failures'
else:
task['routing_key'] = 'parse_log.success'
if 'routing_key' in task:
task['job_guid'] = job_guid
task['log_url'] = log_ref[2]
task['result_set_id'] = result_set_id
tasks.append(task)
# Store the log references
self.execute(
proc='jobs.inserts.set_job_log_url',
debug_show=self.DEBUG,
placeholders=log_placeholders,
executemany=True)
            # We need to find the job_log_url ids that were just inserted,
            # but there is no unique key. Also, the url column is not
            # indexed, so it is not a good idea to search based on it.
            # Instead, retrieve the logs by job ids and use their urls
            # to build a lookup map.
job_ids = [j["id"] for j in job_id_lookup.values()]
job_log_url_list = self.get_job_log_url_list(job_ids)
log_url_lookup = dict([(jlu['url'], jlu)
for jlu in job_log_url_list])
for task in tasks:
parse_log_task = parse_log
if task['routing_key'] == "parse_log.json":
parse_log_task = parse_json_log
parse_log_task.apply_async(
args=[
self.project,
log_url_lookup[task['log_url']],
task['job_guid'],
],
routing_key=task['routing_key']
)
def get_job_log_url_detail(self, job_log_url_id):
obj = self.execute(
proc='jobs.selects.get_job_log_url_detail',
debug_show=self.DEBUG,
placeholders=[job_log_url_id])
if len(obj) == 0:
raise ObjectNotFoundException("job_log_url", id=job_log_url_id)
return obj[0]
def get_job_log_url_list(self, job_ids):
"""
Return a list of logs belonging to the given job_id(s).
"""
if len(job_ids) == 0:
return []
replacement = []
id_placeholders = ["%s"] * len(job_ids)
replacement.append(','.join(id_placeholders))
data = self.execute(
proc="jobs.selects.get_job_log_url_list",
placeholders=job_ids,
replace=replacement,
debug_show=self.DEBUG,
)
return data
def update_job_log_url_status(self, job_log_url_id, parse_status):
self.execute(
proc='jobs.updates.update_job_log_url',
debug_show=self.DEBUG,
placeholders=[parse_status, job_log_url_id])
def get_performance_series_from_signatures(self, signatures, interval_seconds):
repl = [','.join(['%s'] * len(signatures))]
placeholders = signatures
placeholders.append(str(interval_seconds))
data = self.execute(
proc="jobs.selects.get_performance_series_from_signatures",
debug_show=self.DEBUG,
placeholders=placeholders,
replace=repl)
data = [{"series_signature": x["series_signature"],
"blob": json.loads(utils.decompress_if_needed(x["blob"]))} for x in data]
return data
def get_signatures_from_properties(self, props):
props_where_repl = [
' OR '.join(['(`property`=%s AND `value`=%s)'] * len(props)),
' AND '.join(['COALESCE(SUM(`property`=%s AND `value`=%s), 0) > 0'] * len(props))]
# convert to 1 dimensional list
props = [el for x in props.items() for el in x]
props.extend(props)
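        # e.g. props {'machine': 'tst-1', 'os': 'linux'} (hypothetical values)
        # flattens to ['machine', 'tst-1', 'os', 'linux'] and is then doubled,
        # feeding the OR fragment and the COALESCE(SUM(...)) fragment above.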
signatures = self.execute(
proc="jobs.selects.get_signatures_from_properties",
debug_show=self.DEBUG,
placeholders=props,
replace=props_where_repl)
if not signatures:
return {"success": False}
signatures = [x.get("signature") for x in signatures]
signatures_repl = [','.join(['%s'] * len(signatures))]
properties = self.execute(
proc="jobs.selects.get_all_properties_of_signatures",
debug_show=self.DEBUG,
placeholders=signatures,
replace=signatures_repl)
ret = {}
for d in properties:
sig = d["signature"]
ret[sig] = ret[sig] if sig in ret else {}
ret[sig][d["property"]] = d["value"]
return ret
def get_signature_properties(self, signatures):
signatures_repl = [','.join(['%s'] * len(signatures))]
properties = self.execute(
proc="jobs.selects.get_all_properties_of_signatures",
debug_show=self.DEBUG,
placeholders=signatures,
replace=signatures_repl)
sigdict = {}
for property in properties:
signature = property['signature']
if not sigdict.get(signature):
sigdict[signature] = {}
(key, val) = (property['property'], property['value'])
if key in self.PERFORMANCE_SERIES_JSON_KEYS:
val = json.loads(val)
sigdict[signature][key] = val
ret = []
for signature in signatures:
if not sigdict.get(signature):
                raise ObjectNotFoundException("signature", id=signature)
ret.append(sigdict[signature])
return ret
def set_series_signature(self, signature_hash, signature_props):
signature_property_placeholders = []
for (k, v) in signature_props.iteritems():
if not isinstance(v, basestring):
v = json.dumps(v)
signature_property_placeholders.append([
str(signature_hash), str(k), str(v),
str(signature_hash), str(k), str(v),
])
self.execute(
proc='jobs.inserts.set_series_signature',
debug_show=self.DEBUG,
placeholders=signature_property_placeholders,
executemany=True)
def store_performance_series(
self, t_range, series_type, signature, series_data):
# Use MySQL GETLOCK function to guard against concurrent celery tasks
# overwriting each other's blobs. The lock incorporates the time
# interval and signature combination and is specific to a single
# json blob.
lock_string = "sps_{0}_{1}_{2}".format(
t_range, series_type, signature)
lock_timeout = settings.PERFHERDER_UPDATE_SERIES_LOCK_TIMEOUT
# first, wait for lock to become free
started = time.time()
while time.time() < (started + lock_timeout):
is_lock_free = bool(self.execute(
proc='generic.locks.is_free_lock',
debug_show=self.DEBUG,
placeholders=[lock_string])[0]['lock'])
if is_lock_free:
break
time.sleep(0.1)
if not is_lock_free:
logger.error(
'store_performance_series lock_string, '
'{0}, timed out!'.format(lock_string)
)
return
# now, acquire the lock
self.execute(
proc='generic.locks.get_lock',
debug_show=self.DEBUG,
placeholders=[lock_string])
try:
now_timestamp = int(time.time())
# If we don't have this t_range/signature combination create it
series_data_json = json.dumps(series_data)
insert_placeholders = [
t_range, signature,
series_type,
now_timestamp,
zlib.compress(series_data_json),
t_range,
signature,
]
self.execute(
proc='jobs.inserts.set_performance_series',
debug_show=self.DEBUG,
placeholders=insert_placeholders)
# delete any previous instance of the cached copy of the perf
# series summary, since it's now out of date
cache.delete(self.get_performance_series_cache_key(self.project,
t_range))
# Retrieve and update the series
performance_series = self.execute(
proc='jobs.selects.get_performance_series',
debug_show=self.DEBUG,
placeholders=[t_range, signature])
db_series_json = utils.decompress_if_needed(performance_series[0]['blob'])
# If they're equal this was the first time the t_range
# and signature combination was stored, so there's nothing to
# do
if series_data_json != db_series_json:
series = json.loads(db_series_json)
series.extend(series_data)
# expire any entries which are too old
push_timestamp_limit = now_timestamp - int(t_range)
series = filter(
lambda d: d['push_timestamp'] >= push_timestamp_limit,
series
)
if series:
# in case the same data was submitted to be added to the
# db twice (with our setup as of 2015/07, this can happen
# if we parse the same talos log more than once), remove any
# duplicate entries.
# technique from: http://stackoverflow.com/a/9427216
series = [dict(t) for t in set([tuple(sorted(d.items())) for d in
series])]
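                # e.g. two identical datapoint dicts collapse to the same
                # tuple of sorted (key, value) pairs in the set and are
                # rebuilt as a single dict afterwards.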
# sort the series by result set id
series = sorted(
series, key=itemgetter('result_set_id'),
)
update_placeholders = [
now_timestamp,
zlib.compress(json.dumps(series)),
t_range,
signature,
]
self.execute(
proc='jobs.updates.update_performance_series',
debug_show=self.DEBUG,
placeholders=update_placeholders)
except Exception as e:
raise e
finally:
# Make sure we release the lock no matter what errors
# are generated
self.execute(
proc='generic.locks.release_lock',
debug_show=self.DEBUG,
placeholders=[lock_string])
def _get_last_insert_id(self):
"""Return last-inserted ID."""
return self.get_dhub().execute(
proc='generic.selects.get_last_insert_id',
debug_show=self.DEBUG,
return_type='iter',
).get_column_data('id')
def store_result_set_data(self, result_sets):
"""
Build single queries to add new result_sets, revisions, and
revision_map for a list of result_sets.
result_sets = [
{
"revision_hash": "8afdb7debc82a8b6e0d56449dfdf916c77a7bf80",
"push_timestamp": 1378293517,
"author": "some-sheriff@mozilla.com",
"revisions": [
{
"comment": "Bug 911954 - Add forward declaration of JSScript to TraceLogging.h, r=h4writer",
"repository": "test_treeherder",
"author": "John Doe <jdoe@mozilla.com>",
"branch": "default",
"revision": "2c25d2bbbcd6"
},
...
]
},
...
]
returns = {
}
"""
if not result_sets:
return {}
# result_set data structures
revision_hash_placeholders = []
unique_revision_hashes = []
where_in_list = []
# revision data structures
repository_id_lookup = dict()
revision_placeholders = []
all_revisions = []
rev_where_in_list = []
# revision_map structures
revision_to_rhash_lookup = dict()
# TODO: Confirm whether we need to do a lookup in this loop in the
# memcache to reduce query overhead
for result in result_sets:
revision_hash_placeholders.append(
[
result.get('author', 'unknown@somewhere.com'),
result['revision_hash'],
result['push_timestamp'],
result.get('active_status', 'active'),
result['revision_hash']
]
)
where_in_list.append('%s')
unique_revision_hashes.append(result['revision_hash'])
for rev_datum in result['revisions']:
# Retrieve the associated repository id just once
# and provide handling for multiple repositories
if rev_datum['repository'] not in repository_id_lookup:
repository_id = self.refdata_model.get_repository_id(
rev_datum['repository']
)
repository_id_lookup[rev_datum['repository']] = repository_id
# We may not have a commit timestamp in the push data
commit_timestamp = rev_datum.get(
'commit_timestamp', None
)
# We may not have a comment in the push data
comment = rev_datum.get(
'comment', None
)
repository_id = repository_id_lookup[rev_datum['repository']]
revision_placeholders.append(
[rev_datum['revision'],
rev_datum['author'],
comment,
commit_timestamp,
repository_id,
rev_datum['revision'],
repository_id]
)
all_revisions.append(rev_datum['revision'])
rev_where_in_list.append('%s')
revision_to_rhash_lookup[rev_datum['revision']] = result['revision_hash']
# Retrieve a list of revision_hashes that have already been stored
# in the list of unique_revision_hashes. Use it to determine the new
# result_sets found to publish to pulse.
where_in_clause = ','.join(where_in_list)
result_set_ids_before = self.execute(
proc='jobs.selects.get_result_set_ids',
placeholders=unique_revision_hashes,
replace=[where_in_clause],
key_column='revision_hash',
return_type='set',
debug_show=self.DEBUG
)
# Insert new result sets
self.execute(
proc='jobs.inserts.set_result_set',
placeholders=revision_hash_placeholders,
executemany=True,
debug_show=self.DEBUG
)
lastrowid = self.get_dhub().connection['master_host']['cursor'].lastrowid
# Retrieve new and already existing result set ids
result_set_id_lookup = self.execute(
proc='jobs.selects.get_result_set_ids',
placeholders=unique_revision_hashes,
replace=[where_in_clause],
key_column='revision_hash',
return_type='dict',
debug_show=self.DEBUG
)
# identify the newly inserted result sets
result_set_ids_after = set(result_set_id_lookup.keys())
inserted_result_sets = result_set_ids_after.difference(
result_set_ids_before
)
inserted_result_set_ids = []
# If cursor.lastrowid is > 0 rows were inserted on this
# cursor. When new rows are inserted, determine the new
# result_set ids and submit publish to pulse tasks.
if inserted_result_sets and lastrowid > 0:
for revision_hash in inserted_result_sets:
inserted_result_set_ids.append(
result_set_id_lookup[revision_hash]['id']
)
# Insert new revisions
self.execute(
proc='jobs.inserts.set_revision',
placeholders=revision_placeholders,
executemany=True,
debug_show=self.DEBUG
)
# Retrieve new revision ids
rev_where_in_clause = ','.join(rev_where_in_list)
revision_id_lookup = self.execute(
proc='jobs.selects.get_revisions',
placeholders=all_revisions,
replace=[rev_where_in_clause],
key_column='revision',
return_type='dict',
debug_show=self.DEBUG
)
# Build placeholders for revision_map
revision_map_placeholders = []
for revision in revision_id_lookup:
revision_hash = revision_to_rhash_lookup[revision]
revision_id = revision_id_lookup[revision]['id']
result_set_id = result_set_id_lookup[revision_hash]['id']
revision_map_placeholders.append(
[revision_id,
result_set_id,
revision_id,
result_set_id]
)
# Insert new revision_map entries
self.execute(
proc='jobs.inserts.set_revision_map',
placeholders=revision_map_placeholders,
executemany=True,
debug_show=self.DEBUG
)
if len(inserted_result_set_ids) > 0:
# Queue an event to notify pulse of these new resultsets
publish_resultset.apply_async(
args=[self.project, inserted_result_set_ids],
routing_key='publish_to_pulse'
)
return {
'result_set_ids': result_set_id_lookup,
'revision_ids': revision_id_lookup,
'inserted_result_set_ids': inserted_result_set_ids
}
def get_revision_timestamp(self, rev):
"""Get the push timestamp of the resultset for a revision"""
return self.get_revision_resultset_lookup([rev])[rev][
"push_timestamp"
]
def get_exclusion_profile_signatures(self, exclusion_profile):
"""Retrieve the reference data signatures associates to an exclusion profile"""
signatures = []
try:
if exclusion_profile == "default":
profile = ExclusionProfile.objects.get(
is_default=True
)
else:
profile = ExclusionProfile.objects.get(
name=exclusion_profile
)
signatures = profile.flat_exclusion[self.project]
except KeyError:
# this repo/project has no hidden signatures
pass
except ExclusionProfile.DoesNotExist:
# Either there's no default profile setup or the profile
            # specified is not available
pass
return signatures
def get_resultset_status(self, resultset_id, exclusion_profile="default"):
"""Retrieve an aggregated job count for the given resultset.
If an exclusion profile is provided, the job counted will be filtered accordingly"""
replace = []
placeholders = [resultset_id]
if exclusion_profile:
signature_list = self.get_exclusion_profile_signatures(exclusion_profile)
if signature_list:
signatures_replacement = ",".join(["%s"] * len(signature_list))
replace.append(
"AND signature NOT IN ({0})".format(signatures_replacement)
)
placeholders += signature_list
        resultset_status_list = self.execute(
proc='jobs.selects.get_resultset_status',
placeholders=placeholders,
replace=replace,
debug_show=self.DEBUG)
num_coalesced = 0
resultset_status_dict = {}
        for rs in resultset_status_list:
num_coalesced += rs['num_coalesced'] if rs['num_coalesced'] else 0
if rs['state'] == 'completed':
resultset_status_dict[rs['result']] = int(rs['total']) - rs['num_coalesced']
else:
resultset_status_dict[rs['state']] = int(rs['total'])
if num_coalesced:
resultset_status_dict['coalesced'] = num_coalesced
return resultset_status_dict
class JobDataError(ValueError):
pass
class JobDataIntegrityError(IntegrityError):
pass
class JobData(dict):
"""
Encapsulates data access from incoming test data structure.
All missing-data errors raise ``JobDataError`` with a useful
message. Unlike regular nested dictionaries, ``JobData`` keeps track of
context, so errors contain not only the name of the immediately-missing
key, but the full parent-key context as well.
"""
def __init__(self, data, context=None):
"""Initialize ``JobData`` with a data dict and a context list."""
self.context = context or []
super(JobData, self).__init__(data)
@classmethod
def from_json(cls, json_blob):
"""Create ``JobData`` from a JSON string."""
try:
data = json.loads(json_blob)
except ValueError as e:
raise JobDataError("Malformed JSON: {0}".format(e))
return cls(data)
def __getitem__(self, name):
"""Get a data value, raising ``JobDataError`` if missing."""
full_context = list(self.context) + [name]
try:
value = super(JobData, self).__getitem__(name)
except KeyError:
raise JobDataError("Missing data: {0}.".format(
"".join(["['{0}']".format(c) for c in full_context])))
# Provide the same behavior recursively to nested dictionaries.
if isinstance(value, dict):
value = self.__class__(value, full_context)
return value
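    # Illustrative behaviour (hypothetical payload):
    #   JobData({'job': {}})['job']['guid']
    # raises JobDataError("Missing data: ['job']['guid'].") because the nested
    # lookup keeps the full parent-key context.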
| vaishalitekale/treeherder | treeherder/model/derived/jobs.py | Python | mpl-2.0 | 83,969 | 0.000488 |
CSRF_ENABLED = True
SECRET_KEY = 'this-is-a-secret'
| xiaowing/tinysso | config.py | Python | apache-2.0 | 54 | 0 |
from temboo.Library.Yelp.SearchByAddress import SearchByAddress, SearchByAddressInputSet, SearchByAddressResultSet, SearchByAddressChoreographyExecution
from temboo.Library.Yelp.SearchByBoundingBox import SearchByBoundingBox, SearchByBoundingBoxInputSet, SearchByBoundingBoxResultSet, SearchByBoundingBoxChoreographyExecution
from temboo.Library.Yelp.SearchByCategory import SearchByCategory, SearchByCategoryInputSet, SearchByCategoryResultSet, SearchByCategoryChoreographyExecution
from temboo.Library.Yelp.SearchByCity import SearchByCity, SearchByCityInputSet, SearchByCityResultSet, SearchByCityChoreographyExecution
from temboo.Library.Yelp.SearchByCoordinates import SearchByCoordinates, SearchByCoordinatesInputSet, SearchByCoordinatesResultSet, SearchByCoordinatesChoreographyExecution
from temboo.Library.Yelp.SearchByNeighborhood import SearchByNeighborhood, SearchByNeighborhoodInputSet, SearchByNeighborhoodResultSet, SearchByNeighborhoodChoreographyExecution
from temboo.Library.Yelp.SearchForBusiness import SearchForBusiness, SearchForBusinessInputSet, SearchForBusinessResultSet, SearchForBusinessChoreographyExecution
from temboo.Library.Yelp.SearchForBusinessesWithDeals import SearchForBusinessesWithDeals, SearchForBusinessesWithDealsInputSet, SearchForBusinessesWithDealsResultSet, SearchForBusinessesWithDealsChoreographyExecution
| jordanemedlock/psychtruths | temboo/core/Library/Yelp/__init__.py | Python | apache-2.0 | 1,354 | 0.005908 |
from sklearn2sql_heroku.tests.classification import generic as class_gen
class_gen.test_model("SGDClassifier" , "FourClass_500" , "mysql")
| antoinecarme/sklearn2sql_heroku | tests/classification/FourClass_500/ws_FourClass_500_SGDClassifier_mysql_code_gen.py | Python | bsd-3-clause | 141 | 0.014184 |
# Copyright (c) LinkedIn Corporation. All rights reserved. Licensed under the BSD-2 Clause license.
# See LICENSE in the project root for license information.
from iris.custom_import import import_custom_module
import logging
logger = logging.getLogger(__name__)
class IrisRoleLookupException(Exception):
pass
def get_role_lookups(config):
modules = config.get('role_lookups', [])
# default to only support user and mailing_list.
if not modules:
modules = ['user', 'mailing_list']
imported_modules = []
for m in modules:
try:
imported_modules.append(
import_custom_module('iris.role_lookup', m)(config))
logger.info('Loaded lookup modules: %s', m)
except Exception:
logger.exception('Failed to load role lookup module: %s', m)
return imported_modules
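# Minimal usage sketch (the config contents shown are only an example):
#   config = {'role_lookups': ['user', 'mailing_list']}
#   lookups = get_role_lookups(config)
# Each named module is imported from iris.role_lookup, instantiated with the
# config and returned in order; failures are logged and skipped.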
| dwang159/iris-api | src/iris/role_lookup/__init__.py | Python | bsd-2-clause | 863 | 0.001159 |
#!/usr/bin/env python2
# Copyright (c) 2014-2015 The Bitcoin Core developers
# Copyright (c) 2015-2016 The Bitcoin Unlimited developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
try:
import urllib.parse as urlparse
except ImportError:
import urlparse
class AbandonConflictTest(BitcoinTestFramework):
def setup_network(self):
self.nodes = []
self.nodes.append(start_node(0, self.options.tmpdir, ["-debug","-logtimemicros","-minrelaytxfee=0.00001"]))
self.nodes.append(start_node(1, self.options.tmpdir, ["-debug","-logtimemicros"]))
connect_nodes(self.nodes[0], 1)
def run_test(self):
self.nodes[1].generate(100)
sync_blocks(self.nodes)
balance = self.nodes[0].getbalance()
txA = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), Decimal("10"))
txB = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), Decimal("10"))
txC = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), Decimal("10"))
sync_mempools(self.nodes)
self.nodes[1].generate(1)
sync_blocks(self.nodes)
newbalance = self.nodes[0].getbalance()
assert(balance - newbalance < Decimal("0.001")) #no more than fees lost
balance = newbalance
url = urlparse.urlparse(self.nodes[1].url)
self.nodes[0].disconnectnode(url.hostname+":"+str(p2p_port(1)))
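        # Keep node 0 disconnected from here on so the transactions built below
        # are not relayed or mined, stay unconfirmed from node 0's point of
        # view, and can later be abandoned.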
# Identify the 10btc outputs
nA = next(i for i, vout in enumerate(self.nodes[0].getrawtransaction(txA, 1)["vout"]) if vout["value"] == Decimal("10"))
nB = next(i for i, vout in enumerate(self.nodes[0].getrawtransaction(txB, 1)["vout"]) if vout["value"] == Decimal("10"))
nC = next(i for i, vout in enumerate(self.nodes[0].getrawtransaction(txC, 1)["vout"]) if vout["value"] == Decimal("10"))
inputs =[]
# spend 10btc outputs from txA and txB
inputs.append({"txid":txA, "vout":nA})
inputs.append({"txid":txB, "vout":nB})
outputs = {}
outputs[self.nodes[0].getnewaddress()] = Decimal("14.99998")
outputs[self.nodes[1].getnewaddress()] = Decimal("5")
signed = self.nodes[0].signrawtransaction(self.nodes[0].createrawtransaction(inputs, outputs))
txAB1 = self.nodes[0].sendrawtransaction(signed["hex"])
# Identify the 14.99998btc output
nAB = next(i for i, vout in enumerate(self.nodes[0].getrawtransaction(txAB1, 1)["vout"]) if vout["value"] == Decimal("14.99998"))
#Create a child tx spending AB1 and C
inputs = []
inputs.append({"txid":txAB1, "vout":nAB})
inputs.append({"txid":txC, "vout":nC})
outputs = {}
outputs[self.nodes[0].getnewaddress()] = Decimal("24.9996")
signed2 = self.nodes[0].signrawtransaction(self.nodes[0].createrawtransaction(inputs, outputs))
txABC2 = self.nodes[0].sendrawtransaction(signed2["hex"])
# In mempool txs from self should increase balance from change
newbalance = self.nodes[0].getbalance()
assert(newbalance == balance - Decimal("30") + Decimal("24.9996"))
balance = newbalance
# Restart the node with a higher min relay fee so the parent tx is no longer in mempool
# TODO: redo with eviction
# Note had to make sure tx did not have AllowFree priority
stop_node(self.nodes[0],0)
self.nodes[0]=start_node(0, self.options.tmpdir, ["-debug","-logtimemicros","-minrelaytxfee=0.0001"])
# Verify txs no longer in mempool
assert(len(self.nodes[0].getrawmempool()) == 0)
# Not in mempool txs from self should only reduce balance
# inputs are still spent, but change not received
newbalance = self.nodes[0].getbalance()
assert(newbalance == balance - Decimal("24.9996"))
# Unconfirmed received funds that are not in mempool, also shouldn't show
# up in unconfirmed balance
unconfbalance = self.nodes[0].getunconfirmedbalance() + self.nodes[0].getbalance()
assert(unconfbalance == newbalance)
# Also shouldn't show up in listunspent
assert(not txABC2 in [utxo["txid"] for utxo in self.nodes[0].listunspent(0)])
balance = newbalance
# Abandon original transaction and verify inputs are available again
# including that the child tx was also abandoned
self.nodes[0].abandontransaction(txAB1)
newbalance = self.nodes[0].getbalance()
assert(newbalance == balance + Decimal("30"))
balance = newbalance
# Verify that even with a low min relay fee, the tx is not reaccepted from wallet on startup once abandoned
stop_node(self.nodes[0],0)
self.nodes[0]=start_node(0, self.options.tmpdir, ["-debug","-logtimemicros","-minrelaytxfee=0.00001"])
assert(len(self.nodes[0].getrawmempool()) == 0)
assert(self.nodes[0].getbalance() == balance)
        # But if it's received again then it is unabandoned
# And since now in mempool, the change is available
# But its child tx remains abandoned
self.nodes[0].sendrawtransaction(signed["hex"])
newbalance = self.nodes[0].getbalance()
assert(newbalance == balance - Decimal("20") + Decimal("14.99998"))
balance = newbalance
        # Send child tx again so it's unabandoned
self.nodes[0].sendrawtransaction(signed2["hex"])
newbalance = self.nodes[0].getbalance()
assert(newbalance == balance - Decimal("10") - Decimal("14.99998") + Decimal("24.9996"))
balance = newbalance
# Remove using high relay fee again
stop_node(self.nodes[0],0)
self.nodes[0]=start_node(0, self.options.tmpdir, ["-debug","-logtimemicros","-minrelaytxfee=0.0001"])
assert(len(self.nodes[0].getrawmempool()) == 0)
newbalance = self.nodes[0].getbalance()
assert(newbalance == balance - Decimal("24.9996"))
balance = newbalance
# Create a double spend of AB1 by spending again from only A's 10 output
# Mine double spend from node 1
inputs =[]
inputs.append({"txid":txA, "vout":nA})
outputs = {}
outputs[self.nodes[1].getnewaddress()] = Decimal("9.9999")
tx = self.nodes[0].createrawtransaction(inputs, outputs)
signed = self.nodes[0].signrawtransaction(tx)
self.nodes[1].sendrawtransaction(signed["hex"])
self.nodes[1].generate(1)
connect_nodes(self.nodes[0], 1)
sync_blocks(self.nodes)
# Verify that B and C's 10 BTC outputs are available for spending again because AB1 is now conflicted
newbalance = self.nodes[0].getbalance()
assert(newbalance == balance + Decimal("20"))
balance = newbalance
# There is currently a minor bug around this and so this test doesn't work. See Issue #7315
# Invalidate the block with the double spend and B's 10 BTC output should no longer be available
# Don't think C's should either
self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
newbalance = self.nodes[0].getbalance()
#assert(newbalance == balance - Decimal("10"))
print "If balance has not declined after invalidateblock then out of mempool wallet tx which is no longer"
print "conflicted has not resumed causing its inputs to be seen as spent. See Issue #7315"
print balance , " -> " , newbalance , " ?"
if __name__ == '__main__':
AbandonConflictTest().main()
| SartoNess/BitcoinUnlimited | qa/rpc-tests/abandonconflict.py | Python | mit | 7,686 | 0.008197 |
from bioscrape.inference import DeterministicLikelihood as DLL
from bioscrape.inference import StochasticTrajectoriesLikelihood as STLL
from bioscrape.inference import StochasticTrajectories
from bioscrape.inference import BulkData
import warnings
import numpy as np
class PIDInterface():
'''
PID Interface : Parameter identification interface.
Super class to create parameter identification (PID) interfaces. Two PID interfaces currently implemented:
Deterministic and Stochastic inference using time-series data.
To add a new PIDInterface - simply add a new subclass of this parent class with your desired
    log-likelihood functions. You can even have your own check_prior function in that class if you prefer
    not to use the built-in priors provided with this package.
'''
def __init__(self, params_to_estimate, M, prior):
'''
Parent class for all PID interfaces.
Arguments:
* `params_to_estimate` : List of parameter names to be estimated
* `M` : The bioscrape Model object to use for inference
* `prior` : A dictionary specifying prior distribution.
Two built-in prior functions are `uniform_prior` and `gaussian_prior`.
Each prior has its own syntax for accepting the distribution parameters in the dictionary.
New priors may be added. The suggested format for prior dictionaries:
prior_dict = {'parameter_name': ['prior_name', prior_distribution_parameters]}
For built-in uniform prior, use {'parameter_name':['uniform', lower_bound, upper_bound]}
For built-in gaussian prior, use {'parameter_name':['gaussian', mean, standard_deviation, probability threshold]}
New PID interfaces can be added by creating child classes of PIDInterface class as shown for
Built-in PID interfaces : `StochasticInference` and `DeterministicInference`
'''
self.params_to_estimate = params_to_estimate
self.M = M
self.prior = prior
return
def check_prior(self, params_dict):
'''
To add new prior functions: simply add a new function similar to ones that exist and then
call it here.
'''
lp = 0.0
for key,value in params_dict.items():
if 'positive' in self.prior[key] and value < 0:
return np.inf
prior_type = self.prior[key][0]
if prior_type == 'uniform':
lp += self.uniform_prior(key, value)
elif prior_type == 'gaussian':
lp += self.gaussian_prior(key, value)
elif prior_type == 'exponential':
lp += self.exponential_prior(key, value)
elif prior_type == 'gamma':
lp += self.gamma_prior(key, value)
elif prior_type == 'log-uniform':
lp += self.log_uniform_prior(key, value)
elif prior_type == 'log-gaussian':
lp += self.log_gaussian_prior(key, value)
elif prior_type == 'beta':
lp += self.beta_prior(key, value)
elif prior_type == 'custom':
# The last element in the prior dictionary must be a callable function
# The callable function shoud have the following signature :
# Arguments: param_name (str), param_value(float)
# Returns: log prior probability (float or numpy inf)
                custom_function = self.prior[key][-1]
                lp += custom_function(key, value)
else:
raise ValueError('Prior type undefined.')
return lp
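    # Minimal usage sketch (parameter names and numbers are hypothetical):
    #   prior = {'k_tx': ['uniform', 0.0, 10.0],
    #            'd_m': ['gaussian', 0.5, 0.1, 0.01]}
    #   pid = PIDInterface(['k_tx', 'd_m'], M, prior)
    #   lp = pid.check_prior({'k_tx': 2.0, 'd_m': 0.4})  # summed log prior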
def uniform_prior(self, param_name, param_value):
'''
Check if given param_value is valid according to the prior distribution.
Returns np.Inf if the param_value is outside the prior range and 0.0 if it is inside.
param_name is used to look for the parameter in the prior dictionary.
'''
prior_dict = self.prior
if prior_dict is None:
raise ValueError('No prior found')
lower_bound = prior_dict[param_name][1]
upper_bound = prior_dict[param_name][2]
if param_value > upper_bound or param_value < lower_bound:
return np.inf
else:
return np.log( 1/(upper_bound - lower_bound) )
def gaussian_prior(self, param_name, param_value):
'''
Check if given param_value is valid according to the prior distribution.
Returns the log prior probability or np.Inf if the param_value is invalid.
'''
prior_dict = self.prior
if prior_dict is None:
raise ValueError('No prior found')
mu = prior_dict[param_name][1]
sigma = prior_dict[param_name][2]
if sigma < 0:
raise ValueError('The standard deviation must be positive.')
# Using probability density function for normal distribution
# Using scipy.stats.norm has overhead that affects speed up to 2x
prob = 1/(np.sqrt(2*np.pi) * sigma) * np.exp(-0.5*(param_value - mu)**2/sigma**2)
if prob < 0:
warnings.warn('Probability less than 0 while checking Gaussian prior! Current parameter name and value: {0}:{1}.'.format(param_name, param_value))
return np.inf
else:
return np.log(prob)
def exponential_prior(self, param_name, param_value):
'''
Check if given param_value is valid according to the prior distribution.
Returns the log prior probability or np.inf if the param_value is invalid.
'''
prior_dict = self.prior
if prior_dict is None:
raise ValueError('No prior found')
lambda_p = prior_dict[param_name][1]
prob = lambda_p * np.exp(-lambda_p * param_value)
if prob < 0:
warnings.warn('Probability less than 0 while checking Exponential prior! Current parameter name and value: {0}:{1}.'.format(param_name, param_value))
return np.inf
else:
return np.log(prob)
def gamma_prior(self, param_name, param_value):
'''
Check if given param_value is valid according to the prior distribution.
Returns the log prior probability or np.inf if the param_value is invalid.
'''
prior_dict = self.prior
if prior_dict is None:
raise ValueError('No prior found')
alpha = prior_dict[param_name][1]
beta = prior_dict[param_name][2]
from scipy.special import gamma
prob = (beta**alpha)/gamma(alpha) * param_value**(alpha - 1) * np.exp(-1 * beta*param_value)
if prob < 0:
            warnings.warn('Probability less than 0 while checking Gamma prior! Current parameter name and value: {0}:{1}.'.format(param_name, param_value))
return np.inf
else:
return np.log(prob)
def beta_prior(self, param_name, param_value):
'''
Check if given param_value is valid according to the prior distribution.
Returns the log prior probability or np.inf if the param_value is invalid.
'''
prior_dict = self.prior
if prior_dict is None:
raise ValueError('No prior found')
alpha = prior_dict[param_name][1]
beta = prior_dict[param_name][2]
        from scipy.special import beta as beta_func
prob = (param_value**(alpha-1) * (1 - param_value)**(beta - 1) )/beta_func(alpha, beta)
if prob < 0:
            warnings.warn('Probability less than 0 while checking Beta prior! Current parameter name and value: {0}:{1}.'.format(param_name, param_value))
return np.inf
else:
return np.log(prob)
def log_uniform_prior(self, param_name, param_value):
'''
Check if given param_value is valid according to the prior distribution.
Returns the log prior probability or np.inf if the param_value is invalid.
'''
prior_dict = self.prior
if prior_dict is None:
raise ValueError('No prior found')
lower_bound = prior_dict[param_name][1]
upper_bound = prior_dict[param_name][2]
if lower_bound < 0 or upper_bound < 0:
raise ValueError('Upper and lower bounds for log-uniform prior must be positive.')
if param_value > upper_bound or param_value < lower_bound:
return np.inf
prob = 1/(param_value* (np.log(upper_bound) - np.log(lower_bound)))
if prob < 0:
warnings.warn('Probability less than 0 while checking Log-Uniform prior! Current parameter name and value: {0}:{1}.'.format(param_name, param_value))
return np.inf
else:
return np.log(prob)
def log_gaussian_prior(self, param_name, param_value):
'''
Check if given param_value is valid according to the prior distribution.
Returns the log prior probability or np.inf if the param_value is invalid.
'''
prior_dict = self.prior
if prior_dict is None:
raise ValueError('No prior found')
mu = prior_dict[param_name][1]
sigma = prior_dict[param_name][2]
if sigma < 0:
raise ValueError('The standard deviation must be positive.')
# Using probability density function for log-normal distribution
prob = 1/(param_value * np.sqrt(2*np.pi) * sigma) * np.exp((-0.5 * (np.log(param_value) - mu)**2)/sigma**2)
if prob < 0:
warnings.warn('Probability less than 0 while checking log-normal prior! Current parameter name and value: {0}:{1}.'.format(param_name, param_value))
return np.inf
else:
return np.log(prob)
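# Example (hypothetical sketch, not part of the original code): a prior dictionary
# mixing the built-in prior types handled by PIDInterface.check_prior above.
# The parameter names and numeric values are invented purely for illustration.
#
#   prior = {'k_tx': ['uniform', 0.0, 100.0],
#            'k_deg': ['gaussian', 1.0, 0.5],
#            'K_m': ['log-uniform', 1e-3, 1e3, 'positive'],
#            'n': ['custom', lambda name, value: 0.0 if 0 < value < 4 else np.inf]}
#
# The 'custom' entry ends with a callable taking (param_name, param_value) and
# returning a log prior probability (np.inf marks an invalid value, matching the
# convention used by the built-in priors in this file).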
# Add a new class similar to this to create new interfaces.
class StochasticInference(PIDInterface):
def __init__(self, params_to_estimate, M, prior):
self.LL_stoch = None
self.dataStoch = None
super().__init__(params_to_estimate, M, prior)
return
def setup_likelihood_function(self, data, timepoints, measurements, initial_conditions, norm_order = 2, N_simulations = 3, debug = False, **kwargs):
N = np.shape(data)[0]
if debug:
print('Stochastic inference attributes:')
print('The timepoints shape is {0}'.format(np.shape(timepoints)))
print('The data shape is {0}'.format(np.shape(data)))
            print('The measurements are {0}'.format(measurements))
print('The N is {0}'.format(N))
print('Using the initial conditions: {0}'.format(initial_conditions))
self.dataStoch = StochasticTrajectories(np.array(timepoints), data, measurements, N)
        # If there are multiple initial conditions in a data-set, they should correspond to multiple initial conditions for inference.
        # Note: len(initial_conditions) must be equal to the number of trajectories N
self.LL_stoch = STLL(model = self.M, init_state = initial_conditions,
data = self.dataStoch, N_simulations = N_simulations, norm_order = norm_order)
def get_likelihood_function(self, params):
        # Set params here and return the log posterior (prior + likelihood).
if self.LL_stoch is None:
raise RuntimeError("Must call StochasticInference.setup_likelihood_function before using StochasticInference.get_likelihood_function.")
#Set params
params_dict = {}
for key, p in zip(self.params_to_estimate, params):
params_dict[key] = p
self.LL_stoch.set_init_params(params_dict)
#Prior
lp = self.check_prior(params_dict)
if not np.isfinite(lp):
return -np.inf
LL_stoch_cost = self.LL_stoch.py_log_likelihood()
ln_prob = lp + LL_stoch_cost
return ln_prob
# Add a new class similar to this to create new interfaces.
class DeterministicInference(PIDInterface):
def __init__(self, params_to_estimate, M, prior):
self.LL_det = None
self.dataDet = None
super().__init__(params_to_estimate, M, prior)
return
def setup_likelihood_function(self, data, timepoints, measurements, initial_conditions, norm_order = 2, debug = False, **kwargs):
N = np.shape(data)[0]
        # Create a data object
        # In this case the timepoints should be a list of timepoint vectors, one for each iteration
self.dataDet = BulkData(np.array(timepoints), data, measurements, N)
        # If there are multiple initial conditions in a data-set, they should correspond to multiple initial conditions for inference.
        # Note: len(initial_conditions) must be equal to the number of trajectories N
if debug:
print('The deterministic inference attributes:')
print('The timepoints shape is {0}'.format(np.shape(timepoints)))
print('The data shape is {0}'.format(np.shape(data)))
            print('The measurements are {0}'.format(measurements))
print('The N is {0}'.format(N))
print('Using the initial conditions: {0}'.format(initial_conditions))
#Create Likelihood object
self.LL_det = DLL(model = self.M, init_state = initial_conditions, data = self.dataDet, norm_order = norm_order)
def get_likelihood_function(self, params):
if self.LL_det is None:
raise RuntimeError("Must call DeterministicInference.setup_likelihood_function before using DeterministicInference.get_likelihood_function.")
#this part is the only part that is called repeatedly
params_dict = {}
for key, p in zip(self.params_to_estimate, params):
params_dict[key] = p
self.LL_det.set_init_params(params_dict)
# Check prior
lp = 0
lp = self.check_prior(params_dict)
if not np.isfinite(lp):
return -np.inf
#apply cost function
LL_det_cost = self.LL_det.py_log_likelihood()
ln_prob = lp + LL_det_cost
return ln_prob
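# Hypothetical usage sketch (the data, timepoints, measurements and
# initial_conditions names below are placeholders, not taken from this file):
# the interfaces above are used by first building the likelihood once and then
# evaluating it repeatedly, e.g. from within an MCMC sampler.
#
#   pid = DeterministicInference(['k_tx', 'k_deg'], M, prior)
#   pid.setup_likelihood_function(data, timepoints, measurements,
#                                 initial_conditions, norm_order=2)
#   log_posterior = pid.get_likelihood_function([0.5, 1.2])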
| ananswam/bioscrape | bioscrape/pid_interfaces.py | Python | mit | 13,998 | 0.008573 |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.6 on 2016-08-03 13:45
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('wins', '0016_win_updated'),
('wins', '0017_auto_20160801_1230'),
]
operations = [
]
| UKTradeInvestment/export-wins-data | wins/migrations/0018_merge.py | Python | gpl-3.0 | 324 | 0 |
from .states import (TaskStatus, MultijobActions, TASK_STATUS_PERMITTED_ACTIONS,
TASK_STATUS_STARTUP_ACTIONS)
| GeoMop/GeoMop | src/JobPanel/data/__init__.py | Python | gpl-3.0 | 131 | 0.007634 |
import tests.model_control.test_ozone_custom_models_enabled as testmod
testmod.build_model( ['Logit'] , ['ConstantTrend'] , ['Seasonal_Hour'] , ['ARX'] ); | antoinecarme/pyaf | tests/model_control/detailed/transf_Logit/model_control_one_enabled_Logit_ConstantTrend_Seasonal_Hour_ARX.py | Python | bsd-3-clause | 156 | 0.051282 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from copy import deepcopy
from typing import Any, Awaitable, Optional, TYPE_CHECKING
from azure.core.rest import AsyncHttpResponse, HttpRequest
from azure.mgmt.core import AsyncARMPipelineClient
from msrest import Deserializer, Serializer
from .. import models
from ._configuration import SqlVirtualMachineManagementClientConfiguration
from .operations import AvailabilityGroupListenersOperations, Operations, SqlVirtualMachineGroupsOperations, SqlVirtualMachinesOperations
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from azure.core.credentials_async import AsyncTokenCredential
class SqlVirtualMachineManagementClient:
"""The SQL virtual machine management API provides a RESTful set of web APIs that interact with Azure Compute, Network & Storage services to manage your SQL Server virtual machine. The API enables users to create, delete and retrieve a SQL virtual machine, SQL virtual machine group or availability group listener.
:ivar availability_group_listeners: AvailabilityGroupListenersOperations operations
:vartype availability_group_listeners:
azure.mgmt.sqlvirtualmachine.aio.operations.AvailabilityGroupListenersOperations
:ivar operations: Operations operations
:vartype operations: azure.mgmt.sqlvirtualmachine.aio.operations.Operations
:ivar sql_virtual_machine_groups: SqlVirtualMachineGroupsOperations operations
:vartype sql_virtual_machine_groups:
azure.mgmt.sqlvirtualmachine.aio.operations.SqlVirtualMachineGroupsOperations
:ivar sql_virtual_machines: SqlVirtualMachinesOperations operations
:vartype sql_virtual_machines:
azure.mgmt.sqlvirtualmachine.aio.operations.SqlVirtualMachinesOperations
:param credential: Credential needed for the client to connect to Azure.
:type credential: ~azure.core.credentials_async.AsyncTokenCredential
:param subscription_id: Subscription ID that identifies an Azure subscription.
:type subscription_id: str
:param base_url: Service URL. Default value is 'https://management.azure.com'.
:type base_url: str
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
"""
def __init__(
self,
credential: "AsyncTokenCredential",
subscription_id: str,
base_url: str = "https://management.azure.com",
**kwargs: Any
) -> None:
self._config = SqlVirtualMachineManagementClientConfiguration(credential=credential, subscription_id=subscription_id, **kwargs)
self._client = AsyncARMPipelineClient(base_url=base_url, config=self._config, **kwargs)
client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
self._serialize = Serializer(client_models)
self._deserialize = Deserializer(client_models)
self._serialize.client_side_validation = False
self.availability_group_listeners = AvailabilityGroupListenersOperations(self._client, self._config, self._serialize, self._deserialize)
self.operations = Operations(self._client, self._config, self._serialize, self._deserialize)
self.sql_virtual_machine_groups = SqlVirtualMachineGroupsOperations(self._client, self._config, self._serialize, self._deserialize)
self.sql_virtual_machines = SqlVirtualMachinesOperations(self._client, self._config, self._serialize, self._deserialize)
def _send_request(
self,
request: HttpRequest,
**kwargs: Any
) -> Awaitable[AsyncHttpResponse]:
"""Runs the network request through the client's chained policies.
>>> from azure.core.rest import HttpRequest
>>> request = HttpRequest("GET", "https://www.example.org/")
<HttpRequest [GET], url: 'https://www.example.org/'>
>>> response = await client._send_request(request)
<AsyncHttpResponse: 200 OK>
For more information on this code flow, see https://aka.ms/azsdk/python/protocol/quickstart
:param request: The network request you want to make. Required.
:type request: ~azure.core.rest.HttpRequest
:keyword bool stream: Whether the response payload will be streamed. Defaults to False.
:return: The response of your network call. Does not do error handling on your response.
:rtype: ~azure.core.rest.AsyncHttpResponse
"""
request_copy = deepcopy(request)
request_copy.url = self._client.format_url(request_copy.url)
return self._client.send_request(request_copy, **kwargs)
async def close(self) -> None:
await self._client.close()
async def __aenter__(self) -> "SqlVirtualMachineManagementClient":
await self._client.__aenter__()
return self
async def __aexit__(self, *exc_details) -> None:
await self._client.__aexit__(*exc_details)
| Azure/azure-sdk-for-python | sdk/sql/azure-mgmt-sqlvirtualmachine/azure/mgmt/sqlvirtualmachine/aio/_sql_virtual_machine_management_client.py | Python | mit | 5,342 | 0.003931 |
import os
import sys
import pygame
import signal
import time
import ConfigParser
from twython import TwythonStreamer
#-----------------------------------------------------------------------------
# Import custom modules
#-----------------------------------------------------------------------------
# Add pyscope module to path
path = os.path.join(os.path.dirname(__file__), 'py_apps/pyscope')
sys.path.append(path)
# Add twit_feed module to path
path = os.path.join(os.path.dirname(__file__), '../py_apps/twit_feed')
sys.path.append(path)
import pyscope
import twit_feed
#import tf_test_02
#-----------------------------------------------------------------------------
# Constants
#-----------------------------------------------------------------------------
MAX_ENTRIES = 1
FPS = 5
BET_TERM = ['#testing', '#blargz'] #['@Gr8AmTweetRace']
AUTH = { 'app_key': 'li8wn8Tb7xBifCnNIgyqUw',
'app_secret': 'vcwq36w4C4VXamlqWBDKM2E8etsOoangDoMhxNDU',
'oauth_token': '1969690717-rGw3VkRQ8IyL4OcPWtv5Y2CeBdVn8ndJrjGKraI',
'oauth_token_secret': 'KO7YIFMKWKaYTtz2zEyaSy044ixj5kIbWrDtZZL96ly0H'}
# Common colors
WHITE = 255,255,255
GREEN = 0,255,0
BLACK = 0,0,0
BLUE = 0,0,255
RED = 255,0,0
#-----------------------------------------------------------------------------
# Global Variables
#-----------------------------------------------------------------------------
g_terms = []
g_bet_loop = None
g_scope = None
#-----------------------------------------------------------------------------
# Functions
#-----------------------------------------------------------------------------
# Handle graphics on the screen
def draw_starting_screen():
global g_terms
global g_scope
# Create fonts
font_mode = pygame.font.Font(None, 68)
font_title_1 = pygame.font.Font(None, 68)
font_title_2 = pygame.font.Font(None, 68)
font_instr_1 = pygame.font.Font(None, 36)
font_instr_2 = pygame.font.Font(None, 36)
font_ent_title = pygame.font.Font(None, 36)
font_ent = pygame.font.Font(None, 36)
# Create background
rect_bg = pygame.draw.rect(g_scope.screen, BLACK, \
(0, 0, 540, 960), 0)
rect_title = pygame.draw.rect(g_scope.screen, WHITE, \
(20, 20, 500, 100), 0)
rect_game_mode = pygame.draw.rect(g_scope.screen, WHITE, \
(20, 140, 500, 60), 0)
rect_instructions = pygame.draw.rect(g_scope.screen, WHITE, \
(20, 220, 500, 100), 0)
rect_tweets = pygame.draw.rect(g_scope.screen, WHITE, \
(20, 340, 500, 300), 0)
# Draw title
title1 = "The Great American"
title2 = "Tweet Race"
text_title_1 = font_title_1.render(title1,1,BLACK)
text_title_2 = font_title_2.render(title2,1,BLACK)
g_scope.screen.blit(text_title_1, (40, 25))
g_scope.screen.blit(text_title_2, (130, 70))
# Draw game mode
mode_str = font_mode.render('Starting Gate',1,BLACK)
g_scope.screen.blit(mode_str, (115, 140))
# Draw instructions
instr_str_1 = 'Send a tweet to @Gr8AmTweetRace'
instr_str_2 = 'with a #term to enter!'
instr_1 = font_instr_1.render(instr_str_1,1,BLACK)
instr_2 = font_instr_2.render(instr_str_2,1,BLACK)
g_scope.screen.blit(instr_1, (40, 240))
g_scope.screen.blit(instr_2, (40, 270))
# Draw entrants
ent_title = font_ent_title.render('Contestants',1,BLACK)
g_scope.screen.blit(ent_title, (40, 360))
ent_y = 390
for i in range(0, MAX_ENTRIES):
ent_str = ''.join([str(i + 1), ': '])
if i < len(g_terms):
ent_str = ''.join([ent_str, g_terms[i]])
ent_disp = font_ent.render(ent_str,1,BLACK)
g_scope.screen.blit(ent_disp, (40, 390 + (i * 30)))
# Test if a term is already in the term list
def is_in_terms(entry):
global g_terms
for term in g_terms:
if ''.join(['#', entry]) == term:
return True
return False
#-----------------------------------------------------------------------------
# Main
#-----------------------------------------------------------------------------
def main():
global g_bet_loop
global g_scope
global g_terms
# Setup Twitter streamer
tf = twit_feed.TwitFeed(AUTH)
#tf = tf_test_02.TwitFeed(AUTH)
# Tweet that we are accepting bets
# Start streamer to search for terms
tf.start_track_streamer(BET_TERM)
# Setup display
pygame.init()
#g_scope = pyscope.pyscope()
fps_clock = pygame.time.Clock()
pygame.mouse.set_visible(False)
# Main game loop
g_bet_loop = False
while g_bet_loop:
# Handle game events
for event in pygame.event.get():
# End game if quit event raises
if event.type == pygame.QUIT:
g_bet_loop = False
# End game if 'q' or 'esc' key pressed
elif event.type == pygame.KEYDOWN:
if event.key == pygame.K_q or event.key == pygame.K_ESCAPE:
g_bet_loop = False
# Get entries and print them
entries = tf.get_entries()
for entry in entries:
print entry
if is_in_terms(entry) == False:
g_terms.append(''.join(['#', entry]))
print len(g_terms)
if len(g_terms) >= MAX_ENTRIES:
print 'breaking'
g_bet_loop = False
# Update screen
draw_starting_screen()
pygame.display.update()
fps_clock.tick(FPS)
# Clean up Twitter feed and pygame
print str(pygame.time.get_ticks())
tf.stop_tracking()
print str(pygame.time.get_ticks())
pygame.quit()
# Print terms
print 'Search terms: ', g_terms
# Run main
main()
| ShawnHymel/TweetRace | pytest/wager_test_01.py | Python | mit | 5,938 | 0.011115 |
from __future__ import print_function
import numpy as np
# Chapter 2 Beginning with NumPy fundamentals
#
# Demonstrates the selection
# of ndarray elements.
#
# Run from the commandline with
#
# python elementselection.py
a = np.array([[1,2],[3,4]])
print("In: a")
print(a)
#Out:
#array([[1, 2],
# [3, 4]])
print("In: a[0,0]")
print(a[0,0])
#Out: 1
print("In: a[0,1]")
print(a[0,1])
#Out: 2
print("In: a[1,0]")
print(a[1,0])
#Out: 3
print("In: a[1,1]")
print(a[1,1])
#Out: 4
| moonbury/notebooks | github/Numpy/Chapter2/elementselection.py | Python | gpl-3.0 | 492 | 0.034553 |
#!/usr/bin/python3
"""
Design and implement a data structure for Least Recently Used (LRU) cache. It
should support the following operations: get and put.
get(key) - Get the value (will always be positive) of the key if the key exists
in the cache, otherwise return -1.
put(key, value) - Set or insert the value if the key is not already present.
When the cache reached its capacity, it should invalidate the least recently
used item before inserting a new item.
Follow up:
Could you do both operations in O(1) time complexity?
Example:
LRUCache cache = new LRUCache( 2 /* capacity */ );
cache.put(1, 1);
cache.put(2, 2);
cache.get(1); // returns 1
cache.put(3, 3); // evicts key 2
cache.get(2); // returns -1 (not found)
cache.put(4, 4); // evicts key 1
cache.get(1); // returns -1 (not found)
cache.get(3); // returns 3
cache.get(4); // returns 4
"""
class Node:
def __init__(self, key, val):
self.key = key
self.val = val
self.prev, self.next = None, None
class LRUCache:
def __init__(self, capacity: int):
"""
O(1) look up - Map
O(1) update most recent vs. least recent - Linked List
        A singly linked list is not enough, so use a doubly linked list
        Need dummy head and tail nodes to avoid complicating the None checks
        Essentially this reimplements an OrderedDict
"""
self.head = Node(None, None)
self.tail = Node(None, None)
self.head.next = self.tail
self.tail.prev = self.head
self.cap = capacity
self.map = {}
def get(self, key: int) -> int:
if key in self.map:
node = self.map[key]
self._remove(key)
self._appendleft(node)
return node.val
return -1
def put(self, key: int, value: int) -> None:
if key in self.map:
self._remove(key)
elif len(self.map) >= self.cap:
node = self.tail.prev
self._remove(node.key)
node = Node(key, value)
self._appendleft(node)
def _appendleft(self, node: Node):
        self.map[node.key] = node  # the map is updated/deleted only in these two helpers
nxt = self.head.next
self.head.next = node
node.prev = self.head
node.next = nxt
nxt.prev = node
def _remove(self, key: int):
node = self.map[key]
prev = node.prev
nxt = node.next
prev.next = nxt
nxt.prev = prev
        del self.map[key]  # the map is updated/deleted only in these two helpers
# Your LRUCache object will be instantiated and called as such:
# obj = LRUCache(capacity)
# param_1 = obj.get(key)
# obj.put(key,value)
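
# Minimal self-check (added sketch) that mirrors the example in the module
# docstring above; it only runs when this file is executed directly.
if __name__ == "__main__":
    cache = LRUCache(2)
    cache.put(1, 1)
    cache.put(2, 2)
    assert cache.get(1) == 1
    cache.put(3, 3)            # evicts key 2
    assert cache.get(2) == -1
    cache.put(4, 4)            # evicts key 1
    assert cache.get(1) == -1
    assert cache.get(3) == 3
    assert cache.get(4) == 4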
| algorhythms/LeetCode | 146 LRU Cache py3.py | Python | mit | 2,700 | 0 |
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generated code. DO NOT EDIT!
#
# Snippet for CreateApi
# NOTE: This snippet has been automatically generated for illustrative purposes only.
# It may require modifications to work in your environment.
# To install the latest published package dependency, execute the following:
# python3 -m pip install google-cloud-api-gateway
# [START apigateway_v1_generated_ApiGatewayService_CreateApi_async]
from google.cloud import apigateway_v1
async def sample_create_api():
# Create a client
client = apigateway_v1.ApiGatewayServiceAsyncClient()
# Initialize request argument(s)
request = apigateway_v1.CreateApiRequest(
parent="parent_value",
api_id="api_id_value",
)
# Make the request
operation = client.create_api(request=request)
print("Waiting for operation to complete...")
response = await operation.result()
# Handle the response
print(response)
# [END apigateway_v1_generated_ApiGatewayService_CreateApi_async]
| googleapis/python-api-gateway | samples/generated_samples/apigateway_v1_generated_api_gateway_service_create_api_async.py | Python | apache-2.0 | 1,588 | 0.00063 |
from sys import maxsize
class Group:
def __init__(self, name=None, header=None, footer=None, id=None):
self.name = name
self.header = header
self.footer = footer
self.id = id
def __repr__(self):
return "%s:%s:%s:%s" % (self.id, self.name, self.header, self.footer)
def __eq__(self, other):
return (self.id is None or other.id is None or self.id == other.id) and (self.name == other.name or
self.name is None)
def id_or_max(self):
if self.id:
return int(self.id)
else:
return maxsize
| AklerQ/python_training | model/group.py | Python | apache-2.0 | 674 | 0.002967 |
# -*- coding: utf-8 -*-
u'''\
:mod:`ecoxipy.pyxom` - Pythonic XML Object Model (PyXOM)
========================================================
This module implements the *Pythonic XML Object Model* (PyXOM) for the
representation of XML structures. To conveniently create PyXOM data structures
use :mod:`ecoxipy.pyxom.output`, for indexing use
:mod:`ecoxipy.pyxom.indexing` (if :attr:`Document.element_by_id` and
:attr:`Document.elements_by_name` are not enough for you).
.. _ecoxipy.pyxom.examples:
Examples
--------
XML Creation
^^^^^^^^^^^^
If you use the constructors be sure to supply the right data types, otherwise
use the :meth:`create` methods or use :class:`ecoxipy.MarkupBuilder`, which
take care of conversion.
>>> from ecoxipy import MarkupBuilder
>>> b = MarkupBuilder()
>>> document = Document.create(
... b.article(
... b.h1(
... b & '<Example>',
... data='to quote: <&>"\\''
... ),
... b.p(
... {'umlaut-attribute': u'äöüß'},
... 'Hello', Element.create('em', ' World',
... attributes={'count':1}), '!'
... ),
... None,
... b.div(
... Element.create('data-element', Text.create(u'äöüß <&>')),
... b(
... '<p attr="value">raw content</p>Some Text',
... b.br,
... (i for i in range(3))
... ),
... (i for i in range(3, 6))
... ),
... Comment.create('<This is a comment!>'),
... ProcessingInstruction.create('pi-target', '<PI content>'),
... ProcessingInstruction.create('pi-without-content'),
... b['foo:somexml'](
... b['foo:somexml']({'foo:bar': 1, 't:test': 2}),
... b['somexml']({'xmlns': ''}),
... b['bar:somexml'],
... {'xmlns:foo': 'foo://bar', 'xmlns:t': '',
... 'foo:bar': 'Hello', 'id': 'foo'}
... ),
... {'xmlns': 'http://www.w3.org/1999/xhtml/'}
... ), doctype_name='article', omit_xml_declaration=True
... )
Enforcing Well-Formedness
^^^^^^^^^^^^^^^^^^^^^^^^^
Using the :meth:`create` methods or passing the parameter
``check_well_formedness`` as :const:`True` to the appropriate constructors
enforces that the element, attribute and document type names are valid XML
names, and that processing instruction target and content as well as comment
contents conform to their constraints:
>>> from ecoxipy import XMLWellFormednessException
>>> def catch_not_well_formed(cls, *args, **kargs):
... try:
... return cls.create(*args, **kargs)
... except XMLWellFormednessException as e:
... print(e)
>>> t = catch_not_well_formed(Document, [], doctype_name='1nvalid-xml-name')
The value "1nvalid-xml-name" is not a valid XML name.
>>> t = catch_not_well_formed(Document, [], doctype_name='html', doctype_publicid='"')
The value "\\"" is not a valid document type public ID.
>>> t = catch_not_well_formed(Document, [], doctype_name='html', doctype_systemid='"\\'')
The value "\\"'" is not a valid document type system ID.
>>> t = catch_not_well_formed(Element, '1nvalid-xml-name', [], {})
The value "1nvalid-xml-name" is not a valid XML name.
>>> t = catch_not_well_formed(Element, 't', [], attributes={'1nvalid-xml-name': 'content'})
The value "1nvalid-xml-name" is not a valid XML name.
>>> t = catch_not_well_formed(ProcessingInstruction, '1nvalid-xml-name')
The value "1nvalid-xml-name" is not a valid XML processing instruction target.
>>> t = catch_not_well_formed(ProcessingInstruction, 'target', 'invalid PI content ?>')
The value "invalid PI content ?>" is not a valid XML processing instruction content because it contains "?>".
>>> t = catch_not_well_formed(Comment, 'invalid XML comment --')
The value "invalid XML comment --" is not a valid XML comment because it contains "--".
Navigation
^^^^^^^^^^
Use list semantics to retrieve child nodes and attribute access to retrieve
node information:
>>> print(document.doctype.name)
article
>>> print(document[0].name)
article
>>> print(document[0].attributes['xmlns'].value)
http://www.w3.org/1999/xhtml/
>>> print(document[0][-3].target)
pi-target
>>> document[0][1].parent is document[0]
True
>>> document[0][0] is document[0][1].previous and document[0][1].next is document[0][2]
True
>>> document.parent is None and document[0].previous is None and document[0].next is None
True
>>> document[0].attributes.parent is document[0]
True
You can retrieve iterators for navigation through the tree:
>>> list(document[0][0].ancestors)
[ecoxipy.pyxom.Element['article', {...}], ecoxipy.pyxom.Document[ecoxipy.pyxom.DocumentType('article', None, None), True, 'UTF-8']]
>>> list(document[0][1].children())
[ecoxipy.pyxom.Text('Hello'), ecoxipy.pyxom.Element['em', {...}], ecoxipy.pyxom.Text('!')]
>>> list(document[0][2].descendants())
[ecoxipy.pyxom.Element['data-element', {...}], ecoxipy.pyxom.Text('\\xe4\\xf6\\xfc\\xdf <&>'), ecoxipy.pyxom.Element['p', {...}], ecoxipy.pyxom.Text('raw content'), ecoxipy.pyxom.Text('Some Text'), ecoxipy.pyxom.Element['br', {...}], ecoxipy.pyxom.Text('0'), ecoxipy.pyxom.Text('1'), ecoxipy.pyxom.Text('2'), ecoxipy.pyxom.Text('3'), ecoxipy.pyxom.Text('4'), ecoxipy.pyxom.Text('5')]
>>> list(document[0][-2].preceding_siblings)
[ecoxipy.pyxom.ProcessingInstruction('pi-target', '<PI content>'), ecoxipy.pyxom.Comment('<This is a comment!>'), ecoxipy.pyxom.Element['div', {...}], ecoxipy.pyxom.Element['p', {...}], ecoxipy.pyxom.Element['h1', {...}]]
>>> list(document[0][2][-1].preceding)
[ecoxipy.pyxom.Text('4'), ecoxipy.pyxom.Text('3'), ecoxipy.pyxom.Text('2'), ecoxipy.pyxom.Text('1'), ecoxipy.pyxom.Text('0'), ecoxipy.pyxom.Element['br', {...}], ecoxipy.pyxom.Text('Some Text'), ecoxipy.pyxom.Element['p', {...}], ecoxipy.pyxom.Element['data-element', {...}], ecoxipy.pyxom.Element['p', {...}], ecoxipy.pyxom.Element['h1', {...}]]
>>> list(document[0][0].following_siblings)
[ecoxipy.pyxom.Element['p', {...}], ecoxipy.pyxom.Element['div', {...}], ecoxipy.pyxom.Comment('<This is a comment!>'), ecoxipy.pyxom.ProcessingInstruction('pi-target', '<PI content>'), ecoxipy.pyxom.ProcessingInstruction('pi-without-content', None), ecoxipy.pyxom.Element['foo:somexml', {...}]]
>>> list(document[0][1][0].following)
[ecoxipy.pyxom.Element['em', {...}], ecoxipy.pyxom.Text('!'), ecoxipy.pyxom.Element['div', {...}], ecoxipy.pyxom.Comment('<This is a comment!>'), ecoxipy.pyxom.ProcessingInstruction('pi-target', '<PI content>'), ecoxipy.pyxom.ProcessingInstruction('pi-without-content', None), ecoxipy.pyxom.Element['foo:somexml', {...}]]
Descendants and children can also be retrieved in reverse document order:
>>> list(document[0][1].children(True)) == list(reversed(list(document[0][1].children())))
True
>>> list(document[0][2].descendants(True))
[ecoxipy.pyxom.Text('5'), ecoxipy.pyxom.Text('4'), ecoxipy.pyxom.Text('3'), ecoxipy.pyxom.Text('2'), ecoxipy.pyxom.Text('1'), ecoxipy.pyxom.Text('0'), ecoxipy.pyxom.Element['br', {...}], ecoxipy.pyxom.Text('Some Text'), ecoxipy.pyxom.Element['p', {...}], ecoxipy.pyxom.Text('raw content'), ecoxipy.pyxom.Element['data-element', {...}], ecoxipy.pyxom.Text('\\xe4\\xf6\\xfc\\xdf <&>')]
Normally :meth:`~ContainerNode.descendants` traverses the XML tree depth-first,
but you can also use breadth-first traversal:
>>> list(document[0][2].descendants(depth_first=False))
[ecoxipy.pyxom.Element['data-element', {...}], ecoxipy.pyxom.Element['p', {...}], ecoxipy.pyxom.Text('Some Text'), ecoxipy.pyxom.Element['br', {...}], ecoxipy.pyxom.Text('0'), ecoxipy.pyxom.Text('1'), ecoxipy.pyxom.Text('2'), ecoxipy.pyxom.Text('3'), ecoxipy.pyxom.Text('4'), ecoxipy.pyxom.Text('5'), ecoxipy.pyxom.Text('\\xe4\\xf6\\xfc\\xdf <&>'), ecoxipy.pyxom.Text('raw content')]
>>> list(document[0][2].descendants(True, False))
[ecoxipy.pyxom.Text('5'), ecoxipy.pyxom.Text('4'), ecoxipy.pyxom.Text('3'), ecoxipy.pyxom.Text('2'), ecoxipy.pyxom.Text('1'), ecoxipy.pyxom.Text('0'), ecoxipy.pyxom.Element['br', {...}], ecoxipy.pyxom.Text('Some Text'), ecoxipy.pyxom.Element['p', {...}], ecoxipy.pyxom.Element['data-element', {...}], ecoxipy.pyxom.Text('raw content'), ecoxipy.pyxom.Text('\\xe4\\xf6\\xfc\\xdf <&>')]
:meth:`~ContainerNode.descendants` can also be given a depth limit:
>>> list(document[0].descendants(max_depth=2))
[ecoxipy.pyxom.Element['h1', {...}], ecoxipy.pyxom.Text('<Example>'), ecoxipy.pyxom.Element['p', {...}], ecoxipy.pyxom.Text('Hello'), ecoxipy.pyxom.Element['em', {...}], ecoxipy.pyxom.Text('!'), ecoxipy.pyxom.Element['div', {...}], ecoxipy.pyxom.Element['data-element', {...}], ecoxipy.pyxom.Element['p', {...}], ecoxipy.pyxom.Text('Some Text'), ecoxipy.pyxom.Element['br', {...}], ecoxipy.pyxom.Text('0'), ecoxipy.pyxom.Text('1'), ecoxipy.pyxom.Text('2'), ecoxipy.pyxom.Text('3'), ecoxipy.pyxom.Text('4'), ecoxipy.pyxom.Text('5'), ecoxipy.pyxom.Comment('<This is a comment!>'), ecoxipy.pyxom.ProcessingInstruction('pi-target', '<PI content>'), ecoxipy.pyxom.ProcessingInstruction('pi-without-content', None), ecoxipy.pyxom.Element['foo:somexml', {...}], ecoxipy.pyxom.Element['foo:somexml', {...}], ecoxipy.pyxom.Element['somexml', {...}], ecoxipy.pyxom.Element['bar:somexml', {...}]]
>>> list(document[0].descendants(depth_first=False, max_depth=2))
[ecoxipy.pyxom.Element['h1', {...}], ecoxipy.pyxom.Element['p', {...}], ecoxipy.pyxom.Element['div', {...}], ecoxipy.pyxom.Comment('<This is a comment!>'), ecoxipy.pyxom.ProcessingInstruction('pi-target', '<PI content>'), ecoxipy.pyxom.ProcessingInstruction('pi-without-content', None), ecoxipy.pyxom.Element['foo:somexml', {...}], ecoxipy.pyxom.Text('<Example>'), ecoxipy.pyxom.Text('Hello'), ecoxipy.pyxom.Element['em', {...}], ecoxipy.pyxom.Text('!'), ecoxipy.pyxom.Element['data-element', {...}], ecoxipy.pyxom.Element['p', {...}], ecoxipy.pyxom.Text('Some Text'), ecoxipy.pyxom.Element['br', {...}], ecoxipy.pyxom.Text('0'), ecoxipy.pyxom.Text('1'), ecoxipy.pyxom.Text('2'), ecoxipy.pyxom.Text('3'), ecoxipy.pyxom.Text('4'), ecoxipy.pyxom.Text('5'), ecoxipy.pyxom.Element['foo:somexml', {...}], ecoxipy.pyxom.Element['somexml', {...}], ecoxipy.pyxom.Element['bar:somexml', {...}]]
Namespaces
""""""""""
PyXOM supports the interpretation of `Namespaces in XML
<http://www.w3.org/TR/REC-xml-names/>`_. Namespace prefix and local names are
calculated from :class:`Element` and :class:`Attribute` names:
>>> document[0].namespace_prefix == None
True
>>> print(document[0].local_name)
article
>>> print(document[0][-1].namespace_prefix)
foo
>>> print(document[0][-1].local_name)
somexml
>>> attr = document[0][-1].attributes['foo:bar']
>>> print(attr.namespace_prefix)
foo
>>> print(attr.local_name)
bar
The namespace URI is available as :attr:`Element.namespace_uri` and
:attr:`Attribute.namespace_uri` (originally defined as
:attr:`NamespaceNameMixin.namespace_uri`), these properties look up the
namespace prefix of the node in the parent elements (this information is
cached, so don't fear multiple retrieval):
>>> xhtml_namespace_uri = u'http://www.w3.org/1999/xhtml/'
>>> document[0][1].namespace_uri == xhtml_namespace_uri
True
>>> document[0][1][1].namespace_uri == xhtml_namespace_uri
True
>>> document[0][-1][0].namespace_uri == u'foo://bar'
True
>>> document[0][-1][0].attributes['foo:bar'].namespace_uri == u'foo://bar'
True
The namespace prefixes active on an element are available as the iterator
:attr:`Element.namespace_prefixes`:
>>> prefixes = sorted(list(document[0][-1][0].namespace_prefixes),
... key=lambda value: '' if value is None else value)
>>> prefixes[0] == None
True
>>> print(u', '.join(prefixes[1:]))
foo, t
>>> document[0][-1][0].get_namespace_uri(u'foo') == u'foo://bar'
True
>>> print(list(document[0].namespace_prefixes))
[None]
>>> document[0].get_namespace_uri(None) == u'http://www.w3.org/1999/xhtml/'
True
If an element or attribute is in no namespace, ``namespace_uri`` is
:const:`None`:
>>> document[0][-1][0].attributes['t:test'].namespace_uri == None
True
>>> document[0][-1][1].namespace_uri == None
True
If an undefined namespace prefix is used, the ``namespace_uri`` is
:const:`False`:
>>> document[0][-1][2].namespace_uri == False
True
Indexes
"""""""
On :class:`Document` instances :class:`ecoxipy.pyxom.indexing.IndexDescriptor`
attributes are defined for fast retrieval (after initially building the
index).
Use :attr:`~Document.element_by_id` to get elements by the value of their
``id`` attribute:
>>> document.element_by_id['foo'] is document[0][-1]
True
>>> 'bar' in document.element_by_id
False
:attr:`~Document.elements_by_name` allows retrieval of elements by their name:
>>> document[0][-1] in list(document.elements_by_name['foo:somexml'])
True
>>> 'html' in document.elements_by_name
False
Retrieve elements and attributes by their namespace data by using
:attr:`~Document.nodes_by_namespace`:
>>> from functools import reduce
>>> elements_and_attributes = set(
... filter(lambda node: isinstance(node, Element),
... document.descendants()
... )
... ).union(
... reduce(lambda x, y: x.union(y),
... map(lambda node: set(node.attributes.values()),
... filter(lambda node: isinstance(node, Element),
... document.descendants()
... )
... )
... )
... )
>>> set(document.nodes_by_namespace()) == set(filter(
... lambda node: node.namespace_uri is not False,
... elements_and_attributes
... ))
True
>>> set(document.nodes_by_namespace('foo://bar')) == set(filter(
... lambda node: node.namespace_uri == u'foo://bar',
... elements_and_attributes
... ))
True
>>> set(document.nodes_by_namespace(local_name='bar')) == set(filter(
... lambda node: node.local_name == u'bar',
... elements_and_attributes
... ))
True
>>> set(document.nodes_by_namespace('foo://bar', 'bar')) == set(filter(
... lambda node: node.namespace_uri == u'foo://bar' and node.local_name == u'bar',
... elements_and_attributes
... ))
True
Manipulation and Equality
^^^^^^^^^^^^^^^^^^^^^^^^^
All :class:`XMLNode` instances have attributes which allow for modification.
:class:`Document` and :class:`Element` instances also allow modification of
their contents like sequences.
Duplication and Comparisons
"""""""""""""""""""""""""""
Use :meth:`XMLNode.duplicate` to create a deep copy of a XML node:
>>> document_copy = document.duplicate()
>>> document is document_copy
False
Equality and inequality recursively compare XML nodes:
>>> document == document_copy
True
>>> document != document_copy
False
Attributes
""""""""""
The attributes of an :class:`Element` instance are available as
:attr:`Element.attributes`. This is an :class:`Attributes` instance which
contains :class:`Attribute` instances:
>>> document_copy[0][0].attributes['data']
ecoxipy.pyxom.Attribute('data', 'to quote: <&>"\\'')
>>> old_data = document_copy[0][0].attributes['data'].value
>>> document_copy[0][0].attributes['data'].value = 'foo bar'
>>> document_copy[0][0].attributes['data'].value == u'foo bar'
True
>>> 'data' in document_copy[0][0].attributes
True
>>> document == document_copy
False
>>> document != document_copy
True
>>> document_copy[0][0].attributes['data'].value = old_data
>>> document == document_copy
True
>>> document != document_copy
False
:class:`Attributes` instances allow for creation of :class:`Attribute`
instances:
>>> somexml = document_copy[0][-1]
>>> foo_attr = somexml[0].attributes.create_attribute('foo:foo', 'bar')
>>> foo_attr is somexml[0].attributes['foo:foo']
True
>>> foo_attr == somexml[0].attributes['foo:foo']
True
>>> foo_attr != somexml[0].attributes['foo:foo']
False
>>> 'foo:foo' in somexml[0].attributes
True
>>> foo_attr.namespace_uri == u'foo://bar'
True
Attributes may be removed:
>>> somexml[0].attributes.remove(foo_attr)
>>> 'foo:foo' in somexml[0].attributes
False
>>> foo_attr.parent == None
True
>>> foo_attr.namespace_uri == False
True
You can also add an attribute to an element's attributes, it is automatically
moved if it belongs to another element's attributes:
>>> somexml[0].attributes.add(foo_attr)
>>> 'foo:foo' in somexml[0].attributes
True
>>> foo_attr.parent == somexml[0].attributes
True
>>> foo_attr.parent != somexml[0].attributes
False
>>> foo_attr.namespace_uri == u'foo://bar'
True
>>> del somexml[0].attributes['foo:foo']
>>> 'foo:foo' in somexml[0].attributes
False
>>> attr = document[0][-1].attributes['foo:bar']
>>> attr.name = 'test'
>>> attr.namespace_prefix is None
True
>>> print(attr.local_name)
test
Documents and Elements
""""""""""""""""""""""
>>> document_copy[0].insert(1, document_copy[0][0])
>>> document_copy[0][0] == document[0][1]
True
>>> document_copy[0][0] != document[0][1]
False
>>> document_copy[0][1] == document[0][0]
True
>>> document_copy[0][1] != document[0][0]
False
>>> p_element = document_copy[0][0]
>>> document_copy[0].remove(p_element)
>>> document_copy[0][0].name == u'h1' and p_element.parent is None
True
>>> p_element in document_copy[0]
False
>>> p_element.namespace_uri == False
True
>>> document_copy[0][0].append(p_element)
>>> document_copy[0][0][-1] is p_element
True
>>> p_element in document_copy[0][0]
True
>>> p_element.namespace_uri == u'http://www.w3.org/1999/xhtml/'
True
>>> p_element in document[0]
False
>>> document[0][1] in document_copy[0][0]
False
>>> document[0][1] is document_copy[0][0][-1]
False
>>> document[0][1] == document_copy[0][0][-1]
True
>>> document[0][1] != document_copy[0][0][-1]
False
>>> document[0][-1].name = 'foo'
>>> document[0][-1].namespace_prefix is None
True
>>> print(document[0][-1].local_name)
foo
Indexes and Manipulation
""""""""""""""""""""""""
If a document is modified, the indexes should be deleted. This can be done
using :func:`del` on the index attribute or calling
:meth:`~Document.delete_indexes`.
>>> del document_copy[0][-1]
>>> document_copy.delete_indexes()
>>> 'foo' in document_copy.element_by_id
False
>>> 'foo:somexml' in document_copy.elements_by_name
False
XML Serialization
^^^^^^^^^^^^^^^^^
First we remove embedded non-HTML XML, as there are multiple attributes on the
element and the order they are rendered in is indeterministic, which makes it
hard to compare:
>>> del document[0][-1]
Getting the Unicode value of an document yields the XML document serialized as
an Unicode string:
>>> document_string = u"""<!DOCTYPE article><article xmlns="http://www.w3.org/1999/xhtml/"><h1 data="to quote: <&>"'"><Example></h1><p umlaut-attribute="äöüß">Hello<em count="1"> World</em>!</p><div><data-element>äöüß <&></data-element><p attr="value">raw content</p>Some Text<br/>012345</div><!--<This is a comment!>--><?pi-target <PI content>?><?pi-without-content?></article>"""
>>> import sys
>>> if sys.version_info[0] < 3:
... unicode(document) == document_string
... else:
... str(document) == document_string
True
Getting the :func:`bytes` value of an :class:`Document` creates a byte string
of the serialized XML with the encoding specified on creation of the instance,
it defaults to "UTF-8":
>>> bytes(document) == document_string.encode('UTF-8')
True
:class:`XMLNode` instances can also generate SAX events, see
:meth:`XMLNode.create_sax_events` (note that the default
:class:`xml.sax.ContentHandler` is :class:`xml.sax.saxutils.ContentHandler`,
which does not support comments):
>>> document_string = u"""<?xml version="1.0" encoding="UTF-8"?>\\n<article xmlns="http://www.w3.org/1999/xhtml/"><h1 data="to quote: <&>"'"><Example></h1><p umlaut-attribute="äöüß">Hello<em count="1"> World</em>!</p><div><data-element>äöüß <&></data-element><p attr="value">raw content</p>Some Text<br></br>012345</div><?pi-target <PI content>?><?pi-without-content ?></article>"""
>>> import sys
>>> from io import BytesIO
>>> string_out = BytesIO()
>>> content_handler = document.create_sax_events(out=string_out)
>>> string_out.getvalue() == document_string.encode('UTF-8')
True
>>> string_out.close()
You can also create indented XML when calling the
:meth:`XMLNode.create_sax_events` by supplying the ``indent_incr`` argument:
>>> indented_document_string = u"""\\
... <?xml version="1.0" encoding="UTF-8"?>
... <article xmlns="http://www.w3.org/1999/xhtml/">
... <h1 data="to quote: <&>"'">
... <Example>
... </h1>
... <p umlaut-attribute="äöüß">
... Hello
... <em count="1">
... World
... </em>
... !
... </p>
... <div>
... <data-element>
... äöüß <&>
... </data-element>
... <p attr="value">
... raw content
... </p>
... Some Text
... <br></br>
... 012345
... </div>
... <?pi-target <PI content>?>
... <?pi-without-content ?>
... </article>
... """
>>> string_out = BytesIO()
>>> content_handler = document.create_sax_events(indent_incr=' ', out=string_out)
>>> string_out.getvalue() == indented_document_string.encode('UTF-8')
True
>>> string_out.close()
Classes
-------
Document
^^^^^^^^
.. autoclass:: Document
.. autoclass:: DocumentType
Element
^^^^^^^
.. autoclass:: Element
.. autoclass:: Attribute
.. autoclass:: Attributes
Other Nodes
^^^^^^^^^^^
.. autoclass:: Text
.. autoclass:: Comment
.. autoclass:: ProcessingInstruction
Base Classes
^^^^^^^^^^^^
.. autoclass:: XMLNode
.. autoclass:: ContainerNode
.. autoclass:: ContentNode
.. autoclass:: NamespaceNameMixin
'''
from ._common import XMLNode, ContainerNode
from ._attributes import NamespaceNameMixin, Attribute, Attributes
from ._document import DocumentType, Document
from ._element import Element
from ._content_nodes import ContentNode, Text, Comment, ProcessingInstruction
| IvIePhisto/ECoXiPy | ecoxipy/pyxom/__init__.py | Python | mit | 21,834 | 0.001147 |
import bench
from ucollections import namedtuple
T = namedtuple("Tup", ["num", "bar"])
def test(num):
t = T(20000000, 0)
i = 0
while i < t.num:
i += 1
bench.run(test)
| pozetroninc/micropython | tests/internal_bench/var-8-namedtuple-1st.py | Python | mit | 190 | 0.010526 |
"""
WSGI config for school_registry project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "school_registry.settings")
application = get_wsgi_application()
| agustin380/school-registry | src/school_registry/wsgi.py | Python | gpl-3.0 | 407 | 0 |
#
# (c) 2016 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import sys
import copy
from ansible import constants as C
from ansible.module_utils._text import to_text
from ansible.module_utils.connection import Connection
from ansible.plugins.action.normal import ActionModule as _ActionModule
from ansible.module_utils.network.aireos.aireos import aireos_provider_spec
from ansible.module_utils.network.common.utils import load_provider
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
class ActionModule(_ActionModule):
def run(self, tmp=None, task_vars=None):
if self._play_context.connection != 'local':
return dict(
failed=True,
msg='invalid connection specified, expected connection=local, '
'got %s' % self._play_context.connection
)
provider = load_provider(aireos_provider_spec, self._task.args)
pc = copy.deepcopy(self._play_context)
pc.connection = 'network_cli'
pc.network_os = 'aireos'
pc.remote_addr = provider['host'] or self._play_context.remote_addr
pc.port = int(provider['port'] or self._play_context.port or 22)
pc.remote_user = provider['username'] or self._play_context.connection_user
pc.password = provider['password'] or self._play_context.password
pc.timeout = int(provider['timeout'] or C.PERSISTENT_COMMAND_TIMEOUT)
display.vvv('using connection plugin %s' % pc.connection, pc.remote_addr)
connection = self._shared_loader_obj.connection_loader.get('persistent', pc, sys.stdin)
socket_path = connection.run()
display.vvvv('socket_path: %s' % socket_path, pc.remote_addr)
if not socket_path:
return {'failed': True,
'msg': 'unable to open shell. Please see: ' +
'https://docs.ansible.com/ansible/network_debug_troubleshooting.html#unable-to-open-shell'}
# make sure we are in the right cli context which should be
        # enable mode and not config mode
conn = Connection(socket_path)
out = conn.get_prompt()
if to_text(out, errors='surrogate_then_replace').strip().endswith(')#'):
display.vvvv('wrong context, sending exit to device', self._play_context.remote_addr)
conn.send_command('exit')
task_vars['ansible_socket'] = socket_path
if self._play_context.become_method == 'enable':
self._play_context.become = False
self._play_context.become_method = None
result = super(ActionModule, self).run(tmp, task_vars)
return result
| le9i0nx/ansible | lib/ansible/plugins/action/aireos.py | Python | gpl-3.0 | 3,433 | 0.001748 |
#!/usr/bin/env python3
import argparse
from mygrations.mygrate import mygrate
# argument parsing
parser = argparse.ArgumentParser()
parser.add_argument(
'command',
nargs='?',
default='version',
choices=['version', 'apply', 'check', 'import', 'plan', 'plan_export'],
help='Action to execute (default: version)'
)
parser.add_argument(
'--config', default='mygrate.conf', help='Location of mygrate configuration file (default: mygrate.conf)'
)
parser.add_argument('-f', dest='force', action='store_true', help='Ignore errors/warnings and execute command anyway')
parser.add_argument('-v', dest='version', action='store_true', help='Display version')
args = parser.parse_args()
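# Illustrative invocations only (command names and flags as defined above):
#   python3 mygrate.py plan --config mygrate.conf
#   python3 mygrate.py apply -f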
# load up a mygrate object
my = mygrate(args.command, vars(args))
# and execute
my.execute()
| cmancone/mygrations | mygrate.py | Python | mit | 795 | 0.003774 |
###############################################################################
# #
# This program is free software: you can redistribute it and/or modify #
# it under the terms of the GNU General Public License as published by #
# the Free Software Foundation, either version 3 of the License, or #
# (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with this program. If not, see <http://www.gnu.org/licenses/>. #
# #
###############################################################################
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
import logging
from unitem.common import run_cmd
from unitem.utils import check_on_path, make_sure_path_exists
class Bin():
"""Apply binning methods to an assembly."""
def __init__(self, assembly_file, output_dir, min_contig_len, cpus):
"""Initialization."""
self.logger = logging.getLogger('timestamp')
self.assembly_file = assembly_file
self.output_dir = output_dir
self.min_contig_len = min_contig_len
self.cpus = cpus
self.failed_methods = []
def coverage(self, bam_files, cov_file):
"""Calculate coverage file for use by different binning methods."""
self.bam_files = bam_files
if cov_file:
self.logger.info(f'Using coverage information in {cov_file}.')
# check coverage file has correct format
header = open(cov_file).readline().split('\t')
if not ('contig' in header[0].lower()
and 'len' in header[1].lower()
and ('depth' in header[2].lower() or 'mean' in header[2].lower())):
self.logger.error(
'Provided coverage file does not have the correct headers.')
self.logger.error(
"Coverage file must have the format produced by 'jgi_summarize_bam_contig_depths'.")
sys.exit(1)
self.cov_file = cov_file
if bam_files:
self.logger.warning('BAM files are being ignored.')
else:
found = check_on_path(
'jgi_summarize_bam_contig_depths', exit_on_fail=False)
if not found:
self.logger.error(
'jgi_summarize_bam_contig_depths is not on the system path.')
self.logger.error('This script is provide with MetaBAT v2.')
sys.exit(1)
self.logger.info(
f'Calculating coverage for {len(bam_files)} BAM files.')
self.logger.info("Running jgi_summarize_bam_contig_depths script.")
self.cov_file = os.path.join(self.output_dir, 'coverage.tsv')
cmd = 'jgi_summarize_bam_contig_depths --minContigLength {} --minContigDepth 1 --outputDepth {} {}'.format(self.min_contig_len,
self.cov_file,
' '.join(bam_files))
run_cmd(cmd, program='jgi_summarize_bam_contig_depths')
def check_on_path(self, options):
"""Check that all binning methods are on the system path."""
if options.mb2:
self.logger.info('Checking MetaBAT v2 dependencies.')
check_on_path('metabat2')
if options.gm2:
self.logger.info('Checking GroopM v2 dependencies.')
check_on_path('groopm2')
if options.max40 or options.max107:
self.logger.info('Checking MaxBin dependencies.')
check_on_path('run_MaxBin.pl')
if (options.mb_verysensitive
or options.mb_sensitive
or options.mb_specific
or options.mb_veryspecific
or options.mb_superspecific):
self.logger.info('Checking MetaBAT dependencies.')
check_on_path('metabat1')
def run(self, options):
"""Run binning methods."""
bin_file = os.path.join(self.output_dir, 'bin_dirs.tsv')
bin_file_out = open(bin_file, 'w')
if options.mb2:
self.metabat2(bin_file_out)
if options.gm2:
self.groopm2(bin_file_out)
if options.max40:
self.maxbin(bin_file_out, 40)
if options.max107:
self.maxbin(bin_file_out, 107)
if options.mb_verysensitive:
self.metabat(bin_file_out, 'verysensitive')
if options.mb_sensitive:
self.metabat(bin_file_out, 'sensitive')
if options.mb_specific:
self.metabat(bin_file_out, 'specific')
if options.mb_veryspecific:
self.metabat(bin_file_out, 'veryspecific')
if options.mb_superspecific:
self.metabat(bin_file_out, 'superspecific')
bin_file_out.close()
self.logger.info(
f'File with location of bin directories written to {bin_file}.')
if self.failed_methods:
self.logger.warning(
f'The following methods failed to run: {" ".join(self.failed_methods)}')
def _run_method(self, cmd, bin_dir, bin_file_out, binning_method):
"""Run binning method."""
run_cmd(cmd, program=binning_method)
bin_file_out.write(f'{binning_method}\t{os.path.abspath(bin_dir)}\n')
def metabat2(self, bin_file_out):
"""Run MetaBAT v2."""
self.logger.info("Running MetaBAT v2.")
bin_dir = os.path.join(self.output_dir, 'metabat2')
bin_prefix = os.path.join(bin_dir, 'mb2')
cmd = 'metabat2 -t {} -m {} -i {} -a {} -o {}'.format(self.cpus,
self.min_contig_len,
self.assembly_file,
self.cov_file,
bin_prefix)
self._run_method(cmd, bin_dir, bin_file_out, 'metabat2')
def metabat(self, bin_file_out, preset):
"""Run MetaBAT."""
self.logger.info(f"Running MetaBAT v1 with the {preset} preset.")
bin_dir = os.path.join(self.output_dir, f'metabat_{preset}')
bin_prefix = os.path.join(bin_dir, f'mb_{preset}')
cmd = 'metabat1 -t {} -m {} -i {} -a {} -o {} --{}'.format(self.cpus,
self.min_contig_len,
self.assembly_file,
self.cov_file,
bin_prefix,
preset)
self._run_method(cmd, bin_dir, bin_file_out, f'metabat_{preset}')
def groopm2(self, bin_file_out):
"""Run GroopM v2."""
self.logger.info("Running GroopM v2 parse.")
bin_dir = os.path.join(self.output_dir, 'groopm2')
make_sure_path_exists(bin_dir)
output_db = os.path.join(bin_dir, 'groopm.db')
cmd = 'groopm2 parse -f -t {} -c {} --cov_file {} {} {}'.format(self.cpus,
self.min_contig_len,
self.cov_file,
output_db,
self.assembly_file)
run_cmd(cmd, program='groopm2')
self.logger.info("Running GroopM v2 core.")
cmd = 'groopm2 core -f {} -c {} --save_dists'.format(output_db,
self.min_contig_len)
run_cmd(cmd, program='groopm2')
self.logger.info("Running GroopM v2 extract.")
bin_prefix = os.path.join(bin_dir, 'gm2')
cmd = 'groopm2 extract -p {} {} {}'.format(bin_prefix,
output_db,
self.assembly_file)
self._run_method(cmd, bin_dir, bin_file_out, 'groopm2')
def _create_maxbin_coverage_files(self, cov_file, output_dir):
"""Parse coverage information files required by MaxBin."""
abund_list_file = os.path.join(output_dir, 'abund_files.lst')
fout = open(abund_list_file, 'w')
with open(cov_file) as f:
headers = f.readline().rstrip().split('\t')
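            # The depth file written by jgi_summarize_bam_contig_depths is
            # expected to look roughly like (tab separated):
            #   contigName  contigLen  totalAvgDepth  sample1.bam  sample1.bam-var  ...
            # so columns 3, 5, 7, ... hold the per-sample mean depths that are
            # split out into the individual abundance files MaxBin expects.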
bam_headers = [bh.replace(' ', '_').replace('/', '~')
for bh in headers[3::2]]
fhs = []
for bh in bam_headers:
abund_file = os.path.abspath(
os.path.join(output_dir, bh + '.abund.tsv'))
fhs.append(open(abund_file, 'w'))
fout.write(abund_file + '\n')
for line in f:
line_split = line.rstrip().split('\t')
contig_id = line_split[0]
for fh_index, col_index in enumerate(range(3, len(line_split), 2)):
fhs[fh_index].write(
contig_id + '\t' + line_split[col_index] + '\n')
for fh in fhs:
fh.close()
fout.close()
return abund_list_file
def maxbin(self, bin_file_out, num_markers):
"""Run MaxBin."""
bin_dir = os.path.join(self.output_dir, f'maxbin_ms{num_markers}')
make_sure_path_exists(bin_dir)
cov_file_dir = os.path.join(bin_dir, 'coverage_files')
make_sure_path_exists(cov_file_dir)
abund_list_file = self._create_maxbin_coverage_files(
self.cov_file, cov_file_dir)
self.logger.info(f"Running MaxBin v2 with {num_markers} markers.")
bin_prefix = os.path.join(bin_dir, f'max{num_markers}')
cmd = f'run_MaxBin.pl -min_contig_length {self.min_contig_len}'
cmd += ' -thread {} -markerset {} -contig {} -out {} -abund_list {}'.format(self.cpus,
num_markers,
self.assembly_file,
bin_prefix,
abund_list_file)
self._run_method(cmd, bin_dir, bin_file_out, f'maxbin_ms{num_markers}')
| dparks1134/UniteM | unitem/bin.py | Python | gpl-3.0 | 11,713 | 0.002134 |
#!/usr/bin/env python
"""
Say you have an array for which the ith element is the price of a given stock on day i.
Design an algorithm to find the maximum profit. You may complete at most two transactions.
Note:
You may not engage in multiple transactions at the same time (ie, you must sell the stock before you buy again).
"""
class Solution(object):
def maxProfit(self, prices):
"""
:type prices: List[int]
:rtype: int
Time: O(n^2)
timeout
"""
        if prices is None or len(prices) == 0:
return 0
profit = 0
for i in range(len(prices)):
p1 = self.max_p(prices, 0, i)
p2 = self.max_p(prices, i, len(prices) - 1)
profit = max(profit, p1 + p2)
return profit
def max_p(self, prices, start, end):
if start >= end:
return 0
min_price = prices[start]
profit = 0
for i in range(start, end + 1):
p = prices[i]
profit = max(p - min_price, profit)
min_price = min(min_price, p)
return profit
class Solution(object):
def maxProfit(self, prices):
"""
:type prices: List[int]
:rtype: int
Time: O(n)
Space: O(n)
"""
        if prices is None or len(prices) < 2:
return 0
max_profit = [0 for p in prices]
min_price = prices[0]
for i in range(1, len(prices)):
p = prices[i]
max_profit[i] = max(max_profit[i - 1], p - min_price)
min_price = min(min_price, p)
max_price = prices[-1]
profit = 0
for i in range(len(prices) - 2, -1, -1):
p = prices[i]
profit = max(max_price - p + max_profit[i], profit)
max_price = max(p, max_price)
return profit
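# How the O(n) solution works: max_profit[i] is the best single-transaction
# profit within prices[0..i] (forward pass); the backward pass adds the best
# single transaction starting at day i, so their sum covers "at most two
# transactions". Worked example for the input used below, [2,1,2,0,1]:
#   forward pass:  max_profit = [0, 0, 1, 1, 1]
#   backward pass: best profit from day i onward = [1, 1, 1, 1, 0]
#   best split at i = 3: 1 + 1 = 2 (buy@1/sell@2, then buy@0/sell@1)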
if __name__ == '__main__':
so = Solution()
print so.maxProfit([2,1,2,0,1])
| weixsong/algorithm | leetcode/123.py | Python | mit | 1,949 | 0.00667 |
#!/usr/bin/env python
# Copyright (c) 2017 Trail of Bits, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import collections
import itertools
import os
import subprocess
import sys
import traceback
try:
from shlex import quote
except:
from pipes import quote
def execute(args, command_args):
"""Execute IDA Pro as a subprocess, passing this file in as a batch-mode
script for IDA to run. This forwards along arguments passed to `mcsema-disass`
down into the IDA script. `command_args` contains unparsed arguments passed
to `mcsema-disass`. This script may handle extra arguments."""
ida_disass_path = os.path.abspath(__file__)
ida_dir = os.path.dirname(ida_disass_path)
ida_get_cfg_path = os.path.join(ida_dir, "get_cfg.py")
env = {}
env["IDALOG"] = os.devnull
env["TVHEADLESS"] = "1"
env["HOME"] = os.path.expanduser('~')
env["IDA_PATH"] = os.path.dirname(args.disassembler)
env["PYTHONPATH"] = os.path.dirname(ida_dir)
if "SystemRoot" in os.environ:
env["SystemRoot"] = os.environ["SystemRoot"]
script_cmd = []
script_cmd.append(ida_get_cfg_path)
script_cmd.append("--output")
script_cmd.append(args.output)
script_cmd.append("--log_file")
script_cmd.append(args.log_file)
script_cmd.append("--arch")
script_cmd.append(args.arch)
script_cmd.append("--os")
script_cmd.append(args.os)
script_cmd.append("--entrypoint")
script_cmd.append(args.entrypoint)
script_cmd.extend(command_args) # Extra, script-specific arguments.
cmd = []
cmd.append(quote(args.disassembler)) # Path to IDA.
cmd.append("-B") # Batch mode.
cmd.append("-S\"{}\"".format(" ".join(script_cmd)))
cmd.append(quote(args.binary))
try:
with open(os.devnull, "w") as devnull:
return subprocess.check_call(
" ".join(cmd),
env=env,
stdin=None,
stdout=devnull, # Necessary.
stderr=sys.stderr, # For enabling `--log_file /dev/stderr`.
shell=True, # Necessary.
cwd=os.path.dirname(__file__))
except:
sys.stderr.write(traceback.format_exc())
return 1
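# Illustrative call (argument values are hypothetical; in the real tool the
# namespace is built by mcsema-disass's argparse front end):
#
#   import argparse
#   args = argparse.Namespace(
#       disassembler="/opt/ida-7.0/idat64", binary="/tmp/prog",
#       output="/tmp/prog.cfg", log_file="/tmp/prog.log",
#       arch="amd64", os="linux", entrypoint="main")
#   execute(args, [])   # extra command_args are forwarded to get_cfg.py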
| trailofbits/mcsema | tools/mcsema_disass/ida7/disass.py | Python | apache-2.0 | 2,610 | 0.015326 |
import calendar
import datetime
import platform
import time
import os
import ssl
import socket
import urllib
import urlparse
import warnings
import shippo
from shippo import error, http_client, version, util, certificate_blacklist
def _encode_datetime(dttime):
if dttime.tzinfo and dttime.tzinfo.utcoffset(dttime) is not None:
utc_timestamp = calendar.timegm(dttime.utctimetuple())
else:
utc_timestamp = time.mktime(dttime.timetuple())
return int(utc_timestamp)
def _api_encode(data):
for key, value in data.iteritems():
key = util.utf8(key)
if value is None:
continue
elif hasattr(value, 'shippo_id'):
yield (key, value.shippo_id)
elif isinstance(value, list) or isinstance(value, tuple):
for subvalue in value:
yield ("%s[]" % (key,), util.utf8(subvalue))
elif isinstance(value, dict):
subdict = dict(('%s[%s]' % (key, subkey), subvalue) for
subkey, subvalue in value.iteritems())
for subkey, subvalue in _api_encode(subdict):
yield (subkey, subvalue)
elif isinstance(value, datetime.datetime):
yield (key, _encode_datetime(value))
else:
yield (key, util.utf8(value))
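# Example of the flattening performed by _api_encode (illustrative values):
#   {'metadata': {'order_id': '123'}, 'tags': ['a', 'b'], 'ship_date': None}
# yields the (key, value) pairs
#   ('metadata[order_id]', '123'), ('tags[]', 'a'), ('tags[]', 'b')
# i.e. nested dicts become bracketed keys, lists repeat 'key[]', and None
# values are dropped before urlencoding in request_raw().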
def _build_api_url(url, query):
scheme, netloc, path, base_query, fragment = urlparse.urlsplit(url)
if base_query:
query = '%s&%s' % (base_query, query)
return urlparse.urlunsplit((scheme, netloc, path, query, fragment))
class APIRequestor(object):
_CERTIFICATE_VERIFIED = False
def __init__(self, auth=None, client=None):
self.auth = auth
from shippo import verify_ssl_certs
self._client = client or http_client.new_default_http_client(
verify_ssl_certs=verify_ssl_certs)
def request(self, method, url, params=None):
self._check_ssl_cert()
rbody, rcode, my_auth = self.request_raw(
method.lower(), url, params)
resp = self.interpret_response(rbody, rcode)
return resp, my_auth
def handle_api_error(self, rbody, rcode, resp):
try:
err = resp['error']
except (KeyError, TypeError):
raise error.APIError(
"Invalid response object from API: %r (HTTP response code "
"was %d)" % (rbody, rcode),
rbody, rcode, resp)
if rcode in [400, 404]:
raise error.InvalidRequestError(
err.get('message'), err.get('param'), rbody, rcode, resp)
elif rcode == 401:
raise error.AuthenticationError(
err.get('message'), rbody, rcode, resp)
elif rcode == 402:
raise error.CardError(err.get('message'), err.get('param'),
err.get('code'), rbody, rcode, resp)
else:
raise error.APIError(err.get('message'), rbody, rcode, resp)
def request_raw(self, method, url, params=None):
"""
Mechanism for issuing an API call
"""
from shippo import api_version
if self.auth:
my_auth = self.auth
else:
from shippo import auth
my_auth = auth
if my_auth is None:
raise error.AuthenticationError(
'No API key provided. (HINT: set your API key using '
'"shippo.auth = (<username>, <password>)"). You can generate API keys '
'from the Shippo web interface. See https://goshippo.com/api '
'for details, or email support@shippo.com if you have any '
'questions.')
abs_url = '%s%s' % (shippo.api_base, url)
encoded_params = urllib.urlencode(list(_api_encode(params or {})))
if method == 'get' or method == 'delete':
if params:
abs_url = _build_api_url(abs_url, encoded_params)
post_data = None
elif method == 'post':
post_data = encoded_params
else:
raise error.APIConnectionError(
'Unrecognized HTTP method %r. This may indicate a bug in the '
'Shippo bindings. Please contact support@shippo.com for '
'assistance.' % (method,))
ua = {
'bindings_version': version.VERSION,
'lang': 'python',
'publisher': 'shippo',
'httplib': self._client.name,
}
for attr, func in [['lang_version', platform.python_version],
['platform', platform.platform],
['uname', lambda: ' '.join(platform.uname())]]:
try:
val = func()
except Exception, e:
val = "!! %s" % (e,)
ua[attr] = val
headers = {
'X-Shippo-Client-User-Agent': util.json.dumps(ua),
'User-Agent': 'Shippo/v1 PythonBindings/%s' % (version.VERSION,),
'Authorization': 'Bearer %s' % (my_auth,)
}
if api_version is not None:
headers['Shippo-Version'] = api_version
rbody, rcode = self._client.request(
method, abs_url, headers, post_data)
util.logger.info(
'API request to %s returned (response code, response body) of '
'(%d, %r)',
abs_url, rcode, rbody)
return rbody, rcode, my_auth
def interpret_response(self, rbody, rcode):
try:
if hasattr(rbody, 'decode'):
rbody = rbody.decode('utf-8')
resp = util.json.loads(rbody)
except Exception:
raise error.APIError(
"Invalid response body from API: %s "
"(HTTP response code was %d)" % (rbody, rcode),
rbody, rcode)
if not (200 <= rcode < 300):
self.handle_api_error(rbody, rcode, resp)
return resp
def _check_ssl_cert(self):
"""Preflight the SSL certificate presented by the backend.
This isn't 100% bulletproof, in that we're not actually validating the
        transport used to communicate with Shippo, merely that the first
        attempt does not use a revoked certificate.
Unfortunately the interface to OpenSSL doesn't make it easy to check
the certificate before sending potentially sensitive data on the wire.
This approach raises the bar for an attacker significantly."""
from shippo import verify_ssl_certs
if verify_ssl_certs and not self._CERTIFICATE_VERIFIED:
uri = urlparse.urlparse(shippo.api_base)
try:
certificate = ssl.get_server_certificate(
(uri.hostname, uri.port or 443))
der_cert = ssl.PEM_cert_to_DER_cert(certificate)
except socket.error, e:
raise error.APIConnectionError(e)
except TypeError:
# The Google App Engine development server blocks the C socket
# module which causes a type error when using the SSL library
if ('APPENGINE_RUNTIME' in os.environ and
'Dev' in os.environ.get('SERVER_SOFTWARE', '')):
self._CERTIFICATE_VERIFIED = True
warnings.warn(
'We were unable to verify Shippo\'s SSL certificate '
'due to a bug in the Google App Engine development '
'server. Please alert us immediately at '
'support@shippo.com if this message appears in your '
'production logs.')
return
else:
raise
self._CERTIFICATE_VERIFIED = certificate_blacklist.verify(
uri.hostname, der_cert)
| bosswissam/shippo-python | shippo/api_requestor.py | Python | mit | 7,865 | 0.000127 |
from flask import Blueprint, flash, redirect, render_template, request, url_for,send_from_directory, abort, make_response, send_file, session
from octs.user.models import Course,Task, User, Message, Team,TeamUserRelation, File,Source,Term,TaskTeamRelation, Tag,UserScore
from .forms import CourseForm,TaskForm, FileForm,TaskScoreForm, RejectReasonForm
from octs.database import db
from flask_login import current_user
from octs.extensions import data_uploader
import time
import os,zipfile
from pypinyin import lazy_pinyin
import xlwt
blueprint = Blueprint('teacher', __name__, url_prefix='/teacher',static_folder='../static')
@blueprint.route('/<teacherid>/course/')
def course(teacherid):
teacher = User.query.filter_by(id=teacherid).first()
courseList = teacher.courses
term = Term.query.order_by(Term.id.desc()).first()
return render_template('teacher/course.html', list=courseList,term=term)
@blueprint.route('/<courseid>/task/<taskid>')
def task_detail(courseid,taskid):
taskList = Task.query.filter_by(id=taskid).all()
return render_template('teacher/taskdetail.html',list=taskList,courseid=courseid)
@blueprint.route('/<teacherid>/course/edit/<id>',methods=['GET','POST'])
def course_edit(teacherid, id):
course = Course.query.filter_by(id=id).first()
form = CourseForm()
if form.validate_on_submit():
course.course_introduction = form.course_introduction.data
course.course_outline=form.course_outline.data
userlist=User.query.all()
for user in userlist:
user.team_min=form.low_member.data
user.team_max=form.high_member.data
db.session.add(user)
db.session.add(course)
db.session.commit()
return redirect(url_for('teacher.course', teacherid=teacherid))
form.coursename.data=course.name
form.credit.data=course.credit
form.location.data=course.location
form.start_time.data=course.start_time
form.course_introduction.data=course.course_introduction
form.course_outline.data=course.course_outline
user=User.query.filter(User.id==teacherid).first()
form.low_member.data=user.team_min
form.high_member.data=user.team_max
return render_template('teacher/course_edit.html',form=form)
@blueprint.route('/course/student/<id>')
def student(id):
course=Course.query.filter_by(id=id).first()
studentList = course.users
return render_template('teacher/student.html',list=studentList)
@blueprint.route('/mainpage/')
def home():
return render_template('teacher/mainpage.html')
@blueprint.route('/<courseid>/task')
def task(courseid):
taskList = Task.query.filter_by(course_id=courseid).all()
return render_template('teacher/task.html',list = taskList, courseid=courseid)
@blueprint.route('/<courseid>/task/add',methods = ['GET','POST'])
def add(courseid):
form = TaskForm()
if form.validate_on_submit():
task = Task()
task.name = form.taskname.data
task.start_time = form.starttime.data
task.end_time = form.endtime.data
task.submit_num = form.subnum.data
task.weight = form.weight.data
task.teacher = current_user.name
task.content = form.content.data
course = Course.query.filter_by(id=courseid).first()
course.tasks.append(task)
teams = course.teams
for team in teams:
ttr = TaskTeamRelation()
ttr.team = team
ttr.task = task
db.session.add(ttr)
db.session.add(task)
db.session.add(course)
db.session.commit()
return redirect(url_for('teacher.task', courseid=courseid))
return render_template('teacher/add.html',form=form, courseid=courseid)
@blueprint.route('/<courseid>/task/edit/<userid>/<id>',methods = ['GET','POST'])
def task_edit(courseid, userid,id):
form = TaskForm()
task = Task.query.filter_by(id = id).first()
if form.validate_on_submit():
flag = True
task.name = form.taskname.data
task.start_time = form.starttime.data
task.end_time = form.endtime.data
task.content = form.content.data
task.submit_num = form.subnum.data
task.weight = form.weight.data
db.session.add(task)
db.session.commit()
return redirect(url_for('teacher.task', courseid=courseid))
form.taskname.data = task.name
form.starttime.data = task.start_time
form.endtime.data = task.end_time
form.content.data = task.content
form.subnum.data = task.submit_num
form.weight.data = task.weight
return render_template('teacher/edit.html',form = form, courseid=courseid, taskid=id)
@blueprint.route('/<courseid>/task/delete/<taskid>',methods=['GET','POST'])
def delete(courseid, taskid):
file_records= File.query.filter_by(task_id=taskid).all()
for file_record in file_records:
os.remove(file_record.path)
db.session.delete(file_record)
task = Task.query.filter_by(id=taskid).first()
ttrs = TaskTeamRelation.query.filter_by(task_id=task.id).all()
for ttr in ttrs:
db.session.delete(ttr)
db.session.delete(task)
db.session.commit()
flash('删除成功')
return redirect(url_for('teacher.task', courseid=courseid))
@blueprint.route('/team',methods=['GET', 'POST'])
def team():
teamlist = Team.query.join(TeamUserRelation, TeamUserRelation.team_id == Team.id).filter(
TeamUserRelation.team_id == Team.id).filter(TeamUserRelation.is_master == True).join(
User, TeamUserRelation.user_id == User.id).filter(TeamUserRelation.user_id == User.id).add_columns(
Team.name, User.username, Team.status, Team.id, User.user_id, User.in_team)
return render_template('teacher/team.html',list=teamlist)
@blueprint.route('/task/score<taskid>/download')
def score_download(taskid):
teamidList = TaskTeamRelation.query.filter_by(task_id=taskid).all()
teams = []
for teamid in teamidList:
team = Team.query.filter_by(id=teamid.team_id).first()
teams.append(team)
task = Task.query.filter_by(id=taskid).first()
book = xlwt.Workbook()
alignment = xlwt.Alignment() # Create Alignment
alignment.horz = xlwt.Alignment.HORZ_CENTER # May be: HORZ_GENERAL, HORZ_LEFT, HORZ_CENTER, HORZ_RIGHT, HORZ_FILLED, HORZ_JUSTIFIED, HORZ_CENTER_ACROSS_SEL, HORZ_DISTRIBUTED
alignment.vert = xlwt.Alignment.VERT_CENTER # May be: VERT_TOP, VERT_CENTER, VERT_BOTTOM, VERT_JUSTIFIED, VERT_DISTRIBUTED
style = xlwt.XFStyle() # Create Style
style.alignment = alignment # Add Alignment to Style
sheet1 = book.add_sheet('本次作业信息('+task.name+')',cell_overwrite_ok=True)
row0 = ['团队id','团队名称','作业得分']
for i in range(0,len(row0)):
sheet1.write(0,i,row0[i], style)
row_num =1
for team in teams:
sheet1.write(row_num,0,team.id,style)
sheet1.write(row_num,1,team.name,style)
score = TaskTeamRelation.query.filter(TaskTeamRelation.team_id==team.id).filter(TaskTeamRelation.task_id==taskid).first()
sheet1.write(row_num,2,score.score,style)
row_num=row_num+1
filename = 'score_table_'+ str(time.time()) + '.xls'
book.save(os.path.join(data_uploader.path('',folder='tmp'),filename))
return send_from_directory(data_uploader.path('', folder='tmp'), filename, as_attachment=True)
@blueprint.route('/team/download')
def team_download():
teams = Team.query.filter_by(status=3).all()
book = xlwt.Workbook()
alignment = xlwt.Alignment() # Create Alignment
alignment.horz = xlwt.Alignment.HORZ_CENTER # May be: HORZ_GENERAL, HORZ_LEFT, HORZ_CENTER, HORZ_RIGHT, HORZ_FILLED, HORZ_JUSTIFIED, HORZ_CENTER_ACROSS_SEL, HORZ_DISTRIBUTED
alignment.vert = xlwt.Alignment.VERT_CENTER # May be: VERT_TOP, VERT_CENTER, VERT_BOTTOM, VERT_JUSTIFIED, VERT_DISTRIBUTED
style = xlwt.XFStyle() # Create Style
style.alignment = alignment # Add Alignment to Style
sheet1 = book.add_sheet('团队信息', cell_overwrite_ok=True)
row0 = ['团队id', '团队名称', '姓名', '学号', '性别', 'Master']
for i in range(0, len(row0)):
sheet1.write(0, i, row0[i])
row_num = 1
for team in teams:
turs = TeamUserRelation.query.filter_by(team_id=team.id).all()
turs_length = len(turs)
sheet1.write_merge(row_num, row_num + turs_length - 1, 0, 0, team.id, style)
sheet1.write_merge(row_num, row_num + turs_length - 1, 1, 1, team.name, style)
for i in range(turs_length):
if turs[i].is_accepted:
sheet1.write(row_num+i, 2, turs[i].user.name)
sheet1.write(row_num + i, 3, turs[i].user.user_id)
gender = '男' if turs[i].user.gender==False else '女'
sheet1.write(row_num + i, 4, gender)
if turs[i].is_master == True:
sheet1.write(row_num + i, 5, '√')
row_num = row_num + turs_length
filename = 'team_table_' + str(time.time()) + '.xls'
book.save(os.path.join(data_uploader.path('', folder='tmp'), filename))
return send_from_directory(data_uploader.path('', folder='tmp'), filename, as_attachment=True)
@blueprint.route('/team/permit/<teacherid>/<teamid>')
def permit(teacherid,teamid):
team=Team.query.filter(Team.id==teamid).first()
team.status=3
db.session.add(team)
db.session.commit()
stulist=TeamUserRelation.query.filter(TeamUserRelation.team_id==teamid).filter(TeamUserRelation.is_accepted==True).all()
for stu in stulist:
Message.sendMessage(teacherid,stu.user_id,'提交团队申请已通过')
flash('已通过该团队申请!')
return redirect(url_for('teacher.team'))
@blueprint.route('/team/rejectreason/<teacherid>/<teamid>',methods=['GET','POST'])
def rejectreason(teacherid,teamid):
form = RejectReasonForm()
if form.validate_on_submit():
reason = form.content.data
teamlist = TeamUserRelation.query.filter(TeamUserRelation.team_id == teamid).filter(TeamUserRelation.is_accepted == True).all()
for user in teamlist:
Message.sendMessage(teacherid,user.user_id,'团队申请已驳回:'+reason)
return redirect(url_for('teacher.reject',teacherid = teacherid,teamid = teamid))
return render_template('teacher/reject_reason.html',teacherid = teacherid,teamid = teamid,form=form)
@blueprint.route('/team/reject/<teacherid>/<teamid>')
def reject(teacherid,teamid):
team=Team.query.filter(Team.id==teamid).first()
teamuser=TeamUserRelation.query.filter(TeamUserRelation.team_id==teamid).all()
for stu in teamuser:
user=User.query.filter(User.id==stu.user_id).first()
user.in_team=False
#Message.sendMessage(teacherid,user.id,'提交申请已被驳回')
db.session.add(user)
db.session.delete(stu)
db.session.delete(team)
db.session.commit()
flash('已驳回该团队申请!')
return redirect(url_for('teacher.team'))
@blueprint.route('team/detail/<teamid>')
def team_detail(teamid):
teamlist=Team.query.filter(Team.id==teamid).join(TeamUserRelation,TeamUserRelation.team_id==Team.id).join(
User,User.id==TeamUserRelation.user_id).add_columns(User.name,User.gender,User.user_id).all()
return render_template('teacher/teamdetail.html',list=teamlist)
@blueprint.route('/team/adjustion/adjust',methods=['GET', 'POST'])
def to_adjust():
teamlist1=Team.query.join(TeamUserRelation,TeamUserRelation.team_id==Team.id).filter(Team.status==1).filter(
TeamUserRelation.is_master==True).join(User,User.id==TeamUserRelation.user_id).add_columns(
Team.name,Team.status,User.username,Team.id).all()
teamlist2 = Team.query.join(TeamUserRelation,TeamUserRelation.team_id==Team.id).filter(Team.status==3).filter(
TeamUserRelation.is_master==True).join(User,User.id==TeamUserRelation.user_id).add_columns(
Team.name,Team.status,User.username,Team.id).all()
teamlist=teamlist1+teamlist2
print(teamlist)
return render_template('teacher/adjust.html',list=teamlist)
@blueprint.route('/team/adjustion/adjust/<teamid>',methods=['GET', 'POST'])
def team_adjust(teamid):
teamlist = Team.query.filter(Team.id == teamid).join(TeamUserRelation, TeamUserRelation.team_id == Team.id).join(
User, User.id == TeamUserRelation.user_id).add_columns(User.name, User.gender, User.user_id,TeamUserRelation.user_id,Team.id).all()
otherteam=Team.query.filter(Team.id!=teamid).filter(Team.status==1).all()
if session.get('deleted_stu') is None:
session['deleted_stu'] = []
translist = session['deleted_stu']
return render_template('teacher/team_adjust.html',list=teamlist,other_team=otherteam,translist=translist)
@blueprint.route('/team/adjustion/<teacherid>/adjust/<teamid>/<userid>',methods=['GET', 'POST'])
def adjust_trans(teacherid,userid,teamid):
teamlist = Team.query.filter(Team.id == teamid).join(TeamUserRelation, TeamUserRelation.team_id == Team.id).join(
User, User.id == TeamUserRelation.user_id).add_columns(User.name, User.gender, User.user_id,
TeamUserRelation.user_id, Team.id).all()
user=User.query.join(TeamUserRelation,TeamUserRelation.user_id==userid).filter(User.id==userid).add_columns(
User.id,User.name,User.gender,TeamUserRelation.is_master).first()
user_dict = {'id':user.id,'name':user.name,'gender':user.gender}
if session.get('deleted_stu') is None:
session['deleted_stu'] = []
translist = session['deleted_stu']
flag=True
for ad_stu in translist:
if(ad_stu['id']==user.id):
flag=False
flash('该学生已在调整名单中!')
if user.is_master==True:
flag=False
flash('该学生是本队组长!不能调整!')
if flag:
userlist=TeamUserRelation.query.filter(TeamUserRelation.user_id==user.id).first()
userlist.is_adjust=True
db.session.add(userlist)
db.session.commit()
translist.append(user_dict)
session['deleted_stu'] = translist
return redirect(url_for('teacher.team_adjust', teacherid=teacherid, teamid=teamid))
@blueprint.route('/team/adjustion/<teacherid>/adjust/<teamid>/add/<userid>',methods=['GET', 'POST'])
def adjust_add(teacherid,userid,teamid):
userlist=TeamUserRelation.query.filter(TeamUserRelation.user_id==userid).first()
if(int(teamid)==int(userlist.team_id)):
flash('该学生已在本团队了!')
else:
userlist.team_id=teamid
userlist.is_adjust=False
db.session.add(userlist)
db.session.commit()
Message.sendMessage(teacherid,userid,'你已经被老师调整至其他组!请注意查看')
flash('已将该学生调整到该团队!')
translist=session['deleted_stu']
for user in translist:
if user['id'] == int(userid):
translist.remove(user)
session['deleted_stu']=translist
return redirect(url_for('teacher.team_adjust', teacherid=teacherid, teamid=teamid))
@blueprint.route('/<courseid>/task/<taskid>/<teacherid>/files', methods=['GET', 'POST'])
def task_files(courseid, taskid,teacherid):
form = FileForm()
file_records = File.query.filter(File.task_id==taskid).filter(File.user_id == teacherid).all()
if form.validate_on_submit():
for file in request.files.getlist('file'):
file_record = File()
file_record.user_id = current_user.id
file_record.task_id = taskid
filename = file.filename
file_record.name = filename
filetype = filename.split('.')[-1]
tmpname = str(current_user.id) + '-' + str(time.time())
file.filename = tmpname + '.' + filetype
file_record.directory = data_uploader.path('', folder='course/'+str(courseid)+'/teacher/tasks/'+str(taskid))
file_record.real_name = file.filename
file_record.path = data_uploader.path(file.filename, folder='course/'+str(courseid)+'/teacher/tasks/'+str(taskid))
data_uploader.save(file, folder='course/'+str(courseid)+'/teacher/tasks/'+str(taskid))
db.session.add(file_record)
db.session.commit()
return redirect(url_for('teacher.task_files', courseid=courseid, taskid=taskid,teacherid = teacherid))
return render_template('teacher/task_tabfile.html',form=form, file_records=file_records, courseid=courseid, taskid=taskid)
@blueprint.route('/<courseid>/task/<taskid>/files/delete/<fileid>/<userid>', methods=['GET', 'POST'])
def task_file_delete(courseid, taskid, fileid,userid):
file_record = File.query.filter_by(id=fileid).first()
os.remove(file_record.path)
db.session.delete(file_record)
db.session.commit()
flash('删除成功')
return redirect(url_for('teacher.task_files', courseid=courseid, taskid=taskid,userid = userid))
@blueprint.route('/<courseid>/task/<taskid>/files/delete/<fileid>', methods=['GET', 'POST'])
def student_task_file_delete(courseid, taskid, fileid):
file_record = File.query.filter_by(id=fileid).first()
os.remove(file_record.path)
db.session.delete(file_record)
db.session.commit()
flash('删除成功')
return redirect(url_for('teacher.student_task', courseid=courseid, taskid=taskid))
@blueprint.route('/<courseid>/task/<taskid>/files/download/<fileid>')
def task_file_download(courseid, taskid, fileid):
file_record = File.query.filter_by(id=fileid).first()
if os.path.isfile(file_record.path):
return send_from_directory(file_record.directory, file_record.real_name, as_attachment=True, attachment_filename='_'.join(lazy_pinyin(file_record.name)))
abort(404)
@blueprint.route('/<courseid>/task/<taskid>/scores',methods=['GET', 'POST'])
def task_give_score(courseid,taskid):
tasklist=Task.query.filter(Task.id==taskid).first()
task_name = Task.query.filter(Task.id == taskid).first()
if time.strftime('%Y-%m-%d %H:%M:%S',time.localtime(time.time()))<str(tasklist.end_time):
flash('这项作业还未截止!暂时不能批改')
return render_template('teacher/task_score.html',flag=False,courseid=courseid,taskname=task_name)
else:
task_team_list=TaskTeamRelation.query.join(Task,Task.id==TaskTeamRelation.task_id).join(Team,Team.id==TaskTeamRelation.team_id
).filter(TaskTeamRelation.task_id==taskid).add_columns(Team.name,TaskTeamRelation.task_id,TaskTeamRelation.team_id,TaskTeamRelation.score,Task.weight,TaskTeamRelation.submit_num).all()
#print(task_name.name)
return render_template('teacher/task_score.html', flag=True,list=task_team_list,taskname=task_name,courseid=courseid)
@blueprint.route('/<courseid>/task/<taskid>/scores/score/<teamid>/<teacherid>',methods=['GET', 'POST'])
def task_edit_score(courseid,taskid,teamid,teacherid):
taskscore=TaskTeamRelation.query.filter(TaskTeamRelation.task_id==taskid).filter(TaskTeamRelation.team_id==teamid).first()
form = TaskScoreForm()
if form.validate_on_submit():
        score = int(form.task_score.data)
        if score > 100 or score < 0:
            flash('分数必须在0-100之间')
            return redirect(url_for('teacher.task_edit_score', courseid=courseid, taskid=taskid, teamid=teamid, teacherid=teacherid))
        taskscore.score = score
userlist=TeamUserRelation.query.filter(TeamUserRelation.team_id==teamid).all()
for user in userlist:
Message.sendMessage(teacherid,user.user_id,'批改意见:'+form.content.data)
db.session.add(taskscore)
db.session.commit()
flash('已经提交分数!')
return redirect(url_for('teacher.task_give_score',courseid=courseid,taskid=taskid))
if taskscore.score>=0:
form.task_score.data=taskscore.score
form.content.data=''
return render_template('teacher/set_score.html',form=form,courseid=courseid,taskid=taskid,teamid=teamid)
@blueprint.route('/<courseid>/task<taskid>/scores')
def task_score(courseid,taskid):
teamidList = TaskTeamRelation.query.filter_by(task_id=taskid).all()
teams = []
for teamid in teamidList:
team = Team.query.filter_by(id=teamid.team_id).first()
teams.append(team)
task = Task.query.filter_by(id=taskid).first()
return render_template('teacher/task_one_score.html',teams=teams,task=task,courseid=courseid,taskid=taskid)
@blueprint.route('/<courseid>/task/<taskid>/files',methods = ['GET','POST'])
def student_task(courseid,taskid):
form = FileForm()
course = Course.query.filter_by(id = courseid).first()
users = course.users
masters = []
for user in users:
tur = TeamUserRelation.query.filter(TeamUserRelation.user_id == user.id).filter(TeamUserRelation.is_master == True).first()
if tur is not None:
masters.append(tur)
print(masters)
file_records = []
for master in masters:
user_master = User.query.filter_by(id=master.user_id).first()
file_records.append((master.team_id ,File.query.filter(File.user_id == master.user_id).filter(File.task_id == int(taskid)).all(),user_master.username))
print(file_records)
return render_template('teacher/task_student.html',form = form,file_records=file_records,courseid = courseid,taskid = taskid)
@blueprint.route('/source/<courseid>')
def source(courseid):
form = FileForm()
course = Course.query.filter_by(id=courseid).first()
tags = course.tags
tag_names = {}
file_records = File.query.filter_by(course_id=courseid).all()
user_names = []
for file_record in file_records:
tag = Tag.query.filter_by(id=file_record.tag_id).first()
user = User.query.filter_by(id=file_record.user_id).first()
user_names.append(user.name)
tag_names[file_record.tag_id] = tag.name
return render_template('teacher/source.html', form=form, file_records=file_records,
courseid=courseid, tags=tags, tag_names=tag_names,user_names=user_names, file_num=len(file_records))
@blueprint.route('/source/<courseid>/tag/<tagid>',methods=['GET','POST'])
def source_tag(courseid, tagid):
form = FileForm()
course = Course.query.filter_by(id=courseid).first()
tags = course.tags
user_names = []
file_records = File.query.filter_by(tag_id=tagid).all()
for file_record in file_records:
user = User.query.filter_by(id=file_record.user_id).first()
user_names.append(user.name)
if form.validate_on_submit():
for file in request.files.getlist('file'):
file_record = File()
file_record.user_id = current_user.id
file_record.course_id = courseid
filename = file.filename
file_record.name = filename
filetype = filename.split('.')[-1]
tmpname = str(current_user.id) + '-' + str(time.time())
file.filename = tmpname + '.' + filetype
file_record.directory = data_uploader.path('', folder='course/'+str(courseid)+'/teacher/source')
file_record.real_name = file.filename
file_record.path = data_uploader.path(file.filename, folder='course/'+str(courseid)+'/teacher/source')
data_uploader.save(file, folder='course/'+str(courseid)+'/teacher/source')
file_record.tag_id = tagid
db.session.add(file_record)
db.session.commit()
return redirect(url_for('teacher.source_tag', courseid=courseid, tagid=tagid))
return render_template('teacher/source_tag.html', form=form, file_records=file_records,
courseid=courseid, tags=tags, tagid=tagid,user_names=user_names,file_num=len(file_records))
@blueprint.route('/source/<courseid>/tag/add/<tagname>',methods=['GET','POST'])
def tag_add(courseid, tagname):
course = Course.query.filter_by(id=courseid).first()
tags = course.tags
for tag in tags:
if tag.name==tagname:
flash('标签已存在')
return redirect(url_for('teacher.source', courseid=courseid))
tag = Tag()
tag.name = tagname
course = Course.query.filter_by(id=courseid).first()
course.tags.append(tag)
db.session.add(tag)
db.session.add(course)
db.session.commit()
flash('添加成功')
return redirect(url_for('teacher.source', courseid=courseid))
@blueprint.route('<courseid>/source/files/download/<fileid>')
def source_download(courseid,fileid):
file_record = File.query.filter_by(id=fileid).first()
if os.path.isfile(file_record.path):
return send_from_directory(file_record.directory, file_record.real_name, as_attachment=True,
attachment_filename='_'.join(lazy_pinyin(file_record.name)))
abort(404)
@blueprint.route('<courseid>/source/files/delete/<fileid>')
def source_delete(courseid,fileid):
file_record = File.query.filter_by(id=fileid).first()
os.remove(file_record.path)
db.session.delete(file_record)
db.session.commit()
flash('删除成功')
return redirect(url_for('teacher.source', courseid=courseid))
def zipfolder(foldername,filename):
'''
zip folder foldername and all its subfiles and folders into
a zipfile named filename
'''
zip_download=zipfile.ZipFile(filename,'w',zipfile.ZIP_DEFLATED)
for root,dirs,files in os.walk(foldername):
print(root, dirs, files)
        for name in files:
            zip_download.write(os.path.join(root, name), arcname=os.path.join(os.path.basename(root), name))
zip_download.close()
return zip_download
@blueprint.route('/<courseid>/task/<taskid>/files/download')
def task_file_download_zip(courseid, taskid):
foldername = data_uploader.path('', folder='course/'+str(courseid)+'/teacher/tasks/'+str(taskid))
filename = os.path.join(data_uploader.path('', folder='tmp'), 'taskfiles.zip')
zip_download = zipfolder(foldername, filename)
return send_file(filename, as_attachment=True)
@blueprint.route('/<courseid>/task/<taskid>/studenttask/files/download')
def student_task_file_download_zip(courseid, taskid):
foldername = data_uploader.path('', folder='course/'+str(courseid)+'/student/tasks/'+str(taskid))
filename = os.path.join(data_uploader.path('', folder='tmp'), 'taskfiles.zip')
zip_download = zipfolder(foldername, filename)
return send_file(filename, as_attachment=True)
@blueprint.route('/source/<courseid>/files/download')
def source_file_download_zip(courseid):
foldername = data_uploader.path('',folder='course/'+str(courseid)+'/teacher/source')
filename = os.path.join(data_uploader.path('',folder='tmp'),'sourcefiles.zip')
zip_download = zipfolder(foldername,filename)
return send_file(filename,as_attachment=True)
@blueprint.route('/<courseid>/files/download')
def former_task_file_download_zip(courseid):
foldername = data_uploader.path('', folder='course/'+str(courseid)+'/student')
filename = os.path.join(data_uploader.path('', folder='tmp'), 'taskfiles.zip')
zip_download = zipfolder(foldername, filename)
return send_file(filename, as_attachment=True)
@blueprint.route('/<courseid>/task/submit')
def multi_check(courseid):
tasks = Task.query.filter_by(course_id = courseid).all()
ttrs_all = []
for task in tasks:
##team = Team.query.filter_by(course_id = task.course_id).first()
ttrs = TaskTeamRelation.query.filter_by(task_id = task.id).all()
if ttrs is not None:
ttrs_all.extend(ttrs)
teams = Team.query.filter_by(course_id = courseid).all()
return render_template('teacher/multi_check.html',ttrs_all = ttrs_all,courseid = courseid,tasks = tasks,teams = teams)
@blueprint.route('/course/calcu_score')
def calcu_score():
teams = Team.query.filter_by(status=3).all()
team_num = len(teams)
for i in range(0, team_num):
teamtask = TaskTeamRelation.query.filter_by(team_id=teams[i].id).all()
sum = 0
for task in teamtask:
weight = Task.query.filter_by(id=task.task_id).first()
sum += weight.weight * task.score
team_for_score = Team.query.filter_by(id=teams[i].id).first()
team_for_score.score = sum
db.session.add(team_for_score)
db.session.commit()
userList = TeamUserRelation.query.filter_by(team_id=teams[i].id).all()
for user in userList:
print(user.user_id)
user_for_score = UserScore.query.filter_by(user_id=user.user_id).first()
user_for_score.score = sum * user_for_score.grade
db.session.add(user_for_score)
db.session.commit()
return redirect(url_for('teacher.course',teacherid=current_user.id))
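# Grading model used by calcu_score() above and recomputed in grade() below:
#   team_score    = sum(task.weight * task_score) over the team's tasks
#   student_score = team_score * student.grade                            (calcu_score)
#   student_score = round(team_score * student.grade + personal_grade, 1) (grade)
# i.e. task weights set each assignment's contribution and each member's
# 'grade' factor splits the team result into an individual mark.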
@blueprint.route('/course/grade_download')
def grade_download():
teams = Team.query.filter_by(status=3).all()
book = xlwt.Workbook()
alignment = xlwt.Alignment() # Create Alignment
alignment.horz = xlwt.Alignment.HORZ_CENTER # May be: HORZ_GENERAL, HORZ_LEFT, HORZ_CENTER, HORZ_RIGHT, HORZ_FILLED, HORZ_JUSTIFIED, HORZ_CENTER_ACROSS_SEL, HORZ_DISTRIBUTED
alignment.vert = xlwt.Alignment.VERT_CENTER # May be: VERT_TOP, VERT_CENTER, VERT_BOTTOM, VERT_JUSTIFIED, VERT_DISTRIBUTED
style = xlwt.XFStyle() # Create Style
style.alignment = alignment # Add Alignment to Style
sheet1 = book.add_sheet('团队成绩', cell_overwrite_ok=True)
row0 = ['团队id', '团队名称','成绩']
for i in range(0, len(row0)):
sheet1.write(0, i, row0[i])
row_num = 1
team_num = len(teams)
for i in range(0,team_num):
sheet1.write(i + 1, 0, teams[i].id)
sheet1.write(i+1,1,teams[i].name)
sheet1.write(i+1,2,teams[i].score)
filename = 'team_grade_table_' + str(time.time()) + '.xls'
book.save(os.path.join(data_uploader.path('', folder='tmp'), filename))
return send_from_directory(data_uploader.path('', folder='tmp'), filename, as_attachment=True)
@blueprint.route('/course/grade_download_stu')
def grade_download_stu():
students = UserScore.query.all()
book = xlwt.Workbook()
alignment = xlwt.Alignment() # Create Alignment
alignment.horz = xlwt.Alignment.HORZ_CENTER # May be: HORZ_GENERAL, HORZ_LEFT, HORZ_CENTER, HORZ_RIGHT, HORZ_FILLED, HORZ_JUSTIFIED, HORZ_CENTER_ACROSS_SEL, HORZ_DISTRIBUTED
alignment.vert = xlwt.Alignment.VERT_CENTER # May be: VERT_TOP, VERT_CENTER, VERT_BOTTOM, VERT_JUSTIFIED, VERT_DISTRIBUTED
style = xlwt.XFStyle() # Create Style
style.alignment = alignment # Add Alignment to Style
sheet1 = book.add_sheet('个人成绩', cell_overwrite_ok=True)
row0 = ['学生id', '姓名','个人成绩']
for i in range(0, len(row0)):
sheet1.write(0, i, row0[i])
row_num = 1
stu_num = len(students)
for i in range(0,stu_num):
username = User.query.filter_by(id=students[i].user_id).first()
stuid = User.query.filter_by(id=students[i].user_id).first()
print(username)
sheet1.write(i+1,0,stuid.user_id)
sheet1.write(i+1,1,username.name)
sheet1.write(i+1,2,students[i].score)
filename = 'student_grade_table_' + str(time.time()) + '.xls'
book.save(os.path.join(data_uploader.path('', folder='tmp'), filename))
return send_from_directory(data_uploader.path('', folder='tmp'), filename, as_attachment=True)
@blueprint.route('/<courseid>/task/submit/download')
def task_check_download(courseid):
book = xlwt.Workbook()
tasklist = Task.query.filter_by(course_id=courseid).all()
ttrs_all = []
for task in tasklist:
ttrs = TaskTeamRelation.query.filter_by(task_id = task.id).all()
if ttrs is not None:
ttrs_all.extend(ttrs)
teamlist = Team.query.filter_by(course_id = courseid).all()
##tasks = Task.query.filter_by(course_id=courseid).all()
alignment = xlwt.Alignment() # Create Alignment
alignment.horz = xlwt.Alignment.HORZ_CENTER # May be: HORZ_GENERAL, HORZ_LEFT, HORZ_CENTER, HORZ_RIGHT, HORZ_FILLED, HORZ_JUSTIFIED, HORZ_CENTER_ACROSS_SEL, HORZ_DISTRIBUTED
alignment.vert = xlwt.Alignment.VERT_CENTER # May be: VERT_TOP, VERT_CENTER, VERT_BOTTOM, VERT_JUSTIFIED, VERT_DISTRIBUTED
style = xlwt.XFStyle() # Create Style
style.alignment = alignment # Add Alignment to Style
sheet1 = book.add_sheet('作业信息', cell_overwrite_ok=True)
row0 = ['团队id', '团队名称']
for task in tasklist:
row0.append(task.name)
for i in range(0, len(row0)):
sheet1.write(0, i, row0[i])
row_num = 1
for team in teamlist:
##turs = TeamUserRelation.query.filter_by(team_id=team.id).all()
i = 2
sheet1.write(row_num, 0 , team.id)
sheet1.write(row_num, 1, team.name)
for ttrs in ttrs_all:
if ttrs.team_id == team.id:
sheet1.write(row_num, i , ttrs.score)
i = i+1
##row_num = row_num + turs_length
row_num = row_num + 1
filename = 'task_check_table_' + str(time.time()) + '.xls'
book.save(os.path.join(data_uploader.path('', folder='tmp'), filename))
return send_from_directory(data_uploader.path('', folder='tmp'), filename, as_attachment=True)
@blueprint.route('/course/grade')
def grade():
students = UserScore.query.all()
stu_num = len(students)
username=[]
stuid=[]
for i in range(0, stu_num):
stuname=User.query.filter_by(id=students[i].user_id).first()
username.append(stuname.name)
stuid.append(stuname.user_id)
teams = Team.query.filter_by(status=3).all()
team_num = len(teams)
for i in range(0, team_num):
teamtask = TaskTeamRelation.query.filter_by(team_id=teams[i].id).all()
sum = 0
for task in teamtask:
weight = Task.query.filter_by(id=task.task_id).first()
sum += round(weight.weight * task.score,1)
team_for_score = Team.query.filter_by(id=teams[i].id).first()
team_for_score.score = sum
db.session.add(team_for_score)
db.session.commit()
userList = TeamUserRelation.query.filter_by(team_id=teams[i].id).all()
for user in userList:
print(user.user_id)
user_for_score = UserScore.query.filter_by(user_id=user.user_id).first()
user_for_score.score = round(sum * user_for_score.grade +user_for_score.personal_grade,1)
db.session.add(user_for_score)
db.session.commit()
return render_template('teacher/grade.html',teamList=teams,
stuList=students,username=username,stu_num=stu_num,stuid=stuid)
| kaiueo/octs | octs/teacher/views.py | Python | bsd-3-clause | 34,607 | 0.014882 |
from django.conf.urls import patterns, include, url
from django.conf import settings
urlpatterns = patterns('',
url(r'^media/(?P<path>.*)$', 'django.views.static.serve', \
{'document_root': settings.MEDIA_ROOT, 'show_indexes':True}),
url(r'', include('controller.urls')),
url('^.*/$', 'controller.views.pxlogin'),
)
| oskgeek/xmccamp | xmccamp/xmccamp/urls.py | Python | mit | 338 | 0.011834 |
#! /usr/bin/python
import re
input = raw_input()
result = re.findall(r"[0-9]",input)
print(result)
| JesusAMR/ProgramasUNI | testing.py | Python | gpl-3.0 | 101 | 0.009901 |
#
# Copyright 2018 Red Hat | Ansible
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import absolute_import, division, print_function
import json
import os
from ansible.module_utils.k8s.common import OpenShiftAnsibleModuleMixin, DateTimeEncoder, remove_secret_data, to_snake
from ansible.module_utils.k8s.helper import AUTH_ARG_SPEC
try:
from openshift.helper.kubernetes import KubernetesObjectHelper
from openshift.helper.exceptions import KubernetesException
HAS_K8S_MODULE_HELPER = True
except ImportError as exc:
HAS_K8S_MODULE_HELPER = False
try:
import yaml
HAS_YAML = True
except ImportError:
HAS_YAML = False
class KubernetesLookup(object):
def __init__(self):
if not HAS_K8S_MODULE_HELPER:
raise Exception(
"Requires the OpenShift Python client. Try `pip install openshift`"
)
if not HAS_YAML:
raise Exception(
"Requires PyYAML. Try `pip install PyYAML`"
)
self.kind = None
self.name = None
self.namespace = None
self.api_version = None
self.label_selector = None
self.field_selector = None
self.include_uninitialized = None
self.resource_definition = None
self.helper = None
self.connection = {}
def run(self, terms, variables=None, **kwargs):
self.kind = kwargs.get('kind')
self.name = kwargs.get('resource_name')
self.namespace = kwargs.get('namespace')
self.api_version = kwargs.get('api_version', 'v1')
self.label_selector = kwargs.get('label_selector')
self.field_selector = kwargs.get('field_selector')
self.include_uninitialized = kwargs.get('include_uninitialized', False)
resource_definition = kwargs.get('resource_definition')
src = kwargs.get('src')
if src:
resource_definition = self.load_resource_definition(src)
if resource_definition:
self.params_from_resource_definition(resource_definition)
if not self.kind:
raise Exception(
"Error: no Kind specified. Use the 'kind' parameter, or provide an object YAML configuration "
"using the 'resource_definition' parameter."
)
self.kind = to_snake(self.kind)
self.helper = self.get_helper(self.api_version, self.kind)
auth_args = ('host', 'api_key', 'kubeconfig', 'context', 'username', 'password',
'cert_file', 'key_file', 'ssl_ca_cert', 'verify_ssl')
for arg in AUTH_ARG_SPEC:
if arg in auth_args and kwargs.get(arg) is not None:
self.connection[arg] = kwargs.get(arg)
try:
self.helper.set_client_config(**self.connection)
except Exception as exc:
raise Exception(
"Client authentication failed: {0}".format(exc.message)
)
if self.name:
return self.get_object()
return self.list_objects()
def get_helper(self, api_version, kind):
try:
helper = KubernetesObjectHelper(api_version=api_version, kind=kind, debug=False)
helper.get_model(api_version, kind)
return helper
except KubernetesException as exc:
raise Exception("Error initializing helper: {0}".format(exc.message))
def load_resource_definition(self, src):
""" Load the requested src path """
path = os.path.normpath(src)
if not os.path.exists(path):
raise Exception("Error accessing {0}. Does the file exist?".format(path))
try:
result = yaml.safe_load(open(path, 'r'))
except (IOError, yaml.YAMLError) as exc:
raise Exception("Error loading resource_definition: {0}".format(exc))
return result
def params_from_resource_definition(self, defn):
if defn.get('apiVersion'):
self.api_version = defn['apiVersion']
if defn.get('kind'):
self.kind = defn['kind']
if defn.get('metadata', {}).get('name'):
self.name = defn['metadata']['name']
if defn.get('metadata', {}).get('namespace'):
self.namespace = defn['metadata']['namespace']
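        # Example of the mapping above (illustrative manifest):
        #   apiVersion: v1        -> self.api_version
        #   kind: ConfigMap       -> self.kind
        #   metadata:
        #     name: my-config     -> self.name
        #     namespace: default  -> self.namespace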
def get_object(self):
""" Fetch a named object """
try:
result = self.helper.get_object(self.name, self.namespace)
except KubernetesException as exc:
raise Exception('Failed to retrieve requested object: {0}'.format(exc.message))
response = []
if result is not None:
# Convert Datetime objects to ISO format
result_json = json.loads(json.dumps(result.to_dict(), cls=DateTimeEncoder))
if self.kind == 'secret':
remove_secret_data(result_json)
response.append(result_json)
return response
def list_objects(self):
""" Query for a set of objects """
if self.namespace:
method_name = 'list_namespaced_{0}'.format(self.kind)
try:
method = self.helper.lookup_method(method_name=method_name)
except KubernetesException:
raise Exception(
"Failed to find method {0} for API {1}".format(method_name, self.api_version)
)
else:
method_name = 'list_{0}_for_all_namespaces'.format(self.kind)
try:
method = self.helper.lookup_method(method_name=method_name)
except KubernetesException:
method_name = 'list_{0}'.format(self.kind)
try:
method = self.helper.lookup_method(method_name=method_name)
except KubernetesException:
raise Exception(
"Failed to find method for API {0} and Kind {1}".format(self.api_version, self.kind)
)
params = {}
if self.field_selector:
params['field_selector'] = self.field_selector
if self.label_selector:
params['label_selector'] = self.label_selector
params['include_uninitialized'] = self.include_uninitialized
if self.namespace:
try:
result = method(self.namespace, **params)
except KubernetesException as exc:
raise Exception(exc.message)
else:
try:
result = method(**params)
except KubernetesException as exc:
raise Exception(exc.message)
response = []
if result is not None:
# Convert Datetime objects to ISO format
result_json = json.loads(json.dumps(result.to_dict(), cls=DateTimeEncoder))
response = result_json.get('items', [])
if self.kind == 'secret':
for item in response:
remove_secret_data(item)
return response
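# Illustrative use from a lookup plugin (keyword values are hypothetical):
#
#   pods = KubernetesLookup().run([], kind='Pod', namespace='default',
#                                 label_selector='app=web')
#
# which returns a list of plain dicts (datetimes ISO-formatted via
# DateTimeEncoder), with Secret data stripped when kind == 'secret'.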
class OpenShiftLookup(OpenShiftAnsibleModuleMixin, KubernetesLookup):
pass
| KohlsTechnology/ansible | lib/ansible/module_utils/k8s/lookup.py | Python | gpl-3.0 | 7,677 | 0.001693 |
from marshmallow import Schema, fields, post_load
from floyd.model.base import BaseModel
class ExperimentConfigSchema(Schema):
name = fields.Str()
family_id = fields.Str()
@post_load
    def make_experiment_config(self, data):
return ExperimentConfig(**data)
class ExperimentConfig(BaseModel):
schema = ExperimentConfigSchema(strict=True)
def __init__(self,
name,
family_id=None):
self.name = name
self.family_id = family_id
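# Illustrative round-trip (field values are hypothetical; assumes the
# marshmallow 2.x API, where strict=True raises on validation errors and
# load() returns a (data, errors) result):
#
#   config = ExperimentConfig.schema.load({'name': 'mnist-cnn',
#                                          'family_id': 'fam_123'}).data
#   assert isinstance(config, ExperimentConfig)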
| mckayward/floyd-cli | floyd/model/experiment_config.py | Python | apache-2.0 | 507 | 0 |
'''
Created on Dec 21, 2013
@author: Chris
'''
import sys
import hashlib
from time import time as _time
from time import sleep as _sleep
from argparse import ArgumentParser
from gooey import Gooey
@Gooey
def main():
desc = "Mock application to test Gooey's functionality"
file_help_msg = "Name of the file you want to read"
my_cool_parser = ArgumentParser(description=desc)
my_cool_parser.add_argument("filename", help=file_help_msg) # positional
my_cool_parser.add_argument("outfile", help="Name of the file where you'll save the output") # positional
my_cool_parser.add_argument('-c', '--countdown', default=10, type=int, help='sets the time to count down from')
my_cool_parser.add_argument("-s", "--showtime", action="store_true", help="display the countdown timer")
my_cool_parser.add_argument("-d", "--delay", action="store_true", help="Delay execution for a bit")
my_cool_parser.add_argument('--verbose', '-v', action='count')
my_cool_parser.add_argument("-o", "--obfuscate", action="store_true", help="obfuscate the countdown timer!")
my_cool_parser.add_argument('-r', '--recursive', choices=['yes', 'no'], help='Recurse into subfolders')
my_cool_parser.add_argument("-w", "--writelog", default="No, NOT whatevs", help="write log to some file or something")
my_cool_parser.add_argument("-e", "--expandAll", action="store_true", help="expand all processes")
print 'inside of main(), my_cool_parser =', my_cool_parser
args = my_cool_parser.parse_args()
print sys.argv
print args.countdown
print args.showtime
start_time = _time()
print 'Counting down from %s' % args.countdown
while _time() - start_time < args.countdown:
if args.showtime:
print 'printing message at: %s' % _time()
else:
print 'printing message at: %s' % hashlib.md5(str(_time())).hexdigest()
_sleep(.5)
print 'Finished running the program. Byeeeeesss!'
# raise ValueError("Something has gone wrong! AHHHHHHHHHHH")
if __name__ == '__main__':
# sys.argv.extend('asdf -c 5 -s'.split())
# print sys.argv
main()
| garrettcap/Bulletproof-Backup | gooey/mockapplications/mockapp.py | Python | gpl-2.0 | 2,121 | 0.016973 |
#!/usr/bin/env python
#
# Copyright 2009, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Tests the --help flag of Google C++ Testing Framework.
SYNOPSIS
gtest_help_test.py --build_dir=BUILD/DIR
# where BUILD/DIR contains the built gtest_help_test_ file.
gtest_help_test.py
"""
__author__ = 'wan@google.com (Zhanyong Wan)'
import os
import re
import gtest_test_utils
IS_WINDOWS = os.name == 'nt'
PROGRAM_PATH = gtest_test_utils.GetTestExecutablePath('gtest_help_test_')
FLAG_PREFIX = '--gtest_'
CATCH_EXCEPTIONS_FLAG = FLAG_PREFIX + 'catch_exceptions'
DEATH_TEST_STYLE_FLAG = FLAG_PREFIX + 'death_test_style'
UNKNOWN_FLAG = FLAG_PREFIX + 'unknown_flag_for_testing'
LIST_TESTS_FLAG = FLAG_PREFIX + 'list_tests'
INCORRECT_FLAG_VARIANTS = [re.sub('^--', '-', LIST_TESTS_FLAG),
re.sub('^--', '/', LIST_TESTS_FLAG),
re.sub('_', '-', LIST_TESTS_FLAG)]
INTERNAL_FLAG_FOR_TESTING = FLAG_PREFIX + 'internal_flag_for_testing'
SUPPORTS_DEATH_TESTS = "DeathTest" in gtest_test_utils.Subprocess(
[PROGRAM_PATH, LIST_TESTS_FLAG]).output
# The help message must match this regex.
HELP_REGEX = re.compile(
FLAG_PREFIX + r'list_tests.*' +
FLAG_PREFIX + r'filter=.*' +
FLAG_PREFIX + r'also_run_disabled_tests.*' +
FLAG_PREFIX + r'repeat=.*' +
FLAG_PREFIX + r'shuffle.*' +
FLAG_PREFIX + r'random_seed=.*' +
FLAG_PREFIX + r'color=.*' +
FLAG_PREFIX + r'print_time.*' +
FLAG_PREFIX + r'output=.*' +
FLAG_PREFIX + r'break_on_failure.*' +
FLAG_PREFIX + r'throw_on_failure.*',
re.DOTALL)
def RunWithFlag(flag):
"""Runs gtest_help_test_ with the given flag.
Returns:
the exit code and the text output as a tuple.
Args:
flag: the command-line flag to pass to gtest_help_test_, or None.
"""
if flag is None:
command = [PROGRAM_PATH]
else:
command = [PROGRAM_PATH, flag]
child = gtest_test_utils.Subprocess(command)
return child.exit_code, child.output
class GTestHelpTest(gtest_test_utils.TestCase):
"""Tests the --help flag and its equivalent forms."""
def TestHelpFlag(self, flag):
"""Verifies correct behavior when help flag is specified.
The right message must be printed and the tests must
skipped when the given flag is specified.
Args:
flag: A flag to pass to the binary or None.
"""
exit_code, output = RunWithFlag(flag)
self.assertEquals(0, exit_code)
self.assert_(HELP_REGEX.search(output), output)
if IS_WINDOWS:
self.assert_(CATCH_EXCEPTIONS_FLAG in output, output)
else:
self.assert_(CATCH_EXCEPTIONS_FLAG not in output, output)
if SUPPORTS_DEATH_TESTS and not IS_WINDOWS:
self.assert_(DEATH_TEST_STYLE_FLAG in output, output)
else:
self.assert_(DEATH_TEST_STYLE_FLAG not in output, output)
def TestNonHelpFlag(self, flag):
"""Verifies correct behavior when no help flag is specified.
Verifies that when no help flag is specified, the tests are run
and the help message is not printed.
Args:
flag: A flag to pass to the binary or None.
"""
exit_code, output = RunWithFlag(flag)
self.assert_(exit_code != 0)
self.assert_(not HELP_REGEX.search(output), output)
def testPrintsHelpWithFullFlag(self):
self.TestHelpFlag('--help')
def testPrintsHelpWithShortFlag(self):
self.TestHelpFlag('-h')
def testPrintsHelpWithQuestionFlag(self):
self.TestHelpFlag('-?')
def testPrintsHelpWithWindowsStyleQuestionFlag(self):
self.TestHelpFlag('/?')
def testPrintsHelpWithUnrecognizedGoogleTestFlag(self):
self.TestHelpFlag(UNKNOWN_FLAG)
def testPrintsHelpWithIncorrectFlagStyle(self):
for incorrect_flag in INCORRECT_FLAG_VARIANTS:
self.TestHelpFlag(incorrect_flag)
def testRunsTestsWithoutHelpFlag(self):
"""Verifies that when no help flag is specified, the tests are run
and the help message is not printed."""
self.TestNonHelpFlag(None)
def testRunsTestsWithGtestInternalFlag(self):
"""Verifies that the tests are run and no help message is printed when
a flag starting with Google Test prefix and 'internal_' is supplied."""
self.TestNonHelpFlag(INTERNAL_FLAG_FOR_TESTING)
if __name__ == '__main__':
gtest_test_utils.Main()
| sfiera/googletest | test/gtest_help_test.py | Python | bsd-3-clause | 5,754 | 0.003823 |
urlpatterns = []
handler404 = 'csrf_tests.views.csrf_token_error_handler'
| nesdis/djongo | tests/django_tests/tests/v22/tests/csrf_tests/csrf_token_error_handler_urls.py | Python | agpl-3.0 | 75 | 0 |
# -*- coding: utf-8 -*-
#
# Optcoretech documentation build configuration file, created by
# sphinx-quickstart on Mon Sep 1 14:23:01 2014.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Optcoretech'
copyright = u'2014, Sheesh Mohsin'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'Optcoretechdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'Optcoretech.tex', u'Optcoretech Documentation',
u'Sheesh Mohsin', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'optcoretech', u'Optcoretech Documentation',
[u'Sheesh Mohsin'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'Optcoretech', u'Optcoretech Documentation',
u'Sheesh Mohsin', 'Optcoretech', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
| nishantsingla/optcoretech | docs/conf.py | Python | mit | 7,786 | 0.007449 |
import sys
from csv_data import Data
from csv_utilities import readIntegerCSV
from csv_utilities import convertToZeroOne
from statistics import mean
from math import log
def dotProduct( arr1, arr2 ):
return sum( [ arr1[idx] * arr2[idx] for idx in range( len( arr1 ) ) ] )
#
def dataSubsetWithY( data, y ):
return [ row for row in data.rows if row.Y == y ]
#
def probabilityXisZero( data, idx, beta ):
return ( 1 - probabilityXisOne( data, idx, beta ) )
#
def probabilityXisOne ( data, idx, beta ):
return ( mean( ( [ 1, 0 ] * ( beta - 1 ) ) + [ row.X[ idx ] for row in data ] ) )
#
def probabilityXY( data, x, idx, y, beta ):
return ( probabilityXisOne( dataSubsetWithY( data, y ), idx, beta ) if x == 1 else probabilityXisZero( dataSubsetWithY( data, y ), idx, beta ) )
#
def probabilityYisZero( data, beta ):
return ( 1 - probabilityYisOne( data, beta ) )
#
def probabilityYisOne ( data, beta ):
return ( mean( ( [ 1, 0 ] * ( beta - 1 ) ) + [ row.Y for row in data.rows ] ) )
#
def findBias( data, beta ):
return ( log( probabilityYisZero( data, beta ) / probabilityYisOne( data, beta ), 2 )
+ sum( [ log( probabilityXY( data, 0, idx, 1, beta ) / probabilityXY( data, 0, idx, 0, beta ), 2 ) for idx in range( data.attributeCount ) ] ) )
#
def findWeights( data, beta ):
return ( [ log( probabilityXY( data, 1, idx, 1, beta ) / probabilityXY( data, 1, idx, 0, beta ), 2 )
- log( probabilityXY( data, 0, idx, 1, beta ) / probabilityXY( data, 0, idx, 0, beta ), 2 ) for idx in range( data.attributeCount ) ] )
#
def rowPrediction( X, W, b ):
return ( 1 if ( dotProduct( X, W ) + b >= 0 ) else 0 )
#
def getResults( testing_data, W, b ):
return ( len( [ 1 for row in testing_data.rows if row.Y == rowPrediction( row.X, W, b ) ] ) / len( testing_data.rows ) )
#
def printModel( model_stream, attrs, W, b ):
model_stream.write( "{}\n".format( round( b, 4 ) ) )
for idx, attr in enumerate( attrs ):
model_stream.write( "{:16}\t{}\n".format( attr, round( W[ idx ], 4 ) ) )
def main( argv ):
	model = None
	try:
training_data = Data( argv[ 0 ], readIntegerCSV, convertToZeroOne )
testing_data = Data( argv[ 1 ], readIntegerCSV, convertToZeroOne )
beta = int ( argv[ 2 ] )
model = open( argv[ 3 ], 'w+' )
b = findBias( training_data, beta )
W = findWeights( training_data, beta )
rez = getResults( testing_data, W, b )
print( rez )
printModel( model, training_data.attributes, W, b )
except IndexError:
print( "ERROR: \"python3 nb.py <train> <test> <beta> <model>\"" )
	finally:
		if model is not None:
			model.close()
#
if __name__=='__main__':
main( sys.argv[ 1: ] )
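# Editor's note -- an illustrative invocation (the file names and the beta
# value are assumptions, not taken from the original repository):
#   python3 nb.py train.csv test.csv 2 model.txt
# This prints the test-set accuracy to stdout and writes the bias followed by
# one weight per attribute into model.txt.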
| CKPalk/MachineLearning | Assignment3/Naive_Bayes/nb.py | Python | mit | 2,586 | 0.095514 |
import os
from PodSix.Resource import *
from PodSix.Concurrent import Concurrent
class Progress(Concurrent):
def __init__(self):
Concurrent.__init__(self)
self.sprite = Image(file=os.path.join("resources", "progress.png"))
self.showing = False
self.maximum = 1
self.value = 0
self.width = 142
self.height = 20
self.priority = 5
def Draw(self):
if self.showing:
gfx.screen.fill([80, 80, 80], special_flags=BLEND_SUB)
gfx.BlitImage(self.sprite, center=[gfx.width / 2, gfx.height / 2])
gfx.DrawRect((gfx.width / 2 - self.width / 2, gfx.height / 2 - self.height / 2, self.width * (1 - float(self.value) / self.maximum), self.height), [255, 0, 0], 0)
def Show(self, maximum=None):
if not maximum is None:
self.maximum = maximum
self.showing = True
def Hide(self):
if self.showing:
self.showing = False
def Value(self, val, maximum=None):
if not maximum is None:
self.maximum = maximum
self.value = val
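# Editor's usage sketch (illustrative only; how the engine schedules Draw()
# each frame is not shown in this file):
#   progress = Progress()
#   progress.Show(maximum=100)   # display the overlay for 100 work units
#   progress.Value(42)           # bar reflects 42/100 completed
#   progress.Hide()              # remove the overlay when finished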
| chr15m/Infinite8BitPlatformer | engine/Progress.py | Python | gpl-3.0 | 960 | 0.039583 |
# Copyright (c) 2014 Lasercar7 (@lasercar) - MIT License
# http://lasercar.github.io
#TODO: dictionary that maps block ids (to be collected) with readable names
stats = {}
def logStat(block, elevation):
if not block in stats:
#initialize column
stats[block] = map(lambda x: 0.0, range(256))
#add to stat
stats[block][elevation] += 1
#MCEdit user options
inputs = (
	('Scan Radius', 100),  # trailing comma keeps this a tuple of option tuples
)
#test
logStat('Coal', 3)
logStat('Diamond', 1)
logStat('Diamond', 1)
logStat('Gold', 1)
logStat('Diamond', 0)
#init
def perform(level, box, options):
#iterate through world and logStat(block, y)
	level.blockAt(x, y, z)  # placeholder call; the real scan should walk the selection box
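	# Editor's sketch of the intended scan loop (assumes MCEdit's BoundingBox
	# fields minx/maxx, miny/maxy, minz/maxz; not part of the original filter):
	#   for x in xrange(box.minx, box.maxx):
	#       for z in xrange(box.minz, box.maxz):
	#           for y in xrange(box.miny, box.maxy):
	#               logStat(level.blockAt(x, y, z), y)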
#calculate total blocks from scan radius, then convert raw data to percentage
options = {'Scan Radius': 100}#temp
layerTotal = (float(options['Scan Radius']) * 2) **2
def percentFormat():
for block in stats:
i = 0
for elevation in stats[block]:
stats[block][i] = float(elevation)/layerTotal
i += 1
percentFormat()
#open csv file, convert stats to data, write data to file
from os.path import expanduser, exists
def filename():
prefix = expanduser('~') + '/Downloads/BlockElevationStats'
postfix = '.csv'
path = prefix + postfix
i = 1
while exists(path):
i += 1
path = prefix + str(i) + postfix
return path
import csv
with open(filename(), 'wb') as csvFile:
writer = csv.writer(csvFile, dialect='excel')
#de-objectify data
data = []
for key, value in stats.iteritems(): # stats.items() in python 3.x
data.append([key] + value)
#translate column structure to row structure
grid = map(list, zip(*data))
#write to file
i = -1
for row in grid:
if i == -1:
writer.writerow(['elevation:'] + row)
else:
writer.writerow([i] + row)
i += 1
#TODO: move all stuff, including functions, into perform()
| lasercar/mcedit-filters | block_elevation_stats.py | Python | mit | 1,764 | 0.036281 |
import re
from collections import namedtuple
import sqlparse
from django.db.backends.base.introspection import (
BaseDatabaseIntrospection, FieldInfo as BaseFieldInfo, TableInfo,
)
from django.db.models import Index
from django.utils.regex_helper import _lazy_re_compile
FieldInfo = namedtuple('FieldInfo', BaseFieldInfo._fields + ('pk',))
field_size_re = _lazy_re_compile(r'^\s*(?:var)?char\s*\(\s*(\d+)\s*\)\s*$')
def get_field_size(name):
""" Extract the size number from a "varchar(11)" type name """
m = field_size_re.search(name)
return int(m.group(1)) if m else None
# This light wrapper "fakes" a dictionary interface, because some SQLite data
# types include variables in them -- e.g. "varchar(30)" -- and can't be matched
# as a simple dictionary lookup.
class FlexibleFieldLookupDict:
# Maps SQL types to Django Field types. Some of the SQL types have multiple
# entries here because SQLite allows for anything and doesn't normalize the
# field type; it uses whatever was given.
base_data_types_reverse = {
'bool': 'BooleanField',
'boolean': 'BooleanField',
'smallint': 'SmallIntegerField',
'smallint unsigned': 'PositiveSmallIntegerField',
'smallinteger': 'SmallIntegerField',
'int': 'IntegerField',
'integer': 'IntegerField',
'bigint': 'BigIntegerField',
'integer unsigned': 'PositiveIntegerField',
'bigint unsigned': 'PositiveBigIntegerField',
'decimal': 'DecimalField',
'real': 'FloatField',
'text': 'TextField',
'char': 'CharField',
'varchar': 'CharField',
'blob': 'BinaryField',
'date': 'DateField',
'datetime': 'DateTimeField',
'time': 'TimeField',
}
def __getitem__(self, key):
key = key.lower().split('(', 1)[0].strip()
return self.base_data_types_reverse[key]
class DatabaseIntrospection(BaseDatabaseIntrospection):
data_types_reverse = FlexibleFieldLookupDict()
def get_field_type(self, data_type, description):
field_type = super().get_field_type(data_type, description)
if description.pk and field_type in {'BigIntegerField', 'IntegerField', 'SmallIntegerField'}:
# No support for BigAutoField or SmallAutoField as SQLite treats
# all integer primary keys as signed 64-bit integers.
return 'AutoField'
return field_type
def get_table_list(self, cursor):
"""Return a list of table and view names in the current database."""
# Skip the sqlite_sequence system table used for autoincrement key
# generation.
cursor.execute("""
SELECT name, type FROM sqlite_master
WHERE type in ('table', 'view') AND NOT name='sqlite_sequence'
ORDER BY name""")
return [TableInfo(row[0], row[1][0]) for row in cursor.fetchall()]
def get_table_description(self, cursor, table_name):
"""
Return a description of the table with the DB-API cursor.description
interface.
"""
cursor.execute('PRAGMA table_info(%s)' % self.connection.ops.quote_name(table_name))
return [
FieldInfo(
name, data_type, None, get_field_size(data_type), None, None,
not notnull, default, pk == 1,
)
for cid, name, data_type, notnull, default, pk in cursor.fetchall()
]
def get_sequences(self, cursor, table_name, table_fields=()):
pk_col = self.get_primary_key_column(cursor, table_name)
return [{'table': table_name, 'column': pk_col}]
def get_relations(self, cursor, table_name):
"""
Return a dictionary of {field_name: (field_name_other_table, other_table)}
representing all relationships to the given table.
"""
# Dictionary of relations to return
relations = {}
# Schema for this table
cursor.execute(
"SELECT sql, type FROM sqlite_master "
"WHERE tbl_name = %s AND type IN ('table', 'view')",
[table_name]
)
create_sql, table_type = cursor.fetchone()
if table_type == 'view':
# It might be a view, then no results will be returned
return relations
results = create_sql[create_sql.index('(') + 1:create_sql.rindex(')')]
# Walk through and look for references to other tables. SQLite doesn't
# really have enforced references, but since it echoes out the SQL used
# to create the table we can look for REFERENCES statements used there.
for field_desc in results.split(','):
field_desc = field_desc.strip()
if field_desc.startswith("UNIQUE"):
continue
m = re.search(r'references (\S*) ?\(["|]?(.*)["|]?\)', field_desc, re.I)
if not m:
continue
table, column = [s.strip('"') for s in m.groups()]
if field_desc.startswith("FOREIGN KEY"):
# Find name of the target FK field
m = re.match(r'FOREIGN KEY\s*\(([^\)]*)\).*', field_desc, re.I)
field_name = m.groups()[0].strip('"')
else:
field_name = field_desc.split()[0].strip('"')
cursor.execute("SELECT sql FROM sqlite_master WHERE tbl_name = %s", [table])
result = cursor.fetchall()[0]
other_table_results = result[0].strip()
li, ri = other_table_results.index('('), other_table_results.rindex(')')
other_table_results = other_table_results[li + 1:ri]
for other_desc in other_table_results.split(','):
other_desc = other_desc.strip()
if other_desc.startswith('UNIQUE'):
continue
other_name = other_desc.split(' ', 1)[0].strip('"')
if other_name == column:
relations[field_name] = (other_name, table)
break
return relations
def get_key_columns(self, cursor, table_name):
"""
Return a list of (column_name, referenced_table_name, referenced_column_name)
for all key columns in given table.
"""
key_columns = []
# Schema for this table
cursor.execute("SELECT sql FROM sqlite_master WHERE tbl_name = %s AND type = %s", [table_name, "table"])
results = cursor.fetchone()[0].strip()
results = results[results.index('(') + 1:results.rindex(')')]
# Walk through and look for references to other tables. SQLite doesn't
# really have enforced references, but since it echoes out the SQL used
# to create the table we can look for REFERENCES statements used there.
for field_index, field_desc in enumerate(results.split(',')):
field_desc = field_desc.strip()
if field_desc.startswith("UNIQUE"):
continue
m = re.search(r'"(.*)".*references (.*) \(["|](.*)["|]\)', field_desc, re.I)
if not m:
continue
# This will append (column_name, referenced_table_name, referenced_column_name) to key_columns
key_columns.append(tuple(s.strip('"') for s in m.groups()))
return key_columns
def get_primary_key_column(self, cursor, table_name):
"""Return the column name of the primary key for the given table."""
# Don't use PRAGMA because that causes issues with some transactions
cursor.execute(
"SELECT sql, type FROM sqlite_master "
"WHERE tbl_name = %s AND type IN ('table', 'view')",
[table_name]
)
row = cursor.fetchone()
if row is None:
raise ValueError("Table %s does not exist" % table_name)
create_sql, table_type = row
if table_type == 'view':
# Views don't have a primary key.
return None
fields_sql = create_sql[create_sql.index('(') + 1:create_sql.rindex(')')]
for field_desc in fields_sql.split(','):
field_desc = field_desc.strip()
m = re.match(r'(?:(?:["`\[])(.*)(?:["`\]])|(\w+)).*PRIMARY KEY.*', field_desc)
if m:
return m.group(1) if m.group(1) else m.group(2)
return None
def _get_foreign_key_constraints(self, cursor, table_name):
constraints = {}
cursor.execute('PRAGMA foreign_key_list(%s)' % self.connection.ops.quote_name(table_name))
for row in cursor.fetchall():
# Remaining on_update/on_delete/match values are of no interest.
id_, _, table, from_, to = row[:5]
constraints['fk_%d' % id_] = {
'columns': [from_],
'primary_key': False,
'unique': False,
'foreign_key': (table, to),
'check': False,
'index': False,
}
return constraints
def _parse_column_or_constraint_definition(self, tokens, columns):
token = None
is_constraint_definition = None
field_name = None
constraint_name = None
unique = False
unique_columns = []
check = False
check_columns = []
braces_deep = 0
for token in tokens:
if token.match(sqlparse.tokens.Punctuation, '('):
braces_deep += 1
elif token.match(sqlparse.tokens.Punctuation, ')'):
braces_deep -= 1
if braces_deep < 0:
# End of columns and constraints for table definition.
break
elif braces_deep == 0 and token.match(sqlparse.tokens.Punctuation, ','):
# End of current column or constraint definition.
break
# Detect column or constraint definition by first token.
if is_constraint_definition is None:
is_constraint_definition = token.match(sqlparse.tokens.Keyword, 'CONSTRAINT')
if is_constraint_definition:
continue
if is_constraint_definition:
# Detect constraint name by second token.
if constraint_name is None:
if token.ttype in (sqlparse.tokens.Name, sqlparse.tokens.Keyword):
constraint_name = token.value
elif token.ttype == sqlparse.tokens.Literal.String.Symbol:
constraint_name = token.value[1:-1]
# Start constraint columns parsing after UNIQUE keyword.
if token.match(sqlparse.tokens.Keyword, 'UNIQUE'):
unique = True
unique_braces_deep = braces_deep
elif unique:
if unique_braces_deep == braces_deep:
if unique_columns:
# Stop constraint parsing.
unique = False
continue
if token.ttype in (sqlparse.tokens.Name, sqlparse.tokens.Keyword):
unique_columns.append(token.value)
elif token.ttype == sqlparse.tokens.Literal.String.Symbol:
unique_columns.append(token.value[1:-1])
else:
# Detect field name by first token.
if field_name is None:
if token.ttype in (sqlparse.tokens.Name, sqlparse.tokens.Keyword):
field_name = token.value
elif token.ttype == sqlparse.tokens.Literal.String.Symbol:
field_name = token.value[1:-1]
if token.match(sqlparse.tokens.Keyword, 'UNIQUE'):
unique_columns = [field_name]
# Start constraint columns parsing after CHECK keyword.
if token.match(sqlparse.tokens.Keyword, 'CHECK'):
check = True
check_braces_deep = braces_deep
elif check:
if check_braces_deep == braces_deep:
if check_columns:
# Stop constraint parsing.
check = False
continue
if token.ttype in (sqlparse.tokens.Name, sqlparse.tokens.Keyword):
if token.value in columns:
check_columns.append(token.value)
elif token.ttype == sqlparse.tokens.Literal.String.Symbol:
if token.value[1:-1] in columns:
check_columns.append(token.value[1:-1])
unique_constraint = {
'unique': True,
'columns': unique_columns,
'primary_key': False,
'foreign_key': None,
'check': False,
'index': False,
} if unique_columns else None
check_constraint = {
'check': True,
'columns': check_columns,
'primary_key': False,
'unique': False,
'foreign_key': None,
'index': False,
} if check_columns else None
return constraint_name, unique_constraint, check_constraint, token
def _parse_table_constraints(self, sql, columns):
# Check constraint parsing is based of SQLite syntax diagram.
# https://www.sqlite.org/syntaxdiagrams.html#table-constraint
statement = sqlparse.parse(sql)[0]
constraints = {}
unnamed_constrains_index = 0
tokens = (token for token in statement.flatten() if not token.is_whitespace)
# Go to columns and constraint definition
for token in tokens:
if token.match(sqlparse.tokens.Punctuation, '('):
break
# Parse columns and constraint definition
while True:
constraint_name, unique, check, end_token = self._parse_column_or_constraint_definition(tokens, columns)
if unique:
if constraint_name:
constraints[constraint_name] = unique
else:
unnamed_constrains_index += 1
constraints['__unnamed_constraint_%s__' % unnamed_constrains_index] = unique
if check:
if constraint_name:
constraints[constraint_name] = check
else:
unnamed_constrains_index += 1
constraints['__unnamed_constraint_%s__' % unnamed_constrains_index] = check
if end_token.match(sqlparse.tokens.Punctuation, ')'):
break
return constraints
def get_constraints(self, cursor, table_name):
"""
Retrieve any constraints or keys (unique, pk, fk, check, index) across
one or more columns.
"""
constraints = {}
# Find inline check constraints.
try:
table_schema = cursor.execute(
"SELECT sql FROM sqlite_master WHERE type='table' and name=%s" % (
self.connection.ops.quote_name(table_name),
)
).fetchone()[0]
except TypeError:
# table_name is a view.
pass
else:
columns = {info.name for info in self.get_table_description(cursor, table_name)}
constraints.update(self._parse_table_constraints(table_schema, columns))
# Get the index info
cursor.execute("PRAGMA index_list(%s)" % self.connection.ops.quote_name(table_name))
for row in cursor.fetchall():
# SQLite 3.8.9+ has 5 columns, however older versions only give 3
# columns. Discard last 2 columns if there.
number, index, unique = row[:3]
cursor.execute(
"SELECT sql FROM sqlite_master "
"WHERE type='index' AND name=%s" % self.connection.ops.quote_name(index)
)
# There's at most one row.
sql, = cursor.fetchone() or (None,)
# Inline constraints are already detected in
# _parse_table_constraints(). The reasons to avoid fetching inline
# constraints from `PRAGMA index_list` are:
# - Inline constraints can have a different name and information
# than what `PRAGMA index_list` gives.
# - Not all inline constraints may appear in `PRAGMA index_list`.
if not sql:
# An inline constraint
continue
# Get the index info for that index
cursor.execute('PRAGMA index_info(%s)' % self.connection.ops.quote_name(index))
for index_rank, column_rank, column in cursor.fetchall():
if index not in constraints:
constraints[index] = {
"columns": [],
"primary_key": False,
"unique": bool(unique),
"foreign_key": None,
"check": False,
"index": True,
}
constraints[index]['columns'].append(column)
# Add type and column orders for indexes
if constraints[index]['index'] and not constraints[index]['unique']:
# SQLite doesn't support any index type other than b-tree
constraints[index]['type'] = Index.suffix
order_info = sql.split('(')[-1].split(')')[0].split(',')
orders = ['DESC' if info.endswith('DESC') else 'ASC' for info in order_info]
constraints[index]['orders'] = orders
# Get the PK
pk_column = self.get_primary_key_column(cursor, table_name)
if pk_column:
# SQLite doesn't actually give a name to the PK constraint,
# so we invent one. This is fine, as the SQLite backend never
# deletes PK constraints by name, as you can't delete constraints
# in SQLite; we remake the table with a new PK instead.
constraints["__primary__"] = {
"columns": [pk_column],
"primary_key": True,
"unique": False, # It's not actually a unique constraint.
"foreign_key": None,
"check": False,
"index": False,
}
constraints.update(self._get_foreign_key_constraints(cursor, table_name))
return constraints
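    # Editor's usage sketch (not part of Django itself): with a configured
    # SQLite 'default' database, the constraint mapping built above can be
    # inspected through the public connection object ('my_table' is a
    # placeholder table name):
    #   from django.db import connection
    #   with connection.cursor() as cursor:
    #       constraints = connection.introspection.get_constraints(cursor, 'my_table')
    #       for name, info in constraints.items():
    #           print(name, info['columns'], info['primary_key'], info['unique'])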
| kaedroho/django | django/db/backends/sqlite3/introspection.py | Python | bsd-3-clause | 18,452 | 0.001734 |
import json
import logging
import boto3
import unittest2
from mock import MagicMock
from monocyte.handler import iam as iam_handler
class IamInlinePolicyTests(unittest2.TestCase):
def setUp(self):
self.arn = ''
logging.captureWarnings(True)
self.iam_handler = iam_handler.InlinePolicy(MagicMock)
self.iam_handler.dry_run = True
self.client = boto3.client('iam')
def _create_role(self):
assume_role_policy = {
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Principal": {
"Service": "lambda.amazonaws.com"
},
"Action": "sts:AssumeRole"
}
]
}
self.client.create_role(
Path='/',
RoleName='integrationtest_role',
AssumeRolePolicyDocument=json.dumps(assume_role_policy)
)
def _put_inline_role_policy(self, inline_policy):
self.client.put_role_policy(
RoleName='integrationtest_role',
PolicyName='integrationtest_inline_policy',
PolicyDocument=json.dumps(inline_policy)
)
def _delete_inline_role_policy(self):
self.client.delete_role_policy(
RoleName='integrationtest_role',
PolicyName='integrationtest_inline_policy'
)
def _delete_role(self):
self.client.delete_role(RoleName='integrationtest_role')
def tearDown(self):
self._delete_inline_role_policy()
self._delete_role()
def _uniq(self, resources):
uniq_names = []
for resource in resources:
name = resource.wrapped['RoleName']
if not name.startswith('integrationtest_role'):
continue
uniq_names.append(name)
return uniq_names
def test_wildcard_in_inline_policy_action(self):
self._create_role()
inline_policy = {
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": [
"*"
],
"Resource": "arn:aws:s3:::example_bucket"
}
]
}
self._put_inline_role_policy(inline_policy)
unwanted_resource = self.iam_handler.fetch_unwanted_resources()
self.assertEqual(['integrationtest_role'], self._uniq(unwanted_resource))
def test_no_wildcard_in_inline_policy(self):
self._create_role()
inline_policy = {
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": [
"s3:read"
],
"Resource": "arn:aws:s3:::example_bucket"
}
]
}
self._put_inline_role_policy(inline_policy)
unwanted_resource = self.iam_handler.fetch_unwanted_resources()
self.assertEqual([], self._uniq(unwanted_resource))
def test_wildcard_in_inline_policy_resource(self):
self._create_role()
inline_policy = {
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": [
"S3:read"
],
"Resource": "*"
}
]
}
self._put_inline_role_policy(inline_policy)
unwanted_resource = self.iam_handler.fetch_unwanted_resources()
self.assertEqual([], self._uniq(unwanted_resource))
def test_wildcard_in_inline_policy_resource_and_action(self):
self._create_role()
inline_policy = {
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": [
"*"
],
"Resource": "*"
}
]
}
self._put_inline_role_policy(inline_policy)
unwanted_resource = self.iam_handler.fetch_unwanted_resources()
self.assertEqual(['integrationtest_role'], self._uniq(unwanted_resource))
if __name__ == "__main__":
unittest2.main()
| ImmobilienScout24/aws-monocyte | src/integrationtest/python/handler/iam_inline_policy_tests.py | Python | apache-2.0 | 4,364 | 0.000458 |
from pytest import mark
from django.urls import reverse
from email_template.models import Email
from assopy.models import AssopyUser
from conference.accounts import PRIVACY_POLICY_CHECKBOX, PRIVACY_POLICY_ERROR
from conference.models import CaptchaQuestion
from conference.users import RANDOM_USERNAME_LENGTH
from tests.common_tools import make_user, redirects_to, template_used, create_homepage_in_cms
SIGNUP_SUCCESFUL_302 = 302
SIGNUP_FAILED_200 = 200
login_url = reverse("accounts:login")
def check_login(client, email):
"Small helper for tests to check if login works correctly"
response = client.post(
login_url,
{
"email": email,
"password": "password",
"i_accept_privacy_policy": True,
},
)
# redirect means successful login, 200 means errors on form
LOGIN_SUCCESFUL_302 = 302
assert response.status_code == LOGIN_SUCCESFUL_302
return True
def activate_only_user():
user = AssopyUser.objects.get()
user.user.is_active = True
user.user.save()
@mark.django_db
def test_user_registration(client):
"""
Tests if users can create new account on the website
(to buy tickets, etc).
"""
# required for redirects to /
create_homepage_in_cms()
# 1. test if user can create new account
sign_up_url = reverse("accounts:signup_step_1_create_account")
response = client.get(sign_up_url)
assert response.status_code == 200
assert template_used(response, "conference/accounts/signup.html")
assert template_used(response, "conference/accounts/_login_with_google.html")
assert template_used(response, "conference/base.html")
assert PRIVACY_POLICY_CHECKBOX in response.content.decode("utf-8")
assert AssopyUser.objects.all().count() == 0
response = client.post(
sign_up_url,
{
"first_name": "Joe",
"last_name": "Doe",
"email": "joedoe@example.com",
"password1": "password",
"password2": "password",
},
follow=True,
)
assert response.status_code == SIGNUP_FAILED_200
assert "/privacy/" in PRIVACY_POLICY_CHECKBOX
assert "I consent to the use of my data" in PRIVACY_POLICY_CHECKBOX
assert response.context["form"].errors["__all__"] == [PRIVACY_POLICY_ERROR]
response = client.post(
sign_up_url,
{
"first_name": "Joe",
"last_name": "Doe",
"email": "joedoe@example.com",
"password1": "password",
"password2": "password",
"i_accept_privacy_policy": True,
},
follow=True,
)
# check if redirect was correct
assert template_used(
response, "conference/accounts/signup_please_verify_email.html"
)
assert template_used(response, "conference/base.html")
user = AssopyUser.objects.get()
assert user.name() == "Joe Doe"
assert user.user.is_active is False
# check if the random username was generated
assert len(user.user.username) == RANDOM_USERNAME_LENGTH
is_logged_in = client.login(
email="joedoe@example.com", password="password"
)
assert is_logged_in is False # user is inactive
response = client.get("/")
assert template_used(response, "conference/homepage/home_template.html")
assert "Joe Doe" not in response.content.decode("utf-8")
assert "Log out" not in response.content.decode("utf-8")
# enable the user
user.user.is_active = True
user.user.save()
is_logged_in = client.login(
email="joedoe@example.com", password="password"
)
assert is_logged_in
response = client.get("/")
assert template_used(response, "conference/homepage/home_template.html")
# checking if user is logged in.
assert "Joe Doe" in response.content.decode("utf-8")
@mark.django_db
def test_393_emails_are_lowercased_and_login_is_case_insensitive(client):
"""
https://github.com/EuroPython/epcon/issues/393
    Test if we can register a new account if we use the same email with
    different case.
"""
sign_up_url = reverse("accounts:signup_step_1_create_account")
response = client.post(
sign_up_url,
{
"first_name": "Joe",
"last_name": "Doe",
"email": "JoeDoe@example.com",
"password1": "password",
"password2": "password",
"i_accept_privacy_policy": True,
},
)
assert response.status_code == SIGNUP_SUCCESFUL_302
user = AssopyUser.objects.get()
assert user.name() == "Joe Doe"
assert user.user.email == "joedoe@example.com"
response = client.post(
sign_up_url,
{
"first_name": "Joe",
"last_name": "Doe",
"email": "jOEdOE@example.com",
"password1": "password",
"password2": "password",
"i_accept_privacy_policy": True,
},
)
assert response.status_code == SIGNUP_FAILED_200
assert response.context["form"].errors["email"] == ["Email already in use"]
user = AssopyUser.objects.get() # still only one user
assert user.name() == "Joe Doe"
assert user.user.email == "joedoe@example.com"
# activate user so we can log in
user.user.is_active = True
user.user.save()
# check if we can login with lowercase
# the emails will be lowercased in db, but user is still able to log in
# using whatever case they want
assert check_login(client, email="JoeDoe@example.com")
assert check_login(client, email="joedoe@example.com")
assert check_login(client, email="JoeDoe@example.com")
assert check_login(client, email="JOEDOE@example.com")
@mark.django_db
def test_703_test_captcha_questions(client):
"""
https://github.com/EuroPython/epcon/issues/703
"""
QUESTION = "Can you foo in Python?"
ANSWER = "Yes you can"
CaptchaQuestion.objects.create(question=QUESTION, answer=ANSWER)
Email.objects.create(code="verify-account")
sign_up_url = reverse("accounts:signup_step_1_create_account")
response = client.get(sign_up_url)
# we have question in captcha_question.initial and captcha_answer.label
assert "captcha_question" in response.content.decode("utf-8")
assert "captcha_answer" in response.content.decode("utf-8")
assert response.content.decode("utf-8").count(QUESTION) == 2
response = client.post(
sign_up_url,
{
"first_name": "Joe",
"last_name": "Doe",
"email": "JoeDoe@example.com",
"password1": "password",
"password2": "password",
"i_accept_privacy_policy": True,
},
)
assert response.status_code == SIGNUP_FAILED_200 # because missing captcha
response = client.post(
sign_up_url,
{
"first_name": "Joe",
"last_name": "Doe",
"email": "JoeDoe@example.com",
"password1": "password",
"password2": "password",
"captcha_question": QUESTION,
"captcha_answer": "No you can't",
"i_accept_privacy_policy": True,
},
)
assert response.status_code == SIGNUP_FAILED_200 # because wrong answer
wrong_answer = ["Sorry, that's a wrong answer"]
assert response.context["form"].errors["captcha_answer"] == wrong_answer
response = client.post(
sign_up_url,
{
"first_name": "Joe",
"last_name": "Doe",
"email": "JoeDoe@example.com",
"password1": "password",
"password2": "password",
"captcha_question": QUESTION,
"captcha_answer": ANSWER,
"i_accept_privacy_policy": True,
},
)
assert response.status_code == SIGNUP_SUCCESFUL_302
activate_only_user()
assert check_login(client, email="joedoe@example.com")
# if there are no enabled questions they don't appear on the form
CaptchaQuestion.objects.update(enabled=False)
response = client.get(sign_up_url)
assert "captcha_question" not in response.content.decode("utf-8")
assert "captcha_answer" not in response.content.decode("utf-8")
assert response.content.decode("utf-8").count(QUESTION) == 0
@mark.django_db
def test_872_login_redirects_to_user_dashboard(client):
u = make_user(email='joe@example.com', password='foobar')
response = client.post(
login_url,
{
"email": u.email,
"password": 'foobar',
"i_accept_privacy_policy": True,
},
)
assert response.status_code == 302
assert redirects_to(response, "/user-panel/")
| EuroPython/epcon | tests/test_user_login_and_registration.py | Python | bsd-2-clause | 8,704 | 0.00023 |
# Copyright (C) 2011 Canonical
#
# Authors:
# Matthew McGowan
# Michael Vogt
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; version 3.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
import logging
import time
import threading
import xapian
from gi.repository import GObject
from softwarecenter.enums import (SortMethods,
XapianValues,
NonAppVisibility,
DEFAULT_SEARCH_LIMIT)
from softwarecenter.backend.reviews import get_review_loader
from softwarecenter.db.database import (
SearchQuery, LocaleSorter, TopRatedSorter)
from softwarecenter.distro import get_distro
from softwarecenter.utils import ExecutionTime
LOG=logging.getLogger(__name__)
class AppEnquire(GObject.GObject):
"""
    An interface to enquire data from a xapian database.
    It can be combined with any xapian query and with
    a generic filter function (that can filter on data not
    available in xapian).
"""
# signal emited
__gsignals__ = {"query-complete" : (GObject.SIGNAL_RUN_FIRST,
GObject.TYPE_NONE,
()),
}
def __init__(self, cache, db):
"""
Init a AppEnquire object
:Parameters:
- `cache`: apt cache (for stuff like the overlay icon)
        - `db`: a xapian.Database that contains the applications
"""
GObject.GObject.__init__(self)
self.cache = cache
self.db = db
self.distro = get_distro()
self.search_query = SearchQuery(None)
self.nonblocking_load = True
self.sortmode = SortMethods.UNSORTED
self.nonapps_visible = NonAppVisibility.MAYBE_VISIBLE
self.limit = DEFAULT_SEARCH_LIMIT
self.filter = None
self.exact = False
self.nr_pkgs = 0
self.nr_apps = 0
self._matches = []
self.match_docids = set()
def __len__(self):
return len(self._matches)
@property
def matches(self):
""" return the list of matches as xapian.MSetItem """
return self._matches
def _threaded_perform_search(self):
self._perform_search_complete = False
# generate a name and ensure we never have two threads
# with the same name
names = [thread.name for thread in threading.enumerate()]
for i in range(threading.active_count()+1, 0, -1):
thread_name = 'ThreadedQuery-%s' % i
if not thread_name in names:
break
# create and start it
t = threading.Thread(
target=self._blocking_perform_search, name=thread_name)
t.start()
# don't block the UI while the thread is running
context = GObject.main_context_default()
while not self._perform_search_complete:
time.sleep(0.02) # 50 fps
while context.pending():
context.iteration()
t.join()
# call the query-complete callback
self.emit("query-complete")
def _get_estimate_nr_apps_and_nr_pkgs(self, enquire, q, xfilter):
# filter out docs of pkgs of which there exists a doc of the app
enquire.set_query(xapian.Query(xapian.Query.OP_AND,
q, xapian.Query("ATapplication")))
try:
tmp_matches = enquire.get_mset(0, len(self.db), None, xfilter)
except Exception:
LOG.exception("_get_estimate_nr_apps_and_nr_pkgs failed")
return (0, 0)
nr_apps = tmp_matches.get_matches_estimated()
enquire.set_query(xapian.Query(xapian.Query.OP_AND_NOT,
q, xapian.Query("XD")))
tmp_matches = enquire.get_mset(0, len(self.db), None, xfilter)
nr_pkgs = tmp_matches.get_matches_estimated() - nr_apps
return (nr_apps, nr_pkgs)
def _blocking_perform_search(self):
# WARNING this call may run in a thread, so its *not*
# allowed to touch gtk, otherwise hell breaks loose
# performance only: this is only needed to avoid the
# python __call__ overhead for each item if we can avoid it
# use a unique instance of both enquire and xapian database
# so concurrent queries dont result in an inconsistent database
# an alternative would be to serialise queries
enquire = xapian.Enquire(self.db.xapiandb)
if self.filter and self.filter.required:
xfilter = self.filter
else:
xfilter = None
# go over the queries
self.nr_apps, self.nr_pkgs = 0, 0
_matches = self._matches
match_docids = self.match_docids
for q in self.search_query:
LOG.debug("initial query: '%s'" % q)
# for searches we may want to disable show/hide
terms = [term for term in q]
exact_pkgname_query = (len(terms) == 1 and
terms[0].startswith("XP"))
with ExecutionTime("calculate nr_apps and nr_pkgs: "):
nr_apps, nr_pkgs = self._get_estimate_nr_apps_and_nr_pkgs(enquire, q, xfilter)
self.nr_apps += nr_apps
self.nr_pkgs += nr_pkgs
# only show apps by default (unless in always visible mode)
if self.nonapps_visible != NonAppVisibility.ALWAYS_VISIBLE:
if not exact_pkgname_query:
q = xapian.Query(xapian.Query.OP_AND,
xapian.Query("ATapplication"),
q)
LOG.debug("nearly completely filtered query: '%s'" % q)
# filter out docs of pkgs of which there exists a doc of the app
# FIXME: make this configurable again?
enquire.set_query(xapian.Query(xapian.Query.OP_AND_NOT,
q, xapian.Query("XD")))
# sort results
# cataloged time - what's new category
if self.sortmode == SortMethods.BY_CATALOGED_TIME:
if (self.db._axi_values and
"catalogedtime" in self.db._axi_values):
enquire.set_sort_by_value(
self.db._axi_values["catalogedtime"], reverse=True)
else:
LOG.warning("no catelogedtime in axi")
elif self.sortmode == SortMethods.BY_TOP_RATED:
review_loader = get_review_loader(self.cache, self.db)
sorter = TopRatedSorter(self.db, review_loader)
enquire.set_sort_by_key(sorter, reverse=True)
# search ranking - when searching
elif self.sortmode == SortMethods.BY_SEARCH_RANKING:
#enquire.set_sort_by_value(XapianValues.POPCON)
# use the default enquire.set_sort_by_relevance()
pass
# display name - all categories / channels
elif (self.db._axi_values and
"display_name" in self.db._axi_values):
enquire.set_sort_by_key(LocaleSorter(self.db), reverse=False)
            # fallback to pkgname - if needed?
else:
enquire.set_sort_by_value_then_relevance(
XapianValues.PKGNAME, False)
#~ try:
if self.limit == 0:
matches = enquire.get_mset(0, len(self.db), None, xfilter)
else:
matches = enquire.get_mset(0, self.limit, None, xfilter)
LOG.debug("found ~%i matches" % matches.get_matches_estimated())
#~ except:
#~ logging.exception("get_mset")
#~ matches = []
# promote exact matches to a "app", this will make the
# show/hide technical items work correctly
if exact_pkgname_query and len(matches) == 1:
self.nr_apps += 1
self.nr_pkgs -= 2
# add matches, but don't duplicate docids
with ExecutionTime("append new matches to existing ones:"):
for match in matches:
if not match.docid in match_docids:
_matches.append(match)
match_docids.add(match.docid)
# if we have no results, try forcing pkgs to be displayed
# if not NonAppVisibility.NEVER_VISIBLE is set
if (not _matches and
self.nonapps_visible not in (NonAppVisibility.ALWAYS_VISIBLE,
NonAppVisibility.NEVER_VISIBLE)):
self.nonapps_visible = NonAppVisibility.ALWAYS_VISIBLE
self._blocking_perform_search()
# wake up the UI if run in a search thread
self._perform_search_complete = True
return
def set_query(self, search_query,
limit=DEFAULT_SEARCH_LIMIT,
sortmode=SortMethods.UNSORTED,
filter=None,
exact=False,
nonapps_visible=NonAppVisibility.MAYBE_VISIBLE,
nonblocking_load=True,
persistent_duplicate_filter=False):
"""
Set a new query
:Parameters:
- `search_query`: a single search as a xapian.Query or a list
- `limit`: how many items the search should return (0 == unlimited)
- `sortmode`: sort the result
- `filter`: filter functions that can be used to filter the
data further. A python function that gets a pkgname
- `exact`: If true, indexes of queries without matches will be
maintained in the store (useful to show e.g. a row
with "??? not found")
        - `nonapps_visible`: decide whether to add non-apps to the model or not.
Can be NonAppVisibility.ALWAYS_VISIBLE/NonAppVisibility.MAYBE_VISIBLE
/NonAppVisibility.NEVER_VISIBLE
                             (NonAppVisibility.MAYBE_VISIBLE will return non-apps results
                             if no matching apps are found)
- `nonblocking_load`: set to False to execute the query inside the current
thread. Defaults to True to allow the search to be
performed without blocking the UI.
- 'persistent_duplicate_filter': if True allows filtering of duplicate
matches across multiple queries
"""
self.search_query = SearchQuery(search_query)
self.limit = limit
self.sortmode = sortmode
# make a copy for good measure
if filter:
self.filter = filter.copy()
else:
self.filter = None
self.exact = exact
self.nonblocking_load = nonblocking_load
self.nonapps_visible = nonapps_visible
# no search query means "all"
if not search_query:
self.search_query = SearchQuery(xapian.Query(""))
self.sortmode = SortMethods.BY_ALPHABET
self.limit = 0
# flush old query matches
self._matches = []
if not persistent_duplicate_filter:
self.match_docids = set()
# we support single and list search_queries,
# if list we append them one by one
with ExecutionTime("populate model from query: '%s' (threaded: %s)" % (
" ; ".join([str(q) for q in self.search_query]),
self.nonblocking_load)):
if self.nonblocking_load:
self._threaded_perform_search()
else:
self._blocking_perform_search()
return True
# def get_pkgnames(self):
# xdb = self.db.xapiandb
# pkgnames = []
# for m in self.matches:
# doc = xdb.get_document(m.docid)
# pkgnames.append(doc.get_value(XapianValues.PKGNAME) or doc.get_data())
# return pkgnames
# def get_applications(self):
# apps = []
# for pkgname in self.get_pkgnames():
# apps.append(Application(pkgname=pkgname))
# return apps
def get_docids(self):
""" get the docids of the current matches """
xdb = self.db.xapiandb
return [xdb.get_document(m.docid).get_docid() for m in self._matches]
def get_documents(self):
""" get the xapian.Document objects of the current matches """
xdb = self.db.xapiandb
return [xdb.get_document(m.docid) for m in self._matches]
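    # Editor's usage sketch (illustrative only; `cache` and `db` are assumed to
    # be an initialised apt cache and softwarecenter StoreDatabase):
    #   enquirer = AppEnquire(cache, db)
    #   enquirer.connect("query-complete", lambda e: LOG.info(
    #       "%i apps / %i pkgs", e.nr_apps, e.nr_pkgs))
    #   enquirer.set_query(xapian.Query("ATapplication"),
    #                      sortmode=SortMethods.BY_ALPHABET,
    #                      nonblocking_load=False)
    #   docs = enquirer.get_documents()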
| armikhael/software-center | softwarecenter/db/enquire.py | Python | gpl-3.0 | 13,223 | 0.002723 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
This script creates new items on Wikidata based on certain criteria.
* When was the (Wikipedia) page created?
* When was the last edit on the page?
* Does the page contain interwiki's?
This script understands various command-line arguments:
-lastedit The minimum number of days that has passed since the page was
last edited.
-pageage The minimum number of days that has passed since the page was
created.
-touch Do a null edit on every page which has a wikibase item.
"""
#
# (C) Multichill, 2014
# (C) Pywikibot team, 2014
#
# Distributed under the terms of the MIT license.
#
from __future__ import unicode_literals
__version__ = '$Id$'
#
import pywikibot
from pywikibot import pagegenerators, WikidataBot
from datetime import timedelta
class NewItemRobot(WikidataBot):
"""A bot to create new items."""
def __init__(self, generator, **kwargs):
"""Only accepts options defined in availableOptions."""
self.availableOptions.update({
'lastedit': 7,
'pageage': 21,
'touch': False,
})
super(NewItemRobot, self).__init__(**kwargs)
self.generator = pagegenerators.PreloadingGenerator(generator)
self.pageAge = self.getOption('pageage')
self.lastEdit = self.getOption('lastedit')
self.pageAgeBefore = self.repo.getcurrenttime() - timedelta(days=self.pageAge)
self.lastEditBefore = self.repo.getcurrenttime() - timedelta(days=self.lastEdit)
self.treat_missing_item = True
pywikibot.output('Page age is set to %s days so only pages created'
'\nbefore %s will be considered.'
% (self.pageAge, self.pageAgeBefore.isoformat()))
pywikibot.output('Last edit is set to %s days so only pages last edited'
'\nbefore %s will be considered.'
% (self.lastEdit, self.lastEditBefore.isoformat()))
def treat(self, page, item):
"""Treat page/item."""
if item and item.exists():
pywikibot.output(u'%s already has an item: %s.' % (page, item))
if self.getOption('touch'):
pywikibot.output(u'Doing a null edit on the page.')
page.put(page.text)
return
self.current_page = page
if page.isRedirectPage():
pywikibot.output(u'%s is a redirect page. Skipping.' % page)
return
if page.editTime() > self.lastEditBefore:
pywikibot.output(
u'Last edit on %s was on %s.\nToo recent. Skipping.'
% (page, page.editTime().isoformat()))
return
if page.oldest_revision.timestamp > self.pageAgeBefore:
pywikibot.output(
u'Page creation of %s on %s is too recent. Skipping.'
                % (page, page.oldest_revision.timestamp.isoformat()))
return
if page.langlinks():
# FIXME: Implement this
pywikibot.output(
"Found language links (interwiki links).\n"
"Haven't implemented that yet so skipping.")
return
# FIXME: i18n
summary = (u'Bot: New item with sitelink from %s'
% page.title(asLink=True, insite=self.repo))
data = {'sitelinks':
{page.site.dbName():
{'site': page.site.dbName(),
'title': page.title()}
},
'labels':
{page.site.lang:
{'language': page.site.lang,
'value': page.title()}
}
}
pywikibot.output(summary)
item = pywikibot.ItemPage(page.site.data_repository())
item.editEntity(data, summary=summary)
# And do a null edit to force update
page.put(page.text)
def main(*args):
"""
Process command line arguments and invoke bot.
If args is an empty list, sys.argv is used.
@param args: command line arguments
@type args: list of unicode
"""
# Process global args and prepare generator args parser
local_args = pywikibot.handle_args(args)
gen = pagegenerators.GeneratorFactory()
options = {}
for arg in local_args:
if (
arg.startswith('-pageage:') or
arg.startswith('-lastedit:')):
key, val = arg.split(':', 1)
options[key[1:]] = int(val)
elif gen.handleArg(arg):
pass
else:
options[arg[1:].lower()] = True
generator = gen.getCombinedGenerator()
if not generator:
pywikibot.showHelp()
return
bot = NewItemRobot(generator, **options)
bot.run()
if __name__ == "__main__":
main()
| xZise/pywikibot-core | scripts/newitem.py | Python | mit | 4,866 | 0.000822 |
# -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html
import json
import pymongo
from scrapy.settings import Settings
from scrapy.exceptions import DropItem
from scrapy import log
from pymongo import ReturnDocument
class YellowPipeline(object):
def __init__(self, mongo_uri, mongo_db, collection_name):
self.mongo_uri = mongo_uri
self.mongo_db = mongo_db
self.collection_name = collection_name
@classmethod
def from_crawler(cls, crawler):
return cls(
mongo_uri=crawler.settings.get('MONGODB_SERVER'),
mongo_db=crawler.settings.get('MONGODB_DB'),
collection_name=crawler.settings.get('MONGODB_COLLECTION')
)
def open_spider(self, spider):
log.msg("Open client", level=log.DEBUG, spider=spider)
self.client = pymongo.MongoClient(self.mongo_uri)
self.db = self.client[self.mongo_db]
def close_spider(self, spider):
log.msg("Close client", level=log.DEBUG, spider=spider)
self.client.close()
def process_item(self, item, spider):
#self.db[self.collection_name].insert(dict(item))
#if('email' in item):
self.db[self.collection_name].find_one_and_update(
{ 'key': item['key'] },
{ '$set': dict(item) },
upsert=True)
log.msg("Contact added to MongoDB database!", level=log.DEBUG, spider=spider)
return item
class DuplicatesPipeline(object):
def __init__(self):
self.ids_seen = set()
def process_item(self, item, spider):
if item['id'] in self.ids_seen:
raise DropItem("Duplicate item found: %s" % item)
else:
self.ids_seen.add(item['id'])
return item
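# Editor's note -- enabling these pipelines in the project's settings.py
# (the priority numbers are assumptions; the module path follows this
# project's yellow/ package layout):
#   ITEM_PIPELINES = {
#       'yellow.pipelines.DuplicatesPipeline': 100,
#       'yellow.pipelines.YellowPipeline': 300,
#   }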
| diegordzr/YellowSpider | yellow/yellow/pipelines.py | Python | mit | 1,714 | 0.021004 |
student_phoneNumber_name = {'1': 'a', '3': 'c', '2': 'b'}  # string keys so they match input() lookups
def Handler() :
while (1) :
		choice = int(input("Enter :\t 1 - to search student name \n \t 2 - to insert new student record \n \t 0 - to quit\n"))
print(choice)
if (choice == 1) :
if (student_phoneNumber_name) :
phone_number = input("Enter student's phone number : ")
name = SearchRecord(phone_number)
if (name) :
print("name : " + name )
else :
print(str(phone_number) + "Does not exist in record" + str(name))
else :
print("Record is empty ")
elif (choice == 2) :
phone_number = input("Enter student's phone number : ")
name = input("Enter student's name : ") #best example to understand input() and raw_input()
InsertRecord(phone_number, name)
elif (choice == 0) :
break
else:
print("Enter correct choice")
def InsertRecord(x, y):
student_phoneNumber_name[x] = y
return;
def SearchRecord(x):
print(x)
if (x in student_phoneNumber_name) :
return student_phoneNumber_name[x]
return False
Handler()
print(student_phoneNumber_name)
| ajitghz582/PythonLearning | DAY_1_ASSIGNMENTS/1_name_phone_number.py | Python | mit | 1,070 | 0.052336 |