| repo_name (stringlengths 5-100) | path (stringlengths 4-231) | language (stringclasses, 1 value) | license (stringclasses, 15 values) | size (int64, 6-947k) | score (float64, 0-0.34) | prefix (stringlengths 0-8.16k) | middle (stringlengths 3-512) | suffix (stringlengths 0-8.17k) |
|---|---|---|---|---|---|---|---|---|
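Each row below pairs repository metadata with one source file split into prefix, middle, and suffix spans (a fill-in-the-middle layout); concatenating the three spans recovers the file text, since the split points fall at arbitrary character offsets, sometimes mid-identifier. A minimal sketch of consuming such rows, assuming the dump is published as a Hugging Face dataset under a hypothetical id:

```python
from datasets import load_dataset

# Hypothetical dataset id; substitute the real repository id for this dump.
ds = load_dataset("example-org/python-fim-corpus", split="train", streaming=True)

for row in ds:
    # Reassemble the original file from the three FIM spans.
    full_source = row["prefix"] + row["middle"] + row["suffix"]
    print(row["repo_name"], row["path"], row["license"], len(full_source))
    break  # inspect only the first row
```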
| tableau/TabPy | tabpy/tabpy_server/handlers/endpoint_handler.py | Python | mit | 4,926 | 0.001015 |
"""
HTTP handler to serve a specific endpoint request like
http://myserver:9004/endpoints/mymodel
For how generic endpoint requests are served, look
at endpoints_handler.py
"""
import json
import logging
import shutil
from tabpy.tabpy_server.common.util import format_exception
from tabpy.tabpy_server.handlers import ManagementHandler
from tabpy.tabpy_server.handlers.base_handler import STAGING_THREAD
from tabpy.tabpy_server.management.state import get_query_object_path
from tabpy.tabpy_server.psws.callbacks import on_state_change
from tabpy.tabpy_server.handlers.util import AuthErrorStates
from tornado import gen
class EndpointHandler(ManagementHandler):
def initialize(self, app):
super(EndpointHandler, self).initialize(app)
def get(self, endpoint_name):
if self.should_fail_with_auth_error() != AuthErrorStates.NONE:
self.fail_with_auth_error()
return
self.logger.log(logging.DEBUG, f"Processing GET for /endpoints/{endpoint_name}")
self._add_CORS_header()
if not endpoint_name:
self.write(json.dumps(self.tabpy_state.get_endpoints()))
else:
if endpoint_name in self.tabpy_state.get_endpoints():
self.write(json.dumps(self.tabpy_state.get_endpoints()[endpoint_name]))
else:
self.error_out(
404,
"Unknown endpoint",
info=f"Endpoint {endpoint_name} is not found",
)
@gen.coroutine
def put(self, name):
if self.should_fail_with_auth_error() != AuthErrorStates.NONE:
self.fail_with_auth_error()
return
self.logger.log(logging.DEBUG, f"Processing PUT for /endpoints/{name}")
try:
if not self.request.body:
self.error_out(400, "Input body cannot be empty")
self.finish()
return
try:
request_data = json.loads(self.request.body.decode("utf-8"))
except BaseException as ex:
self.error_out(
400, log_message="Failed to decode input body", info=str(ex)
)
self.finish()
return
# check if endpoint exists
endpoints = self.tabpy_state.get_endpoints(name)
if len(endpoints) == 0:
self.error_out(404, f"endpoint {name} does not exist.")
self.finish()
return
new_version = int(endpoints[name]["version"]) + 1
self.logger.log(logging.INFO, f"Endpoint info: {request_data}")
err_msg = yield self._add_or_update_endpoint(
"update", name, new_version, request_data
)
if err_msg:
self.error_out(400, err_msg)
self.finish()
else:
self.write(self.tabpy_state.get_endpoints(name))
self.finish()
except Exception as e:
err_msg = format_exception(e, "update_endpoint")
self.error_out(500, err_msg)
self.finish()
@gen.coroutine
def delete(self, name):
if self.should_fail_with_auth_error() != AuthErrorStates.NONE:
self.fail_with_auth_error()
return
self.logger.log(logging.DEBUG, f"Processing DELETE for /endpoints/{name}")
try:
endpoints = self.tabpy_state.get_endpoints(name)
if len(endpoints) == 0:
self.error_out(404, f"endpoint {name} does not exist.")
self.finish()
return
# update state
try:
endpoint_info = self.tabpy_state.delete_endpoint(name)
except Exception as e:
self.error_out(400, f"Error when removing endpoint: {e.message}")
self.finish()
return
# delete files
if endpoint_info["type"] != "alias":
delete_path = get_query_object_path(
self.settings["state_file_path"], name, None
)
try:
yield self._delete_po_future(delete_path)
except Exception as e:
self.error_out(400, f"Error while deleting: {e}")
self.finish()
return
self.set_status(204)
self.finish()
except Exception as e:
err_msg = format_exception(e, "delete endpoint")
self.error_out(500, err_msg)
self.finish()
on_state_change(
self.settings, self.tabpy_state, self.python_service, self.logger
)
@gen.coroutine
def _delete_po_future(self, delete_path):
future = STAGING_THREAD.submit(shutil.rmtree, delete_path)
ret = yield future
raise gen.Return(ret)
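The handler above answers GET /endpoints/&lt;name&gt; with JSON metadata for one deployed model, and an empty name falls back to listing every endpoint. A minimal client sketch, assuming a TabPy server on localhost:9004 with no authentication and a deployed endpoint named mymodel:

```python
import json
import urllib.request

BASE_URL = "http://localhost:9004"  # assumed server address

# Metadata for a single endpoint, as served by EndpointHandler.get().
with urllib.request.urlopen(f"{BASE_URL}/endpoints/mymodel") as resp:
    info = json.load(resp)

# "version" and "type" are the keys the handler itself relies on.
print(info.get("version"), info.get("type"))
```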
| jamesbeebop/evennia | evennia/contrib/tutorial_examples/cmdset_red_button.py | Python | bsd-3-clause | 9,705 | 0.001443 |
"""
This defines the cmdset for the red_button. Here we have defined
the commands and the cmdset in the same module, but if you
have many different commands to merge it is often better
to define the cmdset separately, picking and choosing from
among the available commands as to what should be included in the
cmdset - this way you can often re-use the commands too.
"""
import random
from evennia import Command, CmdSet
# Some simple commands for the red button
# ------------------------------------------------------------
# Commands defined on the red button
# ------------------------------------------------------------
class CmdNudge(Command):
"""
Try to nudge the button's lid
Usage:
nudge lid
This command will have you try to
push the lid of the button away.
"""
key = "nudge lid" # two-word command name!
aliases = ["nudge"]
locks = "cmd:all()"
def func(self):
"""
nudge the lid. Random chance of success to open it.
"""
rand = random.random()
if rand < 0.5:
self.caller.msg("You nudge at the lid. It seems stuck.")
elif rand < 0.7:
self.caller.msg("You move the lid back and forth. It won't budge.")
else:
self.caller.msg("You manage to get a nail under the lid.")
self.caller.execute_cmd("open lid")
class CmdPush(Command):
"""
Push the red button
Usage:
push button
"""
key = "push button"
aliases = ["push", "press button", "press"]
locks = "cmd:all()"
def func(self):
"""
Note that we choose to implement this with checking for
if the lid is open/closed. This is because this command
is likely to be tried regardless of the state of the lid.
An alternative would be to make two versions of this command
and tuck them into the cmdset linked to the Open and Closed
lid-state respectively.
"""
if self.obj.db.lid_open:
string = "You reach out to press the big red button ..."
string += "\n\nA BOOM! A bright light blinds you!"
string += "\nThe world goes dark ..."
self.caller.msg(string)
self.caller.location.msg_contents(
"%s presses the button. BOOM! %s is blinded by a flash!"
% (self.caller.name, self.caller.name),
exclude=self.caller,
)
# the button's method will handle all setup of scripts etc.
self.obj.press_button(self.caller)
else:
string = "You cannot push the button - there is a glass lid covering it."
self.caller.msg(string)
class CmdSmashGlass(Command):
"""
smash glass
Usage:
smash glass
Try to smash the glass of the button.
"""
key = "smash glass"
aliases = ["smash lid", "break lid", "smash"]
locks = "cmd:all()"
def func(self):
"""
The lid won't open, but there is a small chance
of causing the lamp to break.
"""
rand = random.random()
if rand < 0.2:
string = "You smash your hand against the glass"
string += " with all your might. The lid won't budge"
string += " but you cause quite the tremor through the button's mount."
string += "\nIt looks like the button's lamp stopped working for the time being."
self.obj.lamp_works = False
elif rand < 0.6:
string = "You hit the lid hard. It doesn't move an inch."
else:
string = "You place a well-aimed fist against the glass of the lid."
string += " Unfortunately all you get is a pain in your hand. Maybe"
string += " you should just try to open the lid instead?"
self.caller.msg(string)
self.caller.location.msg_contents(
"%s tries to smash the glass of the button." % (self.caller.name), exclude=self.caller
)
class CmdOpenLid(Command):
"""
open lid
Usage:
open lid
"""
key = "open lid"
aliases = ["open button", "open"]
locks = "cmd:all()"
def func(self):
"simply call the right function."
if self.obj.db.lid_locked:
self.caller.msg("This lid seems locked in place for the moment.")
return
string = "\nA ticking sound is heard, like a winding mechanism. Seems "
string += "the lid will soon close again."
self.caller.msg(string)
self.caller.location.msg_contents(
"%s opens the lid of the button." % (self.caller.name), exclude=self.caller
)
# add the relevant cmdsets to button
self.obj.cmdset.add(LidClosedCmdSet)
# call object method
self.obj.open_lid()
class CmdCloseLid(Command):
"""
close the lid
Usage:
close lid
Closes the lid of the red button.
"""
key = "close lid"
aliases = ["close"]
locks = "cmd:all()"
def func(self):
"Close the lid"
self.obj.close_lid()
# this will clean out scripts dependent on lid being open.
self.caller.msg("You close the button's lid. It clicks back into place.")
self.caller.location.msg_contents(
"%s closes the button's lid." % (self.caller.name), exclude=self.caller
)
class CmdBlindLook(Command):
"""
Looking around in darkness
Usage:
look <obj>
... not that there's much to see in the dark.
"""
key = "look"
aliases = ["l", "get", "examine", "ex", "feel", "listen"]
locks = "cmd:all()"
def func(self):
"This replaces all the senses when blinded."
# we decide what to reply based on which command was
# actually tried
if self.cmdstring == "get":
string = "You fumble around blindly without finding anything."
elif self.cmdstring == "examine":
string = "You try to examine your surroundings, but can't see a thing."
elif self.cmdstring == "listen":
string = "You are deafened by the boom."
elif self.cmdstring == "feel":
string = "You fumble around, hands outstretched. You bump your knee."
else:
# trying to look
string = "You are temporarily blinded by the flash. "
string += "Until it wears off, all you can do is feel around blindly."
self.caller.msg(string)
self.caller.location.msg_contents(
"%s stumbles around, blinded." % (self.caller.name), exclude=self.caller
)
class CmdBlindHelp(Command):
"""
Help function while in the blinded state
Usage:
help
"""
key = "help"
aliases = "h"
locks = "cmd:all()"
def func(self):
"Give a message."
self.caller.msg("You are beyond help ... until you can see again.")
# ---------------------------------------------------------------
# Command sets for the red button
# ---------------------------------------------------------------
# We next tuck these commands into their respective command sets.
# (note that we are overdoing the cmdset separation a bit here
# to show how it works).
class DefaultCmdSet(CmdSet):
"""
The default cmdset always sits
on the button object and whereas other
command sets may be added/merge onto it
and hide it, removing them will always
bring it back. It's added to the object
using obj.cmdset.add_default().
"""
key = "RedButtonDefault"
mergetype = "Union" # this is default, we don't really need to put it here.
def at_cmdset_creation(self):
"Init the cmdset"
self.add(CmdPush())
class LidClosedCmdSet(CmdSet):
"""
A simple cmdset tied to the redbutton object.
It contains the commands that launches the other
command sets, making the red button a self-contained
item (i.e. you don't have to manually add any
scripts etc to it when creating it).
"""
key = "LidClosedCmdSet"
# default Union is used *except* if we are adding to a
# cmdset named LidOpenCmdSet - this one w
| xbmc/xbmc-antiquated | xbmc/lib/libPython/Python/Lib/plat-sunos5/IN.py | Python | gpl-2.0 | 28,151 | 0.005044 |
# Generated by h2py from /usr/include/netinet/in.h
# Included from sys/feature_tests.h
# Included from sys/isa_defs.h
_CHAR_ALIGNMENT = 1
_SHORT_ALIGNMENT = 2
_INT_ALIGNMENT = 4
_LONG_ALIGNMENT = 8
_LONG_LONG_ALIGNMENT = 8
_DOUBLE_ALIGNMENT = 8
_LONG_DOUBLE_ALIGNMENT = 16
_POINTER_ALIGNMENT = 8
_MAX_ALIGNMENT = 16
_ALIGNMENT_REQUIRED = 1
_CHAR_ALIGNMENT = 1
_SHORT_ALIGNMENT = 2
_INT_ALIGNMENT = 4
_LONG_ALIGNMENT = 4
_LONG_LONG_ALIGNMENT = 4
_DOUBLE_ALIGNMENT = 4
_LONG_DOUBLE_ALIGNMENT = 4
_POINTER_ALIGNMENT = 4
_MAX_ALIGNMENT = 4
_ALIGNMENT_REQUIRED = 0
_CHAR_ALIGNMENT = 1
_SHORT_ALIGNMENT = 2
_INT_ALIGNMENT = 4
_LONG_LONG_ALIGNMENT = 8
_DOUBLE_ALIGNMENT = 8
_ALIGNMENT_REQUIRED = 1
_LONG_ALIGNMENT = 4
_LONG_DOUBLE_ALIGNMENT = 8
_POINTER_ALIGNMENT = 4
_MAX_ALIGNMENT = 8
_LONG_ALIGNMENT = 8
_LONG_DOUBLE_ALIGNMENT = 16
_POINTER_ALIGNMENT = 8
_MAX_ALIGNMENT = 16
_POSIX_C_SOURCE = 1
_LARGEFILE64_SOURCE = 1
_LARGEFILE_SOURCE = 1
_FILE_OFFSET_BITS = 64
_FILE_OFFSET_BITS = 32
_POSIX_C_SOURCE = 199506L
_POSIX_PTHREAD_SEMANTICS = 1
_XOPEN_VERSION = 500
_XOPEN_VERSION = 4
_XOPEN_VERSION = 3
from TYPES import *
# Included from sys/stream.h
# Included from sys/vnode.h
from TYPES import *
# Included from sys/t_lock.h
# Included from sys/machlock.h
from TYPES import *
LOCK_HELD_VALUE = 0xff
def SPIN_LOCK(pl): return ((pl) > ipltospl(LOCK_LEVEL))
def LOCK_SAMPLE_INTERVAL(i): return (((i) & 0xff) == 0)
CLOCK_LEVEL = 10
LOCK_LEVEL = 10
DISP_LEVEL = (LOCK_LEVEL + 1)
PTR24_LSB = 5
PTR24_MSB = (PTR24_LSB + 24)
PTR24_ALIGN = 32
PTR24_BASE = 0xe0000000
# Included from sys/param.h
from TYPES import *
_POSIX_VDISABLE = 0
MAX_INPUT = 512
MAX_CANON = 256
UID_NOBODY = 60001
GID_NOBODY = UID_NOBODY
UID_NOACCESS = 60002
MAX_TASKID = 999999
MAX_MAXPID = 999999
DEFAULT_MAXPID = 999999
DEFAULT_JUMPPID = 100000
DEFAULT_MAXPID = 30000
DEFAULT_JUMPPID = 0
MAXUID = 2147483647
MAXPROJID = MAXUID
MAXLINK = 32767
NMOUNT = 40
CANBSIZ = 256
NOFILE = 20
NGROUPS_UMIN = 0
NGROUPS_UMAX = 32
NGROUPS_MAX_DEFAULT = 16
NZERO = 20
NULL = 0L
NULL = 0
CMASK = 022
CDLIMIT = (1L<<11)
NBPS = 0x20000
NBPSCTR = 512
UBSIZE = 512
SCTRSHFT = 9
SYSNAME = 9
PREMOTE = 39
MAXPATHLEN = 1024
MAXSYMLINKS = 20
MAXNAMELEN = 256
NADDR = 13
PIPE_BUF = 5120
PIPE_MAX = 5120
NBBY = 8
MAXBSIZE = 8192
DEV_BSIZE = 512
DEV_BSHIFT = 9
MAXFRAG = 8
MAXOFF32_T = 0x7fffffff
MAXOFF_T = 0x7fffffffffffffffl
MAXOFFSET_T = 0x7fffffffffffffffl
MAXOFF_T = 0x7fffffffl
MAXOFFSET_T = 0x7fffffff
def btodb(bytes): return \
def dbtob(db): return \
def lbtodb(bytes): return \
def ldbtob(db): return \
NCARGS32 = 0x100000
NCARGS64 = 0x200000
NCARGS = NCARGS64
NCARGS = NCARGS32
FSHIFT = 8
FSCALE = (1<<FSHIFT)
def DELAY(n): return drv_usecwait(n)
def mmu_ptob(x): return ((x) << MMU_PAGESHIFT)
def mmu_btop(x): return (((x)) >> MMU_PAGESHIFT)
def mmu_btopr(x): return ((((x) + MMU_PAGEOFFSET) >> MMU_PAGESHIFT))
def mmu_ptod(x): return ((x) << (MMU_PAGESHIFT - DEV_BSHIFT))
def ptod(x): return ((x) << (PAGESHIFT - DEV_BSHIFT))
def ptob(x): return ((x) << PAGESHIFT)
def btop(x): return (((x) >> PAGESHIFT))
def btopr(x): return ((((x) + PAGEOFFSET) >> PAGESHIFT))
def dtop(DD): return (((DD) + NDPP - 1) >> (PAGESHIFT - DEV_BSHIFT))
def dtopt(DD): return ((DD) >> (PAGESHIFT - DEV_BSHIFT))
_AIO_LISTIO_MAX = (4096)
_AIO_MAX = (-1)
_MQ_OPEN_MAX = (32)
_MQ_PRIO_MAX = (32)
_SEM_NSEMS_MAX = INT_MAX
_SEM_VALUE_MAX = INT_MAX
# Included from sys/unistd.h
_CS_PATH = 65
_CS_LFS_CFLAGS = 68
_CS_LFS_LDFLAGS = 69
_CS_LFS_LIBS = 70
_CS_LFS_LINTFLAGS = 71
_CS_LFS64_CFLAGS = 72
_CS_LFS64_LDFLAGS = 73
_CS_LFS64_LIBS = 74
_CS_LFS64_LINTFLAGS = 75
_CS_XBS5_ILP32_OFF32_CFLAGS = 700
_CS_XBS5_ILP32_OFF32_LDFLAGS = 701
_CS_XBS5_ILP32_OFF32_LIBS = 702
_CS_XBS5_ILP32_OFF32_LINTFLAGS = 703
_CS_XBS5_ILP32_OFFBIG_CFLAGS = 705
_CS_XBS5_ILP32_OFFBIG_LDFLAGS = 706
_CS_XBS5_ILP32_OFFBIG_LIBS = 707
_CS_XBS5_ILP32_OFFBIG_LINTFLAGS = 708
_CS_XBS5_LP64_OFF64_CFLAGS = 709
_CS_XBS5_LP64_OFF64_LDFLAGS = 710
_CS_XBS5_LP64_OFF64_LIBS = 711
_CS_XBS5_LP64_OFF64_LINTFLAGS = 712
_CS_XBS5_LPBIG_OFFBIG_CFLAGS = 713
_CS_XBS5_LPBIG_OFFBIG_LDFLAGS = 714
_CS_XBS5_LPBIG_OFFBIG_LIBS = 715
_CS_XBS5_LPBIG_OFFBIG_LINTFLAGS = 716
_SC_ARG_MAX = 1
_SC_CHILD_MAX = 2
_SC_CLK_TCK = 3
_SC_NGROUPS_MAX = 4
_SC_OPEN_MAX = 5
_SC_JOB_CONTROL = 6
_SC_SAVED_IDS = 7
_SC_VERSION = 8
_SC_PASS_MAX = 9
_SC_LOGNAME_MAX = 10
_SC_PAGESIZE = 11
_SC_XOPEN_VERSION = 12
_SC_NPROCESSORS_CONF = 14
_SC_NPROCESSORS_ONLN = 15
_SC_STREAM_MAX = 16
_SC_TZNAME_MAX = 17
_SC_AIO_LISTIO_MAX = 18
_SC_AIO_MAX = 19
_SC_AIO_PRIO_DELTA_MAX = 20
_SC_ASYNCHRONOUS_IO = 21
_SC_DELAYTIMER_MAX = 22
_SC_FSYNC = 23
_SC_MAPPED_FILES = 24
_SC_MEMLOCK = 25
_SC_MEMLOCK_RANGE = 26
_SC_MEMORY_PROTECTION = 27
_SC_MESSAGE_PASSING = 28
_SC_MQ_OPEN_MAX = 29
_SC_MQ_PRIO_MAX = 30
_SC_PRIORITIZED_IO = 31
_SC_PRIORITY_SCHEDULING = 32
_SC_REALTIME_SIGNALS = 33
_SC_RTSIG_MAX = 34
_SC_SEMAPHORES = 35
_SC_SEM_NSEMS_MAX = 36
_SC_SEM_VALUE_MAX = 37
_SC_SHARED_MEMORY_OBJECTS = 38
_SC_SIGQUEUE_MAX = 39
_SC_SIGRT_MIN = 40
_SC_SIGRT_MAX = 41
_SC_SYNCHRONIZED_IO = 42
_SC_TIMERS = 43
_SC_TIMER_MAX = 44
_SC_2_C_BIND = 45
_SC_2_C_DEV = 46
_SC_2_C_VERSION = 47
_SC_2_FORT_DEV = 48
_SC_2_FORT_RUN = 49
_SC_2_LOCALEDEF = 50
_SC_2_SW_DEV = 51
_SC_2_UPE = 52
_SC_2_VERSION = 53
_SC_BC_BASE_MAX = 54
_SC_BC_DIM_MAX = 55
_SC_BC_SCALE_MAX = 56
_SC_BC_STRING_MAX = 57
_SC_COLL_WEIGHTS_MAX = 58
_SC_EXPR_NEST_MAX = 59
_SC_LINE_MAX = 60
_SC_RE_DUP_MAX = 61
_SC_XOPEN_CRYPT = 62
_SC_XOPEN_ENH_I18N = 63
_SC_XOPEN_SHM = 64
_SC_2_CHAR_TERM = 66
_SC_XOPEN_XCU_VERSION = 67
_SC_ATEXIT_MAX = 76
_SC_IOV_MAX = 77
_SC_XOPEN_UNIX = 78
_SC_PAGE_SIZE = _SC_PAGESIZE
_SC_T_IOV_MAX = 79
_SC_PHYS_PAGES = 500
_SC_AVPHYS_PAGES = 501
_SC_COHER_BLKSZ = 503
_SC_SPLIT_CACHE = 504
_SC_ICACHE_SZ = 505
_SC_DCACHE_SZ = 506
_SC_ICACHE_LINESZ = 507
_SC_DCACHE_LINESZ = 508
_SC_ICACHE_BLKSZ = 509
_SC_DCACHE_BLKSZ = 510
_SC_DCACHE_TBLKSZ = 511
_SC_ICACHE_ASSOC = 512
_SC_DCACHE_ASSOC = 513
_SC_MAXPID = 514
_SC_STACK_PROT = 515
_SC_THREAD_DESTRUCTOR_ITERATIONS = 568
_SC_GETGR_R_SIZE_MAX = 569
_SC_GETPW_R_SIZE_MAX = 570
_SC_LOGIN_NAME_MAX = 571
_SC_THREAD_KEYS_MAX = 572
_SC_THREAD_STACK_MIN = 573
_SC_THREAD_THREADS_MAX = 574
_SC_TTY_NAME_MAX = 575
_SC_THREADS = 576
_SC_THREAD_ATTR_STACKADDR = 577
_SC_THREAD_ATTR_STACKSIZE = 578
_SC_THREAD_PRIORITY_SCHEDULING = 579
_SC_THREAD_PRIO_INHERIT = 580
_SC_THREAD_PRIO_PROTECT = 581
_SC_THREAD_PROCESS_SHARED = 582
_SC_THREAD_SAFE_FUNCTIONS = 583
_SC_XOPEN_LEGACY = 717
_SC_XOPEN_REALTIME = 718
_SC_XOPEN_REALTIME_THREADS = 719
_SC_XBS5_ILP32_OFF32 = 720
_SC_XBS5_ILP32_OFFBIG = 721
_SC_XBS5_LP64_OFF64 = 722
_SC_XBS5_LPBIG_OFFBIG = 723
_PC_LINK_MAX = 1
_PC_MAX_CANON = 2
_PC_MAX_INPUT = 3
_PC_NAME_MAX = 4
_PC_PATH_MAX = 5
_PC_PIPE_BUF = 6
_PC_NO_TRUNC = 7
_PC_VDISABLE = 8
_PC_CHOWN_RESTRICTED = 9
_PC_ASYNC_IO = 10
_PC_PRIO_IO = 11
_PC_SYNC_IO = 12
_PC_FILESIZEBITS = 67
_PC_LAST = 67
_POSIX_VERSION = 199506L
_POSIX2_VERSION = 199209L
_POSIX2_C_VERSION = 199209L
_XOPEN_XCU_VERSION = 4
_XOPEN_REALTIME = 1
_XOPEN_ENH_I18N = 1
_XOPEN_SHM = 1
_POSIX2_C_BIND = 1
_POSIX2_CHAR_TERM = 1
_POSIX2_LOCALEDEF = 1
_POSIX2_C_DEV = 1
_POSIX2_SW_DEV = 1
_POSIX2_UPE = 1
# Included from sys/mutex.h
from TYPES import *
def MUTEX_HELD(x): return (mutex_owned(x))
# Included from sys/rwlock.h
from TYPES import *
def RW_READ_HELD(x): return (rw_read_held((x)))
def RW_WRITE_HELD(x): return (rw_write_held((x)))
def RW_LOCK_HELD(x): return (rw_lock_held((x)))
def RW_ISWRITER(x): return (rw_iswriter(x))
# Included from sys/semaphore.h
# Included from sys/thread.h
from TYPES import *
# Included from sys/klwp.h
from TYPES import *
# Included from sys/condvar.h
from TYPES import *
# Included from sys/time.h
# Included from sys/types32.h
# Included from sys/int_types.h
TIME32_MAX = INT32_MAX
TIME32_MIN = INT32_MIN
def TIMEVAL_OVERFLOW(tv): return \
from TYPES import *
DST_NONE = 0
DST_USA = 1
DST_AUST = 2
DST_WET = 3
DST_MET = 4
DST_EET = 5
DST_CAN = 6
DST_GB = 7
DST_RUM = 8
DST_TUR = 9
DST_AUSTALT = 10
ITIMER_REAL = 0
ITIMER_VIRTUAL = 1
ITIMER_PROF = 2
ITIMER_REALPROF = 3
def ITIMERVAL_OVERFLOW(itv): return \
SEC = 1
MILLISEC = 1000
MICROSEC = 1000000
NANOSEC = 1000000
| Alex-Just/gymlog | gymlog/main/tests/test_models.py | Python | mit | 476 | 0 |
# from test_plus.test import TestCase
#
#
# class TestUser(TestCase):
#
# def setUp(self):
# self.user = self.make_user()
#
# def test__str__(self):
# self.assertEqual(
# self.user.__str__(),
# 'testuser' # This is the default username for self.make_user()
# )
#
# def test_get_absolute_url(self):
# self.assertEqual(
# self.user.get_absolute_url(),
# '/users/testuser/'
# )
| zokeber/django-galeria | setup.py | Python | bsd-3-clause | 1,378 | 0.002177 |
#!/usr/bin/env python
import codecs
import os
import sys
from setuptools import setup, find_packages
if 'publish' in sys.argv:
os.system('python setup.py sdist upload')
sys.exit()
read = lambda filepath: codecs.open(filepath, 'r', 'utf-8').read()
# Dynamically calculate the version based on galeria.VERSION.
version = __import__('galeria').get_version()
setup(
name='django-galeria',
version=version,
description='Pluggable gallery/portfolio application for Django projects',
long_description=read(os.path.join(os.path.dirname(__file__), 'README.rst')),
author='Guilherme Gondim',
author_email='semente+django-galeria@taurinus.org',
maintainer='Guilherme Gondim',
maintainer_email='semente+django-galeria@taurinus.org',
license='BSD License',
url='https://bitbucket.org/semente/django-galeria/',
packages=find_packages(),
zip_safe=False,
include_package_data=True,
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Software Development :: Libraries :: Python Modules',
],
install_requires=['django-mptt']
)
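The `version = __import__('galeria').get_version()` line above expects the package to expose a module-level `get_version()` helper. A hypothetical sketch of such a helper (the `VERSION` value and function body are illustrative, not taken from django-galeria itself):

```python
# Hypothetical galeria/__init__.py shape that would satisfy the setup.py above.
VERSION = (0, 1, 0)  # placeholder version tuple

def get_version():
    """Return the package version as a dotted string, e.g. '0.1.0'."""
    return ".".join(str(part) for part in VERSION)
```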
| rackerlabs/instrumented-ceilometer | ceilometer/openstack/common/rpc/proxy.py | Python | apache-2.0 | 9,444 | 0 |
# Copyright 2012-2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
A helper class for proxy objects to remote APIs.
For more information about rpc API version numbers, see:
rpc/dispatcher.py
"""
from ceilometer.openstack.common import rpc
from ceilometer.openstack.common.rpc import common as rpc_common
from ceilometer.openstack.common.rpc import serializer as rpc_serializer
class RpcProxy(object):
"""A helper class for rpc clients.
This class is a wrapper around the RPC client API. It allows you to
specify the topic and API version in a single place. This is intended to
be used as a base class for a class that implements the client side of an
rpc API.
"""
# The default namespace, which can be overridden in a subclass.
RPC_API_NAMESPACE = None
def __init__(self, topic, default_version, version_cap=None,
serializer=None):
"""Initialize an RpcProxy.
:param topic: The topic to use for all messages.
:param default_version: The default API version to request in all
outgoing messages. This can be overridden on a per-message
basis.
:param version_cap: Optionally cap the maximum version used for sent
messages.
:param serializer: Optionally (de-)serialize entities with a
provided helper.
"""
self.topic = topic
self.default_version = default_version
self.version_cap = version_cap
if serializer is None:
serializer = rpc_serializer.NoOpSerializer()
self.serializer = serializer
super(RpcProxy, self).__init__()
def _set_version(self, msg, vers):
"""Helper method to set the version in a message.
:param msg: The message having a version added to it.
:param vers: The version number to add to the message.
"""
v = vers if vers else self.default_version
if (self.version_cap and not
rpc_common.version_is_compatible(self.version_cap, v)):
raise rpc_common.RpcVersionCapError(version_cap=self.version_cap)
msg['version'] = v
def _get_topic(self, topic):
"""Return the topic to use for a message."""
return topic if topic else self.topic
def can_send_version(self, version):
"""Check to see if a version is compatible with the version cap."""
return (not self.version_cap or
rpc_common.version_is_compatible(self.version_cap, version))
@staticmethod
def make_namespaced_msg(method, namespace, **kwargs):
return {'method': method, 'namespace': namespace, 'args': kwargs}
def make_msg(self, method, **kwargs):
return self.make_namespaced_msg(method, self.RPC_API_NAMESPACE,
**kwargs)
def _serialize_msg_args(self, context, kwargs):
"""Helper method called to serialize message arguments.
This calls our serializer on each argument, returning a new
set of args that have been serialized.
:param context: The request context
:param kwargs: The arguments to serialize
:returns: A new set of serialized arguments
"""
new_kwargs = dict()
for argname, arg in kwargs.iteritems():
new_kwargs[argname] = self.serializer.serialize_entity(context,
arg)
return new_kwargs
def call(self, context, msg, topic=None, version=None, timeout=None):
"""rpc.call() a remote method.
:param context: The request context
:param msg: The message to send, including the method and args.
:param topic: Override the topic for this message.
:param version: (Optional) Override the requested API version in this
message.
:param timeout: (Optional) A timeout to use when waiting for the
response. If no timeout is specified, a default timeout will be
used that is usually sufficient.
:returns: The return value from the remote method.
"""
self._set_version(msg, version)
msg['args'] = self._serialize_msg_args(context, msg['args'])
real_topic = self._get_topic(topic)
try:
result = rpc.call(context, real_topic, msg, timeout)
return self.serializer.deserialize_entity(context, result)
except rpc.common.Timeout as exc:
raise rpc.common.Timeout(
exc.info, real_topic, msg.get('method'))
def multicall(self, context, msg, topic=None, version=None, timeout=None):
"""rpc.multicall() a remote method.
:param context: The request context
:param msg: The message to send, including the method and args.
:param topic: Override the topic for this message.
:param version: (Optional) Override the requested API version in this
message.
:param timeout: (Optional) A timeout to use when waiting for the
response. If no timeout is specified, a default timeout will be
used that is usually sufficient.
:returns: An iterator that lets you process each of the returned values
from the remote method as they arrive.
"""
self._set_version(msg, version)
msg['args'] = self._serialize_msg_args(context, msg['args'])
real_topic = self._get_topic(topic)
try:
result = rpc.multicall(context, real_topic, msg, timeout)
return self.serializer.deserialize_entity(context, result)
except rpc.common.Timeout as exc:
raise rpc.common.Timeout(
exc.info, real_topic, msg.get('method'))
def cast(self, context, msg, topic=None, version=None):
"""rpc.cast() a remote method.
:param context: The request context
:param msg: The message to send, including the method and args.
:param topic: Override the topic for this message.
:param version: (Optional) Override the requested API version in this
message.
:returns: None. rpc.cast() does not wait on any return value from the
remote method.
"""
self._set_version(msg, version)
msg['args'] = self._serialize_msg_args(context, msg['args'])
rpc.cast(context, self._get_topic(topic), msg)
def fanout_cast(self, context, msg, topic=None, version=None):
"""rpc.fanout_cast() a remote method.
:param context: The request context
:param msg: The message to send, including the method and args.
:param topic: Override the topic for this message.
:param version: (Optional) Override the requested API version in this
message.
:returns: None. rpc.fanout_cast() does not wait on any return value
from the remote method.
"""
self._set_version(msg, version)
msg['args'] = self._serialize_msg_args(context, msg['args'])
rpc.fanout_cast(context, self._get_topic(topic), msg)
def cast_to_server(self, context, server_params, msg, topic=None,
version=None):
"""rpc.cast_to_server() a remote method.
:param context: The request context
:param server_params: Server parameters. See rpc.cast_to_server() for
details.
:param msg: The message to send, including the method and args.
:param topic: Override the topic for this message.
:param version: (Optional) Override the re
| varunnaganathan/django | tests/auth_tests/test_auth_backends.py | Python | bsd-3-clause | 25,359 | 0.001814 |
from __future__ import unicode_literals
from datetime import date
from django.contrib.auth import (
BACKEND_SESSION_KEY, SESSION_KEY, authenticate, get_user,
)
from django.contrib.auth.backends import ModelBackend
from django.contrib.auth.hashers import MD5PasswordHasher
from django.contrib.auth.models import AnonymousUser, Group, Permission, User
from django.contrib.auth.tests.custom_user import CustomUser, ExtensionUser
from django.contrib.contenttypes.models import ContentType
from django.core.exceptions import ImproperlyConfigured, PermissionDenied
from django.http import HttpRequest
from django.test import (
SimpleTestCase, TestCase, modify_settings, override_settings,
)
from .models import CustomPermissionsUser, UUIDUser
class CountingMD5PasswordHasher(MD5PasswordHasher):
"""Hasher that counts how many times it computes a hash."""
calls = 0
def encode(self, *args, **kwargs):
type(self).calls += 1
return super(CountingMD5PasswordHasher, self).encode(*args, **kwargs)
class BaseModelBackendTest(object):
"""
A base class for tests that need to validate the ModelBackend
with different User models. Subclasses should define a class
level UserModel attribute, and a create_users() method to
construct two users for test purposes.
"""
backend = 'django.contrib.auth.backends.ModelBackend'
def setUp(self):
self.patched_settings = modify_settings(
AUTHENTICATION_BACKENDS={'append': self.backend},
)
self.patched_settings.enable()
self.create_users()
def tearDown(self):
self.patched_settings.disable()
# The custom_perms test messes with ContentTypes, which will
# be cached; flush the cache to ensure there are no side effects
# Refs #14975, #14925
ContentType.objects.clear_cache()
def test_has_perm(self):
user = self.UserModel._default_manager.get(pk=self.user.pk)
self.assertEqual(user.has_perm('auth.test'), False)
user.is_staff = True
user.save()
self.assertEqual(user.has_perm('auth.test'), False)
user.is_superuser = True
user.save()
self.assertEqual(user.has_perm('auth.test'), True)
user.is_staff = True
user.is_superuser = True
user.is_active = False
user.save()
self.assertEqual(user.has_perm('auth.test'), False)
def test_custom_perms(self):
user = self.UserModel._default_manager.get(pk=self.user.pk)
content_type = ContentType.objects.get_for_model(Group)
perm = Permission.objects.create(name='test', content_type=content_type, codename='test')
user.user_permissions.add(perm)
# reloading user to purge the _perm_cache
user = self.UserModel._default_manager.get(pk=self.user.pk)
self.assertEqual(user.get_all_permissions() == {'auth.test'}, True)
self.assertEqual(user.get_group_permissions(), set())
self.assertEqual(user.has_module_perms('Group'), False)
self.assertEqual(user.has_module_perms('auth'), True)
perm = Permission.objects.create(name='test2', content_type=content_type, codename='test2')
user.user_permissions.add(perm)
perm = Permission.objects.create(name='test3', content_type=content_type, codename='test3')
user.user_permissions.add(perm)
user = self.UserModel._default_manager.get(pk=self.user.pk)
self.assertEqual(user.get_all_permissions(), {'auth.test2', 'auth.test', 'auth.test3'})
self.assertEqual(user.has_perm('test'), False)
self.assertEqual(user.has_perm('auth.test'), True)
self.assertEqual(user.has_perms(['auth.test2', 'auth.test3']), True)
perm = Permission.objects.create(name='test_group', content_type=content_type, codename='test_group')
group = Group.objects.create(name='test_group')
group.permissions.add(perm)
user.groups.add(group)
user = self.UserModel._default_manager.get(pk=self.user.pk)
exp = {'auth.test2', 'auth.test', 'auth.test3', 'auth.test_group'}
self.assertEqual(user.get_all_permissions(), exp)
self.assertEqual(user.get_group_permissions(), {'auth.test_group'})
self.assertEqual(user.has_perms(['auth.test3', 'auth.test_group']), True)
user = AnonymousUser()
self.assertEqual(user.has_perm('test'), False)
self.assertEqual(user.has_perms(['auth.test2', 'auth.test3']), False)
def test_has_no_object_perm(self):
"""Regressiontest for #12462"""
user = self.UserModel._default_manager.get(pk=self.user.pk)
content_type = ContentType.objects.get_for_model(Group)
perm = Permission.objects.create(name='test', content_type=content_type, codename='test')
user.user_permissions.add(perm)
self.assertEqual(user.has_perm('auth.test', 'object'), False)
self.assertEqual(user.get_all_permissions('object'), set())
self.assertEqual(user.has_perm('auth.test'), True)
self.assertEqual(user.get_all_permissions(), {'auth.test'})
def test_anonymous_has_no_permissions(self):
"""
#17903 -- Anonymous users shouldn't have permissions in
ModelBackend.get_(all|user|group)_permissions().
"""
backend = ModelBackend()
user = self.UserModel._default_manager.get(pk=self.user.pk)
content_type = ContentType.objects.get_for_model(Group)
user_perm = Permission.objects.create(name='test', content_type=content_type, codename='test_user')
group_perm = Permission.objects.create(name='test2', content_type=content_type, codename='test_group')
user.user_permissions.add(user_perm)
group = Group.objects.create(name='test_group')
user.groups.add(group)
group.permissions.add(group_perm)
self.assertEqual(backend.get_all_permissions(user), {'auth.test_user', 'auth.test_group'})
self.assertEqual(backend.get_user_permissions(user), {'auth.test_user', 'auth.test_group'})
self.assertEqual(backend.get_group_permissions(user), {'auth.test_group'})
user.is_anonymous = lambda: True
self.assertEqual(backend.get_all_permissions(user), set())
self.assertEqual(backend.get_user_permissions(user), set())
self.assertEqual(backend.get_group_permissions(user), set())
def test_inactive_has_no_permissions(self):
"""
#17903 -- Inactive users shouldn't have permissions in
ModelBackend.get_(all|user|group)_permissions().
"""
backend = ModelBackend()
user = self.UserModel._default_manager.get(pk=self.user.pk)
content_type = ContentType.objects.get_for_model(Group)
user_perm = Permission.objects.create(name='test', content_type=content_type, codename='test_user')
group_perm = Permission.objects.create(name='test2', content_type=content_type, codename='test_group')
user.user_permissions.add(user_perm)
group = Group.objects.create(name='test_group')
user.groups.add(group)
group.permissions.add(group_perm)
self.assertEqual(backend.get_all_permissions(user), {'auth.test_user', 'auth.test_group'})
self.assertEqual(backend.get_user_permissions(user), {'auth.test_user', 'auth.test_group'})
self.assertEqual(backend.get_group_permissions(user), {'auth.test_group'})
user.is_active = False
user.save()
self.assertEqual(backend.get_all_permissions(user), set())
self.assertEqual(backend.get_user_permissions(user), set())
self.assertEqual(backend.get_group_permissions(user), set())
def test_get_all_superuser_permissions(self):
"""A superuser has all permissions. Refs #14795."""
user = self.UserModel._default_manager.get(pk=self.superuser.pk)
self.assertEqual(len(user.get_all_permissions()), len(Permission.objects.all()))
@override_settings(PASSWORD_HASHERS=['auth_tests.test_auth_backends.CountingMD5PasswordHasher'])
def test_authentication_timing(self):
"""Hasher is run once regardless of whether the user exists
| dajohnso/cfme_tests | cfme/intelligence/reports/dashboards.py | Python | gpl-2.0 | 10,188 | 0.001276 |
# -*- coding: utf-8 -*-
"""Page model for Cloud Intel / Reports / Dashboards"""
from navmazing import NavigateToAttribute, NavigateToSibling
from widgetastic.widget import Text, Checkbox
from widgetastic_manageiq import SummaryFormItem, DashboardWidgetsPicker
from widgetastic_patternfly import Button, Input
from utils.appliance import Navigatable
from utils.appliance.implementations.ui import navigator, CFMENavigateStep, navigate_to
from utils.pretty import Pretty
from utils.update import Updateable
from . import CloudIntelReportsView
class DashboardAllGroupsView(CloudIntelReportsView):
title = Text("#explorer_title_text")
@property
def is_displayed(self):
return (
self.in_intel_reports and
self.title.text == 'Dashboards for "{}"'.format(self.context["object"].group) and
self.dashboards.is_opened and
self.dashboards.tree.currently_selected == [
"All Dashboards",
"All Groups",
self.context["object"].group
]
)
class DashboardFormCommon(CloudIntelReportsView):
title = Text("#explorer_title_text")
basic_information = Text(".//div[@id='form_div']/h3")
name = Input(name="name")
tab_title = Input(name="description")
locked = Checkbox("locked")
sample_dashboard = Text(".//div[@id='form_widgets_div']/h3")
widgets = DashboardWidgetsPicker(
"form_widgets_div",
select_id="widget",
names_locator=".//a[starts-with(@id, 'w_')]/..",
remove_locator=".//div[contains(@title, {})]//a/i"
)
cancel_button = Button("Cancel")
class NewDashboardView(DashboardFormCommon):
add_button = Button("Add")
@property
def is_displayed(self):
return (
self.in_intel_reports and
self.title.text == "Adding a new dashboard" and
self.dashboards.is_opened and
self.dashboards.tree.currently_selected == [
"All Dashboards",
"All Groups",
self.context["object"].group
]
)
class EditDashboardView(DashboardFormCommon):
save_button = Button("Save")
reset_button = Button("Reset")
@property
def is_displayed(self):
return (
self.in_intel_reports and
self.title.text == "Editing Dashboard {}".format(self.context["object"].name) and
self.dashboards.is_opened and
self.dashboards.tree.currently_selected == [
"All Dashboards",
"All Groups",
self.context["object"].group,
self.context["object"].name
]
)
class EditDefaultDashboardView(EditDashboardView):
@property
def is_displayed(self):
return (
self.in_intel_reports and
self.title.text == "Editing Dashboard {}".format(self.context["object"].name) and
self.dashboards.is_opened and
self.dashboards.tree.currently_selected == [
"All Dashboards",
"{} ({})".format(self.context["object"].title, self.context["object"].name)
]
)
class DashboardDetailsView(CloudIntelReportsView):
SAMPLE_DASHBOARD_ROOT = ".//div[@id='modules']"
ITEM_TITLE_LOCATOR = ".//h3[contains(@class, 'panel-title')]"
title = Text("#explorer_title_text")
name = SummaryFormItem("Basic Information", "Name")
tab_title = SummaryFormItem("Basic Information", "Tab Title")
@property
def selected_items(self):
items = []
for el in self.browser.elements(self.ITEM_TITLE_LOCATOR, self.SAMPLE_DASHBOARD_ROOT):
items.append(self.browser.text(el))
return items
@property
def is_displayed(self):
return (
self.in_intel_reports and
self.title.text == 'Dashboard "{} ({})"'.format(
self.context["object"].title,
self.context["object"].name
) and
self.dashboards.is_opened and
self.dashboards.tree.currently_selected == [
"All Dashboards",
|
"All Groups",
self.context["object"].group,
self.context["object"].name
]
)
class DefaultDashboardDetailsView(DashboardDetailsView):
@property
def is_displayed(self):
return (
self.in_intel_reports and
self.title.text == 'Dashboard "{} ({})"'.format(
self.context["object"].title,
self.context["object"].name
) and
self.dashboards.is_opened and
self.dashboards.tree.currently_selected == [
"All Dashboards",
"{} ({})".format(self.context["object"].title, self.context["object"].name)
]
)
class Dashboard(Updateable, Pretty, Navigatable):
pretty_attrs = ["name", "group", "title", "widgets"]
def __init__(self, name, group, title=None, locked=None, widgets=None, appliance=None):
Navigatable.__init__(self, appliance)
self.name = name
self.title = title
self.locked = locked
self.widgets = widgets
self._group = group
@property
def group(self):
return self._group
def create(self, cancel=False):
"""Create this Dashboard in the UI."""
view = navigate_to(self, "Add")
view.fill({
"name": self.name,
"tab_title": self.title,
"locked": self.locked,
"widgets": self.widgets
})
view.add_button.click()
view = self.create_view(DashboardAllGroupsView)
assert view.is_displayed
view.flash.assert_no_error()
view.flash.assert_message('Dashboard "{}" was saved'.format(self.name))
def update(self, updates):
"""Update this Dashboard in the UI.
Args:
updates: Provided by update() context manager.
"""
view = navigate_to(self, "Edit")
changed = view.fill(updates)
if changed:
view.save_button.click()
else:
view.cancel_button.click()
for attr, value in updates.items():
setattr(self, attr, value)
view = self.create_view(DashboardDetailsView)
assert view.is_displayed
view.flash.assert_no_error()
if changed:
view.flash.assert_message('Dashboard "{}" was saved'.format(self.name))
else:
view.flash.assert_message(
'Edit of Dashboard "{}" was cancelled by the user'.format(self.name))
def delete(self, cancel=False):
"""Delete this Dashboard in the UI.
Args:
cancel: Whether to cancel the deletion (default False).
"""
view = navigate_to(self, "Details")
view.configuration.item_select(
"Delete this Dashboard from the Database",
handle_alert=not cancel
)
if cancel:
assert view.is_displayed
view.flash.assert_no_error()
else:
view = self.create_view(DashboardAllGroupsView)
assert view.is_displayed
view.flash.assert_no_error()
class DefaultDashboard(Updateable, Pretty, Navigatable):
pretty_attrs = ["name", "title", "widgets"]
def __init__(self, title="Default Dashboard", locked=None, widgets=None, appliance=None):
Navigatable.__init__(self, appliance)
self.title = title
self.locked = locked
self.widgets = widgets
@property
def name(self):
"""Name of Default Dashboard cannot be changed."""
return "default"
def update(self, updates):
"""Update Default Dashboard in the UI.
Args:
updates: Provided by update() context manager.
"""
view = navigate_to(self, "Edit")
changed = view.fill(updates)
if changed:
view.save_button.click()
else:
view.cancel_button.click()
view = self.create_view(DefaultDashboardDetailsView)
assert view.is_displayed
if changed:
| akloster/bokeh | examples/plotting/server/burtin.py | Python | bsd-3-clause | 4,826 | 0.005387 |
# The plot server must be running
# Go to http://localhost:5006/bokeh to view this plot
from collections import OrderedDict
from math import log, sqrt
import numpy as np
import pandas as pd
from six.moves import cStringIO as StringIO
from bokeh.plotting import figure, show, output_server
antibiotics = """
bacteria, penicillin, streptomycin, neomycin, gram
Mycobacterium tuberculosis, 800, 5, 2, negative
Salmonella schottmuelleri, 10, 0.8, 0.09, negative
Proteus vulgaris, 3, 0.1, 0.1, negative
Klebsiella pneumoniae, 850, 1.2, 1, negative
Brucella abortus, 1, 2, 0.02, negative
Pseudomonas aeruginosa, 850, 2, 0.4, negative
Escherichia coli, 100, 0.4, 0.1, negative
Salmonella (Eberthella) typhosa, 1, 0.4, 0.008, negative
Aerobacter aerogenes, 870, 1, 1.6, negative
Brucella antracis, 0.001, 0.01, 0.007, positive
Streptococcus fecalis, 1, 1, 0.1, positive
Staphylococcus aureus, 0.03, 0.03, 0.001, positive
Staphylococcus albus, 0.007, 0.1, 0.001, positive
Streptococcus hemolyticus, 0.001, 14, 10, positive
Streptococcus viridans, 0.005, 10, 40, positive
Diplococcus pneumoniae, 0.005, 11, 10, positive
"""
drug_color = OrderedDict([
("Penicillin", "#0d3362"),
("Streptomycin", "#c64737"),
("Neomycin", "black" ),
])
gram_color = {
"positive" : "#aeaeb8",
"negative" : "#e69584",
}
df = pd.read_csv(StringIO(antibiotics),
skiprows=1,
skipinitialspace=True,
engine='python')
width = 800
height = 800
inner_radius = 90
outer_radius = 300 - 10
minr = sqrt(log(.001 * 1E4))
maxr = sqrt(log(1000 * 1E4))
a = (outer_radius - inner_radius) / (minr - maxr)
b = inner_radius - a * maxr
def rad(mic):
return a * np.sqrt(np.log(mic * 1E4)) + b
big_angle = 2.0 * np.pi / (len(df) + 1)
small_angle = big_angle / 7
x = np.zeros(len(df))
y = np.zeros(len(df))
output_server("burtin")
p = figure(plot_width=width, plot_height=height, title="",
x_axis_type=None, y_axis_type=None,
x_range=[-420, 420], y_range=[-420, 420],
min_border=0, outline_line_color="black",
background_fill="#f0e1d2", border_fill="#f0e1d2")
p.line(x+1, y+1, alpha=0)
# annular wedges
angles = np.pi/2 - big_angle/2 - df.index.to_series()*big_angle
colors = [gram_color[gram] for gram in df.gram]
p.annular_wedge(
x, y, inner_radius, outer_radius, -big_angle+angles, angles, color=colors,
)
# small wedges
p.annular_wedge(x, y, inner_radius, rad(df.penicillin),
-big_angle+angles+5*small_angle, -big_angle+angles+6*small_angle,
color=drug_color['Penicillin'])
p.annular_wedge(x, y, inner_radius, rad(df.streptomycin),
-big_angle+angles+3*small_angle, -big_angle+angles+4*small_angle,
color=drug_color['Streptomycin'])
p.annular_wedge(x, y, inner_radius, rad(df.neomycin),
-big_angle+angles+1*small_angle, -big_angle+angles+2*small_angle,
color=drug_color['Neomycin'])
# circular axes and labels
labels = np.power(10.0, np.arange(-3, 4))
radii = a * np.sqrt(np.log(labels * 1E4)) + b
p.circle(x, y, radius=radii, fill_color=None, line_color="white")
p.text(x[:-1], radii[:-1], [str(r) for r in labels[:-1]],
text_font_size="8pt", text_align="center", text_baseline="middle")
# radial axes
p.annular_wedge(x, y, inner_radius-10, outer_radius+10,
-big_angle+angles, -big_angle+angles, color="black")
# bacteria labels
xr = radii[0]*np.cos(np.array(-big_angle/2 + angles))
yr = radii[0]*np.sin(np.array(-big_angle/2 + angles))
label_angle=np.array(-big_angle/2+angles)
label_angle[label_angle < -np.pi/2] += np.pi # easier to read labels on the left side
p.text(xr, yr, df.bacteria, angle=label_angle,
text_font_size="9pt", text_align="center", text_baseline="middle")
# OK, these hand drawn legends are pretty clunky, will be improved in future release
p.circle([-40, -40], [-370, -390], color=list(gram_color.values()), radius=5)
p.text([-30, -30], [-370, -390], text=["Gram-" + gr for gr in gram_color.keys()],
text_font_size="7pt", text_align="left", text_baseline="middle")
p.rect([-40, -40, -40], [18, 0, -18], width=30, height=13,
color=list(drug_color.values()))
p.text([-15, -15, -15], [18, 0, -18], text=list(drug_color.keys()),
text_font_size="9pt", text_align="left", text_baseline="middle")
p.xgrid.grid_line_color = None
p.ygrid.grid_line_color = None
show(p)
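The rad() helper above maps MIC values onto radii with a log-sqrt transform; the constants a and b are chosen so that an MIC of 0.001 lands on the outer ring and 1000 on the inner ring. A small standalone check of that mapping, reusing the same constants:

```python
from math import isclose, log, sqrt

inner_radius = 90
outer_radius = 300 - 10
minr = sqrt(log(.001 * 1E4))
maxr = sqrt(log(1000 * 1E4))
a = (outer_radius - inner_radius) / (minr - maxr)
b = inner_radius - a * maxr

def rad(mic):
    # Same transform as in the plot above, scalar version.
    return a * sqrt(log(mic * 1E4)) + b

# The two extremes of the label range land exactly on the ring boundaries.
assert isclose(rad(0.001), outer_radius)
assert isclose(rad(1000), inner_radius)
```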
| chrism333/xpcc | scons/site_tools/system_design.py | Python | bsd-3-clause | 7,787 | 0.038012 |
#!/usr/bin/env python
#
# Copyright (c) 2009, Roboterclub Aachen e.V.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the Roboterclub Aachen e.V. nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY ROBOTERCLUB AACHEN E.V. ''AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL ROBOTERCLUB AACHEN E.V. BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
import re
import SCons
import SCons.Errors
# TODO make this more robust against whitespace etc.
includeExpression = re.compile(r'<include>(\S+)</include>', re.M)
# -----------------------------------------------------------------------------
def find_includes(env, file, include_path):
""" Find include directives in an XML file """
files = []
line_count = 0
for line in open(file).readlines():
line_count = line_count + 1
match = includeExpression.search(line)
if match:
filename = match.group(1)
relative_to_file = os.path.join(os.path.dirname(os.path.abspath(file)), filename)
relative_to_include_path = os.path.join(include_path, filename)
# 1.) include file name can be absolute
if os.path.isabs(filename):
files.append(filename)
# 2.) it could be a path relative to the files path
# this works just like #include "{filename}" in C/C++
elif os.path.isfile(relative_to_file):
files.append(relative_to_file)
# 3.) it could be a path relative to the include path
elif os.path.isfile(relative_to_include_path):
files.append(relative_to_include_path)
# 4.) Error!
else:
env.Error("Could not find include file '%s' in '%s:%s'" % (filename, file, line_count))
return files
def xml_include_scanner(node, env, path, arg=None):
""" Generates the dependencies for the XML files """
abspath, targetFilename = os.path.split(node.get_abspath())
stack = [targetFilename]
dependencies = [targetFilename]
while stack:
nextFile = stack.pop()
files = find_includes(env, os.path.join(abspath, nextFile), abspath)
for file in files:
if file not in dependencies:
stack.append(file)
dependencies.extend(files)
dependencies.remove(targetFilename)
return dependencies
# -----------------------------------------------------------------------------
def packet_emitter(target, source, env):
try:
path = env['path']
except KeyError:
path = '.'
target = [os.path.join(path, "packets.cpp"),
os.path.join(path, "packets.hpp")]
return (target, source)
def identifier_emitter(target, source, env):
try:
path = env['path']
except KeyError:
path = '.'
target = [os.path.join(path, "identifier.hpp")]
return (target, source)
def postman_emitter(target, source, env):
try:
path = env['path']
except KeyError:
path = '.'
target = [os.path.join(path, "postman.cpp"),
os.path.join(path, "postman.hpp")]
return (target, source)
def communication_emitter(target, source, env):
try:
path = env['path']
except KeyError:
path = '.'
target = [os.path.join(path, "communication.hpp")]
return (target, source)
def xpcc_task_caller_emitter(target, source, env):
try:
path = env['path']
except KeyError:
path = '.'
target = [os.path.join(path, "caller.hpp")]
return (target, source)
# -----------------------------------------------------------------------------
def generate(env, **kw):
env.SetDefault(XPCC_SYSTEM_DESIGN_SCANNERS = {})
env['XPCC_SYSTEM_DESIGN_SCANNERS']['XML'] = SCons.Script.Scanner(
function = xml_include_scanner,
skeys = ['.xml'])
env['BUILDERS']['SystemCppPackets'] = \
SCons.Script.Builder(
action = SCons.Action.Action(
'python "${XPCC_SYSTEM_BUILDER}/cpp_packets.py" ' \
'--source_path ${TARGETS[0].dir} ' \
'--header_path ${TARGETS[1].dir} ' \
'--dtdpath "${dtdPath}" ' \
'--namespace "${namespace}" ' \
'$SOURCE',
cmdstr="$SYSTEM_CPP_PACKETS_COMSTR"),
emitter = packet_emitter,
source_scanner = env['XPCC_SYSTEM_DESIGN_SCANNERS']['XML'],
single_source = True,
target_factory = env.fs.Entry,
src_suffix = ".xml")
env['BUILDERS']['SystemCppIdentifier'] = \
SCons.Script.Builder(
action = SCons.Action.Action(
'python "${XPCC_SYSTEM_BUILDER}/cpp_identifier.py" ' \
'--outpath ${TARGET.dir} ' \
'--dtdpath "${dtdPath}" ' \
'--namespace "${namespace}" ' \
'$SOURCE',
cmdstr="$SYSTEM_CPP_IDENTIFIER_COMSTR"),
emitter = identifier_emitter,
source_scanner = env['XPCC_SYSTEM_DESIGN_SCANNERS']['XML'],
single_source = True,
target_factory = env.fs.Entry,
src_suffix = ".xml")
env['BUILDERS']['SystemCppPostman'] = \
SCons.Script.Builder(
action = SCons.Action.Action(
'python "${XPCC_SYSTEM_BUILDER}/cpp_postman.py" ' \
'--container "${container}" ' \
'--outpath ${TARGET.dir} ' \
'--dtdpath "${dtdPath}" ' \
'--namespace "${namespace}" ' \
'$SOURCE',
cmdstr="$SYSTEM_CPP_POSTMAN_COMSTR"),
emitter = postman_emitter,
source_scanner = env['XPCC_SYSTEM_DESIGN_SCANNERS']['XML'],
single_source = True,
target_factory = env.fs.Entry,
src_suffix = ".xml")
env['BUILDERS']['SystemCppCommunication'] = \
SCons.Script.Builder(
action = SCons.Action.Action(
'python "${XPCC_SYSTEM_BUILDER}/cpp_communication.py" ' \
'--outpath ${TARGET.dir} ' \
'--dtdpath "${dtdPath}" ' \
'--namespace "${namespace}" ' \
'$SOURCE',
cmdstr="$SYSTEM_CPP_COMMUNICATION_COMSTR"),
emitter = communication_emitter,
source_scanner = env['XPCC_SYSTEM_DESIGN_SCANNERS']['XML'],
single_source = True,
target_factory = env.fs.Entry,
src_suffix = ".xml")
env['BUILDERS']['SystemCppXpccTaskCaller'] = \
SCons.Script.Builder(
action = SCons.Action.Action(
'python "${XPCC_SYSTEM_BUILDER}/cpp_xpcc_task
|
_caller.py" ' \
'--outpath ${TARGET.dir} ' \
'--dtdpath "${dtdPath}" ' \
'--namespace "${namespace}" ' \
'$SOURCE',
cmdstr="$SYSTEM_CPP_XPCC_TASK_CALLER_COMSTR"),
emitter = xpcc_task_caller_emitter,
source_scanner = env['XPCC_SYSTEM_DESIGN_SCANNERS']['XML'],
single_source = True,
target_factory = env.fs.Entry,
src_suffix = ".xml")
if SCons.Script.ARGUMENTS.get('verbose') != '1':
env['SYSTEM_CPP_PACKETS_COMSTR'] = "Generate packets from: $SOURCE"
env['SYSTEM_CPP_IDENTIFIER_COMSTR'] = "Generate identifier from: $SOURCE"
env['SYSTEM_CPP_POSTMAN_COMSTR'] = "Generate postman from: $SOURCE"
env['SYSTEM_CPP_COMMUNICATION_COMSTR'] = "Generate communication stubs from: $SOURCE"
env['SYSTEM_CPP_XPCC_TASK_CALLER_COMSTR'] = "Generate xpcc task callers from: $SOURCE"
def exists(env):
return True
| zzsza/TIL | python/sacred/my_command.py | Python | mit | 345 | 0.002899 |
from sacred import Experiment
ex = Experiment('my_commands')
@ex.config
def cfg():
name = 'kyle'
@ex.command
def greet(name):
print('Hello {}! Nice to greet you!'.format(name))
@ex.command
def shout():
print('WHAZZZUUUUUUUUUUP!!!????')
@ex.automain
def main():
print('This is just the main command. Try greet or shout.')
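Sacred commands like `greet` and `shout` above are normally invoked from the command line (for example `python my_command.py greet with name=Alice`), but they can also be run programmatically. A short sketch, assuming the snippet above is saved as my_command.py on the import path:

```python
# Programmatic equivalent of the command-line calls.
from my_command import ex

ex.run("shout")                                    # prints the shout message
ex.run("greet", config_updates={"name": "Alice"})  # overrides the 'name' config entry
```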
| Noysena/TAROT | SFile.py | Python | gpl-3.0 | 8,161 | 0.010538 |
# -*- coding: utf-8 -*-
"""
Created on Thu Jul 7 14:53:55 2016
@author: nu
"""
import numpy as np
import timeit, os, sys
from astropy.coordinates import SkyCoord
from astropy.table import Column
from astroquery.vizier import Vizier
from astropy import units as u
from TAROT_PL import TarotPIP
from Filter_data import (reoffset, Lsep_candi, tbmagnitude, XYlimit, W_to_P)
#from Data_graph import candidateplot
from renametxt import Renametxt
""" This algorithm will run a file at a time by in put directly """
start = timeit.default_timer()
#start import file
input_fits = raw_input("Please put FITS [/path_file/file.fits]: "); fitsfile = Renametxt(input_fits)
savefolder = '/home/tarot/Documents/Data_Output/' # where to keep file, change where you want here
if not os.path.exists(savefolder):
savefolder = '/tmp/'
print("Output is at %s" %savefolder)
#Use algorithm from TarotPip
""" SExdata is data extract by Sextractor which keep in 'output.cat'
Catalog is obtain from USNO-B1.0 and store in Table format
Ccata is the coordinate of catalog in SkyCoord [RA, DEC]
Cdata is the coordinate of object from image in SkyCoord [RA, DEC]"""
TAROT_data = TarotPIP(fitsfile); TAROT_data.fitsfileinfo();
try:
SExdata, Catalog, Ccata, Cdata = TAROT_data.readData()
except(ValueError, IOError):
print("%s: check data table, catalog\n"%TAROT_data.image)
sys.exit(0)
#hdu = fits.open(fitsfile); tbdata = hdu[0].data; hdu.close();
""" Start to math by using 'match_to_catalog_sky' import from 'Astropy'
idx is the index of object in catalog that math with Cdata
d2d is the angular separation between Ccata and Cdata in degree
d3d is 3 dimension distace [we don't use it
matches are the closest Ccata with Cdata in SkyCoord [RA, DEC]"""
try:
idx, d2d, d3d, matches = TAROT_data.Data_match_catalog()
except (ValueError, IOError):
print("\n Matching error !!!, check 'Data_match_catalog function'")
sys.exit(1)
#Calibration data with catalog
""" Calibration the object in image to catalog and then re-matching again
idx, d2d, matches are the same meaning but value and coordinate may change
Cdata_offset is the new SkyCoord of Cdata after calibration (or off-set)"""
try:
idx, d2d, d3d, matches, Cdata_offset = reoffset(d2d, matches, Cdata, Ccata)
except (ValueError, IOError):
print("\n offset coordinate error !!!, check 'reoffset function'")
sys.exit(2)
# Select candidate from high angular distance (assume it is a new object in the field)
""" idx_candi_catalog is the idx for catalog
idx_candi_data is the idx for data [Cation there are two data's SkyCoord]"""
try:
idx_candi_catalog, idx_candi_data = Lsep_candi(3, idx, d2d, SExdata) #10*std of d2d
except(ValueError, IOError):
print("\n Candidate sellected error !!!, check 'Lsep_candi function'")
sys.exit(3)
#cut candidate near the edge of image
try:
XYcandi, idx_XYcandi_data, idx_XYcandi_cata = XYlimit(Cdata_offset,idx_candi_data, idx_candi_catalog, TAROT_data.new_wcs)
except(ValueError, IOError):
print("\nProblem with limit candidate in the edge !!!, check 'idx_XYcandi_cata' function")
sys.exit(4)
#convert world to pixel
try:
Catalog_WTP, Matches_WTP, Cdata_offset_WTP = W_to_P(Ccata, Cdata_offset, matches, TAROT_data.new_wcs)
except(ValueError, IOError):
print("\n Convert wcs error!!!, check 'W_to_P function'")
sys.exit(5)
#Select candidates by local angular separation (among 10 objects next to each other)
""" d2d_candi is in degrees, the candidates selected by using the angular separation
    Cdata_candi, Ccata_candi are the SkyCoord [RA, DEC] of the candidates
    for the data (offset) and catalog, respectively, but
    cata_candi is the information of the candidates in the catalog (all info. available)"""
#d2d_candi = d2d[idx_candi_data]
Cdata_candi = Cdata_offset[idx_XYcandi_data]
cata_candi = Catalog[idx_XYcandi_cata]
Ccata_candi = Ccata[idx_XYcandi_cata]
#Magnitude comparison and selection
""" There is a mismatch between the magnitudes of the data and the catalog """
tbmag = tbmagnitude(idx, SExdata, Catalog)
Cdata_col_ra = Column(Cdata_offset.ra, name='ra')
Cdata_col_dec = Column(Cdata_offset.dec, name='dec')
Ang_sept_candidate = Column((d2d.arcsec*u.arcsec), name='Separation')
tbmag.add_column(Cdata_col_ra,index=None)
tbmag.add_column(Cdata_col_dec,index=None)
tbmag.add_column(Ang_sept_candidate,index=None)
Candi_d2d = tbmag[idx_XYcandi_data]
""" Candidate can be create in a tables,
by angular distance (d2d), in ASCII or HTML file
"""
#HTML
#savename_html = os.path.join(savefolder, TAROT_data.fname + 'html_d2d_candidate.dat')
#Candi_d2d.write(savename_html, format='ascii.html')
#ASCII
#savename_ascii = os.path.join(savefolder, TAROT_data.fname + 'ascii_d2d_candidate.dat')
#Candi_d2d.write(savename_ascii, format='ascii')
print(':::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::')
print('\nMedian of separation : STD %3.2f\" : %3.2f\"\n' %(np.median(d2d.arcsec), np.std(d2d.arcsec)))
print('Number of candidate\t\t:%d\n' %len(Candi_d2d))
print('-----------------------------------------------------------------\n')
print Candi_d2d
print("\n")
line0_reg_ds9 = "global color=green dashlist=8 3 width=1 font=\"helvetica 10 normal roman\" select=1 highlite=1 dash=0 fixed=0 edit=1 move=1 delete=1 include=1 source=1\n"
line1_reg_ds9 = "icrs\n"
Candi_reg = open("/tmp/Candi_region_Gaia.reg", 'w')
Candi_reg.write(line0_reg_ds9)
Candi_reg.write(line1_reg_ds9)
for i in range(len(Candi_d2d)):
Candi_reg.write("circle(%f, %f, 16.0\") # color=red text={Gaia_%d}\n" %(Candi_d2d["ra"][i], Candi_d2d["dec"][i], i))
Candi_reg.close()
#print("circle(%f, %f, 16.0\") # color=red text={Gaia_%d}" %(Candi_d2d["ra"][i], Candi_d2d["dec"][i], i))
print("\n")
print(':::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::')
#Filter candidate by checking with catalog USNO and NOMAD1
confirm_1 = []; confirm_2 = [];
confirm_candi_0 = []; confirm_candi_1 = []; confirm_candi_2 = [];
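# Keep only candidates with no USNO-B1.0 / NOMAD1 source within 10 arcsec of the offset position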
for i in range(len(Cdata_candi)):
confirm_candi_1 = Vizier(catalog="USNO-B1.0", row_limit=-1).query_region(Cdata_candi[i], radius=10*u.arcsec, verbose=False)
if not confirm_candi_1:
confirm_1.append(i)
for i in range(len(Cdata_candi)):
confirm_candi_2 = Vizier(catalog="NOMAD1", row_limit=-1).query_region(Cdata_candi[i], radius=10*u.arcsec, verbose=False)
if not confirm_candi_2:
confirm_2.append(i)
# Write candidate to disk
savename_ascii = os.path.join(savefolder,TAROT_data.fname + '.candi.dat')
info_candi_1 = Candi_d2d[confirm_1]
info_candi_1.write(savename_ascii, format='ascii') # Candidate after check with USNO-B1.0 and write to disk
info_candi_2 = Candi_d2d[confirm_2]
print("\n::::::::::::::::::::::: Potential candidate check with USNO-B1.0 :::::::::::::::::::::\n")
try:
print(info_candi_1)
print("\n")
Candi_reg1 = open("/tmp/Candi_region_USNO.txt", 'w')
Candi_reg1.write(line0_reg_ds9)
Candi_reg1.write(line1_reg_ds9)
for i in range(len(info_candi_1)):
Candi_reg1.write("circle(%f, %f, 16.0\") # color=blue text={USNO_%d}\n" %(info_candi_1["ra"][i], info_candi_1["dec"][i], i))
Candi_reg1.close()
print("Number of Candidate %d" %len(info_candi_1))
print("\n"*2)
except(ValueError, NameError):
print("No candidate in USNO-B1.0\n\n")
print("\n::::::::::::::::::::::: Potential candidate check with NOMAD1 :::::::::::::::::::::\n")
try:
print(info_candi_2)
print("\n")
Candi_reg2 = open("/tmp/Candi_region_NOMAD1.txt", 'w')
Candi_reg2.write(line0_reg_ds9)
Candi_reg2.write(line1_reg_ds9)
for i in range(len(info_candi_2)):
        Candi_reg2.write("circle(%f, %f, 16.0\") # color=green text={NOMAD1_%d}\n" %(info_candi_2["ra"][i], info_candi_2["dec"][i], i))
Candi_reg2.close()
print("Number of Candidate %d" %len(info_candi_2))
print("\n"*2)
except(ValueError, NameError):
print("No candidate in NOMAD1\n\n")
stop = timeit.default_timer()
runtime = stop - start
print("\nRuntime = %2.2f" %runtime)
#graph0 = candidateplot(TAROT_data.tbdata,XYcandi['Xpix'],XYcandi['Ypix'], 'Candidate by angular separation')
|
jhpyle/docassemble
|
docassemble_webapp/docassemble/webapp/alembic/versions/9be372ec38bc_alter_database_for_mysql_compatibility.py
|
Python
|
mit
| 3,895 | 0.001284 |
"""alter database for mysql compatibility
Revision ID: 9be372ec38bc
Revises: 4328f2c08f05
Create Date: 2020-02-16 15:43:35.276655
"""
from alembic import op
import sqlalchemy as sa
from docassemble.webapp.database import dbtableprefix, dbprefix, daconfig
import sys
# revision identifiers, used by Alembic.
revision = '9be372ec38bc'
down_revision = '4328f2c08f05'
branch_labels = None
depends_on = None
def upgrade():
if dbprefix.startswith('postgresql') and not daconfig.get('force text to varchar upgrade', False):
sys.stderr.write("Not changing text type to varchar type because underlying database is PostgreSQL\n")
else:
op.alter_column(
table_name='userdict',
column_name='filename',
type_=sa.String(255)
)
op.alter_column(
table_name='userdictkeys',
column_name='filename',
type_=sa.String(255)
)
op.alter_column(
table_name='chatlog',
column_name='filename',
type_=sa.String(255)
)
op.alter_column(
table_name='uploads',
column_name='filename',
type_=sa.String(255)
)
op.alter_column(
table_name='uploads',
column_name='yamlfile',
type_=sa.String(255)
)
op.alter_column(
table_name='objectstorage',
column_name='key',
type_=sa.String(1024)
)
op.alter_column(
table_name='speaklist',
column_name='filename',
type_=sa.String(255)
)
op.alter_column(
table_name='shortener',
column_name='filename',
type_=sa.String(255)
)
op.alter_column(
table_name='shortener',
column_name='key',
            type_=sa.String(255)
)
op.alter_column(
table_name='machinelearning',
column_name='key',
type_=sa.String(1024)
)
op.alter_column(
table_name='machinelearning',
column_name='group_id',
type_=sa.String(1024)
)
op.alter_column(
table_name='globalobjectstorage',
column_name='key',
type_=sa.String(1024)
)
op.create_index(dbtableprefix + 'ix_uploads_yamlfile', 'uploads', ['yamlfile'])
def downgrade():
op.alter_column(
table_name='userdict',
column_name='filename',
type_=sa.Text()
)
op.alter_column(
table_name='userdictkeys',
column_name='filename',
type_=sa.Text()
)
op.alter_column(
table_name='chatlog',
column_name='filename',
type_=sa.Text()
)
op.alter_column(
table_name='uploads',
column_name='filename',
type_=sa.Text()
)
op.alter_column(
table_name='uploads',
column_name='yamlfile',
type_=sa.Text()
)
op.alter_column(
table_name='objectstorage',
column_name='key',
type_=sa.Text()
)
op.alter_column(
table_name='speaklist',
column_name='filename',
type_=sa.Text()
)
op.alter_column(
table_name='shortener',
column_name='filename',
type_=sa.Text()
)
op.alter_column(
table_name='shortener',
column_name='key',
type_=sa.Text()
)
op.alter_column(
table_name='machinelearning',
column_name='key',
type_=sa.Text()
)
op.alter_column(
table_name='machinelearning',
column_name='group_id',
type_=sa.Text()
)
op.alter_column(
table_name='globalobjectstorage',
column_name='key',
type_=sa.Text()
)
op.drop_index(dbtableprefix + 'ix_uploads_yamlfile', table_name='uploads')
|
sl2017/campos
|
campos_event/__openerp__.py
|
Python
|
agpl-3.0
| 3,874 | 0.001807 |
# -*- coding: utf-8 -*-
##############################################################################
#
# This file is part of CampOS Event,
# an Odoo module.
#
# Copyright (c) 2015 Stein & Gabelgaard ApS
# http://www.steingabelgaard.dk
# Hans Henrik Gaelgaard
#
# CampOS Event is free software:
# you can redistribute it and/or modify it under the terms of the GNU
# Affero General Public License as published by the Free Software
# Foundation,either version 3 of the License, or (at your option) any
# later version.
#
# CampOS Event is distributed
# in the hope that it will be useful, but WITHOUT ANY WARRANTY; without
# even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
# PURPOSE. See the GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with CampOS Event.
# If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': "CampOS Event",
'summary': """
Scout Camp Management Solution""",
# 'description': put the module description in README.rst
'author': "Hans Henrik Gabelgaard",
'website': "http://www.steingabelgaard.dk",
# Categories can be used to filter modules in modules listing
# Check http://goo.gl/0TfwzD for the full list
'category': 'Uncategorized',
'version': '0.1',
'license': 'AGPL-3',
# any module necessary for this one to work correctly
'depends': [
'base',
'mail',
'event',
'website',
'portal',
'survey',
'website_event_register_free',
'base_suspend_security',
'website_jquery_ui',
'base_geoengine',
'geoengine_partner',
'web_widget_color',
'project_issue',
'project_model_to_issue',
'website_event_track',
'web_widget_datepicker_options',
],
# always loaded
'data': [
'views/res_country.xml',
'views/product_template.xml',
'views/campos_staff_del_prod.xml',
'security/campos_event_security.xml',
'security/campos_subcamp_exception.xml',
'security/campos_subcamp.xml',
'security/campos_registration_view.xml',
'security/campos_function_view.xml',
'security/ir.model.access.csv',
'security/ir.rule.csv',
'security/campos_staff_del_prod.xml',
'data/campos.municipality.csv',
'data/campos.scout.org.csv',
'data/job_ask_project.xml',
'views/templates.xml',
'views/participant_view.xml',
'views/event_registration_view.xml',
'views/committee_view.xml',
        'views/municipality_view.xml',
"views/scout_org_view.xml",
"views/res_partner_view.xml",
"views/job_view.xml",
"views/job_template.xml",
"views/mail_templates.xml",
"views/confirm_
|
template.xml",
"views/event_view.xml",
#"views/portal_menu.xml",
"views/res_users_view.xml",
'views/campos_menu.xml',
'views/campos_subcamp_exception.xml',
'views/campos_subcamp.xml',
'views/event_partner_reg_template.xml',
'views/meeting_proposal_template.xml',
'views/event_track_view.xml',
'views/campos_camp_area.xml',
'data/camp_area_committee.xml',
'data/participant_number.xml',
'security/campos_par_tag.xml',
'views/campos_par_tag.xml',
'security/campos_reg_tag.xml',
'views/campos_reg_tag.xml',
'views/extern_jobber_template.xml',
],
# only loaded in demonstration mode
'demo': [
'demo.xml',
],
}
|
hasegaw/IkaLog
|
ikalog/inputs/win/directshow.py
|
Python
|
apache-2.0
| 4,592 | 0.000436 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# IkaLog
# ======
# Copyright (C) 2015 Takeshi HASEGAWA
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import time
import threading
import cv2
from ikalog.utils import *
from ikalog.inputs.win.videoinput_wrapper import VideoInputWrapper
from ikalog.inputs import VideoInput
class DirectShow(VideoInput):
# override
def _enumerate_sources_func(self):
return self._videoinput_wrapper.get_device_list()
def read_raw(self):
if self._device_id is None:
return None
frame = self._videoinput_wrapper.get_pixels(
self._device_id,
parameters=(
self._videoinput_wrapper.VI_BGR +
self._videoinput_wrapper.VI_VERTICAL_FLIP
)
)
return frame
# override
def _read_frame_func(self):
frame = self.read_raw()
return frame
# override
def _initialize_driver_func(self):
pass
# override
def _cleanup_driver_func(self):
pass
# override
def _is_active_func(self):
return (self._device_id is not None)
# override
def _select_device_by_index_func(self, source, width=1280, height=720, framerate=59.94):
device_id = int(source)
vi = self._videoinput_wrapper
self.lock.acquire()
try:
if self._device_id is not None:
raise Exception('Need to deinit the device')
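            # Try the target resolution without forcing a framerate first, then retry with the requested framerate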
formats = [
{'width': width, 'height': height, 'framerate': None},
{'width': width, 'height': height, 'framerate': framerate},
]
for fmt in formats:
if fmt['framerate']:
vi.set_framerate(device_id, fmt['framerate'])
retval = vi.init_device(
device_id,
flags=self._videoinput_wrapper.DS_RESOLUTION,
width=fmt['width'],
height=fmt['height'],
)
if retval:
self._source_width = vi.get_frame_width(device_id)
self._source_height = vi.get_frame_height(device_id)
success = \
(width == self._source_width) and (
height == self._source_height)
if success or (not self.cap_optimal_input_resolution):
self._device_id = device_id
break
vi.deinit_device(device_id)
# end of for loop
if self._device_id is None:
IkaUtils.dprint(
'%s: Failed to init the capture device %d' %
(self, device_id)
)
finally:
self.lock.release()
# override
def _select_device_by_name_func(self, source):
IkaUtils.dprint('%s: Select device by name "%s"' % (self, source))
try:
index = self.enumerate_sources().index(source)
except ValueError:
IkaUtils.dprint('%s: Input "%s" not found' % (self, source))
return False
IkaUtils.dprint('%s: "%s" -> %d' % (self, source, index))
self._select_device_by_index_func(index)
def __init__(self):
self.strict_check = False
self._device_id = None
self._warned_resolution = False
self._videoinput_wrapper = VideoInputWrapper()
super(DirectShow, self).__init__()
if __name__ == "__main__":
obj = DirectShow()
list = obj.enumerate_sources()
for n in range(len(list)):
IkaUtils.dprint("%d: %s" % (n, list[n]))
dev = input("Please input number (or name) of capture device: ")
obj.select_source(dev)
k = 0
while k != 27:
frame = obj.read_frame()
if frame is not None:
cv2.imshow(obj.__class__.__name__, frame)
k = cv2.waitKey(1)
if k == ord('s'):
import time
cv2.imwrite('screenshot_%d.png' % int(time.time()), frame)
|
genius1611/Keystone
|
keystone/contrib/extensions/admin/__init__.py
|
Python
|
apache-2.0
| 1,232 | 0.000812 |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License
from keystone.contrib.extensions.admin.osksadm.extension_handler\
import ExtensionHandler as KSADMExtensionHandler
from keystone.contrib.extensions.admin.oskscatalog.extension_handler\
import ExtensionHandler as KSCATALOGExtensionHandler
def configure_extensions(mapper, options):
#TODO: Make extensions configurable.
    ksadm_extension_handler = KSADMExtensionHandler()
    ksadm_extension_handler.map_extension_methods(mapper, options)
    kscatalog_extension_handler = KSCATALOGExtensionHandler()
    kscatalog_extension_handler.map_extension_methods(mapper, options)
|
SecurityFTW/cs-suite
|
tools/Scout2/AWSScout2/services/vpc.py
|
Python
|
gpl-3.0
| 7,331 | 0.005866 |
# -*- coding: utf-8 -*-
import copy
import netaddr
from opinel.utils.aws import get_name
from opinel.utils.globals import manage_dictionary
from opinel.utils.fs import load_data, read_ip_ranges
from AWSScout2.utils import ec2_classic, get_keys
from AWSScout2.configs.regions import RegionalServiceConfig, RegionConfig
from AWSScout2.configs.vpc import VPCConfig as SingleVPCConfig
########################################
# Globals
########################################
protocols_dict = load_data('protocols.json', 'protocols')
########################################
# VPCRegionConfig
########################################
class VPCRegionConfig(RegionConfig):
"""
VPC configuration for a single AWS region
"""
def parse_customer_gateway(self, global_params, region, cgw):
cgw['id'] = cgw.pop('CustomerGatewayId')
self.customer_gateways[cgw['id']] = cgw
def parse_flow_log(self, global_params, region, fl):
"""
:param global_params:
:param region:
:param fl:
:return:
"""
get_name(fl, fl, 'FlowLogId')
fl_id = fl.pop('FlowLogId')
self.flow_logs[fl_id] = fl
def parse_network_acl(self, global_params, region, network_acl):
"""
:param global_params:
:param region:
:param network_acl:
:return:
"""
vpc_id = network_acl['VpcId']
network_acl['id'] = network_acl.pop('NetworkAclId')
        get_name(network_acl, network_acl, 'id')
manage_dictionary(network_acl, 'rules', {})
network_acl['rules']['ingress'] = self.__parse_network_acl_entries(network_acl['Entries'], False)
network_acl['rules']['egress'] = self.__parse_network_acl_entries(network_acl['Entries'], True)
network_acl.pop('Entries')
# Save
manage_dictionary(self.vpcs, vpc_id, SingleVPCConfig(self.vpc_resource_types))
self.vpcs[vpc_id].network_acls[network_acl['id']] = network_acl
def __parse_network_acl_entries(self, entries, egress):
"""
:param entries:
:param egress:
:return:
"""
acl_dict = {}
for entry in entries:
if entry['Egress'] == egress:
acl = {}
for key in ['RuleAction', 'RuleNumber']:
acl[key] = entry[key]
acl['CidrBlock'] = entry['CidrBlock'] if 'CidrBlock' in entry else entry['Ipv6CidrBlock']
acl['protocol'] = protocols_dict[entry['Protocol']]
if 'PortRange' in entry:
from_port = entry['PortRange']['From'] if entry['PortRange']['From'] else 1
to_port = entry['PortRange']['To'] if entry['PortRange']['To'] else 65535
acl['port_range'] = from_port if from_port == to_port else str(from_port) + '-' + str(to_port)
else:
acl['port_range'] = '1-65535'
acl_dict[acl.pop('RuleNumber')] = acl
return acl_dict
def parse_route_table(self, global_params, region, rt):
route_table = {}
vpc_id = rt['VpcId']
get_name(rt, route_table, 'VpcId') # TODO: change get_name to have src then dst
get_keys(rt, route_table, ['Routes', 'Associations', 'PropagatingVgws'])
# Save
manage_dictionary(self.vpcs, vpc_id, SingleVPCConfig(self.vpc_resource_types))
self.vpcs[vpc_id].route_tables[rt['RouteTableId']] = route_table
def parse_subnet(self, global_params, region, subnet):
"""
:param global_params:
:param region:
:param subnet:
:return:
"""
vpc_id = subnet['VpcId']
manage_dictionary(self.vpcs, vpc_id, SingleVPCConfig(self.vpc_resource_types))
subnet_id = subnet['SubnetId']
get_name(subnet, subnet, 'SubnetId')
subnet['flow_logs'] = []
# Save
manage_dictionary(self.vpcs, vpc_id, SingleVPCConfig(self.vpc_resource_types))
self.vpcs[vpc_id].subnets[subnet_id] = subnet
def parse_vpc(self, global_params, region_name, vpc):
"""
:param global_params:
:param region_name:
:param vpc:
:return:
"""
vpc_id = vpc['VpcId']
# Save
manage_dictionary(self.vpcs, vpc_id, SingleVPCConfig(self.vpc_resource_types))
self.vpcs[vpc_id].name = get_name(vpc, {}, 'VpcId')
def parse_vpn_connection(self, global_params, region_name, vpnc):
vpnc['id'] = vpnc.pop('VpnConnectionId')
self.vpn_connections[vpnc['id']] = vpnc
def parse_vpn_gateway(self, global_params, region_name, vpng):
vpng['id'] = vpng.pop('VpnGatewayId')
self.vpn_gateways[vpng['id']] = vpng
########################################
# VPCConfig
########################################
class VPCConfig(RegionalServiceConfig):
"""
VPC configuration for all AWS regions
"""
region_config_class = VPCRegionConfig
def __init__(self, service_metadata, thread_config):
super(VPCConfig, self).__init__(service_metadata, thread_config)
########################################
##### VPC analysis functions
########################################
#
# Add a display name for all known CIDRs
#
known_cidrs = {'0.0.0.0/0': 'All'}
def put_cidr_name(aws_config, current_config, path, current_path, resource_id, callback_args):
if 'cidrs' in current_config:
cidr_list = []
for cidr in current_config['cidrs']:
if type(cidr) == dict:
cidr = cidr['CIDR']
if cidr in known_cidrs:
cidr_name = known_cidrs[cidr]
else:
cidr_name = get_cidr_name(cidr, callback_args['ip_ranges'], callback_args['ip_ranges_name_key'])
known_cidrs[cidr] = cidr_name
cidr_list.append({'CIDR': cidr, 'CIDRName': cidr_name})
current_config['cidrs'] = cidr_list
#
# Read display name for CIDRs from ip-ranges files
#
aws_ip_ranges = {} # read_ip_ranges(aws_ip_ranges_filename, False)
def get_cidr_name(cidr, ip_ranges_files, ip_ranges_name_key):
for filename in ip_ranges_files:
ip_ranges = read_ip_ranges(filename, local_file = True)
for ip_range in ip_ranges:
ip_prefix = netaddr.IPNetwork(ip_range['ip_prefix'])
cidr = netaddr.IPNetwork(cidr)
if cidr in ip_prefix:
return ip_range[ip_ranges_name_key].strip()
for ip_range in aws_ip_ranges:
ip_prefix = netaddr.IPNetwork(ip_range['ip_prefix'])
cidr = netaddr.IPNetwork(cidr)
if cidr in ip_prefix:
return 'Unknown CIDR in %s %s' % (ip_range['service'], ip_range['region'])
return 'Unknown CIDR'
#
# Propagate VPC names in VPC-related services (info only fetched during EC2 calls)
#
def propagate_vpc_names(aws_config, current_config, path, current_path, resource_id, callback_args):
if resource_id == ec2_classic:
current_config['name'] = ec2_classic
else:
target_path = copy.deepcopy(current_path)
target_path[1] = 'ec2'
target_path.append(resource_id)
target_path.append('Name')
target_path = '.'.join(target_path)
current_config['name'] = get_value_at(aws_config, target_path, target_path)
|
alper/volunteer_planner
|
notifications/migrations/0004_auto_20151003_2033.py
|
Python
|
agpl-3.0
| 847 | 0.001181 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
        ('organizations', '0002_migrate_locations_to_facilities'),
('notifications', '0003_auto_20150912_2049'),
]
operations = [
migrations.AlterField(
model_name='notification',
name='location',
field=models.ForeignKey(verbose_name='facility', to='organizations.Facility'),
),
migrations.RenameField(
            model_name='notification',
old_name='location',
new_name='facility',
),
migrations.AlterField(
model_name='notification',
name='facility',
field=models.ForeignKey(to='organizations.Facility'),
),
]
|
dading/iphone_order
|
util.py
|
Python
|
apache-2.0
| 3,365 | 0.012184 |
#__author__ = 'hello'
# -*- coding: cp936 -*-
import re
import os
import random
import json
import string
import ctypes
from myexception import *
PATH = './img/'
dm2 = ctypes.WinDLL('./CrackCaptchaAPI.dll')
if not os.path.exists('./img'):
os.mkdir('./img')
def str_tr(content):
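    # Map each digit 0-9 to a fixed letter via a translation table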
instr = "0123456789"
outstr ="QAEDTGUJOL"
trantab = string.maketrans(instr,outstr)
return content.translate(trantab)
def getHid():
import wmi
m = wmi.WMI()
a = ''
b = ''
for cpu in m.Win32_Processor():
a = cpu.Processorid.strip()
for bd in m.Win32_BIOS():
b= bd.SerialNumber.strip()
return a+b
def getEightRandomString():
return ''.join(random.sample(string.ascii_letters,8))
def getCToken(content):
s = ''
pattern = re.compile('securityCToken = "([+-]?\d*)"')
match = pattern.search(content)
if match:
s = match.group(1)
return s
def GetCaptcha(content):
global PATH
filename = ''.join(random.sample(string.ascii_letters,8))
filename += '.jpg'
filename = PATH+filename
img = None
try:
img = open(filename,'wb')
img.write(content)
except IOError:
raise FileCanNotCreate('open file error')
    finally:
        if img:
img.close()
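    # Hand the saved captcha image to the external CrackCaptchaAPI DLL for recognition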
dm2.D2File.argtypes=[ctypes.c_char_p, ctypes.c_char_p, ctypes.c_char_p, ctypes.c_char_p, ctypes.c_short, ctypes.c_int, ctypes.c_char_p]
dm2.D2File.restype = ctypes.c_int
key = ctypes.c_char_p('fa6fd217145f273b59d7e72c1b63386e')
id = ctypes.c_long(54)
user = ctypes.c_char_p('test')
pas = ctypes.c_char_p('test')
timeout = ctypes.c_short(30)
result = ctypes.create_string_buffer('/0'*100)
ret = -1
ret = dm2.D2File(key,user, pas, filename,timeout,id,(result))
if ret > 0:
return result.value
elif ret == -101:
        raise D2FILE(u'Insufficient balance, recharge needed')
elif ret > -199:
raise D2FILE('user info error')
elif ret == -208:
raise D2FILE('software can not user')
elif ret == -210:
raise D2FILE('invalid user')
elif ret == -301:
raise D2FILE('can not find dll')
else:
        raise D2FILE(u'Recognition library error')
def GetTimeSlot(content,num):
try:
timeslot = json.loads(content)
slotLen = len(timeslot['timeSlots'])
if num < slotLen:
            return timeslot['timeSlots'][num]['startTime'], timeslot['timeSlots'][num]['timeslotID']
elif slotLen > 0:
            return timeslot['timeSlots'][slotLen-1]['startTime'], timeslot['timeSlots'][slotLen-1]['timeslotID']
except ValueError,e:
raise NoJsonData('')
def sendEmail(count):
import smtplib
from email.mime.text import MIMEText
from email.header import Header
smtpserver = 'smtp.163.com'
sender = 'sghcarbon@163.com'
receiver = 'sghcarbon@163.com'
    subject = u'Number of reservations'
user = 'sghcarbon'
pas = 'carbon216'
    content = getHid()+u'Number of reservations:'+str(count)
msg = MIMEText(content,'plain','utf-8')
msg['Subject'] = Header(subject,'utf-8')
msg['From'] = sender
msg['To'] = receiver
try:
send_smtp = smtplib.SMTP()
send_smtp.connect(smtpserver)
send_smtp.login(user,pas)
send_smtp.sendmail(sender,receiver,msg.as_string())
send_smtp.close()
print 'ok'
except:
print 'error'
|
zqfan/leetcode
|
algorithms/576. Out of Boundary Paths/solution.py
|
Python
|
gpl-3.0
| 918 | 0 |
import collections


class Solution(object):
def findPaths(self, m, n, N, i, j):
"""
:type m: int
:type n: int
:type N: int
:type i: int
:type j: int
:rtype: int
"""
        MOD = 1000000007
paths = 0
cur = {(i, j): 1}
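        # cur maps each in-bounds cell to the number of paths reaching it so far;
        # stepping out of bounds moves those paths into the answer.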
for i in xrange(N):
next = collections.defaultdict(int)
for (x, y), cnt in cur.iteritems():
for dx, dy in [[-1, 0], [0, 1], [1, 0], [0, -1]]:
nx = x + dx
ny = y + dy
if nx < 0 or ny < 0 or nx >= m or ny >= n:
paths += cnt
paths %= MOD
else:
                        next[(nx, ny)] += cnt
next[(nx, ny)] %= MOD
cur = next
return paths
# 94 / 94 test cases passed.
# Status: Accepted
# Runtime: 232 ms
# beats 75.36 %
|
rcucui/Pisa-util-fix
|
sx/pisa3/pisa_util.py
|
Python
|
apache-2.0
| 26,330 | 0.006077 |
# -*- coding: ISO-8859-1 -*-
# Copyright 2010 Dirk Holtwick, holtwick.it
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__reversion__ = "$Revision: 20 $"
__author__ = "$Author: holtwick $"
__date__ = "$Date: 2007-10-09 12:58:24 +0200 (Di, 09 Okt 2007) $"
from reportlab.lib.units import inch, cm
from reportlab.lib.styles import *
from reportlab.lib.enums import *
from reportlab.lib.colors import *
from reportlab.lib.pagesizes import *
from reportlab.pdfbase import pdfmetrics
# from reportlab.platypus import *
# from reportlab.platypus.flowables import Flowable
# from reportlab.platypus.tableofcontents import TableOfContents
# from reportlab.platypus.para import Para, PageNumberObject, UNDERLINE, HotLink
import reportlab
import copy
import types
import os
import os.path
import pprint
import sys
import string
import re
import base64
import urlparse
import mimetypes
import urllib2
import urllib
import httplib
import tempfile
import shutil
rgb_re = re.compile("^.*?rgb[(]([0-9]+).*?([0-9]+).*?([0-9]+)[)].*?[ ]*$")
_reportlab_version = tuple(map(int, reportlab.Version.split('.')))
if _reportlab_version < (2,1):
    raise ImportError("Reportlab Version 2.1+ is needed!")
REPORTLAB22 = _reportlab_version >= (2, 2)
#if not(reportlab.Version[0] == "2" and reportlab.Version[2] >= "1"):
# raise ImportError("Reportlab Version 2.1+ is needed!")
#
#REPORTLAB22 = (reportlab.Version[0] == "2" and reportlab.Version[2] >= "2")
# print "***", reportlab.Version, REPORTLAB22, reportlab.__file__
import logging
log = logging.getLogger("ho.pisa")
try:
import cStringIO as StringIO
except:
import StringIO
try:
import pyPdf
except:
pyPdf = None
try:
from reportlab.graphics import renderPM
except:
renderPM = None
try:
from reportlab.graphics import renderSVG
except:
renderSVG = None
def ErrorMsg():
"""
Helper to get a nice traceback as string
"""
import traceback, sys, cgi
type = value = tb = limit = None
type, value, tb = sys.exc_info()
list = traceback.format_tb(tb, limit) + traceback.format_exception_only(type, value)
return "Traceback (innermost last):\n" + "%-20s %s" % (
string.join(list[: - 1], ""),
list[ - 1])
def toList(value):
if type(value) not in (types.ListType, types.TupleType):
return [value]
return list(value)
def flatten(x):
"""flatten(sequence) -> list
copied from http://kogs-www.informatik.uni-hamburg.de/~meine/python_tricks
Returns a single, flat list which contains all elements retrieved
from the sequence and all recursively contained sub-sequences
(iterables).
Examples:
>>> [1, 2, [3,4], (5,6)]
[1, 2, [3, 4], (5, 6)]
>>> flatten([[[1,2,3], (42,None)], [4,5], [6], 7, MyVector(8,9,10)])
[1, 2, 3, 42, None, 4, 5, 6, 7, 8, 9, 10]"""
result = []
for el in x:
#if isinstance(el, (list, tuple)):
if hasattr(el, "__iter__") and not isinstance(el, basestring):
result.extend(flatten(el))
else:
result.append(el)
return result
def _toColor(arg, default=None):
'''try to map an arbitrary arg to a color instance'''
if isinstance(arg, Color): return arg
tArg = type(arg)
if tArg in (types.ListType, types.TupleType):
assert 3 <= len(arg) <= 4, 'Can only convert 3 and 4 sequences to color'
assert 0 <= min(arg) and max(arg) <= 1
return len(arg) == 3 and Color(arg[0], arg[1], arg[2]) or CMYKColor(arg[0], arg[1], arg[2], arg[3])
elif tArg == types.StringType:
C = getAllNamedColors()
s = arg.lower()
if C.has_key(s): return C[s]
try:
return toColor(eval(arg))
except:
pass
try:
return HexColor(arg)
except:
if default is None:
raise ValueError('Invalid color value %r' % arg)
return default
def getColor(value, default=None):
" Convert to color value "
try:
original = value
if isinstance(value, Color):
return value
value = str(value).strip().lower()
if value == "transparent" or value == "none":
return default
if value in COLOR_BY_NAME:
return COLOR_BY_NAME[value]
if value.startswith("#") and len(value) == 4:
value = "#" + value[1] + value[1] + value[2] + value[2] + value[3] + value[3]
elif rgb_re.search(value):
# e.g., value = "<css function: rgb(153, 51, 153)>", go figure:
r, g, b = [int(x) for x in rgb_re.search(value).groups()]
value = "#%02x%02x%02x" % (r, g, b)
else:
# Shrug
pass
# XXX Throws illegal in 2.1 e.g. toColor('none'),
# therefore we have a workaround here
return _toColor(value)
except ValueError, e:
log.warn("Unknown color %r", original)
return default
def getBorderStyle(value, default=None):
# log.debug(value)
if value and (str(value).lower() not in ("none", "hidden")):
return value
return default
mm = cm / 10.0
dpi96 = (1.0 / 96.0 * inch)
_absoluteSizeTable = {
"1": 50.0 / 100.0,
"xx-small": 50.0 / 100.0,
"x-small": 50.0 / 100.0,
"2": 75.0 / 100.0,
"small": 75.0 / 100.0,
"3": 100.0 / 100.0,
"medium": 100.0 / 100.0,
"4": 125.0 / 100.0,
"large": 125.0 / 100.0,
"5": 150.0 / 100.0,
"x-large": 150.0 / 100.0,
"6": 175.0 / 100.0,
"xx-large": 175.0 / 100.0,
"7": 200.0 / 100.0,
"xxx-large": 200.0 / 100.0,
#"xx-small" : 3./5.,
#"x-small": 3./4.,
#"small": 8./9.,
#"medium": 1./1.,
#"large": 6./5.,
#"x-large": 3./2.,
#"xx-large": 2./1.,
#"xxx-large": 3./1.,
}
_relativeSizeTable = {
"larger": 1.25,
"smaller": 0.75,
"+4": 200.0 / 100.0,
"+3": 175.0 / 100.0,
"+2": 150.0 / 100.0,
"+1": 125.0 / 100.0,
"-1": 75.0 / 100.0,
"-2": 50.0 / 100.0,
"-3": 25.0 / 100.0,
}
MIN_FONT_SIZE = 1.0
def getSize(value, relative=0, base=None, default=0.0):
"""
Converts strings to standard sizes
"""
try:
original = value
if value is None:
return relative
elif type(value) is types.FloatType:
return value
elif type(value) is types.IntType:
return float(value)
elif type(value) in (types.TupleType, types.ListType):
value = "".join(value)
value = str(value).strip().lower().replace(",", ".")
if value[ - 2:] == 'cm':
return float(value[: - 2].strip()) * cm
elif value[ - 2:] == 'mm':
return (float(value[: - 2].strip()) * mm) # 1mm = 0.1cm
elif value[ - 2:] == 'in':
return float(value[: - 2].strip()) * inch # 1pt == 1/72inch
elif value[ - 2:] == 'inch':
return float(value[: - 4].strip()) * inch # 1pt == 1/72inch
elif value[ - 2:] == 'pt':
return float(value[: - 2].strip())
elif value[ - 2:] == 'pc':
return float(value[: - 2].strip()) * 12.0 # 1pc == 12pt
elif value[ - 2:] == 'px':
return float(value[: - 2].strip()) * dpi96 # XXX W3C says, use 96pdi http://www.w3.org/TR/CSS21/syndata.html#length-units
elif value[ - 1:] == 'i': # 1pt == 1/72inch
return float(value[: - 1].strip()) * inch
elif value in ("none", "0", "auto"):
return 0.0
elif relative:
if value[ - 2:] == 'em': # XXX
return (float(value[: - 2].strip()) * relative) # 1em = 1 * fontSize
elif value[ - 2:] == 'ex': # XXX
return (float(value[: - 2].strip()) *
|
bigfootproject/OSMEF
|
data_processing/aggregate_old.py
|
Python
|
apache-2.0
| 4,348 | 0.00483 |
#!/usr/bin/python
import sys, os, re
import json
import argparse
import pprint
arg_parser = argparse.ArgumentParser(description='Define tests')
arg_parser.add_argument('-p', '--pretty-print', action="store_true", help="select human friendly output, default is CSV")
arg_parser.add_argument('-i', '--info', action="store_true", help="show info about the data available in the specified directory")
arg_parser.add_argument('-k', '--show-keys', action="store_true", help="show available keys")
arg_parser.add_argument('-a', '--all-connections', action="store_true", help="extract results for all connections")
arg_parser.add_argument('-c', '--concurrent', default=0, help="filter results with specified concurrency", type=int)
arg_parser.add_argument('in_dir', help="Input directory contatining JSON files")
arg_parser.add_argument('keys', nargs=argparse.REMAINDER, help="keys to extract")
args = arg_parser.parse_args()
def load_json(fname):
return json.load(open(fname, "r"))
def load_all(src_dir):
data = {}
file_list = os.listdir(src_dir)
for f in file_list:
if not os.path.splitext(f)[1] == ".json":
continue
fp = os.path.join(src_dir, f)
try:
data[f] = load_json(fp)
except ValueError:
print("Skipping corrupted file: %s" % f)
continue
return data
def dotkey(tree_root, dotted_key):
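    # Walk the nested dict, following each component of the dot-separated key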
dotted_key = dotted_key.split(".")
value = tree_root
for key in dotted_key:
value = value[key]
return value
def get_keys(f):
keys = []
t = data[f]
unvisited = list(t.keys())
while len(unvisited) > 0:
k = unvisited.pop()
child = dotkey(t, k)
if type(child) != dict:
keys.append(k)
else:
for kname in child.keys():
unvisited.append(k+"."+kname)
return keys
#        unvisited += t[k]
# values = []
# k = key.split(".")
# for d in data:
# values.append(get_value(d, k))
# return values
def print_csv_header(columns):
out = "measurement"
for title in columns:
out += ", " + title
print(out)
def get_values_measurement(tree, keys):
out = []
for key in keys:
try:
out.append(dotkey(tree, key))
except KeyError:
out.append("N/A")
return out
def print_values(measure, values):
if args.pretty_print:
print("Measure: %s" % measure)
for v in values:
print("\t%s" % (v,))
else:
s = measure
for v in values:
s += "," + str(v)
print(s)
def expand_keys(template_measure):
"""For each key that contains conn_N will add all other conn_* keys with the
same suffix"""
new_keys = args.keys[:]
all_keys = get_keys(template_measure)
for ukey in args.keys:
match = re.search(r"conn_[0-9]+\.", ukey)
if match:
suffix = ukey[match.end():]
new_keys.remove(ukey)
for skey in all_keys:
if re.search(suffix+"$", skey):
new_keys.append(skey)
return new_keys
def filter_measures(data, concurrent):
"""Return a filtered data dictionary containing only the selected concurrent number"""
measures = list(data.keys())
for measure in measures:
conc = get_values_measurement(data[measure], ["concurrent"])[0]
if conc != concurrent:
del data[measure]
return data
data = load_all(args.in_dir)
if args.info:
descrs = get_all_values("name")
print("These measurements are available:")
for d in sorted(descrs, key=lambda x: int(x.split("_")[0])):
print(d, ":", descrs[d][0])
sys.exit(0)
if args.show_keys:
f = sorted(data.keys())[-1]
print("Reading keys from file %s" % f)
ks = get_keys(f)
for k in sorted(ks):
print(k)
sys.exit(0)
if args.all_connections and args.concurrent == 0:
print("Error: -a requires -c")
sys.exit(1)
if args.concurrent != 0:
data = filter_measures(data, args.concurrent)
if args.all_connections:
new_keys = expand_keys(list(data.keys())[0])
else:
new_keys = args.keys[:]
if not args.pretty_print:
print_csv_header(new_keys)
for measure in data.keys():
values = get_values_measurement(data[measure], new_keys)
print_values(measure, values)
|
Dahlgren/HTPC-Manager
|
htpc/updater.py
|
Python
|
mit
| 21,554 | 0.002366 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Update HTPC Manager from Github. Either through git command or tarball.
Updater and SourceUpdater written by styxit
https://github.com/styxit
Git updater written by mbw2001
https://github.com/mbw2001
Used as reference:
- https://github.com/mrkipling/maraschino
- https://github.com/midgetspy/Sick-Beard/
"""
import os
from threading import Thread
import urllib2
import subprocess
import re
from json import loads
import cherrypy
import htpc
import logging
import tarfile
import shutil
import platform
from apscheduler.triggers.interval import IntervalTrigger
from htpc.root import do_restart
# configure git repo
gitUser = 'Hellowlol'
gitRepo = 'HTPC-Manager'
class Updater(object):
""" Main class """
def __init__(self):
self.logger = logging.getLogger('htpc.updater')
self.updateEngineName = 'Unknown'
# Set update engine. Use git updater or update from source.
self.updateEngine = self.getEngine()
# Check for updates automatically
htpc.SCHED.add_job(self.update_needed, trigger=IntervalTrigger(hours=6))
""" Determine the update method """
def getEngine(self):
self.logger.debug("Selecting Update engine.")
gitDir = os.path.normcase(os.path.join(htpc.RUNDIR, '.git'))
validGitDir = os.path.isdir(gitDir)
# If valid Git dir and git command succeeded, use Git updater
if validGitDir and self.test_git():
self.logger.info('Using GitUpdater engine')
self.updateEngineName = 'Git'
return GitUpdater()
else: # Otherwise update from Sourece
self.logger.info('Using SourceUpdater engine')
self.updateEngineName = 'Source'
return SourceUpdater()
def test_git(self):
self.logger.debug("Checking if git is installed")
gp = htpc.settings.get('git_path', 'git')
alternative_gp = []
        # osx people who start htpc-manager from launchd have a broken path, so try a hail-mary attempt for them
if platform.system().lower() == 'darwin':
alternative_gp.append('/usr/local/git/bin/git')
if platform.system().lower() == 'windows':
if gp != gp.lower():
alternative_gp.append(gp.lower())
        # Comment out the line below to test the source updater
# alternative_gp += ["%USERPROFILE%\AppData\Local\GitHub\PORTAB~1\bin\git.exe", "C:\Program Files (x86)\Git\bin\git.exe"]
# Returns a empty string if failed
output = GitUpdater().git_exec(gp, 'version')
if output:
# Found a working git path.
self.logger.debug("Found git path %s" % gp)
htpc.settings.set('git_path', gp)
return True
if alternative_gp and not output:
self.logger.debug("Checking for alternate git location")
for current_gp in alternative_gp:
self.logger.debug("Testing git path %s" % current_gp)
output = GitUpdater().git_exec(current_gp, 'version')
if output:
self.logger.debug("Found git path %s and it works!" % current_gp)
self.logger.debug("Saving git path %s to settings" % current_gp)
htpc.settings.set('git_path', current_gp)
return True
return False
@cherrypy.expose()
@cherrypy.tools.json_out()
def index(self, force=False):
""" Update on POST. Check for new updates on GET. """
if cherrypy.request.method.upper() == 'POST':
Thread(target=self.updateEngine.update).start()
return 1
if cherrypy.request.method.upper() == 'POST' and force:
self.check_update()
Thread(target=self.updateEngine.update).start()
return 1
else:
return self.check_update()
@cherrypy.expose()
@cherrypy.tools.json_out()
def updatenow(self):
Thread(target=self.updateEngine.update).start()
@cherrypy.expose()
@cherrypy.tools.json_out()
def status(self):
""" method to determine if HTPC Manager is currently updating """
return self.updateEngine.UPDATING
def check_update(self):
"""
Check for updates
Returns dict() with the following indexes:
UpdateNeeded True if an update is needed, False if an update is not needed OR not possible
latestVersion Commit hash of the most recent commit
currentVersion Commit hash for the version currently in use
versionsBehind How many versions is the current version behind the latest version
"""
output = {'updateNeeded': True, 'latestVersion': 'Unknown', 'currentVersion': 'Unknown', 'versionsBehind': 'Unknown'}
self.logger.info("Checking for updates from %s." % self.updateEngineName)
# Get current and latest version
# current can return True, False, Unknown, and SHA
current = self.updateEngine.current()
htpc.CURRENT_HASH = current
# Can return True, False
latest = self.updateEngine.latest()
htpc.LATEST_HASH = latest
self.logger.debug("Latest commit is %s" % latest)
self.logger.debug("Current commit is %s" % current)
if latest is False:
self.logger.error("Failed to determine the latest version for HTPC Manager.")
else:
output['latestVersion'] = latest
if current is False:
self.logger.error("Failed to determine the current version for HTPC Manager.")
else:
output['currentVersion'] = current
# If current or latest failed, updating is not possible
if current is False or latest is False:
self.logger.debug("Cancel update.")
output['updateNeeded'] = False
return output
# If HTPC Manager is up to date, updating is not needed
if current == latest and current != "Unknown":
self.logger.info("HTPC Manager is Up-To-Date.")
output['versionsBehind'] = 0
htpc.COMMITS_BEHIND = 0
output['updateNeeded'] = False
else:
behind = self.behind_by(current, latest)
htpc.COMMITS_BEHIND = behind
output['versionsBehind'] = behind
self.logger.info("Currently " + str(output['versionsBehi
|
nd']) + " commits behind.")
return output
def behind_by(self, current, latest):
""" Check how many commits between current and latest """
self.logger.debug('Checking how far behind latest')
try:
url = 'https://api.github.com/repos/%s/%s/compare/%s...%s' % (gitUser, gitRepo, current, latest)
result = loads(urllib2.urlopen(url).read())
behind = int(result['total_commits'])
self.logger.debug('Behind: ' + str(behind))
return behind
except Exception, e:
self.logger.error(str(e))
self.logger.error('Could not determine how far behind')
return 'Unknown'
@cherrypy.expose()
@cherrypy.tools.json_out()
def branches(self):
return self.updateEngine.branches()
def update_needed(self):
self.logger.info("Running update_needed")
update_avail = self.check_update()
# returns true or false
if update_avail.get("updateNeeded"):
if htpc.settings.get('app_check_for_updates', False):
self.logger.debug("Add update footer")
# Used for the notification footer
htpc.UPDATE_AVAIL = True
else:
htpc.UPDATE_AVAIL = False
# Since im stupid, protect me please.. srsly its for myself.
if htpc.UPDATE_AVAIL and htpc.settings.get("app_auto_update", False) and not htpc.DEBUG:
self.logger.debug("Auto updating now!")
Thread(target=self.updateEngine.update).start()
class GitUpdater():
""" Class to update HTPC Manager using git commands. """
def __init__(self):
""" Set GitHub settings on load """
self.UPDATING = 0
se
|
neuroticnerd/django-demo-app
|
django_demo/accounts/views.py
|
Python
|
mit
| 1,886 | 0 |
from django.conf import settings
from django.contrib import auth
from django.contrib.auth.forms import AuthenticationForm
from django.utils.decorators import method_decorator
from django.utils.http import is_safe_url
from django.views.decorators.debug import sensitive_post_parameters
from django.views import generic
from django.views.decorators.cache import never_cache
from django.views.decorators.csrf import csrf_protect
class LoginView(generic.FormView):
    success_url = settings.LOGIN_REDIRECT_URL
    form_class = AuthenticationForm
redirect_param = getattr(settings, 'REDIRECT_FIELD_NAME', 'next')
template_name = 'accounts/login.html'
@method_decorator(sensitive_post_parameters('password'))
@method_decorator(csrf_protect)
@method_decorator(never_cache)
def dispatch(self, request, *args, **kwargs):
request.session.set_test_cookie()
return super(LoginView, self).dispatch(request, *args, **kwargs)
def form_valid(self, form):
auth.login(self.request, form.get_user())
if self.request.session.test_cookie_worked():
self.request.session.delete_test_cookie()
return super(LoginView, self).form_valid(form)
def form_invalid(self, form):
return self.render_to_response(self.get_context_data(form=form))
def get_success_url(self):
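        # Only follow the ?next= redirect when it points at this host; otherwise use the default success URL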
redirect_to = self.request.GET.get(self.redirect_param)
if not is_safe_url(url=redirect_to, host=self.request.get_host()):
redirect_to = self.success_url
return redirect_to
class LogoutView(generic.RedirectView):
permanent = False
pattern_name = 'main:landing'
def get(self, request, *args, **kwargs):
auth.logout(request)
return super(LogoutView, self).get(request, *args, **kwargs)
class ProfileView(generic.TemplateView):
template_name = 'accounts/profile_detail.html'
|
lcy0321/pbaadhcpserver
|
pbaadhcpserver.py
|
Python
|
gpl-3.0
| 15,974 | 0.00144 |
#!/usr/bin/python2
# -*- encoding: utf-8 -*-
#pylint: disable=W0105
import argparse
import logging
import configparser
import requests
from libpydhcpserver.dhcp import DHCPServer
class PBAADHCPServer(DHCPServer):
def __init__(
self, server_address, server_port, client_port, aaserver_addr,
proxy_port=None, response_interface=None,
response_interface_qtags=None
):
self._aaserver_addr = aaserver_addr
DHCPServer.__init__(
self,
server_address=server_address,
server_port=server_port,
client_port=client_port,
proxy_port=proxy_port,
response_interface=response_interface,
            response_interface_qtags=response_interface_qtags
)
def _handleDHCPDecline(self, packet, source_address, port):
"""Processes a DECLINE packet.
Override from DHCPServer.
Send the packet's info to the AA server.
Args:
packet (dhcp_types.packet.DHCPPacket):
The packet to be processed.
source_address (dhcp.Address):
The address from which the request was received.
port (int):
The port on which the packet was received.
"""
        logging.info('received DHCPDECLINE from: %s:%s',
source_address.ip, source_address.port)
logging.debug('\n%s\n', packet)
self._get_client_options(
'DHCP_DECLINE', self._get_packet_info(packet))
def _handleDHCPDiscover(self, packet, source_address, port):
"""Processes a DISCOVER packet.
Override from DHCPServer.
Send the packet's info to the AA server, get the response,
and send it back to the client.
Args:
packet (dhcp_types.packet.DHCPPacket):
The packet to be processed.
source_address (dhcp.Address):
The address from which the request was received.
port (int):
The port on which the packet was received.
"""
        logging.info('received DHCPDISCOVER from: %s:%s',
source_address.ip, source_address.port)
logging.debug('\n%s\n', packet)
[msg_type, options] = self._get_client_options(
'DHCP_DISCOVER', self._get_packet_info(packet))
self._send_dhcp_msg(packet, msg_type, options, source_address, port)
def _handleDHCPInform(self, packet, source_address, port):
"""Processes a INFORM packet.
Override from DHCPServer.
Send the packet's info to the AA server, get the response,
and send it back to the client.
Args:
packet (dhcp_types.packet.DHCPPacket):
The packet to be processed.
source_address (dhcp.Address):
The address from which the request was received.
port (int):
The port on which the packet was received.
"""
        logging.info('received DHCPINFORM from: %s:%s',
source_address.ip, source_address.port)
logging.debug('\n%s\n', packet)
[msg_type, options] = self._get_client_options(
'DHCP_INFORM', self._get_packet_info(packet))
self._send_dhcp_msg(packet, msg_type, options, source_address, port)
def _handleDHCPLeaseQuery(self, packet, source_address, port):
"""Processes a LEASEQUERY packet.
Override from DHCPServer.
Ignore the packet.
Args:
packet (dhcp_types.packet.DHCPPacket):
The packet to be processed.
source_address (dhcp.Address):
The address from which the request was received.
port (int):
The port on which the packet was received.
"""
        logging.info('received DHCPLEASEQUERY from: %s:%s',
source_address.ip, source_address.port)
logging.debug('\n%s\n', packet)
def _handleDHCPRelease(self, packet, source_address, port):
"""Processes a RELEASE packet.
Override from DHCPServer.
Send the packet's info to the AA server.
Args:
packet (dhcp_types.packet.DHCPPacket):
The packet to be processed.
source_address (dhcp.Address):
The address from which the request was received.
port (int):
The port on which the packet was received.
"""
        logging.info('received DHCPRELEASE from: %s:%s',
source_address.ip, source_address.port)
logging.debug('\n%s\n', packet)
self._get_client_options(
'DHCP_RELEASE', self._get_packet_info(packet))
def _handleDHCPRequest(self, packet, source_address, port):
"""Processes a REQUEST packet.
Override from DHCPServer.
Send the packet's info to the AA server, get the response,
and send it back to the client.
Args:
packet (dhcp_types.packet.DHCPPacket):
The packet to be processed.
source_address (dhcp.Address):
The address from which the request was received.
port (int):
The port on which the packet was received.
"""
        logging.info('received DHCPREQUEST from: %s:%s',
source_address.ip, source_address.port)
logging.debug('\n%s\n', packet)
[msg_type, options] = self._get_client_options(
'DHCP_REQUEST', self._get_packet_info(packet))
self._send_dhcp_msg(packet, msg_type, options, source_address, port)
def get_next_dhcp_packet(self, timeout=60, packet_buffer=2048):
"""Make the _getNextDHCPPacket method public."""
return self._getNextDHCPPacket(timeout, packet_buffer)
def _send_dhcp_msg(
self, packet, msg_type, options, source_address, port
):
"""Send the DHCP packet to the client.
Set the options of the DHCP packet, and send it to the client.
Args:
packet (dhcp_types.packet.DHCPPacket):
The packet to send to the client.
msg_type (str):
The message type of the DHCP packet.
options (dict):
The options of the DHCP packet.
source_address (dhcp.Address):
The address from which the request was received.
port (int):
The port on which the packet was received.
"""
if msg_type is None:
logging.warning('Ignore a packet: message type is None.')
return
for option, value in options.items():
packet.setOption(option, value)
# packet.setOption('server_identifier', self._server_address)
if msg_type == 'DHCP_OFFER':
packet.transformToDHCPOfferPacket()
elif msg_type == 'DHCP_ACK':
packet.transformToDHCPAckPacket()
elif msg_type == 'DHCP_NAK':
packet.transformToDHCPNakPacket()
else:
logging.warning('Incorrect message type: %s.', msg_type)
logging.warning('Packet will be ignored.')
return
logging.info('send %s to %s:%s',
msg_type, source_address.ip, port)
logging.debug('\n%s\n', packet)
self._sendDHCPPacket(packet, source_address, port)
@staticmethod
def _get_packet_info(packet):
"""Return a dict of the infomation of the DHCP packet.
Fetch the information from the DHCP packet, and return them
in dict type.
Args:
packet (dhcp_types.packet.DHCPPacket):
The packet to be processed.
Returns:
dict: The information of the packet.
"""
info = {}
# fetch the information from these fields
for field_name in [
'op', 'hty
|
danithaca/berrypicking
|
python/excercise/march31.py
|
Python
|
gpl-2.0
| 644 | 0.007764 |
def find_number(x):
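    # Returns the smallest number greater than x that uses the same digits
    # (next lexicographic permutation); assumes no repeated digits.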
str_x = str(x)
if len(str_x) == 1:
raise Exception()
    left_most = str_x[0]
try:
small_from_rest = find_number(int(str_x[1:]))
return int(left_most + str(small_from_rest))
except:
        # min() will throw exception if parameter is empty list, meaning no digit is greater than the left_most digit.
new_left_most = min([c for c in str_x[1:] if c > left_most])
# assumption: no repeated digit
rest_of_digits = ''.join(sorted([c for c in str_x if c != new_left_most]))
y = new_left_most + rest_of_digits
return int(y)
print(find_number(5346))
|
Sophist-UK/Sophist_picard
|
test/formats/test_asf.py
|
Python
|
gpl-2.0
| 6,957 | 0.00115 |
# -*- coding: utf-8 -*-
#
# Picard, the next-generation MusicBrainz tagger
#
# Copyright (C) 2019-2020 Philipp Wolfer
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
from mutagen.asf import ASFByteArrayAttribute
from test.picardtestcase import (
PicardTestCase,
create_fake_png,
)
from picard.formats import (
asf,
ext_to_format,
)
from .common import (
CommonTests,
load_metadata,
load_raw,
save_metadata,
save_raw,
skipUnlessTestfile,
)
from .coverart import CommonCoverArtTests
# prevent unittest to run tests in those classes
class CommonAsfTests:
class AsfTestCase(CommonTests.TagFormatsTestCase):
def test_supports_tag(self):
fmt = ext_to_format(self.testfile_ext[1:])
            self.assertTrue(fmt.supports_tag('copyright'))
self.assertTrue(fmt.supports_tag('compilation'))
            self.assertTrue(fmt.supports_tag('bpm'))
self.assertTrue(fmt.supports_tag('djmixer'))
self.assertTrue(fmt.supports_tag('discnumber'))
self.assertTrue(fmt.supports_tag('lyrics:lead'))
self.assertTrue(fmt.supports_tag('~length'))
for tag in self.replaygain_tags.keys():
self.assertTrue(fmt.supports_tag(tag))
@skipUnlessTestfile
def test_ci_tags_preserve_case(self):
# Ensure values are not duplicated on repeated save and are saved
# case preserving.
tags = {
'Replaygain_Album_Peak': '-6.48 dB'
}
save_raw(self.filename, tags)
loaded_metadata = load_metadata(self.filename)
loaded_metadata['replaygain_album_peak'] = '1.0'
save_metadata(self.filename, loaded_metadata)
raw_metadata = load_raw(self.filename)
self.assertIn('Replaygain_Album_Peak', raw_metadata)
self.assertEqual(raw_metadata['Replaygain_Album_Peak'][0], loaded_metadata['replaygain_album_peak'])
self.assertEqual(1, len(raw_metadata['Replaygain_Album_Peak']))
self.assertNotIn('REPLAYGAIN_ALBUM_PEAK', raw_metadata)
def _test_invalid_picture(self, invalid_picture_data):
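            # Store one unparseable picture alongside a valid PNG and expect only the PNG to be loaded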
png_data = create_fake_png(b'x')
tags = {
'WM/Picture': [
ASFByteArrayAttribute(invalid_picture_data),
ASFByteArrayAttribute(
asf.pack_image("image/png", png_data)
)
]
}
save_raw(self.filename, tags)
metadata = load_metadata(self.filename)
self.assertEqual(1, len(metadata.images))
self.assertEqual(png_data, metadata.images[0].data)
@skipUnlessTestfile
def test_ignore_invalid_wm_picture(self):
# A picture that cannot be unpacked
self._test_invalid_picture(b'notapicture')
class ASFTest(CommonAsfTests.AsfTestCase):
testfile = 'test.asf'
supports_ratings = True
expected_info = {
'length': 92,
'~channels': '2',
'~sample_rate': '44100',
'~bitrate': '128.0',
}
class WMATest(CommonAsfTests.AsfTestCase):
testfile = 'test.wma'
supports_ratings = True
expected_info = {
'length': 139,
'~channels': '2',
'~sample_rate': '44100',
'~bitrate': '64.0',
}
unexpected_info = ['~video']
class WMVTest(CommonAsfTests.AsfTestCase):
testfile = 'test.wmv'
supports_ratings = True
expected_info = {
'length': 565,
'~channels': '2',
'~sample_rate': '44100',
'~bitrate': '128.0',
'~video': '1',
}
class AsfUtilTest(PicardTestCase):
test_cases = [
# Empty MIME, description and data
(('', b'', 2, ''), b'\x02\x00\x00\x00\x00\x00\x00\x00\x00'),
# MIME, description set, 1 byte data
(('M', b'x', 2, 'D'), b'\x02\x01\x00\x00\x00M\x00\x00\x00D\x00\x00\x00x'),
# Empty MIME and description, 3 byte data
(('', b'abc', 0, ''), b'\x00\x03\x00\x00\x00\x00\x00\x00\x00abc'),
]
def test_pack_and_unpack_image(self):
mime = 'image/png'
image_data = create_fake_png(b'x')
image_type = 4
description = 'testing'
tag_data = asf.pack_image(mime, image_data, image_type, description)
expected_length = 5 + 2 * len(mime) + 2 + 2 * len(description) + 2 + len(image_data)
self.assertEqual(tag_data[0], image_type)
self.assertEqual(len(tag_data), expected_length)
self.assertEqual(image_data, tag_data[-len(image_data):])
unpacked = asf.unpack_image(tag_data)
self.assertEqual(mime, unpacked[0])
self.assertEqual(image_data, unpacked[1])
self.assertEqual(image_type, unpacked[2])
self.assertEqual(description, unpacked[3])
def test_pack_image(self):
for args, expected in self.test_cases:
self.assertEqual(expected, asf.pack_image(*args))
def test_unpack_image(self):
for expected, packed in self.test_cases:
self.assertEqual(expected, asf.unpack_image(packed))
def test_unpack_image_value_errors(self):
self.assertRaisesRegex(ValueError, "unpack_from requires a buffer of at least 5 bytes",
asf.unpack_image, b'')
self.assertRaisesRegex(ValueError, "unpack_from requires a buffer of at least 5 bytes",
asf.unpack_image, b'\x02\x01\x00\x00')
self.assertRaisesRegex(ValueError, "mime: missing data",
asf.unpack_image, b'\x00\x00\x00\x00\x00')
self.assertRaisesRegex(ValueError, "mime: missing data",
asf.unpack_image, b'\x04\x19\x00\x00\x00a\x00')
self.assertRaisesRegex(ValueError, "desc: missing data",
asf.unpack_image, b'\x04\x19\x00\x00\x00a\x00\x00\x00a\x00')
self.assertRaisesRegex(ValueError, "image data size mismatch",
asf.unpack_image, b'\x04\x19\x00\x00\x00a\x00\x00\x00a\x00\x00\x00x')
class AsfCoverArtTest(CommonCoverArtTests.CoverArtTestCase):
testfile = 'test.asf'
class WmaCoverArtTest(CommonCoverArtTests.CoverArtTestCase):
testfile = 'test.wma'
ttfseiko/openerp-trunk | openerp/addons/account_asset/account_asset.py | Python | agpl-3.0 | 29,177 | 0.008568
# -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from datetime import datetime
from dateutil.relativedelta import relativedelta
from openerp.osv import fields, osv
import openerp.addons.decimal_precision as dp
from openerp.tools.translate import _
class account_asset_category(osv.osv):
_name = 'account.asset.category'
_description = 'Asset category'
_columns = {
'name': fields.char('Name', size=64, required=True, select=1),
'note': fields.text('Note'),
'account_analytic_id': fields.many2one('account.analytic.account', 'Analytic account'),
'account_asset_id': fields.many2one('account.account', 'Asset Account', required=True, domain=[('type','=','other')]),
'account_depreciation_id': fields.many2one('account.account', 'Depreciation Account', required=True, domain=[('type','=','other')]),
'account_expense_depreciation_id': fields.many2one('account.account', 'Depr. Expense Account', required=True, domain=[('type','=','other')]),
'journal_id': fields.many2one('account.journal', 'Journal', required=True),
'company_id': fields.many2one('res.company', 'Company', required=True),
'method': fields.selection([('linear','Linear'),('degressive','Degressive')], 'Computation Method', required=True, help="Choose the method to use to compute the amount of depreciation lines.\n"\
" * Linear: Calculated on basis of: Gross Value / Number of Depreciations\n" \
" * Degressive: Calculated on basis of: Residual Value * Degressive Factor"),
'method_number': fields.integer('Number of Depreciations', help="The number of depreciations needed to depreciate your asset"),
'method_period': fields.integer('Period Length', help="State here the time between 2 depreciations, in months", required=True),
'method_progress_factor': fields.float('Degressive Factor'),
'method_time': fields.selection([('number','Number of Depreciations'),('end','Ending Date')], 'Time Method', required=True,
help="Choose the method to use to compute the dates and number of depreciation lines.\n"\
" * Number of Depreciations: Fix the number of depreciation lines and the time between 2 depreciations.\n" \
" * Ending Date: Choose the time between 2 depreciations and the date the depreciations won't go beyond."),
'method_end': fields.date('Ending date'),
'prorata':fields.boolean('Prorata Temporis', help='Indicates that the first depreciation entry for this asset have to be done from the purchase date instead of the first January'),
'open_asset': fields.boolean('Skip Draft State', help="Check this if you want to automatically confirm the assets of this category when created by invoices."),
}
_defaults = {
'company_id': lambda self, cr, uid, context: self.pool.get('res.company')._company_default_get(cr, uid, 'account.asset.category', context=context),
'method': 'linear',
'method_number': 5,
'method_time': 'number',
'method_period': 12,
'method_progress_factor': 0.3,
}
def onchange_account_asset(self, cr, uid, ids, account_asset_id, context=None):
res = {'value':{}}
if account_asset_id:
res['value'] = {'account_depreciation_id': account_asset_id}
return res
class account_asset_asset(osv.osv):
_name = 'account.asset.asset'
_description = 'Asset'
def unlink(self, cr, uid, ids, context=None):
for asset in self.browse(cr, uid, ids, context=context):
if asset.account_move_line_ids:
raise osv.except_osv(_('Error!'), _('You cannot delete an asset that contains posted depreciation lines.'))
return super(account_asset_asset, self).unlink(cr, uid, ids, context=context)
def _get_period(self, cr, uid, context=None):
periods = self.pool.get('account.period').find(cr, uid, context=context)
if periods:
return periods[0]
else:
return False
def _get_last_depreciation_date(self, cr, uid, ids, context=None):
"""
@param id: ids of a account.asset.asset objects
@return: Returns a dictionary of the effective dates of the last depreciation entry made for given asset ids. If there isn't any, return the purchase date of this asset
"""
cr.execute("""
SELECT a.id as id, COALESCE(MAX(l.date),a.purchase_date) AS date
FROM account_asset_asset a
LEFT JOIN account_move_line l ON (l.asset_id = a.id)
WHERE a.id IN %s
GROUP BY a.id, a.purchase_date """, (tuple(ids),))
return dict(cr.fetchall())
def _compute_board_amount(self, cr, uid, asset, i, residual_amount, amount_to_depr, undone_dotation_number, posted_depreciation_line_ids, total_days, depreciation_date, context=None):
#by default amount = 0
amount = 0
if i == undone_dotation_number:
amount = residual_amount
else:
if asset.method == 'linear':
amount = amount_to_depr / (undone_dotation_number - len(posted_depreciation_line_ids))
if asset.prorata:
amount = amount_to_depr / asset.method_number
days = total_days - float(depreciation_date.strftime('%j'))
if i == 1:
amount = (amount_to_depr / asset.method_number) / total_days * days
elif i == undone_dotation_number:
amount = (amount_to_depr / asset.method_number) / total_days * (total_days - days)
elif asset.method == 'degressive':
amount = residual_amount * asset.method_progress_factor
                if asset.prorata:
days = total_days - float(depreciation_date.strftime('%j'))
if i == 1:
amount = (residual_amount * asset.method_progress_factor) / total_days * days
elif i == undone_dotation_number:
amount = (residual_amount * asset.method_progress_factor) / total_days * (total_days - days)
return amount
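    # Worked example (illustrative figures, not taken from any ledger): with
    # amount_to_depr = 5000.0, method_number = 5 and no posted lines, the linear
    # branch yields 5000.0 / 5 = 1000.0 per period; the degressive branch with
    # method_progress_factor = 0.3 yields 1500.0, then 0.3 * 3500.0 = 1050.0, and
    # so on, the last period always absorbing whatever residual_amount is left.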
def _compute_board_undone_dotation_nb(self, cr, uid, asset, depreciation_date, total_days, context=None):
undone_dotation_number = asset.method_number
if asset.method_time == 'end':
end_date = datetime.strptime(asset.method_end, '%Y-%m-%d')
undone_dotation_number = 0
while depreciation_date <= end_date:
depreciation_date = (datetime(depreciation_date.year, depreciation_date.month, depreciation_date.day) + relativedelta(months=+asset.method_period))
undone_dotation_number += 1
if asset.prorata:
undone_dotation_number += 1
return undone_dotation_number
def compute_depreciation_board(self, cr, uid, ids, context=None):
depreciation_lin_obj = self.pool.get('account.asset.depreciation.line')
currency_obj = self.pool.get('res.currency')
for asset in self.browse(cr, uid, ids, context=context):
if a
andersonsilvade/5semscript | Projeto/backend/apps/classificacaodtm_app/facade.py | Python | mit | 2,641 | 0.004165
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from gaegraph.business_base import NodeSearch, DeleteNode
from classificacaodtm_app.commands import ListClassificacaodtmCommand, SaveClassificacaodtmCommand, UpdateClassificacaodtmCommand, \
ClassificacaodtmPublicForm, ClassificacaodtmDetailForm, ClassificacaodtmShortForm
def save_classificacaodtm_cmd(**classificacaodtm_properties):
"""
Command to save Classificacaodtm entity
:param classificacaodtm_properties: a dict of properties to save on model
:return: a Command that save Classificacaodtm, validating and localizing properties received as strings
"""
return SaveClassificacaodtmCommand(**classificacaodtm_properties)
def update_classificacaodtm_cmd(classificacaodtm_id, **classificacaodtm_properties):
"""
Command to update Classificacaodtm entity with id equals 'classificacaodtm_id'
:param classificacaodtm_properties: a dict of properties to update model
:return: a Command that update Classificacaodtm, validating and localizing properties received as strings
"""
return UpdateClassificacaodtmCommand(classificacaodtm_id, **classificacaodtm_properties)
def list_classificacaodtms_cmd():
"""
Command to list Classificacaodtm entities ordered by their creation dates
:return: a Command proceed the db operations when executed
"""
return ListClassificacaodtmCommand()
def classificacaodtm_detail_form(**kwargs):
"""
Function to get Classificacaodtm's detail form.
:param kwargs: form properties
:return: Form
"""
return ClassificacaodtmDetailForm(**kwargs)
def classificacaodtm_short_form(**kwargs):
"""
Function to get Classificacaodtm's short form. just a subset of classificacaodtm's properties
:param kwargs: form properties
:return: Form
"""
return ClassificacaodtmShortForm(**kwargs)
def classificacaodtm_public_form(**kwargs):
"""
Function to get Classificacaodtm'spublic form. just a subset of classificacaodtm's properties
:param kwargs: form properties
:return: Form
"""
return ClassificacaodtmPublicForm(**kwargs)
def get_classificacaodtm_cmd(classificacaodtm_id):
"""
Find classificacaodtm by her id
:param classificacaodtm_id: the classificacaodtm id
:return: Command
"""
return NodeSearch(classificacaodtm_id)
def delete_classificacaodtm_cmd(classificacaodtm_id):
"""
Construct a command to delete a Classificacaodtm
:param classificacaodtm_id: classificacaodtm's id
:return: Command
"""
return DeleteNode(classificacaodtm_id)
annoviko/pyclustering | pyclustering/cluster/tests/kmeans_templates.py | Python | gpl-3.0 | 5,793 | 0.008113
"""!
@brief Test templates for K-Means clustering module.
@authors Andrei Novikov (pyclustering@yandex.ru)
@date 2014-2020
@copyright BSD-3-Clause
"""
from pyclustering.tests.assertion import assertion
from pyclustering.cluster.encoder import type_encoding, cluster_encoder
from pyclustering.cluster.kmeans import kmeans, kmeans_observer, kmeans_visualizer
from pyclustering.utils import read_sample
from pyclustering.utils.metric import distance_metric, type_metric
from random import random
import numpy
class KmeansTestTemplates:
@staticmethod
def templateLengthProcessData(data, start_centers, expected_cluster_length, ccore, **kwargs):
if isinstance(data, str):
sample = read_sample(data)
else:
sample = data
        metric = kwargs.get('metric', distance_metric(type_metric.EUCLIDEAN_SQUARE))
itermax = kwargs.get('itermax', 200)
kmeans_instance = kmeans(sample, start_centers, 0.001, ccore, metric=metric, itermax=itermax)
kmeans_instance.process()
clusters = kmeans_instance.get_clusters()
centers = kmeans_instance.get_centers()
        wce = kmeans_instance.get_total_wce()
if itermax == 0:
assertion.eq(start_centers, centers)
assertion.eq([], clusters)
assertion.eq(0.0, wce)
return
expected_wce = 0.0
for index_cluster in range(len(clusters)):
for index_point in clusters[index_cluster]:
expected_wce += metric(sample[index_point], centers[index_cluster])
assertion.eq(expected_wce, wce)
obtained_cluster_sizes = [len(cluster) for cluster in clusters]
assertion.eq(len(sample), sum(obtained_cluster_sizes))
assertion.eq(len(clusters), len(centers))
for center in centers:
assertion.eq(len(sample[0]), len(center))
if expected_cluster_length is not None:
obtained_cluster_sizes.sort()
expected_cluster_length.sort()
assertion.eq(obtained_cluster_sizes, expected_cluster_length)
@staticmethod
def templatePredict(path_to_file, initial_centers, points, expected_closest_clusters, ccore, **kwargs):
sample = read_sample(path_to_file)
metric = kwargs.get('metric', distance_metric(type_metric.EUCLIDEAN_SQUARE))
itermax = kwargs.get('itermax', 200)
kmeans_instance = kmeans(sample, initial_centers, 0.001, ccore, metric=metric, itermax=itermax)
kmeans_instance.process()
closest_clusters = kmeans_instance.predict(points)
assertion.eq(len(expected_closest_clusters), len(closest_clusters))
assertion.true(numpy.array_equal(numpy.array(expected_closest_clusters), closest_clusters))
@staticmethod
def templateClusterAllocationOneDimensionData(ccore_flag):
input_data = [ [random()] for _ in range(10) ] + [ [random() + 3] for _ in range(10) ] + [ [random() + 5] for _ in range(10) ] + [ [random() + 8] for _ in range(10) ]
kmeans_instance = kmeans(input_data, [ [0.0], [3.0], [5.0], [8.0] ], 0.025, ccore_flag)
kmeans_instance.process()
clusters = kmeans_instance.get_clusters()
assertion.eq(4, len(clusters))
for cluster in clusters:
assertion.eq(10, len(cluster))
@staticmethod
def templateEncoderProcedures(filename, initial_centers, number_clusters, ccore_flag):
sample = read_sample(filename)
kmeans_instance = kmeans(sample, initial_centers, 0.025, ccore_flag)
kmeans_instance.process()
clusters = kmeans_instance.get_clusters()
encoding = kmeans_instance.get_cluster_encoding()
encoder = cluster_encoder(encoding, clusters, sample)
encoder.set_encoding(type_encoding.CLUSTER_INDEX_LABELING)
encoder.set_encoding(type_encoding.CLUSTER_OBJECT_LIST_SEPARATION)
encoder.set_encoding(type_encoding.CLUSTER_INDEX_LIST_SEPARATION)
assertion.eq(number_clusters, len(clusters))
@staticmethod
def templateCollectEvolution(filename, initial_centers, number_clusters, ccore_flag):
sample = read_sample(filename)
observer = kmeans_observer()
kmeans_instance = kmeans(sample, initial_centers, 0.025, ccore_flag, observer=observer)
kmeans_instance.process()
assertion.le(1, len(observer))
for i in range(len(observer)):
assertion.le(1, len(observer.get_centers(i)))
for center in observer.get_centers(i):
assertion.eq(len(sample[0]), len(center))
assertion.le(1, len(observer.get_clusters(i)))
@staticmethod
def templateShowClusteringResultNoFailure(filename, initial_centers, ccore_flag):
sample = read_sample(filename)
kmeans_instance = kmeans(sample, initial_centers, 0.025, ccore_flag)
kmeans_instance.process()
clusters = kmeans_instance.get_clusters()
centers = kmeans_instance.get_centers()
figure = kmeans_visualizer.show_clusters(sample, clusters, centers, initial_centers)
kmeans_visualizer.close(figure)
@staticmethod
def templateAnimateClusteringResultNoFailure(filename, initial_centers, ccore_flag):
sample = read_sample(filename)
observer = kmeans_observer()
kmeans_instance = kmeans(sample, initial_centers, 0.025, ccore_flag, observer=observer)
kmeans_instance.process()
kmeans_visualizer.animate_cluster_allocation(sample, observer)
ANR-COMPASS/shesha | shesha/util/writers/common/fits.py | Python | gpl-3.0 | 5,305 | 0.00754
import numpy as np
from shesha.util.writers.common import dm
from shesha.util.writers.common import wfs
from shesha.util.writers.common import imat
from astropy.io import fits
def wfs_to_fits_hdu(sup, wfs_id):
"""Return a fits Header Data Unit (HDU) representation of a single WFS
Args:
sup : (compasSSupervisor) : supervisor
wfs_id : (int) : index of the WFS in the supervisor
Returns:
hdu : (ImageHDU) : fits representation of the WFS
"""
hdu_name = "WFS" + str(wfs_id)
X,Y = wfs.get_subap_pos_meter(sup, wfs_id)
valid_subap = np.array([X,Y],dtype=np.float64)
hdu = fits.ImageHDU( valid_subap, name=hdu_name)
hdu.header["NSSP"] = sup.config.p_wfss[wfs_id].get_nxsub()
hdu.header["SSPSIZE"] = sup.config.p_wfss[wfs_id].get_subapd()
return hdu
def dm_to_fits_hdu(sup, dm_id):
"""Return a fits Header Data Unit (HDU) representation of a single DM
Args:
        sup : (compasSSupervisor) : supervisor
        dm_id : (int) : index of the DM in the supervisor
Returns:
hdu : (ImageHDU) : fits representation of the DM
"""
hdu_name = "DM" + str(dm_id)
X,Y = dm.get_actu_pos_meter(sup, dm_id)
valid_subap = np.array([X,Y],dtype=np.float64)
hdu = fits.ImageHDU( valid_subap, name=hdu_name)
hdu.header["NACTU"] = sup.config.p_dms[d
|
m_id].get_nact()
hdu.header["PITCH"] = sup.config.p_dms[dm_id].get_pitch()
hdu.header["COUPLING"] = sup.config.p_dms[dm_id].get_coupling()
hdu.header["ALT"] = sup.config.p_dms[dm_id].get_alt()
return hdu
def dm_influ_to_fits_hdu(sup, dm_id, *, influ_index=-1):
"""Return a fits Header Data Unit (HDU) holding the influence functions of a specific DM
Args:
sup : (compasSSupervisor) : supervisor
wfs_id : (int) : index of the DM in the supervisor
Kwargs:
influ_index : (int) : (optional) default -1, index of the actuator to get the influence function from. -1 : get all influence functions
Returns:
hdu : (ImageHDU) : hdu holding the DM influence functions
"""
hdu_name = "INFLU_DM" + str(dm_id)
if influ_index < 0 :
influ_fct = sup.config.p_dms[dm_id].get_influ().astype(np.float64)
else :
influ_fct = sup.config.p_dms[dm_id].get_influ()[:,:,influ_index].astype(np.float64)
hdu = fits.ImageHDU( influ_fct, name=hdu_name)
return hdu
def write_data(file_name, sup, *, wfss_indices=None, dms_indices=None,
controller_id=0, influ=0, compose_type="controller"):
""" Write data for yao compatibility
write into a single fits:
        * number of valid subapertures
* number of actuators
* subapertures position (2-dim array x,y) in meters centered
* actuator position (2-dim array x,y) in pixels starting from 0
* interaction matrix (2*nSubap , nactu)
* command matrix (nacy , 2*nSubap)
Args:
file_name : (str) : data file name
sup : (compasSSupervisor) : supervisor
    Kwargs:
wfss_indices : (list[int]) : optional, default all, list of the wfs indices to include
dms_indices : (list[int]) : optional, default all, list of the DM indices to include
controller_id : (int) : optional, index of the controller passed to yao
influ : (int) : optional, actuator index for the influence function
compose_type : (str) : optional, possibility to specify split tomography case ("controller" or "splitTomo")
"""
print("writing data to" + file_name)
hdul=fits.HDUList([])
# setting list of wfs and dm
conf = sup.config
if(wfss_indices is None):
wfss_indices = np.arange(len(conf.p_wfss))
if(dms_indices is None):
dms_indices = []
for i in range(len(conf.p_dms)):
if( conf.p_dms[i].type != "tt"):
dms_indices.append(i)
    # count the number of lgs
n_lgs = 0
for i in wfss_indices :
if(conf.p_wfss[i].get_gsalt() > 0):
n_lgs += 1
#primary hdu contains only keywords for sanity check
hdu = fits.PrimaryHDU(np.zeros(1,dtype=np.int32))
hdu.header["DIAM"] = conf.p_tel.get_diam()
hdu.header["COBS"] = conf.p_tel.get_cobs()
hdu.header["NLGS"] = n_lgs
hdu.header["NNGS"] = len(wfss_indices) - n_lgs
hdu.header["NDM" ] = len(dms_indices)
hdu.header["PIXSIZE"] = conf.p_geom.get_pixsize()
#add primary hdu to list
hdul.append(hdu)
# add wfss
for i in wfss_indices:
hdul.append( wfs_to_fits_hdu(sup, i))
# add dm
for i in dms_indices:
hdul.append(dm_to_fits_hdu(sup, i))
hdul.append(dm_influ_to_fits_hdu(sup, i, influ_index = influ))
if(controller_id > -1):
# IMAT
interaction_mat=imat.compose_imat(sup, compose_type=compose_type,
controller_id=controller_id)
hdu_imat=fits.ImageHDU(interaction_mat,name="IMAT")
# CMAT
hdu_cmat=fits.ImageHDU(sup.rtc.get_command_matrix(controller_id),
name="CMAT")
print("\t* number of subaperture per WFS")
print("\t* subapertures position")
print("\t* number of actuator per DM")
print("\t* actuators position")
print("\t* Imat")
print("\t* Cmat")
hdul.writeto(file_name, overwrite=1)
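# Illustrative usage sketch (assumes `sup` is an already-initialised COMPASS
# supervisor, i.e. the `compasSSupervisor` object the functions above expect;
# the output file name is hypothetical):
#
#     write_data("system_geometry.fits", sup, controller_id=0,
#                compose_type="controller")
#
# This writes the WFS/DM geometry HDUs, the influence functions, and the
# IMAT/CMAT extensions into a single FITS file, as described in the write_data
# docstring above.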
lukedeo/fancy-cnn | datasets/yelp/yelp_w2v.py | Python | mit | 1,086 | 0.003683
"""
Compute WordVectors using Yelp Data
"""
from gensim.models.word2vec import Word2Vec
from util.language import detect_language, tokenize_text
from data_handling import get_reviews_data
# Set to true for zero in in English reviews. Makes the process much slower
FILTER_ENGLISH = True
# Name for output w2v model file
OUTPUT_MODEL_FILE = "w2v_yelp_100_alpha_0.025_window_4"
PICKLED_DATA = "/home/alfredo/deep-nlp/data/reviews.pickle."
NUM_PARTITIONS = 2 # Use all data
reviews_texts, _, _, _, _ = get_reviews_data(range(1, NUM_PARTITIONS), PICKLED_DATA)
# Each review will be considered a sentence
sentences = []
for num, text in enumerate(reviews_texts):
if num % 10000 == 0:
print "%d out of %d reviews read" % (num, len(reviews_texts))
if FILTER_ENGLISH:
if detect_language(text) == u"english":
sentences.append(tokenize_text(text))
else:
sentences.append(text)
# Build a w2v model
w2v = Word2Vec(sentences=sentences, size=100, alpha=0.025, window=4, min_count=2, sample=1e-5, workers=4, negative=10)
w2v.save(OUTPUT_MODEL_FILE)
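# Illustrative follow-up (standard gensim API; the query word is an arbitrary
# example): the saved model can be reloaded and queried later, e.g.
#
#     w2v = Word2Vec.load(OUTPUT_MODEL_FILE)
#     print w2v.most_similar("food")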
GdZ/scriptfile | software/googleAppEngine/lib/PyAMF/doc/tutorials/examples/actionscript/bytearray/python/settings.py | Python | mit | 2,999 | 0.003334
# Django settings for python project.
DEBUG = True
import logging
LOG_LEVEL = logging.INFO
if DEBUG:
LOG_LEVEL = logging.DEBUG
logging.basicConfig(
level = LOG_LEVEL,
format = '[%(asctime)s %(name)s %(levelname)s] %(message)s',
)
TEMPLATE_DEBUG = DEBUG
ADMINS = (
# ('Your Name', 'your_email@domain.com'),
)
MANAGERS = ADMINS
DATABASE_ENGINE = '' # 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'ado_mssql'.
DATABASE_NAME = '' # Or path to database file if using sqlite3.
DATABASE_USER = '' # Not used with sqlite3.
DATABASE_PASSWORD = '' # Not used with sqlite3.
DATABASE_HOST = '' # Set to empty string for localhost. Not used with sqlite3.
DATABASE_PORT = '' # Set to empty string for default. Not used with sqlite3.
# Local time zone for this installation. Choices can be found here:
# http://www.postgresql.org/docs/8.1/static/datetime-keywords.html#DATETIME-TIMEZONE-SET-TABLE
# although not all variations may be possible on all operating systems.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'America/Chicago'
# Language code for this installation. All choices can be found here:
# http://www.w3.org/TR/REC-html40/struct/dirlang.html#langcodes
# http://blogs.law.harvard.edu/tech/stories/storyReader$15
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# Absolute path to the directory that holds media.
# Example: "/home/media/media.lawrence.com/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT.
# Example: "http://media.lawrence.com"
MEDIA_URL = ''
# URL prefix for admin media -- CSS, JavaScript and images. Make sure to use a
# trailing slash.
# Examples: "http://foo.com/media/", "/media/".
ADMIN_MEDIA_PREFIX = '/media/'
# Make this unique, and don't share it with anybody.
SECRET_KEY = '!q2sh7ue8^=bu&wj9tb9&4fx^dayk=wnxo^mtd)xmw1y2)6$w$'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.load_template_source',
'django.template.loaders.app_directories.load_template_source',
# 'django.template.loaders.eggs.load_template_source',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.middleware.doc.XViewMiddleware',
)
ROOT_URLCONF = 'python.urls'
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
)
3DGenomes/tadbit | _pytadbit/tools/tadbit_map.py | Python | gpl-3.0 | 26,222 | 0.00492
"""
information needed
- path to FASTQ
- path to reference genome
- path to indexed reference genome
- read number (1/2)
- restriction enzyme used
- species name
- chromosome names (optional)
- descriptive fields (optional, e.g. --descr=flowcell:C68AEACXX,lane:4,index:24nf)
mapping strategy
- iterative/fragment
- mapper
"""
from __future__ import print_function
from os import path, remove, system
from string import ascii_letters
from random import random
from shutil import copyfile
from multiprocessing import cpu_count
from subprocess import PIPE, STDOUT, Popen
from pickle import load, UnpicklingError
from argparse import HelpFormatter
from traceback import print_exc
import logging
import sqlite3 as lite
import time
from pytadbit.mapping.restriction_enzymes import RESTRICTION_ENZYMES, identify_re
from pytadbit.utils.fastq_utils import quality_plot
from pytadbit.utils.file_handling import which, mkdir, is_fastq
from pytadbit.mapping.full_mapper import full_mapping, fast_fragment_mapping
from pytadbit.parsers.genome_parser import parse_fasta
from pytadbit.utils.sqlite_utils import get_path_id, add_path, print_db, retry
from pytadbit.utils.sqlite_utils import get_jobid, already_run, digest_parameters
from pytadbit import get_dependencies_version
DESC = "Map Hi-C reads and organize results in an output working directory"
def run(opts):
check_options(opts)
launch_time = time.localtime()
# hash that gonna be append to output file names
param_hash = digest_parameters(opts, get_md5=True)
# create tmp directory
if not opts.tmp:
temp_dir = opts.workdir + '_tmp_r%d_%s' % (opts.read, param_hash)
else:
temp_dir = path.join(opts.tmp,
'TADbit_tmp_r%d_%s' % (opts.read, param_hash))
# QC plot
    fig_path = path.join(opts.workdir,
'%s_%s_%s.png' % (path.split(opts.fastq)[-1],
'-'.join(map(str, opts.renz)), param_hash))
logging.info('Generating Hi-C QC plot')
dangling_ends, ligated = quality_plot(opts.fastq, r_enz=opts.renz,
                                          nreads=100000, paired=False,
savefig=fig_path)
for renz in dangling_ends:
logging.info(' - Dangling-ends (sensu-stricto): %.3f%%', dangling_ends[renz])
for renz in ligated:
logging.info(' - Ligation sites: %.3f%%', ligated[renz])
if opts.skip_mapping:
save_to_db(opts, dangling_ends, ligated, fig_path, [], launch_time, time.localtime())
return
# Mapping
if opts.fast_fragment:
mkdir(path.join(opts.workdir, '03_filtered_reads'))
logging.info('parsing genomic sequence')
try:
# allows the use of pickle genome to make it faster
genome_seq = load(open(opts.genome[0],'rb'))
except (UnpicklingError, KeyError):
genome_seq = parse_fasta(opts.genome)
logging.info('mapping %s and %s to %s', opts.fastq, opts.fastq2, opts.workdir)
outfiles = fast_fragment_mapping(opts.index, opts.fastq, opts.fastq2,
opts.renz, genome_seq,
path.join(opts.workdir, '03_filtered_reads',
'all_r1-r2_intersection_%s.tsv' % param_hash),
clean=not opts.keep_tmp, get_nread=True,
mapper_binary=opts.mapper_binary,
mapper_params=opts.mapper_param, suffix=param_hash,
temp_dir=temp_dir, nthreads=opts.cpus)
else:
logging.info('mapping %s read %s to %s', opts.fastq, opts.read, opts.workdir)
outfiles = full_mapping(opts.index, opts.fastq,
path.join(opts.workdir,
'01_mapped_r%d' % (opts.read)), mapper=opts.mapper,
r_enz=opts.renz, temp_dir=temp_dir, nthreads=opts.cpus,
frag_map=not opts.iterative, clean=not opts.keep_tmp,
windows=opts.windows, get_nread=True, skip=opts.skip,
suffix=param_hash, mapper_binary=opts.mapper_binary,
mapper_params=opts.mapper_param)
# adjust line count
if opts.skip:
for i, (out, _) in enumerate(outfiles[1:], 1):
outfiles[i] = out, outfiles[i-1][1] - sum(1 for _ in open(outfiles[i-1][0]))
finish_time = time.localtime()
# save all job information to sqlite DB
try:
save_to_db(opts, dangling_ends, ligated, fig_path, outfiles, launch_time, finish_time)
except Exception as e:
# release lock
remove(path.join(opts.workdir, '__lock_db'))
print_exc()
exit(1)
# write machine log
try:
while path.exists(path.join(opts.workdir, '__lock_log')):
time.sleep(0.5)
open(path.join(opts.workdir, '__lock_log'), 'a').close()
with open(path.join(opts.workdir, 'trace.log'), "a") as mlog:
mlog.write('\n'.join([
('# MAPPED READ%s\t%d\t%s' % (opts.read, num, out))
for out, num in outfiles]) + '\n')
# release lock
try:
remove(path.join(opts.workdir, '__lock_log'))
except OSError:
pass
except Exception as e:
# release lock
remove(path.join(opts.workdir, '__lock_db'))
print_exc()
exit(1)
# clean
if not opts.keep_tmp:
logging.info('cleaning temporary files')
system('rm -rf ' + temp_dir)
def check_options(opts):
if not opts.mapper_binary:
if opts.mapper == 'gem':
opts.mapper_binary = 'gem-mapper'
else:
opts.mapper_binary = opts.mapper
opts.mapper_binary = which(opts.mapper_binary)
if not opts.mapper_binary:
raise Exception('\n\nERROR: Mapper binary not found, for GEM install it from:'
'\nhttps://sourceforge.net/projects/gemlibrary/files/gem-library/Binary%20pre-release%202/'
'\n - Download the GEM-binaries-Linux-x86_64-core_i3 if'
'have a recent computer, the '
'GEM-binaries-Linux-x86_64-core_2 otherwise\n - '
'Uncompress with "tar xjvf GEM-binaries-xxx.tbz2"\n - '
'Copy the binary gem-mapper to /usr/local/bin/ for '
'example (somewhere in your PATH).\n\nNOTE: GEM does '
'not provide any binary for MAC-OS.')
opts.gem_version = 0
if opts.mapper == 'gem':
opts.gem_version = None
try:
out, _ = Popen([opts.mapper_binary,'--version'], stdout=PIPE,
stderr=STDOUT, universal_newlines=True).communicate()
opts.gem_version = int(out[1])
except ValueError as e:
opts.gem_version = 2
print('Falling to gem v2')
if opts.fast_fragment:
if opts.gem_version < 3:
raise Exception('ERROR: Fast fragment mapping needs GEM v3')
if not opts.fastq2 or not path.exists(opts.fastq2):
raise Exception('ERROR: Fast fragment mapping needs both fastq files. '
'Please specify --fastq2')
if opts.read != 0:
raise Exception('ERROR: Fast fragment mapping needs to be specified with --read 0')
if not opts.genome: raise Exception('ERROR: Fast fragment mapping needs '
'the genome parameter.')
# check RE name
if opts.renz == ['CHEC
google-research/federated | compressed_communication/aggregators/comparison_methods/one_bit_sgd_test.py | Python | apache-2.0 | 8,373 | 0.00203
# Copyright 2022, Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
from absl.testing import parameterized
import tensorflow as tf
import tensorflow_federated as tff
from compressed_communication.aggregators.comparison_methods import one_bit_sgd
_test_integer_tensor_type = (tf.int32, (3,))
_test_float_struct_type = [(tf.float32, (2,)), (tf.float32, (3,))]
_test_float_tensor_type = (tf.float32, (3,))
class OneBitSGDComputationTest(tff.test.TestCase, parameterized.TestCase):
@parameterized.named_parameters(
("float_tensor", _test_float_tensor_type))
def test_one_bit_sgd_properties(self, value_type):
factory = one_bit_sgd.OneBitSGDFactory()
value_type = tff.to_type(value_type)
process = factory.create(value_type)
self.assertIsInstance(process, tff.templates.AggregationProcess)
server_state_type = tff.type_at_server(())
expected_initialize_type = tff.FunctionType(
parameter=None, result=server_state_type)
self.assert_types_equivalent(process.initialize.type_signature,
expected_initialize_type)
expected_measurements_type = tff.type_at_server(
collections.OrderedDict(
avg_bitrate=tf.float32,
avg_distortion=tf.float32))
expected_next_type = tff.FunctionType(
parameter=collections.OrderedDict(
state=server_state_type, value=tff.type_at_clients(value_type)),
result=tff.templates.MeasuredProcessOutput(
state=server_state_type,
result=tff.type_at_server(value_type),
measurements=expected_measurements_type))
self.assert_types_equivalent(process.next.type_signature,
expected_next_type)
@parameterized.named_parameters(
("integer_tensor", _test_integer_tensor_type),
("float_struct", _test_float_struct_type))
def test_one_bit_sgd_create_raises(self, value_type):
factory = one_bit_sgd.OneBitSGDFactory()
value_type = tff.to_type(value_type)
self.assertRaises(ValueError, factory.create, value_type)
class OneBitSGDExecutionTest(tff.test.TestCase, parameterized.TestCase):
@parameterized.named_parameters(
("float_tensor", _test_float_tensor_type))
def test_correctness_positive(self, value_type):
factory = one_bit_sgd.OneBitSGDFactory()
value_type = tff.to_type(value_type)
process = factory.create(value_type)
state = process.initialize()
client_values = [tf.ones(value_type.shape, value_type.dtype)
for _ in range(2)]
expected_result = tf.ones(value_type.shape, value_type.dtype) * 2
bitstring_length = tf.size(expected_result, out_type=tf.float32) + 64.
expected_avg_bitrate = bitstring_length / tf.size(expected_result,
out_type=tf.float32)
expected_measurements = collections.OrderedDict(
avg_bitrate=expected_avg_bitrate,
avg_distortion=0.0)
output = process.next(state, client_values)
self.assertAllClose(output.result, expected_result)
self.assertAllClose(output.measurements, expected_measurements)
@parameterized.named_parameters(
("float_tensor", _test_float_tensor_type))
def test_correctness_negative(self, value_type):
factory = one_bit_sgd.OneBitSGDFactory()
value_type = tff.to_type(value_type)
process = factory.create(value_type)
state = process.initialize()
client_values = [-1.0 * tf.ones(value_type.shape, value_type.dtype)
for _ in range(2)]
expected_result = tf.ones(value_type.shape, value_type.dtype) * -2
bitstring_length = tf.size(expected_result, out_type=tf.float32) + 64.
expected_avg_bitrate = bitstring_length / tf.size(expected_result,
out_type=tf.float32)
expected_measurements = collections.OrderedDict(
avg_bitrate=expected_avg_bitrate,
avg_distortion=0.0)
output = process.next(state, client_values)
self.assertAllClose(output.result, expected_result)
self.assertAllClose(output.measurements, expected_measurements)
@parameterized.named_parameters(
("float_tensor", _test_float_tensor_type))
def test_correctness_positive_negative(self, value_type):
factory = one_bit_sgd.OneBitSGDFactory()
value_type = tff.to_type(value_type)
process = factory.create(value_type)
state = process.initialize()
client_values = [[0.0, 2.0, -1.0] for _ in range(2)]
expected_result = [2.0, 2.0, -2.0]
bitstring_length = tf.size(expected_result, out_type=tf.float32) + 64.
expected_avg_bitrate = bitstring_length / tf.size(expected_result,
out_type=tf.float32)
expected_measurements = collections.OrderedDict(
avg_bitrate=expected_avg_bitrate,
avg_distortion=2./3.)
output = process.next(state, client_values)
self.assertAllClose(output.result, expected_result)
self.assertAllClose(output.measurements, expected_measurements)
@parameterized.named_parameters(
("float_tensor", _test_float_tensor_type))
def test_correctness_nonzero_threshold(self, value_type):
factory = one_bit_sgd.OneBitSGDFactory(2.)
value_type = tff.to_type(value_type)
process = factory.create(value_type)
state = process.initialize()
client_values = [[-1.0, 1.0, 2.0] for _ in range(2)]
expected_result = [0.0, 0.0, 4.0]
bitstring_length = tf.size(expected_result, out_type=tf.float32) + 64.
expected_avg_bitrate = bitstring_length / tf.size(expected_result,
out_type=tf.float32)
expected_measurements = collections.OrderedDict(
avg_bitrate=expected_avg_bitrate,
avg_distortion=2./3.)
output = process.next(state, client_values)
self.assertAllClose(output.result, expected_result)
self.assertAllClose(output.measurements, expected_measurements)
@parameterized.named_parameters(
("float_tensor", _test_float_tensor_type))
  def test_correctness_one_client(self, value_type):
factory = one_bit_sgd.OneBitSGDFactory(2.)
value_type = tff.to_type(value_type)
process = factory.create(value_type)
state = process.initialize()
    client_values = [[-1.0, 1.0, 2.0]]
    expected_result = [0.0, 0.0, 2.0]
bitstring_length = tf.size(expected_result, out_type=tf.float32) + 64.
expected_avg_bitrate = bitstring_length / tf.size(expected_result,
out_type=tf.float32)
expected_measurements = collections.OrderedDict(
avg_bitrate=expected_avg_bitrate,
avg_distortion=2./3.)
output = process.next(state, client_values)
self.assertAllClose(output.result, expected_result)
self.assertAllClose(output.measurements, expected_measurements)
@parameterized.named_parameters(
("float_tensor", _test_float_tensor_type))
def test_correctness_different_clients(self, value_type):
factory = one_bit_sgd.OneBitSGDFactory(2.)
value_type = tff.to_type(value_type)
process = factory.create(value_type)
state = process.initialize()
client_values = [[-1.0, 1.0, 2.0], [1.0, 1.0, 1.0]]
expected_result = [1.0, 1.0, 3.0]
bitstring_length = tf.size(expected_result, out_type=tf.float32) + 64.
expected_avg_bitrate = bitstring_length / tf.size(expected_result,
out_type=tf.float32)
expected_measurements = collections.OrderedDict(
avg_bitrate=expected_avg_bitrate,
avg_distortion=2./6.)
output = process.next(
XiaJieCom/change | document/Service/nagios/nrpe/check_url.py | Python | lgpl-2.1 | 489 | 0.03272
#!/usr/bin/python
import sys
import requests
try:
url = sys.argv[1]
r = requests.get('http://%s' %url ,timeout=3)
except requests.exceptions.Timeout:
print 'url timeout\n%s' %url
sys.exit(2)
except:
print 'url error \n%s' %url
sys.exit(2)
url_status = r.status_code
if url_status == 200:
print 'url_status %s\n%s' %(url_status,url)
sys.exit(0)
else:
print 'url_status %s\n%s' %(url_status,url)
sys.exit(2)
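# Illustrative NRPE wiring (paths and host are hypothetical): the script takes
# the host to probe as its first argument, so a typical nrpe.cfg entry would be
#
#     command[check_url]=/usr/bin/python /usr/local/nagios/libexec/check_url.py www.example.com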
kyle8998/Practice-Coding-Questions | CTCI/Chapter1/1.3-URLify.py | Python | unlicense | 1,208 | 0.005795
# CTCI 1.3
# URLify
import unittest
# My Solution
#-------------------------------------------------------------------------------
# CTCI Solution
def urlify(string, length):
'''function replaces single spaces with %20 and removes trailing spaces'''
new_index = len(string)
for i in reversed(range(length)):
if string[i] == ' ':
# Replace spaces
string[new_index - 3:new_index] = '%20'
new_index -= 3
        else:
            # Move characters
string[new_index - 1] = string[i]
new_index -= 1
return string
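# Example (mirrors the test data below): urlify(list('Mr John Smith    '), 13)
# returns list('Mr%20John%20Smith').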
#-------------------------------------------------------------------------------
#Testing
class Test(unittest.TestCase):
'''Test Cases'''
# Using lists because Python strings are immutable
data = [
(list('much ado about nothing '), 22,
list('much%20ado%20about%20nothing')),
(list('Mr John Smith '), 13, list('Mr%20John%20Smith'))]
def test_urlify(self):
for [test_string, length, expected] in self.data:
actual = urlify(test_string, length)
self.assertEqual(actual, expected)
if __name__ == "__main__":
unittest.main()
msiemens/PyGitUp | PyGitUp/tests/test_rebase_error.py | Python | mit | 1,166 | 0.001715
# System imports
import os
from os.path import join
import pytest
from git import *
from PyGitUp.git_wrapper import RebaseError
from PyGitUp.tests import basepath, write_file, init_master, update_file, testfile_name
test_name = 'rebase_error'
repo_path = join(basepath, test_name + os.sep)
def setup():
master_path, master = init_master(test_name)
# Prepare master repo
master.git.checkout(b=test_name)
# Clone to test repo
path = join(basepath, test_name)
master.clone(path, b=test_name)
repo = Repo(path, odbt=GitCmdObjectDB)
assert repo.working_dir == path
# Modify file in master
    update_file(master, test_name)
# Modify file in our repo
contents = 'completely changed!'
repo_file = join(path, testfile_name)
write_file(repo_file, contents)
repo.index.add([repo_file])
repo.index.commit(test_name)
    # Modify file in master
update_file(master, test_name)
def test_rebase_error():
""" Run 'git up' with a failing rebase """
os.chdir(repo_path)
from PyGitUp.gitup import GitUp
gitup = GitUp(testing=True)
with pytest.raises(RebaseError):
gitup.run()
open-dcs/dcs | examples/python/ui.py | Python | mit | 1,007 | 0.004965
#!/usr/bin/env python
#! -*- coding: utf-8 -*-
from gi.repository import Cld
from gi.repository import DcsCore as dc
from gi.repository import DcsUI as du
from gi.repository import Gtk
class DcsExample(Gtk.Window):
def __init__(self):
Gtk.Window.__init__(self, title="DCS Example")
        config = Cld.XmlConfig.with_file_name("examples/cld.xml")
self.context = Cld.Context.from_config(config)
self.chan = self.context.get_object("ai0")
self.dev = self.context.get_object("dev0")
self.dev.open()
if(not self.dev.is_open):
print "Open device " + self.dev.id + " failed"
#self.task = self.context.get_object("tk0")
#self.task.run()
self.aictl = du.AIControl("/ai0")
self.aictl.connect("request_object", self.offer)
self.add(self.aictl)
def offer(self, widget):
widget.offer_cld_object(self.chan)
win = DcsExample()
win.connect("delete-event", Gtk.main_quit)
win.show_all()
Gtk.main()
MediaFire/mediafire-python-open-sdk | tests/test_smoke.py | Python | bsd-2-clause | 2,879 | 0.000347
#!/usr/bin/python
import io
import os
import unittest
import logging
import uuid
from mediafire import MediaFireApi, MediaFireUploader, UploadSession
from mediafire.uploader import UPLOAD_SIMPLE_LIMIT_BYTES
APP_ID = '42511'
MEDIAFIRE_EMAIL = os.environ.get('MEDIAFIRE_EMAIL')
MEDIAFIRE_PASSWORD = os.environ.get('MEDIAFIRE_PASSWORD')
class MediaFireSmokeBaseTestCase(object):
"""Smoke tests for API"""
class BaseTest(unittest.TestCase):
def setUp(self):
# Reset logging to info to avoid leaking credentials
logger = logging.getLogger('mediafire.api')
logger.setLevel(logging.INFO)
self.api = MediaFireApi()
session = self.api.user_get_session_token(
app_id=APP_ID, email=MEDIAFIRE_EMAIL,
password=MEDIAFIRE_PASSWORD)
self.api.session = session
@unittest.skipIf('CI' not in os.environ, "Running outside CI environment")
class MediaFireSmokeSimpleTest(MediaFireSmokeBaseTestCase.BaseTest):
"""Simple tests"""
def test_user_get_info(self):
result = self.api.user_get_info()
self.assertEqual(result["user_info"]["display_name"],
u"Coalmine Smoketest")
@unittest.skipIf('CI' not in os.environ, "Running outside CI environment")
class MediaFireSmokeWithDirectoryTest(MediaFireSmokeBaseTestCase.BaseTest):
"""Smoke tests requiring temporary directory"""
def setUp(self):
super(MediaFireSmokeWithDirectoryTest, self).setUp()
folder_uuid = str(uuid.uuid4())
result = self.api.folder_create(foldername=folder_uuid)
self.folder_key = result["folder_key"]
def tearDown(self):
self.api.folder_purge(self.folder_key)
def test_upload_small(self):
"""Test simple upload"""
# make sure we most likely will get upload/simple
data = b'This is a tiny file content: ' + os.urandom(32)
fd = io.BytesIO(data)
uploader = MediaFireUploader(self.api)
with UploadSession(self.api):
result = uploader.upload(fd, 'smallfile.txt',
folder_key=self.folder_key)
self.assertIsNotNone(result.quickkey)
self.assertEqual(result.action, 'upload/simple')
def test_upload_large(self):
"""Test large file upload"""
# make sure we will get upload/resumable, prefix + 4MiB
data = b'Long line is long: ' + os.urandom(UPLOAD_SIMPLE_LIMIT_BYTES)
fd = io.BytesIO(data)
uploader = MediaFireUploader(self.api)
with UploadSession(self.api):
result = uploader.upload(fd, 'bigfile.txt',
folder_key=self.folder_key)
self.assertIsNotNone(result.quickkey)
self.assertEqual(result.action, 'upload/resumable')
if __name__ == "__main__":
unittest.main()
youtube/cobalt | third_party/llvm-project/lldb/packages/Python/lldbsuite/test/dotest_args.py | Python | bsd-3-clause | 13,110 | 0.002517
from __future__ import print_function
from __future__ import absolute_import
# System modules
import argparse
import sys
import multiprocessing
import os
import textwrap
# Third-party modules
# LLDB modules
from . import configuration
class ArgParseNamespace(object):
pass
def parse_args(parser, argv):
""" Returns an argument object. LLDB_TEST_ARGUMENTS environment variable can
be used to pass additional arguments.
"""
args = ArgParseNamespace()
if ('LLDB_TEST_ARGUMENTS' in os.environ):
print(
"Arguments passed through environment: '%s'" %
os.environ['LLDB_TEST_ARGUMENTS'])
args = parser.parse_args([sys.argv[0]].__add__(
os.environ['LLDB_TEST_ARGUMENTS'].split()), namespace=args)
return parser.parse_args(args=argv, namespace=args)
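# Illustrative example (values are hypothetical): additional options can be
# injected through the environment variable handled above, e.g.
#
#     LLDB_TEST_ARGUMENTS="-A x86_64 -C clang" python dotest.py <test-dir>
#
# where -A and -C are the arch/compiler options defined in create_parser() below.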
def default_thread_count():
# Check if specified in the environment
num_threads_str = os.environ.get("LLDB_TEST_THREADS")
if num_threads_str:
return int(num_threads_str)
else:
return multiprocessing.cpu_count()
def create_parser():
parser = argparse.ArgumentParser(
description='description',
prefix_chars='+-',
add_help=False)
group = None
# Helper function for boolean options (group will point to the current
# group when executing X)
X = lambda optstr, helpstr, **kwargs: group.add_argument(
optstr, help=helpstr, action='store_true', **kwargs)
group = parser.add_argument_group('Help')
group.add_argument(
'-h',
'--help',
dest='h',
action='store_true',
help="Print this help message and exit. Add '-v' for more detailed help.")
# C and Python toolchain options
group = parser.add_argument_group('Toolchain options')
group.add_argument(
'-A',
'--arch',
metavar='arch',
dest='arch',
help=textwrap.dedent('''Specify the architecture(s) to test. This option can be specified more than once'''))
group.add_argument('-C', '--compiler', metavar='compiler', dest='compiler', help=textwrap.dedent(
'''Specify the compiler(s) used to build the inferior executables. The compiler path can be an executable basename or a full path to a compiler executable. This option can be specified multiple times.'''))
if sys.platform == 'darwin':
group.add_argument('--apple-sdk', metavar='apple_sdk', dest='apple_sdk', default="macosx", help=textwrap.dedent(
'''Specify the name of the Apple SDK (macosx, macosx.internal, iphoneos, iphoneos.internal, or path to SDK) and use the appropriate tools from that SDK's toolchain.'''))
# FIXME? This won't work for different extra flags according to each arch.
group.add_argument(
'-E',
metavar='extra-flags',
help=textwrap.dedent('''Specify the extra flags to be passed to the toolchain when building the inferior programs to be debugged
suggestions: do not lump the "-A arch1 -A arch2" together such that the -E option applies to only one of the architectures'''))
    group.add_argument('--dsymutil', metavar='dsymutil', dest='dsymutil', help=textwrap.dedent('Specify which dsymutil to use.'))
# Test filtering options
group = parser.add_argument_group('Test filtering options')
group.add_argument(
'-f',
metavar='filterspec',
action='append',
help='Specify a filter, which consists of the test class name, a dot, followed by the test method, to only admit such test into the test suite') # FIXME: Example?
    X('-l', "Don't skip long running tests")
group.add_argument(
'-p',
metavar='pattern',
help='Specify a regexp filename pattern for inclusion in the test suite')
group.add_argument('--excluded', metavar='exclusion-file', action='append', help=textwrap.dedent(
'''Specify a file for tests to exclude. File should contain lists of regular expressions for test files or methods,
with each list under a matching header (xfail files, xfail methods, skip files, skip methods)'''))
group.add_argument(
'-G',
'--category',
metavar='category',
action='append',
dest='categoriesList',
help=textwrap.dedent('''Specify categories of test cases of interest. Can be specified more than once.'''))
group.add_argument(
'--skip-category',
metavar='category',
action='append',
dest='skipCategories',
help=textwrap.dedent('''Specify categories of test cases to skip. Takes precedence over -G. Can be specified more than once.'''))
# Configuration options
group = parser.add_argument_group('Configuration options')
group.add_argument(
'--framework',
metavar='framework-path',
help='The path to LLDB.framework')
group.add_argument(
'--executable',
metavar='executable-path',
help='The path to the lldb executable')
group.add_argument(
'--server',
metavar='server-path',
help='The path to the debug server executable to use')
group.add_argument(
'--out-of-tree-debugserver',
dest='out_of_tree_debugserver',
action='store_true',
help='A flag to indicate an out-of-tree debug server is being used')
group.add_argument(
'-s',
metavar='name',
help='Specify the name of the dir created to store the session files of tests with errored or failed status. If not specified, the test driver uses the timestamp as the session dir name')
group.add_argument(
'-S',
'--session-file-format',
default=configuration.session_file_format,
metavar='format',
help='Specify session file name format. See configuration.py for a description.')
group.add_argument(
'-y',
type=int,
metavar='count',
help="Specify the iteration count used to collect our benchmarks. An example is the number of times to do 'thread step-over' to measure stepping speed.")
group.add_argument(
'-#',
type=int,
metavar='sharp',
dest='sharp',
help='Repeat the test suite for a specified number of times')
group.add_argument('--channel', metavar='channel', dest='channels', action='append', help=textwrap.dedent(
"Specify the log channels (and optional categories) e.g. 'lldb all' or 'gdb-remote packets' if no categories are specified, 'default' is used"))
group.add_argument(
'--log-success',
dest='log_success',
action='store_true',
help="Leave logs/traces even for successful test runs (useful for creating reference log files during debugging.)")
group.add_argument(
'--codesign-identity',
metavar='Codesigning identity',
default='lldb_codesign',
help='The codesigning identity to use')
group.add_argument(
'--build-dir',
dest='test_build_dir',
metavar='Test build directory',
default='lldb-test-build.noindex',
help='The root build directory for the tests. It will be removed before running.')
# Configuration options
group = parser.add_argument_group('Remote platform options')
group.add_argument(
'--platform-name',
dest='lldb_platform_name',
metavar='platform-name',
help='The name of a remote platform to use')
group.add_argument(
'--platform-url',
dest='lldb_platform_url',
metavar='platform-url',
help='A LLDB platform URL to use when connecting to a remote platform to run the test suite')
group.add_argument(
'--platform-working-dir',
dest='lldb_platform_working_dir',
metavar='platform-working-dir',
help='The directory to use on the remote platform.')
# Test-suite behaviour
group = parser.add_argument_group('Runtime behaviour options')
X('-d', 'Suspend the process after launch to wait indefinitely for a debugger to attach')
X('-q', "Don't print extra output from this script.")
mbr0wn/gnuradio | gr-analog/examples/fm_demod.py | Python | gpl-3.0 | 2,750 | 0.003273
#!/usr/bin/env python
#
# Copyright 2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# SPDX-License-Identifier: GPL-3.0-or-later
#
#
from gnuradio import gr
from gnuradio import blocks
from gnuradio import filter
from gnuradio import analog
from gnuradio import audio
from gnuradio.filter import firdes
from gnuradio.fft import window
import sys, math
# Create a top_block
class build_graph(gr.top_block):
def __init__(self):
gr.top_block.__init__(self)
input_rate = 200e3 # rate of a broadcast FM station
audio_rate = 44.1e3 # Rate we send the signal to the speaker
# resample from the output of the demodulator to the rate of
# the audio sink.
resamp_rate = audio_rate / input_rate
# use a file as a dummy source. Replace this with a real radio
# receiver to capture signals over-the-air.
src = blocks.file_source(gr.sizeof_gr_complex, "dummy.dat", True)
# Set the demodulator using the same deviation as the receiver.
max_dev = 75e3
fm_demod_gain = input_rate / (2*math.pi*max_dev/8.0)
fm_demod = analog.quadrature_demod_cf(fm_demod_gain)
# Create a filter for the resampler and filter the audio
# signal to 15 kHz. The nfilts is the number of filters in the
# arbitrary resampler. It logically operates at a rate of
# nfilts*input_rate, so we make those adjustments when
# building the filter.
volume = 0.20
nfilts = 32
resamp_taps = firdes.low_pass_2(volume*nfilts, # gain
nfilts*input_rate, # sampling rate
15e3, # low pass cutoff freq
1e3, # width of trans. band
60, # stop band attenuaton
window.WIN_KAISER)
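        # With input_rate = 200 kHz and audio_rate = 44.1 kHz, resamp_rate is
        # 44.1e3 / 200e3 = 0.2205; the taps above are designed against the logical
        # rate nfilts * input_rate = 6.4 MHz, hence the gain scaling by nfilts.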
# Build the resampler and filter
resamp_filter = filter.pfb_arb_resampler_fff(resamp_rate,
resamp_taps, nfilts)
# sound card as final sink You may have to add a specific
# device name as a second argument here, something like
# "pulse" if using pulse audio or "plughw:0,0".
audio_sink = audio.sink(int(audio_rate))
# now wire it all together
self.connect(src, fm_demod)
self.connect(fm_demod, resamp_filter)
self.connect(resamp_filter, (audio_sink,0))
def main(args):
tb = build_graph()
tb.start() # fork thread and return
input('Press Enter to quit: ')
tb.stop()
if __name__ == '__main__':
main(sys.argv[1:])
RDCEP/atlas-viewer | run.py | Python | apache-2.0 | 45 | 0.022222
from atlas_web import app
app.run(debug=True)
FuzzyHobbit/acme-tiny | acme_tiny.py | Python | mit | 9,077 | 0.004407
#!/usr/bin/env python
import argparse, subprocess, json, os, sys, base64, binascii, time, hashlib, re, copy, textwrap, logging
try:
from urllib.request import urlopen # Python 3
except ImportError:
from urllib2 import urlopen # Python 2
#DEFAULT_CA = "https://acme-staging.api.letsencrypt.org"
DEFAULT_CA = "https://acme-v01.api.letsencrypt.org"
LOGGER = logging.getLogger(__name__)
LOGGER.addHandler(logging.StreamHandler())
LOGGER.setLevel(logging.INFO)
def get_crt(account_key, csr, acme_dir, log=LOGGER, CA=DEFAULT_CA):
# helper function base64 encode for jose spec
def _b64(b):
return base64.urlsafe_b64encode(b).decode('utf8').replace("=", "")
    # parse account key to get public key
log.info("Parsing account key...")
proc = subprocess.Popen(["openssl", "rsa", "-in", account_key, "-noout", "-text"],
stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = proc.communicate()
if proc.returncode != 0:
raise IOError("OpenSSL Error: {0}".format(err))
pub_hex, pub_exp = re.search(
r"modulus:\n\s+00:([a-f0-9\:\s]+?)\npublicExponent: ([0-9]+)",
out.decode('utf8'), re.MULTILINE|re.DOTALL).groups()
pub_exp = "{0:x}".format(int(pub_exp))
pub_exp = "0{0}".format(pub_exp) if len(pub_exp) % 2 else pub_exp
header = {
"alg": "RS256",
"jwk": {
"e": _b64(binascii.unhexlify(pub_exp)),
"kty": "RSA",
"n": _b64(binascii.unhexlify(re.sub(r"(\s|:)", "", pub_hex))),
},
}
accountkey_json = json.dumps(header['jwk'], sort_keys=True, separators=(',', ':'))
thumbprint = _b64(hashlib.sha256(accountkey_json.encode('utf8')).digest())
# helper function make signed requests
def _send_signed_request(url, payload):
payload64 = _b64(json.dumps(payload).encode('utf8'))
protected = copy.deepcopy(header)
protected["nonce"] = urlopen(CA + "/directory").headers['Replay-Nonce']
protected64 = _b64(json.dumps(protected).encode('utf8'))
proc = subprocess.Popen(["openssl", "dgst", "-sha256", "-sign", account_key],
stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = proc.communicate("{0}.{1}".format(protected64, payload64).encode('utf8'))
if proc.returncode != 0:
raise IOError("OpenSSL Error: {0}".format(err))
data = json.dumps({
"header": header, "protected": protected64,
"payload": payload64, "signature": _b64(out),
})
try:
resp = urlopen(url, data.encode('utf8'))
return resp.getcode(), resp.read()
except IOError as e:
return e.code, e.read()
# find domains
log.info("Parsing CSR...")
proc = subprocess.Popen(["openssl", "req", "-in", csr, "-noout", "-text"],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = proc.communicate()
if proc.returncode != 0:
raise IOError("Error loading {0}: {1}".format(csr, err))
domains = set([])
common_name = re.search(r"Subject:.*? CN=([^\s,;/]+)", out.decode('utf8'))
if common_name is not None:
domains.add(common_name.group(1))
subject_alt_names = re.search(r"X509v3 Subject Alternative Name: \n +([^\n]+)\n", out.decode('utf8'), re.MULTILINE|re.DOTALL)
if subject_alt_names is not None:
for san in subject_alt_names.group(1).split(", "):
if san.startswith("DNS:"):
domains.add(san[4:])
# get the certificate domains and expiration
log.info("Registering account...")
code, result = _send_signed_request(CA + "/acme/new-reg", {
"resource": "new-reg",
"agreement": "https://letsencrypt.org/documents/LE-SA-v1.0.1-July-27-2015.pdf",
})
if code == 201:
log.info("Registered!")
elif code == 409:
log.info("Already registered!")
else:
raise ValueError("Error registering: {0} {1}".format(code, result))
# verify each domain
for domain in domains:
log.info("Verifying {0}...".format(domain))
# get new challenge
code, result = _send_signed_request(CA + "/acme/new-authz", {
"resource": "new-authz",
"identifier": {"type": "dns", "value": domain},
})
if code != 201:
raise ValueError("Error requesting challenges: {0} {1}".format(code, result))
# make the challenge file
challenge = [c for c in json.loads(result.decode('utf8'))['challenges'] if c['type'] == "http-01"][0]
token = re.sub(r"[^A-Za-z0-9_\-]", "_", challenge['token'])
keyauthorization = "{0}.{1}".format(token, thumbprint)
wellknown_path = os.path.join(acme_dir, token)
with open(wellknown_path, "w") as wellknown_file:
wellknown_file.write(keyauthorization)
# check that the file is in place
wellknown_url = "http://{0}/.well-known/acme-challenge/{1}".format(domain, token)
try:
resp = urlopen(wellknown_url)
resp_data = resp.read().decode('utf8').strip()
assert resp_data == keyauthorization
except (IOError, AssertionError):
os.remove(wellknown_path)
raise ValueError("Wrote file to {0}, but couldn't download {1}".format(
wellknown_path, wellknown_url))
        # notify that the challenge is met
code, result = _send_signed_request(challenge['uri'], {
"resource": "challenge",
"keyAuthorization": keyauthorization,
})
if code != 202:
raise ValueError("Error triggering challenge: {0} {1}".format(code, result))
# wait for challenge to be verified
while True:
try:
resp = urlopen(challenge['uri'])
challenge_status = json.loads(resp.read().decode('utf8'))
except IOError as e:
raise ValueError("Error checking challenge: {0} {1}".format(
e.code, json.loads(e.read().decode('utf8'))))
if challenge_status['status'] == "pending":
time.sleep(2)
elif challenge_status['status'] == "valid":
log.info("{0} verified!".format(domain))
os.remove(wellknown_path)
break
else:
raise ValueError("{0} challenge did not pass: {1}".format(
domain, challenge_status))
# get the new certificate
log.info("Signing certificate...")
proc = subprocess.Popen(["openssl", "req", "-in", csr, "-outform", "DER"],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
csr_der, err = proc.communicate()
code, result = _send_signed_request(CA + "/acme/new-cert", {
"resource": "new-cert",
"csr": _b64(csr_der),
})
if code != 201:
raise ValueError("Error signing certificate: {0} {1}".format(code, result))
# return signed certificate!
log.info("Certificate signed!")
return """-----BEGIN CERTIFICATE-----\n{0}\n-----END CERTIFICATE-----\n""".format(
"\n".join(textwrap.wrap(base64.b64encode(result).decode('utf8'), 64)))
def main(argv):
parser = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter,
description=textwrap.dedent("""\
This script automates the process of getting a signed TLS certificate from
Let's Encrypt using the ACME protocol. It will need to be run on your server
and have access to your private account key, so PLEASE READ THROUGH IT! It's
only ~200 lines, so it won't take long.
===Example Usage===
python acme_tiny.py --account-key ./account.key --csr ./domain.csr --acme-dir /usr/share/nginx/html/.well-known/acme-challenge/ > signed.crt
===================
===Example Crontab Renewal (once per month)===
0 0 1 * * python /path/to/acme_tiny.py --account-key /path/to/account.key --csr /path/to/domain.csr --acme-dir /usr/share/nginx/html/.well-known/acme-challenge/ > /path/to/signed.crt 2>> /var/log/acme_tiny.log
|
tommy-u/enable
|
enable/controls.py
|
Python
|
bsd-3-clause
| 22,474 | 0.02207 |
#-------------------------------------------------------------------------------
#
# Define standard Enable 'control' components, like text/image labels,
# push buttons, radio buttons, check boxes, and so on.
#
# Written by: David C. Morrill
#
# Date: 10/10/2003
#
# (c) Copyright 2003 by Enthought, Inc.
#
# Classes defined: Label
# RadioButton
# CheckBox
#
#-------------------------------------------------------------------------------
from __future__ import with_statement
# Major library imports
import os.path
# Enthought library imports
from enable.colors import ColorTrait
from traits.api import Bool, Delegate, HasTraits, Str, Trait, \
TraitPrefixList
from traitsui.api import View, Group
# Local relative imports
from component import Component
from base import LEFT, RIGHT, TOP, BOTTOM, HCENTER, VCENTER, EMBOSSED, ENGRAVED, \
transparent_color, xy_in_bounds, add_rectangles
from enable_traits import spacing_trait, padding_trait, margin_trait,\
border_size_trait, image_trait
from enable_traits import position_trait, font_trait, engraving_trait
from radio_group import RadioStyle, RadioGroup
#-------------------------------------------------------------------------------
# Constants:
#-------------------------------------------------------------------------------
empty_text_info = ( 0, 0, 0, 0 )
LEFT_OR_RIGHT = LEFT | RIGHT
TOP_OR_BOTTOM = TOP | BOTTOM
orientation_trait = Trait( 'text', TraitPrefixList( [ 'text', 'component' ] ) )
class LabelTraits ( HasTraits ):
text = Str
font = font_trait
text_position = position_trait("left")
color = ColorTrait("black")
shadow_color = ColorTrait("white")
style = engraving_trait
image = image_trait
image_position = position_trait("left")
image_orientation = orientation_trait
spacing_height = spacing_trait
spacing_width = spacing_trait
padding_left = padding_trait
padding_right = padding_trait
padding_top = padding_trait
padding_bottom = padding_trait
margin_left = margin_trait
margin_right = margin_trait
margin_top = margin_trait
margin_bottom = margin_trait
border_size = border_size_trait
border_color = ColorTrait("black")
bg_color = ColorTrait("clear")
enabled = Bool(True)
selected = Bool(False)
#---------------------------------------------------------------------------
# Trait view definitions:
#---------------------------------------------------------------------------
traits_view = View(
Group( 'enabled', 'se
|
lected',
id = 'component' ),
Group( 'text', ' ',
'font', ' ',
'color', ' ',
'shadow_color', ' ',
'style',
id = 'text',
|
style = 'custom' ),
Group( 'bg_color{Background Color}', '_',
'border_color', '_',
'border_size',
id = 'border',
style = 'custom' ),
Group( 'text_position', '_',
'image_position', '_',
'image_orientation', ' ',
'image',
id = 'position',
style = 'custom' ),
Group( 'spacing_height', 'spacing_width', '_',
'padding_left', 'padding_right',
'padding_top', 'padding_bottom', '_',
'margin_left', 'margin_right',
'margin_top', 'margin_bottom',
id = 'margin' )
)
default_label_traits = LabelTraits()
#-------------------------------------------------------------------------------
# 'Label' class:
#-------------------------------------------------------------------------------
LabelTraitDelegate = Delegate( 'delegate', redraw = True )
LayoutLabelTraitDelegate = LabelTraitDelegate( layout = True )
LabelContentDelegate = LayoutLabelTraitDelegate( content = True )
class Label ( Component ):
#---------------------------------------------------------------------------
# Trait definitions:
#---------------------------------------------------------------------------
delegate = Trait( default_label_traits )
text = LabelContentDelegate
font = LabelContentDelegate
text_position = LayoutLabelTraitDelegate
color = LabelTraitDelegate
shadow_color = LabelTraitDelegate
style = LabelTraitDelegate
image = LayoutLabelTraitDelegate
image_position = LayoutLabelTraitDelegate
image_orientation = LayoutLabelTraitDelegate
spacing_height = LayoutLabelTraitDelegate
spacing_width = LayoutLabelTraitDelegate
padding_left = LayoutLabelTraitDelegate
padding_right = LayoutLabelTraitDelegate
padding_top = LayoutLabelTraitDelegate
padding_bottom = LayoutLabelTraitDelegate
margin_left = LayoutLabelTraitDelegate
margin_right = LayoutLabelTraitDelegate
margin_top = LayoutLabelTraitDelegate
margin_bottom = LayoutLabelTraitDelegate
border_size = LayoutLabelTraitDelegate
border_color = LabelTraitDelegate
bg_color = LabelTraitDelegate
enabled = LabelTraitDelegate
selected = LabelTraitDelegate
#---------------------------------------------------------------------------
# Trait view definitions:
#---------------------------------------------------------------------------
traits_view = View(
Group( '<component>', 'enabled', 'selected',
id = 'component' ),
Group( '<links>', 'delegate',
id = 'links' ),
Group( 'text', ' ',
'font', ' ',
'color', ' ',
'shadow_color', ' ',
'style',
id = 'text',
style = 'custom' ),
Group( 'bg_color{Background Color}', '_',
'border_color', '_',
'border_size',
id = 'border',
style = 'custom' ),
Group( 'text_position', '_',
'image_position', '_',
'image_orientation', ' ',
'image',
id = 'position',
style = 'custom' ),
Group( 'spacing_height', 'spacing_width', '_',
'padding_left', 'padding_right',
'padding_top', 'padding_bottom', '_',
'margin_left', 'margin_right',
'margin_top', 'margin_bottom',
id = 'margin' )
)
colorchip_map = {
'fg_color': 'color',
'bg_color': 'bg_color',
'shadow_color': 'shadow_color',
'alt_color': 'border_color'
}
#---------------------------------------------------------------------------
# Initialize the object:
#---------------------------------------------------------------------------
def __init__ ( self, text = '', **traits ):
self.text = text
Component.__init__( self, **traits )
#---------------------------------------------------------------------------
# Handle any trait being modified:
#---------------------------------------------------------------------------
def _anytrait_changed ( self, name, old, new ):
trait = self.trait( name )
if trait.content:
self.update_text()
if trait.redraw:
if trait.layout:
self.layout()
self.redraw()
#---------------------------------------------------------------------------
# Return the components that contain a specified (x,y) point:
#---------------------------------------------------------------------------
def _components_at ( self, x, y ):
if self._in_margins( x, y ):
return [ self ]
|
plotly/plotly.py
|
packages/python/plotly/plotly/validators/sankey/hoverlabel/_bordercolor.py
|
Python
|
mit
| 482 | 0.002075 |
import _plotly_utils.basevalidators
class BordercolorValidator(
|
_plotly_utils.basevalidators.ColorValidator):
def __init__(
self, plotly_name="bordercolor", parent_name="sankey.hoverlabel", **kwargs
):
super(BordercolorValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
array_ok=kwargs.pop("array_ok", True),
edit_type=kwargs.pop("edit_type", "calc"),
**kwargs
|
)
|
modoboa/modoboa
|
modoboa/policyd/handlers.py
|
Python
|
isc
| 1,435 | 0 |
"""App related signal handlers."""
import redis
from django.conf import settings
from django.db.models import signals
from django.dispatch import receiver
from modoboa.admin import models as admin_models
from . import constants
def set_message_limit(instance, key):
"""Store message limit in Redis."""
old_message_limit = instance._loaded_values.get("message_limit")
if old_message_limit == instance.message_limit:
return
rclient = redis.Redis(
host=settings.REDIS_HOST,
port=settings.REDIS_PORT,
db=settings.REDIS_QUOTA_DB
)
if instance.message_limit is None:
# delete existing key
if rclient.hexists(constants.RED
|
IS_HASHNAME, key):
r
|
client.hdel(constants.REDIS_HASHNAME, key)
return
if old_message_limit is not None:
diff = instance.message_limit - old_message_limit
else:
diff = instance.message_limit
rclient.hincrby(constants.REDIS_HASHNAME, key, diff)
@receiver(signals.post_save, sender=admin_models.Domain)
def set_domain_message_limit(sender, instance, created, **kwargs):
"""Store domain message limit in Redis."""
set_message_limit(instance, instance.name)
@receiver(signals.post_save, sender=admin_models.Mailbox)
def set_mailbox_message_limit(sender, instance, created, **kwargs):
"""Store mailbox message limit in Redis."""
set_message_limit(instance, instance.full_address)
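# A minimal sketch (not part of the original handlers) of the Redis hash
# operations that set_message_limit() performs; the hash name, key and values
# below are hypothetical and a locally reachable Redis is assumed.
import redis

r = redis.Redis(host="localhost", port=6379, db=0)
r.hincrby("message_limits", "example.com", 100)  # limit first set to 100 (diff == 100)
r.hincrby("message_limits", "example.com", -20)  # limit lowered to 80 (diff == -20)
r.hdel("message_limits", "example.com")          # limit cleared (message_limit is None)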
|
OCA/stock-logistics-workflow
|
stock_return_request/hooks.py
|
Python
|
agpl-3.0
| 3,276 | 0.001526 |
# Copyright 2019 Tecnativa - David
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
import logging
from odoo import SUPERUSER_ID, api
_logger = logging.getLogger(__name__)
def pre_init_hook(cr):
"""Speed up the installation of the module on an existing Odoo instance"""
cr.execute(
"""
SELECT column_name
FROM information_schema.columns
WHERE table_name='stock_move' AND
column_name='qty_returnable'
"""
)
if not cr.fetchone():
_logger.info("Creating field qty_returnable on stock_move")
cr.execute(
"""
ALTER TABLE stock_move ADD COLUMN qty_returnable float;
"""
)
cr.execute(
"""
|
UPDATE stock_move SET qty_returnable = 0
WHERE state IN ('draft', 'cancel')
"""
)
cr.execute(
"""
UPDATE stock_move SET qty_returnable = product_uom_qty
WHERE state = 'done'
|
"""
)
def post_init_hook(cr, registry):
"""Set moves returnable qty on hand"""
with api.Environment.manage():
env = api.Environment(cr, SUPERUSER_ID, {})
moves_draft = env["stock.move"].search([("state", "in", ["draft", "cancel"])])
moves_no_return_pendant = env["stock.move"].search(
[
("returned_move_ids", "=", False),
("state", "not in", ["draft", "cancel", "done"]),
]
)
moves_by_reserved_availability = {}
for move in moves_no_return_pendant:
moves_by_reserved_availability.setdefault(move.reserved_availability, [])
moves_by_reserved_availability[move.reserved_availability].append(move.id)
for qty, ids in moves_by_reserved_availability.items():
cr.execute(
"UPDATE stock_move SET qty_returnable = %s " "WHERE id IN %s",
(qty, tuple(ids)),
)
moves_no_return_done = env["stock.move"].search(
[
("returned_move_ids", "=", False),
("state", "=", "done"),
]
)
# Recursively solve quantities
updated_moves = moves_no_return_done + moves_draft + moves_no_return_pendant
remaining_moves = env["stock.move"].search(
[
("returned_move_ids", "!=", False),
("state", "=", "done"),
]
)
while remaining_moves:
_logger.info("{} moves left...".format(len(remaining_moves)))
remaining_moves, updated_moves = update_qty_returnable(
cr, remaining_moves, updated_moves
)
def update_qty_returnable(cr, remaining_moves, updated_moves):
for move in remaining_moves:
if all([x in updated_moves for x in move.returned_move_ids]):
quantity_returned = sum(move.returned_move_ids.mapped("qty_returnable"))
quantity = move.product_uom_qty - quantity_returned
cr.execute(
"UPDATE stock_move SET qty_returnable = %s " "WHERE id = %s",
(quantity, move.id),
)
remaining_moves -= move
updated_moves += move
return remaining_moves, updated_moves
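# Worked illustration (hypothetical moves, not part of the module): move A of
# qty 10 was partially returned by move B of qty 4, and B has no returns of its
# own. B therefore gets qty_returnable = 4 from the pre-init SQL and is already
# in updated_moves, so on the first pass A resolves to 10 - 4 = 6.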
|
alexeyqu/zadolbali_corpus
|
crawler/zadolbali/zadolbali/spiders/stories.py
|
Python
|
mit
| 1,809 | 0.006081 |
# -*- coding:utf8 -*-
from scrapy import Request
from scrapy.spiders import CrawlSpider, Rule
from scrapy.linkextractors import LinkExtractor
from scrapy.loader.processors import Join
from scrapy.loader import ItemLoader
from scrapy.selector import HtmlXPathSelector, Selector
from zadolbali.items import StoryItem
class StoryLoader(ItemLoader):
default_output_processor = Join(' ')
class StoriesSpider(CrawlSpider):
name = 'stories'
allowed_domains = ['zadolba.li']
start_urls = ['http://zadolba.li/']
rules = (
Rule(LinkExtractor(allow=('/[0-9]{8}', )), callback='parse_day', follow=True),
)
def parse_day(self, response):
for url in response.xpath('//div[@class="story"]/h2/a/@href').extract():
request = Request(StoriesSpider.start_urls[0] + str(url)[1:], callback=self.parse_story)
request.meta['date'] = response.url.split('/')[-1]
yield request
def parse_story(self, response):
hxs = HtmlXPathSelector(response)
loader = StoryLoader(StoryItem(), hxs)
loader.add_xpath('id', '//div[@cla
|
ss="story"]/div[@class="id"]/span/text()')
loader.add_xpath('title', '//div[@class="story"]/h1/text()')
loader.add_value('published', str(response.request.meta['date']))
loader.add_xpath('tags', '//div[@
|
class="story"]/div[@class="meta"]/div[@class="tags"]/ul/li/a/@href')
loader.add_xpath('text', 'string(//div[@class="story"]/div[@class="text"])')
loader.add_xpath('likes', 'string(//div[@class="story"]/div[@class="actions"]//div[@class="rating"])')
loader.add_xpath('hrefs', '//div[@class="story"]/div[@class="text"]//a/@href')
loader.add_value('hrefs', '')
loader.add_value('url', str(response.url))
return loader.load_item()
|
theskyinflames/bpulse-go-client
|
vendor/github.com/youtube/vitess/py/vtdb/vtgate_cursor.py
|
Python
|
apache-2.0
| 9,742 | 0.005646 |
# Copyright 2012, Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can
# be found in the LICENSE file.
"""VTGateCursor, and StreamVTGateCursor."""
import itertools
import operator
import re
from vtdb import base_cursor
from vtdb import dbexceptions
write_sql_pattern = re.compile(r'\s*(insert|update|delete)', re.IGNORECASE)
def ascii_lower(string):
"""Lower-case, but only in the ASCII range."""
return string.encode('utf8').lower().decode('utf8')
class VTGateCursorMixin(object):
def connection_list(self):
return [self._conn]
def is_writable(self):
return self._writable
class VTGateCursor(base_cursor.BaseListCursor, VTGateCursorMixin):
"""A cursor for execute statements to VTGate.
Results are stored as a list.
"""
def __init__(
self, connection, tablet_type, keyspace=None,
shards=None, keyspace_ids=None, keyranges=None,
writable=False, as_transaction=False, single_db=False,
twopc=False):
"""Init VTGateCursor.
Args:
connection: A PEP0249 connection object.
tablet_type: Str tablet_type.
keyspace: Str keyspace or None if batch API will be used.
shards: List of strings.
keyspace_ids: Struct('!Q').packed keyspace IDs.
keyranges: Str keyranges.
writable: True if writable.
as_transaction: True if an executemany call is its own transaction.
single_db: True if single db transaction is needed.
twopc: True if 2-phase commit is needed.
"""
super(VTGateCursor, self).__init__(single_db=single_db, twopc=twopc)
self._conn = connection
self._writable = writable
self.description = None
self.index = None
self.keyspace = keyspace
self.shards = shards
self.keyspace_ids = keyspace_ids
self.keyranges = keyranges
self.lastrowid = None
self.results = None
self.routing = None
self.rowcount = 0
self.tablet_type = tablet_type
self.as_transaction = as_transaction
self._clear_batch_state()
# pass kwargs here in case higher level APIs need to push more data through
# for instance, a key value for shard mapping
def execute(self, sql, bind_variables, **kwargs):
"""Perform a query, return the number of rows affected."""
self._clear_list_state()
self._clear_batch_state()
if self._handle_transaction_sql(sql):
return
entity_keyspace_id_map = kwargs.pop('entity_keyspace_id_map', None)
entity_column_name = kwargs.pop('entity_column_name', None)
write_query = bool(write_sql_pattern.match(sql))
# NOTE: This check may also be done at higher layers but adding it
# here for completion.
if write_query:
if not self.is_writable():
raise dbexceptions.ProgrammingError('DML on a non-writable cursor', sql)
if entity_keyspace_id_map:
raise dbexceptions.ProgrammingError(
'entity_keyspace_id_map is not allowed for write queries')
# FIXME(alainjobart): the entity_keyspace_id_map should be in the
# cursor, same as keyspace_ids, shards, keyranges, to avoid this hack.
if entity_keyspace_id_map:
shards = None
keyspace_ids = None
keyranges = None
else:
shards = self.shards
keyspace_ids = self.keyspace_ids
keyranges = self.keyranges
self.results, self.rowcount, self.lastrowid, self.description = (
self.connection._execute( # pylint: disable=protected-access
sql,
bind_variables,
tablet_type=self.tablet_type,
keyspace_name=self.keyspace,
shards=shards,
keyspace_ids=keyspace_ids,
|
keyranges=keyranges,
entity_keyspace_id_map=entity_keyspace_id_map,
entity_column_name=entity_column_name,
not_in_transaction=not self.is_writable(),
effective_caller_id=self.effective_caller_id,
**kwargs))
return self.rowcount
def fetch_aggregate_function(self, func):
return func(row[0] for row in self.fetchall())
def fetch_aggregate(self,
|
order_by_columns, limit):
"""Fetch from many shards, sort, then remove sort columns.
A scatter query may return up to limit rows. Sort all results
    manually, order them, and return the first rows.
This is a special-use function.
Args:
order_by_columns: The ORDER BY clause. Each element is either a
column, [column, 'ASC'], or [column, 'DESC'].
limit: Int limit.
Returns:
Smallest rows, with up to limit items. First len(order_by_columns)
columns are stripped.
"""
sort_columns = []
desc_columns = []
for order_clause in order_by_columns:
if isinstance(order_clause, (tuple, list)):
sort_columns.append(order_clause[0])
if ascii_lower(order_clause[1]) == 'desc':
desc_columns.append(order_clause[0])
else:
sort_columns.append(order_clause)
# sort the rows and then trim off the prepended sort columns
if sort_columns:
sorted_rows = list(sort_row_list_by_columns(
self.fetchall(), sort_columns, desc_columns))[:limit]
else:
sorted_rows = itertools.islice(self.fetchall(), limit)
neutered_rows = [row[len(order_by_columns):] for row in sorted_rows]
return neutered_rows
def _clear_batch_state(self):
"""Clear state that allows traversal to next query's results."""
self.result_sets = []
self.result_set_index = None
def close(self):
super(VTGateCursor, self).close()
self._clear_batch_state()
def executemany(self, sql, params_list, **kwargs):
"""Execute multiple statements in one batch.
This adds len(params_list) result_sets to self.result_sets. Each
result_set is a (results, rowcount, lastrowid, fields) tuple.
Each call overwrites the old result_sets. After execution, nextset()
is called to move the fetch state to the start of the first
result set.
Args:
sql: The sql text, with %(format)s-style tokens. May be None.
params_list: A list of the keyword params that are normally sent
to execute. Either the sql arg or params['sql'] must be defined.
**kwargs: passed as is to connection._execute_batch.
"""
if sql:
sql_list = [sql] * len(params_list)
else:
sql_list = [params.get('sql') for params in params_list]
bind_variables_list = [params['bind_variables'] for params in params_list]
keyspace_list = [params['keyspace'] for params in params_list]
keyspace_ids_list = [params.get('keyspace_ids') for params in params_list]
shards_list = [params.get('shards') for params in params_list]
self._clear_batch_state()
# Find other _execute_batch calls in test code.
self.result_sets = self.connection._execute_batch( # pylint: disable=protected-access
sql_list, bind_variables_list, keyspace_list, keyspace_ids_list,
shards_list,
self.tablet_type, self.as_transaction, self.effective_caller_id,
**kwargs)
self.nextset()
def nextset(self):
"""Move the fetch state to the start of the next result set.
self.(results, rowcount, lastrowid, description) will be set to
the next result_set, and the fetch-commands will work on this
result set.
Returns:
True if another result set exists, False if not.
"""
if self.result_set_index is None:
self.result_set_index = 0
else:
self.result_set_index += 1
self._clear_list_state()
if self.result_set_index < len(self.result_sets):
self.results, self.rowcount, self.lastrowid, self.description = (
self.result_sets[self.result_set_index])
return True
else:
self._clear_batch_state()
return None
class StreamVTGateCursor(base_cursor.BaseStreamCursor, VTGateCursorMixin):
"""A cursor for streaming statements to VTGate.
Results are returned as a generator.
"""
def __init__(
self, connection, tablet_type, keyspace=None,
shards=None, keyspace_ids=None,
keyranges=None, writable=False):
super(StreamVTGateCursor, self).__init__()
self._conn = connection
self._writable = writable
|
johnmgregoire/JCAPRamanDataProcess
|
PlateAlignViaEdge_v4.py
|
Python
|
bsd-3-clause
| 16,899 | 0.016214 |
import sys, os, pickle, numpy, pylab, operator, itertools, copy
import cv2
from shutil import copy as copyfile
from PyQt4.QtCore import *
from PyQt4.QtGui import *
import matplotlib.pyplot as plt
from DataParseApp import dataparseDialog
from sklearn.decomposition import NMF
projectpath=os.path.split(os.path.abspath(__file__))[0]
sys.path.append(os.path.join(projectpath,'ui'))
pythoncodepath=os.path.split(projectpath)[0]
jcapdataprocesspath=os.path.join(pythoncodepath, 'JCAPDataProcess')
sys.path.append(jcapdataprocesspath)
from VisualizeDataApp import visdataDialog
sys.path.append(os.path.join(jcapdataprocesspath,'AuxPrograms'))
from fcns_ui import *
from fcns_io import *
platemapvisprocesspath=os.path.join(pythoncodepath, 'JCAPPlatemapVisualize')
sys.path.append(platemapvisprocesspath)
from plate_image_align_Dialog import plateimagealignDialog
import numpy as np
###############UPDATE THIS TO BE THE FOLDER CONTAINING parameters.py
paramsfolder=r'K:\users\hte\Raman\39664\20170607analysis_tippy-top_1mmblobs'
#paramsfolder=r'K:\users\hte\Raman\33444\20170608analysis'
#if not paramsfolder is None:
sys.path.append(paramsfolder)
from parameters import *
#else:
# plateidstr='3344'
#
# pathd={'ramanfile':r'K:\users\hte\Raman\33444\HSS_33444_map-1-_CRR-EM-copy.txt'}
# pathd['mainfolder']=os.path.split(pathd['ramanfile'])[0]
# pathd['savefolder']=os.path.join(pathd['mainfolder'], '20170607analysis')
# pathd['infopck']=pathd['ramanfile'][:-4]+'__info.pck'
# pathd['allspectra']=os.path.join(pathd['savefolder'],'allspectra.npy')
# pathd['nmfdata']=os.path.join(pathd['savefolder'],'nmf4.pck')
# pathd['edges']=os.path.join(pathd['savefolder'],'edges.png')
# pathd['mapfill']=os.path.join(pathd['savefolder'],'blobmap.png')
# pathd['blobd']=os.path.join(pathd['savefolder'],'blobd.pck')
# pathd['alignedsamples']=os.path.join(pathd['savefolder'],'alignedsamples.png')
# pathd['alignedsamplestxt']=os.path.join(pathd['savefolder'],'alignedsamples.txt')
# pathd['spectrafolder']=os.path.join(pathd['savefolder'],'sample_spectra')
# pathd['map']=os.path.join(pathd['spectrafolder'],'raman_sample_index_map.map')
# pathd['samplepixels']=os.path.join(pathd['spectrafolder'],'samplepixels.png')
# pathd['udibasepath']=os.path.join(pathd['savefolder'],'ave_rmn_')
#
# udi_ternary_projection_inds=[0, 1, 2]#only used for the all.udi file
#
# sample_list=[1850,1851,1852,1853,1854,1855,1905,1906,1907,1908,1909,1910,1911,1912,1913,1914,1915,1916,1917,1918,1919,1969,1970,1971,1972,1973,1974,1975,1976,1977,1978,1979,1980,1981,1982,1983,2033,2034,2035,2036,2037,2038,2039,2040,2041,2042,2043,2044,2045,2046,2047,2097,2098,2099,2100,2101,2102,2103,2104,2105,2106,2107,2108,2109,2110,2111]
# dx_smp=1.
# dy_smp=1.
#
# default_sample_blob_dict=dict({}, \
# smp_is_square=0, smp_width=1., bcknd_is_square=0, bcknd_min_widt
|
h=1.3, bcknd_max_width=1.4, removedups=1\
# )
#
# show_help_messages=True
platemappath=getplatemappath_plateid(plateidstr)
if not os.path.isdir(pathd['mainfolder']):
print 'NOT A VALID FOLDER'
if not os.path.isdir(pathd['savefolder']):
os.mkd
|
ir(pathd['savefolder'])
if not os.path.isdir(pathd['spectrafolder']):
os.mkdir(pathd['spectrafolder'])
class MainMenu(QMainWindow):
def __init__(self, previousmm, execute=True, **kwargs):
super(MainMenu, self).__init__(None)
self.parseui=dataparseDialog(self, title='Visualize ANA, EXP, RUN data', **kwargs)
self.alignui=plateimagealignDialog(self, manual_image_init_bool=False)
if execute:
self.parseui.exec_()
def doNMF(datan,n_components=4):
# from Mitsu
    # alternatively PCA ... might be faster
nmf=NMF(n_components=n_components,init='nndsvd')
data_decomp_all=nmf.fit_transform(datan)
data_components_all=nmf.components_
return data_decomp_all,data_components_all
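# The comment in doNMF() mentions PCA as a possibly faster alternative; this is
# a minimal sketch under that assumption (scikit-learn's PCA, same input array).
# Note that PCA scores can be negative, unlike NMF, so the normalisation in
# imGen() would need adjusting before reusing it.
def doPCA(datan, n_components=4):
    from sklearn.decomposition import PCA
    pca = PCA(n_components=n_components)
    data_decomp_all = pca.fit_transform(datan)
    data_components_all = pca.components_
    return data_decomp_all, data_components_all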
def rgb_comp(arr2d, affine=True):
cmy_cmyk=lambda a:a[:3]*(1.-a[3])+a[3]
rgb_cmy=lambda a:1.-a
rgb_cmyk=lambda a:rgb_cmy(cmy_cmyk(a))
return numpy.array([rgb_cmyk(a) for a in arr2d])
def imGen(data_decomp_all,ramaninfod,cmykindeces=[3, 2, 1, 0]):
cmykvals=copy.copy(data_decomp_all[:, cmykindeces])
cmykvals/=cmykvals.max(axis=0)[numpy.newaxis, :]
img=numpy.reshape(rgb_comp(cmykvals), (ramaninfod['xshape'], ramaninfod['yshape'], 3))
return img
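# Sanity check of the CMYK -> RGB mapping above (pure values, not measured
# data): a component vector [0, 0, 0, 0] maps to [1, 1, 1] (white) and
# [0, 0, 0, 1] maps to [0, 0, 0] (black), matching the usual CMYK convention.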
def findEdges(img_gray, sigma = 0.33):
#this uses automatic thresholding from one of the cv2 tutorials
v = np.median(img_gray[img_gray>0])
lower = int(max(0, (1.0 - sigma) * v))
upper = int(min(255, (1.0 + sigma) * v))
edges = cv2.Canny(np.uint8(img_gray),lower,upper)
return edges
def findContours(edges):
#the contours are now found by searching the most external convex hull
    # this way most of the not fully closed samples are detected as well
im2, contours, hierarchy = cv2.findContours(edges, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_TC89_KCOS)
iWithContour = cv2.drawContours(edges, contours, -1, (255,20,100), 5)
mapimage = np.zeros_like(edges)
#this fills the contours
for i in range(len(contours)):
cv2.drawContours(mapimage, contours, i, color=255, thickness=-1)
#this is to calculate the center of each contour
x=[]
y=[]
for c in contours:
# compute the center of the contour
M = cv2.moments(c)
try:
x.append(M['m10']/(M['m00']))
y.append(M['m01']/(M['m00']))
except:
            # this was necessary as the divisor is sometimes 0
            # yields good results but should be done with caution
x.append(M['m10']/(M['m00']+1e-23))
y.append(M['m01']/(M['m00']+1e-23))
return iWithContour, mapimage, contours, x, y
mainapp=QApplication(sys.argv)
form=MainMenu(None, execute=False)
#form.show()
#form.setFocus()
#mainapp.exec_()
parseui=form.parseui
alignui=form.alignui
parseui.rawpathLineEdit.setText(pathd['ramanfile'])
parseui.infopathLineEdit.setText(pathd['infopck'])
parseui.getinfo(ramaninfop=pathd['infopck'], ramanfp=pathd['ramanfile'])#opens or creates
if os.path.isfile(pathd['allspectra']):
with open(pathd['allspectra'], mode='rb') as f:
fullramandataarray=numpy.load(f)
elif 1:
fullramandataarray=parseui.readfullramanarray(pathd['ramanfile'])#opens or creates
with open(pathd['allspectra'], mode='wb') as f:
numpy.save(f, fullramandataarray)
ramaninfod=parseui.ramaninfod
#parseui.exec_()
#ramaninfod['number of spectra']
#ramaninfod['xdata']
#ramaninfod['ydata']
#ramaninfod['Wavenumbers_str']
#ramaninfod['Spectrum 0 index']
ramaninfod['xdata']/=1000.
ramaninfod['ydata']/=1000.#convert to mm
ramaninfod['xshape']= len(np.unique(ramaninfod['xdata']))
ramaninfod['yshape']= len(np.unique(ramaninfod['ydata']))
ramaninfod['dx']= (ramaninfod['xdata'].max()-ramaninfod['xdata'].min())/(ramaninfod['xshape']-1)
ramaninfod['dy']= (ramaninfod['ydata'].max()-ramaninfod['ydata'].min())/(ramaninfod['yshape']-1)
nx=dx_smp/ramaninfod['dx']
ny=dy_smp/ramaninfod['dy']
ntot=nx*ny
ramanreshape=lambda arr: np.reshape(arr, (ramaninfod['xshape'], ramaninfod['yshape'])).T[::-1, ::-1]
ramannewshape=(ramaninfod['yshape'], ramaninfod['xshape'])
image_of_x=ramanreshape(ramaninfod['xdata'])
image_of_y=ramanreshape(ramaninfod['ydata'])
#extent=[ramaninfod['xdata'].max(), ramaninfod['xdata'].min(), ramaninfod['ydata'].min(), ramaninfod['ydata'].max()]
#extent=[ramaninfod['xdata'].max(), ramaninfod['xdata'].min(), ramaninfod['ydata'].max(), ramaninfod['ydata'].min()]
extent=[image_of_x[0, 0], image_of_x[-1, -1], image_of_y[0, 0], image_of_y[-1, -1]]
def ramanimshow(im, **kwargs):
plt.imshow(im, origin='lower', interpolation='none', aspect=1, extent=extent, **kwargs)
if os.path.isfile(pathd['nmfdata']):
with open(pathd['nmfdata'], mode='rb') as f:
tempd=pickle.load(f)
data_decomp_all,data_components_all,rgbimagedata=[tempd[k] for k in 'data_decomp_all,data_components_all,rgbimagedata'.split(',')]
else:
data_decomp_all,data_components_all = doNMF(fullramandataarray,4)
#rgbimagedata=imGen(data_decomp_all,ramaninfod)
rgbimagedata
|
liyu1990/tensorflow
|
tensorflow/python/ops/control_flow_ops_test.py
|
Python
|
apache-2.0
| 3,164 | 0.006953 |
"""Tests for control_flow_ops.py."""
import tensorflow.python.platform
from tensorflow.core.framework import graph_pb2
from tensorflow.python.framework import ops
from tensorflow.python.framework.test_util import TensorFlowTestCase
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import standard_ops as tf
from tensorflow.python.platform import googletest
class GroupTestCase(TensorFlowTestCase):
def _StripNode(self, nd):
snode = graph_pb2.NodeDef(name=nd.name, op=nd.op, input=nd.input)
if nd.device:
snode.device = nd.device
return snode
def _StripGraph(self, gd):
"""Copy gd keeping only, node.name, node.op, node.input, and node.device."""
return graph_pb2.GraphDef(node=[self._StripNode(nd) for nd in gd.node])
def testGroup_NoDevices(self):
with ops.Graph().as_default() as g:
a = tf.constant(0, name="a")
b = tf.constant(0, name="b")
c = tf.constant(0, name="c")
tf.group(a.op, b.op, c.op, name="root")
gd = g.as_graph_def()
self.assertProtoEquals("""
node { name: "a" op: "Const"}
node { name: "b" op: "Const"}
node { name: "c" op: "Const"}
node { name: "root" op: "NoOp" input: "^a" input: "^b" input: "^c" }
""", self._StripGraph(gd))
def testGroup_OneDevice(self):
with ops.Graph().as_default() as g:
with g.device("/task:0"):
a = tf.constant(0, name="a")
b = tf.constant(0, name="b")
tf.group(a.op, b.op, name="root")
gd = g.as_graph_def()
self.assertProtoEquals("""
node { name: "a" op: "Const" device: "/task:0" }
node { name: "b" op: "Const" device: "/task:0" }
node { name: "root" op: "NoOp" input: "^a" input: "^b" device: "/task:0" }
""", self._StripGraph(gd))
def testGroup_MultiDevice(self):
with ops.Graph().as_default() as g:
with g.device("/task:0")
|
:
|
a = tf.constant(0, name="a")
b = tf.constant(0, name="b")
with g.device("/task:1"):
c = tf.constant(0, name="c")
d = tf.constant(0, name="d")
with g.device("/task:2"):
tf.group(a.op, b.op, c.op, d.op, name="root")
gd = g.as_graph_def()
self.assertProtoEquals("""
node { name: "a" op: "Const" device: "/task:0"}
node { name: "b" op: "Const" device: "/task:0"}
node { name: "c" op: "Const" device: "/task:1"}
node { name: "d" op: "Const" device: "/task:1"}
node { name: "root/NoOp" op: "NoOp" input: "^a" input: "^b"
device: "/task:0" }
node { name: "root/NoOp_1" op: "NoOp" input: "^c" input: "^d"
device: "/task:1" }
node { name: "root" op: "NoOp" input: "^root/NoOp" input: "^root/NoOp_1"
device: "/task:2" }
""", self._StripGraph(gd))
class ShapeTestCase(TensorFlowTestCase):
def testShape(self):
with ops.Graph().as_default():
tensor = tf.constant([1.0, 2.0])
self.assertEquals([2], tensor.get_shape())
self.assertEquals([2],
control_flow_ops.with_dependencies(
[tf.constant(1.0)], tensor).get_shape())
if __name__ == "__main__":
googletest.main()
|
nonZero/demos-python
|
src/examples/short/object_oriented/static_method_1.py
|
Python
|
gpl-3.0
| 881 | 0.001135 |
#!/usr/bin/pyth
|
on3
'''
This is a * sort * of static method but is ugly since the
function is really global and not in the class.
'''
class Book:
num = 0
def __init__(self, price):
self.__price = price
Book.num += 1
def printit(self):
print('price is', self.__price)
def setPrice(self, newprice):
self.__price = newprice
def getNumBooks():
return Book.num
# lets create some books...
b1 = Book(
|
14)
b2 = Book(13)
# lets access the static member and the static methods...
print('Book.num (direct access) is ', Book.num)
print('getNumBooks() is ', getNumBooks())
try:
print(b1.getNumBooks())
except AttributeError as e:
print('no,cannot access the static method via the instance')
# access the static member through an instance...
print(b1.num)
print(b2.num)
b3 = Book(12)
print(b1.num)
print(b2.num)
print(b3.num)
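# For contrast, a minimal sketch (not part of the original example) of the
# usual @staticmethod form, which keeps the accessor inside the class:
class Book2:
    num = 0

    def __init__(self, price):
        self.__price = price
        Book2.num += 1

    @staticmethod
    def getNumBooks():
        return Book2.num

print('static method via the class:', Book2.getNumBooks())
b4 = Book2(11)
print('static method via an instance:', b4.getNumBooks())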
|
shacknetisp/vepybot
|
plugins/protocols/irc/auth/nickserv.py
|
Python
|
mit
| 4,789 | 0.000418 |
# -*- coding: utf-8 -*-
import bot
import time
"""
load irc/auth/nickserv
nickserv set password hunter2
config set modules.nickserv.enabled True
config set modules.nickserv.ghost True
nickserv register email@do.main
nickserv verify register myaccount c0d3numb3r
nickserv identify
"""
class M_NickServ(bot.Module):
index = "nickserv"
def register(self):
self.addhook("recv", "recv", self.recv)
self.addhook("login", "login", self.login)
self.addhook("nickinuse", "433", self.nickinuse)
self.addsetting("=name", "")
self.addsetting("=password", "")
self.addsetting("enabled", False)
self.addsetting("auto", True)
self.addsetting("ghost", True)
self.lastns = ""
self.lastnstime = time.time()
self.ghosting = True
self.addcommand(self.register_c, "register",
"Register with NickServ.",
["[-name=account name]", "email"])
self.addcommand(self.verify_c, "verify register",
"Verify with NickServ.", ["account", "code"])
|
self.addcommand(self.identify_c, "identify",
"Identify with NickServ.", [])
self.addcommand(self.setp, "set password",
"Set the NickServ password.", ["password"])
self.addcommand(self.setn, "set name",
"Set the NickServ name.", ["[name]"])
def setn(self, context, args):
args.default("name", "")
self.setsetting("name", args.getstr("name"))
return "Set name to: %s" % self.getsetti
|
ng('name')
def setp(self, context, args):
args.default("password", "")
self.setsetting("password", args.getstr("password"))
return "Set password to: %s" % self.getsetting('password')
def name(self):
return self.getsetting("name") or self.server.settings.get(
'server.user.nick')
def recv(self, context):
if context.user[0]:
if context.code('notice') and context.user[0].lower() == 'nickserv':
if context.reciever == self.server.nick:
if self.lastns and time.time() - self.lastnstime < 30:
self.server.sendto("NOTICE", self.lastns,
"NickServ -- %s" % (
context.text,
))
if self.ghosting:
self.server.setnick(self.server.wantnick)
self.ghosting = False
def nickinuse(self, r):
if (self.getsetting("enabled") and
self.getsetting("password") and self.getsetting("ghost")):
self.server.setnick(self.server.nick + "_")
self.server.sendto("PRIVMSG", "nickserv", "GHOST %s %s" % (
self.server.wantnick,
self.getsetting("password"),
))
self.ghosting = True
r.append(True)
def identify(self):
self.server.log("AUTH", "Identifying with NickServ.")
self.server.sendto("PRIVMSG", "nickserv", "IDENTIFY %s %s" % (
self.name(),
self.getsetting("password"),
))
def identify_c(self, context, args):
context.exceptrights(["admin"])
if not self.getsetting("enabled"):
return "NickServ is disabled."
if not self.getsetting("password"):
return "There is no password set."
self.identify()
self.lastns = context.user[0]
self.lastnstime = time.time()
def register_c(self, context, args):
if not self.getsetting("enabled"):
return "NickServ is disabled."
if not self.getsetting("password"):
return "There is no password set."
self.server.sendto("PRIVMSG", "nickserv", "REGISTER %s %s %s" % (
self.name() if args.getbool('name') else '',
self.getsetting("password"),
args.getstr('email'),
))
self.lastns = context.user[0]
self.lastnstime = time.time()
def verify_c(self, context, args):
if not self.getsetting("enabled"):
return "NickServ is disabled."
if not self.getsetting("password"):
return "There is no password set."
self.server.sendto("PRIVMSG", "nickserv", "VERIFY REGISTER %s %s" % (
args.getstr('account'),
args.getstr('code'),
))
self.lastns = context.user[0]
self.lastnstime = time.time()
def login(self):
if self.getsetting("enabled") and self.getsetting("password"):
if self.getsetting("auto"):
self.identify()
bot.register.module(M_NickServ)
|
zhuyue1314/MITMf
|
plugins/BeefAutorun.py
|
Python
|
gpl-3.0
| 3,993 | 0.022039 |
#!/usr/bin/env python2.7
# Copyright (c) 2014-2016 Marcello Salvati
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
# USA
#
import logging
import sys
import json
import threading
from time import sleep
from core.beefapi import BeefAPI
from core.utils import SystemConfig, shutdown
from plugins.plugin import Plugin
from plugins.Inject import Inject
mitmf_logger = logging.getLogger("mitmf")
class BeefAutorun(Inject, Plugin):
name = "BeEFAutorun"
optname = "beefauto"
desc = "Injects BeEF hooks & autoruns modules based on Browser and/or OS type"
version = "0.3"
has_opts = False
def initialize(self, options):
self.options = options
self.ip_address = SystemConfig.getIP(options.interface)
Inject.initialize(self, options)
self.tree_info.append("Mode: {}".format(self.config['BeEFAutorun']['mode']))
beefconfig = self.config['MITMf']['BeEF']
self.html_payload = '<script type="text/javascript" src="http://{}:{}/hook.js"></script>'.format(self.ip_address, beefconfig['beefport'])
self.beef = BeefAPI({"host": beefconfig['beefip'], "port": beefconfig['beefport']})
if not self.beef.login(beefconfig['user'], beefconfig['pass']):
shutdown("[BeEFAutorun] Error logging in to BeEF!")
def startThread(self):
self.autorun()
def onConfigChange(self):
self.initialize(self.options)
def autorun(self):
already_ran = []
already_hooked = []
while True:
mode = self.config['BeEFAutorun']['mode']
for hook in self.beef.hooked_browsers.online:
if hook.session not in already_hooked:
mitmf_logger.info("{} [BeEFAutorun] Joined the horde! [id:{}, type:{}-{}, os:{}]".format(hook.ip, hook.id, hook.name, hook.version, hook.os))
already_hooked.append(hook.session)
self.black_i
|
ps.append(hook.ip)
if mode == 'oneshot':
if hook.session not in already_ran:
self.execModules(hook)
already_ran.append(hook.session)
elif mode == 'loop':
self.execModules(hook)
sleep(10)
sleep(1)
def execModules(self, hook):
all_modules
|
= self.config['BeEFAutorun']["ALL"]
targeted_modules = self.config['BeEFAutorun']["targets"]
if all_modules:
mitmf_logger.info("{} [BeEFAutorun] Sending generic modules".format(hook.ip))
for module, options in all_modules.iteritems():
for m in self.beef.modules.findbyname(module):
resp = m.run(hook.session, json.loads(options))
if resp["success"] == 'true':
mitmf_logger.info('{} [BeEFAutorun] Sent module {}'.format(hook.ip, m.id))
else:
mitmf_logger.info('{} [BeEFAutorun] Error sending module {}'.format(hook.ip, m.id))
sleep(0.5)
if (hook.name and hook.os):
for os in targeted_modules:
if (os == hook.os) or (os in hook.os):
mitmf_logger.info("{} [BeEFAutorun] Sending targeted modules".format(hook.ip))
for browser in targeted_modules[os]:
if browser == hook.name:
for module, options in targeted_modules[os][browser].iteritems():
for m in self.beef.modules.findbyname(module):
resp = m.run(hook.session, json.loads(options))
if resp["success"] == 'true':
mitmf_logger.info('{} [BeEFAutorun] Sent module {}'.format(hook.ip, m.id))
else:
mitmf_logger.info('{} [BeEFAutorun] Error sending module {}'.format(hook.ip, m.id))
sleep(0.5)
|
instantshare/instantshare
|
src/tools/shorturl.py
|
Python
|
gpl-2.0
| 726 | 0.001377 |
import logging
from abc import abstractmethod, ABCMeta
from urllib import request
class UrlShortener(metaclass=ABCMeta):
@abstractmethod
def shorten(self, url: str) -> str:
pass
def log(self, url):
logging.info("Short URL: {}".for
|
mat(url))
class Off(UrlShortener):
def shorten(self, url: str):
return url
class TinyURL(UrlShortener):
def shorten(self, url: str) -> str:
response = request.urlopen("http://tinyurl.com/api-create.php?url={}".format(url))
url = str(response.read(), encoding="ascii")
self.log(url)
return url
def get_url_shortener(name: str) -> UrlShortener:
if name == "tinyurl":
|
return TinyURL()
return Off()
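# Illustrative usage (the URL is hypothetical). "tinyurl" calls the real
# TinyURL API and needs network access; any other name returns the
# pass-through Off shortener used here:
if __name__ == "__main__":
    shortener = get_url_shortener("off")
    print(shortener.shorten("https://example.com/some/very/long/path"))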
|
Cadasta/cadasta-platform
|
cadasta/config/settings/default.py
|
Python
|
agpl-3.0
| 17,780 | 0 |
"""
Django settings for cadasta project.
Generated by 'django-admin startproject' using Django 1.8.6.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
from django.utils.translation import ugettext_lazy as _
from .languages import FORM_LANGS # noqa
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '@=fy$)xx+6yjo*us@&+m6$14@l-s6#atg(msm=9%)9@%b7l%h('
ALLOWED_HOSTS = ['*']
AUTH_USER_MODEL = 'accounts.User'
SITE_ID = 1
# Application definition
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.sites',
'django.contrib.gis',
'django.contrib.humanize',
'corsheaders',
'core',
'geography',
'accounts',
'organization',
'spatial',
'questionnaires',
'resources',
'buckets',
'party',
'xforms',
'search',
'tasks',
'django_filters',
'crispy_forms',
'parsley',
'widget_tweaks',
'django_countries',
'leaflet',
'rest_framework',
'rest_framework_gis',
'rest_framework.authtoken',
'rest_framework_docs',
'djoser',
'tutelary',
'allauth',
'allauth.account',
'allauth.socialaccount',
'sass_processor',
'simple_history',
'jsonattrs',
'compressor',
'django_otp',
)
MIDDLEWARE_CLASSES = (
'corsheaders.middleware.CorsMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.locale.LocaleMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
'audit_log.middleware.UserLoggingMiddleware',
'simple_history.middleware.HistoryRequestMiddleware',
'accounts.middleware.UserLanguageMiddleware',
'django_otp.middleware.OTPMiddleware',
)
REST_FRAMEWORK = {
'DEFAULT_AUTHENTICATION_CLASSES': (
'rest_framework.authentication.TokenAuthentication',
'rest_framework.authentication.BasicAuthentication',
'rest_framework.authentication.SessionAuthentication',
'rest_framework_tmp_scoped_token.TokenAuth',
),
'DEFAULT_PERMISSION_CLASSES': (
'rest_framework.permissions.IsAuthenticated',
),
'DEFAULT_VERSIONING_CLASS':
'rest_framework.versioning.NamespaceVersioning',
'DEFAULT_VERSION': 'v1',
'EXCEPTION_HANDLER': 'core.views.api.exception_handler',
'DEFAULT_PAGINATION_CLASS':
'rest_framework.pagination.LimitOffsetPagination',
'PAGE_SIZE': 100,
'HTML_SELECT_CUTOFF': 100,
}
SITE_NAME = 'Cadasta'
BASE_TEMPLATE_DIR = os.path.join(os.path.dirname(BASE_DIR), 'templates')
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [BASE_TEMPLATE_DIR,
os.path.join(BASE_TEMPLATE_DIR, 'allauth')],
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
'loaders': [
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader'
],
},
},
]
AUTHENTICATION_BACKENDS = [
'core.backends.Auth',
'django.contrib.auth.backends.ModelBackend',
'accounts.backends.AuthenticationBackend',
'accounts.backends.PhoneAuthenticationBackend'
]
ACCOUNT_AUTHENTICATION_METHOD = 'username_email'
DJOSER = {
'SITE_NAME': SITE_NAME,
'SET_PASSWORD_RETYPE': True,
'PASSWORD_RESET_CONFIRM_RETYPE': True,
'PASSWORD_RESET_CONFIRM_URL':
'account/password/reset/confirm/{uid}/{token}',
'ACTIVATION_URL': 'account/activate/{uid}/{token}',
# 'SEND_ACTIVATION_EMAIL': True,
'SERIALIZERS': {
'set_password_retype': 'accounts.serializers.ChangePasswordSerializer'
}
}
CORS_ORIGIN_ALLOW_ALL = False
LOGIN_REDIRECT_URL = '/dashboard/'
LOGIN_URL = '/account/login/'
LOGOUT_URL = '/account/logout/'
WSGI_APPLICATION = 'config.wsgi.application'
ACCOUNT_CONFIRM_EMAIL_ON_GET = True
ACCOUNT_EMAIL_CONFIRMATION_ANONYMOUS_REDIRECT_URL = LOGIN_URL
ACCOUNT_EMAIL_CONFIRMATION_EXPIRE_DAYS = 2
ACCOUNT_FORMS = {
'signup': 'accounts.forms.RegisterForm',
'profile': 'accounts.forms.ProfileForm',
}
ACCOUNT_ADAPTER = 'accounts.adapter.DefaultAccountAdapter'
ACCOUNT_LOGOUT_ON_GET = True
ACCOUNT_LOGOUT_REDIRECT_URL = LOGIN_URL
ACCOUNT_LOGIN_ATTEMPTS_TIMEOUT = 86400
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': ('django.contrib.auth.'
'password_validation.UserAttributeSimilarityValidator'),
},
{
'NAME':
'django.contrib.auth.password_validation.MinimumLengthValidator',
'OPTIONS': {
'min_length': 10,
}
},
{
'NAME':
'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME':
'django.contrib.auth.password_validation.NumericPasswordValidator',
},
{
'NAME':
'accounts.validators.CharacterTypePasswordValidator'
|
},
{
'NAME':
'accounts.validators.EmailSimilarityValidator'
},
]
OSM_ATTRIBUTION = _(
"Base map data © <a href=\"http://openstreetmap.org\">"
"OpenStreetMap</a> contributors under "
"<a href=\"http://opendatacommons.org/licenses/odbl/\">ODbL</a>"
)
DIGITALGLOBE_ATTRIBUTION = _("Imagery © DigitalGlobe")
DIGITALGLOBE_TILESET_URL_FORMAT = (
'https://{{s
|
}}.tiles.mapbox.com/v4/digitalglobe.{}'
'/{{z}}/{{x}}/{{y}}.png?access_token='
'pk.eyJ1IjoiZGlnaXRhbGdsb2JlIiwiYSI6ImNpaHhtenBmZjAzYW1'
'1a2tvY2p3MnpjcGcifQ.vF1gH0mGgK31yeHC1k1Tqw'
)
LEAFLET_CONFIG = {
'TILES': [
(
_("OpenStreetMap"),
'https://{s}.tile.openstreetmap.org/{z}/{x}/{y}.png',
{'attribution': OSM_ATTRIBUTION,
'maxZoom': 19}
),
(
_("+Vivid imagery"),
DIGITALGLOBE_TILESET_URL_FORMAT.format('n6ngnadl'),
{'attribution': DIGITALGLOBE_ATTRIBUTION,
'maxZoom': 22}
),
(
_("Recent imagery"),
DIGITALGLOBE_TILESET_URL_FORMAT.format('nal0g75k'),
{'attribution': DIGITALGLOBE_ATTRIBUTION,
'maxZoom': 22}
),
(
_("+Vivid imagery with OpenStreetMap"),
DIGITALGLOBE_TILESET_URL_FORMAT.format('n6nhclo2'),
{'attribution': (OSM_ATTRIBUTION, DIGITALGLOBE_ATTRIBUTION),
'maxZoom': 22}
),
(
_("Recent imagery with OpenStreetMap"),
DIGITALGLOBE_TILESET_URL_FORMAT.format('nal0mpda'),
{'attribution': (OSM_ATTRIBUTION, DIGITALGLOBE_ATTRIBUTION),
'maxZoom': 22}
),
],
'RESET_VIEW': False,
'PLUGINS': {
'draw': {
'js': '/static/leaflet/draw/leaflet.draw.js'
},
'groupedlayercontrol': {
'js': '/static/js/leaflet.groupedlayercontrol.min.js',
'css': '/static/css/leaflet.groupedlayercontrol.min.css'
}
}
}
# Invalid names for Cadasta organizations, projects, and usernames
CADASTA_INVALID_ENTITY_NAMES = ['add', 'new']
# Internationalizatio
|
sirmar/tetris
|
tetris/visibles/component.py
|
Python
|
mit
| 1,102 | 0.002722 |
"""
Base class for all nodes in the scene graph. It is implemented
using th
|
e composite pattern.
Responsibilities:
- Hold the relative position to its parent.
- Blit itself on the parent.
- Dirty flag itself to trigger regeneration of surface.
"""
class Component(object):
def __init__(self):
self._position = (0, 0)
self._dirty = True
self._surface = None
def draw(self, parent):
self._recreate_surface()
if self._surface and parent:
parent.blit(self._surface, self._position)
|
def set_position(self, position):
self._position = position
def surface(self):
return None
def dirty(self):
self._dirty = True
def _recreate_surface(self):
if self._dirty:
self._surface = self.surface()
self._dirty = False
"""
Decorator to mark component methods that change the look
of the surface and therefore need to trigger regeneration.
"""
def recreate_surface(function):
def wrapper(self, *args):
self.dirty()
return function(self, *args)
return wrapper
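# Illustrative subclass (not part of the original module), showing how the
# @recreate_surface decorator marks look-changing setters so the cached
# surface is rebuilt on the next draw(); a real subclass would render and
# return a pygame.Surface from surface() instead of None.
class TextComponent(Component):
    def __init__(self, text):
        super(TextComponent, self).__init__()
        self._text = text

    @recreate_surface
    def set_text(self, text):
        # changing the text changes the look, so the surface must be regenerated
        self._text = text

    def surface(self):
        return None  # placeholder: render self._text here in a real subclass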
|
gditzler/bio-course-materials
|
blast/get_seq.py
|
Python
|
gpl-3.0
| 1,093 | 0.014639 |
from Bio import Entrez
from Bio import SeqIO
from Bio import Seq
from Bio.Alphabet import IUPAC
genomes = ["Escherichia coli str. K-12 substr. MC4100 complete genome","Escherichia coli Nissle 1917, complete genome","Escherichia coli LY180, complete genome"]
genomes_short = ["K12","Nissle","LY180"]
for n,genome in enumerate(genomes):
Entrez.email = "fake@drexel.edu"
handle = Entrez.esearch(db="nucleotide", term=genome)
records = Entrez.read(handle)
handle.close()
handle = Entrez.efetch(db="nucleotide", id=records['IdList'][0], rettype="gb",
|
retmode="text")
record = SeqIO.read(handle, "genbank")
handle.close()
mygenes = ["thrA","mogA","dnaK","nhaA","ksgA"]
output_handle=open("seq"+str(n+1)+".fna","w")
for feature in record.features:
if feature.type=='CDS':
if 'gene' in feature.qualifiers:
if feature.qualifiers['gene'][0] in mygenes:
output_handle.write(">%s_%s\n%s\n" % (feature.qualifiers['gene'][0], genomes_short[n], str(feature.extract(record.seq))))
ou
|
tput_handle.close()
|
benogle/pylons_common
|
pylons_common/lib/date.py
|
Python
|
mit
| 3,799 | 0.010266 |
from pylons_common.lib.log import create_logger
from pylons_common.lib.utils import pluralize
logger = create_logger('pylons_common.lib.datetime')
from datetime import datetime, timedelta
DATE_FORMAT_ACCEPT = [u'%Y-%m-%d %H:%M:%S', u'%Y-%m-%d %H:%M:%SZ', u'%Y-%m-%d', u'%m-%d-%Y', u'%m/%d/%Y', u'%m.%d.%Y', u'%b %d, %Y']
popular_timezones = [u'US/Eastern', u'US/Central', u'US/Mountain', u'US/Pacific', u'US/Alaska', u'US/Hawaii', u'US/Samoa',
u'Europe/London', u'Europe/Paris', u'Europe/Istanbul', u'Europe/Moscow',
u'America/Puerto_Rico', u'America/Buenos_Aires', u'America/Sao_Paulo',
u'Asia/Dubai', u'Asia/Calcutta', u'Asia/Rangoon', u'Asia/Bangkok', u'Asia/Hong_Kong', u'Asia/Tokyo',
u'Australia/Brisbane', u'Australia/Sydney',
u'Pacific/Fiji']
def convert_date(value):
"""
converts a string into a datetime object
"""
if not value:
return None
if isinstance(value, datetime):
return value
def try_parse(val, format):
try:
dt = datetime.strptime(val, format)
except ValueError:
dt = None
return dt
converted_value = None
for format in DATE_FORMAT_ACCEPT:
converted_value = converted_value or try_parse(value, format)
if not converted_value:
raise ValueError('Cannot convert supposed date %s' % value)
return converted_value
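# Illustrative calls (values assumed), covering a few of the accepted formats:
#   convert_date(u'2015-03-01 13:45:00')  -> datetime(2015, 3, 1, 13, 45)
#   convert_date(u'03/01/2015')           -> datetime(2015, 3, 1, 0, 0)
#   convert_date(u'not a date')           -> raises ValueError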
def get_timezones():
import pytz
timezones = {0:u'UTC'}
for tzname in pytz.common_timezones:
tzname = tzname.decode('utf-8')
tz = pytz.timezone(tzname)
dt = datetime.utcnow()
# in theory, this is more elegant, but tz.dst (timezone daylight savings - 0 if off 1 if on) is returning 0 for everything
#offset =
|
tz.utcoffset(dt) - tz.dst(dt)
# we do this try/except to avoid the possibility that pytz fails at localization
# see https://bugs.
|
launchpad.net/pytz/+bug/207500
try:
offset = dt.replace(tzinfo=pytz.utc) - tz.localize(dt)
seconds = offset.days * 86400 + offset.seconds
minutes = seconds / 60
hours = minutes / 60
# adjust for offsets that are greater than 12 hours (these are repeats of other offsets)
if hours > 12:
hours = hours - 24
elif hours < -11:
hours = hours + 24
this_tz = timezones.get(hours, None)
if not this_tz:
timezones[hours] = tzname
elif tzname in popular_timezones:
# overwrite timezones with popular ones if equivalent
timezones[hours] = tzname
except:
logger.exception("Localization failure for timezone " + tzname)
return timezones
def relative_date_str(date, now=None, time=False):
'''
Will return a string like 'Today', 'Tomorrow' etc.
'''
if not now: now = datetime.utcnow()
if not date: return 'unknown'
diff = date.date() - now.date()
def day_time(day_str):
return '%s%s' % (day_str, time and ' at %s' % date.strftime("%I:%M %p") or '')
if diff.days == 0:
return day_time('Today')
elif diff.days == -1:
return day_time('Yesterday')
elif diff.days == 1:
return day_time('Tomorrow')
elif diff.days < 0 and diff.days >= -7:#Within one week back
return '%s ago' % pluralize(-diff.days, '{0} days', '1 day')
elif diff.days > 0 and diff.days < 7:#Within one week back
return 'in %s' % pluralize(diff.days, '{0} days', '1 day')
else:
return date.strftime("%b %e, %Y")## on 10/03/1980
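# Illustrative outputs (dates assumed), taking now as 2016-01-10 00:00 UTC:
#   relative_date_str(datetime(2016, 1, 10), now) -> 'Today'
#   relative_date_str(datetime(2016, 1, 9), now)  -> 'Yesterday'
#   relative_date_str(datetime(2016, 1, 11), now, time=True) -> 'Tomorrow at 12:00 AM'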
def now():
return datetime.utcnow()
|
m4773rcl0ud/launchpaddings
|
launchpad_utils.py
|
Python
|
gpl-3.0
| 4,442 | 0.00045 |
#
# This file contains functions and constants to talk
# to and from a Novation Launchpad via MIDI.
#
# Created by paul for mididings.
from mididings import *
# MEASURES - constants useful for the Pad
side = list(range(0, 8))
longside = list(range(0, 9))
step = 16 # vertical gap on pad
FirstCtrl = 104 # ctrl of first toprow key
# COLORS
# Colors on the Launchpad are determined by event velocity/value.
# Each key can be lit with red or green light (or both),
# with brightness 0 (off) - 3 (max).
# For convenience, define also the constants:
black = 4 # better not to use zero
red = 3
orange = 19
green = 48
yellow = 51 # better not to use 127
# If you want a darker variant of the above, use fractions (thirds).
# For example, green*2/3 is darker green. (Not for orange!)
def color(g, r):
"This gives the Launchpad color given the amount of green and red."
if g + r == 0:
return black # not zero
else:
return (16 * g) + r
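# A few illustrative values implied by the encoding above (not exhaustive):
#   color(3, 0) == green    # 48
#   color(0, 3) == red      # 3
#   color(1, 3) == orange   # 19
#   color(3, 3) == yellow   # 51
#   color(0, 0) == black    # "off" is encoded as 4, not 0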
# KEYS
# Each key on the Launchpad is activated by a MIDI event.
# The square keys and the right keys are notes,
# the top keys are control events.
# Rows and columns given the keys (starting from 0)
def row(x):
"This tells the row of the event (square or right)"
return x // step
def column(x):
"This tells us the column of event (right = 8)"
return x % step
def topcol(x):
"The same as colums, but for the top row"
return x - FirstCtrl
# Now the inverses: functions that point exactly to a key on the Launchpad
def right(row):
"This gives the note of a right key at position row"
return (row * step) + 8
def square(row, col):
"This gives the note of a square key at position row,col"
return (row * step) + col
def top(col):
"This gives the ctrl of a top key at position col"
return col + FirstCtrl
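# Illustrative round trips for the mapping above (step = 16, FirstCtrl = 104):
#   square(2, 5) == 37; row(37) == 2; column(37) == 5
#   right(0) == 8;      column(right(0)) == 8   # right column is always 8
#   top(3) == 107;      topcol(top(3)) == 3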
# KEY FILTERS
# First filters for notes from square, top, and right keys.
OnlySquare = Filter(NOTE) >> KeyFilter(notes=[square(i, j)
for i in side for j in side])
OnlyRight = KeyFilter(notes=[right(i) for i in side])
OnlyTop = Filter(CTRL) >> CtrlFilter(FirstCtrl + i for i in side)
# Now filters for rows, colums, and single keys.
def RowSqFilter(row):
"This selects only notes from specified row"
return KeyFilter(row * step, right(row)) # no right
def RowFilter(row):
"This selects only notes from specified row"
return KeyFilter(row * step, right(row) + 1) # also right
def ColumnFilter(col):
"This selects only notes from specified column"
return KeyFilter(notes=[square(i, col) for i in side])
def TopFilter(col):
"This selects only specified key from top row"
return CtrlFilter(top(col))
def RightFilter(row):
"This selects only specified key from right"
return KeyFilter(right(row))
def SquareFilter(row, col):
"This selects only specified key from square"
return KeyFilter(square(row, col))
# KEY GENERATORS
def SquareKey(row, col):
"This creates square note with given row and column"
return Key(square(row, col))
def RightKey(row):
"This creates right note with given row"
return Key(right(row))
def TopKey(col, val):
"This creates top ctrl with given column"
return Ctrl(top(col), val)
# NOTES
A = 21
B = 23
C = 24
D = 26
E = 28
F = 29
G = 31
Octave = 12 # semitones
minors = { # scale
0: 0, # interval in semitones
1: 2,
2: 3,
3: 5,
4: 7,
5: 8,
6: 10,
7: 12,
}
minharms = { # scale
0: 0, # interval in semitones
1: 2,
2: 3,
3: 5,
4: 7,
5: 8,
6: 10,
7: 11, # harmonic
}
majors = {
0: 0,
1: 2,
2: 4,
3: 5,
4: 7,
5: 9,
6: 11,
7: 12,
}
dorics = {
0: 0,
1: 2,
2: 3,
3: 5,
4: 7,
5: 9,
6: 10,
7: 12,
}
phrygians = {
0: 0,
1: 1,
2: 3,
3: 5,
4: 7,
5: 8,
6: 10,
7: 12,
}
# I only use these scales - feel free to add your own!
# Now the same thing, but to feed into Transpose:
Minor = [minors[i] - i for i in side]
MinHarm = [minharms[i] - i for i in side]
Major = [majors[i] - i for i in side]
Doric = [dorics[i] - i for i in side]
Phrygian = [phrygians[i] - i for i in side]
# How to use it in practice:
def OctFilter(col, tonic):
return KeyFilter(notes=[(tonic + col + (i * Octave)) for i in longside])
def MakeScale(tonic, scale):
return [OctFilter(i, tonic) >> Transpose(scale[i]) for i in side]
|
mfrey/RIOT
|
tests/xtimer_usleep/tests/01-run.py
|
Python
|
lgpl-2.1
| 2,231 | 0.001345 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
# Copyright (C) 2017 Francisco Acosta <francisco.acosta@inria.fr>
# 2017 Freie Universität Berlin
#
# This file is subject to the terms and conditions of the GNU Lesser
# General Public License v2.1. See the file LICENSE in the top level
# directory for more details.
import sys
import time
from testrunner import run
US_PER_SEC = 1000000
INTERNAL_JITTER = 0.05
EXTERNAL_JITTER = 0.15
class InvalidTimeout(Exception):
pass
def testfunc(child):
child.expect(u"Running test (\\d+) times with (\\d+) distinct sleep times")
RUNS = int(child.match.group(1))
SLEEP_TIMES_NUMOF = int(child.match.group(2))
try:
child.expect_exact(u"Please hit any key and then ENTER to continue")
child.sendline(u"a")
start_test = time.time()
for m in range(RUNS):
for n in range(SLEEP_TIMES_NUMOF):
child.expect(u"Slept for (\\d+) us \\(expected: (\\d+) us\\) Offset: (-?\\d+) us")
sleep_time = int(child.match.group(1))
exp = int(child.match.group(2))
upper_bound = exp + (exp * INTERNAL_JITTER)
if not (exp < sleep_time < upper_bound):
delta = (upper_bound-exp)
error = min(upper_bound-sleep_time, sleep_time-exp)
raise InvalidTimeout("Invalid timeout %d, expected %d < timeout < %d"
"\nHost max error\t%d\nerror\t\t%d" %
                                         (sleep_time, exp, upper_bound,
                                          delta, error))
testtime = (time.time() - start_test) * US_PER_SEC
child.expect(u"Test ran for (\\d+) us")
exp = int(child.match.group(1))
lower_bound = exp - (exp * EXTERNAL_JITTER)
upper_bound = exp + (exp * EXTERNAL_JITTER)
        if not (lower_bound < testtime < upper_bound):
raise InvalidTimeout("Host timer measured %d us (client measured %d us)" %
(testtime, exp))
except InvalidTimeout as e:
print(e)
sys.exit(1)
if __name__ == "__main__":
sys.exit(run(testfunc))
|
MSPARP/newparp
|
newparp/helpers/tags.py
|
Python
|
agpl-3.0
| 1,613 | 0 |
from flask import g
import re
from sqlalchemy import and_
from sqlalchemy.orm.exc import NoResultFound
from newparp.model import (
CharacterTag,
Tag,
)
special_char_regex = re.compile("[\\ \\./]+")
underscore_strip_regex = re.compile("^_+|_+$")
def name_from_alias(alias):
# 1. Change to lowercase.
# 2. Change spaces to underscores.
# 3. Change . and / to underscores because they screw up the routing.
# 4. Strip extra underscores from the start and end.
return underscore_strip_regex.sub(
"",
special_char_regex.sub("_", alias)
).lower()
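# Illustrative examples of the normalisation steps listed above:
#   name_from_alias("Homestuck / MSPA") -> "homestuck_mspa"
#   name_from_alias(".Hack//Sign")      -> "hack_sign"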
def character_tags_from_form(form):
tag_dict = {}
for tag_type in ("fandom", "character", "gender"):
for alias in form[tag_type].split(","):
alias = alias.strip()
if alias == "":
continue
name = name_from_alias(alias)
if name == "":
continue
            tag_dict[(tag_type, name)] = alias
character_tags = []
used_ids = set()
for (tag_type, name), alias in tag_dict.items():
try:
tag = g.db.query(Tag).filter(and_(
Tag.type == tag_type, Tag.name == name,
)).one()
except NoResultFound:
tag = Tag(type=tag_type, name=name)
g.db.add(tag)
g.db.flush()
tag_id = (tag.synonym_id or tag.id)
|
# Remember IDs to skip synonyms.
if tag_id in used_ids:
continue
used_ids.add(tag_id)
character_tags.append(CharacterTag(tag_id=tag_id, alias=alias))
return character_tags
|
nesdis/djongo
|
tests/django_tests/tests/v22/tests/custom_managers/tests.py
|
Python
|
agpl-3.0
| 25,648 | 0.001716 |
from django.db import models
from django.test import TestCase
from .models import (
Book, Car, CustomManager, CustomQuerySet, DeconstructibleCustomManager,
FastCarAsBase, FastCarAsDefault, FunPerson, OneToOneRestrictedModel,
Person, PersonFromAbstract, PersonManager, PublishedBookManager,
RelatedModel, RestrictedModel,
)
class CustomManagerTests(TestCase):
custom_manager_names = [
'custom_queryset_default_manager',
'custom_queryset_custom_manager',
]
@classmethod
def setUpTestData(cls):
cls.b1 = Book.published_objects.create(
title="How to program", author="Rodney Dangerfield", is_published=True)
cls.b2 = Book.published_objects.create(
title="How to be smart", author="Albert Einstein", is_published=False)
cls.p1 = Person.objects.create(first_name="Bugs", last_name="Bunny", fun=True)
cls.droopy = Person.objects.create(first_name="Droopy", last_name="Dog", fun=False)
def test_custom_manager_basic(self):
"""
Test a custom Manager method.
"""
self.assertQuerysetEqual(
Person.objects.get_fun_people(), [
"Bugs Bunny"
],
str
)
def test_queryset_copied_to_default(self):
"""
The methods of a custom QuerySet are properly copied onto the
default Manager.
"""
for manager_name in self.custom_manager_names:
with self.subTest(manager_name=manager_name):
manager = getattr(Person, manager_name)
# Public methods are copied
manager.public_method()
# Private methods are not copied
with self.assertRaises(AttributeError):
manager._private_method()
def test_manager_honors_queryset_only(self):
for manager_name in self.custom_manager_names:
with self.subTest(manager_name=manager_name):
manager = getattr(Person, manager_name)
# Methods with queryset_only=False are copied even if they are private.
manager._optin_private_method()
# Methods with queryset_only=True aren't copied even if they are public.
msg = "%r object has no attribute 'optout_public_method'" % manager.__class__.__name__
with self.assertRaisesMessage(AttributeError, msg):
manager.optout_public_method()
def test_manager_use_queryset_methods(self):
"""
Custom manager will use the queryset methods
"""
for manager_name in self.custom_manager_names:
with self.subTest(manager_name=manager_name):
manager = getattr(Person, manager_name)
queryset = manager.filter()
self.assertQuerysetEqual(queryset, ["Bugs Bunny"], str)
self.assertIs(queryset._filter_CustomQuerySet, True)
# Specialized querysets inherit from our custom queryset.
queryset = manager.values_list('first_name', flat=True).filter()
self.assertEqual(list(queryset), ["Bugs"])
self.assertIs(queryset._filter_CustomQuerySet, True)
self.assertIsInstance(queryset.values(), CustomQuerySet)
self.assertIsInstance(queryset.values().values(), CustomQuerySet)
self.assertIsInstance(queryset.values_list().values(), CustomQuerySet)
def test_init_args(self):
"""
The custom manager __init__() argument has been set.
"""
self.assertEqual(Person.custom_queryset_custom_manager.init_arg, 'hello')
def test_manager_attributes(self):
"""
Custom manager method is only available on the manager and not on
querysets.
"""
Person.custom_queryset_custom_manager.manager_only()
msg = "'CustomQuerySet' object has no attribute 'manager_only'"
with self.assertRaisesMessage(AttributeError, msg):
Person.custom_queryset_custom_manager.all().manager_only()
def test_queryset_and_manager(self):
"""
Queryset method doesn't override the custom manager method.
"""
queryset = Person.custom_queryset_custom_manager.filter()
self.assertQuerysetEqual(queryset, ["Bugs Bunny"], str)
self.assertIs(queryset._filter_CustomManager, True)
def test_related_manager(self):
"""
The related managers extend the default manager.
"""
self.assertIsInstance(self.droopy.books, PublishedBookManager)
|
        self.assertIsInstance(self.b2.authors, PersonManager)
def test_no_objects(self):
"""
The default manager, "objects", doesn't exist, because a custom one
was provided.
"""
msg = "type object 'Book' has no attribute 'objects'"
with self.assertRaisesMessage(AttributeError, msg):
Book.objects
def test_filtering(self):
"""
Custom managers respond to usual filtering methods
"""
self.assertQuerysetEqual(
Book.published_objects.all(), [
"How to program",
],
lambda b: b.title
)
def test_fk_related_manager(self):
Person.objects.create(first_name="Bugs", last_name="Bunny", fun=True, favorite_book=self.b1)
Person.objects.create(first_name="Droopy", last_name="Dog", fun=False, favorite_book=self.b1)
FunPerson.objects.create(first_name="Bugs", last_name="Bunny", fun=True, favorite_book=self.b1)
FunPerson.objects.create(first_name="Droopy", last_name="Dog", fun=False, favorite_book=self.b1)
self.assertQuerysetEqual(
self.b1.favorite_books.order_by('first_name').all(), [
"Bugs",
"Droopy",
],
lambda c: c.first_name,
ordered=False,
)
self.assertQuerysetEqual(
self.b1.fun_people_favorite_books.all(), [
"Bugs",
],
lambda c: c.first_name,
ordered=False,
)
self.assertQuerysetEqual(
self.b1.favorite_books(manager='boring_people').all(), [
"Droopy",
],
lambda c: c.first_name,
ordered=False,
)
self.assertQuerysetEqual(
self.b1.favorite_books(manager='fun_people').all(), [
"Bugs",
],
lambda c: c.first_name,
ordered=False,
)
def test_gfk_related_manager(self):
Person.objects.create(first_name="Bugs", last_name="Bunny", fun=True, favorite_thing=self.b1)
Person.objects.create(first_name="Droopy", last_name="Dog", fun=False, favorite_thing=self.b1)
FunPerson.objects.create(first_name="Bugs", last_name="Bunny", fun=True, favorite_thing=self.b1)
FunPerson.objects.create(first_name="Droopy", last_name="Dog", fun=False, favorite_thing=self.b1)
self.assertQuerysetEqual(
self.b1.favorite_things.all(), [
"Bugs",
"Droopy",
],
lambda c: c.first_name,
ordered=False,
)
self.assertQuerysetEqual(
self.b1.fun_people_favorite_things.all(), [
"Bugs",
],
lambda c: c.first_name,
ordered=False,
)
self.assertQuerysetEqual(
self.b1.favorite_things(manager='boring_people').all(), [
"Droopy",
],
lambda c: c.first_name,
ordered=False,
)
self.assertQuerysetEqual(
self.b1.favorite_things(manager='fun_people').all(), [
"Bugs",
],
lambda c: c.first_name,
ordered=False,
)
def test_m2m_related_manager(self):
bugs = Person.objects.create(first_name="Bugs", last_name="Bunny", fun=True)
self.b1.authors.add(bugs)
droopy = Person.objects.create(first_name="Droopy", last_name="Dog", fun=False)
self.b1.authors.add(droopy)
|
evanmiltenburg/python-for-text-analysis
|
Extra_Material/Examples/Separate_Files/main_dir_script.py
|
Python
|
apache-2.0
| 78 | 0 |
def hello_world():
"Fu
|
nction that says hello."
print("Hel
|
lo, world!")
|
namhyung/uftrace
|
tests/t001_basic.py
|
Python
|
gpl-2.0
| 530 | 0.001887 |
#!/usr/bin/env python
from runtest import TestBase
class TestCase(TestBase):
def __init__(self):
TestBase.__init__(self, 'abc', """
# DURATION TID FUNCTION
62.202 us [28141] | __cxa_atexit();
[28141] | main() {
   [28141] |     a() {
[28141] | b() {
[28141] | c() {
0.753 us [28141] | getpid();
1.430 us [28141] | } /* c */
1.915 us [28141] | } /* b */
2.405 us [28141] | } /* a */
3.005 us [28141] | } /* main */
""")
|
salas106/lahorie
|
lahorie/utils/sql.py
|
Python
|
mit
| 886 | 0.002257 |
# -*- coding: utf8 -*-
"""
The ``dbs`` module
===================
Contain all functions to access to main site db or any sql-lite db, in a secure way
"""
import sqlalchemy
from sqlalchemy.orm import sessionmaker
from sqlalchemy.ext.automap import automap_base
from sqlalchemy.sql import join
__all__ = ['join', 'create_engine_session', 'auto_map_orm']
def create_engine_session(engine_url, echo=True):
"""
Create a sql session
engine is the rfc1738 compliant url
    http://docs.sqlalchemy.org/en/latest/dialects/index.html
:param engine_url:
:param echo:
:return:
"
|
""
engine = sqlalchemy.create_engine(engine_url, echo=echo)
session_class = sessionmaker(bind=engine)
session = session_class()
return engine, session
def auto_map_orm(engine):
base_class = automap_base()
    base_class.prepare(engine, reflect=True)
    return base_class
|
jeffstieler/bedrock-ansible
|
lib/trellis/plugins/vars/version.py
|
Python
|
mit
| 2,048 | 0.007324 |
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible import __version__
from ansible.errors import AnsibleError
from distutils.version import LooseVersion
from operator import eq, ge, gt
from sys import version_info
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
version_requirement = '2.5.0.0'
version_tested_max = '2.7.5'
python3_required_version = '2.5.3'
if version_info[0] == 3 and not ge(LooseVersion(__version__), LooseVersion(python3_required_version)):
raise AnsibleError(('Ansible >= {} is required when using Python 3.\n'
'Either downgrade to Python 2 or update your Ansible version to {}.').format(python3_required_version, python3_required_version))
if not ge(LooseVersion(__version__), LooseVersion(version_requirement)):
raise AnsibleError(('Trellis no longer supports Ansible {}.\n'
                        'Please upgrade to Ansible {} or higher.').format(__version__, version_requirement))
elif gt(LooseVersion(__version__), LooseVersion(version_tested_max)):
    display.warning(u'Your Ansible version is {} but this version of Trellis has only been tested for '
                    u'compatibility with Ansible {} -> {}. It is advisable to check for Trellis updates or '
                    u'downgrade your Ansible version.'.format(__version__, version_requirement, version_tested_max))
if eq(LooseVersion(__version__), LooseVersion('2.5.0')):
    display.warning(u'Your Ansible version is {}. Consider upgrading your Ansible version to avoid '
u'erroneous warnings such as `Removed restricted key from module data...`'.format(__version__))
# Import BaseVarsPlugin after Ansible version check.
# Otherwise import error for Ansible versions older than 2.4 would prevent display of version check message.
from ansible.plugins.vars import BaseVarsPlugin
class VarsModule(BaseVarsPlugin):
def get_vars(self, loader, path, entities, cache=True):
return {}
|
OrlyMar/gasistafelice
|
gasistafelice/rest/views/blocks/account_state.py
|
Python
|
agpl-3.0
| 3,887 | 0.009262 |
from gasistafelice.rest.views.blocks.base import BlockWithList
from django.utils.translation import ugettext as _, ugettext_lazy as _lazy
#------------------------------------------------------------------------------#
# #
#------------------------------------------------------------------------------#
class Block(BlockWithList):
BLOCK_NAME = "account_state"
BLOCK_DESCRIPTION = _("Economic state")
BLOCK_VALID_RESOURCE_TYPES = ["gas", "site"]
def _get_resource_list(self, request):
return request.resource.accounts
# TODO fero CHECK
# THIS IS USEFUL FOR USER ACTIONS: add/update/delete
# # Calculate allowed user actions
# #
    # user_actions = []
#
# if settings.CAN_CHANGE_CONFIGURATION_VIA_WEB == True:
# user = request.user
# if can_write_to_resource(user,res):
# if resource_type in ['container', 'node', 'target', 'measure']:
#
# if (resource_type in ['target', 'measure']):
# if res.suspended:
# user_actions.append('resume')
# else:
# user_actions.append('suspend')
# else:
# user_actions.append('resume')
# user_actions.append('suspend')
# TODO fero CHECK
# THIS IS USEFUL FOR ADD/REMOVE NEW GAS
# elif args == "new_note":
# return self.add_new_note(request, resource_type, resource_id)
# elif args == "remove_note":
# return self.remove_note(request, resource_type, resource_id)
#------------------------------------------------------------------------------#
# #
#------------------------------------------------------------------------------#
# TODO fero CHECK
# THIS IS USEFUL FOR ADD/REMOVE NEW GAS
# def add_new_note(self,request, resource_type, resource_id):
# resource = request.resource
#
# if request.POST:
#
# #title = request.REQUEST.get('title');
# body = request.REQUEST.get('body');
#
# new_comment = Comment(content_object = resource
# ,site = DjangoSite.objects.all()[0]
# ,user = request.user
# ,user_name = request.user.username
# ,user_email = request.user.email
# ,user_url = ''
# ,comment = body
# ,ip_address = None
# ,is_public = True
# ,is_removed = False
# )
#
# new_comment.save()
#
# return HttpResponse('<div id="response" resource_type="%s" resource_id="%s" class="success">ok</div>' % (resource.resource_type, resource.id))
#
# return HttpResponse('')
#
# #------------------------------------------------------------------------------#
# # #
# #------------------------------------------------------------------------------#
#
# def remove_note(self, request, resource_type, resource_id):
#
# resource = request.resource
#
# note_id = request.REQUEST.get('note_id')
#
# note = Comment.objects.get(id=note_id)
# note.delete()
#
# return HttpResponse('<div id="response" resource_type="%s" resource_id="%s" class="success">ok</div>' % (resource.resource_type, resource.id))
|
dkamotsky/program-y
|
src/test/aiml_tests/person_tests/test_person_aiml.py
|
Python
|
mit
| 904 | 0.005531 |
import unittest
import os
from test.aiml_tests.client import TestClient
from programy.config.brain import BrainFileConfiguration
class BasicTestClient(TestClient):
def __init__(self):
TestClient.__init__(self)
def load_configuration(self, arguments):
super(BasicTestClient, self).load_configuration(arguments)
self.configuration.brain_configuration._aiml_files = BrainFileConfiguration(files=os.path.dirname(__file__))
|
self.configuration.brain_configuration._person = os.path.dirname(__file__)+"/person.txt"
class PersonAIMLTests(unittest.TestCase):
@classmethod
def setUpClass(cls):
PersonAIMLTests.test_client = BasicTestClient()
def test_person(self):
        response = PersonAIMLTests.test_client.bot.ask_question("test", "TEST PERSON")
self.assertIsNotNone(response)
self.assertEqual(response, "This is your2 cat")
|
pdl30/pychiptools
|
scripts/pychip_diff_bind.py
|
Python
|
gpl-2.0
| 376 | 0 |
#!/usr/bin/python
|
########################################################################
# 1 August 2014
# Patrick Lombard, Centre for Stem Stem Research
# Core Bioinformatics Group
# University of Cambridge
# All right reserved.
########################################################################
import pychiptools.call_diff_bind
pychiptools.call_diff_bind.main()
|
blabla1337/skf-flask
|
skf/api/checklist_category/endpoints/checklist_category_update.py
|
Python
|
agpl-3.0
| 1,240 | 0.003226 |
from flask import request
from flask_restplus import Resource
from skf.api.security import security_headers, validate_privilege
from skf.api.checklist_category.business import update_checklist_category
from skf.api.checklist_category.serializers import checklist_type_update, message
from skf.api.kb.parsers import authorization
from skf.api.restplus import api
from skf.api.security import log, val_num, val_alpha, val_alpha_num, val_alpha_num_special
ns = api.namespace('checklist_category', description='Operations related to checklist items')
@ns.route('/update/<int:id>')
@api.doc(params={'id': 'The checklist category id'})
@api.response(404, 'Validation error', message)
class ChecklistCategoryUpdate(Resource):
@api.expect(authorization, checklist_type_update)
@api.response(400, 'No results found', message)
def put(self, id):
"""
Update a checklist type.
|
* Privileges required: **edit**
"""
data = request.json
val_num(id)
val_alpha_num_special(data.get('name'))
val_alpha_num_special(data.get('description'))
validate_privilege(self, 'edit')
result = update_checklist_category(id, data)
return result, 200, security_headers()
|
flaviogrossi/billiard
|
billiard/pool.py
|
Python
|
bsd-3-clause
| 64,479 | 0 |
# -*- coding: utf-8 -*-
#
# Module providing the `Pool` class for managing a process pool
#
# multiprocessing/pool.py
#
# Copyright (c) 2006-2008, R Oudkerk
# Licensed to PSF under a Contributor Agreement.
#
from __future__ import absolute_import
#
# Imports
#
import errno
import itertools
import os
import platform
import signal
import sys
import threading
import time
import warnings
from collections import deque
from functools import partial
from . import cpu_count, get_context
from . import util
from .common import pickle_loads, reset_signals, restart_state
from .compat import get_errno, send_offset
from .einfo import ExceptionInfo
from .dummy import DummyProcess
from .exceptions import (
CoroStop,
RestartFreqExceeded,
SoftTimeLimitExceeded,
Terminated,
TimeLimitExceeded,
TimeoutError,
WorkerLostError,
)
from .five import Empty, Queue, range, values, reraise, monotonic
from .util import Finalize, debug
PY3 = sys.version_info[0] == 3
if platform.system() == 'Windows': # pragma: no cover
# On Windows os.kill calls TerminateProcess which cannot be
    # handled by any process, so this is needed to terminate the task
# *and its children* (if any).
from ._win import kill_processtree as _kill # noqa
SIGKILL = signal.SIGTERM
else:
from os import kill as _kill # noqa
SIGKILL = signal.SIGKILL
try:
TIMEOUT_MAX = threading.TIMEOUT_MAX
except AttributeError: # pragma: no cover
TIMEOUT_MAX = 1e10 # noqa
if sys.version_info >= (3, 3):
_Semaphore = threading.Semaphore
else:
# Semaphore is a factory function pointing to _Semaphore
_Semaphore = threading._Semaphore # noqa
SIGMAP = dict(
(getattr(signal, n), n) for n in dir(signal) if n.startswith('SIG')
)
#
# Constants representing the state of a pool
#
RUN = 0
CLOSE = 1
TERMINATE = 2
#
# Constants representing the state of a job
#
ACK = 0
READY = 1
TASK = 2
NACK = 3
DEATH = 4
#
# Exit code constants
#
EX_OK = 0
EX_FAILURE = 1
EX_RECYCLE = 0x9B
# Signal used for soft time limits.
SIG_SOFT_TIMEOUT = getattr(signal, "SIGUSR1", None)
#
# Miscellaneous
#
LOST_WORKER_TIMEOUT = 10.0
EX_OK = getattr(os, "EX_OK", 0)
job_counter = itertools.count()
Lock = threading.Lock
def _get_send_offset(connection):
try:
native = connection.send_offset
except AttributeError:
native = None
if native is None:
return partial(send_offset, connection.fileno())
return native
def human_status(status):
if (status or 0) < 0:
try:
return 'signal {0} ({1})'.format(-status, SIGMAP[-status])
except KeyError:
return 'signal {0}'.format(-status)
return 'exitcode {0}'.format(status)
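# Illustrative values (signal names depend on the platform's signal module):
#   human_status(0)  -> 'exitcode 0'
#   human_status(3)  -> 'exitcode 3'
#   human_status(-9) -> 'signal 9 (SIGKILL)' on a typical Linux signal table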
def mapstar(args):
return list(map(*args))
def starmapstar(args):
return list(itertools.starmap(args[0], args[1]))
def error(msg, *args, **kwargs):
if util._logger:
util._logger.error(msg, *args, **kwargs)
def stop_if_not_current(thread, timeout=None):
if thread is not threading.current_thread():
thread.stop(timeout)
class LaxBoundedSemaphore(_Semaphore):
"""Semaphore that checks that # release is <= # acquires,
but ignores if # releases >= value."""
def __init__(self, value=1, verbose=None):
if PY3:
_Semaphore.__init__(self, value)
else:
_Semaphore.__init__(self, value, verbose)
self._initial_value = value
def grow(self):
if PY3:
cond = self._cond
else:
cond = self._Semaphore__cond
with cond:
self._initial_value += 1
self._Semaphore__value += 1
cond.notify()
def shrink(self):
self._initial_value -= 1
self.acquire()
if PY3:
def release(self):
cond = self._cond
with cond:
if self._value < self._initial_value:
self._value += 1
cond.notify_all()
def clear(self):
while self._value < self._initial_value:
_Semaphore.release(self)
else:
def release(self): # noqa
cond = self._Semaphore__cond
with cond:
if self._Semaphore__value < self._initial_value:
self._Semaphore__value += 1
cond.notifyAll()
def clear(self): # noqa
while self._Semaphore__value < self._initial_value:
_Semaphore.release(self)
#
# Exceptions
#
class MaybeEncodingError(Exception):
"""Wraps possible unpickleable errors, so they can be
safely sent through the socket."""
def __init__(self, exc, value):
self.exc = repr(exc)
self.value = repr(value)
super(MaybeEncodingError, self).__init__(self.exc, self.value)
def __repr__(self):
return "<MaybeEncodingError: %s>" % str(self)
def __str__(self):
return "Error sending result: '%r'. Reason: '%r'." % (
self.value, self.exc)
class WorkersJoined(Exception):
"""All workers have terminated."""
def soft_timeout_sighandler(signum, frame):
raise SoftTimeLimitExceeded()
#
# Code run by worker processes
#
class Worker(object):
_controlled_termination = False
_job_terminated = False
def __init__(self, inq, outq, synq=None, initializer=None, initargs=(),
maxtasks=None, sentinel=None, on_exit=None,
sigprotection=True, wrap_exception=True):
assert maxtasks is None or (type(maxtasks) == int and maxtasks > 0)
self.initializer = initializer
self.initargs = initargs
self.maxtasks = maxtasks
self._shutdown = sentinel
self.on_exit = on_exit
self.sigprotection = sigprotection
self.inq, self.outq, self.synq = inq, outq, synq
self.wrap_exception = wrap_exception # XXX cannot disable yet
self.contribute_to_object(self)
def contribute_to_object(self, obj):
obj.inq, obj.outq, obj.synq = self.inq, self.outq, self.synq
obj.inqW_fd = self.inq._writer.fileno() # inqueue write fd
obj.outqR_fd = self.outq._reader.fileno() # outqueue read fd
if self.synq:
obj.synqR_fd = self.synq._reader.fileno() # synqueue read fd
obj.synqW_fd = self.synq._writer.fileno() # synqueue write fd
obj.send_syn_offset = _get_send_offset(self.synq._writer)
else:
obj.synqR_fd = obj.synqW_fd = obj._send_syn_offset = None
obj._quick_put = self.inq._writer.send
obj._quick_get = self.outq._reader.recv
obj.send_job_offset = _get_send_offset(self.inq._writer)
return obj
def __reduce__(self):
return self.__class__, (
self.inq, self.outq, self.synq, self.initializer,
self.initargs, self.maxtasks, self._shutdown, self.on_exit,
self.sigprotection, self.wrap_exception,
)
def __call__(self):
_exit = sys.exit
_exitcode = [None]
def exit(status=None):
_exitcode[0] = status
return _exit()
sys.exit = exit
pid = os.getpid()
self._make_child_methods()
self.after_fork()
self.on_loop_start(pid=pid) # callback on loop start
|
try:
sys.exit(self.workloop(pid=pid))
except Exception as exc:
error('Pool process %r error: %r', self, exc, exc_info=1)
self._do_exit(pid, _exitcode[0], exc)
finally:
self._do_exit(pid, _exitcode[0], None)
def _do_exit(self, pid, exitcode, exc=None):
if exitcode is None:
exitcode = EX_FAILURE if exc else EX_OK
if self.on_exit is not None:
|
self.on_exit(pid, exitcode)
if sys.platform != 'win32':
try:
self.outq.put((DEATH, (pid, exitcode)))
time.sleep(1)
finally:
os._exit(exitcode)
else:
os._exit(exitcode)
def on_loop_start(self, pid):
pass
def terminate_controlled(self):
        self._controlled_termination = True
|
katychuang/python-data-sci-basics
|
src/numpy_utils.py
|
Python
|
mit
| 3,188 | 0.016311 |
# coding: utf-8
# numpy_utils for Intro to Data Science with Python
# Author: Kat Chuang
# Created: Nov 2014
# --------------------------------------
import numpy
## Stage 2 begin
fieldNames = ['', 'id', 'priceLabel', 'name','brandId', 'brandName', 'imageLink',
'desc', 'vendor', 'patterned', 'material']
dataTypes = [('myint', 'i'), ('myid', 'i'), ('price', 'f8'), ('name', 'a200'),
('brandId', '<i8'), ('brandName', 'a200'), ('imageUrl', '|S500'),
('description', '|S900'), ('vendor', '|S100'), ('pattern', '|S50'), ('material', '|S50'), ]
def load_data(filename):
my_csv = numpy.genfromtxt(filename, delimiter='\t', skip_header=1,
names=fieldNames, invalid_raise=False,
dtype=dataTypes)
return my_csv
#2.a count
def size(my_csv):
print("Length (numpy): {}".format(my_csv.size))
#2.b sum
def calculate_numpy_sum(my_field):
field_in_float = [float(item) for item in my_field]
total = numpy.sum(field_in_float)
return total
#2.c mean
def find_numpy_average(my_field):
field_in_float = [float(item) for item in my_field]
total = calculate_numpy_sum(field_in_float)
size = len(my_field)
average = total / size
return average
#2.d max, min
def numpy_max(my_field_in_float):
return numpy.amax(my_field_in_float)
def numpy_min(my_field_in_float):
return numpy.amin(my_field_in_float)
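# Illustrative use of the Stage 2 helpers above (numbers are made up):
#   calculate_numpy_sum([1.0, 2.5, 3.5])   # -> 7.0
#   find_numpy_average([1.0, 2.5, 3.5])    # -> 2.333...
#   numpy_max([1.0, 2.5, 3.5])             # -> 3.5
#   numpy_min([1.0, 2.5, 3.5])             # -> 1.0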
## Stage 2 end
# --------------------------------------
## Stage 3 begin
from my_utils import filter_col_by_string, filter_col_by_float
## Stage 3 end
# --------------------------------------
## Stage 4 begin
from my_utils import write_to_file, write_brand_and_price_to_file
## Stage 4 end
# --------------------------------------
## Stage 5 begin
import matplotlib.pyplot as plt
plt.style.use('ggplot')
def plot_all_bars(prices_in_float, exported_figure_filename):
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
prices = list(map(int, prices_in_float))
X = numpy.arange(len(prices))
width = 0.25
ax.bar(X+width, prices, width)
ax.set_xlim([0, 5055])
fig.savefig(exported_figure_filename)
def create_chart_for_embed(sample, title):
prices = sorted(map(int, sample))
x_axis_ticks = list( range(len(sample)) )
plt.plot(x_axis_ticks, prices, 'g', label='price points', linewidth=2)
def export_chart(sample, title):
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
prices = sorted(map(int, sample))
x_axis_ticks = list( range(len(sample)) )
    ax.plot(x_axis_ticks, prices, 'g', label='price points', linewidth=2)
ax.set_title(title)
ax.set_xlabel(title)
ax.set_ylabel('Number of Ties')
    if len(prices) > 20:
ax.set_xlim([0, round(len(prices), -1)])
else:
ax.set_xlim([0, len(prices)])
fig.savefig('_charts/' + title + '.png')
def prices_of_list(sampleData):
temp_list = []
for row in sampleData[1:]:
priceCol = float(row[2])
temp_list.append(priceCol)
return temp_list
## Stage 5 end
# --------------------------------------
## Stage 6 begin
## Stage 6 end
# --------------------------------------
|
web2py/pydal
|
tests/sql.py
|
Python
|
bsd-3-clause
| 137,514 | 0.001136 |
# -*- coding: utf-8 -*-
"""
Basic unit tests
"""
from __future__ import print_function
import os
import glob
import datetime
import json
import pickle
from pydal._compat import basestring, StringIO, integer_types, xrange, BytesIO, to_bytes
from pydal import DAL, Field
from pydal.helpers.classes import SQLALL, OpRow
from pydal.objects import Table, Expression, Row
from ._compat import unittest
from ._adapt import (
DEFAULT_URI,
IS_POSTGRESQL,
IS_SQLITE,
    IS_MSSQL,
IS_MYSQL,
IS_TERADATA,
IS_NOSQL,
IS_ORACLE,
)
from ._helpers import DALtest
long = integer_types[-1]
print("Testing against %s engine (%s)" % (DEFAULT_URI.partition(":")[0], DEFAULT_URI))
ALLOWED_DATATYPES = [
"string",
"text",
"integer",
"boolean",
"double",
"blob",
"date",
"time",
"datetime",
"upload",
"password",
"json",
"bigint",
]
def setUpModule():
if IS_MYSQL or IS_TERADATA or IS_ORACLE:
db = DAL(DEFAULT_URI, check_reserved=["all"])
def clean_table(db, tablename):
try:
db.define_table(tablename)
except Exception as e:
pass
try:
db[tablename].drop()
except Exception as e:
pass
for tablename in [
"tt",
"t0",
"t1",
"t2",
"t3",
"t4",
"easy_name",
"tt_archive",
"pet_farm",
"person",
]:
clean_table(db, tablename)
db.close()
def tearDownModule():
if os.path.isfile("sql.log"):
os.unlink("sql.log")
for a in glob.glob("*.table"):
os.unlink(a)
class TestFields(DALtest):
def testFieldName(self):
"""
- a "str" something
- not a method or property of Table
- "dotted-notation" friendly:
- a valid python identifier
- not a python keyword
- not starting with underscore or an integer
- not containing dots
Basically, anything alphanumeric, no symbols, only underscore as
punctuation
"""
# Check that Fields cannot start with underscores
self.assertRaises(SyntaxError, Field, "_abc", "string")
# Check that Fields cannot contain punctuation other than underscores
self.assertRaises(SyntaxError, Field, "a.bc", "string")
# Check that Fields cannot be a name of a method or property of Table
for x in ["drop", "on", "truncate"]:
self.assertRaises(SyntaxError, Field, x, "string")
# Check that Fields allows underscores in the body of a field name.
self.assertTrue(
Field("a_bc", "string"),
"Field isn't allowing underscores in fieldnames. It should.",
)
# Check that Field names don't allow a python keyword
self.assertRaises(SyntaxError, Field, "True", "string")
self.assertRaises(SyntaxError, Field, "elif", "string")
self.assertRaises(SyntaxError, Field, "while", "string")
# Check that Field names don't allow a non-valid python identifier
non_valid_examples = ["1x", "xx$%@%", "xx yy", "yy\na", "yy\n"]
for a in non_valid_examples:
self.assertRaises(SyntaxError, Field, a, "string")
# Check that Field names don't allow a unicode string
non_valid_examples = non_valid_examples = [
"ℙƴ☂ℌøἤ",
u"ℙƴ☂ℌøἤ",
u"àè",
u"ṧøмℯ",
u"тεṧт",
u"♥αłüℯṧ",
u"ℊεᾔ℮яαт℮∂",
u"♭ƴ",
u"ᾔ☤ρℌℓ☺ḓ",
]
for a in non_valid_examples:
self.assertRaises(SyntaxError, Field, a, "string")
def testFieldTypes(self):
# Check that string, and password default length is 512
for typ in ["string", "password"]:
self.assertTrue(
Field("abc", typ).length == 512,
"Default length for type '%s' is not 512 or 255" % typ,
)
# Check that upload default length is 512
self.assertTrue(
Field("abc", "upload").length == 512,
"Default length for type 'upload' is not 512",
)
# Check that Tables passed in the type creates a reference
self.assertTrue(
Field("abc", Table(None, "temp")).type == "reference temp",
"Passing a Table does not result in a reference type.",
)
def testFieldLabels(self):
# Check that a label is successfully built from the supplied fieldname
self.assertTrue(
Field("abc", "string").label == "Abc", "Label built is incorrect"
)
self.assertTrue(
Field("abc_def", "string").label == "Abc Def", "Label built is incorrect"
)
def testFieldFormatters(self): # Formatter should be called Validator
# Test the default formatters
for typ in ALLOWED_DATATYPES:
f = Field("abc", typ)
if typ not in ["date", "time", "datetime"]:
isinstance(f.formatter("test"), str)
else:
isinstance(f.formatter(datetime.datetime.now()), str)
def testUploadField(self):
import tempfile
stream = tempfile.NamedTemporaryFile()
content = b"this is the stream content"
stream.write(content)
# rewind before inserting
stream.seek(0)
db = self.connect()
db.define_table(
"tt",
Field(
"fileobj", "upload", uploadfolder=tempfile.gettempdir(), autodelete=True
),
)
f_id = db.tt.insert(fileobj=stream)
row = db.tt[f_id]
(retr_name, retr_stream) = db.tt.fileobj.retrieve(row.fileobj)
# name should be the same
self.assertEqual(retr_name, os.path.basename(stream.name))
# content should be the same
retr_content = retr_stream.read()
self.assertEqual(retr_content, content)
# close streams!
retr_stream.close()
# delete
row.delete_record()
# drop
db.tt.drop()
# this part is triggered only if fs (AKA pyfilesystem) module is installed
try:
from fs.memoryfs import MemoryFS
# rewind before inserting
stream.seek(0)
db.define_table(
"tt", Field("fileobj", "upload", uploadfs=MemoryFS(), autodelete=True)
)
f_id = db.tt.insert(fileobj=stream)
row = db.tt[f_id]
(retr_name, retr_stream) = db.tt.fileobj.retrieve(row.fileobj)
# name should be the same
self.assertEqual(retr_name, os.path.basename(stream.name))
# content should be the same
retr_content = retr_stream.read()
self.assertEqual(retr_content, content)
# close streams
retr_stream.close()
stream.close()
# delete
row.delete_record()
# drop
db.tt.drop()
except ImportError:
pass
def testBlobBytes(self):
# Test blob with latin1 encoded bytes
db = self.connect()
obj = pickle.dumps("0")
db.define_table("tt", Field("aa", "blob"))
self.assertEqual(db.tt.insert(aa=obj), 1)
self.assertEqual(to_bytes(db().select(db.tt.aa)[0].aa), obj)
self.assertEqual(db.tt[1].aa, obj)
self.assertEqual(BytesIO(to_bytes(db.tt[1].aa)).read(), obj)
db.tt.drop()
def testRun(self):
# Test all field types and their return values
db = self.connect()
for ft in ["string", "text", "password", "upload", "blob"]:
db.define_table("tt", Field("aa", ft, default=""))
self.assertEqual(db.tt.insert(aa="ö"), 1)
if not (IS_ORACLE and (ft == "text" or ft == "blob")):
# only verify insert for LOB types in oracle;
# select may create seg fault in test env
self.assertEqual(db().select(db.tt.aa)[0].aa, "ö")
|
DigitalSlideArchive/HistomicsTK
|
histomicstk/preprocessing/color_conversion/rgb_to_hsi.py
|
Python
|
apache-2.0
| 725 | 0 |
"""Placeholder."""
import numpy as np
def rgb_to_hsi(im):
"""Convert to HSI the RGB pixels in im.
Adapted from
https://en.wikipedia.org/wiki/HSL_and_HSV#Hue_and_chroma.
"""
im = np.moveaxis(im, -1, 0)
if len(im) not in (3, 4):
        raise ValueError("Expected 3-channel RGB or 4-channel RGBA image;"
                         " received a {}-channel image".format(len(im)))
im = im[:3]
hues = (np.arctan2(3**0.5 * (im[1] - im[2]),
2 * im[0] - im[1] - im[2]) / (2 * np.pi)) % 1
intensities = im.mean(0)
saturations = np.where(
intensities, 1 - im.min(0) / np.maximum(intensities, 1e-10), 0)
return np.stack([hues, saturations, intensities], -1)
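# Illustrative single-pixel checks (values follow from the formulas above):
#   rgb_to_hsi(np.array([[[1.0, 0.0, 0.0]]])) -> hue 0.0, saturation 1.0, intensity 1/3
#   rgb_to_hsi(np.array([[[0.5, 0.5, 0.5]]])) -> hue 0.0, saturation 0.0, intensity 0.5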
|
LeMaker/LNdigitalIO
|
examples/presslights.py
|
Python
|
gpl-3.0
| 501 | 0 |
import LNdigitalIO
def switch_pressed(event):
event.chip.output_pins[event.pin_num].turn_on()
def switch_unpressed(event):
event.chip.output_pins[event.pin_num].turn_off()
if __name__ == "__main__":
LNdigital = LNdigitalIO.LNdigitals()
listener = LNdigitalIO.InputEventListener(chip=LNdigital)
for i in range(4):
listener.register(i, LNdigitalIO.IODIR_ON, switch_pressed)
listener.register(i, LNdigitalIO.IODIR_OFF, switch_unpressed)
    listener.activate()
|
eJRF/ejrf
|
questionnaire/tests/views/test_assign_questions_view.py
|
Python
|
bsd-3-clause
| 13,049 | 0.004598 |
from urllib import quote
from django.test import Client
from questionnaire.forms.assign_question import AssignQuestionForm
from questionnaire.models import Questionnaire, Section, SubSection, Question, Region, QuestionGroup
from questionnaire.models.skip_rule import SkipQuestion
from questionnaire.tests.base_test import BaseTest
from questionnaire.tests.factories.question_group_factory import QuestionGroupFactory
from questionnaire.tests.factories.skip_rule_factory import SkipQuestionRuleFactory, SkipSubsectionRuleFactory
class AssignQuestionViewTest(BaseTest):
def setUp(self):
self.client = Client()
self.user = self.create_user(org="WHO")
self.region = None
self.assign('can_edit_questionnaire', self.user)
self.client.login(username=self.user.username, password='pass')
self.questionnaire = Questionnaire.objects.create(name="JRF 2013 Core English", year=2013, region=self.region)
self.section = Section.objects.create(name="section", questionnaire=self.questionnaire, order=1)
self.subsection = SubSection.objects.create(title="subsection 1", section=self.section, order=1)
self.question1 = Question.objects.create(text='Q1', UID='C00003', answer_type='Number', region=self.region)
self.question2 = Question.objects.create(text='Q2', UID='C00002', answer_type='Number', region=self.region)
self.form_data = {'questions': [self.question1.id, self.question2.id]}
self.url = '/subsection/%d/assign_questions/' % self.subsection.id
def test_get_assign_question_page(self):
response = self.client.get(self.url)
self.assertEqual(200, response.status_code)
templates = [template.name for template in response.templates]
self.assertIn('questionnaires/assign_questions.html', templates)
def test_gets_assign_questions_form_and_subsection_in_context(self):
afro = Region.objects.create(name="Afro")
question_in_region = Question.objects.create(text='not in Region Q', UID='C000R3', answer_type='Number',
region=afro)
response = self.client.get(self.url)
self.assertIsInstance(response.context['assign_question_form'], AssignQuestionForm)
self.assertEqual(2, response.context['questions'].count())
questions_texts = [question.text for question in list(response.context['questions'])]
self.assertIn(self.question1.text, questions_texts)
self.assertIn(self.question2.text, questions_texts)
self.assertNotIn(question_in_region.text, questions_texts)
self.assertEqual('Done', response.context['btn_label'])
def test_GET_puts_list_of_already_used_questions_in_context(self):
question1 = Question.objects.create(text='USed question', UID='C00033', answer_type='Number',
region=self.region)
question1.question_group.create(subsection=self.subsection)
response = self.client.get(self.url)
self.assertEqual(1, len(response.context['active_questions']))
self.assertIn(question1, response.context['active_questions'])
self.assertIn(question1, response.context['questions'])
def test_GET_does_not_put_parent_questions_in_the_context(self):
parent_question = Question.objects.create(text='parent q', UID='C000R3', answer_type='Number')
self.question1.parent = parent_question
self.question1.save()
used_question1 = Question.objects.create(text='USed question', UID='C00033', answer_type='Number',
region=self.region, parent=parent_question)
used_question1.question_group.create(subsection=self.subsection)
response = self.client.get(self.url)
self.assertEqual(3, response.context['questions'].count())
questions_texts = [question.text for question in list(response.context['questions'])]
self.assertIn(self.question1.text, questions_texts)
self.assertIn(self.question2.text, questions_texts)
self.assertIn(used_question1.text, questions_texts)
self.assertNotIn(parent_question.text, questions_texts)
self.assertEqual(1, len(response.context['active_questions']))
self.assertIn(used_question1, response.context['active_questions'])
def test_post_questions_assigns_them_to_subsections_and_get_or_create_group(self):
self.failIf(self.question1.question_group.all())
self.failIf(self.question2.question_group.all())
meta = {'HTTP_REFERER': self.url}
response = self.client.post(self.url, data={'questions': [self.question1.id, self.question2.id]}, **meta)
question_group = self.question1.question_group.all()
self.assertEqual(1, question_group.count())
self.assertEqual(question_group[0], self.question2.question_group.all()[0])
self.assertEqual(self.subsection, question_group[0].subsection)
def test_successful_post_redirect_to_referer_url(self):
meta = {'HTTP_REFERER': self.url}
response = self.client.post(self.url, data={'questions': [self.question1.id, self.question2.id]}, **meta)
self.assertRedirects(response, self.url)
def test_successful_post_display_success_message(self):
referer_url = '/questionnaire/entry/%d/section/%d/' % (self.questionnaire.id, self.section.id)
meta = {'HTTP_REFERER': referer_url}
response = self.client.post(self.url, data={'questions': [self.question1.id, self.question2.id]}, **meta)
message = "Questions successfully assigned to questionnaire."
self.assertIn(message, response.cookies['messages'].value)
def test_with_errors_returns_the_form_with_error(self):
referer_url = '/questionnaire/entry/%d/section/%d/' % (self.questionnaire.id, self.section.id)
meta = {'HTTP_REFERER': referer_url}
response = self.client.post(self.url, data={'questions': []}, **meta)
self.assertIsInstance(response.context['assign_question_form'], AssignQuestionForm)
self.assertIn("This field is required.", response.context['assign_question_form'].errors['questions'])
self.assertEqual(2, response.context['questions'].count())
questions_texts = [question.text for question in list(response.context['questions'])]
self.assertIn(self.question1.text, questions_texts)
self.assertIn(self.question2.text, questions_texts)
self.assertEqual('Done', response.context['btn_label'])
def test_login_required(self):
self.assert_login_required(self.url)
def test_permission_required_for_create_section(self):
self.assert_permission_required(self.url)
user_not_in_same_region = self.create_user(username="asian_chic", group=self.REGIONAL_ADMIN, region="ASEAN",
org="WHO")
self.assign('can_edit_questionnaire', user_not_in_same_region)
self.client.logout()
self.client.login(username='asian_chic', password='pass')
response = self.client.get(self.url)
self.assertRedirects(response, expected_url='/accounts/login/?next=%s' % quote(self.url))
        response = self.client.post(self.url)
self.assertRedirects(response, expected_url='/accounts/login/?next=%s' % quote(self.url))
def test_GET_with_hide_param_puts_list_of_only_unused_questions_in_context(self):
        question1 = Question.objects.create(text='USed question', UID='C00033', answer_type='Number',
                                            region=self.region)
question1.question_group.create(subsection=self.subsection)
hide_url = '/subsection/%d/assign_questions/?hide=1' % self.subsection.id
response = self.client.get(hide_url)
self.assertIn(question1, response.context['active_questions'])
self.assertNotIn(question1, response.context['questions'])
class UnAssignQuestionViewTest(BaseTest):
def setUp(self):
self.client = Client()
self.user = self.create_user(org="WHO")
self.assign('can_edit_questionnaire', self.user)
|
lahwaacz/wiki-scripts
|
tests/parser_helpers/test_wikicode.py
|
Python
|
gpl-3.0
| 18,144 | 0.001488 |
#! /usr/bin/env python3
import mwparserfromhell
from ws.parser_helpers.wikicode import *
class test_get_adjacent_node:
def test_basic(self):
snippet = "[[Arch Linux]] is the best!"
wikicode = mwparserfromhell.parse(snippet)
first = wikicode.get(0)
last = get_adjacent_node(wikicode, first)
assert str(last) == " is the best!"
def test_last_node(self):
snippet = "[[Arch Linux]] is the best!"
wikicode = mwparserfromhell.parse(snippet)
last = get_adjacent_node(wikicode, " is the best!")
assert last == None
def test_whitespace_preserved(self):
snippet = "[[Arch Linux]] \t\n is the best!"
wikicode = mwparserfromhell.parse(snippet)
first = wikicode.get(0)
last = get_adjacent_node(wikicode, first, ignore_whitespace=True)
assert str(last) == " \t\n is the best!"
def test_ignore_whitespace(self):
snippet = "[[Arch Linux]] \t\n [[link]] is the best!"
wikicode = mwparserfromhell.parse(snippet)
first = wikicode.get(0)
wikicode.remove("[[link]]")
last = get_adjacent_node(wikicode, first, ignore_whitespace=True)
assert str(last) == " is the best!"
class test_get_parent_wikicode:
snippet = """\
{{Note|This [[wikipedia:reference]] is to be noted.}}
Some other text.
"""
wikicode = mwparserfromhell.parse(snippet)
def test_toplevel(self):
parent = get_parent_wikicode(self.wikicode, self.wikicode.get(0))
assert str(parent) == self.snippet
def test_nested(self):
note = self.wikicode.filter_templates()[0]
link = self.wikicode.filter_wikilinks()[0]
parent = get_parent_wikicode(self.wikicode, link)
assert str(parent) == str(note.params[0])
class test_remove_and_squash:
@staticmethod
def _do_test(wikicode, remove, expected):
node = wikicode.get(wikicode.index(remove))
remove_and_squash(wikicode, node)
assert str(wikicode) == expected
def test_inside(self):
|
snippet = "Some text with a [[link]] inside."
expected = "Some text with a inside."
wikicode = mwparserfromhell.parse(snippet)
self._do_test(wikicode, "[[link]]", expected)
def test_around(self):
snippet = """\
First paragraph
[[link1]]
Second paragraph
[[link2]]
Third paragraph
"""
        wikicode = mwparserfromhell.parse(snippet)
self._do_test(wikicode, "[[link1]]", "First paragraph\n\nSecond paragraph\n[[link2]]\n\nThird paragraph\n")
self._do_test(wikicode, "[[link2]]", "First paragraph\n\nSecond paragraph\n\nThird paragraph\n")
def test_lineend(self):
snippet = """\
Some other text [[link]]
Following sentence.
"""
expected = """\
Some other text
Following sentence.
"""
wikicode = mwparserfromhell.parse(snippet)
self._do_test(wikicode, "[[link]]", expected)
def test_linestart(self):
snippet = """\
Another paragraph.
[[link]] some other text.
"""
expected = """\
Another paragraph.
some other text.
"""
wikicode = mwparserfromhell.parse(snippet)
self._do_test(wikicode, "[[link]]", expected)
def test_lineend_twolinks(self):
snippet = """\
Some other text [[link1]][[link2]]
Following sentence.
"""
expected = """\
Some other text [[link1]]
Following sentence.
"""
wikicode = mwparserfromhell.parse(snippet)
self._do_test(wikicode, "[[link2]]", expected)
def test_linestart_twolinks(self):
snippet = """\
Another paragraph.
[[link1]][[link2]] some other text.
"""
expected = """\
Another paragraph.
[[link2]] some other text.
"""
wikicode = mwparserfromhell.parse(snippet)
self._do_test(wikicode, "[[link1]]", expected)
def test_multiple_nodes(self):
snippet = "[[link1]][[link2]][[link3]]"
wikicode = mwparserfromhell.parse(snippet)
self._do_test(wikicode, "[[link1]]", "[[link2]][[link3]]")
wikicode = mwparserfromhell.parse(snippet)
self._do_test(wikicode, "[[link2]]", "[[link1]][[link3]]")
wikicode = mwparserfromhell.parse(snippet)
self._do_test(wikicode, "[[link3]]", "[[link1]][[link2]]")
def test_multiple_nodes_text(self):
snippet = "foo [[link1]][[link2]][[link3]] bar"
wikicode = mwparserfromhell.parse(snippet)
self._do_test(wikicode, "[[link1]]", "foo [[link2]][[link3]] bar")
wikicode = mwparserfromhell.parse(snippet)
self._do_test(wikicode, "[[link2]]", "foo [[link1]][[link3]] bar")
wikicode = mwparserfromhell.parse(snippet)
self._do_test(wikicode, "[[link3]]", "foo [[link1]][[link2]] bar")
def test_multiple_nodes_spaces(self):
snippet = "foo [[link1]] [[link2]] [[link3]] bar"
wikicode = mwparserfromhell.parse(snippet)
self._do_test(wikicode, "[[link1]]", "foo [[link2]] [[link3]] bar")
wikicode = mwparserfromhell.parse(snippet)
self._do_test(wikicode, "[[link2]]", "foo [[link1]] [[link3]] bar")
wikicode = mwparserfromhell.parse(snippet)
self._do_test(wikicode, "[[link3]]", "foo [[link1]] [[link2]] bar")
def test_multiple_nodes_newlines(self):
snippet = "[[link1]]\n[[link2]]\n[[link3]]"
wikicode = mwparserfromhell.parse(snippet)
self._do_test(wikicode, "[[link1]]", "[[link2]]\n[[link3]]")
wikicode = mwparserfromhell.parse(snippet)
self._do_test(wikicode, "[[link2]]", "[[link1]]\n[[link3]]")
wikicode = mwparserfromhell.parse(snippet)
self._do_test(wikicode, "[[link3]]", "[[link1]]\n[[link2]]")
def test_multiple_newlines(self):
snippet = """\
First paragraph
[[link]]
"""
expected = """\
First paragraph
"""
wikicode = mwparserfromhell.parse(snippet)
self._do_test(wikicode, "[[link]]", expected)
class test_get_section_headings:
@staticmethod
def _do_test(text, expected):
result = get_section_headings(text)
assert result == expected
def test_balanced(self):
snippet = """
foo
== Section 1 ==
bar
=== Section 2===
=Section 3 =
== Section 4 ===
"""
expected = ["Section 1", "Section 2", "Section 3", "Section 4 ="]
self._do_test(snippet, expected)
def test_unbalanced(self):
snippet = """
Invalid section 1 ==
== Invalid section 2
== Valid section 1 =
= Valid section 2 ==
== Valid section 3 = =
= = Valid section 4 ==
"""
expected = [
"= Valid section 1",
"Valid section 2 =",
"= Valid section 3 =",
"= Valid section 4 =",
]
self._do_test(snippet, expected)
def test_levels(self):
snippet = """
= Level 1 =
== Level 2 ==
=== Level 3 ===
==== Level 4 ====
===== Level 5 =====
====== Level 6 ======
======= Invalid level =======
"""
expected = [
"Level 1",
"Level 2",
"Level 3",
"Level 4",
"Level 5",
"Level 6",
"= Invalid level =",
]
self._do_test(snippet, expected)
class test_get_anchors:
def test_simple(self):
snippet = """
== foo ==
== bar ==
== foo ==
== foo_2 ==
== foo 2 ==
"""
expected = ["foo", "bar", "foo_2", "foo_2_2", "foo_2_3"]
result = get_anchors(get_section_headings(snippet))
assert result == expected
def test_complex(self):
snippet = """
== foo_2 ==
== foo_2_2 ==
== foo ==
== foo ==
== foo 2 ==
== foo 2 ==
"""
expected = ["foo_2", "foo_2_2", "foo", "foo_3", "foo_2_3", "foo_2_4"]
result = get_anchors(get_section_headings(snippet))
assert result == expected
def test_casing(self):
snippet = """
=== foo bar ===
=== Foo Bar ===
=== Foo bar ===
=== foo Bar ===
"""
expected = ["foo_bar", "Foo_Bar_2", "Foo_bar_3", "foo_Bar_4"]
result = get_anchors(get_section_headings(snippet))
assert result == expected
def test_strip(self):
snippet = """
== Section with ''wikicode'' ==
== Section with <i>tag</i> ==
== Section with HTML entities Σ, Σ, and Σ ==
== Section with [
|
iohannez/gnuradio
|
gr-filter/python/filter/qa_rational_resampler.py
|
Python
|
gpl-3.0
| 8,932 | 0.004478 |
#!/usr/bin/env python
#
# Copyright 2005-2007,2010,2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from __future__ import print_function
from __future__ import division
from gnuradio import gr, gr_unittest, filter, blocks
import math
import random
import sys
def random_floats(n):
r = []
for x in range(n):
# r.append(float(random.randint(-32768, 32768)))
r.append(float(random.random()))
return tuple(r)
def reference_dec_filter(src_data, decim, taps):
tb = gr.top_block()
src = blocks.vector_source_f(src_data)
op = filter.fir_filter_fff(decim, taps)
dst = blocks.vector_sink_f()
tb.connect(src, op, dst)
tb.run()
result_data = dst.data()
tb = None
return result_data
def reference_interp_filter(src_data, interp, taps):
tb = gr.top_block()
src = blocks.vector_source_f(src_data)
op = filter.interp_fir_filter_fff(interp, taps)
dst = blocks.vector_sink_f()
tb.connect(src, op, dst)
tb.run()
result_data = dst.data()
tb = None
return result_data
def reference_interp_dec_filter(src_data, interp, decim, taps):
tb = gr.top_block()
src = blocks.vector_source_f(src_data)
up = filter.interp_fir_filter_fff(interp, (1,))
dn = filter.fir_filter_fff(decim, taps)
dst = blocks.vector_sink_f()
tb.connect(src, up, dn, dst)
tb.run()
result_data = dst.data()
tb = None
return result_data
class test_rational_resampler (gr_unittest.TestCase):
def setUp(self):
random.seed(0)
def tearDown(self):
pass
def test_000_1_to_1(self):
taps = (-4, 5)
src_data = (234, -4, 23, -56, 45, 98, -23, -7)
xr = (1186, -112, 339, -460, -167, 582)
expected_result = tuple([float(x) for x in xr])
tb = gr.top_block()
src = blocks.vector_source_f(src_data)
op = filter.rational_resampler_base_fff(1, 1, taps)
dst = blocks.vector_sink_f()
tb.connect(src, op)
tb.connect(op, dst)
tb.run()
result_data = dst.data()
self.assertEqual(expected_result, result_data)
def test_001_interp(self):
taps = [1, 10, 100, 1000, 10000]
src_data = (0, 2, 3, 5, 7, 11, 13, 17)
interpolation = 3
xr = (2,20,200,2003,20030,
300,3005,30050,
500,5007,50070,
700,7011,70110,
1100,11013,110130,
1300,13017,130170,
1700.0,17000.0,170000.0, 0.0)
expected_result = tuple([float(x) for x in xr])
tb = gr.top_block()
src = blocks.vector_source_f(src_data)
op = filter.rational_resampler_base_fff(interpolation, 1, taps)
dst = blocks.vector_sink_f()
tb.connect(src, op)
tb.connect(op, dst)
tb.run()
result_data = dst.data()
self.assertEqual(expected_result, result_data)
def test_002_interp(self):
taps = random_floats(31)
src_data = random_floats(10000)
interpolation = 3
expected_result = reference_interp_filter(src_data, interpolation, taps)
tb = gr.top_block()
src = blocks.vector_source_f(src_data)
op = filter.rational_resampler_base_fff(interpolation, 1, taps)
dst = blocks.vector_sink_f()
tb.connect(src, op)
tb.connect(op, dst)
tb.run()
result_data = dst.data()
N = 1000
offset = len(taps)-1
self.assertEqual(expected_result[offset:offset+N], result_data[0:N])
def xtest_003_interp(self):
taps = random_floats(9)
src_data = random_floats(10000)
decimation = 3
expected_result = reference_dec_filter(src_data, decimation, taps)
tb = gr.top_block()
src = blocks.vector_source_f(src_data)
op = filter.rational_resampler_base_fff(1, decimation, taps)
dst = blocks.vector_sink_f()
tb.connect(src, op)
tb.connect(op, dst)
tb.run()
result_data = dst.data()
N = 10
offset = 10#len(taps)-1
print(expected_result[100+offset:100+offset+N])
print(result_data[100:100+N])
#self.assertEqual(expected_result[offset:offset+N], result_data[0:N])
# FIXME disabled. Triggers hang on SuSE 10.0
def xtest_004_decim_random_vals(self):
MAX_TAPS = 9
MAX_DECIM = 7
OUTPUT_LEN = 9
random.seed(0) # we want reproducibility
for ntaps in range(1, MAX_TAPS + 1):
for decim in range(1, MAX_DECIM+1):
for ilen in range(ntaps + decim, ntaps + OUTPUT_LEN*decim):
src_data = random_floats(ilen)
taps = random_floats(ntaps)
expected_result = reference_dec_filter(src_data, decim, taps)
tb = gr.top_block()
src = blocks.vector_source_f(src_data)
op = filter.rational_resampler_base_fff(1, decim, taps)
dst = blocks.vector_sink_f()
tb.connect(src, op, dst)
tb.run()
tb = None
result_data = dst.data()
L1 = len(result_data)
L2 = len(expected_result)
L = min(L1, L2)
if False:
sys.stderr.write('delta = %2d: ntaps = %d decim = %d ilen
|
= %d\n' % (L2 - L1, ntaps, dec
|
im, ilen))
sys.stderr.write(' len(result_data) = %d len(expected_result) = %d\n' %
(len(result_data), len(expected_result)))
self.assertEqual(expected_result[0:L], result_data[0:L])
# FIXME disabled. Triggers hang on SuSE 10.0
def xtest_005_interp_random_vals(self):
MAX_TAPS = 9
MAX_INTERP = 7
INPUT_LEN = 9
random.seed(0) # we want reproducibility
for ntaps in range(1, MAX_TAPS + 1):
for interp in range(1, MAX_INTERP+1):
for ilen in range(ntaps, ntaps + INPUT_LEN):
src_data = random_floats(ilen)
taps = random_floats(ntaps)
expected_result = reference_interp_filter(src_data, interp, taps)
tb = gr.top_block()
src = blocks.vector_source_f(src_data)
op = filter.rational_resampler_base_fff(interp, 1, taps)
dst = blocks.vector_sink_f()
tb.connect(src, op, dst)
tb.run()
tb = None
result_data = dst.data()
L1 = len(result_data)
L2 = len(expected_result)
L = min(L1, L2)
#if True or abs(L1-L2) > 1:
if False:
sys.stderr.write('delta = %2d: ntaps = %d interp = %d ilen = %d\n' % (L2 - L1, ntaps, interp, ilen))
#sys.stderr.write(' len(result_data) = %d len(expected_result) = %d\n' %
# (len(result_data), len(expected_result)))
#self.assertEqual(expected_result[0:L], result_data[0:L])
# FIXME check first ntaps+1 answers
self.assertEqual(expected_result[ntaps+1:L], result_data[ntaps+1:L])
def test_006_interp_decim(self):
taps = random_floats(31)
|
Crach1015/plugin.video.superpack
|
zip/plugin.video.SportsDevil/lib/parser.py
|
Python
|
gpl-2.0
| 26,568 | 0.00478 |
# -*- coding: utf-8 -*-
import common
import sys, os, traceback
import time
import random
import re
import urllib
import string
from string import lower
from entities.CList import CList
from entities.CItemInfo import CItemInfo
from entities.CListItem import CListItem
from entities.CRuleItem import CRuleItem
import customReplacements as cr
import customConversions as cc
from utils import decryptionUtils as crypt
from utils import datetimeUtils as dt
from utils import rowbalance as rb
from utils.fileUtils import findInSubdirectory, getFileContent, getFileExtension
from utils.scrapingUtils import findVideoFrameLink, findContentRefreshLink, findRTMP, findJS, findPHP, getHostName, findEmbedPHPLink
from common import getHTML
class ParsingResult(object):
class Code:
SUCCESS = 0
CFGFILE_NOT_FOUND = 1
CFGSYNTAX_INVALID = 2
WEBREQUEST_FAILED = 3
def __init__(self, code, itemsList):
self.code = code
self.list = itemsList
self.message = None
class Parser(object):
"""
returns a list of items
"""
def parse(self, lItem):
url = lItem['url']
cfg = lItem['cfg']
ext = getFileExtension(url)
successfullyScraped = True
tmpList = None
if lItem['catcher']:
catcher = lItem['catcher']
cfg = os.path.join(common.Paths.catchersDir, '__' + catcher + '.cfg')
tmpList = self.__loadLocal(cfg, lItem)
if tmpList and len(tmpList.rules) > 0:
successfullyScraped = self.__loadRemote(tmpList, lItem)
else:
if ext == 'cfg':
tmpList = self.__loadLocal(url, lItem)
if tmpList and tmpList.start != '' and len(tmpList.rules) > 0:
lItem['url'] = tmpList.start
successfullyScraped = self.__loadRemote(tmpList, lItem)
elif cfg:
tmpList = self.__loadLocal(cfg, lItem)
if tmpList and len(tmpList.rules) > 0:
successfullyScraped = self.__loadRemote(tmpList, lItem)
# autoselect
if tmpList and tmpList.skill.find('autoselect') != -1 and len(tmpList.items) == 1:
m = tmpList.items[0]
m_type = m['type']
if m_type == 'rss':
common.log('Autoselect - ' + m['title'])
lItem = m
tmpList = self.parse(lItem).list
if not tmpList:
return ParsingResult(ParsingResult.Code.CFGSYNTAX_INVALID, None)
if tmpList and successfullyScraped == False:
return ParsingResult(ParsingResult.Code.WEBREQUEST_FAILED, tmpList)
# Remove duplicates
if tmpList.skill.find('allowDuplicates') == -1:
urls = []
for i in range(len(tmpList.items)-1,-1,-1):
item = tmpList.items[i]
tmpUrl = item['url']
tmpCfg = item['cfg']
if not tmpCfg:
tmpCfg = ''
if not urls.__contains__(tmpUrl + '|' + tmpCfg):
urls.append(tmpUrl + '|' + tmpCfg)
else:
tmpList.items.remove(item)
return ParsingResult(ParsingResult.Code.SUCCESS, tmpList)
"""
loads cfg, creates list and sets up rules for scraping
"""
def __loadLocal(self, filename, lItem = None):
params = []
#get Parameters
if filename.find('@') != -1:
params = filename.split('@')
filename = params.pop(0)
# get cfg file
cfg = filename
if not os.path.exists(cfg):
cfg = os.path.join(common.Paths.modulesDir, filename)
if not os.path.exists(cfg):
tmpPath = os.path.dirname(os.path.join(common.Paths.modulesDir, lItem["definedIn"]))
cfg = os.path.join(tmpPath ,filename)
if not os.path.exists(cfg):
srchFilename = filename
if filename.find('/') > -1:
srchFilename = srchFilename.split('/')[1]
try:
cfg = findInSubdirectory(srchFilename, common.Paths.modulesDir)
except:
try:
cfg = findInSubdirectory(srchFilename, common.Paths.favouritesFolder)
except:
try:
cfg = findInSubdirectory(srchFilename, common.Paths.customModulesDir)
except:
common.log('File not found: ' + srchFilename)
return None
#load file and apply parameters
data = getFileContent(cfg)
data = cr.CustomReplacements().replace(os.path.dirname(cfg), data, lItem, params)
#log
msg = 'Local file ' + filename + ' opened'
if len(params) > 0:
msg += ' with Parameter(s): '
msg += ",".join(params)
common.log(msg)
outputList = self.__parseCfg(filename, data, lItem)
return outputList
"""
scrape items according to rules and add them to the list
"""
def __loadRemote(self, inputList, lItem):
try:
inputList.curr_url = lItem['url']
count = 0
i = 1
maxits = 2 # 1 optimistic + 1 demystified
ignoreCache = False
|
demystify = False
back = ''
startUrl
|
= inputList.curr_url
#print inputList, lItem
while count == 0 and i <= maxits:
if i > 1:
ignoreCache = True
demystify = True
# Trivial: url is from known streamer
if back:
lItem['referer'] = back
items = self.__parseHtml(inputList.curr_url, '"' + inputList.curr_url + '"', inputList.rules, inputList.skill, inputList.cfg, lItem)
count = len(items)
# try to find items in html source code
if count == 0:
referer = ''
if lItem['referer']:
referer = lItem['referer']
data = common.getHTML(inputList.curr_url, None, referer, False, False, ignoreCache, demystify)
if data == '':
return False
msg = 'Remote URL ' + inputList.curr_url + ' opened'
if demystify:
msg += ' (demystified)'
common.log(msg)
if inputList.section != '':
section = inputList.section
data = self.__getSection(data, section)
if lItem['section']:
section = lItem['section']
data = self.__getSection(data, section)
items = self.__parseHtml(inputList.curr_url, data, inputList.rules, inputList.skill, inputList.cfg, lItem)
count = len(items)
common.log(' -> ' + str(count) + ' item(s) found')
# find rtmp stream
#common.log('Find rtmp stream')
if count == 0:
item = self.__findRTMP(data, startUrl, lItem)
if item:
items = []
items.append(item)
count = 1
# find embedding javascripts
#common.log('Find embedding javascripts')
if count == 0:
item = findJS(data)
if item:
firstJS = item[0]
streamId = firstJS[0]
jsUrl = firstJS[1]
if not jsUrl.startswith('http://'):
jsUrl = urllib.basejoin(startUrl,jsUrl)
streamerName = getHostName(jsUrl)
|
dwadler/QGIS
|
python/plugins/processing/algs/grass7/ext/r_mapcalc.py
|
Python
|
gpl-2.0
| 2,550 | 0.000786 |
# -*- coding: utf-8 -*-
"""
***************************************************************************
r_mapcalc.py
------------
Date : February 2016
Copyright : (C) 2016 by Médéric Ribreux
Email : medspx at medspx dot fr
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Médéric Ribreux'
__date__ = 'February 2016'
__copyright__ = '(C) 2016, Médéric Ribreux'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import os
def checkParameterValuesBeforeExecuting(alg, parameters, context):
""" Verify if we have the right parameters """
if (alg.parameterAsString(parameters, 'expression', context)
and alg.parameterAsString(parameters, 'file', context)):
return False, alg.tr("You need to set either inline expression or a rules file!")
return True, None
def processInputs(alg, parameters, context, feedback):
# We will use the same raster names than in QGIS to name the rasters in GRASS
rasters = alg.parameterAsLayerList(parameters, 'maps', context)
for idx, raster in enumerate(rasters):
rasterName = os.path.splitext(
os.path.basename(raster.source()))[0]
alg.inputLayers.append(raster)
alg.setSessionProjectionFromLayer(raster)
command = 'r.in.gdal input="{0}" output="{1}" --overwrite -o'.format(
os.path.normpath(raster.source()),
rasterName)
alg.commands.append(command)
alg.removeParameter('maps')
alg.postInputs()
def processCommand(alg, parameters, context, feedback):
alg.processCommand(parameters, context, feedback, True)
def processOutputs(alg, parameters, context,
|
feedback):
# We need to export every raster from the GRASSDB
alg.exportRasterLayersIntoDirectory('output_dir',
|
parameters, context,
wholeDB=True)
|
ForgottenBeast/spider_nest
|
database/utils.py
|
Python
|
gpl-3.0
| 2,171 | 0.015661 |
# -*-coding:UTF-8 -*
import sqlite3 as sql
import json
from time import time, strftime
import settings
import logging as lgn
lgn.basicConfig(filename = '/db/db.log',level=lgn.DEBUG,\
format='%(asctime)s %(message)s')
def logit(string,*level):
if(len(level) == 0):
lgn.info(string)
else:
if(level[0] == 10):
lgn.debug(string)
elif(level[0] == 20):
lgn.info(string)
elif(level[0] == 30):
|
lgn.warning(string)
elif(level[0] == 40):
lgn.error(string)
else:
lgn.critical(string)
def clean_listings():
conn,c = get_conn()
#first the easy thing: let's delete all the listings that are not
#linked to any search anymore
c.execute('''delete from listings where id not in
(select li
|
stingid from search_listings)''')
conn.commit()
#now let's delete all the listings older than max age
max_age = time() - settings.n_days_before_del * 86400
c.execute('''delete from listings where date_added <= ?''',(max_age,))
conn.commit()
def get_conn():
conn = sql.connect("/db/spider_nest.db")
c = conn.cursor()
c.execute('''PRAGMA foreign_keys = ON''')
return conn,c
def add_error_listings(site,conn,c):
siteid = c.execute('select id from websites where url = ?',(site,)).fetchone()
searches = c.execute('select id from searches where websiteid = ?',siteid).fetchall()
try:
c.execute('''insert into listings(websiteid,desc,webid,img,url,date_added) values(
?,'NO PLUGIN AVAILABLE','NA','NA','NA',?)''',(siteid[0],time()))
except sql.IntegrityError:
logit('tried to add a no plugin available listing for {} twice. Normal behaviour.'.format(site))
#nothing fancy here, we just tried to add a no plugin available listing
#twice, since it's already there our work here is done
return
listingid = c.execute('select id from listings where websiteid = ?',siteid).fetchone()
for s in searches:
c.execute('''insert into search_listings(searchid,listingid)
values(?,?)''',(s[0],listingid[0]))
conn.commit()
|
urllib3/urllib3
|
src/urllib3/util/url.py
|
Python
|
mit
| 14,809 | 0.000743 |
import re
from typing import Container, NamedTuple, Optional, overload
from ..exceptions import LocationParseError
from .util import to_str
# We only want to normalize urls with an HTTP(S) scheme.
# urllib3 infers URLs without a scheme (None) to be http.
_NORMALIZABLE_SCHEMES = ("http", "https", None)
# Almost all of these patterns were derived from the
# 'rfc3986' module: https://github.com/python-hyper/rfc3986
_PERCENT_RE = re.compile(r"%[a-fA-F0-9]{2}")
_SCHEME_RE = re.compile(r"^(?:[a-zA-Z][a-zA-Z0-9+-]*:|/)")
_URI_RE = re.compile(
r"^(?:([a-zA-Z][a-zA-Z0-9+.-]*):)?"
r"(?://([^\\/?#]*))?"
r"([^?#]*)"
r"(?:\?([^#]*))?"
r"(?:#(.*))?$",
re.UNICODE | re.DOTALL,
)
_IPV4_PAT = r"(?:[0-9]{1,3}\.){3}[0-9]{1,3}"
_HEX_PAT = "[0-9A-Fa-f]{1,4}"
_LS32_PAT = "(?:{hex}:{hex}|{ipv4})".format(hex=_HEX_PAT, ipv4=_IPV4_PAT)
_subs = {"hex": _HEX_PAT, "ls32": _LS32_PAT}
_variations = [
# 6( h16 ":" ) ls32
"(?:%(hex)s:){6}%(ls32)s",
# "::" 5( h16 ":" ) ls32
"::(?:%(hex)s:){5}%(ls32)s",
# [ h16 ] "::" 4( h16 ":" ) ls32
"(?:%(hex)s)?::(?:%(hex)s:){4}%(ls32)s",
# [ *1( h16 ":" ) h16 ] "::" 3( h16 ":" ) ls32
"(?:(?:%(hex)s:)?%(hex)s)?::(?:%(hex)s:){3}%(ls32)s",
# [ *2( h16 ":" ) h16 ] "::" 2( h16 ":" ) ls32
"(?:(?:%(hex)s:){0,2}%(hex)s)?::(?:%(hex)s:){2}%(ls32)s",
# [ *3( h16 ":" ) h16 ] "::" h16 ":" ls32
"(?:(?:%(hex)s:){0,3}%(hex)s)?::%(hex)s:%(ls32)s",
# [ *4( h16 ":" ) h16 ] "::" ls32
"(?:(?:%(hex)s:){0,4}%(hex)s)?::%(ls32)s",
# [ *5( h16 ":" ) h16 ] "::" h16
"(?:(?:%(hex)s:){0,5}%(hex)s)?::%(hex)s",
# [ *6( h16 ":" ) h16 ] "::"
"(?:(?:%(hex)s:){0,6}%(hex)s)?::",
]
_UNRESERVED_PAT = (
r"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789._!\-~"
)
_IPV6_PAT = "(?:" + "|".join([x % _subs for x in _variations]) + ")"
_ZONE_ID_PAT = "(?:%25|%)(?:[" + _UNRESERVED_PAT + "]|%[a-fA-F0-9]{2})+"
_IPV6_ADDRZ_PAT = r"\[" + _IPV6_PAT + r"(?:" + _ZONE_ID_PAT + r")?\]"
_REG_NAME_PAT = r"(?:[^\[\]%:/?#]|%[a-fA-F0-9]{2})*"
_TARGET_RE = re.compile(r"^(/[^?#]*)(?:\?([^#]*))?(?:#.*)?$")
_IPV4_RE = re.compile("^" + _IPV4_PAT + "$")
_IPV6_RE = re.compile("^" + _IPV6_PAT + "$")
_IPV6_ADDRZ_RE = re.compile("^" + _IPV6_ADDRZ_PAT + "$")
_BRACELESS_IPV6_ADDRZ_RE = re.compile("^" + _IPV6_ADDRZ_PAT[2:-2] + "$")
_ZONE_ID_RE = re.compile("(" + _ZONE_ID_PAT + r")\]$")
_HOST_PORT_PAT = ("^(%s|%s|%s)(?::([0-9]{0,5}))?$") % (
_REG_NAME_PAT,
_IPV4_PAT,
_IPV6_ADDRZ_PAT,
)
_HOST_PORT_RE = re.compile(_HOST_PORT_PAT, re.UNICODE | re.DOTALL)
_UNRESERVED_CHARS = set(
"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789._-~"
)
_SUB_DELIM_CHARS = set("!$&'()*+,;=")
_USERINFO_CHARS = _UNRESERVED_CHARS | _SUB_DELIM_CHARS | {":"}
_PATH_CHARS = _USERINFO_CHARS | {"@", "/"}
_QUERY_CHARS = _FRAGMENT_CHARS = _PATH_CHARS | {"?"}
class Url(
NamedTuple(
"Url",
[
("scheme", Optional[str]),
("auth", Optional[str]),
("host", Optional[str]),
("port", Optional[int]),
("path", Optional[str]),
("query", Optional[str]),
("fragment", Optional[str]),
],
)
):
"""
Data structure for representing an HTTP URL. Used as a return value for
:func:`parse_url`. Both the scheme and host are normalized as they are
both case-insensitive according to RFC 3986.
"""
def __new__( # type: ignore[no-untyped-def]
cls,
scheme: Optional[str] = None,
auth: Optional[str] = None,
host: Optional[str] = None,
port: Optional[int] = None,
path: Optional[str] = None,
query: Optional[str] = None,
fragment: Optional[str] = None,
):
if path and not path.startswith("/"):
path = "/" + path
if scheme is not None:
scheme = scheme.lower()
return super().__new__(cls, scheme, auth, host, port, path, query, fragment)
@property
def hostname(self) -> Optional[str]:
"""For backwards-compatibility with urlparse. We're nice like that."""
return self.host
@property
def request_uri(self) -> str:
"""Absolute path including the query string."""
uri = self.path or "/"
if self.query is not None:
uri += "?" + self.query
return uri
@property
def authority(self) -> Optional[str]:
"""
Authority component as defined in RFC 3986 3.2.
This includes userinfo (auth), host and port.
i.e.
userinfo@host:port
"""
userinfo = self.auth
netloc = self.netloc
if netloc is None or userinfo is None:
return netloc
else:
return f"{userinfo}@{netloc}"
@property
def netloc(self) -> Optional[str]:
"""
Network location including host and port.
If you need the equivalent of urllib.parse's ``netloc``,
use the ``authority`` p
|
roperty
|
instead.
"""
if self.host is None:
return None
if self.port:
return f"{self.host}:{self.port}"
return self.host
@property
def url(self) -> str:
"""
Convert self into a url
This function should more or less round-trip with :func:`.parse_url`. The
returned url may not be exactly the same as the url inputted to
:func:`.parse_url`, but it should be equivalent by the RFC (e.g., urls
with a blank port will have : removed).
Example:
.. code-block:: python
import urllib3
U = urllib3.util.parse_url("https://google.com/mail/")
print(U.url)
# "https://google.com/mail/"
print( urllib3.util.Url("https", "username:password",
"host.com", 80, "/path", "query", "fragment"
).url
)
# "https://username:password@host.com:80/path?query#fragment"
"""
scheme, auth, host, port, path, query, fragment = self
url = ""
# We use "is not None" we want things to happen with empty strings (or 0 port)
if scheme is not None:
url += scheme + "://"
if auth is not None:
url += auth + "@"
if host is not None:
url += host
if port is not None:
url += ":" + str(port)
if path is not None:
url += path
if query is not None:
url += "?" + query
if fragment is not None:
url += "#" + fragment
return url
def __str__(self) -> str:
return self.url
@overload
def _encode_invalid_chars(
component: str, allowed_chars: Container[str]
) -> str: # Abstract
...
@overload
def _encode_invalid_chars(
component: None, allowed_chars: Container[str]
) -> None: # Abstract
...
def _encode_invalid_chars(
component: Optional[str], allowed_chars: Container[str]
) -> Optional[str]:
"""Percent-encodes a URI component without reapplying
onto an already percent-encoded component.
"""
if component is None:
return component
component = to_str(component)
# Normalize existing percent-encoded bytes.
# Try to see if the component we're encoding is already percent-encoded
# so we can skip all '%' characters but still encode all others.
component, percent_encodings = _PERCENT_RE.subn(
lambda match: match.group(0).upper(), component
)
uri_bytes = component.encode("utf-8", "surrogatepass")
is_percent_encoded = percent_encodings == uri_bytes.count(b"%")
encoded_component = bytearray()
for i in range(0, len(uri_bytes)):
# Will return a single character bytestring
byte = uri_bytes[i : i + 1]
byte_ord = ord(byte)
if (is_percent_encoded and byte == b"%") or (
byte_ord < 128 and byte.decode() in allowed_chars
):
encoded_component += byte
continue
encoded
|
dwc/pi-monitoring
|
bin/push_temperature.py
|
Python
|
mit
| 1,946 | 0.018499 |
#!/usr/bin/env python
import argparse
import datetime
import os
import re
import requests
import subprocess
import sys
import time
import xively
DEBUG = os.environ.get("DEBUG") or False  # .get() avoids KeyError when unset; 'false' was an undefined name
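# Note: read_temperature below assumes the two-line w1_slave-style format used
# by 1-Wire temperature sensors (a CRC line followed by a line ending in
# "t=<millidegrees>") -- an assumption inferred from the regex, not stated in the script.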
def read_temperature(from_file):
if DEBUG:
print "Reading temperature from file: %s" % from_file
temperature = None
with open(from_file, 'r') as f:
crc = f.readline()
reading = f.readline()
matches = re.search('t=(\d+)', reading)
if matches:
temperature = float(matches.group(1)) / 1000.0
return temperature
def get_datastream(feed, name):
try:
datastream = feed.datastreams.get(name)
if DEBUG:
print "Found existing datastream"
return datastream
except:
if DEBUG:
print "Creating new datastream"
datastream = feed.datastreams.create(name, tags="units=celsius")
return datastream
def run():
parser = argparse.ArgumentParser(description = 'Push a metric to Xively')
parser.add_argument('--feed', type=str, required=True, help='your Xively feed ID')
parser.add_argument('--key', type=str, required=True, help='your Xively API key')
parser.add_argument('--name', type=str, default='temperature0', help='your Xively datastream n
|
ame')
parser.add_argument('--file', type=str, required=True, help='the file from which to read the temperature')
args = parser.parse_args()
api = xively.XivelyAPIClient(args.key)
feed = api.feeds.get(args.feed)
datastream = get_datastream(feed, args.na
|
me)
datastream.max_value = None
datastream.min_value = None
while True:
temperature = read_temperature(args.file)
if DEBUG:
print "Updating Xively feed with value: %s" % temperature
datastream.current_value = temperature
datastream.at = datetime.datetime.utcnow()
try:
datastream.update()
except Exception as err:
sys.stderr.write('ERROR: %s\n' % str(err))
print "Updated Xively feed, sleeping..."
time.sleep(60)
run()
|
tcpcloud/openvstorage
|
ovs/extensions/os/__init__.py
|
Python
|
apache-2.0
| 644 | 0 |
# Copyright 2015 CloudFounders NV
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may
|
obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# d
|
istributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This package contains Linux OS distribution extensions
"""
|
Mytho/groceries-api
|
db/versions/20150505_154233_373a21295ab_db_migration.py
|
Python
|
mit
| 1,244 | 0.003215 |
"""db migration
Revision ID: 373a21295ab
Revises: 21f5b2d3905d
Create Date: 2015-05-05 15:42:33.474470
"""
# revision identifiers, used by Alembic.
revision = '373a21295ab'
down_revision = '21f5b2d3905d'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
def upgrade():
op.drop_table('items')
items = op.create_table('items',
|
sa.Column('id', sa.Integer, primary_key=True),
sa.Column('name', sa.String(255), nullable=False),
sa.Column('is_bought', sa.Boolean, default=False, nullable=False),
sa.Column('created', sa.DateTime, default=sa.func.now(),
nullable=False),
sa.Column('modified', sa.DateTime, default=sa.func.now(),
onupdate=sa.func.now(),
|
nullable=False))
def downgrade():
op.drop_table('items')
op.create_table('items',
sa.Column('id', sa.Integer, primary_key=True),
sa.Column('name', sa.String(255), nullable=False),
sa.Column('is_bought', sa.Boolean, default=False, nullable=False),
sa.Column('modified', sa.DateTime, default=sa.func.now(),
onupdate=sa.func.now(), nullable=False),
sa.Column('created', sa.DateTime, default=sa.func.now()))
|
cryvate/project-euler
|
project_euler/solutions/problem_56.py
|
Python
|
mit
| 294 | 0.006803 |
from ..libra
|
ry.base import number_to_list
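# Project Euler problem 56: find the maximum digital sum of a**b for a, b below the bound.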
def solve(bound: int=100):
maximal = 0
for a in range(bound):
for b in range(bound):
sum_digits = sum(number_to_list(a ** b))
if sum_digits > maximal:
maximal = sum_digits
re
|
turn maximal
|
mozilla-services/FunkLoad
|
src/funkload/ReportRenderOrg.py
|
Python
|
gpl-2.0
| 6,127 | 0.003754 |
# (C) Copyright 2011 Nuxeo SAS <http://nuxeo.com>
# Author: bdelbosc@nuxeo.com
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as published
# by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
# 02111-1307, USA.
#
"""Classes that render statistics in emacs org-mode format.
"""
import re
from ReportRenderRst import RenderRst
from ReportRenderRst import BaseRst
import ReportRenderRst
from MonitorPlugins import MonitorPlugins
FL_SITE = "http://funkload.nuxeo.org"
def org_title(title, level=1, newpage=True):
"""Return an org section."""
org = []
if newpage:
org.append("")
org.append("")
org.append("#+BEGIN_LaTeX")
org.append("\\newpage")
org.append('#+END_LaTeX')
org.append('*' * (level - 1) + ' ' + title + '\n')
return '\n'.join(org)
def org_image
|
(self):
org = ["#+BEGIN_LaTeX"]
org.append('\\begin{center}')
for image_name in self.image_nam
|
es:
org.append("\includegraphics[scale=0.5]{{./%s}.png}" % image_name)
org.append('\\end{center}')
org.append('#+END_LaTeX')
return '\n'.join(org) + '\n'
def org_header(self, with_chart=False):
headers = self.headers[:]
if self.with_percentiles:
self._attach_percentiles_header(headers)
org = [self.render_image()]
org.append("#+BEGIN_LaTeX")
org.append("\\tiny")
org.append('#+END_LaTeX')
org.append(' |' + '|'.join(headers) + '|\n |-')
return '\n'.join(org)
def org_footer(self):
org = [' |-']
org.append("#+BEGIN_LaTeX")
org.append("\\normalsize")
org.append('#+END_LaTeX')
return '\n'.join(org)
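# Monkey-patch the ReST renderer's hooks so the shared report machinery emits
# org-mode markup (titles, table headers/footers, images) instead of reStructuredText.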
ReportRenderRst.rst_title = org_title
ReportRenderRst.LI = '-'
BaseRst.render_header = org_header
BaseRst.render_footer = org_footer
BaseRst.render_image = org_image
BaseRst.sep = '|'
class RenderOrg(RenderRst):
"""Render stats in ReST format."""
# number of slowest requests to display
slowest_items = 5
with_chart = True
def __init__(self, config, stats, error, monitor, monitorconfig, options):
options.html = True
RenderRst.__init__(self, config, stats, error, monitor, monitorconfig, options)
def renderHeader(self):
config = self.config
self.append('# -*- mode: org -*-')
self.append('#+TITLE: FunkLoad bench report')
self.append('#+DATE: ' + self.date)
self.append('''#+STYLE: <link rel="stylesheet" type="text/css" href="eon.css" />
#+LaTeX_CLASS: koma-article
#+LaTeX_CLASS_OPTIONS: [a4paper,landscape]
#+LATEX_HEADER: \usepackage[utf8]{inputenc}
#+LATEX_HEADER: \usepackage[en]{babel}
#+LATEX_HEADER: \usepackage{fullpage}
#+LATEX_HEADER: \usepackage[hyperref,x11names]{xcolor}
#+LATEX_HEADER: \usepackage[colorlinks=true,urlcolor=SteelBlue4,linkcolor=Firebrick4]{hyperref}
#+LATEX_HEADER: \usepackage{graphicx}
#+LATEX_HEADER: \usepackage[T1]{fontenc}''')
description = [config['class_description']]
description += ["Bench result of ``%s.%s``: " % (config['class'],
config['method'])]
description += [config['description']]
self.append('#+TEXT: Bench result of =%s.%s=: %s' % (
config['class'], config['method'], ' '.join(description)))
self.append('#+OPTIONS: toc:1')
self.append('')
def renderMonitor(self, host, charts):
"""Render a monitored host."""
description = self.config.get(host, '')
self.append(org_title("%s: %s" % (host, description), 3))
for chart in charts:
self.append('#+BEGIN_LaTeX')
self.append('\\begin{center}')
self.append("\includegraphics[scale=0.5]{{./%s}.png}" % chart[1])
self.append('\\end{center}')
self.append('#+END_LaTeX')
def renderHook(self):
self.rst = [line.replace('``', '=') for line in self.rst]
lapdex = "Apdex_{%s}" % str(self.options.apdex_t)
kv = re.compile("^(\ *\- [^\:]*)\:(.*)")
bold = re.compile("\*\*([^\*]+)\*\*")
link = re.compile("\`([^\<]+)\<([^\>]+)\>\`\_")
ret = []
for line in self.rst:
line = re.sub(kv, lambda m: "%s :: %s\n\n" % (
m.group(1), m.group(2)), line)
line = re.sub(bold, lambda m: "*%s*" % (m.group(1)),
line)
line = re.sub(link, lambda m: "[[%s][%s]]" % (m.group(2),
m.group(1).strip()),
line)
line = line.replace('|APDEXT|', lapdex)
line = line.replace('Apdex*', lapdex)
line = line.replace('Apdex T', 'Apdex_{T}')
line = line.replace('FunkLoad_',
'[[%s][FunkLoad]]' % FL_SITE)
ret.append(line)
self.rst = ret
def createMonitorCharts(self):
"""Create all montirored server charts."""
if not self.monitor or not self.with_chart:
return
self.append(org_title("Monitored hosts", 2))
charts = {}
for host in self.monitor.keys():
charts[host] = self.createMonitorChart(host)
return charts
def createMonitorChart(self, host):
"""Create monitrored server charts."""
charts = []
Plugins = MonitorPlugins()
Plugins.registerPlugins()
Plugins.configure(self.getMonitorConfig(host))
for plugin in Plugins.MONITORS.values():
image_path = ('%s_%s' % (host, plugin.name)).replace("\\", "/")
charts.append((plugin.name, image_path))
return charts
|
mariecpereira/IA369Z
|
deliver/ia870/iainfgen.py
|
Python
|
mit
| 261 | 0.019157 |
# -*- encoding: utf-8 -*-
# Module iainfgen
from numpy import *
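# Inf-generating operator: the union of the dilation of f by A with the dilation
# of the negation (complement) of f by Bc, where Iab = (A, Bc) is the interval.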
def iainfgen(f, Iab):
from iaunion import iaunion
from iadil import iadil
from ianeg import ianeg
A, Bc = I
|
ab
y = iaunio
|
n( iadil(f, A), iadil( ianeg(f), Bc))
return y
|
unor/schemaorg
|
sdordf2csv.py
|
Python
|
apache-2.0
| 11,152 | 0.017755 |
import csv
import rdflib
from rdflib.namespace import RDFS, RDF, OWL
from rdflib.term import URIRef
import threading
from apimarkdown import Markdown
from apirdflib import RDFLIBLOCK
import logging
logging.basicConfig(level=logging.INFO) # dev_appserver.py --log_level debug .
log = logging.getLogger(__name__)
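# Exports schema.org terms from an RDF graph to CSV: one row per type or property,
# with supertypes/superproperties, domains/ranges, supersessions, etc. as columns.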
class sdordf2csv():
def __init__(self, queryGraph=None, fullGraph=None, markdownComments=True,excludeAttic=False):
self.setqueryGraph(queryGraph)
self.setfullGraph(fullGraph)
self.setexcludeAttic(excludeAttic)
self.setmarkdownComments(markdownComments)
def setqueryGraph(self,graph=None):
self.queryGraph = graph
def setfullGraph(self,graph=None):
self.fullGraph = graph
def setexcludeAttic(self,state):
self.excludeAttic = state
def setmarkdownComments(self,state):
self.markdown = state
def doQuery(self,graph=None,query=None):
res = None
try:
RDFLIBLOCK.acquire()
res = list(graph.query(query))
finally:
RDFLIBLOCK.release()
return res
def outputCSVtypes(self,file):
atticfilter = ""
if self.excludeAttic:
atticfilter = "FILTER NOT EXISTS {?term schema:isPartOf <http://attic.schema.org>}."
query= ('''select ?term where {
?term a ?type.
BIND(STR(?term) AS ?strVal)
FILTER NOT EXISTS {?term a rdf:Property}.
FILTER(STRLEN(?strVal) >= 18 && SUBSTR(?strVal, 1, 18) = "http://schema.org/").
%s
}
ORDER BY ?term
''') % atticfilter
try:
RDFLIBLOCK.acquire()
types = list(self.queryGraph.query(query))
finally:
RDFLIBLOCK.release()
#log.info( "Types: %s" % len(types))
self.type2CSV(header=True,out=file)
for t in types:
self.type2CSV(term=t.term,header=False,out=file,graph=self.queryGraph)
def outputCSVproperties(self,file):
atticfilter = ""
if self.excludeAttic:
atticfilter = "FILTER NOT EXISTS {?term schema:isPartOf <http://attic.schema.org>}."
query= ('''select ?term where {
?term a rdf:Property.
FILTER EXISTS {?term rdfs:label ?l}.
BIND(STR(?term) AS ?strVal).
FILTER(STRLEN(?strVal) >= 18 && SUBSTR(?strVal, 1, 18) = "http://schema.org/").
%s
}
ORDER BY ?term''') % atticfilter
props = list(self.queryGraph.query(query))
self.prop2CSV(header=True,out=file)
for t in props:
self.prop2CSV(term=t.term,header=False,out=file,graph=self.queryGraph)
def prop2CSV(self,term=None,header=True,out=None,graph=None):
cols = ["id","label","comment","subPropertyOf","equivalentProperty","subproperties","domainIncludes","rangeIncludes","inverseOf","supersedes","supersededBy","isPartOf"]
if not out:
return
writer = csv.writer(out,quoting=csv.QUOTE_ALL,lineterminator='\n')
if header:
writer.writerow(cols)
return
if not graph:
graph = self.queryGraph
if term == None or graph == None:
return
row = [str(term)]
row.append(self.graphValueToCSV(subject=term,predicate=RDFS.label,graph=graph))
row.append(self.getCSVComment(term,graph=self.fullGraph))
row.append(self.getCSVSuperProperties(term,graph=self.fullGraph))
row.append(self.graphValueToCSV(subject=term,predicate=OWL.equivalentProperty,graph=graph))
row.append(self.getCSVSubProperties(term,graph=self.fullGraph))
row.append(self.getCSVDomainIncludes(term,graph=self.fullGraph))
row.append(self.getCSVRangeIncludes(term,graph=self.fullGraph))
row.append(self.graphValueToCSV(subject=term,predicate=URIRef("http://schema.org/inverseOf"),graph=graph))
row.append(self.getCSVsuperseds(term,graph=self.fullGraph))
row.append(self.getCSVSupersededBy(term,graph=self.fullGraph))
row=[s.encode('utf-8') for s in row]
writer.writerow(row)
#print term
def type2CSV(self,term=None,header=True,out=None,graph=None):
cols = ["id","label","comment","subTypeOf","enumerationtype","equivalentClass","properties","subTypes","supersedes","supersededBy","isPartOf"]
if not out:
return
writer = csv.writer(out,quoting=csv.QUOTE_ALL,lineterminator='\n')
if header:
writer.writerow(cols)
return
if not graph:
graph = self.queryGraph
if term ==
|
None or graph == None:
return
if not isinstance(term, URIRef):
term = URIRef(term)
enumType = self.graphValueToCSV(subject=term,predicate=RDF.type,graph=graph)
if enumType.endswith("#Class"):
enumType = ""
row = [str(term)]
row.append(self.graphValueToCSV(subject=term,predicate=RDFS.label,graph=graph))
row.append(self.getCSVComment(term,graph=self.fullGraph))
row.appe
|
nd(self.getCSVSupertypes(term,graph=self.fullGraph))
row.append(enumType)
row.append(self.graphValueToCSV(subject=term,predicate=OWL.equivalentClass,graph=graph))
row.append(self.getCSVTypeProperties(term,graph=self.fullGraph))
row.append(self.getCSVSubtypes(term,graph=self.fullGraph))
row.append(self.getCSVsuperseds(term,graph=self.fullGraph))
row.append(self.getCSVSupersededBy(term,graph=self.fullGraph))
row.append(self.graphValueToCSV(subject=term,predicate=URIRef("http://schema.org/isPartOf"),graph=graph))
row=[s.encode('utf-8') for s in row]
writer.writerow(row)
def graphValueToCSV(self, subject=None, predicate= None, object= None, graph=None):
ret = ""
try:
RDFLIBLOCK.acquire()
ret = str(graph.value(subject=subject,predicate=predicate,object=object))
finally:
RDFLIBLOCK.release()
if ret == None or ret == "None":
ret = ""
return ret
def getCSVSupertypes(self,term=None,graph=None):
query='''select ?sup where{
<%s> rdfs:subClassOf ?sup.
BIND(STR(?sup) AS ?strVal)
FILTER(STRLEN(?strVal) >= 18 && SUBSTR(?strVal, 1, 18) = "http://schema.org/")
}
ORDER BY ?sup''' % term
res = self.doQuery(graph,query)
ret = ', '.join([x.sup for x in res])
return ret
def getCSVTypeProperties(self,term=None,graph=None):
atticfilter = ""
if self.excludeAttic:
atticfilter = "FILTER NOT EXISTS {?prop schema:isPartOf <http://attic.schema.org>.}"
query='''select DISTINCT ?prop where{
?term (^rdfs:subClassOf*) <%s>.
?prop <http://schema.org/domainIncludes> ?term.
%s
}
ORDER BY ?prop''' % (term,atticfilter)
res = self.doQuery(graph,query)
ret = ', '.join([x.prop for x in res])
return ret
def getCSVSubtypes(self,term=None,graph=None):
atticfilter = ""
if self.excludeAttic:
atticfilter = "FILTER NOT EXISTS {?sub schema:isPartOf <http://attic.schema.org>.}"
query='''select ?sub where{
?sub rdfs:subClassOf <%s>.
%s
}
ORDER BY ?sub''' % (term,atticfilter)
res = self.doQuery(graph,query)
ret = ', '.join([x.sub for x in res])
#print "SUBTYPES of %s: '%s'" % (term,ret)
return ret
def getCSVSupersededBy(self,term=None,graph=None):
atticfilter = ""
if self.excludeAttic:
atticfilter = "FILTER NOT EXISTS {?sub schema:isPartOf <http://attic.schema.org>.}"
query='''select ?sup where{
<%s> schema:supersededBy ?sup.
%s
}
ORDER BY ?sup''' % (term,atticfilter)
res = self.doQuery(graph,query)
ret = ', '.join([x.sup for x in res])
#print "%s supercededBy: '%s'" % (term,ret)
return ret
|
dtaht/ns-3-dev-old
|
src/network/bindings/modulegen__gcc_LP64.py
|
Python
|
gpl-2.0
| 507,731 | 0.015106 |
from pybindgen import Module, FileCodeSink, param, retval, cppclass, typehandlers
import pybindgen.settings
import warnings
class ErrorHandler(pybindgen.settings.ErrorHandler):
def handle_error(self, wrapper, exception, traceback_):
warnings.warn("exception %r in wrapper %s" % (exception, wrapper))
return True
pybindgen.settings.error_handler = ErrorHandler()
import sys
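# This binding file appears to be auto-generated (a pybindgen scan for the
# ns.network module): module_init() creates the module and register_types()
# declares every wrapped C++ class and enum on it.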
def module_init():
root_module = Module('ns.network', cpp_namespace='::ns3')
return root_module
def register_types(module):
root_module = module.get_root()
## packetbb.h (module 'network'): ns3::PbbAddressLength [enumeration]
module.add_enum('PbbAddressLength', ['IPV4', 'IPV6'])
## ethernet-header.h (module 'network'): ns3::ethernet_header_t [enumeration]
module.add_enum('ethernet_header_t', ['LENGTH', 'VLAN', 'QINQ'])
## address.h (module 'network'): ns3::Address [class]
module.add_class('Address')
## address.h (module 'network'): ns3::Address::MaxSize_e [enumeration]
module.add_enum('MaxSize_e', ['MAX_SIZE'], outer_class=root_module['ns3::Address'])
## application-container.h (module 'network'): ns3::ApplicationContainer [class]
module.add_class('ApplicationContainer')
## trace-helper.h (module 'network'): ns3::AsciiTraceHelper [class]
module.add_class('AsciiTraceHelper')
## trace-helper.h (module 'network'): ns3::AsciiTraceHelperForDevice [class]
module.add_class('AsciiTraceHelperForDevice', allow_subclassing=True)
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList [class]
module.add_class('AttributeConstructionList', import_from_module='ns.core')
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item [struct]
module.add_class('Item', import_from_module='ns.core', outer_class=root_module['ns3::AttributeConstructionList'])
## buffer.h (module 'network'): ns3::Buffer [class]
module.add_class('Buffer')
## buffer.h (module 'network'): ns3::Buffer::Iterator [class]
module.add_class('Iterator', outer_class=root_module['ns3::Buffer'])
## packet.h (module 'network'): ns3::ByteTagIterator [class]
module.add_class('ByteTagIterator')
## packet.h (module 'network'): ns3::ByteTagIterator::Item [class]
module.add_class('Item', outer_class=root_module['ns3::ByteTagIterator'])
## byte-tag-list.h (module 'network'): ns3::ByteTagList [class]
module.add_class('ByteTagList')
## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator [class]
module.add_class('Iterator', outer_class=root_module['ns3::ByteTagList'])
## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item [struct]
module.add_class('Item', outer_class=root_module['ns3::ByteTagList::Iterator'])
## callback.h (module 'core'): ns3::CallbackBase [class]
module.add_class('CallbackBase', import_from_module='ns.core')
## channel-list.h (module 'network'): ns3::ChannelList [class]
module.add_class('ChannelList')
## data-rate.h (module 'network'): ns3::DataRate [class]
module.add_class('DataRate')
## event-id.h (module 'core'): ns3::EventId [class]
module.add_class('EventId', import_from_module='ns.core')
## inet6-socket-address.h (module 'network'): ns3::Inet6SocketAddress [class]
module.add_class('Inet6SocketAddress')
## inet6-socket-address.h (module 'network'): ns3::Inet6SocketAddress [class]
root_module['ns3::Inet6SocketAddress'].implicitly_converts_to(root_module['ns3::Address'])
## inet-socket-address.h (module 'network'): ns3::InetSocketAddress [class]
module.add_class('InetSocketAddress')
## inet-socket-address.h (module 'network'): ns3::InetSocketAddress [class]
root_module['ns3::InetSocketAddress'].implicitly_converts_to(root_module['ns3::Address'])
## ipv4-address.h (module 'network'): ns3::Ipv4
|
Address [class]
module.add_class('Ipv4Address')
## ipv4-address.h (module 'network'): ns3::Ipv4Address [class]
root_module['ns3::Ipv4Address'].implicitly_converts_to(root_module['ns3::Address'])
## ipv4-address.h (module 'network'): ns3::Ipv4Mask [class]
module.add_class('Ipv4Mask')
## ipv6-address.h (module 'network'): ns3::Ipv6Address [class]
module.add_cl
|
ass('Ipv6Address')
## ipv6-address.h (module 'network'): ns3::Ipv6Address [class]
root_module['ns3::Ipv6Address'].implicitly_converts_to(root_module['ns3::Address'])
## ipv6-address.h (module 'network'): ns3::Ipv6Prefix [class]
module.add_class('Ipv6Prefix')
## mac48-address.h (module 'network'): ns3::Mac48Address [class]
module.add_class('Mac48Address')
## mac48-address.h (module 'network'): ns3::Mac48Address [class]
root_module['ns3::Mac48Address'].implicitly_converts_to(root_module['ns3::Address'])
## mac64-address.h (module 'network'): ns3::Mac64Address [class]
module.add_class('Mac64Address')
## mac64-address.h (module 'network'): ns3::Mac64Address [class]
root_module['ns3::Mac64Address'].implicitly_converts_to(root_module['ns3::Address'])
## net-device-container.h (module 'network'): ns3::NetDeviceContainer [class]
module.add_class('NetDeviceContainer')
## node-container.h (module 'network'): ns3::NodeContainer [class]
module.add_class('NodeContainer')
## node-list.h (module 'network'): ns3::NodeList [class]
module.add_class('NodeList')
## object-base.h (module 'core'): ns3::ObjectBase [class]
module.add_class('ObjectBase', allow_subclassing=True, import_from_module='ns.core')
## object.h (module 'core'): ns3::ObjectDeleter [struct]
module.add_class('ObjectDeleter', import_from_module='ns.core')
## object-factory.h (module 'core'): ns3::ObjectFactory [class]
module.add_class('ObjectFactory', import_from_module='ns.core')
## packet-metadata.h (module 'network'): ns3::PacketMetadata [class]
module.add_class('PacketMetadata')
## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item [struct]
module.add_class('Item', outer_class=root_module['ns3::PacketMetadata'])
## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item [enumeration]
module.add_enum('', ['PAYLOAD', 'HEADER', 'TRAILER'], outer_class=root_module['ns3::PacketMetadata::Item'])
## packet-metadata.h (module 'network'): ns3::PacketMetadata::ItemIterator [class]
module.add_class('ItemIterator', outer_class=root_module['ns3::PacketMetadata'])
## packet-socket-address.h (module 'network'): ns3::PacketSocketAddress [class]
module.add_class('PacketSocketAddress')
## packet-socket-address.h (module 'network'): ns3::PacketSocketAddress [class]
root_module['ns3::PacketSocketAddress'].implicitly_converts_to(root_module['ns3::Address'])
## packet-socket-helper.h (module 'network'): ns3::PacketSocketHelper [class]
module.add_class('PacketSocketHelper')
## packet.h (module 'network'): ns3::PacketTagIterator [class]
module.add_class('PacketTagIterator')
## packet.h (module 'network'): ns3::PacketTagIterator::Item [class]
module.add_class('Item', outer_class=root_module['ns3::PacketTagIterator'])
## packet-tag-list.h (module 'network'): ns3::PacketTagList [class]
module.add_class('PacketTagList')
## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData [struct]
module.add_class('TagData', outer_class=root_module['ns3::PacketTagList'])
## packetbb.h (module 'network'): ns3::PbbAddressTlvBlock [class]
module.add_class('PbbAddressTlvBlock')
## packetbb.h (module 'network'): ns3::PbbTlvBlock [class]
module.add_class('PbbTlvBlock')
## pcap-file.h (module 'network'): ns3::PcapFile [class]
module.add_class('PcapFile')
## trace-helper.h (module 'network'): ns3::PcapHelper [class]
module.add_class('PcapHelper')
## trace-helper.h (module 'network'): ns3::PcapHelper [enumeration]
module.add_enum('', ['DLT_NULL', 'DLT_EN10MB', 'DLT_PPP', 'DLT_RAW', 'DLT_IEEE802_11', 'DLT_PRISM_HEADER', 'DLT_IEEE802_11_RADIO'], outer_class=root_module['ns3::PcapHelper'])
## trace-helper.h (module 'network'): ns3::PcapHelperForDevice [class]
modu
|
Bysmyyr/chromium-crosswalk
|
tools/telemetry/telemetry/testing/system_stub.py
|
Python
|
bsd-3-clause
| 14,620 | 0.011833 |
# Copyright 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Provides stubs for os, sys and subprocess for testing
This test allows one to test code that itself uses os, sys, and subprocess.
"""
import ntpath
import os
import posixpath
import re
import shlex
import sys
class Override(object):
def __init__(self, base_module, module_list):
stubs = {'cloud_storage': CloudStorageModuleStub,
|
'open':
|
OpenFunctionStub,
'os': OsModuleStub,
'perf_control': PerfControlModuleStub,
'raw_input': RawInputFunctionStub,
'subprocess': SubprocessModuleStub,
'sys': SysModuleStub,
'thermal_throttle': ThermalThrottleModuleStub,
'logging': LoggingStub,
'certutils': CertUtilsStub,
'adb_install_cert': AdbInstallCertStub,
'platformsettings': PlatformSettingsStub,
}
self.adb_commands = None
self.os = None
self.subprocess = None
self.sys = None
self._base_module = base_module
self._overrides = {}
for module_name in module_list:
self._overrides[module_name] = getattr(base_module, module_name, None)
setattr(self, module_name, stubs[module_name]())
setattr(base_module, module_name, getattr(self, module_name))
if self.os and self.sys:
self.os.path.sys = self.sys
def __del__(self):
assert not len(self._overrides)
def Restore(self):
for module_name, original_module in self._overrides.iteritems():
if original_module is None:
# This will happen when we override built-in functions, like open.
# If we don't delete the attribute, we will shadow the built-in
# function with an attribute set to None.
delattr(self._base_module, module_name)
else:
setattr(self._base_module, module_name, original_module)
self._overrides = {}
class AdbDevice(object):
def __init__(self):
self.has_root = False
self.needs_su = False
self.shell_command_handlers = {}
self.mock_content = []
self.system_properties = {}
if self.system_properties.get('ro.product.cpu.abi') == None:
self.system_properties['ro.product.cpu.abi'] = 'armeabi-v7a'
def HasRoot(self):
return self.has_root
def NeedsSU(self):
return self.needs_su
def RunShellCommand(self, args, **_kwargs):
if isinstance(args, basestring):
args = shlex.split(args)
handler = self.shell_command_handlers[args[0]]
return handler(args)
def FileExists(self, _):
return False
def ReadFile(self, device_path, as_root=False): # pylint: disable=W0613
return self.mock_content
def GetProp(self, property_name):
return self.system_properties[property_name]
def SetProp(self, property_name, property_value):
self.system_properties[property_name] = property_value
class CloudStorageModuleStub(object):
PUBLIC_BUCKET = 'chromium-telemetry'
PARTNER_BUCKET = 'chrome-partner-telemetry'
INTERNAL_BUCKET = 'chrome-telemetry'
BUCKET_ALIASES = {
'public': PUBLIC_BUCKET,
'partner': PARTNER_BUCKET,
'internal': INTERNAL_BUCKET,
}
# These are used to test for CloudStorage errors.
INTERNAL_PERMISSION = 2
PARTNER_PERMISSION = 1
PUBLIC_PERMISSION = 0
# Not logged in.
CREDENTIALS_ERROR_PERMISSION = -1
class NotFoundError(Exception):
pass
class CloudStorageError(Exception):
pass
class PermissionError(CloudStorageError):
pass
class CredentialsError(CloudStorageError):
pass
def __init__(self):
self.default_remote_paths = {CloudStorageModuleStub.INTERNAL_BUCKET:{},
CloudStorageModuleStub.PARTNER_BUCKET:{},
CloudStorageModuleStub.PUBLIC_BUCKET:{}}
self.remote_paths = self.default_remote_paths
self.local_file_hashes = {}
self.local_hash_files = {}
self.permission_level = CloudStorageModuleStub.INTERNAL_PERMISSION
self.downloaded_files = []
def SetPermissionLevelForTesting(self, permission_level):
self.permission_level = permission_level
def CheckPermissionLevelForBucket(self, bucket):
if bucket == CloudStorageModuleStub.PUBLIC_BUCKET:
return
elif (self.permission_level ==
CloudStorageModuleStub.CREDENTIALS_ERROR_PERMISSION):
raise CloudStorageModuleStub.CredentialsError()
elif bucket == CloudStorageModuleStub.PARTNER_BUCKET:
if self.permission_level < CloudStorageModuleStub.PARTNER_PERMISSION:
raise CloudStorageModuleStub.PermissionError()
elif bucket == CloudStorageModuleStub.INTERNAL_BUCKET:
if self.permission_level < CloudStorageModuleStub.INTERNAL_PERMISSION:
raise CloudStorageModuleStub.PermissionError()
elif bucket not in self.remote_paths:
raise CloudStorageModuleStub.NotFoundError()
def SetRemotePathsForTesting(self, remote_path_dict=None):
if not remote_path_dict:
self.remote_paths = self.default_remote_paths
return
self.remote_paths = remote_path_dict
def GetRemotePathsForTesting(self):
if not self.remote_paths:
self.remote_paths = self.default_remote_paths
return self.remote_paths
# Set a dictionary of data files and their "calculated" hashes.
def SetCalculatedHashesForTesting(self, calculated_hash_dictionary):
self.local_file_hashes = calculated_hash_dictionary
def GetLocalDataFiles(self):
return self.local_file_hashes.keys()
# Set a dictionary of hash files and the hashes they should contain.
def SetHashFileContentsForTesting(self, hash_file_dictionary):
self.local_hash_files = hash_file_dictionary
def GetLocalHashFiles(self):
return self.local_hash_files.keys()
def ChangeRemoteHashForTesting(self, bucket, remote_path, new_hash):
self.remote_paths[bucket][remote_path] = new_hash
def List(self, bucket):
if not bucket or not bucket in self.remote_paths:
bucket_error = ('Incorrect bucket specified, correct buckets:' +
str(self.remote_paths))
raise CloudStorageModuleStub.CloudStorageError(bucket_error)
CloudStorageModuleStub.CheckPermissionLevelForBucket(self, bucket)
return list(self.remote_paths[bucket].keys())
def Exists(self, bucket, remote_path):
CloudStorageModuleStub.CheckPermissionLevelForBucket(self, bucket)
return remote_path in self.remote_paths[bucket]
def Insert(self, bucket, remote_path, local_path):
CloudStorageModuleStub.CheckPermissionLevelForBucket(self, bucket)
if not local_path in self.GetLocalDataFiles():
file_path_error = 'Local file path does not exist'
raise CloudStorageModuleStub.CloudStorageError(file_path_error)
self.remote_paths[bucket][remote_path] = (
CloudStorageModuleStub.CalculateHash(self, local_path))
return remote_path
def GetHelper(self, bucket, remote_path, local_path, only_if_changed):
CloudStorageModuleStub.CheckPermissionLevelForBucket(self, bucket)
if not remote_path in self.remote_paths[bucket]:
if only_if_changed:
return False
raise CloudStorageModuleStub.NotFoundError('Remote file does not exist.')
remote_hash = self.remote_paths[bucket][remote_path]
local_hash = self.local_file_hashes[local_path]
if only_if_changed and remote_hash == local_hash:
return False
self.downloaded_files.append(remote_path)
self.local_file_hashes[local_path] = remote_hash
self.local_hash_files[local_path + '.sha1'] = remote_hash
return remote_hash
def Get(self, bucket, remote_path, local_path):
return CloudStorageModuleStub.GetHelper(self, bucket, remote_path,
local_path, False)
def GetIfChanged(self, local_path, bucket=None):
remote_path = os.path.basename(local_path)
if bucket:
return CloudStorageModuleStub.GetHelper(self, bucket, remote_path,
local_path, True)
result = CloudStorageModuleStub.GetHelper(
self, self.PUBLIC_BUCKET, remote_path, local_path, True)
if not r
|
brendangregg/bcc
|
tools/kvmexit.py
|
Python
|
apache-2.0
| 12,070 | 0.002237 |
#!/usr/bin/env python
#
# kvmexit.py
#
# Display the exit_reason and its statistics of each vm exit
# for all vcpus of all virtual machines. For example:
# $./kvmexit.py
# PID TID KVM_EXIT_REASON COUNT
# 1273551 1273568 EXIT_REASON_MSR_WRITE 6
# 1274253 1274261 EXIT_REASON_EXTERNAL_INTERRUPT 1
# 1274253 1274261 EXIT_REASON_HLT 12
# ...
#
# Besides, we also allow users to specify one pid, tid(s), or one
# pid and its vcpu. See kvmexit_example.txt for more examples.
#
# @PID: each virtual machine's pid in the user space.
# @TID: the user space's thread of each vcpu of that virtual machine.
# @
|
KVM_EXIT_REASON: the reason why the vm exits.
# @COUNT: the counts of the @KVM_EXIT_REASONS.
#
# REQUIRES: Linux 4.7+ (BPF_PROG_TYPE_TRACEPOINT support)
#
# Copyright (c) 2021 ByteDance Inc. All rights reserved.
#
# Author(s):
# Fei Li <lifei.shirley@bytedance.com>
from __future__
|
import print_function
from time import sleep
from bcc import BPF
import argparse
import multiprocessing
import os
import subprocess
#
# Process Arguments
#
def valid_args_list(args):
args_list = args.split(",")
for arg in args_list:
try:
int(arg)
except:
raise argparse.ArgumentTypeError("must be valid integer")
return args_list
# arguments
examples = """examples:
./kvmexit # Display kvm_exit_reason and its statistics in real-time until Ctrl-C
./kvmexit 5 # Display in real-time after sleeping 5s
    ./kvmexit -p 3195281 # Collapse all tids for pid 3195281 with exit reasons sorted in descending order
    ./kvmexit -p 3195281 20 # Collapse all tids for pid 3195281 with exit reasons sorted in descending order, and display after sleeping 20s
./kvmexit -p 3195281 -v 0 # Display only vcpu0 for pid 3195281, descending sort by default
./kvmexit -p 3195281 -a # Display all tids for pid 3195281
./kvmexit -t 395490 # Display only for tid 395490 with exit reasons sorted in descending order
./kvmexit -t 395490 20 # Display only for tid 395490 with exit reasons sorted in descending order after sleeping 20s
./kvmexit -T '395490,395491' # Display for a union like {395490, 395491}
"""
parser = argparse.ArgumentParser(
description="Display kvm_exit_reason and its statistics at a timed interval",
formatter_class=argparse.RawDescriptionHelpFormatter,
epilog=examples)
parser.add_argument("duration", nargs="?", default=99999999, type=int, help="show delta for next several seconds")
parser.add_argument("-p", "--pid", type=int, help="trace this PID only")
exgroup = parser.add_mutually_exclusive_group()
exgroup.add_argument("-t", "--tid", type=int, help="trace this TID only")
exgroup.add_argument("-T", "--tids", type=valid_args_list, help="trace a comma separated series of tids with no space in between")
exgroup.add_argument("-v", "--vcpu", type=int, help="trace this vcpu only")
exgroup.add_argument("-a", "--alltids", action="store_true", help="trace all tids for this pid")
args = parser.parse_args()
duration = int(args.duration)
#
# Setup BPF
#
# load BPF program
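# FUNC_ENTRY, GET_ER and THREAD_FILTER below are textual placeholders; the script
# substitutes them (based on raw-tracepoint support and the pid/tid/vcpu filters
# chosen above) before compiling the program -- the substitution itself happens
# further down, outside this excerpt.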
bpf_text = """
#include <linux/delay.h>
#define REASON_NUM 69
#define TGID_NUM 1024
struct exit_count {
u64 exit_ct[REASON_NUM];
};
BPF_PERCPU_ARRAY(init_value, struct exit_count, 1);
BPF_TABLE("percpu_hash", u64, struct exit_count, pcpu_kvm_stat, TGID_NUM);
struct cache_info {
u64 cache_pid_tgid;
struct exit_count cache_exit_ct;
};
BPF_PERCPU_ARRAY(pcpu_cache, struct cache_info, 1);
FUNC_ENTRY {
int cache_miss = 0;
int zero = 0;
u32 er = GET_ER;
if (er >= REASON_NUM) {
return 0;
}
u64 cur_pid_tgid = bpf_get_current_pid_tgid();
u32 tgid = cur_pid_tgid >> 32;
u32 pid = cur_pid_tgid;
if (THREAD_FILTER)
return 0;
struct exit_count *tmp_info = NULL, *initial = NULL;
struct cache_info *cache_p;
cache_p = pcpu_cache.lookup(&zero);
if (cache_p == NULL) {
return 0;
}
if (cache_p->cache_pid_tgid == cur_pid_tgid) {
//a. If the cur_pid_tgid hit this physical cpu consecutively, save it to pcpu_cache
tmp_info = &cache_p->cache_exit_ct;
} else {
//b. If another pid_tgid matches this pcpu for the last hit, OR it is the first time to hit this physical cpu.
cache_miss = 1;
// b.a Try to load the last cache struct if exists.
tmp_info = pcpu_kvm_stat.lookup(&cur_pid_tgid);
// b.b If it is the first time for the cur_pid_tgid to hit this pcpu, employ a
        // per_cpu array to initialize pcpu_kvm_stat's exit_count with each exit reason's count set to zero
if (tmp_info == NULL) {
initial = init_value.lookup(&zero);
if (initial == NULL) {
return 0;
}
pcpu_kvm_stat.update(&cur_pid_tgid, initial);
tmp_info = pcpu_kvm_stat.lookup(&cur_pid_tgid);
// To pass the verifier
if (tmp_info == NULL) {
return 0;
}
}
}
if (er < REASON_NUM) {
tmp_info->exit_ct[er]++;
if (cache_miss == 1) {
if (cache_p->cache_pid_tgid != 0) {
// b.*.a Let's save the last hit cache_info into kvm_stat.
pcpu_kvm_stat.update(&cache_p->cache_pid_tgid, &cache_p->cache_exit_ct);
}
// b.* As the cur_pid_tgid meets current pcpu_cache_array for the first time, save it.
cache_p->cache_pid_tgid = cur_pid_tgid;
bpf_probe_read(&cache_p->cache_exit_ct, sizeof(*tmp_info), tmp_info);
}
return 0;
}
return 0;
}
"""
# format output
exit_reasons = (
"EXCEPTION_NMI",
"EXTERNAL_INTERRUPT",
"TRIPLE_FAULT",
"INIT_SIGNAL",
"N/A",
"N/A",
"N/A",
"INTERRUPT_WINDOW",
"NMI_WINDOW",
"TASK_SWITCH",
"CPUID",
"N/A",
"HLT",
"INVD",
"INVLPG",
"RDPMC",
"RDTSC",
"N/A",
"VMCALL",
"VMCLEAR",
"VMLAUNCH",
"VMPTRLD",
"VMPTRST",
"VMREAD",
"VMRESUME",
"VMWRITE",
"VMOFF",
"VMON",
"CR_ACCESS",
"DR_ACCESS",
"IO_INSTRUCTION",
"MSR_READ",
"MSR_WRITE",
"INVALID_STATE",
"MSR_LOAD_FAIL",
"N/A",
"MWAIT_INSTRUCTION",
"MONITOR_TRAP_FLAG",
"N/A",
"MONITOR_INSTRUCTION",
"PAUSE_INSTRUCTION",
"MCE_DURING_VMENTRY",
"N/A",
"TPR_BELOW_THRESHOLD",
"APIC_ACCESS",
"EOI_INDUCED",
"GDTR_IDTR",
"LDTR_TR",
"EPT_VIOLATION",
"EPT_MISCONFIG",
"INVEPT",
"RDTSCP",
"PREEMPTION_TIMER",
"INVVPID",
"WBINVD",
"XSETBV",
"APIC_WRITE",
"RDRAND",
"INVPCID",
"VMFUNC",
"ENCLS",
"RDSEED",
"PML_FULL",
"XSAVES",
"XRSTORS",
"N/A",
"N/A",
"UMWAIT",
"TPAUSE"
)
#
# Do some checks
#
try:
# Currently, only adapted to the Intel architecture
cmd = "cat /proc/cpuinfo | grep vendor_id | head -n 1"
arch_info = subprocess.check_output(cmd, shell=True).strip()
if b"Intel" in arch_info:
pass
else:
raise Exception("Currently we only support Intel architecture, please do expansion if needs more.")
# Check if kvm module is loaded
if os.access("/dev/kvm", os.R_OK | os.W_OK):
pass
else:
raise Exception("Please insmod kvm module to use kvmexit tool.")
except Exception as e:
raise Exception("Failed to do precondition check, due to: %s." % e)
try:
if BPF.support_raw_tracepoint_in_module():
# Let's firstly try raw_tracepoint_in_module
func_entry = "RAW_TRACEPOINT_PROBE(kvm_exit)"
get_er = "ctx->args[0]"
else:
# If raw_tp_in_module is not supported, fall back to regular tp
func_entry = "TRACEPOINT_PROBE(kvm, kvm_exit)"
get_er = "args->exit_reason"
except Exception as e:
raise Exception("Failed to catch kvm exit reasons due to: %s" % e)
def find_tid(tgt_dir, tgt_vcpu):
for tid in os.listdir(tgt_dir):
path = t
|
nocarryr/vidhub-control
|
vidhubcontrol/backends/base.py
|
Python
|
gpl-3.0
| 27,623 | 0.004236 |
from loguru import logger
import asyncio
from typing import Optional, List, Dict, ClassVar
from pydispatch import Dispatcher, Property
from pydispatch.properties import ListProperty, DictProperty
from vidhubcontrol.common import ConnectionState, ConnectionManager
class BackendBase(Dispatcher):
"""Base class for communicating with devices
:Events:
.. function:: on_preset_added(backend: BackendBase = self, preset: Preset = preset)
This :class:`~pydispatch.dispatch.Event` is emitted
when a new :class:`Preset` has been added.
.. function:: on_preset_stored(backend: BackendBase = self, preset: Preset = preset)
This :class:`~pydispatch.dispatch.Event` is emitted
when an existing :class:`Preset` has been recorded (updated).
.. function:: on_preset_active(backend: BackendBase, preset: Preset = preset, value: bool = value)
This :class:`~pydispatch.dispatch.Event` is emitted
when an existing :class:`Preset` has determined that its stored
routing information is currently active on the switcher.
"""
device_name: str = Property()
device_model: str = Property()
"""The model name as reported by the device"""
device_id: str = Property()
"""The unique id as reported by the device"""
device_version: str = Property()
"""Firmware version reported by the device"""
connection_manager: ConnectionManager
"""Manager for the device's :class:`~.common.ConnectionState`"""
prelude_parsed: bool = Property(False)
def __init__(self, **kwargs):
self.connection_manager = ConnectionManager()
self.device_name = kwargs.get('device_name')
self.client = None
self.event_loop = kwargs.get('event_loop', asyncio.get_event_loop())
self.bind(device_id=self.on_device_id)
if self.device_id is None:
self.device_id = kwargs.get('device_id')
@property
def connection_state(self) -> ConnectionState:
"""The current :attr:`~.common.ConnectionManager.state` of the
:attr:`connection_manager`
"""
return self.connection_manager.state
@classmethod
async def create_async(cls, **kwargs):
obj = cls(**kwargs)
await obj.connect()
return obj
async def connect(self):
manager = self.connection_manager
async with manager:
if manager.state & ConnectionState.waiting != 0:
state = await manager.wait_for('connected|not_connected')
if manager.state.is_connected:
return self.client
assert ConnectionState.not_connected in manager.state
await manager.set_state('connecting')
await asyncio.sleep(0)
try:
r = await asyncio.wait_for(self.do_connect(), timeout=2)
except asyncio.TimeoutError as exc:
r = False
async with manager:
if r is False and ConnectionState.failure not in manager.state:
await manager.set_failure('unknown')
if ConnectionState.failure in manager.state:
await manager.set_state('not_connected')
else:
if self.client is not None:
self.client = r
await manager.set_state('connected')
return r
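# Illustrative call pattern only ("SomeBackend" is a hypothetical concrete
# subclass that implements do_connect/do_disconnect):
#   backend = await SomeBackend.create_async(device_name='hub')
#   ...
#   await backend.disconnect()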
async def disconnect(self):
manager = self.connection_manager
async with manager:
if ConnectionState.not_connected in manager.state:
return
elif ConnectionState.disconnecting in manager.state:
await manager.wait_for('not_connected')
return
elif ConnectionState.connecting in manager.state:
state = await manager.wait_for('connected|not_connected')
if state == ConnectionState.not_connected:
return
assert manager.state.is_connected
await manager.set_state('disconnecting')
await asyncio.sleep(0)
await self.do_disconnect()
async with manager:
self.client = None
await manager.set_state('not_connected')
async def _catch_exception(self, e: Exception, is_error: Optional[bool] = False):
if not is_error:
logger.exception(e)
return
exc_type = e.__class__
try:
exc_info = e.args
except:
exc_info = str(e)
async with self.connection_manager as manager:
await manager.set_failure(exc_info, e)
try:
await self.do_disconnect()
finally:
self.client = None
async with self.connection_manager as manager:
await manager.set_state('not_connected')
async def do_connect(self):
raise NotImplementedError()
async def do_disconnect(self):
raise NotImplementedError()
async def get_status(self):
raise NotImplementedError()
def on_device_id(self, instance, value, **kwargs):
if value is None:
return
if self.device_name is None:
self.device_name = value
self.unbind(self.on_device_id)
class VidhubBackendBase(BackendBase):
"""Base class for Videohub devices
Attributes:
num_outputs (int): The number of outputs as reported by the switcher.
num_inputs (int): The number of inputs as reported by the switcher.
crosspoints: This represents the currently active routing of the
switcher. Each element in the ``list`` represents an output (the
zero-based index of the ``list``) with its selected index as the
value (also zero-based).
This is a :class:`pydispatch.properties.ListProperty` and can be
observed using the :meth:`~pydispatch.Dispatcher.bind` method.
output_labels: A ``list`` containing the names of each output
as reported by the switcher
This is a :class:`pydispatch.properties.ListProperty` and can be
observed using the :meth:`~pydispatch.Dispatcher.bind` method.
input_labels: A ``list`` containing the names of each input
as reported by the switcher
This is a :class:`pydispatch.properties.ListProperty` and can be
observed using the :meth:`~pydispatch.Dispatcher.bind` method.
crosspoint_control: This is similar to :attr:`~VidhubBackendBase.crosspoints`
but if modified from outside code, the crosspoint changes will be
set on the device (no method calls required).
:class:`pydispatch.properties.ListProperty`
output_label_control: This is similar to :attr:`~VidhubBackendBase.output_labels`
but if modified from outside code, the label changes will be written
to the device (no method calls required).
:class:`pydispatch.properties.ListProperty`
input_label_control: This is similar to :attr:`~VidhubBackendBase.input_labels`
but if modified from outside code, the label changes will be written
to the device (no method calls required).
:class:`pydispatch.properties.ListProperty`
presets: The currently available (stored) ``list`` of :class:`Preset`
instances
:class:`pydispatch.properties.ListProperty`
"""
crosspoints: List[int] = ListProperty()
output_labels: List[str] = ListProperty()
input_labels: List[str] = ListProperty()
crosspoint_control: List[int] = ListProperty()
output_label_control: List[str] = ListProperty()
input_label_control: List[str] = ListProperty()
presets: List['Preset'] = ListProperty()
num_outputs: int = Property(0)
num_inputs: int = Property(0)
device_type: ClassVar[str] = 'vidhub'
feedback_prop_map = {
'crosspoints':'crosspoint_con
|
trol',
'input_labels':'input_label_control',
'output_labels':'output_label_c
|
ontrol',
}
_events_ = ['on_preset_added', 'on_preset_stored', 'on_preset_active']
def __init__(self, **kwargs):
super().__init__(**kwargs)
|
davidt/reviewboard
|
reviewboard/hostingsvcs/codebasehq.py
|
Python
|
mit
| 20,408 | 0.000049 |
from __future__ import unicode_literals
import logging
from xml.dom.minidom import parseString
from django import forms
from django.utils import six
from django.utils.six.moves.urllib.error import HTTPError, URLError
from django.utils.translation import ugettext_lazy as _, ugettext
from reviewboard.hostingsvcs.errors import (AuthorizationError,
HostingServiceAPIError,
RepositoryError)
from reviewboard.hostingsvcs.forms import (HostingServiceAuthForm,
HostingServiceForm)
from reviewboard.hostingsvcs.service import (HostingService,
HostingServiceClient)
from reviewboard.scmtools.crypto_utils import (decrypt_password,
encrypt_password)
from reviewboard.scmtools.errors import FileNotFoundError
class CodebaseHQAuthForm(HostingServiceAuthForm):
api_key = forms.CharField(
label=_('API key'),
max_length=128,
required=True,
widget=forms.TextInput(attrs={'size': '60'}),
help_text=_('The API key provided to your Codebase account. This is '
'available in My Profile under API Credentials.'))
domain = forms.CharField(
label=_('Codebase domain'),
max_length=128,
required=True,
widget=forms.TextInput(attrs={'size': '60'}),
help_text=_('The subdomain used to access your Codebase account. '
'This is the "<tt>subdomain</tt>" of '
'<tt>subdomain</tt>.codebasehq.com.'))
def get_credentials(self):
credentials = super(CodebaseHQAuthForm, self).get_credentials()
credentials.update({
'domain': self.cleaned_data['domain'],
'api_key': self.cleaned_data['api_key'],
})
return credentials
class Meta(object):
help_texts = {
'hosting_account_username': _(
'The username you use to log into Codebase. This should '
'<em>not</em> include the domain name.'
),
'hosting_account_password': _(
'The password you use to log into Codebase. This is separate '
'from the API key below.'
),
}
class CodebaseHQForm(HostingServiceForm):
codebasehq_project_name = forms.CharField(
label=_('Project name'),
max_length=64,
required=True,
widget=forms.TextInput(attrs={'size': '60'}))
codebasehq_repo_name = forms.CharField(
label=_('Repository short name'),
max_length=128,
|
required=True,
widget=forms.TextInput(attrs={'size':
|
'60'}),
help_text=_('The short name of your repository. This can be found by '
'clicking the Settings button on the right-hand '
'side of the repository browser.'))
class CodebaseHQClient(HostingServiceClient):
"""Client for talking to the Codebase API.
This implements the API methods that the hosting service needs, converting
requests into API calls and those back into structured results.
"""
#: Mimetype used for API requests and responses.
API_MIMETYPE = 'application/xml'
def __init__(self, hosting_service):
"""Initialize the client.
Args:
hosting_service (CodebaseHQ):
The hosting service that owns this client.
"""
self.hosting_service = hosting_service
def api_get_file(self, repository, project_name, repo_name, path,
revision):
"""Return the content of a file in a repository.
Args:
repository (reviewboard.scmtools.models.Repository):
The repository entry in Review Board.
project_name (unicode):
The name of the Codebase project.
repo_name (unicode):
The name of the repository.
path (unicode):
The path to the file in the repository.
revision (unicode):
The revision of the file or commit.
Returns:
bytes:
The contents of the file.
"""
url = '%s/%s/blob/' % (project_name, repo_name)
if repository.tool.name == 'Git':
url += revision
else:
if path.startswith('/'):
path = path[1:]
url += '%s/%s' % (revision, path)
return self.api_get(self.build_api_url(url), raw_content=True)
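# Illustrative only (names made up): for a Git repository this requests
#   <api>/myproject/myrepo/blob/<commit-sha>
# while for other SCM tools it requests
#   <api>/myproject/myrepo/blob/<revision>/<path>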
def api_get_public_keys(self, username):
"""Return information on all public keys for a user.
Args:
username (unicode):
The user to fetch public keys for.
Returns:
dict:
Information on each of the user's public keys.
"""
return self.api_get(self.build_api_url('users/%s/public_keys'
% username))
def api_get_repository(self, project_name, repo_name):
"""Return information on a repository.
Args:
project_name (unicode):
The name of the Codebase project.
repo_name (unicode):
The name of the repository.
Returns:
dict:
Information on the repository.
See https://support.codebasehq.com/kb/repositories for the
data returned.
"""
return self.api_get(
self.build_api_url('%s/%s' % (project_name, repo_name)))
def build_api_url(self, url):
"""Return the URL for an API call.
Args:
url (unicode):
The relative URL for the API call.
Returns:
unicode:
The absolute URL for the API call.
"""
return 'https://api3.codebasehq.com/%s' % url
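# Illustrative only (project/repository names are made up):
#   build_api_url('myproject/myrepo') -> 'https://api3.codebasehq.com/myproject/myrepo'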
def api_get(self, url, raw_content=False):
"""Perform an HTTP GET request to the API.
Args:
url (unicode):
The full URL to the API resource.
raw_content (bool, optional):
If set to ``True``, the raw content of the result will be
returned, instead of a parsed XML result.
Returns:
object:
The parsed content of the result, as a dictionary, or the raw
bytes content if ``raw_content`` is ``True``.
"""
hosting_service = self.hosting_service
try:
account_data = hosting_service.account.data
api_username = '%s/%s' % (account_data['domain'],
hosting_service.account.username)
api_key = decrypt_password(account_data['api_key'])
data, headers = self.http_get(
url,
username=api_username,
password=api_key,
headers={
'Accept': self.API_MIMETYPE,
})
if raw_content:
return data
else:
return self.parse_xml(data)
except HTTPError as e:
data = e.read()
msg = six.text_type(e)
rsp = self.parse_xml(data)
if rsp and 'errors' in rsp:
errors = rsp['errors']
if 'error' in errors:
msg = errors['error']
if e.code == 401:
raise AuthorizationError(msg)
else:
raise HostingServiceAPIError(msg, http_code=e.code, rsp=rsp)
except URLError as e:
raise HostingServiceAPIError(e.reason)
def get_xml_text(self, nodes):
"""Return the text contents of a set of XML nodes.
Args:
nodes (list of xml.dom.minidom.Element):
The list of nodes.
Returns:
unicode:
The text content of the nodes.
"""
return ''.join(
node.data
for node in nodes
if node.nodeType == node.TEXT_NODE
)
def parse_xml(self, s):
"""Return the
|
aaronsw/watchdog
|
vendor/rdflib-2.4.0/rdflib/store/SQLite.py
|
Python
|
agpl-3.0
| 18,855 | 0.016547 |
from __future__ import generators
from rdflib import BNode
from rdflib.Literal import Literal
from pprint import pprint
from pysqlite2 import dbapi2
import sha,sys,re,os
from rdflib.term_utils import *
from rdflib.Graph import QuotedGraph
from rdflib.store.REGEXMatching import REGEXTerm, NATIVE_REGEX, PYTHON_REGEX
from rdflib.store.AbstractSQLStore import *
Any = None
#User-defined REGEXP operator
def regexp(expr, item):
r = re.compile(expr)
return r.match(item) is not None
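# Note: SQLite only honours the infix operator "X REGEXP Y" after an application
# registers a function named "regexp" (done in SQLite.open below via
# create_function); the operator form is then evaluated as a call to regexp(Y, X).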
class SQLite(AbstractSQLStore):
"""
SQLite store formula-aware implementation. It stores its triples in the following partitions:
- Asserted non rdf:type statements
- Asserted rdf:type statements (in a table whic
|
h models Class membership)
The motivation for this partition is primarily query speed and scalability as most graphs will always have more rdf:type statements than others
- All Quoted statements
In addition it persists namespace mappings in a separate table
"""
context_aware = True
formula_aware = True
transaction_aware = True
reg
|
ex_matching = PYTHON_REGEX
autocommit_default = False
def open(self, home, create=True):
"""
Opens the store specified by the configuration string. If
create is True a store will be created if it does not already
exist. If create is False and a store does not already exist
an exception is raised. An exception is also raised if a store
exists, but there are insufficient permissions to open the
store."""
if create:
db = dbapi2.connect(os.path.join(home,self.identifier))
c=db.cursor()
c.execute(CREATE_ASSERTED_STATEMENTS_TABLE%(self._internedId))
c.execute(CREATE_ASSERTED_TYPE_STATEMENTS_TABLE%(self._internedId))
c.execute(CREATE_QUOTED_STATEMENTS_TABLE%(self._internedId))
c.execute(CREATE_NS_BINDS_TABLE%(self._internedId))
c.execute(CREATE_LITERAL_STATEMENTS_TABLE%(self._internedId))
for tblName,indices in [
(
"%s_asserted_statements",
[
("%s_A_termComb_index",('termComb',)),
("%s_A_s_index",('subject',)),
("%s_A_p_index",('predicate',)),
("%s_A_o_index",('object',)),
("%s_A_c_index",('context',)),
],
),
(
"%s_type_statements",
[
("%s_T_termComb_index",('termComb',)),
("%s_member_index",('member',)),
("%s_klass_index",('klass',)),
("%s_c_index",('context',)),
],
),
(
"%s_literal_statements",
[
("%s_L_termComb_index",('termComb',)),
("%s_L_s_index",('subject',)),
("%s_L_p_index",('predicate',)),
("%s_L_c_index",('context',)),
],
),
(
"%s_quoted_statements",
[
("%s_Q_termComb_index",('termComb',)),
("%s_Q_s_index",('subject',)),
("%s_Q_p_index",('predicate',)),
("%s_Q_o_index",('object',)),
("%s_Q_c_index",('context',)),
],
),
(
"%s_namespace_binds",
[
("%s_uri_index",('uri',)),
],
)]:
for indexName,columns in indices:
c.execute("CREATE INDEX %s on %s (%s)"%(indexName%self._internedId,tblName%(self._internedId),','.join(columns)))
c.close()
db.commit()
db.close()
self._db = dbapi2.connect(os.path.join(home,self.identifier))
self._db.create_function("regexp", 2, regexp)
if os.path.exists(os.path.join(home,self.identifier)):
c = self._db.cursor()
c.execute("SELECT * FROM sqlite_master WHERE type='table'")
tbls = [rt[1] for rt in c.fetchall()]
c.close()
for tn in [tbl%(self._internedId) for tbl in table_name_prefixes]:
if tn not in tbls:
sys.stderr.write("table %s Doesn't exist\n" % (tn));
#The database exists, but one of the partitions doesn't exist
return 0
#Everything is there (the database and the partitions)
return 1
#The database doesn't exist - nothing is there
#return -1
def destroy(self, home):
"""
FIXME: Add documentation
"""
db = dbapi2.connect(os.path.join(home,self.identifier))
c=db.cursor()
for tblsuffix in table_name_prefixes:
try:
c.execute('DROP table %s'%tblsuffix%(self._internedId))
except:
print "unable to drop table: %s"%(tblsuffix%(self._internedId))
#Note, this only removes the associated tables for the closed world universe given by the identifier
print "Destroyed Close World Universe %s ( in SQLite database %s)"%(self.identifier,home)
db.commit()
c.close()
db.close()
os.remove(os.path.join(home,self.identifier))
def EscapeQuotes(self,qstr):
"""
Ported from Ft.Lib.DbUtil
"""
if qstr is None:
return ''
tmp = qstr.replace("\\","\\\\")
tmp = tmp.replace('"', '""')
tmp = tmp.replace("'", "\\'")
return tmp
#This is overridden to leave unicode terms as is
#Instead of converting them to ascii (the default behavior)
def normalizeTerm(self,term):
if isinstance(term,(QuotedGraph,Graph)):
return term.identifier
elif isinstance(term,Literal):
return self.EscapeQuotes(term)
elif term is None or isinstance(term,(list,REGEXTerm)):
return term
else:
return term
#Where Clause utility Functions
#The predicate and object clause builders are modified in order to optimize
#subjects and objects utility functions which can take lists as their last argument (object,predicate - respectively)
def buildSubjClause(self,subject,tableName):
if isinstance(subject,REGEXTerm):
return " REGEXP (%s,"+" %s)"%(tableName and '%s.subject'%tableName or 'subject'),[subject]
elif isinstance(subject,list):
clauseStrings=[]
paramStrings = []
for s in subject:
if isinstance(s,REGEXTerm):
clauseStrings.append(" REGEXP (%s,"+" %s)"%(tableName and '%s.subject'%tableName or 'subject') + " %s")
paramStrings.append(self.normalizeTerm(s))
elif isinstance(s,(QuotedGraph,Graph)):
clauseStrings.append("%s="%(tableName and '%s.subject'%tableName or 'subject')+"%s")
paramStrings.append(self.normalizeTerm(s.identifier))
else:
clauseStrings.append("%s="%(tableName and '%s.subject'%tableName or 'subject')+"%s")
paramStrings.append(self.normalizeTerm(s))
return '('+ ' or '.join(clauseStrings) + ')', paramStrings
elif isinstance(subject,(QuotedGraph,Graph)):
return "%s="%(tableName and '%s.subject'%tableName or 'subject')+"%s",[self.normalizeTerm(subject.identifier)]
else:
return subject is not None and "%s="%(tableName and '%s.subject'%tableName or 'subject')+"%s",[subject] or None
#Capable of taking a list of predicates as well (in which case sub clauses are joined with 'OR')
def buildPredClause(self,predicate,tableName):
if isinstance(predicate,REGEXTerm):
return " REGEXP (%s,"+" %s)"%(tableName and '%s.predicate'%table
|
dymkowsk/mantid
|
Framework/PythonInterface/test/python/mantid/kernel/ConfigServiceTest.py
|
Python
|
gpl-3.0
| 4,880 | 0.002459 |
from __future__ import (absolute_import, division, print_function)
import unittest
import os
import testhelpers
from mantid.kernel import (ConfigService, ConfigServiceImpl, config,
std_vector_str, FacilityInfo, InstrumentInfo)
class ConfigServiceTest(unittest.TestCase):
__dirs_to_rm = []
__init_dir_list = ''
def test_singleton_returns_instance_of_ConfigService(self):
self.assertTrue(isinstance(config, ConfigServiceImpl))
def test_getLocalFilename(self):
local = config.getLocalFilename().lower()
self.assertTrue('local' in local)
def test_getUserFilename(self):
user = config.getUserFilename().lower()
self.assertTrue('user' in user)
def test_getFacilityReturns_A_FacilityInfo_Object(self):
facility = config.getFacility()
self.assertTrue(isinstance(facility,
|
FacilityInfo))
def test_getFacility_With_Name_Returns_A_FacilityInfo_Object(self):
facility = config.getFacility("ISIS")
self.assertTrue(isinstance(facility, FacilityInfo))
self.assertRaises(RuntimeError, config.getFacility, "MadeUpFacility")
def test_getFacilities_Returns_A_FacilityInfo_List(self):
facilities = config.getFacilities()
self.assertTrue(isinstance(facilities[0], FacilityInfo))
def test_getFacilities_and_Facilit
|
y_Names_are_in_sync_and_non_empty(self):
facilities = config.getFacilities()
names = config.getFacilityNames()
self.assertTrue(len(names)>0)
self.assertEquals(len(names),len(facilities))
for i in range(len(names)):
self.assertEquals(names[i],facilities[i].name())
def test_update_and_set_facility(self):
self.assertFalse("TEST" in config.getFacilityNames())
ConfigService.updateFacilities(os.path.join(ConfigService.getInstrumentDirectory(),"IDFs_for_UNIT_TESTING/UnitTestFacilities.xml"))
ConfigService.setFacility("TEST")
self.assertEquals(config.getFacility().name(), "TEST")
self.assertRaises(RuntimeError, config.getFacility, "SNS")
def test_getInstrumentReturns_A_InstrumentInfo_Object(self):
self.assertTrue(isinstance(config.getInstrument("WISH"), InstrumentInfo))
self.assertRaises(RuntimeError, config.getInstrument, "MadeUpInstrument")
def test_service_acts_like_dictionary(self):
test_prop = "algorithms.retained"
self.assertTrue(config.hasProperty(test_prop))
dictcall = config[test_prop]
fncall = config.getString(test_prop)
self.assertEquals(dictcall, fncall)
self.assertNotEqual(config[test_prop], "")
old_value = fncall
config.setString(test_prop, "1")
self.assertEquals(config.getString(test_prop), "1")
config[test_prop] = "2"
self.assertEquals(config.getString(test_prop), "2")
config.setString(test_prop, old_value)
def test_getting_search_paths(self):
"""Retrieve the search paths
"""
paths = config.getDataSearchDirs()
self.assertEquals(type(paths), std_vector_str)
self.assert_(len(paths) > 0)
def test_setting_paths_via_single_string(self):
new_path_list = self._setup_test_areas()
path_str = ';'.join(new_path_list)
config.setDataSearchDirs(path_str)
paths = config.getDataSearchDirs()
# Clean up here so that if the assert fails
# it doesn't bring all the other tests down
self._clean_up_test_areas()
self.assertEquals(len(paths), 2)
self.assertTrue('tmp' in paths[0])
self.assertTrue('tmp_2' in paths[1])
self._clean_up_test_areas()
def test_setting_log_channel_levels(self):
testhelpers.assertRaisesNothing(self, config.setFileLogLevel, 4)
testhelpers.assertRaisesNothing(self, config.setConsoleLogLevel, 4)
def _setup_test_areas(self):
"""Create a new data search path string
"""
self.__init_dir_list = config['datasearch.directories']
# Set new paths - Make a temporary directory so that I know where it is
test_path = os.path.join(os.getcwd(), "tmp")
try:
os.mkdir(test_path)
self.__dirs_to_rm.append(test_path)
except OSError:
pass
test_path_two = os.path.join(os.getcwd(), "tmp_2")
try:
os.mkdir(test_path_two)
self.__dirs_to_rm.append(test_path_two)
except OSError:
pass
return [test_path, test_path_two]
def _clean_up_test_areas(self):
config['datasearch.directories'] = self.__init_dir_list
# Remove temp directories
for p in self.__dirs_to_rm:
try:
os.rmdir(p)
except OSError:
pass
if __name__ == '__main__':
unittest.main()
|
ar0551/Wasp
|
src/ghComp/Wasp_DisCo Rule Group.py
|
Python
|
gpl-3.0
| 3,209 | 0.011218 |
# Wasp: Discrete Design with Grasshopper plug-in (GPL) initiated by Andrea Rossi
#
# This file is part of Wasp.
#
# Copyright (c) 2017, Andrea Rossi <a.rossi.andrea@gmail.com>
# Wasp is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published
# by the Free Software Foundation; either version 3 of the License,
# or (at your option) any later version.
#
# Wasp is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Wasp; If not, see <http://www.gnu.org/licenses/>.
#
# @license GPL-3.0 <https://www.gnu.org/lice
|
nses/gpl.html>
#
# Significant parts of Wasp have been developed by Andrea Rossi
# as part of research on digital materials and discrete design at:
# DDU Digital Design Unit
|
- Prof. Oliver Tessmann
# Technische Universität Darmstadt
#########################################################################
## COMPONENT INFO ##
#########################################################################
"""
Export Wasp information for DisCo VR software
-
Provided by Wasp 0.5
Args:
NAME: Rule group name. It will be used to activate/deactivate the rules contained in DisCo
GR: Rule grammars to be included in the group
Returns:
RULE_G: Rule Group instance
"""
ghenv.Component.Name = "Wasp_DisCo Rule Group"
ghenv.Component.NickName = 'RuleG'
ghenv.Component.Message = 'v0.5.005'
ghenv.Component.IconDisplayMode = ghenv.Component.IconDisplayMode.application
ghenv.Component.Category = "Wasp"
ghenv.Component.SubCategory = "7 | DisCo VR"
try: ghenv.Component.AdditionalHelpFromDocStrings = "5"
except: pass
import sys
import json
import Rhino.Geometry as rg
import Grasshopper as gh
## add Wasp install directory to system path
wasp_loaded = False
ghcompfolder = gh.Folders.DefaultAssemblyFolder
if ghcompfolder not in sys.path:
sys.path.append(ghcompfolder)
try:
from wasp import __version__
wasp_loaded = True
except:
msg = "Cannot import Wasp. Is the wasp folder available in " + ghcompfolder + "?"
ghenv.Component.AddRuntimeMessage(gh.Kernel.GH_RuntimeMessageLevel.Error, msg)
## if Wasp is installed correctly, load the classes required by the component
if wasp_loaded:
from wasp.disco import DisCoRuleGroup
def main(group_name, rule_grammar):
check_data = True
## check inputs
if group_name is None:
check_data = False
msg = "No group name provided"
ghenv.Component.AddRuntimeMessage(gh.Kernel.GH_RuntimeMessageLevel.Warning, msg)
if len(rule_grammar) == 0:
check_data = False
msg = "No rules grammar provided"
ghenv.Component.AddRuntimeMessage(gh.Kernel.GH_RuntimeMessageLevel.Warning, msg)
if check_data:
return DisCoRuleGroup(group_name, rule_grammar)
else:
return -1
result = main(NAME, GR)
if result != -1:
RULE_G = result
|
gijigae/django-tutorial
|
django_tutorial/contrib/sites/migrations/0002_set_site_domain_and_name.py
|
Python
|
bsd-3-clause
| 949 | 0 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations
def update_site_forward(apps, schema_editor):
"""Set site domain and name."""
Site = apps.get_model("sites", "Site")
Site.objects.update_or_create(
id=settings.SITE_ID,
defaults={
"domain": "choimirai.com",
|
"name": "Django Tutorial"
}
)
def update_site_backward(apps, schema_editor):
"""Rev
|
ert site domain and name to default."""
Site = apps.get_model("sites", "Site")
Site.objects.update_or_create(
id=settings.SITE_ID,
defaults={
"domain": "example.com",
"name": "example.com"
}
)
class Migration(migrations.Migration):
dependencies = [
('sites', '0001_initial'),
]
operations = [
migrations.RunPython(update_site_forward, update_site_backward),
]
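# Illustrative usage (standard Django commands, not part of this file):
#   python manage.py migrate sites                # applies update_site_forward
#   python manage.py migrate sites 0001_initial   # reverses it via update_site_backward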
|