text (string, 6–947k chars) | repo_name (string, 5–100 chars) | path (string, 4–231 chars) | language (stringclasses, 1 value) | license (stringclasses, 15 values) | size (int64, 6–947k) | score (float64, 0–0.34)
---|---|---|---|---|---|---
import OOMP
newPart = OOMP.oompItem(9019)
newPart.addTag("oompType", "HESH")
newPart.addTag("oompSize", "03")
newPart.addTag("oompColor", "L")
newPart.addTag("oompDesc", "STAN")
newPart.addTag("oompIndex", "01")
OOMP.parts.append(newPart)
| oomlout/oomlout-OOMP | old/OOMPpart_HESH_03_L_STAN_01.py | Python | cc0-1.0 | 241 | 0 |
"""
Derivation and Elementary Trees live here.
"""
from __future__ import print_function
from baal.structures import Entry, ConstituencyTree, consts
from baal.semantics import Predicate, Expression
from collections import deque
from copy import copy, deepcopy
from math import floor, ceil
try:
    # Python 2 compatibility: alias input to raw_input; on Python 3 the
    # builtin input already has the desired behavior
    input = raw_input
except NameError:
    pass
def prn_pairs(phead, thead):
    """Return True if (phead, thead) form a matching parenthetical pair."""
    pairs = [("-LRB-", "-RRB-"), ("-LSB-", "-RSB-"), ("-LCB-", "-RCB-"),
             ("--", "--"), (",", ",")]
    return any(left.lower() == phead.lower() and right.lower() == thead.lower()
               for left, right in pairs)
class AttachmentPoint(object):
def __init__(self, free, pos_symbol, gorn, type, seq_index):
self.free = free
self.pos_symbol = pos_symbol
self.gorn = gorn
self.type = type
self.seq_index = seq_index
self.hlf_symbol = None
self.frontier_increment = 0.01
self.frontier = (-1,0)
def __repr__(self):
return "{}@{}".format(self.pos_symbol,self.gorn)
def __deepcopy__(self, memo):
cls = self.__class__
result = cls.__new__(cls)
memo[id(self)] = result
for k, v in self.__dict__.items():
setattr(result, k, deepcopy(v, memo))
return result
@classmethod
def from_tree(cls, tree, address, seq_index, tree_type):
new_point = cls(True, tree.symbol, address, tree_type, seq_index)
if tree.spine_index >= 0:
new_point.frontier = (tree.spine_index, tree.spine_index)
return new_point
@property
def left_frontier(self):
l, r = self.frontier
self.frontier = (l-self.frontier_increment, r)
assert self.frontier[0] > floor(self.frontier[0])
return self.frontier[0]
@property
def right_frontier(self):
l, r = self.frontier
self.frontier = (l, r+self.frontier_increment)
assert self.frontier[1] < ceil(self.frontier[1])
return self.frontier[1]
def sibling_increment(self, left=True):
l, r = self.frontier
if left:
self.frontier = (ceil(l) - 1.0, r)
else:
self.frontier = (l, floor(r) + 1.0)
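    # Worked example of the frontier bookkeeping above (illustrative, not part
    # of the original module): starting from frontier == (-1, 0),
    #   left_frontier  -> -1.01, -1.02, ...  (stays above floor(-1.01) == -2)
    #   right_frontier ->  0.01,  0.02, ...  (stays below ceil(0.01)  ==  1)
    # sibling_increment(left=True) then snaps the left edge to the next whole
    # slot: (-2.0, 0).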
def match(self, op):
pos_match = self.pos_symbol == op.target['pos_symbol']
gorn_match = ((self.gorn == op.target['target_gorn'])
or op.target['target_gorn'] is None)
hlf_match = self.hlf_symbol == op.target['target_hlf']
type_match = self.type == op.type
fail = []
if not pos_match:
f = "failure because pos:"
f += "self: {}; op: {}".format(str(self.pos_symbol),
str(op.target['pos_symbol']))
fail.append(f)
if not gorn_match:
f = "failure because gorn:"
f += "self: {}; op: {}".format(str(self.gorn),
str(op.target['target_gorn']))
fail.append(f)
if not hlf_match:
f = "failure because hlf:"
f += "self: {}; op: {}".format(str(self.hlf_symbol),
str(op.target['target_hlf']))
fail.append(f)
#if len(fail) > 0:
# print(" & \n".join(fail))
#else:
# print("Success!")
return self.free and pos_match and gorn_match and hlf_match and type_match
def set_path_features(self, hlf_symbol):
self.hlf_symbol = hlf_symbol
def clone(self):
ret = AttachmentPoint(self.free, self.pos_symbol, self.gorn,
self.type, self.seq_index)
ret.hlf_symbol = self.hlf_symbol
ret.frontier = self.frontier
return ret
class AttachmentOperation(object):
"""Represents an elementary tree operation
Used by DerivationTrees when trying to find where an elementary tree should attach
There are two modes to the operation:
1. Use it as a general attachment. In this case it needs to know
the permissable attachments via the pos_symbol (and direction if insertion)
2. Use it in specific attachment. In this case it needs to know
identifying information about the tree it should be attaching to.
Current ideas: hlf_symbol, tree_id, argument_number, gorn_address
Thoughts: gorn_address won't work (for obvious reasons as the tree grows)
tree_id won't work because there might be duplicates
hlf_symbol could work, as long as this semantic form remains
argument_number requires planning, which CSG and others might handle
"""
def __init__(self, target, type):
"""Pass in the already made parameters to make the operation.
Args:
target: dict with keys 'pos_symbol' and 'parameter'
'pos_symbol' is the part of speech this operation looks for
'parameter' is direction for insertions, and argument number
for substitutions
type: the type of operation this is: consts.INSERTION or consts.SUBSTITUTION
Notes:
insertion direction: left means it inserts on the left side
e.g. (NP* (DT a)) inserts left.
the asterisk denotes the attachment point
right means it inserts on the right side
e.g. (*S (. .)) inserts right
the asterisk denotes the attachment point
"""
self.target = target
self.type = type
@property
def is_insertion(self):
return self.type == consts.INSERTION
@property
def direction(self):
if not self.is_insertion:
raise Exception("Not an insertion tree")
else:
return self.target['attach_direction']
def clone(self):
return AttachmentOperation(self.target, self.type)
def set_path_features(self, target_gorn, target_hlf):
if target_hlf is not None:
self.target['target_hlf'] = target_hlf
if target_gorn is not None:
self.target['target_gorn'] = tuple(target_gorn)
@classmethod
def from_tree(cls, tree):
"""Calculate the parameters for the operation from a parse tree
Args:
tree: A ConstituencyParse instance
"""
if tree.adjunct:
target = {'pos_symbol': tree.symbol, 'attach_direction': tree.direction,
'target_gorn': None, 'target_hlf': None}
type = consts.INSERTION
else:
target = {'pos_symbol': tree.symbol, 'attach_direction': "up",
'target_gorn': None, 'target_hlf': None}
type = consts.SUBSTITUTION
        return cls(target, type)
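# Illustrative sketch (added for clarity; not part of the original API): the
# two operation modes from the AttachmentOperation docstring, built by hand.
# The gorn address and hlf symbol below are made-up placeholder values.
def _example_operations():
    # general left insertion, e.g. the (NP* (DT a)) case from the docstring
    left_ins = AttachmentOperation(
        {'pos_symbol': 'NP', 'attach_direction': 'left',
         'target_gorn': None, 'target_hlf': None},
        consts.INSERTION)
    # specific substitution that also pins its target tree via an hlf symbol
    specific_sub = AttachmentOperation(
        {'pos_symbol': 'NP', 'attach_direction': 'up',
         'target_gorn': (0, 1), 'target_hlf': 'g0'},
        consts.SUBSTITUTION)
    return left_ins, specific_sub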
class ElementaryTree(object):
"""represent a tree fragment, its operations, and its internal addresses
"""
def __init__(self, op, head, head_address, head_symbol, bracketed_string,
substitution_points, insertion_points,
hlf_symbol=None, tree_id=None, last_type=None, last_index=-1):
self.tree_operation = op
self.head = head
self.head_address = head_address
self.substitution_points = substitution_points
self.insertion_points = insertion_points
self.address = (0,)
self.last_type = last_type
self.last_index = last_index
self.hlf_symbol = hlf_symbol
self.bracketed_string = bracketed_string
self.tree_id = tree_id
self.head_symbol = head_symbol
@classmethod
    def from_full_parse_tree(cls, parse_tree):
        # NOTE: appears unfinished; it normalizes an empty root symbol and
        # clones the tree, but never constructs or returns an ElementaryTree.
        if parse_tree.symbol == "" and len(parse_tree.children) == 1:
            parse_tree.symbol = "ROOT"
        _, addressbook = parse_tree.clone()
@classmethod
def from_single_parse_tree(cls, parse_tree):
if parse_tree.save_str().upper() == "(ROOT ROOT)":
return cls.root_tree()
_, addressbook = parse_tree.clone()
head = None
head_address = None
substitution_points = list()
insertion_points = list()
sorted_book = sorted(addressbook.items())
_, root = sorted_book[0]
root_sym = root.symbol
for address, tree in sorted_book:
#if tree.symbol == "ROOT":
# head = "ROOT"
# new_point = AttachmentPoint.from_tree(tree, address, 0, consts.SUBSTITUTION)
# substitution_points.append(new_point)
if tree.lexical:
if head is None:
head = tree.symbol
head_address = address
head_parent = tree.parent
else:
assert prn_pairs(head, tree.symbol)
elif tree.complement:
new_point = AttachmentPoint.from_tree(tree,
address,
len(substitution_points),
consts.SUBSTITUTION)
substitution_points.append(new_point)
elif tree.spine_index >= 0:
new_point = AttachmentPoint.from_tree(tree,
address,
len(insertion_points),
consts.INSERTION)
insertion_points.append(new_point)
else:
print(address, tree)
print("Then what is it?")
op = AttachmentOperation.from_tree(parse_tree)
        assert (head is not None and head_address is not None) or head == "ROOT"
return cls(op, head, head_address, head_parent, parse_tree.save_str(),
substitution_points, insertion_points)
@classmethod
def from_bracketed_string(cls, bracketed_string):
parse_tree, _ = ConstituencyTree.make(bracketed_string=bracketed_string)
return cls.from_single_parse_tree(parse_tree)
@classmethod
def root_tree(cls):
root_op = AttachmentOperation({'pos_symbol': 'ROOT', 'attach_direction': None,
'target_gorn': None, 'target_hlf':None},
consts.SUBSTITUTION)
root_subpoint = AttachmentPoint(True, 'ROOT', (0,), consts.SUBSTITUTION, 0)
root_subpoint.hlf_symbol = "g-1"
return cls(root_op, "", (0,), None, "(ROOT)",
[root_subpoint], [], hlf_symbol="g-1")
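    # Illustrative usage (assumes the bracketed string parses via
    # ConstituencyTree.make):
    #   et = ElementaryTree.from_bracketed_string("(NP (NN dog))")
    #   host = ElementaryTree.root_tree()   # host tree with one ROOT subpoint
    # from_single_parse_tree records the head word, its gorn address, and the
    # open substitution/insertion points discovered in the parse.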
def __deepcopy__(self, memo):
cls = self.__class__
result = cls.__new__(cls)
memo[id(self)] = result
for k, v in self.__dict__.items():
setattr(result, k, deepcopy(v, memo))
return result
################### INSERTION OPERATION
########################################
def insert(self, op_tree):
new_tree = deepcopy(self)#.clone()
address = new_tree.mark_insertion(op_tree.tree_operation)
op_tree = deepcopy(op_tree)#.clone()
op_tree.address = address
return new_tree, op_tree
def mark_insertion(self, op):
assert self.last_match is not None
assert self.last_match.match(op)
if op.target['attach_direction'] == "left":
op_index = self.last_match.left_frontier
else:
op_index = self.last_match.right_frontier
return self.last_match.gorn + (op_index,)
def matches_inspoint(self, op):
self.last_type = None
self.last_index = -1
for index, point in enumerate(self.insertion_points):
if point.match(op):
self.last_index = index
self.last_type = consts.INSERTION
return True
return False
################### SUBSTITUTION OPERATION
###########################################
def substitute(self, op_tree):
"""update open substitution spots.
Args:
op_tree: an ElementaryTree instance
Notes:
accepts an op_tree that needs to substitute here.
raises an Exception if it can't
"""
new_tree = deepcopy(self)#self.clone()
address = new_tree.mark_substituted(op_tree.tree_operation)
op_tree = deepcopy(op_tree)#.clone()
op_tree.address = address
return new_tree, op_tree
def mark_substituted(self, op):
assert self.last_match is not None
assert self.last_match.match(op)
self.last_match.free = False
match_gorn = self.last_match.gorn
if self.hlf_symbol == 'g-1':
return match_gorn
is_left = match_gorn < self.head_address
for point in self.insertion_points:
if point.gorn == match_gorn[:-1]:
point.sibling_increment(is_left)
return match_gorn
def matches_subpoint(self, op):
"""check to see if operation matches anything on this tree
Args:
op: AttachmentOperation instance
Returns:
True, False
"""
self.last_type = None
self.last_index = -1
for index, point in enumerate(self.substitution_points):
if point.match(op):
self.last_type = consts.SUBSTITUTION
self.last_index = index
return True
return False
##################### UTILITY METHODS
#####################################
def point_iterator(self, ignore_taken=False):
for pt_type, points in zip(['SUB', 'INS'], [self.sub_points, self.ins_points]):
for point in points:
if ignore_taken and not point.free:
continue
yield pt_type, point
@property
def ins_points(self):
return self.insertion_points
@property
def sub_points(self):
return self.substitution_points
@property
def root_pos(self):
return self.tree_operation.target['pos_symbol']
@property
def last_match(self):
if self.last_index < 0:
return None
elif self.last_type == consts.SUBSTITUTION:
return self.substitution_points[self.last_index]
else:
return self.insertion_points[self.last_index]
@property
def is_insertion(self):
return self.tree_operation.is_insertion
@property
def pos_symbol(self):
return self.tree_operation.target['pos_symbol']
def set_path_features(self, target_gorn=None, target_hlf=None,
self_hlf=None, tree_id=None):
"""Set the variables needed to reconstruct paths.
Args
target_gorn: the gorn address of the target operation node
target_hlf: the target hlf symbol of the target operation tree
self_hlf: this tree's hlf symbol
Notes:
The gorn address will identify where in the target tree
The target_hlf will identify which tree; especially important for duplicates
"""
if self_hlf:
for point in self.substitution_points + self.insertion_points:
point.set_path_features(self_hlf)
self.hlf_symbol = self_hlf
if target_gorn or target_hlf:
self.tree_operation.set_path_features(target_gorn, target_hlf)
if tree_id:
self.tree_id = tree_id
def expand_address(self, incoming):
self.expanded_address = incoming
for _, point in self.point_iterator():
point.expanded_address = incoming + point.gorn[1:]
""" a soft deletion to see if i can get rid of this code
def refresh_points(self):
self.tree_operation = self.tree_operation.clone()
self.substitution_points = [sub.clone() for sub in self.substitution_points]
self.insertion_points = [ins.clone() for ins in self.insertion_points]
def clone(self):
new_tree = ElementaryTree(self.tree_operation, self.head,
self.head_address, self.bracketed_string,
self.substitution_points,
self.insertion_points)
new_tree.refresh_points()
if self.last_match:
new_tree.last_type = self.last_type
new_tree.last_index = self.last_index
if self.hlf_symbol:
new_tree.hlf_symbol = self.hlf_symbol
new_tree.address = self.address
new_tree.tree_id = self.tree_id
return new_tree
"""
def __str__(self):
return self.bracketed_string
def __repr__(self):
substr = ", ".join("{}{}@{}".format(sub.pos_symbol,
"-FREE" if sub.free else "-FILLED",
sub.gorn)
for sub in sorted(self.substitution_points,
key=lambda x: x.gorn))
instr = ", ".join("{}@{}".format(ins.pos_symbol, ins.gorn)
for ins in sorted(self.insertion_points,
key=lambda x: x.gorn))
if self.tree_operation.is_insertion:
typestr = "{}*" if self.tree_operation.direction == "left" else "*{}"
else:
typestr = "^{}^"
typestr = typestr.format(self.head)
return "<{}; sub=[{}], ins=[{}]>".format(typestr, substr, instr)
class DerivationTree(object):
"""represent a tree of ElementaryTrees and their attachment addresses.
"""
def __init__(self, elem_tree, children, predicate=None, suppress_predicate=False):
self.elem_tree = elem_tree
self.children = children
self.predicate = predicate
if not suppress_predicate and predicate is None:
self.predicate = self.instantiate_semantics()
@classmethod
def root_tree(cls):
E = ElementaryTree.root_tree()
P = Predicate(name='ROOT', valence=1, hlf_symbol='g-1')
return cls(E, [], P)
@classmethod
def from_single_parse_tree(cls, tree):
elem_tree = ElementaryTree.from_single_parse_tree(tree)
return cls(elem_tree, [])
@classmethod
def from_bracketed(cls, bracketed_string, **kwargs):
elem_tree = ElementaryTree.from_bracketed_string(bracketed_string)
#parse_tree, _ = ConstituencyTree.make(bracketed_string=bracketed_string)
return cls(elem_tree, [], **kwargs)
@property
def E(self):
""" shortcut alias for shorter lines """
return self.elem_tree
@property
def is_insertion(self):
return self.elem_tree.is_insertion
@property
def direction(self):
if self.is_insertion:
return self.E.tree_operation.target['attach_direction']
else:
return "up"
@property
def tree_op(self):
return self.E.tree_operation
@property
def bracketed(self):
return self.E.bracketed_string
@property
def head(self):
return self.E.head
@property
def supertag(self):
return (self.E.root_pos, self.E.head_symbol, self.direction)
@property
def superindex(self):
return (self.head, self.supertag)
@property
def is_root(self):
return "ROOT" in self.E.bracketed_string
@property
def num_children(self):
return sum([child.num_children+1 for child in self.children])
@property
def lexical(self):
out = [self.E.head]
for child in self.children:
out.extend(child.lexical)
return out
def target_gorn(self, adjust_insertion=True):
gorn = self.tree_op.target['target_gorn']
direction = self.tree_op.target['attach_direction']
if self.is_insertion and adjust_insertion:
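            # sort insertions before/after all real child indices at this level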
gorn += ((-100 if direction == "left" else 100), )
return gorn
def accepts_op(self, other_tree):
other_target = other_tree.E.tree_operation.target['pos_symbol']
if other_tree.is_insertion:
points = self.E.insertion_points
else:
points = self.E.substitution_points
for point in points:
if point.pos_symbol == other_target:
return True
return False
def expand_address(self, incoming=None):
incoming = incoming or (0,)
self.E.expand_address(incoming)
self.expanded_address = incoming
for child in self.children:
child_address = incoming + child.E.address[1:]
child.expand_address(child_address)
def all_points(self):
points = list(self.E.point_iterator())
for child in self.children:
points.extend(child.all_points)
return points
def get_spine(self):
tree, _ = ConstituencyTree.make(bracketed_string=self.bracketed)
annotate = lambda t: (t.symbol, ("SUB" if t.complement
else ("INS" if t.adjunct
else "SPINE")))
        not_lex = lambda t: not t.lexical
spine = [[(tree.symbol, self.direction)]]
while not_lex(tree):
if len(tree.children) == 1 and tree.children[0].lexical:
break
spine.append([annotate(c) for c in tree.children if not_lex(c)])
tree = tree.children[tree.spine_index]
return spine
def roll_features(self, parent_head="ROOT"):
"""assumes 1 head.. more thought needed for other forms"""
spine = self.get_spine()
out_ch = [child.head for child in self.children]
out = [(self.head, parent_head, self.bracketed, spine, out_ch)]
for child in self.children:
out.extend(child.roll_features(self.head))
return out
def modo_roll_features(self, parent_head="ROOT", parent_spine=None):
"""v2. mother-daughter roll features
roll up the tree; get the mother-daughter quadruples
"""
parent_spine = parent_spine or ((("ROOT", "SUB"),),)
tree, _ = ConstituencyTree.make(bracketed_string=self.bracketed)
safety = 0
annotate = lambda t: (t.symbol, ("SUB" if t.complement
else ("INS" if t.adjunct
else "SPINE")))
filter_ch = lambda c: c.E.head_symbol in [",", ":", ".", "``","''", "--"]
        not_lex = lambda t: not t.lexical
spine = [[(tree.symbol, self.direction)]]
while not_lex(tree):
if len(tree.children) == 1 and tree.children[0].lexical:
break
spine.append([annotate(c) for c in tree.children if not_lex(c)])
tree = tree.children[tree.spine_index]
safety += 1
if safety == 100:
raise Exception("loop issue")
out = [(self.head, parent_head, self.bracketed, spine, parent_spine)]
for child in self.children:
out.extend(child.modo_roll_features(self.head, spine))
return out
def dcontext_roll_features(self):
"""v3. mother-daughter roll features
roll up the trees; get the node+daughter head context
"""
tree, _ = ConstituencyTree.make(bracketed_string=self.bracketed)
annotate = lambda t: (t.symbol, ("SUB" if t.complement
else ("INS" if t.adjunct
else "SPINE")))
filter_ch = lambda c: c.E.head_symbol in [",", ":", ".", "``","''", "--"]
        not_lex = lambda t: not t.lexical
spine = [[(tree.symbol, self.direction)]]
while not_lex(tree):
if len(tree.children) == 1 and tree.children[0].lexical:
break
spine.append([annotate(c) for c in tree.children if not_lex(c)])
tree = tree.children[tree.spine_index]
hlf_info = (self.E.hlf_symbol, self.E.tree_operation.target['target_hlf'])
child_heads = [child.head for child in self.children]
out = [(self.head, spine, child_heads, self.bracketed, hlf_info)]
for child in self.children:
out.extend(child.dcontext_roll_features())
return out
def learning_features_july2016(self):
'''sequential choice model with a horizon and RTTN
'''
tree, _ = ConstituencyTree.make(bracketed_string=self.bracketed)
annotate = lambda t: (t.symbol, ("SUB" if t.complement
else ("INS" if t.adjunct
else "SPINE")))
        not_lex = lambda t: not t.lexical
spine = [[(tree.symbol, self.direction)]]
while not_lex(tree):
if len(tree.children) == 1 and tree.children[0].lexical:
break
spine.append([annotate(c) for c in tree.children if not_lex(c)])
tree = tree.children[tree.spine_index]
return self.head, spine
def to_constituency(self):
raise Exception("dont use this yet")
import pdb
#pdb.set_trace()
tree, _ = ConstituencyTree.make(bracketed_string=self.bracketed)
for child in sorted(self.children, key=lambda c: c.E.address):
print("*******\n**********")
print("starting child {}".format(child.supertag))
ct = child.to_constituency()
print("----------------------------")
print("finished to constituency for ct")
print("tree is currently {}".format(tree))
print("child's ct: {}".format(ct))
print("-------------------")
print(self.bracketed)
print(child.E.address)
print(str(child))
print("attaching {} to {}".format(child.bracketed, self.bracketed))
self.attach_at(tree, ct, list(child.E.address)[1:])
return tree
def attach_at(self, node, op, address):
raise Exception("dont use this yet")
while len(address) > 1:
node = node.children[address.pop(0)]
if not hasattr(node, "bookkeeper"):
node.bookkeeper = {}
opid = address.pop(0)
assert len(address) == 0
if isinstance(opid, int):
node.children[opid].__dict__.update(op.__dict__)
elif isinstance(opid, float):
if opid > 0:
node.children.extend(op.children)
else:
node.children = op.children + node.children
node.spine_index += len(op.children)
else:
raise Exception("sanity check")
def __str__(self):
if self.E.bracketed_string == "(ROOT)" and len(self.children) == 0:
return "<empty root>"
lexical = self.in_order_lexical()
return " ".join(lexical)
def __repr__(self):
if self.E.bracketed_string == "(ROOT)" and len(self.children) == 0:
return "<empty root>"
descs = self.in_order_descriptive()
return " ".join(descs)
def _check_heads(self, child_prep, next_word, stk_idx, sf_stk, avail_pos):
for (head,hlf), child in child_prep.items():
if head == next_word:
import pdb
#pdb.set_trace()
w_size = child.num_children + 1
low,high = stk_idx, stk_idx+w_size
while high >= stk_idx and low >= 0:
possible = sf_stk[low:high]
if sorted(possible) == sorted(child.lexical):
child_prep.pop((head, hlf))
pos = avail_pos.pop()
return child, pos, low
else:
low -= 1
high -= 1
return None, None, None
def _sort_by_surface_form(self, sf_list, children, positions, left=True):
"""assign spine-out indices that agrees with surface form list (sf_list)
positions start from 0 and go negative when left, positive when right
we want to associate things closer to 0 with words closer to head
"""
#my_possible_positions = [i for i,x in enumerate(sf_list) if x==self.E.head]
#if "down" in [c.E.head for c in children]:
# import pdb
# pdb.set_trace()
#for possible_position in my_possible_positions:
#print("===")
child_prep = {(child.E.head,child.E.hlf_symbol):child for child in children}
pairing = []
avail_pos = sorted(positions)
sf_stk = sf_list[:]
if not left:
avail_pos = avail_pos[::-1]
sf_stk = sf_stk[::-1]
        # if the position is so bad that it cuts off the words, just skip it
        if not all(word in sf_stk for c in children for word in c.lexical):
            raise Exception("surface form does not contain all child words")
stk_idx = len(sf_stk) - 1
#print("xxx")
domain = set([w for child in children for w in child.lexical])
import pdb
#pdb.set_trace()
while len(avail_pos) > 0 and stk_idx >= 0:
#while len(sf_stk) > 0 and len(pairing)<len(children):
#print("---", possible_position, child_prep.keys(), sf_stk, stk_idx)
next_word = sf_stk[stk_idx]
if next_word not in domain:
#print("trashpop", next_word)
sf_stk.pop()
else:
child, pos, low = self._check_heads(child_prep, next_word, stk_idx, sf_stk, avail_pos)
if child is not None:
stk_idx = low
sf_stk = sf_stk[:low]
pairing.append((child,pos))
stk_idx -= 1
        if len(avail_pos) > 0:
            raise Exception("available positions weren't exhausted; "
                            "multiple instances of this head word may "
                            "partition the string too much")
        yield pairing
def sort_by_surface_form(self, sf_list, children, positions, left=True):
#import pdb
#pdb.set_trace()
#try:
#if self.E.head == "iii":
# import pdb
# pdb.set_trace()
all_pairings = list(self._sort_by_surface_form(sf_list, children, positions, left))
#except IndexError as e:
# print("tried to pop from an empty list... what should I do")
# import pdb
# pdb.set_trace()
if len(all_pairings) == 1:
return all_pairings[0]
else:
#try:
key = lambda item: (item[1], (item[0].E.head, item[0].E.hlf_symbol))
same = lambda p1, p2: tuple(map(key,p1))==tuple(map(key,p2))
if all([same(p1,p2) for p1 in all_pairings for p2 in all_pairings]):
#print("all same anyway, returning")
return all_pairings[0]
else:
dt_check = lambda diffs: any([item[0].E.head_symbol == "DT" for pair in diffs for item in pair])
dt_key = lambda pairing: sum([abs(p) for c,p in pairing if c.E.head_symbol=="DT"])
differences = [(p1,p2) for i,p1 in enumerate(all_pairings)
for j,p2 in enumerate(all_pairings)
if not same(p1,p2) and i<j]
differences = [(x,y) for diff_item in differences for x,y in zip(*diff_item) if x!=y]
if len(differences) == 2 and dt_check(differences):
#print("shortcutting")
out_pairing = max(all_pairings, key=dt_key)
#print("hopefully works: ", out_pairing)
return out_pairing
#return all_pairings[0]
print("Not sure what to do. not all pairings are the same. inspect please")
import pdb
pdb.set_trace()
#except Exception as e:
# print("not exactly sure what is breaking")
# import pdb
# pdb.set_trace()
def surface_index(self, sf_list, num_left):
for i,w in enumerate(sf_list):
if w == self.E.head and i >= num_left:
return i
return -1
def align_gorn_to_surface(self, surface_form):
if len(self.children) == 0:
return
sf_list = surface_form.split(" ")
if self.E.head == "as" and "much" in sf_list:
import pdb
#pdb.set_trace()
left_of = lambda x,me: x.elem_tree.address < me.elem_tree.head_address
left_children = [child for child in self.children if left_of(child, self)]
organizer = {}
num_left = sum([child.num_children+1 for child in left_children])
boundary = max(num_left, self.surface_index(sf_list, num_left))
left_form = " ".join(sf_list[:boundary])
right_form = " ".join(sf_list[boundary+1:])
#### LEFT CHILDREN
for child in left_children:
addr = child.elem_tree.address
level, position = addr[:-1], addr[-1]
organizer.setdefault(level, []).append((child, position))
for level, items in organizer.items():
if len(items) == 1:
continue
children, positions = [x[0] for x in items], [x[1] for x in items]
pairing = self.sort_by_surface_form(sf_list[:boundary], children, positions, True)
for child,position in pairing:
assert child.E.address[:-1] == level
child.E.address = child.E.address[:-1] + (position,)
#### RIGHT CHILDREN
organizer = {}
right_children = [child for child in self.children if not left_of(child, self)]
for child in right_children:
addr = child.elem_tree.address
level, position = addr[:-1], addr[-1]
organizer.setdefault(level, []).append((child, position))
for level, items in organizer.items():
if len(items) == 1:
continue
children, positions = [x[0] for x in items], [x[1] for x in items]
pairing = self.sort_by_surface_form(sf_list[boundary+1:], children, positions, False)
for child,position in pairing:
assert child.E.address[:-1] == level
child.E.address = child.E.address[:-1] + (position,)
for child in left_children:
child.align_gorn_to_surface(left_form)
for child in right_children:
child.align_gorn_to_surface(right_form)
def align_gorn_to_surface_deprecated_march30(self, surface_form):
left_of = lambda x,me: x.elem_tree.address < me.elem_tree.head_address
surface_index = lambda child: surface_form.find(child.elem_tree.head)
left_children = [child for child in self.children if left_of(child, self)]
organizer = {}
#### LEFT CHILDREN
for child in left_children:
addr = child.elem_tree.address
level, position = addr[:-1], addr[-1]
organizer.setdefault(level, []).append((child, position))
for level, items in organizer.items():
if len(items) == 1:
continue
child_list = sorted([c for c,p in items], key=surface_index)
pop_q = deque(sorted([p for c,p in items]))
            assert len(set(pop_q)) == len(pop_q)  # positions must be distinct
for child in child_list:
addr = child.elem_tree.address
child.elem_tree.address = addr[:-1] + (pop_q.popleft(), )
#### RIGHT CHILDREN
organizer = {}
right_children = [child for child in self.children if not left_of(child, self)]
for child in right_children:
addr = child.elem_tree.address
level, position = addr[:-1], addr[-1]
organizer.setdefault(level, []).append((child, position))
for level, items in organizer.items():
if len(items) == 1:
continue
child_list = sorted([c for c,p in items], key=surface_index)
pop_q = deque(sorted([p for c,p in items]))
for child in child_list:
addr = child.elem_tree.address
child.elem_tree.address = addr[:-1] + (pop_q.popleft(), )
for child in self.children:
child.align_gorn_to_surface(surface_form)
def align_gorn_to_surface_old(self, surface_form):
ins_children = [child for child in self.children if child.is_insertion]
sub_children = [child for child in self.children if not child.is_insertion]
surface_index = lambda child: surface_form.find(child.elem_tree.head)
organizer = {}
for child in ins_children:
addr = child.elem_tree.address
new_addr = addr[:-1] + ((1,) if addr[-1] > 0 else (-1,))
organizer.setdefault(addr, []).append(child)
for proxy_addr, child_list in organizer.items():
if len(child_list) == 1:
continue
offset = min([c.elem_tree.address[-1] for c in child_list])
for i, child in enumerate(sorted(child_list, key=surface_index),0):
last_bit = i+offset
child.elem_tree.address = proxy_addr[:-1] +(last_bit,)
for child in self.children:
child.align_gorn_to_surface(surface_form)
#left_ins = [child for child in ins_children if child.elem_tree.address[-1]<0]
#right_ins = [child for child in ins_children if child.elem_tree.address[-1]>0]
#surface_index = lambda child: surface_form.find(child.elem_tree.head)
#sort_key = lambda ch: ch.elem_tree.address[:-1]+()
def gorn_in_order(self, include_empty=False):
items = [(child.elem_tree.address, child) for child in self.children]
if len(self.E.head) > 0:
items.append((self.elem_tree.head_address, self))
if include_empty:
for point in self.elem_tree.substitution_points:
if all([addr!=point.gorn for addr, _ in items]):
items.append((point.gorn, None))
sorted_items = sorted(items)
return sorted_items
def gorn_pre_order(self, merged=True):
"""Return children sorted by gorn. Use for pre-order walks.
Will also return from inside out.
"""
left_of = lambda x,me: x.elem_tree.address < me.elem_tree.head_address
left_children = [child for child in self.children if left_of(child, self)]
right_children = [child for child in self.children if not left_of(child, self)]
sorted_left = sorted(left_children, key=lambda x: x.elem_tree.address, reverse=True)
#for i,left in enumerate(sorted_left):
# print(i,left.elem_tree.bracketed_string)
# print(i,left.elem_tree.address)
sorted_right = sorted(right_children, key=lambda x: x.elem_tree.address)
#for i,right in enumerate(sorted_right):
# print(i,right.elem_tree.bracketed_string)
# print(i,right.elem_tree.address)
#sorted_children = sorted(self.children, key=lambda x: x.elem_tree.address)
if merged:
return sorted_left + sorted_right
else:
return sorted_left, sorted_right
def learning_features(self, *args):
"""make learning features. currently for dual attender model.
output: features and annotations for pairs (parent, child)
"""
feature_output = []
f1 = "head={}".format(self.E.head)
f2 = "template={}".format(self.E.bracketed_string.replace(self.E.head, ""))
if self.is_root:
my_feats = (f2,)
else:
my_feats = (f1, f2)
for child_type, side in zip(self.gorn_pre_order(False), ("left", "right")):
for i, child in enumerate(child_type):
anno = []
anno.append("dist-from-spine: {}".format(i))
anno.append("dist-from-frontier: {}".format(len(child_type)-i-1))
anno.append("spine-side: {}".format(side))
if child.is_insertion:
anno.append("type=ins")
else:
anno.append("type=sub")
for j, pt in enumerate(self.E.substitution_points):
if pt.gorn == child.E.address:
anno.append("argument-{}".format(j))
child_feats, pairs_below = child.learning_features()
feature_output.extend(pairs_below)
feature_output.append((my_feats, child_feats, tuple(anno)))
return my_feats, feature_output
def _old_learning_features(self, flat=False):
raise Exception("don't use this function anymore")
f1 = "head={}".format(self.elem_tree.head)
f2 = "template={}".format(self.elem_tree.bracketed_string.replace(self.elem_tree.head, ""))
#f4 = "surface=[{}]".format(str(self))
#fulllex = self.in_order_lexical(True)
#f5 = "surface_with_empties=[{}]".format(fulllex)
myfeats = {"f1":f1,"f2":f2,"f3": []}
#"f4":f4,"f5":f5}
allfeats = [myfeats]
first_ins = lambda child: (child.E.address < self.E.head_address and
all([child.E.address < other_child.E.address
for other_child in self.children
if other_child.E.address != child.E.address]))
last_ins = lambda child: (child.E.address > self.E.head_address and
all([child.E.address > other_child.E.address
for other_child in self.children
if other_child.E.address != child.E.address]))
for child in self.children:
            # if child is insertion, find out whether it's furthest left or furthest right
            # if child is substitution, find out which of the substitution points it corresponds to
if first_ins(child):
pass
arrow = "<-" if child.is_insertion else "->"
f3 = "{}{}{}".format(self.elem_tree.head, arrow, child.elem_tree.head)
myfeats['f3'].append(f3)
allfeats.extend(child.learning_features())
if flat:
final_list = []
for featset in allfeats:
for featval in featset.values():
if isinstance(featval, list):
final_list.extend(featval)
else:
final_list.append(featval)
return final_list
return allfeats
def path_reconstruction_features(self):
return (self.E.bracketed_string, self.E.hlf_symbol,
self.E.tree_operation.target['target_hlf'],
self.E.tree_operation.target['target_gorn'])
#return (self.elem_tree.tree_id, self.elem_tree.head)
def pre_order_features(self):
feat_list = [self.path_reconstruction_features()]# for now, just id
for child in self.gorn_pre_order():
feat_list.extend(child.pre_order_features())
return tuple(feat_list)
def pre_order_descriptive(self):
descs = [str(self.elem_tree)]
sorted_children = sorted(self.children, key=lambda x: x.elem_tree.address)
for tree in sorted_children:
descs.extend(tree.pre_order_descriptive())
return descs
def in_order_descriptive(self):
descs = []
for address, tree in self.gorn_in_order():
if tree == self:
descs.append(str(self.elem_tree))
else:
descs.extend(tree.in_order_descriptive())
return descs
def in_order_treeids(self):
treeids = []
for address, tree in self.gorn_in_order():
if tree == self:
treeids.append(tree.elem_tree.tree_id)
else:
treeids.extend(tree.in_order_treeids())
return treeids
def pre_order_lexical(self):
pass
def in_order_lexical(self, include_empties=False):
lexical = []
for address, tree in self.gorn_in_order(include_empties):
if include_empties and tree is None:
lexical.append("<open-sub-point>")
elif tree.elem_tree.head is None:
continue
elif tree == self:
lexical.append(self.elem_tree.head)
else:
lexical.extend(tree.in_order_lexical())
return lexical
def expanded_by_hlf(self, book=None):
if book is None:
self.expand_address()
book = {}
book[self.E.hlf_symbol] = self.expanded_address
for child in self.children:
book = child.expanded_by_hlf(book)
return book
def make_expression(self, top=True):
expr = []
for i, (address, tree) in enumerate(self.gorn_in_order()):
if tree == self:
expr.append(self.predicate)
else:
expr.extend(tree.make_expression(False))
if top:
return Expression.from_iter(expr)
return expr
def lookup_insert(self, index):
return self.elem_tree.insertion_points[index].gorn
def lookup_sub(self, index):
return self.elem_tree.substitution_points[index].gorn
def set_path_features(self, instantiate_semantics=True, *args, **kwargs):
self.elem_tree.set_path_features(*args, **kwargs)
if instantiate_semantics:
self.predicate = self.instantiate_semantics()
def set_insertion_argument(self, arg):
if not self.is_insertion:
raise Exception("Don't call this if it's not insertion..")
self.predicate.substitute(arg, 0)
def instantiate_semantics(self):
num_arguments = len(self.elem_tree.substitution_points)
if self.is_insertion:
num_arguments += 1
predicate = Predicate(self.elem_tree.head,
num_arguments,
self.elem_tree.hlf_symbol)
if self.elem_tree.hlf_symbol is None:
self.elem_tree.set_path_features(self_hlf=predicate.hlf_symbol)
return predicate
def __deepcopy__(self, memo):
cls = self.__class__
result = cls.__new__(cls)
memo[id(self)] = result
for k, v in self.__dict__.items():
setattr(result, k, deepcopy(v, memo))
return result
    def clone(self):
        children = [child.clone() for child in self.children]
        pred = self.predicate.clone()
        # ElementaryTree.clone was soft-deleted above, so deep-copy instead,
        # and hand the cloned predicate through to the constructor
        return self.__class__(deepcopy(self.elem_tree), children, pred)
def handle_insertion(self, operative, in_place):
"""Check if my elementary tree is the insertion point; if not, recurse
Args:
            operative: a DerivationTree whose elem_tree carries the operation
"""
ThisClass = self.__class__
op_tree = operative.elem_tree
op = op_tree.tree_operation
if self.elem_tree.matches_inspoint(op):
# do the insertting; making new elem tree copies; updating addresses
new_elem_tree, new_op_tree = self.elem_tree.insert(op_tree)
# start making the new composed tree
# create a new clone of the op dtree
if in_place:
new_operative = operative
new_operative.elem_tree = new_op_tree
new_children = self.children
else:
#new_children = [child.clone() for child in self.children]
new_children = deepcopy(self.children)
new_operative = ThisClass.replicate(operative, new_op_tree)
# since it's an insertion, this pred is an argument to the op
new_pred = deepcopy(self.predicate)
# put the predicate into the op
new_operative.set_insertion_argument(new_pred)
# finish off the children
new_children.append(new_operative)
else:
new_elem_tree = deepcopy(self.elem_tree)
new_children = [child.operate(operative, in_place) for child in self.children]
new_pred = deepcopy(self.predicate)
if in_place:
self.elem_tree = new_elem_tree
self.children = new_children
self.predicate = new_pred
return self
else:
return ThisClass(new_elem_tree, new_children)
def handle_substitution(self, operative, in_place=False):
"""Check if my elementary tree is the subpoint; if not, recurse on children
Args:
            operative: a DerivationTree whose elem_tree carries the operation
"""
ThisClass = self.__class__
op_tree = operative.elem_tree
op = op_tree.tree_operation
if self.elem_tree.matches_subpoint(op):
# the purpose of the substitute is to give the op_tree an address
            # that address is the location of its substitution
# this is important for when we want to order our derived children via gorn
new_elem_tree, new_op_tree = self.elem_tree.substitute(op_tree)
##### HANDLE IN-PLACE-TYPE VS FACTORY-TYPE OPERATION
# the thing coming in is copied
if in_place:
new_operative = operative
new_operative.elem_tree = new_op_tree
new_children = self.children
else:
new_children = deepcopy(self.children)#[child.clone() for child in self.children]
new_operative = ThisClass.replicate(operative, new_op_tree)
new_children.append(new_operative)
##### HANDLE LOGIC STUFF
new_pred = deepcopy(self.predicate)#.clone()
# we put it into its correct spot
if self.is_insertion:
pred_arg_index = new_elem_tree.last_index + 1
else:
pred_arg_index = new_elem_tree.last_index
# abusing terms. substitute here is not a tree substitute, but a logic substitute
# find a better term....................
new_pred.substitute(new_operative.predicate, pred_arg_index)
else:
new_elem_tree = deepcopy(self.elem_tree)#.clone()
new_pred = deepcopy(self.predicate)#.clone()
new_children = [child.operate(operative, in_place) for child in self.children]
if in_place:
self.elem_tree = new_elem_tree
self.children = new_children
self.predicate = new_pred
return self
else:
return ThisClass(new_elem_tree, new_children)
def operate(self, operative, in_place=False):
"""handle the possible operations incoming to this derived tree.
Args:
operative: a DerivationTree instance
Returns:
a new DerivationTree that results from operation
Notes:
An intended operation would know what tree it wants to operate on
and where it wants to do it.
E.G:
(NP* (DT a)) knows it wants to attach to the tree (NP (NN dog))
which is substituted into (S (NP) (VP finds) (NP))
The DerivationTree should know that (NP (NN dog)) was substituted into
the first substitution spot.
Temp QUD:
what is the best way to represent this intended operation?
we could have the DT tree know it wants to attach to tree id X
but that tree id X could be in the tree twice (either NP)
it could know the predicate then?
"""
if operative.elem_tree.tree_operation.type == consts.INSERTION:
return self.handle_insertion(operative, in_place)
elif operative.elem_tree.tree_operation.type == consts.SUBSTITUTION:
return self.handle_substitution(operative, in_place)
@classmethod
def replicate(cls, old_inst, new_elem_tree=None, new_children=None, new_pred=None):
""" this is basically clone but allows etrees, childre, and preds rather than just straight cloning """
new_elem_tree = new_elem_tree or deepcopy(old_inst.elem_tree)#.clone()
new_children = new_children or deepcopy(old_inst.children) #[child.clone() for child in old_inst.children]
new_pred = new_pred or deepcopy(old_inst.predicate)#.clone()
        return cls(new_elem_tree, new_children, new_pred)
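# Illustrative composition sketch (added for clarity; assumes the bracketed
# strings parse into trees whose attachment points line up as named):
#   root = DerivationTree.root_tree()
#   s_tree = DerivationTree.from_bracketed("(S (NP) (VP (VBZ runs)))")
#   np_tree = DerivationTree.from_bracketed("(NP (NN dog))")
#   derived = root.operate(s_tree)      # substitute S at the ROOT point
#   derived = derived.operate(np_tree)  # fill the open NP substitution point
#   print(str(derived))                 # in-order lexical yield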
def test():
    # NOTE: tree_enrichment (providing string2cuts) is not imported in this
    # module; it must be made available from elsewhere in baal for test() to run
    parse = """(ROOT(S(NP(NP (DT The) (NN boy))(VP (VBG laying)(S(VP (VB face)(PRT (RP down))(PP (IN on)(NP (DT a) (NN skateboard)))))))(VP (VBZ is)(VP (VBG being)(VP (VBN pushed)(PP (IN along)(NP (DT the) (NN ground)))(PP (IN by)(NP (DT another) (NN boy))))))(. .)))"""
tree_cuts = tree_enrichment.string2cuts(parse)
tree_strings = [cut.save_str() for cut in tree_cuts]
derived_trees = [DerivationTree.from_bracketed(tree_string) for tree_string in tree_strings]
derived_trees[2].elem_tree.insertion_points[0].hlf_symbol = 'g0'
derived_trees[1].elem_tree.tree_operation.target['target_hlf'] = 'g0'
derived_trees[1].elem_tree.tree_operation.target['target_gorn'] = (0,)
#derived_two = [DerivationTree.from_parse_tree(tree) for tree in tree_cuts]
return derived_trees
if __name__ == "__main__":
test()
| braingineer/baal | baal/structures/gist_trees.py | Python | mit | 54,149 | 0.005522 |
from subprocess import Popen
import os
import time
from marmot.representations.representation_generator import RepresentationGenerator
from marmot.experiment.import_utils import mk_tmp_dir
class POSRepresentationGenerator(RepresentationGenerator):
def _get_random_name(self, suffix=''):
return 'tmp_'+suffix+str(time.time())
def _get_pos_tagging(self, src, tagger, par_file, tmp_dir):
# tokenize and add the sentence end marker
# tokenization is done with nltk
tmp_tokenized_name = os.path.join(tmp_dir, self._get_random_name('tok'))
        tmp_tok = open(tmp_tokenized_name, 'w+')
for words in src:
tmp_tok.write('%s\nSentenceEndMarker\n' % '\n'.join([w.encode('utf-8') for w in words]))
tmp_tok.seek(0)
# pass to tree-tagger
tmp_tagged_name = os.path.join(tmp_dir, self._get_random_name('tag'))
        tmp_tagged = open(tmp_tagged_name, 'w+')
tagger_call = Popen([tagger, '-token', par_file], stdin=tmp_tok, stdout=tmp_tagged)
tagger_call.wait()
tmp_tagged.seek(0)
# remove sentence markers, restore sentence structure
output = []
cur_sentence = []
for line in tmp_tagged:
word_tag = line[:-1].decode('utf-8').strip().split('\t')
# each string has to be <word>\t<tag>
# TODO: if it's not of this format, it could be the end of sequence (empty string) or an error
if len(word_tag) != 2:
continue
if word_tag[0] == 'SentenceEndMarker':
output.append(cur_sentence)
cur_sentence = []
else:
cur_sentence.append(word_tag[1])
tmp_tok.close()
tmp_tagged.close()
# delete all temporary files
os.remove(tmp_tokenized_name)
os.remove(tmp_tagged_name)
return output
# <tagger> -- path to tree-tagger
# <parameters> -- parameters of tree-tagger
# <data_label> -- which data should be tagged ('source' or 'target')
def __init__(self, tagger, parameters, data_label, tmp_dir=None):
self.tmp_dir = mk_tmp_dir(tmp_dir)
self.tagger = tagger
self.parameters = parameters
self.data = data_label
def generate(self, data_obj):
data_obj[self.data+'_pos'] = self._get_pos_tagging(data_obj[self.data], self.tagger, self.parameters, self.tmp_dir)
return data_obj
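# Illustrative usage (paths are placeholders for a local TreeTagger install,
# not defaults shipped with marmot):
#   gen = POSRepresentationGenerator(
#       tagger='/opt/treetagger/bin/tree-tagger',
#       parameters='/opt/treetagger/lib/english-utf8.par',
#       data_label='target')
#   data = gen.generate({'target': [[u'a', u'dog', u'barks']]})
#   # adds data['target_pos']: one tag sequence per input sentence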
| qe-team/marmot | marmot/representations/pos_representation_generator.py | Python | isc | 2,447 | 0.002452 |
import sys
import json
import smartfeed.django
base = sys.argv[1]
command = sys.argv[2]
db = smartfeed.django.get_default_model()
if command == 'add':
data = json.loads(sys.argv[3])
id = sys.argv[4] if len(sys.argv) >= 5 else None
db.add(base, data, id=id)
elif command == 'del':
id = sys.argv[3]
db.delete(base, id)
elif command == 'exp':
ttl = int(sys.argv[3])
db.clear_expired(base, ttl)
elif command == 'expany':
ttl = int(sys.argv[3])
db.clear_expired(base, ttl, deleted=False)
else:
raise ValueError('unsupported command')
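# Example invocations (base and item ids are hypothetical):
#   python test.py myfeed add '{"title": "hello"}' item-1
#   python test.py myfeed del item-1
#   python test.py myfeed exp 3600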
| fanout/pysmartfeed | test.py | Python | mit | 542 | 0.01845 |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Copyright (c) 2014, Nicolas P. Rougier
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
# -----------------------------------------------------------------------------
""" This example shows reactive pan-zoom transform (2D). """
import numpy as np
from PIL import Image
from glumpy import app, gl, glm, gloo, data
from glumpy.transforms import PanZoom, Position
vertex = """
attribute vec2 position;
attribute vec2 texcoord;
varying vec2 v_texcoord;
void main()
{
gl_Position = <transform>;
v_texcoord = texcoord;
}
"""
fragment = """
uniform sampler2D texture;
varying vec2 v_texcoord;
void main()
{
gl_FragColor = texture2D(texture, v_texcoord);
// gl_FragColor = <interpolation>;
}
"""
window = app.Window(width=800, height=800)
@window.event
def on_draw(dt):
window.clear()
program.draw(gl.GL_TRIANGLE_STRIP)
@window.event
def on_key_press(key, modifiers):
if key == app.window.key.SPACE:
transform.reset()
program = gloo.Program(vertex, fragment, count=4)
program['position'] = [(-1,-1), (-1,1), (1,-1), (1,1)]
program['texcoord'] = [( 0, 1), ( 0, 0), ( 1, 1), ( 1, 0)]
program['texture'] = data.get("lena.png")
transform = PanZoom(Position("position"), aspect=1)
program['transform'] = transform
window.attach(transform)
app.run()
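# Interaction sketch: once attached to the window, PanZoom pans on mouse drag
# and zooms on scroll; SPACE resets the view via on_key_press above.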
| duyuan11/glumpy | examples/transform-pan-zoom.py | Python | bsd-3-clause | 1,497 | 0.007348 |
#!/usr/bin/env python
# Copyright (c) 2014, Bo Tian <tianbo@gmail.com>
# All rights reserved.
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation and/or
# other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of its contributors may
# be used to endorse or promote products derived from this software without specific
# prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
# SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
# TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
# BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import sys
def main():
args = sys.argv
print str(args)
extract_symbols(args[1], args[2])
def extract_symbols(input_file, output_file):
fin = open(input_file, 'r')
fout = open(output_file, 'w')
for line in fin:
if '|' in line:
cols = line.split('|')
            if '$' not in cols[1]:  # Skip preferred shares, warrants, etc.
symbol = cols[1].replace('.', '-') # e.g., BRK.B -> BRK-B for Yahoo finance.
fout.write(symbol + '\n')
fin.close()
fout.close()
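# Example (hypothetical input line; the script expects the symbol in the
# second pipe-delimited column):
#   1|BRK.B|Berkshire Hathaway Inc. Class B
# would be written to the output file as "BRK-B".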
if __name__ == "__main__":
main()
| btian/market_correlator | extract_symbols.py | Python | bsd-3-clause | 2,097 | 0.013829 |
# encoding: utf-8
from __future__ import absolute_import, division, print_function, unicode_literals
from django.contrib.admin.options import ModelAdmin, csrf_protect_m
from django.contrib.admin.views.main import SEARCH_VAR, ChangeList
from django.core.exceptions import PermissionDenied
from django.core.paginator import InvalidPage, Paginator
from django.shortcuts import render
from django.utils.encoding import force_text
from django.utils.translation import ungettext
from haystack import connections
from haystack.query import SearchQuerySet
from haystack.utils import get_model_ct_tuple
def list_max_show_all(changelist):
"""
Returns the maximum amount of results a changelist can have for the
"Show all" link to be displayed in a manner compatible with both Django
1.4 and 1.3. See Django ticket #15997 for details.
"""
try:
# This import is available in Django 1.3 and below
from django.contrib.admin.views.main import MAX_SHOW_ALL_ALLOWED
return MAX_SHOW_ALL_ALLOWED
except ImportError:
return changelist.list_max_show_all
class SearchChangeList(ChangeList):
def __init__(self, **kwargs):
self.haystack_connection = kwargs.pop('haystack_connection', 'default')
super(SearchChangeList, self).__init__(**kwargs)
def get_results(self, request):
        if SEARCH_VAR not in request.GET:
return super(SearchChangeList, self).get_results(request)
# Note that pagination is 0-based, not 1-based.
sqs = SearchQuerySet(self.haystack_connection).models(self.model).auto_query(request.GET[SEARCH_VAR]).load_all()
paginator = Paginator(sqs, self.list_per_page)
# Get the number of objects, with admin filters applied.
result_count = paginator.count
full_result_count = SearchQuerySet(self.haystack_connection).models(self.model).all().count()
can_show_all = result_count <= list_max_show_all(self)
multi_page = result_count > self.list_per_page
# Get the list of objects to display on this page.
try:
result_list = paginator.page(self.page_num + 1).object_list
# Grab just the Django models, since that's what everything else is
# expecting.
result_list = [result.object for result in result_list]
except InvalidPage:
result_list = ()
self.result_count = result_count
self.full_result_count = full_result_count
self.result_list = result_list
self.can_show_all = can_show_all
self.multi_page = multi_page
self.paginator = paginator
class SearchModelAdminMixin(object):
# haystack connection to use for searching
haystack_connection = 'default'
@csrf_protect_m
def changelist_view(self, request, extra_context=None):
if not self.has_change_permission(request, None):
raise PermissionDenied
        if SEARCH_VAR not in request.GET:
# Do the usual song and dance.
return super(SearchModelAdminMixin, self).changelist_view(request, extra_context)
# Do a search of just this model and populate a Changelist with the
# returned bits.
        if self.model not in connections[self.haystack_connection].get_unified_index().get_indexed_models():
# Oops. That model isn't being indexed. Return the usual
# behavior instead.
return super(SearchModelAdminMixin, self).changelist_view(request, extra_context)
# So. Much. Boilerplate.
# Why copy-paste a few lines when you can copy-paste TONS of lines?
list_display = list(self.list_display)
kwargs = {
'haystack_connection': self.haystack_connection,
'request': request,
'model': self.model,
'list_display': list_display,
'list_display_links': self.list_display_links,
'list_filter': self.list_filter,
'date_hierarchy': self.date_hierarchy,
'search_fields': self.search_fields,
'list_select_related': self.list_select_related,
'list_per_page': self.list_per_page,
'list_editable': self.list_editable,
'model_admin': self
}
# Django 1.4 compatibility.
if hasattr(self, 'list_max_show_all'):
kwargs['list_max_show_all'] = self.list_max_show_all
changelist = SearchChangeList(**kwargs)
formset = changelist.formset = None
media = self.media
# Build the action form and populate it with available actions.
# Check actions to see if any are available on this changelist
actions = self.get_actions(request)
if actions:
action_form = self.action_form(auto_id=None)
action_form.fields['action'].choices = self.get_action_choices(request)
else:
action_form = None
selection_note = ungettext('0 of %(count)d selected',
'of %(count)d selected', len(changelist.result_list))
selection_note_all = ungettext('%(total_count)s selected',
'All %(total_count)s selected', changelist.result_count)
context = {
'module_name': force_text(self.model._meta.verbose_name_plural),
'selection_note': selection_note % {'count': len(changelist.result_list)},
'selection_note_all': selection_note_all % {'total_count': changelist.result_count},
'title': changelist.title,
'is_popup': changelist.is_popup,
'cl': changelist,
'media': media,
'has_add_permission': self.has_add_permission(request),
# More Django 1.4 compatibility
'root_path': getattr(self.admin_site, 'root_path', None),
'app_label': self.model._meta.app_label,
'action_form': action_form,
'actions_on_top': self.actions_on_top,
'actions_on_bottom': self.actions_on_bottom,
'actions_selection_counter': getattr(self, 'actions_selection_counter', 0),
}
context.update(extra_context or {})
request.current_app = self.admin_site.name
app_name, model_name = get_model_ct_tuple(self.model)
return render(request, self.change_list_template or [
'admin/%s/%s/change_list.html' % (app_name, model_name),
'admin/%s/change_list.html' % app_name,
'admin/change_list.html'
], context)
class SearchModelAdmin(SearchModelAdminMixin, ModelAdmin):
pass
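# Illustrative registration in a project's admin.py (model and field names
# are hypothetical):
#   from django.contrib import admin
#   from haystack.admin import SearchModelAdmin
#   from myapp.models import Note
#
#   class NoteAdmin(SearchModelAdmin):
#       haystack_connection = 'default'
#       list_display = ('title', 'created')
#
#   admin.site.register(Note, NoteAdmin)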
| steventimberman/masterDebater | venv/lib/python2.7/site-packages/haystack/admin.py | Python | mit | 6,567 | 0.002284 |
# -*- coding: utf-8 -*-
from flask import Blueprint
from ..models import Permission
main = Blueprint('main', __name__)
from . import views, errors, my_test
# app_context_processor: make the Permission class available to all templates
@main.app_context_processor
def inject_permissions():
return dict(Permission=Permission)
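# Example template check enabled by the processor above (Jinja2; the
# permission attribute name is illustrative):
#   {% if current_user.can(Permission.WRITE_ARTICLES) %} ... {% endif %}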
| yelongyu/chihu | app/main/__init__.py | Python | gpl-3.0 | 338 | 0.002959 |
# Copyright 2014 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from osc_lib import utils
DEFAULT_METRICS_API_VERSION = '1'
API_VERSION_OPTION = 'os_metrics_api_version'
API_NAME = "metric"
API_VERSIONS = {
"1": "gnocchiclient.v1.client.Client",
}
def make_client(instance):
"""Returns a metrics service client."""
version = instance._api_version[API_NAME]
try:
version = int(version)
except ValueError:
version = float(version)
gnocchi_client = utils.get_client_class(
API_NAME,
version,
API_VERSIONS)
# NOTE(sileht): ensure setup of the session is done
instance.setup_auth()
return gnocchi_client(session=instance.session,
adapter_options={
'interface': instance.interface,
'region_name': instance.region_name
})
def build_option_parser(parser):
"""Hook to add global options."""
parser.add_argument(
'--os-metrics-api-version',
metavar='<metrics-api-version>',
default=utils.env(
'OS_METRICS_API_VERSION',
default=DEFAULT_METRICS_API_VERSION),
help=('Metrics API version, default=' +
DEFAULT_METRICS_API_VERSION +
' (Env: OS_METRICS_API_VERSION)'))
return parser
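# Example resolution (illustrative): with --os-metrics-api-version 1, the
# plugin maps "1" -> gnocchiclient.v1.client.Client via API_VERSIONS above.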
| sileht/python-gnocchiclient | gnocchiclient/osc.py | Python | apache-2.0 | 1,874 | 0 |
#!/usr/bin/env python
#
# Copyright (c) 2016, The OpenThread Authors.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
import time
import unittest
import config
import node
COMMISSIONER = 1
LEADER = 2
ROUTER1 = 3
ED1 = 4
class Cert_9_2_13_EnergyScan(unittest.TestCase):
def setUp(self):
self.simulator = config.create_default_simulator()
self.nodes = {}
for i in range(1,5):
self.nodes[i] = node.Node(i, (i == ED1), simulator=self.simulator)
self.nodes[COMMISSIONER].set_panid(0xface)
self.nodes[COMMISSIONER].set_mode('rsdn')
self.nodes[COMMISSIONER].add_whitelist(self.nodes[LEADER].get_addr64())
self.nodes[COMMISSIONER].enable_whitelist()
self.nodes[COMMISSIONER].set_router_selection_jitter(1)
self.nodes[LEADER].set_panid(0xface)
self.nodes[LEADER].set_mode('rsdn')
self.nodes[LEADER].add_whitelist(self.nodes[COMMISSIONER].get_addr64())
self.nodes[LEADER].add_whitelist(self.nodes[ROUTER1].get_addr64())
self.nodes[LEADER].enable_whitelist()
self.nodes[ROUTER1].set_panid(0xface)
self.nodes[ROUTER1].set_mode('rsdn')
self.nodes[ROUTER1].add_whitelist(self.nodes[LEADER].get_addr64())
self.nodes[ROUTER1].add_whitelist(self.nodes[ED1].get_addr64())
self.nodes[ROUTER1].enable_whitelist()
self.nodes[ROUTER1].set_router_selection_jitter(1)
self.nodes[ED1].set_panid(0xface)
self.nodes[ED1].set_mode('rs')
self.nodes[ED1].add_whitelist(self.nodes[ROUTER1].get_addr64())
self.nodes[ED1].enable_whitelist()
def tearDown(self):
for node in list(self.nodes.values()):
node.stop()
del self.nodes
del self.simulator
def test(self):
self.nodes[LEADER].start()
self.simulator.go(5)
self.assertEqual(self.nodes[LEADER].get_state(), 'leader')
self.nodes[COMMISSIONER].start()
self.simulator.go(5)
self.assertEqual(self.nodes[COMMISSIONER].get_state(), 'router')
self.nodes[COMMISSIONER].commissioner_start()
self.simulator.go(3)
self.nodes[ROUTER1].start()
self.simulator.go(5)
self.assertEqual(self.nodes[ROUTER1].get_state(), 'router')
self.nodes[ED1].start()
self.simulator.go(5)
self.assertEqual(self.nodes[ED1].get_state(), 'child')
ipaddrs = self.nodes[ROUTER1].get_addrs()
for ipaddr in ipaddrs:
if ipaddr[0:4] != 'fe80':
break
self.assertTrue(self.nodes[COMMISSIONER].ping(ipaddr))
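        # energy_scan arguments (per this suite's node.py wrapper, assumed):
        # channel mask, count, period, scan duration (ms), destination address.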
self.nodes[COMMISSIONER].energy_scan(0x50000, 0x02, 0x20, 0x3e8, ipaddr)
ipaddrs = self.nodes[ED1].get_addrs()
for ipaddr in ipaddrs:
if ipaddr[0:4] != 'fe80':
break
self.assertTrue(self.nodes[COMMISSIONER].ping(ipaddr))
self.nodes[COMMISSIONER].energy_scan(0x50000, 0x02, 0x20, 0x3e8, ipaddr)
self.nodes[COMMISSIONER].energy_scan(0x50000, 0x02, 0x20, 0x3e8, 'ff33:0040:fdde:ad00:beef:0:0:1')
self.assertTrue(self.nodes[COMMISSIONER].ping(ipaddr))
if __name__ == '__main__':
unittest.main()
| georgecpr/openthread | tests/scripts/thread-cert/Cert_9_2_13_EnergyScan.py | Python | bsd-3-clause | 4,676 | 0.001283 |
# screensaverpause - pauses Exaile playback on screensaver activation
# Copyright (C) 2009-2011 Johannes Sasongko <sasongko@gmail.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#
# The developers of the Exaile media player hereby grant permission
# for non-GPL compatible GStreamer and Exaile plugins to be used and
# distributed together with GStreamer and Exaile. This permission is
# above and beyond the permissions granted by the GPL license by which
# Exaile is covered. If you modify this code, you may extend this
# exception to your version of the code, but you are not obligated to
# do so. If you do not wish to do so, delete this exception statement
# from your version.
import dbus, gtk
from xl import event, player, settings
SERVICES = [
dict( # GNOME
bus_name='org.gnome.ScreenSaver',
path='/org/gnome/ScreenSaver',
dbus_interface='org.gnome.ScreenSaver',
),
dict( # KDE
bus_name='org.freedesktop.ScreenSaver',
path='/',
dbus_interface='org.freedesktop.ScreenSaver',
),
]
import prefs
def get_preferences_pane():
return prefs
matches = set()
bus = None
was_playing = None
def screensaver_active_changed(is_active):
global was_playing
if is_active:
was_playing = player.PLAYER.is_playing()
player.PLAYER.pause()
elif was_playing and settings.get_option("screensaverpause/unpause", 0):
player.PLAYER.unpause()
def enable(exaile):
if exaile.loading:
event.add_callback(_enable, 'exaile_loaded')
else:
_enable()
def _enable(*a):
global bus
bus = dbus.SessionBus()
for service in SERVICES:
matches.add(bus.add_signal_receiver(screensaver_active_changed,
signal_name='ActiveChanged', **service))
def disable(exaile):
if bus is None: return
for match in frozenset(matches):
match.remove()
matches.remove(match)
def test():
import glib, gobject
gobject.threads_init()
import dbus.mainloop.glib as dbgl
dbgl.DBusGMainLoop(set_as_default=True)
global bus
bus = dbus.SessionBus()
for service in SERVICES:
try:
proxy = bus.get_object(service['bus_name'], service['path'],
follow_name_owner_changes=True)
except dbus.DBusException:
continue
break
else:
return None
assert proxy
interface = dbus.Interface(proxy, service['dbus_interface'])
mainloop = glib.MainLoop()
def active_changed(new_value):
if not new_value:
mainloop.quit()
interface.connect_to_signal('ActiveChanged', screensaver_active_changed)
# For some reason Lock never returns.
interface.Lock(ignore_reply=True)
mainloop.run()
if __name__ == '__main__':
test()
# vi: et sts=4 sw=4 tw=80
| eri-trabiccolo/exaile | plugins/screensaverpause/__init__.py | Python | gpl-2.0 | 3,410 | 0.005279 |
#!/usr/bin/env python
# Copyright (c) 2011 Stanford University
#
# Permission to use, copy, modify, and distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR(S) DISCLAIM ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL AUTHORS BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""Generates data for a recovery performance graph.
Measures recovery time as a function of partition size for a
single recovery Master and 3 different object sizes.
"""
from __future__ import division, print_function
from common import *
import config
import recovery
import subprocess
dat = open('%s/recovery/objectsize_scale.data' % top_path, 'w', 1)
numBackups = len(config.hosts)
for objectSize in [128, 256, 1024]:
print('# objectSize:', objectSize, file=dat)
    print('# Data sourced by %d backups' % numBackups, file=dat)
for partitionSize in range(1, 1050, 100):
args = {}
args['num_servers'] = numBackups
args['backups_per_server'] = 1
args['num_partitions'] = 1
args['object_size'] = objectSize
args['replicas'] = 3
args['master_ram'] = 8000
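        # 38 bytes is presumably the per-object log overhead, so each object
        # occupies (objectSize + 38) bytes; 2**20 / that gives objects per MB.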
numObjectsPerMb = 2**20 / (objectSize + 38)
args['num_objects'] = int(numObjectsPerMb * partitionSize)
print('Running with %d backups' % numBackups)
print('Running with objects of size %d for a %d MB partition' %
(objectSize, partitionSize))
r = recovery.insist(**args)
print('->', r['ns'] / 1e6, 'ms', '(run %s)' % r['run'])
print(partitionSize, r['ns'] / 1e6, file=dat)
print(file=dat)
print(file=dat)
| DavidLi2010/ramcloud | scripts/objectsize_scale.py | Python | isc | 2,086 | 0 |
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "Sharing.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
| XDSETeamA/XD_SE_TeamA | team9/1/Sharing/manage.py | Python | mit | 250 | 0 |
# Copyright 2012, 2013 Canonical Ltd. This software is licensed under the
# GNU Affero General Public License version 3 (see the file LICENSE).
"""Test `maasserver.preseed` and related bits and bobs."""
from __future__ import (
absolute_import,
print_function,
unicode_literals,
)
str = None
__metaclass__ = type
__all__ = []
import httplib
import os
from pipes import quote
from urlparse import urlparse
from django.conf import settings
from django.core.urlresolvers import reverse
from maasserver.enum import (
ARCHITECTURE,
DISTRO_SERIES,
NODE_STATUS,
NODEGROUPINTERFACE_MANAGEMENT,
PRESEED_TYPE,
)
from maasserver.models import Config
from maasserver.preseed import (
compose_enlistment_preseed_url,
compose_preseed_url,
GENERIC_FILENAME,
get_curtin_config,
get_curtin_context,
get_curtin_installer_url,
get_curtin_userdata,
get_enlist_preseed,
get_hostname_and_path,
get_node_preseed_context,
get_preseed,
get_preseed_context,
get_preseed_filenames,
get_preseed_template,
get_preseed_type_for,
load_preseed_template,
PreseedTemplate,
render_enlistment_preseed,
render_preseed,
split_subarch,
TemplateNotFoundError,
)
from maasserver.testing.factory import factory
from maasserver.testing.testcase import MAASServerTestCase
from maasserver.utils import map_enum
from maastesting.matchers import ContainsAll
from testtools.matchers import (
AllMatch,
Contains,
IsInstance,
MatchesAll,
Not,
StartsWith,
)
import yaml
class TestSplitSubArch(MAASServerTestCase):
"""Tests for `split_subarch`."""
def test_split_subarch_returns_list(self):
self.assertEqual(['amd64'], split_subarch('amd64'))
def test_split_subarch_splits_sub_architecture(self):
self.assertEqual(['amd64', 'test'], split_subarch('amd64/test'))
class TestGetHostnameAndPath(MAASServerTestCase):
"""Tests for `get_hostname_and_path`."""
def test_get_hostname_and_path(self):
input_and_results = [
('http://name.domain/my/path', ('name.domain', '/my/path')),
('https://domain/path', ('domain', '/path')),
('http://domain/', ('domain', '/')),
('http://domain', ('domain', '')),
]
inputs = [input for input, _ in input_and_results]
results = [result for _, result in input_and_results]
self.assertEqual(results, map(get_hostname_and_path, inputs))
class TestGetPreseedFilenames(MAASServerTestCase):
"""Tests for `get_preseed_filenames`."""
def test_get_preseed_filenames_returns_filenames(self):
hostname = factory.getRandomString()
prefix = factory.getRandomString()
release = factory.getRandomString()
node = factory.make_node(hostname=hostname)
arch, subarch = node.architecture.split('/')
self.assertSequenceEqual(
[
'%s_%s_%s_%s_%s' % (prefix, arch, subarch, release, hostname),
'%s_%s_%s_%s' % (prefix, arch, subarch, release),
'%s_%s_%s' % (prefix, arch, subarch),
'%s_%s' % (prefix, arch),
'%s' % prefix,
'generic',
],
list(get_preseed_filenames(node, prefix, release, default=True)))
def test_get_preseed_filenames_if_node_is_None(self):
release = factory.getRandomString()
prefix = factory.getRandomString()
self.assertSequenceEqual(
[
'%s_%s' % (prefix, release),
'%s' % prefix,
],
list(get_preseed_filenames(None, prefix, release)))
def test_get_preseed_filenames_supports_empty_prefix(self):
hostname = factory.getRandomString()
release = factory.getRandomString()
node = factory.make_node(hostname=hostname)
arch, subarch = node.architecture.split('/')
self.assertSequenceEqual(
[
'%s_%s_%s_%s' % (arch, subarch, release, hostname),
'%s_%s_%s' % (arch, subarch, release),
'%s_%s' % (arch, subarch),
'%s' % arch,
],
list(get_preseed_filenames(node, '', release)))
def test_get_preseed_filenames_returns_list_without_default(self):
# If default=False is passed to get_preseed_filenames, the
# returned list won't include the default template name as a
# last resort template.
hostname = factory.getRandomString()
prefix = factory.getRandomString()
release = factory.getRandomString()
node = factory.make_node(hostname=hostname)
        self.assertSequenceEqual(
            prefix,
            list(get_preseed_filenames(
                node, prefix, release, default=False))[-1])
def test_get_preseed_filenames_returns_list_with_default(self):
# If default=True is passed to get_preseed_filenames, the
# returned list will include the default template name as a
# last resort template.
hostname = factory.getRandomString()
prefix = factory.getRandomString()
release = factory.getRandomString()
node = factory.make_node(hostname=hostname)
        self.assertSequenceEqual(
            'generic',
            list(get_preseed_filenames(
                node, prefix, release, default=True))[-1])
class TestConfiguration(MAASServerTestCase):
"""Test for correct configuration of the preseed component."""
def test_setting_defined(self):
self.assertThat(
settings.PRESEED_TEMPLATE_LOCATIONS,
AllMatch(IsInstance(unicode)))
class TestGetPreseedTemplate(MAASServerTestCase):
"""Tests for `get_preseed_template`."""
def test_get_preseed_template_returns_None_if_no_template_locations(self):
# get_preseed_template() returns None when no template locations are
# defined.
self.patch(settings, "PRESEED_TEMPLATE_LOCATIONS", [])
self.assertEqual(
(None, None),
get_preseed_template(
(factory.getRandomString(), factory.getRandomString())))
def test_get_preseed_template_returns_None_when_no_filenames(self):
# get_preseed_template() returns None when no filenames are passed in.
self.patch(settings, "PRESEED_TEMPLATE_LOCATIONS", [self.make_dir()])
self.assertEqual((None, None), get_preseed_template(()))
def test_get_preseed_template_find_template_in_first_location(self):
template_content = factory.getRandomString()
template_path = self.make_file(contents=template_content)
template_filename = os.path.basename(template_path)
locations = [
os.path.dirname(template_path),
self.make_dir(),
]
self.patch(settings, "PRESEED_TEMPLATE_LOCATIONS", locations)
self.assertEqual(
(template_path, template_content),
get_preseed_template([template_filename]))
def test_get_preseed_template_find_template_in_last_location(self):
template_content = factory.getRandomString()
template_path = self.make_file(contents=template_content)
template_filename = os.path.basename(template_path)
locations = [
self.make_dir(),
os.path.dirname(template_path),
]
self.patch(settings, "PRESEED_TEMPLATE_LOCATIONS", locations)
self.assertEqual(
(template_path, template_content),
get_preseed_template([template_filename]))
class TestLoadPreseedTemplate(MAASServerTestCase):
"""Tests for `load_preseed_template`."""
def setUp(self):
super(TestLoadPreseedTemplate, self).setUp()
self.location = self.make_dir()
self.patch(
settings, "PRESEED_TEMPLATE_LOCATIONS", [self.location])
def create_template(self, location, name, content=None):
# Create a tempita template in the given `self.location` with the
# given `name`. If content is not provided, a random content
# will be put inside the template.
path = os.path.join(self.location, name)
rendered_content = None
if content is None:
rendered_content = factory.getRandomString()
content = b'{{def stuff}}%s{{enddef}}{{stuff}}' % rendered_content
with open(path, "wb") as outf:
outf.write(content)
return rendered_content
def test_load_preseed_template_returns_PreseedTemplate(self):
name = factory.getRandomString()
self.create_template(self.location, name)
node = factory.make_node()
template = load_preseed_template(node, name)
self.assertIsInstance(template, PreseedTemplate)
def test_load_preseed_template_raises_if_no_template(self):
node = factory.make_node()
unknown_template_name = factory.getRandomString()
self.assertRaises(
TemplateNotFoundError, load_preseed_template, node,
unknown_template_name)
def test_load_preseed_template_generic_lookup(self):
# The template lookup method ends up picking up a template named
# 'generic' if no more specific template exist.
content = self.create_template(self.location, GENERIC_FILENAME)
node = factory.make_node(hostname=factory.getRandomString())
template = load_preseed_template(node, factory.getRandomString())
self.assertEqual(content, template.substitute())
def test_load_preseed_template_prefix_lookup(self):
# 2nd last in the hierarchy is a template named 'prefix'.
prefix = factory.getRandomString()
# Create the generic template. This one will be ignored due to the
# presence of a more specific template.
self.create_template(self.location, GENERIC_FILENAME)
# Create the 'prefix' template. This is the one which will be
# picked up.
content = self.create_template(self.location, prefix)
node = factory.make_node(hostname=factory.getRandomString())
template = load_preseed_template(node, prefix)
self.assertEqual(content, template.substitute())
def test_load_preseed_template_node_specific_lookup(self):
# At the top of the lookup hierarchy is a template specific to this
# node. It will be used first if it's present.
prefix = factory.getRandomString()
release = factory.getRandomString()
# Create the generic and 'prefix' templates. They will be ignored
# due to the presence of a more specific template.
self.create_template(self.location, GENERIC_FILENAME)
self.create_template(self.location, prefix)
node = factory.make_node(hostname=factory.getRandomString())
node_template_name = "%s_%s_%s_%s" % (
prefix, node.architecture.replace('/', '_'),
release, node.hostname)
# Create the node-specific template.
content = self.create_template(self.location, node_template_name)
template = load_preseed_template(node, prefix, release)
self.assertEqual(content, template.substitute())
def test_load_preseed_template_with_inherits(self):
# A preseed file can "inherit" from another file.
prefix = factory.getRandomString()
# Create preseed template.
master_template_name = factory.getRandomString()
preseed_content = '{{inherit "%s"}}' % master_template_name
self.create_template(self.location, prefix, preseed_content)
master_content = self.create_template(
self.location, master_template_name)
node = factory.make_node()
template = load_preseed_template(node, prefix)
self.assertEqual(master_content, template.substitute())
def test_load_preseed_template_parent_lookup_doesnt_include_default(self):
# The lookup for parent templates does not include the default
# 'generic' file.
prefix = factory.getRandomString()
# Create 'generic' template. It won't be used because the
# lookup for parent templates does not use the 'generic' template.
self.create_template(self.location, GENERIC_FILENAME)
unknown_master_template_name = factory.getRandomString()
# Create preseed template.
preseed_content = '{{inherit "%s"}}' % unknown_master_template_name
self.create_template(self.location, prefix, preseed_content)
node = factory.make_node()
template = load_preseed_template(node, prefix)
self.assertRaises(
TemplateNotFoundError, template.substitute)
def make_url(name):
"""Create a fake archive URL."""
return "http://%s.example.com/%s/" % (
factory.make_name(name),
factory.make_name('path'),
)
class TestPreseedContext(MAASServerTestCase):
"""Tests for `get_preseed_context`."""
def test_get_preseed_context_contains_keys(self):
release = factory.getRandomString()
nodegroup = factory.make_node_group(maas_url=factory.getRandomString())
context = get_preseed_context(release, nodegroup)
self.assertItemsEqual(
['release', 'metadata_enlist_url', 'server_host', 'server_url',
'cluster_host', 'main_archive_hostname', 'main_archive_directory',
'ports_archive_hostname', 'ports_archive_directory',
'http_proxy'],
context)
def test_get_preseed_context_archive_refs(self):
# urlparse lowercases the hostnames. That should not have any
# impact but for testing, create lower-case hostnames.
main_archive = make_url('main_archive')
ports_archive = make_url('ports_archive')
Config.objects.set_config('main_archive', main_archive)
Config.objects.set_config('ports_archive', ports_archive)
nodegroup = factory.make_node_group(maas_url=factory.getRandomString())
        context = get_preseed_context(factory.getRandomString(), nodegroup)
parsed_main_archive = urlparse(main_archive)
parsed_ports_archive = urlparse(ports_archive)
self.assertEqual(
(
parsed_main_archive.hostname,
parsed_main_archive.path,
parsed_ports_archive.hostname,
parsed_ports_archive.path,
),
(
context['main_archive_hostname'],
context['main_archive_directory'],
context['ports_archive_hostname'],
context['ports_archive_directory'],
))
def test_preseed_context_cluster_host(self):
# The cluster_host context variable is derived from the nodegroup.
release = factory.getRandomString()
nodegroup = factory.make_node_group(maas_url=factory.getRandomString())
context = get_preseed_context(release, nodegroup)
self.assertIsNotNone(context["cluster_host"])
self.assertEqual(
nodegroup.get_managed_interface().ip,
context["cluster_host"])
def test_preseed_context_cluster_host_if_unmanaged(self):
# If the nodegroup has no managed interface recorded, the cluster_host
# context variable is still present and derived from the nodegroup.
release = factory.getRandomString()
nodegroup = factory.make_node_group(maas_url=factory.getRandomString())
for interface in nodegroup.nodegroupinterface_set.all():
interface.management = NODEGROUPINTERFACE_MANAGEMENT.UNMANAGED
interface.save()
context = get_preseed_context(release, nodegroup)
self.assertIsNotNone(context["cluster_host"])
self.assertEqual(
nodegroup.get_any_interface().ip,
context["cluster_host"])
def test_preseed_context_null_cluster_host_if_does_not_exist(self):
# If there's no nodegroup, the cluster_host context variable is
# present, but None.
release = factory.getRandomString()
context = get_preseed_context(release)
self.assertIsNone(context["cluster_host"])
class TestNodePreseedContext(MAASServerTestCase):
"""Tests for `get_node_preseed_context`."""
def test_get_node_preseed_context_contains_keys(self):
node = factory.make_node()
release = factory.getRandomString()
context = get_node_preseed_context(node, release)
self.assertItemsEqual(
['node', 'preseed_data', 'node_disable_pxe_url',
'node_disable_pxe_data',
],
context)
class TestPreseedTemplate(MAASServerTestCase):
"""Tests for class:`PreseedTemplate`."""
def test_escape_shell(self):
template = PreseedTemplate("{{var|escape.shell}}")
var = "$ ! ()"
observed = template.substitute(var=var)
self.assertEqual(quote(var), observed)
class TestRenderPreseed(MAASServerTestCase):
"""Tests for `render_preseed`.
These tests check that the templates render (i.e. that no variable is
missing).
"""
# Create a scenario for each possible value of PRESEED_TYPE except
# enlistment. Those have their own test case.
scenarios = [
(name, {'preseed': value})
for name, value in map_enum(PRESEED_TYPE).items()
if not value.startswith('enlist')
]
def test_render_preseed(self):
node = factory.make_node()
preseed = render_preseed(node, self.preseed, "precise")
# The test really is that the preseed is rendered without an
# error.
self.assertIsInstance(preseed, bytes)
def test_get_preseed_uses_nodegroup_maas_url(self):
ng_url = 'http://%s' % factory.make_hostname()
ng = factory.make_node_group(maas_url=ng_url)
maas_url = 'http://%s' % factory.make_hostname()
node = factory.make_node(
nodegroup=ng, status=NODE_STATUS.COMMISSIONING)
self.patch(settings, 'DEFAULT_MAAS_URL', maas_url)
preseed = render_preseed(node, self.preseed, "precise")
self.assertThat(
preseed, MatchesAll(*[Contains(ng_url), Not(Contains(maas_url))]))
class TestRenderEnlistmentPreseed(MAASServerTestCase):
"""Tests for `render_enlistment_preseed`."""
# Create a scenario for each possible value of PRESEED_TYPE for
# enlistment. The rest have their own test case.
scenarios = [
(name, {'preseed': value})
for name, value in map_enum(PRESEED_TYPE).items()
if value.startswith('enlist')
]
def test_render_enlistment_preseed(self):
preseed = render_enlistment_preseed(self.preseed, "precise")
# The test really is that the preseed is rendered without an
# error.
self.assertIsInstance(preseed, bytes)
def test_render_enlistment_preseed_valid_yaml(self):
preseed = render_enlistment_preseed(self.preseed, "precise")
self.assertTrue(yaml.safe_load(preseed))
def test_get_preseed_uses_nodegroup_maas_url(self):
ng_url = 'http://%s' % factory.make_hostname()
maas_url = 'http://%s' % factory.make_hostname()
self.patch(settings, 'DEFAULT_MAAS_URL', maas_url)
nodegroup = factory.make_node_group(maas_url=ng_url)
preseed = render_enlistment_preseed(
self.preseed, "precise", nodegroup=nodegroup)
self.assertThat(
preseed, MatchesAll(*[Contains(ng_url), Not(Contains(maas_url))]))
class TestGetCurtinUserData(MAASServerTestCase):
"""Tests for `get_curtin_userdata`."""
def test_get_curtin_userdata(self):
node = factory.make_node()
node.use_fastpath_installer()
user_data = get_curtin_userdata(node)
# Just check that the user data looks good.
self.assertIn("PREFIX='curtin'", user_data)
class TestCurtinUtilities(MAASServerTestCase):
"""Tests for the curtin-related utilities."""
def test_get_curtin_config(self):
node = factory.make_node()
node.use_fastpath_installer()
config = get_curtin_config(node)
self.assertThat(
config,
ContainsAll(
[
'mode: reboot',
"debconf_selections:",
]
))
def test_get_curtin_context(self):
node = factory.make_node()
node.use_fastpath_installer()
context = get_curtin_context(node)
self.assertItemsEqual(['curtin_preseed'], context)
self.assertIn('cloud-init', context['curtin_preseed'])
def test_get_curtin_installer_url(self):
# Exclude DISTRO_SERIES.default. It's a special value that defers
# to a run-time setting which we don't provide in this test.
series = factory.getRandomEnum(
DISTRO_SERIES, but_not=DISTRO_SERIES.default)
arch = factory.getRandomEnum(ARCHITECTURE)
node = factory.make_node(architecture=arch, distro_series=series)
installer_url = get_curtin_installer_url(node)
self.assertEqual(
'http://%s/MAAS/static/images/%s/%s/xinstall/root.tar.gz' % (
node.nodegroup.get_managed_interface().ip, arch, series),
installer_url)
def test_get_preseed_type_for(self):
normal = factory.make_node()
normal.use_traditional_installer()
fpi = factory.make_node()
fpi.use_fastpath_installer()
self.assertEqual(PRESEED_TYPE.DEFAULT, get_preseed_type_for(normal))
self.assertEqual(PRESEED_TYPE.CURTIN, get_preseed_type_for(fpi))
class TestRenderPreseedArchives(MAASServerTestCase):
"""Test that the default preseed contains the default mirrors."""
def test_render_preseed_uses_default_archives_intel(self):
nodes = [
factory.make_node(architecture=ARCHITECTURE.i386),
factory.make_node(architecture=ARCHITECTURE.amd64),
]
default_snippets = [
"d-i mirror/http/hostname string archive.ubuntu.com",
"d-i mirror/http/directory string /ubuntu",
]
for node in nodes:
preseed = render_preseed(node, PRESEED_TYPE.DEFAULT, "precise")
self.assertThat(preseed, ContainsAll(default_snippets))
def test_render_preseed_uses_default_archives_arm(self):
node = factory.make_node(architecture=ARCHITECTURE.armhf_highbank)
default_snippets = [
"d-i mirror/http/hostname string ports.ubuntu.com",
"d-i mirror/http/directory string /ubuntu-ports",
]
preseed = render_preseed(node, PRESEED_TYPE.DEFAULT, "precise")
self.assertThat(preseed, ContainsAll(default_snippets))
class TestPreseedProxy(MAASServerTestCase):
def test_preseed_uses_default_proxy(self):
server_host = factory.make_hostname()
url = 'http://%s:%d/%s' % (
server_host, factory.getRandomPort(), factory.getRandomString())
self.patch(settings, 'DEFAULT_MAAS_URL', url)
expected_proxy_statement = (
"mirror/http/proxy string http://%s:8000" % server_host)
preseed = render_preseed(
factory.make_node(), PRESEED_TYPE.DEFAULT, "precise")
self.assertIn(expected_proxy_statement, preseed)
def test_preseed_uses_configured_proxy(self):
http_proxy = 'http://%s:%d/%s' % (
factory.getRandomString(), factory.getRandomPort(),
factory.getRandomString())
Config.objects.set_config('http_proxy', http_proxy)
expected_proxy_statement = (
"mirror/http/proxy string %s" % http_proxy)
preseed = render_preseed(
factory.make_node(), PRESEED_TYPE.DEFAULT, "precise")
self.assertIn(expected_proxy_statement, preseed)
class TestPreseedMethods(MAASServerTestCase):
"""Tests for `get_enlist_preseed` and `get_preseed`.
These tests check that the preseed templates render and 'look right'.
"""
def test_get_preseed_returns_default_preseed(self):
node = factory.make_node()
preseed = get_preseed(node)
self.assertIn('preseed/late_command', preseed)
def test_get_preseed_returns_curtin_preseed(self):
node = factory.make_node()
node.use_fastpath_installer()
preseed = get_preseed(node)
curtin_url = reverse('curtin-metadata')
self.assertIn(curtin_url, preseed)
def test_get_enlist_preseed_returns_enlist_preseed(self):
preseed = get_enlist_preseed()
self.assertTrue(preseed.startswith('#cloud-config'))
def test_get_preseed_returns_commissioning_preseed(self):
node = factory.make_node(status=NODE_STATUS.COMMISSIONING)
preseed = get_preseed(node)
self.assertIn('#cloud-config', preseed)
class TestPreseedURLs(MAASServerTestCase):
"""Tests for functions that return preseed URLs."""
def test_compose_enlistment_preseed_url_links_to_enlistment_preseed(self):
response = self.client.get(compose_enlistment_preseed_url())
self.assertEqual(
(httplib.OK, get_enlist_preseed()),
(response.status_code, response.content))
def test_compose_enlistment_preseed_url_returns_absolute_link(self):
url = 'http://%s' % factory.make_name('host')
self.patch(settings, 'DEFAULT_MAAS_URL', url)
self.assertThat(
compose_enlistment_preseed_url(), StartsWith(url))
def test_compose_preseed_url_links_to_preseed_for_node(self):
node = factory.make_node()
response = self.client.get(compose_preseed_url(node))
self.assertEqual(
(httplib.OK, get_preseed(node)),
(response.status_code, response.content))
def test_compose_preseed_url_returns_absolute_link(self):
self.assertThat(
compose_preseed_url(factory.make_node()),
StartsWith('http://'))
| cloudbase/maas | src/maasserver/tests/test_preseed.py | Python | agpl-3.0 | 25,940 | 0.000578 |
import numpy as np
import numpy.random as rng
class Oscillator:
"""
A point in phase space for an oscillator.
"""
def __init__(self, state, omega=1., tau=1., beta=1.):
"""
        Constructor: takes initial position
        and velocity as arguments. Sets the time to zero.
"""
self.state = state
self.omega, self.tau, self.beta = omega, tau, beta
self.time = 0.
def deriv(self, time, state, dt):
"""
Compute the derivatives from the given state
(not necessarily self.state, you need to pass that in
if that's what you want!)
"""
a = np.empty(2)
a[0] = state[1]
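        # Damped harmonic oscillator driven by white noise:
        #   y'' = -omega^2 * y - y'/tau + beta * xi(t)
        # The 1/sqrt(dt) factor keeps the variance of the integrated noise
        # independent of the step size (Euler-Maruyama convention).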
a[1] = -self.omega**2*state[0] - state[1]/self.tau\
+ self.beta*rng.randn()/np.sqrt(dt)
return a
def update(self, dt):
"""
Take a step using RK4
"""
f1 = self.deriv(self.time, self.state, dt)
f2 = self.deriv(self.time + 0.5*dt, self.state + 0.5*dt*f1, dt)
f3 = self.deriv(self.time + 0.5*dt, self.state + 0.5*dt*f2, dt)
f4 = self.deriv(self.time + dt, self.state + dt*f3, dt)
self.state += dt/6.*(f1 + 2*f2 + 2*f3 + f4)
self.time += dt
if __name__ == '__main__':
import matplotlib.pyplot as plt
# Initial conditions
oscillator = Oscillator(np.array([0., 0.]))
# Timestep
dt = 0.01
steps = 100000 # Take this many steps
skip = 100 # Store and plot results this often
keep = np.empty((steps/skip, 3)) # Store results in here
    # Columns: time, position, velocity
plt.ion() # Turn "interactive mode" for plotting on, so plots
# can update without the user having to close the window
plt.hold(False) # Clear the plot every time we plot something new
# Main loop
for i in xrange(0, steps):
# Saving and plotting
if i%skip == 0:
index = i/skip
# Save state to keep array
keep[index, :] = \
np.array([oscillator.time, oscillator.state[0],\
oscillator.state[1]])
            # Plot position vs time
plt.plot(keep[0:(index+1), 0], keep[0:(index+1), 1], 'b')
plt.xlabel('Time')
plt.ylabel('y')
plt.title('Stdev = %.03f'%keep[0:(index+1), 1].std())
plt.draw() # Refresh the plot
# Update the oscillator
oscillator.update(dt)
# At end of run, leave the last plot showing until the user closes it
plt.ioff()
plt.show()
| eggplantbren/Oscillations | StateSpace/Python/Oscillator.py | Python | gpl-3.0 | 2,171 | 0.03731 |
#!/usr/bin/python
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: rax_mon_notification
short_description: Create or delete a Rackspace Cloud Monitoring notification.
description:
- Create or delete a Rackspace Cloud Monitoring notification that specifies a
channel that can be used to communicate alarms, such as email, webhooks, or
PagerDuty. Rackspace monitoring module flow | rax_mon_entity -> rax_mon_check ->
*rax_mon_notification* -> rax_mon_notification_plan -> rax_mon_alarm
version_added: "2.0"
options:
state:
description:
- Ensure that the notification with this C(label) exists or does not exist.
choices: ['present', 'absent']
label:
description:
- Defines a friendly name for this notification. String between 1 and 255
characters long.
required: true
notification_type:
description:
- A supported notification type.
choices: ["webhook", "email", "pagerduty"]
required: true
details:
description:
- Dictionary of key-value pairs used to initialize the notification.
Required keys and meanings vary with notification type. See
http://docs.rackspace.com/cm/api/v1.0/cm-devguide/content/
service-notification-types-crud.html for details.
required: true
author: Ash Wilson
extends_documentation_fragment: rackspace.openstack
'''
EXAMPLES = '''
- name: Monitoring notification example
gather_facts: False
hosts: local
connection: local
tasks:
- name: Email me when something goes wrong.
    rax_mon_notification:
      credentials: ~/.rax_pub
      label: omg
      notification_type: email
details:
address: me@mailhost.com
register: the_notification
'''
try:
import pyrax
HAS_PYRAX = True
except ImportError:
HAS_PYRAX = False
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.rax import rax_argument_spec, rax_required_together, setup_rax_module
def notification(module, state, label, notification_type, details):
if len(label) < 1 or len(label) > 255:
module.fail_json(msg='label must be between 1 and 255 characters long')
changed = False
notification = None
cm = pyrax.cloud_monitoring
if not cm:
module.fail_json(msg='Failed to instantiate client. This typically '
'indicates an invalid region or an incorrectly '
'capitalized region name.')
existing = []
for n in cm.list_notifications():
if n.label == label:
existing.append(n)
if existing:
notification = existing[0]
if state == 'present':
should_update = False
should_delete = False
should_create = False
if len(existing) > 1:
module.fail_json(msg='%s existing notifications are labelled %s.' %
(len(existing), label))
if notification:
should_delete = (notification_type != notification.type)
should_update = (details != notification.details)
if should_update and not should_delete:
            notification.update(details=details)
changed = True
if should_delete:
notification.delete()
else:
should_create = True
if should_create:
notification = cm.create_notification(notification_type,
label=label, details=details)
changed = True
else:
for n in existing:
n.delete()
changed = True
if notification:
notification_dict = {
"id": notification.id,
"type": notification.type,
"label": notification.label,
"details": notification.details
}
module.exit_json(changed=changed, notification=notification_dict)
else:
module.exit_json(changed=changed)
def main():
argument_spec = rax_argument_spec()
argument_spec.update(
dict(
state=dict(default='present', choices=['present', 'absent']),
label=dict(required=True),
notification_type=dict(required=True, choices=['webhook', 'email', 'pagerduty']),
details=dict(required=True, type='dict')
)
)
module = AnsibleModule(
argument_spec=argument_spec,
required_together=rax_required_together()
)
if not HAS_PYRAX:
module.fail_json(msg='pyrax is required for this module')
state = module.params.get('state')
label = module.params.get('label')
notification_type = module.params.get('notification_type')
details = module.params.get('details')
setup_rax_module(module, pyrax)
notification(module, state, label, notification_type, details)
if __name__ == '__main__':
main()
| bearstech/ansible | lib/ansible/modules/cloud/rackspace/rax_mon_notification.py | Python | gpl-3.0 | 5,164 | 0.001356 |
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class RCircstats(RPackage):
"""Circular Statistics, from "Topics in Circular Statistics" (2001)
Circular Statistics, from "Topics in Circular Statistics" (2001) S.
Rao Jammalamadaka and A. SenGupta, World Scientific."""
homepage = "https://cloud.r-project.org/package=CircStats"
url = "https://cloud.r-project.org/src/contrib/CircStats_0.2-6.tar.gz"
list_url = "https://cloud.r-project.org/src/contrib/Archive/CircStats"
maintainers = ['dorton21']
version('0.2-6', sha256='8efed93b75b314577341effea214e3dd6e0a515cfe1212eb051047a1f3276f1d')
depends_on('r-mass', type=('build', 'run'))
depends_on('r-boot', type=('build', 'run'))
| LLNL/spack | var/spack/repos/builtin/packages/r-circstats/package.py | Python | lgpl-2.1 | 898 | 0.002227 |
#!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Test variable expansion of '<|(list.txt ...)' syntax commands.
"""
import os
import sys
import TestGyp
test = TestGyp.TestGyp()
CHDIR = 'src'
test.run_gyp('filelist2.gyp', chdir=CHDIR)
test.build('filelist2.gyp', 'foo', chdir=CHDIR)
contents = test.read('src/dummy_foo').replace('\r', '')
expect = 'John\nJacob\nJingleheimer\nSchmidt\n'
if not test.match(contents, expect):
print("Unexpected contents of `src/dummy_foo'")
test.diff(expect, contents, 'src/dummy_foo')
test.fail_test()
test.pass_test()
| ibc/MediaSoup | worker/deps/gyp/test/variables/filelist/gyptest-filelist.py | Python | isc | 697 | 0.004304 |
"""Kytos Napps Module."""
import json
import os
import re
import sys
import tarfile
import urllib
from abc import ABCMeta, abstractmethod
from pathlib import Path
from random import randint
from threading import Event, Thread
from kytos.core.events import KytosEvent
from kytos.core.logs import NAppLog
__all__ = ('KytosNApp',)
LOG = NAppLog()
class NApp:
"""Class to represent a NApp."""
# pylint: disable=too-many-arguments
def __init__(self, username=None, name=None, version=None,
repository=None, meta=False):
self.username = username
self.name = name
self.version = version if version else 'latest'
self.repository = repository
self.meta = meta
self.description = None
self.tags = []
self.enabled = False
self.napp_dependencies = []
def __str__(self):
return "{}/{}".format(self.username, self.name)
def __repr__(self):
return f"NApp({self.username}/{self.name})"
def __hash__(self):
return hash(self.id)
def __eq__(self, other):
"""Compare username/name strings."""
return isinstance(other, self.__class__) and self.id == other.id
@property
def id(self): # pylint: disable=invalid-name
"""username/name string."""
return str(self)
@property
def uri(self):
"""Return a unique identifier of this NApp."""
version = self.version if self.version else 'latest'
if not self._has_valid_repository():
return ""
# Use the next line after Diraol fix redirect using ":" for version
# return "{}/{}:{}".format(self.repository, self.id, version)
return "{}/{}-{}".format(self.repository, self.id, version)
@property
def package_url(self):
"""Return a fully qualified URL for a NApp package."""
if not self.uri:
return ""
return "{}.napp".format(self.uri)
@classmethod
def create_from_uri(cls, uri):
"""Return a new NApp instance from an unique identifier."""
regex = r'^(((https?://|file://)(.+))/)?(.+?)/(.+?)/?(:(.+))?$'
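        # Matches URIs such as 'https://napps.kytos.io/kytos/of_core:1.2.3'
        # or the short form 'kytos/of_core' (repository and version optional).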
match = re.match(regex, uri)
if not match:
return None
return cls(username=match.groups()[4],
name=match.groups()[5],
version=match.groups()[7],
repository=match.groups()[1])
@classmethod
def create_from_json(cls, filename):
"""Return a new NApp instance from a metadata file."""
with open(filename, encoding='utf-8') as data_file:
data = json.loads(data_file.read())
return cls.create_from_dict(data)
@classmethod
def create_from_dict(cls, data):
"""Return a new NApp instance from metadata."""
napp = cls()
for attribute, value in data.items():
setattr(napp, attribute, value)
return napp
def as_json(self):
"""Dump all NApp attributes on a json format."""
return json.dumps(self.__dict__)
def match(self, pattern):
"""Whether a pattern is present on NApp id, description and tags."""
try:
pattern = '.*{}.*'.format(pattern)
pattern = re.compile(pattern, re.IGNORECASE)
strings = [self.id, self.description] + self.tags
return any(pattern.match(string) for string in strings)
except TypeError:
return False
def download(self):
"""Download NApp package from his repository.
Raises:
urllib.error.HTTPError: If download is not successful.
Returns:
str: Downloaded temp filename.
"""
if not self.package_url:
return None
package_filename = urllib.request.urlretrieve(self.package_url)[0]
extracted = self._extract(package_filename)
Path(package_filename).unlink()
self._update_repo_file(extracted)
return extracted
@staticmethod
def _extract(filename):
"""Extract NApp package to a temporary folder.
Return:
pathlib.Path: Temp dir with package contents.
"""
random_string = '{:0d}'.format(randint(0, 10**6))
tmp = '/tmp/kytos-napp-' + Path(filename).stem + '-' + random_string
os.mkdir(tmp)
with tarfile.open(filename, 'r:xz') as tar:
tar.extractall(tmp)
return Path(tmp)
def _has_valid_repository(self):
"""Whether this NApp has a valid repository or not."""
return all([self.username, self.name, self.repository])
def _update_repo_file(self, destination=None):
"""Create or update the file '.repo' inside NApp package."""
with open("{}/.repo".format(destination), 'w') as repo_file:
repo_file.write(self.repository + '\n')
class KytosNApp(Thread, metaclass=ABCMeta):
"""Base class for any KytosNApp to be developed."""
def __init__(self, controller, **kwargs):
"""Contructor of KytosNapps.
Go through all of the instance methods and selects those that have
the events attribute, then creates a dict containing the event_name
and the list of methods that are responsible for handling such event.
At the end, the setup method is called as a complement of the init
process.
"""
Thread.__init__(self, daemon=False)
self.controller = controller
self.username = None # loaded from json
self.name = None # loaded from json
self.meta = False # loaded from json
self._load_json()
# Force a listener with a private method.
self._listeners = {
'kytos/core.shutdown': [self._shutdown_handler],
'kytos/core.shutdown.' + self.napp_id: [self._shutdown_handler]}
self.__event = Event()
#: int: Seconds to sleep before next call to :meth:`execute`. If
#: negative, run :meth:`execute` only once.
self.__interval = -1
self.setup()
#: Add non-private methods that listen to events.
handler_methods = [getattr(self, method_name) for method_name in
dir(self) if method_name[0] != '_' and
callable(getattr(self, method_name)) and
hasattr(getattr(self, method_name), 'events')]
# Building the listeners dictionary
for method in handler_methods:
for event_name in method.events:
if event_name not in self._listeners:
self._listeners[event_name] = []
self._listeners[event_name].append(method)
@property
def napp_id(self):
"""username/name string."""
return "{}/{}".format(self.username, self.name)
def listeners(self):
"""Return all listeners registered."""
return list(self._listeners.keys())
def _load_json(self):
"""Update object attributes based on kytos.json."""
current_file = sys.modules[self.__class__.__module__].__file__
json_path = os.path.join(os.path.dirname(current_file), 'kytos.json')
with open(json_path, encoding='utf-8') as data_file:
data = json.loads(data_file.read())
for attribute, value in data.items():
setattr(self, attribute, value)
def execute_as_loop(self, interval):
"""Run :meth:`execute` within a loop. Call this method during setup.
By calling this method, the application does not need to worry about
loop details such as sleeping and stopping the loop when
:meth:`shutdown` is called. Just call this method during :meth:`setup`
and implement :meth:`execute` as a single execution.
Args:
interval (int): Seconds between each call to :meth:`execute`.
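        Example (illustrative sketch; MyNApp is a hypothetical subclass):
            class MyNApp(KytosNApp):
                def setup(self):
                    # Run execute() every 30 seconds until shutdown.
                    self.execute_as_loop(30)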
"""
self.__interval = interval
def run(self):
"""Call the execute method, looping as needed.
        It should not be overridden.
"""
self.notify_loaded()
LOG.info("Running NApp: %s", self)
self.execute()
while self.__interval > 0 and not self.__event.is_set():
self.__event.wait(self.__interval)
self.execute()
def notify_loaded(self):
"""Inform this NApp has been loaded."""
name = f'{self.username}/{self.name}.loaded'
event = KytosEvent(name=name, content={})
self.controller.buffers.app.put(event)
# all listeners receive event
def _shutdown_handler(self, event): # pylint: disable=unused-argument
"""Listen shutdown event from kytos.
This method listens the kytos/core.shutdown event and call the shutdown
method from napp subclass implementation.
Paramters
event (:class:`KytosEvent`): event to be listened.
"""
if not self.__event.is_set():
self.__event.set()
self.shutdown()
@abstractmethod
def setup(self):
"""Replace the 'init' method for the KytosApp subclass.
The setup method is automatically called on the NApp __init__().
Users aren't supposed to call this method directly.
"""
@abstractmethod
def execute(self):
"""Execute in a loop until 'kytos/core.shutdown' is received.
The execute method is called by KytosNApp class.
Users shouldn't call this method directly.
"""
@abstractmethod
def shutdown(self):
"""Run before the app is unloaded and the controller, stopped.
The user implementation of this method should do the necessary routine
for the user App and also it is a good moment to break the loop of the
execute method if it is in a loop.
This methods is not going to be called directly, it is going to be
called by the _shutdown_handler method when a KytosShutdownEvent is
sent.
"""
| kytos/kytos | kytos/core/napps/base.py | Python | mit | 9,959 | 0 |
# Generated by Django 2.0.8 on 2018-11-05 20:39
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('tickets', '0008_auto_20180730_2035'),
]
operations = [
migrations.AlterModelOptions(
name='ticket',
options={'default_permissions': ('add', 'change', 'delete', 'view')},
),
]
| IntegratedAlarmSystem-Group/ias-webserver | tickets/migrations/0009_auto_20181105_2039.py | Python | lgpl-3.0 | 383 | 0.002611 |
from django.conf.urls import url
from . import views
app_name = 'polls'
urlpatterns = [
url(r'^$', views.index, name='index'),
url(r'^(?P<question_id>[0-9]+)$', views.vote, name='vote'),
url(r'^(?P<question_id>[0-9]+)/results$', views.results, name='results'),
url(r'^add$', views.add, name='add'),
url(r'^check$', views.check, name='check'),
url(r'^search$', views.search, name='search'),
url(r'^searched$', views.searched, name='searched'),
url(r'^random$', views.get_random, name='get_random'),
url(r'^faq$', views.get_faq, name='get_faq'),
] | kura-pl/fast_polls | polls/urls.py | Python | mit | 583 | 0.001715 |
# __init__.py - collection of Croatian numbers
# coding: utf-8
#
# Copyright (C) 2012 Arthur de Jong
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301 USA
"""Collection of Croatian numbers."""
| tonyseek/python-stdnum | stdnum/hr/__init__.py | Python | lgpl-2.1 | 870 | 0 |
import teneto
import matplotlib.pyplot as plt
def test_sliceplot():
G = teneto.generatenetwork.rand_binomial([4, 2], 0.5, 'graphlet', 'wu')
fig, ax = plt.subplots(1)
ax = teneto.plot.slice_plot(G, ax)
plt.close(fig)
def test_circleplot():
G = teneto.generatenetwork.rand_binomial([4, 2], 0.5, 'graphlet', 'wd')
fig, ax = plt.subplots(1)
ax = teneto.plot.circle_plot(G.mean(axis=-1), ax)
plt.close(fig)
def test_stackplot():
G = teneto.generatenetwork.rand_binomial([4, 2], 0.5, 'contact', 'wd')
fig, ax = plt.subplots(1)
ax = teneto.plot.graphlet_stack_plot(G, ax, q=1)
plt.close(fig)
| wiheto/teneto | test/plot/test_plot.py | Python | gpl-3.0 | 640 | 0 |
# Configuration file for ipython.
c = get_config() # noqa: F821
c.Completer.use_jedi = False
# ------------------------------------------------------------------------------
# InteractiveShellApp configuration
# ------------------------------------------------------------------------------
# A Mixin for applications that start InteractiveShell instances.
#
# Provides configurables for loading extensions and executing files as part of
# configuring a Shell environment.
#
# The following methods should be called by the :meth:`initialize` method of the
# subclass:
#
# - :meth:`init_path`
# - :meth:`init_shell` (to be implemented by the subclass)
# - :meth:`init_gui_pylab`
# - :meth:`init_extensions`
# - :meth:`init_code`
# Execute the given command string.
# c.InteractiveShellApp.code_to_run = ''
# Pre-load matplotlib and numpy for interactive use, selecting a particular
# matplotlib backend and loop integration.
# c.InteractiveShellApp.pylab = None
# Run the file referenced by the PYTHONSTARTUP environment variable at IPython
# startup.
# c.InteractiveShellApp.exec_PYTHONSTARTUP = True
# lines of code to run at IPython startup.
c.InteractiveShellApp.exec_lines = [
"import biokbase.narrative.magics",
"from biokbase.narrative.services import *",
"from biokbase.narrative.widgetmanager import WidgetManager",
"from biokbase.narrative.jobs import *",
]
# Enable GUI event loop integration with any of ('glut', 'gtk', 'gtk3', 'osx',
# 'pyglet', 'qt', 'qt5', 'tk', 'wx').
# c.InteractiveShellApp.gui = None
# Reraise exceptions encountered loading IPython extensions?
# c.InteractiveShellApp.reraise_ipython_extension_failures = False
# Configure matplotlib for interactive use with the default matplotlib backend.
# c.InteractiveShellApp.matplotlib = None
# If true, IPython will populate the user namespace with numpy, pylab, etc. and
# an ``import *`` is done from numpy and pylab, when using pylab mode.
#
# When False, pylab mode should not import any names into the user namespace.
# c.InteractiveShellApp.pylab_import_all = True
# A list of dotted module names of IPython extensions to load.
# c.InteractiveShellApp.extensions = []
# Run the module as a script.
# c.InteractiveShellApp.module_to_run = ''
# Should variables loaded at startup (by startup files, exec_lines, etc.) be
# hidden from tools like %who?
# c.InteractiveShellApp.hide_initial_ns = True
# dotted module name of an IPython extension to load.
# c.InteractiveShellApp.extra_extension = ''
# List of files to run at IPython startup.
# c.InteractiveShellApp.exec_files = []
# A file to be run
# c.InteractiveShellApp.file_to_run = ''
# ------------------------------------------------------------------------------
# TerminalIPythonApp configuration
# ------------------------------------------------------------------------------
# TerminalIPythonApp will inherit config from: BaseIPythonApplication,
# Application, InteractiveShellApp
# Run the file referenced by the PYTHONSTARTUP environment variable at IPython
# startup.
# c.TerminalIPythonApp.exec_PYTHONSTARTUP = True
# Pre-load matplotlib and numpy for interactive use, selecting a particular
# matplotlib backend and loop integration.
# c.TerminalIPythonApp.pylab = None
# Create a massive crash report when IPython encounters what may be an internal
# error. The default is to append a short message to the usual traceback
# c.TerminalIPythonApp.verbose_crash = False
# Run the module as a script.
# c.TerminalIPythonApp.module_to_run = ''
# The date format used by logging formatters for %(asctime)s
# c.TerminalIPythonApp.log_datefmt = '%Y-%m-%d %H:%M:%S'
# Whether to overwrite existing config files when copying
# c.TerminalIPythonApp.overwrite = False
# Execute the given command string.
# c.TerminalIPythonApp.code_to_run = ''
# Set the log level by value or name.
# c.TerminalIPythonApp.log_level = 30
# lines of code to run at IPython startup.
# c.TerminalIPythonApp.exec_lines = []
# Suppress warning messages about legacy config files
# c.TerminalIPythonApp.ignore_old_config = False
# Path to an extra config file to load.
#
# If specified, load this config file in addition to any other IPython config.
# c.TerminalIPythonApp.extra_config_file = u''
# Should variables loaded at startup (by startup files, exec_lines, etc.) be
# hidden from tools like %who?
# c.TerminalIPythonApp.hide_initial_ns = True
# dotted module name of an IPython extension to load.
# c.TerminalIPythonApp.extra_extension = ''
# A file to be run
# c.TerminalIPythonApp.file_to_run = ''
# The IPython profile to use.
# c.TerminalIPythonApp.profile = u'default'
# Configure matplotlib for interactive use with the default matplotlib backend.
# c.TerminalIPythonApp.matplotlib = None
# If a command or file is given via the command-line, e.g. 'ipython foo.py',
# start an interactive shell after executing the file or command.
# c.TerminalIPythonApp.force_interact = False
# If true, IPython will populate the user namespace with numpy, pylab, etc. and
# an ``import *`` is done from numpy and pylab, when using pylab mode.
#
# When False, pylab mode should not import any names into the user namespace.
# c.TerminalIPythonApp.pylab_import_all = True
# The name of the IPython directory. This directory is used for logging
# configuration (through profiles), history storage, etc. The default is usually
# $HOME/.ipython. This option can also be specified through the environment
# variable IPYTHONDIR.
# c.TerminalIPythonApp.ipython_dir = u''
# Whether to display a banner upon starting IPython.
# c.TerminalIPythonApp.display_banner = True
# Whether to install the default config files into the profile dir. If a new
# profile is being created, and IPython contains config files for that profile,
# then they will be staged into the new directory. Otherwise, default config
# files will be automatically generated.
# c.TerminalIPythonApp.copy_config_files = False
# List of files to run at IPython startup.
# c.TerminalIPythonApp.exec_files = []
# Enable GUI event loop integration with any of ('glut', 'gtk', 'gtk3', 'osx',
# 'pyglet', 'qt', 'qt5', 'tk', 'wx').
# c.TerminalIPythonApp.gui = None
# Reraise exceptions encountered loading IPython extensions?
# c.TerminalIPythonApp.reraise_ipython_extension_failures = False
# A list of dotted module names of IPython extensions to load.
# c.TerminalIPythonApp.extensions = []
# Start IPython quickly by skipping the loading of config files.
# c.TerminalIPythonApp.quick = False
# The Logging format template
# c.TerminalIPythonApp.log_format = '[%(name)s]%(highlevel)s %(message)s'
# ------------------------------------------------------------------------------
# TerminalInteractiveShell configuration
# ------------------------------------------------------------------------------
# TerminalInteractiveShell will inherit config from: InteractiveShell
# auto editing of files with syntax errors.
# c.TerminalInteractiveShell.autoedit_syntax = False
# Use colors for displaying information about objects. Because this information
# is passed through a pager (like 'less'), and some pagers get confused with
# color codes, this capability can be turned off.
# c.TerminalInteractiveShell.color_info = True
# A list of ast.NodeTransformer subclass instances, which will be applied to
# user input before code is run.
# c.TerminalInteractiveShell.ast_transformers = []
#
# c.TerminalInteractiveShell.history_length = 10000
# Don't call post-execute functions that have failed in the past.
# c.TerminalInteractiveShell.disable_failing_post_execute = False
# Show rewritten input, e.g. for autocall.
# c.TerminalInteractiveShell.show_rewritten_input = True
# Set the color scheme (NoColor, Linux, or LightBG).
# c.TerminalInteractiveShell.colors = 'LightBG'
# If True, anything that would be passed to the pager will be displayed as
# regular output instead.
# c.TerminalInteractiveShell.display_page = False
# Autoindent IPython code entered interactively.
# c.TerminalInteractiveShell.autoindent = True
#
# c.TerminalInteractiveShell.separate_in = '\n'
# Deprecated, use PromptManager.in2_template
# c.TerminalInteractiveShell.prompt_in2 = ' .\\D.: '
#
# c.TerminalInteractiveShell.separate_out = ''
# Deprecated, use PromptManager.in_template
# c.TerminalInteractiveShell.prompt_in1 = 'In [\\#]: '
# Make IPython automatically call any callable object even if you didn't type
# explicit parentheses. For example, 'str 43' becomes 'str(43)' automatically.
# The value can be '0' to disable the feature, '1' for 'smart' autocall, where
# it is not applied if there are no more arguments on the line, and '2' for
# 'full' autocall, where all callable objects are automatically called (even if
# no arguments are present).
# c.TerminalInteractiveShell.autocall = 0
# Number of lines of your screen, used to control printing of very long strings.
# Strings longer than this number of lines will be sent through a pager instead
# of directly printed. The default value for this is 0, which means IPython
# will auto-detect your screen size every time it needs to print certain
# potentially long strings (this doesn't change the behavior of the 'print'
# keyword, it's only triggered internally). If for some reason this isn't
# working well (it needs curses support), specify it yourself. Otherwise don't
# change the default.
# c.TerminalInteractiveShell.screen_length = 0
# Set the editor used by IPython (default to $EDITOR/vi/notepad).
# c.TerminalInteractiveShell.editor = 'vi'
# Deprecated, use PromptManager.justify
# c.TerminalInteractiveShell.prompts_pad_left = True
# The part of the banner to be printed before the profile
# c.TerminalInteractiveShell.banner1 = 'Python 2.7.6 (default, Nov 18 2013, 15:12:51) \nType "copyright", "credits" or "license" for more information.\n\nIPython 3.2.0-dev -- An enhanced Interactive Python.\n? -> Introduction and overview of IPython\'s features.\n%quickref -> Quick reference.\nhelp -> Python\'s own help system.\nobject? -> Details about \'object\', use \'object??\' for extra details.\n'
#
# c.TerminalInteractiveShell.readline_parse_and_bind = ['tab: complete', '"\\C-l": clear-screen', 'set show-all-if-ambiguous on', '"\\C-o": tab-insert', '"\\C-r": reverse-search-history', '"\\C-s": forward-search-history', '"\\C-p": history-search-backward', '"\\C-n": history-search-forward', '"\\e[A": history-search-backward', '"\\e[B": history-search-forward', '"\\C-k": kill-line', '"\\C-u": unix-line-discard']
# The part of the banner to be printed after the profile
# c.TerminalInteractiveShell.banner2 = ''
#
# c.TerminalInteractiveShell.separate_out2 = ''
#
# c.TerminalInteractiveShell.wildcards_case_sensitive = True
#
# c.TerminalInteractiveShell.debug = False
# Set to confirm when you try to exit IPython with an EOF (Control-D in Unix,
# Control-Z/Enter in Windows). By typing 'exit' or 'quit', you can force a
# direct exit without any confirmation.
# c.TerminalInteractiveShell.confirm_exit = True
#
# c.TerminalInteractiveShell.ipython_dir = ''
#
# c.TerminalInteractiveShell.readline_remove_delims = '-/~'
# Start logging to the default log file in overwrite mode. Use `logappend` to
# specify a log file to **append** logs to.
# c.TerminalInteractiveShell.logstart = False
# The name of the logfile to use.
# c.TerminalInteractiveShell.logfile = ''
# The shell program to be used for paging.
# c.TerminalInteractiveShell.pager = 'less'
# Enable magic commands to be called without the leading %.
# c.TerminalInteractiveShell.automagic = True
# Save multi-line entries as one entry in readline history
# c.TerminalInteractiveShell.multiline_history = True
#
# c.TerminalInteractiveShell.readline_use = True
# Enable deep (recursive) reloading by default. IPython can use the deep_reload
# module which reloads changes in modules recursively (it replaces the reload()
# function, so you don't need to change anything to use it). deep_reload()
# forces a full reload of modules whose code may have changed, which the default
# reload() function does not. When deep_reload is off, IPython will use the
# normal reload(), but deep_reload will still be available as dreload().
# c.TerminalInteractiveShell.deep_reload = False
# Start logging to the given file in append mode. Use `logfile` to specify a log
# file to **overwrite** logs to.
# c.TerminalInteractiveShell.logappend = ''
#
# c.TerminalInteractiveShell.xmode = 'Context'
#
# c.TerminalInteractiveShell.quiet = False
# Enable auto setting the terminal title.
# c.TerminalInteractiveShell.term_title = False
#
# c.TerminalInteractiveShell.object_info_string_level = 0
# Deprecated, use PromptManager.out_template
# c.TerminalInteractiveShell.prompt_out = 'Out[\\#]: '
# Set the size of the output cache. The default is 1000, you can change it
# permanently in your config file. Setting it to 0 completely disables the
# caching system, and the minimum value accepted is 20 (if you provide a value
# less than 20, it is reset to 0 and a warning is issued). This limit is
# defined because otherwise you'll spend more time re-flushing a too small cache
# than working
# c.TerminalInteractiveShell.cache_size = 1000
# 'all', 'last', 'last_expr' or 'none', specifying which nodes should be run
# interactively (displaying output from expressions).
# c.TerminalInteractiveShell.ast_node_interactivity = 'last_expr'
# Automatically call the pdb debugger after every exception.
# c.TerminalInteractiveShell.pdb = False
# ------------------------------------------------------------------------------
# PromptManager configuration
# ------------------------------------------------------------------------------
# This is the primary interface for producing IPython's prompts.
# Output prompt. '\#' will be transformed to the prompt number
# c.PromptManager.out_template = 'Out[\\#]: '
# Continuation prompt.
# c.PromptManager.in2_template = ' .\\D.: '
# If True (default), each prompt will be right-aligned with the preceding one.
# c.PromptManager.justify = True
# Input prompt. '\#' will be transformed to the prompt number
# c.PromptManager.in_template = 'In [\\#]: '
#
# c.PromptManager.color_scheme = 'Linux'
# ------------------------------------------------------------------------------
# HistoryManager configuration
# ------------------------------------------------------------------------------
# A class to organize all history-related functionality in one place.
# HistoryManager will inherit config from: HistoryAccessor
# Should the history database include output? (default: no)
# c.HistoryManager.db_log_output = False
# Write to database every x commands (higher values save disk access & power).
# Values of 1 or less effectively disable caching.
# c.HistoryManager.db_cache_size = 0
# Path to file to use for SQLite history database.
#
# By default, IPython will put the history database in the IPython profile
# directory. If you would rather share one history among profiles, you can set
# this value in each, so that they are consistent.
#
# Due to an issue with fcntl, SQLite is known to misbehave on some NFS mounts.
# If you see IPython hanging, try setting this to something on a local disk,
# e.g::
#
# ipython --HistoryManager.hist_file=/tmp/ipython_hist.sqlite
# c.HistoryManager.hist_file = u''
# Options for configuring the SQLite connection
#
# These options are passed as keyword args to sqlite3.connect when establishing
# database connections.
# c.HistoryManager.connection_options = {}
# enable the SQLite history
#
# set enabled=False to disable the SQLite history, in which case there will be
# no stored history, no SQLite connection, and no background saving thread.
# This may be necessary in some threaded environments where IPython is embedded.
# c.HistoryManager.enabled = True
# ------------------------------------------------------------------------------
# ProfileDir configuration
# ------------------------------------------------------------------------------
# An object to manage the profile directory and its resources.
#
# The profile directory is used by all IPython applications, to manage
# configuration, logging and security.
#
# This object knows how to find, create and manage these directories. This
# should be used by any code that wants to handle profiles.
# Set the profile location directly. This overrides the logic used by the
# `profile` option.
# c.ProfileDir.location = u''
# ------------------------------------------------------------------------------
# PlainTextFormatter configuration
# ------------------------------------------------------------------------------
# The default pretty-printer.
#
# This uses :mod:`IPython.lib.pretty` to compute the format data of the object.
# If the object cannot be pretty printed, :func:`repr` is used. See the
# documentation of :mod:`IPython.lib.pretty` for details on how to write pretty
# printers. Here is a simple example::
#
# def dtype_pprinter(obj, p, cycle):
# if cycle:
# return p.text('dtype(...)')
# if hasattr(obj, 'fields'):
# if obj.fields is None:
# p.text(repr(obj))
# else:
# p.begin_group(7, 'dtype([')
# for i, field in enumerate(obj.descr):
# if i > 0:
# p.text(',')
# p.breakable()
# p.pretty(field)
# p.end_group(7, '])')
# PlainTextFormatter will inherit config from: BaseFormatter
#
# c.PlainTextFormatter.type_printers = {}
# Truncate large collections (lists, dicts, tuples, sets) to this size.
#
# Set to 0 to disable truncation.
# c.PlainTextFormatter.max_seq_length = 1000
#
# c.PlainTextFormatter.float_precision = ''
#
# c.PlainTextFormatter.verbose = False
#
# c.PlainTextFormatter.deferred_printers = {}
#
# c.PlainTextFormatter.newline = '\n'
#
# c.PlainTextFormatter.max_width = 79
#
# c.PlainTextFormatter.pprint = True
#
# c.PlainTextFormatter.singleton_printers = {}
# ------------------------------------------------------------------------------
# IPCompleter configuration
# ------------------------------------------------------------------------------
# Extension of the completer class with IPython-specific features
# IPCompleter will inherit config from: Completer
# Instruct the completer to omit private method names
#
# Specifically, when completing on ``object.<tab>``.
#
# When 2 [default]: all names that start with '_' will be excluded.
#
# When 1: all 'magic' names (``__foo__``) will be excluded.
#
# When 0: nothing will be excluded.
# c.IPCompleter.omit__names = 2
# Whether to merge completion results into a single list
#
# If False, only the completion results from the first non-empty completer will
# be returned.
# c.IPCompleter.merge_completions = True
# Instruct the completer to use __all__ for the completion
#
# Specifically, when completing on ``object.<tab>``.
#
# When True: only those names in obj.__all__ will be included.
#
# When False [default]: the __all__ attribute is ignored
# c.IPCompleter.limit_to__all__ = False
# Activate greedy completion
#
# This will enable completion on elements of lists, results of function calls,
# etc., but can be unsafe because the code is actually evaluated on TAB.
# c.IPCompleter.greedy = False
# ------------------------------------------------------------------------------
# ScriptMagics configuration
# ------------------------------------------------------------------------------
# Magics for talking to scripts
#
# This defines a base `%%script` cell magic for running a cell with a program in
# a subprocess, and registers a few top-level magics that call %%script with
# common interpreters.
# Extra script cell magics to define
#
# This generates simple wrappers of `%%script foo` as `%%foo`.
#
# If you want to add script magics that aren't on your path, specify them in
# script_paths
# c.ScriptMagics.script_magics = []
# Dict mapping short 'ruby' names to full paths, such as '/opt/secret/bin/ruby'
#
# Only necessary for items in script_magics where the default path will not find
# the right interpreter.
# c.ScriptMagics.script_paths = {}
# ------------------------------------------------------------------------------
# StoreMagics configuration
# ------------------------------------------------------------------------------
# Lightweight persistence for python variables.
#
# Provides the %store magic.
# If True, any %store-d variables will be automatically restored when IPython
# starts.
# c.StoreMagics.autorestore = False
| kbase/narrative | kbase-extension/ipython/profile_default/ipython_config.py | Python | mit | 20,674 | 0.000532 |
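# Editor's note: a minimal sketch of how the options documented above are
# normally set. It assumes the file lives in an IPython profile directory
# (e.g. ~/.ipython/profile_default/ipython_config.py), where IPython injects
# the get_config() helper at load time; it is not runnable standalone.
c = get_config()

# Use vi as the editor invoked by %edit (matches the default shown above).
c.TerminalInteractiveShell.editor = 'vi'

# Ask for confirmation on Ctrl-D so a stray EOF does not end the session.
c.TerminalInteractiveShell.confirm_exit = True

# Run these extra files at startup; empty by default.
c.TerminalIPythonApp.exec_files = []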
__author__ = 'moskupols'
__all__ = ['morphology', 'cognates', 'pymystem'] | hatbot-team/hatbot_resources | preparation/lang_utils/__init__.py | Python | mit | 74 | 0.013514 |
import requests
__url = 'http://www.baidu.com/s?wd=' # search request URL
def page(word):
r = requests.get(__url + word)
    if r.status_code == 200: # handle request errors (status not 200)
return r.text
else:
print(r.status_code)
return False
| JianmingXia/StudyTest | KnowledgeQuizTool/MillionHeroes/baiduSearch/get.py | Python | mit | 277 | 0 |
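# Editor's note: a hypothetical usage sketch for page() above; the query
# string is an illustrative assumption.
if __name__ == '__main__':
    html = page('python requests')
    if html:
        print(len(html))  # crude sanity check that non-empty HTML came back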
#!/usr/bin/python -tt
# Updated script for clipping adapters from sequences
# Uses cutadapt on combined sequences and removes the first 13 bases with fastx_clipper
# Website: https://code.google.com/p/cutadapt/
# Updated on: 09/26/2013
# Import OS features to run external programs
import os
import glob
# Directories for input and output
input_dir = "/home/chris/transcriptome/fastq/qc/fastx/"
output_dir = "/home/chris/transcriptome/fastq/qc/fastx/"
fastq_orig = sorted(glob.glob1(input_dir, "*.fastq"))
orig = len(list(fastq_orig))
print "Input Directory: %s" % (input_dir)
print "Output Directory: %s" % (output_dir)
print "Scanning Input Directory..."
print "Found %s fastq files..." % (orig)
print "Fastq files: %s" % (fastq_orig)
for files in range(orig):
print "Analyzing %s..." % (fastq_orig[files])
fastqfile_in = input_dir + fastq_orig[files]
sample_name = os.path.splitext(os.path.basename(fastq_orig[files]))[0]
    # Remove adapters from sequences and keep scores above 30, with a minimum length of 51.
    # Any other sequences will be discarded. This may be modified in the future to measure
    # the impact of removing lower-quality sequences.
fastq_tmp = output_dir + fastq_orig[files] + '_temp.fastq'
log_out = output_dir + "logs/" + fastq_orig[files] + '.log'
fastq_out = output_dir + sample_name + "_filtered.fastq"
print "Running quality filter of 20 score, 100%..."
os.system("fastq_quality_filter -v -Q 32 -q 20 -p 100 -i %s -o %s >> %s" % (fastqfile_in, fastq_tmp, log_out))
print "Removing artifacts..."
os.system("fastx_artifacts_filter -v -Q 32 -i %s -o %s >> %s" % (fastq_tmp, fastq_out, log_out))
os.system("rm %s" % (fastq_tmp))
| calandryll/transcriptome | scripts/old/quality_control3.py | Python | gpl-2.0 | 1,659 | 0.011453 |
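# Editor's note: a sketch of the same fastq_quality_filter call using a
# subprocess argument list instead of os.system() string interpolation,
# which avoids shell-quoting problems with odd file names. Assumes the
# FASTX-Toolkit binaries are on PATH, as the script above already does.
import subprocess

def quality_filter(in_fastq, out_fastq, log_path, qscore=20, pct=100):
    """Run fastq_quality_filter and append its output to a log file."""
    with open(log_path, 'ab') as log:
        subprocess.check_call(
            ['fastq_quality_filter', '-v', '-Q', '32',
             '-q', str(qscore), '-p', str(pct),
             '-i', in_fastq, '-o', out_fastq],
            stdout=log, stderr=subprocess.STDOUT)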
# Copyright (c) 2016, Xilinx, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION). HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
__author__ = "Giuseppe Natale, Yun Rock Qu"
__copyright__ = "Copyright 2016, Xilinx"
__email__ = "pynq_support@xilinx.com"
import pytest
from pynq import Overlay
from pynq.iop import request_iop
global ol
ol = Overlay("base.bit")
@pytest.mark.run(order=11)
def test_request_iop():
"""Test for the _IOP class and the method request_iop().
Test whether the request_iop() can return an object without errors.
This is a test for case 1 (for more information, please see request_iop).
"""
fixed_id = 1
exception_raised = False
try:
request_iop(fixed_id,'mailbox.bin')
except LookupError:
exception_raised = True
assert not exception_raised, 'request_iop() should not raise exception.'
ol.reset()
@pytest.mark.run(order=12)
def test_request_iop_same():
"""Test for the _IOP class and the method request_iop().
The request_iop() should not raise any exception since the previous IOP
runs the same program.
This is a test for case 1 (for more information, please see request_iop).
"""
fixed_id = 1
exception_raised = False
request_iop(fixed_id,'mailbox.bin')
try:
request_iop(fixed_id,'mailbox.bin')
except LookupError:
exception_raised = True
assert not exception_raised, 'request_iop() should not raise exception.'
ol.reset()
@pytest.mark.run(order=13)
def test_request_iop_conflict():
"""Test for the _IOP class and the method request_iop().
Creates multiple IOP instances on the same fixed ID. Tests whether
request_iop() correctly raises a LookupError exception.
This is a test for case 2 (for more information, please see request_iop).
"""
fixed_id = 1
request_iop(fixed_id,'pmod_adc.bin')
pytest.raises(LookupError, request_iop, fixed_id, 'pmod_dac.bin')
ol.reset()
| VectorBlox/PYNQ | python/pynq/iop/tests/test__iop.py | Python | bsd-3-clause | 3,522 | 0.012493 |
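# Editor's note: the tests above rely on run(order=N) from the
# pytest-ordering plugin, so they must run under pytest rather than as a
# plain script. A minimal sketch of invoking them programmatically,
# assuming pytest and pytest-ordering are installed on the board:
import pytest

if __name__ == '__main__':
    raise SystemExit(pytest.main(['-v', 'test__iop.py']))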
#!/usr/bin/env python
#
# Copyright (c) 2017, Piotr Przymus
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
Command-line tool to expose qtile.command functionality to shell.
This can be used standalone or in other shell scripts.
"""
import pprint
import argparse
from libqtile.command import Client
from libqtile.command import CommandError, CommandException
def get_formated_info(obj, cmd, args=True, short=True):
"""
Get documentation for command/function and format it.
Returns:
* args=True, short=False - (function args) and a summary line.
* args=True, short=True - '*' if arguments are present and a summary line.
* args=False - a summary line.
    If the object has no 'doc' function or there is no docstring for the given cmd, an empty string is returned.
    The arguments are extracted from the doc[0] line; the summary is constructed from the doc[1] line.
"""
doc_func = obj.doc if hasattr(obj, "doc") else lambda x: ""
doc = doc_func(cmd).splitlines()
doc_args = ""
if doc:
short_description = doc[1] if len(doc) > 1 else ""
tdoc = doc[0]
doc_args = tdoc[tdoc.find("(") + 1:tdoc.find(")")].strip()
if doc_args: # return formatted args
doc_args = "({})".format(doc_args)
if args is False:
doc_args = ""
elif args and short:
doc_args = "*" if len(doc_args) > 1 else " "
return (doc_args + " " + short_description).rstrip()
def print_commands(prefix, obj):
"Print available commands for given object."
prefix += " -f "
output = []
max_cmd = 0 # max len of cmd for formatting
try:
cmds = obj.commands()
except AttributeError:
print("error: Sorry no commands in ", prefix)
exit()
except CommandError:
print("error: Sorry no such object ", prefix)
exit()
for cmd in cmds:
doc_args = get_formated_info(obj, cmd)
pcmd = prefix + cmd
max_cmd = max(len(pcmd), max_cmd)
output.append([pcmd, doc_args])
# Print formatted output
formating = "{:<%d}\t{}" % (max_cmd + 1)
for line in output:
print(formating.format(line[0], line[1]))
def get_object(argv):
"""
Constructs a path to object and returns given object (if it exists).
"""
client = Client()
obj = client
if argv[0] == "cmd":
argv = argv[1:]
# Generate full obj specification
for arg in argv:
try:
obj = obj[arg] # check if it is an item
except KeyError:
try:
                obj = getattr(obj, arg) # check if it is an attr
except AttributeError:
print("Specified object does not exist " + " ".join(argv))
exit()
return obj
def run_function(obj, funcname, args):
"Run command with specified args on given object."
try:
func = getattr(obj, funcname)
except AttributeError:
print("error: Sorry no function ", funcname)
exit()
try:
ret = func(*args)
except CommandError:
print("error: Sorry command '{}' cannot be found".format(funcname))
exit()
except CommandException:
print("error: Sorry cannot run function '{}' with arguments {}"
.format(funcname, args))
exit()
return ret
def print_base_objects():
"Prints access objects of Client, use cmd for commands."
actions = ["-o cmd", "-o window", "-o layout", "-o group", "-o bar"]
print("\n".join(actions))
def main():
"Runs tool according to specified arguments."
description = 'Simple tool to expose qtile.command functionality to shell.'
epilog = '''\
Examples:\n\
qtile-cmd\n\
qtile-cmd -o cmd\n\
qtile-cmd -o cmd -f prev_layout -i\n\
qtile-cmd -o cmd -f prev_layout -a 3 # prev_layout on group 3\n\
qtile-cmd -o group 3 -f focus_back\n
'''
fmt = argparse.RawDescriptionHelpFormatter
parser = argparse.ArgumentParser(description=description, epilog=epilog,
formatter_class=fmt)
parser.add_argument('--object', '-o', dest='obj_spec', nargs='+',
help='''Specify path to object (space separated).\
If no --function flag display available commands.''')
parser.add_argument('--function', '-f', dest='function', nargs=1,
default="help", help='Select function to execute.')
parser.add_argument('--args', '-a', dest='args', nargs='+',
default=[], help='Set arguments supplied to function.')
parser.add_argument('--info', '-i', dest='info', action='store_true',
help='''With both --object and --function args prints\
documentation for function.''')
args = parser.parse_args()
if args.obj_spec:
obj = get_object(args.obj_spec)
if args.function == "help":
print_commands("-o " + " ".join(args.obj_spec), obj)
elif args.info:
print(get_formated_info(obj, args.function[0],
args=True, short=False))
else:
ret = run_function(obj, args.function[0], args.args)
if ret is not None:
pprint.pprint(ret)
else:
print_commands("-o " + " ".join(args.obj_spec), obj)
else:
print_base_objects()
if __name__ == "__main__":
main()
| flacjacket/qtile | libqtile/scripts/qtile_cmd.py | Python | mit | 6,464 | 0.000464 |
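# Editor's note: a small illustration of what get_formated_info() returns;
# DummyObj and its docstring are assumptions made for demonstration only.
class DummyObj(object):
    def doc(self, cmd):
        return "prev_layout(group)\nSwitch to the previous layout."

dummy = DummyObj()
print(get_formated_info(dummy, 'prev_layout'))
# -> '* Switch to the previous layout.'     (args present, short form)
print(get_formated_info(dummy, 'prev_layout', args=True, short=False))
# -> '(group) Switch to the previous layout.'
print(get_formated_info(dummy, 'prev_layout', args=False))
# -> ' Switch to the previous layout.'      (leading space: only the right side is stripped)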
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2019 Athanasios Theocharis <athatheoc@gmail.com>
# This was made under ESA Summer of Code in Space 2019
# by Athanasios Theocharis, mentored by Daniel Estevez
#
# This file is part of gr-satellites
#
# SPDX-License-Identifier: GPL-3.0-or-later
#
from gnuradio import gr
import pmt
from . import telemetry
from . import space_packet
import array
class telemetry_packet_reconstruction(gr.basic_block):
"""
docstring for block telemetry_packet_reconstruction
"""
def __init__(self):
gr.basic_block.__init__(self,
name="telemetry_packet_reconstruction",
in_sig=[],
out_sig=[])
self.space_packet = []
self.length_of_space_packet = 0
##################################################
# Blocks
##################################################
self.message_port_register_in(pmt.intern('in'))
self.set_msg_handler(pmt.intern('in'), self.handle_msg)
self.message_port_register_out(pmt.intern('out'))
def handle_msg(self, msg_pmt):
msg = pmt.cdr(msg_pmt)
if not pmt.is_u8vector(msg):
print("[ERROR] Received invalid message type. Expected u8vector")
return
packet = bytearray(pmt.u8vector_elements(msg))
size = len(packet) - 6
try:
header = telemetry.PrimaryHeader.parse(packet[:])
if header.ocf_flag == 1:
size -= 4
        except Exception:
print("Could not decode telemetry packet")
return
parsed = telemetry.FullPacket.parse(packet[:], size=size)
payload = parsed.payload
        # The number 6 is used here because that's the length of the Primary Header.
        # TODO: Add a variable for this
while len(payload) != 0:
if len(self.space_packet) < 6:
left = 6 - len(self.space_packet)
self.space_packet.extend(payload[:left])
payload = payload[left:]
if len(self.space_packet) >= 6:
self.length_of_space_packet = space_packet.PrimaryHeader.parse(bytearray(self.space_packet)).data_length
left = self.length_of_space_packet + 6 - len(self.space_packet)
self.space_packet.extend(payload[:left])
payload = payload[left:]
if 6 + self.length_of_space_packet == len(self.space_packet):
self.sendPacket()
def sendPacket(self):
packet = self.space_packet
packet = array.array('B', packet[:])
packet = pmt.cons(pmt.PMT_NIL, pmt.init_u8vector(len(packet), packet))
self.message_port_pub(pmt.intern('out'), packet)
self.length_of_space_packet = 0
self.space_packet = []
| daniestevez/gr-satellites | python/ccsds/telemetry_packet_reconstruction.py | Python | gpl-3.0 | 2,817 | 0.00284 |
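# Editor's note: a minimal wiring sketch for the block above. It assumes a
# GNU Radio environment with gr-satellites installed and that the block is
# exported as satellites.telemetry_packet_reconstruction (the exact import
# path may differ); some upstream source must supply telemetry-frame PDUs.
from gnuradio import gr, blocks
import satellites

class reassemble_tb(gr.top_block):
    def __init__(self):
        gr.top_block.__init__(self)
        self.reassembler = satellites.telemetry_packet_reconstruction()
        self.sink = blocks.message_debug()
        # PDU flow: <telemetry frame source> -> reassembler -> message_debug
        self.msg_connect((self.reassembler, 'out'), (self.sink, 'print'))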
from django.db import models
from django.contrib.contenttypes.models import ContentType
from django.contrib.contenttypes import generic
from selectreverse.utils import ReverseManager
class Building(models.Model):
number = models.IntegerField()
owners = models.ManyToManyField('Owner')
objects = models.Manager()
reversemanager = ReverseManager({'apartments': 'apartment_set', 'parkings': 'parking_set', 'xowners': 'owners'})
class Apartment(models.Model):
number = models.IntegerField()
building = models.ForeignKey(Building)
def __unicode__(self):
return u'%s' % self.number
class Parking(models.Model):
number = models.IntegerField()
building = models.ForeignKey(Building)
class Owner(models.Model):
name = models.CharField(max_length = 50)
objects = models.Manager()
reversemanager = ReverseManager({'buildings': 'building_set'})
class TaggedItem(models.Model):
tag = models.SlugField()
content_type = models.ForeignKey(ContentType)
object_id = models.PositiveIntegerField()
content_object = generic.GenericForeignKey('content_type', 'object_id')
class Bookmark(models.Model):
url = models.URLField()
tags = generic.GenericRelation(TaggedItem)
objects = models.Manager()
reversemanager = ReverseManager({'gtags': 'tags'})
| japsu/django-selectreverse | selectreverse/tests/models.py | Python | bsd-2-clause | 1,338 | 0.008969 |
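# Editor's note: for comparison only -- the same batched reverse lookups
# using stock Django's prefetch_related (not part of selectreverse):
buildings = (Building.objects
             .prefetch_related('apartment_set', 'parking_set', 'owners'))
for building in buildings:
    apartments = building.apartment_set.all()  # no extra query per building
    owners = building.owners.all()             # served from the same prefetch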
# -*- coding: utf-8 -*-
# Copyright 2014 Telefonica Investigación y Desarrollo, S.A.U
#
# This file is part of FI-WARE project.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
#
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# See the License for the specific language governing permissions and
# limitations under the License.
#
# For those usages not covered by the Apache version 2.0 License please
# contact with opensource@tid.es
__author__ = 'jfernandez'
from lettuce import world, before, after
from commons.terrain_steps import setup_feature, setup_scenario, setup_outline, tear_down
from commons.provisioning_steps import ProvisioningSteps
from commons.rest_utils import RestUtils
from commons.configuration import CONFIG_VM_HOSTNAME
from commons.fabric_utils import execute_chef_client, execute_puppet_agent, remove_chef_client_cert_file, \
execute_chef_client_stop, execute_puppet_agent_stop, remove_puppet_agent_cert_file, remove_all_generated_test_files, \
remove_puppet_agent_catalog
provisioning_steps = ProvisioningSteps()
rest_utils = RestUtils()
@before.each_feature
def before_each_feature(feature):
"""
Hook: Will be executed before each feature. Configures global vars and gets token from keystone.
    Launches agents (puppet and chef) in the target VM.
"""
setup_feature(feature)
@before.each_scenario
def before_each_scenario(scenario):
"""
Hook: Will be executed before each Scenario.
Setup Scenario: initialize World vars and launch agents (puppet and chef) in the target VM
"""
setup_scenario(scenario)
execute_chef_client()
execute_puppet_agent()
@before.outline
def before_outline(param1, param2, param3, param4):
""" Hook: Will be executed before each Scenario Outline. Same behaviour as 'before_each_scenario'"""
setup_outline(param1, param2, param3, param4)
remove_all_generated_test_files()
remove_puppet_agent_catalog()
@after.each_scenario
def after_each_scenario(scenario):
"""
    Hook: Will be executed after each scenario.
Removes Feature data and cleans the system. Kills all agents running in the VM.
"""
execute_chef_client_stop()
execute_puppet_agent_stop()
remove_chef_client_cert_file()
remove_puppet_agent_cert_file()
remove_all_generated_test_files()
remove_puppet_agent_catalog()
rest_utils.delete_node(world.headers, world.tenant_id, CONFIG_VM_HOSTNAME)
@after.all
def after_all(scenario):
"""
    Hook: Will be executed once after all scenarios.
Removes Feature data and cleans the system. Kills all agents running in the VM.
"""
after_each_scenario(scenario)
tear_down(scenario)
| telefonicaid/fiware-sdc | test/acceptance/e2e/uninstall_product/feature/terrain.py | Python | apache-2.0 | 3,017 | 0.002653 |
"""
mediatum - a multimedia content repository
Copyright (C) 2007 Arne Seifert <seiferta@in.tum.de>
Copyright (C) 2007 Matthias Kramm <kramm@in.tum.de>
Copyright (C) 2013 Iryna Feuerstein <feuersti@in.tum.de>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
from warnings import warn
class Context(object):
def __init__(self, field, value="", width=400, name="", lock=0, language=None, collection=None, container=None, user=None, ip=""):
if collection is not None:
warn("collections argument is deprecated, use container", DeprecationWarning)
if container is not None:
raise ValueError("container and collection cannot be used together")
container = collection
self.field = field
self.value = value
self.width = width
self.name = name
self.language = language
self.collection = container
self.container = container
self.ip = ip
self.user = user
self.lock = lock
class Metatype(object):
joiner = '\n'
@classmethod
def get_name4schema(cls):
name = cls.__name__
return name[2:] if name.startswith("m_") else name
def getEditorHTML(self, field, value="", width=400, lock=0, language=None, required=None):
return ""
def getSearchHTML(self, context):
None
def getFormattedValue(self, metafield, maskitem, mask, node, language, html):
None
def format_request_value_for_db(self, field, params, item, language=None):
"""Prepare value for the database from update request params.
:param field: associated field
:param params: dict which contains POST form values
:param item: field name prepended with language specifier. Is the same as field name for non-multilingual fields.
"""
        # just fetch the unmodified value from the params dict
return params.get(item)
def getMaskEditorHTML(self, field, metadatatype=None, language=None):
return ""
@classmethod
def isContainer(cls):
return False
def isFieldType(self):
return True
def getName(self):
return ""
def getInformation(self):
return {"moduleversion": "1.0"}
''' events '''
def event_metafield_changed(self, node, field):
None
def get_input_pattern(self, field):
return ''
def get_input_title(self, field):
return ''
def get_input_placeholder(self, field):
return ''
def is_required(self, required):
"""
It's necessary to return different types in order for the template to render properly.
Since required='' or even required='False' is still interpreted as a required field,
it needs to be completely removed from the template where applicable. TAL attributes
are removed if they evaluate to None.
@param required: 0 or 1
@return: str True or None object
"""
if required:
return 'True'
else:
return None
charmap = [
[' ', '160', 'no-break space'],
['&', '38', 'ampersand'],
['"', '34', 'quotation mark'],
# finance
['¢', '162', 'cent sign'],
['€', '8364', 'euro sign'],
['£', '163', 'pound sign'],
['¥', '165', 'yen sign'],
# signs
['©', '169', 'copyright sign'],
['®', '174', 'registered sign'],
['™', '8482', 'trade mark sign'],
['‰', '8240', 'per mille sign'],
['µ', '181', 'micro sign'],
['·', '183', 'middle dot'],
['•', '8226', 'bullet'],
['…', '8230', 'three dot leader'],
['′', '8242', 'minutes / feet'],
['″', '8243', 'seconds / inches'],
['§', '167', 'section sign'],
['¶', '182', 'paragraph sign'],
['ß', '223', 'sharp s / ess-zed'],
# quotations
['‹', '8249', 'single left-pointing angle quotation mark'],
['›', '8250', 'single right-pointing angle quotation mark'],
['«', '171', 'left pointing guillemet'],
['»', '187', 'right pointing guillemet'],
['‘', '8216', 'left single quotation mark'],
['’', '8217', 'right single quotation mark'],
['“', '8220', 'left double quotation mark'],
['”', '8221', 'right double quotation mark'],
['‚', '8218', 'single low-9 quotation mark'],
['„', '8222', 'double low-9 quotation mark'],
['<', '60', 'less-than sign'],
['>', '62', 'greater-than sign'],
['≤', '8804', 'less-than or equal to'],
['≥', '8805', 'greater-than or equal to'],
['–', '8211', 'en dash'],
['—', '8212', 'em dash'],
['¯', '175', 'macron'],
['‾', '8254', 'overline'],
['¤', '164', 'currency sign'],
['¦', '166', 'broken bar'],
['¨', '168', 'diaeresis'],
['¡', '161', 'inverted exclamation mark'],
    ['¿', '191', 'inverted question mark'],
['ˆ', '710', 'circumflex accent'],
['˜', '732', 'small tilde'],
['°', '176', 'degree sign'],
['−', '8722', 'minus sign'],
['±', '177', 'plus-minus sign'],
['÷', '247', 'division sign'],
['⁄', '8260', 'fraction slash'],
['×', '215', 'multiplication sign'],
['¹', '185', 'superscript one'],
['²', '178', 'superscript two'],
['³', '179', 'superscript three'],
['¼', '188', 'fraction one quarter'],
['½', '189', 'fraction one half'],
['¾', '190', 'fraction three quarters'],
# math / logical
['ƒ', '402', 'function / florin'],
['∫', '8747', 'integral'],
['∑', '8721', 'n-ary sumation'],
['∞', '8734', 'infinity'],
['√', '8730', 'square root'],
['∼', '8764', 'similar to'],
['≅', '8773', 'approximately equal to'],
['≈', '8776', 'almost equal to'],
['≠', '8800', 'not equal to'],
['≡', '8801', 'identical to'],
['∈', '8712', 'element of'],
['∉', '8713', 'not an element of'],
['∋', '8715', 'contains as member'],
['∏', '8719', 'n-ary product'],
['∧', '8743', 'logical and'],
['∨', '8744', 'logical or'],
['¬', '172', 'not sign'],
['∩', '8745', 'intersection'],
['∪', '8746', 'union'],
['∂', '8706', 'partial differential'],
['∀', '8704', 'for all'],
['∃', '8707', 'there exists'],
['∅', '8709', 'diameter'],
['∇', '8711', 'backward difference'],
['∗', '8727', 'asterisk operator'],
['∝', '8733', 'proportional to'],
['∠', '8736', 'angle'],
# undefined
['´', '180', 'acute accent'],
['¸', '184', 'cedilla'],
['ª', '170', 'feminine ordinal indicator'],
['º', '186', 'masculine ordinal indicator'],
['†', '8224', 'dagger'],
['‡', '8225', 'double dagger'],
# alphabetical special chars
['À', '192', 'A - grave'],
['Á', '193', 'A - acute'],
['Â', '194', 'A - circumflex'],
['Ã', '195', 'A - tilde'],
['Ä', '196', 'A - diaeresis'],
['Å', '197', 'A - ring above'],
['Æ', '198', 'ligature AE'],
['Ç', '199', 'C - cedilla'],
['È', '200', 'E - grave'],
['É', '201', 'E - acute'],
['Ê', '202', 'E - circumflex'],
['Ë', '203', 'E - diaeresis'],
['Ì', '204', 'I - grave'],
['Í', '205', 'I - acute'],
['Î', '206', 'I - circumflex'],
['Ï', '207', 'I - diaeresis'],
['Ð', '208', 'ETH'],
['Ñ', '209', 'N - tilde'],
['Ò', '210', 'O - grave'],
['Ó', '211', 'O - acute'],
['Ô', '212', 'O - circumflex'],
['Õ', '213', 'O - tilde'],
['Ö', '214', 'O - diaeresis'],
['Ø', '216', 'O - slash'],
['Œ', '338', 'ligature OE'],
['Š', '352', 'S - caron'],
['Ù', '217', 'U - grave'],
['Ú', '218', 'U - acute'],
['Û', '219', 'U - circumflex'],
['Ü', '220', 'U - diaeresis'],
['Ý', '221', 'Y - acute'],
['Ÿ', '376', 'Y - diaeresis'],
['Þ', '222', 'THORN'],
['à', '224', 'a - grave'],
['á', '225', 'a - acute'],
['â', '226', 'a - circumflex'],
['ã', '227', 'a - tilde'],
['ä', '228', 'a - diaeresis'],
['å', '229', 'a - ring above'],
['æ', '230', 'ligature ae'],
['ç', '231', 'c - cedilla'],
['è', '232', 'e - grave'],
['é', '233', 'e - acute'],
['ê', '234', 'e - circumflex'],
['ë', '235', 'e - diaeresis'],
['ì', '236', 'i - grave'],
['í', '237', 'i - acute'],
['î', '238', 'i - circumflex'],
['ï', '239', 'i - diaeresis'],
['ð', '240', 'eth'],
['ñ', '241', 'n - tilde'],
['ò', '242', 'o - grave'],
['ó', '243', 'o - acute'],
['ô', '244', 'o - circumflex'],
['õ', '245', 'o - tilde'],
['ö', '246', 'o - diaeresis'],
['ø', '248', 'o slash'],
['œ', '339', 'ligature oe'],
['š', '353', 's - caron'],
['ù', '249', 'u - grave'],
['ú', '250', 'u - acute'],
['û', '251', 'u - circumflex'],
['ü', '252', 'u - diaeresis'],
['ý', '253', 'y - acute'],
['þ', '254', 'thorn'],
['ÿ', '255', 'y - diaeresis'],
['Α', '913', 'Alpha'],
['Β', '914', 'Beta'],
['Γ', '915', 'Gamma'],
['Δ', '916', 'Delta'],
['Ε', '917', 'Epsilon'],
['Ζ', '918', 'Zeta'],
['Η', '919', 'Eta'],
['Θ', '920', 'Theta'],
['Ι', '921', 'Iota'],
['Κ', '922', 'Kappa'],
['Λ', '923', 'Lambda'],
['Μ', '924', 'Mu'],
['Ν', '925', 'Nu'],
['Ξ', '926', 'Xi'],
['Ο', '927', 'Omicron'],
['Π', '928', 'Pi'],
['Ρ', '929', 'Rho'],
['Σ', '931', 'Sigma'],
['Τ', '932', 'Tau'],
['Υ', '933', 'Upsilon'],
['Φ', '934', 'Phi'],
['Χ', '935', 'Chi'],
['Ψ', '936', 'Psi'],
['Ω', '937', 'Omega'],
['α', '945', 'alpha'],
['β', '946', 'beta'],
['γ', '947', 'gamma'],
['δ', '948', 'delta'],
['ε', '949', 'epsilon'],
['ζ', '950', 'zeta'],
['η', '951', 'eta'],
['θ', '952', 'theta'],
['ι', '953', 'iota'],
['κ', '954', 'kappa'],
['λ', '955', 'lambda'],
['μ', '956', 'mu'],
['ν', '957', 'nu'],
['ξ', '958', 'xi'],
['ο', '959', 'omicron'],
['π', '960', 'pi'],
['ρ', '961', 'rho'],
['ς', '962', 'final sigma'],
['σ', '963', 'sigma'],
['τ', '964', 'tau'],
['υ', '965', 'upsilon'],
['φ', '966', 'phi'],
['χ', '967', 'chi'],
['ψ', '968', 'psi'],
['ω', '969', 'omega'],
# symbols
['ℵ', '8501', 'alef symbol'],
['ϖ', '982', 'pi symbol'],
['ℜ', '8476', 'real part symbol'],
['ϑ', '977', 'theta symbol'],
['ϒ', '978', 'upsilon - hook symbol'],
['℘', '8472', 'Weierstrass p'],
['ℑ', '8465', 'imaginary part'],
# arrows
['←', '8592', 'leftwards arrow'],
['↑', '8593', 'upwards arrow'],
['→', '8594', 'rightwards arrow'],
['↓', '8595', 'downwards arrow'],
['↔', '8596', 'left right arrow'],
['↵', '8629', 'carriage return'],
['⇐', '8656', 'leftwards double arrow'],
['⇑', '8657', 'upwards double arrow'],
['⇒', '8658', 'rightwards double arrow'],
['⇓', '8659', 'downwards double arrow'],
['⇔', '8660', 'left right double arrow'],
['∴', '8756', 'therefore'],
['⊂', '8834', 'subset of'],
['⊃', '8835', 'superset of'],
['⊄', '8836', 'not a subset of'],
['⊆', '8838', 'subset of or equal to'],
['⊇', '8839', 'superset of or equal to'],
['⊕', '8853', 'circled plus'],
['⊗', '8855', 'circled times'],
['⊥', '8869', 'perpendicular'],
['⋅', '8901', 'dot operator'],
['⌈', '8968', 'left ceiling'],
['⌉', '8969', 'right ceiling'],
['⌊', '8970', 'left floor'],
['⌋', '8971', 'right floor'],
['⟨', '9001', 'left-pointing angle bracket'],
['⟩', '9002', 'right-pointing angle bracket'],
['◊', '9674', 'lozenge'],
['♠', '9824', 'black spade suit'],
['♣', '9827', 'black club suit'],
['♥', '9829', 'black heart suit'],
['♦', '9830', 'black diamond suit'],
[' ', '8194', 'en space'],
[' ', '8195', 'em space'],
[' ', '8201', 'thin space'],
['‌', '8204', 'zero width non-joiner'],
['‍', '8205', 'zero width joiner'],
['‎', '8206', 'left-to-right mark'],
['‏', '8207', 'right-to-left mark'],
['­', '173', 'soft hyphen']
]
| mediatum/mediatum | core/metatype.py | Python | gpl-3.0 | 13,830 | 0.000723 |
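# Editor's note: a minimal concrete metatype sketch built on the base class
# above. The m_shorttext name, the emitted HTML, and field.getName() are
# illustrative assumptions, not part of the original module.
class m_shorttext(Metatype):

    def getEditorHTML(self, field, value="", width=400, lock=0, language=None,
                      required=None):
        # is_required() returns 'True' or None, so optional fields drop the
        # attribute entirely (mirroring the TAL behaviour described above).
        required_attr = ' required' if self.is_required(required) else ''
        return '<input type="text" name="%s" value="%s" style="width:%dpx"%s>' % (
            field.getName(), value, width, required_attr)

    def getName(self):
        return "fieldtype_shorttext"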
# This file is part of PARPG.
# PARPG is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# PARPG is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with PARPG. If not, see <http://www.gnu.org/licenses/>.
from random import randrange
from fife import fife
import base
from moving import MovingAgentBehaviour
class NPCBehaviour(MovingAgentBehaviour):
"""This is a basic NPC behaviour"""
def __init__(self, parent=None):
super(NPCBehaviour, self).__init__()
self.parent = parent
self.state = base._AGENT_STATE_NONE
self.pc = None
self.target_loc = None
# hard code these for now
self.distRange = (2, 4)
# these are parameters to lower the rate of wandering
# wander rate is the number of "IDLEs" before a wander step
# this could be set for individual NPCs at load time
# or thrown out altogether.
# HACK: 09.Oct.2011 Beliar
# I increased the wander rate to 900 since the idle method
# gets called way more often now.
self.wanderCounter = 0
self.wanderRate = 9
def getTargetLocation(self):
"""@rtype: fife.Location
@return: NPC's position"""
x = self.getX()
y = self.getY()
if self.state == base._AGENT_STATE_WANDER:
""" Random Target Location """
l = [0, 0]
for i in range(len(l)):
sign = randrange(0, 2)
dist = randrange(self.distRange[0], self.distRange[1])
if sign == 0:
dist *= -1
l[i] = dist
x += l[0]
y += l[1]
# Random walk is
# rl = randint(-1, 1);ud = randint(-1, 1);x += rl;y += ud
l = fife.Location(self.agent.getLocation())
l.setLayerCoordinates(fife.ModelCoordinate(x, y))
return l
def onInstanceActionFinished(self, instance, action):
"""What the NPC does when it has finished an action.
Called by the engine and required for InstanceActionListeners.
@type instance: fife.Instance
@param instance: self.agent
@type action: ???
@param action: ???
@return: None"""
if self.state == base._AGENT_STATE_WANDER:
self.target_loc = self.getTargetLocation()
MovingAgentBehaviour.onInstanceActionFinished(self, instance, action)
def idle(self):
"""Controls the NPC when it is idling. Different actions
based on the NPC's state.
@return: None"""
if self.state == base._AGENT_STATE_NONE:
self.state = base._AGENT_STATE_IDLE
self.animate('stand')
elif self.state == base._AGENT_STATE_IDLE:
if self.wanderCounter > self.wanderRate:
self.wanderCounter = 0
self.state = base._AGENT_STATE_WANDER
else:
self.wanderCounter += 1
self.state = base._AGENT_STATE_NONE
self.target_loc = self.getTargetLocation()
self.animate('stand')
elif self.state == base._AGENT_STATE_WANDER:
self.wander(self.target_loc)
self.state = base._AGENT_STATE_NONE
elif self.state == base._AGENT_STATE_TALK:
self.animate('stand', self.pc.getLocation())
def wander(self, location):
"""Nice slow movement for random walking.
@type location: fife.Location
@param location: Where the NPC will walk to.
@return: None"""
self.agent.move('walk', location, self.speed)
coords = location.getMapCoordinates()
| parpg/parpg | parpg/behaviours/npc.py | Python | gpl-3.0 | 4,169 | 0.002639 |
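# Editor's note: the wander-target arithmetic from getTargetLocation(),
# isolated from FIFE for a quick standalone check; dist_range mirrors
# self.distRange above.
from random import randrange

def wander_offset(x, y, dist_range=(2, 4)):
    offset = [0, 0]
    for i in range(2):
        dist = randrange(dist_range[0], dist_range[1])  # 2 or 3 tiles
        if randrange(0, 2) == 0:
            dist *= -1  # coin flip for direction on each axis
        offset[i] = dist
    return x + offset[0], y + offset[1]

print(wander_offset(10, 10))  # e.g. (8, 13)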
class Solution(object):
    def nextPermutation(self, nums):
        # Find the pivot: the rightmost index where nums[i - 1] < nums[i].
        cursor = -1
        for i in range(len(nums) - 1, 0, -1):
            if nums[i - 1] < nums[i]:
                cursor = i - 1
                break
        # Swap the pivot with the rightmost element greater than it, then
        # sort the suffix to get the smallest permutation above the current
        # one. If no pivot exists (cursor == -1), nums[cursor] is the last
        # element and this same loop ends up producing the sorted array.
        for i in range(len(nums) - 1, -1, -1):
            if nums[i] > nums[cursor]:
                nums[i], nums[cursor] = nums[cursor], nums[i]
                nums[cursor + 1:] = sorted(nums[cursor + 1:])
                break
| luosch/leetcode | python/Next Permutation.py | Python | mit | 459 | 0.002179 |
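# Editor's note: quick in-place checks for the solution above; the expected
# values follow the next-permutation definition (wrap to ascending order
# when the input is fully descending).
s = Solution()
for nums, expected in [([1, 2, 3], [1, 3, 2]),
                       ([3, 2, 1], [1, 2, 3]),
                       ([1, 1, 5], [1, 5, 1])]:
    s.nextPermutation(nums)
    assert nums == expected, (nums, expected)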
# -*- coding: utf-8 -*-
from .grammar import SchemaGrammar
from .sqlite_grammar import SQLiteSchemaGrammar
from .postgres_grammar import PostgresSchemaGrammar
from .mysql_grammar import MySqlSchemaGrammar
| MakarenaLabs/Orator-Google-App-Engine | orator/schema/grammars/__init__.py | Python | mit | 206 | 0 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2014 SWIPO Project
#
# Authors (this file):
# Stefan Schinkel <stefan.schinkel@gmail.com>
"""
Provides sanity checks for basic parallel and serial circuits.
"""
import numpy as np
import networkx as nx
from pyunicorn import ResNetwork
from .ResistiveNetwork_utils import *
debug = 0
""" Test for basic sanity, parallel and serial circiuts
"""
def testParallelTrivial():
r""" Trivial parallel case:
a) 0 --- 1 --- 2
/---- 3 ---\
b) 0 --- 1 --- 2
c) /---- 3 ---\
0 --- 1 --- 2
\____ 4 ___/
ER(a) = 2*ER(b) = 3*ER(c)
"""
nws = []
# construct nw1
idI, idJ = [0, 1], [1, 2]
nws.append(makeNW(idI, idJ, [.1]))
# construct nw2
idI += [0, 3]
idJ += [3, 2]
nws.append(makeNW(idI, idJ, [.1]))
# nw3
idI += [0, 4]
idJ += [4, 2]
nws.append(makeNW(idI, idJ, [.1]))
ER = []
for nw in nws:
rnw = ResNetwork(nw)
ER.append(rnw.effective_resistance(0, 2))
assert abs(ER[0]/2-ER[1]) < .1E-6
assert abs(ER[0]/3-ER[2]) < .1E-6
def testParallelLessTrivial():
""" Less Trivial Parallel Case:
|--- 1 --- 0
a) 2 |
|--- 3 ----4
|--- 1 --- 0 --- 5 --- |
b) 2 | | 7
|--- 3 ----4 --- 6 --- |
|---- 8 ----------- |
| | |
| |----------| |
| | |
|--- 1 --- 0 --- 5 --- | | |
c) 2 | | 7 | 9
|--- 3 ----4 --- 6 --- | | |
| | |
| ----------| |
| | |
|---- 10 -----------|
"""
nws = []
idI = [0, 1, 1, 2, 3]
idJ = [1, 2, 3, 3, 4]
nws.append(makeNW(idI, idJ, [1]*len(idI)))
idI.extend([0, 5, 5, 6, 6])
idJ.extend([5, 6, 7, 7, 4])
nws.append(makeNW(idI, idJ, [1]*len(idI)))
idI.extend([0, 8, 8, 9, 10])
idJ.extend([8, 9, 10, 10, 4])
nws.append(makeNW(idI, idJ, [1]*len(idI)))
ER = []
Gs = []
for nw in nws:
rnw = ResNetwork(nw)
ER.append(rnw.effective_resistance(0, 4))
# Gs.append(nx.DiGraph(nw))
# # showGraphs(Gs)
# # s = ''
# # for i,e in enumerate(ER):
# # s = s + "NW{:d} {:.3f}\t".format(i,e)
# # print "Effective resistances (0,2)\n %s" % (s)
assert abs(ER[0]/2-ER[1]) < .1E-6
assert abs(ER[0]/3-ER[2]) < .1E-6
# """ Less Trivial Parallel Case:
# /--- 1 --- 0
# a) 2 |
# \--- 3 ----4
# /--- 1 --- 0 --- 5 --- \
# b) 2 | | 7
# \--- 3 ----4 --- 6 --- /
# / --- 8 ----------- \
# | \
# /--- 1 --- 0 --- 5 --- \ \
# c) 2 7 9
# \--- 3 ----4 --- 6 --- / /
# | /
# \ --- 10 -----------/
# """
# nws =[]
# #construct nw1
# idI = [0,1,1,2,3]
# idJ = [1,2,3,3,4]
# val = [.1] * 5
# nws.append(makeNW(idI,idJ,[.1]*len(idI))[0])
# idI.extend([0,5,6,7])
# idJ.extend([5,6,7,4])
# val.extend( val * 6)
# nws.append(makeNW(idI,idJ,[.1]*len(idI))[0])
# idI.extend([0,8,9,10])
# idJ.extend([8,9,10,4])
# val.extend( val * 4)
# nws.append(makeNW(idI,idJ,val)[0])
# ER = []
# for nw in nws:
# rnw = ResNetwork(nw)
# ER.append( rnw.effective_resistance(0,4))
# s = ''
# for i,e in enumerate(ER):
# s = s + "NW{:d} {:.3f}\t".format(i,e)
# print "Effective resistances (0,2)\n %s" % (s)
# assert abs(ER[0]/2-ER[1]) < .1E-6
# assert abs(ER[0]/3-ER[2]) < .1E-6
def testParallelRandom():
""" 50 random parallel cases
"""
N = 10
p = .7
runs = 0
while runs < 50:
G = nx.fast_gnp_random_graph(N, p)
a = 0
b = G.number_of_nodes()-1
try:
nx.shortest_path(G, source=a, target=b)
        except nx.NetworkXNoPath:
continue
i, j = [], []
for xx in G.edges():
i.append(xx[0])
j.append(xx[1])
# %.1f values for resistance
val = np.round(np.random.ranf(len(i))*100)/10
# and test
nw1 = makeNW(i, j, val)
nw2 = parallelCopy(nw1, a, b)
ER1 = ResNetwork(nw1).effective_resistance(a, b)
ER2 = ResNetwork(nw2).effective_resistance(a, b)
# assertion
assert (ER1/2-ER2) < 1E-6
# increment runs
runs += 1
def testSerialTrivial():
"""Trivial serial test case
a) 0 --- 1 --- 2
b) 0 --- 1 --- 2 --- 3 --- 4
ER(a)/2 = ER(b)
"""
# construct nw1
idI = [0, 1]
idJ = [1, 2]
val = [1, 1]
nw1 = np.zeros((3, 3))
G1 = nx.DiGraph()
for i, j, v in zip(idI, idJ, val):
nw1[i, j] = v
nw1[j, i] = v
# construct nw2
idI = idI + [2, 3]
idJ = idJ + [3, 4]
val = val + [1, 1]
nw2 = np.zeros((5, 5))
for i, j, v in zip(idI, idJ, val):
nw2[i, j] = v
nw2[j, i] = v
# init ResNetworks
rnw1 = ResNetwork(nw1)
rnw2 = ResNetwork(nw2)
ER1 = rnw1.effective_resistance(0, 2)
ER2 = rnw2.effective_resistance(0, 4)
print "Effective resistances (0,2)"
print "NW1 %.3f\tNW2 %.3f\t 2*NW1 = %.3f" % (ER1, ER2, 2*ER1)
assert (ER1*2-ER2) < 1E-6
def testSerialRandom():
""" 50 Random serial test cases
"""
N = 10
p = .7
runs = 0
while runs < 50:
# a random graph
G = nx.fast_gnp_random_graph(N, p)
try:
nx.shortest_path(G, source=0, target=N-1)
        except nx.NetworkXNoPath:
continue
# convert to plain ndarray
nw1 = nx2nw(G)
# copy and join network
nw2 = serialCopy(nw1)
# compute effective resistance
ER1 = ResNetwork(
nw1, silence_level=3).effective_resistance(0, len(nw1)-1)
ER2 = ResNetwork(
nw2, silence_level=3).effective_resistance(0, len(nw2)-1)
# increment runs
runs += 1
# assertion
print ER1*2-ER2
assert (ER1*2-ER2) < 1E-6
| leftaroundabout/pyunicorn | tests/test_core/TestResitiveNetwork-circuits.py | Python | bsd-3-clause | 6,379 | 0 |
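# Editor's note: the series rule checked directly, without the helper
# functions above -- two unit resistors in a path 0-1-2 give twice the
# effective resistance of either single edge.
import numpy as np
from pyunicorn import ResNetwork

nw = np.zeros((3, 3))
nw[0, 1] = nw[1, 0] = 1
nw[1, 2] = nw[2, 1] = 1
rnw = ResNetwork(nw)
assert abs(rnw.effective_resistance(0, 2)
           - 2 * rnw.effective_resistance(0, 1)) < 1e-6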
import requests
import json
NL_KEY = '*'
TL_KEY = '*'
def translate(text):
tmp_payload = {"q": text, "target": "en"}
s = requests.post('https://translation.googleapis.com/language/translate/v2?key=' + TL_KEY, json=tmp_payload)
data = s.json()['data']['translations'][0]
return data['translatedText']
def sentiment(text):
payload = {"encodingType": "UTF8", "document": {"type": "PLAIN_TEXT", "content": text}}
r = requests.post('https://language.googleapis.com/v1/documents:analyzeSentiment?key=' + NL_KEY,
json=payload)
return r.json()
def generate_sentiments():
with open('resources/new_output.json') as json_data:
objs = json.load(json_data)
newobjs = []
amt = len(objs)
for obj in objs:
comments = obj['comments']
scores = []
tot_score = 0
print "for " + obj['hardmob_link'] + ":"
try:
for comment in comments:
traducao = translate(comment)
sent = sentiment(traducao)
score = sent['documentSentiment']['score']
scores.append(score)
tot_score += score
obj['scores'] = scores
obj['avg_score'] = tot_score/int(len(scores))
newobjs.append(obj)
        except Exception as e:
            print "error found: " + str(e)
print "remaining: " + str(amt)
amt -= 1
return newobjs
def main():
newjson = generate_sentiments()
with open('resources/scored_info.json', 'w') as json_file:
json.dump(newjson, json_file, indent=4, sort_keys=True, ensure_ascii=True)
if __name__ == '__main__':
main()
| thiagoald/hardmob_information_extractor | sources/generate_sentiments.py | Python | mit | 1,731 | 0.006932 |
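# Editor's note: the shape of the analyzeSentiment response consumed by
# sentiment() above, per the public Cloud Natural Language docs; the values
# are made up for illustration.
example_response = {
    "documentSentiment": {"magnitude": 0.8, "score": 0.4},
    "language": "en",
    "sentences": [],
}
print(example_response["documentSentiment"]["score"])  # 0.4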
'''
Created on Mar 8, 2013
@author: Gary
'''
import unittest
from housemonitor.outputs.zigbee.zigbeecontrol import ZigBeeControl
from housemonitor.outputs.zigbee.zigbeeoutputstep import ZigBeeOutputStep
from housemonitor.outputs.zigbee.zigbeeoutputthread import ZigBeeOutputThread
from housemonitor.lib.hmqueue import HMQueue
from housemonitor.lib.constants import Constants
from mock import Mock, MagicMock, patch
from housemonitor.lib.common import Common
import logging.config
class Test( unittest.TestCase ):
logger = logging.getLogger( 'UnitTest' )
def setUp( self ):
logging.config.fileConfig( "unittest_logging.conf" )
def tearDown( self ):
pass
def test_logger_name( self ):
queue = HMQueue()
zig = ZigBeeOutputStep( queue )
self.assertEqual( Constants.LogKeys.outputsZigBee, zig.logger_name )
def test_topic_name( self ):
queue = HMQueue()
zig = ZigBeeOutputStep( queue )
self.assertEqual( Constants.TopicNames.ZigBeeOutput, zig.topic_name )
def test_step( self ):
value = 5
data = {Constants.DataPacket.device: 'device',
Constants.DataPacket.port: 'port',
Constants.DataPacket.arrival_time: 'arrival_time'}
listeners = ['a', 'b', 'c']
package = {'data': data, 'value': value}
queue = MagicMock( spec=HMQueue )
zig = ZigBeeOutputStep( queue )
v, d, l = zig.step( value, data, listeners )
        # step() is expected to forward the assembled package to the queue
        queue.transmit.assert_called_once_with( package )
self.assertEqual( value, v )
self.assertEqual( data, d )
self.assertEqual( listeners, l )
if __name__ == "__main__":
# import sys;sys.argv = ['', 'Test.testName']
unittest.main()
| gary-pickens/HouseMonitor | housemonitor/outputs/zigbee/test/zigbeeoutputstep_test.py | Python | mit | 1,738 | 0.021864 |
import logging
from django.conf import settings
from django.contrib.auth.models import User
from django.core.management import call_command
from django.core.management.base import BaseCommand
from tallessa.utils import log_get_or_create
# usually you should getLogger(__name__) but we are not under the tallessa namespace right now
logger = logging.getLogger('tallessa')
class Command(BaseCommand):
def handle(self, *args, **options):
management_commands = [
# (('collectstatic',), dict(interactive=False)),
(('migrate',), dict()),
]
if settings.DEBUG:
management_commands.append((('setup_default_team',), dict()))
for pargs, opts in management_commands:
logger.info("** Running: %s", pargs[0])
call_command(*pargs, **opts)
if settings.DEBUG:
user, created = User.objects.get_or_create(
username='mahti',
defaults=dict(
first_name='Markku',
last_name='Mahtinen',
is_staff=True,
is_superuser=True,
),
)
if created:
user.set_password('mahti')
user.save()
log_get_or_create(logger, user, created)
| tallessa/tallessa-backend | tallessa_backend/management/commands/setup.py | Python | agpl-3.0 | 1,317 | 0.000759 |
# -*- coding: utf-8 -*-
#
# allocations/entries.py is part of MetaDoc (Client).
#
# All of MetaDoc is free software: you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# MetaDoc is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with MetaDoc. If not, see <http://www.gnu.org/licenses/>.
#
import metaelement
class AllocationEntry(metaelement.MetaElement):
"""AllocationEntry - Allocation for specific projects. """
xml_tag_name = "all_entry"
def __init__(self, account_nmb, volume, metric, all_class, period):
""" Defines attributes for all_entry XML elements.
@param account_nmb: Account number for allocation.
@type account_nmb: String
        @param volume: The amount allocated, measured in units of metric.
        @type volume: String
        @param metric: The unit of measurement for volume.
@type metric: String
@param all_class: Allocations class.
@type all_class: String, either "pri" or "nonpri", for prioritized and
non-prioritized allocation.
@param period: Period of allocation.
@type period: String on form "YYYY.P" where P is the year's period.
"""
attributes = {
'account_nmb': account_nmb,
'volume': volume,
'metric': metric,
'all_class': all_class,
'period': period,
}
self.legal_metric = ('hours', 'mb',)
self.legal_all_class = ('pri', 'nonpri',)
super(AllocationEntry, self).__init__(AllocationEntry.xml_tag_name, attributes)
def clean_metric(self, metric):
"""Checks for legal values of metric.
Raises L{IllegalAttributeValueError} on illegal metric value.
@param metric: Metric for allocation
@type metric: String
@return: String
"""
self._clean_allowed_values(metric, self.legal_metric, 'metric', self.xml_tag_name, False)
return metric
def clean_all_class(self, all_class):
"""Checks for legal values of all_class.
Raises L{IllegalAttributeValueError} on illegal all_class value.
@param all_class: Allocation class of allocation
@type all_class: String
@return: String
"""
self._clean_allowed_values(all_class, self.legal_all_class, 'all_class', self.xml_tag_name, False)
return all_class
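# Usage sketch (hypothetical values; attribute cleaning happens in the
# MetaElement machinery on construction):
#   entry = AllocationEntry(account_nmb='NN1234K', volume='100000',
#                           metric='hours', all_class='pri', period='2013.1')
# Passing e.g. metric='days' raises IllegalAttributeValueError, since only
# the values in legal_metric ('hours', 'mb') are accepted.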
| henrikau/metadoc | client/allocations/entries.py | Python | gpl-3.0 | 2,825 | 0.004248 |
import sys, os, json, requests, threading
from urlparse import urljoin
from pyinotify import WatchManager, Notifier, EventsCodes, ProcessEvent
import converter, settings
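# The local `settings` module is expected to provide at least the values used
# below (names taken from their usage in this file; the concrete values here
# are only examples):
#   LRS_ENDPOINT = 'http://localhost:8000/xapi/'  # base URL of the LRS
#   LRS_USERNAME = 'key'                          # HTTP basic auth credentials
#   LRS_PASSWORD = 'secret'
#   PUBLISH_MAX_PAYLOAD = 10     # statements per batch before a forced publish
#   PUBLISH_MAX_WAIT_TIME = 60   # seconds to hold a partial batch before publishing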
class QueueManager:
'''
Manages the batching and publishing of statements in a thread-safe way.
'''
def __init__(self):
self.cache = []
self.cache_lock = threading.Lock()
self.publish_timer = None
def __del__(self):
self.destroy()
def destroy(self):
if self.publish_timer != None:
self.publish_timer.cancel()
def push(self, stmt):
'''Add a statement to the outgoing queue'''
# push statement to queue
with self.cache_lock:
self.cache.append(stmt)
# set timeout to publish statements
if len(self.cache) == 1 and settings.PUBLISH_MAX_WAIT_TIME > 0:
self.publish_timer = threading.Timer(settings.PUBLISH_MAX_WAIT_TIME, self.publish)
self.publish_timer.start()
# publish immediately if statement threshold is reached
if settings.PUBLISH_MAX_PAYLOAD <= len(self.cache):
self.publish()
def publish(self):
'''Publish the queued statements to the LRS and clear the queue'''
# make sure no new statements are added while publishing
with self.cache_lock:
# push statements to the lrs
url = urljoin(settings.LRS_ENDPOINT, 'statements')
r = requests.post(url, data=json.dumps(self.cache),
auth=(settings.LRS_USERNAME, settings.LRS_PASSWORD),
headers={'X-Experience-API-Version':'1.0.1', 'Content-Type':'application/json'})
print r.text
# clear cache and cancel any pending publish timeouts
self.cache = []
if self.publish_timer != None:
self.publish_timer.cancel()
class TailHandler(ProcessEvent):
'''
Parse incoming log events, convert to xapi, and add to publish queue
'''
MASK = EventsCodes.OP_FLAGS['IN_MODIFY']
def __init__(self, filename):
# prepare file input stream
self.ifp = open(filename, 'r', 1)
self.ifp.seek(0,2)
self.publish_queue = QueueManager()
self.raceBuffer = ''
def __enter__(self):
return self
def __exit__(self, type, value, tb):
self.publish_queue.destroy()
def process_IN_MODIFY(self,event):
'''Handles any changes to the log file'''
# read all new contents from the end of the file
buff = self.raceBuffer + self.ifp.read()
# if there's no newline at end of file, we probably read it before edx finished writing
# add read contents to a buffer and return
if len(buff) != 0 and buff[-1] != '\n':
self.raceBuffer = buff
else:
self.raceBuffer = ''
evts = [i for i in buff.split('\n') if len(i) != 0]
for e in evts:
try:
evtObj = json.loads(e)
except ValueError as err:
print 'Could not parse JSON for', e
continue
xapi = converter.to_xapi(evtObj)
if xapi != None:
for i in xapi:
self.publish_queue.push(i)
print '{} - {} {} {}'.format(i['timestamp'], i['actor']['name'], i['verb']['display']['en-US'], i['object']['definition']['name']['en-US'])
def watch(watch_file):
'''
Watch the given file for changes
'''
wm = WatchManager()
with TailHandler(watch_file) as th:
notifier = Notifier(wm, th)
wdd = wm.add_watch(watch_file, TailHandler.MASK)
notifier.loop()
# flush queue before exiting
th.publish_queue.publish()
print 'Exiting'
if __name__ == '__main__':
log_path = os.path.abspath(sys.argv[1]) if len(sys.argv) > 1 else '/edx/var/log/tracking.log'
print 'Watching file', log_path
watch(log_path)
| daafgo/Edx_bridge | xapi-bridge/__main__.py | Python | apache-2.0 | 3,433 | 0.034081 |
#!/usr/bin/env python2.7
"""
@package: pyHerkulex
@name: herkulex.py
@author: Achu Wilson (achuwilson@gmail.com), Akhil Chandran (akhilchandran.t.r@gmail.com)
@version: 0.1
This is a python library for interfacing the Herkulex range of smart
servo motors manufactured by Dongbu Robotics.
The library was created by Achu Wilson (mailto:achu@sastrarobotics.com)
for the internal projects of Sastra Robotics
This free software is distributed under the GNU General Public License.
See http://www.gnu.org/licenses/gpl.html for details.
For usage of this code for commercial purposes contact Sastra Robotics
India Pvt. Ltd. (mailto:contact@sastrarobotics.com)
"""
import time
try:
# PySerial Module
import serial
except:
raise ImportError("couldnt find pySerial")
# Commands
EEP_WRITE_REQ = 0x01
EEP_READ_REQ = 0x02
RAM_WRITE_REQ = 0x03
RAM_READ_REQ = 0x04
I_JOG_REQ = 0x05
S_JOG_REQ = 0x06
STAT_REQ = 0x07
ROLLBACK_REQ = 0x08
REBOOT_REQ = 0x09
EEP_WRITE_ACK = 0x41
EEP_READ_ACK = 0x42
RAM_WRITE_ACK = 0x43
RAM_READ_ACK = 0x44
I_JOG_ACK = 0x45
S_JOG_ACK = 0x46
STAT_ACK = 0x47
ROLLBACK_ACK = 0x48
REBOOT_ACK = 0x49
#Addresses
MODEL_NO1_EEP = 0
MODEL_NO2_EEP = 1
VERSION1_EEP = 2
VERSION2_EEP = 3
BAUD_RATE_EEP = 4
SERVO_ID_EEP = 6
SERVO_ID_RAM = 0
ACK_POLICY_EEP = 7
ACK_POLICY_RAM = 1
ALARM_LED_POLICY_EEP = 8
ALARM_LED_POLICY_RAM = 2
TORQUE_POLICY_EEP = 9
TORQUE_POLICY_RAM = 3
MAX_TEMP_EEP = 11
MAX_TEMP_RAM = 5
MIN_VOLTAGE_EEP = 12
MIN_VOLTAGE_RAM = 6
MAX_VOLTAGE_EEP = 13
MAX_VOLTAGE_RAM = 7
ACCELERATION_RATIO_EEP = 14
ACCELERATION_RATIO_RAM = 8
MAX_ACCELERATION_TIME_EEP = 15
MAX_ACCELERATION_TIME_RAM = 9
DEAD_ZONE_EEP = 16
DEAD_ZONE_RAM = 10
SATURATOR_OFFSET_EEP = 17
SATURATOR_OFFSET_RAM = 11
SATURATOR_SLOPE_EEP = 18
SATURATOR_SLOPE_RAM = 12
PWM_OFFSET_EEP = 20
PWM_OFFSET_RAM = 14
MIN_PWM_EEP = 21
MIN_PWM_RAM = 15
MAX_PWM_EEP = 22
MAX_PWM_RAM = 16
OVERLOAD_PWM_THRESHOLD_EEP = 24
OVERLOAD_PWM_THRESHOLD_RAM = 18
MIN_POSITION_EEP = 26
MIN_POSITION_RAM = 20
MAX_POSITION_EEP = 28
MAX_POSITION_RAM = 22
POSITION_KP_EEP = 30
POSITION_KP_RAM = 24
POSITION_KD_EEP = 32
POSITION_KD_RAM = 26
POSITION_KI_EEP = 34
POSITION_KI_RAM = 28
POSITION_FEEDFORWARD_GAIN1_EEP = 36
POSITION_FEEDFORWARD_GAIN1_RAM = 30
POSITION_FEEDFORWARD_GAIN2_EEP = 38
POSITION_FEEDFORWARD_GAIN2_RAM = 32
VELOCITY_KP_EEP = 40
VELOCITY_KP_RAM = 34
VELOCITY_KI_EEP = 42
VELOCITY_KI_RAM = 36
LED_BLINK_PERIOD_EEP = 44
LED_BLINK_PERIOD_RAM = 38
ADC_FAULT_CHECK_PERIOD_EEP = 45
ADC_FAULT_CHECK_PERIOD_RAM = 39
PACKET_GARBAGE_CHECK_PERIOD_EEP = 46
PACKET_GARBAGE_CHECK_PERIOD_RAM = 40
STOP_DETECTION_PERIOD_EEP = 47
STOP_DETECTION_PERIOD_RAM = 41
OVERLOAD_DETECTION_PERIOD_EEP = 48
OVERLOAD_DETECTION_PERIOD_RAM = 42
STOP_THRESHOLD_EEP = 49
STOP_THRESHOLD_RAM = 43
INPOSITION_MARGIN_EEP = 50
INPOSITION_MARGIN_RAM = 44
CALIBRATION_DIFF_LOW_EEP = 52
CALIBRATION_DIFF_LOW_RAM = 46
CALIBRATION_DIFF_UP_EEP = 53
CALIBRATION_DIFF_UP_RAM = 47
STATUS_ERROR_RAM = 48
STATUS_DETAIL_RAM = 49
AUX1_RAM = 50
TORQUE_CONTROL_RAM = 52
LED_CONTROL_RAM = 53
VOLTAGE_RAM = 54
TEMPERATURE_RAM = 55
CURRENT_CONTROL_MODE_RAM = 56
TICK_RAM = 57
CALIBRATED_POSITION_RAM = 58
ABSOLUTE_POSITION_RAM = 60
DIFFERENTIAL_POSITION_RAM = 62
PWM_RAM = 64
ABSOLUTE_SECOND_POSITION_RAM = 66
ABSOLUTE_GOAL_POSITION_RAM = 68
ABSOLUTE_DESIRED_TRAJECTORY_POSITION = 70
DESIRED_VELOCITY_RAM = 72
BYTE1 = 0x01
BYTE2 = 0x02
BROADCAST_ID = 0xFE
SERPORT = None
def connect(portname, baudrate):
""" Connect to the Herkulex bus
Connect to serial port to which Herkulex Servos are attatched
Args:
portname (str): The serial port name
baudrate (int): The serial port baudrate
Raises:
SerialException: Error occured while opening serial port
"""
global SERPORT
try:
SERPORT = serial.Serial(portname, baudrate, timeout = 0.1)
except:
raise HerkulexError("could not open the serial port")
def close():
""" Close the Serial port
Properly close the serial port before exiting the application
Raises:
SerialException: Error occured while closing serial port
"""
try:
SERPORT.close()
except:
raise HerkulexError("could not close the serial port")
def checksum1(data, stringlength):
""" Calculate Checksum 1
Calculate the ckecksum 1 required for the herkulex data packet
Args:
data (list): the data of which checksum is to be calculated
stringlength (int): the length of the data
Returns:
int: The calculated checksum 1
"""
value_buffer = 0
for count in range(0, stringlength):
value_buffer = value_buffer ^ data[count]
return value_buffer&0xFE
def checksum2(data):
""" Calculate Checksum 2
Calculate the ckecksum 2 required for the herkulex data packet
Args:
data (int): the data of which checksum is to be calculated
Returns:
int: The calculated checksum 2
"""
return (~data)&0xFE
def send_data(data):
""" Send data to herkulex
Paketize & write the packet to serial port
Args:
data (list): the data to be sent
Raises:
SerialException: Error occured while opening serial port
"""
datalength = len(data)
csm1 = checksum1(data, datalength)
csm2 = checksum2(csm1)
data.insert(0, 0xFF)
data.insert(1, 0xFF)
data.insert(5, csm1)
data.insert(6, csm2)
stringtosend = ""
for i in range(len(data)):
byteformat = '%02X' % data[i]
stringtosend = stringtosend + "\\x" + byteformat
try:
SERPORT.write(stringtosend.decode('string-escape'))
#print stringtosend
except:
raise HerkulexError("could not communicate with motors")
def clear_errors():
""" Clears the errors register of all Herkulex servos
Args:
none
"""
data = []
data.append(0x0B)
data.append(BROADCAST_ID)
data.append(RAM_WRITE_REQ)
data.append(STATUS_ERROR_RAM)
data.append(BYTE2)
data.append(0x00)
data.append(0x00)
send_data(data)
def scale(input_value, input_min, input_max, out_min, out_max):
""" scale a value from one range to another
"""
# Figure out how 'wide' each range is
input_span = input_max - input_min
output_span = out_max - out_min
# Convert the left range into a 0-1 range (float)
valuescaled = float(input_value - input_min) / float(input_span)
# Convert the 0-1 range into a value in the right range.
return out_min + (valuescaled * output_span)
def scan_servos():
"""Scan for the herkulex servos connected
This function will scan for all the herkulex servos connected
to the bus.
Args:
none
Returns:
list: a list of tuples of the form [(id, model)]
"""
servos = []
for servo_id in range(0x00, 0xFE):
model = get_model(servo_id)
if model:
servos += [(servo_id, model)]
return servos
def get_model(servoid):
""" Get the servo model
This function gets the model of the herkules servo, provided its id
Args:
servoid(int): the id of the servo
Returns:
int: an integer corresponding to the model number
0x06 for DRS-602
0x04 for DRS-402
0x02 for DRS-202
"""
data = []
data.append(0x09)
data.append(servoid)
data.append(EEP_READ_REQ)
data.append(MODEL_NO1_EEP)
data.append(BYTE1)
send_data(data)
rxdata = []
try:
rxdata = SERPORT.read(12)
return ord(rxdata[9])&0xFF
except:
raise HerkulexError("could not communicate with motors")
class servo:
""" The servo class
This class handles the interface to the herkulex smart servos
"""
def __init__(self, servoid):
""" servo class initialization
Args:
servoid(int): the id of the servo
"""
self.servoid = servoid
self.servomodel = get_model(servoid)
def get_model(self):
""" Get the servo model
This function gets the model of the herkules servo, provided its id
Args:
none
Returns:
int: an integer corresponding to the model number
0x06 for DRS-602
0x04 for DRS-402
0x02 for DRS-202
"""
data = []
data.append(0x09)
data.append(self.servoid)
data.append(EEP_READ_REQ)
data.append(MODEL_NO1_EEP)
data.append(BYTE1)
send_data(data)
rxdata = []
try:
rxdata = SERPORT.read(12)
return ord(rxdata[9])&0xFF
except:
raise HerkulexError("could not communicate with motors")
def get_servo_status(self):
""" Get the error status of servo
This function gets the error status (if any) of the servo
Args:
none
Returns:
int: an integer corresponding to the servo status
* refer datasheet
"""
data = []
data.append(0x09)
data.append(self.servoid)
data.append(RAM_READ_REQ)
data.append(STATUS_ERROR_RAM)
data.append(BYTE1)
send_data(data)
rxdata = []
try:
rxdata = SERPORT.read(12)
return ord(rxdata[9])&0xFF
except:
raise HerkulexError("could not communicate with motors")
def get_servo_status_detail(self):
""" Get the detailed error status of servo
This function gets the detailed error status (if any) of the servo
Args:
none
Returns:
int: an integer corresponding to the servo status
* refer datasheet
"""
data = []
data.append(0x09)
data.append(self.servoid)
data.append(RAM_READ_REQ)
data.append(STATUS_DETAIL_RAM)
data.append(BYTE1)
send_data(data)
rxdata = []
try:
rxdata = SERPORT.read(12)
return ord(rxdata[9])&0xFF
        except:
raise HerkulexError("could not communicate with motors")
def set_led(self, colorcode):
""" Set the LED Color of Herkulex
Args:
colorcode (int): The code for colors
(0x00-OFF
0x02-BLUE
0x03-CYAN
0x04-RED
0x05-ORANGE
0x06-VIOLET
0x07-WHITE
"""
data = []
data.append(0x0A)
data.append(self.servoid)
data.append(RAM_WRITE_REQ)
data.append(LED_CONTROL_RAM)
data.append(0x01)
data.append(colorcode)
send_data(data)
def brake_on(self):
""" Set the Brakes of Herkulex
In braked mode, position control and velocity control
will not work, enable torque before that
Args:
none
"""
data = []
data.append(0x0A)
data.append(self.servoid)
data.append(RAM_WRITE_REQ)
data.append(TORQUE_CONTROL_RAM)
data.append(0x01)
data.append(0x40)
send_data(data)
def torque_off(self):
""" Set the torques of Herkulex to zero
In this mode, position control and velocity control
will not work, enable torque before that. Also the
servo shaft is freely movable
Args:
none
"""
data = []
data.append(0x0A)
data.append(self.servoid)
data.append(RAM_WRITE_REQ)
data.append(TORQUE_CONTROL_RAM)
data.append(0x01)
data.append(0x00)
send_data(data)
def torque_on(self):
""" Enable the torques of Herkulex
In this mode, position control and velocity control
will work.
Args:
none
"""
data = []
data.append(0x0A)
data.append(self.servoid)
data.append(RAM_WRITE_REQ)
data.append(TORQUE_CONTROL_RAM)
data.append(0x01)
data.append(0x60)
send_data(data)
def get_torque_state(self):
""" get the torque state of motor
Returns:
bool: True if torque is enabled, else False
"""
data = []
data.append(0x09)
data.append(self.servoid)
data.append(RAM_READ_REQ)
data.append(TORQUE_CONTROL_RAM)
data.append(BYTE2)
send_data(data)
rxdata = []
try:
rxdata = SERPORT.read(13)
return bool(ord(rxdata[9]))
        except:
raise HerkulexError("could not communicate with motors")
def set_servo_position(self, goalposition, goaltime, led):
""" Set the position of Herkulex
Enable torque using torque_on function before calling this
Args:
goalposition (int): The desired position, min-0 & max-1023
goaltime (int): the time taken to move from present
position to goalposition
led (int): the LED color
0x00 LED off
0x04 GREEN
0x08 BLUE
0x10 RED
"""
goalposition_msb = int(goalposition) >> 8
goalposition_lsb = int(goalposition) & 0xff
data = []
data.append(0x0C)
data.append(self.servoid)
data.append(I_JOG_REQ)
data.append(goalposition_lsb)
data.append(goalposition_msb)
data.append(led)
data.append(self.servoid)
data.append(goaltime)
send_data(data)
def get_servo_position(self):
""" Gets the current position of Herkulex
Args:
none
Returns:
int: position of the servo- 0 to 1023
Raises:
SerialException: Error occured while opening serial port
"""
#global SERPORT
data = []
data.append(0x09)
data.append(self.servoid)
data.append(RAM_READ_REQ)
data.append(CALIBRATED_POSITION_RAM)
data.append(BYTE2)
send_data(data)
rxdata = []
try:
rxdata = SERPORT.read(13)
if (self.servomodel==0x06) or (self.servomodel == 0x04):
return ((ord(rxdata[10])&0xff)<<8) | (ord(rxdata[9])&0xFF)
else:
#print ord(rxdata[9]),ord(rxdata[10])
return ((ord(rxdata[10])&0x03)<<8) | (ord(rxdata[9])&0xFF)
        except:
            raise HerkulexError("could not communicate with motors")
def get_servo_temperature(self):
""" Gets the current temperature of Herkulex
Args:
none
Returns:
int: the current temperature register of Herkulex
Raises:
SerialException: Error occured while opening serial port
"""
data = []
data.append(0x09)
data.append(self.servoid)
data.append(RAM_READ_REQ)
data.append(TEMPERATURE_RAM)
data.append(BYTE2)
send_data(data)
rxdata = []
try:
rxdata = SERPORT.read(13)
return ord(rxdata[9])
        except:
raise HerkulexError("Could not communicate with motors")
def get_servo_torque(self):
""" Gets the current torque of Herkulex
Gives the current load on the servo shaft.
It is actually the PWM value to the motors
Args:
none
Returns:
int: the torque on servo shaft. range from -1023 to 1023
Raises:
SerialException: Error occured while opening serial port
"""
data = []
data.append(0x09)
data.append(self.servoid)
data.append(RAM_READ_REQ)
data.append(PWM_RAM)
data.append(BYTE2)
send_data(data)
rxdata = []
try:
rxdata = SERPORT.read(13)
if ord(rxdata[10])<=127:
return ((ord(rxdata[10])&0x03)<<8) | (ord(rxdata[9])&0xFF)
else:
return (ord(rxdata[10])-0xFF)*0xFF + (ord(rxdata[9])&0xFF)-0xFF
        except:
raise HerkulexError("could not communicate with motors")
def set_servo_speed(self, goalspeed, led):
""" Set the Herkulex in continuous rotation mode
Args:
goalspeed (int): the speed , range -1023 to 1023
led (int): the LED color
0x00 LED off
0x04 GREEN
0x08 BLUE
0x10 RED
"""
        if goalspeed>0 :
            goalspeed_msb = (int(goalspeed)& 0xFF00) >> 8
            goalspeed_lsb = int(goalspeed) & 0xff
        elif goalspeed<0 :
            goalspeed_msb = 64+(255- ((int(goalspeed)& 0xFF00) >> 8))
            goalspeed_lsb = (abs(goalspeed) & 0xff)
        else:
            # goalspeed == 0: send a zero speed packet to stop the servo
            goalspeed_msb = 0
            goalspeed_lsb = 0
#print goalspeed_msb,goalspeed_lsb
data = []
data.append(0x0C)
data.append(self.servoid)
data.append(I_JOG_REQ)
data.append(goalspeed_lsb)
data.append(goalspeed_msb)
data.append(0x02|led)
data.append(self.servoid)
data.append(0x00)
send_data(data)
def set_position_p(self, pvalue):
""" Set the P gain of the position PID
Args:
pvalue (int): P value
"""
pvalue_msb = int(pvalue) >> 8
pvalue_lsb = int(pvalue) & 0xff
data = []
data.append(0x0B)
data.append(self.servoid)
data.append(RAM_WRITE_REQ)
data.append(POSITION_KP_RAM)
data.append(BYTE2)
data.append( pvalue_lsb)
data.append( pvalue_msb)
send_data(data)
def set_position_i(self, ivalue):
""" Set the I gain of the position PID
Args:
ivalue (int): I value
"""
ivalue_msb = int(ivalue) >> 8
ivalue_lsb = int(ivalue) & 0xff
data = []
data.append(0x0B)
data.append(self.servoid)
data.append(RAM_WRITE_REQ)
data.append(POSITION_KI_RAM)
data.append(BYTE2)
data.append(ivalue_lsb)
data.append(ivalue_msb)
send_data(data)
def set_position_d(self, dvalue):
""" Set the D gain of the PID
Args:
dvalue (int): D value
"""
dvalue_msb = int(dvalue) >> 8
dvalue_lsb = int(dvalue) & 0xff
data = []
data.append(0x0B)
data.append(self.servoid)
data.append(RAM_WRITE_REQ)
data.append(POSITION_KD_RAM)
data.append(BYTE2)
data.append(dvalue_lsb)
data.append(dvalue_msb)
send_data(data)
def get_position_p(self):
""" Get the P value of the current PID for position
"""
data = []
data.append(0x09)
data.append(self.servoid)
data.append(RAM_READ_REQ)
data.append(POSITION_KP_RAM)
data.append(BYTE2)
send_data(data)
rxdata = []
try:
rxdata = SERPORT.read(13)
return (ord(rxdata[10])*256)+(ord(rxdata[9])&0xff)
        except:
raise HerkulexError("could not communicate with motors")
def get_position_i(self):
""" Get the I value of the current PID for position
"""
data = []
data.append(0x09)
data.append(self.servoid)
data.append(RAM_READ_REQ)
data.append(POSITION_KI_RAM)
data.append(BYTE2)
send_data(data)
rxdata = []
try:
rxdata = SERPORT.read(13)
return (ord(rxdata[10])*256)+(ord(rxdata[9])&0xff)
        except:
raise HerkulexError("Could not read from motors")
def get_position_d(self):
""" Get the D value of the current PID for position
"""
data = []
data.append(0x09)
data.append(self.servoid)
data.append(RAM_READ_REQ)
data.append(POSITION_KD_RAM)
data.append(BYTE2)
send_data(data)
rxdata = []
try:
rxdata = SERPORT.read(13)
return (ord(rxdata[10])*256)+(ord(rxdata[9])&0xff)
        except:
raise HerkulexError("could not communicate with motors")
def save_pid_eeprom(self):
""" saves the PID values from RAM to EEPROM
"""
pval = self.get_position_p()
ival = self.get_position_i()
dval = self.get_position_d()
#write P value
pvalue_msb = int(pval) >> 8
pvalue_lsb = int(pval) & 0xff
data_p = []
data_p.append(0x0B)
data_p.append(self.servoid)
data_p.append(EEP_WRITE_REQ)
data_p.append(POSITION_KP_EEP)
data_p.append(BYTE2)
data_p.append( pvalue_lsb)
data_p.append( pvalue_msb)
send_data(data_p)
# write I value
ivalue_msb = int(ival) >> 8
ivalue_lsb = int(ival) & 0xff
data_i = []
data_i.append(0x0B)
data_i.append(self.servoid)
data_i.append(EEP_WRITE_REQ)
data_i.append(POSITION_KI_EEP)
data_i.append(BYTE2)
data_i.append( ivalue_lsb)
data_i.append( ivalue_msb)
send_data(data_i)
# write D value
dvalue_msb = int(dval) >> 8
dvalue_lsb = int(dval) & 0xff
data_d = []
data_d.append(0x0B)
data_d.append(self.servoid)
data_d.append(EEP_WRITE_REQ)
data_d.append(POSITION_KD_EEP)
data_d.append(BYTE2)
data_d.append( dvalue_lsb)
data_d.append( dvalue_msb)
send_data(data_d)
def set_servo_angle(self, goalangle, goaltime, led):
""" Sets the servo angle (in degrees)
Enable torque using torque_on function before calling this
Args:
goalangle (int): The desired angle in degrees, range -150 to 150
goaltime (int): the time taken to move from present
position to goalposition
led (int): the LED color
0x00 LED off
0x04 GREEN
0x08 BLUE
0x10 RED
"""
if (self.servomodel==0x06) or (self.servomodel == 0x04):
goalposition = scale(goalangle, -159.9, 159.6, 10627, 22129)
else:
goalposition = scale(goalangle, -150, 150, 21, 1002)
self.set_servo_position(goalposition, goaltime, led)
def get_servo_angle(self):
""" Gets the current angle of the servo in degrees
Args:
none
Returns:
int : the current servo angle
"""
servoposition = self.get_servo_position()
if (self.servomodel==0x06) or (self.servomodel == 0x04):
return scale(servoposition, 10627, 22129, -159.9, 159.6)
else:
return scale(servoposition, 21, 1002, -150, 150)
class HerkulexError(Exception):
""" Class to handle sservo errors
"""
def __init__(self, message):
super(HerkulexError, self).__init__(message)
self.message = message
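# Minimal usage sketch (the port name, baud rate and servo id below are
# assumptions; adjust them to your hardware):
#   import herkulex
#   herkulex.connect('/dev/ttyUSB0', 115200)
#   herkulex.clear_errors()
#   motor = herkulex.servo(0x01)
#   motor.torque_on()
#   motor.set_servo_angle(45, 50, 0x04)   # move to 45 degrees, green LED
#   print motor.get_servo_angle()
#   herkulex.close()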
| seiji56/rmaze-2016 | logic_code/last_ver/sim/herkulex.py | Python | gpl-3.0 | 23,289 | 0.004122 |
# Library for RTS2 JSON calls.
# (C) 2012 Petr Kubanek, Institute of Physics
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
import json
class Target:
def __init__(self,id,name=None):
self.id = id
self.name = name
def reload(self):
"""Load target data from JSON interface."""
		if self.id is None:
			self.name = None
			return
try:
data = json.getProxy().loadJson('/api/tbyid',{'id':self.id})['d'][0]
self.name = data[1]
except Exception,ex:
self.name = None
def get(name):
"""Return array with targets matching given name or target ID"""
try:
return json.getProxy().loadJson('/api/tbyid',{'id':int(name)})['d']
except ValueError:
return json.getProxy().loadJson('/api/tbyname',{'n':name})['d']
def create(name,ra,dec):
return json.getProxy().loadJson('/api/create_target', {'tn':name, 'ra':ra, 'dec':dec})['id']
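# Usage sketch (assumes the rts2.json proxy has been initialized; the target
# name and coordinates are examples):
#   tid = create('GRB123456', 123.45, -67.89)   # returns the new target id
#   t = Target(tid)
#   t.reload()                                  # fills in t.name from the API
#   matches = get('GRB123456')                  # match by name or numeric id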
| zguangyu/rts2 | python/rts2/target.py | Python | gpl-2.0 | 1,526 | 0.026212 |
import numpy as np
import unittest
from skimage._shared.testing import assert_array_almost_equal
from skimage._shared.testing import assert_equal
from scipy import ndimage as ndi
from skimage.feature import peak
np.random.seed(21)
class TestPeakLocalMax():
def test_trivial_case(self):
trivial = np.zeros((25, 25))
peak_indices = peak.peak_local_max(trivial, min_distance=1, indices=True)
assert type(peak_indices) is np.ndarray
assert not peak_indices # inherent boolean-ness of empty list
peaks = peak.peak_local_max(trivial, min_distance=1, indices=False)
assert (peaks.astype(np.bool) == trivial).all()
def test_noisy_peaks(self):
peak_locations = [(7, 7), (7, 13), (13, 7), (13, 13)]
# image with noise of amplitude 0.8 and peaks of amplitude 1
image = 0.8 * np.random.rand(20, 20)
for r, c in peak_locations:
image[r, c] = 1
peaks_detected = peak.peak_local_max(image, min_distance=5)
assert len(peaks_detected) == len(peak_locations)
for loc in peaks_detected:
assert tuple(loc) in peak_locations
def test_relative_threshold(self):
image = np.zeros((5, 5), dtype=np.uint8)
image[1, 1] = 10
image[3, 3] = 20
peaks = peak.peak_local_max(image, min_distance=1, threshold_rel=0.5)
assert len(peaks) == 1
assert_array_almost_equal(peaks, [(3, 3)])
def test_absolute_threshold(self):
image = np.zeros((5, 5), dtype=np.uint8)
image[1, 1] = 10
image[3, 3] = 20
peaks = peak.peak_local_max(image, min_distance=1, threshold_abs=10)
assert len(peaks) == 1
assert_array_almost_equal(peaks, [(3, 3)])
def test_constant_image(self):
image = 128 * np.ones((20, 20), dtype=np.uint8)
peaks = peak.peak_local_max(image, min_distance=1)
assert len(peaks) == 0
def test_flat_peak(self):
image = np.zeros((5, 5), dtype=np.uint8)
image[1:3, 1:3] = 10
peaks = peak.peak_local_max(image, min_distance=1)
assert len(peaks) == 4
def test_sorted_peaks(self):
image = np.zeros((5, 5), dtype=np.uint8)
image[1, 1] = 20
image[3, 3] = 10
peaks = peak.peak_local_max(image, min_distance=1)
assert peaks.tolist() == [[3, 3], [1, 1]]
image = np.zeros((3, 10))
image[1, (1, 3, 5, 7)] = (1, 3, 2, 4)
peaks = peak.peak_local_max(image, min_distance=1)
assert peaks.tolist() == [[1, 7], [1, 5], [1, 3], [1, 1]]
def test_num_peaks(self):
image = np.zeros((7, 7), dtype=np.uint8)
image[1, 1] = 10
image[1, 3] = 11
image[1, 5] = 12
image[3, 5] = 8
image[5, 3] = 7
assert len(peak.peak_local_max(image, min_distance=1, threshold_abs=0)) == 5
peaks_limited = peak.peak_local_max(
image, min_distance=1, threshold_abs=0, num_peaks=2)
assert len(peaks_limited) == 2
assert (1, 3) in peaks_limited
assert (1, 5) in peaks_limited
peaks_limited = peak.peak_local_max(
image, min_distance=1, threshold_abs=0, num_peaks=4)
assert len(peaks_limited) == 4
assert (1, 3) in peaks_limited
assert (1, 5) in peaks_limited
assert (1, 1) in peaks_limited
assert (3, 5) in peaks_limited
def test_num_peaks_and_labels(self):
image = np.zeros((7, 7), dtype=np.uint8)
labels = np.zeros((7, 7), dtype=np.uint8) + 20
image[1, 1] = 10
image[1, 3] = 11
image[1, 5] = 12
image[3, 5] = 8
image[5, 3] = 7
peaks_limited = peak.peak_local_max(
image, min_distance=1, threshold_abs=0, labels=labels)
assert len(peaks_limited) == 5
peaks_limited = peak.peak_local_max(
image, min_distance=1, threshold_abs=0, labels=labels, num_peaks=2)
assert len(peaks_limited) == 2
def test_num_peaks_tot_vs_labels_4quadrants(self):
np.random.seed(21)
image = np.random.uniform(size=(20, 30))
i, j = np.mgrid[0:20, 0:30]
labels = 1 + (i >= 10) + (j >= 15) * 2
result = peak.peak_local_max(image, labels=labels,
min_distance=1, threshold_rel=0,
indices=True,
num_peaks=np.inf,
num_peaks_per_label=2)
assert len(result) == 8
result = peak.peak_local_max(image, labels=labels,
min_distance=1, threshold_rel=0,
indices=True,
num_peaks=np.inf,
num_peaks_per_label=1)
assert len(result) == 4
result = peak.peak_local_max(image, labels=labels,
min_distance=1, threshold_rel=0,
indices=True,
num_peaks=2,
num_peaks_per_label=2)
assert len(result) == 2
def test_num_peaks3D(self):
# Issue 1354: the old code only hold for 2D arrays
# and this code would die with IndexError
image = np.zeros((10, 10, 100))
image[5,5,::5] = np.arange(20)
peaks_limited = peak.peak_local_max(image, min_distance=1, num_peaks=2)
assert len(peaks_limited) == 2
def test_reorder_labels(self):
image = np.random.uniform(size=(40, 60))
i, j = np.mgrid[0:40, 0:60]
labels = 1 + (i >= 20) + (j >= 30) * 2
labels[labels == 4] = 5
i, j = np.mgrid[-3:4, -3:4]
footprint = (i * i + j * j <= 9)
expected = np.zeros(image.shape, float)
for imin, imax in ((0, 20), (20, 40)):
for jmin, jmax in ((0, 30), (30, 60)):
expected[imin:imax, jmin:jmax] = ndi.maximum_filter(
image[imin:imax, jmin:jmax], footprint=footprint)
expected = (expected == image)
result = peak.peak_local_max(image, labels=labels, min_distance=1,
threshold_rel=0, footprint=footprint,
indices=False, exclude_border=False)
assert (result == expected).all()
def test_indices_with_labels(self):
image = np.random.uniform(size=(40, 60))
i, j = np.mgrid[0:40, 0:60]
labels = 1 + (i >= 20) + (j >= 30) * 2
i, j = np.mgrid[-3:4, -3:4]
footprint = (i * i + j * j <= 9)
expected = np.zeros(image.shape, float)
for imin, imax in ((0, 20), (20, 40)):
for jmin, jmax in ((0, 30), (30, 60)):
expected[imin:imax, jmin:jmax] = ndi.maximum_filter(
image[imin:imax, jmin:jmax], footprint=footprint)
expected = np.transpose(np.nonzero(expected == image))
expected = expected[np.argsort(image[tuple(expected.T)])[::-1]]
result = peak.peak_local_max(image, labels=labels, min_distance=1,
threshold_rel=0, footprint=footprint,
indices=True, exclude_border=False)
result = result[np.argsort(image[tuple(result.T)])[::-1]]
assert (result == expected).all()
def test_ndarray_indices_false(self):
nd_image = np.zeros((5, 5, 5))
nd_image[2, 2, 2] = 1
peaks = peak.peak_local_max(nd_image, min_distance=1, indices=False)
assert (peaks == nd_image.astype(np.bool)).all()
def test_ndarray_exclude_border(self):
nd_image = np.zeros((5, 5, 5))
nd_image[[1, 0, 0], [0, 1, 0], [0, 0, 1]] = 1
nd_image[3, 0, 0] = 1
nd_image[2, 2, 2] = 1
expected = np.zeros_like(nd_image, dtype=np.bool)
expected[2, 2, 2] = True
expectedNoBorder = nd_image > 0
result = peak.peak_local_max(nd_image, min_distance=2,
exclude_border=2, indices=False)
assert_equal(result, expected)
# Check that bools work as expected
assert_equal(
peak.peak_local_max(nd_image, min_distance=2,
exclude_border=2, indices=False),
peak.peak_local_max(nd_image, min_distance=2,
exclude_border=True, indices=False)
)
assert_equal(
peak.peak_local_max(nd_image, min_distance=2,
exclude_border=0, indices=False),
peak.peak_local_max(nd_image, min_distance=2,
exclude_border=False, indices=False)
)
# Check both versions with no border
assert_equal(
peak.peak_local_max(nd_image, min_distance=2,
exclude_border=0, indices=False),
expectedNoBorder,
)
assert_equal(
peak.peak_local_max(nd_image,
exclude_border=False, indices=False),
expectedNoBorder,
)
def test_empty(self):
image = np.zeros((10, 20))
labels = np.zeros((10, 20), int)
result = peak.peak_local_max(image, labels=labels,
footprint=np.ones((3, 3), bool),
min_distance=1, threshold_rel=0,
indices=False, exclude_border=False)
assert np.all(~ result)
def test_one_point(self):
image = np.zeros((10, 20))
labels = np.zeros((10, 20), int)
image[5, 5] = 1
labels[5, 5] = 1
result = peak.peak_local_max(image, labels=labels,
footprint=np.ones((3, 3), bool),
min_distance=1, threshold_rel=0,
indices=False, exclude_border=False)
assert np.all(result == (labels == 1))
def test_adjacent_and_same(self):
image = np.zeros((10, 20))
labels = np.zeros((10, 20), int)
image[5, 5:6] = 1
labels[5, 5:6] = 1
result = peak.peak_local_max(image, labels=labels,
footprint=np.ones((3, 3), bool),
min_distance=1, threshold_rel=0,
indices=False, exclude_border=False)
assert np.all(result == (labels == 1))
def test_adjacent_and_different(self):
image = np.zeros((10, 20))
labels = np.zeros((10, 20), int)
image[5, 5] = 1
image[5, 6] = .5
labels[5, 5:6] = 1
expected = (image == 1)
result = peak.peak_local_max(image, labels=labels,
footprint=np.ones((3, 3), bool),
min_distance=1, threshold_rel=0,
indices=False, exclude_border=False)
assert np.all(result == expected)
result = peak.peak_local_max(image, labels=labels,
min_distance=1, threshold_rel=0,
indices=False, exclude_border=False)
assert np.all(result == expected)
def test_not_adjacent_and_different(self):
image = np.zeros((10, 20))
labels = np.zeros((10, 20), int)
image[5, 5] = 1
image[5, 8] = .5
labels[image > 0] = 1
expected = (labels == 1)
result = peak.peak_local_max(image, labels=labels,
footprint=np.ones((3, 3), bool),
min_distance=1, threshold_rel=0,
indices=False, exclude_border=False)
assert np.all(result == expected)
def test_two_objects(self):
image = np.zeros((10, 20))
labels = np.zeros((10, 20), int)
image[5, 5] = 1
image[5, 15] = .5
labels[5, 5] = 1
labels[5, 15] = 2
expected = (labels > 0)
result = peak.peak_local_max(image, labels=labels,
footprint=np.ones((3, 3), bool),
min_distance=1, threshold_rel=0,
indices=False, exclude_border=False)
assert np.all(result == expected)
def test_adjacent_different_objects(self):
image = np.zeros((10, 20))
labels = np.zeros((10, 20), int)
image[5, 5] = 1
image[5, 6] = .5
labels[5, 5] = 1
labels[5, 6] = 2
expected = (labels > 0)
result = peak.peak_local_max(image, labels=labels,
footprint=np.ones((3, 3), bool),
min_distance=1, threshold_rel=0,
indices=False, exclude_border=False)
assert np.all(result == expected)
def test_four_quadrants(self):
image = np.random.uniform(size=(20, 30))
i, j = np.mgrid[0:20, 0:30]
labels = 1 + (i >= 10) + (j >= 15) * 2
i, j = np.mgrid[-3:4, -3:4]
footprint = (i * i + j * j <= 9)
expected = np.zeros(image.shape, float)
for imin, imax in ((0, 10), (10, 20)):
for jmin, jmax in ((0, 15), (15, 30)):
expected[imin:imax, jmin:jmax] = ndi.maximum_filter(
image[imin:imax, jmin:jmax], footprint=footprint)
expected = (expected == image)
result = peak.peak_local_max(image, labels=labels, footprint=footprint,
min_distance=1, threshold_rel=0,
indices=False, exclude_border=False)
assert np.all(result == expected)
def test_disk(self):
'''regression test of img-1194, footprint = [1]
Test peak.peak_local_max when every point is a local maximum
'''
image = np.random.uniform(size=(10, 20))
footprint = np.array([[1]])
result = peak.peak_local_max(image, labels=np.ones((10, 20)),
footprint=footprint,
min_distance=1, threshold_rel=0,
threshold_abs=-1, indices=False,
exclude_border=False)
assert np.all(result)
result = peak.peak_local_max(image, footprint=footprint, threshold_abs=-1,
indices=False, exclude_border=False)
assert np.all(result)
def test_3D(self):
image = np.zeros((30, 30, 30))
image[15, 15, 15] = 1
image[5, 5, 5] = 1
assert_equal(peak.peak_local_max(image, min_distance=10, threshold_rel=0),
[[15, 15, 15]])
assert_equal(peak.peak_local_max(image, min_distance=6, threshold_rel=0),
[[15, 15, 15]])
assert sorted(peak.peak_local_max(image, min_distance=10, threshold_rel=0,
exclude_border=False).tolist()) == \
[[5, 5, 5], [15, 15, 15]]
assert sorted(peak.peak_local_max(image, min_distance=5,
threshold_rel=0).tolist()) == \
[[5, 5, 5], [15, 15, 15]]
def test_4D(self):
image = np.zeros((30, 30, 30, 30))
image[15, 15, 15, 15] = 1
image[5, 5, 5, 5] = 1
assert_equal(peak.peak_local_max(image, min_distance=10, threshold_rel=0),
[[15, 15, 15, 15]])
assert_equal(peak.peak_local_max(image, min_distance=6, threshold_rel=0),
[[15, 15, 15, 15]])
assert sorted(peak.peak_local_max(image, min_distance=10, threshold_rel=0,
exclude_border=False).tolist()) == \
[[5, 5, 5, 5], [15, 15, 15, 15]]
assert sorted(peak.peak_local_max(image, min_distance=5,
threshold_rel=0).tolist()) == \
[[5, 5, 5, 5], [15, 15, 15, 15]]
def test_threshold_rel_default(self):
image = np.ones((5, 5))
image[2, 2] = 1
assert len(peak.peak_local_max(image)) == 0
image[2, 2] = 2
assert_equal(peak.peak_local_max(image), [[2, 2]])
image[2, 2] = 0
assert len(peak.peak_local_max(image, min_distance=0)) == image.size - 1
class TestProminentPeaks(unittest.TestCase):
def test_isolated_peaks(self):
image = np.zeros((15, 15))
x0, y0, i0 = (12, 8, 1)
x1, y1, i1 = (2, 2, 1)
x2, y2, i2 = (5, 13, 1)
image[y0, x0] = i0
image[y1, x1] = i1
image[y2, x2] = i2
out = peak._prominent_peaks(image)
assert len(out[0]) == 3
for i, x, y in zip (out[0], out[1], out[2]):
self.assertTrue(i in (i0, i1, i2))
self.assertTrue(x in (x0, x1, x2))
self.assertTrue(y in (y0, y1, y2))
def test_threshold(self):
image = np.zeros((15, 15))
x0, y0, i0 = (12, 8, 10)
x1, y1, i1 = (2, 2, 8)
x2, y2, i2 = (5, 13, 10)
image[y0, x0] = i0
image[y1, x1] = i1
image[y2, x2] = i2
out = peak._prominent_peaks(image, threshold=None)
assert len(out[0]) == 3
for i, x, y in zip (out[0], out[1], out[2]):
self.assertTrue(i in (i0, i1, i2))
self.assertTrue(x in (x0, x1, x2))
out = peak._prominent_peaks(image, threshold=9)
assert len(out[0]) == 2
for i, x, y in zip (out[0], out[1], out[2]):
self.assertTrue(i in (i0, i2))
self.assertTrue(x in (x0, x2))
self.assertTrue(y in (y0, y2))
def test_peaks_in_contact(self):
image = np.zeros((15, 15))
x0, y0, i0 = (8, 8, 1)
x1, y1, i1 = (7, 7, 1) # prominent peak
x2, y2, i2 = (6, 6, 1)
image[y0, x0] = i0
image[y1, x1] = i1
image[y2, x2] = i2
out = peak._prominent_peaks(image, min_xdistance=3,
min_ydistance=3,)
assert_equal(out[0], np.array((i1,)))
assert_equal(out[1], np.array((x1,)))
assert_equal(out[2], np.array((y1,)))
def test_input_labels_unmodified(self):
image = np.zeros((10, 20))
labels = np.zeros((10, 20), int)
image[5, 5] = 1
labels[5, 5] = 1
labelsin = labels.copy()
result = peak.peak_local_max(image, labels=labels,
footprint=np.ones((3, 3), bool),
min_distance=1, threshold_rel=0,
indices=False, exclude_border=False)
assert np.all(labels == labelsin)
| kenshay/ImageScript | ProgramData/SystemFiles/Python/Lib/site-packages/skimage/feature/tests/test_peak.py | Python | gpl-3.0 | 18,709 | 0.001604 |
# Created On: 2010-06-02
# Copyright 2015 Hardcoded Software (http://www.hardcoded.net)
#
# This software is licensed under the "GPLv3" License as described in the "LICENSE" file,
# which should be included with this package. The terms are also available at
# http://www.gnu.org/licenses/gpl-3.0.html
from PyQt5.QtCore import pyqtSignal
from PyQt5.QtWidgets import QWidget, QHBoxLayout, QRadioButton
from .util import horizontalSpacer
class RadioBox(QWidget):
def __init__(self, parent=None, items=None, spread=True, **kwargs):
# If spread is False, insert a spacer in the layout so that the items don't use all the
# space they're given but rather align left.
if items is None:
items = []
super().__init__(parent, **kwargs)
self._buttons = []
self._labels = items
self._selected_index = 0
self._spacer = horizontalSpacer() if not spread else None
self._layout = QHBoxLayout(self)
self._update_buttons()
#--- Private
def _update_buttons(self):
if self._spacer is not None:
self._layout.removeItem(self._spacer)
to_remove = self._buttons[len(self._labels):]
for button in to_remove:
self._layout.removeWidget(button)
button.setParent(None)
del self._buttons[len(self._labels):]
to_add = self._labels[len(self._buttons):]
for _ in to_add:
button = QRadioButton(self)
self._buttons.append(button)
self._layout.addWidget(button)
button.toggled.connect(self.buttonToggled)
if self._spacer is not None:
self._layout.addItem(self._spacer)
if not self._buttons:
return
for button, label in zip(self._buttons, self._labels):
button.setText(label)
self._update_selection()
def _update_selection(self):
self._selected_index = max(0, min(self._selected_index, len(self._buttons)-1))
selected = self._buttons[self._selected_index]
selected.setChecked(True)
#--- Event Handlers
def buttonToggled(self):
for i, button in enumerate(self._buttons):
if button.isChecked():
self._selected_index = i
self.itemSelected.emit(i)
break
#--- Signals
itemSelected = pyqtSignal(int)
#--- Properties
@property
def buttons(self):
return self._buttons[:]
@property
def items(self):
return self._labels[:]
@items.setter
def items(self, value):
self._labels = value
self._update_buttons()
@property
def selected_index(self):
return self._selected_index
@selected_index.setter
def selected_index(self, value):
self._selected_index = value
self._update_selection()
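# Usage sketch (assumes a running QApplication; the labels are examples):
#   box = RadioBox(items=['Filename', 'Contents'], spread=False)
#   box.itemSelected.connect(lambda index: print('selected', index))
#   box.selected_index = 1   # checks the second radio button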
| hsoft/qtlib | radio_box.py | Python | bsd-3-clause | 2,903 | 0.007234 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('clientes', '0002_auto_20150530_1324'),
]
operations = [
migrations.CreateModel(
name='PerfilCliente',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('nombre', models.CharField(max_length=50)),
('apellido', models.CharField(max_length=50)),
('documento', models.IntegerField()),
('telefono', models.IntegerField()),
('obrasocial', models.CharField(max_length=50)),
('email', models.EmailField(max_length=75)),
('cliente', models.OneToOneField(to=settings.AUTH_USER_MODEL)),
],
options={
},
bases=(models.Model,),
),
migrations.DeleteModel(
name='Cliente',
),
]
| acs-um/gestion-turnos | apps/clientes/migrations/0003_auto_20150605_2119.py | Python | mit | 1,158 | 0.000864 |
# -*- coding: utf-8 -*-
from django import template
from django_users.forms import CreateUserForm
#from django.utils.translation import ugettext as _
register = template.Library()
@register.inclusion_tag('users/templatetags/registration.html', takes_context = True)
def registration_form(context, form=None, *args, **kwargs):
if not form:
form = CreateUserForm
return {
'form': form,
}
| AdrianRibao/django-users | django_users/templatetags/users.py | Python | bsd-3-clause | 422 | 0.014218 |
import numpy as np
from milk.measures.nfoldcrossvalidation import nfoldcrossvalidation, foldgenerator
# Regression test in 2011-01-31
def test_getfoldgenerator():
labels = np.array([
7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
6, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
5, 5, 5, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 4, 4,
4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
4, 4, 4, 4, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
3, 3, 3, 3, 3, 3, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1
])
origins = np.array([
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3,
3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4,
4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 6, 6, 6, 6, 6, 6, 6,
6, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 8, 8, 8, 8, 8, 8,
8, 8, 8, 8, 8, 8, 8, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
9, 10, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 12, 12, 12, 12, 12,
12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12,
12, 12, 12, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13,
13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 14, 14, 14, 14, 14, 14,
14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14,
14, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15,
15, 15, 15, 15, 15, 15, 15, 15, 15, 16, 16, 16, 16, 16, 16, 16, 16,
16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17,
17, 17, 17, 17, 17, 17, 17, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18,
18, 18, 18, 18, 18, 18, 18, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19,
19, 19, 19, 19, 19, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21,
21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 22, 22, 22, 22,
22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22,
22, 22, 22, 22, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23,
23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 24, 24, 24, 24, 24,
24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 25, 25, 25, 25, 25, 25, 25,
25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25,
26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26,
26, 26, 26, 26, 26, 26, 26, 26, 27, 27, 27, 27, 27, 27, 27, 27, 27,
27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 28, 28,
28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28,
28, 28, 28, 28, 28, 28, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29,
29, 29, 29, 29, 29, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30,
30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 31, 31, 31, 31, 31,
31, 31, 31, 31, 31, 31, 31, 31, 31, 32, 32, 32, 32, 32, 32, 32, 32,
33, 33, 33, 33, 33, 33, 33, 33, 33, 33, 33, 33, 33, 34, 34, 34, 34,
34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 35, 35, 35, 35, 35, 35, 35,
35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 36,
36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 37, 37, 37, 37,
37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 38, 38, 38, 38, 38, 38, 38,
38, 38, 38, 38, 38, 38, 38, 38, 39, 39, 39, 39, 39, 39, 39, 39, 39,
39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 40, 40,
40, 40, 40, 40, 40, 40, 40, 40, 40, 41, 41, 41, 41, 41, 41, 41, 41,
41, 41, 41, 41, 41, 41, 41, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42,
42, 42, 42, 43, 43, 43, 43, 43, 43, 43, 43, 43, 43, 43, 43, 43, 43,
43, 43, 43, 43, 43, 43, 43, 43, 44, 44, 44, 44, 44, 44, 44, 44, 44,
44, 44, 44, 44, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45,
45, 45, 46, 46, 46, 46, 46, 46, 46, 46, 46, 46, 46, 46, 46, 46, 46,
47, 47, 47, 47, 47, 47, 47, 47, 47, 47, 47, 47, 47, 47, 47, 47, 47,
47, 47, 47, 47, 47, 47, 47, 47, 48, 48, 48, 48, 48, 48, 48, 48, 48,
48, 48, 49, 49, 49, 49, 49, 49, 49, 49, 49, 49, 49, 50, 50, 50, 50,
50, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50,
50, 50, 50, 51, 51, 51, 51, 51, 51, 51, 51, 51, 51, 51, 51, 51, 51,
51, 51, 51, 51, 51, 51, 51, 51, 51, 51, 52, 52, 52, 52, 52, 52, 52,
52, 52, 52, 52, 52, 52, 52, 52, 52, 52, 52, 52, 52, 52, 52, 52, 52,
52, 53, 53, 53, 53, 53, 53, 53, 53, 53, 53, 53, 53, 53, 53, 53, 53,
53, 53, 53, 53, 53, 53, 53, 53, 53, 54, 54, 54, 54, 54, 54, 54, 54,
54, 54, 54, 54, 54, 54, 54, 54, 54, 54, 54, 54, 54, 54, 54, 54, 55,
55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55,
55, 55, 55, 56, 56, 56, 56, 56, 56, 56, 56, 56, 56, 56, 56, 56, 56,
56, 56, 56, 56, 56, 56, 56, 56, 56, 56
])
for Tr,Te in foldgenerator(labels, 3, origins):
assert (np.array(labels)[Te] == 2).any()
for Tr,Te in foldgenerator(labels[::2], 3, origins[::2]):
assert (np.array(labels[::2])[Te] == 2).any()
for Tr,Te in foldgenerator(labels[::3], 3, origins[::3]):
assert (np.array(labels[::3])[Te] == 2).any()
def test_getfoldgenerator_simplified():
# This is a cut-down version of the above
labels = np.zeros(45, bool)
labels[35:] = 1
origins = np.array([0, 0, 1, 2, 2, 2, 3, 4, 4, 4, 5, 6, 6, 6,
6, 7, 7, 7, 8, 8, 8, 8, 9, 9, 9, 10, 10, 10, 10, 11, 11,
11, 12, 12, 12, 13, 13, 14, 14, 14, 14, 15, 15, 15, 16])
for Tr,Te in foldgenerator(labels, 3, origins):
assert np.any(labels[Te])
for Tr,Te in foldgenerator(labels, 4, origins):
assert np.any(labels[Te])
def test_getfoldgenerator_simplified_2():
# This is a cut-down version of the above
labels = np.zeros(44, bool)
labels[35:] = 1
origins = np.array([0, 0, 1, 2, 2, 2, 3, 4, 4, 4, 5, 6, 6, 6,
6, 7, 7, 7, 8, 8, 8, 8, 9, 9, 9, 10, 10, 10, 10, 11, 11,
11, 12, 12, 12, 13, 13, 14, 14, 14, 14, 15, 15, 15])
for Tr,Te in foldgenerator(labels, 3, origins):
assert np.any(labels[Te])
| pombredanne/milk | milk/tests/test_nfoldcrossvalidation_regression.py | Python | mit | 10,055 | 0.001392 |
# -*- coding: utf-8 -*-
# Copyright Tom SF Haines
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import posixpath
import random
import math
from bin.shared import ray_cast
from bin.shared import csp
from direct.actor.Actor import Actor
from direct.interval.IntervalGlobal import *
from direct.interval.ActorInterval import ActorInterval
from panda3d.core import *
from panda3d.ode import *
class SimpleWeapon:
"""Provides a simple weapon system - not very sophisticaed, but good enough to test shooting things."""
def __init__(self,manager,xml):
self.gunView = render.attachNewNode('gun-view')
self.ray = None
self.reload(manager,xml)
def destroy(self):
self.gunView.removeNode()
if self.ray!=None:
self.ray.destroy()
def reload(self,manager,xml):
# Get the path to load weapons from...
basePath = manager.get('paths').getConfig().find('weapons').get('path')
# Variables to manage the firing state (Used G36 as reference for defaults.)...
bullet = xml.find('bullet')
if bullet!=None:
self.bulletRate = float(bullet.get('rate',1.0/12.5))
self.bulletSpeed = float(bullet.get('speed',920.0))
self.bulletWeight = float(bullet.get('mass',0.004))
else:
self.bulletRate = 1.0/12.5
self.bulletSpeed = 920.0
self.bulletWeight = 0.004
# Determine the weapon meshes path...
self.meshPath = posixpath.join(basePath, xml.find('egg').get('file'))
# Get the camera interface, so we can zoom in when the player aims...
self.camera = manager.get(xml.find('camera').get('plugin'))
# Create our gun node - both the gun and the ray used for shooting track this - allows for gun jitter, kick back etc...
parent = xml.find('parent')
self.gunView.reparentTo(manager.get(parent.get('plugin')).getNode(parent.get('node')))
# Create a ray cast to detect what the player is looking at... and what will be shot...
self.space = manager.get('ode').getSpace()
if self.ray!=None:
self.ray.destroy()
self.ray = OdeRayGeom(100.0)
self.ray.setCategoryBits(BitMask32(0xfffffffe))
self.ray.setCollideBits(BitMask32(0xfffffffe))
# Get all the stuff we need to do the muzzle flash particle effect...
flash = xml.find('muzzle_flash')
self.flashManager = manager.get(flash.get('plugin'))
self.flashEffect = flash.get('effect')
    self.flashBone = flash.get('bone') # Will be swapped out for the actual node later.
self.flashPos = csp.getPos(flash.get('pos'))
# Get all the stuff we need to do the bullet hit sparks effect...
sparks = xml.find('sparks')
self.sparksManager = manager.get(sparks.get('plugin'))
self.sparksEffect = sparks.get('effect')
# Create a quaternion that rotates +ve z to +ve y - used to point it in the weapon direction rather than up...
self.zToY = Quat()
self.zToY.setFromAxisAngle(-90.0,Vec3(1.0,0.0,0.0))
# State for the animation...
self.state = False # False==casual, True==aim.
self.nextState = False
# Firing state...
self.firing = False # True if the trigger is being held.
self.triggerTime = 0.0 # How long the trigger has been held for, so we know when to eject ammo.
# For bullet holes
bh = xml.find('bullet_holes')
if bh != None:
self.bulletHoles = manager.get(bh.get('plugin'))
else:
self.bulletHoles = None
def postInit(self):
for i in self.postReload():
yield i
def postReload(self):
# Load the actor...
self.mesh = Actor(self.meshPath)
yield
# Shader generator makes it shiny, plus we need it in the right places in the render graph...
self.mesh.setShaderAuto()
self.mesh.reparentTo(self.gunView)
self.mesh.hide()
yield
# Set its animation going... except we pause it until needed...
self.nextAni()
self.interval.pause()
# Gun flash requires an exposed bone...
self.flashBone = self.mesh.exposeJoint(None,"modelRoot",self.flashBone)
yield
def gunControl(self,task):
# Update the gun direction ray to follow the players view...
self.ray.setPosition(self.gunView.getPos(render))
self.ray.setQuaternion(self.zToY.multiply(self.gunView.getQuat(render)))
# If the gun is firing update the trigger time, if a bullet is ejected do the maths...
if self.firing:
dt = globalClock.getDt()
self.triggerTime += dt
while self.triggerTime>self.bulletRate:
self.triggerTime -= self.bulletRate
hit,pos,norm = ray_cast.nearestHit(self.space,self.ray)
# Create a muzzle flash effect...
self.flashManager.doEffect(self.flashEffect, self.flashBone, True, self.flashPos)
if hit:
# Create an impact sparks effect...
# Calculate the reflection direction...
rd = self.ray.getDirection()
sparkDir = (norm * (2.0*norm.dot(rd))) - rd
# Convert the reflection direction into a quaternion that will rotate +ve z to the required direction...
try:
ang = -math.acos(sparkDir[2])
except:
print 'Angle problem', sparkDir
ang = 0.0
axis = Vec3(0.0,0.0,1.0).cross(sparkDir)
axis.normalize()
sparkQuat = Quat()
sparkQuat.setFromAxisAngleRad(ang,axis)
# Set it going...
self.sparksManager.doEffect(self.sparksEffect, render, False, pos, sparkQuat)
# Make a bullet hole
if hit.hasBody() and isinstance(hit.getBody().getData(), NodePath):
self.bulletHoles.makeNew(pos, norm, hit.getBody().getData())
else:
self.bulletHoles.makeNew(pos, norm, None)
# Impart some energy on the object...
if hit and hit.hasBody():
body = hit.getBody()
      # Approximate the impact force by spreading the bullet's momentum (mass * velocity) over a 0.05s impulse window...
force = self.bulletWeight*self.bulletSpeed/0.05
# Get the direction of travel of the bullet, multiply by force...
d = self.ray.getDirection()
d *= force
# If the object is asleep awaken it...
if not body.isEnabled():
body.enable()
# Add the force to the object...
body.addForceAtPos(d,pos)
return task.cont
def start(self):
# Make the gun visible...
self.mesh.show()
# Set the gun animation going...
self.interval.finish()
# Weapon task - this primarily makes it shoot...
self.task = taskMgr.add(self.gunControl,'GunControl')
def stop(self):
self.interval.pause()
self.mesh.hide()
taskMgr.remove(self.task)
def nextAni(self):
self.state = self.nextState
if self.state:
ani = random.choice(('aim_wiggle_a','aim_wiggle_b','aim_wiggle_c'))
else:
ani = random.choice(('casual_wiggle_a','casual_wiggle_b','casual_wiggle_c'))
self.mesh.pose(ani,0)
self.interval = Sequence(self.mesh.actorInterval(ani),Func(self.nextAni))
self.interval.start()
def setAiming(self,s):
if self.nextState!=s:
self.interval.pause()
self.nextState = s
self.camera.setZoomed(s)
def wib():
self.interval.finish()
if s: ani = 'casual_aim'
else: ani = 'aim_casual'
transition = Sequence(self.mesh.actorInterval(ani),Func(wib))
transition.start()
def setFiring(self,s):
self.firing = s
if self.firing:
self.triggerTime = 0.0
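# Example plugin configuration (a sketch inferred from reload() above: the
# element and attribute names follow the xml.find()/xml.get() calls, but the
# concrete plugin names, egg file and pos format are assumptions):
# <simpleweapon>
#   <bullet rate="0.08" speed="920.0" mass="0.004"/>
#   <egg file="g36.egg"/>
#   <camera plugin="camera"/>
#   <parent plugin="player" node="gun-mount"/>
#   <muzzle_flash plugin="particles" effect="muzzle-flash" bone="muzzle" pos="0,0,0"/>
#   <sparks plugin="particles" effect="sparks"/>
#   <bullet_holes plugin="bulletholes"/>
# </simpleweapon>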
| Panda3D-google-code-repositories/naith | game/plugins/simpleweapon/simpleweapon.py | Python | apache-2.0 | 7,962 | 0.017458 |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.1 on 2016-03-05 18:11
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('teams', '0007_auto_20160305_1726'),
]
operations = [
migrations.AlterField(
model_name='team',
name='lang_pref',
field=models.CharField(choices=[('C', 'C'), ('J', 'Java')], default='0', max_length=1, verbose_name='programming language preference'),
),
]
| shivamMg/malvo | teams/migrations/0008_auto_20160305_1811.py | Python | gpl-3.0 | 546 | 0.001832 |
import uuid
from paho.mqtt import publish
from paho.mqtt.client import MQTTv31
from conf.mqttconf import *
def send(msg, user_list, qos=2, retain=False):
"""
发布mqtt消息
:param msg:消息内容,可以是字符串、int、bytearray
:param user_list: 用户列表数组(不带前缀的),例如:["zhangsan","lilei"]
:param qos: 消息质量(0:至多一次,1:至少一次,2:只有一次)
:param retain:设置是否保存消息,为True时当订阅者不在线时发送的消息等上线后会得到通知,否则只发送给在线的设备
:return:
"""
auth = {"username": MOSQUITTO_PUB_USER, "password": MOSQUITTO_PUB_PWD}
client_id = MOSQUITTO_PREFIX + str(uuid.uuid1())
msgs = []
for i in user_list:
print(i)
msg_obj = dict()
msg_obj["qos"] = qos
msg_obj["retain"] = retain
msg_obj["topic"] = MOSQUITTO_TOPIC_PREFIX + str(i)
msg_obj["payload"] = msg
msgs.append(msg_obj)
if len(msgs) > 0 and msg:
print(msgs)
try:
publish.multiple(msgs, hostname=MOSQUITTO_HOST, port=MOSQUITTO_PORT, client_id=client_id, keepalive=60,
will=None, auth=auth, tls=None, protocol=MQTTv31)
ret = 1
except Exception as e:
print(str(e))
ret = -1
else:
ret = -2
return ret | cherrishes/weilai | xingxing/common/mqtt_helper.py | Python | apache-2.0 | 1,402 | 0.001664 |
#!/usr/bin/env python
import subprocess, os, sys
import colorama
from colorama import Fore, Back, Style
# Store paths and connection info
sourcetree = "/home/flux/Projects/hackday"
gitlabapi = ""
drupalbootstrap = sourcetree + "/drupal/includes/bootstrap.inc"
contrib = sourcetree + "/all/modules/contrib"
rpmmodules = sourcetree + "/drupal/modules"
rssDrupalCore = "http://drupal.org/security/rss.xml"
rssDrupalContrib = "http://drupal.org/security/contrib/rss.xml"
doAPI = "https://www.drupal.org/api-d7/node.json?type=project_module&field_project_machine_name="
# Start with Drupal core
print Fore.BLUE + ("=" * 7) + " Drupal Core " + ("=" * 7) + Style.RESET_ALL
print Fore.GREEN
with open(drupalbootstrap, 'r') as searchfile:
for line in searchfile:
if """define('VERSION',""" in line:
drupalversion = line.split("'")
print "-- Drupal Core: " + drupalversion[3]
print Style.RESET_ALL
# Function to iterate through a module path and pull the version numbers
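# A typical contrib .info file carries a line such as (illustrative):
#   version = "7.x-1.4"
# The loop below extracts that value, skipping dev checkouts whose version is
# the literal "VERSION" placeholder.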
def modulelist(modpath):
print Fore.BLUE + ("=" * 7) + " " + modpath + " " + ("=" * 7) + Style.RESET_ALL
dirs = os.listdir(modpath)
print Fore.GREEN
for module in dirs:
info = modpath + "/" + module + "/" + module + ".info"
try:
with open(info, 'r') as searchfile:
for line in searchfile:
if """version = """ in line:
moduleversion = line.split("version =")
if not "VERSION" in moduleversion[1]:
print "-- " + module + " " + moduleversion[1].replace('\"','')
except:
pass
print Style.RESET_ALL
modulelist(contrib)
modulelist(rpmmodules)
| Andy-Thornton/drupal-security-updates | security-updates.py | Python | gpl-3.0 | 1,721 | 0.006973 |
# encoding: utf-8
u'''MCL — Organ Folder'''
from ._base import IIngestableFolder, Ingestor, IngestableFolderView
from .interfaces import IOrgan
from five import grok
class IOrganFolder(IIngestableFolder):
u'''Folder containing body systems, also known as organs.'''
class OrganIngestor(Ingestor):
u'''RDF ingestor for organs.'''
grok.context(IOrganFolder)
def getContainedObjectInterface(self):
return IOrgan
class View(IngestableFolderView):
u'''View for an organ folder'''
grok.context(IOrganFolder)
| MCLConsortium/mcl-site | src/jpl.mcl.site.knowledge/src/jpl/mcl/site/knowledge/organfolder.py | Python | apache-2.0 | 544 | 0.001845 |
# !/usr/bin/python
# Copyright (c) 2007 Randal Barlow <im.tehk at gmail.com>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place - Suite 330,
# Boston, MA 02111-1307, USA.
import sys
import os
import subprocess
import atexit
import gobject
import pygtk
import gtk
from gtk import gdk
import awn
import dbus
from dbus.mainloop.glib import DBusGMainLoop
import string
try:
import mutagen.mp3
import mutagen.mp4
from mutagen.id3 import ID3
import tempfile
album_art_file = "%s/awnmediaplayer_%s.png" % (tempfile.gettempdir(), os.getenv('USERNAME'))
art_icon_from_tag = True
except ImportError:
art_icon_from_tag = False
if gtk.gtk_version >= (2, 18):
from urllib import unquote
DBusGMainLoop(set_as_default=True)
def cleanup():
if art_icon_from_tag:
try:
os.remove(album_art_file)
except OSError:
pass
atexit.register(cleanup)
def get_app_name():
player_name = None
bus_obj = dbus.SessionBus().get_object('org.freedesktop.DBus', '/org/freedesktop/DBus')
if bus_obj.NameHasOwner('org.gnome.Rhythmbox') == True:
player_name = "Rhythmbox"
elif bus_obj.NameHasOwner('org.exaile.DBusInterface') == True:
player_name = "Exaile"
elif bus_obj.NameHasOwner('org.gnome.Banshee') == True:
player_name = "Banshee"
elif bus_obj.NameHasOwner('org.bansheeproject.Banshee') == True:
player_name = "BansheeOne"
elif bus_obj.NameHasOwner('org.gnome.Listen') == True:
player_name = "Listen"
elif bus_obj.NameHasOwner('net.sacredchao.QuodLibet') == True:
player_name = "QuodLibet"
elif bus_obj.NameHasOwner('org.mpris.songbird') == True:
player_name = "Songbird"
elif bus_obj.NameHasOwner('org.mpris.vlc') == True:
player_name = "VLC"
elif bus_obj.NameHasOwner('org.mpris.audacious') == True:
player_name = "Audacious"
elif bus_obj.NameHasOwner('org.mpris.bmp') == True:
player_name = "BMP"
elif bus_obj.NameHasOwner('org.mpris.xmms2') == True:
player_name = "XMMS2"
elif bus_obj.NameHasOwner('org.mpris.amarok') == True:
player_name = "Amarok"
elif bus_obj.NameHasOwner('org.mpris.aeon') == True:
player_name = "Aeon"
elif bus_obj.NameHasOwner('org.mpris.dragonplayer') == True:
player_name = "DragonPlayer"
elif bus_obj.NameHasOwner('org.freedesktop.MediaPlayer') == True:
player_name = "mpDris"
elif bus_obj.NameHasOwner('org.mpris.clementine') == True:
player_name = "Clementine"
elif bus_obj.NameHasOwner('org.mpris.guayadeque') == True:
player_name = "Guayadeque"
return player_name
def player_available(executable):
"""Check if player is installed if it's not in 'Activatable Services' on DBus"""
for path in os.getenv('PATH').split(':'):
if path == '':
continue
if os.path.isfile(os.path.join(path, executable)):
return True
return False
def launch_player(args):
"""Launch player if this can't be done via DBus"""
try:
subprocess.Popen(args)
except OSError, e:
print "awnmediaplayer: error launching %s: %s" % (args, e)
return False
return True
class GenericPlayer(object):
"""Insert the level of support here"""
def __init__(self, dbus_name=None):
# set signalling_supported to True in your subclass's constructor if you use signal(s) which are received when currently played song changes (e.g. playingUriChanged signal)
self.signalling_supported = False
# set to DBus service name string in your subclass
self.dbus_base_name = dbus_name
self.song_change_cb = None
self.playing_changed_cb = None
self.dbus_driver()
def set_song_change_callback(self, cb):
self.song_change_cb = cb
def set_playing_changed_callback(self, cb):
self.playing_changed_cb = cb
def song_changed_emitter(self, *args, **kwargs):
if (self.song_change_cb):
self.song_change_cb()
def playing_changed_emitter(self, playing):
if (self.playing_changed_cb):
self.playing_changed_cb(playing)
def is_async(self):
"""
Returns True if this player class supports song change signalling.
"""
return self.signalling_supported
def is_available(self):
"""
Returns true if this player is present on the system.
Override if necessary.
"""
if (self.dbus_base_name != None):
bus_obj = dbus.SessionBus().get_object('org.freedesktop.DBus', '/org/freedesktop/DBus')
ACTIVATABLE_SERVICES = bus_obj.ListActivatableNames()
return self.dbus_base_name in ACTIVATABLE_SERVICES
return False
def start(self):
"""
Starts given player.
Override if necessary.
"""
if (self.dbus_base_name != None):
object_path = '/' + self.dbus_base_name.replace('.', '/')
try:
bus = dbus.SessionBus()
obj = bus.get_object(self.dbus_base_name, object_path)
return True
except Exception, e:
print "awnmediaplayer: error launching %s: %s" % (self.__class__.__name__, e)
return False
def get_dbus_name(self):
"""
Returns player's dbus name.
"""
return self.dbus_base_name
def dbus_driver(self):
"""
Defining the dbus location for GenericPlayer
Provides self.player and any other interfaces needed by get_media_info
and the button methods
"""
pass
def get_media_info(self):
"""
        This method tries to get information about the currently playing media.
        Returns:
        * dict result = dictionary of various information about the media
          (should always have at least the 'title' key)
"""
return {}
def is_playing(self):
"""
This method determines if the player is currently in 'playing' state
        as opposed to 'paused' / 'stopped'
"""
return False
def previous(self):
pass
def play_pause(self):
pass
def next(self):
pass
def play_uri(self, uri):
"""
Immediately starts playing the specified URI.
"""
return False
def enqueue_uris(self, uris):
"""
Adds uris to current playlist.
"""
return False
class MPRISPlayer(GenericPlayer):
""" a default implementation of MPRIS """
def __init__(self, interface):
GenericPlayer.__init__(self, interface)
self.signalling_supported = True
def playing_changed_emitter(self, playing):
print "Status Change: ", playing
if (self.playing_changed_cb):
self.playing_changed_cb(playing[0] == 0)
def dbus_driver(self):
"""
Defining the dbus location for
"""
bus_obj = dbus.SessionBus().get_object('org.freedesktop.DBus', '/org/freedesktop/DBus')
if bus_obj.NameHasOwner(self.dbus_base_name) == True:
self.session_bus = dbus.SessionBus()
self.player = self.session_bus.get_object(self.dbus_base_name, '/Player')
self.player.connect_to_signal('TrackChange', self.song_changed_emitter, member_keyword='member')
self.player.connect_to_signal('StatusChange', self.playing_changed_emitter)
def get_media_info(self):
self.dbus_driver()
# Get information about song
info = self.player.GetMetadata()
result = {}
if 'title' in info.keys():
result['title'] = str(info['title'])
elif 'location' in info.keys():
pos = info['location'].rfind("/")
if pos is not -1:
result['title'] = str(info['location'][pos + 1:])
else:
result['title'] = ''
else:
result['title'] = ''
if 'artist' in info.keys():
result['artist'] = str(info['artist'])
if 'album' in info.keys():
result['album'] = str(info['album'])
if 'arturl' in info:
if info['arturl'][0:7] == "file://":
result['album-art'] = str(info['arturl'][7:])
if gtk.gtk_version >= (2, 18):
result['album-art'] = unquote(result['album-art'])
else:
print "Don't understand the album art location: %s" % info['arturl']
return result
def is_playing(self):
self.dbus_driver()
stat = self.player.GetStatus()
return stat[0] == 0
def previous(self):
self.player.Prev()
def play_pause(self):
stat = self.player.GetStatus()
if stat[0] != 2:
self.player.Pause()
else:
self.player.Play()
def next(self):
self.player.Next()
class Rhythmbox(GenericPlayer):
"""Full Support with signals"""
def __init__(self):
GenericPlayer.__init__(self, 'org.gnome.Rhythmbox')
self.signalling_supported = True
self._is_playing = False
def dbus_driver(self):
"""
Defining the dbus location for Rhythmbox
"""
bus_obj = dbus.SessionBus().get_object('org.freedesktop.DBus', '/org/freedesktop/DBus')
self._is_playing = False
if bus_obj.NameHasOwner(self.dbus_base_name) == True:
self.session_bus = dbus.SessionBus()
self.proxy_obj = self.session_bus.get_object(self.dbus_base_name, '/org/gnome/Rhythmbox/Player')
self.player = dbus.Interface(self.proxy_obj, 'org.gnome.Rhythmbox.Player')
self.player.connect_to_signal('playingUriChanged', self.song_changed_emitter, member_keyword='member')
self.player.connect_to_signal('playingSongPropertyChanged', self.song_changed_emitter, member_keyword='member')
self.player.connect_to_signal('playingChanged', self.playing_changed_emitter)
self.rbShell = self.session_bus.get_object(self.dbus_base_name, '/org/gnome/Rhythmbox/Shell')
self._is_playing = self.player.getPlaying()
def get_media_info(self):
self.dbus_driver()
ret_dict = {}
playinguri = self.player.getPlayingUri()
result = self.rbShell.getSongProperties(playinguri)
# Currently Playing Title
if result['artist'] != '':
ret_dict['artist'] = result['artist']
ret_dict['title'] = result['title']
if 'album' in result:
ret_dict['album'] = result['album']
elif 'rb:stream-song-title' in result:
if result['title'] != '':
ret_dict['title'] = result['rb:stream-song-title'] + ' (' + result['title'] + ')'
else:
ret_dict['title'] = result['rb:stream-song-title']
elif 'title' in result:
ret_dict['title'] = result['title']
# cover-art
if 'rb:coverArt-uri' in result:
albumart_exact = result['rb:coverArt-uri'].encode('utf8')
# bug in rhythmbox 0.11.6 - returns uri, but not properly encoded,
# but it's enough to remove the file:// prefix
albumart_exact = albumart_exact.replace('file://', '', 1)
if gtk.gtk_version >= (2, 18):
albumart_exact = unquote(albumart_exact)
# Sanity check if encoding and unquoting did work
if os.path.isfile(albumart_exact):
ret_dict['album-art'] = albumart_exact
return ret_dict
else:
print "awnmediaplayers: Unquoting error:\n%s\ndoes not match\n%s" % (result['rb:coverArt-uri'], albumart_exact)
# perhaps it's in the cache folder
if 'album' in result and 'artist' in result:
cache_dir = os.path.expanduser("~/.cache/rhythmbox/covers")
cache_file = '%s/%s - %s.jpg' % (cache_dir, result['artist'], result['album'])
if os.path.isfile(cache_file):
ret_dict['album-art'] = cache_file
return ret_dict
# The following is based on code from Dockmanager
# Copyright (C) 2009-2010 Jason Smith, Rico Tzschichholz, Robert Dyer
# Look in song folder
filename = playinguri.encode('utf8').replace('file://', '', 1)
if gtk.gtk_version >= (2, 18):
filename = unquote(filename)
coverdir = os.path.dirname(filename)
if os.path.isdir(coverdir):
covernames = ["cover", "album", "albumart", ".folder", "folder"]
extensions = [".jpg", ".jpeg", ".png"]
for f in os.listdir(coverdir):
for ext in extensions:
if f.lower().endswith(ext):
for name in covernames:
if f.lower() == (name + ext):
ret_dict['album-art'] = os.path.join(coverdir, f)
return ret_dict
else:
print "awnmediaplayers: Unquoting error:\n%s (file)\ndoes not match\n%s (directory)" % (playinguri, coverdir)
# Look for image in tags
if art_icon_from_tag and 'mimetype' in result:
image_data = None
if result['mimetype'] == "application/x-id3":
try:
f = ID3(filename)
apicframes = f.getall("APIC")
if len(apicframes) >= 1:
frame = apicframes[0]
image_data = frame.data
except:
pass
elif result['mimetype'] == "audio/x-aac":
try:
f = mutagen.mp4.MP4(filename)
if "covr" in f.tags:
covertag = f.tags["covr"][0]
image_data = covertag
except:
pass
if image_data:
try:
loader = gtk.gdk.PixbufLoader()
loader.write(image_data)
loader.close()
loader.get_pixbuf().save(album_art_file, "png", {})
ret_dict['album-art'] = album_art_file
except:
pass
return ret_dict
def is_playing(self):
self.dbus_driver()
return self._is_playing
def previous(self):
self.player.previous()
def play_pause(self):
self.player.playPause(1)
def next(self):
self.player.next()
def play_uri(self, uri):
# unfortunatelly this only works for items present in media library
self.rbShell.loadURI(uri, True)
return True
def enqueue_uris(self, uris):
# unfortunatelly this only works for items present in media library
for uri in uris:
self.rbShell.addToQueue(uri)
return True
class Exaile(GenericPlayer):
"""Full Support for the Exaile media player
No signals as of Exaile 0.2.11
    Issues exist with play: it stops the player when pressed; further D-Bus info is needed.
"""
def __init__(self):
GenericPlayer.__init__(self, 'org.exaile.DBusInterface')
def dbus_driver(self):
"""
Defining the dbus location for
"""
bus_obj = dbus.SessionBus().get_object('org.freedesktop.DBus', '/org/freedesktop/DBus')
if bus_obj.NameHasOwner('org.exaile.DBusInterface') == True:
self.session_bus = dbus.SessionBus()
self.proxy_obj = self.session_bus.get_object('org.exaile.DBusInterface', '/DBusInterfaceObject')
self.player = dbus.Interface(self.proxy_obj, "org.exaile.DBusInterface")
def get_media_info(self):
self.dbus_driver()
# Currently Playing Title
result = {}
result['title'] = self.player.get_title()
result['artist'] = self.player.get_artist()
result['album'] = self.player.get_album()
result['album-art'] = self.player.get_cover_path()
return result
def previous(self):
self.player.prev_track()
def play_pause(self):
self.player.play_pause()
def next(self):
self.player.next_track()
def play_uri(self, uri):
self.player.play_file(uri)
return True
def enqueue_uris(self, uris):
for uri in uris:
self.player.play_file(uri)
return True
class Banshee(GenericPlayer):
"""Full Support for the banshee media player
No signals as of Banshee 0.13.2
"""
def __init__(self):
GenericPlayer.__init__(self, 'org.gnome.Banshee')
def dbus_driver(self):
"""
Defining the dbus location for
"""
bus_obj = dbus.SessionBus().get_object('org.freedesktop.DBus', '/org/freedesktop/DBus')
if bus_obj.NameHasOwner('org.gnome.Banshee') == True:
self.session_bus = dbus.SessionBus()
self.proxy_obj = self.session_bus.get_object('org.gnome.Banshee', "/org/gnome/Banshee/Player")
self.player = dbus.Interface(self.proxy_obj, "org.gnome.Banshee.Core")
def get_media_info(self):
self.dbus_driver()
# Currently Playing Title
result = {}
result['title'] = self.player.GetPlayingTitle()
result['artist'] = self.player.GetPlayingArtist()
#result['album'] = self.player.GetPlayingAlbum() #FIXME: does it exist?
result['album-art'] = self.player.GetPlayingCoverUri()
return result
def previous(self):
self.player.Previous()
def play_pause(self):
self.player.TogglePlaying()
def next(self):
self.player.Next()
def play_uri(self, uri):
self.player.EnqueueFiles([uri])
return True
def enqueue_uris(self, uris):
self.player.EnqueueFiles(uris)
return True
class BansheeOne(GenericPlayer):
"""Partial support for the banshee media player"""
def __init__(self):
GenericPlayer.__init__(self, 'org.bansheeproject.Banshee')
self.signalling_supported = True
self._is_playing = False
def dbus_driver(self):
"""
Defining the dbus location for Banshee
"""
self._is_playing = False
bus_obj = dbus.SessionBus().get_object('org.freedesktop.DBus', '/org/freedesktop/DBus')
if bus_obj.NameHasOwner('org.bansheeproject.Banshee') == True:
self.session_bus = dbus.SessionBus()
self.proxy_obj = self.session_bus.get_object('org.bansheeproject.Banshee', "/org/bansheeproject/Banshee/PlayerEngine")
self.proxy_obj1 = self.session_bus.get_object('org.bansheeproject.Banshee', "/org/bansheeproject/Banshee/PlaybackController")
self.player = dbus.Interface(self.proxy_obj, "org.bansheeproject.Banshee.PlayerEngine")
self.player1 = dbus.Interface(self.proxy_obj1, "org.bansheeproject.Banshee.PlaybackController")
self.player.connect_to_signal('EventChanged', self.event_changed, member_keyword='member')
self._is_playing = self.player.GetCurrentState() == 'playing'
def event_changed(self, *args, **kwargs):
self.song_changed_emitter()
playing = False
try:
# careful for dbus exceptions
playing = self.player.GetCurrentState() == 'playing'
except:
pass
if (playing != self._is_playing):
self.playing_changed_emitter(playing)
self._is_playing = playing
def get_media_info(self):
self.dbus_driver()
result = {}
self.albumart_general = os.environ['HOME'] + "/.cache/media-art/"
self.albumart_general2 = os.environ['HOME'] + "/.cache/album-art/"
# Currently Playing Title
info = self.player.GetCurrentTrack()
if 'name' in info.keys():
result['title'] = str(info['name'])
else:
result['title'] = ''
if 'artist' in info.keys():
result['artist'] = str(info['artist'])
if 'album' in info.keys():
result['album'] = str(info['album'])
if 'artwork-id' in info:
result['album-art'] = '%s.jpg' % (self.albumart_general + info['artwork-id'])
if not os.path.isfile(result['album-art']):
result['album-art'] = '%s.jpg' % (self.albumart_general2 + info['artwork-id'])
elif 'album' in info and 'artist' in result:
albumart_exact = '%s-%s.jpg' % (self.albumart_general + result['artist'], info['album'])
result['album-art'] = albumart_exact.replace(' ', '').lower()
return result
def is_playing(self):
self.dbus_driver()
return self._is_playing
def previous(self):
self.player1.Previous(False)
def play_pause(self):
self.player.TogglePlaying()
def next(self):
self.player1.Next(False)
class Listen(GenericPlayer):
"""Partial Support"""
def __init__(self):
GenericPlayer.__init__(self, 'org.gnome.Listen')
def dbus_driver(self):
"""
Defining the dbus location for
"""
bus_obj = dbus.SessionBus().get_object('org.freedesktop.DBus', '/org/freedesktop/DBus')
if bus_obj.NameHasOwner('org.gnome.Listen') == True:
self.session_bus = dbus.SessionBus()
self.proxy_obj = self.session_bus.get_object('org.gnome.Listen', "/org/gnome/listen")
self.player = dbus.Interface(self.proxy_obj, "org.gnome.Listen")
def get_media_info(self):
self.dbus_driver()
result = {}
# Currently Playing Title
try:
# Version => 0.6
result['title'] = self.player.get_title()
if result['title'] == None: # if paused
result['title'] = ''
result['artist'] = self.player.get_artist()
result['album'] = self.player.get_album()
result['album-art'] = self.player.get_cover_path()
except:
# Version == 0.5
# A single string of this pattern: Title - (Album - Artist)
# Streaming media can have less fields
result['title'] = self.player.current_playing().split(" - ", 3)[0]
try:
result['album'] = self.player.current_playing().split(" - ", 3)[1][1:]
result['artist'] = self.player.current_playing().split(" - ", 3)[2][:-1]
result['album-art'] = os.environ['HOME'] + "/.listen/cover/" + result['artist'].lower() + "+" + result['album'].lower() + ".jpg"
except IndexError:
pass
return result
def previous(self):
self.player.previous()
def play_pause(self):
self.player.play_pause()
def next(self):
self.player.next()
def play_uri(self, uri):
self.player.play([uri])
return True
def enqueue_uris(self, uris):
self.player.enqueue(uris)
return True
class QuodLibet(GenericPlayer):
"""Full Support with signals""" # (but not implemented yet)
def __init__(self):
GenericPlayer.__init__(self, 'net.sacredchao.QuodLibet')
def dbus_driver(self):
"""
Defining the dbus location for
"""
bus_obj = dbus.SessionBus().get_object('org.freedesktop.DBus', '/org/freedesktop/DBus')
if bus_obj.NameHasOwner('net.sacredchao.QuodLibet') == True:
self.session_bus = dbus.SessionBus()
self.proxy_obj = self.session_bus.get_object('net.sacredchao.QuodLibet', '/net/sacredchao/QuodLibet')
self.player = dbus.Interface(self.proxy_obj, 'net.sacredchao.QuodLibet')
def get_media_info(self):
# You need to activate the "Picture Saver" plugin in QuodLibet
albumart_exact = os.environ["HOME"] + "/.quodlibet/current.cover"
self.dbus_driver()
# Currently Playing Title
result = self.player.CurrentSong()
result['album-art'] = albumart_exact
        return result
def previous(self):
self.player.Previous()
def play_pause(self):
self.player.PlayPause()
def next(self):
self.player.Next()
class Songbird(MPRISPlayer):
"""Discontinued in 2010"""
def __init__(self):
MPRISPlayer.__init__(self, 'org.mpris.songbird')
class VLC(MPRISPlayer):
def __init__(self):
MPRISPlayer.__init__(self, 'org.mpris.vlc')
def is_available(self):
return player_available('vlc')
def start(self):
return launch_player(['vlc', '--control', 'dbus'])
class Audacious(MPRISPlayer):
def __init__(self):
MPRISPlayer.__init__(self, 'org.mpris.audacious')
def is_available(self):
return player_available('audacious')
def start(self):
return launch_player('audacious')
class BMP(MPRISPlayer):
"""Beep Media Player, discontinued"""
def __init__(self):
MPRISPlayer.__init__(self, 'org.mpris.bmp')
class XMMS2(MPRISPlayer):
def __init__(self):
MPRISPlayer.__init__(self, 'org.mpris.xmms2')
class Amarok(MPRISPlayer):
"""Amarok 2.0 +"""
def __init__(self):
MPRISPlayer.__init__(self, 'org.mpris.amarok')
def is_available(self):
return player_available('amarok')
def start(self):
return launch_player('amarok')
class Aeon(MPRISPlayer):
"""Discontinued"""
def __init__(self):
MPRISPlayer.__init__(self, 'org.mpris.aeon')
class DragonPlayer(MPRISPlayer):
""" FIXME: Doesn't work: bus path is org.mpris.dragonplayer-XXXXX """
def __init__(self):
MPRISPlayer.__init__(self, 'org.mpris.dragonplayer')
class mpDris(MPRISPlayer):
""" mpDris is an implementation of the XMMS2 media player interface MPRIS as a client for MPD. """
def __init__(self):
MPRISPlayer.__init__(self, 'org.freedesktop.MediaPlayer')
class Clementine(MPRISPlayer):
def __init__(self):
MPRISPlayer.__init__(self, 'org.mpris.clementine')
def is_available(self):
return player_available('clementine')
def start(self):
return launch_player('clementine')
def previous(self):
self.player.Prev()
# We have to emit song changed signal ourselves (Clementine 0.5)
self.song_changed_emitter()
def next(self):
self.player.Next()
# We have to emit song changed signal ourselves (Clementine 0.5)
self.song_changed_emitter()
class Guayadeque(MPRISPlayer):
def __init__(self):
MPRISPlayer.__init__(self, 'org.mpris.guayadeque')
def dbus_driver(self):
bus_obj = dbus.SessionBus().get_object('org.freedesktop.DBus', '/org/freedesktop/DBus')
if bus_obj.NameHasOwner(self.dbus_base_name) == True:
self.session_bus = dbus.SessionBus()
self.proxy_obj = self.session_bus.get_object(self.dbus_base_name, '/Player')
self.player = dbus.Interface(self.proxy_obj, 'org.freedesktop.MediaPlayer')
self.player.connect_to_signal('TrackChange', self.song_changed_emitter, member_keyword='member')
self.player.connect_to_signal('StatusChange', self.playing_changed_emitter)
def is_available(self):
return player_available('guayadeque')
def start(self):
return launch_player('guayadeque')
| p12tic/awn-extras | shared/python/awnmediaplayers.py | Python | gpl-2.0 | 28,094 | 0.003132 |
# -*- coding: utf-8 -*-
from feedz.processors.content_filter import ContentFilterProcessor
| indexofire/gork | src/gork/application/feedz/processors/__init__.py | Python | mit | 91 | 0 |
#!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: rds_param_group
version_added: "1.5"
short_description: manage RDS parameter groups
description:
- Creates, modifies, and deletes RDS parameter groups. This module has a dependency on python-boto >= 2.5.
options:
state:
description:
- Specifies whether the group should be present or absent.
required: true
default: present
aliases: []
choices: [ 'present' , 'absent' ]
name:
description:
- Database parameter group identifier.
required: true
default: null
aliases: []
description:
description:
- Database parameter group description. Only set when a new group is added.
required: false
default: null
aliases: []
engine:
description:
- The type of database for this group. Required for state=present.
required: false
default: null
aliases: []
    choices: [ 'aurora5.6', 'mariadb10.0', 'mysql5.1', 'mysql5.5', 'mysql5.6', 'mysql5.7', 'oracle-ee-11.2', 'oracle-ee-12.1', 'oracle-se-11.2', 'oracle-se-12.1', 'oracle-se1-11.2', 'oracle-se1-12.1', 'postgres9.3', 'postgres9.4', 'postgres9.5', 'sqlserver-ee-10.5', 'sqlserver-ee-11.0', 'sqlserver-ex-10.5', 'sqlserver-ex-11.0', 'sqlserver-ex-12.0', 'sqlserver-se-10.5', 'sqlserver-se-11.0', 'sqlserver-se-12.0', 'sqlserver-web-10.5', 'sqlserver-web-11.0', 'sqlserver-web-12.0' ]
immediate:
description:
- Whether to apply the changes immediately, or after the next reboot of any associated instances.
required: false
default: null
aliases: []
params:
description:
- Map of parameter names and values. Numeric values may be represented as K for kilo (1024), M for mega (1024^2), G for giga (1024^3), or T for tera (1024^4), and these values will be expanded into the appropriate number before being set in the parameter group.
required: false
default: null
aliases: []
author: "Scott Anderson (@tastychutney)"
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
# Add or change a parameter group, in this case setting auto_increment_increment to 42 * 1024
- rds_param_group:
state: present
name: norwegian_blue
description: 'My Fancy Ex Parrot Group'
engine: 'mysql5.6'
params:
auto_increment_increment: "42K"
# Remove a parameter group
- rds_param_group:
state: absent
name: norwegian_blue
'''
VALID_ENGINES = [
'aurora5.6',
'mariadb10.0',
'mysql5.1',
'mysql5.5',
'mysql5.6',
'mysql5.7',
'oracle-ee-11.2',
'oracle-ee-12.1',
'oracle-se-11.2',
'oracle-se-12.1',
'oracle-se1-11.2',
'oracle-se1-12.1',
'postgres9.3',
'postgres9.4',
'postgres9.5',
'sqlserver-ee-10.5',
'sqlserver-ee-11.0',
'sqlserver-ex-10.5',
'sqlserver-ex-11.0',
'sqlserver-ex-12.0',
'sqlserver-se-10.5',
'sqlserver-se-11.0',
'sqlserver-se-12.0',
'sqlserver-web-10.5',
'sqlserver-web-11.0',
'sqlserver-web-12.0',
]
try:
import boto.rds
from boto.exception import BotoServerError
HAS_BOTO = True
except ImportError:
HAS_BOTO = False
# modify_group (below) returns a tuple: (dict of changed parameters, the remaining parameters that weren't found in this parameter group)
class NotModifiableError(Exception):
def __init__(self, error_message, *args):
super(NotModifiableError, self).__init__(error_message, *args)
self.error_message = error_message
def __repr__(self):
return 'NotModifiableError: %s' % self.error_message
def __str__(self):
return 'NotModifiableError: %s' % self.error_message
INT_MODIFIERS = {
'K': 1024,
'M': pow(1024, 2),
'G': pow(1024, 3),
'T': pow(1024, 4),
}
TRUE_VALUES = ('on', 'true', 'yes', '1',)
def set_parameter(param, value, immediate):
"""
Allows setting parameters with 10M = 10* 1024 * 1024 and so on.
"""
converted_value = value
if param.type == 'string':
converted_value = str(value)
elif param.type == 'integer':
if isinstance(value, basestring):
try:
for modifier in INT_MODIFIERS.keys():
if value.endswith(modifier):
converted_value = int(value[:-1]) * INT_MODIFIERS[modifier]
converted_value = int(converted_value)
except ValueError:
# may be based on a variable (ie. {foo*3/4}) so
# just pass it on through to boto
converted_value = str(value)
elif type(value) == bool:
converted_value = 1 if value else 0
else:
converted_value = int(value)
elif param.type == 'boolean':
if isinstance(value, basestring):
converted_value = value in TRUE_VALUES
else:
converted_value = bool(value)
param.value = converted_value
param.apply(immediate)
def modify_group(group, params, immediate=False):
""" Set all of the params in a group to the provided new params. Raises NotModifiableError if any of the
params to be changed are read only.
"""
changed = {}
new_params = dict(params)
for key in new_params.keys():
if key in group:
param = group[key]
new_value = new_params[key]
try:
old_value = param.value
except ValueError:
# some versions of boto have problems with retrieving
# integer values from params that may have their value
# based on a variable (ie. {foo*3/4}), so grab it in a
# way that bypasses the property functions
old_value = param._value
if old_value != new_value:
if not param.is_modifiable:
raise NotModifiableError('Parameter %s is not modifiable.' % key)
changed[key] = {'old': old_value, 'new': new_value}
set_parameter(param, new_value, immediate)
del new_params[key]
return changed, new_params
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(dict(
state = dict(required=True, choices=['present', 'absent']),
name = dict(required=True),
engine = dict(required=False, choices=VALID_ENGINES),
description = dict(required=False),
params = dict(required=False, aliases=['parameters'], type='dict'),
immediate = dict(required=False, type='bool'),
)
)
module = AnsibleModule(argument_spec=argument_spec)
if not HAS_BOTO:
module.fail_json(msg='boto required for this module')
state = module.params.get('state')
group_name = module.params.get('name').lower()
group_engine = module.params.get('engine')
group_description = module.params.get('description')
group_params = module.params.get('params') or {}
immediate = module.params.get('immediate') or False
if state == 'present':
for required in ['name', 'description', 'engine']:
if not module.params.get(required):
module.fail_json(msg = str("Parameter %s required for state='present'" % required))
else:
for not_allowed in ['description', 'engine', 'params']:
if module.params.get(not_allowed):
module.fail_json(msg = str("Parameter %s not allowed for state='absent'" % not_allowed))
# Retrieve any AWS settings from the environment.
region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module)
if not region:
module.fail_json(msg = str("Either region or AWS_REGION or EC2_REGION environment variable or boto config aws_region or ec2_region must be set."))
try:
conn = connect_to_aws(boto.rds, region, **aws_connect_kwargs)
except boto.exception.BotoServerError as e:
module.fail_json(msg = e.error_message)
group_was_added = False
try:
changed = False
try:
all_groups = conn.get_all_dbparameter_groups(group_name, max_records=100)
exists = len(all_groups) > 0
except BotoServerError as e:
if e.error_code != 'DBParameterGroupNotFound':
module.fail_json(msg = e.error_message)
exists = False
if state == 'absent':
if exists:
conn.delete_parameter_group(group_name)
changed = True
else:
changed = {}
if not exists:
new_group = conn.create_parameter_group(group_name, engine=group_engine, description=group_description)
group_was_added = True
# If a "Marker" is present, this group has more attributes remaining to check. Get the next batch, but only
# if there are parameters left to set.
marker = None
while len(group_params):
next_group = conn.get_all_dbparameters(group_name, marker=marker)
changed_params, group_params = modify_group(next_group, group_params, immediate)
changed.update(changed_params)
if hasattr(next_group, 'Marker'):
marker = next_group.Marker
else:
break
except BotoServerError as e:
module.fail_json(msg = e.error_message)
except NotModifiableError as e:
msg = e.error_message
if group_was_added:
msg = '%s The group "%s" was added first.' % (msg, group_name)
module.fail_json(msg=msg)
module.exit_json(changed=changed)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *
if __name__ == '__main__':
main()
| cchurch/ansible-modules-core | cloud/amazon/rds_param_group.py | Python | gpl-3.0 | 10,548 | 0.005973 |
#
#
#
from __future__ import absolute_import, division, print_function, \
unicode_literals
from mock import Mock, call
from os.path import dirname, join
from requests import HTTPError
from requests_mock import ANY, mock as requests_mock
from unittest import TestCase
from octodns.record import Record
from octodns.provider.dnsimple import DnsimpleClientNotFound, DnsimpleProvider
from octodns.provider.yaml import YamlProvider
from octodns.zone import Zone
class TestDnsimpleProvider(TestCase):
expected = Zone('unit.tests.', [])
source = YamlProvider('test', join(dirname(__file__), 'config'))
source.populate(expected)
# Our test suite differs a bit, add our NS and remove the simple one
expected.add_record(Record.new(expected, 'under', {
'ttl': 3600,
'type': 'NS',
'values': [
'ns1.unit.tests.',
'ns2.unit.tests.',
]
}))
for record in list(expected.records):
if record.name == 'sub' and record._type == 'NS':
expected._remove_record(record)
break
def test_populate(self):
provider = DnsimpleProvider('test', 'token', 42)
# Bad auth
with requests_mock() as mock:
mock.get(ANY, status_code=401,
text='{"message": "Authentication failed"}')
with self.assertRaises(Exception) as ctx:
zone = Zone('unit.tests.', [])
provider.populate(zone)
self.assertEquals('Unauthorized', ctx.exception.message)
# General error
with requests_mock() as mock:
mock.get(ANY, status_code=502, text='Things caught fire')
with self.assertRaises(HTTPError) as ctx:
zone = Zone('unit.tests.', [])
provider.populate(zone)
self.assertEquals(502, ctx.exception.response.status_code)
        # Non-existent zone doesn't populate anything
with requests_mock() as mock:
mock.get(ANY, status_code=404,
text='{"message": "Domain `foo.bar` not found"}')
zone = Zone('unit.tests.', [])
provider.populate(zone)
self.assertEquals(set(), zone.records)
# No diffs == no changes
with requests_mock() as mock:
base = 'https://api.dnsimple.com/v2/42/zones/unit.tests/' \
'records?page='
with open('tests/fixtures/dnsimple-page-1.json') as fh:
mock.get('{}{}'.format(base, 1), text=fh.read())
with open('tests/fixtures/dnsimple-page-2.json') as fh:
mock.get('{}{}'.format(base, 2), text=fh.read())
zone = Zone('unit.tests.', [])
provider.populate(zone)
self.assertEquals(14, len(zone.records))
changes = self.expected.changes(zone, provider)
self.assertEquals(0, len(changes))
# 2nd populate makes no network calls/all from cache
again = Zone('unit.tests.', [])
provider.populate(again)
self.assertEquals(14, len(again.records))
# bust the cache
del provider._zone_records[zone.name]
# test handling of invalid content
with requests_mock() as mock:
with open('tests/fixtures/dnsimple-invalid-content.json') as fh:
mock.get(ANY, text=fh.read())
zone = Zone('unit.tests.', [])
provider.populate(zone)
self.assertEquals(set([
Record.new(zone, '', {
'ttl': 3600,
'type': 'SSHFP',
'values': []
}),
Record.new(zone, '_srv._tcp', {
'ttl': 600,
'type': 'SRV',
'values': []
}),
Record.new(zone, 'naptr', {
'ttl': 600,
'type': 'NAPTR',
'values': []
}),
]), zone.records)
def test_apply(self):
provider = DnsimpleProvider('test', 'token', 42)
resp = Mock()
resp.json = Mock()
provider._client._request = Mock(return_value=resp)
        # non-existent domain, create everything
resp.json.side_effect = [
DnsimpleClientNotFound, # no zone in populate
DnsimpleClientNotFound, # no domain during apply
]
plan = provider.plan(self.expected)
# No root NS, no ignored
n = len(self.expected.records) - 2
self.assertEquals(n, len(plan.changes))
self.assertEquals(n, provider.apply(plan))
provider._client._request.assert_has_calls([
# created the domain
call('POST', '/domains', data={'name': 'unit.tests'}),
# created at least one of the record with expected data
call('POST', '/zones/unit.tests/records', data={
'content': '20 30 foo-1.unit.tests.',
'priority': 10,
'type': 'SRV',
'name': '_srv._tcp',
'ttl': 600
}),
])
# expected number of total calls
self.assertEquals(26, provider._client._request.call_count)
provider._client._request.reset_mock()
# delete 1 and update 1
provider._client.records = Mock(return_value=[
{
'id': 11189897,
'name': 'www',
'content': '1.2.3.4',
'ttl': 300,
'type': 'A',
},
{
'id': 11189898,
'name': 'www',
'content': '2.2.3.4',
'ttl': 300,
'type': 'A',
},
{
'id': 11189899,
'name': 'ttl',
'content': '3.2.3.4',
'ttl': 600,
'type': 'A',
}
])
# Domain exists, we don't care about return
resp.json.side_effect = ['{}']
wanted = Zone('unit.tests.', [])
wanted.add_record(Record.new(wanted, 'ttl', {
'ttl': 300,
'type': 'A',
'value': '3.2.3.4'
}))
plan = provider.plan(wanted)
self.assertEquals(2, len(plan.changes))
self.assertEquals(2, provider.apply(plan))
# recreate for update, and deletes for the 2 parts of the other
provider._client._request.assert_has_calls([
call('POST', '/zones/unit.tests/records', data={
'content': '3.2.3.4',
'type': 'A',
'name': 'ttl',
'ttl': 300
}),
call('DELETE', '/zones/unit.tests/records/11189899'),
call('DELETE', '/zones/unit.tests/records/11189897'),
call('DELETE', '/zones/unit.tests/records/11189898')
], any_order=True)
| h-hwang/octodns | tests/test_octodns_provider_dnsimple.py | Python | mit | 6,915 | 0 |
from django.test import TestCase
class PublicTest(TestCase):
fixtures = ['main/fixtures/arches.json', 'main/fixtures/repos.json',
'main/fixtures/package.json', 'main/fixtures/groups.json',
'devel/fixtures/staff_groups.json']
def test_index(self):
response = self.client.get('/')
self.assertEqual(response.status_code, 200)
def test_about(self):
response = self.client.get('/about/')
self.assertEqual(response.status_code, 200)
def test_art(self):
response = self.client.get('/art/')
self.assertEqual(response.status_code, 200)
def test_svn(self):
response = self.client.get('/svn/')
self.assertEqual(response.status_code, 200)
def test_donate(self):
response = self.client.get('/donate/')
self.assertEqual(response.status_code, 200)
def test_download(self):
response = self.client.get('/download/')
self.assertEqual(response.status_code, 200)
def test_master_keys(self):
response = self.client.get('/master-keys/')
self.assertEqual(response.status_code, 200)
def test_master_keys_json(self):
response = self.client.get('/master-keys/json/')
self.assertEqual(response.status_code, 200)
def test_feeds(self):
response = self.client.get('/feeds/')
self.assertEqual(response.status_code, 200)
def test_people(self):
response = self.client.get('/people/developers/')
self.assertEqual(response.status_code, 200)
def test_sitemap(self):
sitemaps = ['sitemap', 'sitemap-base']
for sitemap in sitemaps:
response = self.client.get('/{}.xml'.format(sitemap))
self.assertEqual(response.status_code, 200)
| archlinux/archweb | public/tests.py | Python | gpl-2.0 | 1,786 | 0.00056 |
"""
The DoInterestManager keeps track of which parent/zones that we currently
have interest in. When you want to "look" into a zone you add an interest
to that zone. When you want to get rid of, or ignore, the objects in that
zone, remove interest in that zone.
p.s. A great deal of this code is just code moved from ClientRepository.py.
"""
from panda3d.core import *
from panda3d.direct import *
from .MsgTypes import *
from direct.showbase.PythonUtil import *
from direct.showbase import DirectObject
from .PyDatagram import PyDatagram
from direct.directnotify.DirectNotifyGlobal import directNotify
import types
from direct.showbase.PythonUtil import report
class InterestState:
StateActive = 'Active'
StatePendingDel = 'PendingDel'
def __init__(self, desc, state, context, event, parentId, zoneIdList,
eventCounter, auto=False):
self.desc = desc
self.state = state
self.context = context
# We must be ready to keep track of multiple events. If somebody
# requested an interest to be removed and we get a second request
# for removal of the same interest before we get a response for the
# first interest removal, we now have two parts of the codebase
# waiting for a response on the removal of a single interest.
self.events = []
self.eventCounter = eventCounter
if event:
self.addEvent(event)
self.parentId = parentId
self.zoneIdList = zoneIdList
self.auto = auto
def addEvent(self, event):
self.events.append(event)
self.eventCounter.num += 1
def getEvents(self):
return list(self.events)
def clearEvents(self):
self.eventCounter.num -= len(self.events)
assert self.eventCounter.num >= 0
self.events = []
def sendEvents(self):
for event in self.events:
messenger.send(event)
self.clearEvents()
def setDesc(self, desc):
self.desc = desc
def isPendingDelete(self):
return self.state == InterestState.StatePendingDel
def __repr__(self):
return 'InterestState(desc=%s, state=%s, context=%s, event=%s, parentId=%s, zoneIdList=%s)' % (
self.desc, self.state, self.context, self.events, self.parentId, self.zoneIdList)
class InterestHandle:
"""This class helps to ensure that valid handles get passed in to DoInterestManager funcs"""
def __init__(self, id):
self._id = id
def asInt(self):
return self._id
def __eq__(self, other):
if type(self) == type(other):
return self._id == other._id
return self._id == other
def __repr__(self):
return '%s(%s)' % (self.__class__.__name__, self._id)
# context value for interest changes that have no complete event
NO_CONTEXT = 0
class DoInterestManager(DirectObject.DirectObject):
"""
Top level Interest Manager
"""
notify = directNotify.newCategory("DoInterestManager")
InterestDebug = ConfigVariableBool('interest-debug', False)
# 'handle' is a number that represents a single interest set that the
# client has requested; the interest set may be modified
_HandleSerialNum = 0
# high bit is reserved for server interests
_HandleMask = 0x7FFF
# 'context' refers to a single request to change an interest set
_ContextIdSerialNum = 100
_ContextIdMask = 0x3FFFFFFF # avoid making Python create a long
_interests = {}
if __debug__:
_debug_interestHistory = []
_debug_maxDescriptionLen = 40
_SerialGen = SerialNumGen()
_SerialNum = serialNum()
def __init__(self):
assert DoInterestManager.notify.debugCall()
DirectObject.DirectObject.__init__(self)
self._addInterestEvent = uniqueName('DoInterestManager-Add')
self._removeInterestEvent = uniqueName('DoInterestManager-Remove')
self._noNewInterests = False
self._completeDelayedCallback = None
# keep track of request contexts that have not completed
self._completeEventCount = ScratchPad(num=0)
self._allInterestsCompleteCallbacks = []
def __verbose(self):
return self.InterestDebug.getValue() or self.getVerbose()
def _getAnonymousEvent(self, desc):
return 'anonymous-%s-%s' % (desc, DoInterestManager._SerialGen.next())
def setNoNewInterests(self, flag):
self._noNewInterests = flag
def noNewInterests(self):
return self._noNewInterests
def setAllInterestsCompleteCallback(self, callback):
if ((self._completeEventCount.num == 0) and
(self._completeDelayedCallback is None)):
callback()
else:
self._allInterestsCompleteCallbacks.append(callback)
def getAllInterestsCompleteEvent(self):
return 'allInterestsComplete-%s' % DoInterestManager._SerialNum
def resetInterestStateForConnectionLoss(self):
DoInterestManager._interests.clear()
self._completeEventCount = ScratchPad(num=0)
if __debug__:
self._addDebugInterestHistory("RESET", "", 0, 0, 0, [])
def isValidInterestHandle(self, handle):
# pass in a handle (or anything else) and this will return true if it is
# still a valid interest handle
if not isinstance(handle, InterestHandle):
return False
return handle.asInt() in DoInterestManager._interests
def updateInterestDescription(self, handle, desc):
iState = DoInterestManager._interests.get(handle.asInt())
if iState:
iState.setDesc(desc)
def addInterest(self, parentId, zoneIdList, description, event=None):
"""
Look into a (set of) zone(s).
"""
assert DoInterestManager.notify.debugCall()
handle = self._getNextHandle()
# print 'base.cr.addInterest(',description,',',handle,'):',globalClock.getFrameCount()
if self._noNewInterests:
DoInterestManager.notify.warning(
"addInterest: addingInterests on delete: %s" % (handle))
return
# make sure we've got parenting rules set in the DC
if parentId not in (self.getGameDoId(),):
parent = self.getDo(parentId)
if not parent:
DoInterestManager.notify.error(
'addInterest: attempting to add interest under unknown object %s' % parentId)
else:
if not parent.hasParentingRules():
DoInterestManager.notify.error(
'addInterest: no setParentingRules defined in the DC for object %s (%s)'
'' % (parentId, parent.__class__.__name__))
if event:
contextId = self._getNextContextId()
else:
contextId = 0
# event = self._getAnonymousEvent('addInterest')
DoInterestManager._interests[handle] = InterestState(
description, InterestState.StateActive, contextId, event, parentId, zoneIdList, self._completeEventCount)
if self.__verbose():
print('CR::INTEREST.addInterest(handle=%s, parentId=%s, zoneIdList=%s, description=%s, event=%s)' % (
handle, parentId, zoneIdList, description, event))
self._sendAddInterest(handle, contextId, parentId, zoneIdList, description)
if event:
messenger.send(self._getAddInterestEvent(), [event])
assert self.printInterestsIfDebug()
return InterestHandle(handle)
def addAutoInterest(self, parentId, zoneIdList, description):
"""
Look into a (set of) zone(s).
"""
assert DoInterestManager.notify.debugCall()
handle = self._getNextHandle()
if self._noNewInterests:
DoInterestManager.notify.warning(
"addInterest: addingInterests on delete: %s" % (handle))
return
# make sure we've got parenting rules set in the DC
if parentId not in (self.getGameDoId(),):
parent = self.getDo(parentId)
if not parent:
DoInterestManager.notify.error(
'addInterest: attempting to add interest under unknown object %s' % parentId)
else:
if not parent.hasParentingRules():
DoInterestManager.notify.error(
'addInterest: no setParentingRules defined in the DC for object %s (%s)'
'' % (parentId, parent.__class__.__name__))
DoInterestManager._interests[handle] = InterestState(
description, InterestState.StateActive, 0, None, parentId, zoneIdList, self._completeEventCount, True)
if self.__verbose():
print('CR::INTEREST.addInterest(handle=%s, parentId=%s, zoneIdList=%s, description=%s)' % (
handle, parentId, zoneIdList, description))
assert self.printInterestsIfDebug()
return InterestHandle(handle)
def removeInterest(self, handle, event = None):
"""
Stop looking in a (set of) zone(s)
"""
# print 'base.cr.removeInterest(',handle,'):',globalClock.getFrameCount()
assert DoInterestManager.notify.debugCall()
assert isinstance(handle, InterestHandle)
existed = False
if not event:
event = self._getAnonymousEvent('removeInterest')
handle = handle.asInt()
if handle in DoInterestManager._interests:
existed = True
intState = DoInterestManager._interests[handle]
if event:
messenger.send(self._getRemoveInterestEvent(),
[event, intState.parentId, intState.zoneIdList])
if intState.isPendingDelete():
self.notify.warning(
'removeInterest: interest %s already pending removal' %
handle)
# this interest is already pending delete, so let's just tack this
# callback onto the list
if event is not None:
intState.addEvent(event)
else:
if len(intState.events) > 0:
# we're not pending a removal, but we have outstanding events?
# probably we are waiting for an add/alter complete.
# should we send those events now?
assert self.notify.warning('removeInterest: abandoning events: %s' %
intState.events)
intState.clearEvents()
intState.state = InterestState.StatePendingDel
contextId = self._getNextContextId()
intState.context = contextId
if event:
intState.addEvent(event)
self._sendRemoveInterest(handle, contextId)
if not event:
self._considerRemoveInterest(handle)
if self.__verbose():
print('CR::INTEREST.removeInterest(handle=%s, event=%s)' % (
handle, event))
else:
DoInterestManager.notify.warning(
"removeInterest: handle not found: %s" % (handle))
assert self.printInterestsIfDebug()
return existed
def removeAutoInterest(self, handle):
"""
Stop looking in a (set of) zone(s)
"""
assert DoInterestManager.notify.debugCall()
assert isinstance(handle, InterestHandle)
existed = False
handle = handle.asInt()
if handle in DoInterestManager._interests:
existed = True
intState = DoInterestManager._interests[handle]
if intState.isPendingDelete():
self.notify.warning(
'removeInterest: interest %s already pending removal' %
handle)
# this interest is already pending delete, so let's just tack this
# callback onto the list
else:
if len(intState.events) > 0:
# we're not pending a removal, but we have outstanding events?
# probably we are waiting for an add/alter complete.
# should we send those events now?
self.notify.warning('removeInterest: abandoning events: %s' %
intState.events)
intState.clearEvents()
intState.state = InterestState.StatePendingDel
self._considerRemoveInterest(handle)
if self.__verbose():
print('CR::INTEREST.removeAutoInterest(handle=%s)' % (handle))
else:
DoInterestManager.notify.warning(
"removeInterest: handle not found: %s" % (handle))
assert self.printInterestsIfDebug()
return existed
@report(types = ['args'], dConfigParam = 'guildmgr')
def removeAIInterest(self, handle):
"""
handle is NOT an InterestHandle. It's just a bare integer representing an
AI opened interest. We're making the client close down this interest since
the AI has trouble removing interests(that its opened) when the avatar goes
offline. See GuildManager(UD) for how it's being used.
"""
self._sendRemoveAIInterest(handle)
def alterInterest(self, handle, parentId, zoneIdList, description=None,
event=None):
"""
Removes old interests and adds new interests.
Note that when an interest is changed, only the most recent
change's event will be triggered. Previous events are abandoned.
If this is a problem, consider opening multiple interests.
"""
assert DoInterestManager.notify.debugCall()
assert isinstance(handle, InterestHandle)
#assert not self._noNewInterests
handle = handle.asInt()
if self._noNewInterests:
DoInterestManager.notify.warning(
"alterInterest: addingInterests on delete: %s" % (handle))
return
exists = False
if event is None:
event = self._getAnonymousEvent('alterInterest')
if handle in DoInterestManager._interests:
if description is not None:
DoInterestManager._interests[handle].desc = description
else:
description = DoInterestManager._interests[handle].desc
# are we overriding an existing change?
if DoInterestManager._interests[handle].context != NO_CONTEXT:
DoInterestManager._interests[handle].clearEvents()
contextId = self._getNextContextId()
DoInterestManager._interests[handle].context = contextId
DoInterestManager._interests[handle].parentId = parentId
DoInterestManager._interests[handle].zoneIdList = zoneIdList
DoInterestManager._interests[handle].addEvent(event)
if self.__verbose():
print('CR::INTEREST.alterInterest(handle=%s, parentId=%s, zoneIdList=%s, description=%s, event=%s)' % (
handle, parentId, zoneIdList, description, event))
self._sendAddInterest(handle, contextId, parentId, zoneIdList, description, action='modify')
exists = True
assert self.printInterestsIfDebug()
else:
DoInterestManager.notify.warning(
"alterInterest: handle not found: %s" % (handle))
return exists
def openAutoInterests(self, obj):
if hasattr(obj, '_autoInterestHandle'):
# must be multiple inheritance
self.notify.debug('openAutoInterests(%s): interests already open' % obj.__class__.__name__)
return
autoInterests = obj.getAutoInterests()
obj._autoInterestHandle = None
if not len(autoInterests):
return
obj._autoInterestHandle = self.addAutoInterest(obj.doId, autoInterests, '%s-autoInterest' % obj.__class__.__name__)
def closeAutoInterests(self, obj):
if not hasattr(obj, '_autoInterestHandle'):
# must be multiple inheritance
self.notify.debug('closeAutoInterests(%s): interests already closed' % obj)
return
if obj._autoInterestHandle is not None:
self.removeAutoInterest(obj._autoInterestHandle)
del obj._autoInterestHandle
# events for InterestWatcher
def _getAddInterestEvent(self):
return self._addInterestEvent
def _getRemoveInterestEvent(self):
return self._removeInterestEvent
def _getInterestState(self, handle):
return DoInterestManager._interests[handle]
def _getNextHandle(self):
handle = DoInterestManager._HandleSerialNum
while True:
handle = (handle + 1) & DoInterestManager._HandleMask
# skip handles that are already in use
if handle not in DoInterestManager._interests:
break
DoInterestManager.notify.warning(
'interest %s already in use' % handle)
DoInterestManager._HandleSerialNum = handle
return DoInterestManager._HandleSerialNum
def _getNextContextId(self):
contextId = DoInterestManager._ContextIdSerialNum
while True:
contextId = (contextId + 1) & DoInterestManager._ContextIdMask
# skip over the 'no context' id
if contextId != NO_CONTEXT:
break
DoInterestManager._ContextIdSerialNum = contextId
return DoInterestManager._ContextIdSerialNum
def _considerRemoveInterest(self, handle):
"""
Consider whether we should cull the interest set.
"""
assert DoInterestManager.notify.debugCall()
if handle in DoInterestManager._interests:
if DoInterestManager._interests[handle].isPendingDelete():
# make sure there is no pending event for this interest
if DoInterestManager._interests[handle].context == NO_CONTEXT:
assert len(DoInterestManager._interests[handle].events) == 0
del DoInterestManager._interests[handle]
if __debug__:
def printInterestsIfDebug(self):
if DoInterestManager.notify.getDebug():
self.printInterests()
return 1 # for assert
def _addDebugInterestHistory(self, action, description, handle,
contextId, parentId, zoneIdList):
if description is None:
description = ''
DoInterestManager._debug_interestHistory.append(
(action, description, handle, contextId, parentId, zoneIdList))
DoInterestManager._debug_maxDescriptionLen = max(
DoInterestManager._debug_maxDescriptionLen, len(description))
def printInterestHistory(self):
print("***************** Interest History *************")
format = '%9s %' + str(DoInterestManager._debug_maxDescriptionLen) + 's %6s %6s %9s %s'
print(format % (
"Action", "Description", "Handle", "Context", "ParentId",
"ZoneIdList"))
for i in DoInterestManager._debug_interestHistory:
print(format % tuple(i))
print("Note: interests with a Context of 0 do not get" \
" done/finished notices.")
def printInterestSets(self):
print("******************* Interest Sets **************")
format = '%6s %' + str(DoInterestManager._debug_maxDescriptionLen) + 's %11s %11s %8s %8s %8s'
print(format % (
"Handle", "Description",
"ParentId", "ZoneIdList",
"State", "Context",
"Event"))
for id, state in DoInterestManager._interests.items():
if len(state.events) == 0:
event = ''
elif len(state.events) == 1:
event = state.events[0]
else:
event = state.events
print(format % (id, state.desc,
state.parentId, state.zoneIdList,
state.state, state.context,
event))
print("************************************************")
def printInterests(self):
self.printInterestHistory()
self.printInterestSets()
def _sendAddInterest(self, handle, contextId, parentId, zoneIdList, description,
action=None):
"""
Part of the new otp-server code.
handle is a client-side created number that refers to
a set of interests. The same handle number doesn't
necessarily have any relationship to the same handle
on another client.
"""
assert DoInterestManager.notify.debugCall()
if __debug__:
if isinstance(zoneIdList, list):
zoneIdList.sort()
if action is None:
action = 'add'
self._addDebugInterestHistory(
action, description, handle, contextId, parentId, zoneIdList)
if parentId == 0:
DoInterestManager.notify.error(
'trying to set interest to invalid parent: %s' % parentId)
datagram = PyDatagram()
# Add message type
if isinstance(zoneIdList, list):
vzl = list(zoneIdList)
vzl.sort()
uniqueElements(vzl)
datagram.addUint16(CLIENT_ADD_INTEREST_MULTIPLE)
datagram.addUint32(contextId)
datagram.addUint16(handle)
datagram.addUint32(parentId)
datagram.addUint16(len(vzl))
for zone in vzl:
datagram.addUint32(zone)
else:
datagram.addUint16(CLIENT_ADD_INTEREST)
datagram.addUint32(contextId)
datagram.addUint16(handle)
datagram.addUint32(parentId)
datagram.addUint32(zoneIdList)
self.send(datagram)
def _sendRemoveInterest(self, handle, contextId):
"""
handle is a client-side created number that refers to
a set of interests. The same handle number doesn't
necessarily have any relationship to the same handle
on another client.
"""
assert DoInterestManager.notify.debugCall()
assert handle in DoInterestManager._interests
datagram = PyDatagram()
# Add message type
datagram.addUint16(CLIENT_REMOVE_INTEREST)
datagram.addUint32(contextId)
datagram.addUint16(handle)
self.send(datagram)
if __debug__:
state = DoInterestManager._interests[handle]
self._addDebugInterestHistory(
"remove", state.desc, handle, contextId,
state.parentId, state.zoneIdList)
def _sendRemoveAIInterest(self, handle):
"""
handle is a bare int, NOT an InterestHandle. Use this to
close an AI opened interest.
"""
datagram = PyDatagram()
# Add message type
datagram.addUint16(CLIENT_REMOVE_INTEREST)
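        # the (1 << 15) offset below appears to tag this as an AI-opened
        # interest handle, keeping it disjoint from the client handles
        # produced by _getNextHandle() (assumption based on usage; the
        # server-side contract is not shown here)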
datagram.addUint16((1<<15) + handle)
self.send(datagram)
def cleanupWaitAllInterestsComplete(self):
if self._completeDelayedCallback is not None:
self._completeDelayedCallback.destroy()
self._completeDelayedCallback = None
def queueAllInterestsCompleteEvent(self, frames=5):
# wait for N frames, if no new interests, send out all-done event
# calling this is OK even if there are no pending interest completes
def checkMoreInterests():
# if there are new interests, cancel this delayed callback, another
# will automatically be scheduled when all interests complete
# print 'checkMoreInterests(',self._completeEventCount.num,'):',globalClock.getFrameCount()
return self._completeEventCount.num > 0
def sendEvent():
messenger.send(self.getAllInterestsCompleteEvent())
for callback in self._allInterestsCompleteCallbacks:
callback()
self._allInterestsCompleteCallbacks = []
self.cleanupWaitAllInterestsComplete()
self._completeDelayedCallback = FrameDelayedCall(
'waitForAllInterestCompletes',
callback=sendEvent,
frames=frames,
cancelFunc=checkMoreInterests)
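        # clear the local names; the FrameDelayedCall above holds its own
        # references to these callbacks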
checkMoreInterests = None
sendEvent = None
def handleInterestDoneMessage(self, di):
"""
This handles the interest done messages and may dispatch an event
"""
assert DoInterestManager.notify.debugCall()
contextId = di.getUint32()
handle = di.getUint16()
if self.__verbose():
print('CR::INTEREST.interestDone(handle=%s)' % handle)
DoInterestManager.notify.debug(
"handleInterestDoneMessage--> Received handle %s, context %s" % (
handle, contextId))
if handle in DoInterestManager._interests:
eventsToSend = []
# if the context matches, send out the event
if contextId == DoInterestManager._interests[handle].context:
DoInterestManager._interests[handle].context = NO_CONTEXT
# the event handlers may call back into the interest manager. Send out
# the events after we're once again in a stable state.
#DoInterestManager._interests[handle].sendEvents()
eventsToSend = list(DoInterestManager._interests[handle].getEvents())
DoInterestManager._interests[handle].clearEvents()
else:
DoInterestManager.notify.debug(
"handleInterestDoneMessage--> handle: %s: Expecting context %s, got %s" % (
handle, DoInterestManager._interests[handle].context, contextId))
if __debug__:
state = DoInterestManager._interests[handle]
self._addDebugInterestHistory(
"finished", state.desc, handle, contextId, state.parentId,
state.zoneIdList)
self._considerRemoveInterest(handle)
for event in eventsToSend:
messenger.send(event)
else:
DoInterestManager.notify.warning(
"handleInterestDoneMessage: handle not found: %s" % (handle))
# if there are no more outstanding interest-completes, send out global all-done event
if self._completeEventCount.num == 0:
self.queueAllInterestsCompleteEvent()
assert self.printInterestsIfDebug()
if __debug__:
import unittest
class AsyncTestCase(unittest.TestCase):
def setCompleted(self):
self._async_completed = True
def isCompleted(self):
return getattr(self, '_async_completed', False)
class AsyncTestSuite(unittest.TestSuite):
pass
class AsyncTestLoader(unittest.TestLoader):
suiteClass = AsyncTestSuite
class AsyncTextTestRunner(unittest.TextTestRunner):
def run(self, testCase):
result = self._makeResult()
startTime = time.time()
            testCase(result)
stopTime = time.time()
timeTaken = stopTime - startTime
result.printErrors()
self.stream.writeln(result.separator2)
run = result.testsRun
self.stream.writeln("Ran %d test%s in %.3fs" %
(run, run != 1 and "s" or "", timeTaken))
self.stream.writeln()
if not result.wasSuccessful():
self.stream.write("FAILED (")
failed, errored = map(len, (result.failures, result.errors))
if failed:
self.stream.write("failures=%d" % failed)
if errored:
if failed: self.stream.write(", ")
self.stream.write("errors=%d" % errored)
self.stream.writeln(")")
else:
self.stream.writeln("OK")
return result
class TestInterestAddRemove(AsyncTestCase, DirectObject.DirectObject):
def testInterestAdd(self):
event = uniqueName('InterestAdd')
self.acceptOnce(event, self.gotInterestAddResponse)
self.handle = base.cr.addInterest(base.cr.GameGlobalsId, 100, 'TestInterest', event=event)
def gotInterestAddResponse(self):
event = uniqueName('InterestRemove')
self.acceptOnce(event, self.gotInterestRemoveResponse)
base.cr.removeInterest(self.handle, event=event)
def gotInterestRemoveResponse(self):
self.setCompleted()
def runTests():
suite = unittest.makeSuite(TestInterestAddRemove)
        AsyncTextTestRunner(verbosity=2).run(suite)
| chandler14362/panda3d | direct/src/distributed/DoInterestManager.py | Python | bsd-3-clause | 29,162 | 0.002846 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# This test checks if dynamic loading of library into MXNet is successful
import os
import platform
import mxnet as mx
import numpy as np
from mxnet import nd
from mxnet.gluon import nn
from mxnet.base import MXNetError
from mxnet.test_utils import download, is_cd_run, assert_almost_equal, default_context
import pytest
base_path = os.path.join(os.path.dirname(__file__), "../../..")
def check_platform():
return platform.machine() not in ['x86_64', 'AMD64']
@pytest.mark.skipif(check_platform(), reason="not all machine types supported")
@pytest.mark.skipif(is_cd_run(), reason="continuous delivery run - ignoring test")
def test_custom_op():
# possible places to find library file
if (os.name=='posix'):
lib = 'libcustomop_lib.so'
if os.path.exists(lib):
fname = lib
elif os.path.exists(os.path.join(base_path,'build/'+lib)):
fname = os.path.join(base_path,'build/'+lib)
else:
raise MXNetError("library %s not found " % lib)
elif (os.name=='nt'):
lib = 'libcustomop_lib.dll'
if os.path.exists('windows_package\\lib\\'+lib):
fname = 'windows_package\\lib\\'+lib
else:
raise MXNetError("library %s not found " % lib)
fname = os.path.abspath(fname)
# load the library containing gemm custom operators
mx.library.load(fname)
# test symbol 2D gemm custom operators
s = mx.sym.Variable('s')
t = mx.sym.Variable('t')
c = mx.sym.my_gemm(s,t)
d = mx.sym.state_gemm(s,t)
# baseline gemm from MXNet
base = mx.sym.linalg.gemm2(s,t)
# get some random input matrices
dim_n, dim_k, dim_m = tuple(np.random.randint(1, 5, size=3))
mat1 = mx.nd.random.uniform(-10, 10, shape=(dim_n, dim_k), ctx=mx.cpu())
mat2 = mx.nd.random.uniform(-10, 10, shape=(dim_k, dim_m), ctx=mx.cpu())
# intermediate ndarrays to be populated by gradient compute
in_grad1 = [mx.nd.empty((dim_n,dim_k),ctx=mx.cpu()),mx.nd.empty((dim_k,dim_m),ctx=mx.cpu())]
in_grad2 = [mx.nd.empty((dim_n,dim_k),ctx=mx.cpu()),mx.nd.empty((dim_k,dim_m),ctx=mx.cpu())]
in_grad_base = [mx.nd.empty((dim_n,dim_k),ctx=mx.cpu()),mx.nd.empty((dim_k,dim_m),ctx=mx.cpu())]
exe1 = c.bind(ctx=mx.cpu(),args={'s':mat1,'t':mat2},args_grad=in_grad1)
exe2 = d.bind(ctx=mx.cpu(),args={'s':mat1,'t':mat2},args_grad=in_grad2)
exe_base = base.bind(ctx=mx.cpu(),args={'s':mat1,'t':mat2},args_grad=in_grad_base)
out1 = exe1.forward()
out2 = exe2.forward()
# test stateful operator by calling it multiple times
out2 = exe2.forward()
out_base = exe_base.forward()
# check that forward compute matches one executed by MXNet
assert_almost_equal(out_base[0].asnumpy(), out1[0].asnumpy(), rtol=1e-3, atol=1e-3)
assert_almost_equal(out_base[0].asnumpy(), out2[0].asnumpy(), rtol=1e-3, atol=1e-3)
# random output grad ndarray for gradient update
out_grad = mx.nd.ones((dim_n, dim_m), ctx=mx.cpu())
exe1.backward([out_grad])
exe2.backward([out_grad])
exe_base.backward([out_grad])
# check that gradient compute matches one executed by MXNet
assert_almost_equal(in_grad_base[0].asnumpy(), in_grad1[0].asnumpy(), rtol=1e-3, atol=1e-3)
assert_almost_equal(in_grad_base[0].asnumpy(), in_grad2[0].asnumpy(), rtol=1e-3, atol=1e-3)
@pytest.mark.skipif(check_platform(), reason="not all machine types supported")
@pytest.mark.skipif(is_cd_run(), reason="continuous delivery run - ignoring test")
def test_subgraph():
# possible places to find library file
if (os.name=='posix'):
lib = 'libsubgraph_lib.so'
if os.path.exists(lib):
# plain make build, when run in the CI
fname = lib
elif os.path.exists(os.path.join(base_path, 'build/'+lib)):
# plain cmake build when run in the CI
fname = os.path.join(base_path, 'build/'+lib)
else:
raise MXNetError("library %s not found " % lib)
elif (os.name=='nt'):
lib = 'libsubgraph_lib.dll'
if os.path.exists('windows_package\\lib\\'+lib):
# plain make build, when run in the CI
fname = 'windows_package\\lib\\'+lib
else:
# plain cmake build when run in the CI
raise MXNetError("library %s not found " % lib)
fname = os.path.abspath(fname)
mx.library.load(fname)
# test simple graph with add, exp and log operators, library supports exp/log
a = mx.sym.var('a')
b = mx.sym.var('b')
c = a + b
d = mx.sym.exp(c)
sym = mx.sym.log(d)
args = {'a':mx.nd.ones((3,2),ctx=mx.cpu()), 'b':mx.nd.ones((3,2),ctx=mx.cpu())}
arg_array = [mx.nd.ones((3,2),dtype='float32',ctx=mx.cpu()),
mx.nd.ones((3,2),dtype='float32',ctx=mx.cpu())]
# baseline - regular execution in MXNet
exe = sym.bind(ctx=mx.cpu(), args=args)
out = exe.forward()
    # without propagating shapes/types, passing a custom option to subgraph prop "myOpt"
# should not create subgraph since subgraph prop requires type info
mysym1 = sym.optimize_for("myProp", myOpt='yello')
exe1 = mysym1.bind(ctx=mx.cpu(), args=args)
out1 = exe1.forward()
# check that result matches one executed by MXNet
assert_almost_equal(out[0].asnumpy(), out1[0].asnumpy(), rtol=1e-3, atol=1e-3)
    # with propagating shapes/types, rejecting subgraph
# this tests creating the subgraph and having the subgraph prop reject it
mysym2 = sym.optimize_for("myProp", arg_array, reject=True)
exe2 = mysym2.bind(ctx=mx.cpu(), args=args)
out2 = exe2.forward()
# check that result matches one executed by MXNet
assert_almost_equal(out[0].asnumpy(), out2[0].asnumpy(), rtol=1e-3, atol=1e-3)
    # with propagating shapes/types
mysym3 = sym.optimize_for("myProp",arg_array)
exe3 = mysym3.bind(ctx=mx.cpu(), args=args)
out3 = exe3.forward()
# check that result matches one executed by MXNet
assert_almost_equal(out[0].asnumpy(), out3[0].asnumpy(), rtol=1e-3, atol=1e-3)
# Gluon Hybridize partitioning with shapes/types
sym_block = nn.SymbolBlock(sym, [a,b])
sym_block.initialize()
sym_block.hybridize(backend='myProp')
out4 = sym_block(mx.nd.ones((3,2)),mx.nd.ones((3,2)))
# check that result matches one executed by MXNet
assert_almost_equal(out[0].asnumpy(), out4[0].asnumpy(), rtol=1e-3, atol=1e-3)
    # Gluon optimize_for partitioning with shapes/types, then export/import
sym_block2 = nn.SymbolBlock(sym, [a,b])
sym_block2.initialize()
a_data = mx.nd.ones((3,2))
b_data = mx.nd.ones((3,2))
sym_block2.optimize_for(a_data, b_data, backend='myProp')
sym_block2.export('optimized')
sym_block3 = nn.SymbolBlock.imports('optimized-symbol.json',['a','b'],
'optimized-0000.params')
out5 = sym_block3(a_data, b_data)
# check that result matches one executed by MXNet
assert_almost_equal(out[0].asnumpy(), out5[0].asnumpy(), rtol=1e-3, atol=1e-3)
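# To run only these checks with standard pytest test selection (the
# repository path below is illustrative):
#   pytest tests/python/unittest/test_extensions.py -k "custom_op or subgraph"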
| zhreshold/mxnet | tests/python/unittest/test_extensions.py | Python | apache-2.0 | 7,787 | 0.010659 |
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
import frappe, os, json
from frappe.utils import cstr
from unidecode import unidecode
from six import iteritems
def create_charts(company, chart_template=None, existing_company=None):
chart = get_chart(chart_template, existing_company)
if chart:
accounts = []
def _import_accounts(children, parent, root_type, root_account=False):
for account_name, child in iteritems(children):
if root_account:
root_type = child.get("root_type")
if account_name not in ["account_number", "account_type",
"root_type", "is_group", "tax_rate"]:
account_number = cstr(child.get("account_number")).strip()
account_name, account_name_in_db = add_suffix_if_duplicate(account_name,
account_number, accounts)
is_group = identify_is_group(child)
report_type = "Balance Sheet" if root_type in ["Asset", "Liability", "Equity"] \
else "Profit and Loss"
account = frappe.get_doc({
"doctype": "Account",
"account_name": account_name,
"company": company,
"parent_account": parent,
"is_group": is_group,
"root_type": root_type,
"report_type": report_type,
"account_number": account_number,
"account_type": child.get("account_type"),
"account_currency": frappe.db.get_value("Company", company, "default_currency"),
"tax_rate": child.get("tax_rate")
})
if root_account or frappe.local.flags.allow_unverified_charts:
account.flags.ignore_mandatory = True
account.flags.ignore_permissions = True
account.insert()
accounts.append(account_name_in_db)
_import_accounts(child, account.name, root_type)
_import_accounts(chart, None, None, root_account=True)
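# Shape of the chart tree consumed by _import_accounts (an illustrative
# example, not a shipped chart): any key that is not one of the reserved
# metadata keys (account_number, account_type, root_type, is_group,
# tax_rate) is treated as a child account name.
#
#   {
#       "Assets": {
#           "root_type": "Asset",
#           "Bank Accounts": {"account_type": "Bank", "is_group": 1},
#       },
#       "Income": {"root_type": "Income"},
#   }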
def add_suffix_if_duplicate(account_name, account_number, accounts):
if account_number:
account_name_in_db = unidecode(" - ".join([account_number,
account_name.strip().lower()]))
else:
account_name_in_db = unidecode(account_name.strip().lower())
if account_name_in_db in accounts:
count = accounts.count(account_name_in_db)
account_name = account_name + " " + cstr(count)
return account_name, account_name_in_db
def identify_is_group(child):
if child.get("is_group"):
is_group = child.get("is_group")
elif len(set(child.keys()) - set(["account_type", "root_type", "is_group", "tax_rate", "account_number"])):
is_group = 1
else:
is_group = 0
return is_group
def get_chart(chart_template, existing_company=None):
chart = {}
if existing_company:
return get_account_tree_from_existing_company(existing_company)
elif chart_template == "Standard":
from erpnext.accounts.doctype.account.chart_of_accounts.verified import standard_chart_of_accounts
return standard_chart_of_accounts.get()
elif chart_template == "Standard with Numbers":
from erpnext.accounts.doctype.account.chart_of_accounts.verified \
import standard_chart_of_accounts_with_account_number
return standard_chart_of_accounts_with_account_number.get()
else:
folders = ("verified",)
if frappe.local.flags.allow_unverified_charts:
folders = ("verified", "unverified")
for folder in folders:
path = os.path.join(os.path.dirname(__file__), folder)
for fname in os.listdir(path):
fname = frappe.as_unicode(fname)
if fname.endswith(".json"):
with open(os.path.join(path, fname), "r") as f:
chart = f.read()
if chart and json.loads(chart).get("name") == chart_template:
return json.loads(chart).get("tree")
@frappe.whitelist()
def get_charts_for_country(country, with_standard=False):
charts = []
def _get_chart_name(content):
if content:
content = json.loads(content)
if (content and content.get("disabled", "No") == "No") \
or frappe.local.flags.allow_unverified_charts:
charts.append(content["name"])
country_code = frappe.db.get_value("Country", country, "code")
if country_code:
folders = ("verified",)
if frappe.local.flags.allow_unverified_charts:
folders = ("verified", "unverified")
for folder in folders:
path = os.path.join(os.path.dirname(__file__), folder)
if not os.path.exists(path):
continue
for fname in os.listdir(path):
fname = frappe.as_unicode(fname)
if (fname.startswith(country_code) or fname.startswith(country)) and fname.endswith(".json"):
with open(os.path.join(path, fname), "r") as f:
_get_chart_name(f.read())
# if more than one charts, returned then add the standard
if len(charts) != 1 or with_standard:
charts += ["Standard", "Standard with Numbers"]
return charts
def get_account_tree_from_existing_company(existing_company):
all_accounts = frappe.get_all('Account',
filters={'company': existing_company},
fields = ["name", "account_name", "parent_account", "account_type",
"is_group", "root_type", "tax_rate", "account_number"],
order_by="lft, rgt")
account_tree = {}
# fill in tree starting with root accounts (those with no parent)
if all_accounts:
build_account_tree(account_tree, None, all_accounts)
return account_tree
def build_account_tree(tree, parent, all_accounts):
# find children
parent_account = parent.name if parent else ""
children = [acc for acc in all_accounts if cstr(acc.parent_account) == parent_account]
# if no children, but a group account
	if not children and parent is not None and parent.is_group:
tree["is_group"] = 1
tree["account_number"] = parent.account_number
# build a subtree for each child
for child in children:
# start new subtree
tree[child.account_name] = {}
# assign account_type and root_type
if child.account_number:
tree[child.account_name]["account_number"] = child.account_number
if child.account_type:
tree[child.account_name]["account_type"] = child.account_type
if child.tax_rate:
tree[child.account_name]["tax_rate"] = child.tax_rate
if not parent:
tree[child.account_name]["root_type"] = child.root_type
# call recursively to build a subtree for current account
build_account_tree(tree[child.account_name], child, all_accounts)
@frappe.whitelist()
def validate_bank_account(coa, bank_account):
accounts = []
chart = get_chart(coa)
if chart:
def _get_account_names(account_master):
for account_name, child in iteritems(account_master):
if account_name not in ["account_number", "account_type",
"root_type", "is_group", "tax_rate"]:
accounts.append(account_name)
_get_account_names(child)
_get_account_names(chart)
return (bank_account in accounts)
| manassolanki/erpnext | erpnext/accounts/doctype/account/chart_of_accounts/chart_of_accounts.py | Python | gpl-3.0 | 6,558 | 0.02516 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('reddit', '0020_pendingsubmissions_submitted'),
]
operations = [
migrations.AddField(
model_name='redditcredentials',
name='reddit_username',
field=models.CharField(max_length=50, null=True),
),
]
| kiwiheretic/logos-v2 | reddit/migrations/0021_redditcredentials_reddit_username.py | Python | apache-2.0 | 440 | 0 |
#
# sublimelinter.py
# Part of SublimeLinter3, a code checking framework for Sublime Text 3
#
# Written by Ryan Hileman and Aparajita Fishman
#
# Project: https://github.com/SublimeLinter/SublimeLinter3
# License: MIT
#
"""This module provides the SublimeLinter plugin class and supporting methods."""
import os
import re
import sublime
import sublime_plugin
from .lint.linter import Linter
from .lint.highlight import HighlightSet
from .lint.queue import queue
from .lint import persist, util
def plugin_loaded():
"""The ST3 entry point for plugins."""
persist.plugin_is_loaded = True
persist.settings.load()
persist.printf('debug mode:', 'on' if persist.debug_mode() else 'off')
util.create_tempdir()
for linter in persist.linter_classes.values():
linter.initialize()
plugin = SublimeLinter.shared_plugin()
queue.start(plugin.lint)
util.generate_menus()
util.generate_color_scheme(from_reload=False)
util.install_syntaxes()
persist.settings.on_update_call(SublimeLinter.on_settings_updated)
# This ensures we lint the active view on a fresh install
window = sublime.active_window()
if window:
plugin.on_activated(window.active_view())
class SublimeLinter(sublime_plugin.EventListener):
"""The main ST3 plugin class."""
# We use this to match linter settings filenames.
    LINTER_SETTINGS_RE = re.compile(r'^SublimeLinter(-.+?)?\.sublime-settings')
shared_instance = None
@classmethod
def shared_plugin(cls):
"""Return the plugin instance."""
return cls.shared_instance
def __init__(self, *args, **kwargs):
"""Initialize a new instance."""
super().__init__(*args, **kwargs)
# Keeps track of which views we have assigned linters to
self.loaded_views = set()
# Keeps track of which views have actually been linted
self.linted_views = set()
# A mapping between view ids and syntax names
self.view_syntax = {}
self.__class__.shared_instance = self
@classmethod
def lint_all_views(cls):
"""Simulate a modification of all views, which will trigger a relint."""
def apply(view):
if view.id() in persist.view_linters:
cls.shared_instance.hit(view)
util.apply_to_all_views(apply)
def lint(self, view_id, hit_time=None, callback=None):
"""
Lint the view with the given id.
This method is called asynchronously by persist.Daemon when a lint
request is pulled off the queue, or called synchronously when the
Lint command is executed or a file is saved and Show Errors on Save
is enabled.
If provided, hit_time is the time at which the lint request was added
to the queue. It is used to determine if the view has been modified
since the lint request was queued. If so, the lint is aborted, since
another lint request is already in the queue.
callback is the method to call when the lint is finished. If not
provided, it defaults to highlight().
"""
# If the view has been modified since the lint was triggered,
# don't lint again.
if hit_time is not None and persist.last_hit_times.get(view_id, 0) > hit_time:
return
view = Linter.get_view(view_id)
if view is None:
return
filename = view.file_name()
code = Linter.text(view)
callback = callback or self.highlight
Linter.lint_view(view, filename, code, hit_time, callback)
def highlight(self, view, linters, hit_time):
"""
Highlight any errors found during a lint of the given view.
This method is called by Linter.lint_view after linting is finished.
linters is a list of the linters that ran. hit_time has the same meaning
as in lint(), and if the view was modified since the lint request was
made, this method aborts drawing marks.
If the view has not been modified since hit_time, all of the marks and
errors from the list of linters are aggregated and drawn, and the status
is updated.
"""
vid = view.id()
# If the view has been modified since the lint was triggered,
# don't draw marks.
if hit_time is not None and persist.last_hit_times.get(vid, 0) > hit_time:
return
errors = {}
highlights = persist.highlights[vid] = HighlightSet()
for linter in linters:
if linter.highlight:
highlights.add(linter.highlight)
if linter.errors:
for line, errs in linter.errors.items():
errors.setdefault(line, []).extend(errs)
# Keep track of one view in each window that shares view's buffer
window_views = {}
buffer_id = view.buffer_id()
for window in sublime.windows():
wid = window.id()
for other_view in window.views():
if other_view.buffer_id() == buffer_id:
vid = other_view.id()
persist.highlights[vid] = highlights
highlights.clear(other_view)
highlights.draw(other_view)
persist.errors[vid] = errors
if window_views.get(wid) is None:
window_views[wid] = other_view
for view in window_views.values():
self.on_selection_modified_async(view)
def hit(self, view):
"""Record an activity that could trigger a lint and enqueue a desire to lint."""
vid = view.id()
self.check_syntax(view)
self.linted_views.add(vid)
if view.size() == 0:
for linter in Linter.get_linters(vid):
linter.clear()
return
persist.last_hit_times[vid] = queue.hit(view)
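    # Timing note (inferred from hit()/lint() above): each hit() records
    # queue.hit(view) into persist.last_hit_times, and lint() aborts when
    # a newer hit exists, so rapid edits collapse into a single lint of
    # the latest buffer state.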
def check_syntax(self, view):
"""
Check and return if view's syntax has changed.
If the syntax has changed, a new linter is assigned.
"""
vid = view.id()
syntax = persist.get_syntax(view)
# Syntax either has never been set or just changed
if vid not in self.view_syntax or self.view_syntax[vid] != syntax:
self.view_syntax[vid] = syntax
Linter.assign(view, reset=True)
self.clear(view)
return True
else:
return False
def clear(self, view):
"""Clear all marks, errors and status from the given view."""
Linter.clear_view(view)
def is_scratch(self, view):
"""
Return whether a view is effectively scratch.
There is a bug (or feature) in the current ST3 where the Find panel
is not marked scratch but has no window.
There is also a bug where settings files opened from within .sublime-package
files are not marked scratch during the initial on_modified event, so we have
to check that a view with a filename actually exists on disk if the file
being opened is in the Sublime Text packages directory.
"""
if view.is_scratch() or view.is_read_only() or view.window() is None or view.settings().get("repl") is not None:
return True
elif (
view.file_name() and
view.file_name().startswith(sublime.packages_path() + os.path.sep) and
not os.path.exists(view.file_name())
):
return True
else:
return False
def view_has_file_only_linter(self, vid):
"""Return True if any linters for the given view are file-only."""
for lint in persist.view_linters.get(vid, []):
if lint.tempfile_suffix == '-':
return True
return False
# sublime_plugin.EventListener event handlers
def on_modified(self, view):
"""Called when a view is modified."""
if self.is_scratch(view):
return
if view.id() not in persist.view_linters:
syntax_changed = self.check_syntax(view)
if not syntax_changed:
return
else:
syntax_changed = False
if syntax_changed or persist.settings.get('lint_mode', 'background') == 'background':
self.hit(view)
else:
self.clear(view)
def on_activated(self, view):
"""Called when a view gains input focus."""
if self.is_scratch(view):
return
# Reload the plugin settings.
persist.settings.load()
self.check_syntax(view)
view_id = view.id()
if view_id not in self.linted_views:
if view_id not in self.loaded_views:
self.on_new(view)
if persist.settings.get('lint_mode', 'background') in ('background', 'load/save'):
self.hit(view)
self.on_selection_modified_async(view)
def on_open_settings(self, view):
"""
Called when any settings file is opened.
view is the view that contains the text of the settings file.
"""
if self.is_settings_file(view, user_only=True):
persist.settings.save(view=view)
def is_settings_file(self, view, user_only=False):
"""Return True if view is a SublimeLinter settings file."""
filename = view.file_name()
if not filename:
return False
if not filename.startswith(sublime.packages_path()):
return False
dirname, filename = os.path.split(filename)
dirname = os.path.basename(dirname)
if self.LINTER_SETTINGS_RE.match(filename):
if user_only:
return dirname == 'User'
else:
return dirname in (persist.PLUGIN_DIRECTORY, 'User')
@classmethod
def on_settings_updated(cls, relint=False):
"""Callback triggered when the settings are updated."""
if relint:
cls.lint_all_views()
else:
Linter.redraw_all()
def on_new(self, view):
"""Called when a new buffer is created."""
self.on_open_settings(view)
if self.is_scratch(view):
return
vid = view.id()
self.loaded_views.add(vid)
self.view_syntax[vid] = persist.get_syntax(view)
def get_focused_view_id(self, view):
"""
Return the focused view which shares view's buffer.
When updating the status, we want to make sure we get
the selection of the focused view, since multiple views
into the same buffer may be open.
"""
active_view = view.window().active_view()
for view in view.window().views():
if view == active_view:
return view
def on_selection_modified_async(self, view):
"""Called when the selection changes (cursor moves or text selected)."""
if self.is_scratch(view):
return
view = self.get_focused_view_id(view)
if view is None:
return
vid = view.id()
# Get the line number of the first line of the first selection.
try:
lineno = view.rowcol(view.sel()[0].begin())[0]
except IndexError:
lineno = -1
if vid in persist.errors:
errors = persist.errors[vid]
if errors:
lines = sorted(list(errors))
counts = [len(errors[line]) for line in lines]
count = sum(counts)
plural = 's' if count > 1 else ''
if lineno in errors:
# Sort the errors by column
line_errors = sorted(errors[lineno], key=lambda error: error[0])
line_errors = [error[1] for error in line_errors]
if plural:
# Sum the errors before the first error on this line
index = lines.index(lineno)
first = sum(counts[0:index]) + 1
if len(line_errors) > 1:
last = first + len(line_errors) - 1
status = '{}-{} of {} errors: '.format(first, last, count)
else:
status = '{} of {} errors: '.format(first, count)
else:
status = 'Error: '
status += '; '.join(line_errors)
else:
status = '%i error%s' % (count, plural)
view.set_status('sublimelinter', status)
else:
view.erase_status('sublimelinter')
def on_pre_save(self, view):
"""
Called before view is saved.
If a settings file is the active view and is saved,
copy the current settings first so we can compare post-save.
"""
if view.window().active_view() == view and self.is_settings_file(view):
persist.settings.copy()
def on_post_save(self, view):
"""Called after view is saved."""
if self.is_scratch(view):
return
# First check to see if the project settings changed
if view.window().project_file_name() == view.file_name():
self.lint_all_views()
else:
# Now see if a .sublimelinterrc has changed
filename = os.path.basename(view.file_name())
if filename == '.sublimelinterrc':
# If a .sublimelinterrc has changed, to be safe
# clear the rc cache and relint.
util.get_rc_settings.cache_clear()
self.lint_all_views()
# If a file other than one of our settings files changed,
# check if the syntax changed or if we need to show errors.
elif filename != 'SublimeLinter.sublime-settings':
self.file_was_saved(view)
def file_was_saved(self, view):
"""Check if the syntax changed or if we need to show errors."""
syntax_changed = self.check_syntax(view)
vid = view.id()
mode = persist.settings.get('lint_mode', 'background')
show_errors = persist.settings.get('show_errors_on_save', False)
if syntax_changed:
self.clear(view)
if vid in persist.view_linters:
if mode != 'manual':
self.lint(vid)
else:
show_errors = False
else:
show_errors = False
else:
if (
show_errors or
mode in ('load/save', 'save only') or
mode == 'background' and self.view_has_file_only_linter(vid)
):
self.lint(vid)
elif mode == 'manual':
show_errors = False
if show_errors and vid in persist.errors and persist.errors[vid]:
view.run_command('sublimelinter_show_all_errors')
def on_close(self, view):
"""Called after view is closed."""
if self.is_scratch(view):
return
vid = view.id()
if vid in self.loaded_views:
self.loaded_views.remove(vid)
if vid in self.linted_views:
self.linted_views.remove(vid)
if vid in self.view_syntax:
del self.view_syntax[vid]
persist.view_did_close(vid)
class SublimelinterEditCommand(sublime_plugin.TextCommand):
"""A plugin command used to generate an edit object for a view."""
def run(self, edit):
"""Run the command."""
persist.edit(self.view.id(), edit)
| yubchen/Qlinter | sublimelinter.py | Python | mit | 15,716 | 0.001145 |
from django.conf.urls.defaults import *
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns('',
(r'^login/', 'django.contrib.auth.views.login'),
(r'^admin/', include(admin.site.urls)),
)
| callowayproject/django-cookiesession | example/urls.py | Python | apache-2.0 | 222 | 0.004505 |
import os
from .utils import get_config
PACKAGE_PATH = os.path.abspath(os.path.dirname(__file__))
REPO_PATH = os.path.join(PACKAGE_PATH, os.pardir)
LOG_PATH = '~/Library/Application Support/Adium 2.0/Users/Default/Logs'
LOG_PATH = os.path.expanduser(LOG_PATH)
MOVED_LOG_PATH = os.path.expanduser('~/.adiumshlogs')
CONFIG_PATH = os.path.expanduser('~/.adiumsh') \
if not os.environ.get('ADIUMSH_TEST') \
else os.path.join(REPO_PATH, '.adiumsh')
default_config = get_config(CONFIG_PATH, 'default')
DEFAULT_ACCOUNT = \
default_config.get('account', None) if default_config else None
DEFAULT_SERVICE = \
default_config.get('service', None) if default_config else None
DEFAULT_BUDDY = \
default_config.get('buddy', None) if default_config else None
DEFAULT_CHAT = \
default_config.get('chat', None) if default_config else None
EVENT_MESSAGE_RECEIVED = 'MESSAGE_RECEIVED'
EVENT_MESSAGE_SENT = 'MESSAGE_SENT'
EVENT_STATUS_AWAY = 'STATUS_AWAY'
EVENT_STATUS_ONLINE = 'STATUS_ONLINE'
EVENT_STATUS_OFFLINE = 'STATUS_OFFLINE'
EVENT_STATUS_CONNECTED = 'STATUS_CONNECTED'
EVENT_STATUS_DISCONNECTED = 'STATUS_DISCONNECTED'
| shichao-an/adium-sh | adiumsh/settings.py | Python | bsd-2-clause | 1,138 | 0 |
from django.http import HttpResponseRedirect
from django.conf import settings
from django.utils.deprecation import MiddlewareMixin
from re import compile
EXEMPT_URLS = [compile(settings.LOGIN_URL.lstrip('/'))]
if hasattr(settings, 'LOGIN_EXEMPT_URLS'):
EXEMPT_URLS += [compile(expr) for expr in settings.LOGIN_EXEMPT_URLS]
class LoginRequiredMiddleware(MiddlewareMixin):
"""
Middleware that requires a user to be authenticated to view any page other
than LOGIN_URL. Exemptions to this requirement can optionally be specified
in settings via a list of regular expressions in LOGIN_EXEMPT_URLS (which
you can copy from your urls.py).
Requires authentication middleware and template context processors to be
loaded. You'll get an error if they aren't.
"""
def process_request(self, request):
assert hasattr(request, 'user'), "The Login Required middleware\
requires authentication middleware to be installed. Edit your\
MIDDLEWARE_CLASSES setting to insert\
'django.contrib.auth.middlware.AuthenticationMiddleware'. If that doesn't\
work, ensure your TEMPLATE_CONTEXT_PROCESSORS setting includes\
'django.core.context_processors.auth'."
if not request.user.is_authenticated():
path = request.path_info.lstrip('/')
if not any(m.match(path) for m in EXEMPT_URLS):
return HttpResponseRedirect(settings.LOGIN_URL)
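# Example settings wiring for this middleware (hypothetical patterns; they
# are matched against request.path_info with the leading '/' stripped):
#
#   LOGIN_URL = '/login/'
#   LOGIN_EXEMPT_URLS = (
#       r'^about\.html$',
#       r'^legal/',  # exempt the whole /legal/ subtree
#   )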
| forestdussault/olc_webportalv2 | olc_webportalv2/users/middleware.py | Python | mit | 1,413 | 0 |
# -*- coding: utf-8 -*-
# Etalage -- Open Data POIs portal
# By: Emmanuel Raviart <eraviart@easter-eggs.com>
#
# Copyright (C) 2011, 2012 Easter-eggs
# http://gitorious.org/infos-pratiques/etalage
#
# This file is part of Etalage.
#
# Etalage is free software; you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# Etalage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Context loaded and saved in WSGI requests"""
import gettext
import webob
from . import conf
__all__ = ['Ctx', 'null_ctx']
class Ctx(object):
_parent = None
default_values = dict(
_lang = None,
_scopes = UnboundLocalError,
_translator = None,
base_categories_slug = None,
category_tags_slug = None,
container_base_url = None,
distance = None, # Max distance in km
gadget_id = None,
hide_directory = False,
req = None,
subscriber = None,
)
env_keys = ('_lang', '_scopes', '_translator')
def __init__(self, req = None):
if req is not None:
self.req = req
etalage_env = req.environ.get('etalage', {})
for key in object.__getattribute__(self, 'env_keys'):
value = etalage_env.get(key)
if value is not None:
setattr(self, key, value)
def __getattribute__(self, name):
try:
return object.__getattribute__(self, name)
except AttributeError:
parent = object.__getattribute__(self, '_parent')
if parent is None:
default_values = object.__getattribute__(self, 'default_values')
if name in default_values:
return default_values[name]
raise
return getattr(parent, name)
@property
def _(self):
return self.translator.ugettext
def blank_req(self, path, environ = None, base_url = None, headers = None, POST = None, **kw):
env = environ.copy() if environ else {}
etalage_env = env.setdefault('etalage', {})
for key in self.env_keys:
value = getattr(self, key)
if value is not None:
etalage_env[key] = value
return webob.Request.blank(path, environ = env, base_url = base_url, headers = headers, POST = POST, **kw)
def get_containing(self, name, depth = 0):
"""Return the n-th (n = ``depth``) context containing attribute named ``name``."""
ctx_dict = object.__getattribute__(self, '__dict__')
if name in ctx_dict:
if depth <= 0:
return self
depth -= 1
parent = ctx_dict.get('_parent')
if parent is None:
return None
return parent.get_containing(name, depth = depth)
def get_inherited(self, name, default = UnboundLocalError, depth = 1):
ctx = self.get_containing(name, depth = depth)
if ctx is None:
if default is UnboundLocalError:
raise AttributeError('Attribute %s not found in %s' % (name, self))
return default
return object.__getattribute__(ctx, name)
def iter(self):
yield self
parent = object.__getattribute__(self, '_parent')
if parent is not None:
for ancestor in parent.iter():
yield ancestor
def iter_containing(self, name):
ctx_dict = object.__getattribute__(self, '__dict__')
if name in ctx_dict:
yield self
parent = ctx_dict.get('_parent')
if parent is not None:
for ancestor in parent.iter_containing(name):
yield ancestor
def iter_inherited(self, name):
for ctx in self.iter_containing(name):
yield object.__getattribute__(ctx, name)
def lang_del(self):
del self._lang
if self.req is not None and self.req.environ.get('etalage') is not None \
and '_lang' in self.req.environ['etalage']:
del self.req.environ['etalage']['_lang']
def lang_get(self):
if self._lang is None:
# self._lang = self.req.accept_language.best_matches('en-US') if self.req is not None else []
# Note: Don't forget to add country-less language code when only a "language-COUNTRY" code is given.
self._lang = ['fr-FR', 'fr']
if self.req is not None:
self.req.environ.setdefault('etalage', {})['_lang'] = self._lang
return self._lang
def lang_set(self, lang):
self._lang = lang
if self.req is not None:
self.req.environ.setdefault('etalage', {})['_lang'] = self._lang
# Reinitialize translator for new languages.
if self._translator is not None:
# Don't del self._translator, because attribute _translator can be defined in a parent.
self._translator = None
if self.req is not None and self.req.environ.get('etalage') is not None \
and '_translator' in self.req.environ['etalage']:
del self.req.environ['etalage']['_translator']
lang = property(lang_get, lang_set, lang_del)
def new(self, **kwargs):
ctx = Ctx()
ctx._parent = self
for name, value in kwargs.iteritems():
setattr(ctx, name, value)
return ctx
@property
def parent(self):
return object.__getattribute__(self, '_parent')
def scopes_del(self):
del self._scopes
if self.req is not None and self.req.environ.get('wenoit_etalage') is not None \
and '_scopes' in self.req.environ['wenoit_etalage']:
del self.req.environ['wenoit_etalage']['_scopes']
def scopes_get(self):
return self._scopes
def scopes_set(self, scopes):
self._scopes = scopes
if self.req is not None:
self.req.environ.setdefault('wenoit_etalage', {})['_scopes'] = scopes
scopes = property(scopes_get, scopes_set, scopes_del)
@property
def session(self):
return self.req.environ.get('beaker.session') if self.req is not None else None
@property
def translator(self):
"""Get a valid translator object from one or several languages names."""
if self._translator is None:
languages = self.lang
if not languages:
return gettext.NullTranslations()
if not isinstance(languages, list):
languages = [languages]
translator = gettext.NullTranslations()
i18n_dir_by_plugin_name = conf['i18n_dir_by_plugin_name'] or {}
for name, i18n_dir in [
('biryani', conf['biryani_i18n_dir']),
(conf['package_name'], conf['i18n_dir']),
] + sorted(i18n_dir_by_plugin_name.iteritems()):
if name is not None and i18n_dir is not None:
translator = new_translator(name, i18n_dir, languages, fallback = translator)
self._translator = translator
return self._translator
null_ctx = Ctx()
null_ctx.lang = ['fr-FR', 'fr']
def new_translator(domain, localedir, languages, fallback = None):
new = gettext.translation(domain, localedir, fallback = True, languages = languages)
if fallback is not None:
new.add_fallback(fallback)
return new
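# Usage sketch (illustrative values): contexts created with new() inherit
# any attribute they do not define from their parent chain, falling back
# to Ctx.default_values at the root:
#
#   ctx = null_ctx.new(gadget_id='demo')
#   sub = ctx.new(distance=10)
#   sub.distance   # -> 10
#   sub.gadget_id  # -> 'demo' (inherited from ctx)
#   sub.subscriber # -> None (default value)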
| Gentux/etalage | etalage/contexts.py | Python | agpl-3.0 | 7,875 | 0.009524 |
# -*- coding: utf-8 -*-
##############################################################################
# For copyright and license notices, see __openerp__.py file in root directory
##############################################################################
from . import wiz_lock_lot
| alhashash/odoomrp-wip | stock_lock_lot/wizard/__init__.py | Python | agpl-3.0 | 288 | 0 |
#!/usr/bin/env python
#
# This script maps breed names to Livestock Breed Ontology (LBO) IDs.
#
# Input files:
# ONTO.tsv with two columns: id (format: <ontology_acronym>:<term_id>) and name
# pigQTLdb.tsv with two columns: qtl_id and breed (comma-separated field of values)
#
# Output (STDOUT):
# three columns separated by \t: id, name, LBO breed IDs
#
sep = '\t'
file1 = '../data/ONTO.tsv'
file2 = '../data/pigQTLdb.tsv'
lookup = dict()
with open(file1) as fin:
for ln in fin:
ln = ln.rstrip()
id, name = ln.split(sep)
lookup[name.lower()] = id
with open(file2) as fin:
for ln in fin:
ln = ln.rstrip()
cols = ln.split(sep)
if len(cols) == 2:
ids = []
for b in cols[1].lower().split(','):
if b in lookup:
ids.append(lookup[b])
print(ln + sep + ','.join(ids))
else:
print(ln)
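# Worked example (hypothetical rows): with ONTO.tsv containing
# "LBO:0000123<TAB>landrace" and pigQTLdb.tsv containing
# "QTL1<TAB>Landrace,Duroc", the script emits
# "QTL1<TAB>Landrace,Duroc<TAB>LBO:0000123" -- Duroc contributes no id
# because it has no ONTO.tsv entry in this example.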
| candYgene/abg-ld | src/tmp/ontomap_breed.py | Python | apache-2.0 | 894 | 0.021253 |
from ..sqlclear import SQLClearCommand as Command
| skibblenybbles/django-commando | commando/django/core/management/commands/sqlclear.py | Python | mit | 50 | 0 |
from setuptools import setup, find_packages
setup(name='pocket',
version='0.0.0',
packages=find_packages(),
install_requires=['sneeze'],
entry_points={'nose.plugins.sneeze.plugins.add_models' : ['pocket_models = pocket.database:add_models'],
'nose.plugins.sneeze.plugins.add_options' : ['pocket_options = pocket.log_lib:add_options'],
                    'nose.plugins.sneeze.plugins.managers' : ['pocket_manager = pocket.log_lib:TissueHandler']})
| psusloparov/sneeze | pocket/setup.py | Python | apache-2.0 | 492 | 0.014228 |
# -*- coding: utf-8 -*-
from flask import render_template, redirect, url_for, flash, abort
from purchasing.decorators import requires_roles
from purchasing.data.stages import Stage
from purchasing.data.flows import Flow
from purchasing.conductor.forms import FlowForm, NewFlowForm
from purchasing.conductor.manager import blueprint
@blueprint.route('/flow/new', methods=['GET', 'POST'])
@requires_roles('conductor', 'admin', 'superadmin')
def new_flow():
'''Create a new flow
:status 200: Render the new flow template
:status 302: Try to create a new flow using the
:py:class:`~purchasing.conductor.forms.NewFlowForm`, redirect
to the flows list view if successful
'''
stages = Stage.choices_factory()
form = NewFlowForm(stages=stages)
if form.validate_on_submit():
stage_order = []
for entry in form.stage_order.entries:
# try to evaluate the return value as an ID
try:
stage_id = int(entry.data)
# otherwise it's a new stage
except ValueError:
new_stage = Stage.create(name=entry.data)
stage_id = new_stage.id
stage_order.append(stage_id)
Flow.create(flow_name=form.flow_name.data, stage_order=stage_order)
flash('Flow created successfully!', 'alert-success')
return redirect(url_for('conductor.flows_list'))
return render_template('conductor/flows/new.html', stages=stages, form=form)
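# Note on new_flow above (illustrative POST data): stage_order entries may
# mix existing stage ids with brand-new stage names, e.g.
# ['3', 'Contract Review', '7'] creates a "Contract Review" stage and
# saves the flow's stage_order as [3, <new stage id>, 7].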
@blueprint.route('/flows')
@requires_roles('conductor', 'admin', 'superadmin')
def flows_list():
'''List all flows
:status 200: Render the all flows list template
'''
flows = Flow.query.order_by(Flow.flow_name).all()
active, archived = [], []
for flow in flows:
if flow.is_archived:
archived.append(flow)
else:
active.append(flow)
return render_template('conductor/flows/browse.html', active=active, archived=archived)
@blueprint.route('/flow/<int:flow_id>', methods=['GET', 'POST'])
@requires_roles('conductor', 'admin', 'superadmin')
def flow_detail(flow_id):
'''View/edit a flow's details
:status 200: Render the flow edit template
    :status 302: Post changes to a flow using the submitted
:py:class:`~purchasing.conductor.forms.FlowForm`, redirect back to
the current flow's detail page if successful
'''
flow = Flow.query.get(flow_id)
if flow:
form = FlowForm(obj=flow)
if form.validate_on_submit():
flow.update(
flow_name=form.data['flow_name'],
is_archived=form.data['is_archived']
)
flash('Flow successfully updated', 'alert-success')
return redirect(url_for('conductor.flow_detail', flow_id=flow.id))
return render_template('conductor/flows/edit.html', form=form, flow=flow)
abort(404)
| codeforamerica/pittsburgh-purchasing-suite | purchasing/conductor/manager/flow_management.py | Python | bsd-3-clause | 2,911 | 0.002061 |
# -*- coding: utf-8 -*-
"""
Copyright (C) 2015, MuChu Hsu
Contributed by Muchu Hsu (muchu1983@gmail.com)
This file is part of BSD license
<https://opensource.org/licenses/BSD-3-Clause>
"""
import unittest
import logging
from cameo.spiderForPEDAILY import SpiderForPEDAILY
"""
Tests for scraping PEDAILY
"""
class SpiderForPEDAILYTest(unittest.TestCase):
    # set up
def setUp(self):
logging.basicConfig(level=logging.INFO)
self.spider = SpiderForPEDAILY()
self.spider.initDriver()
    # tear down
def tearDown(self):
self.spider.quitDriver()
"""
    # test downloading the index page
def test_downloadIndexPage(self):
logging.info("SpiderForPEDAILYTest.test_downloadIndexPage")
self.spider.downloadIndexPage()
    # test downloading the category page
def test_downloadCategoryPage(self):
logging.info("SpiderForPEDAILYTest.test_downloadCategoryPage")
self.spider.downloadCategoryPage()
"""
    # test downloading the news pages
def test_downloadNewsPage(self):
logging.info("SpiderForPEDAILYTest.test_downloadNewsPage")
self.spider.downloadNewsPage(strCategoryName=None)
# run the tests
if __name__ == "__main__":
unittest.main(exit=False)
| muchu1983/104_cameo | test/unit/test_spiderForPEDAILY.py | Python | bsd-3-clause | 1,237 | 0.00841 |
# Copyright 2011 Sybren A. Stüvel <sybren@stuvel.eu>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility functions."""
import sys
from optparse import OptionParser
import rsa.key
def private_to_public() -> None:
"""Reads a private key and outputs the corresponding public key."""
# Parse the CLI options
parser = OptionParser(usage='usage: %prog [options]',
description='Reads a private key and outputs the '
'corresponding public key. Both private and public keys use '
'the format described in PKCS#1 v1.5')
parser.add_option('-i', '--input', dest='infilename', type='string',
help='Input filename. Reads from stdin if not specified')
parser.add_option('-o', '--output', dest='outfilename', type='string',
help='Output filename. Writes to stdout of not specified')
parser.add_option('--inform', dest='inform',
help='key format of input - default PEM',
choices=('PEM', 'DER'), default='PEM')
parser.add_option('--outform', dest='outform',
help='key format of output - default PEM',
choices=('PEM', 'DER'), default='PEM')
(cli, cli_args) = parser.parse_args(sys.argv)
# Read the input data
if cli.infilename:
print('Reading private key from %s in %s format' %
(cli.infilename, cli.inform), file=sys.stderr)
with open(cli.infilename, 'rb') as infile:
in_data = infile.read()
else:
print('Reading private key from stdin in %s format' % cli.inform,
file=sys.stderr)
in_data = sys.stdin.read().encode('ascii')
assert type(in_data) == bytes, type(in_data)
# Take the public fields and create a public key
priv_key = rsa.key.PrivateKey.load_pkcs1(in_data, cli.inform)
pub_key = rsa.key.PublicKey(priv_key.n, priv_key.e)
# Save to the output file
out_data = pub_key.save_pkcs1(cli.outform)
if cli.outfilename:
print('Writing public key to %s in %s format' %
(cli.outfilename, cli.outform), file=sys.stderr)
with open(cli.outfilename, 'wb') as outfile:
outfile.write(out_data)
else:
print('Writing public key to stdout in %s format' % cli.outform,
file=sys.stderr)
sys.stdout.write(out_data.decode('ascii'))
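# Typical invocation once installed via the package's console-script
# entry point (filenames are illustrative):
#   pyrsa-priv2pub -i private.pem -o public.pem --inform PEM --outform PEM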
| javier-ruiz-b/docker-rasppi-images | raspberry-google-home/env/lib/python3.7/site-packages/rsa/util.py | Python | apache-2.0 | 2,986 | 0.00067 |
#!/usr/local/bin/python
# -*- coding: utf-8 -*-
# Author: illuz <iilluzen[at]gmail.com>
# File: AC_dfs_n.py
# Create Date: 2015-08-16 10:15:54
# Usage: AC_dfs_n.py
# Description:
# Definition for a binary tree node.
class TreeNode:
def __init__(self, x):
self.val = x
self.left = None
self.right = None
class Solution:
# @param {TreeNode} root
# @return {string[]}
def binaryTreePaths(self, root):
        path = []
paths = []
def dfs(root):
if root:
path.append(str(root.val))
if root.left == None and root.right == None:
paths.append('->'.join(path))
dfs(root.left)
dfs(root.right)
path.pop()
dfs(root)
return paths
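# Quick self-check using the tree [1, 2, 3, null, 5] from the problem
# statement; expected output: ['1->2->5', '1->3']
if __name__ == '__main__':
    root = TreeNode(1)
    root.left, root.right = TreeNode(2), TreeNode(3)
    root.left.right = TreeNode(5)
    print(Solution().binaryTreePaths(root))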
| yxping/leetcode | solutions/257.Binary_Tree_Paths/AC_dfs_n.py | Python | gpl-2.0 | 836 | 0.010766 |
from twisted.internet import threads
from config import config
from enigma import eDBoxLCD, eTimer, iPlayableService
import NavigationInstance
from Tools.Directories import fileExists
from Components.ParentalControl import parentalControl
from Components.ServiceEventTracker import ServiceEventTracker
from Components.SystemInfo import SystemInfo
from boxbranding import getBoxType
POLLTIME = 5 # seconds
def SymbolsCheck(session, **kwargs):
global symbolspoller, POLLTIME
	if getBoxType() in ('dummy',):
POLLTIME = 1
symbolspoller = SymbolsCheckPoller(session)
symbolspoller.start()
class SymbolsCheckPoller:
def __init__(self, session):
self.session = session
self.blink = False
self.led = "0"
self.timer = eTimer()
self.onClose = []
self.__event_tracker = ServiceEventTracker(screen=self,eventmap=
{
iPlayableService.evUpdatedInfo: self.__evUpdatedInfo,
})
def __onClose(self):
pass
def start(self):
if self.symbolscheck not in self.timer.callback:
self.timer.callback.append(self.symbolscheck)
self.timer.startLongTimer(0)
def stop(self):
if self.symbolscheck in self.timer.callback:
self.timer.callback.remove(self.symbolscheck)
self.timer.stop()
def symbolscheck(self):
threads.deferToThread(self.JobTask)
self.timer.startLongTimer(POLLTIME)
def JobTask(self):
self.Recording()
self.PlaySymbol()
self.timer.startLongTimer(POLLTIME)
def __evUpdatedInfo(self):
self.service = self.session.nav.getCurrentService()
self.Subtitle()
self.ParentalControl()
self.PlaySymbol()
del self.service
def Recording(self):
if fileExists("/proc/stb/lcd/symbol_circle"):
recordings = len(NavigationInstance.instance.getRecordings())
if recordings > 0:
open("/proc/stb/lcd/symbol_circle", "w").write("3")
else:
open("/proc/stb/lcd/symbol_circle", "w").write("0")
		elif getBoxType() in ('dummy',):
recordings = len(NavigationInstance.instance.getRecordings())
if recordings > 0:
open("/proc/stb/lcd/symbol_recording", "w").write("1")
else:
open("/proc/stb/lcd/symbol_recording", "w").write("0")
		elif getBoxType() in ('dummy',):
recordings = len(NavigationInstance.instance.getRecordings())
self.blink = not self.blink
if recordings > 0:
if self.blink:
open("/proc/stb/lcd/powerled", "w").write("1")
self.led = "1"
else:
open("/proc/stb/lcd/powerled", "w").write("0")
self.led = "0"
elif self.led == "1":
open("/proc/stb/lcd/powerled", "w").write("0")
		elif getBoxType() in ('dummy',):
recordings = len(NavigationInstance.instance.getRecordings())
self.blink = not self.blink
if recordings > 0:
if self.blink:
open("/proc/stb/lcd/powerled", "w").write("0")
self.led = "1"
else:
open("/proc/stb/lcd/powerled", "w").write("1")
self.led = "0"
elif self.led == "1":
open("/proc/stb/lcd/powerled", "w").write("1")
else:
if not fileExists("/proc/stb/lcd/symbol_recording") or not fileExists("/proc/stb/lcd/symbol_record_1") or not fileExists("/proc/stb/lcd/symbol_record_2"):
return
recordings = len(NavigationInstance.instance.getRecordings())
if recordings > 0:
open("/proc/stb/lcd/symbol_recording", "w").write("1")
if recordings == 1:
open("/proc/stb/lcd/symbol_record_1", "w").write("1")
open("/proc/stb/lcd/symbol_record_2", "w").write("0")
elif recordings >= 2:
open("/proc/stb/lcd/symbol_record_1", "w").write("1")
open("/proc/stb/lcd/symbol_record_2", "w").write("1")
else:
open("/proc/stb/lcd/symbol_recording", "w").write("0")
open("/proc/stb/lcd/symbol_record_1", "w").write("0")
open("/proc/stb/lcd/symbol_record_2", "w").write("0")
def Subtitle(self):
if not fileExists("/proc/stb/lcd/symbol_smartcard"):
return
subtitle = self.service and self.service.subtitle()
subtitlelist = subtitle and subtitle.getSubtitleList()
if subtitlelist:
subtitles = len(subtitlelist)
if subtitles > 0:
open("/proc/stb/lcd/symbol_smartcard", "w").write("1")
else:
open("/proc/stb/lcd/symbol_smartcard", "w").write("0")
else:
open("/proc/stb/lcd/symbol_smartcard", "w").write("0")
def ParentalControl(self):
if not fileExists("/proc/stb/lcd/symbol_parent_rating"):
return
service = self.session.nav.getCurrentlyPlayingServiceReference()
if service:
if parentalControl.getProtectionLevel(service.toCompareString()) == -1:
open("/proc/stb/lcd/symbol_parent_rating", "w").write("0")
else:
open("/proc/stb/lcd/symbol_parent_rating", "w").write("1")
else:
open("/proc/stb/lcd/symbol_parent_rating", "w").write("0")
def PlaySymbol(self):
if not fileExists("/proc/stb/lcd/symbol_play "):
return
if SystemInfo["SeekStatePlay"]:
open("/proc/stb/lcd/symbol_play ", "w").write("1")
else:
open("/proc/stb/lcd/symbol_play ", "w").write("0") | idrogeno/IdroMips | lib/python/Components/VfdSymbols.py | Python | gpl-2.0 | 4,865 | 0.027955 |
import hashlib
import os
import tempfile
import zipfile
from bs4 import BeautifulSoup
from django.test import Client
from django.test import TestCase
from mock import patch
from ..models import LocalFile
from ..utils.paths import get_content_storage_file_path
from kolibri.core.auth.test.helpers import provision_device
from kolibri.utils.tests.helpers import override_option
DUMMY_FILENAME = "hashi123.js"
empty_content = '<html><head><script src="/static/content/hashi123.js"></script></head><body></body></html>'
@patch("kolibri.core.content.views.get_hashi_filename", return_value=DUMMY_FILENAME)
@override_option("Paths", "CONTENT_DIR", tempfile.mkdtemp())
class ZipContentTestCase(TestCase):
"""
Testcase for zipcontent endpoint
"""
index_name = "index.html"
index_str = "<html></html>"
other_name = "other.html"
other_str = "<html><head></head></html>"
script_name = "script.html"
script_str = "<html><head><script>test</script></head></html>"
async_script_name = "async_script.html"
async_script_str = (
'<html><head><script async src="url/url.js"></script></head></html>'
)
empty_html_name = "empty.html"
empty_html_str = ""
doctype_name = "doctype.html"
doctype = """
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01//EN" "http://www.w3.org/TR/html4/strict.dtd">
"""
doctype_str = doctype + "<html><head><script>test</script></head></html>"
html5_doctype_name = "html5_doctype.html"
html5_doctype = "<!DOCTYPE HTML>"
html5_doctype_str = (
html5_doctype + "<html><head><script>test</script></head></html>"
)
test_name_1 = "testfile1.txt"
test_str_1 = "This is a test!"
test_name_2 = "testfile2.txt"
test_str_2 = "And another test..."
embedded_file_name = "test/this/path/test.txt"
embedded_file_str = "Embedded file test"
def setUp(self):
self.client = Client()
provision_device()
self.hash = hashlib.md5("DUMMYDATA".encode()).hexdigest()
self.extension = "zip"
self.filename = "{}.{}".format(self.hash, self.extension)
self.zip_path = get_content_storage_file_path(self.filename)
zip_path_dir = os.path.dirname(self.zip_path)
if not os.path.exists(zip_path_dir):
os.makedirs(zip_path_dir)
with zipfile.ZipFile(self.zip_path, "w") as zf:
zf.writestr(self.index_name, self.index_str)
zf.writestr(self.other_name, self.other_str)
zf.writestr(self.script_name, self.script_str)
zf.writestr(self.async_script_name, self.async_script_str)
zf.writestr(self.empty_html_name, self.empty_html_str)
zf.writestr(self.doctype_name, self.doctype_str)
zf.writestr(self.html5_doctype_name, self.html5_doctype_str)
zf.writestr(self.test_name_1, self.test_str_1)
zf.writestr(self.test_name_2, self.test_str_2)
zf.writestr(self.embedded_file_name, self.embedded_file_str)
self.zip_file_obj = LocalFile(
id=self.hash, extension=self.extension, available=True
)
self.zip_file_base_url = self.zip_file_obj.get_storage_url()
def test_zip_file_url_reversal(self, filename_patch):
file = LocalFile(id=self.hash, extension=self.extension, available=True)
self.assertEqual(
file.get_storage_url(), "/zipcontent/{}/".format(self.filename)
)
def test_non_zip_file_url_reversal(self, filename_patch):
file = LocalFile(id=self.hash, extension="otherextension", available=True)
filename = file.get_filename()
self.assertEqual(
file.get_storage_url(),
"/content/storage/{}/{}/{}".format(filename[0], filename[1], filename),
)
def test_zip_file_internal_file_access(self, filename_patch):
# test reading the data from file #1 inside the zip
response = self.client.get(self.zip_file_base_url + self.test_name_1)
self.assertEqual(next(response.streaming_content).decode(), self.test_str_1)
# test reading the data from file #2 inside the zip
response = self.client.get(self.zip_file_base_url + self.test_name_2)
self.assertEqual(next(response.streaming_content).decode(), self.test_str_2)
def test_nonexistent_zip_file_access(self, filename_patch):
bad_base_url = self.zip_file_base_url.replace(
self.zip_file_base_url[20:25], "aaaaa"
)
response = self.client.get(bad_base_url + self.test_name_1)
self.assertEqual(response.status_code, 404)
def test_zip_file_nonexistent_internal_file_access(self, filename_patch):
response = self.client.get(self.zip_file_base_url + "qqq" + self.test_name_1)
self.assertEqual(response.status_code, 404)
def test_non_allowed_file_internal_file_access(self, filename_patch):
response = self.client.get(
self.zip_file_base_url.replace("zip", "png") + self.test_name_1
)
self.assertEqual(response.status_code, 404)
def test_not_modified_response_when_if_modified_since_header_set(
self, filename_patch
):
caching_client = Client(HTTP_IF_MODIFIED_SINCE="Sat, 10-Sep-2016 19:14:07 GMT")
response = caching_client.get(self.zip_file_base_url + self.test_name_1)
self.assertEqual(response.status_code, 304)
def test_content_security_policy_header(self, filename_patch):
response = self.client.get(self.zip_file_base_url + self.test_name_1)
self.assertEqual(
response.get("Content-Security-Policy"),
"default-src 'self' 'unsafe-inline' 'unsafe-eval' data: blob: http://testserver",
)
def test_content_security_policy_header_http_referer(self, filename_patch):
response = self.client.get(
self.zip_file_base_url + self.test_name_1,
HTTP_REFERER="http://testserver:1234/iam/a/real/path/#thatsomeonemightuse",
)
self.assertEqual(
response.get("Content-Security-Policy"),
"default-src 'self' 'unsafe-inline' 'unsafe-eval' data: blob: http://testserver:1234",
)
def test_access_control_allow_origin_header(self, filename_patch):
response = self.client.get(self.zip_file_base_url + self.test_name_1)
self.assertEqual(response.get("Access-Control-Allow-Origin"), "*")
response = self.client.options(self.zip_file_base_url + self.test_name_1)
self.assertEqual(response.get("Access-Control-Allow-Origin"), "*")
def test_x_frame_options_header(self, filename_patch):
response = self.client.get(self.zip_file_base_url + self.test_name_1)
self.assertEqual(response.get("X-Frame-Options", ""), "")
def test_access_control_allow_headers(self, filename_patch):
headerval = "X-Penguin-Dance-Party"
response = self.client.options(
self.zip_file_base_url + self.test_name_1,
HTTP_ACCESS_CONTROL_REQUEST_HEADERS=headerval,
)
self.assertEqual(response.get("Access-Control-Allow-Headers", ""), headerval)
response = self.client.get(
self.zip_file_base_url + self.test_name_1,
HTTP_ACCESS_CONTROL_REQUEST_HEADERS=headerval,
)
self.assertEqual(response.get("Access-Control-Allow-Headers", ""), headerval)
def test_request_for_html_no_head_return_hashi_modified_html(self, filename_patch):
response = self.client.get(self.zip_file_base_url)
content = '<html><head><script src="/static/content/hashi123.js"></script></head><body></body></html>'
self.assertEqual(response.content.decode("utf-8"), content)
def test_request_for_html_body_no_script_return_hashi_modified_html(
self, filename_patch
):
response = self.client.get(self.zip_file_base_url + self.other_name)
self.assertEqual(response.content.decode("utf-8"), empty_content)
def test_request_for_html_body_script_return_hashi_modified_html(
self, filename_patch
):
response = self.client.get(self.zip_file_base_url + self.script_name)
content = (
'<html><head><template hashi-script="true"><script>test</script></template><script src="/static/content/hashi123.js"></script></head>'
+ "<body></body></html>"
)
self.assertEqual(response.content.decode("utf-8"), content)
def test_request_for_html_body_script_with_extra_slash_return_hashi_modified_html(
self, filename_patch
):
response = self.client.get(self.zip_file_base_url + "/" + self.script_name)
content = (
'<html><head><template hashi-script="true"><script>test</script></template><script src="/static/content/hashi123.js"></script></head>'
+ "<body></body></html>"
)
self.assertEqual(response.content.decode("utf-8"), content)
def test_request_for_embedded_file_return_embedded_file(self, filename_patch):
response = self.client.get(self.zip_file_base_url + self.embedded_file_name)
self.assertEqual(
next(response.streaming_content).decode(), self.embedded_file_str
)
def test_request_for_embedded_file_with_double_slashes_return_embedded_file(
self, filename_patch
):
response = self.client.get(
self.zip_file_base_url + self.embedded_file_name.replace("/", "//")
)
self.assertEqual(
next(response.streaming_content).decode(), self.embedded_file_str
)
def test_request_for_html_body_script_skip_get_param_return_unmodified_html(
self, filename_patch
):
response = self.client.get(
self.zip_file_base_url + self.script_name + "?SKIP_HASHI=true"
)
self.assertEqual(next(response.streaming_content).decode(), self.script_str)
def test_request_for_html_doctype_return_with_doctype(self, filename_patch):
response = self.client.get(self.zip_file_base_url + self.doctype_name)
content = response.content.decode("utf-8")
self.assertEqual(
content[:92].lower().replace(" ", " "), self.doctype.strip().lower()
)
def test_request_for_html5_doctype_return_with_doctype(self, filename_patch):
response = self.client.get(self.zip_file_base_url + self.html5_doctype_name)
content = response.content.decode("utf-8")
self.assertEqual(content[:15].lower(), self.html5_doctype.strip().lower())
def test_request_for_html_body_script_return_correct_length_header(
self, filename_patch
):
response = self.client.get(self.zip_file_base_url + self.script_name)
file_size = len(
'<html><head><template hashi-script="true"><script>test</script></template><script src="/static/content/hashi123.js"></script></head>'
+ "<body></body></html>"
)
self.assertEqual(int(response["Content-Length"]), file_size)
def test_request_for_html_body_async_script_return_hashi_modified_html(
self, filename_patch
):
response = self.client.get(self.zip_file_base_url + self.async_script_name)
soup = BeautifulSoup(response.content, "html.parser")
template = soup.find("template")
self.assertEqual(template.attrs["async"], "true")
def test_request_for_html_empty_html_no_modification(self, filename_patch):
response = self.client.get(self.zip_file_base_url + self.empty_html_name)
self.assertEqual(response.content.decode("utf-8"), empty_content)
def test_not_modified_response_when_if_modified_since_header_set_index_file(
self, filename_patch
):
caching_client = Client(HTTP_IF_MODIFIED_SINCE="Sat, 10-Sep-2016 19:14:07 GMT")
response = caching_client.get(self.zip_file_base_url)
self.assertEqual(response.status_code, 304)
def test_not_modified_response_when_if_modified_since_header_set_other_html_file(
self, filename_patch
):
caching_client = Client(HTTP_IF_MODIFIED_SINCE="Sat, 10-Sep-2016 19:14:07 GMT")
response = caching_client.get(self.zip_file_base_url + self.other_name)
self.assertEqual(response.status_code, 304)
| mrpau/kolibri | kolibri/core/content/test/test_zipcontent.py | Python | mit | 12,256 | 0.003264 |
import os, gzip, argparse
parser = argparse.ArgumentParser(description='Extract transcript records')
parser.add_argument("gff3", help="input gff3 file")
parser.add_argument("-F", dest="Feature", nargs='+', required=True,
help="transcript type(s) and attribution(s), " +
"E.g \"-F mRNA ncRNA:ncrna_class=lncRNA\"")
parser.add_argument("-A", dest="Attri", nargs='+', default=None,
help="output attribution(s) of selected transcript(s)")
parser.add_argument("-a", dest="attri", nargs='+', default=None,
help="output attribution(s) of exon")
if len(os.sys.argv) == 1: parser.print_help(); os.sys.exit(0)
args = parser.parse_args()
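# Example invocation (hypothetical file and attribute names):
#   python gff3extract.py annot.gff3.gz -F mRNA ncRNA:ncrna_class=lncRNA -A ID Parent -a Parent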
MFeature = {}
for i in args.Feature:
ii = i.split(":")
try:
MFeature[ ii[0] ] = ii[1].split(",")
	except IndexError:
MFeature[ ii[0] ] = None
k = 0  # keep/skip flag set by each transcript line and inherited by its exon lines
def extract ( Fx9 ):
global k; f9 = Fx9[8].split(';'); t = Fx9[2]
if t in MFeature:
k = 1
		if MFeature[t] is not None and not any(x in MFeature[t] for x in f9):
k=0
if t != 'exon' and t not in MFeature: k = 0
if k != 1: return 0
atr = args.Attri if t in MFeature else args.attri
F9 = ""
if atr != None:
for i in f9:
if i.split(sep='=', maxsplit=1)[0] in atr: F9 += i + ';'
Fx9[8] = F9.strip(';')
print('\t'.join(Fx9))
if args.gff3.endswith('.gz'):
f = gzip.open(args.gff3, 'r')
for line in f:
line = line.decode("utf8").rstrip()
if not line.startswith("#"): extract( line.split('\t') )
else:
f = open(args.gff3, 'r')
for line in f:
line = line.rstrip()
if not line.startswith("#"): extract( line.split('\t') )
f.close()
| d2jvkpn/bioinformatics_tools | bioparser/gff3extract_v0.9.py | Python | gpl-3.0 | 1,658 | 0.019903 |
from model.group import Group
import random
import string
import os.path
import jsonpickle
import getopt
import sys
try:
    opts, args = getopt.getopt(sys.argv[1:], "n:f:", ["number=", "file="])
except getopt.GetoptError as err:
    print(err)
    sys.exit(2)
n = 3
f = "data/groups.json"
for o, a in opts:
if o == "-n":
n = int(a)
elif o == "-f":
f = a
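# Example invocation (hypothetical paths): python generator/group.py -n 5 -f data/groups.json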
def random_string(prefix, maxlen):
symbols = string.ascii_letters + string.digits + string.punctuation + " "*10
return prefix + "".join([random.choice(symbols) for i in range(random.randrange(maxlen))])
testdata = [Group(name="", header="", footer="")] + [
Group(name=random_string("name", 10), header=random_string("header", 20), footer=random_string("footer", 20))
for i in range(n)
]
file = os.path.join(os.path.dirname(os.path.abspath(__file__)), "..", f)
with open(file, "w") as out:
# out.write(json.dumps(testdata, default=lambda x: x.__dict__, indent=2))
jsonpickle.set_encoder_options("json", indent = 2)
out.write(jsonpickle.encode(testdata))
# "dump" changes data sturcture into a json string
# __dict__ keeps the same properties that are assigned to the __init__ method in model.group | kasiazubielik/python_training | generator/group.py | Python | apache-2.0 | 1,257 | 0.009547 |
#!/usr/bin/env python3
from subprocess import getoutput
import threading
import time
import gi
gi.require_version('Gtk', '3.0')
from gi.repository import Gtk as gtk
from gi.repository import GLib
class CheckConnection(threading.Thread):
def __init__(self, pppoedi):
super(CheckConnection,self).__init__()
self.pppoedi = pppoedi
def run(self):
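        # Watcher loop: poll pppd's syslog output through the PPPoEDI helper and react to authentication/connection events.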
super(CheckConnection,self).run()
self.pppoedi.settings.active_status = False
#fsyslog = open('/var/log/syslog','r')
self.pppoedi.pppoedi_bus_interface.OpenSyslog()
while not self.pppoedi.settings.quit_pppoedi:
#ppp_status=fsyslog.read()
ppp_status=self.pppoedi.pppoedi_bus_interface.ReadSyslog()
if self.pppoedi.settings.connect_active:
if ppp_status != '':
if 'PAP authentication succeeded' in ppp_status and not self.pppoedi.settings.active_status:
self.pppoedi.settings.active_status = True
self.pppoedi.button_conn_disconn.set_label("Desconectar")
self.pppoedi.button_conn_disconn.set_sensitive(True)
elif 'PAP authentication failed' in ppp_status:
self.pppoedi.settings.active_status = False
self.pppoedi.disconnect()
                        GLib.idle_add(self.pppoedi.showAlertMsg,'Authentication failed.', gtk.MessageType.ERROR)
elif 'Unable to complete PPPoE Discovery' in ppp_status:
self.pppoedi.settings.active_status = False
self.pppoedi.disconnect()
                        GLib.idle_add(self.pppoedi.showAlertMsg,'Unable to complete PPPoE Discovery.', gtk.MessageType.ERROR)
elif 'Connection terminated.' in ppp_status:
self.pppoedi.settings.active_status = False
self.pppoedi.disconnect()
                        GLib.idle_add(self.pppoedi.showAlertMsg,'The connection was terminated.', gtk.MessageType.ERROR)
time.sleep(0.5)
#fsyslog.close()
| LAR-UFES/pppoe-plugin | pppoediplugin/CheckConnection.py | Python | gpl-3.0 | 2,146 | 0.00794 |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Sequence-to-Sequence with attention model for text summarization.
"""
from collections import namedtuple
import numpy as np
import seq2seq_lib
from six.moves import xrange
import tensorflow as tf
import embedding as emb
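# Bundle of model and training hyperparameters shared by the graph builders and the training loop.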
HParams = namedtuple('HParams',
'vocab_path, mode, min_lr, lr, batch_size, '
'enc_layers, enc_timesteps, dec_timesteps, '
'min_input_len, num_hidden, emb_dim, max_grad_norm, '
'num_softmax_samples')
def _extract_argmax_and_embed(embedding, output_projection=None,
update_embedding=True):
"""Get a loop_function that extracts the previous symbol and embeds it.
Args:
embedding: embedding tensor for symbols.
output_projection: None or a pair (W, B). If provided, each fed previous
output will first be multiplied by W and added B.
update_embedding: Boolean; if False, the gradients will not propagate
through the embeddings.
Returns:
A loop function.
"""
def loop_function(prev, _):
"""function that feed previous model output rather than ground truth."""
if output_projection is not None:
prev = tf.nn.xw_plus_b(
prev, output_projection[0], output_projection[1])
prev_symbol = tf.argmax(prev, 1)
# Note that gradients will not propagate through the second parameter of
# embedding_lookup.
emb_prev = tf.nn.embedding_lookup(embedding, prev_symbol)
if not update_embedding:
emb_prev = tf.stop_gradient(emb_prev)
return emb_prev
return loop_function
class Seq2SeqAttentionModel(object):
"""Wrapper for Tensorflow model graph for text sum vectors."""
def __init__(self, hps, vocab, num_gpus=0):
self._hps = hps
self._vocab = vocab
self._num_gpus = num_gpus
self._cur_gpu = 0
def run_train_step(self, sess, article_batch, abstract_batch, targets,
article_lens, abstract_lens, loss_weights):
to_return = [self._train_op, self._summaries, self._loss, self.global_step]
return sess.run(to_return,
feed_dict={self._articles: article_batch,
self._abstracts: abstract_batch,
self._targets: targets,
self._article_lens: article_lens,
self._abstract_lens: abstract_lens,
self._loss_weights: loss_weights})
def run_eval_step(self, sess, article_batch, abstract_batch, targets,
article_lens, abstract_lens, loss_weights):
to_return = [self._summaries, self._loss, self.global_step]
return sess.run(to_return,
feed_dict={self._articles: article_batch,
self._abstracts: abstract_batch,
self._targets: targets,
self._article_lens: article_lens,
self._abstract_lens: abstract_lens,
self._loss_weights: loss_weights})
def run_decode_step(self, sess, article_batch, abstract_batch, targets,
article_lens, abstract_lens, loss_weights):
to_return = [self._outputs, self.global_step]
return sess.run(to_return,
feed_dict={self._articles: article_batch,
self._abstracts: abstract_batch,
self._targets: targets,
self._article_lens: article_lens,
self._abstract_lens: abstract_lens,
self._loss_weights: loss_weights})
def _next_device(self):
"""Round robin the gpu device. (Reserve last gpu for expensive op)."""
if self._num_gpus == 0:
return ''
dev = '/gpu:%d' % self._cur_gpu
if self._num_gpus > 1:
self._cur_gpu = (self._cur_gpu + 1) % (self._num_gpus-1)
return dev
def _get_gpu(self, gpu_id):
if self._num_gpus <= 0 or gpu_id >= self._num_gpus:
return ''
return '/gpu:%d' % gpu_id
def _add_placeholders(self):
"""Inputs to be fed to the graph."""
hps = self._hps
self._articles = tf.placeholder(tf.int32,
[hps.batch_size, hps.enc_timesteps],
name='articles')
self._abstracts = tf.placeholder(tf.int32,
[hps.batch_size, hps.dec_timesteps],
name='abstracts')
self._targets = tf.placeholder(tf.int32,
[hps.batch_size, hps.dec_timesteps],
name='targets')
self._article_lens = tf.placeholder(tf.int32, [hps.batch_size],
name='article_lens')
self._abstract_lens = tf.placeholder(tf.int32, [hps.batch_size],
name='abstract_lens')
self._loss_weights = tf.placeholder(tf.float32,
[hps.batch_size, hps.dec_timesteps],
name='loss_weights')
def _add_seq2seq(self):
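    """Builds the shared embedding, stacked bidirectional LSTM encoder, attention decoder and loss ops."""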
hps = self._hps
vsize = self._vocab.NumIds()
with tf.variable_scope('seq2seq'):
encoder_inputs = tf.unstack(tf.transpose(self._articles))
decoder_inputs = tf.unstack(tf.transpose(self._abstracts))
targets = tf.unstack(tf.transpose(self._targets))
loss_weights = tf.unstack(tf.transpose(self._loss_weights))
article_lens = self._article_lens
# Embedding shared by the input and outputs.
with tf.variable_scope('embedding'), tf.device('/cpu:0'):
# embedding = tf.get_variable(
# 'embedding', [vsize, hps.emb_dim], dtype=tf.float32,
# initializer=tf.truncated_normal_initializer(stddev=1e-4))
word2vec = emb.embeded(hps)
embedding = word2vec.get_embedding()
emb_encoder_inputs = [tf.nn.embedding_lookup(embedding, x)
for x in encoder_inputs]
emb_decoder_inputs = [tf.nn.embedding_lookup(embedding, x)
for x in decoder_inputs]
for layer_i in xrange(hps.enc_layers):
with tf.variable_scope('encoder%d'%layer_i), tf.device(
self._next_device()):
cell_fw = tf.contrib.rnn.LSTMCell(
hps.num_hidden,
initializer=tf.random_uniform_initializer(-0.1, 0.1, seed=123),
state_is_tuple=False)
cell_bw = tf.contrib.rnn.LSTMCell(
hps.num_hidden,
initializer=tf.random_uniform_initializer(-0.1, 0.1, seed=113),
state_is_tuple=False)
(emb_encoder_inputs, fw_state, _) = tf.contrib.rnn.static_bidirectional_rnn(
cell_fw, cell_bw, emb_encoder_inputs, dtype=tf.float32,
sequence_length=article_lens)
encoder_outputs = emb_encoder_inputs
with tf.variable_scope('output_projection'):
w = tf.get_variable(
'w', [hps.num_hidden, vsize], dtype=tf.float32,
initializer=tf.truncated_normal_initializer(stddev=1e-4))
w_t = tf.transpose(w)
v = tf.get_variable(
'v', [vsize], dtype=tf.float32,
initializer=tf.truncated_normal_initializer(stddev=1e-4))
with tf.variable_scope('decoder'), tf.device(self._next_device()):
# When decoding, use model output from the previous step
# for the next step.
loop_function = None
if hps.mode == 'decode':
loop_function = _extract_argmax_and_embed(
embedding, (w, v), update_embedding=False)
cell = tf.contrib.rnn.LSTMCell(
hps.num_hidden,
initializer=tf.random_uniform_initializer(-0.1, 0.1, seed=113),
state_is_tuple=False)
encoder_outputs = [tf.reshape(x, [hps.batch_size, 1, 2*hps.num_hidden])
for x in encoder_outputs]
self._enc_top_states = tf.concat(axis=1, values=encoder_outputs)
self._dec_in_state = fw_state
# During decoding, follow up _dec_in_state are fed from beam_search.
# dec_out_state are stored by beam_search for next step feeding.
initial_state_attention = (hps.mode == 'decode')
decoder_outputs, self._dec_out_state = tf.contrib.legacy_seq2seq.attention_decoder(
emb_decoder_inputs, self._dec_in_state, self._enc_top_states,
cell, num_heads=1, loop_function=loop_function,
initial_state_attention=initial_state_attention)
with tf.variable_scope('output'), tf.device(self._next_device()):
model_outputs = []
for i in xrange(len(decoder_outputs)):
if i > 0:
tf.get_variable_scope().reuse_variables()
model_outputs.append(
tf.nn.xw_plus_b(decoder_outputs[i], w, v))
if hps.mode == 'decode':
with tf.variable_scope('decode_output'), tf.device('/gpu:0'):
best_outputs = [tf.argmax(x, 1) for x in model_outputs]
tf.logging.info('best_outputs%s', best_outputs[0].get_shape())
self._outputs = tf.concat(
axis=1, values=[tf.reshape(x, [hps.batch_size, 1]) for x in best_outputs])
self._topk_log_probs, self._topk_ids = tf.nn.top_k(
tf.log(tf.nn.softmax(model_outputs[-1])), hps.batch_size*2)
with tf.variable_scope('loss'), tf.device(self._next_device()):
def sampled_loss_func(inputs, labels):
with tf.device('/gpu:0'): # Try gpu.
labels = tf.reshape(labels, [-1, 1])
return tf.nn.sampled_softmax_loss(
weights=w_t, biases=v, labels=labels, inputs=inputs,
num_sampled=hps.num_softmax_samples, num_classes=vsize, partition_strategy="div")
if hps.num_softmax_samples != 0 and hps.mode == 'train':
self._loss = seq2seq_lib.sampled_sequence_loss(
            decoder_outputs, targets, loss_weights, sampled_loss_func) # outputs [batch*numsteps, vocab_size], targets [batch_size, num_steps], weights [batch_size, num_steps]
else:
self._loss = tf.contrib.legacy_seq2seq.sequence_loss(
model_outputs, targets, loss_weights)
tf.summary.scalar('loss', tf.minimum(12.0, self._loss))
def _add_train_op(self):
"""Sets self._train_op, op to run for training."""
hps = self._hps
self._lr_rate = tf.maximum(
hps.min_lr, # min_lr_rate.
tf.train.exponential_decay(hps.lr, self.global_step, self.global_step/10, 0.5))
tvars = tf.trainable_variables()
with tf.device(self._get_gpu(self._num_gpus-1)):
grads, global_norm = tf.clip_by_global_norm(
tf.gradients(self._loss, tvars), hps.max_grad_norm)
tf.summary.scalar('global_norm', global_norm)
# optimizer = tf.train.GradientDescentOptimizer(self._lr_rate)
# #Try tf.train.AdadeltaOptimizer
optimizer = tf.train.AdadeltaOptimizer(self._lr_rate)
tf.summary.scalar('learning rate', self._lr_rate)
self._train_op = optimizer.apply_gradients(
zip(grads, tvars), global_step=self.global_step, name='train_step')
def encode_top_state(self, sess, enc_inputs, enc_len):
"""Return the top states from encoder for decoder.
Args:
sess: tensorflow session.
enc_inputs: encoder inputs of shape [batch_size, enc_timesteps].
enc_len: encoder input length of shape [batch_size]
Returns:
enc_top_states: The top level encoder states.
dec_in_state: The decoder layer initial state.
"""
results = sess.run([self._enc_top_states, self._dec_in_state],
feed_dict={self._articles: enc_inputs,
self._article_lens: enc_len})
return results[0], results[1][0]
def decode_topk(self, sess, latest_tokens, enc_top_states, dec_init_states):
"""Return the topK results and new decoder states."""
feed = {
self._enc_top_states: enc_top_states,
self._dec_in_state:
np.squeeze(np.array(dec_init_states)),
self._abstracts:
np.transpose(np.array([latest_tokens])),
self._abstract_lens: np.ones([len(dec_init_states)], np.int32)}
results = sess.run(
[self._topk_ids, self._topk_log_probs, self._dec_out_state],
feed_dict=feed)
ids, probs, states = results[0], results[1], results[2]
new_states = [s for s in states]
return ids, probs, new_states
def build_graph(self):
self._add_placeholders()
self._add_seq2seq()
self.global_step = tf.Variable(0, name='global_step', trainable=False)
if self._hps.mode == 'train':
self._add_train_op()
self._summaries = tf.summary.merge_all()
| xuleiboy1234/autoTitle | textsum/seq2seq_attention_model.py | Python | mit | 13,425 | 0.00432 |
#!/home/executor/Ice/python2.4/bin/python
# Copyright (C) 2012 CEDIANT <info@cediant.es>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License v2
# as published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>.
import os, time
import sys, traceback, Ice, IceGrid
import threading
import random
SLICE_CONTRACT = os.environ['HOME'] + "/DISTRIB/HPCServer/execservant.ice"
#CONFIG_FILE = os.environ['HOME'] + "/config"
CONFIG_FILE = "client.cfg"
DEFAULT_PRIO = "5"
BASE_VERSION = "364"
ALTERNATE_VERSION = "364"
import amebajobs
Ice.loadSlice(SLICE_CONTRACT)
import HPCServer
def random_prio ( max_prio=10 ) :
return int( max_prio * random.random() )
def step_prio ( max_prio=10 ) :
priolist = range(max_prio,0,-1)
top = 0
for i in priolist :
top += i
prio = int( top * random.random() )
    limit = 0
    for i in priolist :
limit += i
if prio < limit :
break
return i - 1
import math
def stats ( values ) :
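    # Mean and population standard deviation computed via E[x^2] - E[x]^2.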
avg , err = 0.0 , 0.0
for v in values :
avg += v
err += v * v
avg /= len(values)
err /= len(values)
err -= avg * avg
return avg , math.sqrt( err )
class AMI_HPCServer_AsyncgExec(Ice.Object):
def __init__(self,name,application):
self.name = name
self.application = application
def ice_response(self, result):
self.application.cond.acquire()
try:
# print "Terminada operacion con resultado [%s]" % result
if self.application.detailfile : self.application.detailfile.write( "%s %s\n" % ( result , time.time() ) )
self.application.jobs-=1
if self.application.jobs==0:
self.application.cond.notify()
finally:
self.application.cond.release()
print result
def ice_exception(self,ex):
self.application.cond.acquire()
try:
self.application.jobs-=1
if self.application.jobs==0:
self.application.cond.notify()
print "excepcion --- %s" % ex
finally:
self.application.cond.release()
class ExecClientApp(Ice.Application):
def __init__(self):
self.jobs = 0
self.cond = threading.Condition()
self.detailfile = None
def launchOperation(self,input_string,prio):
try:
ic = self.communicator()
base = ic.stringToProxy("HPCServerServant")
#base = ic.stringToProxy("HPCServerServant:default -p 10000")
e_servant = HPCServer.ExecServantPrx.checkedCast(base)
except Ice.NotRegisteredException:
print "%s : couldn't find a `::HPCServer::HPCServerServant' object." % self.appName()
return False
try:
ctx={}
if prio == "random" :
ctx["prio"] = "%s" % random_prio( 10 )
else :
ctx["prio"] = prio
if ameba_percent :
if ameba_percent < int(100*random.random()) :
ctx["ameba_version"] = ALTERNATE_VERSION
else :
ctx["ameba_version"] = BASE_VERSION
elif ameba_range :
ctx["ameba_version"] = "%s" % int( 10 + ameba_range * random.random() )
else :
ctx["ameba_version"] = BASE_VERSION
#ctx["testing"] = "No"
ctx["submittime"] = "%s" % time.time()
ctx["url"] = "http://01cnbtlgridp:21000/deploy/"
ctx["errormail"] = "gfernandez@cediant.es"
ctx["smtp1"] = "01cnbtlgridp:25"
## AMI + AMD
#print "lanzada operacion"
self.cond.acquire()
try:
callback = AMI_HPCServer_AsyncgExec(input_string,self)
e_servant.gExec_async(callback,input_string,ctx) # Asynchronous call
self.jobs+=1
finally:
self.cond.release()
except:
traceback.print_exc()
return False
return True
def constructInput(self,prio,clientid):
operationid = int( 10000000 * random.random() )
for i in range( pack * ncores ) :
input = amebajobs.construct_job((clientid,operationid),i)
if not self.launchOperation(input,prio) :
print "Job was not submitted"
return False
self.cond.acquire()
try:
while self.jobs:
self.cond.wait()
finally:
self.cond.release()
return True
def evaluateFile(self,file_path,prio):
if not os.path.exists(file_path):
return False
try:
f = open(file_path,'r')
request_list = f.read().split("{{")[1:]
f.close()
except:
print "No se pudo leer el fichero %s" % file_path
#send operations to ameba file
for i in range(len(request_list)):
# print "Lanzando operacion %d" % i
if not self.launchOperation("{{"+request_list[i],prio):
return False
#wait for jobs termination.(AMI+AMD Issue)
self.cond.acquire()
try:
while self.jobs:
self.cond.wait()
finally:
self.cond.release()
return True
def run(self,args):
service_name = "HPCServerServant"
ic = None
ic = self.communicator()
# This is probably a bug somewhere, but is required to handle connection loss
ic.getProperties().setProperty( "Ice.ThreadPool.Client.SizeMax" , "20" )
# Launch file section
#---------------------------------------
file_name = None
if len(sys.argv) > 1 :
aux_name = sys.argv[1]
if os.path.exists(aux_name):
file_name=aux_name
if len(sys.argv) > 2 :
prio = sys.argv[2]
else:
prio = DEFAULT_PRIO
init_time = time.time()
subtotals = []
summary = open( outfile + ".out" , 'w' )
summary.write( "# args : %s\n" % sys.argv )
summary.write( "# cores : %s\n" % ncores )
if file_name is None :
summary.write( "# pack : %s\n" % pack )
else :
summary.write( "# input_file : %s\n" % file_name )
for i in range( npacks ) :
self.detailfile = open( os.path.join( outfile , "%s.out" % i ) , 'w' )
starttime = time.time()
if file_name is None :
clientid = int( 1000000 * random.random() )
self.constructInput(prio,clientid)
else :
self.evaluateFile(file_name,prio)
subtotal = time.time() - starttime
subtotals.append( subtotal )
summary.write( "%s %s %s %s\n" % ( i , subtotal , ameba_percent , ameba_range ) )
self.detailfile.close()
self.detailfile = None
summary.close()
if ic :
try:
ic.destroy()
except:
traceback.print_exc()
avg , err = stats( subtotals )
print "%s %s %s %s %s %s" % ( header , time.time() - init_time , avg , err , ameba_percent , ameba_range )
return True
header = "AVERAGE"
outfile = "output"
ncores = 8
pack = 30
npacks = 10
ameba_percent = False
ameba_range = False
if (__name__ == "__main__"):
if len(sys.argv) > 1 and sys.argv[1] == "--pack" :
sys.argv.pop(1)
pack = int(sys.argv.pop(1))
if len(sys.argv) > 1 and sys.argv[1] == "--npacks" :
sys.argv.pop(1)
npacks = int(sys.argv.pop(1))
if len(sys.argv) > 1 and sys.argv[1] == "--ameba-percent" :
sys.argv.pop(1)
ameba_percent = int(sys.argv.pop(1))
if len(sys.argv) > 1 and sys.argv[1] == "--ameba-range" :
sys.argv.pop(1)
ameba_range = int(sys.argv.pop(1))
if ameba_percent :
ameba_range = False
if len(sys.argv) > 1 and sys.argv[1] == "--suffix" :
sys.argv.pop(1)
header = sys.argv.pop(1)
outfile = os.path.join( outfile , header )
os.makedirs( outfile )
app = ExecClientApp()
ret = app.main(sys.argv,CONFIG_FILE)
sys.exit(ret)
| cediant/hpcservergridscheduler | CLIENT/client.py | Python | gpl-2.0 | 7,365 | 0.057298 |
"""Define tests for systems."""
| w1ll1am23/simplisafe-python | tests/system/__init__.py | Python | mit | 32 | 0 |
__author__ = 'Cedric Da Costa Faro'
from flask import render_template
from . import main
@main.app_errorhandler(404)
def page_not_found(e):
return render_template('404.html'), 404
@main.app_errorhandler(405)
def method_not_allowed(e):
return render_template('405.html'), 405
@main.app_errorhandler(500)
def internal_server_error(e):
return render_template('500.html'), 500
| cdcf/time_tracker | app/main/errors.py | Python | bsd-3-clause | 392 | 0 |
import os
import socket
import atexit
import re
from setuptools.extern.six.moves import urllib, http_client, map
import pkg_resources
from pkg_resources import ResolutionError, ExtractionError
try:
import ssl
except ImportError:
ssl = None
__all__ = [
'VerifyingHTTPSHandler', 'find_ca_bundle', 'is_available', 'cert_paths',
'opener_for'
]
cert_paths = """
/etc/pki/tls/certs/ca-bundle.crt
/etc/ssl/certs/ca-certificates.crt
/usr/share/ssl/certs/ca-bundle.crt
/usr/local/share/certs/ca-root.crt
/etc/ssl/cert.pem
/System/Library/OpenSSL/certs/cert.pem
/usr/local/share/certs/ca-root-nss.crt
/etc/ssl/ca-bundle.pem
""".strip().split()
try:
HTTPSHandler = urllib.request.HTTPSHandler
HTTPSConnection = http_client.HTTPSConnection
except AttributeError:
HTTPSHandler = HTTPSConnection = object
is_available = ssl is not None and object not in (HTTPSHandler, HTTPSConnection)
try:
from ssl import CertificateError, match_hostname
except ImportError:
try:
from backports.ssl_match_hostname import CertificateError
from backports.ssl_match_hostname import match_hostname
except ImportError:
CertificateError = None
match_hostname = None
if not CertificateError:
class CertificateError(ValueError):
pass
if not match_hostname:
def _dnsname_match(dn, hostname, max_wildcards=1):
"""Matching according to RFC 6125, section 6.4.3
http://tools.ietf.org/html/rfc6125#section-6.4.3
"""
pats = []
if not dn:
return False
# Ported from python3-syntax:
# leftmost, *remainder = dn.split(r'.')
parts = dn.split(r'.')
leftmost = parts[0]
remainder = parts[1:]
wildcards = leftmost.count('*')
if wildcards > max_wildcards:
# Issue #17980: avoid denials of service by refusing more
# than one wildcard per fragment. A survey of established
# policy among SSL implementations showed it to be a
# reasonable choice.
raise CertificateError(
"too many wildcards in certificate DNS name: " + repr(dn))
# speed up common case w/o wildcards
if not wildcards:
return dn.lower() == hostname.lower()
# RFC 6125, section 6.4.3, subitem 1.
# The client SHOULD NOT attempt to match a presented identifier in which
# the wildcard character comprises a label other than the left-most label.
if leftmost == '*':
# When '*' is a fragment by itself, it matches a non-empty dotless
# fragment.
pats.append('[^.]+')
elif leftmost.startswith('xn--') or hostname.startswith('xn--'):
# RFC 6125, section 6.4.3, subitem 3.
# The client SHOULD NOT attempt to match a presented identifier
# where the wildcard character is embedded within an A-label or
# U-label of an internationalized domain name.
pats.append(re.escape(leftmost))
else:
# Otherwise, '*' matches any dotless string, e.g. www*
pats.append(re.escape(leftmost).replace(r'\*', '[^.]*'))
# add the remaining fragments, ignore any wildcards
for frag in remainder:
pats.append(re.escape(frag))
pat = re.compile(r'\A' + r'\.'.join(pats) + r'\Z', re.IGNORECASE)
return pat.match(hostname)
def match_hostname(cert, hostname):
"""Verify that *cert* (in decoded format as returned by
SSLSocket.getpeercert()) matches the *hostname*. RFC 2818 and RFC 6125
rules are followed, but IP addresses are not accepted for *hostname*.
CertificateError is raised on failure. On success, the function
returns nothing.
"""
if not cert:
raise ValueError("empty or no certificate")
dnsnames = []
san = cert.get('subjectAltName', ())
for key, value in san:
if key == 'DNS':
if _dnsname_match(value, hostname):
return
dnsnames.append(value)
if not dnsnames:
# The subject is only checked when there is no dNSName entry
# in subjectAltName
for sub in cert.get('subject', ()):
for key, value in sub:
# XXX according to RFC 2818, the most specific Common Name
# must be used.
if key == 'commonName':
if _dnsname_match(value, hostname):
return
dnsnames.append(value)
if len(dnsnames) > 1:
raise CertificateError("hostname %r "
"doesn't match either of %s"
% (hostname, ', '.join(map(repr, dnsnames))))
elif len(dnsnames) == 1:
raise CertificateError("hostname %r "
"doesn't match %r"
% (hostname, dnsnames[0]))
else:
raise CertificateError("no appropriate commonName or "
"subjectAltName fields were found")
class VerifyingHTTPSHandler(HTTPSHandler):
"""Simple verifying handler: no auth, subclasses, timeouts, etc."""
def __init__(self, ca_bundle):
self.ca_bundle = ca_bundle
HTTPSHandler.__init__(self)
def https_open(self, req):
return self.do_open(
lambda host, **kw: VerifyingHTTPSConn(host, self.ca_bundle, **kw), req
)
class VerifyingHTTPSConn(HTTPSConnection):
"""Simple verifying connection: no auth, subclasses, timeouts, etc."""
def __init__(self, host, ca_bundle, **kw):
HTTPSConnection.__init__(self, host, **kw)
self.ca_bundle = ca_bundle
def connect(self):
sock = socket.create_connection(
(self.host, self.port), getattr(self, 'source_address', None)
)
# Handle the socket if a (proxy) tunnel is present
if hasattr(self, '_tunnel') and getattr(self, '_tunnel_host', None):
self.sock = sock
self._tunnel()
# http://bugs.python.org/issue7776: Python>=3.4.1 and >=2.7.7
# change self.host to mean the proxy server host when tunneling is
# being used. Adapt, since we are interested in the destination
# host for the match_hostname() comparison.
actual_host = self._tunnel_host
else:
actual_host = self.host
self.sock = ssl.wrap_socket(
sock, cert_reqs=ssl.CERT_REQUIRED, ca_certs=self.ca_bundle
)
try:
match_hostname(self.sock.getpeercert(), actual_host)
except CertificateError:
self.sock.shutdown(socket.SHUT_RDWR)
self.sock.close()
raise
def opener_for(ca_bundle=None):
"""Get a urlopen() replacement that uses ca_bundle for verification"""
return urllib.request.build_opener(
VerifyingHTTPSHandler(ca_bundle or find_ca_bundle())
).open
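# Example (hypothetical URL):
#   urlopen = opener_for()
#   page = urlopen('https://example.com/').read()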
_wincerts = None
def get_win_certfile():
global _wincerts
if _wincerts is not None:
return _wincerts.name
try:
from wincertstore import CertFile
except ImportError:
return None
class MyCertFile(CertFile):
def __init__(self, stores=(), certs=()):
CertFile.__init__(self)
for store in stores:
self.addstore(store)
self.addcerts(certs)
atexit.register(self.close)
def close(self):
try:
super(MyCertFile, self).close()
except OSError:
pass
_wincerts = MyCertFile(stores=['CA', 'ROOT'])
return _wincerts.name
def find_ca_bundle():
"""Return an existing CA bundle path, or None"""
if os.name == 'nt':
return get_win_certfile()
else:
for cert_path in cert_paths:
if os.path.isfile(cert_path):
return cert_path
try:
import certifi
return certifi.where()
except (ImportError, ResolutionError, ExtractionError):
return None
| AustinRoy7/Pomodoro-timer | venv/Lib/site-packages/setuptools/ssl_support.py | Python | mit | 8,130 | 0.001107 |
import sys,string,os
sys.path.insert(1, os.path.join(sys.path[0], '..')) ### import parent dir dependencies
import os.path
import unique
import itertools
dirfile = unique
############ File Import Functions #############
def filepath(filename):
fn = unique.filepath(filename)
return fn
def read_directory(sub_dir):
dir_list = unique.read_directory(sub_dir)
return dir_list
def returnDirectories(sub_dir):
dir_list = unique.returnDirectories(sub_dir)
return dir_list
class GrabFiles:
def setdirectory(self,value): self.data = value
def display(self): print self.data
def searchdirectory(self,search_term):
#self is an instance while self.data is the value of the instance
files = getDirectoryFiles(self.data,search_term)
if len(files)<1: print 'files not found'
return files
def returndirectory(self):
dir_list = getAllDirectoryFiles(self.data)
return dir_list
def getAllDirectoryFiles(import_dir):
all_files = []
dir_list = read_directory(import_dir) #send a sub_directory to a function to identify all files in a directory
for data in dir_list: #loop through each file in the directory to output results
data_dir = import_dir[1:]+'/'+data
if '.' in data:
all_files.append(data_dir)
return all_files
def getDirectoryFiles(import_dir,search_term):
dir_list = read_directory(import_dir) #send a sub_directory to a function to identify all files in a directory
matches=[]
for data in dir_list: #loop through each file in the directory to output results
data_dir = import_dir[1:]+'/'+data
if search_term not in data_dir and '.' in data: matches.append(data_dir)
return matches
def cleanUpLine(line):
line = string.replace(line,'\n','')
line = string.replace(line,'\c','')
data = string.replace(line,'\r','')
data = string.replace(data,'"','')
return data
def combineAllLists(files_to_merge,original_filename,includeColumns=False):
headers =[]; all_keys={}; dataset_data={}; files=[]; unique_filenames=[]
count=0
for filename in files_to_merge:
duplicates=[]
count+=1
fn=filepath(filename); x=0; combined_data ={}
if '/' in filename:
file = string.split(filename,'/')[-1][:-4]
else:
file = string.split(filename,'\\')[-1][:-4]
### If two files with the same name being merged
if file in unique_filenames:
file += str(count)
unique_filenames.append(file)
print file
files.append(filename)
for line in open(fn,'rU').xreadlines():
data = cleanUpLine(line)
t = string.split(data,'\t')
if x==0:
if data[0]!='#':
x=1
try: t = t[1:]
except Exception: t = ['null']
if includeColumns==False:
for i in t:
headers.append(i+'.'+file)
#headers.append(i)
else:
headers.append(t[includeColumns]+'.'+file)
else: #elif 'FOXP1' in data or 'SLK' in data or 'MBD2' in data:
key = t[0]
if includeColumns==False:
try: values = t[1:]
except Exception: values = ['null']
try:
if original_filename in filename and len(original_filename)>0: key = t[1]; values = t[2:]
except IndexError: print original_filename,filename,t;kill
else:
values = [t[includeColumns]]
#key = string.replace(key,' ','')
if len(key)>0 and key != ' ' and key not in combined_data: ### When the same key is present in the same dataset more than once
try: all_keys[key] += 1
except KeyError: all_keys[key] = 1
if permform_all_pairwise == 'yes':
try: combined_data[key].append(values); duplicates.append(key)
except Exception: combined_data[key] = [values]
else:
combined_data[key] = values
#print duplicates
dataset_data[filename] = combined_data
for i in dataset_data:
print len(dataset_data[i]), i
###Add null values for key's in all_keys not in the list for each individual dataset
combined_file_data = {}
for filename in files:
combined_data = dataset_data[filename]
###Determine the number of unique values for each key for each dataset
null_values = []; i=0
for key in combined_data: number_of_values = len(combined_data[key][0]); break
while i<number_of_values: null_values.append('0'); i+=1
for key in all_keys:
include = 'yes'
if combine_type == 'intersection':
if all_keys[key]>(len(files_to_merge)-1): include = 'yes'
else: include = 'no'
if include == 'yes':
try: values = combined_data[key]
except KeyError:
values = null_values
if permform_all_pairwise == 'yes':
values = [null_values]
if permform_all_pairwise == 'yes':
try:
val_list = combined_file_data[key]
val_list.append(values)
combined_file_data[key] = val_list
except KeyError: combined_file_data[key] = [values]
else:
try:
previous_val = combined_file_data[key]
new_val = previous_val + values
combined_file_data[key] = new_val
except KeyError: combined_file_data[key] = values
original_filename = string.replace(original_filename,'1.', '1.AS-')
export_file = output_dir+'/MergedFiles.txt'
fn=filepath(export_file);data = open(fn,'w')
title = string.join(['uid']+headers,'\t')+'\n'; data.write(title)
for key in combined_file_data:
#if key == 'ENSG00000121067': print key,combined_file_data[key];kill
new_key_data = string.split(key,'-'); new_key = new_key_data[0]
if permform_all_pairwise == 'yes':
results = getAllListCombinations(combined_file_data[key])
for result in results:
merged=[]
for i in result: merged+=i
values = string.join([key]+merged,'\t')+'\n'; data.write(values) ###removed [new_key]+ from string.join
else:
try:
values = string.join([key]+combined_file_data[key],'\t')+'\n'; data.write(values) ###removed [new_key]+ from string.join
except Exception: print combined_file_data[key];sys.exit()
data.close()
print "exported",len(dataset_data),"to",export_file
def customLSDeepCopy(ls):
ls2=[]
for i in ls: ls2.append(i)
return ls2
def getAllListCombinationsLong(a):
    # e.g. a = [['a1','a2','a3'], ['b1','b2','b3'], ['c1','c2','c3']]
    list_len_db={}
    for ls in a:
        list_len_db[len(ls)]=[]
if len(list_len_db)==1 and 1 in list_len_db:
### Just simply combine non-complex data
r=[]
for i in a:
r+=i
else:
#http://code.activestate.com/recipes/496807-list-of-all-combination-from-multiple-lists/
r=[[]]
for x in a:
t = []
for y in x:
for i in r:
t.append(i+[y])
r = t
return r
def combineUniqueAllLists(files_to_merge,original_filename):
headers =[]; all_keys={}; dataset_data={}; files=[]
for filename in files_to_merge:
print filename
fn=filepath(filename); x=0; combined_data ={}; files.append(filename)
if '/' in filename:
file = string.split(filename,'/')[-1][:-4]
else:
file = string.split(filename,'\\')[-1][:-4]
for line in open(fn,'rU').xreadlines():
data = cleanUpLine(line)
t = string.split(data,'\t')
if x==0:
if data[0]!='#':
x=1
try: t = t[1:]
except Exception: t = ['null']
for i in t:
headers.append(i+'.'+file)
if x==0:
if data[0]!='#':
x=1;
headers+=t[1:] ###Occurs for the header line
headers+=['null']
else: #elif 'FOXP1' in data or 'SLK' in data or 'MBD2' in data:
key = t[0]
try: values = t[1:]
except Exception: values = ['null']
try:
if original_filename in filename and len(original_filename)>0: key = t[1]; values = t[2:]
except IndexError: print original_filename,filename,t;kill
#key = string.replace(key,' ','')
combined_data[key] = values
if len(key)>0 and key != ' ':
try: all_keys[key] += 1
except KeyError: all_keys[key] = 1
dataset_data[filename] = combined_data
###Add null values for key's in all_keys not in the list for each individual dataset
combined_file_data = {}
for filename in files:
combined_data = dataset_data[filename]
###Determine the number of unique values for each key for each dataset
null_values = []; i=0
for key in combined_data: number_of_values = len(combined_data[key]); break
while i<number_of_values: null_values.append('0'); i+=1
for key in all_keys:
include = 'yes'
if combine_type == 'intersection':
if all_keys[key]>(len(files_to_merge)-1): include = 'yes'
else: include = 'no'
if include == 'yes':
try: values = combined_data[key]
except KeyError: values = null_values
try:
previous_val = combined_file_data[key]
new_val = previous_val + values
combined_file_data[key] = new_val
except KeyError: combined_file_data[key] = values
original_filename = string.replace(original_filename,'1.', '1.AS-')
export_file = output_dir+'/MergedFiles.txt'
fn=filepath(export_file);data = open(fn,'w')
title = string.join(['uid']+headers,'\t')+'\n'; data.write(title)
for key in combined_file_data:
#if key == 'ENSG00000121067': print key,combined_file_data[key];kill
new_key_data = string.split(key,'-'); new_key = new_key_data[0]
values = string.join([key]+combined_file_data[key],'\t')+'\n'; data.write(values) ###removed [new_key]+ from string.join
data.close()
print "exported",len(dataset_data),"to",export_file
def getAllListCombinations(a):
#http://www.saltycrane.com/blog/2011/11/find-all-combinations-set-lists-itertoolsproduct/
""" Nice code to get all combinations of lists like in the above example, where each element from each list is represented only once """
list_len_db={}
for x in a:
list_len_db[len(x)]=[]
if len(list_len_db)==1 and 1 in list_len_db:
### Just simply combine non-complex data
r=[]
for i in a:
r.append(i[0])
return [r]
else:
return list(itertools.product(*a))
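# e.g. getAllListCombinations([[1, 2], [3]]) -> [(1, 3), (2, 3)]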
def joinFiles(files_to_merge,CombineType,unique_join,outputDir):
""" Join multiple files into a single output file """
global combine_type
global permform_all_pairwise
global output_dir
output_dir = outputDir
combine_type = string.lower(CombineType)
permform_all_pairwise = 'yes'
print 'combine type:',combine_type
print 'join type:', unique_join
#g = GrabFiles(); g.setdirectory(import_dir)
#files_to_merge = g.searchdirectory('xyz') ###made this a term to excluded
if unique_join:
combineUniqueAllLists(files_to_merge,'')
else:
combineAllLists(files_to_merge,'')
return output_dir+'/MergedFiles.txt'
if __name__ == '__main__':
dirfile = unique
includeColumns=-2
includeColumns = False
output_dir = filepath('output')
combine_type = 'union'
permform_all_pairwise = 'yes'
print "Analysis Mode:"
print "1) Batch Analysis"
print "2) Single Output"
inp = sys.stdin.readline(); inp = inp.strip()
if inp == "1": batch_mode = 'yes'
elif inp == "2": batch_mode = 'no'
print "Combine Lists Using:"
print "1) Grab Union"
print "2) Grab Intersection"
inp = sys.stdin.readline(); inp = inp.strip()
if inp == "1": combine_type = 'union'
elif inp == "2": combine_type = 'intersection'
if batch_mode == 'yes': import_dir = '/batch/general_input'
else: import_dir = '/input'
g = GrabFiles(); g.setdirectory(import_dir)
files_to_merge = g.searchdirectory('xyz') ###made this a term to excluded
if batch_mode == 'yes':
second_import_dir = '/batch/primary_input'
g = GrabFiles(); g.setdirectory(second_import_dir)
files_to_merge2 = g.searchdirectory('xyz') ###made this a term to excluded
for file in files_to_merge2:
temp_files_to_merge = customLSDeepCopy(files_to_merge)
original_filename = string.split(file,'/'); original_filename = original_filename[-1]
temp_files_to_merge.append(file)
if '.' in file:
combineAllLists(temp_files_to_merge,original_filename)
else:
combineAllLists(files_to_merge,'',includeColumns=includeColumns)
print "Finished combining lists. Select return/enter to exit"; inp = sys.stdin.readline()
| nsalomonis/AltAnalyze | import_scripts/mergeFilesUnique.py | Python | apache-2.0 | 14,369 | 0.021992 |
import unittest
from twisted.internet.defer import Deferred
from landscape.lib.testing import (
FSTestCase, TwistedTestCase, ProcessDataBuilder)
from landscape.sysinfo.sysinfo import SysInfoPluginRegistry
from landscape.sysinfo.processes import Processes
class ProcessesTest(FSTestCase, TwistedTestCase, unittest.TestCase):
def setUp(self):
super(ProcessesTest, self).setUp()
self.fake_proc = self.makeDir()
self.processes = Processes(proc_dir=self.fake_proc)
self.sysinfo = SysInfoPluginRegistry()
self.sysinfo.add(self.processes)
self.builder = ProcessDataBuilder(self.fake_proc)
def test_run_returns_succeeded_deferred(self):
result = self.processes.run()
self.assertTrue(isinstance(result, Deferred))
called = []
def callback(result):
called.append(True)
result.addCallback(callback)
self.assertTrue(called)
def test_number_of_processes(self):
"""The number of processes is added as a header."""
for i in range(3):
self.builder.create_data(i, self.builder.RUNNING, uid=0, gid=0,
process_name="foo%d" % (i,))
self.processes.run()
self.assertEqual(self.sysinfo.get_headers(),
[("Processes", "3")])
def test_no_zombies(self):
self.processes.run()
self.assertEqual(self.sysinfo.get_notes(), [])
def test_number_of_zombies(self):
"""The number of zombies is added as a note."""
self.builder.create_data(99, self.builder.ZOMBIE, uid=0, gid=0,
process_name="ZOMBERS")
self.processes.run()
self.assertEqual(self.sysinfo.get_notes(),
["There is 1 zombie process."])
def test_multiple_zombies(self):
"""Stupid English, and its plurality"""
for i in range(2):
self.builder.create_data(i, self.builder.ZOMBIE, uid=0, gid=0,
process_name="ZOMBERS%d" % (i,))
self.processes.run()
self.assertEqual(self.sysinfo.get_notes(),
["There are 2 zombie processes."])
| CanonicalLtd/landscape-client | landscape/sysinfo/tests/test_processes.py | Python | gpl-2.0 | 2,217 | 0 |
#!/usr/bin/env python3
# Copyright (c) 2018-2019 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the scantxoutset rpc call."""
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import assert_equal, assert_raises_rpc_error
from decimal import Decimal
import shutil
import os
def descriptors(out):
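    # Sorted descriptor strings from a scantxoutset result, for order-independent comparison.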
return sorted(u['desc'] for u in out['unspents'])
class ScantxoutsetTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 1
self.setup_clean_chain = True
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def run_test(self):
self.log.info("Mining blocks...")
self.nodes[0].generate(110)
addr_P2SH_SEGWIT = self.nodes[0].getnewaddress("", "p2sh-segwit")
pubk1 = self.nodes[0].getaddressinfo(addr_P2SH_SEGWIT)['pubkey']
addr_LEGACY = self.nodes[0].getnewaddress("", "legacy")
pubk2 = self.nodes[0].getaddressinfo(addr_LEGACY)['pubkey']
addr_BECH32 = self.nodes[0].getnewaddress("", "bech32")
pubk3 = self.nodes[0].getaddressinfo(addr_BECH32)['pubkey']
self.nodes[0].sendtoaddress(addr_P2SH_SEGWIT, 0.001)
self.nodes[0].sendtoaddress(addr_LEGACY, 0.002)
self.nodes[0].sendtoaddress(addr_BECH32, 0.004)
#send to child keys of tprv8ZgxMBicQKsPd7Uf69XL1XwhmjHopUGep8GuEiJDZmbQz6o58LninorQAfcKZWARbtRtfnLcJ5MQ2AtHcQJCCRUcMRvmDUjyEmNUWwx8UbK
self.nodes[0].sendtoaddress("mkHV1C6JLheLoUSSZYk7x3FH5tnx9bu7yc", 0.008) # (m/0'/0'/0')
self.nodes[0].sendtoaddress("mipUSRmJAj2KrjSvsPQtnP8ynUon7FhpCR", 0.016) # (m/0'/0'/1')
self.nodes[0].sendtoaddress("n37dAGe6Mq1HGM9t4b6rFEEsDGq7Fcgfqg", 0.032) # (m/0'/0'/1500')
self.nodes[0].sendtoaddress("mqS9Rpg8nNLAzxFExsgFLCnzHBsoQ3PRM6", 0.064) # (m/0'/0'/0)
self.nodes[0].sendtoaddress("mnTg5gVWr3rbhHaKjJv7EEEc76ZqHgSj4S", 0.128) # (m/0'/0'/1)
self.nodes[0].sendtoaddress("mketCd6B9U9Uee1iCsppDJJBHfvi6U6ukC", 0.256) # (m/0'/0'/1500)
self.nodes[0].sendtoaddress("mj8zFzrbBcdaWXowCQ1oPZ4qioBVzLzAp7", 0.512) # (m/1/1/0')
self.nodes[0].sendtoaddress("mfnKpKQEftniaoE1iXuMMePQU3PUpcNisA", 1.024) # (m/1/1/1')
self.nodes[0].sendtoaddress("mou6cB1kaP1nNJM1sryW6YRwnd4shTbXYQ", 2.048) # (m/1/1/1500')
self.nodes[0].sendtoaddress("mtfUoUax9L4tzXARpw1oTGxWyoogp52KhJ", 4.096) # (m/1/1/0)
self.nodes[0].sendtoaddress("mxp7w7j8S1Aq6L8StS2PqVvtt4HGxXEvdy", 8.192) # (m/1/1/1)
self.nodes[0].sendtoaddress("mpQ8rokAhp1TAtJQR6F6TaUmjAWkAWYYBq", 16.384) # (m/1/1/1500)
self.nodes[0].generate(1)
self.log.info("Stop node, remove wallet, mine again some blocks...")
self.stop_node(0)
shutil.rmtree(os.path.join(self.nodes[0].datadir, "regtest", 'wallets'))
self.start_node(0)
self.nodes[0].generate(110)
scan = self.nodes[0].scantxoutset("start", [])
info = self.nodes[0].gettxoutsetinfo()
assert_equal(scan['success'], True)
assert_equal(scan['height'], info['height'])
assert_equal(scan['txouts'], info['txouts'])
assert_equal(scan['bestblock'], info['bestblock'])
self.restart_node(0, ['-nowallet'])
self.log.info("Test if we have found the non HD unspent outputs.")
assert_equal(self.nodes[0].scantxoutset("start", [ "pkh(" + pubk1 + ")", "pkh(" + pubk2 + ")", "pkh(" + pubk3 + ")"])['total_amount'], Decimal("0.002"))
assert_equal(self.nodes[0].scantxoutset("start", [ "wpkh(" + pubk1 + ")", "wpkh(" + pubk2 + ")", "wpkh(" + pubk3 + ")"])['total_amount'], Decimal("0.004"))
assert_equal(self.nodes[0].scantxoutset("start", [ "sh(wpkh(" + pubk1 + "))", "sh(wpkh(" + pubk2 + "))", "sh(wpkh(" + pubk3 + "))"])['total_amount'], Decimal("0.001"))
assert_equal(self.nodes[0].scantxoutset("start", [ "combo(" + pubk1 + ")", "combo(" + pubk2 + ")", "combo(" + pubk3 + ")"])['total_amount'], Decimal("0.007"))
assert_equal(self.nodes[0].scantxoutset("start", [ "addr(" + addr_P2SH_SEGWIT + ")", "addr(" + addr_LEGACY + ")", "addr(" + addr_BECH32 + ")"])['total_amount'], Decimal("0.007"))
assert_equal(self.nodes[0].scantxoutset("start", [ "addr(" + addr_P2SH_SEGWIT + ")", "addr(" + addr_LEGACY + ")", "combo(" + pubk3 + ")"])['total_amount'], Decimal("0.007"))
self.log.info("Test range validation.")
assert_raises_rpc_error(-8, "End of range is too high", self.nodes[0].scantxoutset, "start", [ {"desc": "desc", "range": -1}])
assert_raises_rpc_error(-8, "Range should be greater or equal than 0", self.nodes[0].scantxoutset, "start", [ {"desc": "desc", "range": [-1, 10]}])
assert_raises_rpc_error(-8, "End of range is too high", self.nodes[0].scantxoutset, "start", [ {"desc": "desc", "range": [(2 << 31 + 1) - 1000000, (2 << 31 + 1)]}])
assert_raises_rpc_error(-8, "Range specified as [begin,end] must not have begin after end", self.nodes[0].scantxoutset, "start", [ {"desc": "desc", "range": [2, 1]}])
assert_raises_rpc_error(-8, "Range is too large", self.nodes[0].scantxoutset, "start", [ {"desc": "desc", "range": [0, 1000001]}])
self.log.info("Test extended key derivation.")
# Run various scans, and verify that the sum of the amounts of the matches corresponds to the expected subset.
        # Note that all amounts in the UTXO set are powers of 2 multiplied by 0.001 BTC, so each amount uniquely identifies a subset.
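        # Worked example: scanning the hardened chain m/0'/0'/*h with
        # "range": 1499 matches only indices 0' and 1', so total_amount is
        # 0.008 + 0.016 = 0.024 BTC; widening the range to 1500 also matches
        # m/0'/0'/1500' and adds 0.032, giving 0.056.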
assert_equal(self.nodes[0].scantxoutset("start", [ "combo(tprv8ZgxMBicQKsPd7Uf69XL1XwhmjHopUGep8GuEiJDZmbQz6o58LninorQAfcKZWARbtRtfnLcJ5MQ2AtHcQJCCRUcMRvmDUjyEmNUWwx8UbK/0'/0h/0h)"])['total_amount'], Decimal("0.008"))
assert_equal(self.nodes[0].scantxoutset("start", [ "combo(tprv8ZgxMBicQKsPd7Uf69XL1XwhmjHopUGep8GuEiJDZmbQz6o58LninorQAfcKZWARbtRtfnLcJ5MQ2AtHcQJCCRUcMRvmDUjyEmNUWwx8UbK/0'/0'/1h)"])['total_amount'], Decimal("0.016"))
assert_equal(self.nodes[0].scantxoutset("start", [ "combo(tprv8ZgxMBicQKsPd7Uf69XL1XwhmjHopUGep8GuEiJDZmbQz6o58LninorQAfcKZWARbtRtfnLcJ5MQ2AtHcQJCCRUcMRvmDUjyEmNUWwx8UbK/0h/0'/1500')"])['total_amount'], Decimal("0.032"))
assert_equal(self.nodes[0].scantxoutset("start", [ "combo(tprv8ZgxMBicQKsPd7Uf69XL1XwhmjHopUGep8GuEiJDZmbQz6o58LninorQAfcKZWARbtRtfnLcJ5MQ2AtHcQJCCRUcMRvmDUjyEmNUWwx8UbK/0h/0h/0)"])['total_amount'], Decimal("0.064"))
assert_equal(self.nodes[0].scantxoutset("start", [ "combo(tprv8ZgxMBicQKsPd7Uf69XL1XwhmjHopUGep8GuEiJDZmbQz6o58LninorQAfcKZWARbtRtfnLcJ5MQ2AtHcQJCCRUcMRvmDUjyEmNUWwx8UbK/0'/0h/1)"])['total_amount'], Decimal("0.128"))
assert_equal(self.nodes[0].scantxoutset("start", [ "combo(tprv8ZgxMBicQKsPd7Uf69XL1XwhmjHopUGep8GuEiJDZmbQz6o58LninorQAfcKZWARbtRtfnLcJ5MQ2AtHcQJCCRUcMRvmDUjyEmNUWwx8UbK/0h/0'/1500)"])['total_amount'], Decimal("0.256"))
assert_equal(self.nodes[0].scantxoutset("start", [ {"desc": "combo(tprv8ZgxMBicQKsPd7Uf69XL1XwhmjHopUGep8GuEiJDZmbQz6o58LninorQAfcKZWARbtRtfnLcJ5MQ2AtHcQJCCRUcMRvmDUjyEmNUWwx8UbK/0'/0h/*h)", "range": 1499}])['total_amount'], Decimal("0.024"))
assert_equal(self.nodes[0].scantxoutset("start", [ {"desc": "combo(tprv8ZgxMBicQKsPd7Uf69XL1XwhmjHopUGep8GuEiJDZmbQz6o58LninorQAfcKZWARbtRtfnLcJ5MQ2AtHcQJCCRUcMRvmDUjyEmNUWwx8UbK/0'/0'/*h)", "range": 1500}])['total_amount'], Decimal("0.056"))
assert_equal(self.nodes[0].scantxoutset("start", [ {"desc": "combo(tprv8ZgxMBicQKsPd7Uf69XL1XwhmjHopUGep8GuEiJDZmbQz6o58LninorQAfcKZWARbtRtfnLcJ5MQ2AtHcQJCCRUcMRvmDUjyEmNUWwx8UbK/0h/0'/*)", "range": 1499}])['total_amount'], Decimal("0.192"))
assert_equal(self.nodes[0].scantxoutset("start", [ {"desc": "combo(tprv8ZgxMBicQKsPd7Uf69XL1XwhmjHopUGep8GuEiJDZmbQz6o58LninorQAfcKZWARbtRtfnLcJ5MQ2AtHcQJCCRUcMRvmDUjyEmNUWwx8UbK/0'/0h/*)", "range": 1500}])['total_amount'], Decimal("0.448"))
assert_equal(self.nodes[0].scantxoutset("start", [ "combo(tprv8ZgxMBicQKsPd7Uf69XL1XwhmjHopUGep8GuEiJDZmbQz6o58LninorQAfcKZWARbtRtfnLcJ5MQ2AtHcQJCCRUcMRvmDUjyEmNUWwx8UbK/1/1/0')"])['total_amount'], Decimal("0.512"))
assert_equal(self.nodes[0].scantxoutset("start", [ "combo(tprv8ZgxMBicQKsPd7Uf69XL1XwhmjHopUGep8GuEiJDZmbQz6o58LninorQAfcKZWARbtRtfnLcJ5MQ2AtHcQJCCRUcMRvmDUjyEmNUWwx8UbK/1/1/1')"])['total_amount'], Decimal("1.024"))
assert_equal(self.nodes[0].scantxoutset("start", [ "combo(tprv8ZgxMBicQKsPd7Uf69XL1XwhmjHopUGep8GuEiJDZmbQz6o58LninorQAfcKZWARbtRtfnLcJ5MQ2AtHcQJCCRUcMRvmDUjyEmNUWwx8UbK/1/1/1500h)"])['total_amount'], Decimal("2.048"))
assert_equal(self.nodes[0].scantxoutset("start", [ "combo(tprv8ZgxMBicQKsPd7Uf69XL1XwhmjHopUGep8GuEiJDZmbQz6o58LninorQAfcKZWARbtRtfnLcJ5MQ2AtHcQJCCRUcMRvmDUjyEmNUWwx8UbK/1/1/0)"])['total_amount'], Decimal("4.096"))
assert_equal(self.nodes[0].scantxoutset("start", [ "combo(tprv8ZgxMBicQKsPd7Uf69XL1XwhmjHopUGep8GuEiJDZmbQz6o58LninorQAfcKZWARbtRtfnLcJ5MQ2AtHcQJCCRUcMRvmDUjyEmNUWwx8UbK/1/1/1)"])['total_amount'], Decimal("8.192"))
assert_equal(self.nodes[0].scantxoutset("start", [ "combo(tprv8ZgxMBicQKsPd7Uf69XL1XwhmjHopUGep8GuEiJDZmbQz6o58LninorQAfcKZWARbtRtfnLcJ5MQ2AtHcQJCCRUcMRvmDUjyEmNUWwx8UbK/1/1/1500)"])['total_amount'], Decimal("16.384"))
assert_equal(self.nodes[0].scantxoutset("start", [ "combo(tpubD6NzVbkrYhZ4WaWSyoBvQwbpLkojyoTZPRsgXELWz3Popb3qkjcJyJUGLnL4qHHoQvao8ESaAstxYSnhyswJ76uZPStJRJCTKvosUCJZL5B/1/1/0)"])['total_amount'], Decimal("4.096"))
assert_equal(self.nodes[0].scantxoutset("start", [ "combo([abcdef88/1/2'/3/4h]tpubD6NzVbkrYhZ4WaWSyoBvQwbpLkojyoTZPRsgXELWz3Popb3qkjcJyJUGLnL4qHHoQvao8ESaAstxYSnhyswJ76uZPStJRJCTKvosUCJZL5B/1/1/1)"])['total_amount'], Decimal("8.192"))
assert_equal(self.nodes[0].scantxoutset("start", [ "combo(tpubD6NzVbkrYhZ4WaWSyoBvQwbpLkojyoTZPRsgXELWz3Popb3qkjcJyJUGLnL4qHHoQvao8ESaAstxYSnhyswJ76uZPStJRJCTKvosUCJZL5B/1/1/1500)"])['total_amount'], Decimal("16.384"))
assert_equal(self.nodes[0].scantxoutset("start", [ {"desc": "combo(tprv8ZgxMBicQKsPd7Uf69XL1XwhmjHopUGep8GuEiJDZmbQz6o58LninorQAfcKZWARbtRtfnLcJ5MQ2AtHcQJCCRUcMRvmDUjyEmNUWwx8UbK/1/1/*')", "range": 1499}])['total_amount'], Decimal("1.536"))
assert_equal(self.nodes[0].scantxoutset("start", [ {"desc": "combo(tprv8ZgxMBicQKsPd7Uf69XL1XwhmjHopUGep8GuEiJDZmbQz6o58LninorQAfcKZWARbtRtfnLcJ5MQ2AtHcQJCCRUcMRvmDUjyEmNUWwx8UbK/1/1/*')", "range": 1500}])['total_amount'], Decimal("3.584"))
assert_equal(self.nodes[0].scantxoutset("start", [ {"desc": "combo(tprv8ZgxMBicQKsPd7Uf69XL1XwhmjHopUGep8GuEiJDZmbQz6o58LninorQAfcKZWARbtRtfnLcJ5MQ2AtHcQJCCRUcMRvmDUjyEmNUWwx8UbK/1/1/*)", "range": 1499}])['total_amount'], Decimal("12.288"))
assert_equal(self.nodes[0].scantxoutset("start", [ {"desc": "combo(tprv8ZgxMBicQKsPd7Uf69XL1XwhmjHopUGep8GuEiJDZmbQz6o58LninorQAfcKZWARbtRtfnLcJ5MQ2AtHcQJCCRUcMRvmDUjyEmNUWwx8UbK/1/1/*)", "range": 1500}])['total_amount'], Decimal("28.672"))
assert_equal(self.nodes[0].scantxoutset("start", [ {"desc": "combo(tpubD6NzVbkrYhZ4WaWSyoBvQwbpLkojyoTZPRsgXELWz3Popb3qkjcJyJUGLnL4qHHoQvao8ESaAstxYSnhyswJ76uZPStJRJCTKvosUCJZL5B/1/1/*)", "range": 1499}])['total_amount'], Decimal("12.288"))
assert_equal(self.nodes[0].scantxoutset("start", [ {"desc": "combo(tpubD6NzVbkrYhZ4WaWSyoBvQwbpLkojyoTZPRsgXELWz3Popb3qkjcJyJUGLnL4qHHoQvao8ESaAstxYSnhyswJ76uZPStJRJCTKvosUCJZL5B/1/1/*)", "range": 1500}])['total_amount'], Decimal("28.672"))
assert_equal(self.nodes[0].scantxoutset("start", [ {"desc": "combo(tpubD6NzVbkrYhZ4WaWSyoBvQwbpLkojyoTZPRsgXELWz3Popb3qkjcJyJUGLnL4qHHoQvao8ESaAstxYSnhyswJ76uZPStJRJCTKvosUCJZL5B/1/1/*)", "range": [1500,1500]}])['total_amount'], Decimal("16.384"))
# Test the reported descriptors for a few matches
assert_equal(descriptors(self.nodes[0].scantxoutset("start", [ {"desc": "combo(tprv8ZgxMBicQKsPd7Uf69XL1XwhmjHopUGep8GuEiJDZmbQz6o58LninorQAfcKZWARbtRtfnLcJ5MQ2AtHcQJCCRUcMRvmDUjyEmNUWwx8UbK/0h/0'/*)", "range": 1499}])), ["pkh([0c5f9a1e/0'/0'/0]026dbd8b2315f296d36e6b6920b1579ca75569464875c7ebe869b536a7d9503c8c)#dzxw429x", "pkh([0c5f9a1e/0'/0'/1]033e6f25d76c00bedb3a8993c7d5739ee806397f0529b1b31dda31ef890f19a60c)#43rvceed"])
assert_equal(descriptors(self.nodes[0].scantxoutset("start", [ "combo(tprv8ZgxMBicQKsPd7Uf69XL1XwhmjHopUGep8GuEiJDZmbQz6o58LninorQAfcKZWARbtRtfnLcJ5MQ2AtHcQJCCRUcMRvmDUjyEmNUWwx8UbK/1/1/0)"])), ["pkh([0c5f9a1e/1/1/0]03e1c5b6e650966971d7e71ef2674f80222752740fc1dfd63bbbd220d2da9bd0fb)#cxmct4w8"])
assert_equal(descriptors(self.nodes[0].scantxoutset("start", [ {"desc": "combo(tpubD6NzVbkrYhZ4WaWSyoBvQwbpLkojyoTZPRsgXELWz3Popb3qkjcJyJUGLnL4qHHoQvao8ESaAstxYSnhyswJ76uZPStJRJCTKvosUCJZL5B/1/1/*)", "range": 1500}])), ['pkh([0c5f9a1e/1/1/0]03e1c5b6e650966971d7e71ef2674f80222752740fc1dfd63bbbd220d2da9bd0fb)#cxmct4w8', 'pkh([0c5f9a1e/1/1/1500]03832901c250025da2aebae2bfb38d5c703a57ab66ad477f9c578bfbcd78abca6f)#vchwd07g', 'pkh([0c5f9a1e/1/1/1]030d820fc9e8211c4169be8530efbc632775d8286167afd178caaf1089b77daba7)#z2t3ypsa'])
if __name__ == '__main__':
ScantxoutsetTest().main()
| nikkitan/bitcoin | test/functional/rpc_scantxoutset.py | Python | mit | 13,144 | 0.008673 |
import datetime
import re
import scrapy
from locations.items import GeojsonPointItem
from locations.hours import OpeningHours
day_mapping = {'MON': 'Mo','TUE': 'Tu','WED': 'We','THU': 'Th',
'FRI': 'Fr','SAT': 'Sa','SUN': 'Su'}
def convert_24hour(time):
"""
Takes 12 hour time as a string and converts it to 24 hour time.
"""
if len(time[:-2].split(':')) < 2:
hour = time[:-2]
minute = '00'
else:
hour, minute = time[:-2].split(':')
    if time[-2:] == 'AM':
        # 12 AM is midnight, i.e. hour 0 in 24-hour time
        time_formatted = ('0' if hour == '12' else hour) + ':' + minute
    elif time[-2:] == 'PM':
        # 12 PM is noon; every other PM hour gains 12 hours
        time_formatted = (hour if hour == '12' else str(int(hour) + 12)) + ':' + minute
    # A midnight close cannot be represented as 24:00, so clamp it to 23:59
    if time_formatted in ['24:00', '0:00', '00:00']:
        time_formatted = '23:59'
return time_formatted
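# Example outputs (assuming the 12-hour edge cases handled above):
#   convert_24hour('9AM')     -> '9:00'
#   convert_24hour('9:30PM')  -> '21:30'
#   convert_24hour('12:15PM') -> '12:15'
#   convert_24hour('12:30AM') -> '0:30'
#   convert_24hour('12AM')    -> '23:59'  (midnight clamps to 23:59)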
class PetSmartSpider(scrapy.Spider):
download_delay = 0.2
name = "petsmart"
allowed_domains = ["petsmart.com"]
start_urls = (
'https://www.petsmart.com/store-locator/all/',
)
def parse(self, response):
state_urls = response.xpath('//li[@class="col-sm-12 col-md-4"]/a/@href').extract()
is_store_details_urls = response.xpath('//a[@class="store-details-link"]/@href').extract()
if not state_urls and is_store_details_urls:
for url in is_store_details_urls:
yield scrapy.Request(response.urljoin(url), callback=self.parse_store)
else:
for url in state_urls:
yield scrapy.Request(response.urljoin(url))
def parse_store(self, response):
ref = re.search(r'.+/?\?(.+)', response.url).group(1)
properties = {
'name': response.xpath('//span[@itemprop="name"]/text()').extract_first().strip(),
'addr_full': response.xpath('//div[@itemprop="streetAddress"]/text()').extract_first(),
'city': response.xpath('//span[@itemprop="addressLocality"][1]/text()').extract_first().title(),
'state': response.xpath('//span[@itemprop="addressLocality"][2]/text()').extract_first(),
'postcode': response.xpath('//span[@itemprop="postalCode"]/text()').extract_first(),
'lat': float(response.xpath('//input[@name="storeLatitudeVal"]/@value').extract_first()),
'lon': float(response.xpath('//input[@name="storeLongitudeVal"]/@value').extract_first()),
'phone': response.xpath('//a[@class="store-contact-info"]/text()').extract_first(),
'ref': ref,
'website': response.url
}
hours = self.parse_hours(response.xpath('//div[@class="store-detail-address"]'))
if hours:
properties['opening_hours'] = hours
yield GeojsonPointItem(**properties)
def parse_hours(self, elements):
opening_hours = OpeningHours()
days = elements.xpath('//span[@itemprop="dayOfWeek"]/text()').extract()
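        # The store page replaces the current weekday with the literal
        # 'TODAY', so the one key missing from day_mapping must be today's
        # name; restore it at the front (the page appears to list today
        # first).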
today = (set(day_mapping) - set(days)).pop()
days.remove('TODAY')
days.insert(0,today)
open_hours = elements.xpath('//div[@class="store-hours"]/time[@itemprop="opens"]/@content').extract()
close_hours = elements.xpath('//div[@class="store-hours"]/time[@itemprop="closes"]/@content').extract()
store_hours = dict((z[0],list(z[1:])) for z in zip(days, open_hours, close_hours))
for day, hours in store_hours.items():
if 'CLOSED' in hours:
continue
opening_hours.add_range(day=day_mapping[day],
open_time=convert_24hour(hours[0]),
close_time=convert_24hour(hours[1]))
return opening_hours.as_opening_hours()
| iandees/all-the-places | locations/spiders/petsmart.py | Python | mit | 3,626 | 0.007722 |
import tests.model_control.test_ozone_custom_models_enabled as testmod
testmod.build_model( ['RelativeDifference'] , ['Lag1Trend'] , ['Seasonal_Hour'] , ['ARX'] );
| antoinecarme/pyaf | tests/model_control/detailed/transf_RelativeDifference/model_control_one_enabled_RelativeDifference_Lag1Trend_Seasonal_Hour_ARX.py | Python | bsd-3-clause | 165 | 0.048485 |
import logging
from virttest import libvirt_xml
from virttest import utils_libvirtd
from virttest import utils_misc
from autotest.client.shared import error
def run(test, params, env):
"""
Test capabilities with host numa node topology
"""
libvirtd = utils_libvirtd.Libvirtd()
libvirtd.start()
try:
new_cap = libvirt_xml.CapabilityXML()
if not libvirtd.is_running():
raise error.TestFail("Libvirtd is not running")
topo = new_cap.cells_topology
logging.debug("topo xml is %s", topo.xmltreefile)
cell_list = topo.get_cell()
numa_info = utils_misc.NumaInfo()
for cell_num in range(len(cell_list)):
# check node distances
cell_distance = cell_list[cell_num].sibling
logging.debug("cell %s distance is %s", cell_num, cell_distance)
node_distance = numa_info.distances[cell_num]
for j in range(len(cell_list)):
if cell_distance[j]['value'] != node_distance[j]:
raise error.TestFail("cell distance value not expected.")
# check node cell cpu
cell_xml = cell_list[cell_num]
cpu_list_from_xml = cell_xml.cpu
node_ = numa_info.nodes[cell_num]
cpu_list = node_.cpus
logging.debug("cell %s cpu list is %s", cell_num, cpu_list)
cpu_topo_list = []
for cpu_id in cpu_list:
cpu_dict = node_.get_cpu_topology(cpu_id)
cpu_topo_list.append(cpu_dict)
logging.debug("cpu topology list from capabilities xml is %s",
cpu_list_from_xml)
if cpu_list_from_xml != cpu_topo_list:
raise error.TestFail("cpu list %s from capabilities xml not "
"expected.")
finally:
libvirtd.restart()
| rbian/tp-libvirt | libvirt/tests/src/numa/numa_capabilities.py | Python | gpl-2.0 | 1,889 | 0 |
########################################
# Automatically generated, do not edit.
########################################
from pyvisdk.thirdparty import Enum
HostDiskPartitionInfoPartitionFormat = Enum(
'gpt',
'mbr',
'unknown',
)
| xuru/pyvisdk | pyvisdk/enums/host_disk_partition_info_partition_format.py | Python | mit | 247 | 0 |
# Define a custom User class to work with django-social-auth
from django.db import models
from django.contrib.auth.models import User
class Task(models.Model):
name = models.CharField(max_length=200)
owner = models.ForeignKey(User)
finished = models.BooleanField(default=False)
shared = models.BooleanField(default=False)
class Viewer(models.Model):
name = models.ForeignKey(User)
tasks = models.ForeignKey(Task)
class Friends(models.Model):
created = models.DateTimeField(auto_now_add=True, editable=False)
creator = models.ForeignKey(User, related_name="friendship_creator_set")
friend = models.ForeignKey(User, related_name="friend_set")
class CustomUserManager(models.Manager):
def create_user(self, username, email):
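        # Note: the email argument is currently ignored; only the username
        # is stored.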
return self.model._default_manager.create(username=username)
class CustomUser(models.Model):
username = models.CharField(max_length=128)
last_login = models.DateTimeField(blank=True, null=True)
objects = CustomUserManager()
def is_authenticated(self):
return True
| kurdd/Oauth | app/models.py | Python | apache-2.0 | 1,109 | 0.012624 |
import nltk
import filemanager
import multiprocessing
import os
import ConfigParser
from assistant import Assistant, Messenger
from nltk.corpus import wordnet
resources_dir = 'resources\\'
login_creds = ConfigParser.SafeConfigParser()
if os.path.isfile(resources_dir + 'login_creds.cfg'):
login_creds.read(resources_dir + 'login_creds.cfg')
else:
print "No logins... creating now"
new_login_creds = open(resources_dir + 'login_creds.cfg', 'w')
login_creds.write(new_login_creds)
new_login_creds.close()
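# Runs the Facebook Messenger listener in a child process so that waiting
# for messages never blocks the main input loop below.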
def fb_worker(email, password):
messenger = Messenger(email, password)
messenger.listen()
return
def check_for_word(word, verblist):
    # Direct hit against the extracted verb list
    if word in verblist:
        return True
    # Otherwise fall back to WordNet: accept the word if any synonym of it
    # appears in the verb list
    target = wordnet.synsets(word)
    for synonyms in target:
        new_list = [str(x) for x in synonyms.lemma_names()]
        if any(i in new_list for i in verblist):
            return True
    return False
if __name__ == '__main__':
use_speech = False
nlp_debug = False
jarvis = Assistant(use_speech)
jarvis.say('I have been fully loaded')
input = ''
while (input != 'Goodbye JARVIS'):
try:
input = jarvis.get_input()
if not input == '':
words = nltk.word_tokenize(input)
tagged = nltk.pos_tag(words)
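                    # Penn Treebank tags: 'VB*' marks verbs, 'NNP' proper
                    # nouns, 'PRP*' pronouns, and tags starting with 'W'
                    # are wh-question words (who/what/where/...).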
verbs = []
proper_nouns = []
pronouns = []
has_question_word = False
has_question = False
for word in tagged:
if 'VB' in word[1]:
verbs.append(word[0].lower())
elif word[1] == 'NNP':
proper_nouns.append(word[0].lower())
elif 'PRP' in word[1]:
pronouns.append(word[0].lower())
elif word[1][0] == 'W':
has_question_word = True
has_question = has_question_word and len(pronouns) == 0
if nlp_debug:
print 'Tags: {}'.format(tagged)
print 'Verbs: {}'.format(verbs)
if not has_question:
if check_for_word('open', verbs):
jarvis.say(filemanager.try_open_executable(words, tagged))
elif check_for_word('respond', verbs):
if "facebook" in proper_nouns:
if not login_creds.has_section('Facebook'):
login_creds.add_section('Facebook')
login_creds.set('Facebook', 'email', raw_input('Enter your FB email: '))
login_creds.set('Facebook', 'password', raw_input('Enter your FB password: '))
with open(resources_dir + 'login_creds.cfg', 'wb') as configfile:
login_creds.write(configfile)
fb_process = multiprocessing.Process(target = fb_worker, args = (login_creds.get('Facebook', 'email'), login_creds.get('Facebook', 'password')))
fb_process.daemon = True
fb_process.start()
jarvis.say('Answering your Facebook messages.')
else:
jarvis.respond(input)
else:
if not jarvis.search_wolfram(input):
jarvis.respond(input)
except Exception as e:
print e
try:
fb_process.terminate()
fb_process.join()
except NameError:
pass
break
| omn0mn0m/JARVIS | jarvis/jarvis.py | Python | mit | 3,867 | 0.006465 |
import _testcapi
import codecs
import io
import locale
import sys
import unittest
import warnings
from test import support
if sys.platform == 'win32':
VISTA_OR_LATER = (sys.getwindowsversion().major >= 6)
else:
VISTA_OR_LATER = False
try:
import ctypes
except ImportError:
ctypes = None
SIZEOF_WCHAR_T = -1
else:
SIZEOF_WCHAR_T = ctypes.sizeof(ctypes.c_wchar)
def coding_checker(self, coder):
def check(input, expect):
self.assertEqual(coder(input), (expect, len(input)))
return check
class Queue(object):
"""
queue: write bytes at one end, read bytes from the other end
"""
def __init__(self, buffer):
self._buffer = buffer
def write(self, chars):
self._buffer += chars
def read(self, size=-1):
if size<0:
s = self._buffer
self._buffer = self._buffer[:0] # make empty
return s
else:
s = self._buffer[:size]
self._buffer = self._buffer[size:]
return s
class MixInCheckStateHandling:
def check_state_handling_decode(self, encoding, u, s):
for i in range(len(s)+1):
d = codecs.getincrementaldecoder(encoding)()
part1 = d.decode(s[:i])
state = d.getstate()
self.assertIsInstance(state[1], int)
# Check that the condition stated in the documentation for
# IncrementalDecoder.getstate() holds
if not state[1]:
# reset decoder to the default state without anything buffered
d.setstate((state[0][:0], 0))
# Feeding the previous input may not produce any output
self.assertTrue(not d.decode(state[0]))
# The decoder must return to the same state
self.assertEqual(state, d.getstate())
# Create a new decoder and set it to the state
# we extracted from the old one
d = codecs.getincrementaldecoder(encoding)()
d.setstate(state)
part2 = d.decode(s[i:], True)
self.assertEqual(u, part1+part2)
def check_state_handling_encode(self, encoding, u, s):
for i in range(len(u)+1):
d = codecs.getincrementalencoder(encoding)()
part1 = d.encode(u[:i])
state = d.getstate()
d = codecs.getincrementalencoder(encoding)()
d.setstate(state)
part2 = d.encode(u[i:], True)
self.assertEqual(s, part1+part2)
class ReadTest(MixInCheckStateHandling):
def check_partial(self, input, partialresults):
# get a StreamReader for the encoding and feed the bytestring version
# of input to the reader byte by byte. Read everything available from
# the StreamReader and check that the results equal the appropriate
# entries from partialresults.
q = Queue(b"")
r = codecs.getreader(self.encoding)(q)
result = ""
for (c, partialresult) in zip(input.encode(self.encoding), partialresults):
q.write(bytes([c]))
result += r.read()
self.assertEqual(result, partialresult)
# check that there's nothing left in the buffers
self.assertEqual(r.read(), "")
self.assertEqual(r.bytebuffer, b"")
        # do the check again, this time using an incremental decoder
d = codecs.getincrementaldecoder(self.encoding)()
result = ""
for (c, partialresult) in zip(input.encode(self.encoding), partialresults):
result += d.decode(bytes([c]))
self.assertEqual(result, partialresult)
# check that there's nothing left in the buffers
self.assertEqual(d.decode(b"", True), "")
self.assertEqual(d.buffer, b"")
# Check whether the reset method works properly
d.reset()
result = ""
for (c, partialresult) in zip(input.encode(self.encoding), partialresults):
result += d.decode(bytes([c]))
self.assertEqual(result, partialresult)
# check that there's nothing left in the buffers
self.assertEqual(d.decode(b"", True), "")
self.assertEqual(d.buffer, b"")
# check iterdecode()
encoded = input.encode(self.encoding)
self.assertEqual(
input,
"".join(codecs.iterdecode([bytes([c]) for c in encoded], self.encoding))
)
def test_readline(self):
def getreader(input):
stream = io.BytesIO(input.encode(self.encoding))
return codecs.getreader(self.encoding)(stream)
def readalllines(input, keepends=True, size=None):
reader = getreader(input)
lines = []
while True:
line = reader.readline(size=size, keepends=keepends)
if not line:
break
lines.append(line)
return "|".join(lines)
s = "foo\nbar\r\nbaz\rspam\u2028eggs"
sexpected = "foo\n|bar\r\n|baz\r|spam\u2028|eggs"
sexpectednoends = "foo|bar|baz|spam|eggs"
self.assertEqual(readalllines(s, True), sexpected)
self.assertEqual(readalllines(s, False), sexpectednoends)
self.assertEqual(readalllines(s, True, 10), sexpected)
self.assertEqual(readalllines(s, False, 10), sexpectednoends)
# Test long lines (multiple calls to read() in readline())
vw = []
vwo = []
for (i, lineend) in enumerate("\n \r\n \r \u2028".split()):
vw.append((i*200)*"\3042" + lineend)
vwo.append((i*200)*"\3042")
self.assertEqual(readalllines("".join(vw), True), "".join(vw))
self.assertEqual(readalllines("".join(vw), False),"".join(vwo))
# Test lines where the first read might end with \r, so the
# reader has to look ahead whether this is a lone \r or a \r\n
for size in range(80):
for lineend in "\n \r\n \r \u2028".split():
s = 10*(size*"a" + lineend + "xxx\n")
reader = getreader(s)
for i in range(10):
self.assertEqual(
reader.readline(keepends=True),
size*"a" + lineend,
)
reader = getreader(s)
for i in range(10):
self.assertEqual(
reader.readline(keepends=False),
size*"a",
)
def test_bug1175396(self):
s = [
'<%!--===================================================\r\n',
' BLOG index page: show recent articles,\r\n',
' today\'s articles, or articles of a specific date.\r\n',
'========================================================--%>\r\n',
'<%@inputencoding="ISO-8859-1"%>\r\n',
'<%@pagetemplate=TEMPLATE.y%>\r\n',
'<%@import=import frog.util, frog%>\r\n',
'<%@import=import frog.objects%>\r\n',
'<%@import=from frog.storageerrors import StorageError%>\r\n',
'<%\r\n',
'\r\n',
'import logging\r\n',
'log=logging.getLogger("Snakelets.logger")\r\n',
'\r\n',
'\r\n',
'user=self.SessionCtx.user\r\n',
'storageEngine=self.SessionCtx.storageEngine\r\n',
'\r\n',
'\r\n',
'def readArticlesFromDate(date, count=None):\r\n',
' entryids=storageEngine.listBlogEntries(date)\r\n',
' entryids.reverse() # descending\r\n',
' if count:\r\n',
' entryids=entryids[:count]\r\n',
' try:\r\n',
' return [ frog.objects.BlogEntry.load(storageEngine, date, Id) for Id in entryids ]\r\n',
' except StorageError,x:\r\n',
' log.error("Error loading articles: "+str(x))\r\n',
' self.abort("cannot load articles")\r\n',
'\r\n',
'showdate=None\r\n',
'\r\n',
'arg=self.Request.getArg()\r\n',
'if arg=="today":\r\n',
' #-------------------- TODAY\'S ARTICLES\r\n',
' self.write("<h2>Today\'s articles</h2>")\r\n',
' showdate = frog.util.isodatestr() \r\n',
' entries = readArticlesFromDate(showdate)\r\n',
'elif arg=="active":\r\n',
' #-------------------- ACTIVE ARTICLES redirect\r\n',
' self.Yredirect("active.y")\r\n',
'elif arg=="login":\r\n',
' #-------------------- LOGIN PAGE redirect\r\n',
' self.Yredirect("login.y")\r\n',
'elif arg=="date":\r\n',
' #-------------------- ARTICLES OF A SPECIFIC DATE\r\n',
' showdate = self.Request.getParameter("date")\r\n',
' self.write("<h2>Articles written on %s</h2>"% frog.util.mediumdatestr(showdate))\r\n',
' entries = readArticlesFromDate(showdate)\r\n',
'else:\r\n',
' #-------------------- RECENT ARTICLES\r\n',
' self.write("<h2>Recent articles</h2>")\r\n',
' dates=storageEngine.listBlogEntryDates()\r\n',
' if dates:\r\n',
' entries=[]\r\n',
' SHOWAMOUNT=10\r\n',
' for showdate in dates:\r\n',
' entries.extend( readArticlesFromDate(showdate, SHOWAMOUNT-len(entries)) )\r\n',
' if len(entries)>=SHOWAMOUNT:\r\n',
' break\r\n',
' \r\n',
]
stream = io.BytesIO("".join(s).encode(self.encoding))
reader = codecs.getreader(self.encoding)(stream)
for (i, line) in enumerate(reader):
self.assertEqual(line, s[i])
def test_readlinequeue(self):
q = Queue(b"")
writer = codecs.getwriter(self.encoding)(q)
reader = codecs.getreader(self.encoding)(q)
# No lineends
writer.write("foo\r")
self.assertEqual(reader.readline(keepends=False), "foo")
writer.write("\nbar\r")
self.assertEqual(reader.readline(keepends=False), "")
self.assertEqual(reader.readline(keepends=False), "bar")
writer.write("baz")
self.assertEqual(reader.readline(keepends=False), "baz")
self.assertEqual(reader.readline(keepends=False), "")
# Lineends
writer.write("foo\r")
self.assertEqual(reader.readline(keepends=True), "foo\r")
writer.write("\nbar\r")
self.assertEqual(reader.readline(keepends=True), "\n")
self.assertEqual(reader.readline(keepends=True), "bar\r")
writer.write("baz")
self.assertEqual(reader.readline(keepends=True), "baz")
self.assertEqual(reader.readline(keepends=True), "")
writer.write("foo\r\n")
self.assertEqual(reader.readline(keepends=True), "foo\r\n")
def test_bug1098990_a(self):
s1 = "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx yyyyyyyyyyyyyyyyyyyyyyyyyyyyyyy\r\n"
s2 = "offending line: ladfj askldfj klasdj fskla dfzaskdj fasklfj laskd fjasklfzzzzaa%whereisthis!!!\r\n"
s3 = "next line.\r\n"
s = (s1+s2+s3).encode(self.encoding)
stream = io.BytesIO(s)
reader = codecs.getreader(self.encoding)(stream)
self.assertEqual(reader.readline(), s1)
self.assertEqual(reader.readline(), s2)
self.assertEqual(reader.readline(), s3)
self.assertEqual(reader.readline(), "")
def test_bug1098990_b(self):
s1 = "aaaaaaaaaaaaaaaaaaaaaaaa\r\n"
s2 = "bbbbbbbbbbbbbbbbbbbbbbbb\r\n"
s3 = "stillokay:bbbbxx\r\n"
s4 = "broken!!!!badbad\r\n"
s5 = "againokay.\r\n"
s = (s1+s2+s3+s4+s5).encode(self.encoding)
stream = io.BytesIO(s)
reader = codecs.getreader(self.encoding)(stream)
self.assertEqual(reader.readline(), s1)
self.assertEqual(reader.readline(), s2)
self.assertEqual(reader.readline(), s3)
self.assertEqual(reader.readline(), s4)
self.assertEqual(reader.readline(), s5)
self.assertEqual(reader.readline(), "")
class UTF32Test(ReadTest, unittest.TestCase):
encoding = "utf-32"
spamle = (b'\xff\xfe\x00\x00'
b's\x00\x00\x00p\x00\x00\x00a\x00\x00\x00m\x00\x00\x00'
b's\x00\x00\x00p\x00\x00\x00a\x00\x00\x00m\x00\x00\x00')
spambe = (b'\x00\x00\xfe\xff'
b'\x00\x00\x00s\x00\x00\x00p\x00\x00\x00a\x00\x00\x00m'
b'\x00\x00\x00s\x00\x00\x00p\x00\x00\x00a\x00\x00\x00m')
def test_only_one_bom(self):
_,_,reader,writer = codecs.lookup(self.encoding)
# encode some stream
s = io.BytesIO()
f = writer(s)
f.write("spam")
f.write("spam")
d = s.getvalue()
# check whether there is exactly one BOM in it
self.assertTrue(d == self.spamle or d == self.spambe)
# try to read it back
s = io.BytesIO(d)
f = reader(s)
self.assertEqual(f.read(), "spamspam")
def test_badbom(self):
s = io.BytesIO(4*b"\xff")
f = codecs.getreader(self.encoding)(s)
self.assertRaises(UnicodeError, f.read)
s = io.BytesIO(8*b"\xff")
f = codecs.getreader(self.encoding)(s)
self.assertRaises(UnicodeError, f.read)
def test_partial(self):
self.check_partial(
"\x00\xff\u0100\uffff\U00010000",
[
"", # first byte of BOM read
"", # second byte of BOM read
"", # third byte of BOM read
"", # fourth byte of BOM read => byteorder known
"",
"",
"",
"\x00",
"\x00",
"\x00",
"\x00",
"\x00\xff",
"\x00\xff",
"\x00\xff",
"\x00\xff",
"\x00\xff\u0100",
"\x00\xff\u0100",
"\x00\xff\u0100",
"\x00\xff\u0100",
"\x00\xff\u0100\uffff",
"\x00\xff\u0100\uffff",
"\x00\xff\u0100\uffff",
"\x00\xff\u0100\uffff",
"\x00\xff\u0100\uffff\U00010000",
]
)
def test_handlers(self):
self.assertEqual(('\ufffd', 1),
codecs.utf_32_decode(b'\x01', 'replace', True))
self.assertEqual(('', 1),
codecs.utf_32_decode(b'\x01', 'ignore', True))
def test_errors(self):
self.assertRaises(UnicodeDecodeError, codecs.utf_32_decode,
b"\xff", "strict", True)
def test_decoder_state(self):
self.check_state_handling_decode(self.encoding,
"spamspam", self.spamle)
self.check_state_handling_decode(self.encoding,
"spamspam", self.spambe)
def test_issue8941(self):
# Issue #8941: insufficient result allocation when decoding into
# surrogate pairs on UCS-2 builds.
encoded_le = b'\xff\xfe\x00\x00' + b'\x00\x00\x01\x00' * 1024
self.assertEqual('\U00010000' * 1024,
codecs.utf_32_decode(encoded_le)[0])
encoded_be = b'\x00\x00\xfe\xff' + b'\x00\x01\x00\x00' * 1024
self.assertEqual('\U00010000' * 1024,
codecs.utf_32_decode(encoded_be)[0])
class UTF32LETest(ReadTest, unittest.TestCase):
encoding = "utf-32-le"
def test_partial(self):
self.check_partial(
"\x00\xff\u0100\uffff\U00010000",
[
"",
"",
"",
"\x00",
"\x00",
"\x00",
"\x00",
"\x00\xff",
"\x00\xff",
"\x00\xff",
"\x00\xff",
"\x00\xff\u0100",
"\x00\xff\u0100",
"\x00\xff\u0100",
"\x00\xff\u0100",
"\x00\xff\u0100\uffff",
"\x00\xff\u0100\uffff",
"\x00\xff\u0100\uffff",
"\x00\xff\u0100\uffff",
"\x00\xff\u0100\uffff\U00010000",
]
)
def test_simple(self):
self.assertEqual("\U00010203".encode(self.encoding), b"\x03\x02\x01\x00")
def test_errors(self):
self.assertRaises(UnicodeDecodeError, codecs.utf_32_le_decode,
b"\xff", "strict", True)
def test_issue8941(self):
# Issue #8941: insufficient result allocation when decoding into
# surrogate pairs on UCS-2 builds.
encoded = b'\x00\x00\x01\x00' * 1024
self.assertEqual('\U00010000' * 1024,
codecs.utf_32_le_decode(encoded)[0])
class UTF32BETest(ReadTest, unittest.TestCase):
encoding = "utf-32-be"
def test_partial(self):
self.check_partial(
"\x00\xff\u0100\uffff\U00010000",
[
"",
"",
"",
"\x00",
"\x00",
"\x00",
"\x00",
"\x00\xff",
"\x00\xff",
"\x00\xff",
"\x00\xff",
"\x00\xff\u0100",
"\x00\xff\u0100",
"\x00\xff\u0100",
"\x00\xff\u0100",
"\x00\xff\u0100\uffff",
"\x00\xff\u0100\uffff",
"\x00\xff\u0100\uffff",
"\x00\xff\u0100\uffff",
"\x00\xff\u0100\uffff\U00010000",
]
)
def test_simple(self):
self.assertEqual("\U00010203".encode(self.encoding), b"\x00\x01\x02\x03")
def test_errors(self):
self.assertRaises(UnicodeDecodeError, codecs.utf_32_be_decode,
b"\xff", "strict", True)
def test_issue8941(self):
# Issue #8941: insufficient result allocation when decoding into
# surrogate pairs on UCS-2 builds.
encoded = b'\x00\x01\x00\x00' * 1024
self.assertEqual('\U00010000' * 1024,
codecs.utf_32_be_decode(encoded)[0])
class UTF16Test(ReadTest, unittest.TestCase):
encoding = "utf-16"
spamle = b'\xff\xfes\x00p\x00a\x00m\x00s\x00p\x00a\x00m\x00'
spambe = b'\xfe\xff\x00s\x00p\x00a\x00m\x00s\x00p\x00a\x00m'
def test_only_one_bom(self):
_,_,reader,writer = codecs.lookup(self.encoding)
# encode some stream
s = io.BytesIO()
f = writer(s)
f.write("spam")
f.write("spam")
d = s.getvalue()
# check whether there is exactly one BOM in it
self.assertTrue(d == self.spamle or d == self.spambe)
# try to read it back
s = io.BytesIO(d)
f = reader(s)
self.assertEqual(f.read(), "spamspam")
def test_badbom(self):
s = io.BytesIO(b"\xff\xff")
f = codecs.getreader(self.encoding)(s)
self.assertRaises(UnicodeError, f.read)
s = io.BytesIO(b"\xff\xff\xff\xff")
f = codecs.getreader(self.encoding)(s)
self.assertRaises(UnicodeError, f.read)
def test_partial(self):
self.check_partial(
"\x00\xff\u0100\uffff\U00010000",
[
"", # first byte of BOM read
"", # second byte of BOM read => byteorder known
"",
"\x00",
"\x00",
"\x00\xff",
"\x00\xff",
"\x00\xff\u0100",
"\x00\xff\u0100",
"\x00\xff\u0100\uffff",
"\x00\xff\u0100\uffff",
"\x00\xff\u0100\uffff",
"\x00\xff\u0100\uffff",
"\x00\xff\u0100\uffff\U00010000",
]
)
def test_handlers(self):
self.assertEqual(('\ufffd', 1),
codecs.utf_16_decode(b'\x01', 'replace', True))
self.assertEqual(('', 1),
codecs.utf_16_decode(b'\x01', 'ignore', True))
def test_errors(self):
self.assertRaises(UnicodeDecodeError, codecs.utf_16_decode,
b"\xff", "strict", True)
def test_decoder_state(self):
self.check_state_handling_decode(self.encoding,
"spamspam", self.spamle)
self.check_state_handling_decode(self.encoding,
"spamspam", self.spambe)
def test_bug691291(self):
# Files are always opened in binary mode, even if no binary mode was
# specified. This means that no automatic conversion of '\n' is done
# on reading and writing.
s1 = 'Hello\r\nworld\r\n'
s = s1.encode(self.encoding)
self.addCleanup(support.unlink, support.TESTFN)
with open(support.TESTFN, 'wb') as fp:
fp.write(s)
with codecs.open(support.TESTFN, 'U', encoding=self.encoding) as reader:
self.assertEqual(reader.read(), s1)
class UTF16LETest(ReadTest, unittest.TestCase):
encoding = "utf-16-le"
def test_partial(self):
self.check_partial(
"\x00\xff\u0100\uffff\U00010000",
[
"",
"\x00",
"\x00",
"\x00\xff",
"\x00\xff",
"\x00\xff\u0100",
"\x00\xff\u0100",
"\x00\xff\u0100\uffff",
"\x00\xff\u0100\uffff",
"\x00\xff\u0100\uffff",
"\x00\xff\u0100\uffff",
"\x00\xff\u0100\uffff\U00010000",
]
)
def test_errors(self):
tests = [
(b'\xff', '\ufffd'),
(b'A\x00Z', 'A\ufffd'),
(b'A\x00B\x00C\x00D\x00Z', 'ABCD\ufffd'),
(b'\x00\xd8', '\ufffd'),
(b'\x00\xd8A', '\ufffd'),
(b'\x00\xd8A\x00', '\ufffdA'),
(b'\x00\xdcA\x00', '\ufffdA'),
]
for raw, expected in tests:
self.assertRaises(UnicodeDecodeError, codecs.utf_16_le_decode,
raw, 'strict', True)
self.assertEqual(raw.decode('utf-16le', 'replace'), expected)
def test_nonbmp(self):
self.assertEqual("\U00010203".encode(self.encoding),
b'\x00\xd8\x03\xde')
self.assertEqual(b'\x00\xd8\x03\xde'.decode(self.encoding),
"\U00010203")
class UTF16BETest(ReadTest, unittest.TestCase):
encoding = "utf-16-be"
def test_partial(self):
self.check_partial(
"\x00\xff\u0100\uffff\U00010000",
[
"",
"\x00",
"\x00",
"\x00\xff",
"\x00\xff",
"\x00\xff\u0100",
"\x00\xff\u0100",
"\x00\xff\u0100\uffff",
"\x00\xff\u0100\uffff",
"\x00\xff\u0100\uffff",
"\x00\xff\u0100\uffff",
"\x00\xff\u0100\uffff\U00010000",
]
)
def test_errors(self):
tests = [
(b'\xff', '\ufffd'),
(b'\x00A\xff', 'A\ufffd'),
(b'\x00A\x00B\x00C\x00DZ', 'ABCD\ufffd'),
(b'\xd8\x00', '\ufffd'),
(b'\xd8\x00\xdc', '\ufffd'),
(b'\xd8\x00\x00A', '\ufffdA'),
(b'\xdc\x00\x00A', '\ufffdA'),
]
for raw, expected in tests:
self.assertRaises(UnicodeDecodeError, codecs.utf_16_be_decode,
raw, 'strict', True)
self.assertEqual(raw.decode('utf-16be', 'replace'), expected)
def test_nonbmp(self):
self.assertEqual("\U00010203".encode(self.encoding),
b'\xd8\x00\xde\x03')
self.assertEqual(b'\xd8\x00\xde\x03'.decode(self.encoding),
"\U00010203")
class UTF8Test(ReadTest, unittest.TestCase):
encoding = "utf-8"
def test_partial(self):
self.check_partial(
"\x00\xff\u07ff\u0800\uffff\U00010000",
[
"\x00",
"\x00",
"\x00\xff",
"\x00\xff",
"\x00\xff\u07ff",
"\x00\xff\u07ff",
"\x00\xff\u07ff",
"\x00\xff\u07ff\u0800",
"\x00\xff\u07ff\u0800",
"\x00\xff\u07ff\u0800",
"\x00\xff\u07ff\u0800\uffff",
"\x00\xff\u07ff\u0800\uffff",
"\x00\xff\u07ff\u0800\uffff",
"\x00\xff\u07ff\u0800\uffff",
"\x00\xff\u07ff\u0800\uffff\U00010000",
]
)
def test_decoder_state(self):
u = "\x00\x7f\x80\xff\u0100\u07ff\u0800\uffff\U0010ffff"
self.check_state_handling_decode(self.encoding,
u, u.encode(self.encoding))
def test_lone_surrogates(self):
self.assertRaises(UnicodeEncodeError, "\ud800".encode, "utf-8")
self.assertRaises(UnicodeDecodeError, b"\xed\xa0\x80".decode, "utf-8")
self.assertEqual("[\uDC80]".encode("utf-8", "backslashreplace"),
b'[\\udc80]')
self.assertEqual("[\uDC80]".encode("utf-8", "xmlcharrefreplace"),
b'[�]')
self.assertEqual("[\uDC80]".encode("utf-8", "surrogateescape"),
b'[\x80]')
self.assertEqual("[\uDC80]".encode("utf-8", "ignore"),
b'[]')
self.assertEqual("[\uDC80]".encode("utf-8", "replace"),
b'[?]')
def test_surrogatepass_handler(self):
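        # The "surrogatepass" handler lets lone surrogates round-trip
        # through UTF-8 as their three-byte sequences (e.g. U+D800 becomes
        # b"\xed\xa0\x80") instead of raising an error.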
self.assertEqual("abc\ud800def".encode("utf-8", "surrogatepass"),
b"abc\xed\xa0\x80def")
self.assertEqual(b"abc\xed\xa0\x80def".decode("utf-8", "surrogatepass"),
"abc\ud800def")
self.assertEqual("\U00010fff\uD800".encode("utf-8", "surrogatepass"),
b"\xf0\x90\xbf\xbf\xed\xa0\x80")
self.assertEqual(b"\xf0\x90\xbf\xbf\xed\xa0\x80".decode("utf-8", "surrogatepass"),
"\U00010fff\uD800")
self.assertTrue(codecs.lookup_error("surrogatepass"))
with self.assertRaises(UnicodeDecodeError):
b"abc\xed\xa0".decode("utf-8", "surrogatepass")
with self.assertRaises(UnicodeDecodeError):
b"abc\xed\xa0z".decode("utf-8", "surrogatepass")
@unittest.skipUnless(sys.platform == 'win32',
'cp65001 is a Windows-only codec')
class CP65001Test(ReadTest, unittest.TestCase):
encoding = "cp65001"
def test_encode(self):
tests = [
('abc', 'strict', b'abc'),
('\xe9\u20ac', 'strict', b'\xc3\xa9\xe2\x82\xac'),
('\U0010ffff', 'strict', b'\xf4\x8f\xbf\xbf'),
]
if VISTA_OR_LATER:
tests.extend((
('\udc80', 'strict', None),
('\udc80', 'ignore', b''),
('\udc80', 'replace', b'?'),
('\udc80', 'backslashreplace', b'\\udc80'),
('\udc80', 'surrogatepass', b'\xed\xb2\x80'),
))
else:
tests.append(('\udc80', 'strict', b'\xed\xb2\x80'))
for text, errors, expected in tests:
if expected is not None:
try:
encoded = text.encode('cp65001', errors)
except UnicodeEncodeError as err:
self.fail('Unable to encode %a to cp65001 with '
'errors=%r: %s' % (text, errors, err))
self.assertEqual(encoded, expected,
'%a.encode("cp65001", %r)=%a != %a'
% (text, errors, encoded, expected))
else:
self.assertRaises(UnicodeEncodeError,
text.encode, "cp65001", errors)
def test_decode(self):
tests = [
(b'abc', 'strict', 'abc'),
(b'\xc3\xa9\xe2\x82\xac', 'strict', '\xe9\u20ac'),
(b'\xf4\x8f\xbf\xbf', 'strict', '\U0010ffff'),
(b'\xef\xbf\xbd', 'strict', '\ufffd'),
(b'[\xc3\xa9]', 'strict', '[\xe9]'),
# invalid bytes
(b'[\xff]', 'strict', None),
(b'[\xff]', 'ignore', '[]'),
(b'[\xff]', 'replace', '[\ufffd]'),
(b'[\xff]', 'surrogateescape', '[\udcff]'),
]
if VISTA_OR_LATER:
tests.extend((
(b'[\xed\xb2\x80]', 'strict', None),
(b'[\xed\xb2\x80]', 'ignore', '[]'),
(b'[\xed\xb2\x80]', 'replace', '[\ufffd\ufffd\ufffd]'),
))
else:
tests.extend((
(b'[\xed\xb2\x80]', 'strict', '[\udc80]'),
))
for raw, errors, expected in tests:
if expected is not None:
try:
decoded = raw.decode('cp65001', errors)
except UnicodeDecodeError as err:
self.fail('Unable to decode %a from cp65001 with '
'errors=%r: %s' % (raw, errors, err))
self.assertEqual(decoded, expected,
'%a.decode("cp65001", %r)=%a != %a'
% (raw, errors, decoded, expected))
else:
self.assertRaises(UnicodeDecodeError,
raw.decode, 'cp65001', errors)
@unittest.skipUnless(VISTA_OR_LATER, 'require Windows Vista or later')
def test_lone_surrogates(self):
self.assertRaises(UnicodeEncodeError, "\ud800".encode, "cp65001")
self.assertRaises(UnicodeDecodeError, b"\xed\xa0\x80".decode, "cp65001")
self.assertEqual("[\uDC80]".encode("cp65001", "backslashreplace"),
b'[\\udc80]')
self.assertEqual("[\uDC80]".encode("cp65001", "xmlcharrefreplace"),
b'[�]')
self.assertEqual("[\uDC80]".encode("cp65001", "surrogateescape"),
b'[\x80]')
self.assertEqual("[\uDC80]".encode("cp65001", "ignore"),
b'[]')
self.assertEqual("[\uDC80]".encode("cp65001", "replace"),
b'[?]')
@unittest.skipUnless(VISTA_OR_LATER, 'require Windows Vista or later')
def test_surrogatepass_handler(self):
self.assertEqual("abc\ud800def".encode("cp65001", "surrogatepass"),
b"abc\xed\xa0\x80def")
self.assertEqual(b"abc\xed\xa0\x80def".decode("cp65001", "surrogatepass"),
"abc\ud800def")
self.assertEqual("\U00010fff\uD800".encode("cp65001", "surrogatepass"),
b"\xf0\x90\xbf\xbf\xed\xa0\x80")
self.assertEqual(b"\xf0\x90\xbf\xbf\xed\xa0\x80".decode("cp65001", "surrogatepass"),
"\U00010fff\uD800")
self.assertTrue(codecs.lookup_error("surrogatepass"))
class UTF7Test(ReadTest, unittest.TestCase):
encoding = "utf-7"
def test_partial(self):
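        # In UTF-7, "+" opens a base64 section and "+-" encodes a literal
        # "+", so the decoder must buffer after "+" until the next byte
        # disambiguates it.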
self.check_partial(
"a+-b",
[
"a",
"a",
"a+",
"a+-",
"a+-b",
]
)
class UTF16ExTest(unittest.TestCase):
def test_errors(self):
self.assertRaises(UnicodeDecodeError, codecs.utf_16_ex_decode, b"\xff", "strict", 0, True)
def test_bad_args(self):
self.assertRaises(TypeError, codecs.utf_16_ex_decode)
class ReadBufferTest(unittest.TestCase):
def test_array(self):
import array
self.assertEqual(
codecs.readbuffer_encode(array.array("b", b"spam")),
(b"spam", 4)
)
def test_empty(self):
self.assertEqual(codecs.readbuffer_encode(""), (b"", 0))
def test_bad_args(self):
self.assertRaises(TypeError, codecs.readbuffer_encode)
self.assertRaises(TypeError, codecs.readbuffer_encode, 42)
class UTF8SigTest(ReadTest, unittest.TestCase):
encoding = "utf-8-sig"
def test_partial(self):
self.check_partial(
"\ufeff\x00\xff\u07ff\u0800\uffff\U00010000",
[
"",
"",
"", # First BOM has been read and skipped
"",
"",
"\ufeff", # Second BOM has been read and emitted
"\ufeff\x00", # "\x00" read and emitted
"\ufeff\x00", # First byte of encoded "\xff" read
"\ufeff\x00\xff", # Second byte of encoded "\xff" read
"\ufeff\x00\xff", # First byte of encoded "\u07ff" read
"\ufeff\x00\xff\u07ff", # Second byte of encoded "\u07ff" read
"\ufeff\x00\xff\u07ff",
"\ufeff\x00\xff\u07ff",
"\ufeff\x00\xff\u07ff\u0800",
"\ufeff\x00\xff\u07ff\u0800",
"\ufeff\x00\xff\u07ff\u0800",
"\ufeff\x00\xff\u07ff\u0800\uffff",
"\ufeff\x00\xff\u07ff\u0800\uffff",
"\ufeff\x00\xff\u07ff\u0800\uffff",
"\ufeff\x00\xff\u07ff\u0800\uffff",
"\ufeff\x00\xff\u07ff\u0800\uffff\U00010000",
]
)
def test_bug1601501(self):
# SF bug #1601501: check that the codec works with a buffer
self.assertEqual(str(b"\xef\xbb\xbf", "utf-8-sig"), "")
def test_bom(self):
d = codecs.getincrementaldecoder("utf-8-sig")()
s = "spam"
self.assertEqual(d.decode(s.encode("utf-8-sig")), s)
def test_stream_bom(self):
unistring = "ABC\u00A1\u2200XYZ"
bytestring = codecs.BOM_UTF8 + b"ABC\xC2\xA1\xE2\x88\x80XYZ"
reader = codecs.getreader("utf-8-sig")
for sizehint in [None] + list(range(1, 11)) + \
[64, 128, 256, 512, 1024]:
istream = reader(io.BytesIO(bytestring))
ostream = io.StringIO()
while 1:
if sizehint is not None:
data = istream.read(sizehint)
else:
data = istream.read()
if not data:
break
ostream.write(data)
got = ostream.getvalue()
self.assertEqual(got, unistring)
def test_stream_bare(self):
unistring = "ABC\u00A1\u2200XYZ"
bytestring = b"ABC\xC2\xA1\xE2\x88\x80XYZ"
reader = codecs.getreader("utf-8-sig")
for sizehint in [None] + list(range(1, 11)) + \
[64, 128, 256, 512, 1024]:
istream = reader(io.BytesIO(bytestring))
ostream = io.StringIO()
while 1:
if sizehint is not None:
data = istream.read(sizehint)
else:
data = istream.read()
if not data:
break
ostream.write(data)
got = ostream.getvalue()
self.assertEqual(got, unistring)
class EscapeDecodeTest(unittest.TestCase):
def test_empty(self):
self.assertEqual(codecs.escape_decode(b""), (b"", 0))
def test_raw(self):
decode = codecs.escape_decode
for b in range(256):
b = bytes([b])
if b != b'\\':
self.assertEqual(decode(b + b'0'), (b + b'0', 2))
def test_escape(self):
decode = codecs.escape_decode
check = coding_checker(self, decode)
check(b"[\\\n]", b"[]")
check(br'[\"]', b'["]')
check(br"[\']", b"[']")
check(br"[\\]", br"[\]")
check(br"[\a]", b"[\x07]")
check(br"[\b]", b"[\x08]")
check(br"[\t]", b"[\x09]")
check(br"[\n]", b"[\x0a]")
check(br"[\v]", b"[\x0b]")
check(br"[\f]", b"[\x0c]")
check(br"[\r]", b"[\x0d]")
check(br"[\7]", b"[\x07]")
check(br"[\8]", br"[\8]")
check(br"[\78]", b"[\x078]")
check(br"[\41]", b"[!]")
check(br"[\418]", b"[!8]")
check(br"[\101]", b"[A]")
check(br"[\1010]", b"[A0]")
check(br"[\501]", b"[A]")
check(br"[\x41]", b"[A]")
check(br"[\X41]", br"[\X41]")
check(br"[\x410]", b"[A0]")
for b in range(256):
if b not in b'\n"\'\\abtnvfr01234567x':
b = bytes([b])
check(b'\\' + b, b'\\' + b)
def test_errors(self):
decode = codecs.escape_decode
self.assertRaises(ValueError, decode, br"\x")
self.assertRaises(ValueError, decode, br"[\x]")
self.assertEqual(decode(br"[\x]\x", "ignore"), (b"[]", 6))
self.assertEqual(decode(br"[\x]\x", "replace"), (b"[?]?", 6))
self.assertRaises(ValueError, decode, br"\x0")
self.assertRaises(ValueError, decode, br"[\x0]")
self.assertEqual(decode(br"[\x0]\x0", "ignore"), (b"[]", 8))
self.assertEqual(decode(br"[\x0]\x0", "replace"), (b"[?]?", 8))
class RecodingTest(unittest.TestCase):
def test_recoding(self):
f = io.BytesIO()
f2 = codecs.EncodedFile(f, "unicode_internal", "utf-8")
f2.write("a")
f2.close()
# Python used to crash on this at exit because of a refcount
# bug in _codecsmodule.c
# From RFC 3492
punycode_testcases = [
# A Arabic (Egyptian):
("\u0644\u064A\u0647\u0645\u0627\u0628\u062A\u0643\u0644"
"\u0645\u0648\u0634\u0639\u0631\u0628\u064A\u061F",
b"egbpdaj6bu4bxfgehfvwxn"),
# B Chinese (simplified):
("\u4ED6\u4EEC\u4E3A\u4EC0\u4E48\u4E0D\u8BF4\u4E2D\u6587",
b"ihqwcrb4cv8a8dqg056pqjye"),
# C Chinese (traditional):
("\u4ED6\u5011\u7232\u4EC0\u9EBD\u4E0D\u8AAA\u4E2D\u6587",
b"ihqwctvzc91f659drss3x8bo0yb"),
# D Czech: Pro<ccaron>prost<ecaron>nemluv<iacute><ccaron>esky
("\u0050\u0072\u006F\u010D\u0070\u0072\u006F\u0073\u0074"
"\u011B\u006E\u0065\u006D\u006C\u0075\u0076\u00ED\u010D"
"\u0065\u0073\u006B\u0079",
b"Proprostnemluvesky-uyb24dma41a"),
# E Hebrew:
("\u05DC\u05DE\u05D4\u05D4\u05DD\u05E4\u05E9\u05D5\u05D8"
"\u05DC\u05D0\u05DE\u05D3\u05D1\u05E8\u05D9\u05DD\u05E2"
"\u05D1\u05E8\u05D9\u05EA",
b"4dbcagdahymbxekheh6e0a7fei0b"),
# F Hindi (Devanagari):
("\u092F\u0939\u0932\u094B\u0917\u0939\u093F\u0928\u094D"
"\u0926\u0940\u0915\u094D\u092F\u094B\u0902\u0928\u0939"
"\u0940\u0902\u092C\u094B\u0932\u0938\u0915\u0924\u0947"
"\u0939\u0948\u0902",
b"i1baa7eci9glrd9b2ae1bj0hfcgg6iyaf8o0a1dig0cd"),
#(G) Japanese (kanji and hiragana):
("\u306A\u305C\u307F\u3093\u306A\u65E5\u672C\u8A9E\u3092"
"\u8A71\u3057\u3066\u304F\u308C\u306A\u3044\u306E\u304B",
b"n8jok5ay5dzabd5bym9f0cm5685rrjetr6pdxa"),
# (H) Korean (Hangul syllables):
("\uC138\uACC4\uC758\uBAA8\uB4E0\uC0AC\uB78C\uB4E4\uC774"
"\uD55C\uAD6D\uC5B4\uB97C\uC774\uD574\uD55C\uB2E4\uBA74"
"\uC5BC\uB9C8\uB098\uC88B\uC744\uAE4C",
b"989aomsvi5e83db1d2a355cv1e0vak1dwrv93d5xbh15a0dt30a5j"
b"psd879ccm6fea98c"),
# (I) Russian (Cyrillic):
("\u043F\u043E\u0447\u0435\u043C\u0443\u0436\u0435\u043E"
"\u043D\u0438\u043D\u0435\u0433\u043E\u0432\u043E\u0440"
"\u044F\u0442\u043F\u043E\u0440\u0443\u0441\u0441\u043A"
"\u0438",
b"b1abfaaepdrnnbgefbaDotcwatmq2g4l"),
# (J) Spanish: Porqu<eacute>nopuedensimplementehablarenEspa<ntilde>ol
("\u0050\u006F\u0072\u0071\u0075\u00E9\u006E\u006F\u0070"
"\u0075\u0065\u0064\u0065\u006E\u0073\u0069\u006D\u0070"
"\u006C\u0065\u006D\u0065\u006E\u0074\u0065\u0068\u0061"
"\u0062\u006C\u0061\u0072\u0065\u006E\u0045\u0073\u0070"
"\u0061\u00F1\u006F\u006C",
b"PorqunopuedensimplementehablarenEspaol-fmd56a"),
# (K) Vietnamese:
# T<adotbelow>isaoh<odotbelow>kh<ocirc>ngth<ecirchookabove>ch\
# <ihookabove>n<oacute>iti<ecircacute>ngVi<ecircdotbelow>t
("\u0054\u1EA1\u0069\u0073\u0061\u006F\u0068\u1ECD\u006B"
"\u0068\u00F4\u006E\u0067\u0074\u0068\u1EC3\u0063\u0068"
"\u1EC9\u006E\u00F3\u0069\u0074\u0069\u1EBF\u006E\u0067"
"\u0056\u0069\u1EC7\u0074",
b"TisaohkhngthchnitingVit-kjcr8268qyxafd2f1b9g"),
#(L) 3<nen>B<gumi><kinpachi><sensei>
("\u0033\u5E74\u0042\u7D44\u91D1\u516B\u5148\u751F",
b"3B-ww4c5e180e575a65lsy2b"),
# (M) <amuro><namie>-with-SUPER-MONKEYS
("\u5B89\u5BA4\u5948\u7F8E\u6075\u002D\u0077\u0069\u0074"
"\u0068\u002D\u0053\u0055\u0050\u0045\u0052\u002D\u004D"
"\u004F\u004E\u004B\u0045\u0059\u0053",
b"-with-SUPER-MONKEYS-pc58ag80a8qai00g7n9n"),
# (N) Hello-Another-Way-<sorezore><no><basho>
("\u0048\u0065\u006C\u006C\u006F\u002D\u0041\u006E\u006F"
"\u0074\u0068\u0065\u0072\u002D\u0057\u0061\u0079\u002D"
"\u305D\u308C\u305E\u308C\u306E\u5834\u6240",
b"Hello-Another-Way--fc4qua05auwb3674vfr0b"),
# (O) <hitotsu><yane><no><shita>2
("\u3072\u3068\u3064\u5C4B\u6839\u306E\u4E0B\u0032",
b"2-u9tlzr9756bt3uc0v"),
# (P) Maji<de>Koi<suru>5<byou><mae>
("\u004D\u0061\u006A\u0069\u3067\u004B\u006F\u0069\u3059"
"\u308B\u0035\u79D2\u524D",
b"MajiKoi5-783gue6qz075azm5e"),
# (Q) <pafii>de<runba>
("\u30D1\u30D5\u30A3\u30FC\u0064\u0065\u30EB\u30F3\u30D0",
b"de-jg4avhby1noc0d"),
# (R) <sono><supiido><de>
("\u305D\u306E\u30B9\u30D4\u30FC\u30C9\u3067",
b"d9juau41awczczp"),
# (S) -> $1.00 <-
("\u002D\u003E\u0020\u0024\u0031\u002E\u0030\u0030\u0020"
"\u003C\u002D",
b"-> $1.00 <--")
]
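# Sanity check: every entry above must be a (unicode, punycode bytes) pair.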
for i in punycode_testcases:
if len(i)!=2:
print(repr(i))
class PunycodeTest(unittest.TestCase):
def test_encode(self):
for uni, puny in punycode_testcases:
# Need to convert both strings to lower case, since
# some of the extended encodings use upper case, but our
# code produces only lower case. Converting just puny to
# lower is also insufficient, since some of the input characters
# are upper case.
self.assertEqual(
str(uni.encode("punycode"), "ascii").lower(),
str(puny, "ascii").lower()
)
def test_decode(self):
for uni, puny in punycode_testcases:
self.assertEqual(uni, puny.decode("punycode"))
puny = puny.decode("ascii").encode("ascii")
self.assertEqual(uni, puny.decode("punycode"))
class UnicodeInternalTest(unittest.TestCase):
@unittest.skipUnless(SIZEOF_WCHAR_T == 4, 'specific to 32-bit wchar_t')
def test_bug1251300(self):
# Decoding with unicode_internal used to not correctly handle "code
# points" above 0x10ffff on UCS-4 builds.
ok = [
(b"\x00\x10\xff\xff", "\U0010ffff"),
(b"\x00\x00\x01\x01", "\U00000101"),
(b"", ""),
]
not_ok = [
b"\x7f\xff\xff\xff",
b"\x80\x00\x00\x00",
b"\x81\x00\x00\x00",
b"\x00",
b"\x00\x00\x00\x00\x00",
]
for internal, uni in ok:
if sys.byteorder == "little":
internal = bytes(reversed(internal))
with support.check_warnings():
self.assertEqual(uni, internal.decode("unicode_internal"))
for internal in not_ok:
if sys.byteorder == "little":
internal = bytes(reversed(internal))
with support.check_warnings(('unicode_internal codec has been '
'deprecated', DeprecationWarning)):
self.assertRaises(UnicodeDecodeError, internal.decode,
"unicode_internal")
if sys.byteorder == "little":
invalid = b"\x00\x00\x11\x00"
else:
invalid = b"\x00\x11\x00\x00"
with support.check_warnings():
self.assertRaises(UnicodeDecodeError,
invalid.decode, "unicode_internal")
with support.check_warnings():
self.assertEqual(invalid.decode("unicode_internal", "replace"),
'\ufffd')
@unittest.skipUnless(SIZEOF_WCHAR_T == 4, 'specific to 32-bit wchar_t')
def test_decode_error_attributes(self):
try:
with support.check_warnings(('unicode_internal codec has been '
'deprecated', DeprecationWarning)):
b"\x00\x00\x00\x00\x00\x11\x11\x00".decode("unicode_internal")
except UnicodeDecodeError as ex:
self.assertEqual("unicode_internal", ex.encoding)
self.assertEqual(b"\x00\x00\x00\x00\x00\x11\x11\x00", ex.object)
self.assertEqual(4, ex.start)
self.assertEqual(8, ex.end)
else:
self.fail()
@unittest.skipUnless(SIZEOF_WCHAR_T == 4, 'specific to 32-bit wchar_t')
def test_decode_callback(self):
codecs.register_error("UnicodeInternalTest", codecs.ignore_errors)
decoder = codecs.getdecoder("unicode_internal")
with support.check_warnings(('unicode_internal codec has been '
'deprecated', DeprecationWarning)):
ab = "ab".encode("unicode_internal").decode()
ignored = decoder(bytes("%s\x22\x22\x22\x22%s" % (ab[:4], ab[4:]),
"ascii"),
"UnicodeInternalTest")
self.assertEqual(("ab", 12), ignored)
def test_encode_length(self):
with support.check_warnings(('unicode_internal codec has been '
'deprecated', DeprecationWarning)):
# Issue 3739
encoder = codecs.getencoder("unicode_internal")
self.assertEqual(encoder("a")[1], 1)
self.assertEqual(encoder("\xe9\u0142")[1], 2)
self.assertEqual(codecs.escape_encode(br'\x00')[1], 4)
# From http://www.gnu.org/software/libidn/draft-josefsson-idn-test-vectors.html
nameprep_tests = [
# 3.1 Map to nothing.
(b'foo\xc2\xad\xcd\x8f\xe1\xa0\x86\xe1\xa0\x8bbar'
b'\xe2\x80\x8b\xe2\x81\xa0baz\xef\xb8\x80\xef\xb8\x88\xef'
b'\xb8\x8f\xef\xbb\xbf',
b'foobarbaz'),
# 3.2 Case folding ASCII U+0043 U+0041 U+0046 U+0045.
(b'CAFE',
b'cafe'),
# 3.3 Case folding 8bit U+00DF (german sharp s).
# The original test case is bogus; it says \xc3\xdf
(b'\xc3\x9f',
b'ss'),
# 3.4 Case folding U+0130 (turkish capital I with dot).
(b'\xc4\xb0',
b'i\xcc\x87'),
# 3.5 Case folding multibyte U+0143 U+037A.
(b'\xc5\x83\xcd\xba',
b'\xc5\x84 \xce\xb9'),
# 3.6 Case folding U+2121 U+33C6 U+1D7BB.
# XXX: skip this as it fails in UCS-2 mode
#('\xe2\x84\xa1\xe3\x8f\x86\xf0\x9d\x9e\xbb',
# 'telc\xe2\x88\x95kg\xcf\x83'),
(None, None),
# 3.7 Normalization of U+006a U+030c U+00A0 U+00AA.
(b'j\xcc\x8c\xc2\xa0\xc2\xaa',
b'\xc7\xb0 a'),
# 3.8 Case folding U+1FB7 and normalization.
(b'\xe1\xbe\xb7',
b'\xe1\xbe\xb6\xce\xb9'),
# 3.9 Self-reverting case folding U+01F0 and normalization.
# The original test case is bogus, it says `\xc7\xf0'
(b'\xc7\xb0',
b'\xc7\xb0'),
# 3.10 Self-reverting case folding U+0390 and normalization.
(b'\xce\x90',
b'\xce\x90'),
# 3.11 Self-reverting case folding U+03B0 and normalization.
(b'\xce\xb0',
b'\xce\xb0'),
# 3.12 Self-reverting case folding U+1E96 and normalization.
(b'\xe1\xba\x96',
b'\xe1\xba\x96'),
# 3.13 Self-reverting case folding U+1F56 and normalization.
(b'\xe1\xbd\x96',
b'\xe1\xbd\x96'),
# 3.14 ASCII space character U+0020.
(b' ',
b' '),
# 3.15 Non-ASCII 8bit space character U+00A0.
(b'\xc2\xa0',
b' '),
# 3.16 Non-ASCII multibyte space character U+1680.
(b'\xe1\x9a\x80',
None),
# 3.17 Non-ASCII multibyte space character U+2000.
(b'\xe2\x80\x80',
b' '),
# 3.18 Zero Width Space U+200b.
(b'\xe2\x80\x8b',
b''),
# 3.19 Non-ASCII multibyte space character U+3000.
(b'\xe3\x80\x80',
b' '),
# 3.20 ASCII control characters U+0010 U+007F.
(b'\x10\x7f',
b'\x10\x7f'),
# 3.21 Non-ASCII 8bit control character U+0085.
(b'\xc2\x85',
None),
# 3.22 Non-ASCII multibyte control character U+180E.
(b'\xe1\xa0\x8e',
None),
# 3.23 Zero Width No-Break Space U+FEFF.
(b'\xef\xbb\xbf',
b''),
# 3.24 Non-ASCII control character U+1D175.
(b'\xf0\x9d\x85\xb5',
None),
# 3.25 Plane 0 private use character U+F123.
(b'\xef\x84\xa3',
None),
# 3.26 Plane 15 private use character U+F1234.
(b'\xf3\xb1\x88\xb4',
None),
# 3.27 Plane 16 private use character U+10F234.
(b'\xf4\x8f\x88\xb4',
None),
# 3.28 Non-character code point U+8FFFE.
(b'\xf2\x8f\xbf\xbe',
None),
# 3.29 Non-character code point U+10FFFF.
(b'\xf4\x8f\xbf\xbf',
None),
# 3.30 Surrogate code U+DF42.
(b'\xed\xbd\x82',
None),
# 3.31 Non-plain text character U+FFFD.
(b'\xef\xbf\xbd',
None),
# 3.32 Ideographic description character U+2FF5.
(b'\xe2\xbf\xb5',
None),
# 3.33 Display property character U+0341.
(b'\xcd\x81',
b'\xcc\x81'),
# 3.34 Left-to-right mark U+200E.
(b'\xe2\x80\x8e',
None),
# 3.35 Deprecated U+202A.
(b'\xe2\x80\xaa',
None),
# 3.36 Language tagging character U+E0001.
(b'\xf3\xa0\x80\x81',
None),
# 3.37 Language tagging character U+E0042.
(b'\xf3\xa0\x81\x82',
None),
# 3.38 Bidi: RandALCat character U+05BE and LCat characters.
(b'foo\xd6\xbebar',
None),
# 3.39 Bidi: RandALCat character U+FD50 and LCat characters.
(b'foo\xef\xb5\x90bar',
None),
# 3.40 Bidi: RandALCat character U+FB38 and LCat characters.
(b'foo\xef\xb9\xb6bar',
b'foo \xd9\x8ebar'),
# 3.41 Bidi: RandALCat without trailing RandALCat U+0627 U+0031.
(b'\xd8\xa71',
None),
# 3.42 Bidi: RandALCat character U+0627 U+0031 U+0628.
(b'\xd8\xa71\xd8\xa8',
b'\xd8\xa71\xd8\xa8'),
# 3.43 Unassigned code point U+E0002.
# Skip this test as we allow unassigned
#(b'\xf3\xa0\x80\x82',
# None),
(None, None),
# 3.44 Larger test (shrinking).
# Original test case reads \xc3\xdf
(b'X\xc2\xad\xc3\x9f\xc4\xb0\xe2\x84\xa1j\xcc\x8c\xc2\xa0\xc2'
b'\xaa\xce\xb0\xe2\x80\x80',
b'xssi\xcc\x87tel\xc7\xb0 a\xce\xb0 '),
# 3.45 Larger test (expanding).
# Original test case reads \xc3\x9f
(b'X\xc3\x9f\xe3\x8c\x96\xc4\xb0\xe2\x84\xa1\xe2\x92\x9f\xe3\x8c'
b'\x80',
b'xss\xe3\x82\xad\xe3\x83\xad\xe3\x83\xa1\xe3\x83\xbc\xe3'
b'\x83\x88\xe3\x83\xabi\xcc\x87tel\x28d\x29\xe3\x82'
b'\xa2\xe3\x83\x91\xe3\x83\xbc\xe3\x83\x88')
]
class NameprepTest(unittest.TestCase):
def test_nameprep(self):
from encodings.idna import nameprep
for pos, (orig, prepped) in enumerate(nameprep_tests):
if orig is None:
# Skipped
continue
# The Unicode strings are given in UTF-8
orig = str(orig, "utf-8", "surrogatepass")
if prepped is None:
# Input contains prohibited characters
self.assertRaises(UnicodeError, nameprep, orig)
else:
prepped = str(prepped, "utf-8", "surrogatepass")
try:
self.assertEqual(nameprep(orig), prepped)
except Exception as e:
raise support.TestFailed("Test 3.%d: %s" % (pos+1, str(e)))
class IDNACodecTest(unittest.TestCase):
def test_builtin_decode(self):
self.assertEqual(str(b"python.org", "idna"), "python.org")
self.assertEqual(str(b"python.org.", "idna"), "python.org.")
self.assertEqual(str(b"xn--pythn-mua.org", "idna"), "pyth\xf6n.org")
self.assertEqual(str(b"xn--pythn-mua.org.", "idna"), "pyth\xf6n.org.")
def test_builtin_encode(self):
self.assertEqual("python.org".encode("idna"), b"python.org")
self.assertEqual("python.org.".encode("idna"), b"python.org.")
self.assertEqual("pyth\xf6n.org".encode("idna"), b"xn--pythn-mua.org")
self.assertEqual("pyth\xf6n.org.".encode("idna"), b"xn--pythn-mua.org.")
def test_stream(self):
r = codecs.getreader("idna")(io.BytesIO(b"abc"))
r.read(3)
self.assertEqual(r.read(), "")
def test_incremental_decode(self):
self.assertEqual(
"".join(codecs.iterdecode((bytes([c]) for c in b"python.org"), "idna")),
"python.org"
)
self.assertEqual(
"".join(codecs.iterdecode((bytes([c]) for c in b"python.org."), "idna")),
"python.org."
)
self.assertEqual(
"".join(codecs.iterdecode((bytes([c]) for c in b"xn--pythn-mua.org."), "idna")),
"pyth\xf6n.org."
)
self.assertEqual(
"".join(codecs.iterdecode((bytes([c]) for c in b"xn--pythn-mua.org."), "idna")),
"pyth\xf6n.org."
)
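        # The incremental idna decoder buffers input until a label is
        # terminated by ".", so partial labels decode to "" until complete
        # (demonstrated by the step-wise assertions below).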
decoder = codecs.getincrementaldecoder("idna")()
self.assertEqual(decoder.decode(b"xn--xam", ), "")
self.assertEqual(decoder.decode(b"ple-9ta.o", ), "\xe4xample.")
self.assertEqual(decoder.decode(b"rg"), "")
self.assertEqual(decoder.decode(b"", True), "org")
decoder.reset()
self.assertEqual(decoder.decode(b"xn--xam", ), "")
self.assertEqual(decoder.decode(b"ple-9ta.o", ), "\xe4xample.")
self.assertEqual(decoder.decode(b"rg."), "org.")
self.assertEqual(decoder.decode(b"", True), "")
def test_incremental_encode(self):
self.assertEqual(
b"".join(codecs.iterencode("python.org", "idna")),
b"python.org"
)
self.assertEqual(
b"".join(codecs.iterencode("python.org.", "idna")),
b"python.org."
)
self.assertEqual(
b"".join(codecs.iterencode("pyth\xf6n.org.", "idna")),
b"xn--pythn-mua.org."
)
self.assertEqual(
b"".join(codecs.iterencode("pyth\xf6n.org.", "idna")),
b"xn--pythn-mua.org."
)
encoder = codecs.getincrementalencoder("idna")()
self.assertEqual(encoder.encode("\xe4x"), b"")
self.assertEqual(encoder.encode("ample.org"), b"xn--xample-9ta.")
self.assertEqual(encoder.encode("", True), b"org")
encoder.reset()
self.assertEqual(encoder.encode("\xe4x"), b"")
self.assertEqual(encoder.encode("ample.org."), b"xn--xample-9ta.org.")
self.assertEqual(encoder.encode("", True), b"")
class CodecsModuleTest(unittest.TestCase):
def test_decode(self):
self.assertEqual(codecs.decode(b'\xe4\xf6\xfc', 'latin-1'),
'\xe4\xf6\xfc')
self.assertRaises(TypeError, codecs.decode)
self.assertEqual(codecs.decode(b'abc'), 'abc')
self.assertRaises(UnicodeDecodeError, codecs.decode, b'\xff', 'ascii')
def test_encode(self):
self.assertEqual(codecs.encode('\xe4\xf6\xfc', 'latin-1'),
b'\xe4\xf6\xfc')
self.assertRaises(TypeError, codecs.encode)
self.assertRaises(LookupError, codecs.encode, "foo", "__spam__")
self.assertEqual(codecs.encode('abc'), b'abc')
self.assertRaises(UnicodeEncodeError, codecs.encode, '\xffff', 'ascii')
def test_register(self):
self.assertRaises(TypeError, codecs.register)
self.assertRaises(TypeError, codecs.register, 42)
def test_lookup(self):
self.assertRaises(TypeError, codecs.lookup)
self.assertRaises(LookupError, codecs.lookup, "__spam__")
self.assertRaises(LookupError, codecs.lookup, " ")
def test_getencoder(self):
self.assertRaises(TypeError, codecs.getencoder)
self.assertRaises(LookupError, codecs.getencoder, "__spam__")
def test_getdecoder(self):
self.assertRaises(TypeError, codecs.getdecoder)
self.assertRaises(LookupError, codecs.getdecoder, "__spam__")
def test_getreader(self):
self.assertRaises(TypeError, codecs.getreader)
self.assertRaises(LookupError, codecs.getreader, "__spam__")
def test_getwriter(self):
self.assertRaises(TypeError, codecs.getwriter)
self.assertRaises(LookupError, codecs.getwriter, "__spam__")
def test_lookup_issue1813(self):
# Issue #1813: under Turkish locales, lookup of some codecs failed
# because 'I' is lowercased as "ı" (dotless i)
oldlocale = locale.setlocale(locale.LC_CTYPE)
self.addCleanup(locale.setlocale, locale.LC_CTYPE, oldlocale)
try:
locale.setlocale(locale.LC_CTYPE, 'tr_TR')
except locale.Error:
# Unsupported locale on this system
self.skipTest('test needs Turkish locale')
c = codecs.lookup('ASCII')
self.assertEqual(c.name, 'ascii')
class StreamReaderTest(unittest.TestCase):
def setUp(self):
self.reader = codecs.getreader('utf-8')
self.stream = io.BytesIO(b'\xed\x95\x9c\n\xea\xb8\x80')
def test_readlines(self):
f = self.reader(self.stream)
self.assertEqual(f.readlines(), ['\ud55c\n', '\uae00'])
class EncodedFileTest(unittest.TestCase):
def test_basic(self):
f = io.BytesIO(b'\xed\x95\x9c\n\xea\xb8\x80')
ef = codecs.EncodedFile(f, 'utf-16-le', 'utf-8')
self.assertEqual(ef.read(), b'\\\xd5\n\x00\x00\xae')
f = io.BytesIO()
ef = codecs.EncodedFile(f, 'utf-8', 'latin-1')
ef.write(b'\xc3\xbc')
self.assertEqual(f.getvalue(), b'\xfc')
all_unicode_encodings = [
"ascii",
"big5",
"big5hkscs",
"charmap",
"cp037",
"cp1006",
"cp1026",
"cp1140",
"cp1250",
"cp1251",
"cp1252",
"cp1253",
"cp1254",
"cp1255",
"cp1256",
"cp1257",
"cp1258",
"cp424",
"cp437",
"cp500",
"cp720",
"cp737",
"cp775",
"cp850",
"cp852",
"cp855",
"cp856",
"cp857",
"cp858",
"cp860",
"cp861",
"cp862",
"cp863",
"cp864",
"cp865",
"cp866",
"cp869",
"cp874",
"cp875",
"cp932",
"cp949",
"cp950",
"euc_jis_2004",
"euc_jisx0213",
"euc_jp",
"euc_kr",
"gb18030",
"gb2312",
"gbk",
"hp_roman8",
"hz",
"idna",
"iso2022_jp",
"iso2022_jp_1",
"iso2022_jp_2",
"iso2022_jp_2004",
"iso2022_jp_3",
"iso2022_jp_ext",
"iso2022_kr",
"iso8859_1",
"iso8859_10",
"iso8859_11",
"iso8859_13",
"iso8859_14",
"iso8859_15",
"iso8859_16",
"iso8859_2",
"iso8859_3",
"iso8859_4",
"iso8859_5",
"iso8859_6",
"iso8859_7",
"iso8859_8",
"iso8859_9",
"johab",
"koi8_r",
"koi8_u",
"latin_1",
"mac_cyrillic",
"mac_greek",
"mac_iceland",
"mac_latin2",
"mac_roman",
"mac_turkish",
"palmos",
"ptcp154",
"punycode",
"raw_unicode_escape",
"shift_jis",
"shift_jis_2004",
"shift_jisx0213",
"tis_620",
"unicode_escape",
"unicode_internal",
"utf_16",
"utf_16_be",
"utf_16_le",
"utf_7",
"utf_8",
]
if hasattr(codecs, "mbcs_encode"):
all_unicode_encodings.append("mbcs")
# The following encoding is not tested, because it's not supposed
# to work:
# "undefined"
# The following encodings don't work in stateful mode
broken_unicode_with_streams = [
"punycode",
"unicode_internal"
]
broken_incremental_coders = broken_unicode_with_streams + [
"idna",
]
class BasicUnicodeTest(unittest.TestCase, MixInCheckStateHandling):
def test_basics(self):
s = "abc123" # all codecs should be able to encode these
for encoding in all_unicode_encodings:
name = codecs.lookup(encoding).name
if encoding.endswith("_codec"):
name += "_codec"
elif encoding == "latin_1":
name = "latin_1"
self.assertEqual(encoding.replace("_", "-"), name.replace("_", "-"))
with support.check_warnings():
# unicode-internal has been deprecated
(b, size) = codecs.getencoder(encoding)(s)
self.assertEqual(size, len(s), "%r != %r (encoding=%r)" % (size, len(s), encoding))
(chars, size) = codecs.getdecoder(encoding)(b)
self.assertEqual(chars, s, "%r != %r (encoding=%r)" % (chars, s, encoding))
if encoding not in broken_unicode_with_streams:
# check stream reader/writer
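                # Queue is a small byte-FIFO helper defined earlier in this
                # test module: bytes written to it are read back in order.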
q = Queue(b"")
writer = codecs.getwriter(encoding)(q)
encodedresult = b""
for c in s:
writer.write(c)
chunk = q.read()
self.assertTrue(type(chunk) is bytes, type(chunk))
encodedresult += chunk
q = Queue(b"")
reader = codecs.getreader(encoding)(q)
decodedresult = ""
for c in encodedresult:
q.write(bytes([c]))
decodedresult += reader.read()
self.assertEqual(decodedresult, s, "%r != %r (encoding=%r)" % (decodedresult, s, encoding))
if encoding not in broken_incremental_coders:
# check incremental decoder/encoder (fetched via the Python
# and C API) and iterencode()/iterdecode()
try:
encoder = codecs.getincrementalencoder(encoding)()
cencoder = _testcapi.codec_incrementalencoder(encoding)
except LookupError: # no IncrementalEncoder
pass
else:
# check incremental decoder/encoder
encodedresult = b""
for c in s:
encodedresult += encoder.encode(c)
encodedresult += encoder.encode("", True)
decoder = codecs.getincrementaldecoder(encoding)()
decodedresult = ""
for c in encodedresult:
decodedresult += decoder.decode(bytes([c]))
decodedresult += decoder.decode(b"", True)
self.assertEqual(decodedresult, s, "%r != %r (encoding=%r)" % (decodedresult, s, encoding))
# check C API
encodedresult = b""
for c in s:
encodedresult += cencoder.encode(c)
encodedresult += cencoder.encode("", True)
cdecoder = _testcapi.codec_incrementaldecoder(encoding)
decodedresult = ""
for c in encodedresult:
decodedresult += cdecoder.decode(bytes([c]))
decodedresult += cdecoder.decode(b"", True)
self.assertEqual(decodedresult, s, "%r != %r (encoding=%r)" % (decodedresult, s, encoding))
# check iterencode()/iterdecode()
result = "".join(codecs.iterdecode(codecs.iterencode(s, encoding), encoding))
self.assertEqual(result, s, "%r != %r (encoding=%r)" % (result, s, encoding))
# check iterencode()/iterdecode() with empty string
result = "".join(codecs.iterdecode(codecs.iterencode("", encoding), encoding))
self.assertEqual(result, "")
if encoding not in ("idna", "mbcs"):
# check incremental decoder/encoder with errors argument
try:
encoder = codecs.getincrementalencoder(encoding)("ignore")
cencoder = _testcapi.codec_incrementalencoder(encoding, "ignore")
except LookupError: # no IncrementalEncoder
pass
else:
encodedresult = b"".join(encoder.encode(c) for c in s)
decoder = codecs.getincrementaldecoder(encoding)("ignore")
decodedresult = "".join(decoder.decode(bytes([c])) for c in encodedresult)
self.assertEqual(decodedresult, s, "%r != %r (encoding=%r)" % (decodedresult, s, encoding))
encodedresult = b"".join(cencoder.encode(c) for c in s)
cdecoder = _testcapi.codec_incrementaldecoder(encoding, "ignore")
decodedresult = "".join(cdecoder.decode(bytes([c])) for c in encodedresult)
self.assertEqual(decodedresult, s, "%r != %r (encoding=%r)" % (decodedresult, s, encoding))
def test_seek(self):
# all codecs should be able to encode these
s = "%s\n%s\n" % (100*"abc123", 100*"def456")
for encoding in all_unicode_encodings:
if encoding == "idna": # FIXME: See SF bug #1163178
continue
if encoding in broken_unicode_with_streams:
continue
reader = codecs.getreader(encoding)(io.BytesIO(s.encode(encoding)))
for t in range(5):
# Test that calling seek resets the internal codec state and buffers
reader.seek(0, 0)
data = reader.read()
self.assertEqual(s, data)
def test_bad_decode_args(self):
for encoding in all_unicode_encodings:
decoder = codecs.getdecoder(encoding)
self.assertRaises(TypeError, decoder)
if encoding not in ("idna", "punycode"):
self.assertRaises(TypeError, decoder, 42)
def test_bad_encode_args(self):
for encoding in all_unicode_encodings:
encoder = codecs.getencoder(encoding)
with support.check_warnings():
# unicode-internal has been deprecated
self.assertRaises(TypeError, encoder)
def test_encoding_map_type_initialized(self):
from encodings import cp1140
# This used to crash, we are only verifying there's no crash.
table_type = type(cp1140.encoding_table)
self.assertEqual(table_type, table_type)
def test_decoder_state(self):
# Check that getstate() and setstate() handle the state properly
u = "abc123"
for encoding in all_unicode_encodings:
if encoding not in broken_incremental_coders:
self.check_state_handling_decode(encoding, u, u.encode(encoding))
self.check_state_handling_encode(encoding, u, u.encode(encoding))
class CharmapTest(unittest.TestCase):
def test_decode_with_string_map(self):
self.assertEqual(
codecs.charmap_decode(b"\x00\x01\x02", "strict", "abc"),
("abc", 3)
)
self.assertEqual(
codecs.charmap_decode(b"\x00\x01\x02", "strict", "\U0010FFFFbc"),
("\U0010FFFFbc", 3)
)
self.assertRaises(UnicodeDecodeError,
codecs.charmap_decode, b"\x00\x01\x02", "strict", "ab"
)
self.assertRaises(UnicodeDecodeError,
codecs.charmap_decode, b"\x00\x01\x02", "strict", "ab\ufffe"
)
self.assertEqual(
codecs.charmap_decode(b"\x00\x01\x02", "replace", "ab"),
("ab\ufffd", 3)
)
self.assertEqual(
codecs.charmap_decode(b"\x00\x01\x02", "replace", "ab\ufffe"),
("ab\ufffd", 3)
)
self.assertEqual(
codecs.charmap_decode(b"\x00\x01\x02", "ignore", "ab"),
("ab", 3)
)
self.assertEqual(
codecs.charmap_decode(b"\x00\x01\x02", "ignore", "ab\ufffe"),
("ab", 3)
)
allbytes = bytes(range(256))
self.assertEqual(
codecs.charmap_decode(allbytes, "ignore", ""),
("", len(allbytes))
)
def test_decode_with_int2str_map(self):
self.assertEqual(
codecs.charmap_decode(b"\x00\x01\x02", "strict",
{0: 'a', 1: 'b', 2: 'c'}),
("abc", 3)
)
self.assertEqual(
codecs.charmap_decode(b"\x00\x01\x02", "strict",
{0: 'Aa', 1: 'Bb', 2: 'Cc'}),
("AaBbCc", 3)
)
self.assertEqual(
codecs.charmap_decode(b"\x00\x01\x02", "strict",
{0: '\U0010FFFF', 1: 'b', 2: 'c'}),
("\U0010FFFFbc", 3)
)
self.assertEqual(
codecs.charmap_decode(b"\x00\x01\x02", "strict",
{0: 'a', 1: 'b', 2: ''}),
("ab", 3)
)
self.assertRaises(UnicodeDecodeError,
codecs.charmap_decode, b"\x00\x01\x02", "strict",
{0: 'a', 1: 'b'}
)
self.assertRaises(UnicodeDecodeError,
codecs.charmap_decode, b"\x00\x01\x02", "strict",
{0: 'a', 1: 'b', 2: None}
)
# Issue #14850
self.assertRaises(UnicodeDecodeError,
codecs.charmap_decode, b"\x00\x01\x02", "strict",
{0: 'a', 1: 'b', 2: '\ufffe'}
)
self.assertEqual(
codecs.charmap_decode(b"\x00\x01\x02", "replace",
{0: 'a', 1: 'b'}),
("ab\ufffd", 3)
)
self.assertEqual(
codecs.charmap_decode(b"\x00\x01\x02", "replace",
{0: 'a', 1: 'b', 2: None}),
("ab\ufffd", 3)
)
# Issue #14850
self.assertEqual(
codecs.charmap_decode(b"\x00\x01\x02", "replace",
{0: 'a', 1: 'b', 2: '\ufffe'}),
("ab\ufffd", 3)
)
self.assertEqual(
codecs.charmap_decode(b"\x00\x01\x02", "ignore",
{0: 'a', 1: 'b'}),
("ab", 3)
)
self.assertEqual(
codecs.charmap_decode(b"\x00\x01\x02", "ignore",
{0: 'a', 1: 'b', 2: None}),
("ab", 3)
)
# Issue #14850
self.assertEqual(
codecs.charmap_decode(b"\x00\x01\x02", "ignore",
{0: 'a', 1: 'b', 2: '\ufffe'}),
("ab", 3)
)
allbytes = bytes(range(256))
self.assertEqual(
codecs.charmap_decode(allbytes, "ignore", {}),
("", len(allbytes))
)
def test_decode_with_int2int_map(self):
a = ord('a')
b = ord('b')
c = ord('c')
self.assertEqual(
codecs.charmap_decode(b"\x00\x01\x02", "strict",
{0: a, 1: b, 2: c}),
("abc", 3)
)
# Issue #15379
self.assertEqual(
codecs.charmap_decode(b"\x00\x01\x02", "strict",
{0: 0x10FFFF, 1: b, 2: c}),
("\U0010FFFFbc", 3)
)
self.assertEqual(
codecs.charmap_decode(b"\x00\x01\x02", "strict",
{0: sys.maxunicode, 1: b, 2: c}),
(chr(sys.maxunicode) + "bc", 3)
)
self.assertRaises(TypeError,
codecs.charmap_decode, b"\x00\x01\x02", "strict",
{0: sys.maxunicode + 1, 1: b, 2: c}
)
self.assertRaises(UnicodeDecodeError,
codecs.charmap_decode, b"\x00\x01\x02", "strict",
{0: a, 1: b},
)
self.assertRaises(UnicodeDecodeError,
codecs.charmap_decode, b"\x00\x01\x02", "strict",
{0: a, 1: b, 2: 0xFFFE},
)
self.assertEqual(
codecs.charmap_decode(b"\x00\x01\x02", "replace",
{0: a, 1: b}),
("ab\ufffd", 3)
)
self.assertEqual(
codecs.charmap_decode(b"\x00\x01\x02", "replace",
{0: a, 1: b, 2: 0xFFFE}),
("ab\ufffd", 3)
)
self.assertEqual(
codecs.charmap_decode(b"\x00\x01\x02", "ignore",
{0: a, 1: b}),
("ab", 3)
)
self.assertEqual(
codecs.charmap_decode(b"\x00\x01\x02", "ignore",
{0: a, 1: b, 2: 0xFFFE}),
("ab", 3)
)
class WithStmtTest(unittest.TestCase):
def test_encodedfile(self):
f = io.BytesIO(b"\xc3\xbc")
with codecs.EncodedFile(f, "latin-1", "utf-8") as ef:
self.assertEqual(ef.read(), b"\xfc")
def test_streamreaderwriter(self):
f = io.BytesIO(b"\xc3\xbc")
info = codecs.lookup("utf-8")
with codecs.StreamReaderWriter(f, info.streamreader,
info.streamwriter, 'strict') as srw:
self.assertEqual(srw.read(), "\xfc")
class TypesTest(unittest.TestCase):
def test_decode_unicode(self):
# Most decoders don't accept unicode input
decoders = [
codecs.utf_7_decode,
codecs.utf_8_decode,
codecs.utf_16_le_decode,
codecs.utf_16_be_decode,
codecs.utf_16_ex_decode,
codecs.utf_32_decode,
codecs.utf_32_le_decode,
codecs.utf_32_be_decode,
codecs.utf_32_ex_decode,
codecs.latin_1_decode,
codecs.ascii_decode,
codecs.charmap_decode,
]
if hasattr(codecs, "mbcs_decode"):
decoders.append(codecs.mbcs_decode)
for decoder in decoders:
self.assertRaises(TypeError, decoder, "xxx")
def test_unicode_escape(self):
        # Escape-decoding a unicode string is supported and gives the same
# result as decoding the equivalent ASCII bytes string.
self.assertEqual(codecs.unicode_escape_decode(r"\u1234"), ("\u1234", 6))
self.assertEqual(codecs.unicode_escape_decode(br"\u1234"), ("\u1234", 6))
self.assertEqual(codecs.raw_unicode_escape_decode(r"\u1234"), ("\u1234", 6))
self.assertEqual(codecs.raw_unicode_escape_decode(br"\u1234"), ("\u1234", 6))
self.assertRaises(UnicodeDecodeError, codecs.unicode_escape_decode, br"\U00110000")
self.assertEqual(codecs.unicode_escape_decode(r"\U00110000", "replace"), ("\ufffd", 10))
self.assertRaises(UnicodeDecodeError, codecs.raw_unicode_escape_decode, br"\U00110000")
self.assertEqual(codecs.raw_unicode_escape_decode(r"\U00110000", "replace"), ("\ufffd", 10))
class UnicodeEscapeTest(unittest.TestCase):
def test_empty(self):
self.assertEqual(codecs.unicode_escape_encode(""), (b"", 0))
self.assertEqual(codecs.unicode_escape_decode(b""), ("", 0))
def test_raw_encode(self):
encode = codecs.unicode_escape_encode
for b in range(32, 127):
if b != b'\\'[0]:
self.assertEqual(encode(chr(b)), (bytes([b]), 1))
def test_raw_decode(self):
decode = codecs.unicode_escape_decode
for b in range(256):
if b != b'\\'[0]:
self.assertEqual(decode(bytes([b]) + b'0'), (chr(b) + '0', 2))
def test_escape_encode(self):
encode = codecs.unicode_escape_encode
check = coding_checker(self, encode)
check('\t', br'\t')
check('\n', br'\n')
check('\r', br'\r')
check('\\', br'\\')
for b in range(32):
if chr(b) not in '\t\n\r':
check(chr(b), ('\\x%02x' % b).encode())
for b in range(127, 256):
check(chr(b), ('\\x%02x' % b).encode())
check('\u20ac', br'\u20ac')
check('\U0001d120', br'\U0001d120')
def test_escape_decode(self):
decode = codecs.unicode_escape_decode
check = coding_checker(self, decode)
check(b"[\\\n]", "[]")
check(br'[\"]', '["]')
check(br"[\']", "[']")
check(br"[\\]", r"[\]")
check(br"[\a]", "[\x07]")
check(br"[\b]", "[\x08]")
check(br"[\t]", "[\x09]")
check(br"[\n]", "[\x0a]")
check(br"[\v]", "[\x0b]")
check(br"[\f]", "[\x0c]")
check(br"[\r]", "[\x0d]")
check(br"[\7]", "[\x07]")
check(br"[\8]", r"[\8]")
check(br"[\78]", "[\x078]")
check(br"[\41]", "[!]")
check(br"[\418]", "[!8]")
check(br"[\101]", "[A]")
check(br"[\1010]", "[A0]")
check(br"[\x41]", "[A]")
check(br"[\x410]", "[A0]")
check(br"\u20ac", "\u20ac")
check(br"\U0001d120", "\U0001d120")
for b in range(256):
if b not in b'\n"\'\\abtnvfr01234567xuUN':
check(b'\\' + bytes([b]), '\\' + chr(b))
def test_decode_errors(self):
decode = codecs.unicode_escape_decode
for c, d in (b'x', 2), (b'u', 4), (b'U', 4):
for i in range(d):
self.assertRaises(UnicodeDecodeError, decode,
b"\\" + c + b"0"*i)
self.assertRaises(UnicodeDecodeError, decode,
b"[\\" + c + b"0"*i + b"]")
data = b"[\\" + c + b"0"*i + b"]\\" + c + b"0"*i
self.assertEqual(decode(data, "ignore"), ("[]", len(data)))
self.assertEqual(decode(data, "replace"),
("[\ufffd]\ufffd", len(data)))
self.assertRaises(UnicodeDecodeError, decode, br"\U00110000")
self.assertEqual(decode(br"\U00110000", "ignore"), ("", 10))
self.assertEqual(decode(br"\U00110000", "replace"), ("\ufffd", 10))
class RawUnicodeEscapeTest(unittest.TestCase):
def test_empty(self):
self.assertEqual(codecs.raw_unicode_escape_encode(""), (b"", 0))
self.assertEqual(codecs.raw_unicode_escape_decode(b""), ("", 0))
def test_raw_encode(self):
encode = codecs.raw_unicode_escape_encode
for b in range(256):
self.assertEqual(encode(chr(b)), (bytes([b]), 1))
def test_raw_decode(self):
decode = codecs.raw_unicode_escape_decode
for b in range(256):
self.assertEqual(decode(bytes([b]) + b'0'), (chr(b) + '0', 2))
def test_escape_encode(self):
encode = codecs.raw_unicode_escape_encode
check = coding_checker(self, encode)
for b in range(256):
if b not in b'uU':
check('\\' + chr(b), b'\\' + bytes([b]))
check('\u20ac', br'\u20ac')
check('\U0001d120', br'\U0001d120')
def test_escape_decode(self):
decode = codecs.raw_unicode_escape_decode
check = coding_checker(self, decode)
for b in range(256):
if b not in b'uU':
check(b'\\' + bytes([b]), '\\' + chr(b))
check(br"\u20ac", "\u20ac")
check(br"\U0001d120", "\U0001d120")
def test_decode_errors(self):
decode = codecs.raw_unicode_escape_decode
for c, d in (b'u', 4), (b'U', 4):
for i in range(d):
self.assertRaises(UnicodeDecodeError, decode,
b"\\" + c + b"0"*i)
self.assertRaises(UnicodeDecodeError, decode,
b"[\\" + c + b"0"*i + b"]")
data = b"[\\" + c + b"0"*i + b"]\\" + c + b"0"*i
self.assertEqual(decode(data, "ignore"), ("[]", len(data)))
self.assertEqual(decode(data, "replace"),
("[\ufffd]\ufffd", len(data)))
self.assertRaises(UnicodeDecodeError, decode, br"\U00110000")
self.assertEqual(decode(br"\U00110000", "ignore"), ("", 10))
self.assertEqual(decode(br"\U00110000", "replace"), ("\ufffd", 10))
class SurrogateEscapeTest(unittest.TestCase):
def test_utf8(self):
# Bad byte
self.assertEqual(b"foo\x80bar".decode("utf-8", "surrogateescape"),
"foo\udc80bar")
self.assertEqual("foo\udc80bar".encode("utf-8", "surrogateescape"),
b"foo\x80bar")
# bad-utf-8 encoded surrogate
self.assertEqual(b"\xed\xb0\x80".decode("utf-8", "surrogateescape"),
"\udced\udcb0\udc80")
self.assertEqual("\udced\udcb0\udc80".encode("utf-8", "surrogateescape"),
b"\xed\xb0\x80")
def test_ascii(self):
# bad byte
self.assertEqual(b"foo\x80bar".decode("ascii", "surrogateescape"),
"foo\udc80bar")
self.assertEqual("foo\udc80bar".encode("ascii", "surrogateescape"),
b"foo\x80bar")
def test_charmap(self):
# bad byte: \xa5 is unmapped in iso-8859-3
self.assertEqual(b"foo\xa5bar".decode("iso-8859-3", "surrogateescape"),
"foo\udca5bar")
self.assertEqual("foo\udca5bar".encode("iso-8859-3", "surrogateescape"),
b"foo\xa5bar")
def test_latin1(self):
# Issue6373
self.assertEqual("\udce4\udceb\udcef\udcf6\udcfc".encode("latin-1", "surrogateescape"),
b"\xe4\xeb\xef\xf6\xfc")
class BomTest(unittest.TestCase):
def test_seek0(self):
data = "1234567890"
tests = ("utf-16",
"utf-16-le",
"utf-16-be",
"utf-32",
"utf-32-le",
"utf-32-be")
self.addCleanup(support.unlink, support.TESTFN)
for encoding in tests:
# Check if the BOM is written only once
with codecs.open(support.TESTFN, 'w+', encoding=encoding) as f:
f.write(data)
f.write(data)
f.seek(0)
self.assertEqual(f.read(), data * 2)
f.seek(0)
self.assertEqual(f.read(), data * 2)
# Check that the BOM is written after a seek(0)
with codecs.open(support.TESTFN, 'w+', encoding=encoding) as f:
f.write(data[0])
self.assertNotEqual(f.tell(), 0)
f.seek(0)
f.write(data)
f.seek(0)
self.assertEqual(f.read(), data)
# (StreamWriter) Check that the BOM is written after a seek(0)
with codecs.open(support.TESTFN, 'w+', encoding=encoding) as f:
f.writer.write(data[0])
self.assertNotEqual(f.writer.tell(), 0)
f.writer.seek(0)
f.writer.write(data)
f.seek(0)
self.assertEqual(f.read(), data)
# Check that the BOM is not written after a seek() at a position
# different than the start
with codecs.open(support.TESTFN, 'w+', encoding=encoding) as f:
f.write(data)
f.seek(f.tell())
f.write(data)
f.seek(0)
self.assertEqual(f.read(), data * 2)
# (StreamWriter) Check that the BOM is not written after a seek()
# at a position different than the start
with codecs.open(support.TESTFN, 'w+', encoding=encoding) as f:
f.writer.write(data)
f.writer.seek(f.writer.tell())
f.writer.write(data)
f.seek(0)
self.assertEqual(f.read(), data * 2)
bytes_transform_encodings = [
"base64_codec",
"uu_codec",
"quopri_codec",
"hex_codec",
]
try:
import zlib
except ImportError:
pass
else:
bytes_transform_encodings.append("zlib_codec")
try:
import bz2
except ImportError:
pass
else:
bytes_transform_encodings.append("bz2_codec")
class TransformCodecTest(unittest.TestCase):
def test_basics(self):
binput = bytes(range(256))
for encoding in bytes_transform_encodings:
# generic codecs interface
(o, size) = codecs.getencoder(encoding)(binput)
self.assertEqual(size, len(binput))
(i, size) = codecs.getdecoder(encoding)(o)
self.assertEqual(size, len(o))
self.assertEqual(i, binput)
def test_read(self):
for encoding in bytes_transform_encodings:
sin = codecs.encode(b"\x80", encoding)
reader = codecs.getreader(encoding)(io.BytesIO(sin))
sout = reader.read()
self.assertEqual(sout, b"\x80")
def test_readline(self):
for encoding in bytes_transform_encodings:
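            # uu and zlib encoded output is not line-oriented (it can contain
            # embedded newlines or arbitrary binary), so readline() framing
            # does not round-trip for those two codecs.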
if encoding in ['uu_codec', 'zlib_codec']:
continue
sin = codecs.encode(b"\x80", encoding)
reader = codecs.getreader(encoding)(io.BytesIO(sin))
sout = reader.readline()
self.assertEqual(sout, b"\x80")
@unittest.skipUnless(sys.platform == 'win32',
'code pages are specific to Windows')
class CodePageTest(unittest.TestCase):
# CP_UTF8 is already tested by CP65001Test
CP_UTF8 = 65001
def test_invalid_code_page(self):
self.assertRaises(ValueError, codecs.code_page_encode, -1, 'a')
self.assertRaises(ValueError, codecs.code_page_decode, -1, b'a')
self.assertRaises(WindowsError, codecs.code_page_encode, 123, 'a')
self.assertRaises(WindowsError, codecs.code_page_decode, 123, b'a')
def test_code_page_name(self):
self.assertRaisesRegex(UnicodeEncodeError, 'cp932',
codecs.code_page_encode, 932, '\xff')
self.assertRaisesRegex(UnicodeDecodeError, 'cp932',
codecs.code_page_decode, 932, b'\x81\x00')
self.assertRaisesRegex(UnicodeDecodeError, 'CP_UTF8',
codecs.code_page_decode, self.CP_UTF8, b'\xff')
def check_decode(self, cp, tests):
for raw, errors, expected in tests:
if expected is not None:
try:
decoded = codecs.code_page_decode(cp, raw, errors)
except UnicodeDecodeError as err:
self.fail('Unable to decode %a from "cp%s" with '
'errors=%r: %s' % (raw, cp, errors, err))
self.assertEqual(decoded[0], expected,
'%a.decode("cp%s", %r)=%a != %a'
% (raw, cp, errors, decoded[0], expected))
# assert 0 <= decoded[1] <= len(raw)
self.assertGreaterEqual(decoded[1], 0)
self.assertLessEqual(decoded[1], len(raw))
else:
self.assertRaises(UnicodeDecodeError,
codecs.code_page_decode, cp, raw, errors)
def check_encode(self, cp, tests):
for text, errors, expected in tests:
if expected is not None:
try:
encoded = codecs.code_page_encode(cp, text, errors)
except UnicodeEncodeError as err:
self.fail('Unable to encode %a to "cp%s" with '
'errors=%r: %s' % (text, cp, errors, err))
self.assertEqual(encoded[0], expected,
'%a.encode("cp%s", %r)=%a != %a'
% (text, cp, errors, encoded[0], expected))
self.assertEqual(encoded[1], len(text))
else:
self.assertRaises(UnicodeEncodeError,
codecs.code_page_encode, cp, text, errors)
def test_cp932(self):
self.check_encode(932, (
('abc', 'strict', b'abc'),
('\uff44\u9a3e', 'strict', b'\x82\x84\xe9\x80'),
# test error handlers
('\xff', 'strict', None),
('[\xff]', 'ignore', b'[]'),
('[\xff]', 'replace', b'[y]'),
('[\u20ac]', 'replace', b'[?]'),
('[\xff]', 'backslashreplace', b'[\\xff]'),
            ('[\xff]', 'xmlcharrefreplace', b'[&#255;]'),
))
self.check_decode(932, (
(b'abc', 'strict', 'abc'),
(b'\x82\x84\xe9\x80', 'strict', '\uff44\u9a3e'),
# invalid bytes
(b'[\xff]', 'strict', None),
(b'[\xff]', 'ignore', '[]'),
(b'[\xff]', 'replace', '[\ufffd]'),
(b'[\xff]', 'surrogateescape', '[\udcff]'),
(b'\x81\x00abc', 'strict', None),
(b'\x81\x00abc', 'ignore', '\x00abc'),
(b'\x81\x00abc', 'replace', '\ufffd\x00abc'),
))
def test_cp1252(self):
self.check_encode(1252, (
('abc', 'strict', b'abc'),
('\xe9\u20ac', 'strict', b'\xe9\x80'),
('\xff', 'strict', b'\xff'),
('\u0141', 'strict', None),
('\u0141', 'ignore', b''),
('\u0141', 'replace', b'L'),
))
self.check_decode(1252, (
(b'abc', 'strict', 'abc'),
(b'\xe9\x80', 'strict', '\xe9\u20ac'),
(b'\xff', 'strict', '\xff'),
))
def test_cp_utf7(self):
cp = 65000
self.check_encode(cp, (
('abc', 'strict', b'abc'),
('\xe9\u20ac', 'strict', b'+AOkgrA-'),
('\U0010ffff', 'strict', b'+2//f/w-'),
('\udc80', 'strict', b'+3IA-'),
('\ufffd', 'strict', b'+//0-'),
))
self.check_decode(cp, (
(b'abc', 'strict', 'abc'),
(b'+AOkgrA-', 'strict', '\xe9\u20ac'),
(b'+2//f/w-', 'strict', '\U0010ffff'),
(b'+3IA-', 'strict', '\udc80'),
(b'+//0-', 'strict', '\ufffd'),
# invalid bytes
(b'[+/]', 'strict', '[]'),
(b'[\xff]', 'strict', '[\xff]'),
))
def test_multibyte_encoding(self):
self.check_decode(932, (
(b'\x84\xe9\x80', 'ignore', '\u9a3e'),
(b'\x84\xe9\x80', 'replace', '\ufffd\u9a3e'),
))
self.check_decode(self.CP_UTF8, (
(b'\xff\xf4\x8f\xbf\xbf', 'ignore', '\U0010ffff'),
(b'\xff\xf4\x8f\xbf\xbf', 'replace', '\ufffd\U0010ffff'),
))
if VISTA_OR_LATER:
self.check_encode(self.CP_UTF8, (
('[\U0010ffff\uDC80]', 'ignore', b'[\xf4\x8f\xbf\xbf]'),
('[\U0010ffff\uDC80]', 'replace', b'[\xf4\x8f\xbf\xbf?]'),
))
def test_incremental(self):
decoded = codecs.code_page_decode(932, b'\x82', 'strict', False)
self.assertEqual(decoded, ('', 0))
decoded = codecs.code_page_decode(932,
b'\xe9\x80\xe9', 'strict',
False)
self.assertEqual(decoded, ('\u9a3e', 2))
decoded = codecs.code_page_decode(932,
b'\xe9\x80\xe9\x80', 'strict',
False)
self.assertEqual(decoded, ('\u9a3e\u9a3e', 4))
decoded = codecs.code_page_decode(932,
b'abc', 'strict',
False)
self.assertEqual(decoded, ('abc', 3))
if __name__ == "__main__":
unittest.main()
| mancoast/CPythonPyc_test | fail/332_test_codecs.py | Python | gpl-3.0 | 91,385 | 0.001609 |
#
# Protocol diffing tool from http://github.com/dsjoerg/s2protocol
#
# Usage: s2_cli.py --diff 38215,38749
#
import sys
import argparse
import pprint
from s2protocol.versions import build
def diff_things(typeinfo_index, thing_a, thing_b):
if type(thing_a) != type(thing_b):
print(
"typeinfo {} diff types: {} {}".format(
typeinfo_index, type(thing_a), type(thing_b)
)
)
return
if type(thing_a) == dict:
        # list() keeps this Python 2-era code working on Python 3, where
        # dict.items() returns a view that is neither list nor tuple and
        # would otherwise bypass the element-wise comparison below.
        thing_a = list(thing_a.items())
        thing_b = list(thing_b.items())
if type(thing_a) == list or type(thing_a) == tuple:
if len(thing_a) != len(thing_b):
print(
"typeinfo {} diff len: {} {}".format(
typeinfo_index, len(thing_a), len(thing_b)
)
)
else:
for ix in range(len(thing_a)):
diff_things(typeinfo_index, thing_a[ix], thing_b[ix])
elif thing_a != thing_b:
if type(thing_a) == int:
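            # Heuristic carried over from the original tool: small ids must
            # match exactly, while larger ids may shift down by exactly one
            # between protocol builds (the 55 cutoff appears empirical).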
if (thing_a < 55 or thing_a - 1 != thing_b):
print(
"typeinfo {} diff number: {} {}".format(
typeinfo_index, thing_a, thing_b
)
)
else:
print(
"typeinfo {} diff string: {} {}".format(
typeinfo_index, thing_a, thing_b
)
)
def diff(protocol_a_ver, protocol_b_ver):
print(
"Comparing {} to {}".format(
protocol_a_ver, protocol_b_ver
)
)
protocol_a = build(protocol_a_ver)
protocol_b = build(protocol_b_ver)
count_a = len(protocol_a.typeinfos)
count_b = len(protocol_b.typeinfos)
print("Count of typeinfos: {} {}".format(count_a, count_b))
for index in range(max(count_a, count_b)):
if index >= count_a:
print("Protocol {} missing typeinfo {}".format(protocol_a_ver, index))
continue
if index >= count_b:
print("Protocol {} missing typeinfo {}".format(protocol_b_ver, index))
continue
a = protocol_a.typeinfos[index]
b = protocol_b.typeinfos[index]
diff_things(index, a, b)
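if __name__ == "__main__":
    # Hypothetical convenience entry point (an assumption, not part of the
    # original module, which is normally invoked via s2_cli.py --diff):
    #     python diff.py 38215 38749
    diff(int(sys.argv[1]), int(sys.argv[2]))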
| Blizzard/s2protocol | s2protocol/diff.py | Python | mit | 2,237 | 0.002235 |
# Authors: See README.RST for Contributors
# Copyright 2015-2017
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
from odoo import fields, models
class IrActionsReportDuplicate(models.TransientModel):
_name = "ir.actions.report.duplicate"
_description = "Duplicate Qweb report"
suffix = fields.Char(
string="Suffix", help="This suffix will be added to the report"
)
def duplicate_report(self):
self.ensure_one()
active_id = self.env.context.get("active_id")
model = self.env.context.get("active_model")
if model:
record = self.env[model].browse(active_id)
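            # The suffix / enable_duplication context keys are presumably
            # consumed by an overridden copy() on the report model elsewhere
            # in this module.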
record.with_context(suffix=self.suffix, enable_duplication=True).copy()
return {}
| OCA/server-tools | base_report_auto_create_qweb/wizard/report_duplicate.py | Python | agpl-3.0 | 755 | 0.001325 |
"""
https://leetcode.com/problems/construct-the-rectangle/
https://leetcode.com/submissions/detail/107452214/
"""
import math
class Solution(object):
def constructRectangle(self, area):
"""
:type area: int
:rtype: List[int]
"""
W = int(math.sqrt(area))
while area % W != 0:
W -= 1
return [int(area / W), W]
import unittest
class Test(unittest.TestCase):
def test(self):
solution = Solution()
self.assertEqual(solution.constructRectangle(4), [2, 2])
if __name__ == '__main__':
unittest.main()
| vivaxy/algorithms | python/problems/construct_the_rectangle.py | Python | mit | 598 | 0.001672 |
import bpy
bpy.context.object.data.sensor_width = 23.6
bpy.context.object.data.sensor_height = 15.6
bpy.context.object.data.sensor_fit = 'HORIZONTAL'
| cschenck/blender_sim | fluid_sim_deps/blender-2.69/2.69/scripts/presets/camera/Nikon_D7000.py | Python | gpl-3.0 | 150 | 0 |