| repo_name | path | language | license | size | score | prefix | middle | suffix |
|---|---|---|---|---|---|---|---|---|
| stringlengths 5–100 | stringlengths 4–231 | stringclasses 1 value | stringclasses 15 values | int64 6–947k | float64 0–0.34 | stringlengths 0–8.16k | stringlengths 3–512 | stringlengths 0–8.17k |
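Each row below is one fill-in-the-middle sample: a source file is split into a prefix, a short middle span, and a suffix, and the remaining columns record the repository, file path, language, license, file size, and score. As a minimal sketch (assuming a row is available as a plain dict with those string fields; the example row here is invented, not taken from the table), the three text columns recombine into the original file like this:

def reassemble(row):
    """Concatenate the fill-in-the-middle split back into the full source text."""
    return row["prefix"] + row["middle"] + row["suffix"]

# Invented example row for illustration; real rows hold code like the samples below.
example = {"prefix": "def add(a, b):\n    return ", "middle": "a + b", "suffix": "\n"}
print(reassemble(example))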
repotvsupertuga/tvsupertuga.repository | script.module.universalscrapers/lib/universalscrapers/scraperplugins/unsure/filmapik.py | Python | gpl-2.0 | 5,801 | 0.022755
import requests
import re,time
import xbmc,xbmcaddon
from ..scraper import Scraper
from ..common import clean_title,clean_search,send_log,error_log
dev_log = xbmcaddon.Addon('script.module.universalscrapers').getSetting("dev_log")
requests.packages.urllib3.disable_warnings()
s = requests.session()
User_Agent = 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.87 Safari/537.36'
class filmapik(Scraper):
domains = ['https://www.filmapik.io']
name = "Filmapik"
sources = []
def __init__(self):
self.base_link = 'https://www.filmapik.io'
def scrape_movie(self, title, year, imdb, debrid = False):
try:
start_time = time.time()
search_id = clean_search(title.lower())
start_url = '%s/?s=%s' %(self.base_link,search_id.replace(' ','+'))
#print 'start>>>> '+start_url
headers={'User-Agent':User_Agent}
html = requests.get(start_url,headers=headers,timeout=5).content
match = re.compile('data-movie-id=.+?href="(.+?)".+?<h2>(.+?)</h2>',re.DOTALL).findall(html)
for item_url, name in match:
#print 'clean name > '+clean_title(name).lower()
if not clean_title(search_id).lower() == clean_title(name).lower():
continue
if not year in name:
continue
item_url = item_url + 'play'
mode = 'movie'
#print item_url
self.get_source(item_url,mode,title,year,'','',start_time)
return self.sources
except Exception, argument:
if dev_log == 'true':
error_log(self.name,argument)
return self.sources
def scrape_episode(self,title, show_year, year, season, episode, imdb, tvdb, debrid = False):
try:
start_time = time.time()
search_id = clean_search(title.lower())
start_url = '%s/?s=%s' %(self.base_link,search_id.replace(' ','+'))
headers={'User-Agent':User_Agent}
html = requests.get(start_url,headers=headers,timeout=5).content
match = re.compile('data-movie-id=.+?href="(.+?)".+?<h2>(.+?)</h2>',re.DOTALL).findall(html)
for item_url, name in match:
#print item_url
if clean_title(search_id).lower() == clean_title(name).lower():
item_url = self.base_link + '/episodes/%s-%sx%s/play' %(search_id.replace(' ','-'),season,episode)
#print item_url
mode = 'tv'
self.get_source(item_url,mode,title,year,season,episode,start_time)
return self.sources
except Exception, argument:
if dev_log == 'true':
error_log(self.name,argument)
return self.sources
def get_source(self,item_url,mode,title,year,season,episode,start_time):
try:
#print 'cfwd > '+item_url
headers={'User-Agent':User_Agent}
OPEN = requests.get(item_url,headers=headers,timeout=20).content
#print OPEN
if mode == 'movie':
match = re.compile('<div class="player_nav" id="referNav">(.+?)<div class="swiper-wrapper" style="padding-bottom: 10px;">',re.DOTALL).findall(OPEN)
else:
match = re.compile('<div class="player_nav" id="referNav">(.+?)<div class="movies-list-wrap mlw-category">',re.DOTALL).findall(OPEN)
Sources = re.compile('href="(.+?)">(.+?)</a>',re.DOTALL).findall(str(match))
count = 0
for embFile, server in Sources:
if not 'G-SHARER'in server:
if 'FAST' in server:
#print embFile
qual = server.replace(' ','').replace('FAST','').replace('360p','')
#print qual
OPEN1 = requests.get(embFile,headers=headers,timeout=10).content
#print OPEN1
sources1 = re.compile('<iframe.+?src="(.+?)"',re.DOTALL).findall(OPEN1)[1]
#print sources1
OPEN2 = requests.get(sources1,headers=headers,timeout=10).content
match2 = re.compile('"file":"(.+?)"',re.DOTALL).findall(OPEN2)
for link in match2:
#print link
count +=1
self.sources.append({'source': self.name, 'quality': qual, 'scraper': self.name, 'url': link,'direct': False})
else:
#print embFile
qual = 'SD'
#print qual
OPEN1 = requests.get(embFile,headers=headers,timeout=10).content
#print OPEN1
sources1 = re.compile('<iframe.+?src="(.+?)"',re.DOTALL).findall(OPEN1)[1]
host = sources1.split('//')[1].replace('www.','')
host = host.split('/')[0].lower()
host = host.split('.')[0]
count +=1
self.sources.append({'source': host, 'quality': qual, 'scraper': self.name, 'url': sources1,'direct': False})
if dev_log=='true':
end_time = time.time() - start_time
send_log(self.name,end_time,count,title,year, season=season,episode=episode)
except:
pass
#filmapik().scrape_movie('tomb raider', '2018','')
#filmapik().scrape_episode('the resident', '2018', '', '1', '2', '', '')
catapult-project/catapult-csm | telemetry/telemetry/internal/browser/possible_browser.py | Python | bsd-3-clause | 1,414 | 0.010608
# Copyright 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from telemetry.internal.app import possible_app
class PossibleBrowser(possible_app.PossibleApp):
"""A browser that can be controlled.
Call Create() to launch the browser and begin manipulating it.
"""
def __init__(self, browser_type, target_os, supports_tab_control):
super(PossibleBrowser, self).__init__(app_type=browser_type,
target_os=target_os)
self._supports_tab_control = supports_tab_control
self._credentials_path = None
def __repr__(self):
return 'PossibleBrowser(app_type=%s)' % self.app_type
@property
def browser_type(self):
return self.app_type
@property
def supports_tab_control(self):
return self._supports_tab_control
def _InitPlatformIfNeeded(self):
raise NotImplementedError()
def Create(self, finder_options):
raise NotImplementedError()
def SupportsOptions(self, browser_options):
"""Tests for extension support."""
raise NotImplementedError()
def IsRemote(self):
return False
def RunRemote(self):
pass
def UpdateExecutableIfNeeded(self):
pass
def last_modification_time(self):
return -1
def SetCredentialsPath(self, credentials_path):
self._credentials_path = credentials_path
SyntaxBlitz/syntaxblitz.github.io | mining-lear/process/step6.py | Python | mit | 1,654 | 0.024788
import json
f = open('text-stripped-3.json')
out = open('text-lines.json', 'w')
start_obj = json.load(f)
end_obj = {'data': []}
characters_on_stage = []
currently_speaking = None
last_scene = '1.1'
for i in range(len(start_obj['data'])):
obj = start_obj['data'][i]
if obj['type'] == 'entrance':
if obj['characters'] in characters_on_stage:
raise Exception('Character tried to enter stage when already on stage at object ' + str(i))
characters_on_stage = characters_on_stage + obj['characters']
elif obj['type'] == 'exeunt':
characters_on_stage = []
elif obj['type'] == 'exit':
characters_on_stage = [char for char in characters_on_stage if char not in obj['characters']]
elif obj['type'] == 'speaker tag':
if obj['speaker'] not in characters_on_stage:
raise Exception('Character tried to speak when not on stage at object ' + str(i), start_obj['data'][i + 1])
currently_speaking = obj['speaker']
elif obj['type'] == 'line':
if currently_speaking == None:
raise Exception('A line did not have an associated speaker at object ' + str(i))
identifier_info = obj['identifier'].split('.')
scene = identifier_info[0] + '.' + identifier_info[1]
#if scene != last_scene:
# if len(characters_on_stage) != 0:
# print('Warning: scene ' + scene + ' just started with ' + str(characters_on_stage) + ' still on stage')
last_scene = scene
end_obj['data'].append({
'type': 'line',
'identifier': obj['identifier'],
'text': obj['text'].strip(),
'speaker': currently_speaking,
'characters': characters_on_stage
})
if len(characters_on_stage) == 0:
currently_speaking = None
json.dump(end_obj, out)
salberin/libsigrokdecode | decoders/usb_signalling/pd.py | Python | gpl-3.0 | 7,715 | 0.002333
##
## This file is part of the libsigrokdecode project.
##
## Copyright (C) 2011 Gareth McMullin <gareth@blacksphere.co.nz>
## Copyright (C) 2012-2013 Uwe Hermann <uwe@hermann-uwe.de>
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program; if not, write to the Free Software
## Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
##
import sigrokdecode as srd
'''
OUTPUT_PYTHON format:
Packet:
[<ptype>, <pdata>]
<ptype>, <pdata>:
- 'SOP', None
- 'SYM', <sym>
- 'BIT', <bit>
- 'STUFF BIT', None
- 'EOP', None
<sym>:
- 'J', 'K', 'SE0', or 'SE1'
<bit>:
- 0 or 1
- Note: Symbols like SE0, SE1, and the J that's part of EOP don't yield 'BIT'.
'''
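# Illustrative example (not from the original file): with the line idle at J,
# the SYNC sequence K J K J K J K K is emitted below roughly as
#   ['SOP', None], then per sampled bit ['SYM', <sym>] plus ['BIT', <bit>],
# yielding the bits 0,0,0,0,0,0,0,1 -- a symbol change decodes to 0 and a
# repeated symbol to 1 (NRZI), as implemented in get_bit()/handle_bit().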
# Low-/full-speed symbols.
# Note: Low-speed J and K are inverted compared to the full-speed J and K!
symbols = {
'low-speed': {
# (<dp>, <dm>): <symbol/state>
(0, 0): 'SE0',
(1, 0): 'K',
(0, 1): 'J',
(1, 1): 'SE1',
},
'full-speed': {
# (<dp>, <dm>): <symbol/state>
(0, 0): 'SE0',
(1, 0): 'J',
(0, 1): 'K',
(1, 1): 'SE1',
},
}
bitrates = {
'low-speed': 1500000, # 1.5Mb/s (+/- 1.5%)
'full-speed': 12000000, # 12Mb/s (+/- 0.25%)
}
sym_idx = {
'J': 0,
'K': 1,
'SE0': 2,
'SE1': 3,
}
class Decoder(srd.Decoder):
api_version = 2
id = 'usb_signalling'
name = 'USB signalling'
longname = 'Universal Serial Bus (LS/FS) signalling'
desc = 'USB (low-speed and full-speed) signalling protocol.'
license = 'gplv2+'
inputs = ['logic']
outputs = ['usb_signalling']
channels = (
{'id': 'dp', 'name': 'D+', 'desc': 'USB D+ signal'},
{'id': 'dm', 'name': 'D-', 'desc': 'USB D- signal'},
)
options = (
{'id': 'signalling', 'desc': 'Signalling',
'default': 'full-speed', 'values': ('full-speed', 'low-speed')},
)
annotations = (
('sym-j', 'J symbol'),
('sym-k', 'K symbol'),
('sym-se0', 'SE0 symbol'),
('sym-se1', 'SE1 symbol'),
('sop', 'Start of packet (SOP)'),
('eop', 'End of packet (EOP)'),
('bit', 'Bit'),
('stuffbit', 'Stuff bit'),
)
annotation_rows = (
('bits', 'Bits', (4, 5, 6, 7)),
('symbols', 'Symbols', (0, 1, 2, 3)),
)
def __init__(self):
self.samplerate = None
self.oldsym = 'J' # The "idle" state is J.
self.ss_sop = None
self.ss_block = None
self.samplenum = 0
self.syms = []
self.bitrate = None
self.bitwidth = None
self.bitnum = 0
self.samplenum_target = None
self.oldpins = None
self.consecutive_ones = 0
self.state = 'IDLE'
def start(self):
self.out_python = self.register(srd.OUTPUT_PYTHON)
self.out_ann = self.register(srd.OUTPUT_ANN)
def metadata(self, key, value):
if key == srd.SRD_CONF_SAMPLERATE:
self.samplerate = value
self.bitrate = bitrates[self.options['signalling']]
self.bitwidth = float(self.samplerate) / float(self.bitrate)
self.halfbit = int(self.bitwidth / 2)
def putpx(self, data):
self.put(self.samplenum, self.samplenum, self.out_python, data)
def putx(self, data):
self.put(self.samplenum, self.samplenum, self.out_ann, data)
def putpm(self, data):
s, h = self.samplenum, self.halfbit
self.put(self.ss_block - h, s + h, self.out_python, data)
def putm(self, data):
s, h = self.samplenum, self.halfbit
self.put(self.ss_block - h, s + h, self.out_ann, data)
def putpb(self, data):
s, h = self.samplenum, self.halfbit
self.put(s - h, s + h, self.out_python, data)
def putb(self, data):
s, h = self.samplenum, self.halfbit
self.put(s - h, s + h, self.out_ann, data)
def set_new_target_samplenum(self):
bitpos = self.ss_sop + (self.bitwidth / 2)
bitpos += self.bitnum * self.bitwidth
self.samplenum_target = int(bitpos)
def wait_for_sop(self, sym):
# Wait for a Start of Packet (SOP), i.e. a J->K symbol change.
if sym != 'K':
self.oldsym = sym
return
self.ss_sop = self.samplenum
self.set_new_target_samplenum()
self.putpx(['SOP', None])
self.putx([4, ['SOP', 'S']])
self.state = 'GET BIT'
def handle_bit(self, sym, b):
if self.consecutive_ones == 6 and b == '0':
# Stuff bit.
self.putpb(['STUFF BIT', None])
self.putb([7, ['Stuff bit: %s' % b, 'SB: %s' % b, '%s' % b]])
self.putb([sym_idx[sym], ['%s' % sym]])
self.consecutive_ones = 0
else:
# Normal bit (not a stuff bit).
self.putpb(['BIT', b])
self.putb([6, ['%s' % b]])
self.putb([sym_idx[sym], ['%s' % sym]])
if b == '1':
self.consecutive_ones += 1
else:
self.consecutive_ones = 0
def get_eop(self, sym):
# EOP: SE0 for >= 1 bittime (usually 2 bittimes), then J.
self.syms.append(sym)
self.putpb(['SYM', sym])
self.putb([sym_idx[sym], ['%s' % sym, '%s' % sym[0]]])
self.bitnum += 1
self.set_new_target_samplenum()
self.oldsym = sym
if self.syms[-2:] == ['SE0', 'J']:
# Got an EOP.
self.putpm(['EOP', None])
self.putm([5, ['EOP', 'E']])
self.bitnum, self.syms, self.state = 0, [], 'IDLE'
self.consecutive_ones = 0
def get_bit(self, sym):
if sym == 'SE0':
# Start of an EOP. Change state, run get_eop() for this bit.
self.state = 'GET EOP'
self.ss_block = self.samplenum
self.get_eop(sym)
return
self.syms.append(sym)
self.putpb(['SYM', sym])
b = '0' if self.oldsym != sym else '1'
self.handle_bit(sym, b)
self.bitnum += 1
self.set_new_target_samplenum()
self.oldsym = sym
def decode(self, ss, es, data):
if self.samplerate is None:
raise Exception("Cannot decode without samplerate.")
for (self.samplenum, pins) in data:
# State machine.
if self.state == 'IDLE':
# Ignore identical samples early on (for performance reasons).
if self.oldpins == pins:
continue
self.oldpins = pins
sym = symbols[self.options['signalling']][tuple(pins)]
self.wait_for_sop(sym)
elif self.state in ('GET BIT', 'GET EOP'):
# Wait until we're in the middle of the desired bit.
if self.samplenum < self.samplenum_target:
continue
sym = symbols[self.options['signalling']][tuple(pins)]
if self.state == 'GET BIT':
self.get_bit(sym)
elif self.state == 'GET EOP':
self.get_eop(sym)
else:
raise Exception('Invalid state: %s' % self.state)
adhoc-dev/odoo-nautical | nautical/partner.py | Python | agpl-3.0 | 957 | 0.014629
# -*- coding: utf-8 -*-
##############################################################################
# For copyright and license notices, see __openerp__.py file in module root
# directory
##############################################################################
import re
from openerp import netsvc
from openerp.osv import osv, fields
class partner(osv.osv):
""""""
_name = 'res.partner'
_inherits = { }
_inherit = [ 'res.partner' ]
_columns = {
'authorization_ids': fields.one2many('nautical.authorization', 'partner_id', string='Authorizations'),
'historical_record_ids': fields.one2many('nautical.partner_record', 'partner_id', string='historical_record_ids'),
'owned_craft_ids': fields.one2many('nautical.craft', 'owner_id', string='Owned Crafts'),
}
_defaults = {
}
_constraints = [
]
partner()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
callmealien/wazimap_zambia | censusreporter/api/config.py | Python | mit | 106 | 0
# Database
DB_NAME = 'censusreporter_ke'
DB_USER = 'censusreporter_ke'
DB_PASSWORD = 'censusreporter_ke'
mattyhall/North-South-Divide | divide.py | Python | bsd-3-clause | 2,269 | 0.009255
from flask import Flask, render_template, request
from lib.work import work
from lib.loaders.policedata import PoliceData
from lib.loaders.populationdata import PopulationData
from lib.loaders.childpovertydata import ChildPovertyData
from lib.loaders.cancerdata import CancerData
import json
app = Flask(__name__)
@app.route('/')
def root():
return render_template('index.html', police=request.args.get('police', 'false'),
population=request.args.get('population', 'false'),
child_poverty=request.args.get('childpoverty', 'false'),
cancer=request.args.get('cancer', 'false'),
no_line=request.args.get('noline', 'false'),
heat_map=request.args.get('heatmap', 'false'),
step=float(request.args.get('step', '0.2')),
realistic=request.args.get('realistic', 'false'))
@app.route('/data')
def data():
police = request.args.get('police', 'true')
population = request.args.get('population', 'false')
child_poverty = request.args.get('childpoverty', 'false')
cancer = request.args.get('cancer', 'false')
step = float(request.args.get('step', '0.2'))
data_sets = []
if police == 'true':
data_set = PoliceData()
data_set.load()
data_sets.append(data_set)
if population == 'true':
data_set = PopulationData()
data_set.load()
data_sets.append(data_set)
if child_poverty == 'true':
data_set = ChildPovertyData()
data_set.load()
data_sets.append(data_set)
if cancer == 'true':
data_set = CancerData()
data_set.load()
data_sets.append(data_set)
output = {}
average = 0
for data_set in data_sets:
data = work(step, data_set)
output[data_set.NAME] = data
average += data['average_line'][0]['latitude']
average /= len(data_sets)
output['average'] = [{'latitude': average, 'longitude': -5}, {'latitude': average, 'longitude': 2}]
return json.dumps(output)
if __name__ == '__main__':
app.run(debug=True)
factorlibre/odoomrp-wip | stock_picking_wave_management/models/stock_picking_wave.py | Python | agpl-3.0 | 3,408 | 0.000293
# -*- coding: utf-8 -*-
##############################################################################
# For copyright and license notices, see __openerp__.py file in root directory
##############################################################################
from openerp import models, fields, api, exceptions, _
class StockPickingWave(models.Model):
_inherit = 'stock.picking.wave'
@api.one
def _count_confirmed_pickings(self):
self.num_confirmed = len(self.picking_ids.filtered(lambda x: x.state ==
'confirmed'))
@api.one
def _count_assigned_pickings(self):
self.num_assigned = len(self.picking_ids.filtered(lambda x: x.state ==
'assigned'))
pickings_products = fields.One2many(
'stock.move', 'wave', string='Products', readonly=True)
pickings_operations = fields.One2many(
'stock.pack.operation', 'wave', string='Operations', readonly=True)
num_confirmed = fields.Integer(
compute="_count_confirmed_pickings", string="Confirmed pickings")
num_assigned = fields.Integer(
compute="_count_assigned_pickings", string="Assigned pickings")
partner = fields.Many2one('res.partner', 'Partner')
@api.multi
def confirm_picking(self):
picking_obj = self.env['stock.picking']
for wave in self:
pickings = picking_obj.search([('wave_id', '=', wave.id),
('state', '=', 'draft')])
pickings.action_assign()
wave.state = 'in_progress'
return True
@api.one
def button_check_availability(self):
pickings = self.picking_ids.filtered(lambda x: x.state == 'confirmed')
pickings.action_assign()
# The old API is used because the father is updated method context
def action_transfer(self, cr, uid, ids, context=None):
picking_obj = self.pool['stock.picking']
wave = self.browse(cr, uid, ids[0], context=context)
pickings = wave.picking_ids.filtered(lambda x: x.state == 'assigned')
c = context.copy()
c.update({'origin_wave': wave.id})
return picking_obj.do_enter_transfer_details(
cr, uid, pickings.ids, context=c)
@api.multi
def _get_pickings_domain(self):
self.ensure_one()
cond = [('wave_id', '=', False),
('state', 'not in', ['done', 'cancel'])]
if self.partner.child_ids:
cond.extend(['|', ('partner_id', '=', self.partner.id),
('partner_id', 'in',
self.partner.child_ids.ids)])
elif self.partner:
cond.extend([('partner_id', '=', self.partner.id)])
return cond
@api.multi
@api.onchange('partner')
def onchange_partner(self):
self.ensure_one()
cond = self._get_pickings_domain()
return {'domain': {'picking_ids': cond}}
@api.multi
def done(self):
for wave in self:
for picking in wave.picking_ids:
if picking.state not in ('cancel', 'done'):
raise exceptions.Warning(_(
'Some pickings are not transferred. '
'Please transfer pickings before set wave to done'))
return super(StockPickingWave, self).done()
kamyu104/LeetCode | Python/palindrome-pairs.py | Python | mit | 4,503 | 0.001777
# Time: O(n * k^2), n is the number of the words, k is the max length of the words.
# Space: O(n * k)
# Given a list of unique words. Find all pairs of indices (i, j)
# in the given list, so that the concatenation of the two words,
# i.e. words[i] + words[j] is a palindrome.
#
# Example 1:
# Given words = ["bat", "tab", "cat"]
# Return [[0, 1], [1, 0]]
# The palindromes are ["battab", "tabbat"]
# Example 2:
# Given words = ["abcd", "dcba", "lls", "s", "sssll"]
# Return [[0, 1], [1, 0], [3, 2], [2, 4]]
# The palindromes are ["dcbaabcd", "abcddcba", "slls", "llssssll"]
import collections
class Solution(object):
def palindromePairs(self, words):
"""
:type words: List[str]
:rtype: List[List[int]]
"""
res = []
lookup = {}
for i, word in enumerate(words):
lookup[word] = i
for i in xrange(len(words)):
for j in xrange(len(words[i]) + 1):
prefix = words[i][j:]
suffix = words[i][:j]
if prefix == prefix[::-1] and \
suffix[::-1] in lookup and lookup[suffix[::-1]] != i:
res.append([i, lookup[suffix[::-1]]])
if j > 0 and suffix == suffix[::-1] and \
prefix[::-1] in lookup and lookup[prefix[::-1]] != i:
res.append([lookup[prefix[::-1]], i])
return res
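# Usage sketch (not in the original file): the examples from the problem
# statement above, run against the hash-map solution.
#print Solution().palindromePairs(["bat", "tab", "cat"])                  # [[0, 1], [1, 0]]
#print Solution().palindromePairs(["abcd", "dcba", "lls", "s", "sssll"])  # [[0, 1], [1, 0], [3, 2], [2, 4]]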
# Time: O(n * k^2), n is the number of the words, k is the max length of the words.
# Space: O(n * k^2)
# Manacher solution.
class Solution_TLE(object):
def palindromePairs(self, words):
"""
:type words: List[str]
:rtype: List[List[int]]
"""
def manacher(s, P):
def preProcess(s):
if not s:
return ['^', '$']
T = ['^']
for c in s:
T += ["#", c]
T += ['#', '$']
return T
T = preProcess(s)
center, right = 0, 0
for i in xrange(1, len(T) - 1):
i_mirror = 2 * center - i
if right > i:
P[i] = min(right - i, P[i_mirror])
else:
P[i] = 0
while T[i + 1 + P[i]] == T[i - 1 - P[i]]:
P[i] += 1
if i + P[i] > right:
center, right = i, i + P[i]
prefix, suffix = collections.defaultdict(list), collections.defaultdict(list)
for i, word in enumerate(words):
P = [0] * (2 * len(word) + 3)
manacher(word, P)
for j in xrange(len(P)):
if j - P[j] == 1:
prefix[word[(j + P[j]) / 2:]].append(i)
if j + P[j] == len(P) - 2:
suffix[word[:(j - P[j]) / 2]].append(i)
res = []
for i, word in enumerate(words):
for j in prefix[word[::-1]]:
if j != i:
res.append([i, j])
for j in suffix[word[::-1]]:
if len(word) != len(words[j]):
res.append([j, i])
return res
# Time: O(n * k^2), n is the number of the words, k is the max length of the words.
# Space: O(n * k)
# Trie solution.
class TrieNode:
def __init__(self):
self.word_idx = -1
self.leaves = {}
def insert(self, word, i):
cur = self
for c in word:
if not c in cur.leaves:
cur.leaves[c] = TrieNode()
cur = cur.leaves[c]
cur.word_idx = i
def find(self, s, idx, res):
cur = self
for i in reversed(xrange(len(s))):
if s[i] in cur.leaves:
cur = cur.leaves[s[i]]
if cur.word_idx not in (-1, idx) and \
self.is_palindrome(s, i - 1):
res.append([cur.word_idx, idx])
else:
break
def is_palindrome(self, s, j):
i = 0
while i <= j:
if s[i] != s[j]:
return False
i += 1
j -= 1
return True
class Solution_MLE(object):
def palindromePairs(self, words):
"""
:type words: List[str]
:rtype: List[List[int]]
"""
res = []
trie = TrieNode()
for i in xrange(len(words)):
trie.insert(words[i], i)
for i in xrange(len(words)):
trie.find(words[i], i, res)
return res
t3dev/odoo | addons/payment/models/account_invoice.py | Python | gpl-3.0 | 4,255 | 0.00423
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo import api, fields, models, _
from odoo.exceptions import ValidationError
class AccountInvoice(models.Model):
_inherit = 'account.invoice'
transaction_ids = fields.Many2many('payment.transaction', 'account_invoice_transaction_rel', 'invoice_id', 'transaction_id',
string='Transactions', copy=False, readonly=True)
authorized_transaction_ids = fields.Many2many('payment.transaction', compute='_compute_authorized_transaction_ids',
string='Authorized Transactions', copy=False, readonly=True)
@api.depends('transaction_ids')
def _compute_authorized_transaction_ids(self):
for trans in self:
trans.authorized_transaction_ids = trans.transaction_ids.filtered(lambda t: t.state == 'authorized')
@api.multi
def get_portal_last_transaction(self):
self.ensure_one()
return self.transaction_ids.get_last_transaction()
@api.multi
def _create_payment_transaction(self, vals):
'''Similar to self.env['payment.transaction'].create(vals) but the values are filled with the
current invoices fields (e.g. the partner or the currency).
:param vals: The values to create a new payment.transaction.
:return: The newly created payment.transaction record.
'''
# Ensure the currencies are the same.
currency = self[0].currency_id
if any([inv.currency_id != currency for inv in self]):
raise ValidationError(_('A transaction can\'t be linked to invoices having different currencies.'))
# Ensure the partner are the same.
partner = self[0].partner_id
if any([inv.partner_id != partner for inv in self]):
raise ValidationError(_('A transaction can\'t be linked to invoices having different partners.'))
# Try to retrieve the acquirer. However, fallback to the token's acquirer.
acquirer_id = vals.get('acquirer_id')
acquirer = None
payment_token_id = vals.get('payment_token_id')
if payment_token_id:
payment_token = self.env['payment.token'].sudo().browse(payment_token_id)
# Check payment_token/acquirer matching or take the acquirer from token
if acquirer_id:
acquirer = self.env['payment.acquirer'].browse(acquirer_id)
if payment_token and payment_token.acquirer_id != acquirer:
raise ValidationError(_('Invalid token found! Token acquirer %s != %s') % (
payment_token.acquirer_id.name, acquirer.name))
if payment_token and payment_token.partner_id != partner:
raise ValidationError(_('Invalid token found! Token partner %s != %s') % (
payment_token.partner.name, partner.name))
else:
acquirer = payment_token.acquirer_id
# Check an acquirer is there.
if not acquirer_id and not acquirer:
raise ValidationError(_('A payment acquirer is required to create a transaction.'))
if not acquirer:
acquirer = self.env['payment.acquirer'].browse(acquirer_id)
# Check a journal is set on acquirer.
if not acquirer.journal_id:
raise ValidationError(_('A journal must be specified of the acquirer %s.' % acquirer.name))
if not acquirer_id and acquirer:
vals['acquirer_id'] = acquirer.id
vals.update({
'amount': sum(self.mapped('residual')),
'currency_id': currency.id,
'partner_id': partner.id,
'invoice_ids': [(6, 0, self.ids)],
})
transaction = self.env['payment.transaction'].create(vals)
# Process directly if payment_token
if transaction.payment_token_id:
transaction.s2s_do_transaction()
return transaction
@api.multi
def payment_action_capture(self):
self.authorized_transaction_ids.s2s_capture_transaction()
@api.multi
def payment_action_void(self):
self.authorized_transaction_ids.s2s_void_transaction()
Azure/azure-sdk-for-python | sdk/iothub/azure-mgmt-iothub/azure/mgmt/iothub/v2021_03_03_preview/models/_models_py3.py | Python | mit | 167,573 | 0.003473
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import datetime
from typing import Any, Dict, List, Optional, Union
from azure.core.exceptions import HttpResponseError
import msrest.serialization
from ._iot_hub_client_enums import *
class ArmIdentity(msrest.serialization.Model):
"""ArmIdentity.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar principal_id: Principal Id.
:vartype principal_id: str
:ivar tenant_id: Tenant Id.
:vartype tenant_id: str
:ivar type: The type of identity used for the resource. The type 'SystemAssigned,UserAssigned'
includes both an implicitly created identity and a set of user assigned identities. The type
'None' will remove any identities from the service. Possible values include: "SystemAssigned",
"UserAssigned", "SystemAssigned, UserAssigned", "None".
:vartype type: str or ~azure.mgmt.iothub.v2021_03_03_preview.models.ResourceIdentityType
:ivar user_assigned_identities: Dictionary of :code:`<ArmUserIdentity>`.
:vartype user_assigned_identities: dict[str,
~azure.mgmt.iothub.v2021_03_03_preview.models.ArmUserIdentity]
"""
_validation = {
'principal_id': {'readonly': True},
'tenant_id': {'readonly': True},
}
_attribute_map = {
'principal_id': {'key': 'principalId', 'type': 'str'},
'tenant_id': {'key': 'tenantId', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'user_assigned_identities': {'key': 'userAssignedIdentities', 'type': '{ArmUserIdentity}'},
}
def __init__(
self,
*,
type: Optional[Union[str, "ResourceIdentityType"]] = None,
user_assigned_identities: Optional[Dict[str, "ArmUserIdentity"]] = None,
**kwargs
):
"""
:keyword type: The type of identity used for the resource. The type
'SystemAssigned,UserAssigned' includes both an implicitly created identity and a set of user
assigned identities. The type 'None' will remove any identities from the service. Possible
values include: "SystemAssigned", "UserAssigned", "SystemAssigned, UserAssigned", "None".
:paramtype type: str or ~azure.mgmt.iothub.v2021_03_03_preview.models.ResourceIdentityType
:keyword user_assigned_identities: Dictionary of :code:`<ArmUserIdentity>`.
:paramtype user_assigned_identities: dict[str,
~azure.mgmt.iothub.v2021_03_03_preview.models.ArmUserIdentity]
"""
super(ArmIdentity, self).__init__(**kwargs)
self.principal_id = None
self.tenant_id = None
self.type = type
self.user_assigned_identities = user_assigned_identities
class ArmUserIdentity(msrest.serialization.Model):
"""ArmUserIdentity.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar principal_id:
:vartype principal_id: str
:ivar client_id:
:vartype client_id: str
"""
_validation = {
'principal_id': {'readonly': True},
'client_id': {'readonly': True},
}
_attribute_map = {
'principal_id': {'key': 'principalId', 'type': 'str'},
'client_id': {'key': 'clientId', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
"""
"""
super(ArmUserIdentity, self).__init__(**kwargs)
self.principal_id = None
self.client_id = None
class CertificateBodyDescription(msrest.serialization.Model):
"""The JSON-serialized X509 Certificate.
:ivar certificate: base-64 representation of the X509 leaf certificate .cer file or just .pem
file content.
:vartype certificate: str
"""
_attribute_map = {
'certificate': {'key': 'certificate', 'type': 'str'},
}
def __init__(
self,
*,
certificate: Optional[str] = None,
**kwargs
):
"""
:keyword certificate: base-64 representation of the X509 leaf certificate .cer file or just
.pem file content.
:paramtype certificate: str
"""
super(CertificateBodyDescription, self).__init__(**kwargs)
self.certificate = certificate
class CertificateDescription(msrest.serialization.Model):
"""The X509 Certificate.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar properties: The description of an X509 CA Certificate.
:vartype properties: ~azure.mgmt.iothub.v2021_03_03_preview.models.CertificateProperties
:ivar id: The resource identifier.
:vartype id: str
:ivar name: The name of the certificate.
:vartype name: str
:ivar etag: The entity tag.
:vartype etag: str
:ivar type: The resource type.
:vartype type: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'etag': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'properties': {'key': 'properties', 'type': 'CertificateProperties'},
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'etag': {'key': 'etag', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
}
def __init__(
self,
*,
properties: Optional["CertificateProperties"] = None,
**kwargs
):
"""
:keyword properties: The description of an X509 CA Certificate.
:paramtype properties: ~azure.mgmt.iothub.v2021_03_03_preview.models.CertificateProperties
"""
super(CertificateDescription, self).__init__(**kwargs)
self.properties = properties
self.id = None
self.name = None
self.etag = None
self.type = None
class CertificateListDescription(msrest.serialization.Model):
"""The JSON-serialized array of Certificate objects.
:ivar value: The array of Certificate objects.
:vartype value: list[~azure.mgmt.iothub.v2021_03_03_preview.models.CertificateDescription]
"""
_attribute_map = {
'value': {'key': 'value', 'type': '[CertificateDescription]'},
}
def __init__(
self,
*,
value: Optional[List["CertificateDescription"]] = None,
**kwargs
):
"""
:keyword value: The array of Certificate objects.
:paramtype value: list[~azure.mgmt.iothub.v2021_03_03_preview.models.CertificateDescription]
"""
super(CertificateListDescription, self).__init__(**kwargs)
self.value = value
class CertificateProperties(msrest.serialization.Model):
"""The description of an X509 CA Certificate.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar subject: The certificate's subject name.
:vartype subject: str
:ivar expiry: The certificate's expiration date and time.
:vartype expiry: ~datetime.datetime
:ivar thumbprint: The certificate's thumbprint.
:vartype thumbprint: str
:ivar is_verified: Determines whether certificate has been verified.
:vartype is_verified: bool
:ivar created: The certificate's create date and time.
:vartype created: ~datetime.datetime
:ivar updated: The certificate's last update date and time.
:vartype updated: ~datetime.datetime
:ivar certificate: The certificate content.
:vartype certificate: str
"""
_validation = {
'subject': {'readonly': True},
'expiry': {'readonly': True},
'thumbprint': {'readonly': True},
'is_verified': {'readonly': True},
'created': {'readonly': True},
'updated': {'readonly': True},
}
_attribute_map = {
'subject':
emimorad/smile_openerp_matrix_widget | smile_matrix_widget/widgets/__init__.py | Python | gpl-3.0 | 966 | 0
# -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2011 Smile. All Rights Reserved
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import matrix
yejingfu/samples | tensorflow/pyplot03.py | Python | mit | 3,650 | 0.011233
#!/usr/bin/env python3
import numpy as np
import matplotlib.pyplot as plt
import re
rootPath = "/Users/jeff/work/debug/20181216_hard_fe2k_15fps/"
finalLogFile = "rosout.log.2"
def appendTimestamps(arr, start, stop, flag):
#flag = True
d = stop - start
if flag or (d > -10 and d < 2000):
arr.append(d)
return True
return False
## camera -> OA(ObjectAanalytics) -> Fusion -> Flink -> V2X
stamps = [[],[],[],[],[]]
log = open(rootPath + finalLogFile)
lines = log.readlines()
log.close()
for i in range(0, len(lines)):
line = lines[i].rstrip('\n').strip()
ret = re.findall(r'\"camera_output_ts\":(\d+),.*\"flink_output_ts\":(\d+),.*\"fusion_output_ts\":(\d+),.*\"oa_output_ts\":\[([\d,]+)\],.*\"v2xnode_input_ts\":(\d+)', line)
if len(ret) > 0 and len(ret[0]) == 5:
if i < 2:
#print("line", line)
print("ret:", ret)
stamps[0].append(long(ret[0][0])) # camera
stamps[2].append(long(ret[0][2])) # fusion
stamps[3].append(long(ret[0][1])) # flink
stamps[4].append(long(ret[0][4])) # v2x
# oa
oastamps = ret[0][3].split(',')
t1 = long(oastamps[0])
t2 = long(oastamps[1])
t3 = long(oastamps[2])
mi = t1
ma = t1
if mi > t2:
mi = t2
if mi > t3:
mi = t3
if ma < t2:
ma = t2
if ma < t3:
ma = t3
#stamps[1].append((t1 + t2 + t3) / 3)
#stamps[1].append(mi)
stamps[1].append(ma)
stamps[1].append(long(oastamps[0]))
stamps[1].append(long(oastamps[1]))
stamps[1].append(long(oastamps[2]))
## [ 0      1      2       3      4   ]
## [ Camera OA(3) Fusion Flink V2X ]
## [ 0 1 2 3 4 5 ]
## [ Total(V2X - Camera), OA(OA-Camera), Fusion(Fusion-OA), Flink(Flink - Fusion), V2X(V2X - Flink) Fusion-CAM ]
delays = [[], [], [], [], [], [], [], []]
for i in range(len(stamps[0])):
if appendTimestamps(delays[0], stamps[0][i], stamps[4][i], False): # total
appendTimestamps(delays[1], stamps[0][i], stamps[1][i * 4], True) # OA
appendTimestamps(delays[2], stamps[1][i * 4], stamps[2][i], True) # Fusion
appendTimestamps(delays[3], stamps[2][i], stamps[3][i], True) # Flink
appendTimestamps(delays[4], stamps[3][i], stamps[4][i], True) # V2x
appendTimestamps(delays[5], stamps[0][i], stamps[2][i], True) # Fusion - Cam
print("===length: ", len(delays[0]),len(delays[1]),len(delays[2]),len(delays[3]),len(delays[4]))
delayavg = [0,0,0,0,0,0]
if len(delays[0]) == 0:
print("empty delay array")
quit()
for i in range(len(delays[0])):
delayavg[0] = delayavg[0] + delays[0][i]
delayavg[1] = delayavg[1] + delays[1][i]
delayavg[2] = delayavg[2] + delays[2][i]
delayavg[3] = delayavg[3] + delays[3][i]
delayavg[4] = delayavg[4] + delays[4][i]
delayavg[5] = delayavg[5] + delays[5][i]
for i in range(6):
delayavg[i] = delayavg[i] / len(delays[0])
print("===AVG(Total, OA, Fusion, Flink, V2X): ", delayavg)
frameIntervals = []
for i in range(len(stamps[0]) - 1):
tmp = stamps[0][i + 1] - stamps[0][i]
if tmp < 1000:
frameIntervals.append(stamps[0][i + 1] - stamps[0][i])
## plot
plt.figure()
#plt.plot(delays[0])
#plt.plot(delays[1])
#plt.plot(delays[2])
#plt.plot(delays[3])
plt.plot(delays[4])
#plt.plot(delays[5])
plt.legend(["Total", "OA", "Fusion", "Flink", "V2X", "OA+Fusion"])
plt.show()
'''
## interval
plt.plot(frameIntervals)
plt.show()
'''
print("done!")
sergeimoiseev/othodi | bokehm.py | Python | mit | 4,203 | 0.012848
# -*- coding: UTF-8 -*-
# to run by anaconda
# from bokeh.plotting import figure, output_file, show
import bokeh.plotting as bp
import bokeh_gmapm
import logging
logger = logging.getLogger(__name__)
class Figure(object):
def __init__(self, *args, **kwargs):
self._output_fname = kwargs.get('output_fname',"bokeh.html")
bp.output_file(self._output_fname)
self._use_gmap = kwargs.get('use_gmap',False)
if self._use_gmap and kwargs.get('center_coords',False):
self._p = bokeh_gmapm.create_plot(kwargs['center_coords'],zoom_level = 7)
else:
self._p = bp.figure(plot_width=640, plot_height=480)
def add_line(self, *args,**kwargs):
logger.info("starting line add with points num = %d" % (len(args[0])))
if self._use_gmap:
bokeh_gmapm.add_line(self._p,args[0],**kwargs)
else:
if len(args[0])==0:
lats = [0,1,2,3]
lngs = [2,3,4,5]
else:
c_size=kwargs.get('circle_size',15)
c_color=kwargs.get('circles_color','red')
self._p.line([d['lat'] for d in args[0]],
[d['lng'] for d in args[0]],
size=c_size,color=c_color,alpha=0.5)
self._p.circle([d['lat'] for d in args[0]],
[d['lng'] for d in args[0]],
line_width=c_size/2,color=c_color,alpha=0.5)
return True
def save2html(self):
bp.save(self._p)
return self._output_fname
def show(self):
bp.show(self._p)
return True
# def plot_route_on_basemap(coord_pairs,annotes,added_points_param_list=None):
# bp.output_file("map_bokeh.html")
# p = bp.figure(plot_width=640, plot_height=480)
# lat_list, lng_list = zip(*coord_pairs)
# MIN_L_WIDTH=7
# POINT_SIZE=2*MIN_L_WIDTH
# x_all=[]
# y_all=[]
# for i,point in enumerate(coord_pairs):
# lon = point[-1]
# lat = point[0]
# x,y = lon,lat
# # x,y = m(*[lon,lat])
# x_all.append(x)
# y_all.append(y)
# if (i!=0 and i!=len(annotes)-1):
# pass
# # plt.annotate(annotes[i], xy=(x,y), xytext=(POINT_SIZE/2,POINT_SIZE/2), textcoords='offset points',bbox=dict(boxstyle="round", fc=(1.0, 0.7, 0.7), ec="none"))
# p.line(x_all,y_all,line_width=5,color='red')
# if added_points_param_list!=None:
# added_points_coords = added_points_param_list[0]
# names = added_points_param_list[1]
# x_added=[]
# y_added=[]
# for i,point in enumerate(added_points_coords):
# lat = point[0]
# lon = point[-1]
# # x,y = m(*[lon,lat])
# x,y = lon,lat
# x_added.append(x)
# y_added.append(y)
# if (i!=0 and i!=len(names)-1):
# p.text(x, y, text=[names[i]], text_color="#449944", text_align="left", text_font_size="10pt")
# p.circle(x,y,size=20,color='red',alpha=0.5)
# bp.save(p)
def test_simple_bokeh_plot():
tver_coords = {u'lat':56.8583600, u'lng':35.9005700}
fig = Figure(output_fname='bokehm_simple_test.html',use_gmap=False, center_coords=tver_coords)
line_to_plot = [{u'lat':tver_coords[u'lat']*(1+i*0.0001),
u'lng':tver_coords[u'lng']*(1+i*0.0001)} \
for i in range(10)]
fig.add_line(line_to_plot,circle_size=20, circles_color='green')
fig.save2html()
fig.show()
def test_gmap_bokeh_plot():
tver_coords = {u'lat':56.8583600, u'lng':35.9005700}
fig = Figure(output_fname='bokehm_test.html',use_gmap=True, center_coords=tver_coords)
line_to_plot = []
for i in range(10):
line_to_plot.append({u'lat':tver_coords[u'lat']*(1+i*0.0001), u'lng':tver_coords[u'lng']*(1+i*0.0001)})
print(type(line_to_plot[0]))
fig.add_line(line_to_plot,circle_size=20, circles_color='green')
fig.save2html()
fig.show()
def main():
pass
if __name__ == "__main__":
main()
jakubroztocil/httpie | setup.py | Python | bsd-3-clause | 2,890 | 0.000346
# This is purely the result of trial and error.
import sys
from setuptools import setup, find_packages
import httpie
# Note: keep requirements here to ease distributions packaging
tests_require = [
'pytest',
'pytest-httpbin>=0.0.6',
'responses',
]
dev_require = [
*tests_require,
'flake8',
'flake8-comprehensions',
'flake8-deprecated',
'flake8-mutable',
'flake8-tuple',
'pyopenssl',
'pytest-cov',
'pyyaml',
'twine',
'wheel',
'Jinja2'
]
install_requires = [
'charset_normalizer>=2.0.0',
'defusedxml>=0.6.0',
'requests[socks]>=2.22.0',
'Pygments>=2.5.2',
'requests-toolbelt>=0.9.1',
'multidict>=4.7.0',
'setuptools',
]
install_requires_win_only = [
'colorama>=0.2.4',
]
# Conditional dependencies:
# sdist
if 'bdist_wheel' not in sys.argv:
if 'win32' in str(sys.platform).lower():
# Terminal colors for Windows
install_requires.extend(install_requires_win_only)
# bdist_wheel
extras_require = {
'dev': dev_require,
'test': tests_require,
# https://wheel.readthedocs.io/en/latest/#defining-conditional-dependencies
':sys_platform == "win32"': install_requires_win_only,
}
def long_description():
with open('README.md', encoding='utf-8') as f:
return f.read()
setup(
name='httpie',
version=httpie.__version__,
description=httpie.__doc__.strip(),
long_description=long_description(),
long_description_content_type='text/markdown',
url='https://httpie.io/',
download_url=f'https://github.com/httpie/httpie/archive/{httpie.__version__}.tar.gz',
author=httpie.__author__,
author_email='jakub@roztocil.co',
license=httpie.__licence__,
packages=find_packages(include=['httpie', 'httpie.*']),
entry_points={
'console_scripts': [
'http = httpie.__main__:main',
'https = httpie.__main__:main',
],
},
python_requires='>=3.6',
extras_require=extras_require,
install_requires=install_requires,
classifiers=[
'Development Status :: 5 - Production/Stable',
'Programming Language :: Python',
'Programming Language :: Python :: 3 :: Only',
'Environment :: Console',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'License :: OSI Approved :: BSD License',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Software Development',
'Topic :: System :: Networking',
'Topic :: Terminals',
'Topic :: Text Processing',
'Topic :: Utilities'
],
project_urls={
'GitHub': 'https://github.com/httpie/httpie',
'Twitter': 'https://twitter.com/httpie',
'Discord': 'https://httpie.io/discord',
'Documentation': 'https://httpie.io/docs',
'Online Demo': 'https://httpie.io/run',
},
)
kevinrombach/TNTNodeMonitorBot | src/commandhandler/node.py | Python | apache-2.0 | 10,737 | 0.011456
#!/usr/bin/env python3
import logging
from src import util
from src import etherscan
from src import messages
from crypto.prices import *
logger = logging.getLogger("node")
######
# Telegram command handler for adding nodes for the user who fired the command.
#
# Command: /node :address0;name0 ... :addressN;nameN
#
# Command parameter: :address0 - Address of the first node to add
# :name0 - Name of the first node
# :addressN - Address of the last node to add
# :nameN - Name of the last node
#
# Gets only called by the telegram bot api
######
def nodeAdd(bot, update, args):
response = "*Add*\n\n"
chatId = update.message.chat_id
logger.warning("add - args " + " ".join(args))
logger.warning("add - user: {}".format(update.message.from_user.id))
pool = bot.database.getPoolById(chatId)
if pool == None:
bot.create(bot,update,[])
if len(args) == 0:
response += ("Arguments required: address_0;name_0 ... address_n;name_n\n\n"
"Example: /add 0xFf2ED74286a5686Bc4F4896761718DE031680000;Node1 0xFf2ED74286a5686Bc4F4896761718DE031681111;Node2\n")
valid = False
else:
for arg in args:
valid = True
newNode = arg.split(";")
if len(newNode) != 2:
response += messages.invalidParameterError.format(arg)
valid = False
else:
if not util.validateTntAddress( newNode[0] ):
response += messages.invalidTntAddressError.format(newNode[0])
valid = False
if not util.validateName( newNode[1] ):
response += messages.invalidNameError.format(newNode[1])
valid = False
if valid:
address = newNode[0]
name = newNode[1]
if bot.database.addNode(update.message.chat_id, address, name, update.message.from_user.id,update.message.from_user.username):
response += "Added node {}!\n".format(address)
else:
response += messages.nodeExistsError.format(address)
bot.sendMessage(update.message.chat_id, response )
######
# Telegram command handler for updating nodes for the user who fired the command.
#
# Command: /add :address :newname
#
# Command parameter: :address - Address of the node to update
# :newname - New name for the node
#
# Gets only called by the telegram bot api
######
def nodeUpdate(bot, update, args):
response = "*Update*\n\n"
chatId = update.message.chat_id
logger.warning("update - args " + " ".join(args))
logger.warning("update - user: {}".format(update.message.from_user.id))
pool = bot.database.getPoolById(chatId)
user = bot.database.getUser(chatId, update.message.from_user.id)
if pool == None:
response+= messages.noPoolError
elif user == None:
response += messages.notActiveInPoolError
elif len(args) != 2:
response += ("Exactly 2 arguments required: :address :newname\n"
"Where :address is the address of the node to update and :newname the"
"new name of the node.\n\n"
"Example: /update 0xFf2ED74286a5686Bc4F4896761718DE031680000 MyNewNodeName\n")
else:
valid = True
if not util.validateTntAddress( args[0] ):
response += messages.invalidTntAddressError.format(args[0])
valid = False
elif not util.validateName( args[1] ):
response += messages.invalidNameError.format(args[1])
valid = False
if valid:
address = args[0]
name = args[1]
logger.info("update - {} {}".format(address, user['id']))
if bot.database.getNode(address, user['id']) == None:
response += messages.nodeNotExistsError.format(address)
else:
bot.database.updateNode(address,user['id'], name)
response += "Node successfully updated. {}\n".format(address)
bot.sendMessage(chatId, response )
######
# Telegram command handler for removing nodes for the user who fired the command.
#
# Command: /remove :address
#
# Command parameter: :address - Address of the node to remove
#
#
# Gets only called by the telegram bot api
######
def nodeRemove(bot, update, args):
response = "*Remove*\n\n"
chatId = update.message.chat_id
logger.warning("remove - " + " ".join(args))
logger.warning("remove - user: {}".format(update.message.from_user.id))
pool = bot.database.getPoolById(chatId)
user = bot.database.getUser(chatId, update.message.from_user.id)
if pool == None:
response+= messages.noPoolError
elif user == None:
response += messages.notActiveInPoolError
elif len(args) != 1:
response += ("Argument required: :address\n\n"
"Example: /remove 0xFf2ED74286a5686Bc4F4896761718DE031680000\n")
else:
address = args[0]
if not util.validateTntAddress( address ):
response += "ERROR: Invalid TNT-Address: {}\n".format(address)
else:
logger.info("remove - valid {}".format(address, ))
if bot.database.getNode(address, user['id']) == None:
response += "ERROR: Node {} does not exist!\n".format(address)
else:
bot.database.deleteNode(address,user['id'])
response += "Node {} successfully deleted.\n".format(address)
bot.sendMessage(chatId, response )
######
# Telegram command handler for reading the amounts of each node of the users
# in the pool
#
# Command: /nodes
#
# Gets only called by the telegram bot api
######
def nodes(bot, update):
response = ""
chatId = update.message.chat_id
nodesFound = False
pool = bot.database.getPoolById(chatId)
logger.warning("nodes - {}".format(chatId))
if pool == None:
response = "*Nodes*\n\n"
response += ("You need to create a pool with nodes first. "
"Type /help to show the list of commands.")
else:
tntPrice = liqui(Cryptos.TNT)
addresses = []
for user in bot.database.getUsers(pool['id']):
nodes = bot.database.getNodes(user['id'])
if len(nodes) == 0:
continue
for node in nodes:
addresses.append(node["address"])
amounts = etherscan.getTNTAmount(addresses, pool['api_key'])
for user in bot.database.getUsers(pool['id']):
nodes = bot.database.getNodes(user['id'])
if len(nodes) == 0:
continue
nodesFound = True
response += "*" + user['name'] + "*\n"
total = 0
for node in nodes:
tnt = amounts[node["address"]]
if tnt == -1:
response += node['name'] + " -> Sorry, there was an error.\n".format(tnt)
else:
total += int(tnt)
response += node['name'] + " -> {} TNT\n".format(tnt)
if tntPrice != None:
response += '\n*Total:\n TNT: {}\n USD: {}*\n\n'.format(total,int(total*tntPrice.usd))
else:
response += '\n*Total TNT: {}*\n\n'.format(total)
response += "\n\n"
if not nodesFound and pool:
response = "*Nodes*\n\n"
response += ("There are currently no nodes in this pool. You can create "
"nodes with /add.")
bot.sendMessage(chatId, response )
######
# Telegram command handler for reading the total amounts of all nodes of the users
# in the pool
#
# Command: /total
#
# Gets only called by the telegram bot api
######
def total(bot, update):
response = ""
chatId = update.message.chat_id
nodesFound = False
pool = bot.database.getPoolById(chatId)
logger.warning("total - {}".format(chatId))
if po
nuclear-wizard/moose | python/chigger/tests/utils/test_get_active_filenames.py | Python | lgpl-2.1 | 1,925 | 0.007792
#!/usr/bin/env python3
#pylint: disable=missing-docstring
#* This file is part of the MOOSE framework
#* https://www.mooseframework.org
#*
#* All rights reserved, see COPYRIGHT for full restrictions
#* https://github.com/idaholab/moose/blob/master/COPYRIGHT
#*
#* Licensed under LGPL 2.1, please see LICENSE for details
#* https://www.gnu.org/licenses/lgpl-2.1.html
import os
import unittest
import time
import mooseutils
import chigger
class Test_getActiveFilenames(unittest.TestCase):
@classmethod
def setUpClass(cls):
"""
Copy the temporary files to working directory.
"""
cls.basename = cls.__name__
cls.testfiles = chigger.utils.copy_adaptive_exodus_test_files(cls.basename)
@classmethod
def tearDownClass(cls):
"""
Cleanup test files
"""
for fname in cls.testfiles:
if os.path.exists(fname): os.remove(fname)
def testBasic(self):
"""
Test that all files can be read.
"""
active = chigger.utils.get_active_filenames(self.basename + '.e', self.basename + '.e-s*')
self.assertEqual(len(active), 9)
self.assertEqual(active[0][0], self.basename + '.e')
self.assertEqual(active[-1][0], self.basename + '.e-s009')
def testUpdate(self):
"""
Test that updating the files updates the active list.
"""
# Wait and then "update" the first few files
time.sleep(1.5)
for i in range(5):
print(self.testfiles[i])
mooseutils.touch(self.testfiles[i])
active = chigger.utils.get_active_filenames(self.basename + '.e', self.basename + '.e-s*')
self.assertEqual(len(active), 5)
self.assertEqual(active[0][0], self.basename + '.e')
self.assertEqual(active[-1][0], self.basename + '.e-s005')
if __name__ == '__main__':
unittest.main(module=__name__, verbosity=2)
GaretJax/ppc | fabfile.py | Python | mit | 2,432 | 0.000411
import pipes
from fabric.api import settings, task, local, hide
from fabric.contrib.console import confirm
def is_working_tree_clean():
with settings(hide('everything'), warn_only=True):
local('git update-index -q --ignore-submodules --refresh')
unstaged = local('git diff-files --quiet --ignore-submodules --',
capture=True)
uncommitted = local('git diff-index --cached --quiet HEAD '
'--ignore-submodules --', capture=True)
return unstaged.succeeded and uncommitted.succeeded
@task
def lint():
"""
Checks the source code using flake8.
"""
local('flake8 --statistics --exit-zero --max-complexity=10 '
'--exclude=\'*/migrations/*,build,dist\' .')
@task
def authors():
"""
Updates the AUTHORS file with a list of committers from GIT.
"""
local('git shortlog -s -e -n | cut -f 2- > AUTHORS')
@task
def compass():
local('compass watch -c ppc/assets/sass/config.rb')
@task
def livereload():
local('bundle exec guard')
@task
def release():
"""
Create a new release and upload it to PyPI.
"""
if not is_working_tree_clean():
print 'Your working tree is not clean. Refusing to create a release.'
return
print 'Rebuilding the AUTHORS file to check for modifications...'
authors()
if not is_working_tree_clean():
print (
'Your working tree is not clean after the AUTHORS file was '
'rebuilt.'
)
print 'Please commit the changes before continuing.'
return
# Get version
version = 'v{}'.format(local('python setup.py --version', capture=True))
name = local('python setup.py --name', capture=True)
# Tag
tag_message = '{} release version {}.'.format(name, version)
print '----------------------'
print 'Proceeding will tag the release, push the repository upstream,'
print 'and release a new version on PyPI.'
print
print 'Version: {}'.format(version)
print 'Tag message: {}'.format(tag_message)
print
if not confirm('Continue?', default=True):
print 'Aborting.'
return
local('git tag -a {} -m {}'.format(pipes.quote(version),
pipes.quote(tag_message)))
# Push
local('git push origin master')
# Package and upload to pypi
local('python setup.py sdist upload')
dana-i2cat/felix | vt_manager/src/python/vt_manager/controller/dispatchers/xmlrpc/InformationDispatcher.py | Python | apache-2.0 | 7,769 | 0.022268
from vt_manager.controller.actions.ActionController import ActionController
from vt_manager.controller.drivers.VTDriver import VTDriver
from vt_manager.models.Action import Action
from vt_manager.models.VirtualMachine import VirtualMachine
import xmlrpclib, threading, logging, copy
from vt_manager.communication.utils.XmlHelper import XmlHelper
from vt_manager.models.resourcesHash import resourcesHash
class InformationDispatcher():
@staticmethod
def listResources(remoteHashValue, projectUUID = 'None', sliceUUID ='None'):
logging.debug("Enter listResources")
infoRspec = XmlHelper.getSimpleInformation()
servers = VTDriver.getAllServers()
baseVM = copy.deepcopy(infoRspec.response.information.resources.server[0].virtual_machine[0])
if not servers:
logging.debug("No VTServers available")
infoRspec.response.information.resources.server.pop()
resourcesString = XmlHelper.craftXmlClass(infoRspec)
localHashValue = str(hash(resourcesString))
else:
for sIndex, server in enumerate(servers):
if(sIndex == 0):
baseServer = copy.deepcopy(infoRspec.response.information.resources.server[0])
if(sIndex != 0):
newServer = copy.deepcopy(baseServer)
infoRspec.response.information.resources.server.append(newServer)
InformationDispatcher.__ServerModelToClass(server, infoRspec.response.information.resources.server[sIndex] )
if (projectUUID is not 'None'):
vms = server.getVMs(projectId = projectUUID)
else:
vms = server.getVMs()
if not vms:
logging.debug("No VMs available")
if infoRspec.response.information.resources.server[sIndex].virtual_machine:
infoRspec.response.information.resources.server[sIndex].virtual_machine.pop()
elif (sliceUUID is not 'None'):
vms = vms.filter(sliceId = sliceUUID)
if not vms:
logging.error("No VMs available")
infoRspec.response.information.resources.server[sIndex].virtual_machine.pop()
for vIndex, vm in enumerate(vms):
if (vIndex != 0):
newVM = copy.deepcopy(baseVM)
infoRspec.response.information.resources.server[sIndex].virtual_machine.append(newVM)
InformationDispatcher.__VMmodelToClass(vm, infoRspec.response.information.resources.server[sIndex].virtual_machine[vIndex])
resourcesString = XmlHelper.craftXmlClass(infoRspec)
localHashValue = str(hash(resourcesString))
try:
rHashObject = resourcesHash.objects.get(projectUUID = projectUUID, sliceUUID = sliceUUID)
rHashObject.hashValue = localHashValue
rHashObject.save()
except:
rHashObject = resourcesHash(hashValue = localHashValue, projectUUID= projectUUID, sliceUUID = sliceUUID)
rHashObject.save()
if remoteHashValue == rHashObject.hashValue:
return localHashValue, ''
else:
return localHashValue, resourcesString
@staticmethod
def listVMTemplatesInfo(serverUUID):
#def listVMTemplatesInfo(serverUUID, callbackURL):
logging.debug("Enter listVMTemplatesInfo")
server = VTDriver.getServerByUUID(serverUUID)
xmlrpc_server = xmlrpclib.Server(server.getAgentURL())
templates_info = xmlrpc_server.list_vm_templates(server.getAgentPassword())
#templates_info = xmlrpc_server.list_vm_templates(callbackURL, server.getAgentPassword())
return str(templates_info)
@staticmethod
def forceListActiveVMs(serverID='None', vmID='None'):
if serverID != 'None':
server = VTDriver.getServerById(serverID)
vtam_vms = server.getVMs()
else:
if vmID != 'None':
servers = VTDriver.getAllServers()
vtam_vms = list()
for server in servers:
vtam_vms = server.getVMs(id=int(vmID))
if vtam_vms:
vmID = vtam_vms[0].getUUID()
break
if not vtam_vms:
raise Exception("VM not found")
xmlrpc_server = xmlrpclib.Server(server.getAgentURL())
# Handle safely the connection against the agent
try:
server_active_vms = xmlrpc_server.force_list_active_vms(server.getAgentPassword(), vmID)
for vm in vtam_vms:
if vm.getUUID() in server_active_vms.keys():
vm.setState("running")
vm.save()
else:
# XXX: avoiding "on queue" and "unknown" states to avoid bad management
#if vm.getState() in ['deleting...', 'failed', 'on queue', 'unknown']:
if vm.getState() in ["deleting...", "failed"]:
child = vm.getChildObject()
server = vm.Server.get()
#Action.objects.all().filter(objectUUID = vm.uuid).delete()
server.deleteVM(vm)
# Keep actions table up-to-date after each deletion
vm_uuids = [ vm.uuid for vm in VirtualMachine.objects.all() ]
Action.objects.all().exclude(objectUUID__in = vm_uuids).delete()
elif vm.getState() in ["running", "starting...", "stopping..."] :
vm.setState("stopped")
vm.save()
else:
continue
except:
server_active_vms = dict()
return server_active_vms
@staticmethod
def __ServerModelToClass(sModel, sClass ):
sClass.name = sModel.getName()
#XXX: CHECK THIS
sClass.id = sModel.id
sClass.uuid = sModel.getUUID()
sClass.operating_system_type = sModel.getOSType()
sClass.operating_system_distribution = sModel.getOSDistribution()
sClass.operating_system_version = sModel.getOSVersion()
sClass.virtualization_type = sModel.getVirtTech()
ifaces = sModel.getNetworkInterfaces()
for ifaceIndex, iface in enumerate(ifaces):
if ifaceIndex != 0:
newInterface = copy.deepcopy(sClass.interfaces.interface[0])
sClass.interfaces.interface.append(newInterface)
if iface.isMgmt:
sClass.interfaces.interface[ifaceIndex].ismgmt = True
else:
sClass.interfaces.interface[ifaceIndex].ismgmt = False
sClass.interfaces.interface[ifaceIndex].name = iface.name
sClass.interfaces.interface[ifaceIndex].switch_id= iface.switchID
sClass.interfaces.interface[ifaceIndex].switch_port = iface.port
@staticmethod
def __VMmodelToClass(VMmodel, VMxmlClass):
VMxmlClass.name = VMmodel.getName()
VMxmlClass.uuid = VMmodel.getUUID()
VMxmlClass.status = VMmodel.getState()
VMxmlClass.project_id = VMmodel.getProjectId()
VMxmlClass.slice_id = VMmodel.getSliceId()
VMxmlClass.project_name = VMmodel.getProjectName()
VMxmlClass.slice_name = VMmodel.getSliceName()
VMxmlClass.operating_system_type = VMmodel.getOSType()
VMxmlClass.operating_system_version = VMmodel.getOSVersion()
VMxmlClass.operating_system_distribution = VMmodel.getOSDistribution()
VMxmlClass.virtualization_type = VMmodel.Server.get().getVirtTech()
VMxmlClass.server_id = VMmodel.Server.get().getUUID()
VMxmlClass.xen_configuration.hd_setup_type = VMmodel.getHdSetupType()
VMxmlClass.xen_configuration.hd_origin_path = VMmodel.getHdOriginPath()
VMxmlClass.xen_configuration.virtualization_setup_type = VMmodel.getVirtualizationSetupType()
VMxmlClass.xen_configuration.memory_mb = VMmodel.getMemory()
ActionController.PopulateNetworkingParams(VMxmlClass.xen_configuration.interfaces.interface, VMmodel)
|
zpincus/RisWidget
|
ris_widget/qgraphicsitems/viewport_rect_item.py
|
Python
|
mit
| 1,046 | 0.002868 |
# This code is licensed under the MIT License (see LICENSE file for details)
from PyQt5 import Qt
class ViewportRectItem(Qt.QGraphicsObject):
size_changed = Qt.pyqtSignal(Qt.QSizeF)
def __init__(self):
super().__init__()
self.setFlags(
Qt.QGraphicsItem.ItemIgnoresTransformations |
Qt.QGraphicsItem.ItemSendsGeometryChanges |
Qt.QGraphicsItem.ItemSendsScenePositionChanges |
Qt.QGraphicsItem.ItemHasNoContents
)
self._size = Qt.QSizeF()
# Children are generally overlay items that should appear over anything else rather than z-fighting
self.setZValue(10)
@property
def size(self):
return self._size
@size.setter
def size(self, v):
if not isinstance(v, Qt.QSizeF):
v = Qt.QSizeF(v)
if self._size != v:
self.prepareGeometryChange()
self._size = v
self.size_changed.emit(v)
def boundingRect(self):
return Qt.QRectF(Qt.QPointF(), self._size)
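# A minimal usage sketch (not part of RisWidget itself): the item is intended
# to live in a QGraphicsScene with overlay children parented to it, while its
# size property is driven by whatever tracks the viewport.
if __name__ == '__main__':
    app = Qt.QApplication([])
    scene = Qt.QGraphicsScene()
    item = ViewportRectItem()
    scene.addItem(item)
    item.size_changed.connect(lambda s: print('viewport rect resized to', s))
    item.size = Qt.QSizeF(640, 480)  # triggers prepareGeometryChange and the signal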
|
AlexandreGazagnes/Unduplicator
|
Copier_coller_fichiers.py
|
Python
|
mit
| 2,039 | 0.02207 |
###############################################################################
###############################################################################
# MODULE COPIER_COLLER_FICHIERS
###############################################################################
###############################################################################
###############################################################################
# IMPORT
###############################################################################
import os
import shutil
###############################################################################
# CONSTANTES / VARIABLES
###############################################################################
###############################################################################
# CLASSES
###############################################################################
###############################################################################
# FONCTIONS
###############################################################################
def creer_dossier_doublons(source, dossier):
os.mkdir(source + "/" + dossier)
def copier_fichier(fi, source, dest):
shutil.copy2(fi, str(source + "/" + dest))
def supprimer_fichier(fi, source):
os.remove(source+"/"+fi)
def traiter_fichiers(liste, o, p):
for i in range(len(liste.traitee)):
if liste.doublons[i] != False :
if o.supprimer_doublons :
if liste.doublons[i] == True :
supprimer_fichier(liste[i], repertoire_source)
else :
if o.deplacer_orginal and o.deplacer_doublons :
copier_fichier(fichier, repertoire_source, repertoire_destination)
supprimer_fichier(fichier, repertoire_source)
elif o.deplacer_doublons and not o.deplacer_original:
if liste.doublons[i] == True :
copier_fichier(fichier, repertoire_source, repertoire_destination)
supprimer_fichier(fichier, repertoire_source)
else:
input("probleme")
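# Minimal usage sketch with hypothetical paths (not part of the original
# module): create the "doublons" folder, copy one file into it, then remove
# the original.
if __name__ == "__main__":
    source = "/tmp/photos"  # hypothetical source directory
    creer_dossier_doublons(source, "doublons")
    copier_fichier(source + "/img_001.jpg", source, "doublons")
    supprimer_fichier("img_001.jpg", source)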
|
alkorgun/blacksmith-2
|
expansions/user_stats/insc.py
|
Python
|
apache-2.0
| 813 | 0.048961 |
# coding: utf-8
if DefLANG in ("RU", "UA"):
AnsBase_temp = tuple([line.decode("utf-8") for line in (
"\nВсего входов
|
- %d\nВремя последнего входа - %s\nПоследняя роль - %s", # 0
"\nВремя последнего выхода - %s\nПричина выхода - %s", # 1
"\nНики: %s", # 2
"Нет статистики.", # 3
"«%s» сидит здесь - %s.", # 4
"Ты провёл здесь - %s.", # 5
"Здесь нет такого юзера." # 6
)])
else:
AnsBase_temp = (
"\nTotal joins - %d\nThe Last join-time - %s\nThe last role - %s", # 0
"\nThe last leave-time
|
- %s\nExit reason - %s", # 1
"\nNicks: %s", # 2
"No statistics.", # 3
"'%s' spent here - %s.", # 4
"You spent here - %s.", # 5
"No such user here." # 6
)
|
pacoqueen/odfpy
|
tests/testuserfields.py
|
Python
|
gpl-2.0
| 7,878 | 0.000127 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2007 Michael Howitz, gocept gmbh & co. kg
#
# This is free software. You may redistribute it under the terms
# of the Apache license and the GNU General Public License Version
# 2 or at your option any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# Contributor(s):
#
import unittest
import os
import os.path
import odf.userfield
import tempfile
import zipfile
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
def get_file_path(file_name):
return os.path.join(os.path.dirname(__file__), "examples", file_name)
def get_user_fields(file_path):
return odf.userfield.UserFields(file_path)
class TestUserFields(unittest.TestCase):
userfields_odt = get_file_path("userfields.odt")
userfields_ooo3_odt = get_file_path("userfields_ooo3.odt")
no_userfields_odt = get_file_path("no_userfields.odt")
def setUp(self):
self.unlink_list = []
def tearDown(self):
# delete created destination files
for filename in self.unlink_list:
os.unlink(filename)
def test_exception(self):
# no zip-file
no_zip = odf.userfield.UserFields(__file__)
self.assertRaises(TypeError, no_zip.list_fields)
self.assertRaises(TypeError, no_zip.update, {})
def test_list_fields(self):
""" Find the expected fields in the file """
self.assertEqual([],
get_user_fields(self.no_userfields_odt).list_fields())
self.assertEqual(['username', 'firstname', 'lastname', 'address'],
get_user_fields(self.userfields_odt).list_fields())
def test_list_fields_and_values(self):
""" Find the expected fields and values in the file """
no_user_fields = get_user_fields(self.no_userfields_odt)
self.assertEqual([],
no_user_fields.list_fields_and_values())
self.assertEqual([],
no_user_fields.list_fields_and_values(['username']))
user_fields = get_user_fields(self.userfields_odt)
self.assertEqual([('username', 'string', ''),
('lastname', 'string', '<none>')],
user_fields.list_fields_and_values(['username',
'lastname']))
self.assertEqual(4, len(user_fields.list_fields_and_values()))
def test_list_values(self):
self.assertEqual(
[],
get_user_fields(self.no_userfields_odt).list_values(['username']))
self.assertEqual(
['', '<none>'],
get_user_fields(self.userfields_odt).list_values(
['username', 'lastname']))
def test_get(self):
user_fields = get_user_fields(self.userfields_odt)
self.assertEqual(
None,
get_user_fields(self.no_userfields_odt).get('username'))
self.assertEqual('', user_fields.get('username'))
self.assertEqual('<none>', user_fields.get('lastname'))
self.assertEqual(None, user_fields.get('street'))
def test_get_type_and_value(self):
self.assertEqual(
None,
get_user_fields(self.no_userfields_odt).get_type_and_value(
'username'))
user_fields = get_user_fields(self.userfields_odt)
self.assertEqual(
('string', ''), user_fields.get_type_and_value('username'))
self.assertEqual(
('string', '<none>'),
user_fields.get_type_and_value('lastname'))
self.assertEqual(None, user_fields.get_type_and_value('street'))
def test_update(self):
# test for file without user fields
no_user_fields = get_user_fields(self.no_userfields_odt)
no_user_fields.dest_file = self._get_dest_file_name()
no_user_fields.update({'username': 'mac'})
dest = odf.userfield.UserFields(no_user_fields.dest_file)
self.assertEqual([], dest.list_fields_and_values())
# test for file with user field, including test of encoding
user_fields = get_user_fields(self.userfields_odt)
user_fields.dest_file = self._get_dest_file_name()
user_fields.update({'username': 'mac',
'firstname': u'André',
'street': 'I do not exist'})
dest = odf.userfield.UserFields(user_fields.dest_file)
self.assertEqual([('username', 'string', 'mac'),
('firstname', 'string', 'André'),
('lastname', 'string', '<none>'),
('address', 'string', '')],
dest.list_fields_and_values())
def test_update_open_office_version_3(self):
"""Update fields in OpenOffice.org 3.x version of file."""
user_fields = get_user_fields(self.userfields_ooo3_odt)
user_fields.dest_file = self._get_dest_file_name()
user_fields.update({'username': 'mari',
'firstname': u'Lukas',
'street': 'I might exist.'})
dest = odf.userfield.UserFields(user_fields.dest_file)
self.assertEqual([('username', 'string', 'mari'),
('firstname', 'string', 'Lukas'),
('lastname', 'string', '<none>'),
('address', 'string', '')],
dest.list_fields_and_values())
def test_stringio(self):
# test wether it is possible to use a StringIO as src and dest
src = StringIO(file(self.userfields_odt).read())
dest = StringIO()
# update fields
user_fields = odf.userfield.UserFields(src, dest)
user_fields.update({'username': 'mac',
'firstname': u'André',
'street': 'I do not exist'})
# reread dest StringIO to get field values
dest_user_fields = odf.userfield.UserFields(dest)
self.assertEqual([('username', 'string', 'mac'),
('firstname', 'string', 'André'),
('lastname', 'string', '<none>'),
('address', 'string', '')],
dest_user_fields.list_fields_and_values())
def test_newlines_in_values(self):
# test that newlines in values are encoded correctly so that
# they get read back correctly
user_fields = get_user_fields(self.userfields_odt)
user_fields.dest_file = self._get_dest_file_name()
user_fields.update({'username': 'mac',
'firstname': 'mac',
'lastname': 'mac',
'address': 'Hall-Platz 3\n01234 Testheim'})
dest = odf.userfield.UserFields(user_fields.dest_file)
self.assertEqual([('username', 'string', 'mac'),
('firstname', 'string', 'mac'),
('lastname', 'string', 'mac'),
('address', 'string',
'Hall-Platz 3\n01234 Testheim')],
dest.list_fields_and_values())
def _get_dest_file_name(self):
dummy_fh, dest_file_name = tempfile.mkstemp('.odt')
os.close(dummy_fh)
self.unlink_list.append(dest_file_name)
return dest_file_name
if __name__ == '__main__':
unittest.main()
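# Typical library usage mirrored by the tests above (hypothetical file names):
#
#   fields = odf.userfield.UserFields('letter.odt', 'letter-filled.odt')
#   fields.update({'username': 'mac', 'firstname': u'André'})
#   print fields.list_fields_and_values()   # reads from the source document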
|
rl-institut/reegis_hp
|
reegis_hp/de21/scenario_tools.py
|
Python
|
gpl-3.0
| 22,415 | 0.000491 |
# -*- coding: utf-8 -*-
import pandas as pd
import os
import os.path as path
import logging
from oemof import network
from oemof.solph import EnergySystem
from oemof.solph.options import BinaryFlow, Investment
from oemof.solph.plumbing import sequence
from oemof.solph.network import (Bus, Source, Sink, Flow, LinearTransformer,
Storage)
PARAMETER = (
'conversion_factors', 'nominal_value',
'min', 'max', 'summed_max', 'actual_value', 'fixed_costs', 'variable_costs',
'fixed', 'nominal_capacity', 'capacity_loss', 'inflow_conversion_factor',
'outflow_conversion_factor', 'initial_capacity', 'capacity_min',
'capacity_max', 'balanced', 'sort_index')
INDEX = ('class', 'label', 'source', 'target')
class SolphScenario(EnergySystem):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.p = kwargs.get('parameters')
self.s = kwargs.get('sequences')
self.path = kwargs.get('path', path.dirname(path.realpath(__file__)))
self.name = kwargs.get('name')
def create_parameter_table(self, additional_parameter=None):
"""Create an empty parameter table."""
if additional_parameter is None:
additional_parameter = tuple()
my_index = pd.MultiIndex(levels=[[], [], [], []],
labels=[[], [], [], []],
names=INDEX)
self.p = pd.DataFrame(columns=PARAMETER + tuple(additional_parameter),
index=my_index)
def create_sequence_table(self, datetime_index=None):
"""Create an empty sequence table."""
if datetime_index is None:
datetime_index = self.timeindex
my_index = pd.MultiIndex(
levels=[[], [], [], [], []], labels=[[], [], [], [], []],
names=INDEX + ('attributes',))
df = pd.DataFrame(index=datetime_index, columns=my_index)
self.s = df
def create_tables(self, **kwargs):
"""Create empty scenario tables (sequence and parameter)."""
self.create_parameter_table(
additional_parameter=kwargs.get('additional_parameter'))
self.create_sequence_table(datetime_index=kwargs.get('datetime_index'))
def read_parameter_table(self, filename=None):
"""Read existing parameter table from file."""
if filename is None:
filename = path.join(self.path, self.name + '.csv')
self.p = pd.read_csv(filename, index_col=[0, 1, 2, 3])
def read_sequence_table(self, filename=None):
"""Read existing parameter table from file."""
if filename is None:
filename = path.join(self.path, self.name + '_seq.csv')
self.s = pd.read_csv(filename, header=[0, 1, 2, 3, 4], parse_dates=True,
index_col=0)
def read_tables(self, parameterfile=None, sequencefile=None):
"""Read existing scenario tables (parameter and sequence)"""
self.read_parameter_table(parameterfile)
self.read_sequence_table(sequencefile)
def write_parameter_table(self, filename=None):
"""Write parameter table to file."""
if filename is None:
filename = path.join(self.path, self.name + '.csv')
self.p.sort_values('sort_index', inplace=True)
self.p.fillna('').to_csv(filename)
def write_sequence_table(self, filename=None):
"""Write sequence table to file."""
if filename is None:
filename = path.join(self.path, self.name + '_seq.csv')
self.s.to_csv(filename)
def write_tables(self, parameterfile=None, sequencefile=None):
"""Write scenario tables into two separate files."""
self.write_parameter_table(parameterfile)
self.write_sequence_table(sequencefile)
def create_nodes(self):
"""
Create nodes for a solph.energysystem
Notes
-----
At the moment the nodes_from_csv function does not accept Multiindex
DataFrames therefore the DataFrames need to be reshaped.
"""
tmp1 = pd.DataFrame(
index=self.s.columns).reset_index().transpose().reset_index()
tmp2 = self.s.reset_index()
for n in range(len(tmp2.columns.levels) - 1):
tmp2.columns = tmp2.columns.droplevel(0)
length = len(tmp1.columns)
tmp1.columns = list(range(length))
tmp2.columns = list(range(length))
return nodes_from_csv(
nodes_flows=self.p.reset_index(),
nodes_flows_seq=pd.DataFrame(pd.concat([tmp1, tmp2],
ignore_index=True)))
def add_parameters(self, idx, columns, values):
self.p.loc[idx, columns] = values
self.p = self.p.sortlevel()
def add_sequences(self, idx, seq):
self.s[idx[0], idx[1], idx[2], idx[3], idx[4]] = seq
def add_comment_line(self, comment, sort_entry):
self.p.loc[('### {0}'.format(comment), '', '', ''),
'sort_index'] = sort_entry
self.p = self.p.sortlevel()
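# Usage sketch (hypothetical scenario name and time index; not part of the
# module): create empty parameter/sequence tables, add a section comment and
# write both CSV files next to this script.
#
#   es = SolphScenario(name='de21_test',
#                      timeindex=pd.date_range('2014-01-01', periods=24, freq='H'))
#   es.create_tables()
#   es.add_comment_line('volatile sources', 100)
#   es.write_tables()    # writes de21_test.csv and de21_test_seq.csv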
def function1(row, nodes, classes, flow_attrs, seq_attributes, nodes_flows_seq,
i):
"""
create node if not existent and set attributes
(attributes must be placed either in the first line or in all
lines of multiple node entries (flows) in csv file)
"""
try:
if row['class'] in classes.keys():
node = nodes.get(row['label'])
if node is None:
node = classes[row['class']](label=row['label'])
# for the if check below we use all flow_attrs except
# investment
# because for storages investment needs to be set as a node
# attribute (and a flow attribute)
flow_attrs_ = [i for i in flow_attrs if i != 'investment']
for attr in row.keys():
if (attr not in flow_attrs_ and
attr not in ('class', 'label', 'source', 'target',
'conversion_factors')):
if row[attr] != 'seq':
if attr in seq_attributes:
print(attr)
print(row)
print(row[attr])
print('blubb')
row[attr] = sequence(float(row[attr]))
# again from investment storage the next lines
# are a little hacky as we need to create an
# solph.options.Investment() object
if (isinstance(node, Storage) and
attr == 'investment'):
setattr(node, attr, Investment())
invest_attrs = vars(Investment()).keys()
for iattr in invest_attrs:
if iattr in row.keys() and row[attr]:
setattr(node.investment,
iattr, row[iattr])
# for all 'normal' attributes
else:
setattr(node, attr, row[attr])
else:
seq = nodes_flows_seq.loc[row['class'],
row['label'],
row['source'],
row['target'],
attr]
if attr in seq_attributes:
seq = [i for i in seq]
seq = sequence(seq)
else:
seq = [i for i in seq.values]
setattr(node, attr, seq)
except:
print('Error with node creation in line', i+2, 'in csv file.')
print('Label:', row['label'])
raise
return node
def function2(row, node, flow_attrs, seq_attributes, nodes_flows_seq, i):
"""create flow and set attributes
"""
try:
flow = Flow()
for attr in flow_attrs:
if attr
|
JackDanger/sentry
|
src/sentry/api/endpoints/project_release_file_details.py
|
Python
|
bsd-3-clause
| 7,531 | 0.000266 |
from __future__ import absolute_import
import posixpath
from rest_framework import serializers
from rest_framework.response import Response
from sentry.api.base import DocSection
from sentry.api.bases.project import ProjectEndpoint, ProjectReleasePermission
from sentry.api.exceptions import ResourceDoesNotExist
from sentry.api.serializers import serialize
from sentry.models import Release, ReleaseFile
from sentry.utils.apidocs import scenario, attach_scenarios
try:
from django.http import (
CompatibleStreamingHttpResponse as StreamingHttpResponse
)
except ImportError:
from django.http import StreamingHttpResponse
@scenario('RetrieveReleaseFile')
def retrieve_file_scenario(runner):
rf = runner.utils.create_release_file(
project=runner.default_project,
release=runner.default_release,
path='/demo/readme.txt',
contents='Hello World!'
)
runner.request(
method='GET',
path='/projects/%s/%s/releases/%s/files/%s/' % (
runner.org.slug, runner.default_project.slug,
runner.default_release.version, rf.id)
)
@scenario('UpdateReleaseFile')
def update_file_scenario(runner):
rf = runner.utils.create_release_file(
project=runner.default_project,
release=runner.default_release,
path='/demo/hello.txt',
contents='Good bye World!'
)
runner.request(
method='PUT',
path='/projects/%s/%s/releases/%s/files/%s/' % (
runner.org.slug, runner.default_project.slug,
runner.default_release.version, rf.id),
data={
'name': '/demo/goodbye.txt'
}
)
@scenario('DeleteReleaseFile')
def delete_file_scenario(runner):
rf = runner.utils.create_release_file(
project=runner.default_project,
release=runner.default_release,
path='/demo/badfile.txt',
contents='Whatever!'
)
runner.request(
method='DELETE',
path='/projects/%s/%s/releases/%s/files/%s/' % (
runner.org.slug, runner.default_project.slug,
runner.default_release.version, rf.id)
)
class ReleaseFileSerializer(serializers.Serializer):
name = serializers.CharField(max_length=200, required=True)
class ProjectReleaseFileDetailsEndpoint(ProjectEndpoint):
doc_section = DocSection.RELEASES
permission_classes = (ProjectReleasePermission,)
def download(self, releasefile):
file = releasefile.file
fp = file.getfile()
response = StreamingHttpResponse(
iter(lambda: fp.read(4096), b''),
content_type=file.headers.get('content-type', 'application/octet-stream'),
)
response['Content-Length'] = file.size
response['Content-Disposition'] = 'attachment; filename="%s"' % posixpath.basename(" ".join(releasefile.name.split()))
return response
@attach_scenarios([retrieve_file_scenario])
def get(self, request, project, version, file_id):
"""
Retrieve a Project Release's File
`````````````````````````````````
Return details on an individual file within a release. This does
not actually return the contents of the file, just the associated
metadata.
:pparam string organization_slug: the slug of the organization the
release belongs to.
:pparam string project_slug: the slug of the project to retrieve the
file of.
:pparam string version: the version identifier of the release.
:pparam string file_id: the ID of the file to retrieve.
:auth: required
"""
try:
release = Release.objects.get(
organization_id=project.organization_id,
projects=project,
version=version,
)
except Release.DoesNotExist:
raise ResourceDoesNotExist
try:
releasefile = ReleaseFile.objects.get(
release=release,
id=file_id,
)
except ReleaseFile.DoesNotExist:
raise ResourceDoesNotExist
download_requested = request.GET.get('download') is not None
if download_requested and (
request.access.has_scope('project:write')):
return self.download(releasefile)
elif download_requested:
return Response(status=403)
return Response(serialize(releasefile, request.user))
@attach_scenarios([update_file_scenario])
def put(self, request, project, version, file_id):
"""
Update a File
`````````````
Update metadata of an existing file. Currently only the name of
the file can be changed.
:pparam string organization_slug: the slug of the organization the
release belongs to.
:pparam string project_slug: the slug of the project to update the
file of.
:pparam string version: the version identifier of the release.
:pparam string file_id: the ID of the file to update.
:param string name: the new name of the file.
:auth: required
"""
try:
release = Release.objects.get(
organization_id=project.organization_id,
projects=project,
version=version,
)
except Release.DoesNotExist:
raise ResourceDoesNotExist
try:
releasefile = ReleaseFile.objects.get(
release=release,
id=file_id,
)
except ReleaseFile.DoesNotExist:
raise ResourceDoesNotExist
serializer = ReleaseFileSerializer(data=request.DATA)
if not serializer.is_valid():
return Response(serializer.errors, status=400)
result = serializer.object
releasefile.update(
name=result['name'],
)
return Response(serialize(releasefile, request.user))
@attach_scenarios([delete_file_scenario])
def delete(self, request, project, version, file_id):
"""
Delete a File
`````````````
Permanently remove a file from a release.
This will also remove the physical file from storage.
:pparam string organization_slug: the slug of the organization the
release belongs to.
:pparam string project_slug: the slug of the project to delete the
file of.
:pparam string version: the version identifier of the release.
:pparam string file_id: the ID of the file to delete.
:auth: required
"""
try:
release = Release.objects.get(
organization_id=project.organization_id,
projects=project,
version=version,
)
except Release.DoesNotExist:
raise ResourceDoesNotExist
try:
releasefile = ReleaseFile.objects.get(
release=release,
id=file_id,
)
except ReleaseFile.DoesNotExist:
raise ResourceDoesNotExist
file = releasefile.file
# TODO(dcramer): this doesnt handle a failure from file.deletefile() to
# the actual deletion of the db row
releasefile.delete()
file.delete()
return Response(status=204)
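# Request sketch (hypothetical org/project slugs and file id), mirroring the
# scenario helpers above: fetch the file's metadata, then append ?download to
# stream the raw contents (the download branch requires the project:write
# scope).
#
#   GET /projects/acme/web/releases/1.2.0/files/42/
#   GET /projects/acme/web/releases/1.2.0/files/42/?download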
|
lissyx/build-mozharness
|
mozharness/mozilla/release.py
|
Python
|
mpl-2.0
| 1,945 | 0.002571 |
#!/usr/bin/env python
# ***** BEGIN LICENSE BLOCK *****
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
# ***** END LICENSE BLOCK *****
"""release.py
"""
import os
from mozharness.base.config import parse_config_file
# SignAndroid {{{1
class ReleaseMixin():
release_config = {}
def query_release_config(self):
if self.release_config:
return self.release_config
c = self.config
dirs = self.query_abs_dirs()
if c.get("release_config_file"):
self.info("Getting release config from %s..." % c["release_config_file"])
rc = None
try:
rc = parse_config_file(
os.path.join(dirs['abs_work_dir'],
c["release_config_file"]),
config_dict_name="releaseConfig"
)
except IOError:
self.fatal("Release config file %s not found!" % c["release_config_file"])
except RuntimeError:
self.fatal("Invalid release config file %s!" % c["release_config_file"])
self.release_config['version'] = rc['version']
self.release_config['buildnum'] = rc['buildNumber']
self.release_config['ftp_server'] = rc['stagingServer']
self.release_config['ftp_user'] = c.get('ftp_user', rc['hgUsername'])
self.release_config['ftp_ssh_key'] = c.get('ftp_ssh_key', rc['hgSshKey'])
else:
self.info("No release config file; using default config.")
for key in ('version', 'buildnum',
'ftp_server', 'ftp_user', 'ftp_ssh_key'):
self.release_config[key] = c[key]
self.info("Release config:\n%s" % self.release_config)
return self.release_config
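# query_release_config() expects either the individual config keys or a
# release config file that defines a `releaseConfig` dict. A minimal sketch of
# such a file (illustrative values only, not an actual Mozilla config):
#
#   releaseConfig = {
#       'version': '38.0.5',
#       'buildNumber': 2,
#       'stagingServer': 'stage.example.org',
#       'hgUsername': 'releasebld',
#       'hgSshKey': '~/.ssh/releasebld_rsa',
#   }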
|
timothyparez/PyBitmessage
|
src/bitmessageqt/migrationwizard.py
|
Python
|
mit
| 2,437 | 0.005334 |
#!/usr/bin/env python2.7
from PyQt4 import QtCore, QtGui
class MigrationWizardIntroPage(QtGui.QWizardPage):
def __init__(self):
super(QtGui.QWizardPage, self).__init__()
self.setTitle("Migrating configuration")
label = QtGui.QLabel("This wizard will help you to migrate your configuration. "
"You can still keep using PyBitMessage once you migrate, the changes are backwards compatible.")
label.setWordWrap(True)
layout = QtGui.QVBoxLayout()
layout.addWidget(label)
self.setLayout(layout)
def nextId(self):
return 1
class MigrationWizardAddressesPage(QtGui.QWizardPage):
def __init__(self, addresses):
super(QtGui.QWizardPage, self).__init__()
self.setTitle("Addresses")
label = QtGui.QLabel("Please select addresses that you are already using with mailchuck. ")
label.setWordWrap(True)
layout = QtGui.QVBoxLayout()
layout.addWidget(label)
self.setLayout(layout)
def nextId(self):
return 10
class MigrationWizardGPUPage(QtGui.QWizardPage):
def __init__(self):
super(QtGui.QWizardPage, self).__init__()
self.setTitle("GPU")
label = QtGui.QLabel("Are you using a GPU? ")
label.setWordWrap(True)
layout = QtGui.QVBoxLayout()
layout.addWidget(label)
self.setLayout(layout)
def nextId(self):
return 10
class MigrationWizardConclusionPage(QtGui.QWizardPage):
def __init__(self):
super(QtGui.QWizardPage, self).__init__()
self.setTitle("All done!")
label = QtGui.QLabel("You successfully migrated.")
label.setWordWrap(True)
layout = QtGui.QVBoxLayout()
layout.addWidget(label)
self.setLayout(layout)
class Ui_MigrationWizard(QtGui.QWizard):
def __init__(self, addresses):
super(QtGui.QWizard, self).__init__()
self.pages = {}
page = MigrationWizardIntroPage()
self.setPage(0, page)
self.setStartId(0)
page = MigrationWizardAddressesPage(addresses)
self.setPage(1, page)
page = MigrationWizardGPUPage()
self.setPage(2, page)
page = MigrationWizardConclusionPage()
self.setPage(10, page)
self.setWindowTitle("Migration from PyBitMessage wizard")
self.adjustSize()
self.show()
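# Minimal standalone usage sketch (hypothetical address; normally the wizard
# is launched from within PyBitmessage with the user's real addresses):
if __name__ == "__main__":
    import sys
    app = QtGui.QApplication(sys.argv)
    wizard = Ui_MigrationWizard(["BM-2cExampleAddressOnly"])
    sys.exit(app.exec_())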
|
ProfessorX/Config
|
.PyCharm30/system/python_stubs/-1247972723/samba/dcerpc/dnsserver/DNS_RPC_ENUM_ZONES_FILTER.py
|
Python
|
gpl-2.0
| 1,215 | 0.00823 |
# encoding: utf-8
# module samba.dcerpc.dnsserver
# from /usr/lib/python2.7/dist-packages/samba/dcerpc/dnsserver.so
# by generator 1.135
""" dnsserver DCE/RPC """
# imports
import dcerpc as __dcerpc
import talloc as __talloc
class DNS_RPC_ENUM_ZONES_FILTER(__talloc.Object):
# no doc
def __init__(self, *args, **kwargs): # real signature unknown
pass
@staticmethod # known case of __new__
def __new__(S, *more): # real signature unknown; restored from __doc__
""" T.__new__(S, ...) -> a new object with type S, a subtype of T """
pass
dwFilter = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
dwReserved0 = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
dwRpcStructureVersion = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
pszPartitionFqdn = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
pszQueryString = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
pszReserved = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
|
jhmatthews/cobra
|
source/disk_sub.py
|
Python
|
gpl-2.0
| 3,164 | 0.072377 |
import sys
import numpy as np
import pylab
import matplotlib.pyplot as plt
import scipy.integrate
import scipy.optimize
from collections import namedtuple
import geo
import astro_help as ah
import disk_sub as disk
RADIAN=57.29598
C=2.997925e10
MSOL=1.979e33
G=6.670e-8
YR=3.1556925e7
EPSILON=1e-6
PI=3.1416
STEFAN_BOLTZMANN=5.669e-5
def tdisk (m, mdot, r):
t = 3. * G / (8. * PI * STEFAN_BOLTZMANN) * m * mdot / (r * r * r)
t = pow (t, 0.25)
return (t)
def teff (t, x):
q = (1.e0 - (x ** -0.5e0)) / (x * x * x);
q = t * (q ** 0.25e0);
return (q)
def spec_disk (f1,f2,m,mdot,rmin,rmax):
tref=tdisk(m, mdot, rmin)
nfreq=int((f2/f1)*100)
freq=np.linspace(f1,f2,nfreq)
spec=np.zeros(nfreq)
dfreq=freq[1]-freq[0]
rtemp=np.logspace(np.log10(rmin),np.log10(rmax),num=100)
rdisk=[]
for j in range(len(rtemp)-1):
rdisk.append((rtemp[j]+rtemp[j+1])/2.0)
r=rdisk[j]/rmin
area=PI*(rtemp[j+1]*rtemp[j+1]-rtemp[j]*rtemp[j])
t=(disk.teff(tref,r))
for i in range(len(freq)):
spec[i]=spec[i]+(ah.planck_nu(t,freq[i])*area*PI*2)
return (freq,spec)
def spec_disk1 (f1,f2,m,mdot,rmin,rmax):
tref=tdisk(m, mdot, rmin)
nfreq=1000
freq=np.logspace(np.log10(f1),np.log10(f2),nfreq)
spec=np.zeros(nfreq)
dfreq=freq[1]-freq[0]
rtemp=np.logspace(np.log10(rmin),np.log10(rmax),num=100)
rdisk=[]
for j in range(len(rtemp)-1):
rdisk.append((rtemp[j]+rtemp[j+1])/2.0)
r=rdisk[j]/rmin
area=PI*(rtemp[j+1]*rtemp[j+1]-rtemp[j]*rtemp[j])
t=(disk.teff(tref,r))
for i in range(len(freq)-1):
spec[i]=spec[i]+(ah.planck_nu(t,(freq[i+1]+freq[i])/2.0)*area*PI*2*(freq[i+1]-freq[i]))
return (freq,spec)
def lnu_disk (f,m,mdot,rmin,rmax):
tref=tdisk(m, mdot, rmin)
rtemp=np.logspace(np.log10(rmin),np.log10(rmax),num=100)
rdisk=[]
lnu=0.0
for j in range(len(rtemp)-1):
rdisk.append((rtemp[j]+rtemp[j+1])/2.0)
r=rdisk[j]/rmin
area=PI*(rtemp[j+1]*rtemp[j+1]-rtemp[j]*rtemp[j])
t=(disk.teff(tref,r))
lnu=lnu+(ah.planck_nu(t,f)*area*PI*2.0)
return (lnu)
def llamb_disk (lamb,m,mdot,rmin,rmax):
tref=tdisk(m, mdot, rmin)
rtemp=np.logspace(np.log10(rmin),np.log10(rmax),num=100)
rdisk=[]
llamb=0.0
for j in range(len(rtemp)-1):
rdisk.append((rtemp[j]+rtemp[j+1])/2.0)
r=rdisk[j]/rmin
area=PI*(rtemp[j+1]*rtemp[j+1]-rtemp[j]*rtemp[j])
t=(disk.teff(tref,r))
llamb=llamb+(ah.planck_lamb(t,lamb)*area*PI*2.0)
return (llamb)
def spec_disk2 (f1,f2,m,mdot,rmin,rmax):
tref=tdisk(m, mdot, rmin)
nfreq=10
f1a=10**float(int(np.log10(f1)))
f2a=10**float(int(np.log10(f2))+1)
nrange=int(np.log10((f2a/f1a)))
freq=[]
dfreq=[]
ftemp=f1a
df=f1a/nfreq
for i in range(nrange):
for j in range(nfreq*9):
ftemp=ftemp+df
if ftemp > f2:
break
if ftemp >= f1:
freq.append(ftemp)
df=df*10.0
#print freq[0],freq[len(freq)-1]
spec=np.zeros(len(freq))
rtemp=np.logspace(np.log10(rmin),np.log10(rmax),num=100)
rdisk=[]
for j in range(len(rtemp)-1):
rdisk.append((rtemp[j]+rtemp[j+1])/2.0)
r=rdisk[j]/rmin
area=PI*(rtemp[j+1]*rtemp[j+1]-rtemp[j]*rtemp[j])
t=(disk.teff(tref,r))
for i in range(len(freq)-1):
spec[i]=spec[i]+(ah.planck_nu(t,freq[i])*area*PI*2)
return (freq,spec)
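# Quick usage sketch with illustrative cgs values (hypothetical numbers, not
# taken from the rest of the code base): reference temperature and a coarse
# disc spectrum for a 10 Msun accretor.
if __name__ == "__main__":
    m = 10.0 * MSOL             # accretor mass [g]
    mdot = 1.0e-8 * MSOL / YR   # accretion rate [g/s]
    rmin = 8.86e8               # inner disc radius [cm], hypothetical
    rmax = 100.0 * rmin
    print "tdisk(rmin) =", tdisk(m, mdot, rmin), "K"
    freq, spec = spec_disk2(1e14, 1e17, m, mdot, rmin, rmax)
    print "peak L_nu =", max(spec)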
|
JerryXia/fastgoagent
|
goagent/server/paas/wsgi.py
|
Python
|
mit
| 5,380 | 0.004833 |
#!/usr/bin/env python
# coding=utf-8
# Contributor:
# Phus Lu <phus.lu@gmail.com>
__version__ = '3.1.1'
__password__ = ''
__hostsdeny__ = () # __hostsdeny__ = ('.youtube.com', '.youku.com')
import gevent.monkey
gevent.monkey.patch_all(subprocess=True)
import sys
import errno
import time
import itertools
import logging
import string
import base64
import urlparse
import httplib
import socket
import ssl
import select
TIMEOUT = 20
def message_html(title, banner, detail=''):
MESSAGE_TEMPLATE = '''
<html><head>
<meta http-equiv="content-type" content="text/html;charset=utf-8">
<title>$title</title>
<style><!--
body {font-family: arial,sans-serif}
div.nav {margin-top: 1ex}
div.nav A {font-size: 10pt; font-family: arial,sans-serif}
span.nav {font-size: 10pt; font-family: arial,sans-serif; font-weight: bold}
div.nav A,span.big {font-size: 12pt; color: #0000cc}
div.nav A {font-size: 10pt; color: black}
A.l:link {color: #6f6f6f}
A.u:link {color: green}
//--></style>
</head>
<body text=#000000 bgcolor=#ffffff>
<table border=0 cellpadding=2 cellspacing=0 width=100%>
<tr><td bgcolor=#3366cc><font face=arial,sans-serif color=#ffffff><b>Message</b></td></tr>
<tr><td> </td></tr></table>
<blockquote>
<H1>$banner</H1>
$detail
<p>
</blockquote>
<table width=100% cellpadding=0 cellspacing=0><tr><td bgcolor=#3366cc><img alt="" width=1 height=4></td></tr></table>
</body></html>
'''
return string.Template(MESSAGE_TEMPLATE).substitute(title=title, banner=banner, detail=detail)
class XORCipher(object):
"""XOR Cipher Class"""
def __init__(self, key):
self.__key_gen = itertools.cycle(key).next
def encrypt(self, data):
return ''.join(chr(ord(x) ^ ord(self.__key_gen())) for x in data)
class XORFileObject(object):
"""fileobj for xor"""
def __init__(self, stream, key):
self.__stream = stream
self.__cipher = XORCipher(key)
def __getattr__(self, attr):
if attr not in ('__stream', '__key_gen'):
return getattr(self.__stream, attr)
def read(self, size=-1):
return self.__cipher.encrypt(self.__stream.read(size))
def forward_socket(local, remote, timeout=60, tick=2, bufsize=8192, maxping=None, maxpong=None):
try:
timecount = timeout
while 1:
timecount -= tick
if timecount <= 0:
break
(ins, _, errors) = select.select([local, remote], [], [local, remote], tick)
if errors:
break
if ins:
for sock in ins:
data = sock.recv(bufsize)
if data:
if sock is remote:
local.sendall(data)
timecount = maxpong or timeout
else:
remote.sendall(data)
timecount = maxping or timeout
else:
return
except socket.error as e:
if e.args[0] not in ('timed out', errno.ECONNABORTED, errno.ECONNRESET, errno.EBADF, errno.EPIPE, errno.ENOTCONN, errno.ETIMEDOUT):
raise
finally:
if local:
local.close()
if remote:
remote.close()
def application(environ, start_response):
if environ['REQUEST_METHOD'] == 'GET':
start_response('302 Found', [('Location', 'https://www.google.com')])
raise StopIteration
query_string = environ['QUERY_STRING']
kwargs = dict(urlparse.parse_qsl(query_string))
host = kwargs.pop('host')
port = int(kwargs.pop('port'))
timeout = int(kwargs.get('timeout') or TIMEOUT)
logging.info('%s "%s %s %s" - -', environ['REMOTE_ADDR'], host, port, 'HTTP/1.1')
if __password__ and __password__ != kwargs.get('password'):
random_host = 'g%d%s' % (int(time.time()*100), environ['HTTP_HOST'])
conn = httplib.HTTPConnection(random_host, timeout=timeout)
conn.request('GET', '/')
response = conn.getresponse(True)
status_line = '%s %s' % (response.status, httplib.responses.get(response.status, 'OK'))
start_response(status_line, response.getheaders())
yield response.read()
raise StopIteration
if __hostsdeny__ and host.endswith(__hostsdeny__):
start_response('403 Forbidden', [('Content-Type', 'text/html')])
yield message_html('403 Forbidden Host', 'Hosts Deny(%s)' % host, detail='host=%r' % host)
raise StopIteration
wsgi_input = environ['wsgi.input']
remote = socket.create_connection((host, port), timeout=timeout)
if kwargs.get('ssl'):
remote = ssl.wrap_socket(remote)
while True:
data = wsgi_input.read(8192)
if not data:
break
remote.send(data)
start_response('200 OK', [])
forward_socket(wsgi_input.socket, remote)
yield 'out'
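# Request sketch (hypothetical proxy host): the app expects the tunnel target
# in the query string and forwards the request body to that host, e.g.
#   POST http://paas.example.com/?host=www.example.org&port=443&ssl=1&timeout=20
# plus &password=... when __password__ is set on the server side.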
if __name__ == '__main__':
import gevent.wsgi
logging.basicConfig(level=logging.INFO, format='%(levelname)s - - %(asctime)s %(message)s', datefmt='[%b %d %H:%M:%S]')
server = gevent.wsgi.WSGIServer(('', int(sys.argv[1])), application)
logging.info('local paas_application serving at %s:%s', server.address[0], server.address[1])
server.serve_forever()
|
ewandor/home-assistant
|
homeassistant/helpers/entity_component.py
|
Python
|
apache-2.0
| 17,208 | 0.000058 |
"""Helpers for components that manage entities."""
import asyncio
from datetime import timedelta
from homeassistant import config as conf_util
from homeassistant.setup import async_prepare_setup_platform
from homeassistant.const import (
ATTR_ENTITY_ID, CONF_SCAN_INTERVAL, CONF_ENTITY_NAMESPACE,
DEVICE_DEFAULT_NAME)
from homeassistant.core import callback, valid_entity_id
from homeassistant.exceptions import HomeAssistantError, PlatformNotReady
from homeassistant.loader import get_component
from homeassistant.helpers import config_per_platform, discovery
from homeassistant.helpers.entity import async_generate_entity_id
from homeassistant.helpers.event import (
async_track_time_interval, async_track_point_in_time)
from homeassistant.helpers.service import extract_entity_ids
from homeassistant.util import slugify
from homeassistant.util.async import (
run_callback_threadsafe, run_coroutine_threadsafe)
import homeassistant.util.dt as dt_util
DEFAULT_SCAN_INTERVAL = timedelta(seconds=15)
SLOW_SETUP_WARNING = 10
SLOW_SETUP_MAX_WAIT = 60
PLATFORM_NOT_READY_RETRIES = 10
class EntityComponent(object):
"""Helper class that will help a component manage its entities."""
def __init__(self, logger, domain, hass,
scan_interval=DEFAULT_SCAN_INTERVAL, group_name=None):
"""Initialize an entity component."""
self.logger = logger
self.hass = hass
self.domain = domain
self.entity_id_format = domain + '.{}'
self.scan_interval = scan_interval
self.group_name = group_name
self.entities = {}
self.config = None
self._platforms = {
'core': EntityPlatform(self, domain, self.scan_interval, 0, None),
}
self.async_add_entities = self._platforms['core'].async_add_entities
self.add_entities = self._platforms['core'].add_entities
def setup(self, config):
"""Set up a full entity component.
This doesn't block the executor to protect from deadlocks.
"""
self.hass.add_job(self.async_setup(config))
@asyncio.coroutine
def async_setup(self, config):
"""Set up a full entity component.
Loads the platforms from the config and will listen for supported
discovered platforms.
This method must be run in the event loop.
"""
self.config = config
# Look in config for Domain, Domain 2, Domain 3 etc and load them
tasks = []
for p_type, p_config in config_per_platform(config, self.domain):
tasks.append(self._async_setup_platform(p_type, p_config))
if tasks:
yield from asyncio.wait(tasks, loop=self.hass.loop)
# Generic discovery listener for loading platform dynamically
# Refer to: homeassistant.components.discovery.load_platform()
@callback
def component_platform_discovered(platform, info):
"""Handle the loading of a platform."""
self.hass.async_add_job(
self._async_setup_platform(platform, {}, info))
discovery.async_listen_platform(
self.hass, self.domain, component_platform_discovered)
def extract_from_service(self, service, expand_group=True):
"""Extract all known entities from a service call.
Will return all entities if no entities specified in call.
Will return an empty list if entities specified but unknown.
"""
return run_callback_threadsafe(
self.hass.loop, self.async_extract_from_service, service,
expand_group
).result()
@callback
def async_extract_from_service(self, service, expand_group=True):
"""Extract all known and available entities from a service call.
Will return all entities if no entities specified in call.
Will return an empty list if entities specified but unknown.
This method must be run in the event loop.
"""
if ATTR_ENTITY_ID not in service.data:
return [entity for entity in self.entities.values()
if entity.available]
return [self.entities[entity_id] for entity_id
in extract_entity_ids(self.hass, service, expand_group)
if entity_id in self.entities and
self.entities[entity_id].available]
@asyncio.coroutine
def _async_setup_platform(self, platform_type, platform_config,
discovery_info=None, tries=0):
"""Set up a platform for this component.
This method must be run in the event loop.
"""
platform = yield from async_prepare_setup_platform(
self.hass, self.config, self.domain, platform_type)
if platform is None:
return
# Config > Platform > Component
scan_interval = (
platform_config.get(CONF_SCAN_INTERVAL) or
getattr(platform, 'SCAN_INTERVAL', None) or self.scan_interval)
parallel_updates = getattr(
platform, 'PARALLEL_UPDATES',
int(not hasattr(platform, 'async_setup_platform')))
entity_namespace = platform_config.get(CONF_ENTITY_NAMESPACE)
key = (platform_type, scan_interval, entity_namespace)
if key not in self._platforms:
entity_platform = self._platforms[key] = EntityPlatform(
self, platform_type, scan_interval, parallel_updates,
entity_namespace)
else:
entity_platform = self._platforms[key]
self.logger.info("Setting up %s.%s", self.domain, platform_type)
warn_task = self.hass.loop.call_later(
SLOW_SETUP_WARNING, self.logger.warning,
"Setup of platform %s is taking over %s seconds.", platform_type,
SLOW_SETUP_WARNING)
try:
if getattr(platform, 'async_setup_platform', None):
task = platform.async_setup_platform(
self.hass, platform_config,
entity_platform.async_schedule_add_entities, discovery_info
)
else:
# This should not be replaced with hass.async_add_job because
# we don't want to track this task in case it blocks startup.
task = self.hass.loop.run_in_executor(
None, platform.setup_platform, self.hass, platform_config,
entity_platform.schedule_add_entities, discovery_info
)
yield from asyncio.wait_for(
asyncio.shield(task, loop=self.hass.loop),
SLOW_SETUP_MAX_WAIT, loop=self.hass.loop)
yield from entity_platform.async_block_entities_done()
self.hass.config.components.add(
'{}.{}'.format(self.domain, platform_type))
except PlatformNotReady:
tries += 1
wait_time = min(tries, 6) * 30
self.logger.warning(
'Platform %s not ready yet. Retrying in %d seconds.',
platform_type, wait_time)
async_track_point_in_time(
self.hass, self._async_setup_platform(
platform_type, platform_config, discovery_info, tries),
dt_util.utcnow() + timedelta(seconds=wait_time))
except asyncio.TimeoutError:
self.logger.error(
"Setup of platform %s is taking longer than %s seconds."
" Startup will proceed without waiting any longer.",
platform_type, SLOW_SETUP_MAX_WAIT)
except Exception: # pylint: disable=broad-except
self.logger.exception(
"Error while setting up platform %s", platform_type)
finally:
warn_task.cancel()
def add_entity(self, entity, platform=None, update_before_add=False):
"""Add entity to component."""
return run_coroutine_threadsafe(
self.async_add_entity(entity, platform, update_before_add),
self.hass.loop
).result()
@asyncio.coroutine
def async_add_entity(self, entity, platform=None, update_before_add=False):
|
MarkWh1te/xueqiu_predict
|
python3_env/lib/python3.4/site-packages/bpython/test/test_autocomplete.py
|
Python
|
mit
| 16,423 | 0 |
# encoding: utf-8
from collections import namedtuple
import inspect
import keyword
import sys
try:
import unittest2 as unittest
except ImportError:
import unittest
try:
import jedi
has_jedi = True
except ImportError:
has_jedi = False
from bpython import autocomplete
from bpython._py3compat import py3
from bpython.test import mock
is_py34 = sys.version_info[:2] >= (3, 4)
if is_py34:
glob_function = 'glob.iglob'
else:
glob_function = 'glob.glob'
class TestSafeEval(unittest.TestCase):
def test_catches_syntax_error(self):
with self.assertRaises(autocomplete.EvaluationError):
autocomplete.safe_eval('1re', {})
class TestFormatters(unittest.TestCase):
def test_filename(self):
completer = autocomplete.FilenameCompletion()
last_part_of_filename = completer.format
self.assertEqual(last_part_of_filename('abc'), 'abc')
self.assertEqual(last_part_of_filename('abc/'), 'abc/')
self.assertEqual(last_part_of_filename('abc/efg'), 'efg')
self.assertEqual(last_part_of_filename('abc/efg/'), 'efg/')
self.assertEqual(last_part_of_filename('/abc'), 'abc')
self.assertEqual(last_part_of_filename('ab.c/e.f.g/'), 'e.f.g/')
def test_attribute(self):
self.assertEqual(autocomplete.after_last_dot('abc.edf'), 'edf')
def completer(matches):
mock_completer = autocomplete.BaseCompletionType()
mock_completer.matches = mock.Mock(return_value=matches)
return mock_completer
class TestGetCompleter(unittest.TestCase):
def test_no_completers(self):
self.assertTupleEqual(autocomplete.get_completer([], 0, ''),
([], None))
def test_one_completer_without_matches_returns_empty_list_and_none(self):
a = completer([])
self.assertTupleEqual(autocomplete.get_completer([a], 0, ''),
([], None))
def test_one_completer_returns_matches_and_completer(self):
a = completer(['a'])
self.assertTupleEqual(autocomplete.get_completer([a], 0, ''),
(['a'], a))
def test_two_completers_with_matches_returns_first_matches(self):
a = completer(['a'])
b = completer(['b'])
self.assertEqual(autocomplete.get_completer([a, b], 0, ''), (['a'], a))
def test_first_non_none_completer_matches_are_returned(self):
a = completer([])
b = completer(['a'])
self.assertEqual(autocomplete.get_completer([a, b], 0, ''), ([], None))
def test_only_completer_returns_None(self):
a = completer(None)
self.assertEqual(autocomplete.get_completer([a], 0, ''), ([], None))
def test_first_completer_returns_None(self):
a = completer(None)
b = completer(['a'])
self.assertEqual(autocomplete.get_completer([a, b], 0, ''), (['a'], b))
class TestCumulativeCompleter(unittest.TestCase):
def completer(self, matches, ):
mock_completer = autocomplete.BaseCompletionType()
mock_completer.matches = mock.Mock(return_value=matches)
return mock_completer
def test_no_completers_fails(self):
with self.assertRaises(ValueError):
autocomplete.CumulativeCompleter([])
def test_one_empty_completer_returns_empty(self):
a = self.completer([])
cumulative = autocomplete.CumulativeCompleter([a])
self.assertEqual(cumulative.matches(3, 'abc'), set())
def test_one_none_completer_returns_none(self):
a = self.completer(None)
cumulative = autocomplete.CumulativeCompleter([a])
self.assertEqual(cumulative.matches(3, 'abc'), None)
def test_two_completers_get_both(self):
a = self.completer(['a'])
b = self.completer(['b'])
cumulative = autocomplete.CumulativeCompleter([a, b])
self.assertEqual(cumulative.matches(3, 'abc'), set(['a', 'b']))
class TestFilenameCompletion(unittest.TestCase):
def setUp(self):
self.completer = autocomplete.FilenameCompletion()
def test_locate_fails_when_not_in_string(self):
self.assertEqual(self.completer.locate(4, "abcd"), None)
def test_locate_succeeds_when_in_string(self):
self.assertEqual(self.completer.locate(4, "a'bc'd"), (2, 4, 'bc'))
def test_issue_491(self):
self.assertNotEqual(self.completer.matches(9, '"a[a.l-1]'), None)
@mock.patch(glob_function, new=lambda text: [])
def test_match_returns_none_if_not_in_string(self):
self.assertEqual(self.completer.matches(2, 'abcd'), None)
@mock.patch(glob_function, new=lambda text: [])
def test_match_returns_empty_list_when_no_files(self):
self.assertEqual(self.completer.matches(2, '"a'), set())
@mock.patch(glob_function, new=lambda text: ['abcde', 'aaaaa'])
@mock.patch('os.path.expanduser', new=lambda text: text)
@mock.patch('os.path.isdir', new=lambda text: False)
@mock.patch('os.path.sep', new='/')
def test_match_returns_files_when_files_exist(self):
self.assertEqual(sorted(self.completer.matches(2, '"x')),
['aaaaa', 'abcde'])
@mock.patch(glob_function, new=lambda text: ['abcde', 'aaaaa'])
@mock.patch('os.path.expanduser', new=lambda text: text)
@mock.patch('os.path.isdir', new=lambda text: True)
@mock.patch('os.path.sep', new='/')
def test_match_returns_dirs_when_dirs_exist(self):
self.assertEqual(sorted(self.completer.matches(2, '"x')),
['aaaaa/', 'abcde/'])
@mock.patch(glob_function,
new=lambda text: ['/expand/ed/abcde', '/expand/ed/aaaaa'])
@mock.patch('os.path.expanduser',
new=lambda text: text.replace('~', '/expand/ed'))
@mock.patch('os.path.isdir', new=lambda text: False)
@mock.patch('os.path.sep', new='/')
def test_tilde_stays_pretty(self):
self.assertEqual(sorted(self.completer.matches(4, '"~/a')),
['~/aaaaa', '~/abcde'])
@mock.patch('os.path.sep', new='/')
def test_formatting_takes_just_last_part(self):
self.assertEqual(self.completer.format('/hello/there/'), 'there/')
self.assertEqual(self.completer.format('/hello/there'), 'there')
class MockNumPy(object):
"""This is a mock numpy object that raises an error when there is an atempt
to convert it to a boolean."""
def __nonzero__(self):
raise ValueError("The truth value of an array with more than one "
"element is ambiguous. Use a.any() or a.all()")
class TestDictKeyCompletion(unittest.TestCase):
def test_set_of_keys_returned_when_matches_found(self):
com = autocomplete.DictKeyCompletion()
local = {'d': {"ab": 1, "cd": 2}}
self.assertSetEqual(com.matches(2, "d[", locals_=local),
set(["'ab']", "'cd']"]))
def test_none_returned_when_eval_error(self):
com = autocomplete.DictKeyCompletion()
local = {'e': {"ab": 1, "cd": 2}}
self.assertEqual(com.matches(2, "d[", locals_=local), None)
def test_none_returned_when_not_dict_type(self):
com = autocomplete.DictKeyCompletion()
local = {'l': ["ab", "cd"]}
self.assertEqual(com.matches(2, "l[", locals_=local), None)
def test_none_returned_when_no_matches_left(self):
com = autocomplete.DictKeyCompletion()
local = {'d': {"ab": 1, "cd": 2}}
self.assertEqual(com.matches(3, "d[r", locals_=local), None)
def test_obj_that_does_not_allow_conversion_to_bool(self):
com = autocomplete.DictKeyCompletion()
local = {'mNumPy': MockNumPy()}
self.assertEqual(com.matches(7, "mNumPy[", locals_=local), None)
class Foo(object):
a = 10
def __init__(self):
self.b = 20
def method(self, x):
pass
class OldStyleFoo:
a = 10
def __init__(self):
self.b = 20
def method(self, x):
pass
skip_old_style = unittest.skipIf(py3,
'In Python 3 there are no old style classes')
class Properties(Foo):
@property
def asserts_when_called(self):
raise AssertionError
|
anurag03/integration_tests
|
cfme/ansible/repositories.py
|
Python
|
gpl-2.0
| 13,574 | 0.001842 |
# -*- coding: utf-8 -*-
"""Page model for Automation/Ansible/Repositories"""
import attr
from navmazing import NavigateToAttribute, NavigateToSibling
from widgetastic.exceptions import NoSuchElementException
from widgetastic.widget import Checkbox, Fillable, ParametrizedView, Text, View
from widgetastic_manageiq import PaginationPane, ParametrizedSummaryTable, Table
from widgetastic_patternfly import Button, Dropdown, Input
from cfme.base.login import BaseLoggedInPage
from cfme.common import Taggable, TagPageView
from cfme.exceptions import ItemNotFound
from cfme.modeling.base import BaseCollection, BaseEntity
from cfme.utils.appliance.implementations.ui import navigator, navigate_to, CFMENavigateStep
from cfme.utils.wait import wait_for
from .playbooks import PlaybooksCollection
class RepositoryBaseView(BaseLoggedInPage):
title = Text(locator='.//div[@id="main-content"]//h1')
@property
def in_ansible_repositories(self):
return (
self.logged_in_as_current_user and
self.navigation.currently_selected == ["Automation", "Ansible", "Repositories"]
)
class RepositoryAllView(RepositoryBaseView):
@View.nested
class toolbar(View): # noqa
configuration = Dropdown("Configuration")
policy = Dropdown(text='Policy')
entities = Table(".//div[@id='gtl_div']//table")
paginator = PaginationPane()
@property
def is_displayed(self):
return self.in_ansible_repositories and self.title.text == "Repositories"
class RepositoryDetailsView(RepositoryBaseView):
@View.nested
class toolbar(View): # noqa
refresh = Button(title="Refresh this page")
configuration = Dropdown("Configuration")
download = Button(title="Download summary in PDF format")
policy = Dropdown(text='Policy')
@View.nested
class entities(View): # noqa
summary = ParametrizedView.nested(ParametrizedSummaryTable)
@property
def is_displayed(self):
return (
self.in_ansible_repositories and
self.title.text == "{} (Summary)".format(self.context["object"].name)
)
class RepositoryFormView(RepositoryBaseView):
name = Input(name="name")
description = Input(name="description")
url = Input(name="scm_url")
scm_credentials = Dropdown("Select credentials")
scm_branch = Input(name="scm_branch")
# SCM Update Options
clean = Checkbox(name="clean")
delete_on_update = Checkbox(name="scm_delete_on_update")
update_on_launch = Checkbox(name="scm_update_on_launch")
cancel_button = Button("Cancel")
class RepositoryAddView(RepositoryFormView):
add_button = Button("Add")
@property
def is_displayed(self):
return (
self.in_ansible_repositories and
self.title.text == "Add new Repository"
)
class RepositoryEditView(RepositoryFormView):
save_button = Button("Save")
reset_button = Button("Reset")
@property
def is_displayed(self):
return (
self.in_ansible_repositories and
self.title.text == 'Edit Repository "{}"'.format(self.context["object"].name)
)
@attr.s
class Repository(BaseEntity, Fillable, Taggable):
"""A class representing one Embedded Ansible repository in the UI."""
name = attr.ib()
url = attr.ib()
description = attr.ib(default="")
scm_credentials = attr.ib(default=None)
scm_branch = attr.ib(default=False)
clean = attr.ib(default=False)
delete_on_update = attr.ib(default=False)
update_on_launch = attr.ib(default=None)
_collections = {'playbooks': PlaybooksCollection}
@property
def db_object(self):
table = self.appliance.db.client["configuration_script_sources"]
return self.appliance.db.client.sessionmaker(autocommit=True).query(table).filter(
table.name == self.name).first()
@property
def playbooks(self):
return self.collections.playbooks
@property
def as_fill_value(self):
"""For use when selecting this repo in the UI forms"""
return self.name
def update(self, updates):
"""Update the repository in the UI.
Args:
updates (dict): :py:class:`dict` of the updates.
"""
original_updated_at = self.db_object.updated_at
view = navigate_to(self, "Edit")
changed = view.fill(updates)
if changed:
view.save_button.click()
else:
view.cancel_button.click()
view = self.create_view(RepositoryAllView)
assert view.is_displayed
view.flash.assert_no_error()
if changed:
if self.appliance.version < "5.9":
msg = 'Edit of Repository "{}" was successfully initialized.'
else:
msg = 'Edit of Repository "{}" was successfully initiated.'
view.flash.assert_message(msg.format(updates.get("name", self.name)))
def _wait_until_changes_applied():
changed_updated_at = self.db_object.updated_at
return not original_updated_at == changed_updated_at
wait_for(_wait_until_changes_applied, delay=10, timeout="5m")
else:
view.flash.assert_message(
'Edit of Repository "{}" cancelled by the user.'.format(self.name))
@property
    def exists(self):
try:
navigate_to(self, "Details")
return True
except ItemNotFound:
return False
def delete(self):
"""Delete the repository in the UI."""
|
view = navigate_to(self, "Details")
if self.appliance.version < "5.9":
remove_str = "Remove this Repository"
else:
remove_str = "Remove this Repository from Inventory"
view.toolbar.configuration.item_select(remove_str, handle_alert=True)
repo_list_page = self.create_view(RepositoryAllView)
assert repo_list_page.is_displayed
repo_list_page.flash.assert_no_error()
repo_list_page.flash.assert_message(
'Delete of Repository "{}" was successfully initiated.'.format(self.name))
wait_for(
lambda: not self.exists,
delay=10,
timeout=120,
fail_func=repo_list_page.browser.selenium.refresh)
def refresh(self):
"""Perform a refresh to update the repository."""
view = navigate_to(self, "Details")
view.toolbar.configuration.item_select("Refresh this Repository", handle_alert=True)
view.flash.assert_no_error()
view.flash.assert_message("Embedded Ansible refresh has been successfully initiated")
@attr.s
class RepositoryCollection(BaseCollection):
"""Collection object for the :py:class:`cfme.ansible.repositories.Repository`."""
ENTITY = Repository
def create(self, name, url, description=None, scm_credentials=None, scm_branch=None,
clean=None, delete_on_update=None, update_on_launch=None):
"""Add an ansible repository in the UI and return a Repository object.
Args:
name (str): name of the repository
url (str): url of the repository
description (str): description of the repository
scm_credentials (str): credentials of the repository
scm_branch (str): branch name
clean (bool): clean
delete_on_update (bool): delete the repo at each update
update_on_launch (bool): update the repo at each launch
Returns: an instance of :py:class:`cfme.ansible.repositories.Repository`
"""
add_page = navigate_to(self, "Add")
fill_dict = {
"name": name,
"description": description,
"url": url,
"scm_credentials": scm_credentials,
"scm_branch": scm_branch,
"clean": clean,
"delete_on_update": delete_on_update,
"update_on_launch": update_on_launch
}
add_page.fill(fill_dict)
add_page.add_button.click()
repo_list_page = self.create_view(RepositoryAllView)
        assert repo_list_page.is_displayed
|
mackorone/catan
|
src/view.py
|
Python
|
mit
| 8,044 | 0.001119 |
from color import Color
from orientation import Orientation
from tile import (
Terrain,
Harbor,
)
CENTER_TILE_TEMPLATE = [
list(' + -- + '),
list(' / \ '),
list('+ +'),
list(' \ / '),
list(' + -- + '),
]
BORDER_TILE_TEMPLATE = [
list(' | -- | '),
list(' - - '),
list(' | | '),
list(' - - '),
list(' | -- | '),
]
NUMBER_SPACES = [
(2, 4), (2, 5)
]
PERIMETER_SPACES = [
(0, 2), (0, 4),
(0, 5), (0, 7),
(1, 1), (1, 8),
(2, 0), (2, 2),
(2, 7), (2, 9),
(3, 1), (3, 8),
(4, 2), (4, 4),
(4, 5), (4, 7),
]
RESOURCE_SPACES = [
(1, 3), (1, 4),
(1, 5), (1, 6),
(2, 2), (2, 7),
(3, 3), (3, 4),
(3, 5), (3, 6),
]
# TODO: upforgrabs
# Fix ports to work with all size boards
# HARBOR_BRIDGE_SPACES = {
# Orientation.NORTH_EAST: [(2, 7), (1, 6)],
# Orientation.NORTH: [(1, 6), (1, 3)],
# Orientation.NORTH_WEST: [(1, 3), (2, 2)],
# Orientation.SOUTH_WEST: [(2, 2), (3, 3)],
# Orientation.SOUTH: [(3, 3), (3, 6)],
# Orientation.SOUTH_EAST: [(3, 6), (2, 7)],
# }
def remove_border_characters(board, coordinate, diff, tile_grid):
# First, calculate some helper values
helper_value_one = board.size.width // 2
helper_value_two = board.size.height - helper_value_one
# Top vertical ticks
if (
coordinate.row == -1 or
coordinate.column == -1
):
tile_grid[0][2] = ' '
tile_grid[0][7] = ' '
# Top horizonal ticks
else:
tile_grid[0][4] = ' '
tile_grid[0][5] = ' '
# Bottom vertical ticks
if (
coordinate.row == board.size.height or
coordinate.column == board.size.height
):
tile_grid[4][2] = ' '
tile_grid[4][7] = ' '
# Bottom horizonal ticks
else:
tile_grid[4][4] = ' '
tile_grid[4][5] = ' '
# Upper left single tick
if not (
coordinate.column == -1 and
coordinate.row < helper_value_one
):
tile_grid[1][1] = ' '
# Upper right single tick
if not (
coordinate.row == -1 and
coordinate.column < helper_value_one
):
tile_grid[1][8] = ' '
# Bottom left single tick
if not (
coordinate.row == board.size.height and
helper_value_two <= coordinate.column
):
tile_grid[3][1] = ' '
# Bottom right single tick
if not (
coordinate.column == board.size.height and
helper_value_two <= coordinate.row
):
tile_grid[3][8] = ' '
# Left vertical ticks
if abs(diff) <= helper_value_one or diff < 0:
tile_grid[0][2] = ' '
tile_grid[2][2] = ' '
tile_grid[4][2] = ' '
# Right vertical ticks
if abs(diff) <= helper_value_one or 0 < diff:
tile_grid[0][7] = ' '
tile_grid[2][7] = ' '
tile_grid[4][7] = ' '
return tile_grid
def copy_grid(grid):
return [[char for char in row] for row in grid]
def grid_to_str(grid):
return '\n'.join(''.join(row) for row in grid)
def str_to_grid(string):
return [[c for c in line] for line in string.split('\n')]
def get_tile_grid(tile, tile_grid):
tile_grid = copy_grid(tile_grid)
tile_grid = replace_numbers(tile, tile_grid)
tile_grid = replace_perimeter(tile, tile_grid)
tile_grid = replace_resources(tile, tile_grid)
return tile_grid
def replace_numbers(tile, tile_grid):
if isinstance(tile, Harbor):
return tile_grid
if not tile.number:
return tile_grid
if isinstance(tile, Terrain):
number_string = str(tile.number).zfill(len(NUMBER_SPACES))
tile_grid = copy_grid(tile_grid)
for row, col in NUMBER_SPACES:
index = col - min(NUMBER_SPACES)[1]
tile_grid[row][col] = number_string[index]
return tile_grid
def replace_perimeter(tile, tile_grid):
tile_grid = copy_grid(tile_grid)
for row, col in PERIMETER_SPACES:
colored = Color.GRAY.apply(tile_grid[row][col])
tile_grid[row][col] = colored
# TODO: upforgrabs
# Fix ports to work with all size boards
# if isinstance(tile, Harbor) and tile.orientation:
# spaces = HARBOR_BRIDGE_SPACES[tile.orientation]
# for row, col in spaces:
# char = '-'
# if row != 2:
    #                 char = '\\' if (row == 1) == (col == 3) else '/'
# tile_grid[row][col] = Color.GRAY.apply(char)
return tile_grid
def replace_resources(tile, tile_grid):
if isinstance(tile, Terrain):
if not tile.resource:
return tile_grid
spaces = RESOURCE_SPACES
if isinstance(tile, Harbor):
# TODO: upforgrabs
# Fix ports to work with all size boards
        # if not tile.orientation:
# return tile_grid
return tile_grid
spaces = NUMBER_SPACES
char = '?'
if tile.resource:
char = tile.resource.color.apply(tile.resource.char)
tile_grid = copy_grid(tile_grid)
for row, col in spaces:
tile_grid[row][col] = char
return tile_grid
class View(object):
def __init__(self, board):
self.board = board
def __str__(self):
return grid_to_str(self.get_board_grid())
def get_board_grid(self):
# Add two to the height and width of the
# board to account for the perimeter tiles
num_tiles_tall = self.board.size.height + 2
num_tiles_wide = self.board.size.width + 2
# The number of characters tall and wide for the tile grid
tile_grid_height = len(CENTER_TILE_TEMPLATE)
tile_grid_narrow = len(''.join(CENTER_TILE_TEMPLATE[0]).strip())
tile_grid_wide = len(''.join(CENTER_TILE_TEMPLATE[2]).strip())
# The number of characters tall and wide for the board grid
total_grid_height = (tile_grid_height - 1) * num_tiles_tall + 1
total_grid_width = (
(num_tiles_wide // 2 + 1) * (tile_grid_wide - 1) +
(num_tiles_wide // 2 ) * (tile_grid_narrow - 1) + 1
)
# Create a 2D array of empty spaces, large enough to
# contain all characters for all tiles (but no larger)
board_grid = [
[' ' for i in range(total_grid_width)]
for j in range(total_grid_height)
]
# For all board tiles ...
for coordinate, tile in self.board.tiles.items():
# ... determine some intermediate values ...
# Note: We add +1 here to account for perimeter tiles
sum_ = (coordinate.row + 1) + (coordinate.column + 1)
diff = (coordinate.row + 1) - (coordinate.column + 1)
# ... and use them to figure the location of the upper
# left corner of the tile grid within the board grid ...
spaces_from_top = sum_ * (tile_grid_height // 2)
spaces_from_left = (
((num_tiles_wide // 2) - diff) *
((tile_grid_wide + tile_grid_narrow) // 2 - 1)
)
# ... then retrieve the base tile grid for the tile ...
template = (
CENTER_TILE_TEMPLATE if
isinstance(tile, Terrain) else
remove_border_characters(
board=self.board,
coordinate=coordinate,
diff=diff,
tile_grid=copy_grid(BORDER_TILE_TEMPLATE),
)
)
# ... and then replace the blank characters in the board
# grid with the correct characters from the tile grid
tile_grid = get_tile_grid(tile, template)
for i, tile_line in enumerate(tile_grid):
for j, char in enumerate(tile_line):
if ' ' not in char:
row = board_grid[spaces_from_top + i]
row[spaces_from_left + j] = char
# Trim extra columns off front and back of the grid
board_grid = [row[2:-2] for row in board_grid]
return board_grid
|
tapomayukh/projects_in_python
|
classification/Classification_with_kNN/Single_Contact_Classification/Scaled_Features/best_kNN_PCA/objects/test11_cross_validate_objects_1200ms_scaled_method_v.py
|
Python
|
mit
| 4,915 | 0.020753 |
# Principal Component Analysis Code :
from numpy import mean,cov,double,cumsum,dot,linalg,array,rank,size,flipud
from pylab import *
import numpy as np
import matplotlib.pyplot as pp
#from enthought.mayavi import mlab
import scipy.ndimage as ni
import roslib; roslib.load_manifest('sandbox_tapo_darpa_m3')
import rospy
#import hrl_lib.mayavi2_util as mu
import hrl_lib.viz as hv
import hrl_lib.util as ut
import hrl_lib.matplotlib_util as mpu
import pickle
from mvpa.clfs.knn import kNN
from mvpa.datasets import Dataset
from mvpa.clfs.transerror import TransferError
from mvpa.misc.data_generators import normalFeatureDataset
from mvpa.algorithms.cvtranserror import CrossValidatedTransferError
from mvpa.datasets.splitters import NFoldSplitter
import sys
sys.path.insert(0, '/home/tapo/svn/robot1_data/usr/tapo/data_code/Classification/Data/Single_Contact_kNN/Scaled')
from data_method_V import Fmat_original
def pca(X):
#get dimensions
num_data,dim = X.shape
#center data
mean_X = X.mean(axis=1)
M = (X-mean_X) # subtract the mean (along columns)
Mcov = cov(M)
###### Sanity Check ######
i=0
n=0
while i < 123:
j=0
while j < 140:
if X[i,j] != X[i,j]:
print X[i,j]
print i,j
n=n+1
j = j+1
i=i+1
print n
    ##########################
    print 'PCA - COV-Method used'
val,vec = linalg.eig(Mcov)
#return the projection matrix, the variance and the mean
return vec,val,mean_X, M, Mcov
def my_mvpa(Y,num2):
#Using PYMVPA
PCA_data = np.array(Y)
PCA_label_2 = ['Styrofoam-Fixed']*5 + ['Books-Fixed']*5 + ['Bucket-Fixed']*5 + ['Bowl-Fixed']*5 + ['Can-Fixed']*5 + ['Box-Fixed']*5 + ['Pipe-Fixed']*5 + ['Styrofoam-Movable']*5 + ['Container-Movable']*5 + ['Books-Movable']*5 + ['Cloth-Roll-Movable']*5 + ['Black-Rubber-Movable']*5 + ['Can-Movable']*5 + ['Box-Movable']*5 + ['Rug-Fixed']*5 + ['Bubble-Wrap-1-Fixed']*5 + ['Pillow-1-Fixed']*5 + ['Bubble-Wrap-2-Fixed']*5 + ['Sponge-Fixed']*5 + ['Foliage-Fixed']*5 + ['Pillow-2-Fixed']*5 + ['Rug-Movable']*5 + ['Bubble-Wrap-1-Movable']*5 + ['Pillow-1-Movable']*5 + ['Bubble-Wrap-2-Movable']*5 + ['Pillow-2-Movable']*5 + ['Cushion-Movable']*5 + ['Sponge-Movable']*5
clf = kNN(k=num2)
terr = TransferError(clf)
ds1 = Dataset(samples=PCA_data,labels=PCA_label_2)
cvterr = CrossValidatedTransferError(terr,NFoldSplitter(cvtype=1),enable_states=['confusion'])
error = cvterr(ds1)
return (1-error)*100
def result(eigvec_total,eigval_total,mean_data_total,B,C,num_PC):
# Reduced Eigen-Vector Matrix according to highest Eigenvalues..(Considering First 20 based on above figure)
W = eigvec_total[:,0:num_PC]
m_W, n_W = np.shape(W)
# Normalizes the data set with respect to its variance (Not an Integral part of PCA, but useful)
length = len(eigval_total)
s = np.matrix(np.zeros(length)).T
i = 0
while i < length:
s[i] = sqrt(C[i,i])
i = i+1
Z = np.divide(B,s)
m_Z, n_Z = np.shape(Z)
#Projected Data:
Y = (W.T)*B # 'B' for my Laptop: otherwise 'Z' instead of 'B'
m_Y, n_Y = np.shape(Y.T)
return Y.T
if __name__ == '__main__':
Fmat = Fmat_original
# Checking the Data-Matrix
m_tot, n_tot = np.shape(Fmat)
print 'Total_Matrix_Shape:',m_tot,n_tot
eigvec_total, eigval_total, mean_data_total, B, C = pca(Fmat)
#print eigvec_total
#print eigval_total
#print mean_data_total
m_eigval_total, n_eigval_total = np.shape(np.matrix(eigval_total))
m_eigvec_total, n_eigvec_total = np.shape(eigvec_total)
m_mean_data_total, n_mean_data_total = np.shape(np.matrix(mean_data_total))
print 'Eigenvalue Shape:',m_eigval_total, n_eigval_total
print 'Eigenvector Shape:',m_eigvec_total, n_eigvec_total
print 'Mean-Data Shape:',m_mean_data_total, n_mean_data_total
#Recall that the cumulative sum of the eigenvalues shows the level of variance accounted by each of the corresponding eigenvectors. On the x axis there is the number of eigenvalues used.
perc_total = cumsum(eigval_total)/sum(eigval_total)
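    # Illustrative sketch (not part of the original script): perc_total can be
    # used to pick the smallest number of components reaching a target fraction
    # of the variance; the 0.90 threshold and the name num_PC_90 are assumptions.
    num_PC_90 = int(np.argmax(perc_total >= 0.90)) + 1
    print 'PCs needed to reach 90% of total variance:', num_PC_90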
num_PC=1
while num_PC <=20:
Proj = np.zeros((140,num_PC))
Proj = result(eigvec_total,eigval_total,mean_data_total,B,C,num_PC)
# PYMVPA:
num=0
cv_acc = np.zeros(21)
while num <=20:
cv_acc[num] = my_mvpa(Proj,num)
num = num+1
plot(np.arange(21),cv_acc,'-s')
grid('True')
hold('True')
num_PC = num_PC+1
legend(('1-PC', '2-PCs', '3-PCs', '4-PCs', '5-PCs', '6-PCs', '7-PCs', '8-PCs', '9-PCs', '10-PCs', '11-PC', '12-PCs', '13-PCs', '14-PCs', '15-PCs', '16-PCs', '17-PCs', '18-PCs', '19-PCs', '20-PCs'))
ylabel('Cross-Validation Accuracy')
xlabel('k in k-NN Classifier')
show()
|
skooda/passIon
|
index.py
|
Python
|
mit
| 1,879 | 0.002129 |
import logging
import pickle
from time import time
from hashlib import md5
from base64 import urlsafe_b64encode
from os import urandom
import redis
from flask import Flask, request, render_template
import config
logging.basicConfig(level=logging.DEBUG)
app = Flask(__name__,static_folder='public')
r = redis.StrictRedis(
    host=config.REDIS_HOST,
port=config.REDIS_PORT,
db=config.REDIS_DB,
password=config.REDIS_PASSWORD
)
@app.route('/set', methods=['post'])
def setPass():
    assert request.method == 'POST'
password = request.form['pass']
iv = request.form['iv']
uuid = urlsafe_b64encode(md5(urandom(128)).digest())[:8].decode('utf-8')
with r.pipeline() as pipe:
data = {'status': 'ok', 'iv': iv, 'pass': password}
pipe.set(uuid, pickle.dumps(data))
pipe.expire(uuid, config.TTL)
pipe.execute()
return '/get/{}'.format(uuid)
@app.route('/get/<uuid>', methods=['get'])
def getPass(uuid):
with r.pipeline() as pipe:
raw_data = r.get(uuid)
if not raw_data:
return render_template('expired.html')
data = pickle.loads(raw_data)
if data['status'] == 'ok':
new_data = {'status': 'withdrawn', 'time': int(time()), 'ip': request.remote_addr}
r.set(uuid, pickle.dumps(new_data))
return render_template('get.html', data=data['iv'] + '|' + data['pass'])
if data['status'] == 'withdrawn':
return render_template('withdrawn.html')
@app.route('/', methods=['get'])
def index():
ttl = int(config.TTL/60)
return render_template('index.html', ttl=ttl)
if __name__ == '__main__':
try:
port = config.APP_PORT
except AttributeError:
port = 5000
try:
host = config.APP_HOST
except AttributeError:
host = '127.0.0.1'
app.run(host=host, port=port)
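# Illustrative client sketch (not part of the original service). It assumes the
# app above is reachable at http://127.0.0.1:5000 and that 'pass' and 'iv' were
# produced by client-side encryption before being posted:
#   import requests
#   link = requests.post('http://127.0.0.1:5000/set',
#                        data={'pass': '<ciphertext>', 'iv': '<iv>'}).text
#   print('http://127.0.0.1:5000' + link)  # one-time URL; a second visit shows the "withdrawn" page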
|
commonsense/divisi
|
doc/demo/vendor_only_svd.py
|
Python
|
gpl-3.0
| 1,871 | 0.008017 |
# Put libraries such as Divisi in the PYTHONPATH.
import sys, pickle, os
sys.path = ['/stuff/openmind'] + sys.path
from csc.divisi.cnet import *
from csc.divisi.graphics import output_svg
from vendor_db import iter_info
from csamoa.corpus.models import *
from csamoa.conceptnet.models import *
# Load the OMCS language model
en = Language.get('en')
en_nl=get_nl('en')
# Load OMCS stopwords
sw = open('stopwords.txt', 'r')
swords = [x.strip() for x in sw.readlines()]
# Parameters
factor = 1
wsize = 2
def check_concept(concept):
try:
Concept.get(concept, 'en')
return True
except:
return False
def english_window(text):
windows = []
    words = [x for x in text.lower().replace('&', 'and').split() if x not in swords]
for x in range(len(words)-wsize+1):
pair = " ".join(words[x:x+wsize])
if check_concept(pair): windows.append(pair)
        if check_concept(words[x]): windows.append(words[x])
for c in range(wsize-1):
if check_concept(words[c]): windows.append(words[c])
return windows
if 'vendor_only.pickle' in os.listdir('.'):
print "Loading saved matrix."
matrix = pickle.load(open("vendor_only.pickle"))
else:
print "Creating New Tensor"
matrix = SparseLabeledTensor(ndim=2)
print "Adding Vendors"
for co, englist in iter_info('CFB_Cities'):
print co
for phrase in englist:
parts = english_window(phrase)
print parts
for part in parts:
matrix[co, ('sells', part)] += factor
matrix[part, ('sells_inv', co)] += factor
pickle.dump(matrix, open("vendor_only.pickle", 'w'))
print "Normalizing."
matrix = matrix.normalized()
print "Matrix constructed. Running SVD."
svd = matrix.svd(k=10)
svd.summarize()
output_svg(svd.u, "vendorplain.svg", xscale=3000, yscale=3000, min=0.03)
|
rbarlow/pulp_docker
|
plugins/test/unit/plugins/importers/data.py
|
Python
|
gpl-2.0
| 450 | 0.002222 |
import os
busybox_tar_path = os.path.join(os.path.dirname(__file__), '../../../data/busyboxlight.tar')
# these are in correct ancestry order
busybox_ids = (
'769b9341d937a3dba9e460f664b4f183a6cecdd62b337220a28b3deb50ee0a02',
'48e5f45168b97799ad0aafb7e2fef9fac57b5f16f6db7f67ba2000eb947637eb',
    'bf747efa0e2fa9f7c691588ce3938944c75607a7bb5e757f7369f86904d97c78',
    '511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158',
)
|
hall-lab/svtools
|
svtools/bedpesort.py
|
Python
|
mit
| 1,152 | 0.008681 |
import sys
import argparse
from svtools.external_cmd import ExternalCmd
class BedpeSort(ExternalCmd):
def __init__(self):
super(BedpeSort, self).__init__('bedpesort', 'bin/bedpesort')
def description():
return 'sort a BEDPE file'
def epilog():
    return 'To read in stdin and output to a file, use /dev/stdin or - as the first positional argument.'
def add_arguments_to_parser(parser):
    parser.add_argument('input', metavar='<BEDPE file>', nargs='?', help='BEDPE file to sort')
parser.add_argument('output', metavar='<output file>', nargs='?', help='output file to write to')
parser.set_defaults(entry_point=run_from_args)
def command_parser():
parser = argparse.ArgumentParser(description=description())
add_arguments_to_parser(parser)
return parser
def run_from_args(args):
opts = list()
if args.input:
opts.append(args.input)
if args.output:
opts.append(args.output)
sort_cmd_runner = BedpeSort()
sort_cmd_runner.run_cmd_with_options(opts)
if __name__ == "__main__":
parser = command_parser()
args = parser.parse_args()
sys.exit(args.entry_point(args))
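# Example invocations (illustrative; they assume the usual `svtools bedpesort`
# console entry point and follow the stdin hint given in the epilog above):
#   svtools bedpesort input.bedpe sorted.bedpe
#   cat input.bedpe | svtools bedpesort - sorted.bedpe   # read from stdin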
|
Alwnikrotikz/micolog2
|
plugins/akismet.py
|
Python
|
gpl-3.0
| 8,593 | 0.055148 |
#!/usr/bin/env python
#coding=utf-8
'''
Created on 2010-4-27
GPL License
@author: sypxue@gmail.com
'''
import urllib,pickle,StringIO
from micolog_plugin import *
from google.appengine.ext import db
from model import OptionSet,Comment,Blog,Entry,Blog
from google.appengine.api import urlfetch
class akismet(Plugin):
def __init__(self):
Plugin.__init__(self,__file__)
self.author="sypxue"
self.authoruri="http://sypxue.appspot.com"
self.uri="http://sypxue.appspot.com"
self.description="""Wordpress自带的Akismet插件的micolog版本,现在已实现过滤Spam,提交Spam,解除Spam等功能,开启即可使用,也可输入自己的Akismet API Key使用 。Author: sypxue@gmail.com"""
self.name="Akismet"
self.version="0.3.2"
self.AKISMET_VERSION = "2.2.7"
self.AKISMET_default_Key = "80e9452f5962"
self.register_action("pre_comment",self.pre_comment)
self.register_action("save_comment",self.save_comment)
def comment_handler(self,comment,action,*arg1,**arg2):
        # rm indicates whether detected spam comments should be filtered out automatically
rm=OptionSet.getValue("Akismet_AutoRemove",False)
if action=='pre' and rm!=True:
return
elif action=='save' and rm==True:
return
url = arg2['blog'].baseurl
user_agent = os.environ.get('HTTP_USER_AGENT','')
referrer = os.environ.get('HTTP_REFERER', 'unknown')
AkismetItem = {
'user_agent':user_agent,
'referrer':referrer,
'user_ip' : comment.ip,
'comment_type' : 'comment',
'comment_author' : comment.author.encode('utf-8'),
'comment_author_email' : comment.email,
'comment_author_url' : comment.weburl,
'comment_content' : comment.content.encode('utf-8')
}
apikey = OptionSet.getValue("Akismet_code",default=self.AKISMET_default_Key)
if len(apikey)<5:
apikey = self.AKISMET_default_Key
m = AkismetManager(apikey,url)
if m.IsSpam(AkismetItem):
if rm==True:
raise Exception
sComments=OptionSet.getValue("Akismet_Comments_v0.3",[])
if type(sComments)!=type([]):
sComments=[]
db.Model.put(comment)
sComments.append({'key':(str(comment.key()),str(comment.entry.key())),
'other':{'user_agent':user_agent,'referrer':referrer,'url':url}})
OptionSet.setValue("Akismet_Comments_v0.3",
sComments)
comment.entry.commentcount-=1
comment.entry.put()
e,comment.entry = comment.entry,None
try:
db.Model.put(comment)
comment.entry = e
except:
pass
def pre_comment(self,comment,*arg1,**arg2):
self.comment_handler(comment,'pre',*arg1,**arg2)
def save_comment(self,comment,*arg1,**arg2):
self.comment_handler(comment,'save',*arg1,**arg2)
def filter(self,content,*arg1,**arg2):
code=OptionSet.getValue("Akismet_code",default="")
return content+str(code)
def SubmitAkismet(self,item,url,f):
apikey = OptionSet.getValue("Akismet_code",default=self.AKISMET_default_Key)
if len(apikey)<5:
apikey = self.AKISMET_default_Key
m = AkismetManager(apikey,url)
try:
if f=="Ham":
m.SubmitHam(item)
elif f=="Spam":
m.SubmitSpam(item)
except:
pass
def get(self,page):
code=OptionSet.getValue("Akismet_code",default="")
up=OptionSet.getValue("Akismet_Comments_v0.3",default=[])
rm=OptionSet.getValue("Akismet_AutoRemove",False)
if type(up)!=type([]):
up=[]
delkey = page.param('delkey')
rekey = page.param('rekey')
if rekey or delkey:
newup = []
for i in up:
cmtkey = i['key'][0];
enykey = i['key'][1];
if delkey and cmtkey==delkey:
cm = Comment.get(cmtkey)
db.Model.delete(cm)
elif rekey and cmtkey==rekey:
cm = Comment.get(cmtkey)
eny = Entry.get(enykey)
eny.commentcount+=1
eny.put()
cm.entry = eny
db.Model.put(cm)
self.SubmitAkismet({
'user_agent':i['other']['user_agent'],
'referrer':i['other']['referrer'],
'user_ip' : cm.ip,
'comment_type' : 'comment',
'comment_author' : cm.author.encode('utf-8'),
'comment_author_email' : cm.email,
'comment_author_url' : cm.weburl,
'comment_content' : cm.content.encode('utf-8')
},i['other'].get('url',''),"Ham")
else:
newup.append(i)
if not len(up)==len(newup):
OptionSet.setValue("Akismet_Comments_v0.3",newup)
up = newup
cmts = [(Comment.get(i['key'][0]),Entry.get(i['key'][1])) for i in up]
comments = [u'<tr><td>%s</td><td>%s</td><td>%s</td><td>%s</td><td>%s</td><td><a target="_blank" href="/%s">%s</a></td><td><a href="?delkey=%s" title="删除">删除</a> <a href="?rekey=%s" title="这不是一个垃圾评论">还原</a></td></tr>'%(i[0].date,
i[0].author,i[0].content,i[0].email,i[0].ip,i[1].link,i[1].title,str(i[0].key()),str(i[0].key())) for i in cmts if i is not None and i[0] is not None]
comments = ''.join(comments)
apikey = OptionSet.getValue("Akismet_code",default=self.AKISMET_default_Key)
        if len(apikey)<5:
            apikey = self.AKISMET_default_Key
api = AkismetManager(apikey,Blog.all()[0].baseurl)
if not code:
status = ''
elif api.IsValidKey():
status = 'True'
else:
status = 'False'
if rm==True:
rmchecked='checked="checked"'
else:
rmchecked=''
return u'''<h3>Akismet</h3>
<form action="" method="post">
<p>Akismet Api Key:</p>
<input name="code" style="width:400px;" value="%s"> %s
<br />
<p>自动删除检测到的垃圾评论:
<input type="checkbox" name="autorm" value="1" %s></p>
<p>删除一条正常的评论并提交Spam(输入评论的ID):</p>
<input name="spam" style="width:400px;" value="">
<br />
<input type="submit" value="submit">
</form>
<div>
<br />
<h3>被过滤的评论</h3> <table class="widefat"><thead><tr><th>日期</th><th>作者</th><th>内容</th><th>电子邮件</th><th>IP地址</th><th>文章/页面</th><th style="width:15%%;">选择操作</th></tr></thead><tbody>%s </tbody></table>
</div>'''%(code,status,rmchecked,comments)
def post(self,page):
code=page.param("code")
OptionSet.setValue("Akismet_code",code)
rm=page.param('autorm')
if rm and int(rm)==1:
rm=True
else:
rm=False
oldrm = OptionSet.getValue("Akismet_AutoRemove",False)
if oldrm!=rm:
OptionSet.setValue("Akismet_AutoRemove",rm)
spam=page.param("spam")
spam = len(spam)>0 and int(spam) or 0
sOther = ""
if spam>0:
cm = Comment.get_by_id(spam)
try:
url = Blog.all().fetch(1)[0].baseurl
self.SubmitAkismet({
'user_ip' : cm.ip,
'comment_type' : 'comment',
'comment_author' : cm.author.encode('utf-8'),
'comment_author_email' : cm.email,
'comment_author_url' : cm.weburl,
'comment_content' : cm.content.encode('utf-8')
},url,"Spam")
sOther = u"<div style='padding:8px;margin:8px;border:1px solid #aaa;color:red;'>评论已删除</div>"
cm.delit()
except:
sOther = u"<div style='padding:8px;margin:8px;border:1px solid #aaa;color:red;'>无法找到对应的评论项</div>"
return sOther + self.get(page)
class AkismetManager():
def __init__(self,key,url):
self.ApiKey = key
self.Url = url
def ExecuteRequest(self,url,content,method="GET"):
request = urlfetch.fetch(url,
method='POST',
payload=content
)
return request
def IsValidKey(self):
content = "key=%(key)s&blog=%(url)s&"%{'key':self.ApiKey,'url':self.Url}
response = self.ExecuteRequest("http://rest.akismet.com/1.1/verify-key",
content).content
if response and response == 'valid':
return True
else:
return False
def IsSpam(self,item=None):
if not item:
raise Exception
content = self.AddDefaultFields(urllib.urlencode(item))
response = self.ExecuteRequest(
"http://%(key)s.rest.akismet.com/1.1/comment-check"%{'key':self.ApiKey},
content).content
if response:
result = {'true':True,'false': False}
return result[response]
return False
def SubmitSpam(self,item):
if not item:
raise Exception
content = self.AddDefaultFields(urllib.urlencode(item))
response = self.ExecuteRequest(
"http://%(key)s.rest.akismet.com/1.1/submit-spam"%{'key':self.ApiKey},
content).content
if response == 'invalid':
raise Exception
elif len(response)>0:
raise Exception
def SubmitHam(self,item):
if not item:
raise Exception
content = self.AddDefaultFields(urllib.urlencode(item))
response = self.ExecuteRequest(
"http://%(key)s.rest.akismet.com/1.1/submit-ham"%{'key':self.ApiKey},
content).content
if response == 'invalid':
raise Exception
elif len(response)>0:
            raise Exception
|
deshmukhmayur/django-todo
|
todo/views.py
|
Python
|
mit
| 166 | 0.006024 |
from django.shortcuts import render
from django.http import HttpResponse
# Create your views here.
def index(request):
return render(request, 'todo/index.html')
|
swegener/micropython
|
ports/esp8266/modules/neopixel.py
|
Python
|
mit
| 836 | 0 |
# NeoPixel driver for MicroPython on ESP8266
# MIT license; Copyright (c) 2016 Damien P. George
from esp import neopixel_write
class NeoPixel:
ORDER = (1, 0, 2, 3)
def __init__(self, pin, n, bpp=3):
        self.pin = pin
        self.n = n
self.bpp = bpp
self.buf = bytearray(n * bpp)
self.pin.init(pin.OUT)
def __setitem__(self, index, val):
offset = index * self.bpp
for i in range(self.bpp):
self.buf[offset + self.ORDER[i]] = val[i]
def __getitem__(self, index):
offset = index * self.bpp
        return tuple(self.buf[offset + self.ORDER[i]]
                     for i in range(self.bpp))
def fill(self, color):
for i in range(self.n):
self[i] = color
def write(self):
neopixel_write(self.pin, self.buf, True)
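# Illustrative usage sketch (assumptions: MicroPython on ESP8266 with an 8-LED
# strip wired to GPIO4; the pin number and strip length are not from this file):
#   from machine import Pin
#   np = NeoPixel(Pin(4, Pin.OUT), 8)
#   np.fill((0, 0, 0))      # clear all pixels
#   np[0] = (255, 0, 0)     # set the first pixel to red
#   np.write()              # push the buffer out to the LEDs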
|
headlins/tida
|
Tida.py
|
Python
|
lgpl-3.0
| 3,125 | 0.03296 |
#!/usr/bin/env python
import os
from gi.repository import Gtk
from gi.repository import Vte
from gi.repository import GLib
from gi.repository import Keybinder
from gi.repository import Gdk
class Tida(Gtk.Window):
"""A micro-drop-down terminal like TILDA"""
def __init__(self, config=None):
Gtk.Window.__init__(self)
self.init_config(config)
self.init_icon()
self.init_terminal()
Gtk.main()
def init_config(self, config=None):
"""Initialise the program with config if exists, else set default values"""
if config != None:
self.set_default_size(config['width'], config['heigth'])
self.set_decorated(config['decorated'])
self.set_skip_taskbar_hint(config['skip_taskbar_hint'])
self.set_keep_above(config['keep_above'])
self.set_skip_pager_hint(config['skip_pager_hint'])
self.set_modal(config['modal'])
s = Gdk.Screen.get_default()
c = (s.get_width() - self.get_size()[0]) / 2.
self.move(int(c), 0)
else:
self.set_decorated(False)
self.set_skip_taskbar_hint(True)
self.set_keep_above(True)
self.set_skip_pager_hint(False)
self.set_modal(False)
self.set_default_size(720, 300)
self.move(323, 0)
self.init_keybinder(config)
def init_icon(self):
"""Initialise status icon"""
self.status_icon = Gtk.StatusIcon()
abs_file_name = os.path.join(os.path.dirname(__file__), "terminal.png")
self.status_icon.set_from_file(abs_file_name)
self.status_icon.set_title("StatusIcon TIDA")
self.status_icon.set_tooltip_text("TIDA :>")
def init_terminal(self):
"""Initialise and add new Vte Terminal to Window"""
self.term = Vte.Terminal()
self.term.set_scrollback_lines(-1)
self.term.connect('child-exited', Gtk.main_quit)
self.term.fork_command_full(Vte.PtyFlags.DEFAULT, os.environ['HOME'], ['/usr/bin/bash'], [], GLib.SpawnFlags.DO_NOT_REAP_CHILD, None, None)
self.add(self.term)
self.connect('delete-event', Gtk.main_quit)
def init_keybinder(self, config):
"""Initialise keybinder and bind some keys (toggle, copy, paste)"""
Keybinder.init()
Keybinder.set_use_cooked_accelerators(False)
self.bind_all_key(config['key_toggle_visibility'],
config['key_copy_to_clipboard'],
config['key_paste_from_clipboard'])
def bind_all_key(self, key_toggle, key_copy, key_paste):
"""Bind all keys used with tida"""
Keybinder.bind(key_toggle, self.callback_toggle_visibility, "asd")
Keybinder.bind(key_copy, self.callback_copy, "asd")
Keybinder.bind(key_paste, self.callback_paste, "asd")
def callback_copy(self, key, asd):
"""Ca
|
llback function used when press the shortcut for copy to clipboard"""
if self.is_visible():
self.term.copy_clipboard()
return True
return False
def callback_paste(self, key, asd):
"""Callback function used when press the shortcut for paste from clipboard"""
if self.is_visible():
            self.term.paste_clipboard()
            return True
return False
def callback_toggle_visibility(self, key, asd):
"""Callback function used when press the shortcut for toggle visibility of tida"""
if self.is_visible():
self.hide()
else:
self.show_all()
|
0x1001/funornot
|
utils/image_convert.py
|
Python
|
gpl-2.0
| 593 | 0.025295 |
import md5
import os
import sys
path = sys.argv[1]
db_file = open(os.path.join(path,"pics_mysql.txt"),"w")
for file_name in os.listdir(path):
if not file_name.lower().endswith(".gif"): continue
with open(os.path.join(path,file_name),"rb") as fp:
contents = fp.read()
new_file_name = md5.new(contents).hexdigest() + ".gif"
print file_name + " --> " + new_file_name
os.rename(os.path.join(path,file_name),os.path.join(path,new_file_name))
    db_file.write('INSERT INTO pics (name) VALUES ("' + new_file_name + '");\n')
db_file.close()
|
TieWei/nova
|
nova/tests/objects/test_pci_device.py
|
Python
|
apache-2.0
| 12,888 | 0.000155 |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
from nova import context
from nova import db
from nova import exception
from nova.objects import instance
from nova.objects import pci_device
from nova.tests.objects import test_objects
dev_dict = {
'compute_node_id': 1,
'address': 'a',
'product_id': 'p',
'vendor_id': 'v',
'status': 'available'}
fake_db_dev = {
    'created_at': None,
'updated_at': None,
'deleted_at': None,
'deleted': None,
'id': 1,
'compute_node_id': 1,
'address': 'a',
'vendor_id': 'v',
'product_id': 'p',
'dev_type': 't',
'status': 'available',
'dev_id': 'i',
'label': 'l',
'instance_uuid': None,
'extra_info': '{}',
}
fake_db_dev_1 = {
'created_at': None,
'updated_at': None,
'deleted_at': None,
'deleted': None,
'id': 2,
'compute_node_id': 1,
'address': 'a1',
'vendor_id': 'v1',
'product_id': 'p1',
'dev_type': 't',
'status': 'available',
'dev_id': 'i',
'label': 'l',
'instance_uuid': None,
'extra_info': '{}',
}
class _TestPciDeviceObject(object):
def _create_fake_instance(self):
self.inst = instance.Instance()
self.inst.uuid = 'fake-inst-uuid'
self.inst.pci_devices = pci_device.PciDeviceList()
def _create_fake_pci_device(self):
ctxt = context.get_admin_context()
self.mox.StubOutWithMock(db, 'pci_device_get_by_addr')
db.pci_device_get_by_addr(ctxt, 1, 'a').AndReturn(fake_db_dev)
self.mox.ReplayAll()
self.pci_device = pci_device.PciDevice.get_by_dev_addr(ctxt, 1, 'a')
def test_create_pci_device(self):
self.pci_device = pci_device.PciDevice.create(dev_dict)
self.assertEqual(self.pci_device.product_id, 'p')
self.assertEqual(self.pci_device.obj_what_changed(),
set(['compute_node_id', 'product_id', 'vendor_id',
'status', 'address', 'extra_info']))
def test_pci_device_extra_info(self):
self.dev_dict = copy.copy(dev_dict)
self.dev_dict['k1'] = 'v1'
self.dev_dict['k2'] = 'v2'
self.pci_device = pci_device.PciDevice.create(self.dev_dict)
extra_value = self.pci_device.extra_info
self.assertEqual(extra_value.get('k1'), 'v1')
self.assertEqual(set(extra_value.keys()), set(('k1', 'k2')))
self.assertEqual(self.pci_device.obj_what_changed(),
set(['compute_node_id', 'address', 'product_id',
'vendor_id', 'status', 'extra_info']))
def test_update_device(self):
self.pci_device = pci_device.PciDevice.create(dev_dict)
self.pci_device.obj_reset_changes()
changes = {'product_id': 'p2', 'vendor_id': 'v2'}
self.pci_device.update_device(changes)
self.assertEqual(self.pci_device.vendor_id, 'v2')
self.assertEqual(self.pci_device.obj_what_changed(),
set(['vendor_id', 'product_id']))
def test_update_device_same_value(self):
self.pci_device = pci_device.PciDevice.create(dev_dict)
self.pci_device.obj_reset_changes()
changes = {'product_id': 'p', 'vendor_id': 'v2'}
self.pci_device.update_device(changes)
self.assertEqual(self.pci_device.product_id, 'p')
self.assertEqual(self.pci_device.vendor_id, 'v2')
self.assertEqual(self.pci_device.obj_what_changed(),
set(['vendor_id', 'product_id']))
def test_get_by_dev_addr(self):
ctxt = context.get_admin_context()
self.mox.StubOutWithMock(db, 'pci_device_get_by_addr')
db.pci_device_get_by_addr(ctxt, 1, 'a').AndReturn(fake_db_dev)
self.mox.ReplayAll()
self.pci_device = pci_device.PciDevice.get_by_dev_addr(ctxt, 1, 'a')
self.assertEqual(self.pci_device.product_id, 'p')
self.assertEqual(self.pci_device.obj_what_changed(), set())
self.assertRemotes()
def test_get_by_dev_id(self):
ctxt = context.get_admin_context()
self.mox.StubOutWithMock(db, 'pci_device_get_by_id')
db.pci_device_get_by_id(ctxt, 1).AndReturn(fake_db_dev)
self.mox.ReplayAll()
self.pci_device = pci_device.PciDevice.get_by_dev_id(ctxt, 1)
self.assertEqual(self.pci_device.product_id, 'p')
self.assertEqual(self.pci_device.obj_what_changed(), set())
self.assertRemotes()
def test_claim_device(self):
self._create_fake_instance()
self.pci_device = pci_device.PciDevice.create(dev_dict)
self.pci_device.claim(self.inst)
self.assertEqual(self.pci_device.status, 'claimed')
self.assertEqual(self.pci_device.instance_uuid,
'fake-inst-uuid')
self.assertEqual(len(self.inst.pci_devices), 0)
def test_claim_device_fail(self):
self._create_fake_instance()
self._create_fake_pci_device()
self.pci_device.status = 'allocated'
self.assertRaises(exception.PciDeviceInvalidStatus,
self.pci_device.claim, self.inst)
def test_allocate_device(self):
self._create_fake_instance()
self._create_fake_pci_device()
self.pci_device.claim(self.inst)
self.pci_device.allocate(self.inst)
self.assertEqual(self.pci_device.status, 'allocated')
self.assertEqual(self.pci_device.instance_uuid, 'fake-inst-uuid')
self.assertEqual(len(self.inst.pci_devices), 1)
self.assertEqual(self.inst.pci_devices[0]['vendor_id'], 'v')
self.assertEqual(self.inst.pci_devices[0]['status'], 'allocated')
def test_allocacte_device_fail_status(self):
self._create_fake_instance()
self._create_fake_pci_device()
self.pci_device.status = 'removed'
self.assertRaises(exception.PciDeviceInvalidStatus,
self.pci_device.allocate,
self.inst)
def test_allocacte_device_fail_owner(self):
self._create_fake_instance()
self._create_fake_pci_device()
inst_2 = instance.Instance()
inst_2.uuid = 'fake-inst-uuid-2'
self.pci_device.claim(self.inst)
self.assertRaises(exception.PciDeviceInvalidOwner,
self.pci_device.allocate, inst_2)
def test_free_claimed_device(self):
self._create_fake_instance()
self._create_fake_pci_device()
self.pci_device.claim(self.inst)
self.pci_device.free(self.inst)
self.assertEqual(self.pci_device.status, 'available')
self.assertEqual(self.pci_device.instance_uuid, None)
def test_free_allocated_device(self):
self._create_fake_instance()
self._create_fake_pci_device()
self.pci_device.claim(self.inst)
self.pci_device.allocate(self.inst)
self.assertEqual(len(self.inst.pci_devices), 1)
self.pci_device.free(self.inst)
self.assertEqual(len(self.inst.pci_devices), 0)
self.assertEqual(self.pci_device.status, 'available')
self.assertEqual(self.pci_device.instance_uuid, None)
def test_free_device_fail(self):
self._create_fake_pci_device()
self.pci_device.status = 'removed'
self.assertRaises(exception.PciDeviceInvalidStatus,
self.pci_device.free)
def test_remove_device(self):
self._create_fake_pci_device()
self.pci_device.remove()
self.assertEqual(self.pci_device.status, 'removed')
|
NickleDave/hybrid-vocal-classifier
|
tests/scripts/remake_model_files.py
|
Python
|
bsd-3-clause
| 3,608 | 0.00194 |
import os
from pathlib import Path
import shutil
import joblib
import hvc
from config import rewrite_config
HERE = Path(__file__).parent
DATA_FOR_TESTS = HERE / ".." / "data_for_tests"
TEST_CONFIGS = DATA_FOR_TESTS.joinpath("config.yml").resolve()
FEATURE_FILES_DST = DATA_FOR_TESTS.joinpath("feature_files").resolve()
MODEL_FILES_DST = DATA_FOR_TESTS.joinpath("model_files").resolve()
config_feature_file_pairs = {
"knn": ("test_select_knn_ftr_grp.config.yml", "knn.features"),
"svm": ("test_select_svm.config.yml", "svm.features"),
"flatwindow": ("test_select_flatwindow.config.yml", "flatwindow.features"),
}
def main():
for model_name, (
select_config,
feature_filename,
) in config_feature_file_pairs.items():
print("running {} to create model files".format(select_config))
# have to put tmp_output_dir into yaml file
select_config = TEST_CONFIGS / select_config
feature_file = sorted(FEATURE_FILES_DST.glob(feature_filename))
if len(feature_file) != 1:
raise ValueError(
"found more than one feature file with search {}:\n{}".format(
feature_filename, feature_file
)
)
else:
# call `resolve` to get full path to model file, so pytest fixtures find it from inside tmp directories
feature_file = feature_file[0].resolve()
replace_dict = {
"feature_file": ("replace with feature_file", str(feature_file)),
"output_dir": ("replace with tmp_output_dir", str(MODEL_FILES_DST)),
}
select_config_rewritten = rewrite_config(
select_config, str(MODEL_FILES_DST), replace_dict
)
select_output_before = [
select_output_dir
for select_output_dir in sorted(MODEL_FILES_DST.glob("*select*output*"))
if select_output_dir.is_dir()
]
hvc.select(select_config_rewritten)
select_output_after = [
select_output_dir
for select_output_dir in sorted(MODEL_FILES_DST.glob("*select*output*"))
if select_output_dir.is_dir()
]
select_output_dir = [
after for after in select_output_after if after not in select_output_before
]
if len(select_output_dir) != 1:
raise ValueError(
"incorrect number of outputs when looking for select "
"ouput dirs:\n{}".format(select_output_dir)
)
else:
select_output_dir = select_output_dir[0]
# arbitrarily grab the last .model and associated .meta file
model_file = sorted(select_output_dir.glob("*/*.model"))[-1]
        # call `resolve` to get full path to model file, so pytest fixtures find it from inside tmp directories
model_file_dst = MODEL_FILES_DST.joinpath(model_name + ".model").resolve()
shutil.move(src=model_file, dst=model_file_dst)
meta_file = sorted(select_output_dir.glob("*/*.meta"))[-1]
meta_file_dst = MODEL_FILES_DST.joinpath(model_name + ".meta")
        shutil.move(src=str(meta_file), dst=str(meta_file_dst))
# need to change 'model_filename' in .meta file
meta_file = joblib.load(meta_file_dst)
meta_file["model_filename"] = os.path.abspath(model_file_dst)
joblib.dump(meta_file, meta_file_dst)
# clean up -- delete all the other model files, directory, and config
shutil.rmtree(select_output_dir)
os.remove(select_config_rewritten)
if __name__ == "__main__":
main()
|
openstack/monasca-notification
|
monasca_notification/common/repositories/base/base_repo.py
|
Python
|
apache-2.0
| 1,519 | 0.003292 |
# Copyright 2015 FUJITSU LIMITED
# (C) Copyright 2016 Hewlett Packard Enterprise Development LP
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing permissions and limitations under
# the License.
class BaseRepo(object):
def __init__(self, config):
        self._find_alarm_action_sql = \
"""SELECT id, type, name, address, period
FROM alarm_action as aa
JOIN notification_method as nm ON aa.action_id = nm.id
WHERE aa.alarm_definition_id = %s and aa.alarm_state = %s"""
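        # Illustrative note (not in the original): a concrete repository subclass
        # would bind the two %s placeholders to (alarm_definition_id, alarm_state),
        # e.g. cursor.execute(self._find_alarm_action_sql, (definition_id, 'ALARM')).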
self._find_alarm_state_sql = \
"""SELECT state
FROM alarm
WHERE alarm.id = %s"""
self._insert_notification_types_sql = \
"""INSERT INTO notification_method_type (name) VALUES ( %s)"""
self._find_all_notification_types_sql = """SELECT name from notification_method_type """
self._get_notification_sql = """SELECT name, type, address, period
FROM notification_method
WHERE id = %s"""
|
google-research/remixmatch
|
mixup.py
|
Python
|
apache-2.0
| 4,608 | 0.002604 |
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""mixup: Beyond Empirical Risk Minimization.
Adaption to SSL of MixUp: https://arxiv.org/abs/1710.09412
"""
import functools
import os
import tensorflow as tf
from absl import app
from absl import flags
from libml import data, utils, models
from libml.utils import EasyDict
FLAGS = flags.FLAGS
class Mixup(models.MultiModel):
def augment(self, x, l, beta, **kwargs):
del kwargs
mix = tf.distributions.Beta(beta, beta).sample([tf.shape(x)[0], 1, 1, 1])
mix = tf.maximum(mix, 1 - mix)
xmix = x * mix + x[::-1] * (1 - mix)
lmix = l * mix[:, :, 0, 0] + l[::-1] * (1 - mix[:, :, 0, 0])
return xmix, lmix
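    # Worked example (illustrative, not from the original file): with beta=0.5 a
    # Beta draw of 0.3 becomes mix=0.7 after the max(mix, 1 - mix) clamp, so a
    # batch of two one-hot labels [1, 0] and [0, 1] is mixed into [0.7, 0.3] and
    # [0.3, 0.7], while the images are blended with the same per-sample weights.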
def model(self, batch, lr, wd, ema, **kwargs):
hwc = [self.dataset.height, self.dataset.width, self.dataset.colors]
xt_in = tf.placeholder(tf.float32, [batch] + hwc, 'xt') # For training
x_in = tf.placeholder(tf.float32, [None] + hwc, 'x')
y_in = tf.placeholder(tf.float32, [batch] + hwc, 'y')
l_in = tf.placeholder(tf.int32, [batch], 'labels')
wd *= lr
classifier = lambda x, **kw: self.classifier(x, **kw, **kwargs).logits
def get_logits(x):
logits = classifier(x, training=True)
return logits
x, labels_x = self.augment(xt_in, tf.one_hot(l_in, self.nclass), **kwargs)
logits_x = get_logits(x)
post_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
y, labels_y = self.augment(y_in, tf.nn.softmax(get_logits(y_in)), **kwargs)
        labels_y = tf.stop_gradient(labels_y)
logits_y = get_logits(y)
loss_xe = tf.nn.softmax_cross_entropy_with_logits_v2(labels=labels_x, logits=logits_x)
loss_xe = tf.reduce_mean(loss_xe)
        loss_xeu = tf.nn.softmax_cross_entropy_with_logits_v2(labels=labels_y, logits=logits_y)
loss_xeu = tf.reduce_mean(loss_xeu)
tf.summary.scalar('losses/xe', loss_xe)
tf.summary.scalar('losses/xeu', loss_xeu)
ema = tf.train.ExponentialMovingAverage(decay=ema)
ema_op = ema.apply(utils.model_vars())
ema_getter = functools.partial(utils.getter_ema, ema)
post_ops.append(ema_op)
post_ops.extend([tf.assign(v, v * (1 - wd)) for v in utils.model_vars('classify') if 'kernel' in v.name])
train_op = tf.train.AdamOptimizer(lr).minimize(loss_xe + loss_xeu, colocate_gradients_with_ops=True)
with tf.control_dependencies([train_op]):
train_op = tf.group(*post_ops)
return EasyDict(
xt=xt_in, x=x_in, y=y_in, label=l_in, train_op=train_op,
classify_raw=tf.nn.softmax(classifier(x_in, training=False)), # No EMA, for debugging.
classify_op=tf.nn.softmax(classifier(x_in, getter=ema_getter, training=False)))
def main(argv):
utils.setup_main()
del argv # Unused.
dataset = data.DATASETS()[FLAGS.dataset]()
log_width = utils.ilog2(dataset.width)
model = Mixup(
os.path.join(FLAGS.train_dir, dataset.name),
dataset,
lr=FLAGS.lr,
wd=FLAGS.wd,
arch=FLAGS.arch,
batch=FLAGS.batch,
nclass=dataset.nclass,
ema=FLAGS.ema,
beta=FLAGS.beta,
scales=FLAGS.scales or (log_width - 2),
filters=FLAGS.filters,
repeat=FLAGS.repeat)
model.train(FLAGS.train_kimg << 10, FLAGS.report_kimg << 10)
if __name__ == '__main__':
utils.setup_tf()
flags.DEFINE_float('wd', 0.02, 'Weight decay.')
flags.DEFINE_float('ema', 0.999, 'Exponential moving average of params.')
flags.DEFINE_float('beta', 0.5, 'Mixup beta distribution.')
flags.DEFINE_integer('scales', 0, 'Number of 2x2 downscalings in the classifier.')
flags.DEFINE_integer('filters', 32, 'Filter size of convolutions.')
flags.DEFINE_integer('repeat', 4, 'Number of residual layers per stage.')
FLAGS.set_default('dataset', 'cifar10.3@250-5000')
FLAGS.set_default('batch', 64)
FLAGS.set_default('lr', 0.002)
FLAGS.set_default('train_kimg', 1 << 16)
app.run(main)
|
YoshikawaMasashi/magenta
|
magenta/music/notebook_utils.py
|
Python
|
apache-2.0
| 1,557 | 0.001927 |
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Python function
|
s which run only within a Jupyter notebook."""
# internal imports
import IPython
from magenta.music import midi_synth
_DEFAULT_SAMPLE_RATE = 44100
def play_sequence(sequence,
synth=midi_synth.synthesize,
sample_rate=_DEFAULT_SAMPLE_RATE,
**synth_args):
"""Creates an interactive player for a synthesized note sequence.
This function should only be called from a Jupyter notebook.
Args:
sequence: A music_pb2.NoteSequence to synthesize and play.
synth: A synthesis function that takes a sequence and sample rate as input.
sample_rate: The sample rate at which to synthesize.
**synth_args: Additional keyword arguments to pass to the synth function.
"""
array_of_floats = synth(sequence, sample_rate=sample_rate, **synth_args)
IPython.display.display(IPython.display.Audio(array_of_floats,
rate=sample_rate))
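# Illustrative usage (inside a Jupyter notebook; `seq` is assumed to be a
# music_pb2.NoteSequence loaded elsewhere, e.g. from a MIDI file):
#   play_sequence(seq)                     # default synth at 44.1 kHz
#   play_sequence(seq, sample_rate=22050)  # assumption: lower rate to save memory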
|
jcherqui/searx
|
tests/unit/engines/test_bing_news.py
|
Python
|
agpl-3.0
| 7,039 | 0.000426 |
from collections import defaultdict
import mock
from searx.engines import bing_news
from searx.testing import SearxTestCase
import lxml
class TestBingNewsEngine(SearxTestCase):
def test_request(self):
bing_news.supported_languages = ['en', 'fr']
query = 'test_query'
dicto = defaultdict(dict)
dicto['pageno'] = 1
dicto['language'] = 'fr-FR'
dicto['time_range'] = ''
params = bing_news.request(query, dicto)
self.assertIn('url', params)
self.assertIn(query, params['url'])
self.assertIn('bing.com', params['url'])
self.assertIn('fr', params['url'])
dicto['language'] = 'all'
params = bing_news.request(query, dicto)
self.assertIn('en', params['url'])
def test_no_url_in_request_year_time_range(self):
dicto = defaultdict(dict)
query = 'test_query'
dicto['time_range'] = 'year'
params = bing_news.request(query, dicto)
self.assertEqual({}, params['url'])
def test_response(self):
self.assertRaises(AttributeError, bing_news.response, None)
self.assertRaises(AttributeError, bing_news.response, [])
self.assertRaises(AttributeError, bing_news.response, '')
self.assertRaises(AttributeError, bing_news.response, '[]')
response = mock.Mock(content='<html></html>')
self.assertEqual(bing_news.response(response), [])
response = mock.Mock(content='<html></html>')
self.assertEqual(bing_news.response(response), [])
html = """<?xml version="1.0" encoding="utf-8" ?>
<rss version="2.0" xmlns:News="https://www.bing.com:443/news/search?q=python&setmkt=en-US&first=1&format=RSS">
<channel>
<title>python - Bing News</title>
<link>https://www.bing.com:443/news/search?q=python&setmkt=en-US&first=1&format=RSS</link>
<description>Search results</description>
<image>
<url>http://10.53.64.9/rsslogo.gif</url>
<title>test</title>
<link>https://www.bing.com:443/news/search?q=test&setmkt=en-US&first=1&format=RSS</link>
</image>
<copyright>Copyright</copyright>
<item>
<title>Title</title>
<link>https://www.bing.com/news/apiclick.aspx?ref=FexRss&aid=&tid=c237eccc50bd4758b106a5e3c94fce09&url=http%3a%2f%2furl.of.article%2f&c=xxxxxxxxx&mkt=en-us</link>
<description>Article Content</description>
<pubDate>Tue, 02 Jun 2015 13:37:00 GMT</pubDate>
<News:Source>Infoworld</News:Source>
<News:Image>http://a1.bing4.com/th?id=ON.13371337133713371337133713371337&pid=News</News:Image>
<News:ImageSize>w={0}&h={1}&c=7</News:ImageSize>
<News:ImageKeepOriginalRatio></News:ImageKeepOriginalRatio>
<News:ImageMaxWidth>620</News:ImageMaxWidth>
<News:ImageMaxHeight>413</News:ImageMaxHeight>
</item>
<item>
<title>Another Title</title>
<link>https://www.bing.com/news/apiclick.aspx?ref=FexRss&aid=&tid=c237eccc50bd4758b106a5e3c94fce09&url=http%3a%2f%2fanother.url.of.article%2f&c=xxxxxxxxx&mkt=en-us</link>
<description>Another Article Content</description>
<pubDate>Tue, 02 Jun 2015 13:37:00 GMT</pubDate>
</item>
</channel>
</rss>""" # noqa
response = mock.Mock(content=html.encode('utf-8'))
results = bing_news.response(response)
self.assertEqual(type(results), list)
self.assertEqual(len(results), 2)
self.assertEqual(results[0]['title'], 'Title')
self.assertEqual(results[0]['url'], 'http://url.of.article/')
self.assertEqual(results[0]['content'], 'Article Content')
self.assertEqual(results[0]['img_src'], 'https://www.bing.com/th?id=ON.13371337133713371337133713371337')
self.assertEqual(results[1]['title'], 'Another Title')
self.assertEqual(results[1]['url'], 'http://another.url.of.article/')
self.assertEqual(results[1]['content'], 'Another Article Content')
self.assertNotIn('img_src', results[1])
html = """<?xml version="1.0" encoding="utf-8" ?>
<rss version="2.0" xmlns:News="https://www.bing.com:443/news/search?q=python&setmkt=en-US&first=1&format=RSS">
<channel>
<title>python - Bing News</title>
<link>https://www.bing.com:443/news/search?q=python&setmkt=en-US&first=1&format=RSS</link>
<description>Search results</description>
<image>
<url>http://10.53.64.9/rsslogo.gif</url>
<title>test</title>
            <link>https://www.bing.com:443/news/search?q=test&setmkt=en-US&first=1&format=RSS</link>
</image>
        <copyright>Copyright</copyright>
<item>
<title>Title</title>
<link>http://another.url.of.article/</link>
<description>Article Content</description>
<pubDate>garbage</pubDate>
<News:Source>Infoworld</News:Source>
<News:Image>http://another.bing.com/image</News:Image>
<News:ImageSize>w={0}&h={1}&c=7</News:ImageSize>
<News:ImageKeepOriginalRatio></News:ImageKeepOriginalRatio>
<News:ImageMaxWidth>620</News:ImageMaxWidth>
<News:ImageMaxHeight>413</News:ImageMaxHeight>
</item>
</channel>
</rss>""" # noqa
response = mock.Mock(content=html.encode('utf-8'))
results = bing_news.response(response)
self.assertEqual(type(results), list)
self.assertEqual(len(results), 1)
self.assertEqual(results[0]['title'], 'Title')
self.assertEqual(results[0]['url'], 'http://another.url.of.article/')
self.assertEqual(results[0]['content'], 'Article Content')
self.assertEqual(results[0]['img_src'], 'http://another.bing.com/image')
html = """<?xml version="1.0" encoding="utf-8" ?>
<rss version="2.0" xmlns:News="https://www.bing.com:443/news/search?q=python&setmkt=en-US&first=1&format=RSS">
<channel>
<title>python - Bing News</title>
<link>https://www.bing.com:443/news/search?q=python&setmkt=en-US&first=1&format=RSS</link>
<description>Search results</description>
<image>
<url>http://10.53.64.9/rsslogo.gif</url>
<title>test</title>
<link>https://www.bing.com:443/news/search?q=test&setmkt=en-US&first=1&format=RSS</link>
</image>
</channel>
</rss>""" # noqa
response = mock.Mock(content=html.encode('utf-8'))
results = bing_news.response(response)
self.assertEqual(type(results), list)
self.assertEqual(len(results), 0)
html = """<?xml version="1.0" encoding="utf-8" ?>gabarge"""
response = mock.Mock(content=html.encode('utf-8'))
self.assertRaises(lxml.etree.XMLSyntaxError, bing_news.response, response)
|
sloev/pycorm
|
setup.py
|
Python
|
mit
| 1,378 | 0.000726 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
with open('README.rst') as readme_file:
readme = readme_file.read()
with open('HISTORY.rst') as history_file:
history = history_file.read()
requirements = [
"jsonschema"
# TODO: put package requirements here
]
test_requirements = [
"jsonschema"
# TODO: put package test requirements here
]
setup(
name='pycorm',
version='0.2.13',
description="a pico orm that uses jsonschema",
long_description=readme + '\n\n' + history,
author="Johannes Valbjørn",
author_email='johannes.valbjorn@gmail.com',
url='https://github.com/sloev/pycorm',
packages=[
'pycorm',
],
    package_dir={'pycorm':
                 'pycorm'},
include_package_data=True,
install_requires=requirements,
license="MIT",
zip_safe=False,
keywords='pycorm',
classifiers=[
        'Development Status :: 2 - Pre-Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: ISC License (ISCL)',
'Natural Language :: English',
"Programming Language :: Python :: 2",
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7'
],
test_suite='tests',
tests_require=test_requirements
)
|
antoinecarme/sklearn2sql_heroku
|
tests/classification/BreastCancer/ws_BreastCancer_DummyClassifier_mysql_code_gen.py
|
Python
|
bsd-3-clause
| 142 | 0.014085 |
from sklearn2sql_heroku.tests.classification import generic as class_gen
class_gen.test_model("DummyClassifier" , "BreastCancer" , "mysql")
|
yafeunteun/wikipedia-spam-classifier
|
revscoring/revscoring/scorer_models/test_statistics/precision_recall.py
|
Python
|
mit
| 3,506 | 0.001141 |
import io
import logging
from collections import defaultdict
from numpy import linspace
from scipy import interp
from sklearn.metrics import (auc, average_precision_score,
precision_recall_curve)
from tabulate import tabulate
from .test_statistic import ClassifierStatistic, TestStatistic
logger = logging.getLogger(__name__)
class precision_recall(ClassifierStatistic):
    """
Constructs a precision/recall statistics generator.
    See https://en.wikipedia.org/wiki/Precision_and_recall
When applied to a test set, the `score()` method will return a dictionary
with four fields:
* auc: the area under the precision-recall curve
* precisions: a list of precisions
* recalls: a list of recalls
* thresholds: a list of probability thresholds
"""
@classmethod
def _single_class_stat(cls, scores, labels, comparison_label):
y_proba = [s['probability'][comparison_label] for s in scores]
y_true = [l == comparison_label for l in labels]
precisions, recalls, thresholds = \
precision_recall_curve(y_true, y_proba)
return {
'auc': average_precision_score(y_true, y_proba),
'precisions': list(precisions),
'recalls': list(recalls)
}
def merge(self, stats):
individual_auc = defaultdict(list)
label_sum_recalls = defaultdict(float)
for stat in stats:
for label, label_stat in stat.items():
individual_auc[label].append(label_stat['auc'])
precisions, recalls = \
label_stat['precisions'], label_stat['recalls']
label_sum_recalls[label] += \
interp(linspace(0, 1, 100), precisions, recalls)
merged_stat = {}
for label, sum_recalls in label_sum_recalls.items():
mean_recalls = sum_recalls / len(stats)
interp_auc = auc(linspace(0, 1, 100), mean_recalls)
logger.debug("interp_auc={0}, individual_auc={1}"
.format(interp_auc, individual_auc[label]))
merged_stat[label] = {
'auc': interp_auc,
'precisions': list(linspace(0, 1, 100)),
'recalls': list(mean_recalls)
}
return merged_stat
@classmethod
def format(cls, stat, format="str"):
if format == "str":
return cls.format_str(stat)
elif format == "json":
return {label: {'auc': round(ss['auc'], 3)}
for label, ss in stat.items()}
else:
raise TypeError("Format '{0}' not available for {1}."
.format(format, cls.__name__))
@classmethod
def format_str(cls, stats):
formatted = io.StringIO()
if 'auc' in stats and 'thresholds' in stats:
# Single class
formatted.write("PR-AUC: {0}".format(round(stats['auc'], 3)))
else:
# multiple classes
formatted.write("PR-AUC:\n")
table_data = [(repr(label), round(stats[label]['auc'], 3))
for label in sorted(stats.keys())]
formatted.write("".join(["\t" + line + "\n" for line in
tabulate(table_data).split("\n")]))
return formatted.getvalue()
TestStatistic.register("precision_recall", precision_recall)
TestStatistic.register("pr", precision_recall) # Backwards compatible
|
dimagi/commcare-hq
|
corehq/ex-submodules/casexml/apps/phone/tests/dummy.py
|
Python
|
bsd-3-clause
| 1,824 | 0.000548 |
from datetime import datetime
from casexml.apps.case.xml.generator import date_to_xml_string
DUMMY_ID = "foo"
DUMMY_USERNAME = "mclovin"
DUMMY_PASSWORD = "changeme"
DUMMY_PROJECT = "domain"
def dummy_user_xml(user=None):
username = user.username if user else DUMMY_USERNAME
password = user.password if user else DUMMY_PASSWORD
user_id = user.user_id if user else DUMMY_ID
date_joined = user.date_joined if user else datetime.utcnow()
project = user.domain if user else DUMMY_PROJECT
return """
<Registration xmlns="http://openrosa.org/user/registration">
<username>{}</username>
<password>{}</password>
<uuid>{}</uuid>
<date>{}</date>
<user_data>
<data key="commcare_first_name"/>
<data key="commcare_last_name"/>
<data key="commcare_phone_number"/>
<data key="commcare_project">{}</data>
<data key="something">arbitrary</data>
        </user_data>
</Registration>""".format(
username,
password,
user_id,
date_to_xml_string(date_joined),
project
)
DUMMY_RESTORE_XML_TEMPLATE = ("""
<OpenRosaResponse xmlns="http://openrosa.org/http/response"%(items_xml)s>
<message nature="ota_restore_success">%(message)s</message>
    <Sync xmlns="http://commcarehq.org/sync">
<restore_id>%(restore_id)s</restore_id>
</Sync>
%(user_xml)s
%(case_xml)s
</OpenRosaResponse>
""")
def dummy_restore_xml(restore_id, case_xml="", items=None, user=None):
return DUMMY_RESTORE_XML_TEMPLATE % {
"restore_id": restore_id,
"items_xml": '' if items is None else (' items="%s"' % items),
"user_xml": dummy_user_xml(user),
"case_xml": case_xml,
"message": "Successfully restored account mclovin!"
}
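A hedged usage sketch for the fixtures above (the call values are invented); with user=None the DUMMY_* constants fill both templates:
# Illustrative only: render the restore fixture with its defaults.
print(dummy_restore_xml("restore-123", case_xml="", items=2))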
|
GoogleCloudPlatform/ai-platform-samples
|
prediction/xgboost/structured/base/prediction/predict.py
|
Python
|
apache-2.0
| 2,134 | 0 |
#!/usr/bin/env python
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from google.api_core.client_options import ClientOptions
import os
import logging
import googleapiclient.discovery
logging.basicConfig()
# In this sample, we will reply on 6 features only:
# trip_miles trip_seconds fare
# trip_start_month trip_start_hour trip_start_day
instances = [
[1.1, 420, 625, 8, 16, 3],
[0.3, 960, 1485, 3, 22, 2],
[1.0, 300, 505, 1, 1, 1],
]
PROJECT_ID = os.getenv('PROJECT_ID')
MODEL_NAME = os.getenv('MODEL_NAME')
MODEL_VERSION = os.getenv('MODEL_VERSION')
REGION = os.getenv('REGION')
logging.info('PROJECT_ID: %s', PROJECT_ID)
logging.info('MODEL_NAME: %s', MODEL_NAME)
logging.info('MODEL_VERSION: %s', MODEL_VERSION)
logging.info('REGION: %s', REGION)
prefix = "{}-ml".format(REGION) if REGION else "ml"
api_endpoint = "https://{}.googleapis.com".format(prefix)
client_options = ClientOptions(api_endpoint=api_endpoint)
# Use Regional support
service = googleapiclient.discovery.build('ml', 'v1',
cache_discovery=False,
client_options=client_options)
name = 'projects/{}/models/{}/versions/{}'.format(PROJECT_ID, MODEL_NAME,
MODEL_VERSION)
response = service.projects().predict(
name=name,
body={'instances': instances}
).execute()
if 'error' in response:
logging.error(response['error'])
else:
print(response['predictions'])
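The regional endpoint selection above boils down to one string template; a tiny hedged illustration with made-up region values:
# Hedged illustration of the endpoint string built above.
for region in (None, 'us-central1'):
    prefix = "{}-ml".format(region) if region else "ml"
    print("https://{}.googleapis.com".format(prefix))
# -> https://ml.googleapis.com
# -> https://us-central1-ml.googleapis.com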
|
rgayon/plaso
|
plaso/parsers/amcache.py
|
Python
|
apache-2.0
| 11,498 | 0.006784 |
# -*- coding: utf-8 -*-
"""File containing a Windows Registry plugin to parse the AMCache.hve file."""
from __future__ import unicode_literals
import pyregf
from dfdatetime import filetime
from dfdatetime import posix_time
from dfwinreg import definitions as dfwinreg_definitions
from plaso.containers import events
from plaso.containers import time_events
from plaso.lib import definitions
from plaso.parsers import interface
from plaso.parsers import manager
class AMCacheFileEventData(events.EventData):
"""AMCache file event data.
Attributes:
company_name (str): company name that created product file belongs to.
file_description (str): description of file.
file_reference (str): file system file reference, for example 9-1 (MFT
entry - sequence number).
file_size (int): size of file in bytes.
file_version (str): version of file.
full_path (str): full path of file.
language_code (int): language code of file.
product_name (str): product name file belongs to.
program_identifier (str): GUID of entry under Root/Program key file belongs
to.
sha1 (str): SHA-1 of file.
"""
DATA_TYPE = 'windows:registry:amcache'
def __init__(self):
"""Initializes event data."""
super(AMCacheFileEventData, self).__init__(data_type=self.DATA_TYPE)
self.company_name = None
self.file_description = None
self.file_reference = None
self.file_size = None
self.file_version = None
self.full_path = None
self.language_code = None
self.product_name = None
self.program_identifier = None
self.sha1 = None
class AMCacheProgramEventData(events.EventData):
"""AMCache programs event data.
Attributes:
entry_type (str): type of entry (usually AddRemoveProgram).
file_paths (str): file paths of installed program.
files (str): list of files belonging to program.
language_code (int): language_code of program.
msi_package_code (str): MSI package code of program.
msi_product_code (str): MSI product code of program.
name (str): name of installed program.
    package_code (str): package code of program.
    product_code (str): product code of program.
publisher (str): publisher of program.
uninstall_key (str): unicode string of uninstall registry key for program.
version (str): version of program.
"""
DATA_TYPE = 'windows:registry:amcache:programs'
def __init__(self):
"""Initializes event data."""
super(AMCacheProgramEventData, self).__init__(data_type=self.DATA_TYPE)
self.entry_type = None
self.file_paths = None
self.files = None
self.language_code = None
self.msi_package_code = None
self.msi_product_code = None
self.name = None
self.package_code = None
self.product_code = None
self.publisher = None
self.uninstall_key = None
self.version = None
class AMCacheParser(interface.FileObjectParser):
"""AMCache Registry plugin for recently run programs."""
NAME = 'amcache'
DATA_FORMAT = 'AMCache Windows NT Registry (AMCache.hve) file'
# Contains: {value name: attribute name}
_FILE_REFERENCE_KEY_VALUES = {
'0': 'product_name',
'1': 'company_name',
'3': 'language_code',
'5': 'file_version',
'6': 'file_size',
'c': 'file_description',
'15': 'full_path',
'100': 'program_identifier',
'101': 'sha1'}
_AMCACHE_COMPILATION_TIME = 'f'
_AMCACHE_FILE_MODIFICATION_TIME = '11'
_AMCACHE_FILE_CREATION_TIME = '12'
_AMCACHE_ENTRY_WRITE_TIME = '17'
_AMCACHE_P_INSTALLATION_TIME = 'a'
_AMCACHE_P_FILES = 'Files'
_PRODUCT_KEY_VALUES = {
'0': 'name',
'1': 'version',
'2': 'publisher',
'3': 'language_code',
'6': 'entry_type',
'7': 'uninstall_key',
'd': 'file_paths',
'f': 'product_code',
'10': 'package_code',
'11': 'msi_product_code',
'12': 'msi_package_code',
}
#TODO Add GetFormatSpecification when issues are fixed with adding
# multiple parsers for the same file format (in this case regf files)
# AddNewSignature ->
# b'\x41\x00\x6d\x00\x63\x00\x61\x00\x63\x00\x68\x00\x65', offset=88
def _GetValueDataAsObject(self, parser_mediator, value):
"""Retrieves the value data as an object.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
value (pyregf_value): value.
Returns:
object: data as a Python type or None if the value cannot be read.
"""
try:
if value.type in (
dfwinreg_definitions.REG_SZ,
dfwinreg_definitions.REG_EXPAND_SZ,
dfwinreg_definitions.REG_LINK):
value_data = value.get_data_as_string()
elif value.type in (
dfwinreg_definitions.REG_DWORD,
dfwinreg_definitions.REG_DWORD_BIG_ENDIAN,
dfwinreg_definitions.REG_QWORD):
value_data = value.get_data_as_integer()
elif value.type == dfwinreg_definitions.REG_MULTI_SZ:
value_data = list(value.get_data_as_multi_string())
else:
value_data = value.data
except (IOError, OverflowError) as exception:
parser_mediator.ProduceExtractionWarning(
'Unable to read data from value: {0:s} with error: {1!s}'.format(
value.name, exception))
return None
return value_data
def _ParseFileKey(self, parser_mediator, file_key):
"""Parses a Root\\File key.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
file_key (pyregf.key): the File key.
"""
for volume_key in file_key.sub_keys:
for file_reference_key in volume_key.sub_keys:
self._ParseFileReferenceKey(parser_mediator, file_reference_key)
def _ParseFileReferenceKey(self, parser_mediator, file_reference_key):
"""Parses a file reference key (sub key of Root\\File\\%VOLUME%) for events.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
file_reference_key (pyregf.key): file reference key.
"""
event_data = AMCacheFileEventData()
try:
if '0000' in file_reference_key.name:
# A NTFS file is a combination of MFT entry and sequence number.
sequence_number, mft_entry = file_reference_key.name.split('0000')
mft_entry = int(mft_entry, 16)
sequence_number = int(sequence_number, 16)
event_data.file_reference = '{0:d}-{1:d}'.format(
mft_entry, sequence_number)
else:
# A FAT file is a single number.
file_reference = int(file_reference_key.name, 16)
event_data.file_reference = '{0:d}'.format(file_reference)
except (ValueError, TypeError):
pass
for value_name, attribute_name in self._FILE_REFERENCE_KEY_VALUES.items():
value = file_reference_key.get_value_by_name(value_name)
if not value:
continue
value_data = self._GetValueDataAsObject(parser_mediator, value)
if attribute_name == 'sha1' and value_data.startswith('0000'):
# Strip off the 4 leading zero's from the sha1 hash.
value_data = value_data[4:]
setattr(event_data, attribute_name, value_data)
amcache_time_value = file_reference_key.get_value_by_name(
self._AMCACHE_ENTRY_WRITE_TIME)
if amcache_time_value:
amcache_time = filetime.Filetime(amcache_time_value.get_data_as_integer())
event = time_events.DateTimeValuesEvent(
amcache_time, definitions.TIME_DESCRIPTION_MODIFICATION)
parser_mediator.ProduceEventWithEventData(event, event_data)
creation_time_value = file_reference_key.get_value_by_name(
self._AMCACHE_FILE_CREATION_TIME)
if creation_time_value:
creation_time = filetime.Filetime(
creation_time_value.get_data_as_integer())
event = time_events.DateTimeValuesEvent(
creation_time, definitions.TIME_DESCRIPTION_CREATION)
parser_mediator.ProduceEventWithEventData(event, event_data)
modification_time_value = fil
|
cancerregulome/GeneSpot_1.0
|
websvcs/endpoints/storage/mongodb_lookups.py
|
Python
|
mit
| 10,153 | 0.002659 |
from tornado.options import options, logging
from itertools import product
import json
import tornado.web
import pymongo
import csv
class MongoDbLookupHandler(tornado.web.RequestHandler):
def get(self, identity):
logging.info("uri=%s [%s] [%s]" % (self.request.uri, identity, self.request.arguments))
ids = identity.split("/")
db_name = ids[1]
collection = self.open_collection(db_name, ids[2])
# TODO : Improve this logic to correctly parse arguments and convert to a proper mongo DB query
args = self.request.arguments
query = {}
case_sensitive_lookups = frozenset(options.case_sensitive_lookups)
normalize_fn = None
if db_name in case_sensitive_lookups:
normalize_fn = lambda x: x
else:
normalize_fn = lambda x: x.lower()
for key in args.keys():
if key != "output":
iargs = args[key]
if len(iargs) == 1:
query[key] = normalize_fn(args[key][0])
else:
query[key] = {"$in": map(normalize_fn, args[key])}
query_limit = options.mongo_lookup_query_limit
json_items = []
for idx, item in enumerate(collection.find(query)):
if idx > query_limit:
break
json_item = self.jsonable_item(item)
#json_item["uri"] = self.request.uri + "/" + json_item["id"]
json_items.append(json_item)
if self.get_argument("output", "json") == "tsv":
WriteTsv(self, json_items)
self.set_status(200)
return
self.write({"items": json_items})
self.set_status(200)
return
def jsonable_item(self, item):
json_item = {}
for k in item.iterkeys():
if k == "_id":
json_item["id"] = str(item["_id"])
elif "[]" in k:
json_item[k.replace("[]", "")] = item[k]
else:
json_item[k] = item[k]
return json_item
def open_collection(self, db_name, collection_name):
#if options.verbose:
logging.info("open_collection(%s)" % collection_name)
connection = pymongo.Connection(options.mongo_lookup_uri)
database = connection[db_name]
return database[collection_name]
class MongoDbPairwiseLookupHandler(tornado.web.RequestHandler):
def get(self, identity):
logging.info("uri=%s [%s] [%s]" % (self.request.uri, identity, self.request.arguments))
args = self.request.arguments
ids = identity.split("/")
feature_matrix_name = ids[1]
gene_label_1 = args['gene1'][0]
gene_label_2 = args['gene2'][0]
cancer_label = args['cancer'][0].lower()
# Get feature IDs
fmx_collection = self.open_feature_matrix_collection("qed_lookups", "fmx_" + feature_matrix_name)
pairwise_collection = self.open_pairwise_collection("qed_lookups", "pw_" + feature_matrix_name + "_" + cancer_label)
features_1 = filter(self.feature_filter_fn, fmx_collection.find({"cancer": cancer_label, "gene": gene_label_1}))
features_2 = filter(self.feature_filter_fn, fmx_collection.find({"cancer": cancer_label, "gene": gene_label_2}))
feature_ids_1 = map(lambda f: f['id'], features_1)
feature_ids_2 = map(lambda f: f['id'], features_2)
# Get pairwise values
pairwise_results = []
for id1, id2 in product(feature_ids_1, feature_ids_2):
pw = self.get_pairwise_result(pairwise_collection, id1, id2)
if pw is not None:
pairwise_results.append(pw)
result = {
|
"features": {
gene_label_1: map(self.jsonable_item, features_1),
|
gene_label_2: map(self.jsonable_item, features_2)
},
"pairwise_results": map(self.jsonable_item, pairwise_results)
}
log_msg = "Features found: "
log_msg += gene_label_1 + ": " + str(len(feature_ids_1))
log_msg += "\t" + gene_label_2 + ": " + str(len(feature_ids_2))
log_msg += "\tPairwise results: " + str(len(pairwise_results))
logging.info(log_msg)
self.write(json.dumps(result))
self.set_status(200)
def feature_filter_fn(self, feature):
fields = feature['id'].split(':')
source = fields[1]
if source == 'METH' or source == 'CNVR' or source == 'GEXP':
return True
elif source == 'GNAB' and fields[-1] == 'y_n_somatic':
return True
else:
return False
def jsonable_item(self, item):
json_item = {}
for k in item.iterkeys():
if k == "_id":
json_item["id"] = str(item["_id"])
elif "[]" in k:
json_item[k.replace("[]", "")] = item[k]
else:
json_item[k] = item[k]
return json_item
def get_pairwise_result(self, collection, id1, id2):
res1 = collection.find_one({"target": id1, "predictor": id2})
res2 = collection.find_one({"target": id2, "predictor": id1})
if res1 is not None:
return res1
elif res2 is not None:
return res2
else:
return None
def open_feature_matrix_collection(self, db_name, collection_name):
logging.info("open_collection(%s)" % collection_name)
return self.open_collection(options.mongo_lookup_uri, db_name, collection_name)
def open_pairwise_collection(self, db_name, collection_name):
logging.info("open_collection(%s)" % collection_name)
return self.open_collection(options.mongo_pairwise_lookup_uri, db_name, collection_name)
def open_collection(self, mongo_uri, db_name, collection_name):
logging.info("open_collection(%s)" % collection_name)
connection = pymongo.Connection(mongo_uri)
database = connection[db_name]
return database[collection_name]
class MongoDbMutSigHandler(tornado.web.RequestHandler):
def get(self, identity):
logging.info("uri=%s [%s] [%s]" % (self.request.uri, identity, self.request.arguments))
args = self.request.arguments
query = {}
for key in args.keys():
if key != "cancer":
continue
iargs = args[key]
if len(iargs) == 1:
query[key] = args[key][0].lower()
else:
query[key] = {"$in": map(lambda x: x.lower(), args[key])}
if "max_rank" not in args:
query["rank"] = {"$lt": 21}
else:
query["rank"] = {"$lt": int(args["max_rank"][0]) + 1}
collection = self.open_collection("qed_lookups", "mutsig_rankings")
items = []
if "cancer" in query:
items = collection.find(query)
json_items = map(self.jsonable_item, items)
if self.get_argument("output", "json") == "tsv":
WriteTsv(self, json_items)
self.set_status(200)
return
self.write(json.dumps({ "items": json_items }))
self.set_status(200)
def jsonable_item(self, item):
json_item = {}
for k in item.iterkeys():
if k == "_id":
json_item["id"] = str(item["_id"])
elif "[]" in k:
json_item[k.replace("[]", "")] = item[k]
else:
json_item[k] = item[k]
return json_item
def open_collection(self, db_name, collection_name):
logging.info("open_collection(%s)" % collection_name)
connection = pymongo.Connection(options.mongo_lookup_uri)
database = connection[db_name]
return database[collection_name]
class MongoDbFeaturesByLocationHandler(tornado.web.RequestHandler):
def get(self, identity):
logging.info("uri=%s [%s] [%s]" % (self.request.uri, identity, self.request.arguments))
args = self.request.arguments
ids = identity.split("/")
query = {
"chr": str(args["chr"][0]),
"start": {"$gt": int(args["start"][0])},
|
cxhernandez/mdentropy
|
mdentropy/metrics/mutinf.py
|
Python
|
gpl-3.0
| 2,347 | 0 |
from ..core import mi, nmi
from .base import (AlphaAngleBaseMetric, ContactBaseMetric, DihedralBaseMetric,
BaseMetric)
import numpy as np
from itertools import combinations_with_replacement as combinations
from multiprocessing import Pool
from contextlib import closing
__all__ = ['AlphaAngleMutualInformation', 'ContactMutualInformation',
'DihedralMutualInformation']
class MutualInformationBase(BaseMetric):
"""Base mutual information object"""
    def _partial_mutinf(self, p):
i, j = p
return self._est(self.n_bins,
self.data[i].values,
self.shuffled_data[j].values,
rng=self.rng,
method=self.method)
def _exec(self):
M = np.zeros((self.labels.size, self.labels.size))
with closing(Pool(processes=self.n_threads)) as pool:
values = pool.map(self._partial_mutinf,
combinations(self.labels, 2))
pool.terminate()
idx = np.triu_indices_from(M)
M[idx] = values
return M + M.T - np.diag(M.diagonal())
def __init__(self, normed=True, **kwargs):
self._est = nmi if normed else mi
self.partial_transform.__func__.__doc__ = """
Partial transform a mdtraj.Trajectory into an n_residue by n_residue
matrix of mutual information scores.
Parameters
----------
traj : mdtraj.Trajectory
Trajectory to transform
shuffle : int
Number of shuffle iterations (default: 0)
verbose : bool
Whether to display performance
Returns
-------
result : np.ndarray, shape = (n_residue, n_residue)
Mutual information matrix
"""
super(MutualInformationBase, self).__init__(**kwargs)
class AlphaAngleMutualInformation(AlphaAngleBaseMetric, MutualInformationBase):
"""Mutual information calculations for alpha angles"""
class ContactMutualInformation(ContactBaseMetric, MutualInformationBase):
"""Mutual information calculations for contacts"""
class DihedralMutualInformation(DihedralBaseMetric, MutualInformationBase):
"""Mutual information calculations for dihedral angles"""
|
jworr/scheduler
|
model/staff.py
|
Python
|
gpl-2.0
| 1,931 | 0.052822 |
import model
EmployeeColumns = ["name", "role_id", "is_active", "street_address", "city", "state", "zip", "phone"]
class StaffMember(object):
"""
Represents a staff member
"""
def __init__(self, name, roleId, isActive, street=None, city=None, state=None, zipCode=None, phone=None):
"""
Creates a new staff member
"""
self.name = name
self.street = street
self.city = city
self.state = state
self.zipCode = zipCode
self.phone = phone
self.roleId = roleId
self.isActive = isActive
def __repr__(self):
return "<Staff> %s, %i, %s, %s, %s, %s, %s, %s" % (self.name, self.roleId, self.isActive, self.street, self.city, self.state, self.zipCode, self.phone)
def __eq__(self, other):
return self.name == other.name \
and self.street == other.street \
and self.city == other.city \
and self.state == other.state \
and self.zipCode == other.zipCode \
and self.phone == other.phone \
and self.roleId == other.roleId \
and self.isActive == other.isActive
def fields(self):
"""
Returns a dictionary of all the classes fields
"""
return model.getFieldMap(self)
def flush(self, connection, oldName=None):
"""
Updates or creates the appointment in the database
"""
cursor = connection.cursor()
#map the database fields to this objects attributes
sqlMap = {"name":"name", "role_id":"roleId", "is_active":"isActive",
"street_address":"street", "city":"city",
"zip":"zipCode", "phone":"phone", "state":"state"}
#map the data
    params = model.createSqlParams(EmployeeColumns, sqlMap, self)
#if a old name was given then do an update statement
if oldName:
query = model.updateString("employee", EmployeeColumns, "name = %(oldName)s")
params["oldName"] = oldName
#else do a create statement
else:
query = model.insertString("employee", EmployeeColumns)
    cursor.execute(query, params)
connection.commit()
cursor.close()
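A hedged usage sketch of the class above (the connection object and the model helpers come from the surrounding project, so the flush calls are left commented):
member = StaffMember("Ada Lovelace", roleId=2, isActive=True, city="London")
print(member.fields())        # attribute map built via model.getFieldMap
# member.flush(connection)                         # INSERT (no oldName given)
# member.flush(connection, oldName="A. Lovelace")  # UPDATE an existing row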
|
hazelcast/hazelcast-python-client
|
benchmarks/map_bench.py
|
Python
|
apache-2.0
| 1,776 | 0.001689 |
import random
import time
import logging
import sys
from os.path import dirname
sys.path.append(dirname(dirname(dirname(__file__))))
import hazelcast
def do_benchmark():
THREAD_COUNT = 1
ENTRY_COUNT = 10 * 1000
VALUE_SIZE = 10000
GET_PERCENTAGE = 40
PUT_PERCENTAGE = 40
    logging.basicConfig(format='%(asctime)s%(msecs)03d [%(name)s] %(levelname)s: %(message)s', datefmt="%H:%M:%S,")
logging.getLogger().setLevel(logging.INFO)
logger = logging.getLogger("main")
config = hazelcast.ClientConfig()
config.group_config.name = "dev"
config.group_config.password = "dev-pass"
try:
from tests.hzrc.client import HzRemoteController
rc = HzRemoteController('127.0.0.1', '9701')
if not rc.ping():
logger.info("Remote Controller Server not running... exit
|
ing.")
exit()
logger.info("Remote Controller Server OK...")
rc_cluster = rc.createCluster(None, None)
rc_member = rc.startMember(rc_cluster.id)
        config.network.addresses.append('{}:{}'.format(rc_member.host, rc_member.port))
except (ImportError, NameError):
config.network.addresses.append('127.0.0.1')
client = hazelcast.HazelcastClient(config)
my_map = client.get_map("default")
for i in range(0, 1000):
key = int(random.random() * ENTRY_COUNT)
operation = int(random.random() * 100)
if operation < GET_PERCENTAGE:
my_map.get(key)
elif operation < GET_PERCENTAGE + PUT_PERCENTAGE:
my_map.put(key, "x" * VALUE_SIZE)
else:
my_map.remove(key)
if __name__ == '__main__':
start = time.time()
do_benchmark()
time_taken = time.time() - start
print("Took %s seconds" % (time_taken))
|
ulif/ulif.openoffice
|
src/ulif/openoffice/helpers.py
|
Python
|
gpl-2.0
| 22,408 | 0.000759 |
#
# helpers.py
#
# Copyright (C) 2011, 2013, 2015 Uli Fouquet
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
"""
Helpers for trivial jobs.
"""
import base64
import cssutils
import logging
import os
import re
import shutil
import tempfile
import zipfile
from bs4 import BeautifulSoup, UnicodeDammit
try:
from cStringIO import StringIO # Python 2.x
except ImportError: # pragma: no cover
from io import StringIO # Python 3.x
from pkg_resources import iter_entry_points
try:
from urlparse import urlparse # Python 2.x
except ImportError: # pragma: no cover
from urllib.parse import urlparse # Python 3.x
from six import string_types
try:
basestring = basestring # Python 2.x
except NameError: # pragma: no cover
basestring = (str, bytes) # Python 3.x
def copytree(src, dst, symlinks=False, ignore=None):
"""Recursively copy an entire directory tree rooted at `src`. The
destination directory, named by `dst`, might exist already; if
    not, then it will be created as well as missing parent
directories. Permissions and times of directories are copied with
:func:`shutil.copystat`, individual files are copied using
:func:`shutil.copy2`.
If `symlinks` is true, symbolic links in the source tree are
represented as symbolic links in the new tree; if false or
omitted, the contents of the linked files are copied to the new
tree.
If ignore is given, it must be a callable that will receive as its
arguments the directory being visited by :func:`shutil.copytree`,
and a list of its contents, as returned by
:func:`os.listdir`. Since :func:`copytree` is called recursively,
the ignore callable will be called once for each directory that is
copied. The callable must return a sequence of directory and file
names relative to the current directory (i.e. a subset of the
items in its second argument); these names will then be ignored in
the copy process. :func:`shutil.ignore_patterns` can be used to
create such a callable that ignores names based on glob-style
patterns.
If exception(s) occur, a :exc:`shutil.Error` is raised with a list
of reasons.
.. note:: This is a plain copy of the :func:`shutil.copytree`
implementation as provided with Python >= 2.6. There is,
however, one difference: this version will try to go on
if the destination directory already exists.
It is the callers responsibility to make sure that the
`dst` directory is in a proper state for
:func:`copytree`.
"""
if src in dst:
raise ValueError("Cannot copy %s to %s: trees are nested" % (
src, dst))
names = os.listdir(src)
if ignore is not None:
ignored_names = ignore(src, names)
else:
ignored_names = set()
try:
os.makedirs(dst)
except os.error:
pass
errors = []
for name in names:
if name in ignored_names:
continue
srcname = os.path.join(src, name)
dstname = os.path.join(dst, name)
try:
if symlinks and os.path.islink(srcname):
linkto = os.readlink(srcname)
                os.symlink(linkto, dstname)
elif os.path.isdir(srcname):
|
copytree(srcname, dstname, symlinks, ignore)
else:
shutil.copy2(srcname, dstname)
# XXX What about devices, sockets etc.?
except (IOError, os.error) as why:
errors.append((srcname, dstname, str(why)))
# catch the Error from the recursive copytree so that we can
# continue with other files
except (shutil.Error) as why: # pragma: no cover
errors.append((srcname, dstname, str(why)))
try:
shutil.copystat(src, dst)
except (OSError) as why: # pragma: no cover
errors.extend((src, dst, str(why)))
if errors:
raise shutil.Error(errors)
def copy_to_secure_location(src):
"""Copy `src` to a temporay location.
If `src` is a file, the complete directory containing this file
will be copied. If `src` is a directory this directory will be
copied.
Returns the path of the newly created directory.
To copy the filetree we use :func:`shutil.copytree` with no
additional parameters. That means that symlinks won't be copied
and other restrictions apply. See :func:`shutil.copytree` docs to
check.
"""
if os.path.isfile(src):
src = os.path.dirname(src)
dst = tempfile.mkdtemp()
copytree(src, dst)
return dst
def get_entry_points(group):
"""Get all entry point plugins registered for group `group`.
The found entry points are returned as a dict with ``<NAME>`` as
key and ``<PLUGIN>`` as value where ``<NAME>`` is the name under
which the respective plugin was registered with setuptools and
``<PLUGIN>`` is the registered component itself.
"""
return dict(
[(x.name, x.load())
for x in iter_entry_points(group=group)])
def unzip(path, dst_dir):
"""Unzip the files stored in zipfile `path` in `dst_dir`.
`dst_dir` is the directory where all contents of the ZIP file is
stored into.
"""
zf = zipfile.ZipFile(path)
# Create all dirs
dirs = sorted([name for name in zf.namelist() if name.endswith('/')])
for dir in dirs:
new_dir = os.path.join(dst_dir, dir)
if not os.path.exists(new_dir):
os.mkdir(new_dir)
# Create all files
for name in zf.namelist():
if name.endswith('/'):
continue
outfile = open(os.path.join(dst_dir, name), 'wb')
outfile.write(zf.read(name))
outfile.flush()
outfile.close()
zf.close()
return
def zip(path):
"""Create a ZIP file out of `path`.
If `path` points to a file then a ZIP archive is created with this
file in compressed form in a newly created directory. The name of
the created zipfile is the basename of the input file with a
``.zip`` extension appended.
If `path` points to a directory then files and directories
_inside_ this directory are added to the archive.
Also empty directories are added although it cannot be guaranteed
that these entries are recovered correctly later on with all tools
and utilities on all platforms.
.. note:: It is the callers responsibility to remove the directory
the zipfile is created in after usage.
"""
if not os.path.isdir(path) and not os.path.isfile(path):
raise ValueError('Must be an existing path or directory: %s' % path)
new_dir = tempfile.mkdtemp()
basename = os.path.basename(path)
new_path = os.path.join(new_dir, basename) + '.zip'
zout = zipfile.ZipFile(new_path, 'w', zipfile.ZIP_DEFLATED)
if os.path.isfile(path):
zout.write(path, basename)
zout.close()
return new_path
for root, dirs, files in os.walk(path):
for dir in dirs:
# XXX: Maybe the wrong way to store directories?
dir_path = os.path.join(root, dir)
arc_name = dir_path[len(path) + 1:] + '/'
info = zipfile.ZipInfo(arc_name)
zout.writestr(info, '')
for file in files:
file_path = os.path.join(root, file)
arc_name = file_path[len(path) + 1:]
|
sposs/DIRAC
|
WorkloadManagementSystem/PilotAgent/__init__.py
|
Python
|
gpl-3.0
| 215 | 0.004651 |
############################################################
# $HeadURL$
############################################################
"""
 DIRAC.WorkloadManagementSystem.PilotAgent package
"""
__RCSID__ = "$Id$"
|
wibeasley/mayan-playground-1
|
teagen/chapter_02/calculations_and_variables.py
|
Python
|
apache-2.0
| 306 | 0.009804 |
print("-------------- assigning numbers -----------")
fred =100
print(fred)
print(fred)
fred = 200
print(fred)
print(fred)
john = fred
fred = john
print("-------------- assigning letters -----------")
adam = "jj"
print(adam)
print("-------------- assign
|
ing coins -----------")
number_of_coins = 200
|
citrix-openstack-build/cinder
|
cinder/scheduler/simple.py
|
Python
|
apache-2.0
| 3,857 | 0 |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2010 OpenStack, LLC.
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Simple Scheduler
"""
from cinder import db
from cinder import exception
from cinder import flags
from cinder.openstack.common import cfg
from cinder.scheduler import chance
from cinder.scheduler import driver
from cinder import utils
simple_scheduler_opts = [
cfg.IntOpt("max_gigabytes",
default=10000,
help="maximum number of volume gigabytes to allow per host"), ]
FLAGS = flags.FLAGS
FLAGS.register_opts(simple_scheduler_opts)
class SimpleScheduler(chance.ChanceScheduler):
"""Implements Naive Scheduler that tries to find least loaded host."""
def schedule_create_volume(self, context, request_spec, filter_properties):
"""Picks a host that is up and has the fewest volumes."""
elevated = context.elevated()
volume_id = request_spec.get('volume_id')
snapshot_id = request_spec.get('snapshot_id')
image_id = request_spec.get('image_id')
volume_properties = request_spec.get('volume_properties')
volume_size = volume_properties.get('size')
availability_zone = volume_properties.get('availability_zone')
zone, host = None, None
if availability_zone:
zone, _x, host = availability_zone.partition(':')
if host and context.is_admin:
topic = FLAGS.volume_topic
service = db.service_get_by_args(elevated, host, topic)
            if not utils.service_is_up(service):
raise exception.WillNotSchedule(host=host)
updated_volume = driver.volume_update_db(context, volume_id, host)
self.volume_rpcapi.create_volume(context,
updated_volume,
host,
|
snapshot_id,
image_id)
return None
results = db.service_get_all_volume_sorted(elevated)
if zone:
results = [(service, gigs) for (service, gigs) in results
if service['availability_zone'] == zone]
for result in results:
(service, volume_gigabytes) = result
if volume_gigabytes + volume_size > FLAGS.max_gigabytes:
msg = _("Not enough allocatable volume gigabytes remaining")
raise exception.NoValidHost(reason=msg)
if utils.service_is_up(service) and not service['disabled']:
updated_volume = driver.volume_update_db(context, volume_id,
service['host'])
self.volume_rpcapi.create_volume(context,
updated_volume,
service['host'],
snapshot_id,
image_id)
return None
msg = _("Is the appropriate service running?")
raise exception.NoValidHost(reason=msg)
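The scheduler above accepts an availability zone of the form "zone:host"; a small hedged illustration of the partition call it relies on (names are invented):
# Hedged illustration of the availability-zone parsing used above.
for az in ('nova:volume-host-1', 'nova'):
    zone, _sep, host = az.partition(':')
    print(zone, host or '<no explicit host>')
# -> nova volume-host-1
# -> nova <no explicit host>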
|
zstackio/zstack-woodpecker
|
integrationtest/vm/virtualrouter/ceph_pool_capacity/test_ceph_pool_cap_crt_vm_image.py
|
Python
|
apache-2.0
| 989 | 0.001011 |
'''
New Integration Test for Ceph Pool Capacity.
@author: Legion
'''
import zstackwoodpecker.test_util as test_util
import zstackwoodpecker.test_lib as test_lib
import zstackwoodpecker.test_state as test_state
import time
test_stub = test_lib.lib_get_test_stub()
test_obj_dict = test_state.TestStateDict()
pool_cap = test_stub.PoolCapacity()
def test():
pool_cap.get_bs()
pool_cap.create_vm()
test_obj_dict.add_vm(pool_cap.vm)
pool_cap.crt_vm_image(pool_cap.bs)
time.sleep(300)
pool_cap.get_bs()
used1 = pool_cap.bs.poolUsedCapacity
avail1 = pool_cap.bs.poolAvailableCapacity
|
pool_cap.check_pool_cap([used1, avail1], bs=True)
pool_cap.vm.destroy()
test_obj_dict.rm_vm(pool_cap.vm)
test_lib.lib_robot_cleanup(test_obj_dict)
test_util.test_pass('Ceph Image Pool Capacity Test Success')
|
#Will be called only if exception happens in test().
def error_cleanup():
global test_obj_dict
test_lib.lib_error_cleanup(test_obj_dict)
|
MungoRae/home-assistant
|
homeassistant/components/sensor/bme280.py
|
Python
|
apache-2.0
| 6,473 | 0 |
"""
Support for BME280 temperature, humidity and pressure sensor.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/sensor.bme280/
"""
import asyncio
from datetime import timedelta
from functools import partial
import logging
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
import homeassistant.helpers.config_validation as cv
from homeassistant.const import (
TEMP_FAHRENHEIT, CONF_NAME, CONF_MONITORED_CONDITIONS)
from homeassistant.helpers.entity import Entity
from homeassistant.util import Throttle
from homeassistant.util.temperature import celsius_to_fahrenheit
REQUIREMENTS = ['i2csense==0.0.4',
'smbus-cffi==0.5.1']
_LOGGER = logging.getLogger(__name__)
CONF_I2C_ADDRESS = 'i2c_address'
CONF_I2C_BUS = 'i2c_bus'
CONF_OVERSAMPLING_TEMP = 'oversampling_temperature'
CONF_OVERSAMPLING_PRES = 'oversampling_pressure'
CONF_OVERSAMPLING_HUM = 'oversampling_humidity'
CONF_OPERATION_MODE = 'operation_mode'
CONF_T_STANDBY = 'time_standby'
CONF_FILTER_MODE = 'filter_mode'
CONF_DELTA_TEMP = 'delta_temperature'
DEFAULT_NAME = 'BME280 Sensor'
DEFAULT_I2C_ADDRESS = '0x76'
DEFAULT_I2C_BUS = 1
DEFAULT_OVERSAMPLING_TEMP = 1 # Temperature oversampling x 1
DEFAULT_OVERSAMPLING_PRES = 1 # Pressure oversampling x 1
DEFAULT_OVERSAMPLING_HUM = 1 # Humidity oversampling x 1
DEFAULT_OPERATION_MODE = 3 # Normal mode (forced mode: 2)
DEFAULT_T_STANDBY = 5 # Tstandby 5ms
DEFAULT_FILTER_MODE = 0 # Filter off
DEFAULT_DELTA_TEMP = 0.
MIN_TIME_BETWEEN_UPDATES = timedelta(seconds=3)
SENSOR_TEMP = 'temperature'
SENSOR_HUMID = 'humidity'
SENSOR_PRESS = 'pressure'
SENSOR_TYPES = {
SENSOR_TEMP: ['Temperature', None],
SENSOR_HUMID: ['Humidity', '%'],
SENSOR_PRESS: ['Pressure', 'mb']
}
DEFAULT_MONITORED = [SENSOR_TEMP, SENSOR_HUMID, SENSOR_PRESS]
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_I2C_ADDRESS, default=DEFAULT_I2C_ADDRESS): cv.string,
vol.Optional(CONF_MONITORED_CONDITIONS, default=DEFAULT_MONITORED):
vol.All(cv.ensure_list, [vol.In(SENSOR_TYPES)]),
vol.Optional(CONF_I2C_BUS, default=DEFAULT_I2C_BUS): vol.Coerce(int),
vol.Optional(CONF_OVERSAMPLING_TEMP,
default=DEFAULT_OVERSAMPLING_TEMP): vol.Coerce(int),
vol.Optional(CONF_OVERSAMPLING_PRES,
default=DEFAULT_OVERSAMPLING_PRES): vol.Coerce(int),
vol.Optional(CONF_OVERSAMPLING_HUM,
default=DEFAULT_OVERSAMPLING_HUM): vol.Coerce(int),
vol.Optional(CONF_OPERATION_MODE,
default=DEFAULT_OPERATION_MODE): vol.Coerce(int),
vol.Optional(CONF_T_STANDBY,
default=DEFAULT_T_STANDBY): vol.Coerce(int),
vol.Optional(CONF_FILTER_MODE,
default=DEFAULT_FILTER_MODE): vol.Coerce(int),
vol.Optional(CONF_DELTA_TEMP,
default=DEFAULT_DELTA_TEMP): vol.Coerce(float),
})
# pylint: disable=import-error
@asyncio.coroutine
def async_setup_platform(hass, config, async_add_devices, discovery_info=None):
"""Set up the BME280 sensor."""
import smbus
from i2csense.bme280 import BME280
SENSOR_TYPES[SENSOR_TEMP][1] = hass.config.units.temperature_unit
name = config.get(CONF_NAME)
i2c_address = config.get(CONF_I2C_ADDRESS)
bus = smbus.SMBus(config.get(CONF_I2C_BUS))
sensor = yield from hass.async_add_job(
partial(BME280, bus, i2c_address,
osrs_t=config.get(CONF_OVERSAMPLING_TEMP),
osrs_p=config.get(CONF_OVERSAMPLING_PRES),
osrs_h=config.get(CONF_OVERSAMPLING_HUM),
mode=config.get(CONF_OPERATION_MODE),
t_sb=config.get(CONF_T_STANDBY),
filter_mode=config.get(CONF_FILTER_MODE),
delta_temp=config.get(CONF_DELTA_TEMP),
logger=_LOGGER)
)
if not sensor.sample_ok:
_LOGGER.error("BME280 sensor not detected at %s", i2c_address)
return False
sensor_handler = yield from hass.async_add_job(BME280Handler, sensor)
dev = []
try:
for variable in config[CONF_MONITORED_CONDITIONS]:
dev.append(BME280Sensor(
sensor_handler, variable, SENSOR_TYPES[variable][1], name))
except KeyError:
pass
async_add_devices(dev)
class BME280Handler:
"""BME280 sensor working in i2C bus."""
def __init__(self, sensor):
"""Initialize the sensor handler."""
self.sensor = sensor
self.update(True)
@Throttle(MIN_TIME_BETWEEN_UPDATES)
def update(self, first_reading=False):
"""Read sensor data."""
self.sensor.update(first_reading)
class BME280Sensor(Entity):
"""Implementation of the BME280 sensor."""
def __init__(self, bme280_client, sensor_type, temp_unit, name):
"""Initialize the sensor."""
self.client_name = name
self._name = SENSOR_TYPES[sensor_type][0]
self.bme280_client = bme280_client
self.temp_unit = temp_unit
self.type = sensor_type
self._state = None
self._unit_of_measurement = SENSOR_TYPES[sensor_type][1]
@property
def name(self):
"""Return the name of the sensor."""
return '{} {}'.format(self.client_name, self._name)
@property
def state(self):
|
"""Return the state of the sensor."""
return self._state
@property
def unit_of_measurement(self):
"""Return the unit of measurement of the sensor."""
        return self._unit_of_measurement
@asyncio.coroutine
def async_update(self):
"""Get the latest data from the BME280 and update the states."""
yield from self.hass.async_add_job(self.bme280_client.update)
if self.bme280_client.sensor.sample_ok:
if self.type == SENSOR_TEMP:
temperature = round(self.bme280_client.sensor.temperature, 1)
if self.temp_unit == TEMP_FAHRENHEIT:
temperature = round(celsius_to_fahrenheit(temperature), 1)
self._state = temperature
elif self.type == SENSOR_HUMID:
self._state = round(self.bme280_client.sensor.humidity, 1)
elif self.type == SENSOR_PRESS:
self._state = round(self.bme280_client.sensor.pressure, 1)
else:
_LOGGER.warning("Bad update of sensor.%s", self.name)
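async_update above rounds the raw reading and converts it when the configured unit is Fahrenheit; a tiny hedged arithmetic sketch (the reading is made up and the conversion is done inline rather than via homeassistant's helper):
temperature_c = 21.37                            # hypothetical BME280 reading
temperature_f = round(temperature_c * 9 / 5 + 32, 1)
print(temperature_f)                             # 70.5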
|
DBeath/flask-feedrsub
|
feedrsub/utils/profiler.py
|
Python
|
mit
| 386 | 0 |
import cProfile
import StringIO
import pstats
import contextlib
@contextlib.contextmanager
def profiled():
pr = cProfile.Profile()
pr.enable()
yield
pr.disable()
s = StringIO.StringIO()
ps = pstats.Stats(pr, stream=s).sort_stats("cumulative")
ps.print_stats()
# uncomment this to see who's calling what
# ps.print_callers()
    print(s.getvalue())
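A hedged usage sketch of the context manager above: wrap any block and the sorted cProfile stats are printed when it exits.
def busy():
    return sum(i * i for i in range(100000))
with profiled():
    busy()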
|
aspose-cells/Aspose.Cells-for-Cloud
|
Examples/Python/Examples/GetMergedCellFromWorksheet.py
|
Python
|
mit
| 1,392 | 0.010057 |
import asposecellscloud
from asposecellscloud.CellsApi import CellsApi
from asposecellscloud.CellsApi import ApiException
import asposestoragecloud
from asposestoragecloud.StorageApi import StorageApi
apiKey = "XXXXX" #sepcify App Key
appSid = "XXXXX" #sepcify App SID
apiServer = "http://api.aspose.com/v1.1"
data_folder = "../../data/"
#Instantiate Aspose Storage API SDK
storage_apiClient = asposestoragecloud.ApiClient.ApiClient(apiKey, appSid, True)
storageApi = StorageApi(storage_apiClient)
#Instantiate Aspose Cells API SDK
api_client = asposecellscloud.ApiClient.ApiClient(apiKey, appSid, True)
cellsApi = CellsApi(api_client);
#set input file name
filename = "Sample_Test_Book.xls"
sheetName = "Sheet1"
mergedCellIndex = 0
#upload file to aspose cloud storage
#storageApi.PutCreate(Path=filename, file=data_folder + filename)
try:
#invoke Aspose.Cells Cloud SDK API to get merged cells from a worksheet
response = cellsApi.GetWorkSheetMergedCell(name=filename, sheetName=sheetName, mergedCellIndex=mergedCellIndex)
if response.Status == "OK":
mergedCell = response.MergedCell
print "Merge Start Column :: " + str(mergedCell.StartColumn)
print "Merge End Column :: " + str(mergedCell.EndColumn)
except ApiException as ex:
print "ApiException:"
print "Code:" + str(ex.code)
print "Message:" + ex.message
|
simphony/simphony-remote
|
remoteappmanager/cli/remoteappadmin/__main__.py
|
Python
|
bsd-3-clause
| 1,056 | 0 |
import sys # pragma: no cover
from remoteappmanager.command_line_config import (
CommandLineConfig) # pragma: no cover
from remoteappmanager.environment_config import (
    EnvironmentConfig)  # pragma: no cover
from remoteappmanager.file_config import FileConfig # pragma: no cover
|
from tornado.options import print_help # pragma: no cover
from remoteappmanager.admin_application import (
AdminApplication) # pragma: no cover
def main(): # pragma: no cover
try:
command_line_config = CommandLineConfig()
command_line_config.parse_config()
file_config = FileConfig()
if command_line_config.config_file:
file_config.parse_config(command_line_config.config_file)
environment_config = EnvironmentConfig()
environment_config.parse_config()
except Exception as e:
print_help()
print("Error: {}".format(e))
sys.exit(1)
app = AdminApplication(
command_line_config,
file_config,
environment_config)
app.start()
|
dreamibor/Algorithms-and-Data-Structures-Using-Python
|
practice/implementation/linked_list/merge_k_sorted_list.py
|
Python
|
gpl-3.0
| 4,308 | 0.009749 |
"""
Linked List: Merge K Sorted Lists (hard)
Description:
You are given an array of k linked-lists lists, each linked-list is
sorted in ascending order.
Merge all the linked-lists into one sorted linked-list and return it.
Example:
Input: lists = [[1,4,5],[1,3,4],[2,6]]
Output: [1,1,2,3,4,4,5,6]
Explanation: The linked-lists are:
[
1->4->5,
    1->3->4,
2->6
]
    merging them into one sorted list:
1->1->2->3->4->4->5->6
Solutions:
1. Brute force - Add all nodes in all lists into one list and then sort.
2. Brute force - Merge list one by one.
3. Divide and conquer - Merge half and half's half, ...
4. Priority queue - Get minimum element each time.
Notes:
    For the priority queue based method, we need the pushed ListNode objects
    to be comparable (e.g. via __lt__), otherwise push (value, counter, node)
    tuples so heapq can order the entries.
LeetCode Link: https://leetcode-cn.com/problems/merge-k-sorted-lists/
"""
from linked_list import ListNode, create_linked_list, traverse
import heapq
# import sys
# sys.setrecursionlimit(2000)
def merge_two_sorted_lists(l1: ListNode, l2: ListNode) -> ListNode:
""" Merge two sorted lists.
"""
if not l1: return l2
if not l2: return l1
if l1.val <= l2.val:
l1.next = merge_two_sorted_lists(l1.next, l2)
return l1
else:
l2.next = merge_two_sorted_lists(l1, l2.next)
return l2
def merge(input_lists: list, left: int, right: int) -> ListNode:
""" Divide and Conquer - divide the input lists into half and
process them and then merge them together.
"""
if left == right: return input_lists[left]
mid = left + (right - left) // 2
l1 = merge(input_lists, left, mid)
l2 = merge(input_lists, mid+1, right)
return merge_two_sorted_lists(l1, l2)
def merge_k_lists_divide_and_conquer(input_lists: list) -> ListNode:
""" Solution - Divide and Conquer
We can merge lists in pairs, suppose we have k lists at the beginning,
then we can merge list pairs for the first round, so we will have k/2
merged lists, repeat the process, until we have the final one sorted list.
Time Complexity - O(kn*logk) - the first round merging k/2 pair of lists,
the time complexity is O(2n) for each pair, the second round merging k/4
pair of lists, and the time complexty for each pair is O(4n), ... in total
the time complexity is O(kn*logk).
Space Complexity - O(logk) - for recursion stack.
"""
if not input_lists: return
n = len(input_lists)
return merge(input_lists, 0, n-1)
def merge_k_sorted_lists_heapq(input_lists: list) -> ListNode:
""" Solution - Min Heap
We first insert the first element (also smallest as the lists are sorted) of each
linked list in a min heap. After this, we can take out the smallest element from
the heap and add it to the merged list. After removing the smallest element from the
heap, we can insert the next element of the same list into the heap. Repeat previous
steps to populate the merged list in sorted order.
Time Complexity - O(kn*logk) - the number of elements in the priority queue will be
less than k, so the time complexity for insertion and deletion will be O(logk), there
are at most k*n elements (every node is inserted and deleted once), so the total time
complexity will be O(kn*logk)
Space Complexity - O(k) - for the priority queue (min-heap).
"""
dummy = ListNode(-1)
current = dummy
min_heap = []
# Put the root of each list in the min heap
for root in input_lists:
if root:
heapq.heappush(min_heap, root)
# Pop the smallest element from the min heap and add it to the result sorted list.
while min_heap:
node = heapq.heappop(min_heap)
current.next = node
current = current.next
# If the element poped still have next node, then add it into the heap.
if node.next:
heapq.heappush(min_heap, node.next)
return dummy.next
if __name__ == "__main__":
l1 = create_linked_list([2,6,8])
l2 = create_linked_list([3,6,7])
l3 = create_linked_list([1,3,4])
new_list = merge_k_lists_divide_and_conquer([l1, l2, l3])
print(traverse(new_list))
l1 = create_linked_list([1,4,5])
l2 = create_linked_list([1,3,4])
l3 = create_linked_list([2,6])
result = merge_k_sorted_lists_heapq([l1, l2, l3])
print(traverse(result))
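A hedged aside: when the inputs are ordinary sorted iterables rather than the custom ListNode chains, the standard library already provides the same k-way merge.
import heapq
print(list(heapq.merge([1, 4, 5], [1, 3, 4], [2, 6])))
# -> [1, 1, 2, 3, 4, 4, 5, 6]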
|
magicgoose/dr14_t.meter
|
dr14tmeter/table.py
|
Python
|
gpl-3.0
| 16,975 | 0.043122 |
# dr14_t.meter: compute the DR14 value of the given audiofiles
# Copyright (C) 2011 Simone Riva
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
import sys
def float_formatter( el ):
if abs( el ) >= 1.0 :
return "%.2f" % el
else :
return "%.2E" % el
def default_formatter( el ):
if sys.version_info[0] == 2:
return unicode( el )
else:
return str( el )
def string_formatter( el ):
if sys.version_info[0] == 2:
return unicode( el )
else:
return str( el )
class Table:
def __init__(self):
self.__float_format = "%.2f"
self.__col_cnt = 5
self.__ini_txt = ""
self.__txt = ""
self.__formatter = {}
self.add_formatter( float , float_formatter )
self.add_formatter( str , string_formatter )
if sys.version_info[0] == 2:
self.add_formatter( unicode , string_formatter )
def _get_txt(self):
return self.__txt
def _set_txt( self , txt ):
self.__txt = txt
def _append_txt( self , txt ):
self.__txt += txt
def init_txt(self, txt = "" ):
self.__ini_txt = txt
def get_init_txt(self):
return self.__ini_txt
    def new_table( self ):
        raise NotImplementedError(" %s : is virtual and must be overridden." % sys._getframe().f_code.co_name )
    def end_table( self ):
        raise NotImplementedError(" %s : is virtual and must be overridden." % sys._getframe().f_code.co_name )
def write_table(self):
return self.__ini_txt + self._get_txt()
def nl(self):
if sys.platform.startswith('linux') or sys.platform.startswith('darwin'):
return '\n'
elif sys.platform.startswith('win'):
return '\n\r'
def add_formatter( self , _type , formatter ):
self.__formatter[_type] = formatter
def format_element( self , el ):
return self.__formatter.get( type(el) , default_formatter )( el )
def append_row( self , row_el , cell_type='d'):
if cell_type == 'd':
n_cell = self.new_cell
e_cell = self.end_cell
elif cell_type == 'h':
n_cell = self.new_hcell
e_cell = self.end_hcell
self.new_row()
for i in row_el:
n_cell()
self.add_value( i )
e_cell()
self.end_row()
def get_col_cnt( self ):
return self.__col_cnt
def set_col_cnt( self , col_cnt ):
self.__col_cnt = col_cnt
col_cnt = property( get_col_cnt , set_col_cnt )
def append_separator_line( self ):
self._append_txt( self.format_element( "" ) )
def append_closing_line( self ):
self._append_txt( self.format_element( "" ) )
def append_empty_line( self ):
self.append_row( [ "" ]*self.col_cnt )
def add_title( self , title ):
        raise NotImplementedError(" %s : is virtual and must be overridden." % sys._getframe().f_code.co_name )
def add_value( self , val ):
self._append_txt( self.format_element(val) )
def new_head( self ):
self._append_txt( self.format_element( "" ) )
def end_head( self ):
self._append_txt( self.format_element( "" ) )
def new_tbody( self ):
self._append_txt( self.format_element( "" ) )
def end_tbody( self ):
self._append_txt( self.format_element( "" ) )
def new_foot( self ):
self._append_txt( self.format_element( "" ) )
def end_foot( self ):
self._append_txt( self.format_element( "" ) )
def new_row( self ):
NotImplementedError(" %s : is virutal and must be overridden." % sys._getframe().f_code.co_name )
def end_row( self ):
NotImplementedError(" %s : is virutal and must be overridden." % sys._getframe().f_code.co_name )
def new_cell( self ):
NotImplementedError(" %s : is virutal and must be overridden." % sys._getframe().f_code.co_name )
def end_cell( self ):
NotImplementedError(" %s : is virutal and must be overridden." % sys._getframe().f_code.co_name )
def new_hcell( self ):
return self.new_cell()
def end_hcell( self):
return self.end_cell()
def new_bold( self ):
NotImplementedError(" %s : is virutal and must be overridden." % sys._getframe().f_code.co_name )
def end_bold( self ):
NotImplementedError(" %s : is virutal and must be overridden." % sys._getframe().f_code.co_name )
class TextTable ( Table ):
def append_separator_line( self ):
self.append_row( [ "----------------------------------------------------------------------------------------------" ] )
def append_closing_line( self ):
self.append_row( [ "==============================================================================================" ] )
|
def append_empty_line( self ):
self.append_row( [ "" ] )
def add_title( self , title ):
self._append_txt( title + self.nl() )
|
def new_table( self ):
self._set_txt("")
def end_table( self ):
self._append_txt( self.nl() )
def new_row( self ):
self._append_txt("")
def end_row( self ):
self._append_txt( self.nl() )
def new_cell( self ):
self._append_txt("")
def end_cell( self ):
self._append_txt( "\t" )
def new_bold( self ):
self._append_txt("")
def end_bold( self ):
self._append_txt("")
class BBcodeTable ( Table ):
def append_separator_line( self ):
self.append_row( [ "------------" ] * self.col_cnt )
def append_closing_line( self ):
self.append_row( [ "============" ] * self.col_cnt )
def add_title( self , title ):
self._append_txt( self.nl() + "[tr]" + self.nl() + " [td colspan=%d] " % self.col_cnt + title + " [/td] " + self.nl() + "[/tr]" + self.nl() )
def new_table( self ):
self._set_txt("")
self._append_txt( '[table]' + self.nl() )
def end_table( self ):
self._append_txt( self.nl() + '[/table]' + self.nl() )
def new_row( self ):
self._append_txt( self.nl() + '[tr]' + self.nl() )
def end_row( self ):
self._append_txt( self.nl() + '[/tr]' + self.nl() )
def new_cell( self ):
self._append_txt( ' [td]' )
def end_cell( self ):
self._append_txt( '[/td]' )
def new_bold( self ):
self._append_txt( '[b]' )
def end_bold( self ):
self._append_txt( '[/b]' )
class HtmlTable ( Table ):
def add_title( self , title ):
self._append_txt( self.nl() + "<tr>" + self.nl() + " <th colspan=\"%d\" > " % self.col_cnt + title + "</th>" + self.nl() + "</tr>" + self.nl() )
def new_table( self ):
self._set_txt("")
self._append_txt( "<table>" + self.nl() )
def end_table( self ):
self._append_txt( self.nl() + "</table>" + self.nl() )
def new_head( self ):
self._append_txt( self.nl() + "<thead>" + self.nl() )
def end_head( self ):
self._append_txt( self.nl() + "</thead>" + self.nl() )
def new_tbody( self ):
self._append_txt( self.nl() + "<tbody>" + self.nl() )
def end_tbody( self ):
self._append_txt( self.nl() + "</tbody>" + self.nl() )
|
superdesk/Live-Blog
|
plugins/livedesk-sync/livedesk/api/blog_sync.py
|
Python
|
agpl-3.0
| 3,125 | 0.0144 |
'''
Created on April 26, 2013
@package: livedesk
@copyright: 2013 Sourcefabric o.p.s.
@license: http://www.gnu.org/licenses/gpl-3.0.txt
@author: Mugur Rus
API specifications for liveblog sync.
'''
from ally.support.api.entity import Entity, IEntityService, QEntity
from livedesk.api.blog import Blog
from datetime import datetime
from livedesk.api.domain_livedesk import modelLiveDesk
from ally.api.config import query, service, LIMIT_DEFAULT, call, UPDATE
from ally.api.criteria import AsRangeOrdered, AsDateTimeOrdered, AsBoolean
from superdesk.source.api.source import Source
from ally.api.type import Iter
from superdesk.source.api.type import SourceType
# --------------------------------------------------------------------
@modelLiveDesk(name='Sync')
class BlogSync(Entity):
'''
    Provides the blog sync model. It is used for all kinds of blog sync, currently chained blogs and SMS
'''
Blog = Blog
Source = Source
CId = int
LastActivity = datetime
Auto = bool
# --------------------------------------------------------------------
@query(BlogSync)
class QBlogSync(QEntity):
'''
Provides the query for BlogSync.
'''
cId = AsRangeOrdered
lastActivity = AsDateTimeOrdered
auto = AsBoolean
# --------------------------------------------------------------------
@service((Entity, BlogSync), (QEntity, QBlogSync))
class IBlogSyncService(IEntityService):
'''
Provides the service methods for the blog sync.
'''
@call(webName="checkTimeout", method=UPDATE)
def checkTimeout(self, blogSyncId:BlogSync.Id, timeout:int) -> bool:
'''
        Returns true if the last activity is older than the timeout; if so, the last activity value is updated as well
'''
@call
def getBySourceType(self, sourceType:SourceType.Key, offset:int=None, limit:int=LIMIT_DEFAULT, detailed:bool=True, q:QBlogSync=None) -> Iter(BlogSync):
'''
Returns the list of blog sync models for source type.
@param sourceType: SourceType.Key
The source(provider) identifier
@param offset: integer
The offset to retrieve the entities from.
@param limit: integer
The limit of entities to retrieve.
@param detailed: boolean
If true will present the total count, limit and offset for the partially returned collection.
@param q: QBlogSync
The query to search by.
'''
@call
def getByBlog(self, blogId:Blog.Id, offset:int=None, limit:int=LIMIT_DEFAULT, detailed:bool=True, q:QBlogSync=None) -> Iter(BlogSync):
'''
Returns the list of blog sync models for blog.
@param blogId: Blog.Id
The blog id
@param offset: integer
The offset to retrieve the entities from.
@param limit: integer
The limit of entities to retrieve.
@param detailed: boolean
If true will present the total count, limit and offset for the partially returned collection.
@param q: QBlogSync
The query to search by.
'''
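A hedged call sketch against the service specification above; obtaining a concrete IBlogSyncService (normally done through Ally's IoC wiring) is outside this file, so blogSyncService below is a placeholder:
# blogSyncService: assumed to be an injected IBlogSyncService implementation.
q = QBlogSync()
q.auto = True                                    # only automatically synced entries
for sync in blogSyncService.getByBlog(blogId=1, offset=0, limit=10, q=q):
    refreshed = blogSyncService.checkTimeout(sync.Id, timeout=60)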
|
jrugis/cell_mesh
|
mesh2vtk.py
|
Python
|
gpl-3.0
| 2,909 | 0.024751 |
#!/usr/bin/python
import numpy as np
mdir = "mesh3d/"
fname = "out_p6-p4-p8"
####################
print "input mesh data file"
f1 = open(mdir+fname+".mesh", 'r')
for line in f1:
    if line.startswith("Vertices"): break
pcount = int(f1.next())
xyz = np.empty((pcount, 3), dtype=np.float)
for t in range(pcount):
    xyz[t] = map(float,f1.next().split()[0:3])
for line in f1:
if line.startswith("Triangles"): break
trisc = int(f1.next())
tris = np.empty((trisc,4), dtype=int)
for t in range(trisc):
tris[t] = map(int,f1.next().split())
for line in f1:
if line.startswith("Tetrahedra"): break
tetsc = int(f1.next())
tets = np.empty((tetsc,5), dtype=int)
for t in range(tetsc):
tets[t] = map(int,f1.next().split())
f1.close()
####################
print "identify geometry"
ftype = [('v0', np.int),('v1', np.int),('v2', np.int),('label', 'S2')]
faces = np.empty(trisc/2, dtype=ftype)
for i in range(len(faces)):
faces[i] = (tris[2*i][0],tris[2*i][1],tris[2*i][2],str(tris[2*i][3])+str(tris[2*i+1][3]))
face_list,face_count = np.unique(faces['label'], return_counts=True)
vtype = [('v0', np.int),('v1', np.int),('v2', np.int),('v3', np.int),('label', 'S1')]
vols = np.empty(tetsc, dtype=vtype)
for i in range(tetsc):
vols[i] = (tets[i][0],tets[i][1],tets[i][2],tets[i][3],str(tets[i][4]))
vol_list,vol_count = np.unique(vols['label'], return_counts=True)
####################
print "output vtk data files for faces"
for i, f in enumerate(face_list):
f2 = open(mdir+fname+"_"+face_list[i]+".vtk", 'w')
f2.write("# vtk DataFile Version 2.0\n")
f2.write("mesh data\n")
f2.write("ASCII\n")
f2.write("DATASET UNSTRUCTURED_GRID\n")
f2.write("POINTS "+str(pcount)+" float\n") # overkill, all points!
for v in xyz:
f2.write(str(v[0]-35.33)+' '+str(35.33-v[1])+' '+str(12.36-v[2])+'\n')
f2.write("CELLS "+str(face_count[i])+" "+str(face_count[i]*4)+"\n")
for v in faces:
if v[3] == f:
f2.write("3 "+str(v[0]-1)+' '+str(v[1]-1)+' '+str(v[2]-1)+'\n')
f2.write("CELL_TYPES "+str(face_count[i])+"\n")
for t in range(face_count[i]): f2.write("5 ")
f2.write("\n")
f2.close()
####################
print "output vtk data files for volumes"
for i, f in enumerate(vol_list):
f2 = open(mdir+fname+"_"+vol_list[i]+".vtk", 'w')
f2.write("# vtk DataFile Version 2.0\n")
f2.write("mesh data\n")
f2.write("ASCII\n")
f2.write("DATASET UNSTRUCTURED_GRID\n")
f2.write("POINTS "+str(pcount)+" float\n") # overkill, all points!
for v in xyz:
f2.write(str(v[0]-35.33)+' '+str(35.33-v[1])+' '+str(12.36-v[2])+'\n')
f2.write("CELLS "+str(vol_count[i])+" "+str(vol_count[i]*5)+"\n")
for v in vols:
if v[4] == f:
f2.write("4 "+str(v[0]-1)+' '+str(v[1]-1)+' '+str(v[2]-1)+' '+str(v[3]-1)+'\n')
f2.write("CELL_TYPES "+str(vol_count[i])+"\n")
for t in range(vol_count[i]): f2.write("10 ")
f2.write("\n")
f2.close()
####################
|
blondegeek/pymatgen
|
pymatgen/__init__.py
|
Python
|
mit
| 3,203 | 0.002498 |
import sys
import os
import warnings
import ruamel.yaml as yaml
from fnmatch import fnmatch
__author__ = "Pymatgen Development Team"
__email__ ="pymatgen@googlegroups.com"
__maintainer__ = "Shyue Ping Ong"
__maintainer_email__ ="shyuep@gmail.com"
__version__ = "2019.7.2"
SETTINGS_FILE = os.path.join(os.path.expanduser("~"), ".pmgrc.yaml")
def _load_pmg_settings():
try:
with open(SETTINGS_FILE, "rt") as f:
d = yaml.safe_load(f)
except IOError:
# If there are any errors, default to using environment variables
# if present.
d = {}
for k, v in os.environ.items():
if k.startswith("PMG_"):
d[k] = v
elif k in ["VASP_PSP_DIR", "MAPI_KEY", "DEFAULT_FUNCTIONAL"]:
d["PMG_" + k] = v
return dict(d)
SETTINGS = _load_pmg_settings()
# Order of imports is important on some systems to avoid
# failures when loading shared libraries.
# import spglib
# from . import optimization, util
# del(spglib, optimization, util)
# Useful aliases for commonly used objects and modules.
# Allows from pymatgen import <class> for quick usage.
from pymatgen.core import *
from .electronic_structure.core import Spin, Orbital
from .ext.matproj import MPRester
from monty.json import MontyEncoder, MontyDecoder, MSONable
def get_structure_from_mp(formula):
"""
Convenience method to get a crystal from the Materials Project database via
the API. Requires PMG_MAPI_KEY to be set.
Args:
formula (str): A formula
Returns:
(Structure) The lowest energy structure in Materials Project with that
formula.
"""
m = MPRester()
entries = m.get_entries(formula, inc_structure="final")
if len(entries) == 0:
raise ValueError("No structure with formula %s in Materials Project!" %
formula)
elif len(entries) > 1:
warnings.warn("%d structures with formula %s found in Materials "
"Project. The lowest energy structure will be returned." %
(len(entries), formula))
return min(entries, key=lambda e: e.energy_per_atom).structure
if sys.version_info < (3, 5):
warnings.warn("""
Pymatgen will drop Py2k support from v2019.1.1. Pls consult the documentation
at https://www.pymatgen.org for more details.""")
def loadfn(fname):
"""
Convenience method to perform quick loading of data from a filename. The
type of object returned depends the file type.
Args:
fname (string): A filename.
Returns:
Note that fname is matched using unix-style, i.e., fnmatch.
(Structure) if *POSCAR*/*CONTCAR*/*.cif
(Vasprun) *vasprun*
(obj) if *json* (passthrough to monty.serialization.loadfn)
"""
if (fnmatch(fname, "*POSCAR*") or fnmatch(fname, "*CONTCAR*") or
".cif" in fname.lower()) or fnmatch(fname, "*.vasp"):
return Structure.from_file(fname)
elif fnmatch(fname, "*vasprun*"):
from pymatgen.io.vasp import Vasprun
return Vasprun(fname)
elif fnmatch(fname, "*.json*"):
from monty.serialization import loadfn
return loadfn(fname)
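A short usage sketch of the convenience loaders defined above; the file names are placeholders, and the Materials Project call assumes PMG_MAPI_KEY is configured in ~/.pmgrc.yaml or the environment:
# Hedged example - assumes pymatgen is installed and the referenced files exist locally.
from pymatgen import loadfn, get_structure_from_mp

structure = loadfn("POSCAR")        # dispatched to Structure.from_file()
run = loadfn("vasprun.xml")         # dispatched to Vasprun()
si = get_structure_from_mp("Si")    # lowest-energy MP entry for the formula
print(structure.composition, run.final_energy, si.formula)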
|
skrzym/monday-morning-quarterback
|
Application/Site/mmq/main/controllers.py
|
Python
|
mit
| 7,841 | 0.054202 |
<<<<<<< HEAD
from flask import Blueprint, render_template, request, url_for, jsonify
from config import mongo
import pandas as pd
import json
from bson import json_util
import retrieve_model as rmodel
from collections import Counter
main = Blueprint('main', __name__, template_folder='templates')
@main.route('/')
def index():
#mongo.db.visits.insert_one({"no":"way"})
#visits = mongo.db.visits.find_one()
#return str(visits)
return render_template('index.html')
@main.route('/predict/')
def get_started():
down_list = [{'value':1,'name':'1st'},{'value':2,'name':'2nd'},{'value':3,'name':'3rd'},{'value':4,'name':'4th'}]
quarter_list = [{'value':1,'name':'1st'},{'value':2,'name':'2nd'},{'value':3,'name':'3rd'},{'value':4,'name':'4th'}]
clock_list = [{'value':15,'name':'<15'}, {'value':14,'name':'<14'}, {'value':13,'name':'<13'},
{'value':12,'name':'<12'}, {'value':11,'name':'<11'}, {'value':10,'name':'<10'},
{'value':9,'name':'<9'}, {'value':8,'name':'<8'}, {'value':7,'name':'<7'},
{'value':6,'name':'<6'}, {'value':5,'name':'<5'}, {'value':4,'name':'<4'},
{'value':3,'name':'<3'}, {'value':2,'name':'<2'}, {'value':1,'name':'<1'}]
yards_list = [{'value':0,'name':'inches'}, {'value':1,'name':'1'},
{'value':2,'name':'2'}, {'value':3,'name':'3'}, {'value':4,'name':'4'},
{'value':5,'name':'5'}, {'value':6,'name':'6'}, {'value':7,'name':'7'},
{'value':8,'name':'8'}, {'value':9,'name':'9'}, {'value':10,'name':'10'},
{'value':11,'name':'11'}, {'value':12,'name':'12'}, {'value':13,'name':'13'},
{'value':14,'name':'14'}, {'value':15,'name':'15'}, {'value':16,'name':'16'},
{'value':17,'name':'17'}, {'value':18,'name':'18'}, {'value':19,'name':'19'},
{'value':20,'name':'20'}, {'value':21,'name':'21'}, {'value':22,'name':'22'},
{'value':23,'name':'23'}, {'value':24,'name':'24'}, {'value':25,'name':'25'}]
field_list = range(0,101,1)
score_list = range(0,61,1)
down_dict = [{'value':1,'name':'1st'},{'value':2,'name':'2nd'},{'value':3,'name':'3rd'},{'value':4,'name':'4th'}]
return render_template('predict.html',
=======
from flask import Blueprint, render_template, request, url_for
from config import mongo
main = Blueprint('main', __name__, template_folder='templates')
@main.route('/')
def index():
mongo.db.visits.insert_one({"foo":"bar"})
visits = mongo.db.visits.find_one()
return str(visits)
#return render_template('index.html')
@main.route('/getstarted/')
def get_started():
down_list = ['1st','2nd','3rd','4th']
quarter_list = ['1st','2nd','3rd','4th']
clock_list = ['> 15 min', '> 10 min', '> 5 min', '> 2 min', '< 2 min', '< 1 min']
yards_list = ['inches', 'goal', '1', '2', '3', '4', '5', '6', '7' ,'8', '9', '10', '> 10']
field_list = range(0,105,5)
score_list = range(-60,61,1)
return render_template('getstarted.html',
>>>>>>> master
down_list=down_list,
quarter_list=quarter_list,
clock_list=clock_list,
yards_list=yards_list,
field_list=field_list,
<<<<<<< HEAD
score_list=score_list,
down_dict=down_dict
)
@main.route('/results/', methods=['POST'])
def results():
=======
score_list=score_list
)
@main.route('/run/', methods=['POST'])
def run():
>>>>>>> master
down = request.form['down']
quarter = request.form['quarter']
clock = request.form['clock']
yards = request.form['yards']
field = request.form['field']
score = request.form['score']
<<<<<<< HEAD
sign = request.form['sign']
guess = request.form['guess']
score = str(int(score) * int(sign))
# Store scenario in mongodb
scenario = {
'down': int(down),
'quarter': int(quarter),
'clock': int(clock),
'yards': int(yards),
'field': int(field),
'score': int(score),
'guess': guess
}
# Insert the current user's guess into the DB
print('Puting this into db:', scenario)
mongo.db.scenarios.insert_one(scenario)
# Pull User guesses from MongoDB
#scenarios = mongo.db.scenarios.find()
# Pull NFL Stats from MongoDB
#nflstats = mongo.db.nfldata.find()
guesses = {'pass':'Pass', 'run':'Run', 'punt':'Punt', 'fg':'Field Goal', 'kneel': 'QB Kneel'}
try:
return render_template('results.html',
guess_title = guesses[guess],
=======
guess = request.form['guess']
# Store scenario in mongodb
scenario = {
'down': down,
'quarter': quarter,
'clock': clock,
'yards': yards,
'field': field,
'score': score,
'guess': guess
}
mongo.db.scenarios.insert_one(scenario)
scenarios = mongo.db.scenarios.find()
try:
return render_template('results.html',
>>>>>>> master
down=down,
quarter=quarter,
clock=clock,
yards=yards,
field=field,
score=score,
guess=guess,
<<<<<<< HEAD
scenarios=[None],#scenarios,
nflstats=[None]#nflstats
)
except Exception as e:
return "Something went wrong..." + str(e)
@main.route('/stats/')
def tables():
title = 'Test Table'
title = rmodel.predict_proba(4,4,1,20,-1)
table = title
return render_template('stats.html', table=table, title=title)
@main.route('/data/guesses/')
def guessData():
guess = request.args.get('guess')
down = request.args.get('down')
quarter = request.args.get('quarter')
clock = request.args.get('clock')
yards = request.args.get('yards')
field = request.args.get('field')
score = request.args.get('score')
search_dict = request.args.to_dict()
for key in search_dict:
#if key != 'guess':
try:
search_dict[key] = int(search_dict[key])
except:
pass
print(search_dict)
s=[data['guess'] for data in mongo.db.scenarios.find(search_dict)]
options = ['pass', 'run', 'punt', 'fg', 'kneel']
count = {option:s.count(option) for option in options}
print(count)
return json.dumps(count, default=json_util.default)
@main.route('/data/nfl/')
def nflData():
playtype = request.args.get('PlayType')
down = request.args.get('down')
quarter = request.args.get('quarter')
clock = request.args.get('clock')
yards = request.args.get('yards')
field = request.args.get('field')
score = request.args.get('score')
search_dict = request.args.to_dict()
for key in search_dict:
if key != 'playtype':
try:
search_dict[key] = int(search_dict[key])
except:
pass
s=[data["PlayType"] for data in mongo.db.nfldata.find(search_dict)]
print(s)
options = ['pass', 'run', 'punt', 'fg', 'kneel']
count = {option:s.count(option) for option in options}
print(count)
return json.dumps(count, default=json_util.default)
@main.route('/api/predict/')
def apiPredict():
arg_dict = request.args.to_dict()
for key in arg_dict:
try:
arg_dict[key] = int(arg_dict[key])
except:
pass
calculations = [
{name:rmodel.predict_group_proba(
arg_dict['quarter'],
arg_dict['down'],
arg_dict['yards'],
arg_dict['clock'],
arg_dict['field'],
arg_dict['score'],
name)
        } for name in ['quarter', 'down', 'yards', 'timeunder', 'yrdline100', 'scorediff']
]
calculations.append({'request':rmodel.predict_proba(
arg_dict['quarter'],
arg_dict['down'],
arg_dict['yards'],
arg_dict['clock'],
arg_dict['field'],
arg_dict['score'],
False)
})
return jsonify(calculations)
=======
scenarios=scenarios
)
except:
return "fail"
>>>>>>> master
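For orientation, a hedged client-side sketch of the /api/predict/ endpoint defined in the HEAD side of the conflict above; the host, port and mount point of the blueprint are assumptions, and the parameter names mirror the keys apiPredict() reads from request.args:
# Hypothetical client call against a locally running instance of this app.
import requests  # assumed available in the client environment

resp = requests.get(
    "http://localhost:5000/api/predict/",   # assumed dev-server address and root mount
    params={"quarter": 4, "down": 3, "yards": 7, "clock": 2, "field": 45, "score": -3},
)
print(resp.json())  # per-feature probability groups plus the combined 'request' entry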
|
rabipanda/tensorflow
|
tensorflow/contrib/py2tf/impl/naming_test.py
|
Python
|
apache-2.0
| 2,895 | 0.003454 |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for naming module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.py2tf.impl import naming
from tensorflow.python.platform import test
class NamerTest(test.TestCase):
def test_compiled_function_name_tracks_names(self):
def bar():
pass
namer = naming.Namer({}, True, None, ())
self.assertEqual(('tf__foo', True), namer.compiled_function_name('foo'))
self.assertEqual(('tf__bar', True), namer.compiled_function_name(
'bar', bar))
self.assertEqual({bar: 'tf__bar'}, namer.renamed_calls)
self.assertItemsEqual(('tf__bar', 'tf__foo'), namer.generated_names)
def test_compiled_function_name_consistent(self):
def foo():
pass
namer = naming.Namer({}, True, None, ())
self.assertEqual(('tf__foo', True), namer.compiled_function_name(
'foo', foo))
self.assertEqual(('tf__foo', True), namer.compiled_function_name(
'foo', foo))
  def test_compiled_function_name_avoids_global_conflicts(self):
def foo():
pass
namer = naming.Namer({'tf__foo': 1}, True, None, ())
self.assertEqual(('tf__foo_1', True),
namer.compiled_function_name('foo', foo))
def test_new_symbol_tracks_names(self):
namer = naming.Namer({}, True, None, ())
self.assertEqual('temp', namer.new_symbol('temp', set()))
self.assertItemsEqual(('temp',), namer.generated_names)
def test_new_symbol_avoids_duplicates(self):
namer = naming.Namer({}, True, None, ())
self.assertEqual('temp', namer.new_symbol('temp', set()))
self.assertEqual('temp_1', namer.new_symbol('temp', set()))
self.assertItemsEqual(('temp', 'temp_1'), namer.generated_names)
def test_new_symbol_avoids_conflicts(self):
namer = naming.Namer({'temp': 1}, True, None, ())
# temp is reserved in the global namespace
self.assertEqual('temp_1', namer.new_symbol('temp', set()))
# temp_2 is reserved in the local namespace
self.assertEqual('temp_3', namer.new_symbol('temp', set(('temp_2',))))
self.assertItemsEqual(('temp_1', 'temp_3'), namer.generated_names)
if __name__ == '__main__':
test.main()
|
aforalee/rally
|
tests/unit/task/processing/test_charts.py
|
Python
|
apache-2.0
| 19,909 | 0 |
# Copyright 2015: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import ddt
import mock
from rally.common import costilius
from rally.task.processing import charts
from tests.unit import test
CHARTS = "rally.task.processing.charts."
class ChartTestCase(test.TestCase):
class Chart(charts.Chart):
def _map_iteration_values(self, iteration):
return [("foo_" + k, iteration[k]) for k in ["a", "b"]]
@property
def bench_info(self):
return {"iterations_count": 42, "atomic": {"a": {}, "b": {}, "c": {}}}
def test___init__(self):
self.assertRaises(TypeError, charts.Chart, self.bench_info)
chart = self.Chart(self.bench_info)
self.assertEqual({}, chart._data)
self.assertEqual(42, chart.base_size)
self.assertEqual(1000, chart.zipped_size)
chart = self.Chart(self.bench_info, zipped_size=24)
self.assertEqual({}, chart._data)
self.assertEqual(42, chart.base_size)
self.assertEqual(24, chart.zipped_size)
@mock.patch(CHARTS + "utils.GraphZipper")
def test_add_iteration_and_render(self, mock_graph_zipper):
gzipper_a = mock.Mock(get_zipped_graph=lambda: "a_points")
gzipper_b = mock.Mock(get_zipped_graph=lambda: "b_points")
mock_graph_zipper.side_effect = [gzipper_a, gzipper_b]
chart = self.Chart(self.bench_info, 24)
self.assertEqual([], chart.render())
[chart.add_iteration(itr) for itr in [{"a": 1, "b": 2},
{"a": 3, "b": 4}]]
self.assertEqual([mock.call(42, 24), mock.call(42, 24)],
mock_graph_zipper.mock_calls)
self.assertEqual(2, len(chart._data))
self.assertEqual([mock.call(1), mock.call(3)],
chart._data["foo_a"].add_point.mock_calls)
self.assertEqual([mock.call(2), mock.call(4)],
chart._data["foo_b"].add_point.mock_calls)
self.assertEqual([("foo_a", "a_points"), ("foo_b", "b_points")],
chart.render())
def test__fix_atomic_actions(self):
chart = self.Chart(self.bench_info)
self.assertEqual(
{"atomic_actions": {"a": 5, "b": 6, "c": 0}},
chart._fix_atomic_actions({"atomic_actions": {"a": 5, "b": 6}}))
class MainStackedAreaChartTestCase(test.TestCase):
def test_add_iteration_and_render(self):
chart = charts.MainStackedAreaChart({"iterations_count": 3,
"iterations_failed": 0}, 10)
self.assertIsInstance(chart, charts.Chart)
[chart.add_iteration(itr) for itr in (
{"duration": 1.1, "idle_duration": 2.2, "error": []},
{"error": [], "duration": 1.1, "idle_duration": 0.5},
{"duration": 1.3, "idle_duration": 3.4, "error": []})]
expected = [("duration", [[1, 1.1], [2, 1.1], [3, 1.3]]),
("idle_duration", [[1, 2.2], [2, 0.5], [3, 3.4]])]
self.assertEqual(expected, chart.render())
def test_add_iteration_and_render_with_failed_iterations(self):
chart = charts.MainStackedAreaChart({"iterations_count": 3,
"iterations_failed": 2}, 10)
self.assertIsInstance(chart, charts.Chart)
[chart.add_iteration(itr) for itr in (
{"duration": 1.1, "idle_duration": 2.2, "error": []},
{"error": ["foo_err"], "duration": 1.1, "idle_duration": 0.5},
{"duration": 1.3, "idle_duration": 3.4, "error": ["foo_err"]})]
expected = [("duration", [[1, 1.1], [2, 0], [3, 0]]),
("idle_duration", [[1, 2.2], [2, 0], [3, 0]]),
("failed_duration", [[1, 0], [2, 1.6], [3, 4.7]])]
self.assertEqual(expected, chart.render())
class AtomicStackedAreaChartTestCase(test.TestCase):
def test_add_iteration_and_render(self):
iterations = (
{"atomic_actions": {"foo": 1.1}, "error": []},
{"atomic_actions": {"foo": 1.1, "bar": 1.2},
"error": [], "duration": 40, "idle_duration": 2},
{"atomic_actions": {"bar": 1.2},
"error": [], "duration": 5.5, "idle_duration": 2.5})
expected = [("bar", [[1, 0], [2, 1.2], [3, 1.2]]),
("foo", [[1, 1.1], [2, 1.1], [3, 0]])]
chart = charts.AtomicStackedAreaChart(
{"iterations_count": 3, "iterations_failed": 0,
"atomic": {"foo": {}, "bar": {}}}, 10)
self.assertIsInstance(chart, charts.Chart)
[chart.add_iteration(iteration) for iteration in iterations]
self.assertEqual(expected, sorted(chart.render()))
def test_add_iteration_and_render_with_failed_iterations(self):
iterations = (
{"atomic_actions": {"foo": 1.1}, "error": []},
{"atomic_actions": {"foo": 1.1, "bar": 1.2},
"error": ["foo_err"], "duration": 40, "idle_duration": 2},
{"atomic_actions": {"bar": 1.2},
"error": ["foo_err"], "duration": 5.5, "idle_duration": 2.5})
expected = [("bar", [[1, 0], [2, 1.2], [3, 1.2]]),
("failed_duration", [[1, 0], [2, 39.7], [3, 6.8]]),
("foo", [[1, 1.1], [2, 1.1], [3, 0]])]
chart = charts.AtomicStackedAreaChart(
{"iterations_count": 3, "iterations_failed": 2,
"atomic": {"foo": {}, "bar": {}}}, 10)
self.assertIsInstance(chart, charts.Chart)
[chart.add_iteration(iteration) for iteration in iterations]
self.assertEqual(expected, sorted(chart.render()))
class OutputStackedAreaChartTestCase(test.TestCase):
def test_add_iteration_and_render(self):
chart = charts.OutputStackedAreaChart(
{"iterations_count": 3, "output_names": ["foo", "bar"]}, 10)
self.assertIsInstance(chart, charts.Chart)
[chart.add_iteration({"scenario_output": {"data": x}})
for x in ({"foo": 1.1, "bar": 1.2}, {"foo": 1.3}, {"bar": 1.4})]
expected = [("bar", [[1, 1.2], [2, 0], [3, 1.4]]),
("foo", [[1, 1.1], [2, 1.3], [3, 0]])]
self.assertEqual(expected, sorted(chart.render()))
class AvgChartTestCase(test.TestCase):
class AvgChart(charts.AvgChart):
def _map_iteration_values(self, iteration):
return iteration["foo"].items()
def test_add_iteration_and_render(self):
self.assertRaises(TypeError, charts.AvgChart, {"iterations_count": 3})
chart = self.AvgChart({"iterations_count": 3})
self.assertIsInstance(chart, charts.AvgChart)
[chart.add_iteration({"foo": x}) for x in ({"a": 1.3, "b": 4.3},
{"a": 2.4, "b": 5.4},
{"a": 3.5, "b": 7.7})]
self.assertEqual([("a", 2.4), ("b", 5.8)], sorted(chart.render()))
class AtomicAvgChartTestCase(test.TestCase):
def test_add_iteration_and_render(self):
chart = charts.AtomicAvgChart({"iterations_count": 3,
"atomic": {"foo": {}, "bar": {}}})
self.assertIsInstance(chart, charts.AvgChart)
[chart.add_iteration({"atomic_actions": costilius.OrderedDict(a)})
for a in ([("foo", 2), ("bar", 5)], [("foo", 4)], [("bar", 7)])]
self.assertEqual([("bar", 4.0), ("foo", 2.0)], sorted(chart.render()))
@ddt.ddt
class LoadProfileChartTestCase(test.TestCase):
@ddt.data({"count": 5, "load_duration": 63,
"tstamp_start": 12345,
"kwargs": {"scale": 10}, "data": [
|
MounirMesselmeni/django
|
tests/utils_tests/test_lazyobject.py
|
Python
|
bsd-3-clause
| 11,862 | 0.000506 |
from __future__ import unicode_literals
import copy
import pickle
import sys
import warnings
from unittest import TestCase
from django.utils import six
from django.utils.functional import LazyObject, SimpleLazyObject, empty
from .models import Category, CategoryInfo
class Foo(object):
"""
A simple class with just one attribute.
"""
foo = 'bar'
def __eq__(self, other):
return self.foo == other.foo
class LazyObjectTestCase(TestCase):
def lazy_wrap(self, wrapped_object):
"""
Wrap the given object into a LazyObject
"""
class AdHocLazyObject(LazyObject):
def _setup(self):
self._wrapped = wrapped_object
return AdHocLazyObject()
def test_getattr(self):
obj = self.lazy_wrap(Foo())
self.assertEqual(obj.foo, 'bar')
def test_setattr(self):
obj = self.lazy_wrap(Foo())
obj.foo = 'BAR'
obj.bar = 'baz'
self.assertEqual(obj.foo, 'BAR')
self.assertEqual(obj.bar, 'baz')
def test_setattr2(self):
# Same as test_setattr but in reversed order
obj = self.lazy_wrap(Foo())
obj.bar = 'baz'
obj.foo = 'BAR'
self.assertEqual(obj.foo, 'BAR')
self.assertEqual(obj.bar, 'baz')
def test_delattr(self):
obj = self.lazy_wrap(Foo())
obj.bar = 'baz'
self.assertEqual(obj.bar, 'baz')
del obj.bar
with self.assertRaises(AttributeError):
obj.bar
def test_cmp(self):
obj1 = self.lazy_wrap('foo')
obj2 = self.lazy_wrap('bar')
obj3 = self.lazy_wrap('foo')
self.assertEqual(obj1, 'foo')
self.assertEqual(obj1, obj3)
self.assertNotEqual(obj1, obj2)
self.assertNotEqual(obj1, 'bar')
def test_bytes(self):
obj = self.lazy_wrap(b'foo')
self.assertEqual(bytes(obj), b'foo')
def test_text(self):
obj = self.lazy_wrap('foo')
self.assertEqual(six.text_type(obj), 'foo')
def test_bool(self):
# Refs #21840
for f in [False, 0, (), {}, [], None, set()]:
self.assertFalse(self.lazy_wrap(f))
for t in [True, 1, (1,), {1: 2}, [1], object(), {1}]:
self.assertTrue(t)
def test_dir(self):
obj = self.lazy_wrap('foo')
self.assertEqual(dir(obj), dir('foo'))
def test_len(self):
for seq in ['asd', [1, 2, 3], {'a': 1, 'b': 2, 'c': 3}]:
obj = self.lazy_wrap(seq)
self.assertEqual(len(obj), 3)
def test_class(self):
self.assertIsInstance(self.lazy_wrap(42), int)
class Bar(Foo):
pass
self.assertIsInstance(self.lazy_wrap(Bar()), Foo)
def test_hash(self):
obj = self.lazy_wrap('foo')
d = {}
d[obj] = 'bar'
self.assertIn('foo', d)
self.assertEqual(d['foo'], 'bar')
def test_contains(self):
test_data = [
('c', 'abcde'),
(2, [1, 2, 3]),
('a', {'a': 1, 'b': 2, 'c': 3}),
(2, {1, 2, 3}),
]
for needle, haystack in test_data:
self.assertIn(needle, self.lazy_wrap(haystack))
# __contains__ doesn't work when the haystack is a string and the needle a LazyObject
for needle_haystack in test_data[1:]:
self.assertIn(self.lazy_wrap(needle), haystack)
self.assertIn(self.lazy_wrap(needle), self.lazy_wrap(haystack))
def test_getitem(self):
obj_list = self.lazy_wrap([1, 2, 3])
obj_dict = self.lazy_wrap({'a': 1, 'b': 2, 'c': 3})
self.assertEqual(obj_list[0], 1)
self.assertEqual(obj_list[-1], 3)
self.assertEqual(obj_list[1:2], [2])
self.assertEqual(obj_dict['b'], 2)
with self.assertRaises(IndexError):
obj_list[3]
with self.assertRaises(KeyError):
obj_dict['f']
def test_setitem(self):
obj_list = self.lazy_wrap([1, 2, 3])
obj_dict = self.lazy_wrap({'a': 1, 'b': 2, 'c': 3})
obj_list[0] = 100
self.assertEqual(obj_list, [100, 2, 3])
obj_list[1:2] = [200, 300, 400]
self.assertEqual(obj_list, [100, 200, 300, 400, 3])
obj_dict['a'] = 100
obj_dict['d'] = 400
self.assertEqual(obj_dict, {'a': 100, 'b': 2, 'c': 3, 'd': 400})
def test_delitem(self):
obj_list = self.lazy_wrap([1, 2, 3])
obj_dict = self.lazy_wrap({'a': 1, 'b': 2, 'c': 3})
del obj_list[-1]
del obj_dict['c']
self.assertEqual(obj_list, [1, 2])
self.assertEqual(obj_dict, {'a': 1, 'b': 2})
with self.assertRaises(IndexError):
del obj_list[3]
with self.assertRaises(KeyError):
del obj_dict['f']
def test_iter(self):
# Tests whether an object's custom `__iter__` method is being
# used when iterating over it.
class IterObject(object):
def __init__(self, values):
self.values = values
def __iter__(self):
return iter(self.values)
original_list = ['test', '123']
self.assertEqual(
list(self.lazy_wrap(IterObject(original_list))),
original_list
)
def test_pickle(self):
# See ticket #16563
obj = self.lazy_wrap(Foo())
pickled = pickle.dumps(obj)
unpickled = pickle.loads(pickled)
self.assertIsInstance(unpickled, Foo)
self.assertEqual(unpickled, obj)
self.assertEqual(unpickled.foo, obj.foo)
def test_deepcopy(self):
# Check that we *can* do deep copy, and that it returns the right
# objects.
l = [1, 2, 3]
obj = self.lazy_wrap(l)
len(l) # forces evaluation
obj2 = copy.deepcopy(obj)
self.assertIsInstance(obj2, list)
self.assertEqual(obj2, [1, 2, 3])
def test_deepcopy_no_evaluation(self):
# copying doesn't force evaluation
l = [1, 2, 3]
obj = self.lazy_wrap(l)
obj2 = copy.deepcopy(obj)
# Copying shouldn't force evaluation
self.assertIs(obj._wrapped, empty)
self.assertIs(obj2._wrapped, empty)
class SimpleLazyObjectTestCase(LazyObjectTestCase):
# By inheriting from LazyObjectTestCase and redefining the lazy_wrap()
# method which all testcases use, we get to make sure all behaviors
# tested in the parent testcase also apply to SimpleLazyObject.
def lazy_wrap(self, wrapped_object):
return SimpleLazyObject(lambda: wrapped_object)
def test_repr(self):
# First, for an unevaluated SimpleLazyObject
obj = self.lazy_wrap(42)
# __repr__ contains __repr__ of setup function and does not evaluate
# the SimpleLazyObject
six.assertRegex(self, repr(obj), '^<SimpleLazyObject:')
self.assertIs(obj._wrapped, empty) # make sure evaluation hasn't been triggered
self.assertEqual(obj, 42) # evaluate the lazy object
self.assertIsInstance(obj._wrapped, int)
self.assertEqual(repr(obj), '<SimpleLazyObject: 42>')
def test_trace(self):
# See ticket #19456
old_trace_func = sys.gettrace()
try:
def trace_func(frame, event, arg):
frame.f_locals['self'].__class__
if old_trace_func is not None:
old_trace_func(frame, event, arg)
sys.settrace(trace_func)
self.lazy_wrap(None)
finally:
sys.settrace(old_trace_func)
def test_none(self):
i = [0]
def f():
i[0] += 1
return None
x = SimpleLazyObject(f)
self.assertEqual(str(x), "None")
self.assertEqual(i, [1])
self.assertEqual(str(x), "None")
self.assertEqual(i, [1])
def test_dict(self):
# See ticket #18447
lazydict = SimpleLazyObject(lambda: {'one': 1})
self.assertEqual(lazydict['one'], 1)
        lazydict['one'] = -1
        self.assertEqual(lazydict['one'], -1)
self.assertIn('one', lazydict)
self.assertNotIn('two', lazydict)
self.asse
|
marcoconstancio/yanta
|
actions/horizontal_rule/horizontal_rule.py
|
Python
|
gpl-2.0
| 200 | 0.005 |
# -*- coding: utf-8 -*-
class horizontal_rule:
    def __init__(self):
        pass
@staticmethod
def process(data, args):
data['note_viewer'].call_function('insert_horizontal_rule')
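A small, hedged call sketch for the plugin above; the shape of the data dict and the note_viewer object are inferred only from the attribute access visible in process():
# Hypothetical stand-in for the host application's viewer object.
class FakeViewer(object):
    def call_function(self, name):
        print("viewer would run: " + name)

horizontal_rule.process({'note_viewer': FakeViewer()}, None)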
|
mufaddalq/cloudstack-datera-driver
|
test/integration/smoke/test_loadbalance.py
|
Python
|
apache-2.0
| 25,268 | 0.001979 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from marvin.cloudstackTestCase import *
from marvin.cloudstackAPI import *
from marvin.sshClient import SshClient
from marvin.integration.lib.utils import *
from marvin.integration.lib.base import *
from marvin.integration.lib.common import *
from nose.plugins.attrib import attr
#Import System modules
import time
_multiprocess_shared_ = True
class Services:
"""Test Network Services
"""
def __init__(self):
self.services = {
"ostype": "CentOS 5.3 (64-bit)",
# Cent OS 5.3 (64 bit)
"lb_switch_wait": 10,
# Time interval after which LB switches the requests
"sleep": 60,
"timeout":10,
"network_offering": {
"name": 'Test Network offering',
"displaytext": 'Test Network offering',
"guestiptype": 'Isolated',
"supportedservices": 'Dhcp,Dns,SourceNat,PortForwarding',
"traffictype": 'GUEST',
"availability": 'Optional',
"serviceProviderList" : {
"Dhcp": 'VirtualRouter',
"Dns": 'VirtualRouter',
"SourceNat": 'VirtualRouter',
"PortForwarding": 'VirtualRouter',
},
},
"network": {
"name": "Test Network",
"displaytext": "Test Network",
},
"service_offering": {
"name": "Tiny Instance",
"displaytext": "Tiny Instance",
"cpunumber": 1,
"cpuspeed": 100,
# in MHz
"memory": 256,
# In MBs
},
"account": {
"email": "test@test.com",
"firstname": "Test",
"lastname": "User",
"username": "test",
"password": "password",
},
"server":
{
"displayname": "Small Instance",
"username": "root",
"password": "password",
"hypervisor": 'XenServer',
"privateport": 22,
"publicport": 22,
"ssh_port": 22,
"protocol": 'TCP',
},
"natrule":
{
"privateport": 22,
"publicport": 2222,
"protocol": "TCP"
},
"lbrule":
{
"name": "SSH",
"alg": "roundrobin",
# Algorithm used for load balancing
"privateport": 22,
"publicport": 2222,
"protocol": 'TCP'
}
}
class TestLoadBalance(cloudstackTestCase):
@classmethod
def setUpClass(cls):
cls.api_client = super(TestLoadBalance, cls).getClsTestClient().getApiClient()
cls.services = Services().services
# Get Zone, Domain and templates
cls.domain = get_domain(cls.api_client, cls.services)
cls.zone = get_zone(cls.api_client, cls.services)
template = get_template(
cls.api_client,
cls.zone.id,
cls.services["ostype"]
)
cls.services["server"]["zoneid"] = cls.zone.id
#Create an account, network, VM and IP addresses
cls.account = Account.create(
cls.api_client,
cls.services["account"],
admin=True,
domainid=cls.domain.id
)
cls.service_offering = ServiceOffering.create(
cls.api_client,
cls.services["service_offering"]
)
cls.vm_1 = VirtualMachine.create(
cls.api_client,
cls.services["server"],
templateid=template.id,
accountid=cls.account.name,
domainid=cls.account.domainid,
serviceofferingid=cls.service_offering.id
)
cls.vm_2 = VirtualMachine.create(
                                    cls.api_client,
                                    cls.services["server"],
templateid=template.id,
accountid=cls.account.name,
domainid=cls.account.domainid,
serviceofferingid=cls.service_offering.id
)
cls.vm_3 = VirtualMachine.create(
cls.api_client,
cls.services["server"],
templateid=template.id,
accountid=cls.account.name,
domainid=cls.account.domainid,
serviceofferingid=cls.service_offering.id
)
cls.non_src_nat_ip = PublicIPAddress.create(
cls.api_client,
cls.account.name,
cls.zone.id,
cls.account.domainid,
cls.services["server"]
)
# Open up firewall port for SSH
cls.fw_rule = FireWallRule.create(
cls.api_client,
ipaddressid=cls.non_src_nat_ip.ipaddress.id,
protocol=cls.services["lbrule"]["protocol"],
cidrlist=['0.0.0.0/0'],
startport=cls.services["lbrule"]["publicp
|
mandli/surge-examples
|
harvey/setplot.py
|
Python
|
mit
| 12,304 | 0.002682 |
from __future__ import absolute_import
from __future__ import print_function
import os
import numpy
import matplotlib.pyplot as plt
import datetime
import clawpack.visclaw.colormaps as colormap
import clawpack.visclaw.gaugetools as gaugetools
import clawpack.clawutil.data as clawutil
import clawpack.amrclaw.data as amrclaw
import clawpack.geoclaw.data as geodata
from clawpack.geoclaw.util import fetch_noaa_tide_data
import clawpack.geoclaw.surge.plot as surgeplot
try:
from setplotfg import setplotfg
except:
setplotfg = None
def setplot(plotdata=None):
""""""
if plotdata is None:
from clawpack.visclaw.data import ClawPlotData
plotdata = ClawPlotData()
# clear any old figures,axes,items data
plotdata.clearfigures()
plotdata.format = 'ascii'
# Load data from output
clawdata = clawutil.ClawInputData(2)
clawdata.read(os.path.join(plotdata.outdir, 'claw.data'))
physics = geodata.GeoClawData()
physics.read(os.path.join(plotdata.outdir, 'geoclaw.data'))
surge_data = geodata.SurgeData()
surge_data.read(os.path.join(plotdata.outdir, 'surge.data'))
friction_data = geodata.FrictionData()
friction_data.read(os.path.join(plotdata.outdir, 'friction.data'))
# Load storm track
track = surgeplot.track_data(os.path.join(plotdata.outdir, 'fort.track'))
# Set afteraxes function
def surge_afteraxes(cd):
surgeplot.surge_afteraxes(cd, track, plot_direction=False,
kwargs={"markersize": 4})
# Color limits
surface_limits = [-5.0, 5.0]
speed_limits = [0.0, 3.0]
wind_limits = [0, 64]
pressure_limits = [935, 1013]
friction_bounds = [0.01, 0.04]
def friction_after_axes(cd):
plt.title(r"Manning's $n$ Coefficient")
# ==========================================================================
# Plot specifications
# ==========================================================================
regions = {"Gulf": {"xlimits": (clawdata.lower[0], clawdata.upper[0]),
"ylimits": (clawdata.lower[1], clawdata.upper[1]),
"figsize": (6.4, 4.8)},
"Texas Gulf Coast": {"xlimits": (-99.2, -94.2),
"ylimits": (26.4, 30.4),
"figsize": (6, 6)}}
for (name, region_dict) in regions.items():
# Surface Figure
plotfigure = plotdata.new_plotfigure(name="Surface - %s" % name)
plotfigure.kwargs = {"figsize": region_dict['figsize']}
plotaxes = plotfigure.new_plotaxes()
plotaxes.title = "Surface"
plotaxes.xlimits = region_dict["xlimits"]
plotaxes.ylimits = region_dict["ylimits"]
plotaxes.afteraxes = surge_afteraxes
surgeplot.add_surface_elevation(plotaxes, bounds=surface_limits)
surgeplot.add_land(plotaxes)
plotaxes.plotitem_dict['surface'].amr_patchedges_show = [0] * 10
plotaxes.plotitem_dict['land'].amr_patchedges_show = [0] * 10
# Speed Figure
plotfigure = plotdata.new_plotfigure(name="Currents - %s" % name)
plotfigure.kwargs = {"figsize": region_dict['figsize']}
plotaxes = plotfigure.new_plotaxes()
plotaxes.title = "Currents"
plotaxes.xlimits = region_dict["xlimits"]
plotaxes.ylimits = region_dict["ylimits"]
plotaxes.afteraxes = surge_afteraxes
surgeplot.add_speed(plotaxes, bounds=speed_limits)
surgeplot.add_land(plotaxes)
plotaxes.plotitem_dict['speed'].amr_patchedges_show = [0] * 10
plotaxes.plotitem_dict['land'].amr_patchedges_show = [0] * 10
#
# Friction field
#
plotfigure = plotdata.new_plotfigure(name='Friction')
plotfigure.show = friction_data.variable_friction and True
plotaxes = plotfigure.new_plotaxes()
plotaxes.xlimits = regions['Gulf']['xlimits']
plotaxes.ylimits = regions['Gulf']['ylimits']
# plotaxes.title = "Manning's N Coefficient"
plotaxes.afteraxes = friction_after_axes
plotaxes.scaled = True
surgeplot.add_friction(plotaxes, bounds=friction_bounds, shrink=0.9)
plotaxes.plotitem_dict['friction'].amr_patchedges_show = [0] * 10
plotaxes.plotitem_dict['friction'].colorbar_label = "$n$"
#
# Hurricane Forcing fields
#
# Pressure field
plotfigure = plotdata.new_plotfigure(name='Pressure')
plotfigure.show = surge_data.pressure_forcing and True
plotaxes = plotfigure.new_plotaxes()
plotaxes.xlimits = regions['Gulf']['xlimits']
plotaxes.ylimits = regions['Gulf']['ylimits']
plotaxes.title = "Pressure Field"
plotaxes.afteraxes = surge_afteraxes
plotaxes.scaled = True
surgeplot.add_pressure(plotaxes, bounds=pressure_limits)
surgeplot.add_land(plotaxes)
# Wind field
plotfigure = plotdata.new_plotfigure(name='Wind Speed')
plotfigure.show = surge_data.wind_forcing and True
plotaxes = plotfigure.new_plotaxes()
plotaxes.xlimits = regions['Gulf']['xlimits']
plotaxes.ylimits = regions['Gulf']['ylimits']
plotaxes.title = "Wind Field"
plotaxes.afteraxes = surge_afteraxes
plotaxes.scaled = True
surgeplot.add_wind(plotaxes, bounds=wind_limits)
surgeplot.add_land(plotaxes)
# ========================================================================
# Figures for gauges
# ========================================================================
plotfigure = plotdata.new_plotfigure(name='Gauge Surfaces', figno=300,
type='each_gauge')
plotfigure.show = True
plotfigure.clf_each_gauge = True
# Set up for axes in this figure:
plotaxes = plotfigure.new_plotaxes()
#Time Conversions
def days2seconds(days):
return days * 60.0**2 * 24.0
stations = [('8773037', 'Seadrift'),
('8773701', 'Port OConnor'),
                ('8774230', 'Aransas Wildlife Refuge'),
('8775237', 'Port Aransas'),
('8775296', 'USS Lexington')]
landfall_time = numpy.datetime64('2017-08-25T10:00')
begin_date = datetime.datetime(2017, 8, 24)
end_date = datetime.datetime(2017, 8, 28)
def get_actual_water_levels(station_id):
# Fetch water levels and tide predictions for given station
        date_time, water_level, tide = fetch_noaa_tide_data(station_id,
begin_date, end_date)
# Calculate times relative to landfall
seconds_rel_landfall = (date_time - landfall_time) / numpy.timedelta64(1, 's')
# Subtract tide predictions from measured water levels
water_level -= tide
return seconds_rel_landfall, water_level
def gauge_afteraxes(cd):
station_id, station_name = stations[cd.gaugeno - 1]
seconds_rel_landfall, actual_level = get_actual_water_levels(station_id)
axes = plt.gca()
surgeplot.plot_landfall_gauge(cd.gaugesoln, axes)
axes.plot(seconds_rel_landfall, actual_level, 'g')
# Fix up plot - in particular fix time labels
axes.set_title(station_name)
axes.set_xlabel('Seconds relative to landfall')
axes.set_ylabel('Surface (m)')
axes.set_xlim([days2seconds(-1), days2seconds(3)])
axes.set_ylim([-1, 5])
axes.set_xticks([-days2seconds(-1), 0, days2seconds(1), days2seconds(2), days2seconds(3)])
#axes.set_xticklabels([r"$-1$", r"$0$", r"$1$", r"$2$", r"$3$"])
#axes.grid(True)
# Set up for axes in this figure:
plotaxes = plotfigure.new_plotaxes()
plotaxes.afteraxes = gauge_afteraxes
# Plot surface as blue curve:
plotitem = plotaxes.new_plotitem(plot_type='1d_plot')
plotitem.plot_var = 3
plotitem.plotstyle = 'b-'
#
# Gauge Location Plot
#
def gauge_location_afteraxes(cd):
plt.subplots_adjust(left=0.12, bottom=0.06, right=0.97, top=0.97)
surge_afteraxes(cd)
gaugetools.plot_gauge_locations(cd.plotdata, gaugenos='all',
format_string='ko', add_labels=False)
|
aedoler/is210-week-03-synthesizing
|
task_01.py
|
Python
|
mpl-2.0
| 183 | 0 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Contains expectations."""
import inquisition
FISHY = inquisition.SPANISH
FISHY = FISHY.replace('surprise', 'haddock')
print FISHY
|
xaedes/PyCompuPipe
|
pycompupipe/components/processing/process_output.py
|
Python
|
mit
| 525 | 0.009579 |
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
from __future__ import division  # float division by default - integer division can still be done explicitly with '//'
from __future__ import absolute_import
import pygame
from pyecs import *
# from pyecs.components import *
# from components import *
class ProcessOutput(Component):
    """docstring for ProcessOutput"""
def __init__(self, process, *args,**kwargs):
super(ProcessOutput, self).__init__(*args,**kwargs)
self.process = process
|
spectralflux/colossus
|
tests/test_route.py
|
Python
|
mit
| 458 | 0.004367 |
from colossus.game import Route
def test_route_step():
    route = Route(100.0, 1)
route.add_packet()
dmg = route.update(10.0)
assert dmg <= 0.0
dmg = 0.0
dmg += route.update(100000.0)
dmg += route.update(100000.0)
assert dmg > 0.0
def test_route_removal_of_packet_after_action():
route = Route(1.0, 1)
route.add_packet()
    assert route.packet_count() == 1
route.update(100.0)
assert route.packet_count() == 0
|
mscuthbert/abjad
|
abjad/tools/quantizationtools/test/test_quantizationtools_QGrid___call__.py
|
Python
|
gpl-3.0
| 3,905 | 0.000256 |
# -*- encoding: utf-8 -*-
from abjad import *
from abjad.tools.quantizationtools import *
def test_quantizationtools_QGrid___call___01():
q_grid = QGrid()
a = QEventProxy(SilentQEvent(0, ['A']), 0)
b = QEventProxy(SilentQEvent((1, 20), ['B']), (1, 20))
c = QEventProxy(SilentQEvent((9, 20), ['C']), (9, 20))
d = QEventProxy(SilentQEvent((1, 2), ['D']), (1, 2))
e = QEventProxy(SilentQEvent((11, 20), ['E']), (11, 20))
f = QEventProxy(SilentQEvent((19, 20), ['F']), (19, 20))
g = QEventProxy(SilentQEvent(1, ['G']), 1)
q_grid.fit_q_events([a, b, c, d, e, f, g])
result = q_grid((1, 4))
assert len(result) == 1
assert format(result[0]) == "c'4"
annotation = inspect_(result[0]).get_indicator(indicatortools.Annotation)
assert isinstance(annotation.value, tuple) and len(annotation.value) == 4
assert annotation.value[0].attachments == ('A',)
assert annotation.value[1].attachments == ('B',)
assert annotation.value[2].attachments == ('C',)
assert annotation.value[3].attachments == ('D',)
def test_quantizationtools_QGrid___call___02():
q_grid = QGrid()
q_grid.subdivide_leaves([(0, (1, 1, 1))])
q_grid.subdivide_leaves([(1, (1, 1))])
q_grid.subdivide_leaves([(-2, (1, 1, 1))])
a = QEventProxy(SilentQEvent(0, ['A']), 0)
b = QEventProxy(SilentQEvent((1, 20), ['B']), (1, 20))
c = QEventProxy(SilentQEvent((9, 20), ['C']), (9, 20))
d = QEventProxy(SilentQEvent((1, 2), ['D']), (1, 2))
e = QEventProxy(SilentQEvent((11, 20), ['E']), (11, 20))
f = QEventProxy(SilentQEvent((19, 20), ['F']), (19, 20))
    g = QEventProxy(SilentQEvent(1, ['G']), 1)
q_grid.fit_q_events([a, b, c, d, e, f, g])
result = q_grid((1, 4))
assert isinstance(result, list) and len(result) == 1
assert systemtools.TestManager.compare(
result[0],
r'''
\times 2/3 {
c'8
c'16
c'16
\times 2/3 {
                c'16
c'16
c'16
}
}
'''
)
leaf = result[0].select_leaves()[0]
annotation = inspect_(leaf).get_indicators(indicatortools.Annotation)[0]
assert isinstance(annotation.value, tuple) and len(annotation.value) == 2
assert annotation.value[0].attachments == ('A',)
assert annotation.value[1].attachments == ('B',)
leaf = result[0].select_leaves()[1]
assert not inspect_(leaf).get_indicators(indicatortools.Annotation)
leaf = result[0].select_leaves()[2]
annotation = inspect_(leaf).get_indicator(indicatortools.Annotation)
assert isinstance(annotation.value, tuple) and len(annotation.value) == 3
assert annotation.value[0].attachments == ('C',)
assert annotation.value[1].attachments == ('D',)
assert annotation.value[2].attachments == ('E',)
for leaf in result[0].select_leaves()[3:6]:
assert not inspect_(leaf).get_indicators(indicatortools.Annotation)
def test_quantizationtools_QGrid___call___03():
r'''Non-binary works too.
'''
q_grid = QGrid()
q_grid.subdivide_leaves([(0, (1, 1))])
a = QEventProxy(SilentQEvent(0, ['A']), 0)
b = QEventProxy(SilentQEvent((1, 20), ['B']), (1, 20))
c = QEventProxy(SilentQEvent((9, 20), ['C']), (9, 20))
d = QEventProxy(SilentQEvent((1, 2), ['D']), (1, 2))
e = QEventProxy(SilentQEvent((11, 20), ['E']), (11, 20))
f = QEventProxy(SilentQEvent((19, 20), ['F']), (19, 20))
g = QEventProxy(SilentQEvent(1, ['G']), 1)
q_grid.fit_q_events([a, b, c, d, e, f, g])
result = q_grid((1, 3))
assert isinstance(result, list) and len(result) == 1
assert systemtools.TestManager.compare(
result[0],
r'''
\tweak #'edge-height #'(0.7 . 0)
\times 2/3 {
c'4
c'4
}
'''
)
|
mikolajsacha/tweetsclassification
|
src/features/word_embeddings/iword_embedding.py
|
Python
|
mit
| 1,681 | 0.003569 |
"""
Contains basic interface (abstract base class) for word embeddings.
"""
import os
from abc import ABCMeta, abstractmethod
class IWordEmbedding(object):
"""
Abstract base class for word embeddings
"""
__metaclass__ = ABCMeta
def __init__(self, path, vector_length):
self.model = None
self.path = path
self.vector_length = vector_length
self.already_built = False
@abstractmethod
def _build(self):
raise NotImplementedError
@abstractmethod
def __getitem__(self, word):
raise NotImplementedError
def build(self):
""" Loads word embedding from its file """
        if not self.already_built:
            print("Loading pre-trained word embedding from {0}...".format(self.path))
self._build()
self.already_built = True
print("Pre-trained word embedding from {0} loaded!".format(self.path))
def get_embedding_model_path(self):
""" :return: absolute path to folder containing saved word embedding model """
return os.path.join(os.path.dirname(__file__), '../../../models/word_embeddings', self.path)
@staticmethod
def data_file_to_sentences(data_file_path):
"""
Converts a processed data file to generator of lists of words
:param data_file_path: path to data file
:return: iterator yielding sentences as lists of words
"""
with open(data_file_path, 'r') as f:
for line in f:
sentence = line.split(' ')[1]
yield map(lambda word: word.rstrip(), sentence.split(','))
def __str__(self):
return type(self).__name__
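A minimal concrete-subclass sketch of the interface above; the in-memory dictionary model is purely illustrative, not the project's real embedding backend:
class ToyWordEmbedding(IWordEmbedding):
    """Illustrative only: serves fixed vectors from a dict instead of a trained model."""
    def _build(self):
        self.model = {"cat": [0.1] * self.vector_length,
                      "dog": [0.2] * self.vector_length}

    def __getitem__(self, word):
        return self.model.get(word, [0.0] * self.vector_length)

embedding = ToyWordEmbedding(path="toy", vector_length=3)
embedding.build()          # prints the loading messages defined in build()
print(embedding["cat"])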
|
deeplearning4j/deeplearning4j
|
libnd4j/include/graph/generated/nd4j/graph/FlatProperties.py
|
Python
|
apache-2.0
| 7,580 | 0.005937 |
# automatically generated by the FlatBuffers compiler, do not modify
# namespace: graph
import flatbuffers
class FlatProperties(object):
__slots__ = ['_tab']
@classmethod
def GetRootAsFlatProperties(cls, buf, offset):
n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
x = FlatProperties()
x.Init(buf, n + offset)
return x
# FlatProperties
def Init(self, buf, pos):
self._tab = flatbuffers.table.Table(buf, pos)
# FlatProperties
def Name(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
if o != 0:
return self._tab.String(o + self._tab.Pos)
return None
# FlatProperties
def I(self, j):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
if o != 0:
a = self._tab.Vector(o)
return self._tab.Get(flatbuffers.number_types.Int32Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 4))
return 0
# FlatProperties
def IAsNumpy(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
        if o != 0:
            return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int32Flags, o)
        return 0
# FlatProperties
def ILength(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
if o != 0:
return self._tab.VectorLen(o)
return 0
# FlatProperties
def L(self, j):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
if o != 0:
a = self._tab.Vector(o)
return self._tab.Get(flatbuffers.number_types.Int64Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 8))
return 0
# FlatProperties
def LAsNumpy(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
if o != 0:
return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int64Flags, o)
return 0
# FlatProperties
def LLength(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
if o != 0:
return self._tab.VectorLen(o)
return 0
# FlatProperties
def D(self, j):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10))
if o != 0:
a = self._tab.Vector(o)
return self._tab.Get(flatbuffers.number_types.Float64Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 8))
return 0
# FlatProperties
def DAsNumpy(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10))
if o != 0:
return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Float64Flags, o)
return 0
# FlatProperties
def DLength(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10))
if o != 0:
return self._tab.VectorLen(o)
return 0
# FlatProperties
def A(self, j):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12))
if o != 0:
x = self._tab.Vector(o)
x += flatbuffers.number_types.UOffsetTFlags.py_type(j) * 4
x = self._tab.Indirect(x)
from .FlatArray import FlatArray
obj = FlatArray()
obj.Init(self._tab.Bytes, x)
return obj
return None
# FlatProperties
def ALength(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12))
if o != 0:
return self._tab.VectorLen(o)
return 0
# FlatProperties
def B(self, j):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(14))
if o != 0:
a = self._tab.Vector(o)
return self._tab.Get(flatbuffers.number_types.BoolFlags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 1))
return 0
# FlatProperties
def BAsNumpy(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(14))
if o != 0:
return self._tab.GetVectorAsNumpy(flatbuffers.number_types.BoolFlags, o)
return 0
# FlatProperties
def BLength(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(14))
if o != 0:
return self._tab.VectorLen(o)
return 0
# FlatProperties
def S(self, j):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(16))
if o != 0:
a = self._tab.Vector(o)
return self._tab.String(a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 4))
return ""
# FlatProperties
def SLength(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(16))
if o != 0:
return self._tab.VectorLen(o)
return 0
# FlatProperties
def Shape(self, j):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(18))
if o != 0:
a = self._tab.Vector(o)
return self._tab.Get(flatbuffers.number_types.Int32Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 4))
return 0
# FlatProperties
def ShapeAsNumpy(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(18))
if o != 0:
return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int32Flags, o)
return 0
# FlatProperties
def ShapeLength(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(18))
if o != 0:
return self._tab.VectorLen(o)
return 0
def FlatPropertiesStart(builder): builder.StartObject(8)
def FlatPropertiesAddName(builder, name): builder.PrependUOffsetTRelativeSlot(0, flatbuffers.number_types.UOffsetTFlags.py_type(name), 0)
def FlatPropertiesAddI(builder, i): builder.PrependUOffsetTRelativeSlot(1, flatbuffers.number_types.UOffsetTFlags.py_type(i), 0)
def FlatPropertiesStartIVector(builder, numElems): return builder.StartVector(4, numElems, 4)
def FlatPropertiesAddL(builder, l): builder.PrependUOffsetTRelativeSlot(2, flatbuffers.number_types.UOffsetTFlags.py_type(l), 0)
def FlatPropertiesStartLVector(builder, numElems): return builder.StartVector(8, numElems, 8)
def FlatPropertiesAddD(builder, d): builder.PrependUOffsetTRelativeSlot(3, flatbuffers.number_types.UOffsetTFlags.py_type(d), 0)
def FlatPropertiesStartDVector(builder, numElems): return builder.StartVector(8, numElems, 8)
def FlatPropertiesAddA(builder, a): builder.PrependUOffsetTRelativeSlot(4, flatbuffers.number_types.UOffsetTFlags.py_type(a), 0)
def FlatPropertiesStartAVector(builder, numElems): return builder.StartVector(4, numElems, 4)
def FlatPropertiesAddB(builder, b): builder.PrependUOffsetTRelativeSlot(5, flatbuffers.number_types.UOffsetTFlags.py_type(b), 0)
def FlatPropertiesStartBVector(builder, numElems): return builder.StartVector(1, numElems, 1)
def FlatPropertiesAddS(builder, s): builder.PrependUOffsetTRelativeSlot(6, flatbuffers.number_types.UOffsetTFlags.py_type(s), 0)
def FlatPropertiesStartSVector(builder, numElems): return builder.StartVector(4, numElems, 4)
def FlatPropertiesAddShape(builder, shape): builder.PrependUOffsetTRelativeSlot(7, flatbuffers.number_types.UOffsetTFlags.py_type(shape), 0)
def FlatPropertiesStartShapeVector(builder, numElems): return builder.StartVector(4, numElems, 4)
def FlatPropertiesEnd(builder): return builder.EndObject()
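A hedged round-trip sketch using the generated helpers above; it needs the flatbuffers runtime, and the field value is arbitrary:
# Build a FlatProperties table with only the name field set, then read it back.
import flatbuffers

builder = flatbuffers.Builder(0)
name = builder.CreateString("conv1")
FlatPropertiesStart(builder)
FlatPropertiesAddName(builder, name)
props = FlatPropertiesEnd(builder)
builder.Finish(props)

p = FlatProperties.GetRootAsFlatProperties(builder.Output(), 0)
print(p.Name())      # b'conv1'
print(p.ILength())   # 0 - vector fields that were never added read back as empty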
|
mayhem/led-chandelier
|
software/patterns/sweep_two_color_shift.py
|
Python
|
mit
| 1,663 | 0.004811 |
#!/usr/bin/env python3
import os
import sys
import math
from colorsys import hsv_to_rgb
from random import random
from hippietrap.hippietrap import HippieTrap, ALL, NUM_NODES
from hippietrap.color import Color, ColorGenerator, random_color, hue_to_color
from hippietrap.geometry import HippieTrapGeometry
from hippietrap.pattern import PatternBase, run_pattern
from time import sleep, time
class SweepTwoColorShiftPattern(PatternBase):
geo = HippieTrapGeometry()
cg = ColorGenerator()
name = "sweep two colors"
color_shift_between_rings = .045
def pattern(self):
self.trap.send_decay(ALL, 90)
self.trap.start_pattern(ALL)
index = 0
        hue_offset = 0.0
stop = False
color_rings = [ random_color(), random_color(), random_color() , random_color() ]
while not stop:
for bottle, angle in self.geo.enumerate_all_bottles(index % 2 == 0):
self.trap.set_color(bottle, color_rings[self.geo.get_ring_from_bottle(bottle)])
sleep(.01)
if self.stop_thread:
stop = True
break
index += 1
hue_offset = math.fmod(hue_offset + .02, 1.0)
shift = math.sin(index / self.color_shift_between_rings) / 2.0 + .50
new_offset = math.fmod(shift, 1.0)
color_rings.pop()
color_rings.insert(0, hue_to_color(new_offset))
self.trap.stop_pattern(ALL)
if __name__ == "__main__":
with HippieTrap() as trap:
trap.begin()
trap.set_brightness(ALL, 100)
p = SweepTwoColorShiftPattern(trap)
p.pattern()
|
ddurieux/alignak
|
alignak/db_oracle.py
|
Python
|
agpl-3.0
| 4,741 | 0.001477 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2015-2015: Alignak team, see AUTHORS.txt file for contributors
#
# This file is part of Alignak.
#
# Alignak is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Alignak is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Alignak. If not, see <http://www.gnu.org/licenses/>.
#
#
# This file incorporates work covered by the following copyright and
# permission notice:
#
# Copyright (C) 2009-2014:
# xkilian, fmikus@acktomic.com
# Hartmut Goebel, h.goebel@goebel-consult.de
# Nicolas Dupeux, nicolas@dupeux.net
# Grégory Starck, g.starck@gmail.com
# Sebastien Coavoux, s.coavoux@free.fr
|
#     Thibault Cohen, titilambert@gmail.com
# Jean Gabes, naparuba@gmail.com
# Zoran Zaric, zz@zoranzaric.de
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
|
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
# If cx_Oracle cannot be imported, fall back to None placeholders;
# the failure is then caught by __init__.py when the module is used.
try:
    from cx_Oracle import connect as connect_function
    from cx_Oracle import IntegrityError as IntegrityError_exp
    from cx_Oracle import ProgrammingError as ProgrammingError_exp
    from cx_Oracle import DatabaseError as DatabaseError_exp
    from cx_Oracle import InternalError as InternalError_exp
    from cx_Oracle import DataError as DataError_exp
    from cx_Oracle import OperationalError as OperationalError_exp
except ImportError:
    connect_function = None
    IntegrityError_exp = None
    ProgrammingError_exp = None
    DatabaseError_exp = None
    InternalError_exp = None
    DataError_exp = None
    OperationalError_exp = None
from alignak.db import DB
from alignak.log import logger
class DBOracle(DB):
"""Manage connection and query execution against Oracle databases."""
def __init__(self, user, password, database, table_prefix=''):
self.user = user
self.password = password
self.database = database
self.table_prefix = table_prefix
def connect_database(self):
"""Create the database connection
TODO: finish (begin :) ) error catch and conf parameters...
"""
connstr = '%s/%s@%s' % (self.user, self.password, self.database)
self.db = connect_function(connstr)
self.db_cursor = self.db.cursor()
self.db_cursor.arraysize = 50
def execute_query(self, query):
""" Execute a query against an Oracle database.
"""
logger.debug("[DBOracle] Execute Oracle query %s\n", query)
try:
self.db_cursor.execute(query)
self.db.commit()
except IntegrityError_exp, exp:
logger.warning("[DBOracle] Warning: a query raise an integrity error: %s, %s",
query, exp)
except ProgrammingError_exp, exp:
logger.warning("[DBOracle] Warning: a query raise a programming error: %s, %s",
query, exp)
except DatabaseError_exp, exp:
logger.warning("[DBOracle] Warning: a query raise a database error: %s, %s",
query, exp)
except InternalError_exp, exp:
logger.warning("[DBOracle] Warning: a query raise an internal error: %s, %s",
query, exp)
except DataError_exp, exp:
logger.warning("[DBOracle] Warning: a query raise a data error: %s, %s",
query, exp)
except OperationalError_exp, exp:
logger.warning("[DBOracle] Warning: a query raise an operational error: %s, %s",
query, exp)
except Exception, exp:
logger.warning("[DBOracle] Warning: a query raise an unknown error: %s, %s",
query, exp)
logger.warning(exp.__dict__)
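A minimal usage sketch for the class above; the connection details and query are placeholders, not values taken from Alignak itself.
db = DBOracle('scott', 'tiger', 'XE', table_prefix='alignak_')
db.connect_database()
db.execute_query("UPDATE alignak_hosts SET state = 'UP' WHERE host_name = 'srv01'")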
|
vmlaker/mpipe
|
doc/source/conf.py
|
Python
|
mit
| 7,581 | 0.006068 |
# -*- coding: utf-8 -*-
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
sys.path.insert(0, os.path.abspath('../../tools'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.pngmath',
'sphinx.ext.intersphinx',
# Create links to Python source code for the module.
# 'sphinx.ext.viewcode',
'sphinx.ext.autosummary',
'sphinx.ext.inheritance_diagram',
]
# Add any locations and names of other projects that should be linked to in this documentation.
intersphinx_mapping = {
'python': ('http://docs.python.org', None),
}
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'MPipe'
copyright = u'2014, Velimir Mlaker'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = ''
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = ['wm5.', 'wm5', ]
# Set this to 'both' to append the __init__(self) docstring to the class docstring.
autoclass_content = 'both'
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'mpipe'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ['_themes']
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
html_title = '{0} Documentation'.format(project)
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
html_sidebars = {
'**' : [],
# '**' : ['localtoc.html'],
|
#    '**' : ['globaltoc.html'],
# '**' : ['searchbox.html', 'search.html'],
# '**' : ['searchbox.html'],
}
# Additional templates that should be rendered to pages, maps page names to
# template names.
html_additional_pages = { 'search' : 'search.html' }
# If false, no module index is generated.
html_domain_indices = False
# If false, no index is generated.
html_use_index = True
|
# If true, the index is split into individual pages for each letter.
html_split_index = False
# If true, links to the reST sources are added to the pages.
html_show_sourcelink = False
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'MPipedoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'MPipe.tex', u'MPipe Documentation',
u'Velimir Mlaker', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
latex_domain_indices = False
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'mpipe', u'MPipe Documentation',
[u'Velimir Mlaker'], 1)
]
rst_prolog = '''
.. |NAME| replace:: MPipe
'''
# End of file.
|
kevinwchang/Minecraft-Overviewer
|
overviewer_core/aux_files/genPOI.py
|
Python
|
gpl-3.0
| 21,247 | 0.003624 |
#!/usr/bin/env python2
'''
genPOI.py
Scans regionsets for TileEntities and Entities, filters them, and writes out
POI/marker info.
A markerSet is a list of POIs to display on a tileset. It has a display name
and a group name.
markersDB.js holds a list of POIs in each group
markers.js holds a list of which markerSets are attached to each tileSet
'''
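For context, the POIs gathered by this script are matched against filter functions supplied in the Overviewer config file; a minimal sign filter might look like the sketch below (illustrative only, not part of genPOI.py).
def signFilter(poi):
    # Return the marker text for signs; returning None/False skips the POI.
    if poi['id'] == 'Sign' or poi['id'] == 'minecraft:sign':
        return "\n".join([poi['Text1'], poi['Text2'], poi['Text3'], poi['Text4']])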
import gzip
import itertools
import json
import logging
import multiprocessing
import os
import re
import sys
import time
import urllib2
import datetime
from collections import defaultdict
from contextlib import closing
from multiprocessing import Pool
from optparse import OptionParser
from overviewer_core import logger
from overviewer_core import nbt
from overviewer_core import configParser, world
from overviewer_core.files import FileReplacer, get_fs_caps
UUID_LOOKUP_URL = 'https://sessionserver.mojang.com/session/minecraft/profile/'
def replaceBads(s):
"Replaces bad characters with good characters!"
bads = [" ", "(", ")"]
x=s
for bad in bads:
x = x.replace(bad,"_")
return x
# If you want to keep your stomach contents do not, under any circumstance,
# read the body of the following function. You have been warned.
# All of this could be replaced by a simple json.loads if Mojang had
# introduced a TAG_JSON, but they didn't.
#
# So here are a few curiosities about how 1.7 signs get seen in 1.8 Minecraft:
# - null ->
# - "null" -> null
# - ["Hello"] -> Hello
# - [Hello] -> Hello
# - [1,2,3] -> 123
# Mojang just broke signs for everyone who ever used [, { and ". GG.
def jsonText(s):
if s is None or s == "null":
return ""
if (s.startswith('"') and s.endswith('"')) or \
(s.startswith('{') and s.endswith('}')):
try:
js = json.loads(s)
except ValueError:
return s
def parseLevel(foo):
bar = ""
if isinstance(foo, list):
for extra in foo:
bar += parseLevel(extra)
elif isinstance(foo, dict):
if "text" in foo:
bar += foo["text"]
if "extra" in foo:
bar += parseLevel(foo["extra"])
elif isinstance(foo, basestring):
bar = foo
return bar
return parseLevel(js)
else:
return s
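A few hand-traced examples of what jsonText returns, derived only from the code above rather than from Minecraft itself:
# jsonText(None)                                          -> ''
# jsonText('plain sign text')                             -> 'plain sign text'
# jsonText('{"text": "Hello"}')                           -> 'Hello'
# jsonText('{"text": "He", "extra": [{"text": "llo"}]}')  -> 'Hello'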
# Since functions are not pickleable, we send their names instead.
# Here, set up worker processes to have a name -> function map
bucketChunkFuncs = {}
def initBucketChunks(config_path):
global bucketChunkFuncs
mw_parser = configParser.MultiWorldParser()
mw_parser.parse(config_path)
# ought not to fail since we already did it once
config = mw_parser.get_validated_config()
for name, render in config['renders'].iteritems():
for f in render['markers']:
ff = f['filterFunction']
bucketChunkFuncs[ff.__name__] = ff
# yes there's a double parenthesis here
# see below for when this is called, and why we do this
# a smarter way would be functools.partial, but that's broken on python 2.6
# when used with multiprocessing
def parseBucketChunks((bucket, rset, filters)):
global bucketChunkFuncs
pid = multiprocessing.current_process().pid
markers = defaultdict(list)
i = 0
cnt = 0
mcnt_prev = 0
for b in bucket:
try:
data = rset.get_chunk(b[0],b[1])
for poi in itertools.chain(data['TileEntities'], data['Entities']):
if poi['id'] == 'Sign' or poi['id'] == 'minecraft:sign':
poi = signWrangler(poi)
for name, filter_function in filters:
ff = bucketChunkFuncs[filter_function]
result = ff(poi)
if result:
d = create_marker_from_filter_result(poi, result)
markers[name].append(d)
except nbt.CorruptChunkError:
logging.warning("Ignoring POIs in corrupt chunk %d,%d", b[0], b[1])
# Perhaps only on verbose ?
i = i + 1
if i == 250:
i = 0
cnt = 250 + cnt
mcnt = sum(len(v) for v in markers.itervalues())
if mcnt > mcnt_prev:
logging.info("Found %d markers in thread %d so far at %d chunks", mcnt, pid, cnt);
mcnt_prev = mcnt
return markers
def signWrangler(poi):
"""
Just does the JSON things for signs
"""
for field in ["Text1", "Text2", "Text3", "Text4"]:
|
        poi[field] = jsonText(poi[field])
return poi
def handleEntities(rset, config, config_path, filters, markers):
"""
Add markers for Entities or TileEntities.
For this every chunk of the regionset is parsed and filtered using multiple
processes, if so configured.
This function will not return anything, but it will update the parameter
`markers`.
"""
logging.info("Looking for entities in %r", rset)
|
    numbuckets = config['processes'];
if numbuckets < 0:
numbuckets = multiprocessing.cpu_count()
if numbuckets == 1:
for (x, z, mtime) in rset.iterate_chunks():
try:
data = rset.get_chunk(x, z, entities_only=True)
for poi in itertools.chain(data['TileEntities'], data['Entities']):
if poi['id'] == 'Sign' or poi['id'] == 'minecraft:sign': # kill me
poi = signWrangler(poi)
for name, __, filter_function, __, __, __ in filters:
result = filter_function(poi)
if result:
d = create_marker_from_filter_result(poi, result)
markers[name]['raw'].append(d)
except nbt.CorruptChunkError:
logging.warning("Ignoring POIs in corrupt chunk %d,%d", x,z)
else:
buckets = [[] for i in range(numbuckets)];
for (x, z, mtime) in rset.iterate_chunks():
i = x / 32 + z / 32
i = i % numbuckets
buckets[i].append([x, z])
for b in buckets:
logging.info("Buckets has %d entries", len(b));
# Create a pool of processes and run all the functions
pool = Pool(processes=numbuckets, initializer=initBucketChunks, initargs=(config_path,))
# simplify the filters dict, so pickle doesn't have to do so much
filters = [(name, filter_function.__name__) for name, __, filter_function, __, __, __ in filters]
results = pool.map(parseBucketChunks, ((buck, rset, filters) for buck in buckets))
logging.info("All the threads completed")
for marker_dict in results:
for name, marker_list in marker_dict.iteritems():
markers[name]['raw'].extend(marker_list)
logging.info("Done.")
class PlayerDict(dict):
use_uuid = False
_name = ''
uuid_cache = None # A cache for the UUID->profile lookups
@classmethod
def load_cache(cls, outputdir):
cache_file = os.path.join(outputdir, "uuidcache.dat")
if os.path.exists(cache_file):
try:
with closing(gzip.GzipFile(cache_file)) as gz:
cls.uuid_cache = json.load(gz)
logging.info("Loaded UUID cache from %r with %d entries",
cache_file, len(cls.uuid_cache.keys()))
except (ValueError, IOError):
logging.warning("Failed to load UUID cache -- it might be corrupt")
cls.uuid_cache = {}
corrupted_cache = cache_file + ".corrupted." + datetime.datetime.now().isoformat()
try:
os.rename(cache_file, corrupted_cache)
logging.warning("If %s does not appear to contain meaningful data, you may safely delete it", corrupted_cache)
except OSError:
logging.warning("Failed to backup corrupted UUID cache")
logging.info("Initialized an empty UUID cache")
else:
cls.uuid_cache = {}
logging.info("Initiali
|
piotroxp/scibibscan
|
scib/lib/python3.5/site-packages/astropy/modeling/tests/test_parameters.py
|
Python
|
mit
| 21,265 | 0.000611 |
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Tests models.parameters
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import itertools
import numpy as np
from numpy.testing import utils
from . import irafutil
from .. import models, fitting
from ..core import Model, FittableModel, ModelDefinitionError
from ..parameters import Parameter, InputParameterError
from ...utils.data import get_pkg_data_filename
from ...tests.helper import pytest
class TParModel(Model):
"""
A toy model to test parameters machinery
"""
coeff = Parameter()
e = Parameter()
def __init__(self, coeff, e, **kwargs):
super(TParModel, self).__init__(coeff=coeff, e=e, **kwargs)
@staticmethod
def evaluate(coeff, e):
pass
class MockModel(FittableModel):
alpha = Parameter(name='alpha', default=42)
@staticmethod
def evaluate(*args):
pass
def test_parameter_properties():
"""Test if getting / setting of Parameter properties works."""
m = MockModel()
p = m.alpha
assert p.name == 'alpha'
# Parameter names are immutable
with pytest.raises(AttributeError):
p.name = 'beta'
assert p.fixed == False
p.fixed = True
assert p.fixed == True
assert p.tied == False
p.tied = lambda _: 0
p.tied = False
assert p.tied == False
assert p.min == None
p.min = 42
assert p.min == 42
p.min = None
assert p.min == None
assert p.max == None
# TODO: shouldn't setting a max < min give an error?
p.max = 41
assert p.max == 41
def test_parameter_operators():
"""Test if the parameter arithmetic operators work."""
m = MockModel()
par = m.alpha
num = 42.
val = 3
assert par - val == num - val
assert val - par == val - num
assert par / val == num / val
assert val / par == val / num
assert par ** val == num ** val
assert val ** par == val ** num
assert par < 45
assert par > 41
assert par <= par
assert par >= par
assert par == par
assert -par == -num
assert abs(par) == abs(num)
def test_parameter_name_attribute_mismatch():
"""
It should not be possible to define Parameters on a model with different
names from the attributes they are assigned to.
"""
def make_bad_class():
class BadModel(Model):
foo = Parameter('bar')
def __call__(self): pass
def make_good_class():
class GoodModel(Model):
# This is redundant but okay
foo = Parameter('foo')
def __call__(self): pass
make_good_class()
pytest.raises(ModelDefinitionError, make_bad_class)
class TestParameters(object):
def setup_class(self):
"""
Unit tests for parameters
Read an iraf database file created by onedspec.identify. Use the
information to create a 1D Chebyshev model and perform the same fit.
        Also create a Gaussian model.
"""
test_file = get_pkg_data_filename('data/idcompspec.fits')
f = open(test_file)
lines = f.read()
reclist = lines.split("begin")
f.close()
record = irafutil.IdentifyRecord(reclist[1])
self.icoeff = record.coeff
order = int(record.fields['order'])
self.model = models.Chebyshev1D(order - 1)
self.gmodel = models.Gaussian1D(2, mean=3, stddev=4)
self.linear_fitter = fitting.LinearLSQFitter()
self.x = record.x
self.y = record.z
self.yy = np.array([record.z, record.z])
def test_set_slice(self):
"""
Tests updating the parameters attribute with a slice.
This is what fitters internally do.
"""
self.model.parameters[:] = np.array([3, 4, 5, 6, 7])
assert (self.model.parameters == [3., 4., 5., 6., 7.]).all()
def test_set_parameters_as_list(self):
"""Tests updating parameters using a list."""
self.model.parameters = [30, 40, 50, 60, 70]
assert (self.model.parameters == [30., 40., 50., 60, 70]).all()
def test_set_parameters_as_array(self):
"""Tests updating parameters using an array."""
self.model.parameters = np.array([3, 4, 5, 6, 7])
assert (self.model.parameters == [3., 4., 5., 6., 7.]).all()
def test_set_as_tuple(self):
"""Tests updating parameters using a tuple."""
self.model.parameters = (1, 2, 3, 4, 5)
assert (self.model.parameters == [1, 2, 3, 4, 5]).all()
def test_set_model_attr_seq(self):
"""
Tests updating the parameters attribute when a model's
parameter (in this case coeff) is updated.
"""
self.model.parameters = [0, 0., 0., 0, 0]
self.model.c0 = 7
assert (self.model.parameters == [7, 0., 0., 0, 0]).all()
def test_set_model_attr_num(self):
"""Update the parameter list when a model's parameter is updated."""
self.gmodel.amplitude = 7
assert (self.gmodel.parameters == [7, 3, 4]).all()
def test_set_item(self):
"""Update the parameters using indexing."""
self.model.parameters = [1, 2, 3, 4, 5]
self.model.parameters[0] = 10.
assert (self.model.parameters == [10, 2, 3, 4, 5]).all()
assert self.model.c0 == 10
def test_wrong_size1(self):
"""
Tests raising an error when attempting to reset the parameters
using a list of a different size.
"""
with pytest.raises(InputParameterError):
self.model.parameters = [1, 2, 3]
def test_wrong_size2(self):
"""
Tests raising an exception when attempting to update a model's
parameter (in this case coeff) with a sequence of the wrong size.
"""
with pytest.raises(InputParameterError):
self.model.c0 = [1, 2, 3]
def test_wrong_shape(self):
"""
Tests raising an exception when attempting to update a model's
parameter and the new value has the wrong shape.
"""
with pytest.raises(InputParameterError):
self.gmodel.amplitude = [1, 2]
def test_par_against_iraf(self):
"""
Test the fitter modifies model.parameters.
Uses an iraf example.
"""
new_model = self.linear_fitter(self.model, self.x, self.y)
print(self.y, self.x)
utils.assert_allclose(new_model.parameters,
np.array(
[4826.1066602783685, 952.8943813407858,
12.641236013982386,
-1.7910672553339604,
0.90252884366711317]),
rtol=10 ** (-2))
def testPolynomial1D(self):
d = {'c0': 11, 'c1': 12, 'c2': 13, 'c3': 14}
p1 = models.Polynomial1D(3, **d)
utils.assert_equal(p1.parameters, [11, 12, 13, 14])
def test_poly1d_multiple_sets(self):
p1 = models.Polynomial1D(3, n_models=3)
utils.assert_equal(p1.parameters, [0.0, 0.0, 0.0, 0, 0, 0,
|
0, 0, 0, 0, 0, 0])
utils.assert_equal(p1.c0, [0, 0, 0])
p1.c0 = [10, 10, 10]
utils.assert_equal(p1.parameters, [10.0, 10.0, 10.0, 0,
|
0,
0, 0, 0, 0, 0, 0, 0])
def test_par_slicing(self):
"""
Test assigning to a parameter slice
"""
p1 = models.Polynomial1D(3, n_models=3)
p1.c0[:2] = [10, 10]
utils.assert_equal(p1.parameters, [10.0, 10.0, 0.0, 0, 0,
0, 0, 0, 0, 0, 0, 0])
def test_poly2d(self):
p2 = models.Polynomial2D(degree=3)
p2.c0_0 = 5
utils.assert_equal(p2.parameters, [5, 0, 0, 0, 0, 0, 0, 0, 0, 0])
def test_poly2d_multiple_sets(self):
kw = {'c0_0': [2, 3], 'c1_0': [1, 2], 'c2_0': [4, 5],
'c0_1': [1, 1], 'c0_2': [2, 2], 'c1_1': [5, 5]}
p2 = models.Polynomial2D(2, **kw)
utils.assert_equal(p2.parameters
|
cpcloud/numba
|
numba/tests/test_unicode.py
|
Python
|
bsd-2-clause
| 93,714 | 0.000151 |
# -*- coding: utf-8 -*-
from itertools import product
from itertools import permutations
from numba import njit
from numba.core import types, utils
import unittest
from numba.tests.support import (TestCase, no_pyobj_flags, MemoryLeakMixin)
from numba.core.errors import TypingError, UnsupportedError
from numba.cpython.unicode import _MAX_UNICODE
from numba.core.types.functions import _header_lead
from numba.extending import overload
_py37_or_later = utils.PYVERSION >= (3, 7)
def isascii(s):
return all(ord(c) < 128 for c in s)
def literal_usecase():
return '大处着眼,小处着手。'
def passthrough_usecase(x):
return x
def eq_usecase(x, y):
return x == y
def len_usecase(x):
return len(x)
def bool_usecase(x):
return bool(x)
def getitem_usecase(x, i):
return x[i]
def getitem_check_kind_usecase(x, i):
return hash(x[i])
def zfill_usecase(x, y):
return x.zfill(y)
def concat_usecase(x, y):
return x + y
def repeat_usecase(x, y):
return x * y
def inplace_concat_usecase(x, y):
x += y
return x
def in_usecase(x, y):
return x in y
def lt_usecase(x, y):
return x < y
def le_usecase(x, y):
return x <= y
def gt_usecase(x, y):
return x > y
def ge_usecase(x, y):
return x >= y
def partition_usecase(s, sep):
return s.partition(sep)
def find_usecase(x, y):
return x.find(y)
def find_with_start_only_usecase(x, y, start):
return x.find(y, start)
def find_with_start_end_usecase(x, y, start, end):
return x.find(y, start, end)
def rpartition_usecase(s, sep):
return s.rpartition(sep)
def count_usecase(x, y):
return x.count(y)
def count_with_start_usecase(x, y, start):
return x.count(y, start)
def count_with_start_end_usecase(x, y, start, end):
return x.count(y, start, end)
def rfind_usecase(x, y):
return x.rfind(y)
def rfind_with_start_only_usecase(x, y, start):
return x.rfind(y, start)
def rfind_with_start_end_usecase(x, y, start, end):
return x.rfind(y, start, end)
def replace_usecase(s, x, y):
return s.replace(x, y)
def replace_with_count_usecase(s, x, y, count):
return s.replace(x, y, count)
def rindex_usecase(x, y):
return x.rindex(y)
def rindex_with_start_only_usecase(x, y, start):
return x.rindex(y, start)
def rindex_with_start_end_usecase(x, y, start, end):
return x.rindex(y, start, end)
def index_usecase(x, y):
return x.index(y)
def index_with_start_only_usecase(x, y, start):
return x.index(y, start)
def index_with_start_end_usecase(x, y, start, end):
return x.index(y, start, end)
def startswith_usecase(x, y):
return x.startswith(y)
def endswith_usecase(x, y):
return x.endswith(y)
def expandtabs_usecase(s):
return s.expandtabs()
def expandtabs_with_tabsize_usecase(s, tabsize):
return s.expandtabs(tabsize)
def expandtabs_with_tabsize_kwarg_usecase(s, tabsize):
return s.expandtabs(tabsize=tabsize)
def endswith_with_start_only_usecase(x, y, start):
return x.endswith(y, start)
def endswith_with_start_end_usecase(x, y, start, end):
return x.endswith(y, start, end)
def split_usecase(x, y):
return x.split(y)
def split_with_maxsplit_usecase(x, y, maxsplit):
return x.split(y, maxsplit)
def split_with_maxsplit_kwarg_usecase(x, y, maxsplit):
return x.split(y, maxsplit=maxsplit)
def split_whitespace_usecase(x):
return x.split()
def splitlines_usecase(s):
return s.splitlines()
def splitlines_with_keepends_usecase(s, keepends):
return s.splitlines(keepends)
def splitlines_with_keepends_kwarg_usecase(s, keepends):
return s.splitlines(keepends=keepends)
def rsplit_usecase(s, sep):
return s.rsplit(sep)
def rsplit_with_maxsplit_usecase(s, sep, maxsplit):
return s.rsplit(sep, maxsplit)
def rsplit_with_maxsplit_kwarg_usecase(s, sep, maxsplit):
return s.rsplit(sep, maxsplit=maxsplit)
def rsplit_whitespace_usecase(s):
return s.rsplit()
def lstrip_usecase(x):
return x.lstrip()
def lstrip_usecase_chars(x, chars):
return x.lstrip(chars)
def rstrip_usecase(x):
return x.rstrip()
def rstrip_usecase_chars(x, chars):
return x.rstrip(chars)
def strip_usecase(x):
return x.strip()
def strip_usecase_chars(x, chars):
return x.strip(chars)
def join_usecase(x, y):
return x.join(y)
def join_empty_usecase(x):
# hack to make empty typed list
l = ['']
l.pop()
return x.join(l)
def center_usecase(x, y):
return x.center(y)
def center_usecase_fillchar(x, y, fillchar):
return x.center(y, fillchar)
def ljust_usecase(x, y):
return x.ljust(y)
def ljust_usecase_fillchar(x, y, fillchar):
return x.ljust(y, fillchar)
def rjust_usecase(x, y):
return x.rjust(y)
def rjust_usecase_fillchar(x, y, fillchar):
return x.rjust(y, fillchar)
def istitle_usecase(x):
return x.istitle()
def iter_usecase(x):
l = []
for i in x:
l.append(i)
|
return l
def title(x):
return x.title()
def literal_iter_usecase():
l = []
for i in '大处着眼,小处着手。':
l.append(i)
return l
def enumerated_iter_usecase(x):
buf = ""
scan = 0
for i, s in enumerate(x):
buf += s
scan += 1
return buf, scan
def iter_stopiteration_usecase(x):
n = len(x)
i = iter(x)
for _ in range(n + 1):
next(i)
|
def literal_iter_stopiteration_usecase():
s = '大处着眼,小处着手。'
i = iter(s)
n = len(s)
for _ in range(n + 1):
next(i)
def islower_usecase(x):
return x.islower()
def lower_usecase(x):
return x.lower()
def ord_usecase(x):
return ord(x)
def chr_usecase(x):
return chr(x)
class BaseTest(MemoryLeakMixin, TestCase):
def setUp(self):
super(BaseTest, self).setUp()
UNICODE_EXAMPLES = [
'',
'ascii',
'12345',
'1234567890',
'¡Y tú quién te crees?',
'🐍⚡',
'大处着眼,小处着手。',
]
UNICODE_ORDERING_EXAMPLES = [
'',
    'a',
    'aa',
'aaa',
'b',
'aab',
'ab',
'asc',
'ascih',
'ascii',
'ascij',
'大处着眼,小处着手',
'大处着眼,小处着手。',
'大处着眼,小处着手。🐍⚡',
]
UNICODE_COUNT_EXAMPLES = [
('', ''),
('', 'ascii'),
('ascii', ''),
('asc ii', ' '),
('ascii', 'ci'),
('ascii', 'ascii'),
('ascii', 'Ă'),
('ascii', '大处'),
('ascii', 'étú?'),
('', '大处 着眼,小处着手。大大大处'),
('大处 着眼,小处着手。大大大处', ''),
('大处 着眼,小处着手。大大大处', ' '),
('大处 着眼,小处着手。大大大处', 'ci'),
('大处 着眼,小处着手。大大大处', '大处大处'),
('大处 着眼,小处着手。大大大处', '大处 着眼,小处着手。大大大处'),
('大处 着眼,小处着手。大大大处', 'Ă'),
('大处 着眼,小处着手。大大大处', '大处'),
('大处 着眼,小处着手。大大大处', 'étú?'),
('', 'tú quién te crees?'),
('tú quién te crees?', ''),
('tú quién te crees?', ' '),
('tú quién te crees?', 'ci'),
('tú quién te crees?', 'tú quién te crees?'),
('tú quién te crees?', 'Ă'),
('tú quién te crees?', '大处'),
('tú quién te crees?', 'étú?'),
('abababab', 'a'),
('abababab', 'ab'),
('abababab', 'aba'),
('aaaaaaaaaa', 'aaa'),
('aaaaaaaaaa', 'aĂ'),
('aabbaaaabbaa', 'aa')
]
class TestUnicode(BaseTest):
def test_literal(self, flags=no_pyobj_flags):
pyfunc = literal_usecase
self.run_nullary_func(pyfunc, flags=flags)
def test_passthrough(self, flags=no_pyobj_flags):
pyfunc = passthrough_usecase
cfunc = njit(pyfunc)
for s in UNICODE_EXAMPLES:
self.assertEqual(pyfunc(s), cfunc(s))
def test_eq(self, flags=no_pyobj_flags):
pyfunc = eq_usecase
cfunc = njit(pyfunc)
for a in UNICODE_EXAMPLES:
for b in reversed(UNICODE_EXAMPLES):
self.assertEqual(pyfunc(a, b),
cfunc(a, b), '%s, %s' % (a, b))
# comparing against something that's not unicode
self.assertEqual(pyfunc(a, 1),
cfunc(a, 1), '%s, %s' % (a, 1))
self.assertEqual(pyfunc(1, b),
cfunc(1, b), '%s, %s' % (1, b))
def _check_ordering_op(self, usecase):
pyfunc = usecase
cfunc = njit(pyfunc)
# Check comparison to self
for
|
blzq/infer
|
infer/lib/python/inferlib/capture/ant.py
|
Python
|
bsd-3-clause
| 2,914 | 0 |
# Copyright (c) 2015 - present Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the BSD style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
import logging
import os
from . import util
from inferlib import jwlib
MODULE_NAME = __name__
MODULE_DESCRIPTION = '''Run analysis of code built with a command like:
ant [options] [target]
Analysis examples:
infer -- ant compile'''
LANG = ['java']
def gen_instance(*args):
return AntCapture(*args)
# This creates an empty argparser for the module, which provides only
# description/usage information and no arguments.
create_argparser = util.base_argparser(MODULE_DESCRIPTION, MODULE_NAME)
class AntCapture:
def __init__(self, args, cmd):
self.args = args
util.log_java_version()
logging.info(util.run_cmd_ignore_fail(['ant', '-version']))
# TODO: make the extraction of targets smarter
self.build_cmd = ['ant', '-verbose'] + cmd[1:]
def is_interesting(self, content):
return self.is_quoted(content) or content.endswith('.java')
def is_quoted(self, argument):
quote = '\''
return len(argument) > 2 and argument[0] == quote\
and argument[-1] == quote
def remove_quotes(self, argument):
if self.is_quoted(argument):
return argument[1:-1]
else:
return argument
def get_infer_commands(self, verbose_output):
javac_pattern = '[javac]'
argument_start_pattern = 'Compilation arguments'
calls = []
javac_arguments = []
collect = False
for line in verbose_output:
if javac_pattern in line:
if argument_start_pattern in line:
collect = True
if javac_arguments != []:
capture = jwlib.create_infer_command(javac_arguments)
calls.append(capture)
javac_arguments = []
if collect:
pos = line.index(javac_pattern) + len(javac_pattern)
content = line[pos:].strip()
if self.is_interesting(content):
|
                    arg = self.remove_quotes(content)
javac_arguments.append(arg)
|
        if javac_arguments != []:
capture = jwlib.create_infer_command(javac_arguments)
calls.append(capture)
javac_arguments = []
return calls
def capture(self):
(code, verbose_out) = util.get_build_output(self.build_cmd)
if code != os.EX_OK:
return code
clean_cmd = '\'{}\' clean'.format(self.build_cmd[0])
cmds = self.get_infer_commands(verbose_out)
return util.run_compilation_commands(cmds, clean_cmd)
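For orientation, get_infer_commands keys on lines of this general shape in the 'ant -verbose' output (format assumed from the parsing logic above, not from ant documentation):
#   [javac] Compilation arguments:
#   [javac] '-d'
#   [javac] '/path/to/build/classes'
#   [javac] /path/to/src/com/example/Foo.java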
|
ic-hep/DIRAC
|
src/DIRAC/FrameworkSystem/Service/TokenManagerHandler.py
|
Python
|
gpl-3.0
| 8,162 | 0.001715 |
""" TokenManager service
.. literalinclude:: ../ConfigTemplate.cfg
:start-after: ##BEGIN TokenManager:
:end-before: ##END
:dedent: 2
:caption: TokenManager options
"""
import pprint
from DIRAC import gLogger, S_OK, S_ERROR
from DIRAC.Core.Security import Properties
from DIRAC.Core.Tornado.Server.TornadoService import TornadoService
from DIRAC.FrameworkSystem.DB.TokenDB import TokenDB
from DIRAC.ConfigurationSystem.Client.Helpers import Registry
from DIRAC.Resources.IdProvider.IdProviderFactory import IdProviderFactory
class TokenManagerHandler(TornadoService):
__maxExtraLifeFactor = 1.5
__tokenDB = None
@classmethod
def initializeHandler(cls, serviceInfoDict):
try:
cls.__tokenDB = TokenDB()
except Exception as e:
gLogger.exception(e)
return S_ERROR("Could not connect to the database %s" % repr(e))
cls.idps = IdProviderFactory()
return S_OK()
def __generateUsersTokensInfo(self, users):
"""Generate information dict about user tokens
:return: dict
"""
tokensInfo = []
|
credDict = self.getRemoteCredentials()
result = Registry.getDNForUsername(credDict["username"])
if not result["OK"]:
return result
for dn in result["Value"]:
|
            result = Registry.getIDFromDN(dn)
if result["OK"]:
result = self.__tokenDB.getTokensByUserID(result["Value"])
if not result["OK"]:
gLogger.error(result["Message"])
tokensInfo += result["Value"]
return tokensInfo
def __generateUserTokensInfo(self):
"""Generate information dict about user tokens
:return: dict
"""
tokensInfo = []
credDict = self.getRemoteCredentials()
result = Registry.getDNForUsername(credDict["username"])
if not result["OK"]:
return result
for dn in result["Value"]:
result = Registry.getIDFromDN(dn)
if result["OK"]:
result = self.__tokenDB.getTokensByUserID(result["Value"])
if not result["OK"]:
gLogger.error(result["Message"])
tokensInfo += result["Value"]
return tokensInfo
def __addKnownUserTokensInfo(self, retDict):
"""Given a S_OK/S_ERR add a tokens entry with info of all the tokens a user has uploaded
:return: S_OK(dict)/S_ERROR()
"""
retDict["tokens"] = self.__generateUserTokensInfo()
return retDict
auth_getUserTokensInfo = ["authenticated"]
def export_getUserTokensInfo(self):
"""Get the info about the user tokens in the system
:return: S_OK(dict)
"""
return S_OK(self.__generateUserTokensInfo())
auth_getUsersTokensInfo = [Properties.PROXY_MANAGEMENT]
def export_getUsersTokensInfo(self, users):
"""Get the info about the user tokens in the system
:param list users: user names
:return: S_OK(dict)
"""
tokensInfo = []
for user in users:
result = Registry.getDNForUsername(user)
if not result["OK"]:
return result
for dn in result["Value"]:
uid = Registry.getIDFromDN(dn).get("Value")
if uid:
result = self.__tokenDB.getTokensByUserID(uid)
if not result["OK"]:
gLogger.error(result["Message"])
else:
for tokenDict in result["Value"]:
if tokenDict not in tokensInfo:
tokenDict["username"] = user
tokensInfo.append(tokenDict)
return S_OK(tokensInfo)
auth_uploadToken = ["authenticated"]
def export_updateToken(self, token, userID, provider, rt_expired_in=24 * 3600):
"""Request to delegate tokens to DIRAC
:param dict token: token
:param str userID: user ID
:param str provider: provider name
        :param int rt_expired_in: refresh token expiry time, in seconds
:return: S_OK(list)/S_ERROR() -- list contain uploaded tokens info as dictionaries
"""
self.log.verbose("Update %s user token for %s:\n" % (userID, provider), pprint.pformat(token))
result = self.idps.getIdProvider(provider)
if not result["OK"]:
return result
idPObj = result["Value"]
result = self.__tokenDB.updateToken(token, userID, provider, rt_expired_in)
if not result["OK"]:
return result
for oldToken in result["Value"]:
if "refresh_token" in oldToken and oldToken["refresh_token"] != token["refresh_token"]:
self.log.verbose("Revoke old refresh token:\n", pprint.pformat(oldToken))
idPObj.revokeToken(oldToken["refresh_token"])
return self.__tokenDB.getTokensByUserID(userID)
def __checkProperties(self, requestedUserDN, requestedUserGroup):
"""Check the properties and return if they can only download limited tokens if authorized
:param str requestedUserDN: user DN
:param str requestedUserGroup: DIRAC group
:return: S_OK(bool)/S_ERROR()
"""
credDict = self.getRemoteCredentials()
if Properties.FULL_DELEGATION in credDict["properties"]:
return S_OK(False)
if Properties.LIMITED_DELEGATION in credDict["properties"]:
return S_OK(True)
if Properties.PRIVATE_LIMITED_DELEGATION in credDict["properties"]:
if credDict["DN"] != requestedUserDN:
return S_ERROR("You are not allowed to download any token")
if Properties.PRIVATE_LIMITED_DELEGATION not in Registry.getPropertiesForGroup(requestedUserGroup):
return S_ERROR("You can't download tokens for that group")
return S_OK(True)
# Not authorized!
return S_ERROR("You can't get tokens!")
def export_getToken(self, username, userGroup):
"""Get a access token for a user/group
* Properties:
* FullDelegation <- permits full delegation of tokens
* LimitedDelegation <- permits downloading only limited tokens
* PrivateLimitedDelegation <- permits downloading only limited tokens for one self
"""
userID = []
provider = Registry.getIdPForGroup(userGroup)
if not provider:
return S_ERROR("The %s group belongs to the VO that is not tied to any Identity Provider." % userGroup)
result = self.idps.getIdProvider(provider)
if not result["OK"]:
return result
idpObj = result["Value"]
result = Registry.getDNForUsername(username)
if not result["OK"]:
return result
err = []
for dn in result["Value"]:
result = Registry.getIDFromDN(dn)
if result["OK"]:
result = self.__tokenDB.getTokenForUserProvider(result["Value"], provider)
if result["OK"] and result["Value"]:
idpObj.token = result["Value"]
result = self.__checkProperties(dn, userGroup)
if result["OK"]:
result = idpObj.exchangeGroup(userGroup)
if result["OK"]:
return result
err.append(result.get("Message", "No token found for %s." % dn))
return S_ERROR("; ".join(err or ["No user ID found for %s" % username]))
def export_deleteToken(self, userDN):
"""Delete a token from the DB
:param str userDN: user DN
:return: S_OK()/S_ERROR()
"""
credDict = self.getRemoteCredentials()
if Properties.PROXY_MANAGEMENT not in credDict["properties"]:
if userDN != credDict["DN"]:
return S_ERROR("You aren't allowed!")
result = Registry.getIDFromDN(userDN)
return self.__tokenDB.removeToken(user_id=result["Value"]) if result["OK"] else result
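A hedged sketch of how a client would normally reach this handler over DIRAC RPC; the service path mirrors the handler name and each call maps to an export_ method above, but treat the exact client API as an assumption.
from DIRAC.Core.Base.Client import Client

tokenManager = Client(url="Framework/TokenManager")   # assumed service location
result = tokenManager.getUserTokensInfo()             # maps to export_getUserTokensInfo
if result["OK"]:
    for tokenInfo in result["Value"]:
        print(tokenInfo)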
|
arokem/bowties
|
params.py
|
Python
|
apache-2.0
| 464 | 0.064655 |
p = dict(
subject = 'EG009',
|
    #Fixation size (in degrees):
fixation_size = 0.4,
monitor='testMonitor',
scanner=True,
screen_number = 1,
full_screen = True,
radial_cyc = 10,
angular_cyc = 15,
angular_width=30,
size = 60, #This just needs to be larger than the screen
temporal_freq = 2,
sf = 10,
|
    n_blocks = 20, #20 blocks = 200 sec = 3:20 minutes
block_duration=10,
color_dur = 1/3. # 2 Hz
)
|
gomezstevena/x-wind
|
src/navierstokesRun.py
|
Python
|
gpl-3.0
| 709 | 0.004231 |
import os
import sys
|
from numpy import *
from scipy.integrate import ode
from scipy.interpolate import griddata
from mesh import *
from navierstokes import NavierStokes
nE = 5000
dt = 0.0005
nsteps = 2000
Mach = 0.3
Re = 10000
HiRes = 1.
z = load('data/navierstokesInit.npz')
geom, v, t, b, soln = z['geom'], z['v'], z['t'], z['b'], z['soln']
solver = NavierStokes(v, t, b, Mach, Re, HiRes)
solver.integrate(1E-8, soln[-1])
for istep, T in enumerate(arange(1,nsteps+1) * dt):
solver.integrate(T)
|
    sys.stdout.write('t = {0}\n'.format(solver.time)); sys.stdout.flush()
fname = 'data/navierstokesStep{0:06d}.npz'.format(istep)
savez(fname, geom=array(geom), v=v, t=t, b=b, soln=solver.soln)
|
Curahelper/Cura
|
plugins/3MFWriter/ThreeMFWriter.py
|
Python
|
agpl-3.0
| 7,998 | 0.006627 |
# Copyright (c) 2015 Ultimaker B.V.
# Uranium is released under the terms of the AGPLv3 or higher.
from UM.Mesh.MeshWriter import MeshWriter
from UM.Math.Vector import Vector
from UM.Logger import Logger
from UM.Math.Matrix import Matrix
from UM.Application import Application
import UM.Scene.SceneNode
import Savitar
import numpy
MYPY = False
try:
if not MYPY:
import xml.etree.cElementTree as ET
except ImportError:
Logger.log("w", "Unable to load cElementTree, switching to slower version")
import xml.etree.ElementTree as ET
import zipfile
import UM.Application
class ThreeMFWriter(MeshWriter):
def __init__(self):
super().__init__()
self._namespaces = {
"3mf": "http://schemas.microsoft.com/3dmanufacturing/core/2015/02",
"content-types": "http://schemas.openxmlformats.org/package/2006/content-types",
"relationships": "http://schemas.openxmlformats.org/package/2006/relationships",
"cura": "http://software.ultimaker.com/xml/cura/3mf/2015/10"
}
self._unit_matrix_string = self._convertMatrixToString(Matrix())
self._archive = None
self._store_archive = False
def _convertMatrixToString(self, matrix):
result = ""
result += str(matrix._data[0, 0]) + " "
result += str(matrix._data[1, 0]) + " "
result += str(matrix._data[2, 0]) + " "
result += str(matrix._data[0, 1]) + " "
result += str(matrix._data[1, 1]) + " "
result += str(matrix._data[2, 1]) + " "
result += str(matrix._data[0, 2]) + " "
result += str(matrix._data[1, 2]) + " "
result += str(matrix._data[2, 2]) + " "
result += str(matrix._data[0, 3]) + " "
result += str(matrix._data[1, 3]) + " "
result += str(matrix._data[2, 3])
return result
## Should we store the archive
# Note that if this is true, the archive will not be closed.
# The object that set this parameter is then responsible for closing it correctly!
def setStoreArchive(self, store_archive):
self._store_archive = store_archive
## Convenience function that converts an Uranium SceneNode object to a SavitarSceneNode
    #   \returns Savitar scene node, or None if the input is not an Uranium SceneNode.
def _convertUMNodeToSavitarNode(self, um_node, transformation = Matrix()):
if type(um_node) is not UM.Scene.SceneNode.SceneNode:
return None
savitar_node = Savitar.SceneNode()
node_matrix = um_node.getLocalTransformation()
matrix_string = self._convertMatrixToString(node_matrix.preMultiply(transformation))
savitar_node.setTransformation(matrix_string)
mesh_data = um_node.getMeshData()
if mesh_data is not None:
savitar_node.getMeshData().setVerticesFromBytes(mesh_data.getVerticesAsByteArray())
indices_array = mesh_data.getIndicesAsByteArray()
if indices_array is not None:
savitar_node.getMeshData().setFacesFromBytes(indices_array)
else:
savitar_node.getMeshData().setFacesFromBytes(numpy.arange(mesh_data.getVertices().size / 3, dtype=numpy.int32).tostring())
# Handle per object settings (if any)
stack = um_node.callDecoration("getStack")
if stack is not None:
changed_setting_keys = set(stack.getTop().getAllKeys())
# Ensure that we save the extruder used for this object.
if stack.getProperty("machine_extruder_count", "value") > 1:
changed_setting_keys.add("extruder_nr")
# Get values for all changed settings & save them.
for key in changed_setting_keys:
savitar_node.setSetting(key, str(stack.getProperty(key, "value")))
for child_node in um_node.getChildren():
savitar_child_node = self._convertUMNodeToSavitarNode(child_node)
if savitar_child_node is not None:
savitar_node.addChild(savitar_child_node)
return savitar_node
def getArchive(self):
return self._archive
def write(self, stream, nodes, mode = MeshWriter.OutputMode.BinaryMode):
self._archive = None # Reset archive
archive = zipfile.ZipFile(stream, "w", compression = zipfile.ZIP_DEFLATED)
try:
model_file = zipfile.ZipInfo("3D/3dmodel.model")
# Because zipfile is stupid and ignores archive-level compression settings when writing with ZipInfo.
model_file.compress_type = zipfile.ZIP_DEFLATED
# Create content types file
content_types_file = zipfile.ZipInfo("[Content_Types].xml")
content_types_file.compress_type = zipfile.ZIP_DEFLATED
content_types = ET.Element("Types", xmlns = self._namespaces["content-types"])
rels_type = ET.SubElement(content_types, "Default", Extension = "rels", ContentType = "application/vnd.openxmlformats-package.relationships+xml")
model_type = ET.SubElement(content_types, "Default", Extension = "model", ContentType = "application/vnd.ms-package.3dmanufacturing-3dmodel+xml")
# Create _rels/.rels file
relations_file = zipfile.ZipInfo("_rels/.rels")
relations_file.compress_type = zipfile.ZIP_DEFLATED
relations_element = ET.Element("Relationships", xmlns = self._namespaces["relationships"])
model_relation_element = ET.SubElement(relations_element, "Relationship", Target = "/3D/3dmodel.model", Id = "rel0", Type = "http://schemas.microsoft.com/3dmanufacturing/2013/01/3dmodel")
savitar_scene = Savitar.Scene()
transformation_matrix = Matrix()
transformation_matrix._data[1, 1] = 0
transformation_matrix._data[1, 2] = -1
transformation_matrix._data[2, 1] = 1
transformation_matrix._data[2, 2] = 0
global_container_stack = Application.getInstance().getGlobalContainerStack()
# Second step: 3MF defines the left corner of the machine as center,
|
whereas cura uses the center of the
# build volume.
if global_container_stack:
translation_vector = Vector(x=global_container_stack.getProperty("machine_width", "value") / 2,
|
y=global_container_stack.getProperty("machine_depth", "value") / 2,
z=0)
translation_matrix = Matrix()
translation_matrix.setByTranslation(translation_vector)
transformation_matrix.preMultiply(translation_matrix)
root_node = UM.Application.Application.getInstance().getController().getScene().getRoot()
for node in nodes:
if node == root_node:
for root_child in node.getChildren():
savitar_node = self._convertUMNodeToSavitarNode(root_child, transformation_matrix)
if savitar_node:
savitar_scene.addSceneNode(savitar_node)
else:
savitar_node = self._convertUMNodeToSavitarNode(node, transformation_matrix)
if savitar_node:
savitar_scene.addSceneNode(savitar_node)
parser = Savitar.ThreeMFParser()
scene_string = parser.sceneToString(savitar_scene)
archive.writestr(model_file, scene_string)
archive.writestr(content_types_file, b'<?xml version="1.0" encoding="UTF-8"?> \n' + ET.tostring(content_types))
archive.writestr(relations_file, b'<?xml version="1.0" encoding="UTF-8"?> \n' + ET.tostring(relations_element))
except Exception as e:
Logger.logException("e", "Error writing zip file")
return False
finally:
if not self._store_archive:
archive.close()
else:
self._archive = archive
return True
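A minimal usage sketch; the node list and file name are invented, and inside Cura the writer is normally invoked through the output-device machinery rather than called directly like this.
writer = ThreeMFWriter()
with open("printjob.3mf", "wb") as stream:
    ok = writer.write(stream, [scene_node])   # returns True on success, False on failure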
|
dhermyt/WONS
|
configuration/Encoder.py
|
Python
|
bsd-2-clause
| 258 | 0 |
import json
class SettingsEncoder(json.JSONEncoder):
|
def default(self, obj):
if isinstance(obj, type):
return obj.__name__
if callable(obj):
|
return obj.__name__
return json.JSONEncoder.default(self, obj)
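A small usage sketch (values invented): types and callables are serialized by their names instead of raising TypeError.
settings = {"dtype": int, "reducer": max}
print(json.dumps(settings, cls=SettingsEncoder))   # {"dtype": "int", "reducer": "max"}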
|
dankilman/clue
|
clue/tests/test_git.py
|
Python
|
apache-2.0
| 16,458 | 0 |
########
# Copyright (c) 2016 GigaSpaces Technologies Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
############
import sh
import yaml
from clue import tests
git = sh.git
class TestGit(tests.BaseTest):
def test_clone_location(self):
repo_base = 'custom'
repo_dir = self._install(
repo_base=repo_base,
properties={'location': str(self.repos_dir / 'custom')})
with repo_dir:
self.assertIn('master', git.status())
def test_clone_branch(self):
repo_dir = self._install(properties={'branch': '3.3.1-build'})
with repo_dir:
self.assertIn('3.3.1-build', git.status())
def test_clone_organization(self):
repo_dir = self._install(
repo='claw-scripts',
properties={'organization': 'dankilman'})
with repo_dir:
origin = git.config('remote.origin.url').stdout.strip()
if self.default_clone_method == 'ssh':
prefix = 'git@github.com:'
else:
prefix = 'https://github.com/'
self.assertEqual(origin, '{}dankilman/claw-scripts.git'.format(prefix))
def test_clone_method_https(self):
self._test_clone_method(clone_method='https')
# sort of problematic testing ssh clone method
# def test_clone_method_ssh(self):
# self._test_clone_method(clone_method='ssh')
def _test_clone_method(self, clone_method):
repo_dir = self._install(clone_method=clone_method)
with repo_dir:
origin = git.config('remote.origin.url').stdout.strip()
if clone_method == 'ssh':
prefix = 'git@github.com:'
elif clone_method == 'https':
prefix = 'https://github.com/'
else:
self.fail(clone_method)
self.assertEqual(origin, '{}cloudify-cosmo/cloudify-rest-client.git'
.format(prefix))
def test_configure(self):
name = 'John Doe'
email = 'john@example.com'
repo_dir = self._install(git_config={
'user.name': name,
'user.email': email
})
with repo_dir:
# Test commit message hook
jira = 'CFY-10000'
branch = '{}-hello-world'.format(jira)
commit_message = 'my commit message'
git.checkout('-b', branch)
(repo_dir / 'tox.ini').write_text('testing 123')
git.commit('-am', commit_message)
self.assertEqual(self._log_message(),
'{} {}'.format(jira, commit_message))
# Test git config
self.assertEqual(name, git.config('user.name').stdout.strip())
self.assertEqual(email, git.config('user.email').stdout.strip())
def test_pull(self):
repo_dir = self._install()
with repo_dir:
initial_status = git.status().stdout.strip()
git.reset('HEAD~')
self.assertNotEqual(initial_status, git.status().stdout.strip())
git.reset(hard=True)
self.clue.git.pull()
with repo_dir:
self.assertEqual(initial_status, git.status().stdout.strip())
def test_status(self):
core_repo_dir, _, _ = self._install_repo_types_with_branches()
output = self.clue.git.status().stdout.strip()
self.assertRegexpMatches(output,
r'.*cloudify-rest-client.*\| .*master')
self.assertIn('cloudify-script-plugin', output)
self.assertIn('flask-securest', output)
self.clue.git.checkout('.3.1-build')
with core_repo_dir:
|
git.reset('HEAD~')
output = self.clue.git.status().stdout.strip()
self.assertRegexpMatches(output,
r'.*cloudify-rest-client.*\| .*3.3.1-build')
self.assertRegexpMatches(output,
r'.*cloudify-rest-client.*\| .*'
r'M.*cloudify_rest_client/client.py')
# test active
with core_repo_dir:
|
            git.reset('--hard', 'HEAD')
self.clue.feature.checkout('test')
output = self.clue.git.status(active=True).stdout.strip()
self.assertIn('cloudify-rest-client', output)
self.assertNotIn('flask-securest', output)
self.assertNotIn('cloudify-script-plugin', output)
def test_checkout(self):
(core_repo_dir,
plugin_repo_dir,
misc_repo_dir) = self._install_repo_types()
test_branches = {
'repos': {
'cloudify-rest-client': '3.3.1-build'
}
}
test_branches2 = {
'branch': '3.3.1-build',
'repos': ['cloudify-rest-client']
}
features_file = self.workdir / 'features.yaml'
features_file.write_text(yaml.safe_dump({
'test': test_branches,
'test2': test_branches2
}))
def assert_master():
for repo in [core_repo_dir, plugin_repo_dir, misc_repo_dir]:
with repo:
self.assertIn('master', git.status())
def assert_custom():
for repo, expected in [(core_repo_dir, '3.3.1-build'),
(plugin_repo_dir, '1.3.1-build'),
(misc_repo_dir, 'master')]:
with repo:
self.assertIn(expected, git.status())
def assert_features_file():
for repo, expected in [(core_repo_dir, '3.3.1-build'),
(plugin_repo_dir, 'master'),
(misc_repo_dir, 'master')]:
with repo:
self.assertIn(expected, git.status())
assert_master()
self.clue.git.checkout('.3.1-build')
assert_custom()
self.clue.git.checkout('master')
assert_master()
self.clue.feature.checkout('test')
assert_features_file()
self.clue.git.checkout('.3.1-build')
assert_custom()
self.clue.git.checkout('master')
assert_master()
self.clue.feature.checkout('test2')
assert_features_file()
self.clue.git.checkout('master')
assert_master()
with misc_repo_dir:
git.checkout('0.6')
self.assertIn('0.6', git.status())
self.clue.git.checkout('default')
assert_master()
def test_rebase(self):
branch = '3.2.1-build'
base = branch
core_repo_dir, _, _ = self._install_repo_types_with_branches(
branch=branch,
base=base)
# rebase with no "active" feature should not do anything
output = self.clue.git.rebase().stdout.strip()
self.assertEqual(len(output), 0)
# only "active" feature repos should be affected
self.clue.feature.checkout('test')
output = self.clue.git.rebase().stdout.strip()
self.assertEqual(len(output.split('\n')), 1)
self.assertIn('cloudify-rest-client', output)
self.assertIn('Current branch 3.2.1-build is up to date.', output)
# test repo type consideration (.2.1-build for core type should
# transform to 3.2.1-build)
output = self.clue.git.rebase().stdout.strip()
self.assertEqual(len(output.split('\n')), 1)
self.assertIn('cloudify-rest-client', output)
self.assertIn('Current branch 3.2.1-build is up to date.', output)
# being on a different branch then the one from the active state
# should result in a warning, and no state change
with core_repo_dir:
git.ch
|