Dataset schema (column name, type, observed range):

repo_name   string    length 5-100
path        string    length 4-231
language    string    1 distinct value (Python)
license     string    15 distinct values
size        int64     6 to 947k
score       float64   0 to 0.34
prefix      string    length 0-8.16k
middle      string    length 3-512
suffix      string    length 0-8.17k
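The prefix/middle/suffix columns are a fill-in-the-middle (FIM) split of each source file: concatenated in order, they reproduce the original text. Below is a minimal sketch of how such records can be recomposed and produced, assuming a simple character-offset splitter; recompose and fim_split are illustrative helpers, not the pipeline that actually built this dataset.

import random

def recompose(record):
    # Concatenating the three columns reproduces the original source file.
    return record["prefix"] + record["middle"] + record["suffix"]

def fim_split(source, max_middle=512):
    # Illustrative splitter: cut out one random span of at most max_middle
    # characters (matching the 'middle' length bound in the schema above).
    start = random.randrange(0, max(1, len(source)))
    end = min(len(source), start + random.randint(3, max_middle))
    return {"prefix": source[:start],
            "middle": source[start:end],
            "suffix": source[end:]}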
----
repo_name: ITOO-UrFU/open-programs
path: open_programs/apps/competences/apps.py
language: Python | license: unlicense | size: 141 | score: 0

from django.apps import AppConfig


class CompetencesConfig(AppConfig):
    name = 'competences'
    verbose_name = "Компетенции"  # Russian for "Competences"
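As context for how this AppConfig gets used, here is a minimal, hypothetical settings entry; the 'competences.apps.CompetencesConfig' dotted path is an assumption based on name = 'competences' above, not something taken from the repo.

# settings.py (hypothetical sketch, not from the repo)
INSTALLED_APPS = [
    'django.contrib.admin',
    'competences.apps.CompetencesConfig',  # assumed dotted path to the config above
]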
----
repo_name: facebook/buck
path: test/com/facebook/buck/cxx/testdata/cxx_toolchain/tools/linker.py
language: Python | license: apache-2.0 | size: 531 | score: 0

#!/usr/bin/env python3

import os
import sys

from tools import impl

parser = impl.argparser()
parser.add_argument("-o", dest="output", action=impl.StripQuotesAction)

(options, args) = parser.parse_known_args()

# ranlib may have hid the archive next to what buck thinks the archive is
input = args[-1] + ".secret"
if not os.path.exists(input):
    input = args[-1]

with open(options.output, "w") as output:
    output.write("linker:\n")
    with open(input) as inputfile:
        output.write(inputfile.read())

sys.exit(0)
----
repo_name: tensorflow/graphics
path: tensorflow_graphics/projects/points_to_3Dobjects/utils/image.py
language: Python | license: apache-2.0 | size: 2,459 | score: 0.011793

# Copyright 2020 The TensorFlow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Image functions."""
# python3
from cvx2 import latest as cv2
import numpy as np


def get_affine_transform(center, scale, rot, output_size, inverse=False):
  """Affine transform."""
  if not isinstance(scale, (np.ndarray, list)):
    scale = np.array([scale, scale], dtype=np.float32)
  dst_w, dst_h = output_size[0], output_size[1]
  rot_rad = np.pi * rot / 180
  src_dir = get_dir([0, scale[0] * -0.5], rot_rad)
  dst_dir = np.array([0, dst_w * -0.5], np.float32)
  src = np.zeros((3, 2), dtype=np.float32)
  dst = np.zeros((3, 2), dtype=np.float32)
  src[0, :], src[1, :] = center, center + src_dir
  dst[0, :] = [dst_w * 0.5, dst_h * 0.5]
  dst[1, :] = np.array([dst_w * 0.5, dst_h * 0.5], np.float32) + dst_dir
  src[2:, :] = get_3rd_point(src[0, :], src[1, :])
  dst[2:, :] = get_3rd_point(dst[0, :], dst[1, :])
  if inverse:
    transform = cv2.getAffineTransform(np.float32(dst), np.float32(src))
  else:
    transform = cv2.getAffineTransform(np.float32(src), np.float32(dst))
  return transform


def get_3rd_point(point_1, point_2):
  tmp_point = point_1 - point_2
  return point_2 + np.array([-tmp_point[1], tmp_point[0]], dtype=np.float32)


def get_dir(point, rot_rad):
  sin_rot, cos_rot = np.sin(rot_rad), np.cos(rot_rad)
  result = [0, 0]
  result[0] = point[0] * cos_rot - point[1] * sin_rot
  result[1] = point[0] * sin_rot + point[1] * cos_rot
  return np.array(result)


def transform_points(points, center, scale, output_size, inverse=False):
  transform = get_affine_transform(
      center, scale, 0, output_size, inverse=inverse)
  new_points = np.concatenate([points, np.ones([points.shape[0], 1])], axis=1)
  points_transformed = np.dot(transform, new_points.T).T
  return points_transformed


def transform_predictions(points, center, scale, output_size):
  return transform_points(points, center, scale, output_size, inverse=True)
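A brief usage sketch for the helpers above, with made-up center/scale values: points are mapped into a 256x256 output window and inverse=True recovers them (this reuses the numpy import from the file; the numeric values are illustrative assumptions).

import numpy as np

center = np.array([320.0, 240.0], dtype=np.float32)  # hypothetical crop center
points = np.array([[300.0, 250.0], [350.0, 230.0]], dtype=np.float32)

fwd = transform_points(points, center, 200, (256, 256))
back = transform_points(fwd, center, 200, (256, 256), inverse=True)
assert np.allclose(back, points, atol=1e-3)  # the inverse transform round-trips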
----
repo_name: remotesyssupport/cobbler-1
path: koan/register.py
language: Python | license: gpl-2.0 | size: 6,100 | score: 0.008033

"""
registration tool for cobbler.

Copyright 2009 Red Hat, Inc.
Michael DeHaan <mdehaan@redhat.com>

This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.

This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
02110-1301  USA
"""

import random
import os
import traceback
try:
    from optparse import OptionParser
except:
    from opt_parse import OptionParser  # importing this for backwards compat with 2.2
import exceptions
try:
    import subprocess as sub_process
except:
    import sub_process
import time
import errno
import sys
import xmlrpclib
import glob
import socket
import utils
import string
import pprint

# usage: cobbler-register [--server=server] [--hostname=hostname] --profile=foo


def main():
    """
    Command line stuff...
    """
    p = OptionParser()
    p.add_option("-s", "--server",
                 dest="server",
                 default=os.environ.get("COBBLER_SERVER", ""),
                 help="attach to this cobbler server")
    p.add_option("-f", "--fqdn",
                 dest="hostname",
                 default="",
                 help="override the discovered hostname")
    p.add_option("-p", "--port",
                 dest="port",
                 default="80",
                 help="cobbler port (default 80)")
    p.add_option("-P", "--profile",
                 dest="profile",
                 default="",
                 help="assign this profile to this system")
    p.add_option("-b", "--batch",
                 dest="batch",
                 action="store_true",
                 help="indicates this is being run from a script")

    (options, args) = p.parse_args()
    #if not os.getuid() == 0:
    #    print "koan requires root access"
    #    return 3

    try:
        k = Register()
        k.server = options.server
        k.port = options.port
        k.profile = options.profile
        k.hostname = options.hostname
        k.batch = options.batch
        k.run()
    except Exception, e:
        (xa, xb, tb) = sys.exc_info()
        try:
            getattr(e, "from_koan")
            print str(e)[1:-1]  # nice exception, no traceback needed
        except:
            print xa
            print xb
            print string.join(traceback.format_list(traceback.extract_tb(tb)))
        return 1
    return 0

#=======================================================


class InfoException(exceptions.Exception):
    """
    Custom exception for tracking of fatal errors.
    """
    def __init__(self, value, **args):
        self.value = value % args
        self.from_koan = 1

    def __str__(self):
        return repr(self.value)

#=======================================================


class Register:

    def __init__(self):
        """
        Constructor.  Arguments will be filled in by optparse...
        """
        self.server = ""
        self.port = ""
        self.profile = ""
        self.hostname = ""
        self.batch = ""

    #---------------------------------------------------

    def run(self):
        """
        Commence with the registration already.
        """
        # not really required, but probably best that ordinary users don't try
        # to run this not knowing what it does.
        if os.getuid() != 0:
            raise InfoException("root access is required to register")

        print "- preparing to koan home"
        self.conn = utils.connect_to_server(self.server, self.port)
        reg_info = {}
        print "- gathering network info"
        netinfo = utils.get_network_info()
        reg_info["interfaces"] = netinfo
        print "- checking hostname"
        sysname = ""
        if self.hostname != "" and self.hostname != "*AUTO*":
            hostname = self.hostname
            sysname = self.hostname
        else:
            hostname = socket.getfqdn()
            if hostname == "localhost.localdomain":
                if self.hostname == '*AUTO*':
                    hostname = ""
                    sysname = str(time.time())
                else:
                    raise InfoException("must specify --fqdn, could not discover")
            if sysname == "":
                sysname = hostname

        if self.profile == "":
            raise InfoException("must specify --profile")

        # we'll do a profile check here just to avoid some log noise on the remote end.
        # network duplication checks and profile checks also happen on the remote end.
        avail_profiles = self.conn.get_profiles()
        matched_profile = False
        for x in avail_profiles:
            if x.get("name", "") == self.profile:
                matched_profile = True
                break

        reg_info['name'] = sysname
        reg_info['profile'] = self.profile
        reg_info['hostname'] = hostname

        if not matched_profile:
            raise InfoException("no such remote profile, see 'koan --list-profiles'")

        if not self.batch:
            self.conn.register_new_system(reg_info)
            print "- registration successful, new system name: %s" % sysname
        else:
            try:
                self.conn.register_new_system(reg_info)
                print "- registration successful, new system name: %s" % sysname
            except:
                traceback.print_exc()
                print "- registration failed, ignoring because of --batch"
        return


if __name__ == "__main__":
    main()
----
repo_name: adjustive/caller
path: caller/settings/common.py
language: Python | license: gpl-3.0 | size: 4,021 | score: 0.002984

"""
Common Django settings for the project.

See the local, test, and production settings modules for the values used
in each environment.

For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/

For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""

from django.core.exceptions import ImproperlyConfigured
import os

BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
BASE_DIR = os.path.abspath(os.path.join(BASE_DIR, os.pardir))

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True

# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'iguod@&u&_(3f_d-z)r_2g)o=mjor_rbo)9)b&19$ih*txgnta'

ALLOWED_HOSTS = []

# Twilio API credentials
TWILIO_ACCOUNT_SID = os.environ.get('TWILIO_ACCOUNT_SID')
TWILIO_AUTH_TOKEN = os.environ.get('TWILIO_AUTH_TOKEN')

# Twilio number
TWILIO_NUMBER = os.environ.get('TWILIO_NUMBER')

# TwiML Application SID
TWIML_APPLICATION_SID = os.environ.get('TWIML_APPLICATION_SID')

if not (TWILIO_ACCOUNT_SID and TWILIO_AUTH_TOKEN and TWILIO_NUMBER
        and TWIML_APPLICATION_SID):
    missing_config_values = \
    """
    You must set the TWILIO_ACCOUNT_SID, TWILIO_AUTH_TOKEN, TWILIO_NUMBER,
    and TWIML_APPLICATION_SID environment variables to run this app.
    Consult the README for instructions on how to find them.
    """
    raise ImproperlyConfigured(missing_config_values)

# Application definition

DJANGO_APPS = (
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'django.contrib.humanize'
)

THIRD_PARTY_APPS = (
)

LOCAL_APPS = (
    'dialer',
)

INSTALLED_APPS = DJANGO_APPS + THIRD_PARTY_APPS + LOCAL_APPS

MIDDLEWARE_CLASSES = (
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
    'django.middleware.security.SecurityMiddleware',
)

ROOT_URLCONF = 'caller.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': ['templates/'],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

WSGI_APPLICATION = 'caller.wsgi.application'

# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases

DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.postgresql_psycopg2',
        'NAME': 'caller',
        'USER': 'calluser',
        'PASSWORD': 'qq',
        'HOST': '127.0.0.1',
        'PORT': '5432',
    },
    #'default': {
    #    'ENGINE': 'django.db.backends.sqlite3',
    #    'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    #}
}

# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/

LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True

# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/

STATIC_URL = '/static/'
STATICFILES_DIRS = [
    os.path.join(BASE_DIR, "static")
]
print STATICFILES_DIRS, BASE_DIR
STATIC_ROOT = ''  # BASE_DIR + '/static'

# Messages settings for Bootstrap 3
from django.contrib.messages import constants as messages
MESSAGE_TAGS = {
    messages.ERROR: 'danger'
}

# Redirect login to /support/dashboard
LOGIN_REDIRECT_URL = '/support/dashbaord'
----
repo_name: stackdump/txbitwrap
path: txbitwrap/_brython/ctl.py
language: Python | license: mit | size: 7,943 | score: 0.001763

from browser import window, document as doc, console
import net
import sim
import json

CTX = None
""" Bitwrap context """

CTL = None
""" UI control interface """


def __onload(ctx):
    """ use snap to begin creating an SVG """
    global CTL
    CTL = Editor()
    window.jQuery('#net').on('click', CTL.on_insert)
    window.jQuery('.select').on('click', CTL.select)
    window.jQuery('.symbol').on('click', CTL.symbol)
    window.jQuery('.tool').on('click', CTL.tool)
    window.jQuery('.simulator').on('click', CTL.simulator)

    global CTX
    CTX = ctx

    # TODO: control from UI
    # maybe add a dropdown
    #CTX.machine('counter', callback=CTL.load)
    CTX.machine('octoe', callback=CTL.load)


def on_event(_, event):
    """ receive event over jQuery binding """
    if 'action' in event:
        return CTL.simulation.execute(event['action'])


class Controller(object):
    """ Provide interface for UI actions """

    def load(self, res):
        """ store requested PNML and render as SVG """
        pnet = json.loads(res.text)
        net.SCHEMA = pnet['machine']['name']
        net.NETS[net.SCHEMA] = pnet['machine']
        self.reset(callback=self.render)

    def reset(self, callback=None):
        """ clear SVG and prepare markers """
        if not net.PAPER:
            net.PAPER = window.Snap('#net')
        net.PAPER.clear()
        net.on_load()
        if callable(callback):
            callback()

    def render(self, callback=None):
        """ development examples """
        if not net.INSTANCE:
            net.PNet(self)
        net.INSTANCE.render()
        self.json_view()
        if callable(callback):
            callback()

    def json_view(self):
        _info = json.dumps({
            'places': net.INSTANCE.place_defs,
            'transitions': net.INSTANCE.transition_defs,
            'arcs': net.INSTANCE.arc_defs,
            'place_names': net.INSTANCE.place_names,
            'token_ledger': net.INSTANCE.token_ledger
        })
        window.jQuery('#json').JSONView(_info)


class EditorEvents(object):
    """ Editor event callbacks """

    def on_click(self, event):
        """ handle mouse events """
        self.callback(event)

    def on_select(self, event):
        """ callback to show attributes for selected element """
        refid, symbol = self._selected(event)
        if not refid:
            return
        console.log('on_select', refid, symbol)
        # FIXME: should show info in editor

    def on_insert(self, event):
        """ insert a symbol into net """
        if not self.selected_insert_symbol:
            return
        new_coords = [event.offsetX, event.offsetY]
        # TODO: make call to insert new symbol in INSTANCE
        if self.selected_insert_symbol == 'place':
            net.INSTANCE.insert_place(new_coords)
        else:
            net.INSTANCE.insert_transition(new_coords)
        self.reset(callback=self.render)

    def on_delete(self, event):
        """ callback when clicking elements when delete tool is active """
        refid, symbol = self._selected(event)
        if not refid:
            return
        if symbol == 'place':
            net.INSTANCE.delete_place(refid)
        elif symbol == 'transition':
            net.INSTANCE.delete_transition(refid)
        else:
            # FIXME implement arc handle
            #net.INSTANCE.delete_arc(target_id)
            console.log('delete arc', refid)
        self.reset(callback=self.render)

    def on_trigger(self, event):
        """ callback when triggering a transition during a simulation """
        action = CTL.simulation.trigger(event)
        console.log(net.SCHEMA, CTL.simulation.oid, action)
        CTX.dispatch(net.SCHEMA, self.simulation.oid, action)

    def on_token_inc(self, event):
        return self._token_changed(1, event)

    def on_token_dec(self, event):
        return self._token_changed(-1, event)

    def _token_changed(self, change, event):
        refid, symbol = self._selected(event)
        if not symbol == 'place':
            return
        current = net.INSTANCE.token_ledger[refid]
        new_token_count = current + change
        if new_token_count >= 0:
            net.INSTANCE.update_place_tokens(refid, new_token_count)
            self.reset(callback=self.render)

    def _selected(self, event):
        target_id = str(event.target.id)
        if not self.is_selectable(target_id):
            return [None, None]
        return target_id.split('-')

    def on_arc_begin(self, event):
        begin = self._selected(event)
        if not begin:
            return
        self.callback = self.on_arc_end
        self.selected_arc_endpoint = begin

    def on_arc_end(self, event):
        end = self._selected(event)
        if not end:
            return
        self.callback = self.on_arc_begin
        begin = self.selected_arc_endpoint

        if begin[1] == end[1]:
            return  # cannot connect 2 symbols of same type

        if begin[1] == 'transition':
            txn = begin[0]
            place = end[0]
            direction = 'to'
            diff = 1
        else:
            txn = end[0]
            place = begin[0]
            direction = 'from'
            diff = -1

        if txn not in net.INSTANCE.arc_defs:
            net.INSTANCE.arc_defs[txn] = {'to': [], 'from': []}
        net.INSTANCE.arc_defs[txn][direction].append(place)

        offset = net.INSTANCE.place_defs[place]['offset']
        net.INSTANCE.transition_defs[txn]['delta'][offset] = diff

        self.selected_arc_endpoint = None  # reset
        self.reset(callback=self.render)


class Editor(Controller, EditorEvents):
    """ Petri-Net editor controls """

    def __init__(self):
        self.callback = self.on_select
        self.move_enabled = True
        self.selected_insert_symbol = None
        self.selected_arc_endpoint = None
        self.simulation = None

    def select(self, event):
        """ enter select/move mode """
        self.move_enabled = True
        self.selected_insert_symbol = None
        self.callback = self.on_select

    def symbol(self, event):
        """ enter insert symbol mode """
        sym = str(event.target.id)
        self.selected_insert_symbol = sym

    def simulator(self, event):
        """ control start/stop simulation mode """
        target_id = event.target.text
        if target_id == 'reset':
            if self.simulation:
                self.simulation.reset()
            self.callback = self.on_select
            self.move_enabled = True
            doc['code'].value = '>>>'
        else:
            self.move_enabled = False
            oid = window.Date.now()
            self.simulation = sim.Simulation(oid, net.INSTANCE, self)
            CTX.create(net.SCHEMA, oid)
            CTX.subscribe(str(net.SCHEMA), str(oid))
            console.log(net.SCHEMA, oid, 'NEW')
            self.callback = self.on_trigger

    def tool(self, event):
        """ modify existing symbol on net """
        self.move_enabled = False
        self.selected_insert_symbol = None
        self.selected_arc_endpoint = None
        target_id = str(event.target.id)
        if target_id == 'arc':
            self.callback = self.on_arc_begin
        elif target_id == 'delete':
            self.callback = self.on_delete
        elif target_id == 'dec_token':
            self.callback = self.on_token_dec
        elif target_id == 'inc_token':
            self.callback = self.on_token_inc

    def is_selectable(self, target_id):
        """ determine if element allows user interaction """
        # KLUDGE: relies on a naming convention
        # 'primary' labels for symbols are assumed not to use the char '-'
        # 'secondary' labels use IDs with the form <primary>-<secondary>
        return '-' in target_id
----
repo_name: harwee/electrum-xvg-tor
path: lib/wallet.py
language: Python | license: gpl-3.0 | size: 73,225 | score: 0.003086

#!/usr/bin/env python
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2011 thomasv@gitorious
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.

import sys
import os
import hashlib
import ast
import threading
import random
import time
import math
import json
import copy
from operator import itemgetter

from util import print_msg, print_error, NotEnoughFunds
from util import profiler
from bitcoin import *
from account import *
from version import *
from transaction import Transaction
from plugins import run_hook
import bitcoin
from synchronizer import WalletSynchronizer
from mnemonic import Mnemonic
import paymentrequest

# internal ID for imported account
IMPORTED_ACCOUNT = '/x'


class WalletStorage(object):

    def __init__(self, path):
        self.lock = threading.RLock()
        self.data = {}
        self.path = path
        self.file_exists = False
        print_error("wallet path", self.path)
        if self.path:
            self.read(self.path)

    def read(self, path):
        """Read the contents of the wallet file."""
        try:
            with open(self.path, "r") as f:
                data = f.read()
        except IOError:
            return
        try:
            self.data = json.loads(data)
        except:
            try:
                d = ast.literal_eval(data)  # parse raw data from reading wallet file
            except Exception as e:
                raise IOError("Cannot read wallet file '%s'" % self.path)
            self.data = {}
            # In old versions of Electrum labels were latin1 encoded, this fixes breakage.
            labels = d.get('labels', {})
            for i, label in labels.items():
                try:
                    unicode(label)
                except UnicodeDecodeError:
                    d['labels'][i] = unicode(label.decode('latin1'))
            for key, value in d.items():
                try:
                    json.dumps(key)
                    json.dumps(value)
                except:
                    print_error('Failed to convert label to json format', key)
                    continue
                self.data[key] = value
        self.file_exists = True

    def get(self, key, default=None):
        with self.lock:
            v = self.data.get(key)
            if v is None:
                v = default
            else:
                v = copy.deepcopy(v)
        return v

    def put(self, key, value, save=True):
        try:
            json.dumps(key)
            json.dumps(value)
        except:
            print_error("json error: cannot save", key)
            return
        with self.lock:
            if value is not None:
                self.data[key] = copy.deepcopy(value)
            elif key in self.data:
                self.data.pop(key)
            if save:
                self.write()

    def write(self):
        assert not threading.currentThread().isDaemon()
        temp_path = "%s.tmp.%s" % (self.path, os.getpid())
        s = json.dumps(self.data, indent=4, sort_keys=True)
        with open(temp_path, "w") as f:
            f.write(s)
            f.flush()
            os.fsync(f.fileno())
        # perform atomic write on POSIX systems
        try:
            os.rename(temp_path, self.path)
        except:
            os.remove(self.path)
            os.rename(temp_path, self.path)
        if 'ANDROID_DATA' not in os.environ:
            import stat
            os.chmod(self.path, stat.S_IREAD | stat.S_IWRITE)


class Abstract_Wallet(object):
    """
    Wallet classes are created to handle various address generation methods.
    Completion states (watching-only, single account, no seed, etc) are
    handled inside classes.
    """

    def __init__(self, storage):
        self.storage = storage
        self.electrum_version = ELECTRUM_VERSION
        self.gap_limit_for_change = 6  # constant
        # saved fields
        self.seed_version = storage.get('seed_version', NEW_SEED_VERSION)
        self.use_change = storage.get('use_change', True)
        self.use_encryption = storage.get('use_encryption', False)
        self.seed = storage.get('seed', '')  # encrypted
        self.labels = storage.get('labels', {})
        self.frozen_addresses = set(storage.get('frozen_addresses', []))
        self.stored_height = storage.get('stored_height', 0)  # last known height (for offline mode)
        self.history = storage.get('addr_history', {})  # address -> list(txid, height)
        self.fee_per_kb = int(storage.get('fee_per_kb', RECOMMENDED_FEE))

        # This attribute is set when wallet.start_threads is called.
        self.synchronizer = None

        # imported_keys is deprecated. The GUI should call convert_imported_keys
        self.imported_keys = self.storage.get('imported_keys', {})

        self.load_accounts()
        self.load_transactions()
        self.build_reverse_history()

        # load requests
        self.receive_requests = self.storage.get('payment_requests', {})

        # spv
        self.verifier = None
        # Transactions pending verification.  Each value is the transaction height.  Access with self.lock.
        self.unverified_tx = {}
        # Verified transactions.  Each value is a (height, timestamp, block_pos) tuple.  Access with self.lock.
        self.verified_tx = storage.get('verified_tx3', {})

        # there is a difference between wallet.up_to_date and interface.is_up_to_date()
        # interface.is_up_to_date() returns true when all requests have been answered and processed
        # wallet.up_to_date is true when the wallet is synchronized (stronger requirement)
        self.up_to_date = False
        self.lock = threading.Lock()
        self.transaction_lock = threading.Lock()
        self.tx_event = threading.Event()

        self.check_history()

        # save wallet type the first time
        if self.storage.get('wallet_type') is None:
            self.storage.put('wallet_type', self.wallet_type, True)

    @profiler
    def load_transactions(self):
        self.txi = self.storage.get('txi', {})
        self.txo = self.storage.get('txo', {})
        self.pruned_txo = self.storage.get('pruned_txo', {})
        tx_list = self.storage.get('transactions', {})
        self.transactions = {}
        for tx_hash, raw in tx_list.items():
            tx = Transaction(raw)
            self.transactions[tx_hash] = tx
            if self.txi.get(tx_hash) is None and self.txo.get(tx_hash) is None \
                    and (tx_hash not in self.pruned_txo.values()):
                print_error("removing unreferenced tx", tx_hash)
                self.transactions.pop(tx_hash)

    @profiler
    def save_transactions(self):
        with self.transaction_lock:
            tx = {}
            for k, v in self.transactions.items():
                tx[k] = str(v)
            # Flush storage only with the last put
            self.storage.put('transactions', tx, False)
            self.storage.put('txi', self.txi, False)
            self.storage.put('txo', self.txo, False)
            self.storage.put('pruned_txo', self.pruned_txo, True)

    def clear_history(self):
        with self.transaction_lock:
            self.txi = {}
            self.txo = {}
            self.pruned_txo = {}
        self.save_transactions()
        with self.lock:
            self.history = {}
            self.tx_addr_hist = {}
        self.storage.put('addr_history', self.history, True)

    @profiler
    def build_reverse_history(
----
repo_name: Antiun/yelizariev-addons
path: res_users_signature/__openerp__.py
language: Python | license: lgpl-3.0 | size: 443 | score: 0.015801

{
    'name': 'Signature templates for user emails',
    'version': '1.0.0',
    'author': 'IT-Projects LLC, Ivan Yelizariev',
    'license': 'GPL-3',
    'category': 'Social Network',
    'website': 'https://yelizariev.github.io',
    'depends': ['base'],
    'data': [
        'res_users_signature_views.xml',
        'security/res_users_signature_security.xml',
        'security/ir.model.access.csv',
    ],
    'installable': True
}
----
repo_name: ruymanengithub/vison
path: vison/metatests/chinj01.py
language: Python | license: gpl-3.0 | size: 34,380 | score: 0.006603

#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""

Created on Mon Jul 22 17:01:36 2019

@author: raf
"""

# IMPORT STUFF
from pdb import set_trace as stop
import copy
import numpy as np
from collections import OrderedDict
import string as st
import os
import pandas as pd

from vison.datamodel import cdp
from vison.support import files
from vison.fpa import fpa as fpamod
from vison.metatests.metacal import MetaCal
from vison.plot import plots_fpa as plfpa
from vison.support import vcal, utils
from vison.datamodel import core as vcore
from vison.ogse import ogse
from vison.inject import lib as ilib

import matplotlib.cm as cm
from matplotlib import pyplot as plt
plt.switch_backend('TkAgg')
from matplotlib.colors import Normalize
# END IMPORT

cols2keep = [
    'test', 'sn_ccd1', 'sn_ccd2', 'sn_ccd3', 'sn_roe', 'sn_rpsu', 'exptime',
    'vstart', 'vend', 'rdmode', 'flushes', 'siflsh', 'siflsh_p', 'swellw',
    'swelldly', 'inisweep', 'cdpu_clk', 'chinj', 'chinj_on', 'chinj_of',
    'id_wid', 'id_dly', 'chin_dly', 'v_tpump', 's_tpump', 'v_tp_mod',
    's_tp_mod', 'v_tp_cnt', 's_tp_cnt', 'dwell_v', 'dwell_s', 'toi_fl',
    'toi_tp', 'toi_ro', 'toi_ch', 'motr', 'motr_cnt', 'motr_siz', 'source',
    'wave', 'mirr_on', 'mirr_pos', 'R1C1_TT', 'R1C1_TB', 'R1C2_TT', 'R1C2_TB',
    'R1C3_TT', 'R1C3_TB', 'IDL', 'IDH', 'IG1_1_T', 'IG1_2_T', 'IG1_3_T',
    'IG1_1_B', 'IG1_2_B', 'IG1_3_B', 'IG2_T', 'IG2_B', 'OD_1_T', 'OD_2_T',
    'OD_3_T', 'OD_1_B', 'OD_2_B', 'OD_3_B', 'RD_T', 'RD_B', 'time',
    'HK_CCD1_TEMP_T', 'HK_CCD2_TEMP_T', 'HK_CCD3_TEMP_T', 'HK_CCD1_TEMP_B',
    'HK_CCD2_TEMP_B', 'HK_CCD3_TEMP_B', 'HK_CCD1_OD_T', 'HK_CCD2_OD_T',
    'HK_CCD3_OD_T', 'HK_CCD1_OD_B', 'HK_CCD2_OD_B', 'HK_CCD3_OD_B',
    'HK_COMM_RD_T', 'HK_COMM_RD_B', 'HK_CCD1_IG1_T', 'HK_CCD2_IG1_T',
    'HK_CCD3_IG1_T', 'HK_CCD1_IG1_B', 'HK_CCD2_IG1_B', 'HK_CCD3_IG1_B',
    'HK_COMM_IG2_T', 'HK_COMM_IG2_B', 'HK_FPGA_BIAS_ID2', 'HK_VID_PCB_TEMP_T',
    'HK_VID_PCB_TEMP_B', 'HK_RPSU_TEMP1', 'HK_FPGA_PCB_TEMP_T',
    'HK_FPGA_PCB_TEMP_B', 'HK_RPSU_TEMP_2', 'HK_RPSU_28V_PRI_I',
    'chk_NPIXOFF', 'chk_NPIXSAT', 'offset_pre', 'offset_ove', 'std_pre',
    'std_ove']


class MetaChinj01(MetaCal):
    """ """

    def __init__(self, **kwargs):
        """ """
        super(MetaChinj01, self).__init__(**kwargs)

        self.testnames = ['CHINJ01']
        self.incols = cols2keep
        self.ParsedTable = OrderedDict()

        allgains = files.cPickleRead(kwargs['cdps']['gain'])

        self.cdps['GAIN'] = OrderedDict()
        for block in self.blocks:
            self.cdps['GAIN'][block] = allgains[block]['PTC01'].copy()

        self.products['METAFIT'] = OrderedDict()
        self.products['VERPROFILES'] = OrderedDict()
        self.products['HORPROFILES'] = OrderedDict()

        self.init_fignames()
        self.init_outcdpnames()

    def parse_single_test(self, jrep, block, testname, inventoryitem):
        """ """

        NCCDs = len(self.CCDs)
        NQuads = len(self.Quads)
        session = inventoryitem['session']

        CCDkeys = ['CCD%i' % CCD for CCD in self.CCDs]

        IndexS = vcore.vMultiIndex([vcore.vIndex('ix', vals=[0])])

        IndexCQ = vcore.vMultiIndex([vcore.vIndex('ix', vals=[0]),
                                     vcore.vIndex('CCD', vals=self.CCDs),
                                     vcore.vIndex('Quad', vals=self.Quads)])

        #idd = copy.deepcopy(inventoryitem['dd'])
        sidd = self.parse_single_test_gen(jrep, block, testname, inventoryitem)

        # TEST SCPECIFIC
        # TO BE ADDED:
        #   OFFSETS: pre, img, ove
        #   RON: pre, img, ove
        #   REFERENCES TO PROFILES

        CHAMBER = sidd.meta['inputs']['CHAMBER']
        CHAMBER_key = CHAMBER[0]
        chamber_v = np.array([CHAMBER_key])
        sidd.addColumn(chamber_v, 'CHAMBERKEY', IndexS, ix=0)

        block_v = np.array([block])
        sidd.addColumn(block_v, 'BLOCK', IndexS, ix=0)

        test_v = np.array([jrep + 1])
        sidd.addColumn(test_v, 'REP', IndexS, ix=0)

        test_v = np.array([session])
        sidd.addColumn(test_v, 'SESSION', IndexS, ix=0)

        test_v = np.array([testname])
        sidd.addColumn(test_v, 'TEST', IndexS, ix=0)

        productspath = os.path.join(inventoryitem['resroot'], 'products')

        metafitcdp_pick = os.path.join(
            productspath, os.path.split(sidd.products['METAFIT_CDP'])[-1])
        metafitcdp = files.cPickleRead(metafitcdp_pick)
        metafit = copy.deepcopy(metafitcdp['data']['ANALYSIS'])

        metafitkey = '%s_%s_%s_%i' % (testname, block, session, jrep + 1)
        self.products['METAFIT'][metafitkey] = copy.deepcopy(metafit)

        metafitkey_v = np.array([metafitkey])
        sidd.addColumn(metafitkey_v, 'METAFIT', IndexS, ix=0)

        metacdp_pick = os.path.join(productspath, os.path.split(
            sidd.products['META_CDP'])[-1])  # change to META_CDP
        metacdp = files.cPickleRead(metacdp_pick)
        meta = metacdp['data']['ANALYSIS']  # this is a pandas DataFrame

        tmp_v_CQ = np.zeros((1, NCCDs, NQuads))

        bgd_adu_v = tmp_v_CQ.copy()
        ig1_thresh_v = tmp_v_CQ.copy()
        ig1_notch_v = tmp_v_CQ.copy()
        slope_v = tmp_v_CQ.copy()
        n_adu_v = tmp_v_CQ.copy()

        for iCCD, CCDk in enumerate(CCDkeys):
            for kQ, Q in enumerate(self.Quads):
                ixloc = np.where((meta['CCD'] == iCCD + 1) & (meta['Q'] == kQ + 1))
                bgd_adu_v[0, iCCD, kQ] = meta['BGD_ADU'][ixloc[0][0]]
                ig1_thresh_v[0, iCCD, kQ] = meta['IG1_THRESH'][ixloc[0][0]]
                ig1_notch_v[0, iCCD, kQ] = meta['IG1_NOTCH'][ixloc[0][0]]
                slope_v[0, iCCD, kQ] = meta['S'][ixloc[0][0]]
                n_adu_v[0, iCCD, kQ] = meta['N_ADU'][ixloc[0][0]]

        sidd.addColumn(bgd_adu_v, 'FIT_BGD_ADU', IndexCQ)
        sidd.addColumn(ig1_thresh_v, 'FIT_IG1_THRESH', IndexCQ)
        sidd.addColumn(ig1_notch_v, 'FIT_IG1_NOTCH', IndexCQ)
        sidd.addColumn(slope_v, 'FIT_SLOPE', IndexCQ)
        sidd.addColumn(n_adu_v, 'FIT_N_ADU', IndexCQ)

        # charge injection profiles

        verprofspick = os.path.join(
            productspath, os.path.split(sidd.products['PROFS_ALCOL'])[-1])
        verprofs = files.cPickleRead(verprofspick)
        vprofkey = '%s_%s_%s_%i' % (testname, block, session, jrep + 1)
        self.products['VERPROFILES'][vprofkey] = verprofs.copy()
        vprofskeys_v = np.zeros((1), dtype='U50')
        vprofskeys_v[0] = vprofkey
        sidd.addColumn(vprofskeys_v, 'VERPROFS_KEY', IndexS)

        horprofspick = os.path.join(
            productspath, os.path.split(sidd.products['PROFS_ALROW'])[-1])
        horprofs = files.cPickleRead(horprofspick)
        hprofkey = '%s_%s_%s_%i' % (testname, block, session, jrep + 1)
        self.products['HORPROFILES'][hprofkey] = horprofs.copy()
        hprofskeys_v = np.zeros((1), dtype='U50')
        hprofskeys_v[0] = hprofkey
        sidd.addColumn(hprofskeys_v, 'HORPROFS_KEY', IndexS)

        # flatten sidd to table
        sit = sidd.flattentoTable()

        return sit

    def _get_extractor_NOTCH_fromPT(self, units):
        """ """

        def _extract_NOTCH_fromPT(PT, block, CCDk, Q):
            ixblock = self.get_ixblock(PT, block)
            column = 'FIT_N_ADU_%s_Quad%s' % (CCDk, Q)
            if units == 'ADU':
                unitsConvFactor = 1
            elif units == 'E':
                unitsConvFactor = self.cdps['GAIN'][block][CCDk][Q][0]
            Notch = np.nanmedian(PT[column][ixblock]) * unitsConvFactor
            return Notch

        return _extract_NOTCH_fromPT

    def _get_injcurve(self, _chfitdf, ixCCD, ixQ, IG1raw, gain):
        """ """
        ixsel = np.where((_chfitdf['CCD'] == ixCCD) & (_chfitdf['Q'] == i
----
repo_name: orione7/plugin.video.streamondemand-pureita
path: core/scrapertools.py
language: Python | license: gpl-3.0 | size: 17,985 | score: 0.015934

# -*- coding: utf-8 -*-
# ------------------------------------------------------------
# streamondemand 5
# Copyright 2015 tvalacarta@gmail.com
# http://www.mimediacenter.info/foro/viewforum.php?f=36
#
# Distributed under the terms of GNU General Public License v3 (GPLv3)
# http://www.gnu.org/licenses/gpl-3.0.html
# ------------------------------------------------------------
# This file is part of streamondemand 5.
#
# streamondemand 5 is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# streamondemand 5 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with streamondemand 5.  If not, see <http://www.gnu.org/licenses/>.
# --------------------------------------------------------------------------------
# Scraper tools for reading and processing web elements
# --------------------------------------------------------------------------------

import re
import socket
import time

import logger
from core import httptools


def cache_page(url, post=None, headers=None, modo_cache=None, timeout=None):
    return cachePage(url, post, headers, modo_cache, timeout=timeout)


def cachePage(url, post=None, headers=None, modoCache=None, timeout=None):
    data = downloadpage(url, post=post, headers=headers, timeout=timeout)
    return data


def downloadpage(url, post=None, headers=None, follow_redirects=True, timeout=None, header_to_get=None):
    response = httptools.downloadpage(url, post=post, headers=headers,
                                      follow_redirects=follow_redirects, timeout=timeout)
    if header_to_get:
        return response.headers.get(header_to_get)
    else:
        return response.data


def downloadpageWithResult(url, post=None, headers=None, follow_redirects=True, timeout=None, header_to_get=None):
    response = httptools.downloadpage(url, post=post, headers=headers,
                                      follow_redirects=follow_redirects, timeout=timeout)
    if header_to_get:
        return response.headers.get(header_to_get)
    else:
        return response.data, response.code


def downloadpageWithoutCookies(url):
    response = httptools.downloadpage(url, cookies=False)
    return response.data


def downloadpageGzip(url):
    response = httptools.downloadpage(url, add_referer=True)
    return response.data


def getLocationHeaderFromResponse(url):
    response = httptools.downloadpage(url, only_headers=True, follow_redirects=False)
    return response.headers.get("location")


def get_header_from_response(url, header_to_get="", post=None, headers=None, follow_redirects=False):
    header_to_get = header_to_get.lower()
    response = httptools.downloadpage(url, post=post, headers=headers,
                                      only_headers=True, follow_redirects=follow_redirects)
    return response.headers.get(header_to_get)


def get_headers_from_response(url, post=None, headers=None, follow_redirects=False):
    response = httptools.downloadpage(url, post=post, headers=headers,
                                      only_headers=True, follow_redirects=follow_redirects)
    return response.headers.items()


def read_body_and_headers(url, post=None, headers=None, follow_redirects=False, timeout=None):
    response = httptools.downloadpage(url, post=post, headers=headers,
                                      follow_redirects=follow_redirects, timeout=timeout)
    return response.data, response.headers


def anti_cloudflare(url, headers=None, post=None):
    # anti_cloudflare is already integrated into httptools by default
    response = httptools.downloadpage(url, post=post, headers=headers)
    return response.data


def printMatches(matches):
    i = 0
    for match in matches:
        logger.info("streamondemand-pureita-master.core.scrapertools %d %s" % (i, match))
        i = i + 1


def get_match(data, patron, index=0):
    matches = re.findall(patron, data, flags=re.DOTALL)
    return matches[index]


def find_single_match(data, patron, index=0):
    try:
        matches = re.findall(patron, data, flags=re.DOTALL)
        return matches[index]
    except:
        return ""


# Parse string and extracts multiple matches using regular expressions
def find_multiple_matches(text, pattern):
    return re.findall(pattern, text, re.DOTALL)


def entityunescape(cadena):
    return unescape(cadena)


def unescape(text):
    """Removes HTML or XML character references and entities from a text string.
    keep &amp;, &gt;, &lt; in the source code.
    from Fredrik Lundh
    http://effbot.org/zone/re-sub.htm#unescape-html
    """
    def fixup(m):
        text = m.group(0)
        if text[:2] == "&#":
            # character reference
            try:
                if text[:3] == "&#x":
                    return unichr(int(text[3:-1], 16)).encode("utf-8")
                else:
                    return unichr(int(text[2:-1])).encode("utf-8")
            except ValueError:
                logger.info("error de valor")
                pass
        else:
            # named entity
            try:
                '''
                if text[1:-1] == "amp":
                    text = "&amp;amp;"
                elif text[1:-1] == "gt":
                    text = "&amp;gt;"
                elif text[1:-1] == "lt":
                    text = "&amp;lt;"
                else:
                    print text[1:-1]
                    text = unichr(htmlentitydefs.name2codepoint[text[1:-1]]).encode("utf-8")
                '''
                import htmlentitydefs
                text = unichr(htmlentitydefs.name2codepoint[text[1:-1]]).encode("utf-8")
            except KeyError:
                logger.info("keyerror")
                pass
            except:
                pass
        return text  # leave as is
    return re.sub("&#?\w+;", fixup, text)


# Converts HTML codes like "&ntilde;" into the corresponding utf-8 unicode
# character ("ñ").
def decodeHtmlentities(string):
    string = entitiesfix(string)
    entity_re = re.compile("&(#?)(\d{1,5}|\w{1,8});")

    def substitute_entity(match):
        from htmlentitydefs import name2codepoint as n2cp
        ent = match.group(2)
        if match.group(1) == "#":
            return unichr(int(ent)).encode('utf-8')
        else:
            cp = n2cp.get(ent)
            if cp:
                return unichr(cp).encode('utf-8')
            else:
                return match.group()

    return entity_re.subn(substitute_entity, string)[0]


def entitiesfix(string):
    # Entities always start with the & symbol and end with a semicolon (;)
    string = string.replace("&aacute", "&aacute;")
    string = string.replace("&eacute", "&eacute;")
    string = string.replace("&iacute", "&iacute;")
    string = string.replace("&oacute", "&oacute;")
    string = string.replace("&uacute", "&uacute;")
    string = string.replace("&Aacute", "&Aacute;")
    string = string.replace("&Eacute", "&Eacute;")
    string = string.replace("&Iacute", "&Iacute;")
    string = string.replace("&Oacute", "&Oacute;")
    string = string.replace("&Uacute", "&Uacute;")
    string = string.replace("&uuml", "&uuml;")
    string = string.replace("&Uuml", "&Uuml;")
    string = string.replace("&ntilde", "&ntilde;")
    string = string.replace("&#191", "&#191;")
    string = string.replace("&#161", "&#161;")
    string = string.replace(";;", ";")
    return string


def htmlclean(cadena):
    cadena = re.compile("<!--.*?-->", re.DOTALL).sub("", cadena)
    cadena = cadena.replace("<center>", "")
    cadena = cadena.replace("</center>", "")
    cadena = cadena.replace("<cite>", "")
    cadena = cadena.replace("</cite>", "")
    cadena = cadena.replace("<em>", "")
    cadena = cadena.replace("</em>", "")
    cadena = cadena.replace("<u>", "")
    cadena = cadena.replace("</u>", "")
    cadena = cadena.replace("<li>", "")
    cadena = cadena.replace("</li>", "")
    cadena = cadena.replace("<t
----
repo_name: pagekite/PyPagekite
path: pagekite/proto/conns.py
language: Python | license: agpl-3.0 | size: 74,627 | score: 0.011457

"""
These are the Connection classes, relatively high level classes that handle
incoming or outgoing network connections.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

##############################################################################
LICENSE = """\
This file is part of pagekite.py.
Copyright 2010-2020, the Beanstalks Project ehf. and Bjarni Runar Einarsson

This program is free software: you can redistribute it and/or modify it under
the terms of the GNU Affero General Public License as published by the Free
Software Foundation, either version 3 of the License, or (at your option) any
later version.

This program is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for more
details.

You should have received a copy of the GNU Affero General Public License
along with this program. If not, see: <http://www.gnu.org/licenses/>
"""
##############################################################################

import six
import socket
import sys
import threading
import time

from pagekite.compat import *
from pagekite.common import *
import pagekite.common as common
import pagekite.logging as logging

from .filters import HttpSecurityFilter
from .selectables import *
from .parsers import *
from .proto import *

SMTP_PORTS = (25, 465, 587, 2525)


class Tunnel(ChunkParser):
  """A Selectable representing a PageKite tunnel."""

  S_NAME = 0
  S_PORTS = 1
  S_RAW_PORTS = 2
  S_PROTOS = 3
  S_ADD_KITES = 4
  S_IS_MOBILE = 5
  S_VERSION = 6
  S_WEBSOCKET = 7

  def __init__(self, conns):
    ChunkParser.__init__(self, ui=conns.config.ui)
    if conns.config.websocket_chunks:
      self.PrepareWebsockets()
    self.server_info = ['x.x.x.x:x', [], [], [], False, False, None, False]
    self.Init(conns)

  def Init(self, conns):
    self.conns = conns
    self.users = {}
    self.remote_ssl = {}
    self.zhistory = {}
    self.backends = {}
    self.last_ping = 0
    self.weighted_rtt = -1
    self.using_tls = False
    self.filters = []
    self.ip_limits = None
    self.maxread = int(common.MAX_READ_BYTES * common.MAX_READ_TUNNEL_X)

  def Cleanup(self, close=True):
    if self.users:
      for sid in list(six.iterkeys(self.users)):
        self.CloseStream(sid)
    ChunkParser.Cleanup(self, close=close)
    self.Init(None)

  def __html__(self):
    return ('<b>Server name</b>: %s<br>'
            '%s') % (self.server_info[self.S_NAME],
                     ChunkParser.__html__(self))

  def LogTrafficStatus(self, final=False):
    if self.ui:
      if final:
        message = 'Disconnected from: %s' % self.server_info[self.S_NAME]
        self.ui.Status('down', color=self.ui.GREY, message=message)
      else:
        self.ui.Status('traffic')

  def GetKiteRequests(self, parse):
    requests = []
    for prefix in ('X-Beanstalk', 'X-PageKite'):
      for bs in parse.Header(prefix):
        # X-PageKite: proto:my.domain.com:token:signature
        proto, domain, srand, token, sign = bs.split(':')
        requests.append((proto.lower(), domain.lower(), srand, token, sign,
                         prefix))
    return requests

  def RejectTraffic(self, client_conn, address, host):
    # This function allows the tunnel to reject an incoming connection
    # based on the remote address and the requested host. For now we
    # only know how to discriminate by remote IP.
    return self.RejectRemoteIP(client_conn, str(address[0]), host) or False

  def RejectRemoteIP(self, client_conn, ip, host):
    if not self.ip_limits:
      return False

    if len(self.ip_limits) == 1:
      whitelist = self.ip_limits[0]
      delta = maxips = seen = None
    else:
      whitelist = None
      delta, maxips, seen = self.ip_limits

    # Do we have a whitelist-only policy for this tunnel?
    if whitelist:
      for prefix in whitelist:
        if ip.startswith(prefix):
          return False
      self.LogError('Rejecting connection from unrecognized IP')
      return 'not_whitelisted'

    # Do we have a delta/maxips policy?
    if delta and maxips:
      # Since IP addresses are often shared, we try to differentiate browsers
      # based on few of the request headers as well. We don't track cookies
      # since they're mutated by the site itself, which would lead to false
      # positives here.
      client = ip
      log_info = []
      if hasattr(client_conn, 'parser'):
        if hasattr(client_conn.parser, 'Header'):
          client = sha1hex('/'.join(
            [ip] +
            (client_conn.parser.Header('User-Agent') or []) +
            (client_conn.parser.Header('Accept-Language') or [])))
        if hasattr(client_conn.parser, 'method'):
          log_info.append(
            (str(client_conn.parser.method), str(client_conn.parser.path)))

      now = time.time()
      if client in seen:
        seen[client] = now
        return False

      for seen_ip in list(six.iterkeys(seen)):
        if seen[seen_ip] < now - delta:
          del seen[seen_ip]

      if len(seen) >= maxips:
        self.LogError('Rejecting connection from new client',
                      [('client', client[:12]),
                       ('ips_per_sec', '%d/%ds' % (maxips, delta)),
                       ('domain', host)] + log_info)
        return 'ips_per_sec'
      else:
        seen[client] = now
        return False

    # All else is allowed
    return False

  def ProcessPageKiteHeaders(self, parser):
    for prefix in ('X-Beanstalk', 'X-PageKite'):
      for feature in parser.Header(prefix+'-Features'):
        if feature == 'ZChunks':
          if not self.conns.config.disable_zchunks:
            self.EnableZChunks(level=1)
        elif feature == 'AddKites':
          self.server_info[self.S_ADD_KITES] = True
        elif feature == 'Mobile':
          self.server_info[self.S_IS_MOBILE] = True

      # Track which versions we see in the wild.
      version = 'old'
      for v in parser.Header(prefix+'-Version'):
        version = v
      if common.gYamon:
        common.gYamon.vadd('version-%s' % version, 1, wrap=10000000)
      self.server_info[self.S_VERSION] = version

      for replace in parser.Header(prefix+'-Replace'):
        if replace in self.conns.conns_by_id:
          repl = self.conns.conns_by_id[replace]
          self.LogInfo('Disconnecting old tunnel: %s' % repl)
          repl.Die(discard_buffer=True)

  def _FrontEnd(conn, body, conns):
    """This is what the front-end does when a back-end requests a new tunnel."""
    self = Tunnel(conns)
    try:
      if 'websocket' in conn.parser.Header('Upgrade'):
        self.server_info[self.S_ADD_KITES] = True
        self.server_info[self.S_WEBSOCKET] = (
          ''.join(conn.parser.Header('Sec-WebSocket-Key')) or True)

      self.ProcessPageKiteHeaders(conn.parser)
      requests = self.GetKiteRequests(conn.parser)

    except Exception as err:
      self.LogError('Discarding connection: %s' % err)
      self.Cleanup()
      return None

    except socket.error as err:
      self.LogInfo('Discarding connection: %s' % err)
      self.Cleanup()
      return None

    try:
      ips, seconds = conns.config.GetDefaultIPsPerSecond()
      self.UpdateIP_Limits(ips, seconds)
    except ValueError:
      pass

    self.last_activity = time.time()
    self.CountAs('backends_live')
    self.SetConn(conn)
    if requests:
      conns.auth().check(requests[:], conn,
                         lambda r, l: self.AuthCallback(conn, r, l))
    elif self.server_info[self.S_WEBSOCKET]:
      self.AuthCallback(conn, [], [])

    return self

  def RecheckQuota(self, conns, when=None):
    if when is None:
      when = time.time()
    if (self.quota and
        self.quota[0] is not None and
        self.quota[1] and
        (self.quota[2] < when-900)):
      self.quota[2] = when
      self.LogDebug('Rechecking: %s' % (self.quota, ))
      conns.auth().check(self.quota[1], self,
                         lambda r, l: self.QuotaCallback(conns, r, l))

  def ProcessAuthResults(self, results, duplicates_ok=F
----
repo_name: msegado/edx-platform
path: openedx/core/lib/tests/test_xblock_utils.py
language: Python | license: agpl-3.0 | size: 9,421 | score: 0.00138

"""
Tests for xblock_utils.py
"""

import uuid

import ddt
import six
from django.conf import settings
from django.test.client import RequestFactory
from mock import patch
from opaque_keys.edx.asides import AsideUsageKeyV1, AsideUsageKeyV2
from web_fragments.fragment import Fragment
from xblock.core import XBlockAside

from openedx.core.lib.url_utils import quote_slashes
from openedx.core.lib.xblock_builtin import get_css_dependencies, get_js_dependencies
from openedx.core.lib.xblock_utils import (
    get_aside_from_xblock,
    is_xblock_aside,
    replace_course_urls,
    replace_jump_to_id_urls,
    replace_static_urls,
    request_token,
    sanitize_html_id,
    wrap_fragment,
    wrap_xblock
)
from xmodule.modulestore import ModuleStoreEnum
from xmodule.modulestore.tests.django_utils import SharedModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory, ItemFactory
from xmodule.modulestore.tests.test_asides import AsideTestType


@ddt.ddt
class TestXblockUtils(SharedModuleStoreTestCase):
    """
    Tests for xblock utility functions.
    """

    @classmethod
    def setUpClass(cls):
        super(TestXblockUtils, cls).setUpClass()
        cls.course_mongo = CourseFactory.create(
            default_store=ModuleStoreEnum.Type.mongo,
            org='TestX',
            number='TS01',
            run='2015'
        )
        cls.course_split = CourseFactory.create(
            default_store=ModuleStoreEnum.Type.split,
            org='TestX',
            number='TS02',
            run='2015'
        )

    def create_fragment(self, content=None):
        """
        Create a fragment.
        """
        fragment = Fragment(content)
        fragment.add_css('body {background-color:red;}')
        fragment.add_javascript('alert("Hi!");')
        return fragment

    def test_wrap_fragment(self):
        """
        Verify that wrap_fragment adds new content.
        """
        new_content = '<p>New Content<p>'
        fragment = self.create_fragment()
        wrapped_fragment = wrap_fragment(fragment, new_content)
        self.assertEqual('<p>New Content<p>', wrapped_fragment.content)
        self.assertEqual('body {background-color:red;}', wrapped_fragment.resources[0].data)
        self.assertEqual('alert("Hi!");', wrapped_fragment.resources[1].data)

    def test_request_token(self):
        """
        Verify that a proper token is returned.
        """
        request_with_token = RequestFactory().get('/')
        request_with_token._xblock_token = '123'  # pylint: disable=protected-access
        token = request_token(request_with_token)
        self.assertEqual(token, '123')

        request_without_token = RequestFactory().get('/')
        token = request_token(request_without_token)
        # Test to see if the token is an uuid1 hex value
        test_uuid = uuid.UUID(token, version=1)
        self.assertEqual(token, test_uuid.hex)

    @ddt.data(
        ('course_mongo', 'data-usage-id="i4x:;_;_TestX;_TS01;_course;_2015"'),
        ('course_split', 'data-usage-id="block-v1:TestX+TS02+2015+type@course+block@course"')
    )
    @ddt.unpack
    def test_wrap_xblock(self, course_id, data_usage_id):
        """
        Verify that new content is added and the resources are the same.
        """
        fragment = self.create_fragment(u"<h1>Test!</h1>")
        course = getattr(self, course_id)
        test_wrap_output = wrap_xblock(
            runtime_class='TestRuntime',
            block=course,
            view='baseview',
            frag=fragment,
            context={"wrap_xblock_data": {"custom-attribute": "custom-value"}},
            usage_id_serializer=lambda usage_id: quote_slashes(six.text_type(usage_id)),
            request_token=uuid.uuid1().hex
        )
        self.assertIsInstance(test_wrap_output, Fragment)
        self.assertIn('xblock-baseview', test_wrap_output.content)
        self.assertIn('data-runtime-class="TestRuntime"', test_wrap_output.content)
        self.assertIn(data_usage_id, test_wrap_output.content)
        self.assertIn('<h1>Test!</h1>', test_wrap_output.content)
        self.assertIn('data-custom-attribute="custom-value"', test_wrap_output.content)
        self.assertEqual(test_wrap_output.resources[0].data, u'body {background-color:red;}')
        self.assertEqual(test_wrap_output.resources[1].data, 'alert("Hi!");')

    @ddt.data('course_mongo', 'course_split')
    def test_replace_jump_to_id_urls(self, course_id):
        """
        Verify that the jump-to URL has been replaced.
        """
        course = getattr(self, course_id)
        test_replace = replace_jump_to_id_urls(
            course_id=course.id,
            jump_to_id_base_url='/base_url/',
            block=course,
            view='baseview',
            frag=Fragment('<a href="/jump_to_id/id">'),
            context=None
        )
        self.assertIsInstance(test_replace, Fragment)
        self.assertEqual(test_replace.content, '<a href="/base_url/id">')

    @ddt.data(
        ('course_mongo', '<a href="/courses/TestX/TS01/2015/id">'),
        ('course_split', '<a href="/courses/course-v1:TestX+TS02+2015/id">')
    )
    @ddt.unpack
    def test_replace_course_urls(self, course_id, anchor_tag):
        """
        Verify that the course URL has been replaced.
        """
        course = getattr(self, course_id)
        test_replace = replace_course_urls(
            course_id=course.id,
            block=course,
            view='baseview',
            frag=Fragment('<a href="/course/id">'),
            context=None
        )
        self.assertIsInstance(test_replace, Fragment)
        self.assertEqual(test_replace.content, anchor_tag)

    @ddt.data(
        ('course_mongo', '<a href="/c4x/TestX/TS01/asset/id">'),
        ('course_split', '<a href="/asset-v1:TestX+TS02+2015+type@asset+block/id">')
    )
    @ddt.unpack
    def test_replace_static_urls(self, course_id, anchor_tag):
        """
        Verify that the static URL has been replaced.
        """
        course = getattr(self, course_id)
        test_replace = replace_static_urls(
            data_dir=None,
            course_id=course.id,
            block=course,
            view='baseview',
            frag=Fragment('<a href="/static/id">'),
            context=None
        )
        self.assertIsInstance(test_replace, Fragment)
        self.assertEqual(test_replace.content, anchor_tag)

    def test_sanitize_html_id(self):
        """
        Verify that colons and dashes are replaced.
        """
        dirty_string = 'I:have-un:allowed_characters'
        clean_string = sanitize_html_id(dirty_string)
        self.assertEqual(clean_string, 'I_have_un_allowed_characters')

    @ddt.data(
        (True, ["combined.css"]),
        (False, ["a.css", "b.css", "c.css"]),
    )
    @ddt.unpack
    def test_get_css_dependencies(self, pipeline_enabled, expected_css_dependencies):
        """
        Verify that `get_css_dependencies` returns correct list of files.
        """
        pipeline = settings.PIPELINE.copy()
        pipeline['PIPELINE_ENABLED'] = pipeline_enabled
        pipeline['STYLESHEETS'] = {
            'style-group': {
                'source_filenames': ["a.css", "b.css", "c.css"],
                'output_filename': "combined.css"
            }
        }
        with self.settings(PIPELINE=pipeline):
            css_dependencies = get_css_dependencies("style-group")
        self.assertEqual(css_dependencies, expected_css_dependencies)

    @ddt.data(
        (True, ["combined.js"]),
        (False, ["a.js", "b.js", "c.js"]),
    )
    @ddt.unpack
    def test_get_js_dependencies(self, pipeline_enabled, expected_js_dependencies):
        """
        Verify that `get_js_dependencies` returns correct list of files.
        """
        pipeline = settings.PIPELINE.copy()
        pipeline['PIPELINE_ENABLED'] = pipeline_enabled
        pipeline['JAVASCRIPT'] = {
            'js-group': {
                'source_filenames': ["a.js", "b.js", "c.js"],
                'output_filename': "combined.js"
            }
        }
        with self.settings(PIPELINE=pipeline):
            js_dependencies
ktan2020/legacy-automation
win/Lib/test/test_cookielib.py
Python
mit
73,964
0.001731
# -*- coding: latin-1 -*- """Tests for cookielib.py.""" import cookielib import os import re import time from unittest import TestCase from test import test_support class DateTimeTests(TestCase): def test_time2isoz(self): from cookielib import time2isoz base = 1019227000 day = 24*3600 self.assertEqual(time2isoz(base), "2002-04-19 14:36:40Z") self.assertEqual(time2isoz(base+day), "2002-04-20 14:36:40Z") self.assertEqual(time2isoz(base+2*day), "2002-04-21 14:36:40Z") self.assertEqual(time2isoz(base+3*day), "2002-04-22 14:36:40Z") az = time2isoz() bz = time2isoz(500000) for text in (az, bz): self.assertTrue(re.search(r"^\d{4}-\d\d-\d\d \d\d:\d\d:\d\dZ$", text), "bad time2isoz format: %s %s" % (az, bz)) def test_http2time(self): from cookielib import http2time def parse_date(text): return time.gmtime(http2time(text))[:6] self.assertEqual(parse_date("01 Jan 2001"), (2001, 1, 1, 0, 0, 0.0)) # this test will break around year 2070 self.assertEqual(parse_date("03-Feb-20"), (2020, 2, 3, 0, 0, 0.0)) # this test will break around year 2048 self.assertEqual(parse_date("03-Feb-98"), (1998, 2, 3, 0, 0, 0.0)) def test_http2time_formats(self): from cookielib import http2time, time2isoz # test http2time for supported dates. Test cases with 2 digit year # will probably break in year 2044. tests = [ 'Thu, 03 Feb 1994 00:00:00 GMT', # proposed new HTTP format 'Thursday, 03-Feb-94 00:00:00 GMT', # old rfc850 HTTP format 'Thursday, 03-Feb-1994 00:00:00 GMT', # broken rfc850 HTTP format '03 Feb 1994 00:00:00 GMT', # HTTP format (no weekday) '03-Feb-94 00:00:00 GMT', # old rfc850 (no weekday) '03-Feb-1994 00:00:00 GMT', # broken rfc850 (no weekday) '03-Feb-1994 00:00 GMT', # broken rfc850 (no weekday, no seconds) '03-Feb-1994 00:00', # broken rfc850 (no weekday, no seconds, no tz) '03-Feb-94', # old rfc850 HTTP format (no weekday, no time) '03-Feb-1994', # broken rfc850 HTTP format (no weekday, no time) '03 Feb 1994', # proposed new HTTP format (no weekday, no time) # A few tests with extra space at various places ' 03 Feb 1994 0:00 ', ' 03-Feb-1994 ', ] test_t = 760233600 # assume broken POSIX counting of seconds result = time2isoz(test_t) expected = "1994-02-03 00:00:00Z" self.assertEqual(result, expected, "%s => '%s' (%s)" % (test_t, result, expected)) for s in tests: t = http2time(s) t2 = http2time(s.lower()) t3 = http2time(s.upper()) self.assertTrue(t == t2 == t3 == test_t, "'%s' => %s, %s, %s (%s)" % (s, t, t2, t3, test_t)) def test_http2time_garbage(self): from cookielib import http2time for test in [ '', 'Garbage', 'Mandag 16. 
September 1996', '01-00-1980', '01-13-1980', '00-01-1980', '32-01-1980', '01-01-1980 25:00:00', '01-01-1980 00:61:00', '01-01-1980 00:00:62', ]: self.assertTrue(http2time(test) is None, "http2time(%s) is not None\n" "http2time(test) %s" % (test, http2time(test)) ) class HeaderTests(TestCase): def test_parse_ns_headers_expires(self): from cookielib import parse_ns_headers # quotes should be stripped expected = [[('foo', 'bar'), ('expires', 2209069412L), ('version', '0')]] for hdr in [ 'foo=bar; expires=01 Jan 2040 22:23:32 GMT', 'foo=bar; expires="01 Jan 2040 22:23:32 GMT"', ]: self.assertEqual(parse_ns_headers([hdr]), expected) def test_parse_ns_headers_version(self): from cookielib import parse_ns_headers # quotes should be stripped expected = [[('foo', 'bar'), ('version', '1')]] for hdr in [ 'foo=bar; version="1"', 'foo=bar; Version="1"', ]: self.assertEqual(parse_ns_headers([hdr]), expected) def test_parse_ns_headers_special_names(self): # names such as 'expires' are not special in first name=value pair # of Set-Cookie: header from cookielib import parse_ns_headers # Cookie with name 'expires' hdr = 'expires=01 Jan 2040 22:23:32 GMT' expected = [[("expires", "01 Jan 2040 22:23:32 GMT"), ("version", "0")]] self.assertEqual(parse_ns_headers([hdr]), expected) def test_join_header_words(self): from cookielib import join_header_words joined = join_header_words([[("foo", None), ("bar", "baz")]]) self.assertEqual(joined, "foo; bar=baz") self.assertEqual(join_header_words([[]]), "") def test_split_header_words(self): from cookielib import split_header_words tests = [ ("foo", [[("foo", None)]]), ("foo=bar", [[("foo", "bar")]]), (" foo ", [[("foo", None)]]), (" foo= ", [[("foo", "")]]), (" foo=", [[("foo", "")]]), (" foo= ; ", [[("foo", "")]]), (" foo= ; bar= baz ", [[("foo", ""), ("bar", "baz")]]), ("foo=bar bar=baz", [[("foo", "bar"), ("bar", "baz")]]), # doesn't really matter if this next fails, but it works ATM ("foo= bar=baz", [[("foo", "bar=baz")]]), ("foo=bar;bar=baz", [[("foo", "bar"), ("bar", "baz")]]), ('foo bar baz', [[("foo", None), ("bar", None), ("baz", None)]]), ("a, b, c", [[("a", None)], [("b", None)], [("c", None)]]), (r'foo; bar=baz, spam=, foo="\,\;\"", bar= ', [[("foo", None), ("bar", "baz")], [("spam", "")], [("foo", ',;"')], [("bar", "")]]), ] for arg, expect in tests: try: result = split_header_words([arg]) except: import traceback, StringIO f = StringIO.StringIO() traceback.print_exc(None, f) result = "(error -- traceback follows)\n\n%s" % f.getvalue() self.assertEqual(result, expect, """ When parsing: '%s' Expected: '%s' Got: '%s' """ % (arg, expect, result)) def test_roundtrip(self): from cookielib import split_header_words, join_header_words tests = [ ("foo", "foo"), ("foo=bar", "foo=bar"), (" foo ", "foo"), ("foo=", 'foo=""'), ("foo=bar bar=baz", "foo=bar; bar=baz"), ("foo=bar;bar=baz", "foo=bar; bar=baz"), ('foo bar baz', "foo; bar; baz"), (r'foo="\"" bar="\\"', r'foo="\""; bar="\\"'), ('foo,,,bar', 'foo, bar'), ('foo=bar,bar=baz', 'foo=bar, bar=baz'), ('text/html; charset=iso-8859-1', 'text/html; charset="iso-8859-1"'), ('foo="bar"; port="80,81"; discard, bar=baz
', 'foo=bar; port="80,81"; discard, bar=baz'), (r'Basic realm="\"foo\\\\bar\""', r'Basic; realm="\"foo\\\\bar\""') ] for arg, expect in tests: input = split_header_words([arg]) res = join_head
er_words(input) self.assertEqual(res, expect, """ When parsing: '%s' Expected: '%s' Got: '%s' Input was: '%s' """ % (arg, expect, res, input)) class FakeResponse: def __init__(self, headers=[], url=None): """ headers: list of RFC822-style 'Key: value' strings """ import mimetool
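A short usage sketch of the two helpers these tests exercise most, assuming the Python 2 standard library where cookielib lives (it was renamed http.cookiejar in Python 3); the expected values are copied directly from the tests above.

from cookielib import http2time, time2isoz

t = http2time('03 Feb 1994 00:00:00 GMT')
assert t == 760233600                          # the tests' reference timestamp
assert time2isoz(t) == '1994-02-03 00:00:00Z'
print(time2isoz(t))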
ad-m/sledzenie_listow
sledzenie_listow/public/utils.py
Python
bsd-3-clause
1,220
0.002459
from bs4 import BeautifulSoup from requests imp
ort Session from ..extensions import cache # Settings URL = 'http://emonitoring.poczta-polska.pl/wssClient.php' # Init SESSION = Session() def get_number(real_number, s=None): s = s or Session() soup = BeautifulSoup(s.post(URL, data={'n': real_number}).text.encode('utf8')) sledzenie = soup.find(id='sledzenie_td') if sledzenie: return {'no': real_number, 'meta': sledzenie, 'history': soup.
find(id='zadarzenia_td')} return False def quest_number(nake_number, s=None): key = "nake_number=%s" % (nake_number) rv = cache.get(key) if rv is None: for i in range(0, 10): data = get_number("00" + str(nake_number) + str(i), s) if data: rv = data break cache.set(key, rv, timeout=60*60*6) # 6 hours cache return rv def quest_range(start_string='00559007734046803928', end_string='0055900773404680394', s=None): nake_start = int(start_string[0:19]) nake_end = int(end_string[0:19]) if nake_end-nake_start >= 50: return [] result = [] for x in range(nake_start, nake_end): result.append(quest_number(x)) return result
sunner/buzz2weibo
weibopy/error.py
Python
mit
256
0.003906
# Copyright 2009-2010 Joshua Roesslein # See LICENSE for details. class WeibopError(Exception): """Weib
opy exception""" def __init__(self, reason): self.reason = reason.encode('utf-8') def __str__(self): return self.re
ason
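A hypothetical call site for the exception above; raise_for_api and the payload shape are invented for illustration, and the record's Python 2 semantics (bytes from reason.encode('utf-8')) are assumed.

from weibopy.error import WeibopError

def raise_for_api(payload):
    # Raise when the (assumed) API payload carries an error message.
    if 'error' in payload:
        raise WeibopError(payload['error'])

try:
    raise_for_api({'error': u'invalid token'})
except WeibopError as e:
    print(e)  # __str__ returns the UTF-8 encoded reason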
flacjacket/sympy
doc/src/conf.py
Python
bsd-3-clause
6,093
0.005416
# -*- coding: utf-8 -*- # # SymPy documentation build configuration file, created by # sphinx-quickstart.py on Sat Mar 22 19:34:32 2008. # # This file is execfile()d with the current directory set to its containing dir. # # The contents of this file are pickled, so don't put values in the namespace # that aren't pickleable (module imports are okay, they're removed automatically). # # All configuration values have a default value; values that are commented out # serve to show the default value. import sys # If your extensions are in another directory, add it here. sys.path.extend(['../sympy', 'ext']) # General configuration # --------------------- # Add any Sphinx extension module names here, as strings. They can be extensions # coming with Sphinx (named 'sphinx.addons.*') or your custom ones. extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode', 'sphinx.ext.mathjax', 'numpydoc', 'sympylive',] # Use this to use pngmath instead #extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode', 'sphinx.ext.pngmath', ] # MathJax file, which is free to use. See http://www.mathjax.org/docs/2.0/start.html mathjax_path = 'https://c328740.ssl.cf1.rackcdn.com/mathjax/latest/MathJax.js?config=TeX-AMS_HTML-full' # Add any paths that contain templates here, relative to this directory. templates_path = ['.templates'] # The suffix of source filenames. source_suffix = '.rst' # The master toctree document. master_doc = 'index' # General substitutions. project = 'SymPy' copyright = '2008, 2009, 2010, 2011, 2012 SymPy Development Team' # The default replacements for |version| and |release|, also used in various # other places throughout the built documents. # # The short X.Y version. version = '0.7.2' # The full version, including alpha/beta/rc tags. release = '0.7.2-git' # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: #today = '' # Else, today_fmt is used as the format for a strft
ime call. today_fmt = '%B %d, %Y' # Translations: locale_dirs = ["i
18n/"] # List of documents that shouldn't be included in the build. #unused_docs = [] # If true, '()' will be appended to :func: etc. cross-reference text. #add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). #add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. #show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # Options for HTML output # ----------------------- # The style sheet to use for HTML and HTML Help pages. A file of that name # must exist either in Sphinx' static/ path, or in one of the custom paths # given in html_static_path. html_style = 'default.css' # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['_static'] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. html_last_updated_fmt = '%b %d, %Y' html_logo = '_static/sympylogo.png' html_favicon = '../logo/SymPy-Favicon.ico' html_theme_options = {'collapsiblesidebar': True} # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. #html_use_smartypants = True # Content template for the index page. #html_index = '' # Custom sidebar templates, maps document names to template names. #html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. #html_additional_pages = {} # If false, no module index is generated. #html_use_modindex = True # If true, the reST sources are included in the HTML build as _sources/<name>. #html_copy_source = True # Output file base name for HTML help builder. htmlhelp_basename = 'SymPydoc' # Options for LaTeX output # ------------------------ # The paper size ('letter' or 'a4'). #latex_paper_size = 'letter' # The font size ('10pt', '11pt' or '12pt'). #latex_font_size = '10pt' # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, author, document class [howto/manual], toctree_only). # toctree_only is set to True so that the start file document itself is not included in the # output, only the documents referenced by it via TOC trees. The extra stuff in the master # document is intended to show up in the HTML, but doesn't really belong in the LaTeX output. latex_documents = [('index', 'sympy-%s.tex' % release, 'SymPy Documentation', 'SymPy Development Team', 'manual', True)] # Additional stuff for the LaTeX preamble. # Tweaked to work with XeTeX. latex_elements = { 'babel': '', 'fontenc': r''' \usepackage{amssymb} \usepackage{fontspec} \defaultfontfeatures{Mapping=tex-text} \setmainfont{DejaVu Serif} \setsansfont{DejaVu Sans} \setmonofont{DejaVu Sans Mono} ''', 'fontpkg': '', 'inputenc': '', 'utf8extra': '', 'preamble': '' } # SymPy logo on title page latex_logo = '_static/sympylogo.png' # Documents to append as an appendix to all manuals. #latex_appendices = [] # Show page numbers next to internal references latex_show_pagerefs = True # We use False otherwise the module index gets generated twice. 
latex_use_modindex = False default_role = 'math' pngmath_divpng_args = ['-gamma 1.5','-D 110'] # Note, this is ignored by the mathjax extension # Any \newcommand should be defined in the file pngmath_latex_preamble = '\\usepackage{amsmath}\n'+\ '\\usepackage{bm}\n'+\ '\\usepackage{amsfonts}\n'+\ '\\usepackage{amssymb}\n'+\ '\\setlength{\\parindent}{0pt}\n' texinfo_documents = [ (master_doc, 'sympy', 'SymPy Documentation', 'SymPy Development Team', 'SymPy', 'Computer algebra system (CAS) in Python', 'Programming', 1), ]
espenak/awsfabrictasks
awsfabrictasks/tests/s3/test_api.py
Python
bsd-3-clause
1,830
0.002186
from __future__ import unicode_literals from unittest import TestCase from shutil import rmtree from tempfile import mkdtemp from os import makedirs from os.path import join, exists, dirname from awsfabrictasks.s3.api import dirlist_absfilenames from awsfabrictasks.s3.api import localpath_to_s3path from awsfabrictasks.s3.api import s3path_to_localpath def makefile(tempdir, path, contents): path = join(tempdir, *path.split('/')) if not exists(dirname(path)): makedirs(dirname(path)) open(path, 'wb').write(contents.encode('utf-8')) return path class TestDirlistAbsfilenames(TestCase): def setUp(self): self.tempdir = mkdtemp() files = (('hello/world.txt', 'Hello world'), ('test.py', 'print "test"'), ('hello/cruel/world.txt', 'Cruel?')) self.paths = set() for path, contents in files: realpath = makefile(self.tempdir, path, contents) self.paths.add(realpath) def tearDown(self): rmtree(self.tempdir) def test_dirlist_absfilenames(self): result = dirlist_absfilenames(self.tempdir) self.asser
tEquals(result, self.paths) class TestLocalpathToS3path(TestCase): def setUp(self): self.temp
dir = mkdtemp() makefile(self.tempdir, 'hello/world.txt', '') def tearDown(self): rmtree(self.tempdir) def test_localpath_to_s3path(self): s3path = localpath_to_s3path(self.tempdir, join(self.tempdir, 'hello/world.txt'), 'my/test') self.assertEquals(s3path, 'my/test/hello/world.txt') def test_s3path_to_localpath(self): localpath = s3path_to_localpath('mydir/', 'mydir/hello/world.txt', join(self.tempdir, 'my', 'test')) self.assertEquals(localpath, join(self.tempdir, 'my', 'test', 'hello', 'world.txt'))
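A one-call sketch of the path mapping the last test asserts; the import path comes from the test module, and it is assumed that s3path_to_localpath does pure string work in this direction, so no files need to exist.

from awsfabrictasks.s3.api import s3path_to_localpath

localpath = s3path_to_localpath('mydir/', 'mydir/hello/world.txt', '/tmp/target')
print(localpath)  # expected, per the test: /tmp/target/hello/world.txt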
seecr/meresco-harvester
test/throughputanalysertest.py
Python
gpl-2.0
6,185
0.011318
## begin license ## # # "Meresco Harvester" consists of two subsystems, namely an OAI-harvester and # a web-control panel. # "Meresco Harvester" is originally called "Sahara" and was developed for # SURFnet by: # Seek You Too B.V. (CQ2) http://www.cq2.nl # # Copyright (C) 2006-2007 SURFnet B.V. http://www.surfnet.nl # Copyright (C) 2007-2008 SURF Foundation. http://www.surf.nl # Copyright (C) 2007-2009, 2011 Seek You Too (CQ2) http://www.cq2.nl # Copyright (C) 2007-2009 Stichting Kennisnet Ict op school. http://www.kennisnetictopschool.nl # Copyright (C) 2009 Tilburg University http://www.uvt.nl # Copyright (C) 2011, 2020-2021 Stichting Kennisnet https://www.kennisnet.nl # Copyright (C) 2020-2021 Data Archiving and Network Services https://dans.knaw.nl # Copyright (C) 2020-2021 SURF https://www.surf.nl # Copyright (C) 2020-2021 Seecr (Seek You Too B.V.) https://seecr.nl # Copyright (C) 2020-2021 The Netherlands Institute for Sound and Vision https://beeldengeluid.nl # # This file is part of "Meresco Harvester" # # "Meresco Harvester" is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # "Meresco Harvester" is distributed in the hope th
at it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with "Meresco Harvester"; if not, write to the Free Software # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA # ## end license ## import unittest import datetime, tempfile, os, shutil from meresco.harvester.throughputanalyser import parseToTime, ThroughputAnalyser, ThroughputReport class ThroughputAnalyserTest(unittest.TestCase): def setUp(self): self.mockAnalyseRepository_arguments = [] self.testdir = os.path.join(tempfile.gettempdir(), 'throughputanalysertest') not os.path.isdir(self.testdir) and os.makedirs(self.testdir) def tearDown(self): shutil.rmtree(self.testdir) def testParseToTime(self): timeString = "1999-12-03 12:34:35.123" date = parseToTime(timeString) self.assertEqual((1999,12,3,12,34,35,123000), (date.year,date.month,date.day, date.hour, date.minute, date.second, date.microsecond)) date = parseToTime("2006-08-04 10:40:50.644") self.assertEqual((2006,8,4,10,40,50,644000), (date.year,date.month,date.day, date.hour, date.minute, date.second, date.microsecond)) def testParseToTimeDiff(self): date1 = parseToTime("1999-12-03 12:34:35.123") date2 = parseToTime("1999-12-03 12:34:36.423") delta = date2 - date1 self.assertEqual(1.3, delta.seconds + delta.microseconds/1000000.0) def testAnalyse(self): t = ThroughputAnalyser(eventpath = self.testdir) t._analyseRepository = self.mockAnalyseRepository report = t.analyse(['repo1','repo2'], '2006-08-31') self.assertEqual(1000, report.records) self.assertEqual(2000.0, report.seconds) self.assertEqual(['repo1', 'repo2'], self.mockAnalyseRepository_arguments) def testAnalyseNothing(self): t = ThroughputAnalyser(eventpath = self.testdir) t._analyseRepository = self.mockAnalyseRepository report = t.analyse([], '2006-08-31') self.assertEqual(0, report.records) self.assertEqual(0.0, report.seconds) self.assertEqual('-' , report.recordsPerSecond()) self.assertEqual('-' , report.recordsPerDay()) def testAnalyseRepository(self): r = open(os.path.join(self.testdir, 'repo1.events'), 'w') try: r.write(""" [2006-08-30 00:00:15.500] ENDHARVEST [repo1] [2006-08-30 01:00:00.000] STARTHARVEST [repo1] Uploader connected ... [2006-08-30 01:00:10.000] SUCCES [repo1] Harvested/Uploaded/Deleted/Total: 200/200/0/1000, ResumptionToken: r1 [2006-08-30 01:00:15.500] ENDHARVEST [repo1] [2006-08-31 01:00:00.000] STARTHARVEST [repo1] Uploader connected ... [2006-08-31 01:00:10.000] SUCCES [repo1] Harvested/Uploaded/Deleted/Total: 200/200/0/1200, ResumptionToken: r1 [2006-08-31 01:00:15.500] ENDHARVEST [repo1] [2006-08-31 02:00:00.000] STARTHARVEST [repo1] Uploader connected ... [2006-08-31 02:00:10.000] SUCCES [repo1] Harvested/Uploaded/Deleted/Total: 200/200/0/1400, ResumptionToken: r2 [2006-08-31 02:00:25.500] ENDHARVEST [repo1] [2006-08-31 03:00:00.000] STARTHARVEST [repo1] Uploader connected ... 
[2006-08-31 03:00:10.000] SUCCES [repo1] Harvested/Uploaded/Deleted/Total: 200/200/0/1600, ResumptionToken: r3 [2006-08-31 03:00:35.500] ENDHARVEST [repo1] """) finally: r.close() t = ThroughputAnalyser(eventpath = self.testdir) records, seconds = t._analyseRepository('repo1', '2006-08-31') self.assertEqual(600, records) self.assertEqual(76.5, seconds) def testAnalyseNonExistingRepository(self): t = ThroughputAnalyser(eventpath = self.testdir) records, seconds = t._analyseRepository('repository', '2006-08-31') self.assertEqual(0, records) self.assertEqual(0.0, seconds) def testReportOnEmptyEventsFile(self): t = ThroughputAnalyser(eventpath = self.testdir) records, seconds = t._analyseRepository('repo1', '2006-08-31') self.assertEqual(0, records) self.assertEqual(0, seconds) def testReport(self): report = ThroughputReport() report.add(100000,10000.0) self.assertEqual('10.00', report.recordsPerSecond()) self.assertEqual('864000', report.recordsPerDay()) self.assertEqual("02:46:40", report.hmsString()) #Mock self shunt def mockAnalyseRepository(self, repositoryName, dateSince): self.mockAnalyseRepository_arguments.append(repositoryName) return 500, 1000.0
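ThroughputReport usage exactly as the final test drives it; it assumes only that meresco.harvester is importable, and the expected strings are copied from the assertions above.

from meresco.harvester.throughputanalyser import ThroughputReport

report = ThroughputReport()
report.add(100000, 10000.0)       # 100,000 records in 10,000 seconds
print(report.recordsPerSecond())  # '10.00'
print(report.recordsPerDay())     # '864000'
print(report.hmsString())         # '02:46:40'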
qguv/config
weechat/plugins/python/confversion.py
Python
gpl-3.0
4,417
0.010414
# -*- coding: utf-8 -*- # # Copyright (c) 2010-2010 by drubin <drubin at smartcube.co.za> # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # Allows you to visually see if there are updates to your weechat system #Versions # 0.1 drubin - First release. # - Basic functionality to save version history of your config files (only git, bzr) # 0.2 ShockkPony - Fixed massive weechat startup time caused by initial config loading SCRIPT_NAME = "confversion" SCRIPT_AUTHOR = "drubin <drubin at smartcube.co.za>" SCRIPT_VERSION = "0.2" SCRIPT_LICENSE = "GPL3" SCRIPT_DESC = "Stores version-controlled history of your configuration files" import_ok = True import subprocess try: import weechat except ImportError: print "This script must be run under WeeChat." print "Get WeeChat now at: http://www.weechat.org/" import_ok = False # script options settings = { #Currently supports git and bzr and possibly others that support a simple "init", "add *.conf", "commit -m "message"" "versioning_method" : "git", "commit_each_change" : "true", "commit_message" : "Committing changes", #Allows you to not auto commit stuff that relates to these configs #, (comma) separated list of config options #The toggle_nicklist script can make this property annoying. "auto_commit_ignore" : "weechat.bar.nicklist.hidden", } def shell_in_home(cmd): try: output = file("/dev/null","w") subprocess.Popen(ver_method()+" "+cmd, cwd = weechat_home(), stdout= output, stderr=output, shell=True) except Exception as e: print e def weechat_home(): return weechat.info_get ("weechat_dir", "") def ver_method(): return weechat.config_get_plugin("versioning_method") def init_repo(): #Set up version control (doesn't matter if previously setup for bzr, git) shell_in_home("init") #Save first import OR on start up if needed. commit_cb() confversion_commit_finish_hook = 0 def commit_cb(data=None, remaining=None): global confv
ersion_commit_finish_hook # only hook timer if not already hook
ed if confversion_commit_finish_hook == 0: confversion_commit_finish_hook = weechat.hook_timer(500, 0, 1, "commit_cb_finish", "") return weechat.WEECHAT_RC_OK def commit_cb_finish(data=None, remaining=None): global confversion_commit_finish_hook # save before doing commit weechat.command("","/save") # add all config changes to git shell_in_home("add ./*.conf") # do the commit shell_in_home("commit -m \"%s\"" % weechat.config_get_plugin("commit_message")) # set hook back to 0 confversion_commit_finish_hook = 0 return weechat.WEECHAT_RC_OK def conf_update_cb(data, option, value): #Commit data if not part of ignore list. if weechat.config_get_plugin("commit_each_change") == "true" and not option in weechat.config_get_plugin("auto_commit_ignore").split(","): # Use a short pause, else /save would be called before the config is actually saved to disk. # This is a bit of a hack; a better approach would be appreciated. weechat.hook_timer(500, 0, 1, "commit_cb", "") return weechat.WEECHAT_RC_OK def confversion_cmd(data, buffer, args): commit_cb() return weechat.WEECHAT_RC_OK if weechat.register(SCRIPT_NAME, SCRIPT_AUTHOR, SCRIPT_VERSION, SCRIPT_LICENSE, SCRIPT_DESC, "", ""): for option, default_value in settings.iteritems(): if weechat.config_get_plugin(option) == "": weechat.config_set_plugin(option, default_value) weechat.hook_command("confversion", "Saves configurations to version control", "", "", "", "confversion_cmd", "") init_repo() hook = weechat.hook_config("*", "conf_update_cb", "")
nmercier/linux-cross-gcc
win32/bin/Lib/email/errors.py
Python
bsd-3-clause
1,685
0.005341
# Copyright (C) 2001-2006 Python Software Foundation # Author: Barry Warsaw # Contact: email-sig@python.org """email package exception classes.""" class MessageError(Exception): """Base class for errors in the email package.""" class MessageParseError(MessageError): """Base class for message parsing errors.""" class HeaderParseError(MessageParseError): """Error while parsing headers.""" class BoundaryError(MessageParseError): """Couldn't find terminating boundary.""" class MultipartConversionError(MessageError, TypeError): """Conversion to a multipart is prohibited.""" class CharsetError(MessageError): """An illegal charset was given.""" # These are parsing defects which the parser was able to work around. class MessageDefect: """Base class for a message defect.""" def __init__(self, line=None): self.line = line class NoBoundaryInMultipartDefect(MessageDefect): """A message claimed to be a multipart but had no boundary p
arameter.""" class StartBoundaryNotFoundDefect(MessageDefect): """The claimed start boundary was never found.""" class FirstHeaderLineIsContinuationDefect(MessageDefect): """A message had a continuation line as its first header line.""" class MisplacedEnvelopeHeaderDefect(MessageDefect): """A 'Unix-from' header was found in the middle of a header block.""" class MalformedHeaderDefect(MessageDefect): """Found a header that was missi
ng a colon, or was otherwise malformed.""" class MultipartInvariantViolationDefect(MessageDefect): """A message claimed to be a multipart but no subparts were found."""
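A small demonstration of the dual inheritance above: MultipartConversionError is both a MessageError and a TypeError, so either except clause catches it. Stdlib-only, nothing assumed beyond the classes shown.

from email.errors import MessageError, MultipartConversionError

assert issubclass(MultipartConversionError, MessageError)
try:
    raise MultipartConversionError('payload is already a multipart')
except TypeError as e:
    print('caught as TypeError:', e)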
bendykst/deluge
deluge/core/eventmanager.py
Python
gpl-3.0
2,071
0.001931
# -*- coding: utf-8 -*- # # Copyright (C) 2009 Andrew Resch <andrewresch@gmail.com> # # This file is part of Deluge and is licensed under GNU General Public License 3.0, or later, with # the additional special exception to link portions of this program with the OpenSSL library. # See LICENSE for more details. # import logging import deluge.component as component log = logging.getLogger(__name__) class EventManager(component.Component): def __init__(self): component.Component.__init__(self, "EventManager") self.handlers = {} def emit(self, event): """ Emits the event to interested clients. :param event: DelugeEvent """ # Emit the event to the interested clients component.get("RPCServer").emit_event(event) # Call any handlers for the event if event.name in self.handlers: for handler in self.handlers[event.n
ame]: # log.debug("Running handler %s for event %s with args: %s", event.name, handler, event.args) try: hand
ler(*event.args) except Exception as ex: log.error("Event handler %s failed in %s with exception %s", event.name, handler, ex) def register_event_handler(self, event, handler): """ Registers a function to be called when a `:param:event` is emitted. :param event: str, the event name :param handler: function, to be called when `:param:event` is emitted """ if event not in self.handlers: self.handlers[event] = [] if handler not in self.handlers[event]: self.handlers[event].append(handler) def deregister_event_handler(self, event, handler): """ Deregisters an event handler function. :param event: str, the event name :param handler: function, currently registered to handle `:param:event` """ if event in self.handlers and handler in self.handlers[event]: self.handlers[event].remove(handler)
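A standalone sketch of the register-then-emit contract implemented above, with the RPCServer broadcast skipped and FakeEvent/on_torrent_added invented for illustration; it mirrors EventManager's dispatch loop without needing a running deluge daemon.

class FakeEvent(object):
    name = 'TorrentAddedEvent'
    args = ('torrent-id-1',)

handlers = {}

def register_event_handler(event, handler):
    # Same de-duplicating registration as EventManager.register_event_handler.
    handlers.setdefault(event, [])
    if handler not in handlers[event]:
        handlers[event].append(handler)

def on_torrent_added(torrent_id):
    print('added', torrent_id)

register_event_handler(FakeEvent.name, on_torrent_added)
for handler in handlers.get(FakeEvent.name, []):
    handler(*FakeEvent.args)  # mirrors EventManager.emit's dispatch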
PLNech/thefuck
thefuck/shells/generic.py
Python
mit
2,400
0.000417
import io import os import shlex import six from ..utils import memoize from ..conf import settings class Generic(object): def get_aliases(self): return {} def _expand_aliases(self, command_script): aliases = self.get_aliases() binary = command_script.split(' ')[0] if binary in aliases: return command_script.replace(binary, aliases[binary], 1) else: return command_script def from_shell(self, command_script): """Prepares command before running in app.""" return self._expand_aliases(command_script) def to_shell(self, command_script): """Prepares command for running in shell.""" return command_script def app_alias(self, fuck): return "alias {0}='eval $(TF_ALIAS={0} PYTHONIOENCODING=utf-8 " \ "thefuck $(fc -ln -1))'".format(fuck) def _get_history_file_name(self): return '' def _get_history_line(self, command_script): return '' @memoize def get_history(self): return list(self._get_history_lines()) def _
get_history_lines(self): """Returns list of history entries.""" history_fi
le_name = self._get_history_file_name() if os.path.isfile(history_file_name): with io.open(history_file_name, 'r', encoding='utf-8', errors='ignore') as history_file: lines = history_file.readlines() if settings.history_limit: lines = lines[-settings.history_limit:] for line in lines: prepared = self._script_from_history(line) \ .strip() if prepared: yield prepared def and_(self, *commands): return u' && '.join(commands) def how_to_configure(self): return def split_command(self, command): """Split the command using shell-like syntax.""" if six.PY2: return [s.decode('utf8') for s in shlex.split(command.encode('utf8'))] return shlex.split(command) def quote(self, s): """Return a shell-escaped version of the string s.""" if six.PY2: from pipes import quote else: from shlex import quote return quote(s) def _script_from_history(self, line): return line
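The split/quote round trip that Generic.split_command and Generic.quote delegate to, shown directly against Python 3's shlex (on Python 2 the methods take the pipes.quote and encode/decode path instead).

import shlex

parts = shlex.split("git commit -m 'fix: handle spaces'")
print(parts)  # ['git', 'commit', '-m', 'fix: handle spaces']
print(' '.join(shlex.quote(p) for p in parts))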
kg-bot/SupyBot
plugins/Gateway/gwcred.py
Python
gpl-3.0
5,416
0.003693
### # Copyright (c) 2005, Ali Afshar # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # * Redistributions of source code must retain the above copyright notice, # this list of conditions, and the following disclaimer. # * Redistributions in binary form must reproduce the above copyright notice, # this list of conditions, and the following disclaimer in the # documentation and/or other materials provided with the distribution. # * Neither the name of the author of this software nor the name of # contributors to this software may be used to endorse or promote products # derived from this software without specific prior written consent. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE # LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR # CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF # SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN # CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. ### from twisted.cred import portal, checkers, credentials, error from twisted.conch.checkers import SSHPublicKeyDatabase from twisted.conch.credentials import ISSHPrivateKey from twisted.python import failure from twisted.internet import defer from twisted.conch.ssh import keys class SBCredChecker(object): """ SSH Username and Password Credential checker """ # this implements line tells the portal that we can handle un/pw __implements__ = (checkers.ICredentialsChecker,) credentialInterfaces = (credentials.IUsernamePassword,) def requestAvatarId(self, credentials): self.cb.log.debug('twisted checker checking %s', credentials.username) """ Return an avatar id or return an error """ a = self.cb.getUser(protocol=self.cb.PROTOCOL, username=credentials.username, password=credentials.password, peer=credentials.peer) if a: return a else: return failure.Failure(error.UnauthorizedLogin()) class SBPublicKeyChecker(object): """ Pub
lic key checker """ __implements__ = (checkers.ICredentialsChecker,) credentialInterfaces = (ISSHPrivateKey,) def requestAvatarId(self, credentials): a = self.cb.getUser(protocol=self.cb.PROTOCOL, username=credentials.username, blob=credentials.blob, peer=credentials.peer) #except: # pass if a: return a els
e: return failure.Failure(error.UnauthorizedLogin()) #class SBPublicKeyChecker(SSHPublicKeyDatabase): # credentialInterfaces = ISSHPrivateKey, # __implements__ = ICredentialsChecker # # def requestAvatarId(self, credentials): # if not self.checkKey(credentials): # return defer.fail(UnauthorizedLogin()) # if not credentials.signature: # return defer.fail(error.ValidPublicKey()) # else: # try: # pubKey = keys.getPublicKeyObject(data = credentials.blob) # if keys.verifySignature(pubKey, credentials.signature, # credentials.sigData): # return defer.succeed(credentials.username) # except: # pass # return defer.fail(UnauthorizedLogin()) # # def checkKey(self, credentials): # sshDir = os.path.expanduser('~%s/.ssh/' % credentials.username) # if sshDir.startswith('~'): # didn't expand # return 0 # uid, gid = os.geteuid(), os.getegid() # ouid, ogid = pwd.getpwnam(credentials.username)[2:4] # os.setegid(0) # os.seteuid(0) # os.setegid(ogid) # os.seteuid(ouid) # for name in ['authorized_keys2', 'authorized_keys']: # if not os.path.exists(sshDir+name): # continue # lines = open(sshDir+name).xreadlines() # os.setegid(0) # os.seteuid(0) # os.setegid(gid) # os.seteuid(uid) # for l in lines: # l2 = l.split() # if len(l2) < 2: # continue # try: # if base64.decodestring(l2[1]) == credentials.blob: # return 1 # except binascii.Error: # continue # return 0 class SBPortal(portal.Portal): pass class SBRealm: __implements__ = portal.IRealm def __init__(self, userclass): self.userclass = userclass def requestAvatar(self, avatarId, mind, *interfaces): self.cb.cb.log.critical('%s', interfaces) av = self.userclass(avatarId) av.cb = self.cb return interfaces[0], av, lambda: None # vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79:
deets/raspberry-racer
python/setup.py
Python
mit
559
0.014311
from setuptools import setup, find_packages setup( name="RaspberryRacer", version="0.1", description="Raspberry Racer", author="Diez B. Roggisch", author_email="deets@web.de", entry_points= { 'console_scripts' : [ 'rracer = rracer.main:main', ]}, install_requires = [
], zip_safe=True, packages=find_pac
kages(), classifiers = [ 'Development Status :: 3 - Alpha', 'Operating System :: OS Independent', 'Programming Language :: Python', ], )
inbloom/legacy-projects
lri-middleware/ccss/insert.py
Python
apache-2.0
56,321
0.003196
############################################################################### # Insert ############################################################################### # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from bs4 import BeautifulSoup import json import os.path import random import requests import web from xml.sax import saxutils import httpconfig class InsertFactory(object): def CreateInsert(self, ccssType, action, opts, data): web.debug("CreateInsert") web.debug(" ccssType=%s" % ccssType) web.debug(" action=%s" % action) web.debug(" opts=%r" % opts) web.debug(" data=%r" % data) web.debug("CreateInsert: ") web.debug("CreateInsert: data = %s" % data) print("CreateInsert: data = %s" % data) insert = None if ccssType == "/initiative": if action == "/create": insert = InitiativeCreate(opts, data) elif action == "/update": insert = InitiativeUpdate(opts, data) elif ccssType == "/framework": if action == "/create": insert = FrameworkCreate(opts, data) elif action == "/update": insert = FrameworkUpdate(opts, data) elif ccssType == "/set": if action == "/create": insert = SetCreate(opts, data) elif action == "/update": insert = SetUpdate(opts, data) elif ccssType == "/grade_level": if action == "/create": insert = GradeCreate(opts, data) elif action == "/update": insert = GradeUpdate(opts, data) elif ccssType == "/domain": if action == "/create": insert = DomainCreate(opts, data) elif action == "/update": insert = DomainUpdate(opts, data) elif ccssType == "/cluster": if action == "/create": insert = ClusterCreate(opts, data) elif action == "/update": insert = ClusterUpdate(opts, data) elif ccssType == "/standard": if action == "/create": insert = StandardCreate(opts, data) elif action == "/update": insert = StandardUpdate(opts, data) else: raise web.NoMethod() elif ccssType == "/standard_component": if action == "/create": insert = ComponentCreate(opts, data) elif action == "/update": insert = ComponentUpdate(opts, data) elif ccssType == "/strand": if action == "/create": insert = StrandCreate(opts, data) elif action == "/update": insert = StrandUpdate(opts, data) elif ccssType == "/anchor_standard_section": if action == "/create": insert = SectionCreate(opts, data) elif action == "/update": insert = Section
Update(opts, data) elif ccssType == "/anchor_standard": if action == "/create": insert = AnchorCreate(opts, data) elif action == "/update": insert = AnchorUpdate(opts, data) elif ccssType == "/competency_path": web.debug("CreateInsert: Matched on type: %s" % ccssType) if action == "/create": web.debug("CreateInsert: Mat
ched on action: %s" % action) insert = PathCreate(opts, data) else: raise web.NoMethod() elif ccssType == "/learning_resource": if action == "/create": insert = ResourceCreate(opts, data) else: raise web.NoMethod() elif ccssType == "/competency_container": if action == "/create": insert = ContainerCreate(opts, data) elif action == "/update": insert = ContainerUpdate(opts, data) elif ccssType == "/competency": if action == "/create": insert = CompetencyCreate(opts, data) elif action == "/update": insert = CompetencyUpdate(opts, data) elif ccssType == "/strand": if action == "/create": insert = StrandCreate(opts, data) elif action == "/update": insert = StrandUpdate(opts, data) else: raise web.NoMethod() web.debug("CreateInsert: insert = %r" % insert) return insert class Insert(object): """Base class for inserts""" def __init__(self, type, opts, data, httpConfig=None): web.debug("Insert.__init__") self.type = type self.path = "/entity/create" self.opts = opts if not "access_token" in opts: opts["access_token"] = "letmein" if not "admin_access_tokens" in opts: opts["admin_access_tokens"] = {"letmein":"LRI_ADMIN_USER_0"} self.data = data self.returnFormat = opts.get("format", "xml") self.inserts = [] self.httpConfig = httpConfig if self.httpConfig is None: self.httpConfig = httpconfig.HttpConfig(web.ctx.env["DOCUMENT_ROOT"]) def __repr__(self): return "Insert(%s, %r, %r, httpConfig=%r)" % (self.type, self.opts, self.data, self.httpConfig) def __str__(self): return """Insert: type=%s, opts=%r, data=%r, returnFormat=%s, inserts=%r, httpConfig=%r, urls=%r""" % (self.type, self.opts, self.data, self.returnFormat, self.inserts, self.httpConfig, self.getUrls()) def buildUrn(self, parts): parts.insert(0, "urn") return ":".join(parts) def buildId(self, namespace, uid): parts = (namespace, self.type, uid) return self.buildUrn(parts) def toUrlForm(self, insert): decodedOpts = json.dumps(self.opts) web.debug("Insert.toUrlForm: decodedOpts = %s" % decodedOpts) self.url = "http://%s:%d%s?q=%s&opts=%s" % (self.httpConfig.config["serverhost"], self.httpConfig.config["serverport"], self.path, insert, decodedOpts) return self.url def getUrls(self): """Returns URL that can be sent to LRI server""" urls = [] for insert in self.inserts: urls.append(self.toUrlForm(insert)) return urls def getBaseProps(self, soup): """Extract common LRI properties from XML""" props = {} try: key = "urn:lri:property_type:contains" props["children"] = [x.getText().strip() for x in soup.find(key=key).find_all("value")] except AttributeError, e: web.debug("Key not found: %s: %r" % (key, e)) props["creator"] = soup.find(key="urn:lri:property_type:creator").getText().strip() props["desc"] = soup.find(key="urn:lri:property_type:description").getText().strip() props["id"] = soup.find(key="urn:lri:property_type:id").getText().strip() props["name"] = soup.find(key="urn:lri:property_type:name").getText().strip() props["uid"] = soup.find(key="uid").getText().strip() web.debug("Insert.getB
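A standalone restatement of buildUrn/buildId from the record, with illustrative values. Note that buildId as written passes a tuple into buildUrn, whose parts.insert(...) only exists on lists, so it would raise AttributeError at runtime; a list, as below, behaves as intended.

def build_urn(parts):
    # Same logic as Insert.buildUrn, but fed a list so insert() works.
    parts.insert(0, 'urn')
    return ':'.join(parts)

print(build_urn(['lri', 'standard', '123']))  # urn:lri:standard:123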
snowch/spark-cloudant
test/helpers/dataload.py
Python
apache-2.0
3,656
0.033643
#******************************************************************************* # Copyright (c) 2015 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. #
You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. #******************************************************************
************/ import requests import sys import os import json from helpers.dbutils import CloudantDbUtils from helpers.acmeair_utils import AcmeAirUtils import conftest # get the cloudant credentials from pytest config file test_properties = conftest.test_properties() class DataLoader: """ Test data loader related functions """ def load_AcmeData(self, num_of_cust): """ Reset databases and use the AcmeAir database loader to populate initial customer, flight and airportmapping data. Does NOT generate user data like bookings. """ print ("num_of_cust: ", num_of_cust) acmeair = AcmeAirUtils() try: if acmeair.is_acmeair_running() != 0: raise RuntimeError(""" AcmeAir is already running which may cause unexpected results when resetting databases. Please shut down the app and try again. """) else: cloudantUtils = CloudantDbUtils(test_properties) cloudantUtils.reset_databases() acmeair.start_acmeair() acmeair.load_data(num_of_cust) finally: acmeair.stop_acmeair() def remove_AcmeDb(self, num_of_cust): """ Drop all AcmeAir databases """ acmeair = AcmeAirUtils() if acmeair.is_acmeair_running() != 0: acmeair.stop_acmeair() cloudantUtils = CloudantDbUtils(test_properties) cloudantUtils.drop_all_databases() def load_SpecCharValuePredicateData(self): """ Create booking data needed to test SpecCharValuePredicate """ try: acmeair = AcmeAirUtils() acmeair.start_acmeair() # book flights AA93 and AA330 flight1 = "AA93" flight2 = "AA330" # Step#1 - need to find the flight's generated _id required for booking flight1_id = acmeair.get_flightId_by_number(flight1) print ("{} id = {}".format(flight1, flight1_id)) flight2_id = acmeair.get_flightId_by_number(flight2) print ("{} id = {}".format(flight2, flight2_id)) # Step#2 - add the booking acmeair.book_flights("uid0@email.com", flight1, flight2) finally: acmeair.stop_acmeair() if __name__ == '__main__': """ Utility to create test databases and load data """ import argparse parser = argparse.ArgumentParser(description="Utility to load AcmeAir data required for python spark-cloudant tests") group = parser.add_mutually_exclusive_group(required=True) group.add_argument('-cleanup', action='store_true', help='Drop all test databases') group.add_argument('-load', help='Reset and Load databases with the given # of users. -load 0 to just recreate databases and indexes.', type=int) args = parser.parse_args() dataloader = DataLoader() if args.load is not None: if args.load == 0: cloudantUtils = CloudantDbUtils(test_properties) cloudantUtils.reset_databases() else: dataloader.load_AcmeData(args.load) dataloader.load_SpecCharValuePredicateData() elif args.cleanup: cloudantUtils = CloudantDbUtils(test_properties) cloudantUtils.drop_all_databases()
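For reference, the command lines the __main__ block above accepts, read off its argparse setup:

# python dataload.py -load 100   -> reset DBs, load 100 customers, add bookings
# python dataload.py -load 0     -> just recreate databases and indexes
# python dataload.py -cleanup    -> drop all test databases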
rahulunair/nova
nova/objects/network.py
Python
apache-2.0
9,406
0
# Copyright 2014 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import netaddr from oslo_utils import versionutils import nova.conf from nova.db import api as db from nova import exception from nova.i18n import _ from nova import objects from nova.objects import base as obj_base from nova.objects import fields CONF = nova.conf.CONF # TODO(berrange): Remove NovaObjectDictCompat @obj_base.NovaObjectRegistry.register class Network(obj_base.NovaPersistentObject, obj_base.NovaObject, obj_base.NovaObjectDictCompat): # Version 1.0: Initial version # Version 1.1: Added in_use_on_host() # Version 1.2: Added mtu, dhcp_server, enable_dhcp, share_address VERSION = '1.2' fields = { 'id': fields.IntegerField(), 'label': fields.StringField(), 'injected': fields.BooleanField(), 'cidr': fields.IPV4NetworkField(nullable=True), 'cidr_v6': fields.IPV6NetworkField(nullable=True), 'multi_host': fields.BooleanField(), 'netmask': fields.IPV4AddressField(nullable=True), 'gateway': fields.IPV4AddressField(nullable=True), 'broadcast': fields.IPV4AddressField(nullable=True), 'netmask_v6': fields.IPV6AddressField(nullable=True), 'gateway_v6': fields.IPV6AddressField(nullable=True), 'bridge': fields.StringField(nullable=True), 'bridge_interface': fields.StringField(nullable=True), 'dns1': fields.IPAddressField(nullable=True), 'dns2': fields.IPAddressField(nullable=True), 'vlan': fields.IntegerField(nullable=True), 'vpn_public_address': fields.IPAddressField(nullable=True), 'vpn_public_port': fields.IntegerField(nullable=True), 'vpn_private_address': fields.IPAddressField(nullable=True), 'dhcp_start': fields.IPV4AddressField(nullable=True), 'rxtx_base': fields.IntegerField(nullable=True), 'project_id': fields.UUIDField(nullable=True), 'priority': fields.IntegerField(nullable=True), 'host': fields.StringField(nullable=True), 'uuid': fields.UUIDField(), 'mtu': fields.IntegerField(nullable=True), 'dhcp_server': fields.IPAddressField(nullable=True), 'enable_dhcp': fields.BooleanField(), 'share_address': fields.BooleanField(), } @staticmethod def _convert_legacy_ipv6_netmask(netmask): """Handle netmask_v6 possibilities from the database. Historically, this was stored as just an integral CIDR prefix, but in the future it should be stored as an actual netmask. Be tolerant of either here. 
""" try: prefix = int(netmask) return netaddr.IPNetwork('1::/%i' % prefix).netmask except ValueError: pass try: return netaddr.IPNetwork(netmask).netmask except netaddr.AddrFormatError: raise ValueError(_('IPv6 netmask "%s" must be a netmask ' 'or integral prefix') % netmask) def obj_make_compatible(self, primitive, target_version): target_version = versionutils.convert_version_to_tuple(target_version) if target_version < (1, 2): if 'mtu' in primitive: del primitive['mtu'] if 'enable_dhcp' in primitive: del primitive['enable_dhcp'] if 'dhcp_server' in primitive: del primitive['dhcp_server'] if 'share_address' in primitive: del primitive['share_address'] @staticmethod def _from_db_object(context, network, db_network): for field in network.fields: db_value = db_network[field] if field == 'netmask_v6' and db_value is not None: db_value = network._convert_legacy_ipv6_netmask(db_value) elif field == 'dhcp_server' and db_value is None: db_value = db_network['gateway'] network[field] = db_value network._context = context network.obj_reset_changes() return network @obj_base.remotable_classmethod def get_by_id(cls, context, network_id, project_only='allow_none'): db_network = db.network_get(context, network_id, project_only=project_only) return c
ls._from_db_object(context, cls(), db_network) @obj_base.remotable_classmethod def get_by_uuid(cls, context, network_uuid): db_network = db.network_get_by_uuid(context, network_uuid) return cls._from_db_object(context, cls(), db_network) @obj_base.remotable_classmethod def get_by_cidr(cls, context, cidr): db_network = db.network_get_by_cidr(context, cidr) return cls._from_db_object(context, cls(), db_network) # TODO(stephenfin): This is no longer used
and can be removed @obj_base.remotable_classmethod def associate(cls, context, project_id, network_id=None, force=False): db.network_associate(context, project_id, network_id=network_id, force=force) # TODO(stephenfin): This is no longer used and can be removed @obj_base.remotable_classmethod def disassociate(cls, context, network_id, host=False, project=False): db.network_disassociate(context, network_id, host, project) @obj_base.remotable_classmethod def in_use_on_host(cls, context, network_id, host): return db.network_in_use_on_host(context, network_id, host) def _get_primitive_changes(self): changes = {} for key, value in self.obj_get_changes().items(): if isinstance(value, netaddr.IPAddress): changes[key] = str(value) else: changes[key] = value return changes @obj_base.remotable def create(self): updates = self._get_primitive_changes() if 'id' in updates: raise exception.ObjectActionError(action='create', reason='already created') db_network = db.network_create_safe(self._context, updates) self._from_db_object(self._context, self, db_network) @obj_base.remotable def destroy(self): db.network_delete_safe(self._context, self.id) self.deleted = True self.obj_reset_changes(['deleted']) @obj_base.remotable def save(self): context = self._context updates = self._get_primitive_changes() if 'netmask_v6' in updates: # NOTE(danms): For some reason, historical code stores the # IPv6 netmask as just the CIDR mask length, so convert that # back here before saving for now. updates['netmask_v6'] = netaddr.IPNetwork( updates['netmask_v6']).netmask set_host = 'host' in updates if set_host: db.network_set_host(context, self.id, updates.pop('host')) if updates: db_network = db.network_update(context, self.id, updates) elif set_host: db_network = db.network_get(context, self.id) else: db_network = None if db_network is not None: self._from_db_object(context, self, db_network) @obj_base.NovaObjectRegistry.register class NetworkList(obj_base.ObjectListBase, obj_base.NovaObject): # Version 1.0: Initial version # Version 1.1: Added get_by_project() # Version 1.2: Network <= version 1.2 VERSION = '1.2' fields = { 'objects': fields.ListOfObjectsField('Network'), } @obj_base.remotable_classmethod def get_all(cls, context, project_only='
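The legacy netmask_v6 input that _convert_legacy_ipv6_netmask tolerates, evaluated with netaddr directly; the expression mirrors the method's first branch, and the expected mask follows from a /64 prefix.

import netaddr

prefix = int('64')  # legacy integral-prefix form from the database
print(netaddr.IPNetwork('1::/%i' % prefix).netmask)  # ffff:ffff:ffff:ffff::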
noyainrain/micro
micro/error.py
Python
lgpl-3.0
1,216
0.007401
# micro # Copyright (C) 2021 micro contributors # # This program is free software: you can redistribute it and/or modify it under the terms of the GNU # Les
ser General Public License as published by the Free Software Foundation, either version 3 of # the License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without # even the implied warranty
of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License along with this program. # If not, see <http://www.gnu.org/licenses/>. """micro errors.""" import builtins from typing import Dict class Error(Exception): """Base for micro errors.""" def json(self) -> Dict[str, object]: """Return a JSON representation of the error.""" return {'__type__': type(self).__name__, 'message': str(self)} class ValueError(builtins.ValueError, Error): """See :ref:`ValueError`.""" class AuthenticationError(Error): """See :ref:`AuthenticationError`.""" class PermissionError(Error): """See :ref:`PermissionError`."""
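What Error.json() yields for the module's ValueError subclass, assuming the micro package is importable; the dict shape is read straight from Error.json() above.

from micro.error import ValueError as MicroValueError

err = MicroValueError('text is too long')
print(err.json())  # {'__type__': 'ValueError', 'message': 'text is too long'}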
joeythesaint/yocto-autobuilder
lib/python2.7/site-packages/autobuilder/buildsteps/BuildImages.py
Python
gpl-2.0
2,787
0.0061
''' Created on Jan 6, 2013 __author__ = "Elizabeth 'pidge' Flanagan" __copyright__ = "Copyright 2012-2013, Intel Corp." __credits__ = ["Elizabeth Flanagan"] __license__ = "GPL" __version__ = "2.0" __maintainer__ = "Elizabeth Flanagan" __email__ = "elizabeth.flanagan@intel.com" ''' from buildbot.steps.shell import ShellCommand from buildbot.process.buildstep import LogLineObserver from distutils.version import StrictVersion import os class BuildImages(ShellCommand): haltOnFailure = False flunkOnFailure = True name = "BuildImages" def __init__(self, factory, argdict=None, **kwargs): self.layerversion_yoctobsp=None self.machine="" self.images="" self._pendingLogObservers = [] self.factory = factory for k, v in argdict.iteritems(): setattr(self, k, v) # Timeout needs to be passed to LoggingBuildStep as a kwarg self.timeout = 100000 kwargs['timeout']=self.timeout ShellCommand.__init__(self, **kwargs) def start(self): self.layerversion_yoctobsp = self.getProperty("layerversion_yoctobsp") self.layerversion_core = self.getProperty("layerversion_core") self.machine = self.getProperty("MACHINE") # core-image-basic rename # See: http://git.yoctoproject.org/cgit/cgit.cgi/poky/commit/?id=b7f1cca517bbd4191828c6bae32e0c5041f1ff19 # I hate making people change their configs, so support both. if self.layerversion_core < "4": self.images=self.images.replace("core-image-full-cmdline", "core-image-basic") else: self.images=self.images.replace("core-image-basic", "core-image-full-cmdline") if self.layerversion_yoctobsp is not None and int(self.layerversion_yoctobsp) < 2 and self.machine is not None and self.machine == "genericx86-64": self.command = "echo 'Skipping Step.'" else: bitbakeflags = "-k " # -w only exists in bitbake 1.25 and newer, use distroversion string a
nd make sure we're on poky >1.7 if self.getProperty('bitbakeversion') and StrictVersion(self.getProperty('bitbakeversion')) >= StrictVersion("1.25"): bitbakeflags += "-w " self.command = ". ./oe-init-build-env; bitbake " + bitbakeflags + self.images self.description = ["Building " +
str(self.images)] ShellCommand.start(self) def describe(self, done=False): description = ShellCommand.describe(self, done) if self.layerversion_yoctobsp is not None and int(self.layerversion_yoctobsp) < 2 and self.machine is not None and self.machine == "genericx86-64": description.append("genericx86-64 does not exist in this branch. Skipping") return description
cangencer/hazelcast-python-client
hazelcast/protocol/codec/replicated_map_remove_entry_listener_codec.py
Python
apache-2.0
1,220
0.001639
from hazelcast.serialization.bits import * from hazelcast.protocol.client_message import ClientMessage from hazelcast.protocol.custom_codec import * from hazelcast.util import ImmutableLazyDataList from hazelcast.protocol.codec.replicated_map_message_type import * REQUEST_TYPE = REPLICATEDMAP_REMOVEENTRYLISTENER RESPONSE_TYPE = 101 RETRYABLE = True def calculate_size(name, registration_id): """ Calculates the request pay
load size""" data_size = 0 data_size += calculate_
size_str(name) data_size += calculate_size_str(registration_id) return data_size def encode_request(name, registration_id): """ Encode request into client_message""" client_message = ClientMessage(payload_size=calculate_size(name, registration_id)) client_message.set_message_type(REQUEST_TYPE) client_message.set_retryable(RETRYABLE) client_message.append_str(name) client_message.append_str(registration_id) client_message.update_frame_length() return client_message def decode_response(client_message, to_object=None): """ Decode response from client message""" parameters = dict(response=None) parameters['response'] = client_message.read_bool() return parameters
Djacket/djacket
core/backend/repository/admin.py
Python
mit
269
0.003717
from django.contrib import admin from repository.models import Repository, RepositoryAccess, RepositoryStar, R
epositoryFork admin.site.register(Repository) admin.site.register(RepositoryStar) admin.site.register(RepositoryFork) admin.
site.register(RepositoryAccess)
uArm-Developer/pyuarm
pyuarm/__init__.py
Python
mit
215
0.004651
import sys if sys.version > '3': PY3 = True else: PY3 = False from .uarm import UArm, UArmConnectExc
eption from .config import ua_dir, home_dir from .util import get_uarm from .ver
sion import __version__
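The import surface this __init__ re-exports; actually connecting needs uArm hardware on a serial port, and the no-argument constructor is an assumption, so only the failure path is expected to run here.

from pyuarm import UArm, UArmConnectException, __version__

print(__version__)
try:
    arm = UArm()  # hypothetical: auto-detect a connected uArm
except UArmConnectException as e:
    print('no uArm attached:', e)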
zentralopensource/zentral
zentral/core/compliance_checks/__init__.py
Python
apache-2.0
1,092
0.001832
import logging from zentral.core.exceptions import ImproperlyConfigured default_app_config = "zentral.core.compliance_checks.apps.ZentralComplianceChecksAppConfig" logger = logging.getLogger("zentral.core.compliance_checks") # compliance checks classes compliance_check_classes = {} def register_compliance_check_class(compliance_check_class): try: model = compliance_check_class.get_model() except AttributeError: raise ImproperlyConfigured('Not a valid compliance check class') if model in compliance_check_classes: raise Impro
perlyConfigured(f'Compliance check class "{model}" already registered') compliance_check_classes[model] = compliance_check_
class logger.debug('Compliance check class "%s" registered', model) def compliance_check_class_from_model(model): try: return compliance_check_classes[model] except KeyError: logger.error('Unknown compliance check model "%s"', model) # BaseComplianceCheck registered in .compliance_checks return compliance_check_classes["BaseComplianceCheck"]
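A hypothetical registration flow for the registry above, run in this module's namespace; DemoCheck and its get_model() are invented stand-ins for a real compliance check class.

class DemoCheck(object):
    @staticmethod
    def get_model():
        return 'DemoCheck'

register_compliance_check_class(DemoCheck)
assert compliance_check_class_from_model('DemoCheck') is DemoCheck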
eduardomallmann/vim-and-bash
pythonx/UltiSnips/text_objects/_visual.py
Python
gpl-3.0
2,074
0.000965
#!/usr/bin/env python # encoding: utf-8 """A ${VISUAL} placeholder that will use the text that was last visually selected and insert it here. If there was no text visually selected, this will be the empty string. """ import re import textwrap from UltiSnips import _vim from UltiSnips.indent_util import IndentUtil from UltiSnips.text_objects._transformation import TextObjectTransformation from UltiSnips.text_objects._base import NoneditableTextObject _REPLACE_NON_WS = re.compile(r"[^ \t]") class Visual(NoneditableTextObject, TextObjectTransformation): """See module docstring.""" def __init__(self, parent, token): # Find our containing snippet for visual_content snippet = parent while snippet: try: self._text = snippet.visual_content.text self._mode = snippet.visual_content.mode break except AttributeError: snippet = snippet._parent # pylint:disable=protected-access if not self._text: self._text = token.alternative_text self._mode = "v" NoneditableTextObject.__init__(self, parent, token) TextObjectTransformation.__init__(self, token) def _update(self, done): if self._mode == "v": # Normal selection. text = self._text else: # Block selection or line selection. text_before = _vim.buf[self.start.line][:self.start.col] indent = _REPLACE_NON_WS.sub(" ", text_before) iu = IndentUtil() indent = iu.indent_to_spaces(indent) indent = iu.spaces_to_indent(indent) text = "" for idx, line in enumerate(textwrap.dedent( self._text).splitlines(True)): if idx != 0: text += indent text += line
text = text[:-
1] # Strip final '\n' text = self._transform(text) self.overwrite(text) self._parent._del_child(self) # pylint:disable=protected-access return True
Vdragon/git-cola
qtpy/_version.py
Python
gpl-2.0
72
0
version_info = (1, 4,
2) __version__ = '.'.join(map
(str, version_info))
rosenvladimirov/addons
currency_rate_update_bg/services/currency_getter.py
Python
agpl-3.0
2,676
0.001121
# -*- coding: utf-8 -*-
##############################################################################
#
#    Copyright (c) 2009 CamptoCamp. All rights reserved.
#    @author Nicolas Bessi
#
#    This program is free software: you can redistribute it and/or modify
#    it under the terms of the GNU Affero General Public License as
#    published by the Free Software Foundation, either version 3 of the
#    License, or (at your option) any later version.
#
#    This program is distributed in the hope that it will be useful,
#    but WITHOUT ANY WARRANTY; without even the implied warranty of
#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#    GNU Affero General Public License for more details.
#
#    You should have received a copy of the GNU Affero General Public License
#    along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################

import logging

_logger = logging.getLogger(__name__)


class AbstractClassError(Exception):
    def __str__(self):
        return 'Abstract Class'

    def __repr__(self):
        return 'Abstract Class'


class AbstractMethodError(Exception):
    def __str__(self):
        return 'Abstract Method'

    def __repr__(self):
        return 'Abstract Method'


class UnknowClassError(Exception):
    def __str__(self):
        return 'Unknown Class'

    def __repr__(self):
        return 'Unknown Class'


class UnsuportedCurrencyError(Exception):
    def __init__(self, value):
        self.curr = value

    def __str__(self):
        return 'Unsupported currency %s' % self.curr

    def __repr__(self):
        return 'Unsupported currency %s' % self.curr


class Currency_getter_factory():
    """Factory pattern class that will return a currency getter class
    base on the name passed to the register method
    """

    def register(self, class_name):
        allowed = [
            'CH_ADMIN_getter',
            'PL_NBP_getter',
            'ECB_getter',
            'GOOGLE_getter',
            'YAHOO_getter',
            'MX_BdM_getter',
            'CA_BOC_getter',
            'RO_BNR_getter',
            'BG_CUSTOMS_getter',
            'BG_SIBANK_getter',
            'BG_UNICRDT_getter',
        ]
        if class_name in allowed:
            exec "from .update_service_%s import %s" % (
                class_name.replace('_getter', ''), class_name)
            class_def = eval(class_name)
            _logger.info("from .update_service_%s import %s: class_def %s:" % (
                class_name.replace('_getter', ''), class_name, class_def))
            return class_def()
        else:
            raise UnknowClassError
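A minimal usage sketch for the factory above; it assumes an update_service_ECB module defining an ECB_getter class sits next to this file, which is exactly what register's exec expects:

factory = Currency_getter_factory()
try:
    getter = factory.register('ECB_getter')  # an ECB_getter instance
except UnknowClassError:
    getter = None  # requested name was not in the allowed list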
xieta/mincoin
qa/rpc-tests/test_framework/mininode.py
Python
mit
55,509
0.00191
#!/usr/bin/env python3 # Copyright (c) 2010 ArtForz -- public domain half-a-node # Copyright (c) 2012 Jeff Garzik # Copyright (c) 2010-2016 The Bitcoin Core developers # Copyright (c) 2013-2019 The Mincoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. # # mininode.py - Mincoin P2P network half-a-node # # This python code was modified from ArtForz' public domain half-a-node, as # found in the mini-node branch of http://github.com/jgarzik/pynode. # # NodeConn: an object which manages p2p connectivity to a mincoin node # NodeConnCB: a base class that describes the interface for receiving # callbacks with network messages from a NodeConn # CBlock, CTransaction, CBlockHeader, CTxIn, CTxOut, etc....: # data structures that should map to corresponding structures in # mincoin/primitives # msg_block, msg_tx, msg_headers, etc.: # data structures that represent network messages # ser_*, deser_*: functions that handle serialization/deserialization import struct import socket import asyncore import time import sys import random from .util import hex_str_to_bytes, bytes_to_hex_str from io import BytesIO from codecs import encode import hashlib from threading import RLock from threading import Thread import logging import copy import litecoin_scrypt from test_framework.siphash import siphash256 BIP0031_VERSION = 60000 MY_VERSION = 70014 # past bip-31 for ping/pong MY_SUBVERSION = b"/python-mininode-tester:0.0.3/" MY_RELAY = 1 # from version 70001 onwards, fRelay should be appended to version messages (BIP37) MAX_INV_SZ = 50000 MAX_BLOCK_BASE_SIZE = 1000000 COIN = 100000000 # 1 btc in satoshis NODE_NETWORK = (1 << 0) NODE_GETUTXO = (1 << 1) NODE_BLOOM = (1 << 2) NODE_WITNESS = (1 << 3) # Keep our own socket map for asyncore, so that we can track disconnects # ourselves (to workaround an issue with closing an asyncore socket when # using select) mininode_socket_map = dict() # One lock for synchronizing all data access between the networking thread (see # NetworkThread below) and the thread running the test logic. For simplicity, # NodeConn acquires this lock whenever delivering a message to to a NodeConnCB, # and whenever adding anything to the send buffer (in send_message()). This # lock should be acquired in the thread running the test logic to synchronize # access to any data shared with the NodeConnCB or NodeConn. 
mininode_lock = RLock() # Serialization/deserialization tools def sha256(s): return hashlib.new('sha256', s).digest() def ripemd160(s): return hashlib.new('ripemd160', s).digest() def hash256(s): return sha256(sha256(s)) def ser_compact_size(l): r = b"" if l < 253: r = struct.pack("B", l) elif l < 0x10000: r = struct.pack("<BH", 253, l) elif l < 0x100000000: r = struct.pack("<BI", 254, l) else: r = struct.pack("<BQ", 255, l) return r def deser_compact_size(f): nit = struct.unpack("<B", f.read(1))[0] if nit == 253: nit = struct.unpack("<H", f.read(2))[0] elif nit == 254: nit = struct.unpack("<I", f.read(4))[0] elif nit == 255: nit = struct.unpack("<Q", f.read(8))[0] return nit def deser_string(f): nit = deser_compact_size(f) return f.read(nit) def ser_string(s): return ser_compact_size(len(s)) + s def deser_uint256(f): r = 0 for i in range(8): t = struct.unpack("<I", f.read(4))[0] r += t << (i * 32) return r def ser_uint256(u): rs = b"" for i in range(8): rs += struct.pack("<I", u & 0xFFFFFFFF) u >>= 32 return rs def uint256_from_str(s): r = 0 t = struct.unpack("<IIIIIIII", s[:32]) for i in range(8): r += t[i] << (i * 32) return r def uint256_from_compact(c): nbytes = (c >> 24) & 0xFF v = (c & 0xFFFFFF) << (8 * (nbytes - 3)) return v def deser_vector(f, c): nit = deser_compact_size(f) r = [] for i in range(nit): t = c() t.deserialize(f) r.append(t) return r # ser_function_name: Allow for an alternate serialization function on the # entries in the vector (we use this for serializing the vector of transactions # for a witness block). def ser_vector(l, ser_function_name=None): r = ser_compact_size(len(l)) for i in l: if ser_function_name: r += getattr(i, ser_function_name)() else: r += i.serialize() return r def deser_uint256_vector(f): nit = deser_compact_size(f) r = [] for i in range(nit): t = deser_uint256(f) r.append(t) return r def ser_uint256_vector(l): r = ser_compact_size(len(l)) for i in l: r += ser_uint256(i) return r def deser_string_vector(f): nit = deser_compact_size(f) r = [] for i in range(nit): t = deser_string(f) r.append(t) ret
urn r def ser_string_vector(l): r = ser_compact_size(len(l)) for sv in l: r += ser_string(sv) return r def deser_int_vector(f): nit = deser_compact_size(f) r = [] for i in range(nit): t = struct.unpack("<i", f.read(4))[0] r.append(t) return r def ser_int_vector(l): r = ser_compact_size(len(l)) for i in l: r += struct.pack("<i", i) return r # Deserialize from a hex str
ing representation (eg from RPC) def FromHex(obj, hex_string): obj.deserialize(BytesIO(hex_str_to_bytes(hex_string))) return obj # Convert a binary-serializable object to hex (eg for submission via RPC) def ToHex(obj): return bytes_to_hex_str(obj.serialize()) # Objects that map to mincoind objects, which can be serialized/deserialized class CAddress(object): def __init__(self): self.nServices = 1 self.pchReserved = b"\x00" * 10 + b"\xff" * 2 self.ip = "0.0.0.0" self.port = 0 def deserialize(self, f): self.nServices = struct.unpack("<Q", f.read(8))[0] self.pchReserved = f.read(12) self.ip = socket.inet_ntoa(f.read(4)) self.port = struct.unpack(">H", f.read(2))[0] def serialize(self): r = b"" r += struct.pack("<Q", self.nServices) r += self.pchReserved r += socket.inet_aton(self.ip) r += struct.pack(">H", self.port) return r def __repr__(self): return "CAddress(nServices=%i ip=%s port=%i)" % (self.nServices, self.ip, self.port) MSG_WITNESS_FLAG = 1<<30 class CInv(object): typemap = { 0: "Error", 1: "TX", 2: "Block", 1|MSG_WITNESS_FLAG: "WitnessTx", 2|MSG_WITNESS_FLAG : "WitnessBlock", 4: "CompactBlock" } def __init__(self, t=0, h=0): self.type = t self.hash = h def deserialize(self, f): self.type = struct.unpack("<i", f.read(4))[0] self.hash = deser_uint256(f) def serialize(self): r = b"" r += struct.pack("<i", self.type) r += ser_uint256(self.hash) return r def __repr__(self): return "CInv(type=%s hash=%064x)" \ % (self.typemap[self.type], self.hash) class CBlockLocator(object): def __init__(self): self.nVersion = MY_VERSION self.vHave = [] def deserialize(self, f): self.nVersion = struct.unpack("<i", f.read(4))[0] self.vHave = deser_uint256_vector(f) def serialize(self): r = b"" r += struct.pack("<i", self.nVersion) r += ser_uint256_vector(self.vHave) return r def __repr__(self): return "CBlockLocator(nVersion=%i vHave=%s)" \ % (self.nVersion, repr(self.vHave)) class COutPoint(object): def __init__(self, hash=0, n=0): self.hash = hash self.n = n def deserialize(self, f): self.hash = deser_uint256(f) self.n = struct.unpack("<I", f.read(4))[0] def serialize(self): r = b"" r += ser_uint256(self.hash) r += struct.pack("<I", self.n) return r def __repr__(
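The record above is cut off partway through its message classes, but the CompactSize helpers near its top are complete and symmetric; a quick round-trip check, reusing the functions defined in that file:

from io import BytesIO

# 515 >= 253, so it takes the two-byte form: b'\xfd\x03\x02'
encoded = ser_compact_size(515)
assert deser_compact_size(BytesIO(encoded)) == 515

# Strings are length-prefixed with the same encoding.
blob = ser_string(b"mininode")
assert deser_string(BytesIO(blob)) == b"mininode"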
okoye/spinnaker-monitoring
spinnaker-monitoring-daemon/spinnaker-monitoring/spectator_handlers.py
Python
apache-2.0
16,780
0.007867
# Copyright 2016 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from datetime import datetime import json import command_processor import http_server import spectator_client def millis_to_time(millis): """Convert milliseconds to a time string.""" return datetime.fromtimestamp(millis / 1000).isoformat('T') + 'Z' def strip_non_html_params(options): """Return a copy of options with only those that are query parameters. This is to propagate options in web response URLs. """ params = {} for key in ['tagNameRegex', 'tagValueRegex', 'metricNameRegex']: if key in options: params[key] = options[key] return params class BaseSpectatorCommandHandler(command_processor.CommandHandler): def make_spectator_client(self, options): return spectator_client.SpectatorClient(options) def add_argparser(self, subparsers): parser = super(BaseSpectatorCommandHandler, self).add_argparser(subparsers) parser.add_argument('--by', default='service', help='Organize by "service" or by "metric" name.') spectator_client.SpectatorClient.add_standard_parser_arguments(parser) return parser def _get_data_map(self, catalog, options): restrict_services = options.get('services', None) if restrict_services: catalog = {service: config for service, config in catalog.items() if service in restrict_services.split(',')} spectator = self.make_spectator_client(options) by = options.get('by', 'service') if by == 'service': data_map = spectator.scan_by_service(catalog, params=options) else: data_map = spectator.scan_by_type(catalog, params=options) return data_map class DumpMetricsHandler(BaseSpectatorCommandHandler): def process_commandline_request(self, options): catalog = spectator_client.get_source_catalog(options) data_map = self._get_data_map(catalog, options) json_text = json.JSONEncoder(indent=2).encode(data_map) self.output(options, json_text) def process_web_request(self, request, path, params, fragment): options = dict(command_processor.get_global_options()) options.update(params) catalog = spectator_client.get_source_catalog(options) param_services = params.get('services', 'all').split(',') if param_services == ['all']: restricted_catalog = catalog else: restricted_catalog = {key: value for key, value in catalog.items() if key in param_services} data_map = self._get_data_map(restricted_catalog, options) body = json.JSONEncoder(indent=2).encode(data_map) request.respond(200, {'ContentType': 'application/json'}, body) class ExploreCustomDescriptorsHandler(BaseSpectatorCommandHandler): """Show all the current descriptors in use, and who is using them.""" def __get_type_and_tag_map_and_active_services(self, catalog, options): spectator = self.make_spectator_client(options) type_map = spectator.scan_by_type(catalog, params=options) service_tag_map, active_services = self.to_service_tag_map(type_map) return type_map, service_tag_map, active_services def process_commandline_request(self, options): catalog = spectator_client.get_source_catalog(options) type_map, service_tag_map, active_services = ( 
self.__get_type_and_tag_map_and_active_services( catalog, options)) params = strip_non_html_params(options) html = self.to_html(type_map, service_tag_map, active_services, params) html_doc = http_server.build_html_document( html, title='Metric Usage') self.output(options, html_doc) def process_web_request(self, request, path, params, fragment): options = dict(command_processor.get_global_options()) options.update(params) catalog = spectator_client.get_source_catalog(options) type_map, service_tag_map, active_services = ( self.__get_type_and_tag_map_and_active_services(catalog, options)) params = strip_non_html_params(options) html = self.to_html(type_map, service_tag_map, active_services, params) html_doc = http_server.build_html_document( html, title='Metric Usage') request.respond(200, {'ContentType': 'text/html'}, html_doc) @staticmethod def to_service_tag_map(type_map): service_tag_map = {} active_services = set() def process_endpoint_values_helper(key, service, values): if not isinstance(values, dict): return tagged_data = values.get('values', []) for tagged_point in tagged_data: tag_map = {tag['key']: tag['value'] for tag in tagged_point.get('tags')} if not tag_map: tag_map = {None: None} if key not in service_tag_map: service_tag_map[key] = {service: [tag_map]} else: service_map = service_tag_map[key] if service in service_map: service_map[service].append(tag_map) else: service_map[service] = [tag_map] for key, entry in sorted(type_map.items()): # pylint: disable=bad-indentation for service, value_list in sorted(entry.items()): active_services.add(service) for value in value_list: process_endpoint_values_helper(key, service, value) return service_tag_map, active_services @staticmethod def to_tag_service_map(col
umns, service_tag_map): tag_service_map = {} for service, tags in service_tag_map.items(): service_index = columns[service] for tag_group in tags: for tag_name, tag_value in tag_group.items(): if tag_name not in tag_service_map: tag_service_map[tag_name] = [set() for ignore in columns] tag_service_map[tag_name][service_index].add(tag_value) return tag_service_map def to_html(self, type_map, service_tag_map, active_services, params=None):
header_html = ['<tr>', '<th>Metric</th>', '<th>Label</th>'] columns = {} for service_name in sorted(active_services): columns[service_name] = len(columns) header_html.append('<th><A href="/show?services={0}">{0}</A></th>'.format( service_name)) header_html.append('</tr>') html = ['<table border=1>'] html.extend(header_html) for type_name, service_tag_map in sorted(service_tag_map.items()): tag_service_map = self.to_tag_service_map(columns, service_tag_map) num_labels = len(tag_service_map) row_html = ['<tr>'] row_span = ' rowspan={0}'.format(num_labels) if num_labels > 1 else '' query_params = dict(params or {}) query_params['meterNameRegex'] = type_name metric_url = '/show{0}'.format(self.params_to_query(query_params)) row_html.append( '<td{row_span}><A href="{url}">{type_name}</A></td>'.format( row_span=row_span, url=metric_url, type_name=type_name)) for label_name, service_values in tag_service_map.items(): if label_name is None: row_html.append('<td></td>') else: row_html.append( '<td><A href="/explore?tagNameRegex={0}">{0}</A></td>'.format( label_name)) for value_set in service_values: if value_set == set([None]): row_html.append('<td>n/a</td>') else: row_html.append( '<td>{0}</td>'.format(', '.join( ['<A href="/explore?tagValueRegex={v}">{v}</A>'.format( v=value) for value in sorted(value_set)]))) row_html.append('</tr>') html.append(''.join(row_html))
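One caveat worth a spot check in the record above: millis_to_time converts through datetime.fromtimestamp, which is local-time, yet appends a literal 'Z'; the printed value is therefore host-timezone dependent (the timestamp below is an arbitrary example):

print(millis_to_time(1465000000000))
# '2016-06-04T00:26:40Z' on a UTC host; shifted (but still 'Z'-suffixed) elsewhere.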
mysql/mysql-utilities
mysql-test/suite/replication/t/binlog_purge_ms.py
Python
gpl-2.0
4,696
0
#
# Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; version 2 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
"""
binlog_purge_rpl test for ms test and BUG#22543517 running binlogpurge
on second master added to slave replication channels
"""

import replicate_ms

from mysql.utilities.exception import MUTLibError

_CHANGE_MASTER = ("CHANGE MASTER TO MASTER_HOST = 'localhost', "
                  "MASTER_USER = 'rpl', MASTER_PASSWORD = 'rpl', "
                  "MASTER_PORT = {0}, MASTER_AUTO_POSITION=1 "
                  "FOR CHANNEL 'master-{1}'")


def flush_server_logs_(server, times=5):
    """Flush logs on a server

    server[in]    the instance server where to flush logs on
    times[in]     number of times to flush the logs.
    """
    # Flush master binary log
    server.exec_query("SET sql_log_bin = 0")
    for _ in range(times):
        server.exec_query("FLUSH LOCAL BINARY LOGS")
    server.exec_query("SET sql_log_bin = 1")


class test(replicate_ms.test):
    """test binlog purge Utility

    This test runs the mysqlbinlogpurge utility on a known topology.
    """

    master_datadir = None
    slaves = None
    mask_ports = []

    def check_prerequisites(self):
        if not self.servers.get_server(0).check_version_compat(5, 7, 6):
            raise MUTLibError("Test requires server version 5.7.6 or later")
        return self.check_num_servers(1)

    def setup(self):
        self.res_fname = "result.txt"
        res = super(test, self).setup()
        if not res:
            return False
        # Setup multiple channels for slave
        m1_dict = self.get_connection_values(self.server2)
        m2_dict = self.get_connection_values(self.server3)

        for master in [self.server2, self.server3]:
            master.exec_query("SET SQL_LOG_BIN= 0")
            master.exec_query("GRANT REPLICATION SLAVE ON *.* TO 'rpl'@'{0}' "
                              "IDENTIFIED BY 'rpl'".format(self.server1.host))
            master.exec_query("SET SQL_LOG_BIN= 1")
        self.server1.exec_query("SET GLOBAL relay_log_info_repository = "
                                "'TABLE'")
        self.server1.exec_query(_CHANGE_MASTER.format(m1_dict[3], 1))
        self.server1.exec_query(_CHANGE_MASTER.format(m2_dict[3], 2))
        self.server1.exec_query("START SLAVE")
        return True

    def run(self):
        test_num = 0
        master1_conn = self.build_connection_string(self.server2).strip(' ')
        master2_conn = self.build_connection_string(self.server3).strip(' ')

        cmd_str = "mysqlbinlogpurge.py --master={0} ".format(master1_conn)
        cmd_opts = ("--discover-slaves={0} --dry-run "
                    "".format(master1_conn.split('@')[0]))

        test_num += 1
        comment = ("Test case {0} - mysqlbinlogpurge: with discover "
                   "and verbose options - master 1".format(test_num))
        cmds = ("{0} {1} {2} -vv"
                "").format(cmd_str, cmd_opts, "binlog_purge{0}.log".format(1))
        res = self.run_test_case(0, cmds, comment)
        if not res:
            raise MUTLibError("{0}: failed".format(comment))
        flush_server_logs_(self.server1)

        cmd_str = "mysqlbinlogpurge.py --master={0} ".format(master2_conn)
        test_num += 1
        comment = ("Test case {0} - mysqlbinlogpurge: with discover "
                   "and verbose options - master 2".format(test_num))
        cmds = ("{0} {1} {2} -vv"
                "").format(cmd_str, cmd_opts, "binlog_purge{0}.log".format(2))
        res = self.run_test_case(0, cmds, comment)
        if not res:
            raise MUTLibError("{0}: failed".format(comment))
        flush_server_logs_(self.server1)

        super(test, self).reset_ms_topology()
        return True

    def get_result(self):
        # If run method executes successfully without throwing any exceptions,
        # then test was successful
        return True, None

    def record(self):
        # Not a comparative test
        return True

    def cleanup(self):
        return super(test, self).cleanup()
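_CHANGE_MASTER is a two-slot template that setup fills with a master's port and a channel index, one statement per channel; for example (13001 is a hypothetical port, not taken from this record):

print(_CHANGE_MASTER.format(13001, 1))
# CHANGE MASTER TO MASTER_HOST = 'localhost', MASTER_USER = 'rpl',
# MASTER_PASSWORD = 'rpl', MASTER_PORT = 13001, MASTER_AUTO_POSITION=1
# FOR CHANNEL 'master-1'   (printed as a single line)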
google/mirandum
alerts/fanfunding/migrations/0003_auto_20160416_2023.py
Python
apache-2.0
2,470
0.002429
# -*- coding: utf-8 -*-
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import unicode_literals

from django.db import models, migrations


class Migration(migrations.Migration):

    dependencies = [
        ('main', '0006_updater_updaterevent'),
        ('fanfunding', '0002_auto_20160416_0621'),
    ]

    operations = [
        migrations.RenameField(
            model_name='fanfundingevent',
            old_name='ffu',
            new_name='updater',
        ),
        migrations.RemoveField(
            model_name='fanfundingevent',
            name='funding_id',
        ),
        migrations.RemoveField(
            model_name='fanfundingevent',
            name='id',
        ),
        migrations.RemoveField(
            model_name='fanfundingupdate',
            name='failure_count',
        ),
        migrations.RemoveField(
            model_name='fanfundingupdate',
            name='id',
        ),
        migrations.RemoveField(
            model_name='fanfundingupdate',
            name='last_failure',
        ),
        migrations.RemoveField(
            model_name='fanfundingupdate',
            name='last_failure_message',
        ),
        migrations.RemoveField(
            model_name='fanfundingupdate',
            name='last_update',
        ),
        migrations.AddField(
            model_name='fanfundingevent',
            name='updaterevent_ptr',
            field=models.OneToOneField(parent_link=True, auto_created=True, primary_key=True, default=1, serialize=False, to='main.UpdaterEvent'),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='fanfundingupdate',
            name='updater_ptr',
            field=models.OneToOneField(parent_link=True, auto_created=True, primary_key=True, default=1, serialize=False, to='main.Updater'),
            preserve_default=False,
        ),
    ]
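The two AddField operations graft multi-table inheritance onto existing rows: a parent_link OneToOneField is exactly what Django generates when a model subclasses another concrete model. A sketch of the model shape this migration implies (the class bodies and the import path are assumptions, not taken from this record):

from main.models import Updater, UpdaterEvent  # assumed location of the parents

class FanFundingUpdate(Updater):      # implied by the updater_ptr parent link
    pass

class FanFundingEvent(UpdaterEvent):  # implied by the updaterevent_ptr parent link
    pass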
rhyolight/nupic.son
app/soc/mapreduce/convert_student_info.py
Python
apache-2.0
1,918
0.008342
# Copyright 2011 the Melange authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""GSoCStudentInfo updating MapReduce."""

import logging

from mapreduce import operation

# MapReduce requires these models to have been imported.
# pylint: disable=unused-import
from soc.models.user import User
from soc.modules.gsoc.models.profile import GSoCStudentInfo
from soc.modules.gsoc.models.proposal import GSoCProposal
from soc.modules.gsoc.models.project import GSoCProject
# pylint: enable=unused-import


def process(student_info):
  logging.debug("Converting student_info '%r'", student_info.key())

  proposals = GSoCProposal.all().ancestor(student_info.parent_key()).fetch(1000)
  projects = GSoCProject.all().ancestor(student_info.parent_key()).fetch(1000)

  proposals = [i for i in proposals if i.status != 'withdrawn']
  projects = [i for i in projects if i.status != 'withdrawn']

  nr_proposals = len(proposals)
  nr_projects = len(projects)

  orgs = [GSoCProject.org.get_value_for_datastore(i) for i in projects]

  student_info.number_of_proposals = nr_proposals
  student_info.number_of_projects = nr_projects
  student_info.project_for_orgs = orgs

  yield operation.db.Put(student_info)
  yield operation.counters.Increment("student_infos_converted")
  yield operation.counters.Increment("proposals_counted", delta=nr_proposals)
  yield operation.counters.Increment("projects_counted", delta=nr_projects)
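Because process yields operation objects rather than writing to the datastore itself, the MapReduce runtime can batch the put and the counter increments per shard; a hedged sketch of draining it by hand (student_info stands in for a real GSoCStudentInfo entity):

ops = list(process(student_info))  # generator: nothing runs until iterated
# ops[0] is the db.Put of the rewritten entity; ops[1:] are counter increments.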
sparkslabs/guild
examples/dogs_go_woof_actors.py
Python
apache-2.0
988
0.001012
#!/usr/bin/python

from __future__ import print_function

from guild.actor import Actor, actor_method, process_method, late_bind


class Dog(Actor):
    @actor_method       # Input - triggered by data coming in
    def woof(self):
        print("Woof", self)

    @process_method     # Process - triggered each time it's run
    def process(self):
        # print(" ", end="")
        pass

    @late_bind          # Output
    def produce(self):
        pass


class Shitzu(Dog):
    def __init__(self):
        self.count = 0
        # was super(Dog, self).__init__(), which skips Dog in the MRO;
        # super(Shitzu, ...) is the conventional cooperative call
        super(Shitzu, self).__init__()

    @process_method
    def process(self):
        self.count += 1
        print("I don't go meow", self.count)
        if self.count >= 20:
            self.stop()
            return False


if __name__ == "__main__":
    import time

    dog = Dog()
    shitzu = Shitzu()
    dog.start()
    shitzu.start()
    dog.woof()
    shitzu.woof()
    time.sleep(0.1)
    shitzu.join()
    time.sleep(0.1)
    dog.stop()
    dog.join()
muehlburger/gnucash
gnucash/python/init.py
Python
gpl-2.0
4,709
0.014865
import sys from gnucash import * from gnucash import _sw_app_utils from gnucash import _sw_core_utils from gnucash._sw_core_utils import gnc_prefs_is_extra_enabled, gnc_prefs_is_debugging_enabled from gi import require_version require_version('Gtk', '3.0') from gi.repository import Gtk import os sys.path.append(os.path.dirname(__file__)) # output file location if gnucash has been started with # gnucash --extra if gnc_prefs_is_extra_enabled(): print("Python shell init file: %s" % (__file__)) print("\n" + "The following string should appear translated in your preferred language:" + "\n") print("\n" + _("Welcome to GnuCash") +"\n") # Importing the console class causes SIGTTOU to be thrown if GnuCash is # started in the background. This causes a hang if it is not handled, # so ignore it for the duration import signal old_sigttou = signal.signal(signal.SIGTTOU, signal.SIG_IGN) import pycons.console as cons # Restore the SIGTTOU handler signal.signal(signal.SIGTTOU, old_sigttou) # output debug information if gnucash has been started with # gnucash --debug --extra if gnc_prefs_is_extra_enabled() and gnc_prefs_is_debugging_enabled(): print("Hello from python!\n") print("sys.modules.keys(): ", sys.modules.keys(), "\n") print("dir(_sw_app_utils): ", dir(_sw_app_utils), "\n") #session = app_utils.gnc_get_current_session() #root account can later on be accessed by session.get_book().get_root_account() #print("test", dir(root), root.__class__) print("dir(gnucash_core_c): ", dir(gnucash_core_c)) #acct = Account(instance = root) #print("test3", dir(acct)) #print(acct.GetName()) #print(acct.GetBalance()) #print(acct.GetSplitList()) #print("test2", dir(gnucash.gnucash_core_c)) class Console (cons.Console): """ GTK python console """ def __init__(self, argv=[], shelltype='python', banner=[], filename=None, size=100, user_local_ns=None, user_global_ns=None): cons.
Console.__init__(self, argv, shelltype, banner, filename, size, user_local_ns=user_local_ns, user_global_ns=user_global_ns) self.buffer.create_tag('center', justification=Gtk.Justification.CENTER, font='Mono 4') self.figures = [] self.callbacks = [] self.last_figure = None self.active_canvas = None self.view.conne
ct ('key-press-event', self.key_press_event) self.view.connect ('button-press-event', self.button_press_event) self.view.connect ('scroll-event', self.scroll_event) def key_press_event (self, widget, event): """ Handle key press event """ if self.active_canvas: self.active_canvas.emit ('key-press-event', event) return True return cons.Console.key_press_event (self, widget, event) def scroll_event (self, widget, event): """ Scroll event """ if self.active_canvas: return True return False def button_press_event (self, widget, event): """ Button press event """ return self.refresh() def quit_event (self, widget, event): """ Event handler for closing of console window """ return self.quit() def refresh (self): """ Refresh drawing """ for fig in self.figures: figure, canvas, anchor = fig canvas.draw() return False def quit (self): """ quit """ self.write("\n" + _("Have a nice day!") + "\n") return super(Console, self).quit() # Change this to "if True:" to switch on a python console at gnucash # startup: # shelltype can either be "python" or "ipython" (the latter is not yet fully functional) if False: shelltype = "python" if shelltype=="python": shelltypeName = "Python" else: shelltypeName = "IPython" banner_style = 'title' # TRANSLATORS: %s is either Python or IPython banner = _("Welcome to GnuCash %s Shell") % shelltypeName console = Console(argv = [], shelltype = shelltype, banner = [[banner, banner_style]], size = 100) window = Gtk.Window(type = Gtk.WindowType.TOPLEVEL) window.set_position(Gtk.WindowPosition.CENTER) window.set_default_size(800,600) window.set_border_width(0) console = Console(argv = [], shelltype = shelltype, banner = [[banner, banner_style]], size = 100, user_local_ns=locals(), user_global_ns=globals()) window.connect('destroy-event', console.quit_event) window.connect('delete-event', console.quit_event) window.add (console) window.show_all() console.grab_focus()
antchain/antchain.org
web/ads.py
Python
mit
10,223
0.043138
# -*- coding:utf-8 -*- import math import web import tx def GetAdsPagination(assetid,page) : html = '' html = html + '<div name="pages" align="center">\n' if assetid != None : count = web.collection_ads.find({"asset":assetid}).count() else : count = web.collection_ads.find({"asset":{"$ne":"0"}}).count() if count == 0 : return '' pages = count / web.ADS_PER_PAGE if count % web.ADS_PER_PAGE != 0 : pages = pages + 1 if page <= 4 : displaystart = 1 else : if page - 4 > 1 : displaystart = page - 4 else : displaystart = 1 if page >= pages - 4 and pages > 9 : displaystart = p
ages - 9 displayend = pages else : if pages <= 9 : displayend = pages else : displayend = displaystart + 9 if assetid != None : html = html + '<a href="/address/' + assetid + '/page/' + str(1) + '"><<</a> ' else : html = html + '<a href="/address/page/' + str(1) + '"><<</a> ' for i in range(displaystart,displayend+1) : if i != page : if assetid != None : html = html + '<a h
ref="/address/' + assetid + '/page/' + str(i) + '">' + str(i) + '</a> ' else : html = html + '<a href="/address/page/' + str(i) + '">' + str(i) + '</a> ' else : html = html + str(i) + ' ' if assetid != None : html = html + '<a href="/address/' + assetid + '/page/' + str(pages) + '">>></a> ' else : html = html + '<a href="/address/page/' + str(pages) + '">>></a> ' html = html + '<br/>\n' html = html + '</div>\n' return html def GetAddressInternal(assetid,page,listnum) : if page <= 0 : return 'page index begin: 1' start = (page-1) * listnum html = '' html = html + '<div class="container">\n' html = html + '<table width="80%" border="0" cellpadding="3" cellspacing="0" align="center">' html = html + '<tr align="left">' html = html + '<th>'+ _("Address") +'</th><th>'+ _("AdsAsset") +'</th><th>'+ _("Value") +'</th><th>'+ _("Transaction Counts") +'</th><th>'+ _("Last Transaction Time") +'</th><th>'+ _("First Transaction Time") +'</th>' + '<br/>' html = html + '</tr>' if assetid != None : results = web.collection_ads.find({"asset":assetid}).sort("last_tx_time",-1).limit(listnum).skip(start) else : results = web.collection_ads.find({"asset":{"$ne":"0"}}).sort("last_tx_time",-1).limit(listnum).skip(start) if results : for result in results : html = html + '<tr>' html = html + '<td>' + '<a href="/address/' + result['address'] + '">' + result['address'] + '</a></td>' html = html + '<td>' + web.GetAssetName(result['asset']) + '</td>' html = html + '<td>' + str(result['value']) + '</td>' html = html + '<td>' + str(len(result['txid_list'])) + '</td>' html = html + '<td>' + web.GetLocalTime(result['last_tx_time']) + '</td>' html = html + '<td>' + web.GetLocalTime(result['first_tx_time']) + '</td>' html = html + '</tr>' html = html + '</table>\n' html = html + '</div>\n' return html def GetAddressPage(assetid,page) : html = web.GetHeader("address") html = html + '<div name="address" align="center">\n' html = html + '<br/><br/>\n' html = html + '<h2>'+ _("Address Information") +'</h2>\n' html = html + '<div class="container">\n' count = web.collection_txs.find({"type":"RegisterTransaction"}).count() results = web.collection_txs.find({"type":"RegisterTransaction"}).sort("height",1) row = int(math.ceil(count / 4)) r = 0 for i in range(0, row+1) : html = html + '<div class="row">\n' html = html + '<div class="column column-20"></div>\n' for j in range(0,4) : if i==0 and j==0 : if assetid == None : html = html + '<div class="column column-15"><a href="/address/"><b>[' + _('All Asset') + ']</b></a></div>\n' else : html = html + '<div class="column column-15"><a href="/address/">[' + _('All Asset') + ']</a></div>\n' continue if r >= count : html = html + '<div class="column column-15"></div>\n' elif assetid == results[r]['txid']: html = html + '<div class="column column-15"><a href="/address/' + results[r]['txid'] + '"><b>[' + web.GetAssetNameByAsset(results[r]['asset']) + ']</b></a></div>\n' else : html = html + '<div class="column column-15"><a href="/address/' + results[r]['txid'] + '">[' + web.GetAssetNameByAsset(results[r]['asset']) + ']</a></div>\n' r = r + 1 html = html + '<div class="column column-20"></div>\n' html = html + '</div>\n' html = html + '</div>\n' html = html + '<br/>\n' if assetid != None : html = html + '<h4>- '+ web.GetAssetName(assetid) +' -</h4>\n' Pagination = GetAdsPagination(assetid,page) html = html + Pagination html = html + GetAddressInternal(assetid,page,web.ADS_PER_PAGE) html = html + '<br/>\n' html = html + Pagination html = html + '</div>\n' html = html + web.GetFooter() return 
html def GetAddressPagination(address_all,page,listnum) : html = '' html = html + '<div name="pages" align="center">\n' count = len(address_all['txid_list']) pages = count / listnum if count % listnum != 0 : pages = pages + 1 if page <= 4 : displaystart = 1 else : if page - 4 > 1 : displaystart = page - 4 else : displaystart = 1 if page >= pages - 4 and pages > 9 : displaystart = pages - 9 displayend = pages else : if pages <= 9 : displayend = pages else : displayend = displaystart + 9 ads = address_all['address'] html = html + '<a href="/address/' + ads + '/page/' + str(1) + '"><<</a> ' for i in range(displaystart,displayend+1) : if i != page : html = html + '<a href="/address/' + ads + '/page/' + str(i) + '">' + str(i) + '</a> ' else : html = html + str(i) + ' ' html = html + '<a href="/address/' + ads + '/page/' + str(pages) + '">>></a> ' html = html + '<br/>\n' html = html + '</div>\n' return html def GetAddressResultInternal(address_all,page,listnum) : html = '' nstart = (page-1) * listnum i = -1 for txid in address_all['txid_list'] : i = i + 1 if i < nstart : continue if i >= (nstart + listnum) : break tx_result = web.collection_txs.find_one({"txid":txid['txid']}) html = html + tx.GetTxResultInternal(tx_result,address_all['address']) html = html + '<hr/>\n' return html def GetAddressResult(asset_address,address_all,page) : html = '' html = html + '<div class="container">\n' address = asset_address[0]['address'] html = html + '<div class="row">\n' html = html + '<div class="column column-15"><b>'+ _("Address") +'</b></div><div class="column"><b>' + address + '</b></div>\n' html = html + '</div>\n' ncount = 0 results = {} for result in asset_address : html = html + '<div class="row">\n' html = html + '<div class="column column-15"><b>'+ _("Asset") +'</b></div><div class="column">' + str(result['value']) + ' <b>' + web.GetAssetName(result['asset']) + '</b></div>\n' html = html + '</div>\n' results[ncount] = result ncount = ncount + 1 html = html + '<div class="row">\n' html = html + '<div class="column column-15"><b>'+ _("First Transaction Time") +'</b></div><div class="column">' + web.GetLocalTime(address_all['first_tx_time']) + '</div>\n' html = html + '</div>\n' html = html + '<div class="row">\n' html = html + '<div class="column column-15"><b>'+ _("Last Transaction Time") +'</b></div><div class="column">' + web.GetLocalTime(address_all['last_tx_time']) + '</div>\n' html = html + '</div>\n' html = html + '<div class="row">\n' html = html + '<div class="column column-15"><b>'+ _("Transaction Nums") +'</b></div><div class="column">' + str(len(address_all['txid_list'])) + '</div>\n' html = html + '</div>\n' html = html + '</div>\n' html = html + '<hr/>\n' ######################################################################### # list all asset html = html + '<div class="container">\n' row = int(math.ceil(ncount / 4)) r = 0 for i in range(0, row+1) : html = html + '<div class="row">\n' html = html + '<div class="column column-20"></div>\n' for j in range(0,4) : if i==0 and j==0 : if address_all['asset'] == "0" : html = html + '<div class="column column-15"><a href="/address/' + address_all['address'] + '"><b>[' + _('All Asset') + ']</b></a></div>\n' else : html = html + '<div class="colu
rdevon/cortex
cortex/_lib/utils.py
Python
bsd-3-clause
2,635
0
'''Utility methods

'''

import logging
import os

import numpy as np
import torch

__author__ = 'R Devon Hjelm'
__author_email__ = 'erroneus@gmail.com'

logger = logging.getLogger('cortex.util')

try:
    _, _columns = os.popen('stty size', 'r').read().split()
    _columns = int(_columns)
except ValueError:
    _columns = 1


def print_section(s):
    '''For printing sections to scripts nicely.

    Args:
        s (str): string of section

    '''
    h = s + ('-' * (_columns - len(s)))
    print(h)


def update_dict_of_lists(d_to_update, **d):
    '''Updates a dict of list with kwargs.

    Args:
        d_to_update (dict): dictionary of lists.
        **d: keyword arguments to append.

    '''
    for k, v in d.items():
        if isinstance(v, dict):
            if k not in d_to_update.keys():
                d_to_update[k] = {}
            update_dict_of_lists(d_to_update[k], **v)
        elif k in d_to_update.keys():
            d_to_update[k].append(v)
        else:
            d_to_update[k] = [v]


def bad_values(d):
    failed = {}
    for k, v in d.items():
        if isinstance(v, dict):
            v_ = bad_values(v)
            if v_:
                failed[k] = v_
        else:
            if isinstance(v, (list, tuple)):
                v_ = []
                for v__ in v:
                    if isinstance(v__, torch.Tensor):
                        v_.append(v__.item())
                    else:
                        v_.append(v__)
                v_ = np.array(v_).sum()
            elif isinstance(v, torch.Tensor):
                v_ = v.item()
            else:
                v_ = v
            if np.isnan(v_) or np.isinf(v_):
                failed[k] = v_

    if len(failed) == 0:
        return False

    return failed


def convert_to_numpy(o):
    if isinstance(o, torch.Tensor):
        o = o.data.cpu().numpy()
        if len(o.shape) == 1 and o.shape[0] == 1:
            o = o[0]
    elif isinstance(o, (torch.cuda.FloatTensor, torch.cuda.LongTensor)):
        o = o.cpu().numpy()
    elif isinstance(o, list):
        for i in range(len(o)):
            o[i] = convert_to_numpy(o[i])
    elif isinstance(o, tuple):
        o_ = tuple()
        for i in range(len(o)):
            o_ = o_ + (convert_to_numpy(o[i]),)
        o = o_
    elif isinstance(o, dict):
        for k in o.keys():
            o[k] = convert_to_numpy(o[k])
    return o


def compute_tsne(X, perplexity=40, n_iter=300, init='pca'):
    from sklearn.manifold import TSNE

    tsne = TSNE(2, perplexity=perplexity, n_iter=n_iter, init=init)
    points = X.tolist()
    return tsne.fit_transform(points)
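update_dict_of_lists appends recursively, so repeated calls build per-key histories, including for nested dicts; a quick illustration with arbitrary values:

history = {}
update_dict_of_lists(history, loss=0.9, metrics=dict(acc=0.5))
update_dict_of_lists(history, loss=0.7, metrics=dict(acc=0.6))
# history == {'loss': [0.9, 0.7], 'metrics': {'acc': [0.5, 0.6]}}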
muk-it/muk_dms
muk_dms_view/controllers/__init__.py
Python
lgpl-3.0
1,019
0.003925
###################################################################################
#
#    Copyright (c) 2017-2019 MuK IT GmbH.
#
#    This file is part of MuK Documents View
#    (see https://mukit.at).
#
#    This program is free software: you can redistribute it and/or modify
#    it under the terms of the GNU Lesser General Public License as published by
#    the Free Software Foundation, either version 3 of the License, or
#    (at your option) any later version.
#
#    This program is distributed in the hope that it will be useful,
#    but WITHOUT ANY WARRANTY; without even the implied warranty of
#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#    GNU Lesser General Public License for more details.
#
#    You should have received a copy of the GNU Lesser General Public License
#    along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###################################################################################

from . import main
from . import backend
menren/openshift-ansible
roles/lib_zabbix/library/zbx_trigger.py
Python
apache-2.0
8,027
0.00436
#!/usr/bin/env python ''' ansible module for zabbix triggers ''' # vim: expandtab:tabstop=4:shiftwidth=4 # # Zabbix trigger ansible module # # # Copyright 2015 Red Hat Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # This is in place because each module looks similar to each other. # These need duplicate code as their behavior is very similar # but different for each zabbix class. # pylint: disable=duplicate-code # pylint: disable=import-error from openshift_tools.monitoring.zbxapi import ZabbixAPI, ZabbixConnection def exists(content, key='result'): ''' Check if key exists in content or the size of content[key] > 0 ''' if not content.has_key(key): return False if not content[key]: return False return True def get_priority(priority): ''' determine priority ''' prior = 0 if 'info' in priority: prior = 1 elif 'warn' in priority: prior = 2 elif 'avg' == priority or 'ave' in priority: prior = 3 elif 'high' in priority: prior = 4 elif 'dis' in priority: prior = 5 return prior def get_deps(zapi, deps): ''' get trigger dependencies ''' results = [] for desc in deps: content = zapi.get_content('trigger', 'get', {'filter': {'description': desc}, 'expandExpression': True, 'selectDependencies': 'triggerid', }) if content.has_key('result'): results.append({'triggerid': content['result'][0]['triggerid']}) return results def get_trigger_status(inc_status): ''' Determine the trigger's status 0 is enabled 1 is disabled ''' r_status = 0 if inc_status == 'disabled': r_status = 1 return r_status def get_template_id(zapi, template_name): ''' get related templates ''' template_ids = [] app_ids = {} # Fetch templates by name content = zapi.get_content('template', 'get', {'search': {'host': template_name}, 'selectApplications': ['applicationid', 'name']}) if content.has_key('result'): template_ids.append(content['result'][0]['templateid']) for app in content['result'][0]['applications']: app_ids[app['name']] = app['applicationid'] return template_ids, app_ids def main(): ''' Create a trigger in zabbix Example: "params": { "description": "Processor load is too high on {HOST.NAME}", "expression": "{Linux server:system.cpu.load[percpu,avg1].last()}>5", "dependencies": [ { "triggerid": "14062" } ] }, ''' module = AnsibleModule( argument_spec=dict( zbx_server=dict(default='https://localhost/zabbix/api_jsonrpc.php', type='str'), zbx_user=dict(default=os.environ.get('ZABBIX_USER', None), type='str'), zbx_password=dict(default=os.environ.get('ZABBIX_PASSWORD', None), type='str'), zbx_debug=dict(default=False, type='bool'), expression=d
ict(default=None, type='str'), name=dict(default=Non
e, type='str'), description=dict(default=None, type='str'), dependencies=dict(default=[], type='list'), priority=dict(default='avg', type='str'), url=dict(default=None, type='str'), status=dict(default=None, type='str'), state=dict(default='present', type='str'), template_name=dict(default=None, type='str'), hostgroup_name=dict(default=None, type='str'), query_type=dict(default='filter', choices=['filter', 'search'], type='str'), ), #supports_check_mode=True ) zapi = ZabbixAPI(ZabbixConnection(module.params['zbx_server'], module.params['zbx_user'], module.params['zbx_password'], module.params['zbx_debug'])) #Set the instance and the template for the rest of the calls zbx_class_name = 'trigger' idname = "triggerid" state = module.params['state'] tname = module.params['name'] templateid = None if module.params['template_name']: templateid, _ = get_template_id(zapi, module.params['template_name']) content = zapi.get_content(zbx_class_name, 'get', {module.params['query_type']: {'description': tname}, 'expandExpression': True, 'selectDependencies': 'triggerid', 'templateids': templateid, 'group': module.params['hostgroup_name'], }) # Get if state == 'list': module.exit_json(changed=False, results=content['result'], state="list") # Delete if state == 'absent': if not exists(content): module.exit_json(changed=False, state="absent") content = zapi.get_content(zbx_class_name, 'delete', [content['result'][0][idname]]) module.exit_json(changed=True, results=content['result'], state="absent") # Create and Update if state == 'present': params = {'description': tname, 'comments': module.params['description'], 'expression': module.params['expression'], 'dependencies': get_deps(zapi, module.params['dependencies']), 'priority': get_priority(module.params['priority']), 'url': module.params['url'], 'status': get_trigger_status(module.params['status']), } # Remove any None valued params _ = [params.pop(key, None) for key in params.keys() if params[key] is None] #******# # CREATE #******# if not exists(content): # if we didn't find it, create it content = zapi.get_content(zbx_class_name, 'create', params) if content.has_key('error'): module.exit_json(failed=True, changed=True, results=content['error'], state="present") module.exit_json(changed=True, results=content['result'], state='present') ######## # UPDATE ######## differences = {} zab_results = content['result'][0] for key, value in params.items(): if zab_results[key] != value and zab_results[key] != str(value): differences[key] = value if not differences: module.exit_json(changed=False, results=zab_results, state="present") # We have differences and need to update differences[idname] = zab_results[idname] content = zapi.get_content(zbx_class_name, 'update', differences) module.exit_json(changed=True, results=content['result'], state="present") module.exit_json(failed=True, changed=False, results='Unknown state passed. %s' % state, state="unknown") # pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import, locally-disabled # import module snippets. This are required from ansible.module_utils.basic import * main()
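get_priority in the zbx_trigger module above maps loose priority strings onto Zabbix's numeric 0-5 severity scale by substring matching; a few spot checks, reusing the function as defined in that record:

assert get_priority('information') == 1  # 'info' substring
assert get_priority('warning') == 2      # 'warn' substring
assert get_priority('average') == 3      # 'ave' substring (or exactly 'avg')
assert get_priority('high') == 4
assert get_priority('disaster') == 5     # 'dis' substring
assert get_priority('unknown') == 0      # falls through to the default 0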
eredmiles/GeneralDataScienceToolsDSSG2015
WorldBank2015/Code/data_pipeline_src/supplier_feature_gen.py
Python
mit
24,730
0.016781
import pandas as pd from datetime import date, timedelta import time import numpy as np import re import psycopg2 import ConfigParser import argparse from sqlalchemy import create_engine import random import sql parser = argparse.ArgumentParser() parser.add_argument('-cf','--contract_file',help='Contract data file') parser.add_argument('-if','--invest_file',help='Labelled data file') parser.add_argument('-a','--amounts',action='store_true',default=False,help='Calculate aggregated amount features') parser.add_argument('-dist','-dist',action='store_true',default=True,help='Calculate distribution features') parser.add_argument('-dom','-dom',action='store_true',default=False,help='Calculate dominance features') parser.add_argument('-y','--num_years',default=0,help='Time periods in years') parser.add_argument('-cat','--categ',default=['major_sector'],nargs='*',help='Categoricals to use') parser.add_argument('-id','--table_id',default=time.strftime("%Y%m%d"),help='ID for SQL tables') parser.add_argument('-lim','--contract_num_lim',default=5000,help='Maximum number of rows to use') args = parser.parse_args() def connect(): """Connect to database""" #read password from config file config = ConfigParser.RawConfigParser() config.read('config') password = config.get('SQL','password') #open connection with database config = ConfigParser.RawConfigParser() config.read('config') password = config.get('SQL','password') con = psycopg2.connect(host="localhost",user='dssg',password=password,db
name="world_bank") return con def snake_case(name): """Clean entity name strings""" remove_list = ['llc','ltd','llc','ltd','co','corporation','srl','nv','limited','pvtltd'] remove = '|'.join(remove_list) regex = re.compile(r'\b('+remove+r')\b', flags=re.IGNORECASE) try: s1 = name.lower() s1 = s1.replace('.','') s1 = r
egex.sub("", s1) s1 = s1.strip() s1 = re.sub(' +','_',s1) s1 = re.sub('-','_',s1) s1 = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', s1) s1 = s1.replace('*','') s1 = s1.replace('(','') s1 = s1.replace(')','') s1 = s1.replace('"','') s1 = s1.replace(',','') s1 = s1.replace('#','') s1 = s1.replace(':','_') s1 = s1.replace('&','_') s1 = s1.replace('\'','') s1 = s1.replace('/','_') s1 = re.sub('_+','_',s1) except: s1 = '' return re.sub('([a-z0-9])([A-Z])', r'\1_\2', s1).lower() def reformat(data,column,inplace=False,shorten=False): if inplace: data[column] = data[column].map(lambda x: snake_case(x)) else: data[column + '_reformat'] = data[column].map(lambda x: snake_case(x)) if shorten: data[column] = [re.sub(r'and', '', x).replace('__','_') for x in data[column]] data[column] = [re.sub(r'[aeiou]', '', x) for x in data[column]] return data def binarize(data,fields): dummies = pd.get_dummies(data[fields]).astype('int64') dummies.columns = ['_'.join(('is',fields,col,'ct')) for col in dummies.columns] data = data.merge(dummies,left_index=True,right_index=True,how='left') return data def conditional_amounts(data): for col in data.columns: if 'is' in col and 'total' not in col and 'cum' not in col and 'percent' not in col and 'dominance' not in col: data[re.sub('_ct$','',col) + '_amt'] = data[col]*data['amount_standardized'] return data def distribution(data,field,amount=False): cols_to_use = [] for col in data.columns: if 'is' in col and 'cum' in col and field in col and 'total' not in col and 'percent' not in col and 'dominance' not in col: if amount and 'amt' in col: cols_to_use.append(col) elif not amount and not 'amt' in col: cols_to_use.append(col) subset = data[cols_to_use] dist = subset.apply(lambda x: 100.0*x/x.sum(), axis=1) dist.columns = [col + '_percent' for col in dist.columns] return dist def count_previous_contracts(data,days=0,amount = True, count = False): """Count number of data entries in the past n days from each entry""" def sum_func(column): def inner_func(t): if days == 0: min_date_lim = 0 else: min_date_lim = t - timedelta(days) total = data.ix[(min_date_lim < data['contract_signing_date']) & (data['contract_signing_date'] <= t),[column,'amount_standardized']] if amount: total_sum = ((total[column] != 0)*total['amount_standardized']).cumsum() else: total_sum = total[column].cumsum() return total_sum return inner_func data = data.sort('contract_signing_date') count = 0 for col in data.columns: if 'is' in col and 'total' not in col and 'cum' not in col and 'full' not in col and 'year' not in col: func = sum_func(col) result_temp = data[['contract_signing_date']].apply(func) result_temp = pd.DataFrame(result_temp) result_temp.columns = [col + '_cum'] if count == 0: result = result_temp else: result = result.merge(result_temp,left_index=True,right_index=True,how='left') count += 1 data = data.merge(result,left_index=True,right_index=True,how='left') return data def dominance(data,field,not_field=[]): col_list = [] for col in data.columns: if 'is' in col and 'cum' in col and field in col and 'total' not in col and 'percent' not in col and 'dominance' not in col: col_list.append(col+'_dominance') data[col + '_dominance'] = data[col]/data[col + '_total'] data.replace([np.inf, -np.inf], np.nan,inplace=True) data[col + '_dominance'] = data[col + '_dominance'].fillna(0) return data def rank(data,col_base,no=[]): """Rank the values in a set of fields to create anonymous ranking fields e.g. 
first_major_sector_percent, second_major_sector_percent, ...""" #find matching columns col_list = [] for col in data.columns: match = True for base in col_base: if base not in col: match = False if match: col_list.append(col) data_sub = data[col_list] #sort the columns by value data_array = np.array(data_sub) data_array.sort(axis=1) data_array = np.fliplr(data_array) #create data frame with column names df = pd.DataFrame(data_array,index=data.index,columns=['_'.join(('_'.join(col_base),str(i + 1))) for i in range(len(col_list))]) return df def get_engine(): config = ConfigParser.RawConfigParser() config.read('config') password = config.get('SQL','password') engine = create_engine(r'postgresql://dssg:' + password + '@localhost/world_bank') return engine def write_sql_query(fields,table_name,years=0,amount=False,total=False,table_name2=''): if table_name2 == '': table_name2 = table_name sql_base = 'SELECT st1.supplier_reformat,st1.contract_signing_date, st1.amount_standardized,st1.unique_id' for field in fields: if not total: sql_base += ',\nSUM(st2."' + field + '") AS "' + field + '_cum"' else: sql_base += ',\nSUM(st2."' + field + '") AS "' + field + '_cum_total"' s
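snake_case in the record above canonicalizes supplier names (lowercasing, stripping legal suffixes such as 'co' and 'ltd', and joining words with underscores) so near-duplicate entities collate together; two spot checks, reusing the function as defined there:

print(snake_case('Acme Supplies'))  # -> 'acme_supplies'
print(snake_case('ACME Co.'))       # -> 'acme' (the 'co' legal suffix is removed)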
cliffano/swaggy-jenkins
clients/python-experimental/generated/openapi_client/api/remote_access_api.py
Python
mit
2,651
0.006413
# coding: utf-8

"""
    Swaggy Jenkins

    Jenkins API clients generated from Swagger / Open API specification  # noqa: E501

    The version of the OpenAPI document: 1.1.2-pre.0
    Contact: blah@cliffano.com
    Generated by: https://openapi-generator.tech
"""

from openapi_client.api_client import ApiClient
from openapi_client.api.remote_access_api_endpoints.get_computer import GetComputer
from openapi_client.api.remote_access_api_endpoints.get_jenkins import GetJenkins
from openapi_client.api.remote_access_api_endpoints.get_job import GetJob
from openapi_client.api.remote_access_api_endpoints.get_job_config import GetJobConfig
from openapi_client.api.remote_access_api_endpoints.get_job_last_build import GetJobLastBuild
from openapi_client.api.remote_access_api_endpoints.get_job_progressive_text import GetJobProgressiveText
from openapi_client.api.remote_access_api_endpoints.get_queue import GetQueue
from openapi_client.api.remote_access_api_endpoints.get_queue_item import GetQueueItem
from openapi_client.api.remote_access_api_endpoints.get_view import GetView
from openapi_client.api.remote_access_api_endpoints.get_view_config import GetViewConfig
from openapi_client.api.remote_access_api_endpoints.head_jenkins import HeadJenkins
from openapi_client.api.remote_access_api_endpoints.post_create_item import PostCreateItem
from openapi_client.api.remote_access_api_endpoints.post_create_view import PostCreateView
from openapi_client.api.remote_access_api_endpoints.post_job_build import PostJobBuild
from openapi_client.api.remote_access_api_endpoints.post_job_config import PostJobConfig
from openapi_client.api.remote_access_api_endpoints.post_job_delete import PostJobDelete
from openapi_client.api.remote_access_api_endpoints.post_job_disable import PostJobDisable
from openapi_client.api.remote_access_api_endpoints.post_job_enable import PostJobEnable
from openapi_client.api.remote_access_api_endpoints.post_job_last_build_stop import PostJobLastBuildStop
from openapi_client.api.remote_access_api_endpoints.post_view_config import PostViewConfig


class RemoteAccessApi(
    GetComputer,
    GetJenkins,
    GetJob,
    GetJobConfig,
    GetJobLastBuild,
    GetJobProgressiveText,
    GetQueue,
    GetQueueItem,
    GetView,
    GetViewConfig,
    HeadJenkins,
    PostCreateItem,
    PostCreateView,
    PostJobBuild,
    PostJobConfig,
    PostJobDelete,
    PostJobDisable,
    PostJobEnable,
    PostJobLastBuildStop,
    PostViewConfig,
    ApiClient,
):
    """NOTE: This class is auto generated by OpenAPI Generator
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """
    pass
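RemoteAccessApi mixes every generated endpoint class into a single ApiClient subclass, so one object exposes the whole Jenkins surface. A hedged instantiation sketch; the per-endpoint call names are assumed to mirror the imported module names, and their exact signatures live in the *_endpoints modules, which are not shown in this record:

api = RemoteAccessApi()  # an ApiClient with all endpoint mixins attached
# e.g. the GetJenkins mixin is expected to expose api.get_jenkins(...)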
wenbinli/rl
cliffWalk_QL.py
Python
mit
6,866
0.026507
# solve cliff-walking task with Q-Learning, very similar to SARSA
# original example problem from the book, introduction for reinforcement learning
# Author: Wenbin Li

# numeric backend
import pygame
from pygame.locals import *
import numpy as np

grid_size = 100
n_row = 4
n_col = 12
state = np.zeros((n_row * grid_size, n_col * grid_size))

step_size = 0.5
epsilon = 0.1  # parameter for epsilon-greedy

N_actions = 4  # number of actions {left,up,right,down}
N_episodes = 600  # number of episodes
# as suggested by the book, reach optimality by 8000 time steps

# rewards of -1 until the goal state is reached
# -100 for entering cliff region and instantly return to starting position

# specify goal location
goal_r = 3
goal_c = 11

# specify start location
start_r = 3
start_c = 0

# initialize state-action value function
q = np.zeros((n_row, n_col, N_actions))  # num_row by num_col by num_states
# Note: Q(terminal-state,.) = 0

# undiscounted and episodic task
n_steps = 0
n_episodes = 0

# epsilon-greedy strategy
def ep_greedy(epsilon, num_actions, q, i, j):
    roll = np.random.uniform(0, 1)
    # epsilon-greedy strategy
    if roll < epsilon:  # exploration
        a = np.random.randint(0, num_actions)
    else:  # exploitation
        a = np.argmax(q[i, j, :])
    return a

# translate action into state-change
def action2state(i, j, a):
    # Note: coordinate system starts from the upper-left corner and
    # right/downwards are the positive direction
    if a == 0:  # to left
        i_next = i
        j_next = j - 1
    elif a == 1:  # upwards
        i_next = i - 1
        j_next = j
    elif a == 2:  # to right
        i_next = i
        j_next = j + 1
    else:  # downwards
        i_next = i + 1
        j_next = j
    return i_next, j_next

# Q-Learning method
while n_episodes < N_episodes:
    # begin of an episode
    i = start_r
    j = start_c
    # end of an episode
    n_episodes += 1
    print "episode ", str(n_episodes), "..."

    while True:
        n_steps += 1
        # print " step ", str(n_steps), "..."

        # choose A from S using policy derived from Q (epsilon-greedy)
        a = ep_greedy(epsilon, N_actions, q, i, j)
        # translate action into state-change
        i_next, j_next = action2state(i, j, a)

        # update the state-action value function with Q-Learning
        # state transitions end in the goal state
        # state should be in the range of the gridworld
        if i_next == goal_r and j_next == goal_c:  # reach the goal position
            # q[i,j] = q[i,j] + step_size * (-1 + 0 - q[i,j])  # the Q(terminal,.) = 0
            q[i, j, a] = q[i, j, a] + step_size * (-1 + 0 - q[i, j, a])  # the Q(terminal,.) = 0
            # Note, transition from nonterminal to terminal also gets reward of -1 in this case
            break
        # different reward/consequence when entering the cliff region
        elif i_next == 3 and j_next > 1 and j_next < n_col - 1:
            i_next = start_r
            j_next = start_c
            r = -100
        elif i_next < 0 or i_next > n_row - 1:
            i_next = i
            r = -1
        elif j_next < 0 or j_next > n_col - 1:
            j_next = j
            r = -1
        else:
            r = -1

        # a_next = ep_greedy(epsilon, N_actions, q, i_next, j_next)
        q[i, j, a] = q[i, j, a] + step_size * (r + max(q[i_next, j_next, :]) - q[i, j, a])
        i = i_next
        j = j_next

# visualize the solution/GUI-backend
# plot the gridworld as background
# (optional) mark wind direction
pygame.init()
pygame.display.set_mode((n_col * grid_size, n_row * grid_size))
pygame.display.set_caption('Cliff Walking')
screen = pygame.display.get_surface()
surface = pygame.Surface(screen.get_size())
bg = pygame.Surface(screen.get_size())

# draw background, with mark on start/end states & cliff region
def draw_bg(surface, n_row, n_col, grid_size, start_r, start_c, goal_r, goal_c):
    for i in range(n_col):
        for j in range(n_row):
            x = i * grid_size
            y = j * grid_size
            coords = pygame.Rect(x, y, grid_size, grid_size)
            pygame.draw.rect(surface, (255, 255, 255), coords, 1)

    # draw start state
    pygame.draw.circle(surface, (192, 192, 192),
                       (start_c * grid_size + grid_size / 2,
                        start_r * grid_size + grid_size / 2), grid_size / 4)
    # draw goal state
    pygame.draw.circle(surface, (102, 204, 0),
                       (goal_c * grid_size + grid_size / 2,
                        goal_r * grid_size + grid_size / 2), grid_size / 4)
    # draw cliff region
    x = 1 * grid_size
    y = 3 * grid_size
    coords = pygame.Rect(x, y, grid_size * 10, grid_size)
    pygame.draw.rect(surface, (192, 192, 192), coords)

# use state-action function to find one-step optimal policy
def step_q(q, s_r, s_c, n_row, n_col):
    print "state-action value:"
    print q[s_r, s_c, :]
    a = np.argmax(q[s_r, s_c, :])  # greedy only

    # display debug
    if a == 0:
        print "move left"
    elif a == 1:
        print "move upward"
    elif a == 2:
        print "move right"
    else:
        print "move downwards"

    s_r_next, s_c_next = action2state(s_r, s_c, a)

    # define rules especially when the agent enters the cliff region
    if s_r_next == 3 and s_c_next > 1 and s_c_next < n_col - 1:
        s_r_next = start_r
        s_c_next = start_c
    # in theory, the produced optimal policy should not enter this branch
    elif s_r_next < 0 or s_r_next > n_row - 1:
        s_r_next = s_r
    elif s_c_next < 0 or s_c_next > n_col - 1:
        s_c_next = s_c

    return s_r_next, s_c_next

s_r = start_r
s_c = start_c

while True:
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            pygame.quit()

    # draw gridworld background
    draw_bg(bg, n_row, n_col, grid_size, start_r, start_c, goal_r, goal_c)
    screen.blit(bg, (0, 0))

    # draw the state of the agent, i.e. the path (start --> end) as the foreground
    surface.fill((0, 0, 0))
    # use state-action function to find an optimal policy
    # in the loop, should provide a step function
    # print (s_r, s_c)
    s_r_next, s_c_next = step_q(q, s_r, s_c, n_row, n_col)
    # print (s_r_next, s_c_next)
    if s_r_next != goal_r or s_c_next != goal_c:
        pygame.draw.circle(surface, (255, 255, 255),
                           (s_c_next * grid_size + grid_size / 2,
                            s_r_next * grid_size + grid_size / 2), grid_size / 4)
    bg.blit(surface, (0, 0))
    pygame.display.flip()  # update
    pygame.time.delay(1000)
    s_r, s_c = s_r_next, s_c_next  # update coordinate
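The heart of the loop above is the tabular Q-learning update Q(s,a) <- Q(s,a) + alpha * (r + max_a' Q(s',a') - Q(s,a)), with gamma = 1 since the task is undiscounted. A minimal, self-contained sketch of one such update on a toy two-state table (the shapes here are illustrative, not the cliff grid):

import numpy as np

alpha = 0.5           # same step size as the script above
q = np.zeros((2, 2))  # 2 states x 2 actions, toy example only

# One observed transition: state 0, action 1, reward -1, next state 1.
s, a, r, s_next = 0, 1, -1.0, 1
q[s, a] += alpha * (r + q[s_next, :].max() - q[s, a])
print(q)              # q[0, 1] has moved toward -1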
glogiotatidis/mozillians-new
mozillians/funfacts/tests/test_helpers.py
Python
bsd-3-clause
563
0
from mock import patch
from nose.tools import eq_
from test_utils import TestCase

from mozillians.funfacts.helpers import random_funfact
from mozillians.funfacts.tests import FunFactFactory


class HelperTests(TestCase):
    @patch('mozillians.funfacts.helpers.FunFact.objects')
    def test_helper_calls_random(self, funfact_mock):
        # Exercise the helper so the patched manager records the call;
        # funfact_mock stands in for FunFact.objects itself.
        random_funfact()
        self.assertTrue(funfact_mock.random.called)

    def test_helper_returns_none(self):
        """Test helper returns None when no published FunFacts."""
        FunFactFactory.create()
        eq_(random_funfact(), None)
eeue56/code-golf
game-of-life/game_of_life.py
Python
bsd-3-clause
957
0.018809
z = len
u = x = y = r = 0

def v():
    global u
    if z(r) < 3:
        if y < 1:
            c, n = r
        else:
            n, c = r
        s = 0
    else:
        l, c, n = r
        s = l[x - 1] + l[x] + (l[x + 1] if x != z(l) - 1 else 0)
    u = s + {x: sum(c[x - 1:x + 2] + n[x - 1:x + 2]) - c[x],
             0: n[0] + n[1] + c[1],
             z(c) - 1: n[x - 1] + n[x] + c[x - 1]}[x]

def d(w):
    global r, x, y
    a = list.append
    m = [[int(i) for i in l.strip()] for l in open(w)]
    n = range(1, z(m))
    r = m[0:2]
    v()
    e = [{u: 0, 2: r[0][0], 3: 1}[u]]
    for x in n:
        v()
        a(e, {u: 0, 2: r[0][x], 3: 1}[u])
    o = [e]
    for y in n:
        r = m[y - 1:y + 2]
        x = 0
        v()
        e = [{u: 0, 2: r[1][0], 3: 1}[u]]
        for x in n:
            v()
            a(e, {u: 0, 2: r[1][x], 3: 1}[u])
        a(o, e)
    f = open(w, 'w')
    f.write('\n'.join(''.join(map(str, q)) for q in o))
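Ungolfed, the rule the program implements is: count each cell's live neighbours; a cell is live next generation if it has exactly three, or if it is live now and has exactly two. A plain sketch of one step over a list-of-lists grid:

def life_step(grid):
    rows, cols = len(grid), len(grid[0])

    def neighbours(y, x):
        return sum(grid[j][i]
                   for j in range(max(0, y - 1), min(rows, y + 2))
                   for i in range(max(0, x - 1), min(cols, x + 2))
                   if (j, i) != (y, x))

    return [[1 if neighbours(y, x) == 3 or (grid[y][x] and neighbours(y, x) == 2) else 0
             for x in range(cols)]
            for y in range(rows)]

print(life_step([[0, 1, 0],
                 [0, 1, 0],
                 [0, 1, 0]]))  # the blinker rotates to a horizontal bar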
Jgarcia-IAS/Fidelizacion_odoo
openerp/addons/auth_ldap/users_ldap.py
Python
agpl-3.0
10,747
0.002885
##############################################################################
#
#     OpenERP, Open Source Management Solution
#     Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
#     This program is free software: you can redistribute it and/or modify
#     it under the terms of the GNU Affero General Public License as
#     published by the Free Software Foundation, either version 3 of the
#     License, or (at your option) any later version.
#
#     This program is distributed in the hope that it will be useful,
#     but WITHOUT ANY WARRANTY; without even the implied warranty of
#     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#     GNU Affero General Public License for more details.
#
#     You should have received a copy of the GNU Affero General Public License
#     along with this program.  If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################

import ldap
import logging
from ldap.filter import filter_format

import openerp.exceptions
from openerp import tools
from openerp.osv import fields, osv
from openerp import SUPERUSER_ID
from openerp.modules.registry import RegistryManager

_logger = logging.getLogger(__name__)


class CompanyLDAP(osv.osv):
    _name = 'res.company.ldap'
    _order = 'sequence'
    _rec_name = 'ldap_server'

    def get_ldap_dicts(self, cr, ids=None):
        """
        Retrieve res_company_ldap resources from the database in dictionary
        format.

        :param list ids: Valid ids of model res_company_ldap. If not \
        specified, process all resources (unlike other ORM methods).
        :return: ldap configurations
        :rtype: list of dictionaries
        """

        if ids:
            id_clause = 'AND id IN (%s)'
            args = [tuple(ids)]
        else:
            id_clause = ''
            args = []
        cr.execute("""
            SELECT id, company, ldap_server, ldap_server_port, ldap_binddn,
                   ldap_password, ldap_filter, ldap_base, "user", create_user,
                   ldap_tls
            FROM res_company_ldap
            WHERE ldap_server != '' """ + id_clause + """ ORDER BY sequence
        """, args)
        return cr.dictfetchall()

    def connect(self, conf):
        """
        Connect to an LDAP server specified by an ldap
        configuration dictionary.

        :param dict conf: LDAP configuration
        :return: an LDAP object
        """

        uri = 'ldap://%s:%d' % (conf['ldap_server'],
                                conf['ldap_server_port'])

        connection = ldap.initialize(uri)
        if conf['ldap_tls']:
            connection.start_tls_s()
        return connection

    def authenticate(self, conf, login, password):
        """
        Authenticate a user against the specified LDAP server.

        In order to prevent an unintended 'unauthenticated authentication',
        which is an anonymous bind with a valid dn and a blank password,
        check for empty passwords explicitely (:rfc:`4513#section-6.3.1`)

        :param dict conf: LDAP configuration
        :param login: username
        :param password: Password for the LDAP user
        :return: LDAP entry of authenticated user or False
        :rtype: dictionary of attributes
        """

        if not password:
            return False

        entry = False
        filter = filter_format(conf['ldap_filter'], (login,))
        try:
            results = self.query(conf, filter)

            # Get rid of (None, attrs) for searchResultReference replies
            results = [i for i in results if i[0]]
            if results and len(results) == 1:
                dn = results[0][0]
                conn = self.connect(conf)
                conn.simple_bind_s(dn, password.encode('utf-8'))
                conn.unbind()
                entry = results[0]
        except ldap.INVALID_CREDENTIALS:
            return False
        except ldap.LDAPError, e:
            _logger.error('An LDAP exception occurred: %s', e)
        return entry

    def query(self, conf, filter, retrieve_attributes=None):
        """
        Query an LDAP server with the filter argument and scope subtree.

        Allow for all authentication methods of the simple authentication
        method:

        - authenticated bind (non-empty binddn + valid password)
        - anonymous bind (empty binddn + empty password)
        - unauthenticated authentication (non-empty binddn + empty password)

        .. seealso::
           :rfc:`4513#section-5.1` - LDAP: Simple Authentication Method.

        :param dict conf: LDAP configuration
        :param filter: valid LDAP filter
        :param list retrieve_attributes: LDAP attributes to be retrieved. \
        If not specified, return all attributes.
        :return: ldap entries
        :rtype: list of tuples (dn, attrs)
        """

        results = []
        try:
            conn = self.connect(conf)
            conn.simple_bind_s(conf['ldap_binddn'] or '',
                               conf['ldap_password'].encode('utf-8') or '')
            results = conn.search_st(conf['ldap_base'], ldap.SCOPE_SUBTREE,
                                     filter, retrieve_attributes, timeout=60)
            conn.unbind()
        except ldap.INVALID_CREDENTIALS:
            _logger.error('LDAP bind failed.')
        except ldap.LDAPError, e:
            _logger.error('An LDAP exception occurred: %s', e)
        return results

    def map_ldap_attributes(self, cr, uid, conf, login, ldap_entry):
        """
        Compose values for a new resource of model res_users,
        based upon the retrieved ldap entry and the LDAP settings.

        :param dict conf: LDAP configuration
        :param login: the new user's login
        :param tuple ldap_entry: single LDAP result (dn, attrs)
        :return: parameters for a new resource of model res_users
        :rtype: dict
        """

        values = {'name': ldap_entry[1]['cn'][0],
                  'login': login,
                  'company_id': conf['company']
                  }
        return values

    def get_or_create_user(self, cr, uid, conf, login, ldap_entry,
                           context=None):
        """
        Retrieve an active resource of model res_users with the specified
        login. Create the user if it is not initially found.

        :param dict conf: LDAP configuration
        :param login: the user's login
        :param tuple ldap_entry: single LDAP result (dn, attrs)
        :return: res_users id
        :rtype: int
        """

        user_id = False
        login = tools.ustr(login.lower().strip())
        cr.execute("SELECT id, active FROM res_users WHERE lower(login)=%s",
                   (login,))
        res = cr.fetchone()
        if res:
            if res[1]:
                user_id = res[0]
        elif conf['create_user']:
            _logger.debug("Creating new Odoo user \"%s\" from LDAP" % login)
            user_obj = self.pool['res.users']
            values = self.map_ldap_attributes(cr, uid, conf, login, ldap_entry)
            if conf['user']:
                values['active'] = True
                user_id = user_obj.copy(cr, SUPERUSER_ID, conf['user'],
                                        default=values)
            else:
                user_id = user_obj.create(cr, SUPERUSER_ID, values)
        return user_id

    _columns = {
        'sequence': fields.integer('Sequence'),
        'company': fields.many2one('res.company', 'Company', required=True,
                                   ondelete='cascade'),
        'ldap_server': fields.char('LDAP Server address', required=True),
        'ldap_server_port': fields.integer('LDAP Server port', required=True),
        'ldap_binddn': fields.char('LDAP binddn',
            help=("The user account on the LDAP server that is used to query "
                  "the directory. Leave empty to connect anonymously.")),
        'ldap_password': fields.char('LDAP password',
            help=("The password of the user account on the LDAP server that is "
                  "used to query the directory.")),
        'l
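The authenticate method escapes the login with ldap.filter.filter_format before searching, which is what protects the configured ldap_filter from LDAP filter injection. A standalone sketch of that escaping (requires the python-ldap package; the filter template mirrors a typical ldap_filter setting):

from ldap.filter import filter_format

template = '(uid=%s)'
# A benign login passes through unchanged; filter metacharacters are escaped.
print(filter_format(template, ('jdoe',)))      # (uid=jdoe)
print(filter_format(template, ('*)(uid=*',)))  # (uid=\2a\29\28uid=\2a)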
timcera/tsgettoolbox
docs/conf.py
Python
bsd-3-clause
8,064
0.001736
# -*- coding: utf-8 -*-
#
# TSgettoolbox documentation build configuration file, created by
# sphinx-quickstart on Mon Jun 10 23:11:56 2013.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.

# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath('.'))

# -- General configuration -----------------------------------------------------

# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'

# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
    "sphinx.ext.autodoc",
    "sphinx.ext.doctest",
    "sphinx.ext.todo",
    "sphinx.ext.coverage",
    "sphinx.ext.mathjax",
    "sphinx.ext.autosummary",
    "sphinxcontrib.programoutput",
]

# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]

# The suffix of source filenames.
source_suffix = ".rst"

# The encoding of source files.
# source_encoding = 'utf-8-sig'

# The master toctree document.
master_doc = "index"

# General information about the project.
project = "TSgettoolbox"
copyright = "2013, Tim Cera"

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = open("../VERSION", "r").readline().strip()
# The full version, including alpha/beta/rc tags.
release = version

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None

# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
today_fmt = "%Y-%m-%d"

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ["_build"]

# The reST default role (used for this markup: `text`) to use for all documents.
# default_role = None

# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = True

# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
add_module_names = True

# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"

# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []

# -- Options for HTML output ---------------------------------------------------

# The theme to use for HTML and HTML Help pages.  See the documentation for
# a list of builtin themes.
html_theme = "pyramid"

# Theme options are theme-specific and customize the look and feel of a theme
# further.  For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}

# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []

# The name for this set of Sphinx documents.  If None, it defaults to
# "<project> v<release> documentation".
# html_title = None

# A shorter title for the navigation bar.  Default is the same as html_title.
# html_short_title = None

# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None

# The name of an image file (within the static path) to use as favicon of the
# docs.  This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]

# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'

# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True

# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}

# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}

# If false, no module index is generated.
# html_domain_indices = True

# If false, no index is generated.
# html_use_index = True

# If true, the index is split into individual pages for each letter.
# html_split_index = False

# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True

# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
html_show_sphinx = False

# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True

# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it.  The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''

# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None

# Output file base name for HTML help builder.
htmlhelp_basename = "TSgettoolboxdoc"


# -- Options for LaTeX output --------------------------------------------------

latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #'papersize': 'letterpaper',
    # The font size ('10pt', '11pt' or '12pt').
    #'pointsize': '10pt',
    # Additional stuff for the LaTeX preamble.
    #'preamble': '',
}

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
    ("index", "TSgettoolbox.tex", "TSgettoolbox Documentation", "Tim Cera", "manual")
]

# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None

# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False

# If true, show page references after internal links.
# latex_show_pagerefs = False

# If true, show URL addresses after external links.
# latex_show_urls = False

# Documents to append as an appendix to all manuals.
# latex_appendices = []

# If false, no module index is generated.
# latex_domain_indices = True


# -- Options for manual page output --------------------------------------------

# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [("index", "tsgettoolbox", "TSgettoolbox Documentation", ["Tim Cera"], 1)]

# If true, show URL addresses after external links.
# man_show_urls = False


# -- Options for Texinfo output ------------------------------------------------

# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    (
        "index",
        "tsgettoolbox",
        "TSgettoolbox Documentation",
        "Tim Cera",
        "tsgettoolbox",
        "One line description of project.",
        "Miscellaneous",
    )
]

# Documents to append as an appendix to all manuals.
# texinfo_appendices = []

# If false, no module index is generated.
# texinfo_domain_indices = True

# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
mahim97/zulip
zerver/webhooks/gogs/tests.py
Python
apache-2.0
7,893
0.003547
# -*- coding: utf-8 -*-
from typing import Optional, Text

from mock import MagicMock, patch

from zerver.lib.test_classes import WebhookTestCase
from zerver.lib.webhooks.git import COMMITS_LIMIT


class GogsHookTests(WebhookTestCase):
    STREAM_NAME = 'commits'
    URL_TEMPLATE = "/api/v1/external/gogs?&api_key={api_key}"
    FIXTURE_DIR_NAME = 'gogs'

    def test_push(self) -> None:
        expected_subject = u"try-git / master"
        expected_message = u"""john [pushed](http://localhost:3000/john/try-git/compare/479e6b772b7fba19412457483f50b201286d0103...d8fce16c72a2ff56a5afc8a08645a6ce45491794) 1 commit to branch master. Commits by John (1).

* Webhook Test ([d8fce16](http://localhost:3000/john/try-git/commit/d8fce16c72a2ff56a5afc8a08645a6ce45491794))"""
        self.send_and_test_stream_message('push', expected_subject, expected_message,
                                          HTTP_X_GOGS_EVENT='push')

    def test_push_multiple_committers(self) -> None:
        commit_info = u'* Webhook Test ([d8fce16](http://localhost:3000/john/try-git/commit/d8fce16c72a2ff56a5afc8a08645a6ce45491794))\n'
        expected_subject = u"try-git / master"
        expected_message = u"""john [pushed](http://localhost:3000/john/try-git/compare/479e6b772b7fba19412457483f50b201286d0103...d8fce16c72a2ff56a5afc8a08645a6ce45491794) 2 commits to branch master. Commits by Benjamin (1) and John (1).\n\n{}* Webhook Test ([d8fce16](http://localhost:3000/john/try-git/commit/d8fce16c72a2ff56a5afc8a08645a6ce45491794))""".format(commit_info)
        self.send_and_test_stream_message('push_commits_multiple_committers', expected_subject,
                                          expected_message, HTTP_X_GOGS_EVENT='push')

    def test_push_multiple_committers_filtered_by_branches(self) -> None:
        self.url = self.build_webhook_url(branches='master,development')
        commit_info = u'* Webhook Test ([d8fce16](http://localhost:3000/john/try-git/commit/d8fce16c72a2ff56a5afc8a08645a6ce45491794))\n'
        expected_subject = u"try-git / master"
        expected_message = u"""john [pushed](http://localhost:3000/john/try-git/compare/479e6b772b7fba19412457483f50b201286d0103...d8fce16c72a2ff56a5afc8a08645a6ce45491794) 2 commits to branch master. Commits by Benjamin (1) and John (1).\n\n{}* Webhook Test ([d8fce16](http://localhost:3000/john/try-git/commit/d8fce16c72a2ff56a5afc8a08645a6ce45491794))""".format(commit_info)
        self.send_and_test_stream_message('push_commits_multiple_committers', expected_subject,
                                          expected_message, HTTP_X_GOGS_EVENT='push')

    def test_push_filtered_by_branches(self) -> None:
        self.url = self.build_webhook_url(branches='master,development')
        expected_subject = u"try-git / master"
        expected_message = u"""john [pushed](http://localhost:3000/john/try-git/compare/479e6b772b7fba19412457483f50b201286d0103...d8fce16c72a2ff56a5afc8a08645a6ce45491794) 1 commit to branch master. Commits by John (1).

* Webhook Test ([d8fce16](http://localhost:3000/john/try-git/commit/d8fce16c72a2ff56a5afc8a08645a6ce45491794))"""
        self.send_and_test_stream_message('push', expected_subject, expected_message,
                                          HTTP_X_GOGS_EVENT='push')

    def test_push_commits_more_than_limits(self) -> None:
        expected_subject = u"try-git / master"
        commits_info = "* Webhook Test ([d8fce16](http://localhost:3000/john/try-git/commit/d8fce16c72a2ff56a5afc8a08645a6ce45491794))\n"
        expected_message = u"john [pushed](http://localhost:3000/john/try-git/compare/479e6b772b7fba19412457483f50b201286d0103...d8fce16c72a2ff56a5afc8a08645a6ce45491794) 30 commits to branch master. Commits by John (30).\n\n{}[and {} more commit(s)]".format(
            commits_info * COMMITS_LIMIT,
            30 - COMMITS_LIMIT
        )
        self.send_and_test_stream_message('push_commits_more_than_limits', expected_subject,
                                          expected_message, HTTP_X_GOGS_EVENT='push')

    def test_push_commits_more_than_limits_filtered_by_branches(self) -> None:
        self.url = self.build_webhook_url(branches='master,development')
        expected_subject = u"try-git / master"
        commits_info = "* Webhook Test ([d8fce16](http://localhost:3000/john/try-git/commit/d8fce16c72a2ff56a5afc8a08645a6ce45491794))\n"
        expected_message = u"john [pushed](http://localhost:3000/john/try-git/compare/479e6b772b7fba19412457483f50b201286d0103...d8fce16c72a2ff56a5afc8a08645a6ce45491794) 30 commits to branch master. Commits by John (30).\n\n{}[and {} more commit(s)]".format(
            commits_info * COMMITS_LIMIT,
            30 - COMMITS_LIMIT
        )
        self.send_and_test_stream_message('push_commits_more_than_limits', expected_subject,
                                          expected_message, HTTP_X_GOGS_EVENT='push')

    def test_new_branch(self) -> None:
        expected_subject = u"try-git / my_feature"
        expected_message = u"john created [my_feature](http://localhost:3000/john/try-git/src/my_feature) branch"
        self.send_and_test_stream_message('branch', expected_subject, expected_message,
                                          HTTP_X_GOGS_EVENT='create')

    def test_pull_request_opened(self) -> None:
        expected_subject = u"try-git / PR #1 Title Text for Pull Request"
        expected_message = u"""john opened [PR #1](http://localhost:3000/john/try-git/pulls/1)
from `feature` to `master`"""
        self.send_and_test_stream_message('pull_request_opened', expected_subject,
                                          expected_message, HTTP_X_GOGS_EVENT='pull_request')

    def test_pull_request_closed(self) -> None:
        expected_subject = u"try-git / PR #1 Title Text for Pull Request"
        expected_message = u"""john closed [PR #1](http://localhost:3000/john/try-git/pulls/1)
from `feature` to `master`"""
        self.send_and_test_stream_message('pull_request_closed', expected_subject,
                                          expected_message, HTTP_X_GOGS_EVENT='pull_request')

    def test_pull_request_merged(self) -> None:
        expected_subject = u"try-git / PR #2 Title Text for Pull Request"
        expected_message = u"""john merged [PR #2](http://localhost:3000/john/try-git/pulls/2)
from `feature` to `master`"""
        self.send_and_test_stream_message('pull_request_merged', expected_subject,
                                          expected_message, HTTP_X_GOGS_EVENT='pull_request')

    @patch('zerver.webhooks.gogs.view.check_send_stream_message')
    def test_push_filtered_by_branches_ignore(self, check_send_stream_message_mock: MagicMock) -> None:
        self.url = self.build_webhook_url(branches='changes,development')
        payload = self.get_body('push')
        result = self.client_post(self.url, payload, HTTP_X_GOGS_EVENT='push',
                                  content_type="application/json")
        self.assertFalse(check_send_stream_message_mock.called)
        self.assert_json_success(result)

    @patch('zerver.webhooks.gogs.view.check_send_stream_message')
    def test_push_commits_more_than_limits_filtered_by_branches_ignore(
            self, check_send_stream_message_mock):
        # type: (MagicMock) -> None
        self.url = self.build_webhook_url(branches='changes,development')
        payload = self.get_body('push_commits_more_than_limits')
        result = self.client_post(self.url, payload, HTTP_X_GOGS_EVENT='push',
                                  content_type="application/json")
        self.assertFalse(check_send_stream_message_mock.called)
        self.assert_json_success(result)

    @patch('zerver.webhooks.gogs.view.check_send_stream_message')
    def test_push_multiple_committers_filtered_by_branches_ignore(
            self, check_send_stream_message_mock):
        # type: (MagicMock) -> None
        self.url = self.build_webhook_url(branches='changes,development')
        payload = self.get_body('push_commits_multiple_committers')
        result = self.client_post(self.url, payload, HTTP_X_GOGS_EVENT='push',
                                  content_type="application/json")
        self.assertFalse(check_send_stream_message_mock.called)
        self.assert_json_success(result)
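Each test drives the endpoint the same way a Gogs server would: a JSON payload POSTed with an X-Gogs-Event header. A sketch of the equivalent raw request (server URL, stream, and API key are placeholders):

import json
import requests

payload = {"ref": "refs/heads/master", "commits": []}  # trimmed Gogs push payload
resp = requests.post(
    "https://zulip.example.com/api/v1/external/gogs",
    params={"api_key": "abc123", "stream": "commits"},
    data=json.dumps(payload),
    headers={"X-Gogs-Event": "push", "Content-Type": "application/json"},
)
print(resp.status_code)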
orontee/porte-monnaie
site/tracker/tests/test_views.py
Python
gpl-3.0
19,399
0
"""Tests for views of tracker application."""
from django.contrib.auth import get_user_model
from django.core.urlresolvers import reverse
from django.test import TestCase

from tracker.models import (Expenditure, Purse)

User = get_user_model()


class HomeTest(TestCase):
    """Test home view."""

    def setUp(self):
        self.url = reverse('tracker:home')

    def test_get(self):
        """Get home view."""
        response = self.client.get(self.url)
        self.assertEqual(response.status_code, 200)


def create_user(**kwargs):
    """Create a user."""
    u = User.objects.create_user(**kwargs)
    u.save()
    return u


def create_purse(user=None, **kwargs):
    """Create a purse.

    If user is not None, add it to the created purse.
    """
    p = Purse.objects.create(**kwargs)
    p.save()
    if user is not None:
        p.users.add(user)
    return p


def create_expenditure(**kwargs):
    """Create an expenditure."""
    e = Expenditure.objects.create(**kwargs)
    e.save()
    return e


class ExpenditureAddTest(TestCase):
    """Test expenditure add view."""

    def setUp(self):
        self.url = reverse('tracker:add')

    def test_get_non_authentified(self):
        """Get page while no user is authentified."""
        response = self.client.get(self.url)
        self.assertEqual(response.status_code, 302)
        url = '/tracker/login?next=/tracker/expenditures/add/'
        self.assertEqual(response.url, url)

    def test_get_authentified_without_purse(self):
        """Get page while user is authentified but has no purse."""
        credentials = {'username': 'username', 'password': 'password'}
        create_user(**credentials)
        self.client.login(**credentials)
        response = self.client.get(self.url)
        expected_url = '/tracker/purses/create/'
        self.assertRedirects(response, expected_url)

    def test_get_authentified_without_default_purse(self):
        """Get page while user is authentified but has no default purse."""
        credentials = {'username': 'username', 'password': 'password'}
        u = create_user(**credentials)
        self.client.login(**credentials)
        create_purse(u)
        response = self.client.get(self.url)
        self.assertEqual(response.status_code, 200)
        # self.assertEqual(u.default_purse, p)
        # TODO Check messages

    def test_post(self):
        """Get page then post."""
        credentials = {'username': 'username', 'password': 'password'}
        u = create_user(**credentials)
        self.client.login(**credentials)
        p = create_purse(u)
        u.default_purse = p
        u.save()
        response = self.client.get(self.url)
        self.assertEqual(response.status_code, 200)
        token = response.cookies['csrftoken'].value
        data = {'amount': 100,
                'date': '24/05/2014',
                'description': 'expenditure description',
                'occurrences': '1',
                'csrftoken': token}
        response = self.client.post(self.url, data)
        self.assertEqual(response.status_code, 302)
        url = '/tracker/expenditures/'
        self.assertEqual(response.url, url)
        self.assertEqual(u.expenditure_set.count(), 1)

    def test_post_and_save_other(self):
        """Get page then post and save other."""
        credentials = {'username': 'username', 'password': 'password'}
        u = create_user(**credentials)
        self.client.login(**credentials)
        p = create_purse(u)
        u.default_purse = p
        u.save()
        response = self.client.get(self.url)
        self.assertEqual(response.status_code, 200)
        token = response.cookies['csrftoken'].value
        data = {'amount': 300,
                'date': '25/05/2014',
                'description': 'other expenditure description',
                'occurrences': '1',
                'save_other': True,
                'csrftoken': token}
        response = self.client.post(self.url, data)
        self.assertEqual(response.status_code, 302)
        url = self.url + '?date=2014-05-25'
        self.assertEqual(response.url, url)
        self.assertEqual(u.expenditure_set.count(), 1)

    def test_post_with_multiple_occurence(self):
        """Get page then post to create multiple expenditures."""
        credentials = {'username': 'username', 'password': 'password'}
        u = create_user(**credentials)
        self.client.login(**credentials)
        p = create_purse(u)
        u.default_purse = p
        u.save()
        response = self.client.get(self.url)
        self.assertEqual(response.status_code, 200)
        token = response.cookies['csrftoken'].value
        data = {'amount': 100,
                'date': '24/05/2014',
                'description': 'expenditure description',
                'occurrences': '3',
                'csrftoken': token}
        response = self.client.post(self.url, data)
        self.assertEqual(response.status_code, 302)
        url = '/tracker/expenditures/'
        self.assertEqual(response.url, url)
        self.assertEqual(u.expenditure_set.count(), 3)


class ExpenditureDeleteTest(TestCase):
    """Test expenditure delete view."""

    def setUp(self):
        credentials = {'username': 'username', 'password': 'password'}
        u = create_user(**credentials)
        p = create_purse(u)
        u.default_purse = p
        u.save()
        e = Expenditure.objects.create(amount=199, author=u, purse=p)
        self.url = reverse('tracker:delete', kwargs={'pk': e.pk})

    def test_get_non_authentified(self):
        """Get page while no user is authentified."""
        response = self.client.get(self.url)
        self.assertEqual(response.status_code, 302)
        expected_url = '/tracker/login?next='
        expected_url += self.url
        self.assertEqual(response.url, expected_url)

    def test_get_authentified(self):
        """Get page then delete resource while user is authentified."""
        credentials = {'username': 'username', 'password': 'password'}
        self.client.login(**credentials)
        response = self.client.get(self.url)
        self.assertEqual(response.status_code, 200)
        response = self.client.delete(self.url)
        self.assertEqual(response.status_code, 302)
        self.assertEqual(Expenditure.objects.count(), 0)
        self.assertEqual(response.url, '/tracker/expenditures/')


class ExpenditureUpdateTest(TestCase):
    """Test expenditure update view."""

    def setUp(self):
        credentials = {'username': 'username', 'password': 'password'}
        self.u = create_user(**credentials)
        p = create_purse(self.u)
        self.u.default_purse = p
        self.u.save()
        e = Expenditure.objects.create(amount=199, author=self.u, purse=p)
        self.url = reverse('tracker:update', kwargs={'pk': e.pk})

    def test_get_non_authentified(self):
        """Get page while no user is authentified."""
        response = self.client.get(self.url)
        self.assertEqual(response.status_code, 302)
        expected_url = '/tracker/login?next='
        expected_url += self.url
        self.assertEqual(response.url, expected_url)

    def test_get_authentified(self):
        """Get page then update resource while user is authentified."""
        credentials = {'username': 'username', 'password': 'password'}
        self.client.login(**credentials)
        response = self.client.get(self.url)
        self.assertEqual(response.status_code, 200)
        token = response.cookies['csrftoken'].value
        data = {'amount': 100,
                'date': '24/05/2014',
                'description': 'expenditure description',
                'occurrences': '1',
                'csrftoken': token}
        response = self.client.post(self.
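Every POST test above first GETs the form so the test client's cookie jar holds a CSRF token before posting. Condensed, the pattern is (the URL and form fields are placeholders):

from django.test import TestCase

class PostWithTokenPattern(TestCase):
    def _post_form(self, url, data):
        response = self.client.get(url)  # primes the csrftoken cookie
        data = dict(data, csrftoken=response.cookies['csrftoken'].value)
        return self.client.post(url, data)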
robmcmullen/peppy
editra.in/syntax/pike.py
Python
gpl-2.0
3,246
0.005237
###############################################################################
# Name: pike.py                                                               #
# Purpose: Define highlighting/syntax for Pike programming language           #
# Author: Cody Precord <cprecord@editra.org>                                  #
# Copyright: (c) 2007 Cody Precord <staff@editra.org>                         #
# License: wxWindows License                                                  #
###############################################################################

"""
FILE: pike.py
@summary: Defines syntax and highlighting settings for the Pike programming
          language. Pike is very similar in form to C/CPP so the Cpp lexer is
          used to provide the highlighting settings.
"""

__author__ = "Cody Precord <cprecord@editra.org>"
__svnid__ = "$Id: pike.py 55174 2008-08-22 15:12:27Z CJP $"
__revision__ = "$Revision: 55174 $"

#-----------------------------------------------------------------------------#
# Local Imports
import synglob
import cpp

#-----------------------------------------------------------------------------#

#---- Keyword Definitions ----#
PIKE_KW = (0, "goto break return continue case default if else switch while "
              "foreach do gauge destruct lambda inherit import typeof catch "
              "for inline nomask")

PIKE_TYPE = (1, "private protected public static "
                "int string void float mapping array multiset mixed program "
                "object function")

PIKE_DOC = tuple(cpp.DOC_KEYWORDS)
#---- End Keyword Definitions ----#

#---- Syntax Style Specs ----#
SYNTAX_ITEMS = list(cpp.SYNTAX_ITEMS)

#---- Extra Properties ----#
# Fetched from cpp module on request

#-----------------------------------------------------------------------------#

#---- Required Module Functions ----#
def Keywords(lang_id=0):
    """Returns Specified Keywords List
    @keyword lang_id: used to select specific subset of keywords

    """
    if lang_id == synglob.ID_LANG_PIKE:
        return [PIKE_KW, PIKE_TYPE, PIKE_DOC]
    else:
        return list()

def SyntaxSpec(lang_id=0):
    """Syntax Specifications
    @keyword lang_id: used for selecting a specific subset of syntax specs

    """
    if lang_id == synglob.ID_LANG_PIKE:
        return SYNTAX_ITEMS
    else:
        return list()

def Properties(lang_id=0):
    """Returns a list of Extra Properties to set
    @keyword lang_id: used to select a specific set of properties

    """
    if lang_id == synglob.ID_LANG_PIKE:
        return cpp.Properties(synglob.ID_LANG_CPP)
    else:
        return list()

def CommentPattern(lang_id=0):
    """Returns a list of characters used to comment a block of code
    @keyword lang_id: used to select a specific subset of comment pattern(s)

    """
    if lang_id == synglob.ID_LANG_PIKE:
        return cpp.CommentPattern(synglob.ID_LANG_CPP)
    else:
        return list()

#---- End Required Module Functions ----#

AutoIndenter = cpp.AutoIndenter

#---- Syntax Modules Internal Functions ----#
def KeywordString():
    """Returns the specified Keyword String
    @note: not used by most modules

    """
    return None

#---- End Syntax Modules Internal Functions ----#
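Editra's syntax machinery resolves these module-level functions per language id. A sketch of that lookup, assuming the Editra synglob and cpp modules are importable alongside this file:

import synglob
import pike

print(pike.Keywords(synglob.ID_LANG_PIKE)[0])     # (0, "goto break return ...")
print(pike.CommentPattern(synglob.ID_LANG_PIKE))  # delegated to the cpp module
print(pike.Keywords(-1))                          # [] for any other language id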
jmschrei/yabn
yabn/__init__.py
Python
mit
1,279
0.009382
# __init__.py: Yet Another Bayes Net library
# Contact: Jacob Schreiber ( jmschreiber91@gmail.com )

"""
For detailed documentation and examples, see the README.
"""

# Make our dependencies explicit so compiled Cython code won't segfault trying
# to load them.
import networkx, matplotlib.pyplot, scipy
import numpy as np
import os
import pyximport

# Adapted from Cython docs https://github.com/cython/cython/wiki/
# InstallingOnWindows#mingw--numpy--pyximport-at-runtime
if os.name == 'nt':
    if 'CPATH' in os.environ:
        os.environ['CPATH'] = os.environ['CPATH'] + np.get_include()
    else:
        os.environ['CPATH'] = np.get_include()

    # XXX: we're assuming that MinGW is installed in C:\MinGW (default)
    if 'PATH' in os.environ:
        os.environ['PATH'] = os.environ['PATH'] + ';C:\MinGW\bin'
    else:
        os.environ['PATH'] = 'C:\MinGW\bin'

    mingw_setup_args = {'options': {'build_ext': {'compiler': 'mingw32'}}}
    pyximport.install(setup_args=mingw_setup_args)

elif os.name == 'posix':
    if 'CFLAGS' in os.environ:
        os.environ['CFLAGS'] = os.environ['CFLAGS'] + ' -I' + np.get_include()
    else:
        os.environ['CFLAGS'] = ' -I' + np.get_include()

    pyximport.install()

from yabn import *

__version__ = '0.1.0'
neillc/zookeepr
zkpylons/controllers/secret_hash.py
Python
gpl-2.0
4,319
0.005094
#####
import sys
import inspect

from pylons import config

import logging

import zkpylons.lib.helpers as h

from pylons import request, response, session, tmpl_context as c
from zkpylons.lib.helpers import redirect_to
from pylons.util import class_name_from_module_name
from zkpylons.model import meta
from pylons.controllers.util import abort

from zkpylons.lib.base import BaseController, render

from zkpylons.model import URLHash

log = logging.getLogger(__name__)

class SecretHashController(BaseController):

    def lookup(self, hash):
        c.hash = URLHash.find_by_hash(hash)
        if c.hash is None:
            abort(404, "Sorry, Invalid Hash.")
        return self.transfer(url=c.hash.url)

    # as per http://www.mail-archive.com/pylons-discuss@googlegroups.com/msg06643.html
    def transfer(controller = None, action = None, url = None, **kwargs):
        """usage:
             1. result = transfer(url = "/someurl/someaction")
             2. result = transfer(controller = "/controller1/sub_controller2", action = "test")
                # kwargs will pass to action.
        """
        if (url != None):
            route_map = config['routes.map']
            match_route = route_map.match(url)
            if (match_route == None):
                raise(Exception("no route matched url '%s'" % url))
            # if

            controller = match_route["controller"].replace("/", ".")
            action = match_route["action"]
            del(match_route["controller"])
            del(match_route["action"])
            kwargs.update(match_route)
        else:
            if (controller == None):
                route_map = config['routes.map']
                match_route = route_map.match("/")
                if (match_route == None):
                    raise(Exception("no route matched url '%s'" % url))
                # if

                controller = match_route["controller"].replace("/", ".")
                if (action == None):
                    action = match_route["action"]
                # if

                del(match_route["controller"])
                del(match_route["action"])
                kwargs.update(match_route)
            else:
                controller = controller.replace("/", ".")
                if (action == None):
                    action = "index"
                # if
            # if
        # if

        full_module_name = config['pylons.package'] + '.controllers.' + controller

        __traceback_hide__ = 'before_and_this'
        try:
            __import__(full_module_name)
        except ImportError, e:
            raise(NotImplementedError("'%s' not found: %s" % (controller, e)))
        # try

        module_name = controller.split('.')[-1]
        class_name = class_name_from_module_name(module_name) + 'Controller'
        controller_class = getattr(sys.modules[full_module_name], class_name)
        controller_inst = controller_class()

        if (hasattr(controller_inst, action)):
            action_method = getattr(controller_inst, action, None)
            #if (not isinstance(action_method, types.MethodType)):
            #    raise(NotImplementedError("action '%s' not found in '%s'" % (action, controller)))
            # if

            if (hasattr(controller_inst, "__before__")):
                before_method = getattr(controller_inst, "__before__", None)
                #if (isinstance(before_method, types.MethodType)):
                #    before_method(action)
                # if
            # if

            action_args_name, action_args, action_kargs, action_defaults = inspect.getargspec(action_method)
            del(action_args_name[0])
            call_kargs = {}
            for k, v in kwargs.iteritems():
                if (k in action_args_name):
                    call_kargs[k] = v
                # if
            # for

            result = action_method(**call_kargs)

            if (hasattr(controller_inst, "__after__")):
                after_method = getattr(controller_inst, "__after__", None)
                #if (isinstance(after_method, types.MethodType)):
                #    after_method(action)
                # if
            # if

            return(result)
        else:
            raise(NotImplementedError("action '%s' not found in '%s'" % (action, controller)))
        # if
    # def
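The transfer helper resolves a URL to a controller/action pair through the Routes mapper before importing and invoking the controller. A standalone sketch of that resolution step (requires the routes package; the route itself is illustrative):

from routes import Mapper

map = Mapper()
map.connect('/hash/{hash}', controller='secret_hash', action='lookup')

match = map.match('/hash/deadbeef')
print(match)  # {'controller': 'secret_hash', 'action': 'lookup', 'hash': 'deadbeef'}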
kmike/DAWG-Python
tests/utils.py
Python
mit
479
0.004175
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import os
import zipfile

DEV_DATA_PATH = os.path.join(
    os.path.dirname(__file__),
    '..',
    'dev_data',
)

def data_path(*args):
    """
    Returns a path to dev data
    """
    return os.path.join(DEV_DATA_PATH, *args)

def words100k():
    zip_name = data_path('words100k.txt.zip')
    zf = zipfile.ZipFile(zip_name)
    txt = zf.open(zf.namelist()[0]).read().decode('utf8')
    return txt.splitlines()
omco/mpir
yasm/tools/python-yasm/pyxelator/genpyx.py
Python
lgpl-3.0
18,513
0.022471
#!/usr/bin/env python
""" genpyx.py - parse c declarations

(c) 2002, 2003, 2004, 2005 Simon Burton <simon@arrowtheory.com>
Released under GNU LGPL license.

version 0.xx

This is a module of mixin classes for ir.py .

Towards the end of ir.py our global class definitions
are remapped to point to the class definitions in ir.py .
So, for example, when we refer to Node we get ir.Node .

"""

import sys
from datetime import datetime

# XX use this Context class instead of all those kw dicts !! XX
class Context(object):
    " just a record (struct) "
    def __init__( self, **kw ):
        for key, value in kw.items():
            setattr( self, key, value )
    def __getattr__( self, name ):
        return None # ?
    def __getitem__( self, name ):
        return getattr(self, name)

class OStream(object):
    def __init__( self, filename=None ):
        self.filename = filename
        self.tokens = []
        self._indent = 0
    def put( self, token="" ):
        assert type(token) is str
        self.tokens.append( token )
    def startln( self, token="" ):
        assert type(token) is str
        self.tokens.append( ' '*self._indent + token )
    def putln( self, ln="" ):
        assert type(ln) is str
        self.tokens.append( ' '*self._indent + ln + '\n')
    def endln( self, token="" ):
        assert type(token) is str
        self.tokens.append( token + '\n')
    def indent( self ):
        self._indent += 1
    def dedent( self ):
        self._indent -= 1
        assert self._indent >= 0, self._indent
    def join( self ):
        return ''.join( self.tokens )
    def close( self ):
        s = ''.join( self.tokens )
        f = open( self.filename, 'w' )
        f.write(s)

#
###############################################################################
#

class Node(object):
    """
        tree structure
    """
    _unique_id = 0
    def get_unique_id(cls):
        Node._unique_id += 1
        return Node._unique_id
    get_unique_id = classmethod(get_unique_id)

    # XX toks: use a tree of tokens: a list that can be push'ed and pop'ed XX
    def pyxstr(self,toks=None,indent=0,**kw):
        """
            Build a list of tokens; return the joined tokens string
        """
        if toks is None:
            toks = []
        for x in self:
            if isinstance(x,Node):
                x.pyxstr(toks, indent, **kw)
            else:
                toks.insert(0,str(x)+' ')
        s = ''.join(toks)
        return s

#
#################################################

class Named(object):
    "has a .name property"
    pass

class BasicType(object):
    "float double void char int"
    pass

class Qualifier(object):
    "register signed unsigned short long const volatile inline"
    def pyxstr(self,toks=None,indent=0,**kw):
        if toks is None:
            toks = []
        x = self[0]
        if x not in ( 'const','volatile','inline','register'): # ignore these
            toks.insert(0,str(x)+' ')
        s = ''.join(toks)
        return s

class StorageClass(object):
    "extern static auto"
    def pyxstr(self,toks=None,indent=0,**kw):
        return ""

class Ellipses(object):
    "..."
    pass

class GCCBuiltin(BasicType):
    "things with __builtin prefix"
    pass

class Identifier(object):
    """
    """
    def pyxstr(self,toks=None,indent=0,**kw):
        if toks is None:
            toks=[]
        if self.name:
            toks.append( self.name )
        return " ".join(toks)

class TypeAlias(object):
    """
        typedefed things, eg. size_t
    """
    def pyxstr(self,toks=None,indent=0,cprefix="",**kw):
        if toks is None:
            toks = []
        for x in self:
            if isinstance(x,Node):
                x.pyxstr(toks, indent, cprefix=cprefix, **kw)
            else:
                s = str(x)+' '
                if cprefix:
                    s = cprefix+s
                toks.insert(0,s)
        s = ''.join(toks)
        return s

class Function(object):
    """
    """
    def pyxstr(self,toks,indent=0,**kw):
        #print '%s.pyxstr(%s)'%(self,toks)
        _toks=[]
        assert len(self)
        i=0
        while isinstance(self[i],Declarator):
            if not self[i].is_void():
                _toks.append( self[i].pyxstr(indent=indent, **kw) )
            i=i+1
        toks.append( '(%s)'% ', '.join(_toks) )
        while i<len(self):
            self[i].pyxstr(toks, indent=indent, **kw)
            i=i+1
        return " ".join(toks)

class Pointer(object):
    """
    """
    def pyxstr(self,toks,indent=0,**kw):
        assert len(self)
        node=self[0]
        toks.insert(0,'*')
        if isinstance(node,Function):
            toks.insert(0,'(')
            toks.append(')')
        elif isinstance(node,Array):
            toks.insert(0,'(')
            toks.append(')')
        return Node.pyxstr(self,toks,indent, **kw)

class Array(object):
    """
    """
    def pyxstr(self,toks,indent=0,**kw):
        if self.size is None:
            toks.append('[]')
        else:
            try:
                int(self.size)
                toks.append('[%s]'%self.size)
            except:
                toks.append('[]')
        return Node( *self[:-1] ).pyxstr( toks,indent, **kw )

class Tag(object):
    " the tag of a Struct, Union or Enum "
    pass

class Taged(object):
    "Struct, Union or Enum "
    pass

class Compound(Taged):
    "Struct or Union"
    def pyxstr(self,_toks=None,indent=0,cprefix="",shadow_name=True,**kw):
        if _toks is None:
            _toks=[]
        names = kw.get('names',{})
        kw['names'] = names
        tag_lookup = kw.get('tag_lookup')
        if self.tag:
            tag=self.tag.name
        else:
            tag = ''
        if isinstance(self,Struct):
            descr = 'struct'
        elif isinstance(self,Union):
            descr = 'union'
        _node = names.get(self.tag.name,None)
        if ( _node is not None and _node.has_members() ) or \
            ( _node is not None and not self.has_members() ):
            descr = '' # i am not defining myself here
        #print "Compound.pyxstr", tag
        #print self.deepstr()
        if descr:
            if cprefix and shadow_name:
                tag = '%s%s "%s"'%(cprefix,tag,tag)
            elif cprefix:
                tag = cprefix+tag
            toks = [ descr+' '+tag ] # struct foo
            if self.has_members():
                toks.append(':\n')
                for decl in self[1:]: # XX self.members
                    toks.append( decl.pyxstr(indent=indent+1, cprefix=cprefix,
                                             shadow_name=shadow_name, **kw)+"\n" ) # shadow_name = False ?
            #elif not tag_lookup.get( self.tag.name, self ).has_members():
                # define empty struct here, it's the best we're gonna get
                #pass
        else:
            if cprefix: # and shadow_name:
                tag = cprefix+tag
            toks = [ ' '+tag+' ' ] # foo
        while toks:
            _toks.insert( 0, toks.pop() )
        return "".join( _toks )

class Struct(Compound):
    """
    """
    pass

class Union(Compound):
    """
    """
    pass

class Enum(Taged):
    """
    """
    def pyxstr(self,_toks=None,indent=0,cprefix="",shadow_name=True,**kw):
        if _toks is None:
            _toks=[]
        names = kw.get('names',{})
        kw['names'] = names
        if self.tag:
            tag=self.tag.name
        else:
            tag = ''
        _node = names.get(self.tag.name,None)
        if ( _node is not None and _node.has_members() ) or \
            ( _node is not None and not self.has_members() ):
            descr = '' # i am not defining myself here
        else:
            descr = 'enum'
        if descr:
        #if not names.has_key(self.tag.name):
            toks = [ descr+' '+tag ] # enum foo
            toks.append(':\n')
            idents = [ ident for ident in self.members if ident.name not in names ]
            for ident in idents:
                if cprefix and shadow_name:
                    ident = ident.clone()
                    ident.name = '%s%s "%s"' % ( cprefix, ident.name, ident.name )
sbg/sevenbridges-python
sevenbridges/models/user.py
Python
apache-2.0
2,815
0
import logging

from sevenbridges.meta.resource import Resource
from sevenbridges.meta.fields import HrefField, StringField
from sevenbridges.meta.transformer import Transform

logger = logging.getLogger(__name__)


class User(Resource):
    """
    Central resource for managing users.
    """
    _URL = {
        'me': '/user',
        'get': '/users/{id}',
        'query': '/users',
        'delete': '/users/{username}'
    }

    href = HrefField(read_only=True)
    username = StringField(read_only=True)
    email = StringField(read_only=True)
    first_name = StringField(read_only=True)
    last_name = StringField(read_only=True)
    affiliation = StringField(read_only=True)
    phone = StringField(read_only=True)
    address = StringField(read_only=True)
    state = StringField(read_only=True)
    country = StringField(read_only=True)
    zip_code = StringField(read_only=True)
    city = StringField(read_only=True)
    role = StringField(read_only=True)

    def __eq__(self, other):
        if type(other) is not type(self):
            return False
        return self is other or self.username == other.username

    def __str__(self):
        return f'<User: username={self.username}>'

    @classmethod
    def me(cls, api=None):
        """
        Retrieves current user information.
        :param api: Api instance.
        :return: User object.
        """
        api = api if api else cls._API
        extra = {
            'resource': cls.__name__,
            'query': {}
        }
        logger.info('Fetching user information', extra=extra)
        user_data = api.get(cls._URL['me']).json()
        return User(api=api, **user_data)

    @classmethod
    def get(cls, user, api=None):
        api = api if api else cls._API
        user = Transform.to_user(user)
        return super().get(id=user, api=api)

    @classmethod
    def query(cls, division, role=None, offset=None, limit=None, api=None):
        """Query division users
        :param division: Division slug.
        :param role: User role in division.
        :param offset: Pagination offset.
        :param limit: Pagination limit.
        :param api: Api instance.
        :return: Collection object.
        """
        api = api or cls._API
        params = {
            'division': Transform.to_division(division),
        }
        if role:
            params['role'] = role

        return super()._query(
            url=cls._URL['query'], api=api, offset=offset, limit=limit,
            **params
        )

    def disable(self, api=None):
        """
        Disable user
        :param api: Api instance.
        :return:
        """
        api = api or self._API
        api.delete(
            url=self._URL['delete'].format(username=self.username)
        )
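In practice these classmethods are reached through an Api session rather than called directly. A short sketch (the endpoint and token are placeholders; api.users.me() is the library's documented spelling of User.me(api=api)):

import sevenbridges as sbg

api = sbg.Api(url='https://api.sbgenomics.com/v2', token='<auth-token>')
me = api.users.me()
print(me.username, me.email)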
funkypawz/MakerRobot
peewee-master/playhouse/tests/test_apsw.py
Python
gpl-3.0
4,048
0.002223
import apsw
import datetime

from playhouse.apsw_ext import *
from playhouse.tests.base import ModelTestCase

db = APSWDatabase(':memory:')

class BaseModel(Model):
    class Meta:
        database = db

class User(BaseModel):
    username = CharField()

class Message(BaseModel):
    user = ForeignKeyField(User)
    message = TextField()
    pub_date = DateTimeField()
    published = BooleanField()

class APSWTestCase(ModelTestCase):
    requires = [Message, User]

    def test_db_register_functions(self):
        result = db.execute_sql('SELECT date_part(?, ?)', (
            'day', '2015-01-02 03:04:05')).fetchone()[0]
        self.assertEqual(result, 2)

        result = db.execute_sql('SELECT date_trunc(?, ?)', (
            'day', '2015-01-02 03:04:05')).fetchone()[0]
        self.assertEqual(result, '2015-01-02')

    def test_db_pragmas(self):
        test_db = APSWDatabase(':memory:', pragmas=(
            ('cache_size', '1337'),
        ))
        test_db.connect()

        cs = test_db.execute_sql('PRAGMA cache_size;').fetchone()[0]
        self.assertEqual(cs, 1337)

    def test_select_insert(self):
        users = ('u1', 'u2', 'u3')
        for user in users:
            User.create(username=user)

        self.assertEqual([x.username for x in User.select()],
                         ['u1', 'u2', 'u3'])
        self.assertEqual([x.username for x in User.select().filter(username='x')], [])
        self.assertEqual([x.username for x in User.select().filter(username__in=['u1', 'u3'])],
                         ['u1', 'u3'])

        dt = datetime.datetime(2012, 1, 1, 11, 11, 11)
        Message.create(user=User.get(username='u1'), message='herps',
                       pub_date=dt, published=True)
        Message.create(user=User.get(username='u2'), message='derps',
                       pub_date=dt, published=False)

        m1 = Message.get(message='herps')
        self.assertEqual(m1.user.username, 'u1')
        self.assertEqual(m1.pub_date, dt)
        self.assertEqual(m1.published, True)

        m2 = Message.get(message='derps')
        self.assertEqual(m2.user.username, 'u2')
        self.assertEqual(m2.pub_date, dt)
        self.assertEqual(m2.published, False)

    def test_update_delete(self):
        u1 = User.create(username='u1')
        u2 = User.create(username='u2')

        u1.username = 'u1-modified'
        u1.save()

        self.assertEqual(User.select().count(), 2)
        self.assertEqual(User.get(username='u1-modified').id, u1.id)

        u1.delete_instance()
        self.assertEqual(User.select().count(), 1)

    def test_transaction_handling(self):
        dt = datetime.datetime(2012, 1, 1, 11, 11, 11)

        def do_ctx_mgr_error():
            with db.transaction():
                User.create(username='u1')
                raise ValueError

        self.assertRaises(ValueError, do_ctx_mgr_error)
        self.assertEqual(User.select().count(), 0)

        def do_ctx_mgr_success():
            with db.transaction():
                u = User.create(username='test')
                Message.create(message='testing', user=u, pub_date=dt,
                               published=1)

        do_ctx_mgr_success()
        self.assertEqual(User.select().count(), 1)
        self.assertEqual(Message.select().count(), 1)

        @db.commit_on_success
        def create_error():
            u = User.create(username='test')
            Message.create(message='testing', user=u, pub_date=dt,
                           published=1)
            raise ValueError

        self.assertRaises(ValueError, create_error)
        self.assertEqual(User.select().count(), 1)

        @db.commit_on_success
        def create_success():
            u = User.create(username='test')
            Message.create(message='testing', user=u, pub_date=dt,
                           published=1)

        create_success()
        self.assertEqual(User.select().count(), 2)
        self.assertEqual(Message.select().count(), 2)

    def test_exists_regression(self):
        User.create(username='u1')
        self.assertTrue(User.select().where(User.username == 'u1').exists())
        self.assertFalse(User.select().where(User.username == 'ux').exists())
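Outside the test harness, the APSW backend drops into an ordinary peewee model workflow. A compact sketch, assuming the apsw package is installed so playhouse.apsw_ext imports cleanly:

from playhouse.apsw_ext import APSWDatabase, Model, CharField

db = APSWDatabase(':memory:')

class Note(Model):
    text = CharField()

    class Meta:
        database = db

db.connect()
db.create_tables([Note])
Note.create(text='hello')
print(Note.select().count())  # 1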
pulkitpahwa/smart-image-coordinates
smarter/base/views.py
Python
mit
9,603
0.001041
from django.shortcuts import render_to_response, get_object_or_404
from django.http import JsonResponse, HttpResponseRedirect
from django.template import RequestContext
from django.core.urlresolvers import reverse
from django.views.decorators.csrf import csrf_exempt
from django.db.models import Avg
from django.db import IntegrityError

from .models import (Category, TemplateFormat, TemplateElement, Document,
                     ExtractedElements)
from .forms import CreateCategoryForm, TemplateFormatForm, DocumentForm
from .serializers import get_templates_for_category

import json


def home(request):
    """
    View for home page.
    """
    return render_to_response("home.html", {},
                              context_instance=RequestContext(request))


def get_all_formats(request, category):
    """
    View to get all template formats that exist for the particular category
    """
    try:
        category = Category.objects.get(slug=category)
    except Category.DoesNotExist:
        message = "Invalid category selected"
        return JsonResponse({"error": "true", "message": message})
    all_templates = get_templates_for_category(category)
    return JsonResponse({"error": "false", "data": all_templates})


def create_category(request):
    """
    View to create category
    """
    if request.method == "GET":
        form = CreateCategoryForm()
        return render_to_response("create_category.html", {"form": form},
                                  context_instance=RequestContext(request))
    elif request.method == "POST":
        form = CreateCategoryForm(data=request.POST)
        if not form.is_valid():
            return render_to_response("create_category.html",
                                      {"form": form, "errors": form.errors},
                                      context_instance=RequestContext(request))
        try:
            category = Category.objects.create(
                category_name=form.cleaned_data['category_name'],
                description=form.cleaned_data['description'])
        except IntegrityError:
            message = "Category with the same name already exist"
            return render_to_response("create_category.html",
                                      {"form": form, "errors": message},
                                      context_instance=RequestContext(request))
        redirect_url = reverse('create-template')
        redirect_url += "?categ=%s" % (category.slug)
        return HttpResponseRedirect(redirect_url)


def create_template_format(request):
    """
    View to create new template format.
    """
    if request.method == "GET":
        form = TemplateFormatForm()
        return render_to_response("create_format.html", {"form": form},
                                  context_instance=RequestContext(request))
    elif request.method == "POST":
        form = TemplateFormatForm(data=request.POST)
        if not form.is_valid():
            return render_to_response("create_format.html",
                                      {"form": form, "errors": form.errors},
                                      context_instance=RequestContext(request))
        category = get_object_or_404(Category, slug=form.cleaned_data['category'])
        try:
            template = TemplateFormat.objects.create(
                category=category,
                template_name=form.cleaned_data['template_name']
            )
        except IntegrityError:
            message = "Template Name Already exist"
            return render_to_response("create_format.html",
                                      {"form": form, "errors": message},
                                      context_instance=RequestContext(request))
        redirect_url = reverse('upload_document')
        redirect_url += "?categ=%s&format=%s" % (category.slug, template.slug)
        return HttpResponseRedirect(redirect_url)


def upload_document(request):
    """
    View for handling document upload
    """
    if request.method == "GET":
        form = DocumentForm()
        return render_to_response("upload_document.html", {"form": form},
                                  context_instance=RequestContext(request))
    elif request.method == "POST":
        form = DocumentForm(request.POST, request.FILES)
        if not form.is_valid():
            return render_to_response("upload_document.html",
                                      {"form": form, "errors": form.errors},
                                      context_instance=RequestContext(request))
        template = get_object_or_404(TemplateFormat,
                                     slug=form.cleaned_data['template'])
        document = Document.objects.create(
            template_format=template,
            document_name=form.cleaned_data['document_name'],
            document=request.FILES['document']
        )
        return HttpResponseRedirect(
            reverse('particular_document', kwargs={"unique_id": document.id}
                    ))


@csrf_exempt
def particular_document(request, unique_id):
    """
    View to display a particular document and let the end user to select
    elements from it on the frontend and save them
    """
    document = get_object_or_404(Document, id=unique_id)
    all_elements = document.template_format.templateelement_set.all()
    if request.method == "GET":
        if document.extractedelements_set.all().count() > 0:
            return HttpResponseRedirect(reverse('preview_document',
                                                kwargs={"unique_id": document.id}))
        return render_to_response('document_selector.html',
                                  {"document": document,
                                   "elements": all_elements},
                                  context_instance=RequestContext(request))
    elif request.method == "POST":
        data = json.loads(json.loads(request.POST['data']))
        if document.image_resolution_x and document.image_resolution_y:
            pass
        else:
            document.image_resolution_x = data["image_width"]
            document.image_resolution_y = data["image_height"]
            document.save()
        template = document.template_format
        document.extractedelements_set.all().delete()
        for element_name in data["elements"]:
            element = TemplateElement.objects.get_or_create(
                template=template, element_name=element_name)[0]
            extracted_element = ExtractedElements.objects.get_or_create(
                document=document, element=element)[0]
            extracted_element.x1_coordinate = data[element_name]["x"]
            extracted_element.y1_coordinate = data[element_name]["y"]
            extracted_element.block_width = data[element_name]["width"]
            extracted_element.block_height = data[element_name]["height"]
            extracted_element.save()
        return JsonResponse({"error": "false",
                             "message": "Successfully saved elements"})


def all_documents(request):
    """
    View to display all documents
    """
    documents = Document.objects.all()
    if request.method == "GET":
        return render_to_response("all_documents.html",
                                  {"documents": documents},
                                  context_instance=RequestContext(request))


def document_preview(request, unique_id):
    """
    View to preview/ update a document. Any document for which the elements
    have been created is eligible for preview/ update
    """
    document = get_object_or_404(Document, id=unique_id)
    elements = document.template_format.templateelement_set.all()
    return render_to_response("document_elements.html",
                              {"document": document, "elements": elements},
                              context_instance=RequestContext(re
Yipit/pyeqs
tests/unit/test_connection.py
Python
mit
4,100
0.002439
# -*- coding: utf-8 -*- from __future__ import unicode_literals import httpretty import json import sure from pyeqs import QuerySet, Filter from pyeqs.dsl import Term, Sort, ScriptScore from tests.helpers import homogeneous @httpretty.activate def test_create_queryset_with_host_string(): """ Create a queryset with a host given as a string """ # When I create a queryset t = QuerySet("localhost", index="bar") # And I have records response = { "took": 1, "hits": { "total": 1, "max_score": 1, "hits": [ { "_index": "bar", "_type": "baz", "_id": "1", "_score": 10, "_source": { "foo": "bar" }, "sort": [ 1395687078000 ] } ] } } httpretty.register_uri(httpretty.GET, "http://localhost:9200/bar/_search", body=json.dumps(response), content_type="application/json") # When I run a query results = t[0:1] # Then I see the response. len(results).should.equal(1) @httpretty.activate def test_create_queryset_with_host_dict(): """ Create a queryset with a host given as a dict """ # When I create a queryset connection_info = {"host": "localhost", "port": 8080} t = QuerySet(connection_info, index="bar") # And I have records good_response = { "took": 1, "hits": { "total": 1, "max_score": 1, "hits": [ { "_index": "bar", "_type": "baz", "_id": "1", "_score": 10, "_source": { "foo": "bar" }, "sort": [ 1395687078000 ] } ] } } bad_response = { "took": 1, "hits": { "total": 0, "max_score": None, "hits": [] } } httpretty.register_uri(httpretty.GET, "http://localhost:9200/bar/_search", body=json.dumps(bad_response), content_type="application/json") httpretty.register_uri(httpretty.GET, "http://localhost:8080/bar/_search", body=json.dumps(good_response), content_type="application/json") # When I run a query results = t[0:1] # Then I see the res
ponse. len(results).should.equal(1) results[0]["_source"]["foo"].should.equal("bar") @httpretty.activate def test_create_queryset_with_host_list(): """ Create a queryset with a host given as a list """ # When I create a queryset connection_info = [{"host": "localhost", "port": 8080}] t = QuerySet(conne
ction_info, index="bar") # And I have records good_response = { "took": 1, "hits": { "total": 1, "max_score": 1, "hits": [ { "_index": "bar", "_type": "baz", "_id": "1", "_score": 10, "_source": { "foo": "bar" }, "sort": [ 1395687078000 ] } ] } } bad_response = { "took": 1, "hits": { "total": 0, "max_score": None, "hits": [] } } httpretty.register_uri(httpretty.GET, "http://localhost:9200/bar/_search", body=json.dumps(bad_response), content_type="application/json") httpretty.register_uri(httpretty.GET, "http://localhost:8080/bar/_search", body=json.dumps(good_response), content_type="application/json") # When I run a query results = t[0:1] # Then I see the response. len(results).should.equal(1) results[0]["_source"]["foo"].should.equal("bar")
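The three tests above exercise one entry point with three spellings of the connection argument. A minimal sketch of the pattern, assuming the same pyeqs API the tests use (the index name "bar" is the tests' own fixture value):

from pyeqs import QuerySet

# Host as a plain string; the tests show this hitting port 9200
qs = QuerySet("localhost", index="bar")

# Host as a dict with an explicit port
qs = QuerySet({"host": "localhost", "port": 8080}, index="bar")

# Host as a list of dicts (e.g. several nodes)
qs = QuerySet([{"host": "localhost", "port": 8080}], index="bar")

# Slicing executes the search and returns the hit list
results = qs[0:1]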
cga-harvard/cga-worldmap
geonode/sitemap.py
Python
gpl-3.0
398
0.005025
from django.contrib.sitemaps import Sitemap from geonode.maps.models import Layer, Map class LayerSitemap(Sitemap): changefreq = "never" priority = 0.5 def items(self): return Layer.objects.all() de
f lastmod
(self, obj): return obj.date class MapSitemap(Sitemap): changefreq = "never" priority = 0.5 def items(self): return Map.objects.all()
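LayerSitemap and MapSitemap only take effect once registered in a URLconf. A minimal wiring sketch, assuming the stock django.contrib.sitemaps view of that Django era (the URL pattern itself is an assumption, not part of this module):

from django.conf.urls import url
from django.contrib.sitemaps.views import sitemap

from geonode.sitemap import LayerSitemap, MapSitemap

sitemaps = {"layers": LayerSitemap, "maps": MapSitemap}

urlpatterns = [
    url(r"^sitemap\.xml$", sitemap, {"sitemaps": sitemaps}, name="sitemap"),
]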
google-research/google-research
smu/parser/smu_atomic_input_verifier.py
Python
apache-2.0
2,945
0.006452
# coding=utf-8 # Copyright 2022 The Google Research Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Lint as: python3 """Verifies that we can correctly generate atomic2 input files.""" import os from absl import app from absl import flags from absl import logging from tensorflow.io import gfile from smu.parser import smu_parser_lib from smu.parser import smu_writer_lib flags.DEFINE_string( 'input_glob', None, 'Glob of .dat files to read. ' 'These files are expected to be in the SMU file format provided by Uni Basel.' ) flags.DEFINE_string( 'atomic_input_dir', None, 'Directory containing .inp files named like x07_c2n2f3h3.253852.001.inp. ' 'These are the desired outputs') flags.DEFINE_string('output_dir', None, 'If given, directory to write files with mismatches') flags.mark_flag_as_required('input_glob') flags.mark_flag_as_required('atomic_input_dir') FLAGS = flags.FLAGS def main(argv): if len(argv) > 1: raise app.UsageError('Too many command-line arguments.') atomic_writer = smu_writer_lib.AtomicInputWriter() file_count = 0 conformer_count = 0 mismatches = 0 for filepath in gfile.glob(FLAGS.input_glob): logging.info('Processing file %s', filepath) file_count += 1 smu_parser = smu_parser_lib.SmuParser(filepath) for conformer, _ in smu_parser.process_stage2(): conformer_count += 1 actual_contents = atomic_writer.process(conformer) expected_fn = atomic_writer.get_filename_for_atomic_input(conformer) with gfile.GFile(os.path.join(FLAGS.atomic_input_dir, expected_fn)) as expected_f: expected_contents = expected_f.readlines() try: smu_writer_lib.check_dat_formats_match(expected_contents, actual_contents.splitlines()) except smu_w
riter_lib
.DatFormatMismatchError as e: mismatches += 1 print(e) if FLAGS.output_dir: with gfile.GFile( os.path.join( FLAGS.output_dir, atomic_writer.get_filename_for_atomic_input(conformer)), 'w') as f: f.write(actual_contents) status_str = ('COMPLETE: Read %d files, %d conformers, %d mismatches\n' % (file_count, conformer_count, mismatches)) logging.info(status_str) print(status_str) if __name__ == '__main__': app.run(main)
lm-tools/situational
situational/apps/travel_report/migrations/0001_initial.py
Python
bsd-3-clause
1,406
0.002845
# -*- coding: utf-8 -*- from __future__ import unicode_literals from django.db import models, migrations import django.utils.timezone import jsonfield.fields import model_utils.fields class Migration(migrations.Migration): dependencies = [ ('travel_times', '0002_auto_20150717_1221'), ] operations = [ migrations.CreateModel( name='Report', fields=[
('id', models.AutoField(primary_key=True, serialize=False, auto_cre
ated=True, verbose_name='ID')), ('created', model_utils.fields.AutoCreatedField(editable=False, default=django.utils.timezone.now, verbose_name='created')), ('modified', model_utils.fields.AutoLastModifiedField(editable=False, default=django.utils.timezone.now, verbose_name='modified')), ('postcode', models.CharField(max_length=14)), ('place_name', models.CharField(max_length=255, blank=True)), ('location_json', jsonfield.fields.JSONField()), ('top_categories', jsonfield.fields.JSONField()), ('top_companies', jsonfield.fields.JSONField()), ('latest_jobs', jsonfield.fields.JSONField()), ('travel_times_map', models.ForeignKey(to='travel_times.TravelTimesMap')), ], options={ 'abstract': False, }, ), ]
dmlc/tvm
python/tvm/topi/math.py
Python
apache-2.0
15,214
0.000329
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """Elementwise operators""" # pylint: disable=redefined-builtin import tvm from tvm import te from . import tag from . import cpp @tvm.te.tag_scope(tag=tag.ELEMWISE) def identity(x): """Take identity of input x. Parameters ---------- x : tvm.te.Tensor Input argument. Returns ------- y : tvm.te.Tensor The result. """ # pylint: disable=unnecessary-lambda return te.compute(x.shape, lambda *i: x(*i)) @tvm.te.tag_scope(tag=tag.ELEMWISE) def negative(x): """Take negation of input x. Parameters ---------- x : tvm.te.Tensor Input argument. Returns ------- y : tvm.te.Tensor The result. """ # pylint: disable=unnecessary-lambda return te.compute(x.shape, lambda *i: -x(*i)) @tvm.te.tag_scope(tag=tag.ELEMWISE) def exp(x): """Take exponential of input x. Parameters ---------- x : tvm.te.Tensor Input argument. Returns ------- y : tvm.te.Tensor The result. """ return te.compute(x.shape, lambda *i: te.exp(x(*i))) @tvm.te.tag_scope(tag=tag.ELEMWISE) def erf(x): """Take gauss error function of input x. Parameters ---------- x : tvm.te.Tensor Input argument. Returns ------- y : tvm.te.Tensor The result. """ return te.compute(x.shape, lambda *i: te.erf(x(*i))) @tvm.te.tag_scope(tag=tag.ELEMWISE) def tanh(x): """Take hyperbolic tanh of input x. Parameters ---------- x : tvm.te.Tensor Input argument. Returns ------- y : tvm.te.Tensor The result. """ return te.compute(x.shape, lambda *i: te.tanh(x(*i))) @tvm.te.tag_scope(tag=tag.ELEMWISE) def tan(x): """Take tan of input x. Parameters ---------- x : tvm.te.Tensor Input argument. Returns ------- y : tvm.te.Tensor The result. """ return te.compute(x.shape, lambda *i: te.tan(x(*i))) @tvm.te.tag_scope(tag=tag.ELEMWISE) def cos(x): """Take cos of input x. Parameters ---------- x : tvm.te.Tensor Input argument. Returns ------- y : tvm.te.Tensor The result. """ return te.compute(x.shape, lambda *i: te.cos(x(*i))) @tvm.te.tag_scope(tag=tag.ELEMWISE) def cosh(x): """Take cosh of input x. Parameters ---------- x : tvm.te.Tensor Input argument. Returns ------- y : tvm.te.Tensor The result. """ return te.compute(x.shape, lambda *i: te.cosh(x(*i))) @tvm.te.tag_scope(tag=tag.ELEMWISE) def sin(x): """Take sin of input x. Parameters ---------- x : tvm.te.Tensor Input argument. Returns ------- y : tvm.te.Tensor The result. """ return te.compute(x.shape, lambda *i: te.sin(x(*i))) @tvm.te.tag_scope(tag=tag.ELEMWISE) def sinh(x): """Take sinh of input x. Parameters ---------- x : tvm.te.Tensor Input argument. Returns ------- y : tvm.te.Tensor The result. """ return te.compute(x.shape, lambda *i: te.sinh(x(*i))) @tvm.te.tag_scope(tag=tag.ELEMWISE) def acos(x): """Take arc cos of input x. Parameters ---------- x : tvm.te.Tensor Input argument. Returns ------- y : tvm.te.Tensor The result. 
""" return te.compute(x.shape, lambda *i: te.acos(x(*i))) @tvm.te.tag_scope(tag=tag.ELEMWISE) def acosh(x): """Take arc cosh of input x. Parameters ---------- x : tvm.te.Tensor Input argument. Returns ------- y : tvm.te.Tensor The result. """ return te.compute(x.shape, lambda *i: te.acosh(x(*i))) @tvm.te.tag_scope(tag=tag.ELEMWISE) def asin(x): """Take arc sin of input x. Parameters ---------- x : tvm.te.Tensor Input argument. Returns ------- y : tvm.te.Tensor The result. """ return te.compute(x.shape, lambda *i: te.asin(x(*i))) @tvm.te.tag_scope(tag=tag.ELEMWISE) def asinh(x): """Take arc sinh of input x. Parameters ---------- x : tvm.te.Tensor Input argument. Returns ------- y : tvm.te.Tensor The result. """ return te.compute(x.shape, lambda *i: te.asinh(x(*i))) @tvm.te.tag_scope(tag=tag.ELEMWISE) def atan(x): """Take atan of input x. Parameters ---------- x : tvm.te.Tensor Input argument. Returns ------- y : tvm.te.Tensor The result. """ return te.compute(x.shape, lambda *i: te.atan(x(*i))) @tvm.te.tag_scope(tag=tag.ELEMWISE) def atanh(x): """Take atanh of input x. Parameters ---------- x : tvm.te.Tensor Input argument. Returns ------- y : tvm.te.Tensor The result. """ return te.compute(x.shape, lambda *i: te.atanh(x(*i))) @tvm.te.tag_scope(tag=tag.ELEMWISE) def floor(x): """Take floor of input x. Parameters ---------- x : tvm.te.Tensor Input argument. Returns ------- y : tvm.te.Tensor The result. """ return te.compute(x.shape, lambda *i: te.floor(x(*i))) @tvm.te.tag_scope(tag=tag.ELEMWISE) def ceil(x): """Take ceil of input x. Parameters ---------- x : tvm.te.Tensor Input argument. Returns ------- y : tvm.te.Tensor The result. """ return te.compute(x.shape, lambda *i: te.ceil(x(*i))) def sign(x): """Returns -1, 0, 1 based on sign of x. Parameters ---------- x : tvm.te.Tensor Input argument. Returns ------- y : tvm.te.Tensor The result. """ return cpp.sign(x) @tvm.te.tag_scope(tag=tag.ELEMWISE) def trunc(x): """Take truncated value of the input of x, element-wise. Parameters ---------- x : tvm.te.Tensor Input argument. Returns ------- y : tvm.te.Tensor The result. """ return te.compute(x.shape, lambda *i: te.trunc(x(*i))) @tvm.te.tag_scope(tag=tag.ELEMWISE) def abs(x): """Take absolute value of the input of x, element-wise. Parameters ---------- x : tvm.te.Tensor Input argument. Returns ------- y : tvm.te.Tensor The result. """ return te.compute(x.shape, lambda *i: te.abs(x(*i))) @tvm.te.tag_scope(tag=tag.ELEMWISE) def isnan(x): """Check if value of x is NaN, element-wise. Parameters ---------- x : tvm.te.Tensor Input argument. Returns ------- y : tvm.te.Tensor The result. """ return te.compute(x.shape, lambda *i: te.isnan(x(*i))) @tvm.te.tag_scope(tag=tag.ELEMWISE) def isfinite(x): """Check if value of x is finite, element-wise. Parameters ---------- x : tvm.te.Tensor Input argument. Returns ------- y : tvm.te.Tensor The result. """ return te.compute(x.shape, lambda *i: te.isfinite(x(*
i))) @tvm.te.tag_scope(tag=tag.ELEMWISE) def isinf(x): """Check if value of x is infinite, element-wise. Parameters ---------- x : tvm
.te.Tensor Input argument. Returns ------- y : tvm.te.Tensor The result. """ return te.compute(x.shape, lambda *i: te.isinf(x(*i))) @tvm.te.tag_scope(tag=tag.ELEMWISE) def round(x): """Round elements of x to nearest in
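Every wrapper in this module is a thin te.compute closure carrying an ELEMWISE tag. A minimal sketch of lowering and running one of them, assuming an LLVM-enabled TVM build (the size and the default schedule are illustrative):

import numpy as np
import tvm
from tvm import te, topi

n = 1024
x = te.placeholder((n,), name="x", dtype="float32")
y = topi.exp(x)                      # the tagged elementwise op defined above
s = te.create_schedule(y.op)         # default schedule, no optimization
f = tvm.build(s, [x, y], target="llvm")

dev = tvm.cpu(0)
a = tvm.nd.array(np.random.rand(n).astype("float32"), dev)
b = tvm.nd.empty((n,), "float32", dev)
f(a, b)
# .asnumpy() instead of .numpy() on older TVM releases
np.testing.assert_allclose(b.numpy(), np.exp(a.numpy()), rtol=1e-5)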
opentext/storyteller
docplatform/distribution/py/pfdesigns/javascript/lint.py
Python
apache-2.0
1,390
0.010791
import docapi import docbuilder import testtools g_trace = """\ lint-syntax.js:3:32 JSLintWarning: Unexpected trailing space. lint-syntax.js:7:10 JSLintWarni
ng: Redefinition of 'some_function' from line 2. lint-syntax.js:8:16 JSLintWarning: Expected '{' and instead saw 'return'. lint-syntax.js:9:2 JSLintWarning: Use spaces, not tabs. lint-semantics.js:
3:10 JSLintWarning: Unused 'find_key'. lint-semantics.js:3:20 JSLintWarning: Unexpected space between '(' and 'obj'. lint-semantics.js:3:29 JSLintWarning: Unexpected space between 'val' and ')'. lint-semantics.js:5:5 JSLintWarning: Unexpected 'for'. lint-semantics.js:5:8 JSLintWarning: Expected one space between 'for' and '('. lint-semantics.js:5:10 JSLintWarning: Unexpected space between '(' and 'key'. lint-semantics.js:5:21 JSLintWarning: Unexpected space between 'obj' and ')'. lint-semantics.js:6:11 JSLintWarning: Expected one space between 'if' and '('. lint-semantics.js:6:13 JSLintWarning: Unexpected space between '(' and 'obj'. lint-semantics.js:7:26 JSLintWarning: Unexpected trailing space. """ def DefineDocument( doc ): docbuilder.parse( __file__.replace( '.py', '.xml' ), doc ) doc.MakeReadOnly() view = testtools.create_view_with_default_procctx( doc, None, docapi.IProcessingContext.PM_RUNTIME_STREAM ) testtools.should_trace( lambda: view.Update(), g_trace, False )
brake/python-utl
utl/files.py
Python
mit
6,385
0.000783
#!/usr/bin/env python # -*- coding: UTF-8 -*- # ------------------------------------------------------------------------------ # Name: files.py # Package: utl # Project: utl # # Created: 10.10.13 11:37 # Copyright 2013-2016 © Constantin Roganov # License: The MIT License # ------------------------------------------------------------------------------ # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. # ------------------------------------------------------------------------------ """File related utilities""" from __future__ import absolute_import, unicode_literals, print_function from builtins import * import os import fileinput import functools from .text import lines_parser, lines_stripped __author__ = 'Constantin Roganov' binary_file = functools.partial(open, mode='rb') binary_file.__doc__ = 'Open binary file for reading' writable_binary_file = functools.partial(open, mode='wb') writable_binary_file.__doc__ = 'Open binary file for writing' text_file = functools.partial(open, mode='r') text_file.__doc__ = 'Open text file for reading' writable_text_file = functools.partial(open, mode='w') writable_text_file.__doc__ = 'Open text file for writing' utf8_bom_text_file = functools.partial(open, mode='r', encoding='utf_8_sig') utf8_bom_text_file.__doc__ = 'Open UTF8 text file with BOM for reading' def file_lines_count(filename): """Count lines in a text file""" # source: # http://stackoverflow.com/questions/845058/how-to-get-line-count-cheaply-in-python with open(filename) as fo: lines = 0 buf_size = 1024 * 1024 read_f = fo.read # loop optimization file_has_data = False buf = read_f(buf_size) if buf: file_has_data = True while buf: lines += buf.count('\n') buf = read_f(buf_size) # nonempty file has 1 line at least if file_has_data: lines += 1 return lines def _reverse_blocks_generator(fd, block_size=4096): """Return a generator which reads the file as a series of blocks from the tail of the file up to the head. The data itself is in normal order, only the order of the blocks is reversed. i.e. "hello world" -> ["ld","wor", "lo ", "hel"] Note that the file must be opened in binary mode. """ # source: # http://cybervadim.blogspot.ru/2009/10/reverse-file-iterator-in-python.html if 'b' not in fd.mode.lower(): raise TypeError('File must be opened in binary mode') size = os.stat(fd.name).st_size fullblocks, lastblock = divmod(size, block_size) # The first (end-of-file) block will be short, since this leaves # the rest aligned on a blocksize boundary. 
This may be more # efficient than having the last (first in file) block be short fd.seek(-lastblock, os.SEEK_END) yield fd.read(lastblock) for i in range(fullblocks - 1, -1, -1): fd.seek(i * block_size) yield fd.read(block_size) def reverse_lines(fd, keepends=False, block_size=4096, encoding='ascii'): """Iterate through the lines of a file in reverse order. If keepends is true, line endings are kept as part of the line. Return generator. """ # source: # http://cybervadim.blogspot.ru/2009/10/reverse-file-iterator-in-python.html buf = '' for block in _reverse_blocks_generator(fd, block_size): buf = block.decode(encoding) + buf lines = buf.splitlines(keepends) # Return all lines except the first (since may be partial) if lines: lines.reverse() buf = lines.pop() # Last line becomes end of new first line. for line in lines: yield line yield buf # First line. def filelist_processor(iterable, parse_line, progress_co=None): """Generator of parsed lines from each text file (path) in iterable. iterable - sequence of file paths or None (there sys.argv[1:] will be used) parse_line - callable for processing of single line progress_co - coroutine with API like below: progress_co = progress_generator() progress_co.send((filename, lines_r
ead, line
s_total, lines_processed)) ... progress_co.send(lines_saved) # finalizing work Generates output data in format produced by parse_line() """ files = None if iterable is None else lines_stripped(iterable) inp = fileinput.input(files=files) pth, name, lines_total = (None, ) * 3 for stats, data in lines_parser(lines_stripped(inp), parse_line): if inp.isfirstline() or inp.filename() != pth: pth = inp.filename() name = os.path.basename(pth) lines_total = file_lines_count(pth) if progress_co: progress_co.send((name, inp.filelineno(), lines_total, stats.processed)) yield data def offset_iter(fd): r"""Generator of pairs (offset_from_beginning_of_file, string) for file object 'fd'. """ # source: http://bytes.com/topic/python/answers/855199-file-tell-loop tell = fd.tell readline = fd.readline while True: addr = tell() line = readline() if not line: break yield addr, line
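The helpers above compose directly; a minimal usage sketch, assuming a local text file named example.log (the filename is illustrative):

from utl.files import binary_file, file_lines_count, reverse_lines

print(file_lines_count("example.log"))      # counts newlines in 1 MiB chunks

# reverse_lines insists on a binary-mode handle (it inspects fd.mode)
with binary_file("example.log") as fd:
    for line in reverse_lines(fd, encoding="utf-8"):
        print(line)                         # last line first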
ARTbio/tools-artbio
scripts/helper_scripts/rename_history_items/rename_hist_items.py
Python
mit
2,334
0.008997
#!/usr/bin/env python2.7 from bioblend.galaxy import GalaxyInstance import requests import datetime import argparse requests.packages.urllib3.disable_warnings() def parse_args(): args = argparse.ArgumentParser(description="Rename history items using a tabular file." +"\n" + "Example usage: python rename_hist_items.py -url misssissippi.snv.jussieu.fr \ -key $your_api_key -hid $your_history_id -table $your_tabular_file \n \ See test-data/sample_table.tab for an example file.") args.add_argument("-url", "--galaxy_url", required=Tru
e, help="url of galaxy instance") args.add_argument("-key", "--api_key", required=True, help="api key for galaxy instance" ) args.add_argument("-hid", "--history_id", required=True, help="History id of hitory containing files to be renamed") args.add_argument("-table", "--rename_table", required=True, type=file, help="tab-sep
arated file with first column the current filename,\ and second column the desired name") return args.parse_args() def return_datetime(string_representation): """ Parse a string timestamp into a datetime, e.g. to find the last modified history. Currently unused, may be used in the future. """ date, time = string_representation.split('T') return datetime.datetime.strptime(date + ' ' + time, "%Y-%m-%d %H:%M:%S.%f") def get_rename_list(rename_table): return [(line.split('\t')[0], line.split('\t')[1].strip()) for line in rename_table] def get_instance(url, api_key): return GalaxyInstance(url, api_key) def get_name_id_d(gi, hid): return {dataset[u'name']: dataset[u'id'] for dataset in gi.histories.show_history(hid, contents=True)} def update_names(gi, hid, rename_list, name_id_d): for old_name, new_name in rename_list: dataset_id = name_id_d[old_name] gi.histories.update_dataset(history_id=hid, dataset_id=dataset_id, name=new_name) def main(): args = parse_args() hid = args.history_id rename_list = get_rename_list(args.rename_table) gi = get_instance(args.galaxy_url, args.api_key) name_id_d = get_name_id_d(gi, hid) update_names(gi, hid, rename_list, name_id_d) if __name__ == "__main__": main()
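The rename table consumed above is a plain two-column, tab-separated file; a hypothetical example of its contents and of the mapping get_rename_list builds from it (the dataset names are made up):

# rename_table.tab — one mapping per line: old name, a tab, then new name
#   input_1.fastq<TAB>sampleA_R1.fastq
#   input_2.fastq<TAB>sampleA_R2.fastq
with open("rename_table.tab") as table:
    rename_list = get_rename_list(table)
    # -> [("input_1.fastq", "sampleA_R1.fastq"), ("input_2.fastq", "sampleA_R2.fastq")]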
ATNF/askapsdp
Tools/Dev/rbuild/askapdev/rbuild/dependencies/dependency.py
Python
gpl-2.0
18,614
0.005104
## @file # Module to gather dependency information for ASKAP packages # # @copyright (c) 2006 CSIRO # Australia Telescope National Facility (ATNF) # Commonwealth Scientific and Industrial Research Organisation (CSIRO) # PO Box 76, Epping NSW 1710, Australia # atnf-enquiries@csiro.au # # This file is part of the ASKAP software distribution. # # The ASKAP software distribution is free software: you can redistribute it # and/or modify it under the terms of the GNU General Public License as # published by the Free Software Foundation; either version 2 of the License # or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. # # @author Malte Marquarding <
malte.marquarding@csiro.au> # import glob import os import socket import askapdev.rbuild.utils as utils from ..exceptions import BuildError from ordereddict import OrderedDict ## An object to hold information about a dependency tree for a nominated package # This package can be added manually, or if None specified,
the information # will be taken from a file 'dependencies.{default,hostname}' in the current # directory. # # Example: # @code # f = file("dependencies.default") # f.write("numpy=3rdParty/numpy/numpy-1.0.2;==1.0.2\n") # f.close() # from askapdev.rbuild.dependencies import Dependency # dep = Dependency() # dep.add_package() # @endcode # class Dependency: ## Construct an empty dependency tree # @param self the object reference # @param silent minimal feedback # @param autobuild warn rather than fail on multiple version dependencies. XXX def __init__(self, silent=True, autobuild=False): ## The ASKAP top-level directory self.ASKAPROOT = os.environ.get("ASKAP_ROOT") if self.ASKAPROOT is None: msg = "ASKAP_ROOT environment variable is not defined" raise BuildError(msg) # self.DEPFILE = "dependencies" # The basename of the dependency file self.INSTALL_SUBDIR = "install" self._deps = OrderedDict() # self._bindirs = [] self._incdirs = [] self._libdirs = [] self._rootdirs = [] # self._cppflags = [] # XXX "defs" in package.info. LOFAR/log4cxx # self._env = [] self._jars = [] self._libs = [] self._packages = [] # self._ldlibpath = "" self._pypath = "" # self._autobuild = autobuild self._silent = silent # minimal output self.selfupdate = False # should object request updates from svn def q_print(self, msg): if self._silent: return utils.q_print(msg) ## Get the path of the specified dependency package # @param self the current object # @param key the label of the package dependency # @return the path (relative to ASKAP_ROOT) to the package def get_dep_path(self, key): return self._deps[key]["path"] # Used by "in" test. # object.__contains__(self, item) # # Called to implement membership test operators. Should return true if item # is in self, false otherwise. For mapping objects, this should consider # the keys of the mapping rather than the values or the key-item pairs. # # For objects that do not define __contains__(), the membership test first # tries iteration via __iter__(), then the old sequence iteration protocol # via __getitem__(), see this section in the language reference. 
# # http://docs.python.org/reference/datamodel.html def __contains__(self, key): return self._deps.has_key(key) ## Get the absolute path to the dependency package's installed location # @param self the current object # @param key the label of the package dependency # @return the absolute path to the package's installed location def get_install_path(self, key): rel_path = self._deps[key]["path"] full_path = os.path.join(self.ASKAPROOT, rel_path, self.INSTALL_SUBDIR) return os.path.abspath(full_path) def get_path(self): return os.path.pathsep.join(self._bindirs) ## Get the libraries retrieved in the dependency analysis # @param self the object reference # @return a list of library names def get_libs(self, mapped=False): if mapped: return self._libs[:] else: return [ m[0] for m in self._libs ] ## Get the environment variables retrieved in the dependency analysis # @param self the object reference # @return a dictionary of ENVVAR => value pairs def get_env(self): return dict([i.split("=") for i in self._env]) ## Get the java classpath for the dependencies # @param self the object reference # @return a classpath string of the form x/y/z.jar:a/b/c.jar def get_classpath(self): return os.path.pathsep.join(self._jars) ## Get the root directories of the tags retrieved in the dependency analysis # @param self the object reference # @return a list of directory names def get_rootdirs(self, mapped=False): # XXX used in ant.py builder with mapped=true. if mapped: return [ (k, os.path.join( self.ASKAPROOT, v['path'])) \ for k,v in self._deps.iteritems()] return self._rootdirs[:] ## Get the LIBRARY directories retrieved in the dependency analysis # @param self the object reference # @param mapped return directory tuples (rootdir, libdir) # @return a list of library directories or tuples of rootdirs and libdirs def get_librarydirs(self, mapped=False): if mapped: return self._libdirs[:] else: return [ m[0] for m in self._libdirs ] ## Get the LD_LIBRARY_PATH accumulated in the dependency analysis # @param self the object reference # @return a string representing the LD_LIBRARY_PATH def get_ld_library_path(self): return self._ldlibpath.strip(":") ## Get the INCLUDE directories retrieved in the dependency analysis # @param self the object reference # @return a list of header file directories def get_includedirs(self): return self._incdirs[:] ## Get the CPPFLAGS retrieved in the dependency analysis # @param self the object reference # @return a list of preprocessor flags def get_cppflags(self): return self._cppflags[:] def get_pythonpath(self): return self._pypath.strip(":") ## Get a list of doxygen tag files in the dependencies. This is used for # cross-referencing the documentation # @todo Re-enable: This has been disabled until it is working for python # @param self the object reference # @return a list of TAGFILES entries # XXX used only in scons_tools/askap_package.py def get_tagfiles(self): tagfiles = [] for pth in self._rootdirs: tagname = utils.tag_name(pth) tagpath = os.path.join(pth, tagname) if os.path.exists(tagpath): tagfiles.append('"%s=%s/html"' % (tagpath, pth) ) return tagfiles def _get_dependencies(self, package): codename = utils.get_platform()['codename'] hostname = socket.gethostname().split(".")[0] for ext in ['default', codename, hostname]: if ext: depfile = '%s.%s' % (self.DEPFILE, ext) if package: depfile = os.path.join(self.ASKAPROOT, package, depfile) if self.selfupdate: # always update if it is the "root/target" package basedir = os.path.split(depfile)[0] or "." 
if not os.path.exists(basedir):
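The @code example in the module docstring stops at add_package(); a minimal sketch of querying the accumulated results afterwards, using only accessors defined above (the surrounding build setup, including a valid ASKAP_ROOT, is assumed):

from askapdev.rbuild.dependencies import Dependency

dep = Dependency()
dep.add_package()                    # reads dependencies.{default,<codename>,<hostname>}

include_dirs = dep.get_includedirs()
library_dirs = dep.get_librarydirs()
ld_path = dep.get_ld_library_path()
classpath = dep.get_classpath()      # "x/y/z.jar:a/b/c.jar" style string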
runekaagaard/django-contrib-locking
django/contrib/gis/db/backends/postgis/operations.py
Python
bsd-3-clause
15,371
0.001106
import re from django.conf import settings from django.contrib.gis.db.backends.base.operations import BaseSpatialOperations from django.contrib.gis.db.backends.postgis.adapter import PostGISAdapter from django.contrib.gis.db.backends.utils import SpatialOperator from django.contrib.gis.geometry.backend import Geometry from django.contrib.gis.measure import Distance from django.core.exceptions import ImproperlyConfigured from django.db.backends.postgresql_psycopg2.operations import DatabaseOperations from django.db.utils import ProgrammingError from django.utils.functional import cached_property from .models import PostGISGeometryColumns, PostGISSpatialRefSys class PostGISOperator(SpatialOperator): def __init__(self, geography=False, **kwargs): # Only a subset of the operators and functions are available # for the geography type. self.geography = geography super(PostGISOperator, self).__init__(**kwargs) def as_sql(self, connection, lookup, *args): if lookup.lhs.output_field.geography and not self.geography: raise ValueError('PostGIS geography does not support the "%s" ' 'function/operator.' % (self.func or self.op,)) return super(PostGISOperator, self).as_sql(connection, lookup, *args) class PostGISDistanceOperator(PostGISOperator): sql_template = '%(func)s(%(lhs)s, %(rhs)s) %(op)s %%s' def as_sql(self, connection, lookup, template_params, sql_params): if not lookup.lhs.output_field.geography and lookup.lhs.output_field.geodetic(connection): sql_template = self.sql_template if len(lookup.rhs) == 3 and lookup.rhs[-1] == 'spheroid': template_params.update({'op': self.op, 'func': 'ST_Distance_Spheroid'}) sql_template = '%(func)s(%(lhs)s, %(rhs)s, %%s) %(op)s %%s' else: template_params.update({'op': self.op, 'func': 'ST_Distance_Sphere'}) return sql_template % template_params, sql_params return super(PostGISDistanceOperator, self).as_sql(connection, lookup, template_params, sql_params) class PostGISOperations(BaseSpatialOperations, DatabaseOperations): name = 'postgis' postgis = True geography = True geom_func_prefix = 'ST_' version_regex = re.compile(r'^(?P<major>\d)\.(?P<minor1>\d)\.(?P<minor2>\d+)') Adapter = PostGISAdapter Adaptor = Adapter # Backwards-compatibility alias. 
gis_operators = { 'bbcontains': PostGISOperator(op='~'), 'bboverlaps': PostGISOperator(op='&&', geography=True), 'contained': PostGISOperator(op='@'), 'contains': PostGISOperator(func='ST_Contains'), 'overlaps_left': PostGISOperator(op='&<'), 'overlaps_right': PostGISOperator(op='&>'), 'overlaps_below': PostGISOperator(op='&<|'), 'overlaps_above': PostGISOperator(op='|&>'), 'left': PostGISOperator(op='<<'), 'right': PostGISOperator(op='>>'), 'strictly_below': PostGISOperator(op='<<|'), 'strictly_above': PostGISOperator(op='|>>'), 'same_as': PostGISOperator(op='~='), 'exact': PostGISOperator(op='~='), # alias of same_as 'contains_properly': PostGISOperator(func='ST_ContainsProperly'), 'coveredby': PostGISOperator(func='ST_CoveredBy', geography=True), 'covers': PostGISOperator(func='ST_Covers', geography=True), 'crosses': PostGISOperator(func='ST_Crosses'), 'disjoint': PostGISOperator(func='ST_Disjoint'), 'equals': PostGISOperator(func='ST_Equals'), 'intersects': PostGISOperator(func='ST_Intersects', geography=True), 'overlaps': PostGISOperator(func='ST_Overlaps'), 'relate': PostGISOperator(func='ST_Relate'), 'touches': PostGISOperator(func='ST_Touches'), 'within': PostGISOperator(func='ST_Within'), 'dwithin': PostGISOperator(func='ST_DWithin', geography=True), 'distance_gt': PostGISDistanceOperator(func='ST_Distance', op='>', geography=True), 'distance_gte': PostGISDistanceOperator(func='ST_Distance', op='>=', geography=True), 'distance_lt': PostGISDistanceOperator(func='ST_Distance', op='<', geography=True), 'distance_lte': PostGISDistanceOperator(func='ST_Distance', op='<=', geography=True), } def __init__(self, connection): super(PostGISOperations, self).__init__(connection) prefix = self.geom_func_prefix self.area = prefix + 'Area' self.bounding_circle = prefix + 'MinimumBoundingCircle' self.centroid = prefix + 'Centroid' self.collect = prefix + 'Collect' self.difference = prefix + 'Difference' self.distance = prefix + 'Distance' self.distance_sphere = prefix + 'distance_sphere' self.distance_spheroid = prefix + 'distance_spheroid' self.envelope = prefix + 'Envelope' self.extent = prefix + 'Extent' self.force_rhr = prefix + 'ForceRHR' self.geohash = prefix + 'GeoHash' self.geojson = prefix + 'AsGeoJson' self.gml = prefix + 'AsGML' self.intersection = prefix + 'Intersection' self.kml = prefix + 'AsKML' self.length = prefix + 'Length' self.length_spheroid = prefix + 'length_spheroid' self.makeline = prefix + 'MakeLine' self.mem_size = prefix + 'mem_size' self.num_geom = prefix + 'NumGeometries' self.num_points = prefix + 'npoints' self.perimeter = prefix + 'Perimeter' self.point_on_surface = prefix + 'PointOnSurface' self.polygonize = prefix + 'Polygonize' self.reverse = prefix + 'Reverse' self.scale = prefix + 'Scale' self.snap_to_grid
= prefix + 'SnapToGrid' self.svg = prefix + 'AsSVG' self.sym_difference = prefix + 'SymDifference' self.transform = prefix + 'Transform' self.translate = prefix + 'Translate' self.union = prefix + 'Union' self.unionagg = prefix + 'Union' # Followin
g "attributes" are properties due to the spatial_version check and # to delay database access @property def extent3d(self): if self.spatial_version >= (2, 0, 0): return self.geom_func_prefix + '3DExtent' else: return self.geom_func_prefix + 'Extent3D' @property def length3d(self): if self.spatial_version >= (2, 0, 0): return self.geom_func_prefix + '3DLength' else: return self.geom_func_prefix + 'Length3D' @property def perimeter3d(self): if self.spatial_version >= (2, 0, 0): return self.geom_func_prefix + '3DPerimeter' else: return self.geom_func_prefix + 'Perimeter3D' @property def geometry(self): # Native geometry type support added in PostGIS 2.0. return self.spatial_version >= (2, 0, 0) @cached_property def spatial_version(self): """Determine the version of the PostGIS library.""" # Trying to get the PostGIS version because the function # signatures will depend on the version used. The cost # here is a database query to determine the version, which # can be mitigated by setting `POSTGIS_VERSION` with a 3-tuple # comprising user-supplied values for the major, minor, and # subminor revision of PostGIS. if hasattr(settings, 'POSTGIS_VERSION'): version = settings.POSTGIS_VERSION else: try: vtup = self.postgis_version_tuple() except ProgrammingError: raise ImproperlyConfigured( 'Cannot determine PostGIS version for database "%s". ' 'GeoDjango requires at least PostGIS version 1.5. ' 'Was the database created from a spatial database ' 'template?' % self.connection.settings_dict['NAME'] ) version = vtup[1:] return version def convert_extent(self, box, srid): """ Returns a 4-tuple extent for the `Extent` aggregate by converting the
raildo/python-keystoneclient
python-keystoneclient-0.4.1.7.gdca1d42/keystoneclient/service_catalog.py
Python
apache-2.0
11,013
0
# Copyright 2011 OpenStack Foundation # Copyright 2011, Piston Cloud Computing, Inc. # Copyright 2011 Nebula, Inc. # # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from keystoneclient import exceptions class ServiceCatalog(object): """Helper methods for dealing with a Keystone Service Catalog.""" @classmethod def factory(cls, resource_dict, token=None, region_name=None): """Create ServiceCatalog object given an auth token.""" if ServiceCatalogV3.is_valid(resource_dict): return ServiceCatalogV3(token, resource_dict, region_name) elif ServiceCatalogV2.is_valid(resource_dict): return ServiceCatalogV2(resource_dict, region_name) else: raise NotImplementedError('Unrecognized auth response') def get_token(self): """Fetch token details from service catalog. Returns a dictionary containing the following:: - `id`: Token's ID - `expires`: Token's expiration - `user_id`: Authenticated user's ID - `tenant_id`: Authorized project's ID - `domain_id`: Authorized domain's ID """ raise NotImplementedError() def get_endpoints(self, service_type=None, endpoint_type=None): """Fetch and filter endpoints for the specified service(s). Returns endpoints for the specified service (or all) and that contain the specified type (or all). """ raise NotImplementedError() def get_urls(self, attr=None, filter_value=None, service_type='identity', endpoint_type='publicURL'): """Fetch endpoint urls from the service catalog. Fetch the endpoints from the service catalog for a particular endpoint attribute. If no attribute is given, return the first endpoint of the specified type. :param string attr: Endpoint attribute name. :param string filter_value: Endpoint attribute value. :param string service_type: Service type of the endpoint. :param string endpoint_type: Type of endpoint. Possible values: public or publicURL, internal or internalURL, admin or adminURL :param string region_name: Region of the endpoint. :returns: tuple of urls or None (if no match found) """ raise NotImplementedError() def url_for(self, attr=None, filter_value=None, service_type='identity', endpoint_type='publicURL'): """Fetch an endpoint from the service catalog. Fetch the specified endpoint from the service catalog for a particular endpoint attribute. If no attribute is given, return the first endpoint of the specified type. Valid endpoint types: `public` or `publicURL`, `internal` or `internalURL`, `admin` or `adminURL` """ raise NotImplementedError() def get_data(self): """Get the raw catalog structure. Get the version-dependent catalog structure as it is presented within the resource. :returns: dict containing raw catalog data or None """ raise NotImplementedError() class ServiceCatalogV2(ServiceCatalog): """An object for encapsulating the service catalog using raw v2 auth token from Keystone. """ def __init__(self, resource_dict, region_name=None): self.catalog = resource_dict self.region_name = region_name @classmethod def is_valid(cls, resource_dict): # This class is also used for reading token info of an unscoped token. 
# Unscoped token does not have 'serviceCatalog' in V2, checking this # will not work. Use 'token' attribute instead. return 'token' in resource_dict def get_data(self): return self.catalog.get('serviceCatalog') def get_token(self): token = {'id': self.catalog['token']['id'], 'expires': self.catalog['token']['expires']} try: token['user_id'] = self.catalog['user']['id'] token['tenant_id'] = self.catalog['token']['tenant']['id'] except Exception: # just leave the tenant and user out if it doesn't exist pass return token def get_endpoints(self, service_type=None, endpoint_type=None): if endpoint_type and 'URL' not in endpoint_type: endpoint_type = endpoint_type + 'URL' sc = {} for service in (self.get_data() or []): if service_type and service_type != service['type']: continue sc[service['type']] = [] for endpoint in service['endpoints']: if endpoint_type and endpoint_type not in endpoint: continue sc[service['type']].append(endpoint) return sc def get_urls(self, attr=None, filter_value=None, service_type='identity', endpoint_type='publicURL'): sc_endpoints = self.get_endpoints(service_type=service_type, endpoint_type=endpoint_type) endpoints = sc_endpoints.get(service_type) if not endpoints: return if endpoint_type and 'URL' not in endpoint_type: endpoint_type = endpoint_type + 'URL' return tuple(endpoint[endpoint_type] for endpoint in endpoints if (endpoint_type in endpoint and (not self.region_name or endpoint.get('region') == self.region_name) and (not filter_value or endpoint.get(attr) == filter_value))) def url_for(self, attr=None, filter_value=None, service_type='identity', endpoint_type='publicURL'): catalog = self.get_data() if not catalog: raise exceptions.EmptyCatalog('The service catalog is empty.') if 'URL' not in endpoint_type: endpoint_type = endpoint_type + 'URL' for service in catalog: if service['type'] != service_type: continue endpoints = service['endpoints']
for endpoint in endpoints: if (self.region_name and endpoint.get('region') != self.region_name): continue
if not filter_value or endpoint.get(attr) == filter_value: return endpoint[endpoint_type] raise exceptions.EndpointNotFound('%s endpoint for %s not found.' % (endpoint_type, service_type)) class ServiceCatalogV3(ServiceCatalog): """An object for encapsulating the service catalog using raw v3 auth token from Keystone. """ def __init__(self, token, resource_dict, region_name=None): self._auth_token = token self.catalog = resource_dict self.region_name = region_name @classmethod def is_valid(cls, resource_dict): # This class is also used for reading token info of an unscoped token. # Unscoped token does not have 'catalog', checking this # will not work. Use 'methods' attribute instead. return 'methods' in resource_dict def get_data(self): return self.catalog.get('catalog') def get_token(self): token = {'id': self._auth_token, 'expires': self.catalog['expires_at']} try: token['user_id'] = self.catalog['user']['id']
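factory() picks the V2 or V3 wrapper from the token shape as shown above; a minimal lookup sketch, where token_data stands in for a raw Keystone auth response dict (hypothetical input):

from keystoneclient import service_catalog

# token_data: hypothetical raw auth response body (v2 or v3)
sc = service_catalog.ServiceCatalog.factory(token_data, region_name="RegionOne")

# First public identity endpoint, matching url_for's defaults above
auth_url = sc.url_for(service_type="identity", endpoint_type="publicURL")

# Tuple of public URLs for one service type
urls = sc.get_urls(service_type="compute", endpoint_type="publicURL")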
incuna/incuna-test-utils
tests/testcases/test_api_request.py
Python
bsd-2-clause
918
0
from django.test import override_settings from incuna_test_utils.testcases.api_request import ( BaseAPIExampleTestCase, BaseAPIRequestTestCase, ) from tests.factories import UserFactory class APIRequestTestCase(BaseAPIRequestTestCase): user_factory = UserFactory def test_create_request_format(self):
request = self.create_request() assert request.META['format'] == 'json' def test_create_request_auth(self): request = self.create_request() assert request.user.is_authenticated def test_create_request_no_auth(self): request = self.create_request(auth=False) assert not request.user.is_authenticated class APIExampleTestCase(BaseAPI
ExampleTestCase): @override_settings(ALLOWED_HOSTS=['localhost']) def test_create_request(self): request = self.create_request(auth=False) assert request.get_host() == self.SERVER_NAME
nsi-iff/nsi_site
apps/members/views.py
Python
mit
1,044
0.003831
from django.shortcuts import render_to_response from django
.template import RequestContext from apps.members.models import Member def show_all_current_members(request): members = Member.objects.filter(is_renegade=False).order_by('function', 'started_nsi_date') return render_to_response( 'show_all_current_members.html', {'members': members}, context_instance=RequestContext(request) ) def show_member(r
equest, slug): member = Member.objects.get(slug=slug) participation_list = member.participation_set.all() members = Member.objects.all() return render_to_response( 'show_member.html', {'member': member, 'participation_list': participation_list, 'members': members}, context_instance=RequestContext(request) ) def show_all_former_members(request): members = Member.objects.filter(is_renegade=True) return render_to_response( 'show_all_former_members.html', {'members': members}, context_instance=RequestContext(request) )
jhl667/galaxy_tools
tools/jhl_tools/send_to_cgd.py
Python
apache-2.0
2,897
0.007249
#!/usr/bin/env python ### <command interpreter="python">send_to_cgd.py ### $pipeline_output $endpoint $cgd_url $output $runid $barcodeid $qcversion ### </command> ### Galaxy wrapper for cgd_client.jar. ### CGD_CLIENT is hard coded, but this is not expected to move. import argparse import subprocess from subprocess import Popen, STDOUT, PIPE import os import sys import shutil def renameOutput(runid, barcodeid, endpoint): """ CGD needs the filename to be restructured. """ if endpoint == "uploadqcsheet": newfile = "/tmp/" + '_'.join([runid, barcodeid, "R1"]) + ".html" elif endpoint == "uploadqcsheetrtwo": newfile = "/tmp/" + '_'.join([runid, barcodeid, "R2"]) + ".html" else: print("Not sending FastQC.") return None return newfile def splitUrl(url, n): return url.split('/')[-n:] def main(): # CGD_CLIENT="/opt/installed/cgd_client-1.0.7.jar" CGD_CLIENT="/home/exacloud/clinical/installedTest/cgd_client-1.0.7.jar" parser = argparse.Argu
mentParser(description='') parser.add_argument("--pipeline_out", help='') parser.add_argument("--cgd_url", help='') parser.add_argument(dest='stdout_log', help='') parser.add_argument(dest='endpoint', help='') parser.add_argument("--runid", help='') parser.add_argumen
t("--barcodeid", help='') parser.add_argument("--qcversion", help='') args = parser.parse_args() if args.endpoint != "none": newfile = renameOutput(args.runid, args.barcodeid, args.endpoint) else: id_list = splitUrl(args.cgd_url, 3) newfile = renameOutput(id_list[1], id_list[2], id_list[0]) if args.endpoint == "uploadqcsheet" or args.endpoint == "uploadqcsheetrtwo": print("Copying to " + newfile) shutil.copyfile(args.pipeline_out, newfile) cmd = ["java", "-jar", CGD_CLIENT, "-f", newfile, "-n", args.endpoint] else: if args.pipeline_out: cmd = ["java", "-jar", CGD_CLIENT, "-f", args.pipeline_out, "-n", args.endpoint] else: cmd = ["java", "-jar", CGD_CLIENT, "-n", args.endpoint] if args.cgd_url: # cmd.append("-u") # cmd.append(args.cgd_url) cmd = ["java", "-jar", CGD_CLIENT, "-f", newfile, "-u", args.cgd_url] if args.runid: cmd.append("-r") cmd.append(args.runid) if args.barcodeid: cmd.append("-b") cmd.append(args.barcodeid) if args.qcversion: cmd.append("-v") cmd.append(args.qcversion) cmd.append("-d") print("We are running this command:") print(' '.join(cmd)) proc = subprocess.call(cmd) outfile = open(args.stdout_log, 'w') outfile.write("The process has run.") outfile.close() ## Clean up temp file. if newfile != None: os.remove(newfile) if __name__ == "__main__": main()
LightStage-Aber/LightStage-Repo
exp/Lettvin_Repulsion/helpers/parse_diffuse_results_to_obj_file.py
Python
apache-2.0
1,750
0.017714
""" Module to parse a Lettvin Results File C array of vertex points (and distances) into an OBJ file format of vertex points. arg1: the path to the results file. (File must exist) arg2: the file extension of the output OBJ filename. (e.g. ".obj") Default is ".obj" (File must not exist) Vertex points are written in the OBJ file format, as they are found in the result file. WARNING: - The faces of the OBJ file are not known and are therefore are written to the OBJ file as a single triangle, from vertices 1,2,3. """ import sys, os # Check input file exists: path = sys.argv[1]
assert os.path.exists( path ) , "Input file does not exist, aborting.\nFilename: "+str(path) file_extension = sys.argv[2] if len(sys.argv) > 2 and sys.argv[2] != "" else ".obj" assert not os.path.exists( path+file_extension ), "Output file already exists, aborting.\nFilename: "+str(path+file_extension) # extract the c array of vertex points from result file: l = [] with open(path, 'rb') as f: c = 0 for line in f: if line.strip().startswith("{"):
l.append( line.strip() ) c+=1 # convert the string form of the C array to a python list positions = eval("["+("".join(l)).replace("{","[").replace("}","]")+"]") # keep only the x, y, z components, dropping the trailing distance value pos = [x[0:3] for x in positions] # prepare the OBJ file format header and content: w = [] w.append("""# Blender v2.69 (sub 0) OBJ File: '"""+path+"""' www.blender.org mtllib positions_diffuse.mtl o """+path) for x in pos: w.append("v "+str(x[0])+" "+str(x[1])+" "+str(x[2])) # include an arbitrary face to hide file format parse errors later. w.append("""usemtl None s off f 1 2 3""") # write out the obj file: f = open(str(path)+str(file_extension),'w') for s in w: f.write( str(s) +"\n") f.close()
leongold/lago
ovirtlago/reposetup.py
Python
gpl-2.0
7,082
0
#!/usr/bin/env python # Copyright 2016 Red Hat, Inc. # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. #
# This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Publ
ic License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA # # Refer to the README and COPYING files for full details of the license # import functools import re import itertools import shutil import logging import os from lago import log_utils from lago.utils import ( run_command, LockFile, ) from . import utils LOGGER = logging.getLogger(__name__) LogTask = functools.partial(log_utils.LogTask, logger=LOGGER) log_task = functools.partial(log_utils.log_task, logger=LOGGER) class RepositoryError(Exception): pass class RepositoryMergeError(RepositoryError): pass def merge(output_dir, sources, repoman_config=None): """ Run repoman on ``sources``, creating a new RPM repository in ``output_dir`` Args: output_dir(str): Path to create new repository sources(list of str): repoman sources repoman_config(str): repoman configuration file, if not passed it will use default repoman configurations, equivalent to: | [main] | on_empty_source=warn | [store.RPMStore] | on_wrong_distro=copy_to_all | with_srcrpms=false | with_sources=false Raises: :exc:`RepositoryMergeError`: If repoman command failed. :exc:`IOError`: If ``repoman_config`` is passed but does not exists. Returns: None """ cmd = [] cmd_suffix = [ '--option=store.RPMStore.rpm_dir=', output_dir, 'add' ] + sources if repoman_config is None: repoman_params = [ '--option=main.on_empty_source=warn', '--option=store.RPMStore.on_wrong_distro=copy_to_all', '--option=store.RPMStore.with_srcrpms=false', '--option=store.RPMStore.with_sources=false', ] cmd = ['repoman'] + repoman_params + cmd_suffix else: if os.path.isfile(repoman_config): cmd = ['repoman', '--config={0}'.format(repoman_config) ] + cmd_suffix else: raise IOError( ('error running repoman, {0} not ' 'found').format(repoman_config) ) with LogTask('Running repoman'): res = run_command(cmd) if res.code: raise RepositoryMergeError( ( 'Failed merging repoman sources: {0} into directory: {1}, ' 'check lago.log for repoman output ' ).format(sources, output_dir) ) def with_repo_server(func): @functools.wraps(func) def wrapper(*args, **kwargs): with utils.repo_server_context(args[0]): return func(*args, **kwargs) return wrapper def _fix_reposync_issues(reposync_out, repo_path): """ Fix for the issue described at:: https://bugzilla.redhat.com//show_bug.cgi?id=1399235 https://bugzilla.redhat.com//show_bug.cgi?id=1332441 """ if len(repo_path) == 0 or len(reposync_out) == 0: LOGGER.warning( ( 'unable to run _fix_reposync_issues, no reposync output ' 'or empty repo path.' ) ) return rpm_regex = r'[a-z]{1}[a-zA-Z0-9._\\-]+' wrong_version = re.compile( r'(?P<package_name>' + rpm_regex + r'): \[Errno 256\]' ) wrong_release = re.compile(r'(?P<package_name>' + rpm_regex + r') FAILED') packages = set( itertools.chain( wrong_version.findall(reposync_out), wrong_release.findall(reposync_out) ) ) count = 0 LOGGER.debug( 'detected package errors in reposync output in repo_path:%s: %s', repo_path, ','.join(packages) ) for dirpath, _, filenames in os.walk(repo_path): rpms = ( file for file in filenames if file.endswith('.rpm') and dirpath.startswith(repo_path) ) for rpm in rpms: if any(map(rpm.startswith, packages)): bad_package = os.path.join(dirpath, rpm) LOGGER.info('removing conflicting RPM: %s', bad_package) os.unlink(bad_package) count = count + 1 if count > 0: LOGGER.debug( ( 'removed %s conflicting packages, see ' 'https://bugzilla.redhat.com//show_bug.cgi?id=1399235 ' 'for more details.' 
), count ) def sync_rpm_repository(repo_path, yum_config, repos): lock_path = os.path.join(repo_path, 'repolock') if not os.path.exists(repo_path): os.makedirs(repo_path) reposync_base_cmd = [ 'reposync', '--config=%s' % yum_config, '--download_path=%s' % repo_path, '--newest-only', '--delete', '--cachedir=%s/cache' % repo_path ] with LogTask('Running reposync'): for repo in repos: with LockFile(lock_path, timeout=180): reposync_cmd = reposync_base_cmd + ['--repoid=%s' % repo] ret, out, _ = run_command(reposync_cmd) if not ret: LOGGER.debug('reposync on repo %s: success.' % repo) continue LOGGER.info('repo: %s: failed, re-running.', repo) _fix_reposync_issues( reposync_out=out, repo_path=os.path.join(repo_path, repo) ) ret, _, _ = run_command(reposync_cmd) if not ret: continue LOGGER.info( 'repo: %s: failed. clearing cache and re-running.', repo ) shutil.rmtree('%s/cache' % repo_path) ret, out, err = run_command(reposync_cmd) if ret: LOGGER.error( 'reposync command failed for repoid: %s', repo ) LOGGER.error( 'reposync stdout for repoid: %s: \n%s', repo, out ) LOGGER.error( 'reposync stderr for repoid: %s: \n%s', repo, err ) raise RuntimeError( ( 'Failed to run reposync 3 times ' 'for repoid: %s, aborting.' ) % repo )
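merge() wraps the repoman command line exactly as its docstring describes; a minimal invocation sketch (the output path and source spec are illustrative, and the source-spec syntax is repoman's, not defined here):

from ovirtlago.reposetup import merge, RepositoryMergeError

try:
    merge(
        output_dir="/var/lib/lago/internal_repo",
        sources=["http://example.com/some/yum/repo"],   # repoman source specs
        repoman_config=None,   # fall back to the defaults listed in the docstring
    )
except RepositoryMergeError as exc:
    print(exc)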
M4rtinK/anaconda
tests/unit_tests/pyanaconda_tests/modules/boss/test_set_file_contexts_task.py
Python
gpl-2.0
2,587
0
# # Copyrigh
t (C) 2021 Red Hat, Inc. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions of # the GNU General Public License v.2, or (at your opt
ion) any later version. # This program is distributed in the hope that it will be useful, but WITHOUT # ANY WARRANTY expressed or implied, including the implied warranties of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General # Public License for more details. You should have received a copy of the # GNU General Public License along with this program; if not, write to the # Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA # 02110-1301, USA. Any Red Hat trademarks that are incorporated in the # source code or documentation are not subject to the GNU General Public # License and may only be used or replicated with the express permission of # Red Hat, Inc. # import unittest from unittest.mock import patch from pyanaconda.modules.boss.installation import SetContextsTask class SetContextsTaskTest(unittest.TestCase): @patch("pyanaconda.modules.boss.installation.execWithRedirect") def test_run(self, exec_mock): """Test SetContextsTask success.""" task = SetContextsTask("/somewhere") with self.assertLogs() as cm: task.run() exec_mock.assert_called_once_with( "restorecon", [ "-ir", "/boot", "/dev", "/etc", "/lib64", "/root", "/usr/lib64", "/var/cache/yum", "/var/home", "/var/lib", "/var/lock", "/var/log", "/var/media", "/var/mnt", "/var/opt", "/var/roothome", "/var/run", "/var/spool", "/var/srv" ], root="/somewhere" ) logs = "\n".join(cm.output) assert "not available" not in logs @patch("pyanaconda.modules.boss.installation.execWithRedirect") def test_restorecon_missing(self, exec_mock): """Test SetContextsTask with missing restorecon.""" exec_mock.side_effect = FileNotFoundError("testing") task = SetContextsTask("/somewhere") with self.assertLogs() as cm: task.run() # asserts also that exception is not raised logs = "\n".join(cm.output) assert "not available" in logs
aio-libs/aiobotocore
tests/test_sqs.py
Python
apache-2.0
2,198
0
import time import pytest @pytest.mark.moto @pytest.mark.asyncio async def test_list_queues(sqs_client, sqs_queue_url): response = await sqs_client.list_queues() pytest.aio.assert_status_code(response, 200) assert sqs_queue_url in response['QueueUrls'] @pytest.mark.moto @pytest.mark.asyncio async def test_get_queue_name(sqs_client, sqs_queue_url): queue_name = sqs_queue_url.rsplit('/', 1)[-1] response = await sqs_client.get_queue_url(QueueName=queue_name) pytest.aio.assert_status_code(response, 200) assert sqs_queue_url == response['QueueUrl'] @pytest.mark.moto @pytest.mark.asyncio async def test_put_pull_delete_test(sqs_client, sqs_queue_url): response = await sqs_client.send_message( QueueUrl=sqs_queue_url, MessageBody='test_message_1', MessageAttributes={ 'attr1': {'DataType': 'Str
ing', 'StringValue': 'value1'} } ) pytest.aio.assert_status_code(response, 200) response = await sqs_client.receive_message( QueueUrl=sqs_queue_url, MessageAttributeNames=['attr1'] ) pytest.aio.assert_status_code(response, 200) # Messages won't be a key if it's empty assert len(respo
nse.get('Messages', [])) == 1 msg = response['Messages'][0] assert msg['Body'] == 'test_message_1' assert msg['MessageAttributes']['attr1']['StringValue'] == 'value1' receipt_handle = response['Messages'][0]['ReceiptHandle'] response = await sqs_client.delete_message( QueueUrl=sqs_queue_url, ReceiptHandle=receipt_handle ) pytest.aio.assert_status_code(response, 200) response = await sqs_client.receive_message( QueueUrl=sqs_queue_url, ) pytest.aio.assert_status_code(response, 200) assert len(response.get('Messages', [])) == 0 @pytest.mark.moto @pytest.mark.asyncio async def test_put_pull_wait(sqs_client, sqs_queue_url): start = time.perf_counter() response = await sqs_client.receive_message( QueueUrl=sqs_queue_url, WaitTimeSeconds=2 ) end = time.perf_counter() pytest.aio.assert_status_code(response, 200) assert 'Messages' not in response assert end - start > 1.5
AdmitHub/heroku-auto-ssl
hooks/heroku-auto-ssl/hook.py
Python
mit
8,401
0.00488
#!/usr/bin/env python import sys, logging, getpass, subprocess, os, json # List of Heroku App ids to update _heroku_app_ids = None _HEROKU_APP_IDS_ENV_KEY = "HEROKU_APP_IDS" def get_heroku_app_ids(): global _heroku_app_ids # Lazy load if _heroku_app_ids is None: env = os.environ.get(_HEROKU_APP_IDS_ENV_KEY) # Check if environment var is set if env is None: err_txt = "{} not set".format(_HEROKU_APP_IDS_ENV_KEY) logging.exception(err_txt) raise ValueError(err_txt) # Parse env into list try: _heroku_app_ids = json.loads(env) except json.JSONDecodeError as e: err_txt = "Error parsing {} environment variable to json".format(_HEROKU_APP_IDS_ENV_KEY) logging.exception(err_txt) raise SyntaxError(err_txt) return _heroku_app_ids # str to log with identifying info _identifying_info = None """Get identifying information of computer user in loggable str This includes: - Computer user name - Git user name - Git user email Note: This doesn't actually get identifiable information in a computer forensics way. More of a "Who at the company did what last" way (At best: Who broke what). Returns: - str: Identifying information for computer user in format: user.username="{}", git.user.name="{}", git.user.email="{}" """ def get_identifying_info(): global _identifying_info # Lazy load if _identifying_info is None: # Get user's name username = None try: username = getpass.getuser() except: logging.exception("Error while trying to get user's username") # Get Git information git_user = None git_email = None if which("git") is not None: try: git_user = cmd_output(["git", "config", "user.name"]) except Exception as e: logging.exception("Error while trying to find user's git.user.name") try: git_email = cmd_output(["git", "config", "user.email"]) except Exception as e: logging.exception("Error while trying to find user's git.user.email") _identifying_info = "user.username=\"{}\", git.user.name=\"{}\", git.user.email=\"{}\"".format(username, git_user, git_email) return _identifying_info # Determines if file path is exe, credit to author of which function (See docs) def is_exe(fpath): return os.path.isfile(fpath) and os.access(fpath, os.X_OK) """Command which emulates `which` UNIX command Credit to users Jay(https://stackoverflow.com/users/20840/jay) and harmv(https://stackoverflow.com/users/328384/harmv) on SO for the code: http://stackoverflow.com/a/377028/1478191 Returns the full path to a program accessable from the PATH. 
Args: - program (str): Name of program Returns: - str: Full path to executable file - None: If executable is not found anywhere """ def which(program): fpath, fname = os.path.split(program) if fpath: if is_exe(program): return program else: for path in os.environ["PATH"].split(os.pathsep): path = path.strip('"') exe_file = os.path.join(path, program) if is_exe(exe_file): return exe_file return None def clean_cmd_output(output): if output is not None: output = output.decode("utf-8").rstrip() return output """Returns the output of the given command Args: - cmds (str[]): Array of command parts Returns: - str: Command output - None: If no command output was received """ def cmd_output(cmds): proc = subprocess.Popen(cmds, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE) output, err = proc.communicate() # Trim output if it exists output = clean_cmd_output(output) err = clean_cmd_output(err) # Raises Exception if stderr output exists if (err is not None) and (len(err) > 0): raise Exception("Error while running command: \"{}\"".format(err)) return output ################### # HOOKS # ################### """Dehydrated 'deploy_cert' hook handler Purpose is to deploy successfully retrieved SSL certs to Heroku applications Args: - args (object[]): Command line arguments without filename or hook name. Expected format: [domain, key_file, cert_file, full_chain_file, chain_file, timestamp] - domain (str): Root domain name on SSL cert: certificate common name (CN). - key_file (str): Path to SSL cert private key file, second argument in heroku certs:update. - cert_file (str): Path to SSL cert signed certificate file, first argument in heroku certs:update. - full_chain_file (str): Path to SSL cert full certificate chain file.
- chain_file (str): Path to SSL intermediate certificates file. - timestamp (str): Timestamp when the SSL cert was created """ def deploy_cert(args): # Extract args domain, key_file, cert_file, full_chain_file, chain_file, timestamp = args # Get Heroku app Id for domain heroku
_app_ids = None try: heroku_app_ids = get_heroku_app_ids() logging.debug("Got Heroku Ids=\"{}\"".format(heroku_app_ids)) except ValueError as e: # If ENV['HEROKU_APP_IDS'] isn't set logging.exception("Failed to deploy certificate for domain=\"{}\", HEROKU_APP_IDS environment variable not set".format(domain)) return except SyntaxError as e: logging.exception("Failed to deploy certificate for domain=\"{}\", HEROKU_APP_IDS syntax invalid".format(domain)) return # Deploy certs logging.info("Deploying certificates to Heroku apps: {}".format(heroku_app_ids)) command_parts = ["heroku", "certs:update", cert_file, key_file, "--app", "APP ID", "--confirm", "APP ID"] for id in heroku_app_ids: # Set Heroku app id in command command_parts[len(command_parts) - 1] = id command_parts[len(command_parts) - 3] = id # Run proc = subprocess.Popen(command_parts) logging.debug("Ran: $ {}".format(" ".join(command_parts))) logging.info("Deployed certificate for Heroku app: {}".format(id)) # END HOOKS """Main function called below Called if __name__ is '__main__' Args: - argv (object[]): Command line arguments (With first filename arg from Python removed) """ def main(argv): # Register hooks that we handle operations = { 'deploy_cert': deploy_cert } """Call Hook Handler Fields: hook_name (str): Name of hook, picked from argv[0], one of: - 'deploy_challenge' - 'clean_challenge' - 'deploy_cert' - 'unchanged_cert' - 'invalid_challenge' - 'request_failure' - 'exit_hook' (From: https://github.com/lukas2511/dehydrated/blob/master/docs/examples/hook.sh) hook_handler_args (str[]): Hook arguments, set by argv[1:] """ hook_name = argv[0] hook_handler_args = argv[1:] # Log hook called logging.debug("Hook called. hook.name='{}', hook.args={}".format(hook_name, hook_handler_args)) # Log more specific info depending on hook_name if hook_name not in operations: # We don't handle this hook logging.debug("heroku-auto-ssl/hook.py doesn't currently handle: hook.name=\"{}\"".format(hook_name)) elif hook_name in ['deploy_cert']: # This hook could be considered a "security event" logging.info("heroku-auto-ssl/hook.py handled: hook.name=\"{}\", by: {}".format(hook_name, get_identifying_info())) else: # Regular hook logging.debug("heroku-auto-ssl/hook.py handled: hook.name=\"{}\"".format(hook_name)) # Call hook if we handle it if hook_name in operations: operations[hook_name](hook_handler_args) # Call main if __name__ == '__main__': # Setup logging logging.basicConfig(filename="heroku-auto-ssl.log", level=logging.DEBUG, format='%(asctime)s %(module)s %(name)s.%(f
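The record above is cut off inside the basicConfig call, but the hook's contract is clear from the docstrings: HEROKU_APP_IDS must hold a JSON list of app ids, and dehydrated invokes the script with the 'deploy_cert' arguments in the order documented above. A hedged sketch of driving it by hand; every path and app name here is hypothetical:

import json, os, subprocess

os.environ['HEROKU_APP_IDS'] = json.dumps(['my-app-staging', 'my-app-prod'])
subprocess.call([
    './hooks/heroku-auto-ssl/hook.py', 'deploy_cert',
    'example.com',                          # domain (cert common name)
    '/certs/example.com/privkey.pem',       # key_file
    '/certs/example.com/cert.pem',          # cert_file
    '/certs/example.com/fullchain.pem',     # full_chain_file
    '/certs/example.com/chain.pem',         # chain_file
    '1500000000',                           # timestamp
])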
markovmodel/msmtools
msmtools/analysis/dense/decomposition.py
Python
lgpl-3.0
17,097
0.000643
# This file is part of MSMTools. # # Copyright (c) 2015, 2014 Computational Molecular Biology Group, Freie Universitaet Berlin (GER) # # MSMTools is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. r"""This module provides matrix-decomposition based functions for the analysis of stochastic matrices Below are the dense implementations for functions specified in api Dense matrices are represented by numpy.ndarrays throughout this module. .. moduleauthor:: B.Trendelkamp-Schroer <benjamin DOT trendelkamp-schroer AT fu-berlin DOT de> """ import numpy as np import numbers import warnings from scipy.linalg import eig, eigh, eigvals, eigvalsh, solve from ...util.exceptions import SpectralWarning, ImaginaryEigenValueWarning from .stationary_vector import stationary_distribution from .assessment import is_reversible def eigenvalues(T, k=None, reversible=False, mu=None): r"""Compute eigenvalues of given transition matrix. Parameters ---------- T : (d, d) ndarray Transition matrix (stochastic matrix) k : int or tuple of ints, optional Compute the first k eigenvalues of T reversible : bool, optional Indicate that transition matrix is reversible mu : (d,) ndarray, optional Stationary distribution of T Returns ------- eig : (n,) ndarray, The eigenvalues of T ordered with decreasing absolute value. If k is None then n=d, if k is int then n=k otherwise n is the length of the given tuple of eigenvalue indices. Notes ----- Eigenvalues are computed using the numpy.linalg interface for the corresponding LAPACK routines. If reversible=True the the eigenvalues of the similar symmetric matrix `\sqrt(\mu_i / \mu_j) p_{ij}` will be computed. The precomputed stationary distribution will only be used if reversible=True. """ if reversible: try: evals = eigenvalues_rev(T, k=k, mu=mu) except ValueError: evals = eigvals(T).real # use fallback code but cast to real else: evals = eigvals(T) # nonreversible """Sort by decreasing absolute value""" ind = np.argsort(np.abs(evals))[::-1] evals = evals[ind] if isinstance(k, (list, set, tuple)): try: return [evals[n] for n in k] except IndexError: raise ValueError("given indices do not exist: ", k) elif k is not None: return evals[: k] else: return evals def eigenvalues_rev(T, k=None, mu=None): r"""Compute eigenvalues of reversible transition matrix. Parameters ---------- T : (d, d) ndarray Transition matrix (stochastic matrix) k : int or tuple of ints, optional Compute the first k eigenvalues of T mu : (d,) ndarray, optional Stationary distribut
ion of T Returns ------- eig : (n,) ndarray, The eigenvalues of T ordered with decreasing absolute value. If k is None then n=d, if k is int then n=k otherwise n is the length of the given tuple of eigenvalue indices. Raises ------ ValueError If
stationary distribution is nonpositive. """ """compute stationary distribution if not given""" if mu is None: mu = stationary_distribution(T) if np.any(mu <= 0): raise ValueError('Cannot symmetrize transition matrix') """ symmetrize T """ smu = np.sqrt(mu) S = smu[:,None] * T / smu """ symmetric eigenvalue problem """ evals = eigvalsh(S) return evals def eigenvectors(T, k=None, right=True, reversible=False, mu=None): r"""Compute eigenvectors of transition matrix. Parameters ---------- T : (d, d) ndarray Transition matrix (stochastic matrix) k : int or tuple of ints, optional Compute the first k eigenvalues of T right : bool, optional If right=True compute right eigenvectors, left eigenvectors otherwise reversible : bool, optional Indicate that transition matrix is reversible mu : (d,) ndarray, optional Stationary distribution of T Returns ------- eigvec : (d, n) ndarray The eigenvectors of T ordered with decreasing absolute value of the corresponding eigenvalue. If k is None then n=d, if k is int then n=k otherwise n is the length of the given tuple of eigenvector indices Notes ----- Eigenvectors are computed using the numpy.linalg interface for the corresponding LAPACK routines. If reversible=True the the eigenvectors of the similar symmetric matrix `\sqrt(\mu_i / \mu_j) p_{ij}` will be used to compute the eigenvectors of T. The precomputed stationary distribution will only be used if reversible=True. """ if reversible: eigvec = eigenvectors_rev(T, right=right, mu=mu) else: eigvec = eigenvectors_nrev(T, right=right) """ Return eigenvectors """ if k is None: return eigvec elif isinstance(k, numbers.Integral): return eigvec[:, 0:k] else: ind = np.asarray(k) return eigvec[:, ind] def eigenvectors_nrev(T, right=True): r"""Compute eigenvectors of transition matrix. Parameters ---------- T : (d, d) ndarray Transition matrix (stochastic matrix) k : int or tuple of ints, optional Compute the first k eigenvalues of T right : bool, optional If right=True compute right eigenvectors, left eigenvectors otherwise Returns ------- eigvec : (d, d) ndarray The eigenvectors of T ordered with decreasing absolute value of the corresponding eigenvalue """ if right: val, R = eig(T, left=False, right=True) """ Sorted eigenvalues and left and right eigenvectors. """ perm = np.argsort(np.abs(val))[::-1] # eigval=val[perm] eigvec = R[:, perm] else: val, L = eig(T, left=True, right=False) """ Sorted eigenvalues and left and right eigenvectors. """ perm = np.argsort(np.abs(val))[::-1] # eigval=val[perm] eigvec = L[:, perm] return eigvec def eigenvectors_rev(T, right=True, mu=None): r"""Compute eigenvectors of reversible transition matrix. Parameters ---------- T : (d, d) ndarray Transition matrix (stochastic matrix) right : bool, optional If right=True compute right eigenvectors, left eigenvectors otherwise mu : (d,) ndarray, optional Stationary distribution of T Returns ------- eigvec : (d, d) ndarray The eigenvectors of T ordered with decreasing absolute value of the corresponding eigenvalue """ if mu is None: mu = stationary_distribution(T) """ symmetrize T """ smu = np.sqrt(mu) S = smu[:,None] * T / smu val, eigvec = eigh(S) """Sort eigenvectors""" perm = np.argsort(np.abs(val))[::-1] eigvec = eigvec[:, perm] if right: return eigvec / smu[:, np.newaxis] else: return eigvec * smu[:, np.newaxis] def rdl_decomposition(T, k=None, reversible=False, norm='standard', mu=None): r"""Compute the decomposition into left and right eigenvectors. 
Parameters ---------- T : (M, M) ndarray Transition matrix k : int (optional) Number of eigenvector/eigenvalue pairs norm: {'standard', 'reversible', 'auto'} standard: (L'R) = Id, L[:,0] is a probability distribution, the stationary distribution mu of T. Right eigenvector
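The record above is cut off mid-docstring, but the symmetrization trick its docstrings describe can be checked directly: for a reversible T with stationary distribution mu, S_ij = sqrt(mu_i / mu_j) T_ij is symmetric and shares T's eigenvalues. A self-contained numpy sketch with a made-up 2x2 matrix satisfying detailed balance:

import numpy as np
from scipy.linalg import eigvals, eigvalsh

T = np.array([[0.9, 0.1],
              [0.2, 0.8]])
mu = np.array([2.0, 1.0]) / 3.0   # mu_i T_ij == mu_j T_ji (detailed balance)

smu = np.sqrt(mu)
S = smu[:, None] * T / smu        # same symmetrization as in eigenvalues_rev above
assert np.allclose(S, S.T)

print(np.sort(eigvalsh(S)))       # [0.7, 1.0] from the symmetric solver
print(np.sort(eigvals(T).real))   # matches the general solver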
rangsimanketkaew/NWChem
contrib/marat/python/XYZ_file.py
Python
mit
2,936
0.012602
''' Created on Feb 3, 2012 @author: marat ''' import string from myvector import Vector class XYZ_file: ''' classdocs ''' def __init__(self,name=None): if name==None: self.FileName = " " self.NumAtom = 0 self.AtomName = [] self.AtomPos = [] self.AtomVel = [] self.AtomId = [] print "am here" else: print "loading file", name self.LoadFile(name) def LoadFile(self,FileName): fp = open(str(FileName),'r') self.FileName = F
ileName lines = fp.readlines() NumAtom = string.atoi(lines[0]) self.NumAtom = NumAtom AtomStr = []*4 self.AtomName = [None]*NumAtom self.AtomId = [None]*NumAtom self.AtomPos = [None]*NumAtom self.AtomVel = [None]*NumAtom x = 0.0 y = 0.0 z = 0.0
for i in range(NumAtom): self.AtomId[i] = i+1 AtomStr = string.split(lines[i+2])[0:4] self.AtomName[i] = AtomStr[0] x = string.atof(AtomStr[1]) y = string.atof(AtomStr[2]) z = string.atof(AtomStr[3]) self.AtomPos[i] = Vector(x,y,z) self.AtomVel[i] = Vector() def AddAtom(self,Name,x,y,z): self.AtomId.append(self.NumAtom+1) self.AtomName.append(Name) self.AtomPos.append(Vector(x,y,z)) self.NumAtom = len(self.AtomId) def MoveAtom(self,i,dr): self.AtomPos[i-1] = self.AtomPos[i-1] + dr def SetAtomVel(self,i,v): self.AtomVel[i-1] = v def BondVector(self,i1,i2): dr = self.AtomPos[i2-1] - self.AtomPos[i1-1] return dr def BondLength(self,i1,i2): dr = self.AtomPos[i2-1] - self.AtomPos[i1-1] return dr.length() def WriteFile(self,FileName): fp = open(str(FileName),'w') fp.write(str(self.NumAtom)) fp.write("\n") fp.write("molecule") fp.write("\n") for i in range(self.NumAtom): fp.write(self.AtomName[i]) fp.write(" ") fp.write(str(self.AtomPos[i])) # fp.write(" ") # fp.write(str(self.AtomVel[i])) fp.write("\n") def AppendFile(self,FileName): fp = open(str(FileName),'a') fp.write(str(self.NumAtom)) fp.write("\n") fp.write("molecule") fp.write("\n") for i in range(self.NumAtom): fp.write(self.AtomName[i]) fp.write(" ") fp.write(str(self.AtomPos[i])) # fp.write(" ") # fp.write(str(self.AtomVel[i])) fp.write("\n") if __name__ == '__main__': a = XYZ_file("test.xyz") print a.AtomPos[0] print a.BondLength(1, 2) a.WriteFile("test1.xyz")
ypid-bot/check_mk
web/plugins/cron/user_sync.py
Python
gpl-2.0
1,544
0
#!/usr/bin/python # -*- encoding: utf-8; py-indent-offset: 4 -*- # +------------------------------------------------------------------+ # | ____ _ _ __ __ _ __ | # | / ___| |__ ___ ___| | __ | \/ | |/ / | # | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | # | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | # +------------------------------------------------------------------+ # # This file is part of Check_MK. # The official homepage is at http://mathias-kettner.de/check_mk. # # check_mk is free software; you can redistribute it and/or modify it # under the terms of the GNU General Public License as published by # the Free Software Foundation in version 2. check_mk is distributed # in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- # out even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. See the GNU General Public License for more de- # tails. You should have received a copy of the GNU General Public # License along with G
NU Make; see the file COPYING. If not, write
# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, # Boston, MA 02110-1301 USA. import userdb multisite_cronjobs.append(userdb.execute_userdb_job)
googleads/google-ads-python
google/ads/googleads/v8/services/services/conversion_upload_service/client.py
Python
apache-2.0
25,206
0.001071
# -*- coding: utf-8 -*- # Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # from collections import OrderedDict from distutils import util import os import re from typing import Dict, Optional, Sequence, Tuple, Type, Union from google.api_core import client_options as client_options_lib # type: ignore from google.api_core import exceptions as core_exceptions # type: ignore from google.api_core import gapic_v1 # type: ignore from google.api_core import retry as retries # type: ignore from google.auth import credentials as ga_credentials # type: ignore from google.auth.transport import mtls # type: ignore from google.auth.transport.grpc import SslCredentials # type: ignore from google.auth.exceptions import MutualTLSChannelError # type: ignore from google.oauth2 import service_account # type: ignore from google.ads.googleads.v8.services.types import conversion_upload_service from google.rpc import status_pb2 # type: ignore from .transports.base import ( ConversionUploadServiceTransport, DEFAULT_CLIENT_INFO, ) from .transports.grpc import ConversionUploadServiceGrpcTransport class ConversionUploadServiceClientMeta(type): """Metaclass for the ConversionUploadService client. This provides class-level methods for building and
retrieving support objects (e.g. transport) without polluting the client instance objects. """ _transport_registry = ( OrderedDict() ) # type: Dict[str, Type[ConversionUploadServiceTransport]] _transport_registry["grpc"] = ConversionUploadServiceGrpcTransport def get_transport_class( cls, label: str = None, ) -> Type[ConversionUploadServiceTransport]: """Return an appropriate transport clas
s. Args: label: The name of the desired transport. If none is provided, then the first transport in the registry is used. Returns: The transport class to use. """ # If a specific transport is requested, return that one. if label: return cls._transport_registry[label] # No transport is requested; return the default (that is, the first one # in the dictionary). return next(iter(cls._transport_registry.values())) class ConversionUploadServiceClient( metaclass=ConversionUploadServiceClientMeta ): """Service to upload conversions.""" @staticmethod def _get_default_mtls_endpoint(api_endpoint): """Convert api endpoint to mTLS endpoint. Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively. Args: api_endpoint (Optional[str]): the api endpoint to convert. Returns: str: converted mTLS api endpoint. """ if not api_endpoint: return api_endpoint mtls_endpoint_re = re.compile( r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?" ) m = mtls_endpoint_re.match(api_endpoint) name, mtls, sandbox, googledomain = m.groups() if mtls or not googledomain: return api_endpoint if sandbox: return api_endpoint.replace( "sandbox.googleapis.com", "mtls.sandbox.googleapis.com" ) return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com") DEFAULT_ENDPOINT = "googleads.googleapis.com" DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore DEFAULT_ENDPOINT ) @classmethod def from_service_account_info(cls, info: dict, *args, **kwargs): """Creates an instance of this client using the provided credentials info. Args: info (dict): The service account private key info. args: Additional arguments to pass to the constructor. kwargs: Additional arguments to pass to the constructor. Returns: ConversionUploadServiceClient: The constructed client. """ credentials = service_account.Credentials.from_service_account_info( info ) kwargs["credentials"] = credentials return cls(*args, **kwargs) @classmethod def from_service_account_file(cls, filename: str, *args, **kwargs): """Creates an instance of this client using the provided credentials file. Args: filename (str): The path to the service account private key json file. args: Additional arguments to pass to the constructor. kwargs: Additional arguments to pass to the constructor. Returns: ConversionUploadServiceClient: The constructed client. """ credentials = service_account.Credentials.from_service_account_file( filename ) kwargs["credentials"] = credentials return cls(*args, **kwargs) from_service_account_json = from_service_account_file @property def transport(self) -> ConversionUploadServiceTransport: """Return the transport used by the client instance. Returns: ConversionUploadServiceTransport: The transport used by the client instance. 
""" return self._transport @staticmethod def conversion_custom_variable_path( customer_id: str, conversion_custom_variable_id: str, ) -> str: """Return a fully-qualified conversion_custom_variable string.""" return "customers/{customer_id}/conversionCustomVariables/{conversion_custom_variable_id}".format( customer_id=customer_id, conversion_custom_variable_id=conversion_custom_variable_id, ) @staticmethod def parse_conversion_custom_variable_path(path: str) -> Dict[str, str]: """Parse a conversion_custom_variable path into its component segments.""" m = re.match( r"^customers/(?P<customer_id>.+?)/conversionCustomVariables/(?P<conversion_custom_variable_id>.+?)$", path, ) return m.groupdict() if m else {} @staticmethod def common_billing_account_path(billing_account: str,) -> str: """Return a fully-qualified billing_account string.""" return "billingAccounts/{billing_account}".format( billing_account=billing_account, ) @staticmethod def parse_common_billing_account_path(path: str) -> Dict[str, str]: """Parse a billing_account path into its component segments.""" m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path) return m.groupdict() if m else {} @staticmethod def common_folder_path(folder: str,) -> str: """Return a fully-qualified folder string.""" return "folders/{folder}".format(folder=folder,) @staticmethod def parse_common_folder_path(path: str) -> Dict[str, str]: """Parse a folder path into its component segments.""" m = re.match(r"^folders/(?P<folder>.+?)$", path) return m.groupdict() if m else {} @staticmethod def common_organization_path(organization: str,) -> str: """Return a fully-qualified organization string.""" return "organizations/{organization}".format(organization=organization,) @staticmethod def parse_common_organization_path(path: str) -> Dict[str, str]: """Parse a organization path into its component segments.""" m = re.match(r"^organizations/(?P<organization>.+?)$", path) return m.groupdict() if m else {} @staticmethod def common_project_path(project
vabs22/zulip
zerver/lib/create_user.py
Python
apache-2.0
3,927
0.002546
from __future__ import absolute_import from django.contrib.auth.models import UserManager from django.utils.timezone import now as timezone_now from zerver.models import UserProfile, Recipient, Subscription, Realm, Stream import base64 import ujson import os import string from six.moves import range from typing import Optional, Text def random_api_key(): # type: () -> Text choices = string.ascii_letters + string.digits altchars = ''.join([choices[ord(os.urandom(1)) % 62] for _ in range(2)]).encode("utf-8") return base64.b64encode(os.urandom(24), altchars=altchars).decode("utf-8") # create_user_profile is based on Django's User.objects.create_user, # except that we don't save to the database so it can be used in # bulk_creates # # Only use this for bulk_create -- for normal usage one should use # create_user (below) which will also make the Subscription and # Recipient objects def create_user_profile(realm, email, password, active, bot_type, full_name, short_name, bot_owner, is_mirror_dummy, tos_version, timezone, tutorial_status=UserProfile.TUTORIAL_WAITING, enter_sends=False): # type: (Realm, Text, Optional[Text], bool, Optional[int], Text, Text, Optional[UserProfile], bool, Text, Optional[Text], Optional[Text], bool) -> UserProfile now = timezone_now() email = UserManager.normalize_email(email) user_profile = UserProfile(email=email, is_staff=False, is_active=active, full_name=full_name, short_name=short_name, last_login
=now, date_joined=now, realm=realm, pointer=-1, i
s_bot=bool(bot_type), bot_type=bot_type, bot_owner=bot_owner, is_mirror_dummy=is_mirror_dummy, tos_version=tos_version, timezone=timezone, tutorial_status=tutorial_status, enter_sends=enter_sends, onboarding_steps=ujson.dumps([]), default_language=realm.default_language) if bot_type or not active: password = None user_profile.set_password(password) user_profile.api_key = random_api_key() return user_profile def create_user(email, password, realm, full_name, short_name, active=True, bot_type=None, bot_owner=None, tos_version=None, timezone=u"", avatar_source=UserProfile.AVATAR_FROM_GRAVATAR, is_mirror_dummy=False, default_sending_stream=None, default_events_register_stream=None, default_all_public_streams=None, user_profile_id=None): # type: (Text, Optional[Text], Realm, Text, Text, bool, Optional[int], Optional[UserProfile], Optional[Text], Text, Text, bool, Optional[Stream], Optional[Stream], Optional[bool], Optional[int]) -> UserProfile user_profile = create_user_profile(realm, email, password, active, bot_type, full_name, short_name, bot_owner, is_mirror_dummy, tos_version, timezone) user_profile.avatar_source = avatar_source user_profile.timezone = timezone user_profile.default_sending_stream = default_sending_stream user_profile.default_events_register_stream = default_events_register_stream # Allow the ORM default to be used if not provided if default_all_public_streams is not None: user_profile.default_all_public_streams = default_all_public_streams if user_profile_id is not None: user_profile.id = user_profile_id user_profile.save() recipient = Recipient.objects.create(type_id=user_profile.id, type=Recipient.PERSONAL) Subscription.objects.create(user_profile=user_profile, recipient=recipient) return user_profile
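A hedged sketch of calling create_user() for a human and for a bot; the realm object is assumed to be an existing zerver.models.Realm, and the DEFAULT_BOT constant is an assumption standing in for the real bot-type value:

user = create_user(
    email='hamlet@example.com',
    password='s3cret',
    realm=realm,                       # existing Realm instance (assumed)
    full_name='Hamlet',
    short_name='hamlet',
)
bot = create_user(
    email='reminder-bot@example.com',
    password=None,                     # bots get an unusable password, as above
    realm=realm,
    full_name='Reminder Bot',
    short_name='reminder-bot',
    bot_type=UserProfile.DEFAULT_BOT,  # assumed bot-type constant
    bot_owner=user,
)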
foyzur/gpdb
gpMgmt/bin/gppylib/operations/backup_utils.py
Python
apache-2.0
38,070
0.00633
import fnmatch import glob import os import re import tempfile from gppylib import gplog from gppylib.commands.base import WorkerPool, Command, REMOTE from gppylib.commands.unix import Scp from gppylib.db import dbconn from gppylib.db.dbconn import execSQL from gppylib.gparray import GpArray from pygresql import pg from gppylib.operations.utils import DEFAULT_NUM_WORKERS import gzip logger = gplog.get_default_logger() def expand_partitions_and_populate_filter_file(dbname, partition_list, file_prefix): expanded_partitions = expand_partition_tables(dbname, partition_list) dump_partition_list = list(set(expanded_partitions + partition_list)) return create_temp_file_from_list(dump_partition_list, file_prefix) def populate_filter_tables(table, rows, non_partition_tables, partition_leaves): if not rows: non_partition_tables.append(table) else: for (schema_name, partition_leaf_name) in rows: partition_leaf = schema_name.strip() + '.' + partition_leaf_name.strip() partition_leaves.append(partition_leaf) return (non_partition_tables, partition_leaves) def get_all_parent_tables(dbname): SQL = "SELECT DISTINCT (schemaname || '.' || tablename) FROM pg_partitions" data = [] with dbconn.connect(dbconn.DbURL(dbname=dbname)) as conn: curs = dbconn.execSQL(conn, SQL) data = curs.fetchall() return set([d[0] for d in data]) def list_to_quoted_string(filter_tables): filter_string = "'" + "', '".join([pg.escape_string(t) for t in filter_tables]) + "'" return filter_string def convert_parents_to_leafs(dbname, parents): partition_leaves_sql = """ SELECT x.partitionschemaname || '.' || x.partitiontablename FROM ( SELECT distinct schemaname, tablename, partitionschemaname, partitiontablename, partitionlevel FROM pg_partitions WHERE schemaname || '.' || tablename in (%s) ) as X, (SELECT schemaname, tablename maxtable, max(partitionlevel) maxlevel FROM pg_partitions group by (tablename, schemaname) ) as Y WHERE x.schemaname = y.schemaname and x.tablename = Y.maxtable and x.partitionlevel = Y.maxlevel; """ if not parents: return [] conn = dbconn.connect(dbconn.DbURL(dbname=dbname)) partition_sql = partition_leaves_sql % list_to_quoted_string(parents) curs = dbconn.execSQL(conn, partition_sql) rows = curs.fetchall() return [r[0] for r in rows] #input: list of tables to be filtered #output: same list but parent tables converted to leafs def expand_partition_tables(dbname, filter_tables): if not filter_tables or len(filter_tables) == 0: return filter_tables parent_tables = list() non_parent_tables = list() expanded_list = list() all_parent_tables = get_all_parent_tables(dbname) for table in filter_tables: if table in all_parent_tables: parent_tables.append(table) else: non_pa
rent_tables.append(table) expanded_list += non_parent_tables local_batch_size = 1000 for (s, e) in get_batch_from_list(len(parent_tables), local_batch_size): tmp = convert_parents_to_leafs(dbname, parent_tables[s:e])
expanded_list += tmp return expanded_list def get_batch_from_list(length, batch_size): indices = [] for i in range(0, length, batch_size): indices.append((i, i+batch_size)) return indices def create_temp_file_from_list(entries, prefix): """ When writing the entries into temp file, don't do any strip as there might be white space in schema name and table name. """ if len(entries) == 0: return None fd = tempfile.NamedTemporaryFile(mode='w', prefix=prefix, delete=False) for entry in entries: fd.write(entry + '\n') tmp_file_name = fd.name fd.close() verify_lines_in_file(tmp_file_name, entries) return tmp_file_name def create_temp_file_with_tables(table_list): return create_temp_file_from_list(table_list, 'table_list_') def create_temp_file_with_schemas(schema_list): return create_temp_file_from_list(schema_list, 'schema_file_') def validate_timestamp(timestamp): if not timestamp: return False if len(timestamp) != 14: return False if timestamp.isdigit(): return True else: return False def check_successful_dump(report_file_contents): for line in report_file_contents: if line.strip() == 'gp_dump utility finished successfully.': return True return False def get_ddboost_backup_directory(): """ The gpddboost --show-config command, gives us all the ddboost \ configuration details. Third line of the command output gives us the backup directory \ configured with ddboost. """ cmd_str = 'gpddboost --show-config' cmd = Command('Get the ddboost backup directory', cmd_str) cmd.run(validateAfter=True) config = cmd.get_results().stdout.splitlines() for line in config: if line.strip().startswith("Default Backup Directory:"): ddboost_dir = line.split(':')[-1].strip() if ddboost_dir is None or ddboost_dir == "": logger.error("Expecting format: Default Backup Directory:<dir>") raise Exception("DDBOOST default backup directory is not configured. Or the format of the line has changed") return ddboost_dir logger.error("Could not find Default Backup Directory:<dir> in stdout") raise Exception("Output: %s from command %s not in expected format." 
% (config, cmd_str)) # raise exception for bad data def convert_reportfilename_to_cdatabasefilename(report_file, dump_prefix, ddboost=False): (dirname, fname) = os.path.split(report_file) timestamp = fname[-18:-4] if ddboost: dirname = get_ddboost_backup_directory() dirname = "%s/%s" % (dirname, timestamp[0:8]) return "%s/%sgp_cdatabase_1_1_%s" % (dirname, dump_prefix, timestamp) def get_lines_from_dd_file(filename): cmd = Command('DDBoost copy of master dump file', 'gpddboost --readFile --from-file=%s' % (filename)) cmd.run(validateAfter=True) contents = cmd.get_results().stdout.splitlines() return contents def check_cdatabase_exists(dbname, report_file, dump_prefix, ddboost=False, netbackup_service_host=None, netbackup_block_size=None): try: filename = convert_reportfilename_to_cdatabasefilename(report_file, dump_prefix, ddboost) except Exception: return False if ddboost: cdatabase_contents = get_lines_from_dd_file(filename) elif netbackup_service_host: restore_file_with_nbu(netbackup_service_host, netbackup_block_size, filename) cdatabase_contents = get_lines_from_file(filename) else: cdatabase_contents = get_lines_from_file(filename) dbname = escapeDoubleQuoteInSQLString(dbname, forceDoubleQuote=False) for line in cdatabase_contents: if 'CREATE DATABASE' in line: dump_dbname = get_dbname_from_cdatabaseline(line) if dump_dbname is None: continue else: if dbname == checkAndRemoveEnclosingDoubleQuote(dump_dbname): return True return False def get_dbname_from_cdatabaseline(line): """ Line format: CREATE DATABASE "DBNAME" WITH TEMPLATE = template0 ENCODING = 'UTF8' OWNER = gpadmin; To get the dbname: substring between the ending index of the first statement: CREATE DATABASE and the starting index of WITH TEMPLATE whichever is not inside any double quotes, based on the fact that double quote inside any name will be escaped by extra double quote, so there's always only one WITH TEMPLATE not inside any doubles, means its previous and post string should have only even number of double
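Two of the small helpers earlier in this record are easiest to understand from their outputs; note that get_batch_from_list() may overshoot the list length, which is harmless for Python slicing:

print(get_batch_from_list(5, 2))    # -> [(0, 2), (2, 4), (4, 6)]

tables = ['public.sales', 'public.sales_1_prt_2']   # hypothetical table names
tmp_path = create_temp_file_from_list(tables, 'table_list_')
# tmp_path now names a temp file with one entry per line, verified on write.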
EdDev/vdsm
lib/yajsonrpc/stompreactor.py
Python
gpl-2.0
22,060
0
# Copyright (C) 2014-2017 Red Hat Inc. # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License version 2 as # published by the Free Software Foundation. # # This program is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # General Public License for more details. # # You should have received a copy of the GNU General Public # License along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA from __future__ import absolute_import import logging from collections import deque from uuid import uuid4 import functools from vdsm import concurrent from vdsm import constants from vdsm import utils from vdsm.config import config from vdsm.common import api from vdsm.common.compat import json from vdsm.sslutils import SSLSocket from . import JsonRpcClient, JsonRpcServer from . import stomp from .betterAsyncore import Dispatcher, Reactor _STATE_LEN = "Waiting for message length" _STATE_MSG = "Waiting for message" def parseHeartBeatHeader(v): try: x, y = v.split(",", 1) except ValueError: x, y = (0, 0) try: x = int(x) except ValueError: x = 0 try: y = int(y) except ValueError: y = 0 return (x, y) class StompAdapterImpl(object): log = logging.getLogger("Broker.StompAdapter") """ This class is responsible for stomp message processing in the server side. It uses two dictionaries to track request/response destinations. sub_map - maps a destination id to _Subsctiption object representing stomp subscription. req_dest - maps a request id to a destination. """ def __init__(self, reactor, sub_map, req_dest): self._reactor = reactor self._outbox = deque() self._sub_dests = sub_map self._req_dest = req_dest self._sub_ids = {} request_queues = config.get('addresses', 'request_queues') self.request_queues = request_queues.split(",") self._commands = { stomp.Command.CONNECT: self._cmd_connect, stomp.Command.SEND: self._cmd_send, stomp.Command.SUBSCRIBE: self._cmd_subscribe, stomp.Command.UNSUBSCRIBE: self._cmd_unsubscribe, stomp.Command.DISCONNECT: self._cmd_disconnect} @property def has_outgoing_messages(self): return (len(self._outbox) > 0) def peek_message(self): return self._outbox[0] def pop_message(self): return self._outbox.popleft() def queue_frame(self, frame): self._outbox.append(frame) def remove_subscriptions(self): for sub in self._sub_ids.values(): self._remove_subscription(sub) self._sub_ids.clear() def _cmd_connect(self, dispatcher, frame): self.log.info("Processing CONNECT request") version = frame.headers.get(stomp.Headers.ACCEPT_VERSION, None) if version != "1.2": resp = stomp.Frame( stomp.Command.ERROR, None, "Version unsupported" ) else: resp = stomp.Frame(stomp.Command.CONNECTED, {"version": "1.2"}) cx, cy = parseHeartBeatHeader( frame.headers.get(stomp.Headers.HEARTEBEAT, "0,0") ) # Make sure the heart-beat interval is sane if cy != 0: cy = max(cy, 1000) # The server can send a heart-beat every cy ms and doesn't want # to receive any heart-beat from the client. 
resp.headers[stomp.Headers.HEARTEBEAT] = "%d,0" % (cy,) dispatcher.setHeartBeat(cy) self.queue_frame(resp) self._reactor.wakeup() def _cmd_subscribe(self, dispatcher, frame): self.log.info("Subscribe command received") destination = frame.headers.get("destination", None) sub_id = frame.headers.get("id", None) if not destination or not sub_id: self._send_error("Missing destination or subscription id header", dispatcher.connection) return if sub_id in self._sub_ids: self._send_error("Subscription id already exists", dispatcher.connection) return ack = frame.headers.get("ack", stomp.AckMode.AUTO) subscription = stomp._Subscription(dispatcher.connection, destination, sub_id, ack, None) self._sub_dests[destination].append(subscription) self._sub_ids[sub_id] = subscription def _send_error(self, msg, con
nection): res = stomp.Frame( stomp.Command.ERROR, None, msg ) connection.send_raw(res) def _cmd_unsubscribe(self, dispatcher, frame): self.log.info("Unsubscribe command received") sub_id = frame.headers.get("id", None) if not sub_id: self._send_error("Missing id header", dispatcher.connection) return try: subscr
iption = self._sub_ids.pop(sub_id) except KeyError: self.log.debug("No subscription for %s id", sub_id) return else: self._remove_subscription(subscription) def _cmd_disconnect(self, dispatcher, frame): self.log.info("Disconnect command received") r_id = frame.headers[stomp.Headers.RECEIPT] if not r_id: self.log.debug("No receipt id for disconnect frame") # it is not mandatory to send receipt frame return headers = {stomp.Headers.RECEIPT_ID: r_id} dispatcher.connection.send_raw(stomp.Frame(stomp.Command.RECEIPT, headers)) def _remove_subscription(self, subscription): subs = self._sub_dests[subscription.destination] if len(subs) == 1: del self._sub_dests[subscription.destination] else: if subscription in subs: subs.remove(subscription) def _cmd_send(self, dispatcher, frame): destination = frame.headers.get(stomp.Headers.DESTINATION, None) # Get the list of all known subscribers. subs = self.find_subscribers(destination) # Forward the message to all explicit subscribers. for subscription in subs: self._forward_frame(subscription, frame) # Is this a command that is meant to be answered # by the internal implementation? if any(destination == queue or destination.startswith(queue + ".") for queue in self.request_queues): self._handle_internal(dispatcher, frame.headers.get(stomp.Headers.REPLY_TO), frame.headers.get(stomp.Headers.FLOW_ID), frame.body) return # This was not a command nor there were any subscribers, # return an error! if not subs: self._send_error("Subscription not available", dispatcher.connection) def _forward_frame(self, subscription, frame): """ This method creates a new frame with the right body and updated headers and forwards it to the subscriber. """ headers = {stomp.Headers.SUBSCRIPTION: subscription.id} headers.update(frame.headers) res = stomp.Frame( stomp.Command.MESSAGE, headers, frame.body ) subscription.client.send_raw(res) def _handle_internal(self, dispatcher, req_dest, flow_id, request): """ We need to build response dictionary which maps message id with destination. For legacy mode we use known 3.5 destination or for standard mode we use 'reply-to' header.
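The heart-beat header parser defined near the top of this record never raises on malformed input; a short demonstration of its fallback behaviour:

print(parseHeartBeatHeader('1000,2000'))  # -> (1000, 2000)
print(parseHeartBeatHeader('8000'))       # -> (0, 0): no comma to split on
print(parseHeartBeatHeader('abc,def'))    # -> (0, 0): non-numeric parts default to 0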
Yadnyawalkya/integration_tests
cfme/utils/ports.py
Python
gpl-2.0
747
0.002677
# -*- coding: utf-8 -*- """Storage for ports. Set defaults here, then :py:mod:`fixtures.portset` will make overrides."""
import sys from cfme.utils import clear_property_cache from cfme.utils.log import logger class Ports(object): SSH = 22 DB = 5432 TOWER = 54322 logger = logger @property def _top(self, m=sys.modules): mod = m.get('utils.appliance') return mod and mod.stack.top def __setattr__(self, attr, value): super(self.__class__, self).__setattr__(attr, value)
if self._top is not None: self.logger.info("Invalidating lazy_cache ssh_client current_appliance object") clear_property_cache(self._top, 'ssh_client') sys.modules[__name__] = Ports()
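Because the module replaces itself with a Ports instance in sys.modules, overriding a port is a plain attribute assignment; a sketch, assuming the canonical import path implied by this file's location:

from cfme.utils import ports

ports.SSH = 2222                           # triggers the ssh_client cache invalidation above
print(ports.SSH, ports.DB, ports.TOWER)   # -> 2222 5432 54322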
DrZhang/LogAnalyst
Analysts/AbsAnalyst.py
Python
lgpl-3.0
353
0.05949
import re import Queue class AbsAn
alyst(object): """docstring for AbsAnalyst""" LOGTIME_REGEXP = re.compile("(?P<log_time>\w{4}-\w{2}-\w{2} \w{2}:\w{2}:\w{2})") def __init__(self): raise NotImplementedError def isMatch(self, line): raise NotImplementedError def doSta
tistic(self): raise NotImplementedError def doAnalyse(self): raise NotImplementedError
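A minimal concrete analyst, assuming the intended contract is that subclasses override all four methods; the ERROR-counting logic is purely illustrative:

class ErrorCountAnalyst(AbsAnalyst):
    def __init__(self):
        self.count = 0   # deliberately does not call the abstract __init__

    def isMatch(self, line):
        return 'ERROR' in line and AbsAnalyst.LOGTIME_REGEXP.search(line) is not None

    def doStatistic(self):
        self.count += 1

    def doAnalyse(self):
        return self.count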
open-synergy/runbot-addons
runbot_language/models/__init__.py
Python
agpl-3.0
433
0
# -*- coding: utf-8 -*- ############################################################## # Module Written For Odoo, Open Source Management Solution # # Copyright (c) 2011 Vauxoo - http://www.vauxoo.com #
All Righ
ts Reserved. # info Vauxoo (info@vauxoo.com) # coded by: moylop260@vauxoo.com ############################################################################ from . import runbot_repo from . import runbot_build
Acehaidrey/incubator-airflow
airflow/providers/amazon/aws/hooks/redshift.py
Python
apache-2.0
1,282
0.00078
# # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """Interact with AWS Redshift clusters.""" import warnings from airflow.providers.amazon.aws.ho
oks.redshift_cluster import RedshiftHook fr
om airflow.providers.amazon.aws.hooks.redshift_sql import RedshiftSQLHook warnings.warn( "This module is deprecated. Please use `airflow.providers.amazon.aws.hooks.redshift_cluster` " "or `airflow.providers.amazon.aws.hooks.redshift_sql` as appropriate.", DeprecationWarning, stacklevel=2, ) __all__ = ["RedshiftHook", "RedshiftSQLHook"]
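New code should bypass this shim and import directly from the replacement modules the warning names:

from airflow.providers.amazon.aws.hooks.redshift_cluster import RedshiftHook
from airflow.providers.amazon.aws.hooks.redshift_sql import RedshiftSQLHook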
AutorestCI/azure-sdk-for-python
azure-mgmt-network/azure/mgmt/network/v2017_09_01/models/ipv6_express_route_circuit_peering_config.py
Python
mit
2,366
0.001691
# coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for # license information. # # Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is # regenerated. # -------------------------------------------------------------------------- from msrest.serialization import Model class Ipv6ExpressRouteCircuitPeeringConfig(Model): """Contains IPv6 peering config. :param primary_peer_address_prefix: The primary address prefix. :type primary_peer_address_prefix: str :param secondary_peer_address_prefix: The secondary address prefix. :type secondary_peer_address_prefix: str :param microsoft_peering_config: The Microsoft peering configuration. :type microsoft_peering_config: ~azure.mgmt.network.v2017_09_01.models.ExpressRouteCircuitPeeringConfig :param route_filter: The reference of the RouteFilter resource. :type route_filter: ~azure.mgmt.network.v2017_09_01.models.RouteFilter :param state: The state of peering. Possible values are: 'Disabled' and 'Enabled'. Possible values include: 'Disabled', 'Enabled' :type state: str or ~azure.mgmt.network.v2017_09_01.models.ExpressRouteCircuitPeeringState """ _attribute_map = { 'primary_peer_address_prefix': {'key': 'primaryPeerAddressPrefix', 'type': 'str'}, 'secondary_peer_address_prefix': {'key': 'secondaryPeerAddressPrefix', 'type': 'str'}, 'microsoft_peering_config': {'key': 'microsoftPeeringConfig', 'type': 'ExpressRouteCircuitPeeringConfig'}, 'route_filter': {'key': 'routeFilter', 'type': 'RouteFilter'}, 'state': {'key': 'state', 'type': 'str'}, } def __init__(self, primary_peer_address_prefix=None, secondary_peer_address_prefix=None, microsoft_peering_config=None, route_filter
=None, state=None): super(Ipv6ExpressRouteCircuitPeeringConfig, self).__init__() self.primary_peer_address_prefix = primary_peer_address_prefix sel
f.secondary_peer_address_prefix = secondary_peer_address_prefix self.microsoft_peering_config = microsoft_peering_config self.route_filter = route_filter self.state = state
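A minimal instantiation sketch; the address prefixes are hypothetical, and the two omitted parameters would take the ExpressRouteCircuitPeeringConfig and RouteFilter model types noted in the docstring:

config = Ipv6ExpressRouteCircuitPeeringConfig(
    primary_peer_address_prefix='3FFE:FFFF:0:CD30::/126',
    secondary_peer_address_prefix='3FFE:FFFF:0:CD30::4/126',
    state='Enabled',
)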
alessandroHenrique/coinpricemonitor
dashboard/urls.py
Python
mit
134
0
from .views imp
ort IndexView from django.conf.u
rls import url urlpatterns = [ url(r'^$', IndexView.as_view(), name='index'), ]
lchi/blimp
blimp/commands/terminate.py
Python
mit
1,260
0.001587
from pprint import pformat import boto3 from clint.textui import indent, puts, prompt def _get_instance_details(instance): return { 'tags': instance.tags, 'launch_time': instance.launch_time.isoformat(), 'instance_type': instance.instance_type, 'state': instance.s
tate, 'key_name': instance.key_name, 'public_dns_name': instance.public_dns_name, 'private_dns_name': instance.private_dns_name, 'placement': instance.placement, } def terminate(args, config): ec2 = boto3.resource('ec2') instance_id = args.instance_id instance = ec2.Instance(instance_id) puts("Instance details:") with indent(4): puts(pformat(_get_instance_details(instance))) confirm = prompt.query("Terminate instance {}? (y/n)".
format(instance_id), validators=[]) if confirm == "y": instance.terminate() puts("termination request issued") else: puts("aborted") def _register_terminate(subparsers): parser_terminate = subparsers.add_parser('terminate', help='terminate help') parser_terminate.add_argument('instance_id', type=str, help='id of the instance to terminate')
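A sketch of how _register_terminate() plugs into a top-level parser; the program name and the instance id are hypothetical, and the dispatch step is an assumption since this module only registers the subcommand:

import argparse

parser = argparse.ArgumentParser(prog='blimp')
subparsers = parser.add_subparsers(dest='command')
_register_terminate(subparsers)

args = parser.parse_args(['terminate', 'i-0123456789abcdef0'])
# terminate(args, config) would then be invoked with the parsed instance_id.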
geertj/gruvi
tests/test_jsonrpc.py
Python
mit
27,989
0.000322
# # This file is part of Gruvi. Gruvi is free software available under the # terms of the MIT license. See the file "LICENSE" that was provided # together with this source file for the licensing terms. # # Copyright (c) 2012-2017 the Gruvi authors. See the file "AUTHORS" for a # complete list. from __future__ import absolute_import, print_function import os import json import unittest import six import gruvi from gruvi import jsonrpc from gruvi.jsonrpc import JsonRpcError, JsonRpcVersion from gruvi.jsonrpc import JsonRpcProtocol, JsonRpcClient, JsonRpcServer from gruvi.jsonrpc_ffi import ffi as _ffi, lib as _lib from gruvi.transports import TransportError from support import UnitTest, MockTransport _keepalive = None def set_buffer(ctx, buf): global _keepalive # See note in JsonRpcProtocol _keepalive = ctx.buf = _ffi.from_buffer(buf) ctx.buflen = len(buf) ctx.offset = 0 def split_string(s): ctx = _ffi.new('struct split_context *') set_buffer(ctx, s) _lib.json_split(ctx) return ctx JsonRpcProtocol.default_version = '1.0' class TestJsonSplitter(UnitTest): def test_simple(self): r = b'{ "foo": "bar" }' ctx = split_string(r) self.assertEqual(ct
x.error, 0) self.assertEqual(ctx.offset, len(r)) def test_leading_whitespace(self): r = b' { "foo": "bar" }' ctx = split_string(r) self.assertEqual(ctx.error, 0) self.assertEqual(ctx.offse
t, len(r)) r = b' \t\n{ "foo": "bar" }' ctx = split_string(r) self.assertEqual(ctx.error, 0) self.assertEqual(ctx.offset, len(r)) def test_trailing_whitespace(self): r = b'{ "foo": "bar" } ' ctx = split_string(r) self.assertEqual(ctx.error, 0) self.assertEqual(ctx.offset, len(r)-1) error = _lib.json_split(ctx) self.assertEqual(error, ctx.error) self.assertEqual(ctx.error, _lib.INCOMPLETE) self.assertEqual(ctx.offset, len(r)) def test_brace_in_string(self): r = b'{ "foo": "b{r" }' ctx = split_string(r) self.assertEqual(ctx.error, 0) self.assertEqual(ctx.offset, len(r)) r = b'{ "foo": "b}r" }' ctx = split_string(r) self.assertEqual(ctx.error, 0) self.assertEqual(ctx.offset, len(r)) def test_string_escape(self): r = b'{ "foo": "b\\"}" }' ctx = split_string(r) self.assertEqual(ctx.error, 0) self.assertEqual(ctx.offset, len(r)) def test_error(self): r = b' x { "foo": "bar" }' ctx = split_string(r) self.assertEqual(ctx.error, _lib.ERROR) self.assertEqual(ctx.offset, 1) r = b'[ { "foo": "bar" } ]' ctx = split_string(r) self.assertEqual(ctx.error, _lib.ERROR) self.assertEqual(ctx.offset, 0) def test_multiple(self): r = b'{ "foo": "bar" } { "baz": "qux" }' ctx = split_string(r) self.assertEqual(ctx.error, 0) self.assertEqual(ctx.offset, 16) error = _lib.json_split(ctx) self.assertEqual(error, ctx.error) self.assertEqual(ctx.error, 0) self.assertEqual(ctx.offset, len(r)) def test_incremental(self): r = b'{ "foo": "bar" }' ctx = _ffi.new('struct split_context *') for i in range(len(r)-1): set_buffer(ctx, r[i:i+1]) error = _lib.json_split(ctx) self.assertEqual(error, ctx.error) self.assertEqual(ctx.error, _lib.INCOMPLETE) self.assertEqual(ctx.offset, 1) set_buffer(ctx, r[-1:]) error = _lib.json_split(ctx) self.assertEqual(error, ctx.error) self.assertEqual(ctx.error, 0) self.assertEqual(ctx.offset, 1) class TestJsonRpcV1(UnitTest): def setUp(self): super(TestJsonRpcV1, self).setUp() self.version = JsonRpcVersion.create('1.0') def test_check_request(self): v = self.version msg = {'id': 1, 'method': 'foo', 'params': []} self.assertEqual(v.check_message(msg), jsonrpc.REQUEST) msg = {'id': None, 'method': 'foo', 'params': []} self.assertEqual(v.check_message(msg), jsonrpc.REQUEST) def test_check_request_missing_id(self): v = self.version msg = {'method': 'foo', 'params': []} self.assertRaises(ValueError, v.check_message, msg) def test_check_request_missing_method(self): v = self.version msg = {'id': 1, 'params': []} self.assertRaises(ValueError, v.check_message, msg) def test_check_request_illegal_method(self): v = self.version msg = {'id': 1, 'method': None, 'params': []} self.assertRaises(ValueError, v.check_message, msg) msg = {'id': 1, 'method': 1, 'params': []} self.assertRaises(ValueError, v.check_message, msg) msg = {'id': 1, 'method': {}, 'params': []} self.assertRaises(ValueError, v.check_message, msg) msg = {'id': 1, 'method': [], 'params': []} self.assertRaises(ValueError, v.check_message, msg) msg = {'id': 1, 'method': [1], 'params': []} self.assertRaises(ValueError, v.check_message, msg) def test_check_request_missing_params(self): v = self.version msg = {'id': 1, 'method': 'foo'} self.assertRaises(ValueError, v.check_message, msg) def test_check_request_illegal_params(self): v = self.version msg = {'id': 1, 'method': 'foo', 'params': None} self.assertRaises(ValueError, v.check_message, msg) msg = {'id': 1, 'method': 'foo', 'params': 1} self.assertRaises(ValueError, v.check_message, msg) msg = {'id': 1, 'method': 'foo', 'params': 'foo'} self.assertRaises(ValueError, v.check_message, msg) msg = {'id': 1, 'method': 'foo', 'params': {}} self.assertRaises(ValueError, v.check_message, msg) def test_check_request_extraneous_fields(self): v = 
self.version msg = {'id': 1, 'method': 'foo', 'params': [], 'bar': 'baz'} self.assertRaises(ValueError, v.check_message, msg) def test_check_response(self): v = self.version msg = {'id': 1, 'result': 'foo', 'error': None} self.assertEqual(v.check_message(msg), jsonrpc.RESPONSE) def test_check_response_null_result(self): v = self.version msg = {'id': 1, 'result': None, 'error': None} self.assertEqual(v.check_message(msg), jsonrpc.RESPONSE) def test_check_response_error(self): v = self.version msg = {'id': 1, 'result': None, 'error': {'code': 1}} self.assertEqual(v.check_message(msg), jsonrpc.RESPONSE) def test_check_response_missing_id(self): v = self.version msg = {'result': 'foo', 'error': None} self.assertRaises(ValueError, v.check_message, msg) def test_check_response_missing_result(self): v = self.version msg = {'id': 1, 'error': None} self.assertRaises(ValueError, v.check_message, msg) def test_check_response_missing_error(self): v = self.version msg = {'id': 1, 'result': None} self.assertRaises(ValueError, v.check_message, msg) def test_check_response_illegal_error(self): v = self.version msg = {'id': 1, 'result': None, 'error': 1} self.assertRaises(ValueError, v.check_message, msg) msg = {'id': 1, 'result': None, 'error': 'foo'} self.assertRaises(ValueError, v.check_message, msg) msg = {'id': 1, 'result': None, 'error': []} self.assertRaises(ValueError, v.check_message, msg) def test_check_response_result_error_both_set(self): v = self.version msg = {'id': 1, 'result': 1, 'error': 0} self.assertRaises(ValueError, v.check_message, msg) def test_check_response_extraneous_fields(self): v = self.version msg = {'id': 1, 'result': 1, 'error': None, 'bar': 'baz'} self.assertRaises(ValueError, v.check_message, msg) def test_create_request(self): v = self.version msg = v.create_request('foo', []) self.assertIsInstance(msg['id'], six.string_types) self.assertEqual(msg['method'], 'foo') self.assertEqual(msg['params'], []) self.assertEqual(len(msg), 3) def test_create_request_notification(self): v =
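The C splitter exercised by the tests above is not included in this excerpt, so as a reading aid here is a pure-Python sketch of the brace-depth scan the assertions imply. It is reconstructed from the test expectations only: the INCOMPLETE/ERROR values and the (error, offset) return shape are assumptions, not the library's actual API.

INCOMPLETE = 1   # assumed value; stands in for _lib.INCOMPLETE
ERROR = 2        # assumed value; stands in for _lib.ERROR

def json_split(buf, offset=0):
    """Scan buf[offset:] for one complete top-level JSON object.

    Returns (error, offset): 0 when a full object was consumed,
    INCOMPLETE when the buffer ended first, ERROR when the input
    cannot start an object. offset is where the scan stopped.
    """
    depth = 0
    in_string = escaped = False
    while offset < len(buf):
        ch = buf[offset:offset+1]
        if in_string:
            if escaped:
                escaped = False            # char after a backslash is literal
            elif ch == b'\\':
                escaped = True
            elif ch == b'"':
                in_string = False
        elif depth == 0:
            if ch == b'{':
                depth = 1                  # object opens
            elif ch not in b' \t\r\n':
                return ERROR, offset       # only whitespace may precede '{'
        elif ch == b'"':
            in_string = True
        elif ch == b'{':
            depth += 1
        elif ch == b'}':
            depth -= 1
            if depth == 0:
                return 0, offset + 1       # one past the closing brace
        offset += 1
    return INCOMPLETE, offset

# Mirrors test_multiple: the first call stops after the first object,
# and a second call would resume at the returned offset.
assert json_split(b'{ "foo": "bar" } { "baz": "qux" }') == (0, 16)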
xhava/hippyvm
hippy/module/hash/cjoaat.py
Python
mit
1,324
0.001511
from rpython.translator.tool.cbuild import ExternalCompilationInfo
from rpython.rtyper.tool import rffi_platform as platform
from rpython.rtyper.lltypesystem import rffi, lltype
from hippy.tool.platform import get_gmake

import subprocess
import py

LIBDIR = py.path.local(__file__).join('..', 'lib', 'joaat/')

subprocess.check_call([get_gmake(), '-C', str(LIBDIR)])

eci = ExternalCompilationInfo(
    includes=['joaat.h'],
    library_dirs=[str(LIBDIR)],
    libraries=['joaat1'],
    testonly_libraries=['joaat'],
    include_dirs=[str(LIBDIR)])


class CConfig:
    _compilation_info_ = eci
    JOAAT_CTX = platform.Struct('JOAAT_CTX', [])

globals().update(platform.configure(CConfig))


def external(name, args, result):
    return rffi.llexternal(name, args, result,
                           compilation_info=eci, releasegil=False)

PTR_JOAAT_CTX = lltype.Ptr(JOAAT_CTX)

c_JOAATInit = external('JOAATInit', [PTR_JOAAT_CTX], lltype.Void)

c_JOAATUpdate = external('JOAATUpdate', [PTR_JOAAT_CTX,
                                         rffi.CCHARP,
                                         rffi.UINT], lltype.Void)

c_JOAATFinal = external('JOAATFinal', [rffi.CCHARP,
                                       PTR_JOAAT_CTX], lltype.Void)
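The C library these bindings build and wrap implements Bob Jenkins' one-at-a-time ("joaat") hash, but only the Init/Update/Final entry points are visible above. For orientation, here is a self-contained pure-Python rendering of that standard algorithm; it is illustrative and not part of the module (the explicit 0xffffffff masks emulate C's 32-bit unsigned overflow):

def joaat(data):
    """Bob Jenkins' one-at-a-time hash of a byte string (32-bit)."""
    h = 0
    for b in bytearray(data):            # iterate over byte values
        h = (h + b) & 0xffffffff
        h = (h + (h << 10)) & 0xffffffff
        h ^= h >> 6
    h = (h + (h << 3)) & 0xffffffff
    h ^= h >> 11
    h = (h + (h << 15)) & 0xffffffff
    return h

# The digest is conventionally printed as 8 hex digits,
# e.g. format(joaat(b'abc'), '08x').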
denisenkom/django
tests/proxy_models/models.py
Python
bsd-3-clause
4,379
0.005481
""" By specifying the 'proxy' Meta attribute, model subclasses can specify that they will take data directly from the table of their base class table rather than using a new table of their own. This allows them to act as simple proxies, providing a modified interface to the data from the base class. """ from django.db import models from django.utils.encoding import python_2_unicode_compatible # A couple of managers for testing managing overriding in proxy model cases. class PersonManager(models.Manager): def get_queryset(self): return super(PersonManager, self).get_queryset().exclude(name="fred") class SubManager(models.Manager): def get_queryset(self): return super(SubManager, self).get_queryset().exclude(name="wilma") @python_2_unicode_compatible class Person(models.Model): """ A simple concrete base class. """ name = models.CharField(max_length=50) objects = PersonManager() def __str__(self): return self.name class Abstract(models.Model): """ A simple abstract base class, to be used for error checking. """ data = models.CharField(max_length=10) class Meta: abstract = True class MyPerson(Person): """ A proxy subclass, this should not get a new table. Overrides the default manager. """ class Meta: proxy = True ordering = ["name"] permissions = ( ("display_users", "May display users information"), ) objects = SubManager() other = PersonManager() def has_special_name(self): return self.name.lower() == "special" class ManagerMixin(models.Model): excluder = SubManager() class Meta: abstract = True class OtherPerson(Person, ManagerMixin): """ A class with the default manager from Person, plus an secondary manager. """ class Meta: proxy = True ordering = ["name"] class StatusPerson(MyPerson): """ A non-proxy subclass of a proxy, it should get a new table. """ status = models.CharField(max_length=80) # We can even have proxies of proxies (and subclass of those). class MyPersonProxy(MyPerson): class Meta: proxy = True class LowerStatusPerson(MyPersonProxy): status = models.CharField(max_length=80) @python_2_unicode_compatible class User(models.Model): name = models.CharField(max_length=100) def __str__(self): return self.name class UserProxy(User): class Meta: proxy = True class UserProxyProxy(UserProxy): class Meta: proxy = True # We can still use `select_related()` to include related models in our querysets. class Country(models.Model): name = models.CharField(max_length=50) @python_2_unicode_compatible class State(models.Model): name = models.CharField(max_length=50) country = models.ForeignKey(Country) def __str__(self): return self.name class StateProx
y(State): class Meta: proxy = True # Proxy models still works with filters (on related fields) # and select_related, even when mixed with model inheritance @python_2_unicode_compatible class BaseUser(models.Model): name = models.CharField(max_length=255) def __str__(self): return ':'.join((self.__class__.__name__, self.name,)) class TrackerUser(BaseUser): status = models.CharField(max_length=50) class ProxyTrackerUser(Tracke
rUser): class Meta: proxy = True @python_2_unicode_compatible class Issue(models.Model): summary = models.CharField(max_length=255) assignee = models.ForeignKey(TrackerUser) def __str__(self): return ':'.join((self.__class__.__name__, self.summary,)) class Bug(Issue): version = models.CharField(max_length=50) reporter = models.ForeignKey(BaseUser) class ProxyBug(Bug): """ Proxy of an inherited class """ class Meta: proxy = True class ProxyProxyBug(ProxyBug): """ A proxy of proxy model with related field """ class Meta: proxy = True class Improvement(Issue): """ A model that has relation to a proxy model or to a proxy of proxy model """ version = models.CharField(max_length=50) reporter = models.ForeignKey(ProxyTrackerUser) associated_bug = models.ForeignKey(ProxyProxyBug) class ProxyImprovement(Improvement): class Meta: proxy = True
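A short, hypothetical usage sketch of the proxy behavior these models are built to test; it assumes an app containing the models is installed and migrated, and uses only names defined above:

# Hypothetical shell/test session, assuming migrations have run.
Person.objects.create(name="special")

# A proxy has no table of its own; it reads the concrete base's table.
assert MyPerson._meta.proxy
assert MyPerson._meta.db_table == Person._meta.db_table

# It does carry its own default manager and extra methods.
p = MyPerson.objects.get(name="special")
assert p.has_special_name()

# PersonManager hides "fred" from Person's default queryset...
Person.objects.create(name="fred")
assert not Person.objects.filter(name="fred").exists()
# ...while MyPerson's SubManager hides "wilma" instead, so "fred" is visible.
assert MyPerson.objects.filter(name="fred").exists()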