repo_name (stringlengths 5-100) | path (stringlengths 4-231) | language (stringclasses, 1 value) | license (stringclasses, 15 values) | size (int64, 6-947k) | score (float64, 0-0.34) | prefix (stringlengths 0-8.16k) | middle (stringlengths 3-512) | suffix (stringlengths 0-8.17k)
---|---|---|---|---|---|---|---|---
dmacvicar/spacewalk | backend/server/rhnSQL/sql_types.py | Python | gpl-2.0 | 1,465 | 0 |
#
# Copyright (c) 2008--2010 Red Hat, Inc.
#
# This software is licensed to you under the GNU General Public License,
# version 2 (GPLv2). There is NO WARRANTY for this software, express or
# implied, including the implied warranties of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. You should have received a copy of GPLv2
# along with this software; if not, see
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
#
# Red Hat trademarks are not licensed under GPLv2. No permission is
# granted to use or replicate Red Hat trademarks that are incorporated
# in this software or its documentation.
#
#
# Database types we support for out variables
#
# Data types
class DatabaseDataType:
    type_name = None
def __init__(self, value=None, size=None):
self.size = size or 1
self.set_value(value)
def get_value(self):
return self.value
def set_value(self, value):
self.value = value
def __str__(self):
return self.type_name
class NUMBER(DatabaseDataType):
type_name = "NUMBER"
class STRING(DatabaseDataType):
type_name = "STRING"
def __init__(self, value=None, size=None):
DatabaseDataType.__init__(self, value=value, size=size)
if not size:
self.size = 4000
class BINARY(DatabaseDataType):
type_name = "BINARY"
class LONG_BINARY(DatabaseDataType):
type_name = "LONG_BINARY"
# XXX More data types to be added as we find need for them
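A brief usage sketch for the wrappers above; the procedure call is hypothetical and only illustrates how an out-variable placeholder would be passed in and read back:

# Illustration only: typed placeholders for procedure out-parameters.
out_count = NUMBER()
out_label = STRING(size=128)

# A driver-level call would populate them, e.g. (hypothetical cursor API):
# cursor.callproc("channel_summary", [channel_id, out_count, out_label])

print(out_count)              # "NUMBER" -- __str__ returns the type name
print(out_label.get_value())  # None until the database sets a value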

falbassini/googleads-dfa-reporting-samples | python/v2.2/target_ad_to_remarketing_list.py | Python | apache-2.0 | 2,651 | 0.00679 |
#!/usr/bin/python
#
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This example targets an ad to a remarketing list.
The first targetable remarketing list, either owned by or shared to the ad's
advertiser, will be used. To create a remarketing list, see
create_remarketing_list.py. To share a remarketing list with the ad's
advertiser, see share_remarketing_list_to_advertiser.py.
"""
import argparse
import sys
from apiclient import sample_tools
from oauth2client import client
# Declare command-line flags.
argparser = argparse.ArgumentParser(add_help=False)
argparser.add_argument(
'profile_id', type=int,
help='The ID of the profile to use for targeting')
argparser.add_argument('ad_id', type=int, help='The ID of the ad to target')
def main(argv):
# Authenticate and construct service.
service, flags = sample_tools.init(
argv, 'dfareporting', 'v2.2', __doc__, __file__, parents=[argparser],
scope=['https://www.googleapis.com/auth/dfareporting',
'https://www.googleapis.com/auth/dfatrafficking'])
profile_id = flags.profile_id
ad_id = flags.ad_id
try:
# Retrieve the ad.
ad = service.ads().get(profileId=profile_id, id=ad_id).execute()
# Retrieve a single targetable remarketing list for the ad.
lists = service.targetableRemarketingLists().list(
profileId=profile_id, advertiserId=ad['advertiserId'],
maxResults=1).execute()
if lists['targetableRemarketingLists']:
list = lists['targetableRemarketingLists'][0]
# Update the ad with a list targeting expression
ad['remarketing_list_expression'] = { 'expression': list['id'] }
response = service.ads().update(profileId=profile_id, body=ad).execute()
print ('Ad %s updated to use remarketing list expression: "%s".'
% (response['id'],
response['remarketing_list_expression']['expression']))
except client.AccessTokenRefreshError:
    print ('The credentials have been revoked or expired, please re-run the '
           'application to re-authorize')


if __name__ == '__main__':
  main(sys.argv)

5monkeys/blues | blues/redis.py | Python | mit | 1,983 | 0.001513 |
"""
Redis Blueprint
===============
**Fabric environment:**
.. code-block:: yaml
blueprints:
- blues.redis
settings:
redis:
# bind: 0.0.0.0 # Set the bind address specifically (Default: 127.0.0.1)
"""
import re
from fabric.decorators import task
from fabric.utils import abort
from refabric.context_managers import sudo
from refabric.contrib import blueprints
from . import debian
from refabric.operations import run
__all__ = ['start', 'stop', 'restart', 'setup', 'configure']
blueprint = blueprints.get(__name__)
start = debian.service_task('redis-server', 'start')
stop = debian.service_task('redis-server', 'stop')
restart = debian.service_task('redis-server', 'restart')
@task
def setup():
"""
Install and configure Redis
"""
install()
configure()
def install():
with sudo():
debian.apt_get('install', 'redis-server')
def get_installed_version():
"""
Get installed version as tuple.
Parsed output format:
    Redis server v=2.8.4 sha=00000000:0 malloc=jemalloc-3.4.1 bits=64 build=a...
"""
retval = run('redis-server --version')
m = re.match('.+v=(?P<version>[0-9\.]+).+', retval.stdout)
try:
_v = m.group('version')
v = tuple(map(int, str(_v).split('.')))
return v
except IndexError:
abort('Failed to get installed redis version')
@task
def configure():
"""
Configure Redis
"""
context = {
'bind': blueprint.get('bind', '127.0.0.1')
}
version = get_installed_version()
if version <= (2, 4):
config = 'redis-2.4.conf'
elif version < (3, 0):
config = 'redis-2.8.conf'
else:
config = 'redis-3.conf'
uploads = blueprint.upload(config, '/etc/redis/redis.conf', context)
if uploads:
if debian.lbs_release() >= '16.04':
debian.chown(location='/etc/redis/redis.conf',
owner='redis', group='root')
restart()
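As a quick illustration of how `get_installed_version` feeds the template choice in `configure`, here is the parsing step applied to the output format quoted in its docstring (the sample string is assumed):

import re

sample = "Redis server v=2.8.4 sha=00000000:0 malloc=jemalloc-3.4.1 bits=64 build=a1b2c3"
m = re.match(r'.+v=(?P<version>[0-9.]+).+', sample)
version = tuple(map(int, m.group('version').split('.')))
print(version)  # (2, 8, 4) -> matches the "< (3, 0)" branch, so redis-2.8.conf is uploaded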

praekelt/panya-show | show/urls.py | Python | bsd-3-clause | 804 | 0.007463 |
from django.conf.urls.defaults import patterns, url
urlpatterns = patterns(
'show.views',
url(r'^radioshow/entrylist/$', 'radioshow_entryitem_list', name='radioshow_entryitem_list'),
    url(r'^showcontributor/list/(?P<slug>[\w-]+)/$', 'showcontributor_content_list', name='showcontributor_content_list'),
url(r'^showcontributor/appearance/(?P<slug>[\w-]+)/$', 'showcontributor_appearance_list', name='showcontributor_appearance_list'),
    url(r'^showcontributor/(?P<slug>[\w-]+)/$', 'showcontributor_detail', name='showcontributor_detail'),
url(r'^showcontributor/content/(?P<slug>[\w-]+)/$', 'showcontributor_content_detail', name='showcontributor_content_detail'),
url(r'^showcontributor/contact/(?P<slug>[\w-]+)/$', 'showcontributor_contact', name='showcontributor_contact'),
)

Sarthak30/Codeforces | soft_drinking.py | Python | gpl-2.0 | 109 | 0.027523 |
n, k, l, c, d, p, nl, np = map(int,raw_input().split())
a = k*l
x = a/nl
y = c*d
z = p/np
print min(x,y,z)/n
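A worked example of the arithmetic (illustrative values, not claimed to be a judge test): with 3 friends, 4 bottles of 5 ml, 10 limes cut into 8 slices, 100 g of salt, and 3 ml and 1 g per toast, the drink is the bottleneck.

# Same formula as above with concrete numbers (Python 2 integer division):
n, k, l, c, d, p, nl, np = 3, 4, 5, 10, 8, 100, 3, 1
print min(k*l/nl, c*d, p/np)/n  # min(20/3=6, 80, 100) = 6 toasts total -> 6/3 = 2 per friend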

Marcdnd/cryptoescudo | contrib/spendfrom/spendfrom.py | Python | mit | 10,054 | 0.005968 |
#!/usr/bin/env python
#
# Use the raw transactions API to spend bitcoins received on particular addresses,
# and send any change back to that same address.
#
# Example usage:
# spendfrom.py # Lists available funds
# spendfrom.py --from=ADDRESS --to=ADDRESS --amount=11.00
#
# Assumes it will talk to a bitcoind or Bitcoin-Qt running
# on localhost.
#
# Depends on jsonrpc
#
from decimal import *
import getpass
import math
import os
import os.path
import platform
import sys
import time
from jsonrpc import ServiceProxy, json
BASE_FEE=Decimal("0.001")
def check_json_precision():
"""Make sure json library being used does not lose precision converting BTC values"""
n = Decimal("20000000.00000003")
satoshis = int(json.loads(json.dumps(float(n)))*1.0e8)
if satoshis != 2000000000000003:
raise RuntimeError("JSON encode/decode loses precision")
def determine_db_dir():
"""Return the default location of the bitcoin data directory"""
if platform.system() == "Darwin":
return os.path.expanduser("~/Library/Application Support/Bitcoin/")
elif platform.system() == "Windows":
        return os.path.join(os.environ['APPDATA'], "Bitcoin")
return os.path.expanduser("~/.bitcoin")
def read_bitcoin_config(dbdir):
"""Read the bitcoin.conf file from dbdir, returns dictionary of settings"""
from ConfigParser import SafeConfigParser
class FakeSecHead(object):
        def __init__(self, fp):
self.fp = fp
self.sechead = '[all]\n'
def readline(self):
if self.sechead:
try: return self.sechead
finally: self.sechead = None
else:
s = self.fp.readline()
if s.find('#') != -1:
s = s[0:s.find('#')].strip() +"\n"
return s
config_parser = SafeConfigParser()
config_parser.readfp(FakeSecHead(open(os.path.join(dbdir, "bitcoin.conf"))))
return dict(config_parser.items("all"))
def connect_JSON(config):
"""Connect to a bitcoin JSON-RPC server"""
testnet = config.get('testnet', '0')
testnet = (int(testnet) > 0) # 0/1 in config file, convert to True/False
if not 'rpcport' in config:
config['rpcport'] = 51142 if testnet else 61142
connect = "http://%s:%s@127.0.0.1:%s"%(config['rpcuser'], config['rpcpassword'], config['rpcport'])
try:
result = ServiceProxy(connect)
# ServiceProxy is lazy-connect, so send an RPC command mostly to catch connection errors,
# but also make sure the bitcoind we're talking to is/isn't testnet:
if result.getmininginfo()['testnet'] != testnet:
sys.stderr.write("RPC server at "+connect+" testnet setting mismatch\n")
sys.exit(1)
return result
except:
sys.stderr.write("Error connecting to RPC server at "+connect+"\n")
sys.exit(1)
def unlock_wallet(bitcoind):
info = bitcoind.getinfo()
if 'unlocked_until' not in info:
return True # wallet is not encrypted
t = int(info['unlocked_until'])
if t <= time.time():
try:
passphrase = getpass.getpass("Wallet is locked; enter passphrase: ")
bitcoind.walletpassphrase(passphrase, 5)
except:
sys.stderr.write("Wrong passphrase\n")
info = bitcoind.getinfo()
return int(info['unlocked_until']) > time.time()
def list_available(bitcoind):
address_summary = dict()
address_to_account = dict()
for info in bitcoind.listreceivedbyaddress(0):
address_to_account[info["address"]] = info["account"]
unspent = bitcoind.listunspent(0)
for output in unspent:
# listunspent doesn't give addresses, so:
rawtx = bitcoind.getrawtransaction(output['txid'], 1)
vout = rawtx["vout"][output['vout']]
pk = vout["scriptPubKey"]
# This code only deals with ordinary pay-to-bitcoin-address
# or pay-to-script-hash outputs right now; anything exotic is ignored.
if pk["type"] != "pubkeyhash" and pk["type"] != "scripthash":
continue
address = pk["addresses"][0]
if address in address_summary:
address_summary[address]["total"] += vout["value"]
address_summary[address]["outputs"].append(output)
else:
address_summary[address] = {
"total" : vout["value"],
"outputs" : [output],
"account" : address_to_account.get(address, "")
}
return address_summary
def select_coins(needed, inputs):
# Feel free to improve this, this is good enough for my simple needs:
outputs = []
have = Decimal("0.0")
n = 0
while have < needed and n < len(inputs):
outputs.append({ "txid":inputs[n]["txid"], "vout":inputs[n]["vout"]})
have += inputs[n]["amount"]
n += 1
return (outputs, have-needed)
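# Worked illustration of select_coins (hypothetical inputs, not part of the original file):
# needing 0.5 with two 0.3 inputs available, it selects both and reports 0.1 change:
#
#   ins = [{"txid": "aa", "vout": 0, "amount": Decimal("0.3")},
#          {"txid": "bb", "vout": 1, "amount": Decimal("0.3")}]
#   select_coins(Decimal("0.5"), ins)
#   => ([{"txid": "aa", "vout": 0}, {"txid": "bb", "vout": 1}], Decimal("0.1"))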
def create_tx(bitcoind, fromaddresses, toaddress, amount, fee):
all_coins = list_available(bitcoind)
total_available = Decimal("0.0")
needed = amount+fee
potential_inputs = []
for addr in fromaddresses:
if addr not in all_coins:
continue
potential_inputs.extend(all_coins[addr]["outputs"])
total_available += all_coins[addr]["total"]
if total_available < needed:
sys.stderr.write("Error, only %f BTC available, need %f\n"%(total_available, needed));
sys.exit(1)
#
# Note:
# Python's json/jsonrpc modules have inconsistent support for Decimal numbers.
# Instead of wrestling with getting json.dumps() (used by jsonrpc) to encode
# Decimals, I'm casting amounts to float before sending them to bitcoind.
#
outputs = { toaddress : float(amount) }
(inputs, change_amount) = select_coins(needed, potential_inputs)
if change_amount > BASE_FEE: # don't bother with zero or tiny change
change_address = fromaddresses[-1]
if change_address in outputs:
outputs[change_address] += float(change_amount)
else:
outputs[change_address] = float(change_amount)
rawtx = bitcoind.createrawtransaction(inputs, outputs)
signed_rawtx = bitcoind.signrawtransaction(rawtx)
if not signed_rawtx["complete"]:
sys.stderr.write("signrawtransaction failed\n")
sys.exit(1)
txdata = signed_rawtx["hex"]
return txdata
def compute_amount_in(bitcoind, txinfo):
result = Decimal("0.0")
for vin in txinfo['vin']:
in_info = bitcoind.getrawtransaction(vin['txid'], 1)
vout = in_info['vout'][vin['vout']]
result = result + vout['value']
return result
def compute_amount_out(txinfo):
result = Decimal("0.0")
for vout in txinfo['vout']:
result = result + vout['value']
return result
def sanity_test_fee(bitcoind, txdata_hex, max_fee):
class FeeError(RuntimeError):
pass
try:
txinfo = bitcoind.decoderawtransaction(txdata_hex)
total_in = compute_amount_in(bitcoind, txinfo)
total_out = compute_amount_out(txinfo)
if total_in-total_out > max_fee:
raise FeeError("Rejecting transaction, unreasonable fee of "+str(total_in-total_out))
tx_size = len(txdata_hex)/2
kb = tx_size/1000 # integer division rounds down
if kb > 1 and fee < BASE_FEE:
raise FeeError("Rejecting no-fee transaction, larger than 1000 bytes")
if total_in < 0.01 and fee < BASE_FEE:
raise FeeError("Rejecting no-fee, tiny-amount transaction")
# Exercise for the reader: compute transaction priority, and
# warn if this is a very-low-priority transaction
except FeeError as err:
sys.stderr.write((str(err)+"\n"))
sys.exit(1)
def main():
import optparse
parser = optparse.OptionParser(usage="%prog [options]")
parser.add_option("--from", dest="fromaddresses", default=None,
help="addresses to get bitcoins from")
parser.add_option("--to", dest="to", default=None,
help="address to get send bitcoins to")
parser.add_option("--amount", dest="amoun

ozamiatin/oslo.messaging | oslo_messaging/_drivers/zmq_driver/client/publishers/dealer/zmq_dealer_publisher_direct.py | Python | apache-2.0 | 6,687 | 0.00015 |
# Copyright 2015-2016 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import tenacity
from oslo_messaging._drivers.zmq_driver.client.publishers.dealer \
import zmq_dealer_publisher_base
from oslo_messaging._drivers.zmq_driver.client import zmq_receivers
from oslo_messaging._drivers.zmq_driver.client import zmq_routing_table
from oslo_messaging._drivers.zmq_driver.client import zmq_senders
from oslo_messaging._drivers.zmq_driver.client import zmq_sockets_manager
from oslo_messaging._drivers.zmq_driver import zmq_address
from oslo_messaging._drivers.zmq_driver import zmq_async
from oslo_messaging._drivers.zmq_driver import zmq_names
LOG = logging.getLogger(__name__)
zmq = zmq_async.import_zmq()
class DealerPublisherDirect(zmq_dealer_publisher_base.DealerPublisherBase):
"""DEALER-publisher using direct dynamic connections.
    Publishing directly to remote services assumes the following:
- All direct connections are dynamic - so they live per message,
thus each message send executes the following:
* Open a new socket
* Connect to some host got from the RoutingTable
* Send message(s)
* Close connection, destroy socket
- RoutingTable/RoutingTableUpdater implements local cache of
matchmaker (e.g. Redis) for target resolution to the list of
available hosts. Cache updates in a background thread.
- Caching of connections is not appropriate for directly connected
OS services, because finally it results in a full-mesh of
connections between services.
- Yes we lose on performance opening and closing connections
for each message, but that is done intentionally to implement
the dynamic connections concept. The key thought here is to
have minimum number of connected services at the moment.
- Using the local RoutingTable cache is done to optimise access
to the matchmaker so we don't call the matchmaker per each message
"""
def __init__(self, conf, matchmaker):
sender = zmq_senders.RequestSenderDirect(conf, async=True)
receiver = zmq_receivers.ReceiverDirect(conf)
super(DealerPublisherDirect, self).__init__(conf, matchmaker,
sender, receiver)
self.routing_table = zmq_routing_table.RoutingTableAdaptor(
conf, matchmaker, zmq.ROUTER)
def _get_round_robin_host_connection(self, target, socket):
host = self.routing_table.get_round_robin_host(target)
socket.connect_to_host(host)
failover_hosts = self.routing_table.get_all_round_robin_hosts(target)
upper_bound = self.conf.oslo_messaging_zmq.zmq_failover_connections
for host in failover_hosts[:upper_bound]:
socket.connect_to_host(host)
def _get_fanout_connection(self, target, socket):
for host in self.routing_table.get_fanout_hosts(target):
socket.connect_to_host(host)
def acquire_connection(self, request):
if request.msg_type in zmq_names.DIRECT_TYPES:
socket = self.sockets_manager.get_socket()
self._get_round_robin_host_connection(request.target, socket)
return socket
elif request.msg_type in zmq_names.MULTISEND_TYPES:
socket = self.sockets_manager.get_socket(immediate=False)
self._get_fanout_connection(request.target, socket)
return socket
def _finally_unregister(self, socket, request):
super(DealerPublisherDirect, self)._finally_unregister(socket, request)
self.receiver.unregister_socket(socket)
def _do_send(self, socket, request):
if request.msg_type in zmq_names.MULTISEND_TYPES:
for _ in range(socket.connections_count()):
self.sender.send(socket, request)
else:
self.sender.send(socket, request)
def send_request(self, socket, request):
@tenacity.retry(retry=tenacity.retry_if_exception_type(zmq.Again),
stop=tenacity.stop_after_delay(
self.conf.rpc_response_timeout))
def send_retrying():
self._do_send(socket, request)
return send_retrying()
def cleanup(self):
self.routing_table.cleanup()
super(DealerPublisherDirect, self).cleanup()
class DealerPublisherDirectStatic(DealerPublisherDirect):
"""DEALER-publisher using direct static connections.
For some reason direct static connections may be also useful.
Assume a case when some agents are not connected with control services
over RPC (Ironic or Cinder+Ceph), and RPC is used only between controllers.
In this case number of RPC connections doesn't matter (very small) so we
can use static connections without fear and have all performance benefits
from it.
"""
def __init__(self, conf, matchmaker):
super(DealerPublisherDirectStatic, self).__init__(conf, matchmaker)
self.fanout_sockets = zmq_sockets_manager.SocketsManager(
conf, matchmaker, zmq.DEALER)
def acquire_connection(self, request):
target_key = zmq_address.target_to_key(
request.target, zmq_names.socket_type_str(zmq.ROUTER))
if request.msg_type in zmq_names.MULTISEND_TYPES:
hosts = self.routing_table.get_fanout_hosts(request.target)
return self.fanout_sockets.get_cached_socket(target_key, hosts,
immediate=False)
else:
hosts = self.routing_table.get_all_round_robin_hosts(
request.target)
return self.sockets_manager.get_cached_socket(target_key, hosts)
def send_request(self, socket, request):
self._do_send(socket, request)
def _finally_unregister(self, socket, request):
self.receiver.untrack_request(request)
def cleanup(self):
self.fanout_sockets.cleanup()
super(DealerPublisherDirectStatic, self).cleanup()
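The per-message lifecycle described in the DealerPublisherDirect docstring (open a socket, connect to a host from the routing table, send, close) can be pictured with plain pyzmq; this sketch is independent of the oslo.messaging wrappers above, and the host and payload values are placeholders:

import zmq

def send_once(host, payload):
    """One-shot DEALER send: no socket or connection outlives the call."""
    ctx = zmq.Context.instance()
    sock = ctx.socket(zmq.DEALER)
    try:
        sock.connect("tcp://%s" % host)  # host would come from the RoutingTable cache
        sock.send(payload)
    finally:
        sock.close(linger=0)             # destroy the socket instead of caching the connection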

InnovativeTravel/s3-keyring | tests/conftest.py | Python | mit | 714 | 0 |
"""Global test fixtures."""
import uuid
import pytest
from s3keyring.s3 import S3Keyring
from s3keyring.settings import config
from keyring.errors import PasswordDeleteError
@pytest.fixture(scope="module")
def keyring():
config.boto_config.activate_profile("test")
return S3Keyring()
@pytest.yield_fixture(scope="function")
def random_entry(keyring):
service = str(uuid.uuid4())
user = str(uuid.uuid4())
pwd = str(uuid.uuid4())
yield (service, user, pwd)
# Cleanup
try:
keyring.delete_password(service, user)
except PasswordDeleteError as err:
if 'not found' not in err.args[0]:
# It's ok if the entry has been already deleted
raise

georgetown-analytics/skidmarks | bin/cluster.py | Python | mit | 2,943 | 0.016989 |
print(__doc__)
from time import time
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from sklearn import metrics
from sklearn.cluster import KMeans
from sklearn.datasets import load_digits
from sklearn.decomposition import PCA
from sklearn.preprocessing import scale
from sklearn.preprocessing import Imputer
from sklearn import linear_model
import statsmodels.api as sm
from statsmodels.sandbox.regression.predstd import wls_prediction_std
from sklearn import preprocessing
# Some colors for later
colors = np.array([x for x in 'bgrcmykbgrcmykbgrcmykbgrcmyk'])
colors = np.hstack([colors] * 20)
###
#load data from a CSV to a dataframe
with open("./lin.csv") as in_data:
crime_data = pd.DataFrame.from_csv(in_data, sep=',')
#crime_data=crime_data.fillna(value=-999)
#load all numeric data into an array. The offense column from the crime data
#is excluded
as_array = np.asfarray(crime_data[['Average Velocity (mph)','Aggressive Turns']])#'Max Velocity', 'Velocity Stdev','Average Acceleration (mph per s)', 'Max Acceleration (mph per s)', ' Acceleration Stdev','Displacement','Total Distance Traveled','Max Direction Change per sec', ' Direction Stdev','Time (s)', 'Turns', 'Aggressive Turns', 'Stops', 'Large Deceleration Events', 'Deceleration Events', 'Max Deceleration Event']])
#number of groups
n_clusters=4
#Correct missing data
imputer = Imputer(missing_values="NaN", strategy="mean")
patched = imputer.fit_transform(as_array)
# Preprocessing tricks
#patched = StandardScaler().fit_transform(patched)
#patched = scale(patched, axis=0, with_mean=True)
patched = preprocessing.normalize(patched, norm='l2')
#min_max_scaler = preprocessing.MinMaxScaler()
#patched = min_max_scaler.fit_transform(patched)
#cluster data
cluster = KMeans(n_clusters=n_clusters)
cluster.fit_transform(patched)
#assigned grouped labels to the crime data
labels = cluster.labels_
#copy dataframe (may be memory intensive but just for illustration)
skid_data = crime_data.copy()
#print pd.Series(classified_data)
#print pd.Series(prediction_data)
skid_data['Cluster Class'] = pd.Series(labels, index=skid_data.index)
print skid_data.describe()
print skid_data
#print list(skid_data.columns)
skid_data.plot( x = 'Aggressive Turns', y = 'Cluster Class', kind = 'scatter')
plt.show()
# Make Predictions
predictions = cluster.predict(patched)
SilouetteCoefficient = metrics.silhouette_score(patched, labels, metric='euclidean')
print "The Silouette Coefficient is", SilouetteCoefficient
model = sm.OLS(labels, patched)
results = model.fit()
print results.summary()
# Find centers
centers = cluster.cluster_centers_
center_colors = colors[:len(centers)]
plt.scatter(centers[:, 0], centers[:, 1], s=100, c=center_colors)
#plt.subplot(1,4,idx+1)
plt.scatter(patched[:, 0], patched[:, 1], color=colors[predictions].tolist(), s=10)
plt.xticks(())
plt.yticks(())
plt.ylabel('$x_1$')
plt.xlabel('$x_0$')
plt.show()

AllenDowney/MarriageNSFG | thinkstats2.py | Python | mit | 75,264 | 0.000864 |
"""This file contains code for use with "Think Stats" and
"Think Bayes", both by Allen B. Downey, available from greenteapress.com
Copyright 2014 Allen B. Downey
License: GNU GPLv3 http://www.gnu.org/licenses/gpl.html
"""
from __future__ import print_function, division
"""This file contains class definitions for:
Hist: represents a histogram (map from values to integer frequencies).
Pmf: represents a probability mass function (map from values to probs).
_DictWrapper: private parent class for Hist and Pmf.
Cdf: represents a discrete cumulative distribution function
Pdf: represents a continuous probability density function
"""
import bisect
import copy
import logging
import math
import random
import re
from collections import Counter
from operator import itemgetter
import thinkplot
import numpy as np
import pandas
import scipy
from scipy import stats
from scipy import special
from scipy import ndimage
from scipy.special import gamma
from io import open
ROOT2 = math.sqrt(2)
def RandomSeed(x):
    """Initialize the random and np.random generators.
x: int seed
"""
random.seed(x)
np.random.seed(x)
def Odds(p):
"""Computes odds for a given probability.
    Example: p=0.75 means 75 for and 25 against, or 3:1 odds in favor.
Note: when p=1, the formula for odds divides by zero, which is
normally undefined. But I think it is reasonable to define Odds(1)
to be infinity, so that's what this function does.
p: float 0-1
Returns: float odds
"""
if p == 1:
return float('inf')
return p / (1 - p)
def Probability(o):
"""Computes the probability corresponding to given odds.
Example: o=2 means 2:1 odds in favor, or 2/3 probability
o: float odds, strictly positive
Returns: float probability
"""
return o / (o + 1)
def Probability2(yes, no):
"""Computes the probability corresponding to given odds.
Example: yes=2, no=1 means 2:1 odds in favor, or 2/3 probability.
yes, no: int or float odds in favor
"""
return yes / (yes + no)
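# Quick numeric check of the odds helpers above (illustrative values):
#   Odds(0.75)         -> 3.0   (3:1 in favor)
#   Probability(3.0)   -> 0.75
#   Probability2(3, 1) -> 0.75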
class Interpolator(object):
"""Represents a mapping between sorted sequences; performs linear interp.
Attributes:
xs: sorted list
ys: sorted list
"""
def __init__(self, xs, ys):
self.xs = xs
self.ys = ys
def Lookup(self, x):
"""Looks up x and returns the corresponding value of y."""
return self._Bisect(x, self.xs, self.ys)
def Reverse(self, y):
"""Looks up y and returns the corresponding value of x."""
return self._Bisect(y, self.ys, self.xs)
def _Bisect(self, x, xs, ys):
"""Helper function."""
if x <= xs[0]:
return ys[0]
if x >= xs[-1]:
return ys[-1]
i = bisect.bisect(xs, x)
frac = 1.0 * (x - xs[i - 1]) / (xs[i] - xs[i - 1])
y = ys[i - 1] + frac * 1.0 * (ys[i] - ys[i - 1])
return y
# When we plot Hist, Pmf and Cdf objects, they don't appear in
# the legend unless we override the default label.
DEFAULT_LABEL = '_nolegend_'
class _DictWrapper(object):
"""An object that contains a dictionary."""
def __init__(self, obj=None, label=None):
"""Initializes the distribution.
obj: Hist, Pmf, Cdf, Pdf, dict, pandas Series, list of pairs
label: string label
"""
self.label = label if label is not None else DEFAULT_LABEL
self.d = {}
# flag whether the distribution is under a log transform
self.log = False
if obj is None:
return
if isinstance(obj, (_DictWrapper, Cdf, Pdf)):
self.label = label if label is not None else obj.label
if isinstance(obj, dict):
self.d.update(obj.items())
elif isinstance(obj, (_DictWrapper, Cdf, Pdf)):
self.d.update(obj.Items())
elif isinstance(obj, pandas.Series):
self.d.update(obj.value_counts().iteritems())
else:
# finally, treat it like a list
self.d.update(Counter(obj))
if len(self) > 0 and isinstance(self, Pmf):
self.Normalize()
def __hash__(self):
return id(self)
def __str__(self):
cls = self.__class__.__name__
if self.label == DEFAULT_LABEL:
return '%s(%s)' % (cls, str(self.d))
else:
return self.label
def __repr__(self):
cls = self.__class__.__name__
if self.label == DEFAULT_LABEL:
return '%s(%s)' % (cls, repr(self.d))
else:
return '%s(%s, %s)' % (cls, repr(self.d), repr(self.label))
def __eq__(self, other):
try:
return self.d == other.d
except AttributeError:
return False
def __len__(self):
return len(self.d)
def __iter__(self):
return iter(self.d)
def iterkeys(self):
"""Returns an iterator over keys."""
return iter(self.d)
def __contains__(self, value):
return value in self.d
def __getitem__(self, value):
return self.d.get(value, 0)
def __setitem__(self, value, prob):
self.d[value] = prob
def __delitem__(self, value):
del self.d[value]
def Copy(self, label=None):
"""Returns a copy.
Make a shallow copy of d. If you want a deep copy of d,
use copy.deepcopy on the whole object.
label: string label for the new Hist
returns: new _DictWrapper with the same type
"""
new = copy.copy(self)
new.d = copy.copy(self.d)
new.label = label if label is not None else self.label
return new
def Scale(self, factor):
"""Multiplies the values by a factor.
factor: what to multiply by
Returns: new object
"""
new = self.Copy()
new.d.clear()
for val, prob in self.Items():
new.Set(val * factor, prob)
return new
def Log(self, m=None):
"""Log transforms the probabilities.
Removes values with probability 0.
Normalizes so that the largest logprob is 0.
"""
if self.log:
raise ValueError("Pmf/Hist already under a log transform")
self.log = True
if m is None:
m = self.MaxLike()
for x, p in self.d.items():
if p:
self.Set(x, math.log(p / m))
else:
self.Remove(x)
def Exp(self, m=None):
"""Exponentiates the probabilities.
m: how much to shift the ps before exponentiating
If m is None, normalizes so that the largest prob is 1.
"""
if not self.log:
raise ValueError("Pmf/Hist not under a log transform")
self.log = False
if m is None:
m = self.MaxLike()
for x, p in self.d.items():
self.Set(x, math.exp(p - m))
def GetDict(self):
"""Gets the dictionary."""
return self.d
def SetDict(self, d):
"""Sets the dictionary."""
self.d = d
def Values(self):
"""Gets an unsorted sequence of values.
Note: one source of confusion is that the keys of this
dictionary are the values of the Hist/Pmf, and the
values of the dictionary are frequencies/probabilities.
"""
return self.d.keys()
def Items(self):
"""Gets an unsorted sequence of (value, freq/prob) pairs."""
return self.d.items()
def SortedItems(self):
"""Gets a sorted sequence of (value, freq/prob) pairs.
It items are unsortable, the result is unsorted.
"""
def isnan(x):
try:
return math.isnan(x)
except TypeError:
return False
if any([isnan(x) for x in self.Values()]):
msg = 'Keys contain NaN, may not sort correctly.'
logging.warning(msg)
try:
return sorted(self.d.items())
except TypeError:
return self.d.items()
def Render(self, **options):
"""Generates a sequence of points suit

GoogleCloudPlatform/gsutil | gslib/tests/test_notification.py | Python | apache-2.0 | 6,042 | 0.002648 |
# -*- coding: utf-8 -*-
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Integration tests for notification command."""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
import re
import time
import uuid
import boto
from gslib.cloud_api_delegator import CloudApiDelegator
import gslib.tests.testcase as testcase
from gslib.tests.util import ObjectToURI as suri
from gslib.tests.util import unittest
from gslib.utils.retry_util import Retry
from six import add_move, MovedModule
add_move(MovedModule('mock', 'mock', 'unittest.mock'))
from six.moves import mock
def _LoadNotificationUrl():
return boto.config.get_value('GSUtil', 'test_notification_url')
NOTIFICATION_URL = _LoadNotificationUrl()
class TestNotificationUnit(testcase.GsUtilUnitTestCase):
@mock.patch.object(CloudApiDelegator,
'CreateNotificationConfig',
autospec=True)
def test_notification_splits_dash_m_value_correctly(self,
mock_create_notification):
bucket_uri = self.CreateBucket(bucket_name='foo_notification')
stdout = self.RunCommand(
'notification',
['create', '-f', 'none', '-s', '-m', 'foo:bar:baz',
suri(bucket_uri)],
return_stdout=True)
mock_create_notification.assert_called_once_with(
mock.ANY, # Client instance.
'foo_notification',
pubsub_topic=mock.ANY,
payload_format=mock.ANY,
custom_attributes={'foo': 'bar:baz'},
event_types=None,
object_name_prefix=mock.ANY,
provider=mock.ANY)
class TestNotification(testcase.GsUtilIntegrationTestCase):
"""Integration tests for notification command."""
@unittest.skipUnless(NOTIFICATION_URL,
'Test requires notification URL configuration.')
def test_watch_bucket(self):
"""Tests creating a notification channel on a bucket."""
bucket_uri = self.CreateBucket()
self.RunGsUtil(
['notification', 'watchbucket', NOTIFICATION_URL,
suri(bucket_uri)])
identifier = str(uuid.uuid4())
token = str(uuid.uuid4())
stderr = self.RunGsUtil([
'notification', 'watchbucket', '-i', identifier, '-t', token,
NOTIFICATION_URL,
suri(bucket_uri)
],
return_stderr=True)
self.assertIn('token: %s' % token, stderr)
self.assertIn('identifier: %s' % identifier, stderr)
@unittest.skipUnless(NOTIFICATION_URL,
                       'Test requires notification URL configuration.')
def test_stop_channel(self):
"""Tests stopping a notification channel on a bucket."""
bucket_uri = self.CreateBucket()
stderr = self.RunGsUtil(
['notification', 'watchbucket', NOTIFICATION_URL,
suri(bucket_uri)],
return_stderr=True)
    channel_id = re.findall(r'channel identifier: (?P<id>.*)', stderr)
self.assertEqual(len(channel_id), 1)
resource_id = re.findall(r'resource identifier: (?P<id>.*)', stderr)
self.assertEqual(len(resource_id), 1)
channel_id = channel_id[0]
resource_id = resource_id[0]
self.RunGsUtil(['notification', 'stopchannel', channel_id, resource_id])
@unittest.skipUnless(NOTIFICATION_URL,
'Test requires notification URL configuration.')
def test_list_one_channel(self):
"""Tests listing notification channel on a bucket."""
# TODO(b/132277269): Re-enable these once the service-side bug is fixed.
return unittest.skip('Functionality has been disabled due to b/132277269')
bucket_uri = self.CreateBucket()
# Set up an OCN (object change notification) on the newly created bucket.
self.RunGsUtil(
['notification', 'watchbucket', NOTIFICATION_URL,
suri(bucket_uri)],
return_stderr=False)
# The OCN listing in the service is eventually consistent. In initial
# tests, it almost never was ready immediately after calling WatchBucket
# above, so we A) sleep for a few seconds before the first OCN listing
# attempt, and B) wrap the OCN listing attempt in retry logic in case
# it raises a BucketNotFoundException (note that RunGsUtil will raise this
# as an AssertionError due to the exit status not being 0).
@Retry(AssertionError, tries=3, timeout_secs=5)
def _ListObjectChangeNotifications():
stderr = self.RunGsUtil(['notification', 'list', '-o',
suri(bucket_uri)],
return_stderr=True)
return stderr
time.sleep(5)
stderr = _ListObjectChangeNotifications()
channel_id = re.findall(r'Channel identifier: (?P<id>.*)', stderr)
self.assertEqual(len(channel_id), 1)
resource_id = re.findall(r'Resource identifier: (?P<id>.*)', stderr)
self.assertEqual(len(resource_id), 1)
push_url = re.findall(r'Application URL: (?P<id>.*)', stderr)
self.assertEqual(len(push_url), 1)
subscriber_email = re.findall(r'Created by: (?P<id>.*)', stderr)
self.assertEqual(len(subscriber_email), 1)
creation_time = re.findall(r'Creation time: (?P<id>.*)', stderr)
self.assertEqual(len(creation_time), 1)
def test_invalid_subcommand(self):
stderr = self.RunGsUtil(['notification', 'foo', 'bar', 'baz'],
return_stderr=True,
expected_status=1)
self.assertIn('Invalid subcommand', stderr)

bdaroz/the-blue-alliance | controllers/datafeed_controller.py | Python | mit | 31,351 | 0.002775 |
import logging
import os
import datetime
import tba_config
import time
import json
from google.appengine.api import taskqueue
from google.appengine.ext import ndb
from google.appengine.ext import webapp
from google.appengine.ext.webapp import template
from consts.event_type import EventType
from consts.media_type import MediaType
from consts.media_tag import MediaTag
from datafeeds.datafeed_fms_api import DatafeedFMSAPI
from datafeeds.datafeed_first_elasticsearch import DatafeedFIRSTElasticSearch
from datafeeds.datafeed_tba import DatafeedTba
from datafeeds.datafeed_resource_library import DatafeedResourceLibrary
from helpers.district_manipulator import DistrictManipulator
from helpers.event_helper import EventHelper
from helpers.event_manipulator import EventManipulator
from helpers.event_details_manipulator import EventDetailsManipulator
from helpers.event_team_manipulator import EventTeamManipulator
from helpers.match_manipulator import MatchManipulator
from helpers.match_helper import MatchHelper
from helpers.award_manipulator import AwardManipulator
from helpers.media_manipulator import MediaManipulator
from helpers.team_manipulator import TeamManipulator
from helpers.district_team_manipulator import DistrictTeamManipulator
from helpers.robot_manipulator import RobotManipulator
from helpers.event.offseason_event_helper import OffseasonEventHelper
from helpers.suggestions.suggestion_creator import SuggestionCreator
from models.district_team import DistrictTeam
from models.event import Event
from models.event_details import EventDetails
from models.event_team import EventTeam
from models.media import Media
from models.robot import Robot
from models.sitevar import Sitevar
from models.team import Team
from sitevars.website_blacklist import WebsiteBlacklist
class FMSAPIAwardsEnqueue(webapp.RequestHandler):
"""
Handles enqueing getting awards
"""
def get(self, when):
if when == "now":
events = EventHelper.getEventsWithinADay()
events = filter(lambda e: e.official, events)
else:
            event_keys = Event.query(Event.official == True).filter(Event.year == int(when)).fetch(500, keys_only=True)
events = ndb.get_multi(event_keys)
for event in events:
taskqueue.add(
queue_name='datafeed',
url='/tasks/get/fmsapi_awards/%s' % (event.key_name),
method='GET')
template_values = {
'events': events,
}
        if 'X-Appengine-Taskname' not in self.request.headers:  # Only write out if not in taskqueue
path = os.path.join(os.path.dirname(__file__), '../templates/datafeeds/usfirst_awards_enqueue.html')
self.response.out.write(template.render(path, template_values))
class FMSAPIAwardsGet(webapp.RequestHandler):
"""
Handles updating awards
"""
def get(self, event_key):
datafeed = DatafeedFMSAPI('v2.0', save_response=True)
event = Event.get_by_id(event_key)
awards = datafeed.getAwards(event)
if event and event.remap_teams:
EventHelper.remapteams_awards(awards, event.remap_teams)
new_awards = AwardManipulator.createOrUpdate(awards)
if new_awards is None:
new_awards = []
elif type(new_awards) != list:
new_awards = [new_awards]
# create EventTeams
team_ids = set()
for award in new_awards:
for team in award.team_list:
team_ids.add(team.id())
teams = TeamManipulator.createOrUpdate([Team(
id=team_id,
team_number=int(team_id[3:]))
for team_id in team_ids])
if teams:
if type(teams) is not list:
teams = [teams]
event_teams = EventTeamManipulator.createOrUpdate([EventTeam(
id=event_key + "_" + team.key.id(),
event=event.key,
team=team.key,
year=event.year)
for team in teams])
template_values = {
'awards': new_awards,
}
if 'X-Appengine-Taskname' not in self.request.headers: # Only write out if not in taskqueue
path = os.path.join(os.path.dirname(__file__), '../templates/datafeeds/usfirst_awards_get.html')
self.response.out.write(template.render(path, template_values))
class FMSAPIEventAlliancesEnqueue(webapp.RequestHandler):
"""
Handles enqueing getting alliances
"""
def get(self, when):
if when == "now":
events = EventHelper.getEventsWithinADay()
events = filter(lambda e: e.official, events)
elif when == "last_day_only":
events = EventHelper.getEventsWithinADay()
events = filter(lambda e: e.official and e.ends_today, events)
else:
event_keys = Event.query(Event.official == True).filter(Event.year == int(when)).fetch(500, keys_only=True)
events = ndb.get_multi(event_keys)
for event in events:
taskqueue.add(
queue_name='datafeed',
url='/tasks/get/fmsapi_event_alliances/' + event.key_name,
method='GET')
template_values = {
'events': events
}
if 'X-Appengine-Taskname' not in self.request.headers: # Only write out if not in taskqueue
path = os.path.join(os.path.dirname(__file__), '../templates/datafeeds/usfirst_event_alliances_enqueue.html')
self.response.out.write(template.render(path, template_values))
class FMSAPIEventAlliancesGet(webapp.RequestHandler):
"""
Handles updating an event's alliances
"""
def get(self, event_key):
df = DatafeedFMSAPI('v2.0', save_response=True)
event = Event.get_by_id(event_key)
alliance_selections = df.getEventAlliances(event_key)
if event and event.remap_teams:
EventHelper.remapteams_alliances(alliance_selections, event.remap_teams)
event_details = EventDetails(
id=event_key,
alliance_selections=alliance_selections
)
EventDetailsManipulator.createOrUpdate(event_details)
template_values = {'alliance_selections': alliance_selections,
'event_name': event_details.key.id()}
if 'X-Appengine-Taskname' not in self.request.headers: # Only write out if not in taskqueue
path = os.path.join(os.path.dirname(__file__), '../templates/datafeeds/usfirst_event_alliances_get.html')
self.response.out.write(template.render(path, template_values))
class FMSAPIEventRankingsEnqueue(webapp.RequestHandler):
"""
Handles enqueing getting rankings
"""
def get(self, when):
if when == "now":
events = EventHelper.getEventsWithinADay()
events = filter(lambda e: e.official, events)
else:
event_keys = Event.query(Event.official == True).filter(Event.year == int(when)).fetch(500, keys_only=True)
events = ndb.get_multi(event_keys)
for event in events:
taskqueue.add(
queue_name='datafeed',
url='/tasks/get/fmsapi_event_rankings/' + event.key_name,
method='GET')
template_values = {
'events': events,
}
if 'X-Appengine-Taskname' not in self.request.headers: # Only write out if not in taskqueue
path = os.path.join(os.path.dirname(__file__), '../templates/datafeeds/usfirst_event_rankings_enqueue.html')
self.response.out.write(template.render(path, template_values))
class FMSAPIEventRankingsGet(webapp.RequestHandler):
"""
Handles updating an event's rankings
"""
def get(self, event_key):
df = DatafeedFMSAPI('v2.0', save_response=True)
event = Event.get_by_id(event_key)
rankings, rankings2 = df.getEventRankings(event_key)
if event and event.remap_teams:
EventHelper.remapteams_rankings(rankings, event.remap_teams)
EventHelper.remapteams_rankings2(rankings2, event.remap_team

CERNDocumentServer/cds-videos | cds/modules/records/bundles.py | Python | gpl-2.0 | 2,122 | 0 |
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2016, 2017 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""JS/CSS bundles for Records."""
from __future__ import absolute_import, print_function
from flask_assets import Bundle
from invenio_assets import NpmBundle
stats_js = NpmBundle(
"node_modules/invenio-charts-js/dist/lib.bundle.js",
"js/cds_records/stats.js",
output="gen/cds.records.stats.%(version)s.js",
npm={
"invenio-charts-js": "^0.2.2",
},
)
stats_css = Bundle(
Bundle(
"node_modules/invenio-charts-js/src/styles/styles.scss",
"scss/stats.scss",
filters="node-scss,cleancssurl",
),
output="gen/cds.stats.%(version)s.css",
)
js = NpmBundle(
    Bundle(
        "node_modules/cds/dist/cds.js",
        "node_modules/angular-sanitize/angular-sanitize.js",
        "node_modules/angular-strap/dist/angular-strap.js",
        "node_modules/invenio-files-js/dist/invenio-files-js.js",
"node_modules/ngmodal/dist/ng-modal.js",
"js/cds_records/main.js",
"js/cds_records/user_actions_logger.js",
filters="jsmin",
),
depends=("node_modules/cds/dist/*.js",),
filters="jsmin",
output="gen/cds.record.%(version)s.js",
npm={
"angular": "~1.4.10",
"angular-sanitize": "~1.4.10",
"angular-loading-bar": "~0.9.0",
"cds": "~0.2.0",
"ng-dialog": "~0.6.0",
"ngmodal": "~2.0.1",
},
)

praekelt/molo | molo/core/management/commands/add_language_to_pages.py | Python | bsd-2-clause | 702 | 0 |
from __future__ import absolute_import, unicode_literals
from django.core.management.base import BaseCommand
from molo.core.models import LanguageRelation
from molo.core.models import Page
class Command(BaseCommand):
def handle(self, *args, **options):
for relation in LanguageRelation.objects.all():
if relation.page and relation.language:
page = Page.objects.get(pk=relation.page.pk).specific
page.language = relation.language
page.save()
else:
                self.stdout.write(self.style.NOTICE(
'Relation with pk "%s" is missing either page/language'
% (relation.pk)))

onehao/opensource | pyml/inaction/ch03/decisiontree/trees.py | Python | apache-2.0 | 4,087 | 0.015659 |
'''
@author: Michael Wan
@since : 2014-11-08
'''
from math import log
import operator
def createDataSet():
dataSet = [[1, 1, 'yes'],
[1, 1, 'yes'],
[1, 0, 'no'],
[0, 1, 'no'],
[0, 1, 'no']]
labels = ['no surfacing','flippers']
#change to discrete values
return dataSet, labels
def calcShannonEnt(dataSet):
numEntries = len(dataSet)
labelCounts = {}
for featVec in dataSet: #the the number of unique elements and their occurance
currentLabel = featVec[-1]
if currentLabel not in labelCounts.keys(): labelCounts[currentLabel] = 0
labelCounts[currentLabel] += 1
shannonEnt = 0.0
for key in labelCounts:
prob = float(labelCounts[key])/numEntries
shannonEnt -= prob * log(prob,2) #log base 2
return shannonEnt
def splitDataSet(dataSet, axis, value):
retDataSet = []
for featVec in dataSet:
if featVec[axis] == value:
reducedFeatVec = featVec[:axis] #chop out axis used for splitting
reducedFeatVec.extend(featVec[axis+1:])
retDataSet.append(reducedFeatVec)
return retDataSet
def chooseBestFeatureToSplit(dataSet):
numFeatures = len(dataSet[0]) - 1 #the last column is used for the labels
baseEntropy = calcShannonEnt(dataSet)
bestInfoGain = 0.0; bestFeature = -1
for i in range(numFeatures): #iterate over all the features
featList = [example[i] for example in dataSet]#create a list of all the examples of this feature
uniqueVals = set(featList) #get a set of unique values
newEntropy = 0.0
for value in uniqueVals:
subDataSet = splitDataSet(dataSet, i, value)
prob = len(subDataSet)/float(len(dataSet))
newEntropy += prob * calcShannonEnt(subDataSet)
infoGain = baseEntropy - newEntropy #calculate the info gain; ie reduction in entropy
if (infoGain > bestInfoGain): #compare this to the best gain so far
bestInfoGain = infoGain #if better than current best, set to best
bestFeature = i
return bestFeature #returns an integer
def majorityCnt(classList):
classCount={}
for vote in classList:
if vote not in classCount.keys(): classCount[vote] = 0
classCount[vote] += 1
sortedClassCount = sorted(classCount.iteritems(), key=operator.itemgetter(1), reverse=True)
return sortedClassCount[0][0]
def createTree(dataSet,labels):
classList = [example[-1] for example in dataSet]
if classList.count(classList[0]) == len(classList):
return classList[0]#stop splitting when all of the classes are equal
if len(dataSet[0]) == 1: #stop splitting when there are no more features in dataSet
return majorityCnt(classList)
bestFeat = chooseBestFeatureToSplit(dataSet)
bestFeatLabel = labels[bestFeat]
myTree = {bestFeatLabel:{}}
del(labels[bestFeat])
featValues = [example[bestFeat] for example in dataSet]
uniqueVals = set(featValues)
for value in uniqueVals:
subLabels = labels[:] #copy all of labels, so trees don't mess up existing labels
        myTree[bestFeatLabel][value] = createTree(splitDataSet(dataSet, bestFeat, value),subLabels)
    return myTree

def classify(inputTree,featLabels,testVec):
firstStr = inputTree.keys()[0]
secondDict = inputTree[firstStr]
featIndex = featLabels.index(firstStr)
key = testVec[featIndex]
valueOfFeat = secondDict[key]
if isinstance(valueOfFeat, dict):
classLabel = classify(valueOfFeat, featLabels, testVec)
else: classLabel = valueOfFeat
return classLabel
def storeTree(inputTree,filename):
import pickle
fw = open(filename,'w')
pickle.dump(inputTree,fw)
fw.close()
def grabTree(filename):
import pickle
fr = open(filename)
return pickle.load(fr)
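A short sanity run of the functions above on the toy dataset; the expected values come from working the entropy and information-gain math by hand, and the prints use Python 2 syntax to match the file:

myDat, labels = createDataSet()
print calcShannonEnt(myDat)            # ~0.971 for the 2 'yes' / 3 'no' split
print chooseBestFeatureToSplit(myDat)  # 0 -> 'no surfacing' gives the larger information gain
myTree = createTree(myDat, labels[:])  # pass a copy; createTree deletes labels as it splits
print myTree  # {'no surfacing': {0: 'no', 1: {'flippers': {0: 'no', 1: 'yes'}}}}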

dstcontrols/osisoftpy | examples/mini_signal_example.py | Python | apache-2.0 | 5,723 | 0.007863 |
import random, inspect
from sched import scheduler
from time import time, sleep
from datetime import datetime
####################################################################################################
# Minimal implementation of the signaling library
class Signal(object):
def __init__(self, name):
self.name = name
self.receivers = {}
# This is all we need for a callback function to be registered for a signal:
    def connect(self, receiver):
        # print(id(receiver), receiver)
        self.receivers.setdefault(id(receiver), receiver)
        return receiver

    # The demo below calls disconnect(); drop the receiver again if it is registered.
    def disconnect(self, receiver):
        self.receivers.pop(id(receiver), None)
# When a person expends effort and call their signal.send(), they really iterate through their
# receivers (callback functions) and __call__() each one, sending it themselves and ++kwargs
def send(self, sender, **kwargs):
# For Edwin: Understand This Syntax
# print("{} ({}) has the following receivers: {}".format(self.name, id(self), self.receivers))
if not self.receivers:
return []
# below is an example of tuple unpacking in python
# print ("wheee {}".format([(receiver, receiver(sender, **kwargs)) for k, receiver in self.receivers.items()]))
return [(receiver, receiver(sender, **kwargs)) for k, receiver in self.receivers.items()]
return [receiver(sender, **kwargs) for k, receiver in self.receivers.items()]
# Makes Signals(name) singletons
class Namespace(dict):
def signal(self, name):
try:
return self[name]
except KeyError:
return self.setdefault(name, Signal(name))
signal = Namespace().signal
####################################################################################################
## Minimal implementation of a Person class,
class Person(object):
def __init__(self, name):
self.name = name
self._energy = 100
@property
def energy(self):
return self._energy
def work(self):
effort = random.randint(-10, 10)
self._energy += effort
# People will emit a signal when they expend effort
if effort != 0:
# the signal will call the callback functon provided by the receiver on connect()
signal(self.name).send(self, effort=effort)
####################################################################################################
## Now the script - Let's start with the function we'll call to subscribe to signals and callback
def seamus_callback1(sender):
print("calling seamus_callback1! sender: {}".format(sender))
def seamus_callback2(sender):
print("calling seamus_callback2! sender: {}".format(sender))
def seamus_callback3(sender):
print("calling seamus_callback3! sender: {}".format(sender))
seamus = Person('seamus')
seamus_signal = signal(seamus.name)
print("{} is calling send. Debug: sender: {} signal: {} output: {}!".format(seamus.name, seamus, seamus_signal, seamus_signal.send(seamus)))
seamus_signal.connect(seamus_callback1)
seamus_signal.connect(seamus_callback2)
seamus_signal.connect(seamus_callback3)
print("{} is calling send again. Debug: sender: {} signal: {} output: {}!".format(seamus.name, seamus, seamus_signal, seamus_signal.send(seamus)))
seamus_signal.disconnect(seamus_callback1)
seamus_signal.disconnect(seamus_callback2)
print("{} is calling send again. Debug: sender: {} signal: {} output: {}!".format(seamus.name, seamus, seamus_signal, seamus_signal.send(seamus)))
## Subscribing to signals
def monitor_changes_in_effort(people):
    # For each person, we call the signal method. signal() will either return an existing signal for
    # that person, or return a new signal for that person - hence the singleton comment above.
signals = [signal(person.name) for person in people]
# list comprehension
# signals = [functionToCall() for thing in someList]
# signals = []
# for person in people:
# s = signal(person.name)
# signals.append(s)
# for each signal we just got, let's connect to it and tell it what callback function we want
# to have executed when the signal is emitted.
[s.connect(track_work) for s in signals]
# This is our callback function - we send this to the signal as the callback that we want executed.
# this will handle the signal that the person sends - we know fro mthe person class that when a
# person expends effort, then emit a signal, and pass in themselves and amount of effort expended.
def track_work(sender, effort):
verb = 'rose' if effort > 0 else 'dropped'
if sender.energy < 100: # and sender not in hardworkers:
hardworkers.add(sender)
else:
hardworkers.discard(sender)
return effort
def print_person(person):
print(person.name)
print(person.energy)
# Creating the people objects from a list of names
people = [Person(name) for name in ['ye', 'bryan', 'andrew', 'edwin',
'jerry', 'jose', 'nathan', 'nate']]
## Set we'll add people whose energy levels have changed
hardworkers = set([])
# Observing the people we just created
monitor_changes_in_effort(people)
# Starting a 2 second loop that makes each person work 20 times
start_time = time()
duration = 0.5
interval = duration / 20
while time() < start_time + duration:
# print('Time: ')
# print(datetime.fromtimestamp(time()))
# [print_person(person) for person in people]
[person.work() for person in people]
sleep(interval - ((time() - start_time) % interval))
# print the list of people who were found to have worked:
print('\n\nThe following people finished the day with less energy than they started:\n')
[print_person(person) for person in hardworkers]
print('\n')
# and that's the gist of things.

skomendera/PyMyTools | providers/terminal.py | Python | mit | 2,039 | 0.00049 |
import os
def get_terminal_columns():
terminal_rows, terminal_columns = os.popen('stty size', 'r').read().split()
return int(terminal_columns)
def get_terminal_rows():
terminal_rows, terminal_columns = os.popen('stty size', 'r').read().split()
return int(terminal_rows)
def get_header_l1(lines_list, width=None):
text_output = []
if width is None:
width = get_terminal_columns()
text_output.append('')
text_output.append('%s%s%s' % ('+', '-' * (width-2), '+'))
for line in lines_list:
text_output.append('| {:<{width}}|'.format(line, width=width-3))
text_output.append('%s%s%s' % ('+', '-' * (width - 2), '+'))
text_output.append('')
return '\n'.join(text_output)
def get_header_l2(lines_list, width=None):
text_output = []
if width is None:
width = 0
for line in lines_list:
if len(line) > width:
width = len(line)
width += 5
text_output.append('')
text_output.append('#')
text_output.append('##')
for line in lines_list:
text_output.append('### ' + line)
text_output.append('-' * width)
text_output.append('')
return '\n'.join(text_output)
def get_key_value_adjusted(key, value, key_width):
return '{:>{width}}'.format(key, width=key_width) + ': ' + str(value)
def format_seconds(seconds):
output = []
    seconds = int(seconds)
if seconds > 86400:
output.append('%s days' % round(seconds / 86400))
seconds %= 86400
    if seconds > 3600:
output.append('%s hours' % round(seconds / 3600))
seconds %= 3600
if seconds > 60:
output.append('%s minutes' % round(seconds / 60))
seconds %= 60
if seconds > 0:
output.append('%s seconds' % seconds)
return ' '.join(output)
def format_documentation_list(links_list):
text_output = ['Documentation:', '']
for l in links_list:
text_output.append('- %s' % l)
return '\n'.join(text_output)

emanueldima/b2share | b2share/modules/deposit/utils.py | Python | gpl-2.0 | 823 | 0 |
"""Utilities for B2share deposit."""
from flask import request
from werkzeug.local import LocalProxy
from werkzeug.routing import PathConverter
def file_id_to_key(value):
"""Convert file UUID to value if in request context."""
    from invenio_files_rest.models import ObjectVersion
_, record = request.view_args['pid_value'].data
if value in record.files:
return value
object_version = ObjectVersion.query.filter_by(
bucket_id=record.files.bucket.id, file_id=value
).first()
if object_version:
return object_version.key
return value
class FileKeyConverter(PathConverter):
    """Convert file UUID for key."""
def to_python(self, value):
"""Lazily convert value from UUID to key if need be."""
return LocalProxy(lambda: file_id_to_key(value))
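# --- Illustrative registration (hedged sketch; not part of the original module) ---
# A werkzeug PathConverter is normally registered on the Flask app's URL map; the
# converter name 'file_key' and the route below are assumptions for the example:
#
#   app.url_map.converters['file_key'] = FileKeyConverter
#   # then usable in a rule such as '/records/<pid_value>/files/<file_key:key>'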
|
coodoing/piconv
|
support_encodings.py
|
Python
|
apache-2.0
| 13,095 | 0.002596 |
#-*-coding=utf-8-*-
class SupportEncodings(object):
"""
Given the support encoding of piconv
"""
supports = []
def __init__(self):
self.supports = ['ASCII','UTF-8','UTF-16','UTF-32',\
            'BIG5','GBK','GB2312','GB18030','EUC-JP', 'SHIFT_JIS', 'ISO-2022-JP',\
'WINDOWS-1252']
def get_support_enc
|
odings(self):
return self.supports
def get_all_coded_character_set(self):
return ['']
"""
437, 500, 500V1, 850, 851, 852, 855, 856, 857, 860, 861, 862, 863, 864, 865,
866, 866NAV, 869, 874, 904, 1026, 1046, 1047, 8859_1, 8859_2, 8859_3, 8859_4,
8859_5, 8859_6, 8859_7, 8859_8, 88
|
59_9, 10646-1:1993, 10646-1:1993/UCS4,
ANSI_X3.4-1968, ANSI_X3.4-1986, ANSI_X3.4, ANSI_X3.110-1983, ANSI_X3.110,
ARABIC, ARABIC7, ARMSCII-8, ASCII, ASMO-708, ASMO_449, BALTIC, BIG-5,
BIG-FIVE, BIG5-HKSCS, BIG5, BIG5HKSCS, BIGFIVE, BRF, BS_4730, CA, CN-BIG5,
CN-GB, CN, CP-AR, CP-GR, CP-HU, CP037, CP038, CP273, CP274, CP275, CP278,
CP280, CP281, CP282, CP284, CP285, CP290, CP297, CP367, CP420, CP423, CP424,
CP437, CP500, CP737, CP770, CP771, CP772, CP773, CP774, CP775, CP803, CP813,
CP819, CP850, CP851, CP852, CP855, CP856, CP857, CP860, CP861, CP862, CP863,
CP864, CP865, CP866, CP866NAV, CP868, CP869, CP870, CP871, CP874, CP875,
CP880, CP891, CP901, CP902, CP903, CP904, CP905, CP912, CP915, CP916, CP918,
CP920, CP921, CP922, CP930, CP932, CP933, CP935, CP936, CP937, CP939, CP949,
CP950, CP1004, CP1008, CP1025, CP1026, CP1046, CP1047, CP1070, CP1079,
CP1081, CP1084, CP1089, CP1097, CP1112, CP1122, CP1123, CP1124, CP1125,
CP1129, CP1130, CP1132, CP1133, CP1137, CP1140, CP1141, CP1142, CP1143,
CP1144, CP1145, CP1146, CP1147, CP1148, CP1149, CP1153, CP1154, CP1155,
CP1156, CP1157, CP1158, CP1160, CP1161, CP1162, CP1163, CP1164, CP1166,
CP1167, CP1250, CP1251, CP1252, CP1253, CP1254, CP1255, CP1256, CP1257,
CP1258, CP1282, CP1361, CP1364, CP1371, CP1388, CP1390, CP1399, CP4517,
CP4899, CP4909, CP4971, CP5347, CP9030, CP9066, CP9448, CP10007, CP12712,
CP16804, CPIBM861, CSA7-1, CSA7-2, CSASCII, CSA_T500-1983, CSA_T500,
CSA_Z243.4-1985-1, CSA_Z243.4-1985-2, CSA_Z243.419851, CSA_Z243.419852,
CSDECMCS, CSEBCDICATDE, CSEBCDICATDEA, CSEBCDICCAFR, CSEBCDICDKNO,
CSEBCDICDKNOA, CSEBCDICES, CSEBCDICESA, CSEBCDICESS, CSEBCDICFISE,
CSEBCDICFISEA, CSEBCDICFR, CSEBCDICIT, CSEBCDICPT, CSEBCDICUK, CSEBCDICUS,
CSEUCKR, CSEUCPKDFMTJAPANESE, CSGB2312, CSHPROMAN8, CSIBM037, CSIBM038,
CSIBM273, CSIBM274, CSIBM275, CSIBM277, CSIBM278, CSIBM280, CSIBM281,
CSIBM284, CSIBM285, CSIBM290, CSIBM297, CSIBM420, CSIBM423, CSIBM424,
CSIBM500, CSIBM803, CSIBM851, CSIBM855, CSIBM856, CSIBM857, CSIBM860,
CSIBM863, CSIBM864, CSIBM865, CSIBM866, CSIBM868, CSIBM869, CSIBM870,
CSIBM871, CSIBM880, CSIBM891, CSIBM901, CSIBM902, CSIBM903, CSIBM904,
CSIBM905, CSIBM918, CSIBM921, CSIBM922, CSIBM930, CSIBM932, CSIBM933,
CSIBM935, CSIBM937, CSIBM939, CSIBM943, CSIBM1008, CSIBM1025, CSIBM1026,
CSIBM1097, CSIBM1112, CSIBM1122, CSIBM1123, CSIBM1124, CSIBM1129, CSIBM1130,
CSIBM1132, CSIBM1133, CSIBM1137, CSIBM1140, CSIBM1141, CSIBM1142, CSIBM1143,
CSIBM1144, CSIBM1145, CSIBM1146, CSIBM1147, CSIBM1148, CSIBM1149, CSIBM1153,
CSIBM1154, CSIBM1155, CSIBM1156, CSIBM1157, CSIBM1158, CSIBM1160, CSIBM1161,
CSIBM1163, CSIBM1164, CSIBM1166, CSIBM1167, CSIBM1364, CSIBM1371, CSIBM1388,
CSIBM1390, CSIBM1399, CSIBM4517, CSIBM4899, CSIBM4909, CSIBM4971, CSIBM5347,
CSIBM9030, CSIBM9066, CSIBM9448, CSIBM12712, CSIBM16804, CSIBM11621162,
CSISO4UNITEDKINGDOM, CSISO10SWEDISH, CSISO11SWEDISHFORNAMES,
CSISO14JISC6220RO, CSISO15ITALIAN, CSISO16PORTUGESE, CSISO17SPANISH,
CSISO18GREEK7OLD, CSISO19LATINGREEK, CSISO21GERMAN, CSISO25FRENCH,
CSISO27LATINGREEK1, CSISO49INIS, CSISO50INIS8, CSISO51INISCYRILLIC,
CSISO58GB1988, CSISO60DANISHNORWEGIAN, CSISO60NORWEGIAN1, CSISO61NORWEGIAN2,
CSISO69FRENCH, CSISO84PORTUGUESE2, CSISO85SPANISH2, CSISO86HUNGARIAN,
CSISO88GREEK7, CSISO89ASMO449, CSISO90, CSISO92JISC62991984B, CSISO99NAPLPS,
CSISO103T618BIT, CSISO111ECMACYRILLIC, CSISO121CANADIAN1, CSISO122CANADIAN2,
CSISO139CSN369103, CSISO141JUSIB1002, CSISO143IECP271, CSISO150,
CSISO150GREEKCCITT, CSISO151CUBA, CSISO153GOST1976874, CSISO646DANISH,
CSISO2022CN, CSISO2022JP, CSISO2022JP2, CSISO2022KR, CSISO2033,
CSISO5427CYRILLIC, CSISO5427CYRILLIC1981, CSISO5428GREEK, CSISO10367BOX,
CSISOLATIN1, CSISOLATIN2, CSISOLATIN3, CSISOLATIN4, CSISOLATIN5, CSISOLATIN6,
CSISOLATINARABIC, CSISOLATINCYRILLIC, CSISOLATINGREEK, CSISOLATINHEBREW,
CSKOI8R, CSKSC5636, CSMACINTOSH, CSNATSDANO, CSNATSSEFI, CSN_369103,
CSPC8CODEPAGE437, CSPC775BALTIC, CSPC850MULTILINGUAL, CSPC862LATINHEBREW,
CSPCP852, CSSHIFTJIS, CSUCS4, CSUNICODE, CSWINDOWS31J, CUBA, CWI-2, CWI,
CYRILLIC, DE, DEC-MCS, DEC, DECMCS, DIN_66003, DK, DS2089, DS_2089, E13B,
EBCDIC-AT-DE-A, EBCDIC-AT-DE, EBCDIC-BE, EBCDIC-BR, EBCDIC-CA-FR,
EBCDIC-CP-AR1, EBCDIC-CP-AR2, EBCDIC-CP-BE, EBCDIC-CP-CA, EBCDIC-CP-CH,
EBCDIC-CP-DK, EBCDIC-CP-ES, EBCDIC-CP-FI, EBCDIC-CP-FR, EBCDIC-CP-GB,
EBCDIC-CP-GR, EBCDIC-CP-HE, EBCDIC-CP-IS, EBCDIC-CP-IT, EBCDIC-CP-NL,
EBCDIC-CP-NO, EBCDIC-CP-ROECE, EBCDIC-CP-SE, EBCDIC-CP-TR, EBCDIC-CP-US,
EBCDIC-CP-WT, EBCDIC-CP-YU, EBCDIC-CYRILLIC, EBCDIC-DK-NO-A, EBCDIC-DK-NO,
EBCDIC-ES-A, EBCDIC-ES-S, EBCDIC-ES, EBCDIC-FI-SE-A, EBCDIC-FI-SE, EBCDIC-FR,
EBCDIC-GREEK, EBCDIC-INT, EBCDIC-INT1, EBCDIC-IS-FRISS, EBCDIC-IT,
EBCDIC-JP-E, EBCDIC-JP-KANA, EBCDIC-PT, EBCDIC-UK, EBCDIC-US, EBCDICATDE,
EBCDICATDEA, EBCDICCAFR, EBCDICDKNO, EBCDICDKNOA, EBCDICES, EBCDICESA,
EBCDICESS, EBCDICFISE, EBCDICFISEA, EBCDICFR, EBCDICISFRISS, EBCDICIT,
EBCDICPT, EBCDICUK, EBCDICUS, ECMA-114, ECMA-118, ECMA-128, ECMA-CYRILLIC,
ECMACYRILLIC, ELOT_928, ES, ES2, EUC-CN, EUC-JISX0213, EUC-JP-MS, EUC-JP,
EUC-KR, EUC-TW, EUCCN, EUCJP-MS, EUCJP-OPEN, EUCJP-WIN, EUCJP, EUCKR, EUCTW,
FI, FR, GB, GB2312, GB13000, GB18030, GBK, GB_1988-80, GB_198880,
GEORGIAN-ACADEMY, GEORGIAN-PS, GOST_19768-74, GOST_19768, GOST_1976874,
GREEK-CCITT, GREEK, GREEK7-OLD, GREEK7, GREEK7OLD, GREEK8, GREEKCCITT,
HEBREW, HP-GREEK8, HP-ROMAN8, HP-ROMAN9, HP-THAI8, HP-TURKISH8, HPGREEK8,
HPROMAN8, HPROMAN9, HPTHAI8, HPTURKISH8, HU, IBM-803, IBM-856, IBM-901,
IBM-902, IBM-921, IBM-922, IBM-930, IBM-932, IBM-933, IBM-935, IBM-937,
IBM-939, IBM-943, IBM-1008, IBM-1025, IBM-1046, IBM-1047, IBM-1097, IBM-1112,
IBM-1122, IBM-1123, IBM-1124, IBM-1129, IBM-1130, IBM-1132, IBM-1133,
IBM-1137, IBM-1140, IBM-1141, IBM-1142, IBM-1143, IBM-1144, IBM-1145,
IBM-1146, IBM-1147, IBM-1148, IBM-1149, IBM-1153, IBM-1154, IBM-1155,
IBM-1156, IBM-1157, IBM-1158, IBM-1160, IBM-1161, IBM-1162, IBM-1163,
IBM-1164, IBM-1166, IBM-1167, IBM-1364, IBM-1371, IBM-1388, IBM-1390,
IBM-1399, IBM-4517, IBM-4899, IBM-4909, IBM-4971, IBM-5347, IBM-9030,
IBM-9066, IBM-9448, IBM-12712, IBM-16804, IBM037, IBM038, IBM256, IBM273,
IBM274, IBM275, IBM277, IBM278, IBM280, IBM281, IBM284, IBM285, IBM290,
IBM297, IBM367, IBM420, IBM423, IBM424, IBM437, IBM500, IBM775, IBM803,
IBM813, IBM819, IBM848, IBM850, IBM851, IBM852, IBM855, IBM856, IBM857,
IBM860, IBM861, IBM862, IBM863, IBM864, IBM865, IBM866, IBM866NAV, IBM868,
IBM869, IBM870, IBM871, IBM874, IBM875, IBM880, IBM891, IBM901, IBM902,
IBM903, IBM904, IBM905, IBM912, IBM915, IBM916, IBM918, IBM920, IBM921,
IBM922, IBM930, IBM932, IBM933, IBM935, IBM937, IBM939, IBM943, IBM1004,
IBM1008, IBM1025, IBM1026, IBM1046, IBM1047, IBM1089, IBM1097, IBM1112,
IBM1122, IBM1123, IBM1124, IBM1129, IBM1130, IBM1132, IBM1133, IBM1137,
IBM1140, IBM1141, IBM1142, IBM1143, IBM1144, IBM1145, IBM1146, IBM1147,
IBM1148, IBM1149, IBM1153, IBM1154, IBM1155, IBM1156, IBM1157, IBM1158,
IBM1160, IBM1161, IBM1162, IBM1163, IBM1164, IBM1166, IBM1167, IBM1364,
IBM1371, IBM1388, IBM1390, IBM1399, IBM4517, IBM4899, IBM4909, IBM4971,
IBM5347, IBM9030, IBM9066, IBM9448, IBM12712, IBM16804, IEC_P27-1, IEC_P271,
INIS-8, INIS-CYRILLIC, INIS, INIS8, INISCYRILLIC, ISIRI-3342, ISIRI3342,
ISO-2022-CN-EXT, ISO-2022-CN, ISO-2022-JP-2, ISO-2022-JP-3, ISO-2022-JP,
ISO-2022-KR, ISO-8859-1, ISO-8859-2, ISO-8859-3, ISO-8859-4, ISO-8859-5,
ISO-8859-6, ISO-8859-7, ISO-8859-
|
fameyer/comatmor
|
src/comatmor/__init__.py
|
Python
|
gpl-2.0
| 96 | 0 |
# module in
|
cludes
import elliptic
import heat
import IRT
print "Loading comatmor version 0.0.1"
| |
hyades/whatsapp-client
|
src/layers/receivers/receipt_receiver.py
|
Python
|
gpl-3.0
| 221 | 0 |
from layers.receivers.base_receiever import BaseRe
|
ceiver
class ReceiptReceiver(BaseReceiver):
def onReceipt(self, receiptEntity):
ack = ReceiptReceiver.getAckEntity(receiptEnt
|
ity)
self.toLower(ack)
|
agendaodonto/server
|
app/schedule/serializers/patient.py
|
Python
|
agpl-3.0
| 559 | 0.001789 |
from rest_framework.serializers import ModelSerializer
from app
|
.schedule.models.patient import Patient
from app.schedule.serializers.clinic import ClinicListSerializer
from app.schedule.serializers.dental_plan import DentalPlanSerializer
|
class PatientSerializer(ModelSerializer):
class Meta:
model = Patient
fields = ('id', 'name', 'last_name', 'sex', 'phone', 'clinic', 'created', 'modified', 'dental_plan')
class PatientListSerializer(PatientSerializer):
clinic = ClinicListSerializer()
dental_plan = DentalPlanSerializer()
|
Rcoko/flaskLearn
|
app/main/views.py
|
Python
|
mit
| 3,576 | 0.014646 |
# -- coding: utf-8 --
from flask import render_template, session, redirect, url_for, current_app, request
from .. import db
from ..models import Detail,Contents,Keywords,WXUrls
from . import main
from .forms import NameForm
import wechatsogou
import hashlib
from .errors import *
from ..API.reqweb import *
@main.route('/', methods=['GET', 'POST'])
def index():
form = NameForm()
if form.validate_on_submit():
user = User.query.filter_by(username=form.name.data).first()
if user is None:
user = User(username=form.name.data)
db.session.add(user)
session['known'] = False
if current_app.config['FLASKY_ADMIN']:
send_email(current_app.config['FLASKY_ADMIN'], 'New User',
'mail/new_user', user=user)
else:
session['known'] = True
|
session['name'] = form.name.data
return redirect(url_for('.index'))
return render_template('index.html',
form=form, name=session.get('name'),
known=session.get('known', False))
@main.route('/test/')
def test():
    content = Contents(name="test content")
    todo1 = Detail(title='test title', keywords='list list list', description='description description description description description', contents=content)
todo1.save()
ss = Detail.objects().all()
objLen =
|
len(ss)
s1 = ss[0]
a = 4
#todo1.save()
return render_template('detail.html',detail = s1)
@main.route('/content/',methods=['GET', 'POST'])
def content():
keyword=request.args.get('key')
vx_obj = wechatsogou.WechatSogouAPI()
lists = []
sugg_keywords = []
md5_string = ''
keywords = ''
title = ''
des = ''
#try:
if keyword.strip() != '':
lists = vx_obj.search_article(keyword)
for list in lists:
wx_url = list['article']['url']
hash = hashlib.md5()
hash.update(bytes(wx_url))
md5_str = hash.hexdigest()
#list['article'].append('wx_url_md5')
list['article']['wx_url_md5']=md5_str
wx_urls = WXUrls(md5_str = md5_str,wx_url=wx_url)
wx_urls.save()
sugg_keywords = vx_obj.get_sugg(keyword)
#except:
    #    print('value error')
key_count = len(sugg_keywords)
if key_count == 1:
title = keywords= sugg_keywords[0]
elif key_count > 1:
title = keyword+'_'+sugg_keywords[0]
for sugg_key in sugg_keywords:
keywords = keywords+ ','+sugg_key
keywords = keywords[1:]
else:
title =keywords= keyword
if title.strip() != '':
        hash = hashlib.md5()  # md5 object: md5 cannot be reversed, but the digest is deterministic (one-to-one), so it is weak and can be broken by collision attacks
        hash.update(bytes(title))  # pass in the string to be hashed here
        md5_string = hash.hexdigest()  # get the hex digest string
keywrods_id = Keywords(md5_string = md5_string,title=keyword)
keywrods_id.save()
else:
print '404.html'
return render_template('content.html',content_list = lists,title=title,keywords=keywords,des=des,sugg_keywords=sugg_keywords)
@main.route('/post/',methods=['GET', 'POST'])
def post():
url_md5=request.args.get('md5')
wx_urls = WXUrls.objects(md5_str=url_md5)[:1]
if wx_urls.count() == 1:
wx_url=wx_urls[0].wx_url
ReqWebInfo.get_wx_article_info(wx_url)
return render_template('detail.html',)
else:
return render_template('404.html')
|
bschug/neverending-story
|
markov.py
|
Python
|
mit
| 5,169 | 0.001161 |
import argparse
from collections import defaultdict, Counter, deque
import random
import json
import time
from tqdm import tqdm
import wikipedia
class MarkovModel(object):
def __init__(self):
self.states = defaultdict(lambda: Counter())
self.totals = Counter()
def add_sample(self, state, followup):
self.states[state][followup] += 1
self.totals[state] += 1
def generate(self):
result = []
for followup in self.iter_chain():
result.append(followup)
return result
def iter_chain(self, state=tuple()):
while state in self.states:
followup = self.next(state)
state = state[1:] + followup
for token in followup:
yield token
def next(self, state):
r = random.randint(0, self.totals[state] - 1)
for followup, weight in self.states[state].items():
r -= weight
if r < 0:
return followup
raise ValueError("Mismatch of totals / weights for state {}".format(state))
def to_json(self):
converted = {' '.join(state): list(followups.keys()) for state, followups in self.states.items()}
return json.dumps(converted)
def iter_states(tokens, state_size, start_state=tuple(), end_marker=None):
# First transition is from empty state to first token-based state
yield start_state, tuple(tokens[0:state_size])
state = tuple(tokens[0:state_size])
for token in tokens[state_size:]:
# Each additional token means last state to that token
yield state, (token,)
# New state is last {state_size} tokens we yielded
state = state[1:] + (token,)
# End is marked by None
yield state, end_marker
def tokenize_story(story):
story = deque(story)
yield "\n"
while len(story) > 0:
token = eat_one_token(story)
if token is not None:
yield token
def eat_one_token(story):
while len(story) > 0 and isinvalid(story[0]):
story.popleft()
if len(story) == 0:
return None
if isalnum(story[0]):
return eat_word(story)
if ispunctuation(story[0]):
return eat_punctuation(story)
if isnewline(story[0]):
return eat_newline(story)
def isinvalid(char):
return not isalnum(char) and not ispunctuation(char) and not isnewline(char)
def isalnum(char):
return char.isalnum() or char == "'" or char == "’"
def ispunctuation(char):
return char in ",.-!?:&"
def isnewline(char):
return char == '\n'
def eat_word(story):
word = [story.popleft()]
while len(story) > 0 and isalnum(story[0]):
word.append(story.popleft())
return ''.join(word)
def eat_punctuation(story):
token = [story.popleft()]
while len(story) > 0 and ispunctuation(story[0]):
token.append(story.popleft())
return ''.join(token)
def eat_newline(story):
while len(story) > 0 and story[0].isspace():
story.popleft()
return '\n'
def load_story(filenames):
stories = []
for filename in filenames:
with open(filename) as fp:
story = fp.read()
if filename.endswith('.ftxt'):
story = remove_single_newlines(story)
stories.append(story)
return '\n'.join(stories)
def remove_single_newlines(story):
paragraphs = [[]]
for line in story.splitlines():
if len(line.strip()) == 0:
paragraphs.append([])
else:
paragraphs[-1].append(line)
return '\n'.join(' '.join(x for x in p) for p in paragraphs)
def load_wikipedia(num_articles):
lines = []
while num_articles > 0:
chunk = min(10, num_articles)
num_articles -= 10
for article in wikipedia.random(chunk):
try:
page = wikipedia.page(article)
except wikipedia.DisambiguationError as ex:
page = wikipedia.page(ex.args[1][0])
print(article)
lines.extend(x for x in page.content.splitlines() if not x.startswith('==') and len(x) > 0)
return '\n'.join(lines)
def main(args):
model = MarkovModel()
if args.mode == 'txt':
story = load_story(args.txt)
elif args.mode == 'wikipedia':
story = load_wikipedia(100)
else:
raise ValueError("invalid mode {}".format(args.mode))
tokens = list(tqdm(tokenize_story(story), desc="tokenizing"))
for state, followup in tqdm(iter_states(tokens, 3, start_state=tuple('\n'), end_marker=()), desc="building model"):
model.add_sample(state, followup)
print("Saving Model...")
with open("model.json", "w") as fp:
fp.write(model.to_json())
print("Generating Story:")
for token in model.iter_chain(tuple('\n')):
if not ispunctuation(token):
print(" ", end="")
print(token, end="", flush=True)
time.sleep(0.05)
def parse_args():
ap = argparse.ArgumentParser()
ap.add_argument('mode', choices=['txt', 'wikipedia'])
ap.add_argument('--txt', action='append')
return ap.parse_args()
if __name__ == '__main__':
|
ma
|
in(parse_args())
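# --- Illustrative usage (hedged sketch; not part of the original script) ---
# Building a tiny model by hand instead of going through main()/argparse:
#
#   model = MarkovModel()
#   tokens = list(tokenize_story("The cat sat. The cat slept."))
#   for state, followup in iter_states(tokens, 3, start_state=tuple('\n'), end_marker=()):
#       model.add_sample(state, followup)
#   print(' '.join(model.iter_chain(tuple('\n'))))   # walks the chain until the end marker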
|
pronexo-odoo/odoo-argentina
|
l10n_ar_account_check_debit_note/invoice.py
|
Python
|
agpl-3.0
| 31,158 | 0.008634 |
# -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2012 OpenERP - Team de Localización Argentina.
# https://launchpad.net/~openerp-l10n-ar-localization
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from lxml import etree
import decimal_precision as dp
import netsvc
import pooler
from osv import fields, osv, orm
from tools.translate import _
import logging
class account_invoice(osv.osv):
_name = 'account.invoice'
_inherit = 'account.invoice'
_description = 'Account Invoice Debit Note'
_columns = {
'type': fields.selection([
('out_invoice', 'Customer Invoice'),
('in_invoice', 'Supplier Invoice'),
('out_refund', 'Customer Refund'),
('in_refund', 'Supplier Refund'),
('in_debit', 'Supplier Debit Note'), # Added
('out_debit', 'Client Debit Note'), # Added
], 'Type', readonly=True, select=True), # Modified
}
# Modified
def _get_analytic_lines(self, cr, uid, id,context=None):
if context is None:
context = {}
inv = self.browse(cr, uid, id)
cur_obj = self.pool.get('res.currency')
company_currency = inv.company_id.currency_id.id
if inv.type in ('out_invoice', 'in_refund'):
sign = 1
else:
sign = -1
iml = self.pool.get('account.invoice.line').move_line_get(cr, uid, inv.id,context=context)
for il in iml:
if il['account_analytic_id']:
if inv.type in ('in_invoice', 'in_refund', 'in_debit'): # Modified
ref = inv.reference
else:
ref = self._convert_ref(cr, uid, inv.number)
if not inv.journal_id.analytic_journal_id:
raise osv.except_osv(_('No Analytic Journal !'),_("You have to define an analytic journal on the '%s' journal!") % (inv.journal_id.name,))
il['analytic_lines'] = [(0,0, {
'name': il['name'],
'date': inv['date_invoice'],
'account_id': il['account_analytic_id'],
'unit_amount': il['quantity'],
'amount': cur_obj.compute(cr, uid, inv.currency_id.id, company_currency, il['price'], context={'date': inv.date_invoice}) * sign,
'product_id': il['product_id'],
'product_uom_id': il['uos_id'],
'general_account_id': il['account_id'],
'journal_id': inv.journal_id.analytic_journal_id.id,
'ref': ref,
})]
return iml
# Modified
def _get_journal(self, cr, uid, context=None):
if context is None:
context = {}
type_inv = context.get('type', 'out_invoice')
user = self.pool.get('res.users').browse(cr, uid, uid, context=context)
company_id = context.get('company_id', user.company_id.id)
type2journal = {'out_invoice': 'sale', 'out_debit': 'sale', 'in_invoice': 'purchase', 'in_debit': 'purchase', 'out_refund': 'sale_refund', 'in_refund': 'purchase_refund'} # Modified
refund_journal = {'out_invoice': False, 'out_debit': False, 'in_invoice': False, 'in_debit': False, 'out_refund': True, 'in_refund': True} # Modified
journal_obj = self.pool.get('account.journal')
res = journal_obj.search(cr, uid, [('type', '=', type2journal.get(type_inv, 'sale')),
('company_id', '=', company_id)],
# ('refund_journal', '=', refund_journal.get(type_inv, False))],
limit=1)
return res and res[0] or False # Modified
# Modified
def _get_journal_analytic(self, cr, uid, type_inv, context=None):
type2journal = {'out_invoice': 'sale', 'out_debit': 'sale', 'in_invoice': 'purchase', 'in_debit': 'purchase', 'out_refund': 'sale', 'in_refund': 'purchase'} # Modified
tt = type2journal.get(type_inv, 'sale')
result = self.pool.get('account.analytic.journal').search(cr, uid, [('type','=',tt)], context=context)
if not result:
raise osv.except_osv(_('No Analytic Journal !'),_("You must define an analytic journal of type '%s' !") % (tt,))
return result and result[0] or False # Modified
# Modified
def onchange_partner_id(self, cr, uid, ids, type, partner_id,\
date_invoice=False, payment_term=False,
|
partner_bank_id=False, company_id=False):
invoice_addr_id = False
contact_addr_id = False
partner_payment_term = False
acc_id = False
bank_id = False
fiscal_position = False
opt = [('uid', str(uid))]
if partner_id:
opt.insert(0, ('id', partner_id))
res = self.pool.get('res.partner').address_get(cr, uid, [partner_id], ['contact', 'invoice'])
contact_addr_id = res['cont
|
act']
invoice_addr_id = res['invoice']
p = self.pool.get('res.partner').browse(cr, uid, partner_id)
if company_id:
if not p.property_account_receivable or not p.property_account_payable:
raise osv.except_osv(_('Error!'),
                    _('You need to define the account plan for your company'))
if p.property_account_receivable.company_id.id != company_id and p.property_account_payable.company_id.id != company_id:
property_obj = self.pool.get('ir.property')
rec_pro_id = property_obj.search(cr,uid,[('name','=','property_account_receivable'),('res_id','=','res.partner,'+str(partner_id)+''),('company_id','=',company_id)])
pay_pro_id = property_obj.search(cr,uid,[('name','=','property_account_payable'),('res_id','=','res.partner,'+str(partner_id)+''),('company_id','=',company_id)])
if not rec_pro_id:
rec_pro_id = property_obj.search(cr,uid,[('name','=','property_account_receivable'),('company_id','=',company_id)])
if not pay_pro_id:
pay_pro_id = property_obj.search(cr,uid,[('name','=','property_account_payable'),('company_id','=',company_id)])
rec_line_data = property_obj.read(cr,uid,rec_pro_id,['name','value_reference','res_id'])
pay_line_data = property_obj.read(cr,uid,pay_pro_id,['name','value_reference','res_id'])
rec_res_id = rec_line_data and rec_line_data[0].get('value_reference',False) and int(rec_line_data[0]['value_reference'].split(',')[1]) or False
pay_res_id = pay_line_data and pay_line_data[0].get('value_reference',False) and int(pay_line_data[0]['value_reference'].split(',')[1]) or False
if not rec_res_id and not pay_res_id:
raise osv.except_osv(_('Configuration Error !'),
                        _('Cannot find an account chart for this company. Please create an account.'))
account_obj = self.pool.get('account.account')
rec_obj_acc = account_obj.browse(cr, uid, [rec_res_id])
pay_obj_acc = account_obj.browse(cr, uid, [pay_res_id])
p.property_account_receivable = rec_obj_acc[0]
p.property_account_payable = pay_obj_acc[0]
|
endlessm/chromium-browser
|
third_party/swiftshader/third_party/llvm-7.0/llvm/utils/llvm-build/llvmbuild/main.py
|
Python
|
bsd-3-clause
| 34,146 | 0.002577 |
from __future__ import absolute_import
import filecmp
import os
import sys
import llvmbuild.componentinfo as componentinfo
from llvmbuild.util import fatal, note
###
def cmake_quote_string(value):
"""
cmake_quote_string(value) -> str
Return a quoted form of the given value that is suitable for use in CMake
language files.
"""
# Currently, we only handle escaping backslashes.
value = value.replace("\\", "\\\\")
return value
def cmake_quote_path(value):
"""
cmake_quote_path(value) -> str
Return a quoted form of the given value that is suitable for use in CMake
language files.
"""
    # CMake has a bug in its Makefile generator that doesn't properly quote
# strings it generates. So instead of using proper quoting, we just use "/"
# style paths. Currently, we only handle escaping backslashes.
value = value.replace("\\", "/")
return value
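# Illustrative behaviour of the two helpers above (added examples, not from the original file):
#   cmake_quote_string('a\\b') returns 'a\\\\b'  - every backslash is doubled for CMake syntax.
#   cmake_quote_path('a\\b')   returns 'a/b'     - backslashes become "/" to sidestep the Makefile generator quoting bug.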
def make_install_dir(path):
"""
make_install_dir(path) -> None
Create the given directory path for installation, including any parents.
"""
# os.makedirs considers it an error to be called with an existent path.
if not os.path.exists(path):
os.makedirs(path)
###
class LLVMProjectInfo(object):
@staticmethod
def load_infos_from_path(llvmbuild_source_root):
def recurse(subpath):
# Load the LLVMBuild file.
llvmbuild_path = os.path.join(llvmbuild_source_root + subpath,
'LLVMBuild.txt')
if not os.path.exists(llvmbuild_path):
fatal("missing LLVMBuild.txt file at: %r" % (llvmbuild_path,))
# Parse the components from it.
common,info_iter = componentinfo.load_from_path(llvmbuild_path,
subpath)
for info in info_iter:
yield info
# Recurse into the specified subdirectories.
for subdir in common.get_list("subdirectories"):
for item in recurse(os.path.join(subpath, subdir)):
yield item
return recurse("/")
@staticmethod
def load_from_path(source_root, llvmbuild_source_root):
infos = list(
LLVMProjectInfo.load_infos_from_path(llvmbuild_source_root))
return LLVMProjectInfo(source_root, infos)
def __init__(self, source_root, component_infos):
# Store our simple ivars.
self.source_root = source_root
self.component_infos = list(component_infos)
self.component_info_map = None
self.ordered_component_infos = None
def validate_components(self):
"""validate_components() -> None
Validate that the project components are well-defined. Among other
things, this checks that:
- Components have valid references.
- Components references do not form cycles.
We also construct the map from component names to info, and the
topological ordering of components.
"""
# Create the component info map and validate that component names are
# unique.
self.component_info_map = {}
for ci in self.component_infos:
existing = self.component_info_map.get(ci.name)
if existing is not None:
# We found a duplicate component name, report it and error out.
fatal("found duplicate component %r (at %r and %r)" % (
ci.name, ci.subpath, existing.subpath))
self.component_info_map[ci.name] = ci
# Disallow 'all' as a component name, which is a special case.
if 'all' in self.component_info_map:
fatal("project is not allowed to define 'all' component")
# Add the root component.
if '$ROOT' in self.component_info_map:
fatal("project is not allowed to define $ROOT component")
self.component_info_map['$ROOT'] = componentinfo.GroupComponentInfo(
'/', '$ROOT', None)
self.component_infos.append(self.component_info_map['$ROOT'])
# Topologically order the component information according to their
# component references.
def visit_component_info(ci, current_stack, current_set):
# Check for a cycles.
if ci in current_set:
# We found a cycle, report it and error out.
cycle_description = ' -> '.join(
'%r (%s)' % (ci.name, relation)
for relation,ci in current_stack)
fatal("found cycle to %r after following: %s -> %s" % (
ci.name, cycle_description, ci.name))
# If we have already visited this item, we are done.
if ci not in components_to_visit:
return
# Otherwise, mark the component info as visited and traverse.
components_to_visit.remove(ci)
# Validate the parent reference, which we treat specially.
if ci.parent is not None:
parent = self.component_info_map.get(ci.parent)
if parent is None:
fatal("component %r has invalid reference %r (via %r)" % (
ci.name, ci.parent, 'parent'))
ci.set_parent_instance(parent)
for relation,referent_name in ci.get_component_references():
# Validate that the reference is ok.
referent = self.component_info_map.get(referent_name)
if referent is None:
fatal(
|
"component %r has invalid reference %r (via %r)" % (
ci.name, referent_name, relation))
# Visit the reference.
|
current_stack.append((relation,ci))
current_set.add(ci)
visit_component_info(referent, current_stack, current_set)
current_set.remove(ci)
current_stack.pop()
# Finally, add the component info to the ordered list.
self.ordered_component_infos.append(ci)
# FIXME: We aren't actually correctly checking for cycles along the
# parent edges. Haven't decided how I want to handle this -- I thought
# about only checking cycles by relation type. If we do that, it falls
# out easily. If we don't, we should special case the check.
self.ordered_component_infos = []
components_to_visit = sorted(
set(self.component_infos),
key = lambda c: c.name)
while components_to_visit:
visit_component_info(components_to_visit[0], [], set())
# Canonicalize children lists.
for c in self.ordered_component_infos:
c.children.sort(key = lambda c: c.name)
def print_tree(self):
def visit(node, depth = 0):
print('%s%-40s (%s)' % (' '*depth, node.name, node.type_name))
for c in node.children:
visit(c, depth + 1)
visit(self.component_info_map['$ROOT'])
def write_components(self, output_path):
# Organize all the components by the directory their LLVMBuild file
# should go in.
info_basedir = {}
for ci in self.component_infos:
# Ignore the $ROOT component.
if ci.parent is None:
continue
info_basedir[ci.subpath] = info_basedir.get(ci.subpath, []) + [ci]
# Compute the list of subdirectories to scan.
subpath_subdirs = {}
for ci in self.component_infos:
# Ignore root components.
if ci.subpath == '/':
continue
# Otherwise, append this subpath to the parent list.
parent_path = os.path.dirname(ci.subpath)
subpath_subdirs[parent_path] = parent_list = subpath_subdirs.get(
parent_path, set())
parent_list.add(os.path.basename(ci.subpath))
# Generate the build files.
for subpath, infos in info_basedir.items():
# Order the components by name to have a canonical ordering.
info
|
hlmnrmr/liveblog
|
server/liveblog/themes/template/loaders.py
|
Python
|
agpl-3.0
| 3,378 | 0.001184 |
import os
import logging
from superdesk import get_resource_service
from jinja2.loaders import FileSystemLoader, ModuleLoader, ChoiceLoader, DictLoader, PrefixLoader
from liveblog.mongo_util import decode as mongodecode
__all__ = ['ThemeTemplateLoader', 'CompiledThemeTemplateLoader']
logger = logging.getLogger('superdesk')
class ThemeTemplateLoader(FileSystemLoader):
"""
Theme template loader for jinja2 SEO themes.
"""
def __init__(self, theme, encoding='utf-8', followlinks=False):
theme_name = theme['name']
themes = get_resource_service('themes')
theme_dirname = themes.get_theme_path(theme_name)
self.searchpath = [os.path.join(theme_dirname, 'templates')]
parent_theme = theme.get('extends')
if parent_theme:
parent_dirname = themes.get_theme_path(parent_theme)
self.searchpath.append(os.path.join(parent_dirname, 'templates'))
self.encoding = encoding
self.followlinks = followlinks
class CompiledThemeTemplateLoader(ChoiceLoader):
def __init__(self, theme):
"""
        A mixed-logic template loader. It uses the compiled templates of the
        current theme and also falls back to FileSystemLoader-based loaders in
        order to enable inheritance.
"""
self.loaders = []
theme_name = theme['name']
themes = get_resource_service('themes')
parent_theme = theme.get('extends')
files = theme.get('files', {'templates': {}})
if files.get('templates'):
self.addDict
|
onary(theme)
if parent_theme:
parent = themes.find_one(req=None, name=parent_theme)
self.addDictonary(parent)
else:
compiled = themes.get_theme_
|
compiled_templates_path(theme_name)
self.loaders.append(ModuleLoader(compiled))
if parent_theme:
parent_compiled = themes.get_theme_compiled_templates_path(parent_theme)
self.loaders.append(ModuleLoader(parent_compiled))
# let's now add the parent theme prefix loader
if parent_theme:
prefix_loader = self._parent_prefix_loader(parent_theme)
self.loaders.append(prefix_loader)
def _parent_prefix_loader(self, name):
"""
        Creates a PrefixLoader so that parent theme templates can be extended
        using the parent theme name as a prefix.
Example:
{% extends 'parent_theme_name/template_name.html' %}
{% include 'parent_theme_name/template_name.html' %}
Args:
name (`str`): Parent theme name
Returns:
PrefixLoader instance with parent_name as prefix
"""
themes = get_resource_service('themes')
parent_dirname = themes.get_theme_path(name)
search_paths = [os.path.join(parent_dirname, 'templates')]
return PrefixLoader({name: FileSystemLoader(search_paths)})
def addDictonary(self, theme):
"""
        Add the theme's template files as a dictionary-backed loader.
"""
files = theme.get('files', {'templates': {}})
if files.get('templates'):
compiled = {}
for file, content in files.get('templates').items():
compiled[mongodecode(file)] = content
self.loaders.append(DictLoader(compiled))
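# --- Illustrative wiring (hedged sketch; not part of the original module) ---
# Both loaders are regular jinja2 loaders, so they can back a jinja2 Environment;
# the theme name and template name below are assumptions for the sake of the example:
#
#   from jinja2 import Environment
#   theme = get_resource_service('themes').find_one(req=None, name='default')
#   env = Environment(loader=CompiledThemeTemplateLoader(theme))
#   template = env.get_template('template.html')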
|
OCA/partner-contact
|
base_location/models/res_partner.py
|
Python
|
agpl-3.0
| 5,851 | 0.001196 |
# Copyright 2016 Nicolas Bessi, Camptocamp SA
# Copyright 2018 Tecnativa - Pedro M. Baeza
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
from lxml import etree
from odoo import _, api, fields, models
from odoo.exceptions import ValidationError
class ResPartner(models.Model):
_inherit = "res.partner"
zip_id = fields.Many2one(
comodel_name="res.city.zip",
string="ZIP Location",
index=True,
compute="
|
_compute_zip_id",
readonly=False,
store=True,
)
city_id = fields.Many2one(
index=True, # add index for performance
compute="_compute_city_id",
readonly=False,
store=True,
)
city = fields.Char(compute="_compute_city", readonly=False, store=True)
zip = fields.Char(compute="_compute_zip", readonly=False, store=True)
country_id = fields.Many2one(
compute="_compute_country_id", readonly=False, store=True
)
|
state_id = fields.Many2one(compute="_compute_state_id", readonly=False, store=True)
@api.depends("state_id", "country_id", "city_id", "zip")
def _compute_zip_id(self):
"""Empty the zip auto-completion field if data mismatch when on UI."""
for record in self.filtered("zip_id"):
fields_map = {
"zip": "name",
"city_id": "city_id",
"state_id": "state_id",
"country_id": "country_id",
}
for rec_field, zip_field in fields_map.items():
if (
record[rec_field]
and record[rec_field] != record._origin[rec_field]
and record[rec_field] != record.zip_id[zip_field]
):
record.zip_id = False
break
@api.depends("zip_id")
def _compute_city_id(self):
if hasattr(super(), "_compute_city_id"):
super()._compute_city_id() # pragma: no cover
for record in self:
if record.zip_id:
record.city_id = record.zip_id.city_id
elif not record.country_enforce_cities:
record.city_id = False
@api.depends("zip_id")
def _compute_city(self):
if hasattr(super(), "_compute_city"):
super()._compute_city() # pragma: no cover
for record in self:
if record.zip_id:
record.city = record.zip_id.city_id.name
@api.depends("zip_id")
def _compute_zip(self):
if hasattr(super(), "_compute_zip"):
super()._compute_zip() # pragma: no cover
for record in self:
if record.zip_id:
record.zip = record.zip_id.name
@api.depends("zip_id", "state_id")
def _compute_country_id(self):
if hasattr(super(), "_compute_country_id"):
super()._compute_country_id() # pragma: no cover
for record in self:
if record.zip_id.city_id.country_id:
record.country_id = record.zip_id.city_id.country_id
elif record.state_id:
record.country_id = record.state_id.country_id
@api.depends("zip_id")
def _compute_state_id(self):
if hasattr(super(), "_compute_state_id"):
super()._compute_state_id() # pragma: no cover
for record in self:
state = record.zip_id.city_id.state_id
if state and record.state_id != state:
record.state_id = record.zip_id.city_id.state_id
@api.constrains("zip_id", "country_id", "city_id", "state_id", "zip")
def _check_zip(self):
if self.env.context.get("skip_check_zip"):
return
for rec in self:
if not rec.zip_id:
continue
if rec.zip_id.city_id.country_id != rec.country_id:
raise ValidationError(
_("The country of the partner %s differs from that in location %s")
% (rec.name, rec.zip_id.name)
)
if rec.zip_id.city_id.state_id != rec.state_id:
raise ValidationError(
_("The state of the partner %s differs from that in location %s")
% (rec.name, rec.zip_id.name)
)
if rec.zip_id.city_id != rec.city_id:
raise ValidationError(
_("The city of partner %s differs from that in location %s")
% (rec.name, rec.zip_id.name)
)
if rec.zip_id.name != rec.zip:
raise ValidationError(
_("The zip of the partner %s differs from that in location %s")
% (rec.name, rec.zip_id.name)
)
def _zip_id_domain(self):
return """
[
("city_id", "=?", city_id),
("city_id.country_id", "=?", country_id),
("city_id.state_id", "=?", state_id),
]
"""
@api.model
def _fields_view_get_address(self, arch):
# We want to use a domain that requires city_id to be on the view
# but we can't add it directly there, otherwise _fields_view_get_address
# in base_address_city won't do its magic, as it immediately returns
# if city_id is already in there. On the other hand, if city_id is not in the
# views, odoo won't let us use it in zip_id's domain.
# For this reason we need to set the domain here.
arch = super()._fields_view_get_address(arch)
doc = etree.fromstring(arch)
for node in doc.xpath("//field[@name='zip_id']"):
node.attrib["domain"] = self._zip_id_domain()
return etree.tostring(doc, encoding="unicode")
@api.model
def _address_fields(self):
return super()._address_fields() + ["zip_id"]
|
ZeroCater/Eyrie
|
interface/models.py
|
Python
|
mit
| 4,056 | 0.00074 |
from django.conf import settings
from django.contrib.auth.models import User
from django.contrib.postgres.search import SearchVector, SearchQuery, SearchRank
from django.core.urlresolvers import reverse
from django.db import models
from github import UnknownObjectException
from social.apps.django_app.default.models import UserSocialAuth
from documents.tasks.wiki_processor import process_wiki
from interface.utils import get_github
from interface.path_processor import PathProcessor
class UserProxy(User):
class Meta:
proxy = True
def get_auth(self):
try:
data = UserSocialAuth.objects.filter(user=self).values_list('extra_data')[0][0]
except:
return None
username = data['login']
password = data['access_token']
return (username, password)
class Repo(models.Model):
user = models.ForeignKey(UserProxy, related_name='repos')
full_name = models.TextField(unique=True)
webhook_id = models.IntegerField(null=True, blank=True)
is_private = models.BooleanField(default=True)
wiki_branch = models.TextField(default='master')
created_at = models.DateTimeField(auto_now_add=True)
class Meta:
ordering = ['full_name']
def __str__(self):
return self.full_name
def get_absolute_url(self):
return reverse('repo_detail', kwargs={'full_name': self.full_name})
@property
def clone_url(self):
return 'https://github.com/{}.git'.format(self.full_name)
def delete(self, *args, **kwargs):
self.remove_webhook()
return super(Repo, self).delete(*args, **kwargs)
def remove_webhook(self):
if not settings.DEBUG:
g = get_github(self.user)
grepo = g.get_repo(self.full_name)
try:
hook = grepo.get_hook(self.webhook_id)
hook.delete()
except UnknownObjectException:
pass
self.webhook_id = None
self.save()
def user_is_collaborator(self, user):
if not user.is_authenticated():
return False
if self.user == user or user.is_staff:
return True
g = get_github(user)
grepo = g.get_repo(self.full_name)
guser = g.get_user(user.username)
return grepo.has_in_collaborators(guser)
def add_webhook(self, request):
if settings.DEBUG:
self.webhook_id = 123
else:
g = get_github(self.user)
grepo = g.get_repo(self.full_name)
hook = grepo.create_hook(
'web',
{
'content_type': 'json',
'url': request.build_absolute_uri(reverse('hooksgithub')),
'secret': settings.WEBHOOK_SECRET
},
events=['push'],
active=True
)
self.webhook_id = hook.id
self.save()
@property
def directory(self):
path_processor = PathProcessor(self.full_name, is_directory=True)
return path_processor.repo_disk_path
def enqueue(self, file_change=None):
file_change = file_change or {}
process_wiki.delay(self.id, file_change)
def get_folder_contents(self, path, documents):
folders = []
docs = []
for document in documents:
doc_path = document.path
if path != '/':
doc_path = doc_path.replace(path, '')
if not doc_path.startswith('/'):
doc_path = '/{}'.format(doc_path)
if doc_path == '/':
|
docs.append(document.filename)
else:
first_seg = doc_path.split('/', maxsplit=2)[1]
if first_seg:
folder_name = '{}/'.format(first_seg)
if folder_name not in folders:
folders.append(folder_name)
folders = sorted(folders)
|
docs = sorted(docs)
folders.extend(docs)
return folders
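# --- Illustrative behaviour of Repo.get_folder_contents (hedged sketch, added here) ---
# Only .path and .filename are read from the documents, so a stand-in object works:
#
#   from collections import namedtuple
#   Doc = namedtuple('Doc', ['path', 'filename'])
#   docs = [Doc('/', 'Home.md'), Doc('/guides/', 'Install.md'), Doc('/guides/', 'Usage.md')]
#   Repo().get_folder_contents('/', docs)   # -> ['guides/', 'Home.md']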
|
deepmind/brave
|
brave/training/trainer.py
|
Python
|
apache-2.0
| 3,771 | 0.003447 |
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The functions for computing gradient updates."""
from typing import Callable, NamedTuple, Sequence
import chex
import haiku as hk
import jax
import optax
from brave.datasets import datasets
from brave.models import embedding_model
class ModelUp
|
dates(NamedTuple):
params: hk.Params
sta
|
te: hk.State
opt_state: optax.OptState
scalars: embedding_model.Scalars
UpdateFn = Callable[
[chex.PRNGKey, datasets.MiniBatch, hk.Params, hk.State, optax.OptState],
ModelUpdates]
def build_update_fn(optimizer: optax.GradientTransformation,
loss_fn: embedding_model.LossFn) -> UpdateFn:
"""Returns a function for computing model updates.
Args:
optimizer: The optimizer to use e.g. the result of optax.sgd(...).
loss_fn: An instance of the loss function, pmapped across all devices.
Returns:
A callable function that takes one step in the optimization problem using
the gradients of the loss computed by the model loss function.
"""
def update_fn(rng: chex.PRNGKey, minibatch: datasets.MiniBatch,
params: hk.Params, state: hk.State,
opt_state: optax.OptState) -> ModelUpdates:
grad_fn = jax.grad(loss_fn, has_aux=True)
grad, (state, scalars) = grad_fn(params, state, rng, minibatch)
grad = jax.lax.pmean(grad, axis_name='i')
scalars = jax.lax.pmean(scalars, axis_name='i')
updates, opt_state = optimizer.update(grad, opt_state, params)
params = optax.apply_updates(params, updates)
return ModelUpdates(params, state, opt_state, scalars)
return update_fn
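# --- Illustrative wiring (hedged sketch; not from the original file) ---
# The returned update_fn reduces gradients with jax.lax.pmean(..., axis_name='i'),
# so one plausible setup is to pmap it over devices with that same axis name:
#
#   optimizer = optax.sgd(1e-3)
#   update_fn = jax.pmap(build_update_fn(optimizer, loss_fn), axis_name='i')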
def get_batch_dims(global_batch_size: int, device_count: int,
local_device_count: int) -> Sequence[int]:
"""Compute the batch dims for this host.
The global_batch_size is the number of data samples that are optimized over
in one step of the optimization. This value must be split up so that each
individual device gets some share of the batch.
When running with multiple devices, there may be multiple hosts, each
with multiple local devices. Each host has a local copy of the program, and
runs a local copy of the code. Each host must therefore use a batch size
so that when all of the hosts run together, the total number of batched
elements matches the global batch size. We do this by splitting up the global
batch size evenly amongst all devices, and setting the batch size per host
to the number of host devices times the device batch size.
Args:
global_batch_size: The target total batch size per optimization step.
device_count: The total number of devices sharing computation per step.
local_device_count: The number of devices available on the current host.
Returns:
The batch dimensions to use on the currently running host.
"""
per_device_batch_size, remainder = divmod(global_batch_size, device_count)
if remainder:
raise ValueError(
        f'Cannot split batch of {global_batch_size} evenly across {device_count} devices.'
)
host_batch_dims = (local_device_count, per_device_batch_size)
return host_batch_dims
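# Worked example for get_batch_dims (added for illustration): with global_batch_size=512,
# device_count=8 and two hosts with local_device_count=4 each, every device gets
# 512 // 8 = 64 samples and each host returns batch dims (4, 64), i.e. 256 samples per
# host, so the two hosts together cover the full global batch of 512.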
|
anirudhvenkats/clowdflows
|
workflows/management/commands/__init__.py
|
Python
|
gpl-3.0
| 23 | 0 |
_
|
_author__
|
= 'matjaz'
|
c3nav/c3nav
|
src/c3nav/site/migrations/0001_announcement.py
|
Python
|
apache-2.0
| 1,128 | 0.004433 |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.7 on 2017-12-07 22:51
from __future__ import unicode_literals
import c3nav.mapdata.fields
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Announcement',
fields=[
('id', mo
|
dels.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', models.DateTimeField(auto_now_add=True, verbose_name='created')),
('active_until', models.DateTimeField(null=True, verbose_name='active until')),
('active', models.BooleanField(default=True, verbose_name='active')),
('message', c3nav.mapdata.fields.I18nField(verbose
|
_name='Message')),
],
options={
'verbose_name': 'Announcement',
'verbose_name_plural': 'Announcements',
'get_latest_by': 'created',
'default_related_name': 'announcements',
},
),
]
|
calpeyser/google-cloud-python
|
vision/google/cloud/vision_v1/types.py
|
Python
|
apache-2.0
| 1,284 | 0 |
# Copyright 2017, Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under th
|
e License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from
|
__future__ import absolute_import
import sys
from google.cloud.proto.vision.v1 import geometry_pb2
from google.cloud.proto.vision.v1 import image_annotator_pb2
from google.cloud.proto.vision.v1 import text_annotation_pb2
from google.cloud.proto.vision.v1 import web_detection_pb2
from google.gax.utils.messages import get_messages
names = []
for module in (geometry_pb2, image_annotator_pb2,
text_annotation_pb2, web_detection_pb2):
for name, message in get_messages(module).items():
message.__module__ = 'google.cloud.vision_v1.types'
setattr(sys.modules[__name__], name, message)
names.append(name)
__all__ = tuple(sorted(names))
|
phodal-archive/scrapy-elasticsearch-demo
|
dianping/dianping/spiders/rotateAgent.py
|
Python
|
mit
| 2,814 | 0.013859 |
#!/usr/local/bin/python
# -*-coding:utf8-*-
from scrapy.contrib.downloadermiddleware.useragent import UserAgentMiddleware
import random
class RotateUserAgentMiddleware(UserAgentMiddleware):
def __init__(self, user_agent=''):
self.user_agent = user_agent
def process_request(self, request, spider):
ua = random.choice(self.user_agent_list)
if ua:
request.headers.setdefault('User-Agent', ua)
|
# print '********user-agent:',ua
    # the default user_agent_list comprises Chrome, IE, Firefox, Mozilla, Opera and Netscape
#for more user agent strings,you can find it in http://www.useragentstring.com/pages/useragentstring.php
user_agent_list = [ \
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/22.0.1207.1 Safari/537.1" \
"Mozilla/5.0 (X11; CrOS i686 2268.111.0) AppleWebKit/536.11 (KHTML, like Gecko) Chrome/20.0.1132.57 Safari/536.11", \
|
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.6 (KHTML, like Gecko) Chrome/20.0.1092.0 Safari/536.6", \
"Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.6 (KHTML, like Gecko) Chrome/20.0.1090.0 Safari/536.6", \
"Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/19.77.34.5 Safari/537.1", \
"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/536.5 (KHTML, like Gecko) Chrome/19.0.1084.9 Safari/536.5", \
"Mozilla/5.0 (Windows NT 6.0) AppleWebKit/536.5 (KHTML, like Gecko) Chrome/19.0.1084.36 Safari/536.5", \
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3", \
"Mozilla/5.0 (Windows NT 5.1) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3", \
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_8_0) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3", \
"Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1062.0 Safari/536.3", \
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1062.0 Safari/536.3", \
"Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3", \
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3", \
"Mozilla/5.0 (Windows NT 6.1) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3", \
"Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.0 Safari/536.3", \
"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/535.24 (KHTML, like Gecko) Chrome/19.0.1055.1 Safari/535.24", \
"Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/535.24 (KHTML, like Gecko) Chrome/19.0.1055.1 Safari/535.24"
]
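# --- Illustrative activation (hedged sketch; not part of the original file) ---
# The middleware would typically be enabled in the Scrapy project's settings.py,
# replacing the stock UserAgentMiddleware (the priority value 400 is an assumption):
#
#   DOWNLOADER_MIDDLEWARES = {
#       'dianping.spiders.rotateAgent.RotateUserAgentMiddleware': 400,
#       'scrapy.contrib.downloadermiddleware.useragent.UserAgentMiddleware': None,
#   }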
|
agmscode/agms_python
|
agms/__init__.py
|
Python
|
mit
| 305 | 0 |
from __future__ import absolute_import
f
|
rom agms.configuration import Configuration
from agms.agms import Agms
from agms.transaction import Transaction
from agms.safe import SAFE
from agms.report import Report
from agms.recurring import Recurring
from agms.hpp import HPP
from agms.version import V
|
ersion
|
openqt/algorithms
|
projecteuler/pe303-multiples-with-small-digits.py
|
Python
|
gpl-3.0
| 418 | 0.014423 |
#!/u
|
sr/bin/env python
# coding=utf-8
"""303. Multiples with small digits
https://projecteuler.net/problem=303
For a positive integer n, define f(n) as the least positive multiple of n
that, written in base 10, uses only digits ≤ 2.
Thus f(2)=2, f(3)=12, f(7)=21, f(42)=210, f(89)=1121222.
Also, $\sum \limits_{n = 1}^{100} {\dfrac{f(n)}{n}} = 11363107$.
Find $\sum \limits_{n=1}^{10000} {\dfrac{f(n
|
)}{n}}$.
"""
|
mtils/ems
|
ems/support/bootstrappers/validation.py
|
Python
|
mit
| 1,933 | 0.002587 |
import json
import os.path
from ems.app import Bootstrapper, absolute_path
from ems.inspection.util import classes
from ems.validation.abstract import Validator, MessageProvider
from ems.validation.registry import Registry
from ems.validation.rule_validator import RuleValidator, SimpleMessageProvider
from ems.validation.validators.base import *
from ems.validation.validators.filesystem import *
class AppPathNormalizer(PathNormalizer):
def normalize(self, path):
return absolute_path(path)
class ValidationBootstrapper(Bootstrapper):
validatorModules = set([
'ems.validation.validators.base',
'ems.validation.validators.filesystem',
])
messagesFile = os.path.join('resources','lan
|
g','de','validation.json')
def bootstrap(self, app):
self.app = app
app.share(Registry, self.createRegistry)
app.share(MessageProvider, self.createMessageProvider)
app.share(PathNormalizer, self.createPathNormalizer)
def createRegistry(self):
regi
|
stry = Registry(self.app)
self.addValidatorClasses(registry)
return registry
def createPathNormalizer(self):
return AppPathNormalizer()
def addValidatorClasses(self, registry):
for module in self.validatorModules:
for cls in self.findModuleValidatorClasses(module):
registry += cls
def createMessageProvider(self):
with open(self.messagesFilePath()) as jsonFile:
messages = json.load(jsonFile)
return SimpleMessageProvider(messages)
def messagesFilePath(self):
return os.path.join(self.app.appPath, self.messagesFile)
@classmethod
def findModuleValidatorClasses(cls, moduleName):
validatorClasses = []
for clazz in classes(moduleName):
if issubclass(clazz, Validator):
validatorClasses.append(clazz)
return validatorClasses
|
valeriocos/perceval
|
perceval/backends/core/twitter.py
|
Python
|
gpl-3.0
| 14,309 | 0.002238 |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2015-2019 Bitergia
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later vers
|
ion.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
|
See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1335, USA.
#
# Authors:
# Valerio Cosentino <valcos@bitergia.com>
#
import json
import logging
from grimoirelab_toolkit.datetime import datetime_utcnow, str_to_datetime
from ...backend import (Backend,
BackendCommand,
BackendCommandArgumentParser)
from ...client import HttpClient, RateLimitHandler
from ...errors import BackendError
CATEGORY_TWEET = "tweet"
MAX_SEARCH_QUERY = 500
TWITTER_URL = 'https://twitter.com/'
TWITTER_API_URL = 'https://api.twitter.com/1.1/search/tweets.json'
MAX_ITEMS = 100
# Range before sleeping until rate limit reset
MIN_RATE_LIMIT = 1
# Time to avoid too many request exception
SLEEP_TIME = 30
TWEET_TYPE_MIXED = "mixed"
TWEET_TYPE_RECENT = "recent"
TWEET_TYPE_POPULAR = "popular"
RATE_LIMIT_HEADER = "x-rate-limit-remaining"
RATE_LIMIT_RESET_HEADER = "x-rate-limit-reset"
logger = logging.getLogger(__name__)
class Twitter(Backend):
"""Twitter backend.
    This class allows fetching samples of tweets containing specific
    keywords. Initialize this class by passing the API token needed
    for authentication with the parameter `api_token`.
:param query: query to fetch tweets
:param api_token: token or key needed to use the API
    :param max_items: maximum number of tweets requested in the same query
:param sleep_for_rate: sleep until rate limit is reset
    :param min_rate_to_sleep: minimum rate needed to sleep until
        it will be reset
    :param sleep_time: minimum waiting time to avoid a too-many-requests
        exception
:param tag: label used to mark the data
:param archive: archive to store/retrieve items
"""
version = '0.2.2'
CATEGORIES = [CATEGORY_TWEET]
def __init__(self, query, api_token, max_items=MAX_ITEMS,
sleep_for_rate=False, min_rate_to_sleep=MIN_RATE_LIMIT,
sleep_time=SLEEP_TIME,
tag=None, archive=None):
origin = TWITTER_URL
if len(query) >= MAX_SEARCH_QUERY:
msg = "Search query length exceeded %s, max is %s" % (len(query), MAX_SEARCH_QUERY)
raise BackendError(cause=msg)
super().__init__(origin, tag=tag, archive=archive)
self.query = query
self.api_token = api_token
self.max_items = max_items
self.sleep_for_rate = sleep_for_rate
self.min_rate_to_sleep = min_rate_to_sleep
self.sleep_time = sleep_time
self.client = None
def fetch(self, category=CATEGORY_TWEET, since_id=None, max_id=None,
geocode=None, lang=None,
include_entities=True, tweets_type=TWEET_TYPE_MIXED):
"""Fetch the tweets from the server.
        This method fetches tweets published in the last seven days from the Twitter Search API.
:param category: the category of items to fetch
:param since_id: if not null, it returns results with an ID greater than the specified ID
        :param max_id: if not None, it returns results with an ID less than the specified ID
:param geocode: if enabled, returns tweets by users located at latitude,longitude,"mi"|"km"
:param lang: if enabled, restricts tweets to the given language, given by an ISO 639-1 code
:param include_entities: if disabled, it excludes entities node
:param tweets_type: type of tweets returned. Default is “mixed”, others are "recent" and "popular"
:returns: a generator of tweets
"""
kwargs = {"since_id": since_id,
"max_id": max_id,
"geocode": geocode,
"lang": lang,
"include_entities": include_entities,
"result_type": tweets_type}
items = super().fetch(category, **kwargs)
return items
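    # Illustrative usage of this backend (hedged sketch; not part of the original file):
    #
    #   backend = Twitter("grimoirelab", api_token="<token>", max_items=100)
    #   for item in backend.fetch(tweets_type=TWEET_TYPE_RECENT):
    #       print(item['data']['id_str'])   # perceval wraps each raw tweet under 'data'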
def fetch_items(self, category, **kwargs):
"""Fetch the tweets
:param category: the category of items to fetch
:param kwargs: backend arguments
:returns: a generator of items
"""
since_id = kwargs['since_id']
max_id = kwargs['max_id']
geocode = kwargs['geocode']
lang = kwargs['lang']
entities = kwargs['include_entities']
tweets_type = kwargs['result_type']
logger.info("Fetching tweets %s from %s to %s",
self.query, str(since_id),
str(max_id) if max_id else '--')
tweets_ids = []
min_date = None
max_date = None
group_tweets = self.client.tweets(self.query, since_id=since_id, max_id=max_id, geocode=geocode,
lang=lang, include_entities=entities, result_type=tweets_type)
for tweets in group_tweets:
for i in range(len(tweets)):
tweet = tweets[i]
tweets_ids.append(tweet['id'])
if tweets[-1] == tweet:
min_date = str_to_datetime(tweets[-1]['created_at'])
if tweets[0] == tweet and not max_date:
max_date = str_to_datetime(tweets[0]['created_at'])
yield tweet
logger.info("Fetch process completed: %s (unique %s) tweets fetched, from %s to %s",
len(tweets_ids), len(list(set(tweets_ids))), min_date, max_date)
@classmethod
def has_archiving(cls):
"""Returns whether it supports archiving items on the fetch process.
:returns: this backend supports items archive
"""
return True
@classmethod
def has_resuming(cls):
"""Returns whether it supports to resume the fetch process.
:returns: this backend supports items resuming
"""
return False
@staticmethod
def metadata_id(item):
"""Extracts the identifier from a Twitter item."""
return str(item['id_str'])
@staticmethod
def metadata_updated_on(item):
"""Extracts and coverts the update time from a Twitter item.
The timestamp is extracted from 'created_at' field and converted
to a UNIX timestamp.
:param item: item generated by the backend
:returns: a UNIX timestamp
"""
ts = item['created_at']
ts = str_to_datetime(ts)
return ts.timestamp()
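    # For example, a 'created_at' value of "Wed Aug 29 17:12:58 +0000 2018"
    # converts to the UNIX timestamp 1535562778.0.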
@staticmethod
def metadata_category(item):
"""Extracts the category from a Twitter item.
This backend only generates one type of item which is
'tweet'.
"""
return CATEGORY_TWEET
def _init_client(self, from_archive=False):
"""Init client"""
return TwitterClient(self.api_token, self.max_items,
self.sleep_for_rate, self.min_rate_to_sleep, self.sleep_time,
self.archive, from_archive)
class TwitterClient(HttpClient, RateLimitHandler):
"""Twitter API client.
Client for fetching information from the Twitter server
using its REST API v1.1.
:param api_key: key needed to use the API
:param max_items: maximum number of items per request
:param sleep_for_rate: sleep until rate limit is reset
    :param min_rate_to_sleep: minimum rate needed to sleep until
                              it will be reset
:param sleep_time: time to sleep in case
of connection problems
:param archive: an archive to store/read fetched data
:param from_archive: it tells whether to write/read the archive
"""
d
|
Artemkaaas/indy-sdk
|
vcx/wrappers/python3/generate_docs.py
|
Python
|
apache-2.0
| 952 | 0.003151 |
import os
import pydoc
import sys
class DocTree:
def __init__(self, src, dest):
        self.basepath = os.getcwd()
sys.path.append(os.path.join(self.basepath, src))
self.src = src
self.dest = dest
self._make_dest(dest)
self._make_docs(src)
self._move_docs(dest)
def _make_dest(self, dest):
path = os.path.join(self.basepath, dest)
if os.path.isdir(path):
os.rmdir(path)
os.makedirs(path)
def _make_docs(self, src):
        print('making htmls for ' + src)
pydoc.writedocs(src)
print(os.listdir())
def _move_docs(self, dest):
for f in os.listdir():
if f.endswith('.html'):
_dest = os.path.join(dest, f)
os.rename(f, _dest)
def main():
dest = 'docs'
src = 'vcx/api'
src = os.path.join(os.getcwd(), src)
DocTree(src, dest)
if __name__ == '__main__':
main()
|
igor-toga/local-snat
|
neutron/tests/unit/agent/linux/test_iptables_firewall.py
|
Python
|
apache-2.0
| 86,232 | 0.000116 |
# Copyright 2012, Nachi Ueno, NTT MCL, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import mock
from neutron_lib import constants
from oslo_config import cfg
import six
import testtools
from neutron.agent.common import config as a_cfg
from neutron.agent import firewall
from neutron.agent.linux import ipset_manager
from neutron.agent.linux import iptables_comments as ic
from neutron.agent.linux import iptables_firewall
from neutron.common import exceptions as n_exc
from neutron.common import utils
from neutron.conf.agent import securitygroups_rpc as security_config
from neutron.tests import base
from neutron.tests.unit.api.v2 import test_base
_uuid = test_base._uuid
#TODO(mangelajo): replace all 'IPv4', 'IPv6' to constants
FAKE_PREFIX = {'IPv4': '10.0.0.0/24',
'IPv6': 'fe80::/48'}
FAKE_IP = {'IPv4': '10.0.0.1',
'IPv6': 'fe80::1'}
#TODO(mangelajo): replace all '*_sgid' strings for the constants
FAKE_SGID = 'fake_sgid'
OTHER_SGID = 'other_sgid'
_IPv6 = constants.IPv6
_IPv4 = constants.IPv4
RAW_TABLE_OUTPUT = """
# Generated by iptables-save v1.4.21 on Fri Jul 31 16:13:28 2015
*raw
:PREROUTING ACCEPT [11561:3470468]
:OUTPUT ACCEPT [11504:4064044]
:neutron-openvswi-OUTPUT - [0:0]
:neutron-openvswi-PREROUTING - [0:0]
-A PREROUTING -j neutron-openvswi-PREROUTING
-A OUTPUT -j neutron-openvswi-OUTPUT
-A neutron-openvswi-PREROUTING -m physdev --physdev-in qvbe804433b-61 -j CT --zone 1
-A neutron-openvswi-PREROUTING -m physdev --physdev-in tape804433b-61 -j CT --zone 1
-A neutron-openvswi-PREROUTING -m physdev --physdev-in qvb95c24827-02 -j CT --zone 2
-A neutron-openvswi-PREROUTING -m physdev --physdev-in tap95c24827-02 -j CT --zone 2
-A neutron-openvswi-PREROUTING -m physdev --physdev-in qvb61634509-31 -j CT --zone 2
-A neutron-openvswi-PREROUTING -m physdev --physdev-in tap61634509-31 -j CT --zone 2
-A neutron-openvswi-PREROUTING -m physdev --physdev-in qvb8f46cf18-12 -j CT --zone 9
-A neutron-openvswi-PREROUTING -m physdev --physdev-in tap8f46cf18-12 -j CT --zone 9
COMMIT
# Completed on Fri Jul 31 16:13:28 2015
""" # noqa
class BaseIptablesFirewallTestCase(base.BaseTestCase):
def setUp(self):
        super(BaseIptablesFirewallTestCase, self).setUp()
cfg.CONF.register_opts(a_cfg.ROOT_HELPER_OPTS, 'AGENT')
security_config.register_securitygroups_opts()
cfg.CONF.set_override('comment_iptables_rules', False, 'AGENT')
        self.utils_exec_p = mock.patch(
'neutron.agent.linux.utils.execute')
self.utils_exec = self.utils_exec_p.start()
self.iptables_cls_p = mock.patch(
'neutron.agent.linux.iptables_manager.IptablesManager')
iptables_cls = self.iptables_cls_p.start()
self.iptables_inst = mock.Mock()
self.v4filter_inst = mock.Mock()
self.v6filter_inst = mock.Mock()
self.iptables_inst.ipv4 = {'filter': self.v4filter_inst,
'raw': self.v4filter_inst
}
self.iptables_inst.ipv6 = {'filter': self.v6filter_inst,
'raw': self.v6filter_inst
}
iptables_cls.return_value = self.iptables_inst
self.iptables_inst.get_rules_for_table.return_value = (
RAW_TABLE_OUTPUT.splitlines())
self.firewall = iptables_firewall.IptablesFirewallDriver()
self.firewall.iptables = self.iptables_inst
class IptablesFirewallTestCase(BaseIptablesFirewallTestCase):
def _fake_port(self):
return {'device': 'tapfake_dev',
'mac_address': 'ff:ff:ff:ff:ff:ff',
'network_id': 'fake_net',
'fixed_ips': [FAKE_IP['IPv4'],
FAKE_IP['IPv6']]}
def test_prepare_port_filter_with_no_sg(self):
port = self._fake_port()
self.firewall.prepare_port_filter(port)
calls = [mock.call.add_chain('sg-fallback'),
mock.call.add_rule(
'sg-fallback', '-j DROP',
comment=ic.UNMATCH_DROP),
mock.call.remove_chain('sg-chain'),
mock.call.add_chain('sg-chain'),
mock.call.add_chain('ifake_dev'),
mock.call.add_rule('FORWARD',
'-m physdev --physdev-out tapfake_dev '
'--physdev-is-bridged '
'-j $sg-chain', comment=ic.VM_INT_SG),
mock.call.add_rule('sg-chain',
'-m physdev --physdev-out tapfake_dev '
'--physdev-is-bridged '
'-j $ifake_dev',
comment=ic.SG_TO_VM_SG),
mock.call.add_rule(
'ifake_dev',
'-m state --state RELATED,ESTABLISHED -j RETURN',
comment=None),
mock.call.add_rule(
'ifake_dev',
'-m state --state INVALID -j DROP',
comment=None),
mock.call.add_rule(
'ifake_dev',
'-j $sg-fallback', comment=None),
mock.call.add_chain('ofake_dev'),
mock.call.add_rule('FORWARD',
'-m physdev --physdev-in tapfake_dev '
'--physdev-is-bridged '
'-j $sg-chain', comment=ic.VM_INT_SG),
mock.call.add_rule('sg-chain',
'-m physdev --physdev-in tapfake_dev '
'--physdev-is-bridged -j $ofake_dev',
comment=ic.SG_TO_VM_SG),
mock.call.add_rule('INPUT',
'-m physdev --physdev-in tapfake_dev '
'--physdev-is-bridged -j $ofake_dev',
comment=ic.INPUT_TO_SG),
mock.call.add_chain('sfake_dev'),
mock.call.add_rule(
'sfake_dev',
'-s 10.0.0.1/32 -m mac --mac-source FF:FF:FF:FF:FF:FF '
'-j RETURN',
comment=ic.PAIR_ALLOW),
mock.call.add_rule(
'sfake_dev', '-j DROP',
comment=ic.PAIR_DROP),
mock.call.add_rule(
'ofake_dev',
'-s 0.0.0.0/32 -d 255.255.255.255/32 -p udp -m udp '
'--sport 68 --dport 67 -j RETURN',
comment=None),
mock.call.add_rule('ofake_dev', '-j $sfake_dev',
comment=None),
mock.call.add_rule(
'ofake_dev',
'-p udp -m udp --sport 68 --dport 67 -j RETURN',
comment=None),
mock.call.add_rule(
'ofake_dev',
'-p udp -m udp --sport 67 -m udp --dport 68 -j DROP',
comment=None),
mock.call.add_rule(
'ofake_dev',
'-m state --state RELATED,ESTABLISHED -j RETURN',
comment=None),
mock.call.add_rule(
'ofake_dev',
'-m state --state INVALID -j DROP', comment=None),
mock.call.add_rule(
'ofake_dev',
|
libchaos/algorithm-python
|
bit/add_with_op.py
|
Python
|
mit
| 233 | 0.017167 |
def add_without_op(x, y):
while y !=0:
carry = x & y
x = x ^ y
y = carry << 1
print(x)
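# Worked example of the carry/XOR loop for x=5, y=3:
#   carry=5&3=1, x=5^3=6, y=1<<1=2
#   carry=6&2=2, x=6^2=4, y=2<<1=4
#   carry=4&4=4, x=4^4=0, y=4<<1=8
#   carry=0&8=0, x=0^8=8, y=0  -> prints 8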
def main():
x, y = map(int, input().split())
    add_without_op(x, y)
if __name__ == "__main__":
main()
|
NEMO-NOC/NEMOsphere
|
lego5.py
|
Python
|
gpl-2.0
| 14,256 | 0.012556 |
#!/usr/bin/env python
from __future__ import print_function
import os, platform
from argparse import ArgumentParser
import numpy as np
import time
import resource
from mayavi import mlab
from netCDF4 import Dataset
from mpl_toolkits.basemap import Basemap
from numba import jit
@jit
def add_triangles_from_square(x, x1, x2, x3, x4, k):
'''
inserts values of kth and k+1th triangles into array x in place
from face values x1, x2, x3, x4
'''
k1 = k + 1
x[k,0], x[k,1], x[k,2] = x1, x2, x3
x[k1,0], x[k1,1], x[k1,2] = x2, x3, x4
@jit
def get_triangles_j(j, nx, k, t_land, ds_max, lambda_f, phi_f, dep,
x, y, z, t):
'''
inserts coordinates of & col shading for triangles around ij points on j-line:
-- horz face centred on T points at dep[j,i]
    + surrounding vertical faces (only count where neighbouring point is shallower
to avoid double counting)
into max_trianglesx3 arrays x, y, z, t,
starting with the horz face at
x[k,1..3], y[k,1..3], z[k,1..3] and t[k,1..3]
On land points, with dep[j,i]==0 t[k,1..3] set to t_land
'''
jm1, jp1 = j-1, j+1
xx01, xx11, yy01, yy11 = lambda_f[jm1,0], lambda_f[j,0], phi_f[jm1,0],phi_f[j,0]
for i in range(1, nx-1):
im1, ip1 = i-1, i+1
xx00, xx10, yy00, yy10 = xx01, xx11, yy01, yy11
xx01, xx11, yy01, yy11 = lambda_f[jm1,i], lambda_f[j,i], phi_f[jm1,i],phi_f[j,i]
        if abs(xx01 - xx00) + abs(yy01 - yy00) + abs(xx11 - xx10) + abs(yy11 - yy10) > ds_max:
continue
# x & y coordinates of f-points surrounding T-point i,j
# 00 = SW, 10 = NW, 01 = SE, 11 = NE
# do horizontal faces of T-box, zig-zag points SW, NW, SE, NE
# insert x & y for 2-triangles (kth & k+1th)
add_triangles_from_square(x, xx00, xx10, xx01, xx11, k)
add_triangles_from_square(y, yy00, yy10, yy01, yy11, k)
        # .. constant z
dep00 = dep[j,i]
add_triangles_from_square(z, dep00, dep00, dep00, dep00, k)
# color depends on z
if dep00 == 0.:
add_triangles_from_square(t, t_land, t_land, t_land, t_land, k)
else:
add_triangles_from_square(t, dep00, dep00, dep00, dep00, k)
# & increment k by 2
k += 2
# do vertical faces surrounding T-box
for di, dj in ((1,0),(-1,0),(0,1),(0,-1)):
dep01 = dep[j+dj, i+di]
if dep01 > dep00:
# vertical face zig-zag z points:
add_triangles_from_square(z, dep00, dep01, dep00, dep01, k)
# color is shaded
add_triangles_from_square(t, dep00, dep01, dep00, dep01, k)
if di==-1:
# face at u-points, constant i
add_triangles_from_square(x, xx00, xx00, xx10, xx10, k)
add_triangles_from_square(y, yy00, yy00, yy10, yy10, k)
elif di==1:
add_triangles_from_square(x, xx01, xx01, xx11, xx11, k)
add_triangles_from_square(y, yy01, yy01, yy11, yy11, k)
elif dj ==-1:
# face at v-points, constant j
add_triangles_from_square(y, yy00, yy00, yy01, yy01, k)
add_triangles_from_square(x, xx00, xx00, xx01, xx01, k)
elif dj ==1:
add_triangles_from_square(y, yy10, yy10, yy11, yy11, k)
add_triangles_from_square(x, xx10, xx10, xx11, xx11, k)
k += 2
return k
def get_triangles(dep, lambda_f, phi_f, t_land, ds_max):
'''
takes 2-D array of depths dep, assumed to be positioned at j & i values
Creates mesh of triangles covering lego-block topography consistent with dep
Outputs four ntriangles x 3 arrays x, y, z, t where
x(k,1..3), y(k,1..3), z(k,1..3) and t(k,1..3) are the x, y, z and color values for the kth triangle
'''
# arrays in C-order so last index is x
ny,nx = dep.shape
# max_no_triangles is maximum number of triangles ....
# (ny-2)*(nx-2) is npts with all 4 sides available
# factor of 3 for top and 2 sides; factor of 2 since 2 triangles in each face
# add 2*(ny-2+nx-2) since edge interfaces not accounted for
max_no_triangles = (ny-2)*(nx-2)*3*2 + 2*(ny-2+nx-2)
# can iterate through 1st '0th' index of array, to give 4 2d arrays max_triangles x 3
x, y, z, t = np.zeros((4, max_no_triangles, 3), dtype=dep.dtype)
# first array created will be for first (0th) triangle
k = 0
# loop through each j-line of T-points ...
# note range(m,n) = (m, ....n-1)
for j in range(1, ny-1):
# get triangles for all i-points on j-line
k = get_triangles_j(j, nx, k, t_land, ds_max, lambda_f, phi_f, dep, x, y, z, t)
# k is now total no of triangles; chop off unused parts of the arrays & copy ...
x, y, z, t = [a[:k,:].copy() for a in (x, y, z, t)]
return k, x, y, z, t
def wrap_lon(lon):
"""
Ensures longitude is between -180 & 180. Not really necessary.
"""
# Need [] to ensure lon is changed in-place instead of making new variable
lon[...] = (lon[...] + 180.) % 360. - 180.
class Topography(object):
def __init__(self, xs=None, xe=None, ys=None, ye=None,
domain_dir='.', bathymetry_file='bathy_meter.nc', coordinate_file='coordinates.nc',
bottom = 6000., cmap='gist_earth', map2d = None, globe = False, zs_rat = 0.1):
# xem1, yem1 = xe - 1, ye - 1
xem1, yem1 = xe, ye
t1 = time.time()
pathname = os.path.join(domain_dir,bathymetry_file)
with Dataset(pathname) as f:
# print(f.variables.keys())
dep = f.variables['Bathymetry'][ys:ye,xs:xe]
pathname = os.path.join(domain_dir,coordinate_file)
if not os.path.exists(pathname):
pathname = os.path.join(domain_dir,'mesh_hgr.nc')
with Dataset(pathname) as f:
# print(f.variables.keys())
lambda_f = f.variables['glamf'][...,ys:ye,xs:xe].squeeze()
phi_f = f.variables['gphif'][...,ys:ye,xs:xe].squeeze()
t1, t0 = time.time(), t1
print('%10.5f s taken to read in data\n' % (t1 - t0) )
if globe:
# Plug the South Pole if the bathymetry doesn't extend far enough
minlat = phi_f[:,0].min()
if minlat > -89.9 and minlat < -75.:
nj,ni = phi_f.shape
nextra = 10
dy_deg = (minlat + 90.)/nextra
lonfill = np.empty((nj+nextra,ni), dtype=lambda_f.dtype)
latfill = np.empty((nj+nextra,ni), dtype=phi_f.dtype)
depfill = np.empty((nj+nextra,ni), dtype=dep.dtype)
lonfill[nextra:,:] = lambda_f
latfill[nextra:,:] = phi_f
depfill[nextra:,:] = dep
lonfill[:nextra,:] = lambda_f[0,:]
# Add new dimension None to 1D y-array so it can be 'Broadcast' over longitude
latfill[:nextra,:] = np.arange(-90,minlat,dy_deg)[:,None]
depfill[:nextra,:] = 0.0
phi_f, lambda_f, dep = latfill, lonfill, depfill
del latfill, lonfill, depfill
# Ellipsoidal earth
self.rsphere_eq, self.rsphere_pol = 6378137.00, 6356752.3142
dist = self.rsphere_eq + self.rsphere_pol
self.proj = self.globe_proj
elif map2d is not None:
wrap_lon(lambda_f)
lambda_f, phi_f = map2d(lambda_f, phi_f)
# need to scale heights/depths for consistency with picture using horizontal axes i & j
dlam = lambda_f.max() - lambda_f.min()
dphi = phi_f.max() - phi_f.min()
dist = np.sqrt(dlam*dlam + dphi*dphi)
self.map2d = map2d
self.proj = self.map_proj
ny, nx = lambda_f.shape
ds_max = 20.*dist/max(ny,nx)
# ... and convert from depths--> heights
# ... and scale depth of saturated colorscale
zscale = zs_rat*dist/6000.
self.zscale = zscale
dep = -zscale*dep.astype
|
lookout/dd-agent
|
checks.d/mysql.py
|
Python
|
bsd-3-clause
| 62,670 | 0.002266 |
# stdlib
import re
import traceback
from contextlib import closing, contextmanager
from collections import defaultdict
# 3p
import pymysql
try:
import psutil
PSUTIL_AVAILABLE = True
except ImportError:
PSUTIL_AVAILABLE = False
# project
from config import _is_affirmative
from checks import AgentCheck
GAUGE = "gauge"
RATE = "rate"
COUNT = "count"
MONOTONIC = "monotonic_count"
# Vars found in "SHOW STATUS;"
STATUS_VARS = {
# Command Metrics
'Slow_queries': ('mysql.performance.slow_queries', RATE),
'Questions': ('mysql.performance.questions', RATE),
'Queries': ('mysql.performance.queries', RATE),
'Com_select': ('mysql.performance.com_select', RATE),
'Com_insert': ('mysql.performance.com_insert', RATE),
'Com_update': ('mysql.performance.com_update', RATE),
'Com_delete': ('mysql.performance.com_delete', RATE),
'Com_replace': ('mysql.performance.com_replace', RATE),
'Com_load': ('mysql.performance.com_load', RATE),
'Com_insert_select': ('mysql.performance.com_insert_select', RATE),
'Com_update_multi': ('mysql.performance.com_update_multi', RATE),
'Com_delete_multi': ('mysql.performance.com_delete_multi', RATE),
'Com_replace_select': ('mysql.performance.com_replace_select', RATE),
# Connection Metrics
'Connections': ('mysql.net.connections', RATE),
'Max_used_connections': ('mysql.net.max_connections', GAUGE),
'Aborted_clients': ('mysql.net.aborted_clients', RATE),
'Aborted_connects': ('mysql.net.aborted_connects', RATE),
# Table Cache Metrics
'Open_files': ('mysql.performance.open_files', GAUGE),
'Open_tables': ('mysql.performance.open_tables', GAUGE),
# Network Metrics
'Bytes_sent': ('mysql.performance.bytes_sent', RATE),
'Bytes_received': ('mysql.performance.bytes_received', RATE),
# Query Cache Metrics
'Qcache_hits': ('mysql.performance.qcache_hits', RATE),
'Qcache_inserts': ('mysql.performance.qcache_inserts', RATE),
'Qcache_lowmem_prunes': ('mysql.performance.qcache_lowmem_prunes', RATE),
# Table Lock Metrics
'Table_locks_waited': ('mysql.performance.table_locks_waited', GAUGE),
'Table_locks_waited_rate': ('mysql.performance.table_locks_waited.rate', RATE),
# Temporary Table Metrics
'Created_tmp_tables': ('mysql.performance.created_tmp_tables', RATE),
'Created_tmp_disk_tables': ('mysql.performance.created_tmp_disk_tables', RATE),
'Created_tmp_files': ('mysql.performance.created_tmp_files', RATE),
# Thread Metrics
'Threads_connected': ('mysql.performance.threads_connected', GAUGE),
'Threads_running': ('mysql.performance.threads_running', GAUGE),
# MyISAM Metrics
'Key_buffer_bytes_unflushed': ('mysql.myisam.key_buffer_bytes_unflushed', GAUGE),
'Key_buffer_bytes_used': ('mysql.myisam.key_buffer_bytes_used', GAUGE),
'Key_read_requests': ('mysql.myisam.key_read_requests', RATE),
'Key_reads': ('mysql.myisam.key_reads', RATE),
'Key_write_requests': ('mysql.myisam.key_write_requests', RATE),
'Key_writes': ('mysql.myisam.key_writes', RATE),
}
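# Each entry above maps a MySQL status counter to (Datadog metric name,
# submission type); e.g. 'Slow_queries' is reported as the rate metric
# 'mysql.performance.slow_queries'.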
# Possibly from SHOW GLOBAL VARIABLES
VARIABLES_VARS = {
    'Key_buffer_size': ('mysql.myisam.key_buffer_size', GAUGE),
'Key_cache_utilization': ('mysql.performance.key_cache_utilization', GAUGE),
'max_connections': ('mysql.net.max_connections_available', GAUGE),
    'query_cache_size': ('mysql.performance.qcache_size', GAUGE),
'table_open_cache': ('mysql.performance.table_open_cache', GAUGE),
'thread_cache_size': ('mysql.performance.thread_cache_size', GAUGE)
}
INNODB_VARS = {
# InnoDB metrics
'Innodb_data_reads': ('mysql.innodb.data_reads', RATE),
'Innodb_data_writes': ('mysql.innodb.data_writes', RATE),
'Innodb_os_log_fsyncs': ('mysql.innodb.os_log_fsyncs', RATE),
'Innodb_mutex_spin_waits': ('mysql.innodb.mutex_spin_waits', RATE),
'Innodb_mutex_spin_rounds': ('mysql.innodb.mutex_spin_rounds', RATE),
'Innodb_mutex_os_waits': ('mysql.innodb.mutex_os_waits', RATE),
'Innodb_row_lock_waits': ('mysql.innodb.row_lock_waits', RATE),
'Innodb_row_lock_time': ('mysql.innodb.row_lock_time', RATE),
'Innodb_row_lock_current_waits': ('mysql.innodb.row_lock_current_waits', GAUGE),
'Innodb_current_row_locks': ('mysql.innodb.current_row_locks', GAUGE),
'Innodb_buffer_pool_bytes_dirty': ('mysql.innodb.buffer_pool_dirty', GAUGE),
'Innodb_buffer_pool_bytes_free': ('mysql.innodb.buffer_pool_free', GAUGE),
'Innodb_buffer_pool_bytes_used': ('mysql.innodb.buffer_pool_used', GAUGE),
'Innodb_buffer_pool_bytes_total': ('mysql.innodb.buffer_pool_total', GAUGE),
'Innodb_buffer_pool_read_requests': ('mysql.innodb.buffer_pool_read_requests', RATE),
'Innodb_buffer_pool_reads': ('mysql.innodb.buffer_pool_reads', RATE),
'Innodb_buffer_pool_pages_utilization': ('mysql.innodb.buffer_pool_utilization', GAUGE),
}
# Calculated from "SHOW MASTER LOGS;"
BINLOG_VARS = {
'Binlog_space_usage_bytes': ('mysql.binlog.disk_use', GAUGE),
}
# Additional Vars found in "SHOW STATUS;"
# Will collect if [FLAG NAME] is True
OPTIONAL_STATUS_VARS = {
'Binlog_cache_disk_use': ('mysql.binlog.cache_disk_use', GAUGE),
'Binlog_cache_use': ('mysql.binlog.cache_use', GAUGE),
'Handler_commit': ('mysql.performance.handler_commit', RATE),
'Handler_delete': ('mysql.performance.handler_delete', RATE),
'Handler_prepare': ('mysql.performance.handler_prepare', RATE),
'Handler_read_first': ('mysql.performance.handler_read_first', RATE),
'Handler_read_key': ('mysql.performance.handler_read_key', RATE),
'Handler_read_next': ('mysql.performance.handler_read_next', RATE),
'Handler_read_prev': ('mysql.performance.handler_read_prev', RATE),
'Handler_read_rnd': ('mysql.performance.handler_read_rnd', RATE),
'Handler_read_rnd_next': ('mysql.performance.handler_read_rnd_next', RATE),
'Handler_rollback': ('mysql.performance.handler_rollback', RATE),
'Handler_update': ('mysql.performance.handler_update', RATE),
'Handler_write': ('mysql.performance.handler_write', RATE),
'Opened_tables': ('mysql.performance.opened_tables', RATE),
'Qcache_total_blocks': ('mysql.performance.qcache_total_blocks', GAUGE),
'Qcache_free_blocks': ('mysql.performance.qcache_free_blocks', GAUGE),
'Qcache_free_memory': ('mysql.performance.qcache_free_memory', GAUGE),
'Qcache_not_cached': ('mysql.performance.qcache_not_cached', RATE),
'Qcache_queries_in_cache': ('mysql.performance.qcache_queries_in_cache', GAUGE),
'Select_full_join': ('mysql.performance.select_full_join', RATE),
'Select_full_range_join': ('mysql.performance.select_full_range_join', RATE),
'Select_range': ('mysql.performance.select_range', RATE),
'Select_range_check': ('mysql.performance.select_range_check', RATE),
'Select_scan': ('mysql.performance.select_scan', RATE),
'Sort_merge_passes': ('mysql.performance.sort_merge_passes', RATE),
'Sort_range': ('mysql.performance.sort_range', RATE),
'Sort_rows': ('mysql.performance.sort_rows', RATE),
'Sort_scan': ('mysql.performance.sort_scan', RATE),
'Table_locks_immediate': ('mysql.performance.table_locks_immediate', GAUGE),
'Table_locks_immediate_rate': ('mysql.performance.table_locks_immediate.rate', RATE),
'Threads_cached': ('mysql.performance.threads_cached', GAUGE),
'Threads_created': ('mysql.performance.threads_created', MONOTONIC)
}
# Status Vars added in Mysql 5.6.6
OPTIONAL_STATUS_VARS_5_6_6 = {
'Table_open_cache_hits': ('mysql.performance.table_cache_hits', RATE),
'Table_open_cache_misses': ('mysql.performance.table_cache_misses', RATE),
}
# Will collect if [extra_innodb_metrics] is True
OPTIONAL_INNODB_VARS = {
'Innodb_active_transactions': ('mysql.innodb.active_transactions', GAUGE),
'Innodb_buffer_pool_bytes_data': ('mysql.innodb.buffer_pool_data', GAUGE),
'Innodb_buffer_pool_pages_data': ('mysql.innodb.buffer_pool_pages_data', GAUGE),
'Innodb_buffer_pool_pages_dirty': ('mysql.innodb.buffer_pool_pages_dirty', GAUGE),
'Innodb_buffer_pool_pages_flushed': ('mysql.innodb.buffer_pool_pages_flushed', RATE),
'Innodb_buffer_pool_pages_free
|
quadrismegistus/prosodic
|
meters/strength_and_resolution.py
|
Python
|
gpl-3.0
| 10,457 | 0.005929 |
############################################
# [config.py]
# CONFIGURATION SETTINGS FOR A PARTICULAR METER
#
#
# Set the long-form name of this meter
name = "*PEAK only"
#
# [Do not remove or uncomment the following line]
Cs={}
############################################
############################################
# STRUCTURE PARAMETERS
#
# Parameters subject to conscious control by the poet. Kiparsky & Hanson (1996)
# call these "formally independent of phonological structure." By contrast,
# "realization parameters"--e.g., the size of a metrical position, which positions
# are regulated, and other constraints--"determine the way the structure is
# linguistically manifested, and are dependent on the prosodic givens of languge."
#
#
####
# [Number of feet in a line]
#
#Cs['number_feet!=2'] = 1 # require dimeter
#Cs['number_feet!=3'] = 1 # require trimeter
#Cs['number_feet!=4'] = 1 # require tetrameter
#Cs['number_feet!=5'] = 1 # require pentameter
#Cs['number_feet!=6'] = 1 # require hexameter
#Cs['number_feet!=7'] = 1 # require heptameter
#
#
####
# [Headedness of the line]
#
#Cs['headedness!=falling'] = 1 # require a falling rhythm (e.g. trochaic, dactylic)
#Cs['headedness!=rising'] = 1 # require a rising rhythm (e.g., iambic, anapestic)
#
############################################
############################################
# REALIZATION PARAMETERS
#
# All subsequent constraints can be seen as "realization parameters."
# See note to "structure parameters" above for more information.
#
#############################################
# METRICAL PARSING: POSITION SIZE
#
# Select how many syllables are at least *possible* in strong or weak positions
# cf. Kiparsky & Hanson's "position size" parameter ("Parametric Theory" 1996)
#
#
######
# [Maximum position size]
#
# The maximum number of syllables allowed in strong metrical positions (i.e. "s")
maxS=2
#
# The maximum number of syllables allowed in weak metrical positions (i.e. "w")
maxW=2
#
#
######
# [Minimum position size]
#
# (Recommended) Positions are at minimum one syllable in size
splitheavies=0
#
# (Unrecommended) Allow positions to be as small as a single mora
# i.e. (a split heavy syllable can straddle two metrical positions)
#splitheavies=1
############################################
############################################
# METRICAL PARSING: METRICAL CONSTRAINTS
#
# Here you can configure the constraints used by the metrical parser.
# Each constraint is expressed in the form:
# Cs['(constraint name)']=(constraint weight)
# Constraint weights do not affect harmonic bounding (i.e. which parses
# survive as possibilities), but they do affect how those possibilities
# are sorted to select the "best" parse.
#
#
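# For example (illustrative only), a weighted constraint entry looks like:
#   Cs['stress.w=>-p']=2
# which, when uncommented, penalizes stressed syllables in weak positions with
# weight 2; the weight affects the ranking of surviving parses, not bounding.
#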
######
# [Constraints regulating the 'STRENGTH' of a syllable]
#
# A syllable is strong if it is a peak in a polysyllabic word:
# the syllables in 'liberty', stressed-unstressed-unstressed,
# are, in terms of *strength*, strong-weak-neutral, because
# the first syllable is more stressed than its neighbor;
# the second syllable less stressed; and the third equally stressed.
#
###
# [Stricter versions:]
#
# A strong metrical position should not contain any weak syllables ("troughs"):
#Cs['strength.s=>-u']=1
#
# A weak metrical position may not contain any strong syllables ("peaks"):
# [Kiparsky and Hanson believe this is Shakespeare's meter]
Cs['strength.w=>-p']=1
#
###
# [Laxer versions:]
#
# A strong metrical position should contain at least one strong syllable:
#Cs['strength.s=>p']=3
#
# A weak metrical position should contain at least one weak syllable:
#Cs['strength.w=>u']=3
#
#
#
######
# [Constraints regulating the STRESS of a syllable]
#
###
# [Stricter versions:]
#
# A strong metrical position should not contain any unstressed syllables:
# [Kiparsky and Hanson believe this is Hopkins' meter]
#Cs['stress.s=>-u']=1
#
# A weak metrical position should not contain any stressed syllables:
#Cs['stress.w=>-p']=1
#
###
# [Laxer versions:]
#
# A strong metrical position should contain at least one stressed syllable:
#Cs['stress.s=>p']=2
#
# A weak metrical position must contain at least one unstressed syllable;
#Cs['stress.w=>u']=2
#
#
#
######
# [Constraints regulating the WEIGHT of a syllable]
#
# The weight of a syllable is its "quantity": short or long.
# These constraints are designed for "quantitative verse",
# as for example in classical Latin and Greek poetry.
#
###
# [Stricter versions:]
#
# A strong metrical position should not contain any light syllables:
#Cs['weight.s=>-u']=2
#
# A weak metrical position should not contain any heavy syllables:
#Cs['weight.w=>-p']=2
#
###
# [Laxer versions:]
#
# A strong metrical position should contain at least one heavy syllable:
#Cs['weight.s=>p']=2
#
# A weak metrical position must contain at least one light syllable;
#Cs['weight.w=>u']=2
#
#
#
######
# [Constraints regulating what's permissible as a DISYLLABIC metrical position]
# [(with thanks to Sam Bowman, who programmed many of these constraints)]
#
###
# [Based on weight:]
#
# A disyllabic metrical position should not contain more than a minimal foot:
# i.e. W-resolution requires first syllable to be light and unstressed.
Cs['footmin-w-resolution']=1
#
#
# A disyllabic metrical position should not contain more than a minimal foot:
# (i.e. allowed positions are syllables weighted light-light or light-heavy)
#Cs['footmin-noHX']=1000
#
#
# A disyllabic STRONG metrical position should not contain more than a minimal foot:
# (i.e. allowed positions are syllables weighted light-light or light-heavy)
#Cs['footmin-s-noHX']=1
#
# A disyllabic metrical position should be syllables weighted light-light:
#Cs['footmin-noLH-noHX']=1
#
###
# [Categorical:]
#
# A metrical position should not contain more than one syllable:
# [use to discourage disyllabic positions]
#Cs['footmin-none']=1
#
# A strong metrical position should not contain more than one syllable:
#Cs['footmin-no-s']=1
#
# A weak metrical position should not contain more than one syllable:
#Cs['footmin-no-w']=1
#
# A metrical position should not contain more than one syllable,
# *unless* that metrical position is the *first* or *second* in the line:
# [use to discourage disyllabic positions, but not trochaic inversions,
# or an initial "extrametrical" syllable]
#Cs['footmin-none-unless-in-first-two-positions']=1
#
# A metrical position should not contain more than one syllable,
# *unless* that metrical position is the *second* in the line:
# [use to discourage disyllabic positions, but not trochaic inversions]
#Cs['footmin-none-unless-in-second-position']=1
#
# A strong metrical position should not contain more than one syllable,
# *unless* it is preceded by a disyllabic *weak* metrical position:
# [use to implement the metrical pattern described by Derek Attridge,
# in The Rhythms of English Poetry (1982), and commented on by Bruce Hayes
# in his review of the book in Language 60.1 (1984).
# e.g. Shakespeare's "when.your|SWEET.IS|ue.your|SWEET.FORM|should|BEAR"
# [this implementation is different in that it only takes into account
# double-weak beats *preceding* -- due to the way in which the parser
# throws away bounded parses as it goes, it might not be possible for now
# to write a constraint referencing future positions]
#Cs['footmin-no-s-unless-preceded-by-ww']=10
# [The version that does reference future positions; but appears to be unstable]:
#Cs['attridge-ss-not-by-ww']=10
#
###
# [For disyllabic positions crossing a word boundary...
# (i.e. having two syllables, each from a different word)...
#
# ...allow only F-resolutions:
# (both words must be function words and be in a weak metrical position)
Cs['footmin-f-resolution']=1
#
# ...it should never cross a word boundary to begin with:
#Cs['footmin-wordbound']=1000
#
# ...both words should be function words:
#Cs['footmin-wordbound-bothnotfw']=1
#
# ...at least one word should be a function word:
#Cs['footmin-wordbound-neitherfw']=1
#
# ...the left-hand syllable should be a function-word:
#Cs['footmin-wordbound-leftfw']=1
#
# ...the right-hand syllable should be a function word:
#Cs['footmin-wordbound
|
MehmetNuri/ozgurlukicin
|
feedjack/fjlib.py
|
Python
|
gpl-3.0
| 9,326 | 0.006005 |
# -*- coding: utf-8 -*-
"""
feedjack
Gustavo Picón
fjlib.py
"""
from django.conf import settings
from django.db import connection
from django.core.paginator import Paginator, InvalidPage, PageNotAnInteger
from django.http import Http404
from django.utils.encoding import smart_unicode
from oi.feedjack import models
from oi.feedjack import fjcache
# this is taken from django, it was removed in r8191
class ObjectPaginator(Paginator):
"""
Legacy ObjectPaginator class, for backwards compatibility.
Note that each method on this class that takes page_number expects a
zero-based page number, whereas the new API (Paginator/Page) uses one-based
page numbers.
"""
def __init__(self, query_set, num_per_page, orphans=0):
Paginator.__init__(self, query_set, num_per_page, orphans)
#import warnings
#warnings.warn("The ObjectPaginator is deprecated. Use django.core.paginator.Paginator instead.", DeprecationWarning)
# Keep these attributes around for backwards compatibility.
self.query_set = query_set
self.num_per_page = num_per_page
self._hits = self._pages = None
def validate_page_number(self, page_number):
try:
page_number = int(page_number) + 1
except ValueError:
raise PageNotAnInteger
return self.validate_number(page_number)
def get_page(self, page_number):
try:
page_number = int(page_number) + 1
except ValueError:
raise PageNotAnInteger
return self.page(page_number).object_list
def has_next_page(self, page_number):
return page_number < self.pages - 1
def has_previous_page(self, page_number):
return page_number > 0
def first_on_page(self, page_number):
"""
Returns the 1-based index of the first object on the given page,
relative to total objects found (hits).
"""
page_number = self.validate_page_number(page_number)
return (self.num_per_page * (page_number - 1)) + 1
def last_on_page(self, page_number):
"""
Returns the 1-based index of the last object on the given page,
relative to total objects found (hits).
"""
        page_number = self.validate_page_number(page_number)
if page_number == self.num_pages:
return self.count
return page_number * self.num_per_page
# The old API called it "hits" instead of "count".
hits = Paginator.count
# The old API called it "pages" instead of "num_pages".
pages = Paginator.num_pages
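# Illustrative sketch (not from the original module) of the zero-based legacy
# API, assuming a hypothetical queryset of 25 posts paginated 10 per page:
#
#   paginator = ObjectPaginator(Post.objects.all(), 10)
#   paginator.get_page(0)        # objects 1-10 (first page)
#   paginator.first_on_page(0)   # -> 1
#   paginator.last_on_page(0)    # -> 10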
def sitefeeds(siteobj):
""" Returns the active feeds of a site.
"""
return siteobj.subscriber_set.filter(is_active=True).select_related()
#return [subscriber['feed'] \
# for subscriber \
# in siteobj.subscriber_set.filter(is_active=True).values('feed')]
def getquery(query):
""" Performs a query and get the results.
"""
try:
conn = connection.cursor()
conn.execute(query)
data = conn.fetchall()
conn.close()
except:
data = []
return data
def get_extra_content(site, sfeeds_ids, ctx):
""" Returns extra data useful to the templates.
"""
# get the subscribers' feeds
if sfeeds_ids:
basefeeds = models.Feed.objects.filter(id__in=sfeeds_ids)
try:
ctx['feeds'] = basefeeds.order_by('name').select_related()
except:
ctx['feeds'] = []
# get the last_checked time
try:
ctx['last_modified'] = basefeeds.filter(\
last_checked__isnull=False).order_by(\
'-last_checked').select_related()[0].last_checked.ctime()
except:
ctx['last_modified'] = '??'
else:
ctx['feeds'] = []
ctx['last_modified'] = '??'
ctx['site'] = site
ctx['media_url'] = '%s/feedjack/%s' % (settings.MEDIA_URL, site.template)
def get_posts_tags(object_list, sfeeds_obj, user_id, tag_name):
""" Adds a qtags property in every post object in a page.
Use "qtags" instead of "tags" in templates to avoid innecesary DB hits.
"""
tagd = {}
user_obj = None
tag_obj = None
tags = models.Tag.objects.extra(\
select={'post_id':'%s.%s' % (\
connection.ops.quote_name('feedjack_post_tags'), \
connection.ops.quote_name('post_id'))}, \
tables=['feedjack_post_tags'], \
where=[\
'%s.%s=%s.%s' % (\
connection.ops.quote_name('feedjack_tag'), \
connection.ops.quote_name('id'), \
connection.ops.quote_name('feedjack_post_tags'), \
connection.ops.quote_name('tag_id')), \
'%s.%s IN (%s)' % (\
connection.ops.quote_name('feedjack_post_tags'), \
connection.ops.quote_name('post_id'), \
', '.join([str(post.id) for post in object_list]))])
for tag in tags:
if tag.post_id not in tagd:
tagd[tag.post_id] = []
tagd[tag.post_id].append(tag)
if tag_name and tag.name == tag_name:
tag_obj = tag
subd = {}
for sub in sfeeds_obj:
subd[sub.feed.id] = sub
for post in object_list:
if post.id in tagd:
post.qtags = tagd[post.id]
else:
post.qtags = []
post.subscriber = subd[post.feed.id]
if user_id and int(user_id) == post.feed.id:
user_obj = post.subscriber
return user_obj, tag_obj
def getcurrentsite(http_post, path_info, query_string):
""" Returns the site id and the page cache key based on the request.
"""
url = u'http://%s/%s' % (smart_unicode(http_post.rstrip('/')), \
smart_unicode(path_info.lstrip('/')))
pagecachekey = '%s?%s' % (smart_unicode(path_info), \
smart_unicode(query_string))
hostdict = fjcache.hostcache_get()
if not hostdict:
hostdict = {}
if url not in hostdict:
default, ret = None, None
for site in models.Site.objects.all():
if url.startswith(site.url):
ret = site
break
if not default or site.default_site:
default = site
if not ret:
if default:
ret = default
else:
# Somebody is requesting something, but the user didn't create
# a site yet. Creating a default one...
ret = models.Site(name='Default Feedjack Site/Planet', \
url='www.feedjack.org', \
title='Feedjack Site Title', \
description='Feedjack Site Description. ' \
'Please change this in the admin interface.')
ret.save()
hostdict[url] = ret.id
fjcache.hostcache_set(hostdict)
return hostdict[url], pagecachekey
def get_paginator(site, sfeeds_ids, page=0, tag=None, user=None):
""" Returns a paginator object and a requested page from it.
"""
if tag:
try:
localposts = models.Tag.objects.get(name=tag).post_set.filter(\
feed__in=sfeeds_ids)
except:
raise Http404
else:
localposts = models.Post.objects.filter(feed__in=sfeeds_ids)
if user:
try:
localposts = localposts.filter(feed=user)
except:
raise Http404
if site.order_posts_by == 2:
localposts = localposts.order_by('-date_created', '-date_modified')
else:
localposts = localposts.order_by('-date_modified')
paginator = ObjectPaginator(localposts.select_related(), \
site.posts_per_page)
try:
object_list = paginator.get_page(page)
except InvalidPage:
if page == 0:
object_list = []
else:
raise Http404
return (paginator, object_list)
def page_context(request, site, tag=None, user_id=None, sfeeds=None):
""" Returns the context dictionary for a page view.
"""
sfeeds_obj, sfeeds_ids = sfeeds
try:
page = int(request.GET.get('page', 0))
except ValueError:
page = 0
paginator, object_list = get_paginator(site, sfeeds_ids, \
page=page, tag=tag, user=user_
|
Kurpilyansky/street-agitation-telegram-bot
|
manage.py
|
Python
|
gpl-3.0
| 818 | 0.001222 |
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "street_agitation_bot.settings")
try:
from django.core.management import execute_from_command_line
except ImportError:
# The above import may fail for some other reason. Ensure that the
# issue is really that Django is missing to avoid masking other
# exceptions on Python 2.
try:
import django
except ImportError:
raise ImportError(
"Couldn't import Django. Are you sure it'
|
s installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to
|
activate a virtual environment?"
)
raise
execute_from_command_line(sys.argv)
|
ml6973/Course
|
tf-hands-on/slim/python/slim/nets/alexnet_test.py
|
Python
|
apache-2.0
| 5,839 | 0.008392 |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for slim.nets.alexn
|
et."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorflow.contrib.slim.nets import alexnet
slim = tf.contrib.slim
class AlexnetV2Test(tf.test.TestCase):
def testBuild(self):
batch_size = 5
height, width = 224, 224
num_classes = 1000
    with self.test_session():
inputs = tf.random_uniform((batch_size, height, width, 3))
logits, _ = alexnet.alexnet_v2(inputs, num_classes)
self.assertEquals(logits.op.name, 'alexnet_v2/fc8/squeezed')
self.assertListEqual(logits.get_shape().as_list(),
[batch_size, num_classes])
def testFullyConvolutional(self):
batch_size = 1
height, width = 300, 400
num_classes = 1000
with self.test_session():
inputs = tf.random_uniform((batch_size, height, width, 3))
logits, _ = alexnet.alexnet_v2(inputs, num_classes, spatial_squeeze=False)
self.assertEquals(logits.op.name, 'alexnet_v2/fc8/BiasAdd')
self.assertListEqual(logits.get_shape().as_list(),
[batch_size, 4, 7, num_classes])
def testEndPoints(self):
batch_size = 5
height, width = 224, 224
num_classes = 1000
with self.test_session():
inputs = tf.random_uniform((batch_size, height, width, 3))
_, end_points = alexnet.alexnet_v2(inputs, num_classes)
expected_names = ['alexnet_v2/conv1',
'alexnet_v2/pool1',
'alexnet_v2/conv2',
'alexnet_v2/pool2',
'alexnet_v2/conv3',
'alexnet_v2/conv4',
'alexnet_v2/conv5',
'alexnet_v2/pool5',
'alexnet_v2/fc6',
'alexnet_v2/fc7',
'alexnet_v2/fc8'
]
self.assertSetEqual(set(end_points.keys()), set(expected_names))
def testModelVariables(self):
batch_size = 5
height, width = 224, 224
num_classes = 1000
with self.test_session():
inputs = tf.random_uniform((batch_size, height, width, 3))
alexnet.alexnet_v2(inputs, num_classes)
expected_names = ['alexnet_v2/conv1/weights',
'alexnet_v2/conv1/biases',
'alexnet_v2/conv2/weights',
'alexnet_v2/conv2/biases',
'alexnet_v2/conv3/weights',
'alexnet_v2/conv3/biases',
'alexnet_v2/conv4/weights',
'alexnet_v2/conv4/biases',
'alexnet_v2/conv5/weights',
'alexnet_v2/conv5/biases',
'alexnet_v2/fc6/weights',
'alexnet_v2/fc6/biases',
'alexnet_v2/fc7/weights',
'alexnet_v2/fc7/biases',
'alexnet_v2/fc8/weights',
'alexnet_v2/fc8/biases',
]
model_variables = [v.op.name for v in slim.get_model_variables()]
self.assertSetEqual(set(model_variables), set(expected_names))
def testEvaluation(self):
batch_size = 2
height, width = 224, 224
num_classes = 1000
with self.test_session():
eval_inputs = tf.random_uniform((batch_size, height, width, 3))
logits, _ = alexnet.alexnet_v2(eval_inputs, is_training=False)
self.assertListEqual(logits.get_shape().as_list(),
[batch_size, num_classes])
predictions = tf.argmax(logits, 1)
self.assertListEqual(predictions.get_shape().as_list(), [batch_size])
def testTrainEvalWithReuse(self):
train_batch_size = 2
eval_batch_size = 1
train_height, train_width = 224, 224
eval_height, eval_width = 300, 400
num_classes = 1000
with self.test_session():
train_inputs = tf.random_uniform(
(train_batch_size, train_height, train_width, 3))
logits, _ = alexnet.alexnet_v2(train_inputs)
self.assertListEqual(logits.get_shape().as_list(),
[train_batch_size, num_classes])
tf.get_variable_scope().reuse_variables()
eval_inputs = tf.random_uniform(
(eval_batch_size, eval_height, eval_width, 3))
logits, _ = alexnet.alexnet_v2(eval_inputs, is_training=False,
spatial_squeeze=False)
self.assertListEqual(logits.get_shape().as_list(),
[eval_batch_size, 4, 7, num_classes])
logits = tf.reduce_mean(logits, [1, 2])
predictions = tf.argmax(logits, 1)
self.assertEquals(predictions.get_shape().as_list(), [eval_batch_size])
def testForward(self):
batch_size = 1
height, width = 224, 224
with self.test_session() as sess:
inputs = tf.random_uniform((batch_size, height, width, 3))
logits, _ = alexnet.alexnet_v2(inputs)
sess.run(tf.initialize_all_variables())
output = sess.run(logits)
self.assertTrue(output.any())
if __name__ == '__main__':
tf.test.main()
|
tcstewar/finger_gnosis
|
pointer.py
|
Python
|
gpl-2.0
| 16,229 | 0.00875 |
import numpy as np
import nengo
import ctn_benchmark
# define the inputs when doing number comparison task
class NumberExperiment:
def __init__(self, p):
self.p = p
self.pairs = []
self.order = []
rng = np.random.RandomState(seed=p.seed)
for i in range(1, 10):
for j in range(i + 1, 10):
order = rng.choice([-1, 1])
self.order.append(order)
if order < 0:
self.pairs.append((i, j))
else:
self.pairs.append((j, i))
rng.shuffle(self.pairs)
#self.pairs = self.pairs[:3]
self.trial_time = 1.0
self.T = len(self.pairs) * self.trial_time
def input0(self, t):
return [0]*self.p.pointer_count
def display(self, t):
index = int(t / self.trial_time)
t = t % self.trial_time
a, b = self.pairs[index % len(self.pairs)]
if 0.1<t<0.2:
self.display.im_func._nengo_html_ = '<h1>%d</h1>' % a
return
if 0.3<t<0.4:
self.display.im_func._nengo_html_ = '<h1>%d</h1>' % b
return
self.display.im_func._nengo_html_ = ''
def input1(self, t):
index = int(t / self.trial_time)
t = t % self.trial_time
a, b = self.pairs[index % len(self.pairs)]
if 0.1<t<0.2:
return [a * 0.1 - 1]
if 0.3<t<0.4:
return [b * 0.1 - 1]
return [0]
def pointer_source(self, t):
return [0, 1]
def pointer_target(self, t):
t = t % self.trial_time
v = [0]*self.p.pointer_count
if 0.1<t<0.2: v[0]=1
if 0.3<t<0.4: v[1]=1
return v
def report_finger(self, t):
return [0]
def report_compare(self, t):
t = t % self.trial_time
if 0.5<t<self.trial_time:
return [1]
else:
return [0]
def memory_clear(self, t):
t = t % self.trial_time
if 1.0 - self.p.time_clear_mem < t < 1.0:
return [1]
else:
return [0]
# define the inputs when doing the finger touching task
class FingerTouchExperiment:
def __init__(self, p):
self.p = p
self.pairs = []
rng = np.random.RandomState(seed=p.seed)
for i in range(self.p.pointer_count):
for j in range(i + 1, self.p.pointer_count):
self.pairs.append((i, j))
rng.shuffle(self.pairs)
self.trial_time = 1.0
self.T = len(self.pairs) * self.trial_time
def input0(self, t):
r=[0]*self.p.pointer_count
index = int(t / self.trial_time)
t = t % self.trial_time
if 0.1<t<0.2:
for i in self.pairs[index]:
r[i]=1
return r
def display(self, t):
self.display.im_func._nengo_html_ = ''
def input1(self, t):
return [0]
def pointer_source(self, t):
return [1,0]
def pointer_target(self, t):
return [1]*self.p.pointer_count
def report_finger(self, t):
t = t % self.trial_time
if 0.3<t<1.0:
return [1]
else:
return [0]
def report_compare(self, t):
return [0]
def memory_clear(self, t):
t = t % self.trial_time
if 1.0 - self.p.time_clear_mem < t < 1.0:
return [1]
else:
return [0]
class FingerGnosis(ctn_benchmark.Benchmark):
def params(self):
self.default('number of input areas', input_count=2)
self.default('neurons for input', N_input=200)
self.default('neurons per pointer', N_pointer=400)
self.default('neurons per decoded reference', N_reference=1000)
self.default('neurons for memory', N_memory=2000)
self.default('neurons for comparison', N_compare=400)
self.default('neurons for reporting', N_report=100)
self.default('number of pointers', pointer_count=3)
self.default('memory synapse time', memory_synapse=0.1)
self.default('clear memory time', time_clear_mem=0.1)
self.default('crosstalk', crosstalk=0.2)
self.default('task', task='compare')
self.default('evidence scale', evidence_scale=1.0)
def model(self, p):
model = nengo.Network()
if p.task == 'compare':
self.exp = NumberExperiment(p=p)
elif p.task == 'fingers':
self.exp = FingerTouchExperiment(p=p)
with model:
input0 = nengo.Node(self.exp.input0)
input1 = nengo.Node(self.exp.input1)
if hasattr(self.exp, 'display'):
display = nengo.Node(self.exp.display)
pointer_source = nengo.Node(self.exp.pointer_source)
            pointer_target = nengo.Node(self.exp.pointer_target)
report_finger = nengo.Node(self.exp.report_finger)
report_compare = nengo.Node(self.exp.report_compare)
memory_clear = nengo.Node(self.exp.memory_clear)
            # create neural models for the two input areas
# (fingers and magnitude)
area0 = nengo.Ensemble(p.N_input*p.pointer_count, p.pointer_count,
radius=np.sqrt(p.pointer_count),
label='area0')
area1 = nengo.Ensemble(p.N_input, 1, label='area1')
nengo.Connection(input0, area0)
nengo.Connection(input1, area1)
# define the connections to create the pointers
def matrix(n,m,pre=None,post=None,value=1):
m=[[0]*n for i in range(m)]
if pre is None: pre=range(n)
if post is None: post=range(m)
for i in range(max(len(pre),len(post))):
m[post[i%len(post)]][pre[i%len(pre)]]=value
return m
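            # For example, matrix(2, 3, post=[0, 2]) yields [[1, 0], [0, 0], [0, 1]]:
            # a 3x2 transform routing input 0 to row 0 and input 1 to row 2.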
pointers = nengo.Network(label='pointers')
with pointers:
for i in range(p.pointer_count):
nengo.Ensemble(p.N_pointer,
dimensions = p.input_count*2+1,
radius = np.sqrt(p.input_count*2+1),
label='%d' % i)
for i in range(p.pointer_count):
pointer = pointers.ensembles[i]
nengo.Connection(pointer_source, pointer,
transform=matrix(
p.input_count, p.input_count*2+1,
post=[k*2 for k in range(p.input_count)]))
nengo.Connection(pointer_target,pointer,
transform=matrix(p.pointer_count,
p.input_count*2+1,
pre=[i],
post=[p.input_count*2]))
nengo.Connection(area0, pointer,
transform=matrix(p.pointer_count,
p.input_count*2+1,
pre=[i],post=[1]))
nengo.Connection(area1, pointer,
transform=matrix(1,p.input_count*2+1,
pre=[0],post=[3]))
# define the connections to extract the current value
# from the pointers
def ref_func(x):
if x[-1]<0.5: return 0
sum=0
for i in range(p.input_count):
if x[2*i]>0.5: sum+=x[2*i+1]
return sum
basis=[]
for i in range(p.pointer_count):
b=[0]*p.pointer_count
b[i]=1
basis.append(b)
b=[0]*p.pointer_count
b[i]=-1
basis.append(b)
reference=nengo.Ensemble(p.N_reference,p.pointer_count,
radius=np.sqrt(p.pointer_count),
encoders=nengo.dists.Choice(basis),
intercepts=nengo.dists.Uniform(0.1,0.9),
|
ZeX2/TWTools
|
CustomDialogs.py
|
Python
|
gpl-3.0
| 3,155 | 0.005071 |
from PySide2 import QtGui, QtCore, QtWidgets
from design import SidUi, DdUi
from ServersData import ServersDownloadThread, servers
import sys
class SpeedInputDialog(QtWidgets.QDialog, SidUi):
def __init__(self):
QtWidgets.QDialog.__init__(self)
self.setupUi()
def get_data(self):
self.accept()
return self.world_speedBox.value(), self.unit_speedBox.value()
def showEvent(self, event):
geom = self.frameGeometry()
geom.moveCenter(QtGui.QCursor.pos())
self.setGeometry(geom)
class ServersDownloadDialog(QtWidgets.QDialog, DdUi):
def __init__(self, servers_json_path):
QtWidgets.QDialog.__init__(self)
self.downloaded = False
self.servers_amount = len(servers)
self.setupUi()
self.servers_json_path = servers_json_path
self.servers_download_function()
def servers_download_function(self):
self.get_servers_download_thread = ServersDownloadThread(self.servers_json_path)
self.connect(self.get_servers_download_thread, QtCore.SIGNAL("update_progress_text(PyObject)"), self.update_progress_text)
self.connect(self.get_servers_download_thread, QtCore.SIGNAL("update_progress_bar(PyObject)"), self.update_progress_bar)
        self.connect(self.get_servers_download_thread, QtCore.SIGNAL("update_button()"), self.update_button)
self.connect(self.get_servers_download_thread, QtCore.SIGNAL("download_error(PyObject)"), self.download_error)
self.get_servers_download_thread.start()
def update_progress_text(self, text):
self.progress_text.append(text)
    def update_progress_bar(self, value):
self.progress_bar.setValue(value)
def update_button(self):
self.horizontalLayout.removeWidget(self.cancelButton)
self.cancelButton.deleteLater()
self.cancelButton = None
self.downloaded = True
self.okButton = QtWidgets.QPushButton("Ok")
self.horizontalLayout.addWidget(self.okButton)
self.okButton.clicked.connect(self.ok_function)
def cancel_function(self):
reply = QtWidgets.QMessageBox.question(self, 'Message',
"Are you sure that you want to cancel downloading? This will exit the program.", QtWidgets.QMessageBox.Yes, QtWidgets.QMessageBox.No)
if reply == QtWidgets.QMessageBox.Yes:
sys.exit()
def closeEvent(self, event):
if self.downloaded:
return event.accept()
reply = QtWidgets.QMessageBox.question(self, 'Message',
"The server config with the worlds is downloading, would you like to exit the program anyway?", QtWidgets.QMessageBox.Yes, QtWidgets.QMessageBox.No)
if reply == QtWidgets.QMessageBox.Yes:
event.accept()
sys.exit()
else:
event.ignore()
def ok_function(self):
self.close()
def download_error(self, error_text):
QtWidgets.QMessageBox.critical(self, "Download Error", error_text)
sys.exit()
|
jsilhan/rpg
|
rpg/plugins/lang/c.py
|
Python
|
gpl-2.0
| 2,122 | 0 |
from rpg.plugin import Plugin
from rpg.command import Command
from rpg.utils import path_to_str
from re import compile
from subprocess import CalledProcessError
import logging
class CPlugin(Plugin):
EXT_CPP = [r"cc", r"cxx", r"cpp", r"c\+\+", r"ii", r"ixx",
r"ipp", r"i\+\+", r"hh", r"hxx", r"hpp", r"h\+\+",
r"c", r"h"]
def patched(self, project_dir, spec, sack):
""" Finds dependencies via makedepend - This is not garanteed to be
all of them. Makedepend uses macro preprocessor and if it throws
and error makedepend didn't print deps. """
out = Command([
"find " + path_to_str(project_dir) + " -name " +
" -o -name ".join(
["'*." + ex + "'" for ex in self.EXT_CPP]
)
]).execute()
cc_makedep = ""
cc_included_files = []
for _f in out.splitlines():
try:
cc_makedep = Command("makedepend -w 1000 " + str(_f) +
" -f- 2>/dev/null").execute()
except CalledProcessError as e:
logging.warn(str(e.cmd) + "\n" + str(e.output))
continue
cc_included_files += [
s for s in cc_makedep.split()
                if (s.startswith("/usr") or s.startswith("/include")) and str(project_dir) not in s]
spec.required_files.update(cc_included_files)
spec.build_required_files.update(cc_included_files)
MOCK_C_ERR = compile(r"fatal error\: ([^:]*\.[^:]*)\: "
r"No such file or directory")
def mock_recover(self, log, spec):
""" This find dependencies makedepend didn't find. """
for err in log:
_missing = self.MOCK_C_ERR.search(err)
if _missing:
_missing = _missing.group(1)
logging.debug("Adding missing file " + _missing)
spec.required_files.update(["*" + _missing])
spec.build_required_files.update(["*" + _missing])
return True
return False
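    # For example, a mock build log line such as
    #   "fatal error: openssl/ssl.h: No such file or directory"
    # matches MOCK_C_ERR and adds '*openssl/ssl.h' to the required file sets.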
|
LRGH/amoco
|
amoco/arch/eBPF/formats.py
|
Python
|
gpl-2.0
| 1,781 | 0 |
# -*- coding: utf-8 -*-
from .env import *
from amoco.cas.expressions import regtype
from amoco.arch.core import Formatter, Token
def mnemo(i):
mn = i.mnemonic.lower()
return [(Token.Mnemonic, "{: <12}".format(mn))]
def deref(opd):
return "[%s+%d]" % (opd.a.base, opd.a.disp)
def opers(i):
s = []
for op in i.operands:
if op._is_mem:
s.append((Token.Memory, deref(op)))
elif op._is_cst:
if i.misc["imm_ref"] is not None:
s.append((Token.Address, "%s" % (i.misc["imm_ref"])))
elif op.sf:
s.append((Token.Constant, "%+d" % op.value))
else:
s.append((Token.Constant, op.__str__()))
elif op._is_reg:
s.append((Token.Register, op.__str__()))
s.append((Token.Literal, ", "))
if len(s) > 0:
s.pop()
return s
def opers_adr(i):
s = opers(i)
if i.address is None:
s[-1] = (Token.Address, ".%+d" % i.operands[-1])
else:
imm_ref = i.address + i.length + (i.operands[-1] * 8)
s[-1] = (Token.Address, "#%s"
|
% (imm_ref))
return s
def opers_adr2(i):
s = opers(i)
if i.address is None:
s[-3] = (Token.Address, ".%+d" % i.opera
|
nds[-2])
s[-1] = (Token.Address, ".%+d" % i.operands[-1])
else:
imm_ref1 = i.address + i.length * (i.operands[-2] + 1)
imm_ref2 = i.address + i.length * (i.operands[-1] + 1)
s[-3] = (Token.Address, "#%s" % (imm_ref1))
s[-1] = (Token.Address, "#%s" % (imm_ref2))
return s
format_default = (mnemo, opers)
eBPF_full_formats = {
"ebpf_jmp_": (mnemo, opers_adr),
"bpf_jmp_": (mnemo, opers_adr2),
}
eBPF_full = Formatter(eBPF_full_formats)
eBPF_full.default = format_default
|
ryokochang/Slab-GCS
|
bin/Release/Scripts/example6.py
|
Python
|
gpl-3.0
| 3,744 | 0.029129 |
# from http://diydrones.com/forum/topics/mission-planner-python-script?commentId=705844%3AComment%3A2035437&xg_source=msg_com_forum
import socket
import sys
import math
from math import sqrt
import clr
import time
import re, string
clr.AddReference("MissionPlanner.Utilities")
import MissionPlanner #import *
clr.AddReference("MissionPlanner.Utilities") #includes the Utilities class
from MissionPlanner.Utilities import Locationwp
HOST = 'localhost' # Symbolic name meaning all available interfaces
#SPORT = 5000 # Arbitrary non-privileged port
RPORT = 4000 # Arbitrary non-privileged port
REMOTE = ''
# Datagram (udp) socket
rsock = socket.socket(socket.AF_INET,socket.SOCK_DGRAM)
print 'Sockets created'
# Bind socket to local host and port
try:
rsock.bind((HOST,RPORT))
except socket.error, msg:
#print 'Bind failed. Error Code:'
sys.stderr.write("[ERROR] %s\n" % msg[1])
rsock.close()
sys.exit()
print 'Receive Socket bind complete on ' + str(RPORT)
print 'Starting Follow'
Script.ChangeMode("Guided") # changes mode to "Guided"
print 'Guided Mode'
#keep talking with the Mission Planner server
while 1:
msg = rsock.recv(1024)
pattern = re.compile("[ ]")
parameters = pattern.split(msg)
latData = parameters[0]
lngData = parameters[1]
headingData = parameters[2]
altData = parameters[3]
float_lat = float(latData)
float_lng = float(lngData)
float_heading = float(headingData)
float_alt = float(altData)
"""Safety Manual Mode Switch"""
#while True:
if cs.mode == 'MANUAL':
Script.ChangeMode("Manual")
rsock.close()
else:
#print cs.mode
"""Follower Offset"""
XOffset= float(0) #User Input for x axis offset
YOffset= float(-2) #User Input for y axis offset
brng = math.radians(float_heading)
# brng = float_heading*math.pi/180 #User input heading angle of follower in relation to leader. 0 degrees is forward.
d = math.sqrt((XOffset**2)+(YOffset**2)) #Distance in m
MperLat = 69.172*1609.34 #meters per degree of latitude. Length of degree (miles) at equator * meters in a mile
        MperLong = math.cos(math.radians(float_lat))*69.172*1609.34 #meters per degree of longitude (latitude converted to radians for math.cos)
Lat_Offset_meters = YOffset/MperLat #lat distance offset in meters
Long_Offset_meters = XOffset/MperLong #long distance offset in meters
Follower_lat = float_lat + (Long_Offset_meters*math.sin(brng)) + (Lat_Offset_meters*math.cos(brng)) #rotates lat follower offset in relation to heading of leader
Follower_long = float_lng - (Long_Offset_meters*math.cos(brng)) + (Lat_Offset_meters*math.sin(brng)) #rotates long follower offset in relation to heading of leader
Follower_alt = float_alt + 10
#Follower_alt = 10
float_lat = float(Follower_lat)
float_lng = float(Follower_long)
float_alt = float(Follower_alt) #4-5 second lag induced on altitude waypoint line, unless alt is set to 0
print(float_lat)
print(float_lng)
print(float_heading)
print(float_alt)
"""Writing Waypoints"""
item = MissionPlanner.Utilities.Locationwp() # creating waypoint
MissionPlanner.Utilities.Locationwp.lat.SetValue(item,float_lat)
MissionPlanner.Utilities.Locationwp.lng.SetValue(item,float_lng)
#MissionPlanner.Utilities.Locationwp.groundcourse.SetValue(item,float_heading)
MissionPlanner.Utilities.Locationwp.alt.SetValue(item,float_alt) #Can only use lat,lng, or alt
MAV.setGuidedModeWP(item) #set waypoint
print 'Waypoint Sent'
print time.strftime('%X %x %Z')
# exit
rsock.close()
print 'Script End'
|
itbabu/django-oscar
|
tests/functional/catalogue/review_tests.py
|
Python
|
bsd-3-clause
| 2,113 | 0 |
from oscar.test.testcases import WebTestCase
from oscar.test.factories import create_product, UserFactory
from oscar.core.compat import get_user_model
from oscar.apps.catalogue.reviews.signals import review_added
from oscar.test.contextmanagers import mock_signal_receiver
class TestACustomer(WebTestCase):
def setUp(self):
self.product = create_product()
def test_can_add_a_review_when_anonymous(self):
detail_page = self.app.get(self.product.get_absolute_url())
add_review_page = detail_page.click(linkid='write_review')
form = add_review_page.forms['add_review_form']
form['title'] = 'This is great!'
form['score'] = 5
form['body'] = 'Loving it, loving it, loving it'
form['name'] = 'John Doe'
form['email'] = 'john@example.com'
form.submit()
self.assertEqual(1, self.product.reviews.all().count())
def test_can_add_a_review_when_signed_in(self):
user = UserFactory()
detail_page = self.app.get(self.product.get_absolute_url(),
user=user)
add_review_page = detail_page.click(linkid="write_review")
form = add_review_page.forms['add_review_form']
form['title'] = 'This is great!'
form['score'] = 5
form['body'] = 'Loving it, loving it, loving it'
form.submit()
self.assertEqual(1, self.product.reviews.all().count())
def test_adding_a_review_sends_a_signal(self):
review_user = UserFactory()
detail_page = self.app.get(self.product.get_absolute_url(),
user=review_user)
with mock_signal_receiver(review_added) as receiver:
add_review_page = detail_page.click(linkid="write_review")
form = add_review_page.forms['add_review_form']
form['title'] = 'This is great!'
form['score'] = 5
form['body'] = 'Loving it, loving it, loving it'
form.submit()
self.assertEqual(receiver.call_count, 1)
self.assertEqual(1, self.product.reviews.all().count())
|
gustavovaliati/ci724-ppginfufpr-2016
|
exerc-3a/main.py
|
Python
|
gpl-3.0
| 930 | 0.022581 |
#!/usr/bin/python
import cv2
import numpy as np
import sys, getopt
import matplotlib
matplotlib.use('Agg') # Force matplotlib to not use any Xwindows backend.
from matplotlib import pyplot as plt
image_path = None
def printHelp():
print 'main.py\n' \
' -i <Image Path. Ex: /home/myImage.jpg > (Mandatory)\n' \
' \n Example: python main.py -i myOriginalImage.jpg \n '
try:
opts, args = getopt.getopt(sys.argv[1:],"hi:")
except getopt.GetoptError:
printHelp()
sys.exit(2)
for opt, arg in opts:
if opt == '-h':
printHelp()
sys.exit()
    elif opt in ("-i"):
image_path = arg
if image_path == None:
    print "Input file missing"
printHelp()
sys.exit()
img = cv2.imread(image_path)
color = ('b','g','r')
for i,col in enumerate(color):
hist = cv2.calcHist([img],[i],None,[256],[0,256])
plt.plot(hist,color = col)
plt.xlim([0,256])
plt.savefig("hist.png")
|
vuntz/glance
|
glance/cmd/cache_manage.py
|
Python
|
apache-2.0
| 16,590 | 0.000241 |
#!/usr/bin/env python
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
A simple cache management utility for Glance.
"""
from __future__ import print_function
import functools
import optparse
import os
import sys
import time
from oslo_utils import encodeutils
from oslo_utils import timeutils
from glance.common import utils
from six.moves import input
# If ../glance/__init__.py exists, add ../ to Python search path, so that
# it will override what happens to be installed in /usr/(local/)lib/python...
possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
os.pardir,
os.pardir))
if os.path.exists(os.path.join(possible_topdir, 'glance', '__init__.py')):
sys.path.insert(0, possible_topdir)
from glance.common import exception
import glance.image_cache.client
from glance.version import version_info as version
SUCCESS = 0
FAILURE = 1
def catch_error(action):
"""Decorator to provide sensible default error handling for actions."""
def wrap(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
try:
ret = func(*args, **kwargs)
return SUCCESS if ret is None else ret
except exception.NotFound:
options = args[0]
print("Cache management middleware not enabled on host %s" %
options.host)
return FAILURE
except exception.Forbidden:
print("Not authorized to make this request.")
return FAILURE
except Exception as e:
options = args[0]
if options.debug:
raise
print("Failed to %s. Got error:" % action)
pieces = encodeutils.exception_to_unicode(e).split('\n')
for piece in pieces:
print(piece)
return FAILURE
return wrapper
return wrap
@catch_error('show cached images')
def list_cached(options, args):
"""%(prog)s list-cached [options]
List all images currently cached.
"""
client = get_client(options)
images = client.get_cached_images()
if not images:
print("No cached images.")
return SUCCESS
print("Found %d cached images..." % len(images))
pretty_table = utils.PrettyTable()
    pretty_table.add_column(36, label="ID")
pretty_table.add_column(19, label="Last Accessed (UTC)")
pretty_table.add_column(19, label="Last Modified (UTC)")
    # 1 TB takes 13 characters to display: len(str(2**40)) == 13
pretty_table.add_column(14, label="Size", just="r")
pretty_table.add_column(10, label="Hits", just="r")
print(pretty_table.make_header())
for image in images:
last_modified = image['last_modified']
last_modified = timeutils.iso8601_from_timestamp(last_modified)
last_accessed = image['last_accessed']
if last_accessed == 0:
last_accessed = "N/A"
else:
last_accessed = timeutils.iso8601_from_timestamp(last_accessed)
print(pretty_table.make_row(
image['image_id'],
last_accessed,
last_modified,
image['size'],
image['hits']))
@catch_error('show queued images')
def list_queued(options, args):
"""%(prog)s list-queued [options]
List all images currently queued for caching.
"""
client = get_client(options)
images = client.get_queued_images()
if not images:
print("No queued images.")
return SUCCESS
print("Found %d queued images..." % len(images))
pretty_table = utils.PrettyTable()
pretty_table.add_column(36, label="ID")
print(pretty_table.make_header())
for image in images:
print(pretty_table.make_row(image))
@catch_error('queue the specified image for caching')
def queue_image(options, args):
"""%(prog)s queue-image <IMAGE_ID> [options]
Queues an image for caching
"""
if len(args) == 1:
image_id = args.pop()
else:
print("Please specify one and only ID of the image you wish to ")
print("queue from the cache as the first argument")
return FAILURE
if (not options.force and
not user_confirm("Queue image %(image_id)s for caching?" %
{'image_id': image_id}, default=False)):
return SUCCESS
client = get_client(options)
client.queue_image_for_caching(image_id)
if options.verbose:
print("Queued image %(image_id)s for caching" %
{'image_id': image_id})
return SUCCESS
@catch_error('delete the specified cached image')
def delete_cached_image(options, args):
"""
%(prog)s delete-cached-image <IMAGE_ID> [options]
Deletes an image from the cache
"""
if len(args) == 1:
image_id = args.pop()
else:
print("Please specify one and only ID of the image you wish to ")
print("delete from the cache as the first argument")
return FAILURE
if (not options.force and
not user_confirm("Delete cached image %(image_id)s?" %
{'image_id': image_id}, default=False)):
return SUCCESS
client = get_client(options)
client.delete_cached_image(image_id)
if options.verbose:
print("Deleted cached image %(image_id)s" % {'image_id': image_id})
return SUCCESS
@catch_error('Delete all cached images')
def delete_all_cached_images(options, args):
"""%(prog)s delete-all-cached-images [options]
Remove all images from the cache.
"""
if (not options.force and
not user_confirm("Delete all cached images?", default=False)):
return SUCCESS
client = get_client(options)
num_deleted = client.delete_all_cached_images()
if options.verbose:
print("Deleted %(num_deleted)s cached images" %
{'num_deleted': num_deleted})
return SUCCESS
@catch_error('delete the specified queued image')
def delete_queued_image(options, args):
"""
%(prog)s delete-queued-image <IMAGE_ID> [options]
Deletes an image from the cache
"""
if len(args) == 1:
image_id = args.pop()
else:
print("Please specify one and only ID of the image you wish to ")
print("delete from the cache as the first argument")
return FAILURE
if (not options.force and
not user_confirm("Delete queued image %(image_id)s?" %
{'image_id': image_id}, default=False)):
return SUCCESS
client = get_client(options)
client.delete_queued_image(image_id)
if options.verbose:
print("Deleted queued image %(image_id)s" % {'image_id': image_id})
return SUCCESS
@catch_error('Delete all queued images')
def delete_all_queued_images(options, args):
"""%(prog)s delete-all-queued-images [options]
Remove all images from the cache queue.
"""
if (not options.force and
not user_confirm("Delete all queued images?", default=False)):
return SUCCESS
client = get_client(options)
num_deleted = client.delete_all_queued_images()
if options.verbose:
print("Deleted %(num_deleted)s queued images" %
{'num_deleted': num_deleted})
return SUCCESS
def get_client(options):
"""Return a new client object to a Glance server.
specified by the --host and --port options
supplied to the CLI
"""
return glance.image_cache.client.get_client(
host=options.host,
port=options.po
|
mete0r/gpl
|
mete0r_gpl/__init__.py
|
Python
|
agpl-3.0
| 810 | 0 |
# -*- coding: utf-8 -*-
#
# mete0r.gpl : Manage GPL'ed source code files
# Copyright (C) 2015 mete0r <mete0r@sarangbang.or.kr>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
|
freeitaly/Trading-System
|
vn.trader/ctaAlgo/strategyTickBreaker.py
|
Python
|
mit
| 8,133 | 0.00262 |
# encoding: UTF-8
import talib as ta
import numpy as np
from ctaBase import *
from ctaTemplate import CtaTemplate
import time
########################################################################
class TickBreaker(CtaTemplate):
"""跳空追击策略(MC版本转化)"""
className = 'TickBreaker'
author = u'融拓科技'
# 策略参数
forward = 5 # 正向tick数量
backward = 2 # 反向tick数量
reForward = 1 # 再次转向tick数量
maPeriod = 5 # 均线参数
# 策略变量
tickHistory = [] # 缓存tick报价的数组
maxHistory = 7 # 最大缓存数量
forwardNo = EMPTY_INT # 正向tick数量
backwardNo = EMPTY_INT # 反向tick数量
reForwardNo = EMPTY_INT # 再次转向tick数量
# 参数列表,保存了参数的名称
paramList = ['name',
'className',
'author',
'vtSymbol',
'forward',
'backward',
'reForward'
]
# 变量列表,保存了变量的名称
varList = ['inited',
'trading',
'pos',
'forwardNo',
'backwardNo',
'reForwardNo'
]
# condition1 = False # >=5个上涨tick
# condition2 = False # 2个下跌tick
# condition3 = False # 1个上涨tick
# ----------------------------------------------------------------------
def __init__(self, ctaEngine, setting):
"""Constructor"""
super(TickBreaker, self).__init__(ctaEngine, setting)
# 注意策略类中的可变对象属性(通常是list和dict等),在策略初始化时需要重新创建,
        # 否则会出现多个策略实例之间数据共享的情况,有可能导致潜在的策略逻辑错误风险,
# 策略类中的这些可变对象属性可以选择不写,全都放在__init__下面,写主要是为了阅读
        # 策略时方便(更多是个编程习惯的选择)
# 策略变量
self.tickHistory = [] # 缓存tick报价的数组
self.maxHistory = 7 # 最大缓存数量
self.forwardNo = EMPTY_INT # 正向tick数量
self.backwardNo = EMPTY_INT # 反向tick数量
self.reForwardNo = EMPTY_INT # 再次转向tick数量
self.oldPrice = 0 # 上一个tick的lastPrice
# ----------------------------------------------------------------------
def onInit(self):
"""初始化策略(必须由用户继承实现)"""
self.writeCtaLog(u'tick策略初始化')
self.putEvent()
# ----------------------------------------------------------------------
def onStart(self):
"""启动策略(必须由用户继承实现)"""
self.writeCtaLog(u'tick策略启动')
self.putEvent()
# ----------------------------------------------------------------------
def onStop(self):
"""停止策略(必须由用户继承实现)"""
self.writeCtaLog(u'tick策略停止')
self.putEvent()
# ----------------------------------------------------------------------
def onTick(self, tick):
"""收到行情TICK推送(必须由用户继承实现)"""
# 把最新的收盘价缓存到列表中
start = time.time()
if tick.lastPrice != self.oldPrice:
self.tickHistory.append(tick.lastPrice)
self.oldPrice = tick.lastPrice
else:
return
# 检查列表长度,如果超过缓存上限则移除最老的数据
# 这样是为了减少计算用的数据量,提高速度
if len(self.tickHistory) > self.maxHistory:
self.tickHistory.pop(0)
# 如果小于缓存上限,则说明初始化数据尚未足够,不进行后续计算
else:
return
# # 将缓存的收盘价数转化为numpy数组后,传入talib的函数SMA中计算
# closeArray = np.array(self.closeHistory)
# sma = ta.SMA(closeArray, self.maPeriod)
# # >=5个上涨tick
# condition1 = self.tickHistory[0] < self.tickHistory[1] < self.tickHistory[2] < self.tickHistory[3] < self.tickHistory[4]
# # 2个下跌tick
# condition2 = self.tickHistory[4] > self.tickHistory[5] > self.tickHistory[6]
# # 1个上涨tick
# condition3 = self.tickHistory[6] < self.tickHistory[7]
# print self.tickHistory
# print 'buy: ', int(condition1), ' ', int(condition2), ' ', int(condition3)
# buyCondition = condition1 and condition2 and condition3
#
# # >=5个下跌tick
# condition1 = self.tickHistory[0] > self.tickHistory[1] > self.tickHistory[2] > self.tickHistory[3] > self.tickHistory[4]
# # 2个上涨tick
# condition2 = self.tickHistory[4] < self.tickHistory[5] < self.tickHistory[6]
# # 1个下跌tick
# condition3 = self.tickHistory[6] > self.tickHistory[7]
# print 'sell: ', int(condition1), ' ', int(condition2), ' ', int(condition3)
#
# sellCondition = condition1 and condition2 and condition3
# >=5个上涨tick
condition1 = self.tickHistory[0] < self.tickHistory[1] < self.tickHistory[2] < self.tickHistory[3]
# 2个下跌tick
condition2 = self.tickHistory[3] > self.tickHistory[4] > self.tickHistory[5]
# 1个上涨tick
condition3 = self.tickHistory[5] < self.tickHistory[6]
# print self.tickHistory
# print 'buy: ', int(condition1), ' ', int(condition2), ' ', int(condition3)
buyCondition = condition1 and condition2 and condition3
# >=5个下跌tick
condition1 = self.tickHistory[0] > self.tickHistory[1] > self.tickHistory[2] > self.tickHistory[3]
# 2个上涨tick
condition2 = self.tickHistory[3] < self.tickHistory[4] < self.tickHistory[5]
# 1个下跌tick
condition3 = self.tickHistory[5] > self.tickHistory[6]
# print 'sell: ', int(condition1), ' ', int(condition2), ' ', int(condition3)
sellShortCondition = condition1 and condition2 and condition3
# 金叉和死叉的条件是互斥
if buyCondition:
# 如果金叉时手头没有持仓,则直接做多
if self.pos == 0:
self.buy(tick.lastPrice, 1)
# 如果有空头持仓,则先平空,再做多
elif self.pos < 0:
self.cover(tick.lastPrice, 1)
self.buy(tick.lastPrice, 1)
# 死叉和金叉相反
elif sellShortCondition:
if self.pos == 0:
self.short(tick.lastPrice, 1)
elif self.pos > 0:
self.sell(tick.lastPrice, 1)
self.short(tick.lastPrice, 1)
sellCondition = self.tickHistory[4] > self.tickHistory[5] > self.tickHistory[6]
buyCoverCondition = self.tickHistory[4] < self.tickHistory[5] < self.tickHistory[6]
# if self.pos > 0 and sellCondition:
# self.sell(tick.lastPrice, 1)
#
# if self.pos < 0 and buyCoverCondition:
# self.cover(tick.lastPrice, 1)
# print time.time() - start
# 发出状态更新事件
self.putEvent()
# ----------------------------------------------------------------------
def onBar(self, bar):
"""收到Bar推送(必须由用户继承实现)"""
pass
# ----------------------------------------------------------------------
def onOrder(self, order):
"""收到委托变化推送(必须由用户继承实现)"""
# 对于无需做细粒度委托控制的策略,可以忽略onOrder
pass
# ----------------------------------------------------------------------
def onTrade(self, trade):
"""收到成交推送(必须由用户继承实现)"""
# 对于无需做细粒度委托控制的策略,可以忽略onOrder
pass
|
nshafer/django-hashid-field
|
sandbox/sandbox/urls.py
|
Python
|
mit
| 1,076 | 0 |
"""sandbox URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
    https://docs.djangoproject.com/en/1.10/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
    2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django.contrib import admin
from rest_framework import routers
from library import views as library_views
router = routers.DefaultRouter()
router.register('authors', library_views.AuthorViewSet)
router.register('books', library_views.BookViewSet)
urlpatterns = [
url(r'^', include('library.urls')),
url(r'^api/', include(router.urls)),
url(r'^admin/', admin.site.urls),
]
|
adfinis-sygroup/timed-backend
|
timed/subscription/serializers.py
|
Python
|
agpl-3.0
| 2,642 | 0.000379 |
from datetime import timedelta
from django.db.models import Sum
from django.utils.duration import duration_string
from rest_framework_json_api.serializers import (
CharField,
ModelSerializer,
SerializerMethodField,
)
from timed.projects.models import Project
from timed.tracking.models import Report
from .models import Order, Package
class SubscriptionProjectSerializer(ModelSerializer):
purchased_time = SerializerMethodField(source="get_purchased_time")
spent_time = SerializerMethodField(source="get_spent_time")
def get_purchased_time(self, obj):
"""
Calculate purchased time for given project.
Only acknowledged hours are included.
"""
orders = Order.objects.filter(project=obj, acknowledged=True)
data = orders.aggregate(purchased_time=Sum("duration"))
return duration_string(data["purchased_time"] or timedelta(0))
def get_spent_time(self, obj):
"""
Calculate spent time for given project.
Reports which are not billable or are in review are excluded.
"""
reports = Report.objects.filter(
task__project=obj, not_billable=False, review=False
)
data = reports.aggregate(spent_time=Sum("duration"))
return duration_string(data["spent_time"] or timedelta())
included_serializers = {
"billing_type": "timed.projects.serializers.BillingTypeSerializer",
"cost_center": "timed.projects.serializers.CostCenterSerializer",
"customer": "timed.projects.serializers.CustomerSerializer",
"orders": "timed.subscription.serializers.OrderSerializer",
}
class Meta:
model = Project
resource_name = "subscription-projects"
fields = (
"name",
"billing_type",
"cost_center",
"purchased_time",
"spent_time",
"customer",
"orders",
)
class PackageSerializer(ModelSerializer):
price = CharField()
"""CharField needed as it includes currency."""
included_serializers = {
"billing_type": "timed.projects.serializers.BillingTypeSerializer"
}
class Meta:
model = Package
resource_name = "subscription-packages"
fields = ("duration", "price", "billing_type")
class OrderSerializer(ModelSerializer):
included_serializers = {
"project": ("timed.subscription.serializers" ".SubscriptionProjectSerializer")
}
class Meta:
model = Order
resource_name = "subscription-orders"
fields = ("duration", "acknowledged", "ordered", "project")
|
|
tlakshman26/cinder-https-changes
|
cinder/tests/unit/test_ibm_xiv_ds8k.py
|
Python
|
apache-2.0
| 30,480 | 0 |
# Copyright 2013 IBM Corp.
# Copyright (c) 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# Authors:
# Erik Zaadi <erikz@il.ibm.com>
# Avishay Traeger <avishay@il.ibm.com>
import copy
from mox3 import mox
from oslo_config import cfg
from cinder import context
from cinder import exception
from cinder.i18n import _
from cinder import test
from cinder.volume import configuration as conf
from cinder.volume.drivers.ibm import xiv_ds8k
from cinder.volume import volume_types
FAKE = "fake"
CANNOT_DELETE = "Can not delete"
TOO_BIG_VOLUME_SIZE = 12000
POOL_SIZE = 100
CONSISTGROUP_ID = 1
VOLUME = {'size': 16,
'name': FAKE,
'id': 1,
'consistencygroup_id': CONSISTGROUP_ID,
'status': 'available'}
MANAGED_FAKE = "managed_fake"
MANAGED_VOLUME = {'size': 16,
'name': MANAGED_FAKE,
'id': 2}
REPLICA_FAKE = "repicated_fake"
REPLICATED_VOLUME = {'size': 64,
'name': REPLICA_FAKE,
'id': 2}
CONTEXT = {}
CONSISTGROUP = {'id': CONSISTGROUP_ID, }
CG_SNAPSHOT_ID = 1
CG_SNAPSHOT = {'id': CG_SNAPSHOT_ID,
'consistencygroup_id': CONSISTGROUP_ID}
CONNECTOR = {'initiator': "iqn.2012-07.org.fake:01:948f189c4695", }
CONF = cfg.CONF
class XIVDS8KFakeProxyDriver(object):
"""Fake IBM XIV and DS8K Proxy Driver."""
def __init__(self, xiv_ds8k_info, logger, expt, driver=None):
"""Initialize Proxy."""
self.xiv_ds8k_info = xiv_ds8k_info
self.logger = logger
self.exception = expt
self.xiv_ds8k_portal = \
self.xiv_ds8k_iqn = FAKE
self.volumes = {}
self.snapshots = {}
self.driver = driver
def setup(self, context):
if self.xiv_ds8k_info['xiv_ds8k_user'] != self.driver\
.configuration.san_login:
raise self.exception.NotAuthorized()
if self.xiv_ds8k_info['xiv_ds8k_address'] != self.driver\
.configuration.san_ip:
raise self.exception.HostNotFound(host='fake')
def create_volume(self, volume):
if volume['size'] > POOL_SIZE:
raise self.exception.VolumeBackendAPIException(data='blah')
self.volumes[volume['name']] = volume
def volume_exists(self, volume):
return self.volumes.get(volume['name'], None) is not None
def delete_volume(self, volume):
if self.volumes.get(volume['name'], None) is not None:
del self.volumes[volume['name']]
def manage_volume_get_size(self, volume, existing_ref):
if self.volumes.get(existing_ref['source-name'], None) is None:
raise self.exception.VolumeNotFound(volume_id=volume['id'])
return self.volumes[existing_ref['source-name']]['size']
def manage_volume(self, volume, existing_ref):
if self.volumes.get(existing_ref['source-name'], None) is None:
raise self.exception.VolumeNotFound(volume_id=volume['id'])
volume['size'] = MANAGED_VOLUME['size']
return {}
def unmanage_volume(self, volume):
pass
def initialize_connection(self, volume, connector):
if not self.volume_exists(volume):
raise self.exception.VolumeNotFound(volume_id=volume['id'])
lun_id = volume['id']
self.volumes[volume['name']]['attached'] = connector
return {'driver_volume_type': 'iscsi',
'data': {'target_discovered': True,
'target_portal': self.xiv_ds8k_portal,
'target_iqn': self.xiv_ds8k_iqn,
'target_lun': lun_id,
'volume_id': volume['id'],
'multipath': True,
'provider_location': "%s,1 %s %s" % (
self.xiv_ds8k_portal,
self.xiv_ds8k_iqn,
lun_id), },
}
def terminate_connection(self, volume, connector):
if not self.volume_exists(volume):
raise self.exception.VolumeNotFound(volume_id=volume['id'])
if not self.is_volume_attached(volume, connector):
raise self.exception.NotFound(_('Volume not found for '
'instance %(instance_id)s.')
% {'instance_id': 'fake'})
del self.volumes[volume['name']]['attached']
def is_volume_attached(self, volume, connector):
if not self.volume_exists(volume):
raise self.exception.VolumeNotFound(volume_id=volume['id'])
return (self.volumes[volume['name']].get('attached', None)
== connector)
def reenable_replication(self, context, volume):
model_update = {}
if volume['replication_status'] == 'inactive':
model_update['replication_status'] = 'active'
elif volume['replication_status'] == 'invalid_status_val':
raise exception.CinderException()
model_update['replication_extended_status'] = 'some_status'
model_update['replication_driver_data'] = 'some_data'
return model_update
def get_replication_status(self, context, volume):
if volume['replication_status'] == 'invalid_status_val':
raise exception.CinderException()
return {'replication_status': 'active'}
def promote_replica(self, context, volume):
if volume['replication_status'] == 'invalid_status_val':
raise exception.CinderException()
return {'replication_status': 'inactive'}
def create_replica_test_volume(self, volume, src_vref):
if volume['size'] != src_vref['size']:
raise exception.InvalidVolume(
reason="Target and source volumes have different size.")
return
def retype(self, ctxt, volume, new_type, diff, host):
volume['easytier'] = new_type['extra_specs']['easytier']
return True, volume
def create_consistencygroup(self, ctxt, group):
volumes = [volume for k, volume in self.volumes.items()
if volume['consistencygroup_id'] == group['id']]
if volumes:
raise exception.CinderException(
message='The consistency group id of volume may be wrong.')
return {'status': 'available'}
def delete_consistencygroup(self, ctxt, group):
volumes = []
for volume in self.volumes.values():
if (group.get('id', None)
== volume.get('consistencygroup_id', None)):
if volume['name'] == CANNOT_DELETE:
raise exception.VolumeBackendAPIException(
message='Volume can not be deleted')
else:
volume['status'] = 'deleted'
volumes.append(volume)
# Delete snapshots in consistency group
self.snapshots = {k: snap for k, snap in self.snapshots.items()
if not(snap.get('consistencygroup_id', None)
== group.get('id', None))}
# Delete volume in consistency group
self.volumes = {k: vol for k, vol in self.volumes.items()
if not(vol.get('consistencygroup_id', None)
== group.get('id', None))}
return {'status': 'deleted'}, volumes
def create_cgsnapshot(self, ctxt, cgsnapshot):
snapshots = []
for volume in self.volumes.values():
if (cgsnapshot.get('consiste
|
codervikash/algorithms
|
Python/Graphs/breath_first_traversal.py
|
Python
|
mit
| 2,188 | 0.002285 |
"""
Pseudo code
Breadth-First-Search(Graph, root):
create empty set S
create empty queue Q
root.parent = NIL
Q.enqueue(root)
while Q is not empty:
current = Q.dequeue()
if current is the goal:
return current
for each node n that is adjacent to current:
if n is not in S:
add n to S
n.parent = current
Q.enqueue(n)
Implementation
"""
from collections import deque
from directional_graph import Graph
def BFS(Graph, s):
graph = Graph.graph()
if s not in graph:
        raise Exception("Vertex %s not in graph" % s)
q = deque([s])
visited = set([s])
while len(q) != 0:
        node = q.popleft()  # FIFO dequeue keeps the traversal breadth-first
for each in graph[node]:
print visited
if each not in visited:
visited.add(each)
q.append(each)
return visited
if __name__ == "__main__":
g = {
"a": {"d": 4},
"b": {"c": 2},
"c": {"b": 2, "c": 5, "d": 1, "e": 7},
"d": {"a": 4, "c": 1},
"e": {"c": 7}
}
graph = Graph(g)
print("Vertices of graph:")
print(graph.list_vertices())
print("\nEdges of graph:")
print(graph.list_edges())
print("\nAdding a vertice")
graph.add_vertex("g")
print (graph.list_vertices())
graph.add_edge(("g", "a"))
graph.add_edge(("a", "c"))
graph.add_edge(("g", "c"))
print("\nEdges of graph:")
print(graph.list_edges())
print (graph.list_vertices())
print(graph.graph())
print(graph.has_edge(("a", "c")))
print(graph.graph())
print("\nDeleting edge (a, d):")
graph.delete_edge(("a", "d"))
print(graph.list_edges())
print (graph.list_vertices())
print(graph.graph())
# print("\nDeleting vertex a:")
# graph.delete_vertex("a")
print (graph.list_vertices())
print(graph.list_edges())
print(graph.graph())
print("\nPath between b to e:")
print(graph.find_path("b", "e"))
print("\nSetting edge weight for (c, e):")
graph.set_edge_weight(("c", "e"), 2)
print(graph.graph())
print '\n'
print (BFS(graph, 'e'))
|
duke605/RunePy
|
commands/choices.py
|
Python
|
mit
| 957 | 0.003135 |
from util.arguments import Arguments
from discord.ext import commands
from shlex import split
import random
class Choices:
def __init__(self, bot):
self.bot = bot
    @commands.command(aliases=['choose'], description='Randomly picks one of the given choices.')
async def choices(self, *, msg):
parser = Arguments(allow_abbrev=False, prog='choices')
        parser.add_argument('choices', nargs='+', help='The choices to randomly pick from.')
try:
args = parser.parse_args(split(msg))
except SystemExit:
await self.bot.say('```%s```' % parser.format_help())
return
except Exception as e:
await self.bot.say('```%s```' % str(e))
return
        choice = args.choices[random.SystemRandom().randint(0, len(args.choices) - 1)]
await self.bot.say('**%s** has randomly been selected.' % choice)
def setup(bot):
bot.add_cog(Choices(bot))
|
droberin/blackhouse
|
blackhouse/__init__.py
|
Python
|
mit
| 2,518 | 0.000397 |
import logging
import socket
from . import arcade
logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', level=logging.INFO)
class Switch:
remote_ip = None
remote_port = 9999
state = None
commands = {'info': '{"system":{"get_sysinfo":{}}}',
'on': u'{"system":{"set_relay_state":{"state":1}}}',
'off': '{"system":{"set_relay_state":{"state":0}}}',
'cloudinfo': '{"cnCloud":{"get_info":{}}}',
'wlanscan': '{"netif":{"get_scaninfo":{"refresh":0}}}',
                'time': '{"time":{"get_time":{}}}',
'schedule': '{"schedule":{"get_rules":{}}}',
'countdown': '{"count_down":{"get_rules":{}}}',
'antitheft': '{"anti_theft":{"get_rules":{}}}',
'reboot': '{"system":{"reboot":{"delay":1}}}',
'reset': '{"system":{"reset":{"delay":1}}}'
}
def __init__(self, server, port=80):
self.remote_ip = server
        self.remote_port = int(port)
def activate(self):
self.switch_requester(self.commands.get('on'))
def deactivate(self):
self.switch_requester(self.commands.get('off'))
def info(self):
self.switch_requester(self.commands.get('info'))
def switch_requester(self, content=None):
if content is None:
print("Fail")
return False
else:
try:
sock_tcp = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock_tcp.connect((self.remote_ip, self.remote_port))
print("Sending: ", content)
# sock_tcp.send(bytes(self.encrypt(content), 'utf8'))
sock_tcp.send(self.encrypt(content).encode('utf8'))
data = sock_tcp.recv(2048)
sock_tcp.close()
print("Sent: ", content)
print("Received: ", str(self.decrypt(data[4:])))
except socket.error:
return False
return False
def encrypt(self, string):
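        # XOR "autokey" obfuscation used by TP-Link smart plugs: each byte is XORed
        # with the previous ciphertext byte (initial key 171); the four leading NUL
        # bytes stand in for the protocol's length header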
key = 171
result = "\0\0\0\0"
for i in string:
a = key ^ ord(i)
key = a
result += chr(a)
return result
def decrypt(self, string):
key = 171
result = ""
string = string.decode('utf8')
for i in string:
i = str(i)
a = key ^ ord(i)
key = ord(i)
result += chr(a)
return result
|
plinecom/pydpx_meta
|
pydpx_meta/low_header_big_endian.py
|
Python
|
mit
| 4,246 | 0 |
import ctypes
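# Big-endian ctypes structures mirroring the DPX (SMPTE 268M) image file header layout.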
class _DpxGenericHeaderBigEndian(ctypes.BigEndianStructure):
_fields_ = [
('Magic', ctypes.c_char * 4),
('ImageOffset', ctypes.c_uint32),
('Version', ctypes.c_char * 8),
('FileSize', ctypes.c_uint32),
('DittoKey', ctypes.c_uint32),
('GenericSize', ctypes.c_uint32),
('IndustrySize', ctypes.c_uint32),
('UserSize', ctypes.c_uint32),
('FileName', ctypes.c_char * 100),
('TimeDate', ctypes.c_char * 24),
('Creator', ctypes.c_char * 100),
('Project', ctypes.c_char * 200),
('Copyright', ctypes.c_char * 200),
('EncryptKey', ctypes.c_uint32),
('Reserved', ctypes.c_char * 104)
]
class _DpxGenericImageElementBigEndian(ctypes.BigEndianStructure):
_fields_ = [
('DataSign', ctypes.c_uint32),
('LowData', ctypes.c_int32),
('LowQuantity', ctypes.c_float),
('HighData', ctypes.c_int32),
('HighQuantity', ctypes.c_float),
('Descriptor', ctypes.c_byte),
('Transfer', ctypes.c_byte),
('Colorimetric', ctypes.c_byte),
('BitSize', ctypes.c_byte),
('Packing', ctypes.c_uint16),
('Encoding', ctypes.c_uint16),
('DataOffset', ctypes.c_uint32),
('EndOfLinePadding', ctypes.c_uint32),
('EndOfImagePadding', ctypes.c_uint32),
('Description', ctypes.c_char * 32)
]
class _DpxGenericImageHeaderBigEndian(ctypes.BigEndianStructure):
_fields_ = [
('Orientation', ctypes.c_uint16),
('NumberElements', ctypes.c_uint16),
('PixelsPerLine', ctypes.c_uint32),
('LinesPerElement', ctypes.c_uint32),
('ImageElement', _DpxGenericImageElementBigEndian * 8),
('Reserved', ctypes.c_char * 52)
]
class _DpxGenericOrientationHeaderBigEndian(ctypes.BigEndianStructure):
_fields_ = [
('XOffset', ctypes.c_uint32),
('YOffset', ctypes.c_uint32),
('XCenter', ctypes.c_float),
('YCenter', ctypes.c_float),
('XOriginalSize', ctypes.c_uint32),
('YOriginalSize', ctypes.c_uint32),
('FileName', ctypes.c_char * 100),
('TimeDate', ctypes.c_char * 24),
('InputName', ctypes.c_char * 32),
('InputSN', ctypes.c_char * 32),
('Border', ctypes.c_uint16 * 4),
('AspectRatio', ctypes.c_uint32 * 2),
('Reserved', ctypes.c_byte * 28)
]
class _DpxIndustryFilmInfoHeaderBigEndian(ctypes.BigEndianStructure):
_fields_ = [
('FilmMfgId', ctypes.c_char * 2),
('FilmType', ctypes.c_char * 2),
('Offset', ctypes.c_char * 2),
('Prefix', ctypes.c_char * 6),
('Count', ctypes.c_char * 4),
('Format', ctypes.c_char * 32),
('FramePosition', ctypes.c_uint32),
('SequenceLen', ctypes.c_int32),
('HeldCount', ctypes.c_int32),
('FrameRate', ctypes.c_float),
('ShutterAngle', ctypes.c_float),
('FrameId', ctypes.c_char * 32),
('SlateInfo', ctypes.c_char * 100),
('Reserved', ctypes.c_byte * 56)
]
class _DpxIndustryTelevisionInfoHeaderBigEndian(ctypes.BigEndianStructure):
_fields_ = [
('TimeCode', ctypes.c_uint32),
        ('UserBits', ctypes.c_uint32),
('Interlace', ctypes.c_byte),
('FieldNumber', ctypes.c_byte),
('VideoSignal', ctypes.c_byte),
('Padding', ctypes.c_byte),
('HorzSampleRate', ctypes.c_float),
('VertSampleRate', ctypes.c_float),
('FrameRate', ctypes.c_float),
('TimeOffset', ctypes.c_float),
('Gamma', ctypes.c_float),
('BlackLevel', ctypes.c_float),
('BlackGain', ctypes.c_float),
('Breakpoint', ctypes.c_float),
('WhiteLevel', ctypes.c_float),
('IntegrationTimes', ctypes.c_float),
('Reserved', ctypes.c_byte * 76)
]
class DpxHeaderBigEndian(ctypes.BigEndianStructure):
_fields_ = [
('FileHeader', _DpxGenericHeaderBigEndian),
('ImageHeader', _DpxGenericImageHeaderBigEndian),
('OrientHeader', _DpxGenericOrientationHeaderBigEndian),
('FilmHeader', _DpxIndustryFilmInfoHeaderBigEndian),
('TvHeader', _DpxIndustryTelevisionInfoHeaderBigEndian)
]
|
tolomea/gatesym
|
gatesym/tests/blocks/test_latches.py
|
Python
|
mit
| 2,835 | 0 |
import random
from gatesym import core, gates, test_utils
from gatesym.blocks import latches
def test_gated_d_latch():
network = core.Network()
clock = gates.Switch(network)
data = gates.Switch(network)
latch = latches.gated_d_latch(data, clock)
network.drain()
assert not latch.read()
data.write(True)
network.drain()
assert not latch.read()
clock.write(True)
network.drain()
assert latch.read()
data.write(False)
network.drain()
assert not latch.read()
def test_ms_d_flop_basic():
network = core.Network()
clock = gates.Switch(network)
data = gates.Switch(network)
flop = latches.ms_d_flop(data, clock)
network.drain()
assert not flop.read()
# clock a 1 through
data.write(True)
network.drain()
assert not flop.read()
clock.write(True)
network.drain()
assert not flop.read()
clock.write(False)
network.drain()
assert flop.read()
# and back to 0
data.write(False)
network.drain()
assert flop.read()
    clock.write(True)
network.drain()
assert flop.read()
clock.write(False)
network.drain()
assert not flop.read()
def test_ms_d_flop_timing():
network = core.Network()
clock = gates.Switch(network)
data = gates.Switch(network)
    flop = latches.ms_d_flop(data, clock)
network.drain()
assert not flop.read()
# clock a 1 through
data.write(True)
network.drain()
assert not flop.read() # data has no impact
clock.write(True)
network.drain()
assert not flop.read() # clock high data in
clock.write(False)
data.write(False)
network.drain()
assert flop.read() # clock low stored data out
# and back to 0
data.write(False)
network.drain()
assert flop.read() # data has no impact
clock.write(True)
network.drain()
assert flop.read() # clock high data in
clock.write(False)
data.write(True)
network.drain()
assert not flop.read() # clock low stored data out
def test_register():
network = core.Network()
clock = gates.Switch(network)
data = test_utils.BinaryIn(network, 8)
register = latches.register(data, clock)
res = test_utils.BinaryOut(register)
network.drain()
assert res.read() == 0
# clock a value through
v1 = random.randrange(256)
data.write(v1)
network.drain()
assert res.read() == 0
clock.write(True)
network.drain()
assert res.read() == 0
clock.write(False)
network.drain()
assert res.read() == v1
# and a different value
v2 = random.randrange(256)
data.write(v2)
network.drain()
assert res.read() == v1
clock.write(True)
network.drain()
assert res.read() == v1
clock.write(False)
network.drain()
assert res.read() == v2
|
google/vulncode-db
|
data/forms/__init__.py
|
Python
|
apache-2.0
| 4,727 | 0.001269 |
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from flask_wtf import FlaskForm # type: ignore
from wtforms import ( # type: ignore
StringField,
TextAreaField,
SubmitField,
FieldList,
FormField,
IntegerField,
HiddenField,
BooleanField,
)
from wtforms import validators
from data.models import VulnerabilityGitCommits, VulnerabilityResources
from data.models.base import db
class BaseForm(FlaskForm):
@property
def non_hidden_fields(self):
for field in self:
if isinstance(field, HiddenField):
continue
yield field
class ModelFieldList(FieldList):
def __init__(self, *args, **kwargs):
self.model = kwargs.pop("model", None)
super().__init__(*args, **kwargs)
if not self.model:
            raise ValueError("ModelFieldList requires model to be set")
def populate_obj(self, obj, name):
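        # Grow or shrink the model relationship so it holds exactly one instance per
        # submitted form entry before FieldList copies the field data into them.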
if not hasattr(obj, name):
setattr(obj, name, [])
        while len(getattr(obj, name)) < len(self.entries):
new_model = self.model()
db.session.add(new_model)
getattr(obj, name).append(new_model)
while len(getattr(obj, name)) > len(self.entries):
db.session.delete(getattr(obj, name).pop())
super().populate_obj(obj, name)
class CommitLinksForm(FlaskForm):
repo_url = StringField(
"Git Repo URL", validators=[validators.Optional(), validators.URL()]
)
commit_hash = StringField("Commit Hash", validators=[])
# Commit data is optional -> otherwise use: validators.DataRequired(),
commit_link = StringField(
"Main commit link", validators=[validators.Optional(), validators.URL()]
)
repo_name = StringField("Repository Name", validators=[])
class Meta:
csrf = False
class VulnerabilityResourcesForm(FlaskForm):
link = StringField("Link", validators=[validators.DataRequired(), validators.URL()])
class Meta:
csrf = False
class VulnerabilityDetailsForm(FlaskForm):
commits = ModelFieldList(
FormField(CommitLinksForm),
model=VulnerabilityGitCommits,
min_entries=1,
default=[VulnerabilityGitCommits],
)
# Changing the CVE ID is disabled for now.
# The filters argument is used to have Null fields instead of empty strings.
# This is important since the cve_id is supposed to be unique OR Null.
# cve_id = StringField(
# "CVE-ID",
# filters=[lambda x: x and str(x).upper().strip(), lambda x: x or None],
# validators=[
# validators.Optional(),
# validators.Regexp(r"^CVE-\d{4}-\d+$")
# ],
# )
comment = TextAreaField(
"High-Level Bug Overview", validators=[validators.DataRequired()]
)
resources = ModelFieldList(
FormField(VulnerabilityResourcesForm), model=VulnerabilityResources
)
submit = SubmitField("Propose change")
class VulnerabilityProposalReject(FlaskForm):
review_feedback = TextAreaField(
"Feedback what should be changed", validators=[validators.DataRequired()]
)
submit_reject = SubmitField("Ask for improvements")
class VulnerabilityProposalApprove(FlaskForm):
submit_approve = SubmitField("Approve proposal")
class VulnerabilityProposalAssign(FlaskForm):
submit_assign = SubmitField("Take review")
class VulnerabilityProposalUnassign(FlaskForm):
submit_unassign = SubmitField("Unassign from this review")
class VulnerabilityProposalPublish(FlaskForm):
submit_publish = SubmitField("Publish entry")
class VulnerabilityDeleteForm(FlaskForm):
delete_entry = IntegerField("Delete entry", [validators.DataRequired()])
submit = SubmitField()
class UserProfileForm(BaseForm):
full_name = StringField(
"Name",
description=(
'<small class="form-text text-muted">'
"What should be shown next to your contributions.</small>"
),
)
hide_name = BooleanField("Hide Name")
profile_picture = StringField(
"Profile Picture URL", validators=[validators.Optional(), validators.URL()]
)
hide_picture = BooleanField("Hide Profile Picture")
|
TNT-Samuel/Coding-Projects
|
File Sending/V1.0/output/receivefile.py
|
Python
|
gpl-3.0
| 5,213 | 0.005179 |
import socket,sys,os,hashlib,codecs,time # Import socket module
#filecodec = 'cp037'
filecodec = None
buffersize = 1024
failed = False
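# Simple file-transfer client: confirms the filename via a SHA-1 handshake, downloads
# the file in 1024-byte chunks, then verifies the server's SHA-1 of the content before
# replacing any existing copy.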
def filehash(filepath):
openedFile = codecs.open(filepath,'rb',filecodec)
# readFile = openedFile.read().encode()
readFile = openedFile.read()
openedFile.close()
sha1Hash = hashlib.sha1(readFile)
sha1Hashed = sha1Hash.hexdigest()
return sha1Hashed
def namehash(strtohash):
sha1Hash = hashlib.sha1(strtohash.encode())
sha1Hashed = sha1Hash.hexdigest()
return sha1Hashed
c = socket.socket() # Create a socket object
host = socket.gethostname() # Get local machine name
port = 12345 # Reserve a port for your service.
connected = False
while not connected:
try:
c.connect((host, port))
connected = True
except Exception as ex:
print("An error occured when connecting: " + str(ex))
time.sleep(5)
try:
print('Connected to ', host)
gotfname = False
tries = 0
while not gotfname:
fnamehash = c.recv(buffersize).decode()
c.send("Next".encode())
fname = c.recv(buffersize).decode()
tmphash = namehash(fname)
tries = tries + 1
if tmphash == fnamehash:
c.send("Y".encode())
gotfname = True
print("Filename Valid")
elif tries >= 5:
print("Filename Invalid")
c.send("N".encode())
print("An error occured when receiving the filename")
c.close()
else:
print("Filename Invalid")
print("Attempting to get the filename again ...")
print()
c.send("N".encode())
umoded = c.recv(buffersize).decode()
if umoded == "y":
umode = True
else:
umode = False
c.send("Start".encode())
exist = False
gothash = False
while not gothash:
cfhash = c.recv(buffersize).decode()
c.send(cfhash.encode())
returndata = c.recv(buffersize).decode()
if returndata == "y":
gothash = True
try:
if cfhash == filehash(fname):
exist = True
except:
pass
if not exist:
c.send("n".encode())
print("File not found or out of date, downloading new version...")
gotfile = False
tries = 0
while not (gotfile or failed):
try:
try:
os.remove(fname + ".tmp")
except:
pass
flen = int(c.recv(buffersize).decode())
c.send("Continue".encode())
fhash = c.recv(buffersize).decode()
f = codecs.open(fname + ".tmp",'wb',filecodec)
c.send("Ready.".encode())
print("Receiving file: " + fname)
print("File Length: " + str(flen) + " Chunk(s)")
flenc = 0
print()
while flenc < flen:
sys.stdout.write("\rReceiving Chunk " + str(flenc + 1) + "...")
# l = c.recv(buffersize).decode(filecodec)
l = c.recv(buffersize)
if (l):
f.write(l)
flenc = flenc + 1
f.close()
print("Done Receiving")
ofhash = filehash(fname + ".tmp")
tries = tries + 1
if ofhash == fhash:
print("File Valid")
c.send("Y".encode())
gotfile = True
elif tries >= 5:
print("File Invalid")
c.send("N".encode())
print("An error occured when receiving the file")
failed = True
c.close()
else:
print("File Invalid")
print("Attempting to restart the download...")
print()
c.send("N".encode())
except Exception as ex:
try:
f.close()
except:
pass
try:
c.send("N".encode())
except:
pass
print("An error occured when receiving the file: " + str(ex))
if not failed:
print("Saving File...")
if umode:
try:
os.remove(__file__)
except:
pass
try:
os.remove(fname)
except:
pass
os.rename(fname + ".tmp", fname)
print("Done Saving")
else:
c.send("y".encode())
print("File already exists and is up to date")
if not failed:
print(c.recv(buffersize).decode())
c.close()
if umode:
os.system(fname)
sys.exit()
except Exception as ex:
try:
c.close()
except:
pass
try:
f.close()
except:
pass
try:
os.remove(fname + ".tmp")
except:
pass
print("An error occured: " + str(ex))
input()
|
Arno-Nymous/pyload
|
module/plugins/hoster/YoutubeCom.py
|
Python
|
gpl-3.0
| 42,175 | 0.004268 |
# -*- coding: utf-8 -*-
import operator
import os
import re
import subprocess
import time
import urllib
from xml.dom.minidom import parseString as parse_xml
from module.network.CookieJar import CookieJar
from module.network.HTTPRequest import HTTPRequest
from ..internal.Hoster import Hoster
from ..internal.misc import exists, isexecutable, json, reduce, renice, replace_patterns, which
from ..internal.Plugin import Abort, Skip
class BIGHTTPRequest(HTTPRequest):
"""
Overcome HTTPRequest's load() size limit to allow
loading very big web pages by overrding HTTPRequest's write() function
"""
# @TODO: Add 'limit' parameter to HTTPRequest in v0.4.10
def __init__(self, cookies=None, options=None, limit=2000000):
self.limit = limit
HTTPRequest.__init__(self, cookies=cookies, options=options)
def write(self, buf):
""" writes response """
if self.limit and self.rep.tell() > self.limit or self.abort:
rep = self.getResponse()
if self.abort:
raise Abort()
f = open("response.dump", "wb")
f.write(rep)
f.close()
raise Exception("Loaded Url exceeded limit")
self.rep.write(buf)
class Ffmpeg(object):
_RE_DURATION = re.compile(r'Duration: (\d{2}):(\d{2}):(\d{2})\.(\d{2}),')
_RE_TIME = re.compile(r'time=(\d{2}):(\d{2}):(\d{2})\.(\d{2})')
_RE_VERSION = re.compile((r'ffmpeg version (.+?) '))
CMD = None
priority = 0
streams = []
start_time = (0, 0)
output_filename = None
error_message = ""
def __init__(self, priority, plugin=None):
self.plugin = plugin
self.priority = priority
self.streams = []
self.start_time = (0, 0)
self.output_filename = None
self.error_message = ""
self.find()
@classmethod
def find(cls):
"""
Check for ffmpeg
"""
if cls.CMD is not None:
return True
try:
if os.name == "nt":
ffmpeg = os.path.join(pypath, "ffmpeg.exe") if isexecutable(os.path.join(pypath, "ffmpeg.exe")) \
else "ffmpeg.exe"
else:
ffmpeg = "ffmpeg"
cmd = which(ffmpeg) or ffmpeg
p = subprocess.Popen([cmd, "-version"],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
out, err = (_r.strip() if _r else "" for _r in p.communicate())
except OSError:
return False
m = cls._RE_VERSION.search(out)
if m is not None:
cls.VERSION = m.group(1)
cls.CMD = cmd
return True
@property
def found(self):
return self.CMD is not None
def add_stream(self, streams):
if isinstance(streams, list):
self.streams.extend(streams)
else:
self.streams.append(streams)
def set_start_time(self, start_time):
self.start_time = start_time
def set_output_filename(self, output_filename):
self.output_filename = output_filename
def run(self):
if self.CMD is None or self.output_filename is None:
return False
maps = []
args = []
meta = []
for i, stream in enumerate(self.streams):
args.extend(["-i", stream[1]])
maps.extend(["-map", "%s:%s:0" % (i, stream[0])])
if stream[0] == 's':
meta.extend(["-metadata:s:s:0:%s" % i, "language=%s" % stream[2]])
args.extend(maps)
args.extend(meta)
args.extend(["-y",
"-vcodec", "copy",
"-acodec", "copy",
"-scodec", "copy",
"-ss", "00:%s:%s.00" % (self.start_time[0], self.start_time[1]),
"-sub_charenc", "utf8"])
        call = [self.CMD] + args + [self.output_filename]
p = subprocess.Popen(
call,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
        renice(p.pid, self.priority)
duration = self._find_duration(p)
if duration:
last_line = self._progress(p, duration)
else:
last_line = ""
out, err = (_r.strip() if _r else "" for _r in p.communicate())
if err or p.returncode:
self.error_message = last_line
return False
else:
self.error_message = ""
return True
def _find_duration(self, process):
duration = 0
while True:
line = process.stderr.readline() #: ffmpeg writes to stderr
#: Quit loop on eof
if not line:
break
m = self._RE_DURATION.search(line)
if m is not None:
duration = sum(int(v) * [60 * 60 * 100, 60 * 100, 100, 1][i]
for i, v in enumerate(m.groups()))
break
return duration
def _progress(self, process, duration):
line = ""
last_line = ""
while True:
c = process.stderr.read(1) #: ffmpeg writes to stderr
#: Quit loop on eof
if not c:
break
elif c == "\r":
last_line = line.strip('\r\n')
line = ""
m = self._RE_TIME.search(last_line)
if m is not None:
current_time = sum(int(v) * [60 * 60 * 100, 60 * 100, 100, 1][i]
for i, v in enumerate(m.groups()))
if self.plugin:
progress = current_time * 100 / duration
self.plugin.pyfile.setProgress(progress)
else:
line += c
continue
return last_line #: Last line may contain error message
class YoutubeCom(Hoster):
__name__ = "YoutubeCom"
__type__ = "hoster"
__version__ = "0.68"
__status__ = "testing"
__pattern__ = r'https?://(?:[^/]*\.)?(?:youtu\.be/|youtube\.com/watch\?(?:.*&)?v=)[\w\-]+'
__config__ = [("activated", "bool", "Activated", True),
("quality", "sd;hd;fullhd;240p;360p;480p;720p;1080p;1440p;2160p;3072p;4320p", "Quality Setting", "hd"),
("vfmt", "int", "Video FMT/ITAG Number (0 for auto)", 0),
("afmt", "int", "Audio FMT/ITAG Number (0 for auto)", 0),
(".mp4", "bool", "Allow .mp4", True),
(".flv", "bool", "Allow .flv", True),
(".webm", "bool", "Allow .webm", True),
(".mkv", "bool", "Allow .mkv", True),
(".3gp", "bool", "Allow .3gp", False),
("aac", "bool", "Allow aac audio (DASH video only)", True),
("vorbis", "bool", "Allow vorbis audio (DASH video only)", True),
("opus", "bool", "Allow opus audio (DASH video only)", True),
("ac3", "bool", "Allow ac3 audio (DASH video only)", True),
("dts", "bool", "Allow dts audio (DASH video only)", True),
("3d", "bool", "Prefer 3D", False),
("subs_dl", "off;all_specified;first_available", "Download subtitles", "off"),
("subs_dl_langs", "str", "Subtitle language codes (ISO639-1) to download (comma separated)", ""),
("subs_embed", "bool", "Embed subtitles inside the output file (.mp4 and .mkv only)", False),
("priority", "int", "ffmpeg process priority", 0)]
__description__ = """Youtube.com hoster plugin"""
__license__ = "GPLv3"
__authors__ = [("spoob", "spoob@pyload.org"),
("zoidberg", "zoidberg@mujmail.cz"),
("GammaC0de", "nitzo2001[AT]yahoo[DOT]com")]
URL_REPLACEMENTS = [(r'youtu\.be/', 'youtube.com/watch?v=')]
#: Invalid characters that must be removed from the file name
invalid_chars = u'\u2605:?><"|\\'
#: name, width, height, quality ranking, 3D, type
formats = {
# 3gp
17: {'ext': ".3gp", 'width': 176, 'hei
|
renxiaoyi/project_euler
|
problem_143.py
|
Python
|
unlicense
| 739 | 0.002706 |
import math
# According to Law of cosines, p^2 + p*r + r^2 = c^2.
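# (cosine rule with the 120-degree angle at the Torricelli point: cos(120) = -1/2, hence the +p*r term)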
# Let c = r+k, => r = (p^2-k^2)/(2*k-p) and p > k > p/2 (k is even).
# Suppose p <= q <= r.
max_sum = 120000
d = {} # p => set(r)
for p in range(1, max_sum/2+1):
if p%10000 == 0:
print p
d[p] = set()
mink = int(p/2)+1
maxk = int((math.sqrt(3)-1)*p) # so that r >= p
    for k in range(mink, maxk+1):
if (p**2-k**2)%(2*k-p) == 0:
q = (p**2-k**2)/(2*k-p)
d[p].add(q)
ans = set()
for p in d.keys():
for q in d[p]:
if q in d and len(d[q]) > 0:
for r in d[p].intersection(d[q]):
if p + q + r > max_sum:
continue
ans.add(p+q+r)
print sum(ans)
|
woddx/privacyidea
|
privacyidea/api/realm.py
|
Python
|
agpl-3.0
| 10,603 | 0.000283 |
# -*- coding: utf-8 -*-
#
# http://www.privacyidea.org
# (c) cornelius kölbel, privacyidea.org
#
# 2014-12-08 Cornelius Kölbel, <cornelius@privacyidea.org>
# Complete rewrite during flask migration
# Try to provide REST API
#
# privacyIDEA is a fork of LinOTP. Some code is adapted from
# the system-controller from LinOTP, which is
# Copyright (C) 2010 - 2014 LSE Leading Security Experts GmbH
# License: AGPLv3
# contact: http://www.linotp.org
# http://www.lsexperts.de
# linotp@lsexperts.de
#
# This code is free software; you can redistribute it and/or
# modify it under the terms of the GNU AFFERO GENERAL PUBLIC LICENSE
# License as published by the Free Software Foundation; either
# version 3 of the License, or any later version.
#
# This code is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU AFFERO GENERAL PUBLIC LICENSE for more details.
#
# You should have received a copy of the GNU Affero General Public
# License along with this program. If not, see <http://www.gnu.org/licenses/>.
#
__doc__ = """The realm endpoints are used to define realms.
A realm groups together many users. Administrators can manage the tokens of
the users in such a realm. Policies and tokens can be assigned to realms.
A realm consists of several resolvers. Thus you can create a realm and gather
users from LDAP and flat file source into one realm or you can pick resolvers
that collect users from different points from your vast LDAP directory and
group these users into a realm.
You will only be able to see and use user object, that are contained in a realm.
The code of this module is tested in tests/test_api_system.py
"""
from flask import (Blueprint,
request, current_app)
from lib.utils import (getParam,
required,
send_result, get_priority_from_param)
from ..lib.log import log_with
from ..lib.realm import get_realms
from ..lib.realm import (set_default_realm,
get_default_realm,
set_realm,
delete_realm)
from ..lib.policy import ACTION
from ..api.lib.prepolicy import prepolicy, check_base_action
from flask import g
from gettext import gettext as _
import logging
log = logging.getLogger(__name__)
realm_blueprint = Blueprint('realm_blueprint', __name__)
defaultrealm_blueprint = Blueprint('defaultrealm_blueprint', __name__)
# ----------------------------------------------------------------
#
# REALM functions
#
#
@log_with(log)
@realm_blueprint.route('/<realm>', methods=['POST'])
@prepolicy(check_base_action, request, ACTION.RESOLVERWRITE)
def set_realm_api(realm=None):
"""
This call creates a new realm or reconfigures a realm.
The realm contains a list of resolvers.
In the result it returns a list of added resolvers and a list of
resolvers, that could not be added.
:param realm: The unique name of the realm
:param resolvers: A comma separated list of unique resolver names or a
list object
:type resolvers: string or list
:param priority: Additional parameters priority.<resolvername> define the
priority of the resolvers within this realm.
:return: a json result with a list of Realms
**Example request**:
    To create a new realm "newrealm" that consists of the resolvers
"reso1_with_realm" and "reso2_with_realm" call:
.. sourcecode:: http
POST /realm/newrealm HTTP/1.1
Host: example.com
Accept: application/json
Content-Length: 26
Content-Type: application/x-www-form-urlencoded
resolvers=reso1_with_realm, reso2_with_realm
priority.reso1_with_realm=1
priority.reso2_with_realm=2
**Example response**:
.. sourcecode:: http
HTTP/1.1 200 OK
Content-Type: application/json
{
"id": 1,
"jsonrpc": "2.0",
"result": {
"status": true,
"value": {
"added": ["reso1_with_realm", "reso2_with_realm"],
"failed": []
}
}
"version": "privacyIDEA unknown"
}
"""
param = request.all_data
resolvers = getParam(param, "resolvers", required)
priority = get_priority_from_param(param)
    if isinstance(resolvers, list):
Resolvers = resolvers
else:
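        # e.g. (illustrative) "reso1_with_realm,reso2_with_realm" is split on "," into
        # ['reso1_with_realm', 'reso2_with_realm'] before being handed to set_realm().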
Resolvers = resolvers.split(',')
(added, failed) = set_realm(realm, Resolvers, priority=priority)
g.audit_object.log({'success': len(added) == len(Resolvers),
'info': "realm: %r, resolvers: %r" % (realm,
resolvers)})
return send_result({"added": added,
"failed": failed})
@log_with(log)
@realm_blueprint.route('/', methods=['GET'])
def get_realms_api():
"""
This call returns the list of all defined realms.
It takes no arguments.
:return: a json result with a list of realms
**Example request**:
.. sourcecode:: http
GET / HTTP/1.1
Host: example.com
Accept: application/json
**Example response**:
.. sourcecode:: http
HTTP/1.1 200 OK
Content-Type: application/json
{
"
|
id": 1,
"jsonrpc": "2.0",
"result": {
"status": true,
|
"value": {
"realm1_with_resolver": {
"default": true,
"resolver": [
{
"name": "reso1_with_realm",
"type": "passwdresolver"
}
]
}
}
},
"version": "privacyIDEA unknown"
}
"""
realms = get_realms()
g.audit_object.log({"success": True})
# If the admin is not allowed to see all realms,
# (policy scope=system, action=read)
    # the realms where he has no administrative rights need
    # to be stripped.
'''
polPost = self.Policy.checkPolicyPost('system',
'getRealms',
{'realms': realms})
res = polPost['realms']
'''
return send_result(realms)
@log_with(log)
@realm_blueprint.route('/superuser', methods=['GET'])
def get_super_user_realms():
"""
This call returns the list of all superuser realms
as they are defined in *pi.cfg*.
See :ref:`cfgfile` for more information about this.
:return: a json result with a list of realms
**Example request**:
.. sourcecode:: http
GET /superuser HTTP/1.1
Host: example.com
Accept: application/json
**Example response**:
.. sourcecode:: http
HTTP/1.1 200 OK
Content-Type: application/json
{
"id": 1,
"jsonrpc": "2.0",
"result": {
"status": true,
"value": ["superuser",
"realm2"]
}
},
"version": "privacyIDEA unknown"
}
"""
superuser_realms = current_app.config.get("SUPERUSER_REALM", [])
g.audit_object.log({"success": True})
return send_result(superuser_realms)
@log_with(log)
@defaultrealm_blueprint.route('/<realm>', methods=['POST'])
@prepolicy(check_base_action, request, ACTION.RESOLVERWRITE)
def set_default_realm_api(realm=None):
"""
This call sets the default realm.
:param realm: the name of the realm, that should be the default realm
:return: a json result with either 1 (success) or 0 (fail)
"""
realm = realm.lower().strip()
r = set_default_realm(realm)
g.audit_object.log({"success": r,
"info": realm})
return send_result(r)
@log_with(log)
@defaultrealm_blueprint.route('', methods=['DELETE'])
@prepolicy(check_base_action, request, ACTION.RESOLVERDELETE)
def delete_default_realm_api(realm=None):
"""
This call deletes the default realm.
:return: a j
|
babyliynfg/cross
|
tools/project-creator/Python2.6.6/Lib/distutils/tests/test_build_py.py
|
Python
|
mit
| 3,817 | 0.000786 |
"""Tests for distutils.command.build_py."""
import os
import sys
import StringIO
import unittest
from distutils.command.build_py import build_py
from distutils.core import Distribution
from distutils.errors import DistutilsFileError
from distutils.tests import support
class BuildPyTestCase(support.TempdirManager,
support.LoggingSilencer,
unittest.TestCase):
def test_package_data(self):
sources = self.mkdtemp()
f = open(os.path.join(sources, "__init__.py"), "w")
f.write("# Pretend this is a package.")
f.close()
f = open(os.path.join(sources, "README.txt"), "w")
f.write("Info about this package")
f.close()
destination = self.mkdtemp()
dist = Distribution({"packages": ["pkg"],
"package_dir": {"pkg": sources}})
        # script_name need not exist, it just needs to be initialized
dist.script_name = os.path.join(sources, "setup.py")
dist.command_obj["build"] = support.DummyCommand(
force=0,
build_lib=destination)
dist.packages = ["pkg"]
dist.package_data = {"pkg": ["README.txt"]}
dist.package_dir = {"pkg": sources}
cmd = build_py(dist)
cmd.compile = 1
cmd.ensure_finalized()
self.assertEqual(cmd.package_data, dist.package_data)
cmd.run()
# This makes sure the list of outputs includes byte-compiled
# files for Python modules but not for package data files
# (there shouldn't *be* byte-code files for those!).
#
self.assertEqual(len(cmd.get_outputs()), 3)
pkgdest = os.path.join(destination, "pkg")
files = os.listdir(pkgdest)
self.assert_("__init__.py" in files)
self.assert_("__init__.pyc" in files)
self.assert_("README.txt" in files)
def test_empty_package_dir (self):
# See SF 1668596/1720897.
cwd = os.getcwd()
# create the distribution files.
sources = self.mkdtemp()
open(os.path.join(sources, "__init__.py"), "w").close()
testdir = os.path.join(sources, "doc")
os.mkdir(testdir)
open(os.path.join(testdir, "testfile"), "w").close()
os.chdir(sources)
old_stdout = sys.stdout
sys.stdout = StringIO.StringIO()
try:
dist = Distribution({"packages": ["pkg"],
"package_dir": {"pkg": ""},
"package_data": {"pkg": ["doc/*"]}})
            # script_name need not exist, it just needs to be initialized
dist.script_name = os.path.join(sources, "setup.py")
dist.script_args = ["build"]
dist.parse_command_line()
try:
dist.run_commands()
except DistutilsFileError:
self.fail("failed package_data test when package_dir is ''")
finally:
# Restore state.
|
os.chdir(cwd)
sys.stdout = old_stdout
def test_dont_write_bytecode(self):
# makes sure byte_compile is not used
pkg_dir, dist = self.create_dist()
cmd = build_py(dist)
cmd.compile = 1
cmd.optimize = 1
old_dont_write_bytecode = sys.dont_write_bytecode
|
sys.dont_write_bytecode = True
try:
cmd.byte_compile([])
finally:
sys.dont_write_bytecode = old_dont_write_bytecode
self.assertTrue('byte-compiling is disabled' in self.logs[0][1])
def test_suite():
return unittest.makeSuite(BuildPyTestCase)
if __name__ == "__main__":
unittest.main(defaultTest="test_suite")
|
stcioc/localdocindex
|
python/scrape_ps3.py
|
Python
|
mit
| 7,074 | 0.003675 |
# -------------------------------------------------------------------------------
# Name: module1
# Purpose:
#
# Author: Stefan
#
# Created: 11.07.2017
# Copyright: (c) Stefan 2017
# Licence: <your licence>
# -------------------------------------------------------------------------------
from urllib.parse import urljoin
from bs4 import BeautifulSoup
from scrape_interface import ScrapeProcessor
import re
from datetime import datetime
# number of entity for which we download
_ENTITY = 4420465
_titleOverride = {
"2016pv 31 03": "pv 31.03.2016",
"2016PV 27.06. 2016": "PV 27.06.2016",
"2015Pv 20.08": "Pv 20.08.2015",
"2014Pv 22.05": "pv 22.05.2014",
"2014Pv 29.04": "Pv 29.04.2014",
"2014Pv 20.08": "pv 20.08.2014",
"2014Pv 28.07": "Pv 28.07.2014",
"2014PV 30 Septembrie 1": "PV 30.09.2014",
"2014Pv 31.10": "Pv 31.10.2014",
"2014Pv 24.10": "Pv 24.10.2014",
"2014PV 10.12 Sed Indata": "PV 10.12.2014",
"2014PV 6.10": "pv 06.10.2014",
"2014Pv 10.11.": "pv 10.11.2014",
"2014Pv 20.10": "pv 20.10.2014"
}
def extractdata(sp):
print("Start processing entity " + str(_ENTITY))
_process_main(sp, "http://www.primarie3.ro/consiliu-local/hotarari-de-consiliu/", False)
_process_main(sp, "http://www.primarie3.ro/consiliu-local/procese-verbale-de-sedinta/", True)
print("End processing entity " + str(_ENTITY))
# main processing - take all years and process until no more pages
def _process_main(sp, configaddress, ispvpage):
html = ScrapeProcessor.download_page(configaddress)
_process_year(sp, html, ispvpage)
if sp.get_processmode() in (ScrapeProcessor.ProcessMode.DELTA, ScrapeProcessor.ProcessMode.DELTA_DOWNLOAD):
return
soup = BeautifulSoup(html, 'html.parser')
    arhiva = soup.find("div", {"class": "list_buget list_buget_arhiva MB30"})
if not arhiva:
print("ERROR: can't find div with class list_buget")
return
for li in arhiva.find_all("li"):
alink = li.find("a")
if not alink.has_attr("href"):
print("ERROR: link was expected to have href")
continue
link = alink["href"]
html2 = ScrapeProcessor.download_page(link)
_process_year(sp, html2, ispvpage)
# process a page with a year from the site
# html = page contents in string
def _process_year(sp, html, ispvpage):
soup = BeautifulSoup(html, 'html.parser')
pagetitle = soup.find("h2", {"class": "MT0"})
if pagetitle is None:
print("ERROR: no H2 title found")
return
match = re.search("(20[0-9]{2})", pagetitle.string)
if not match:
print("ERROR: H2 title was expected to contain a year" + pagetitle.string)
return
year = match.group(1)
lista = soup.find("ul", {"class": "list_buget_p"})
for li in lista.find_all("li"):
alink = li.a
href = alink["href"]
if not href.startswith("http"):
href = urljoin("http://www.primarie3.ro", href)
title = alink.string
if (str(year) + title) in _titleOverride:
title = _titleOverride[str(year) + title]
if ispvpage:
number = 0
else:
match = re.search("hc.*?[ .](\d+)($|\D)", title, re.IGNORECASE)
if not match:
match = re.search("hc.*?[ .](\d+)-(\d+)($|\D)", title, re.IGNORECASE)
if not match:
                    print("ERROR| Title does not start with hc: " + title)
continue
number1 = int(match.group(1))
number2 = int(match.group(2))
if (number2 - number1) < 0 or (number2 - number1) > 10:
                    print("ERROR| Invalid range: " + title)
continue
for n in range(number1, number2 + 1):
_process_doc(sp, n, year, title, href, "", ispvpage)
return
number = match.group(1)
datetext = ""
if ispvpage:
datetext = ScrapeProcessor.finddate(title)
if datetext == "":
print("ERROR|PV should have a date: " + title)
continue
else:
match = re.search("din (\d+\.\d+\.\d+)", title, re.IGNORECASE)
if match:
datetext = match.group(1)
date = datetime.strptime(datetext, '%d.%m.%Y')
datetext = date.strftime("%Y-%m-%d")
if datetext[:4] != str(year):
print("WARNING| date mismatch " + datetext + " vs year " + str(year))
datetext = ""
# process the found document
code, result = _process_doc(sp, number, year, title, href, datetext, ispvpage)
if code == "ERROR":
print("ERROR|" + title + "|" + result)
# process the info regarding a document (decision)
# decision info should come in docInfo with the following tags:
# date, link, number, year, title
def _process_doc(sp, number, year, title, link, date, ispvpage):
annex = 0
doctype = "MAIN"
#analyse type and post decision
if ispvpage:
number = ScrapeProcessor.dayinyear(date)
code, result = sp.post_decision("PRVB", number, year, _ENTITY, date, title)
if code == "ERROR":
return code, result
decisionid = result
else:
match = re.search("anexa(\d+)", title, re.IGNORECASE)
if match:
annex = match.group(1)
else:
match = re.search("anexa", title, re.IGNORECASE)
if match:
annex = 1
if annex:
code, result = sp.get_decision("HOTA", number, year, _ENTITY)
if code == "ERROR":
return code, result
decisionid = result
doctype = "ANEX"
else:
# add the decision to server
code, result = sp.post_decision("HOTA", number, year, _ENTITY, date, title)
if code == "ERROR":
return code, result
decisionid = result
# download page
code, result = sp.download_file(link)
if code == "ERROR":
sp.post_document(doctype, decisionid, annex, "ERROR_DOWNLOAD", "", link)
return code, result
fname = result
code, result, filetype = sp.ocr_document(fname)
if code == "ERROR":
sp.post_document(doctype, decisionid, annex, "ERROR_OCR", "", link)
return code, result
ocrfname = result
outstr, cssstr
|
= ScrapeProcessor.preparehtml(ocrfname, filetype)
return sp.post_document(doctype, decisionid, annex, outstr, cssstr, link)
if __name__ == '__main__':
localsp = ScrapeProcessor("http://192.168.56.10", "stefan_cioc", "parola1234")
loca
|
lsp.set_folders("X:/hot/S3I", "X:/hot/S3O")
localsp.set_processmode(ScrapeProcessor.ProcessMode.FULL)
extractdata(localsp)
|
garrettcap/Bulletproof-Backup
|
wx/tools/Editra/src/ed_statbar.py
|
Python
|
gpl-2.0
| 12,020 | 0.001165 |
###############################################################################
# Name: ed_statbar.py #
# Purpose: Custom statusbar with builtin progress indicator #
# Author: Cody Precord <cprecord@editra.org> #
# Copyright: (c) 2008 Cody Precord <staff@editra.org> #
# License: wxWindows License #
###############################################################################
"""
Custom StatusBar for Editra that contains a progress bar that responds to
messages from ed_msg to display progress of different actions.
@summary: Editra's StatusBar class
"""
__author__ = "Cody Precord <cprecord@editra.org>"
__svnid__ = "$Id: ed_statbar.py 70229 2012-01-01 01:27:10Z CJP $"
__revision__ = "$Revision: 70229 $"
#--------------------------------------------------------------------------#
# Imports
import wx
import wx.stc
# Editra Libraries
import ed_glob
import util
import ed_msg
import ed_menu
from syntax.synglob import GetDescriptionFromId
from eclib import ProgressStatusBar, EncodingDialog
from extern.decorlib import anythread
#--------------------------------------------------------------------------#
_ = wx.GetTranslation
#--------------------------------------------------------------------------#
class EdStatBar(ProgressStatusBar):
"""Custom status bar that handles dynamic field width adjustment and
automatic expiration of status messages.
"""
ID_CLEANUP_TIMER = wx.NewId()
def __init__(self, parent):
super(EdStatBar, self).__init__(parent, style=wx.ST_SIZEGRIP)
# Attributes
self._pid = parent.GetId() # Save parents id for filtering msgs
self._widths = list()
self._cleanup_timer = wx.Timer(self, EdStatBar.ID_CLEANUP_TIMER)
self._eolmenu = wx.Menu()
self._lexmenu = None
self._log = wx.GetApp().GetLog()
# Setup
self.SetFieldsCount(6) # Info, vi stuff, line/progress
self.SetStatusWidths([-1, 90
|
, 40, 40, 40, 155])
self._eolmenu.Append(ed_glob.ID_EOL_MAC, u"CR",
_("Change line endings to %s") % u"CR",
kind=wx.ITEM_CHECK)
self._eolmenu.Append(ed_glob.ID_EOL_WIN, u"CRLF",
_("Change line endings to %s") % u"CRLF",
kind=wx.ITEM_CHECK)
|
self._eolmenu.Append(ed_glob.ID_EOL_UNIX, u"LF",
_("Change line endings to %s") % u"LF",
kind=wx.ITEM_CHECK)
# Event Handlers
self.Bind(wx.EVT_WINDOW_DESTROY, self.OnDestroy, self)
self.Bind(wx.EVT_LEFT_DCLICK, self.OnLeftDClick)
self.Bind(wx.EVT_LEFT_UP, self.OnLeftUp)
self.Bind(wx.EVT_TIMER, self.OnExpireMessage,
id=EdStatBar.ID_CLEANUP_TIMER)
# Messages
ed_msg.Subscribe(self.OnProgress, ed_msg.EDMSG_PROGRESS_SHOW)
ed_msg.Subscribe(self.OnProgress, ed_msg.EDMSG_PROGRESS_STATE)
ed_msg.Subscribe(self.OnUpdateText, ed_msg.EDMSG_UI_SB_TXT)
ed_msg.Subscribe(self.OnUpdateDoc, ed_msg.EDMSG_UI_NB_CHANGED)
ed_msg.Subscribe(self.OnUpdateDoc, ed_msg.EDMSG_FILE_SAVED)
ed_msg.Subscribe(self.OnUpdateDoc, ed_msg.EDMSG_FILE_OPENED)
ed_msg.Subscribe(self.OnUpdateDoc, ed_msg.EDMSG_UI_STC_LEXER)
def OnDestroy(self, evt):
"""Unsubscribe from messages"""
if self._lexmenu:
self._lexmenu.Destroy()
if self._eolmenu:
self._eolmenu.Destroy()
if evt.GetId() == self.GetId():
ed_msg.Unsubscribe(self.OnProgress)
ed_msg.Unsubscribe(self.OnUpdateText)
ed_msg.Unsubscribe(self.OnUpdateDoc)
evt.Skip()
def __SetStatusText(self, txt, field):
"""Safe method to use for setting status text with CallAfter.
@param txt: string
@param field: int
"""
try:
super(EdStatBar, self).SetStatusText(txt, field)
self.AdjustFieldWidths()
if field == ed_glob.SB_INFO and txt != u'':
# Start the expiration countdown
if self._cleanup_timer.IsRunning():
self._cleanup_timer.Stop()
self._cleanup_timer.Start(10000, True)
        except (wx.PyDeadObjectError, wx.PyAssertionError):
# Getting some odd assertion errors on wxMac so just trap
# and ignore them for now
# glyphCount == (text.length()+1)" failed at graphics.cpp(2048)
# in GetPartialTextExtents()
pass
except TypeError, err:
self._log("[edstatbar][err] Bad status message: %s" % str(txt))
self._log("[edstatbar][err] %s" % err)
    def AdjustFieldWidths(self):
        """Adjust each field width of the status bar based on the field text
@return: None
"""
widths = [-1]
# Calculate required widths
# NOTE: Order of fields is important
for field in [ed_glob.SB_BUFF,
ed_glob.SB_LEXER,
ed_glob.SB_ENCODING,
ed_glob.SB_EOL,
ed_glob.SB_ROWCOL]:
width = self.GetTextExtent(self.GetStatusText(field))[0] + 20
if width == 20:
width = 0
widths.append(width)
# Adjust widths
if widths[-1] < 155:
widths[-1] = 155
# Only update if there are changes
if widths != self._widths:
self._widths = widths
self.SetStatusWidths(self._widths)
def GetMainWindow(self):
"""Method required for L{ed_msg.mwcontext}"""
return self.TopLevelParent
def OnExpireMessage(self, evt):
"""Handle Expiring the status message when the oneshot timer
tells us it has expired.
"""
if evt.GetId() == EdStatBar.ID_CLEANUP_TIMER:
wx.CallAfter(self.__SetStatusText, u'', ed_glob.SB_INFO)
else:
evt.Skip()
    def OnLeftDClick(self, evt):
        """Handle mouse left double click on the status bar
@param evt: wx.MouseEvent
@note: Assumes parent is MainWindow instance
"""
pt = evt.GetPosition()
if self.GetFieldRect(ed_glob.SB_ROWCOL).Contains(pt):
mw = self.GetParent()
mpane = mw.GetEditPane()
mpane.ShowCommandControl(ed_glob.ID_GOTO_LINE)
else:
evt.Skip()
def OnLeftUp(self, evt):
"""Handle left clicks on the status bar
@param evt: wx.MouseEvent
"""
pt = evt.GetPosition()
if self.GetFieldRect(ed_glob.SB_EOL).Contains(pt):
rect = self.GetFieldRect(ed_glob.SB_EOL)
self.PopupMenu(self._eolmenu, (rect.x, rect.y))
elif self.GetFieldRect(ed_glob.SB_ENCODING).Contains(pt):
nb = self.GetTopLevelParent().GetNotebook()
buff = nb.GetCurrentCtrl()
dlg = EncodingDialog(nb,
msg=_("Change the encoding of the current document."),
title=_("Change Encoding"),
default=buff.GetEncoding())
bmp = wx.ArtProvider.GetBitmap(str(ed_glob.ID_DOCPROP),
wx.ART_OTHER)
if bmp.IsOk():
dlg.SetBitmap(bmp)
dlg.CenterOnParent()
# TODO: should add EdFile callbacks for modification events instead
# of using explicit statusbar refresh.
if dlg.ShowModal() == wx.ID_OK:
buff.SetEncoding(dlg.GetEncoding())
self.UpdateFields()
# NOTE: Got an error report about a PyDeadObject error here. The
# error does not make any sense since the dialog is not
# destroyed or deleted by anything before this. Add validity
# check to ensure reference is still valid.
if dlg:
dlg.Destr
|
magfest/ubersystem
|
tests/uber/site_sections/test_summary.py
|
Python
|
agpl-3.0
| 3,732 | 0 |
from datetime import datetime, date
import pytest
from pytz import UTC
from uber.config import c
from uber.models import Attendee, Session
from uber.site_sections import summary
@pytest.fixture
def birthdays():
dates = [
date(1964, 12, 30),
date(1964, 12, 31),
date(1964, 1, 1),
date(1964, 1, 2),
date(1964, 1, 9),
date(1964, 1, 10),
date(1964, 1, 11),
date(1964, 1, 12),
date(1964, 1, 30),
date(1964, 1, 31),
date(1964, 2, 1),
date(1964, 2, 2),
date(1964, 2, 27),
date(1964, 2, 28),
date(1964, 2, 29),
date(1964, 3, 1),
date(1964, 3, 2)]
attendees = []
for d in dates:
attendees.append(Attendee(
placeholder=True,
first_name='Born on',
last_name=d.strftime('%B %-d, %Y'),
ribbon=c.VOLUNTEER_RIBBON,
staffing=True,
birthdate=d))
ids = []
with Session() as session:
session.bulk_insert(attendees)
ids = [a.id for a in attendees]
yield ids
with Session() as session:
session.query(Attendee).filter(Attendee.id.in_(ids)).delete(
synchronize_session=False)
class TestBirthdayCalendar(object):
@pytest.mark.parametrize('year', [None, 2027, 2028])
def test_attendee_birthday_calendar(
self,
admin_attendee,
year,
birthdays,
monkeypatch):
if year:
assert str(year)
response = summary.Root().attendee_birthday_calendar(year=year)
else:
assert str(datetime.now(UTC).year)
response = summary.Root().attendee_birthday_calendar()
if isinstance(response, bytes):
response = response.decode('utf-8')
lines = response.strip().split('\n')
assert len(lines) == (17 + 1) # Extra line for the header
@pytest.mark.parametrize('epoch,eschaton,expected', [
(datetime(2018, 1, 10), datetime(2018, 1, 11), 2), # Normal dates
(datetime(2017, 12, 31), datetime(2018, 1, 1), 2), # Crossing the year
(datetime(2018, 1, 31), datetime(2018, 2, 1), 2), # Crossing the month
(datetime(2018, 2, 28), datetime(2018, 3, 1), 3), # Leap day
(datetime(2018, 1, 1), datetime(2018, 3, 4), 15), # Multi-month
(datetime(2017, 12, 28), datetime(2018, 3, 4), 17), # Everybody
])
def test_event_birthday_calendar(
self,
admin_attendee,
epoch,
eschaton,
expected,
birthdays,
monkeypatch):
monkeypatch.setattr(c, 'EPOCH', epoch)
monkeypatch.setattr(c, 'ESCHATON', eschaton)
response = summary.Root().event_birthday_calendar()
if isinstance(response, bytes):
response = response.decode('utf-8')
lines = response.strip().split('\n')
assert len(lines) == (expected + 1) # Extra line for the header
def test_event_birthday_calendar_correct_birthday_years(
self,
admin_attendee,
birthdays,
monkeypatch
|
):
monkeypatch.setattr(c, 'EPOCH', datetime(2017, 12, 31))
monkeypatch.setattr(c, 'ESCHATON', datetime(2018, 1, 1))
response = summary.Root().event_birthday_calendar()
if isinstance(response, bytes):
response = response.decode('utf-8')
assert '"Born on December 31, 1964\'s Birthday",2017-12-31' in response
assert '"Born on January 1, 1964\'s Birthday",2018-01-01' in response
lines = response.strip().split('\n')
assert len
|
(lines) == (2 + 1) # Extra line for the header
|
opennode/nodeconductor
|
waldur_core/structure/migrations/0040_make_is_active_nullable.py
|
Python
|
mit
| 634 | 0 |
#
|
-*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('structure', '0039_remove_permission_groups'),
]
operations = [
migrations.AlterField(
model_name='customerpermission',
name='is_active',
field=models.NullBooleanField(default=True, db_index=True),
|
),
migrations.AlterField(
model_name='projectpermission',
name='is_active',
field=models.NullBooleanField(default=True, db_index=True),
),
]
|
alliedel/anomalyframework_python
|
anomalyframework/results.py
|
Python
|
mit
| 2,930 | 0.024232 |
import glob
import matplotlib.pyplot as plt
import numpy as np
import os
import pickle
import scipy.signal
import shutil
import display_pyutils
def apply_averaging_filter(x, filter_size=5):
return np.convolve(x, np.ones(filter_size,) / float(filter_size), mode='valid')
def apply_med
|
ian_filter(x, filter_size=5):
return scipy.signal.medfilt(x, filter_size)
def postprocess_signal(anomaly_ratings):
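    # Convert anomaly ratings in [0, 1) to odds: rating / (1 - rating).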
signal = anomaly_ratings / (1 - anomaly_ratings)
bottom_ninetyfive_percent = sorted(signal)[:int(np.floor(len(signal) * 0.95))]
smoothed_signal = apply_averaging_filter(signal, 100)
    threshold = (np.median(signal) + 2 * np.std(bottom_ninetyfive_percent)).astype(float)
return smoothed_signal, thre
|
shold
def save_anomaly_plot(signal, pars):
plt.figure(1); plt.clf()
plot_anomaly_ratings(signal)
title = 'video: {}\nlambda: {}\nmax_buffer_size:{}'.format(
os.path.basename(pars.paths.files.infile_features), pars.algorithm.discriminability.lambd,
pars.algorithm.discriminability.max_buffer_size)
plt.title(title)
print('Saving figure to {}.png in workspace'.format(plt.gcf().number))
display_pyutils.save_fig_to_workspace()
def plot_anomaly_ratings(signal):
plt.fill_between(range(len(signal)), signal, facecolor=display_pyutils.GOOD_COLOR_CYCLE[0],
alpha=1.0) # alpha=0.5
signal_sorted = np.sort(signal)
bottom_ninetyfive_percent = signal_sorted[:int(np.floor(len(signal_sorted) * 0.95))]
y_max = np.median(bottom_ninetyfive_percent) + 3*np.std(bottom_ninetyfive_percent)
plt.ylim([0, y_max])
# Given :
# - a set of anomaly ratings (continuous plus threshold or binary -- start with binary)
# - path to frames of a video
# - path to destination frames
# Output :
# - populate path to destination frames w/ video that highlights the anomaly frames (in red) /
# slows them down and speeds up non-anomalies.
def create_output_frames(anomaly_rating_binary_per_frame, input_frames, output_dir,
normal_fps=30*4, anomalous_fps=15):
an_binary = anomaly_rating_binary_per_frame
input_frames
def main():
LOCAL_SED_VIDEO_DIR = '/home/allie/projects/aladdin/videos/'
results_dirs = glob.glob('/home/allie/workspace/server_sync/2017_09_14/*')
for results_dir in results_dirs:
pars = pickle.load(open(os.path.join(results_dir, 'pars.pickle'), 'rb'))
an = np.load(results_dir + '/anomaly_ratings.npy')
signal, threshold = postprocess_signal(an)
save_anomaly_plot(signal, pars)
videoname = pars.paths.files.infile_features
anomalous_frames = sorted(glob.glob('/home/allie/projects/aladdin/videos/{}'
'frames'.format(videoname)))
input_frames =
create_output_frames(signal > threshold, input_frames, output_dir)
# 'image-%06d' % frame_num + '.png')
|
afaheem88/tempest_neutron
|
tempest/services/image/v1/json/image_client.py
|
Python
|
apache-2.0
| 11,416 | 0 |
# Copyright 2013 IBM Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import errno
import json
import os
import time
import urllib
from tempest.common import glance_http
from tempest.common import rest_client
from tempest.common.utils import misc as misc_utils
from tempest import config
from tempest import exceptions
from tempest.openstack.common import log as logging
CONF = config.CONF
LOG = logging.getLogger(__name__)
class ImageClientJSON(rest_client.RestClient):
def __init__(self, auth_provider):
super(ImageClientJSON, self).__init__(
auth_provider,
CONF.image.catalog_type,
CONF.image.region or CONF.identity.region,
endpoint_type=CONF.image.endpoint_type)
self._http = None
def _image_meta_from_headers(self, headers):
meta = {'properties': {}}
for key, value in headers.iteritems():
if key.startswith('x-image-meta-property-'):
_key = key[22:]
meta['properties'][_key] = value
elif key.startswith('x-image-meta-'):
_key = key[13:]
meta[_key] = value
for key in ['is_public', 'protected', 'deleted']:
if key in meta:
meta[key] = meta[key].strip().lower() in ('t', 'true', 'yes',
'1')
for key in ['size', 'min_ram', 'min_disk']:
if key in meta:
try:
meta[key] = int(meta[key])
except ValueError:
pass
return meta
def _image_meta_to_headers(self, fields):
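        # Illustrative example (values are assumptions, not from the original source): a
        # fields dict such as
        #   {'name': 'ubuntu', 'properties': {'arch': 'x86_64'}}
        # is flattened into glance v1 style headers:
        #   {'x-image-meta-name': 'ubuntu', 'x-image-meta-property-arch': 'x86_64'}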
headers = {}
fields_copy = copy.deepcopy(fields)
copy_from = fields_copy.pop('copy_from', None)
if copy_from is not None:
headers['x-glance-api-copy-from'] = copy_from
for key, value in fields_copy.pop('properties', {}).iteritems():
headers['x-image-meta-property-%s' % key] = str(value)
for key, value in fields_copy.pop('api', {}).iteritems():
headers['x-glance-api-property-%s' % key] = str(value)
for key, value in fields_copy.iteritems():
headers['x-image-meta-%s' % key] = str(value)
return headers
def _get_file_size(self, obj):
"""Analyze file-like object and attempt to determine its size.
:param obj: file-like object, typically redirected from stdin.
:retval The file's size or None if it cannot be determined.
"""
# For large images, we need to supply the size of the
# image file. See LP Bugs #827660 and #845788.
if hasattr(obj, 'seek') and hasattr(obj, 'tell'):
try:
obj.seek(0, os.SEEK_END)
obj_size = obj.tell()
obj.seek(0)
return obj_size
except IOError as e:
if e.errno == errno.ESPIPE:
# Illegal seek. This means the user is trying
# to pipe image data to the client, e.g.
# echo testdata | bin/glance add blah..., or
# that stdin is empty, or that a file-like
# object which doesn't support 'seek/tell' has
# been supplied.
return None
else:
raise
else:
# Cannot determine size of input image
return None
def _get_http(self):
dscv = CONF.identity.disable_ssl_certificate_validation
ca_certs = CONF.identity.ca_certificates_file
return glance_http.HTTPClient(auth_provider=self.auth_provider,
filters=self.filters,
insecure=dscv, ca_certs=ca_certs)
def _create_with_data(self, headers, data):
resp, body_iter = self.http.raw_request('POST', '/v1/images',
headers=headers, body=data)
self._error_checker('POST', '/v1/images', headers, data, resp,
body_iter)
body = json.loads(''.join([c for c in body_iter]))
return resp, body['image']
def _update_with_data(self, image_id, headers, data):
url = '/v1/images/%s' % image_id
resp, body_iter = self.http.raw_request('PUT', url, headers=headers,
body=data)
self._error_checker('PUT', url, headers, data,
resp, body_iter)
body = json.loads(''.join([c for c in body_iter]))
return resp, body['image']
@property
def http(self):
if self._http is None:
if CONF.service_available.glance:
self._http = self._get_http()
return self._http
def create_image(self, name, container_format, disk_format, **kwargs):
params = {
"name": name,
"container_format": container_format,
"disk_format": disk_format,
}
headers = {}
for option in ['is_public', 'location', 'properties',
'copy_from', 'min_ram']:
if option in kwargs:
params[option] = kwargs.get(option)
headers.update(self._image_meta_to_headers(params))
if 'data' in kwargs:
return self._create_with_data(headers, kwargs.get('data'))
resp, body = self.post('v1/images', None, headers)
self.expected_success(201, resp.status)
body = json.loads(body)
return resp, body['image']
def update_image(self, image_id, name=None, container_format=None,
data=None, properties=None):
params = {}
|
headers = {}
if name is not None:
params['name'] = name
if container_format is not None:
params['container_format'] = container_format
if properties is not None:
|
params['properties'] = properties
headers.update(self._image_meta_to_headers(params))
if data is not None:
return self._update_with_data(image_id, headers, data)
url = 'v1/images/%s' % image_id
resp, body = self.put(url, data, headers)
self.expected_success(200, resp.status)
body = json.loads(body)
return resp, body['image']
def delete_image(self, image_id):
url = 'v1/images/%s' % image_id
resp, body = self.delete(url)
self.expected_success(200, resp.status)
return resp, body
def image_list(self, **kwargs):
url = 'v1/images'
if len(kwargs) > 0:
url += '?%s' % urllib.urlencode(kwargs)
resp, body = self.get(url)
self.expected_success(200, resp.status)
body = json.loads(body)
return resp, body['images']
def image_list_detail(self, properties=dict(), changes_since=None,
**kwargs):
url = 'v1/images/detail'
params = {}
for key, value in properties.items():
params['property-%s' % key] = value
kwargs.update(params)
if changes_since is not None:
kwargs['changes-since'] = changes_since
if len(kwargs) > 0:
url += '?%s' % urllib.urlencode(kwargs)
resp, body = self.get(url)
self.expected_success(200, resp.status)
body = json.loads(body)
return resp, body['images']
def get_image_meta(self, image_id):
url = 'v1/images/%s' % image_id
resp, __ = self.head(url)
self.expected_success
|
akhambhati/Echobase
|
Echobase/Statistics/FDA/fda.py
|
Python
|
gpl-3.0
| 1,634 | 0 |
"""
Functional Data Analysis Routines
"""
from __future__ import division
import numpy as np
def _curve_area(A, B):
r1 = np.mean(A-B)
r2 = np.mean(B-A)
if r1 > r2:
return r1
else:
return r2
def curve_test(Y, cnd_1, cnd_2, n_pe
|
rm=1000):
"""
    Assess whether two curves are significantly different, based on a
    permutation test over conditions and replicates.
Parameters
----------
Y: 2d array, shape: (time x var)
Observations matrix for each variable over time.
cnd_1: list, shape: (n
|
_reps_1)
List of replicate indices in columns of Y for condition 1
cnd_2: list, shape: (n_reps_2)
List of replicate indices in columns of Y for condition 2
n_perm: int
Number of permutations to group
Returns
-------
p: int
Two-tailed p-value
"""
n_reps_1 = len(cnd_1)
n_reps_2 = len(cnd_2)
n_reps = Y.shape[1]
assert n_reps == (n_reps_1 + n_reps_2)
# Get true area between condition curves
Y_1 = np.mean(Y[:, cnd_1], axis=1)
Y_2 = np.mean(Y[:, cnd_2], axis=1)
true_area = _curve_area(Y_1, Y_2)
# Estimate null distribution of area between curves
p_count = 0
for pr in xrange(n_perm):
rnd_reps = np.random.permutation(n_reps)
rnd_cnd_1 = rnd_reps[:n_reps_1]
rnd_cnd_2 = rnd_reps[n_reps_1:]
rnd_Y_1 = np.mean(Y[:, rnd_cnd_1], axis=1)
rnd_Y_2 = np.mean(Y[:, rnd_cnd_2], axis=1)
rnd_area = _curve_area(rnd_Y_1, rnd_Y_2)
if rnd_area > true_area:
p_count += 1
p = p_count / n_perm
return p
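# Minimal usage sketch (illustrative; not part of the original module). It builds a toy
# observations matrix with three replicate columns per condition and runs the permutation
# test. The toy data, the column indices passed as cnd_1/cnd_2 and n_perm=100 are
# assumptions made purely for demonstration.
if __name__ == '__main__':
    rng = np.random.RandomState(0)
    cond_a = [rng.randn(50) for _ in range(3)]        # replicates of condition 1
    cond_b = [rng.randn(50) + 1.0 for _ in range(3)]  # shifted replicates of condition 2
    Y_demo = np.column_stack(cond_a + cond_b)         # shape: (time x 6 replicates)
    print(curve_test(Y_demo, cnd_1=[0, 1, 2], cnd_2=[3, 4, 5], n_perm=100))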
|
blakeboswell/valence
|
pyvalence/__init__.py
|
Python
|
bsd-3-clause
| 44 | 0 |
""" pyvalence
"""
__version__ = '0.0
|
.1.3'
| |
theysconator/Scribbler
|
scribbler.py
|
Python
|
lgpl-3.0
| 2,173 | 0.02485 |
#!/usr/bin/python
def conver
|
sionMap():
""" returns conversionmap """
|
return {
'a': [1, -2, -1],
'b': [1, 2, 1],
'c': [1, 2, -1],
'd': [1, -2, 1],
'e': [-1, 1, 1],
'f': [1, -2, 2],
'g': [-2, 1, 2],
'h': [-2, -1, 2],
'i': [-1, -1, 1],
'j': [2, 1, 2],
'k': [2, -1, 2],
'l': [-1, 1, 2],
'm': [-1, 2, 1],
'n': [-1, -2, 1],
'o': [-1, -1, 2],
'p': [-2, -1, -2],
'q': [-2, 2, -1],
'r': [-2, 1, -2],
's': [-2, -1, 1],
't': [-2, 2, 1],
'u': [2, 1, -2],
'v': [-1, -2, -1],
'w': [-1, -2, 2],
'x': [2, -1, 1],
'y': [2, -1, -2],
'z': [-2, 1, 1],
'char_empty': [0, 2, -2, 0],
'char_eol': [0, 2, -2, 2, -2, 0],
}
def convertCharacter(c, m):
""" c = character to convert, m = conversionMap """
return m[c]
def convertCharacters(s, m):
""" s = string, m = conversionMap """
o = []
e = []
for c in s:
if c == ' ':
c = 'char_empty'
elif c == '.':
c = 'char_eol'
if c in m:
o += m[c]
else:
e.append(c)
if len(e) > 0:
return {'e': True, 'l': e}
else:
return {'e': False, 'l': o}
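# Example (illustrative): addBaseLines([1, 2, 1]) returns [1, 0, 2, 0, 1]; a 0 baseline is
# inserted between consecutive non-zero strokes whose values differ by at most 1.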
def addBaseLines(a):
""" a = array to add baselines to """
o = []
p = None
for c in a:
if p is not None:
if ((p - 1) == int(c) or (p) == int(c) or (p + 1) == int(c)) and (p != 0) and (c != 0):
o.append(0)
p = int(c)
o.append(int(c))
return o
def main(prefix = False, string = '', suffix = False):
print 'Input:'
print 'var_prefix: ' + str(prefix)
print 'var_string: ' + str(string)
print 'var_suffix: ' + str(suffix)
if string == '':
print 'No string entered.'
print 'Application will now exit.'
return
o = []
if prefix:
string = ' ' + string
if suffix:
string = string + '.'
o = convertCharacters(string, conversionMap())
print ''
print 'Output:'
if o['e']:
print 'The string could not be parsed because of the following characters:'
print o['l']
else:
o = addBaseLines(o['l'])
print o
return
if __name__ == "__main__":
cin = raw_input("Enter string: ")
main(
prefix = False,
string = cin,
suffix = False,
)
|
fopina/tgbotplug
|
tests/examples/test_guess.py
|
Python
|
mit
| 1,935 | 0.00155 |
from tgbot import plugintest
from plugin_examples.guess import GuessPlugin
class GuessPluginTest(plugintest.PluginTestCase):
def setUp(self):
self.plugin = GuessPlugin()
self.bot = self.fake_bot('', plugins=[self.plugin])
def test_play(self):
self.receive_message('/guess_start')
self.assertReplied("I'm going to think of a number between 0 and 9 and you have to guess it! What's your guess?")
number = self.plugin.read_data(1)
self.assertIsNotNone(number)
self.assertGreaterEqual(number, 0)
self.assertLessEqual(number, 9)
# force number for testing
self.plugin.save_data(1, obj=5)
self.receive_message('1')
self.assertReplied("I'm thinking higher...")
self.receive_message('6')
self.assertReplied("I'm thinking lower...")
self.receive_message('gief error')
self.assertReplied('Invalid guess!')
self.receive_message('5')
self.assertReplied('Congratz, you nailed it John')
def test_stop(self):
self.receive_message('/guess_start')
self.assertReplied
|
("I'm going to think of
|
a number between 0 and 9 and you have to guess it! What's your guess?")
self.assertIsNotNone(self.plugin.read_data(1))
self.receive_message('/guess_stop')
self.assertReplied('Ok :(')
self.assertIsNone(self.plugin.read_data(1))
def test_stop_on_group(self):
chat = {
'id': -1,
'type': 'group',
'title': 'Test'
}
self.receive_message('/guess_start', chat=chat)
self.assertReplied("I'm going to think of a number between 0 and 9 and you have to guess it! What's your guess?")
self.assertIsNotNone(self.plugin.read_data(-1))
self.receive_message('/guess_stop', chat=chat)
self.assertReplied('Ok :(')
self.assertIsNone(self.plugin.read_data(-1))
|
beiko-lab/gengis
|
bin/Lib/site-packages/numpy/compat/__init__.py
|
Python
|
gpl-3.0
| 434 | 0 |
"""
C
|
ompatibility module.
This module contains duplicated code from Python itself or 3rd party
extensions, which may be included for the following reasons:
* compatibility
* we may only need a small subset of the copied library/module
"""
import _inspect
import py3k
from _inspect import getargspec, formatargspec
from py3k import *
__all__ = []
__all__
|
.extend(_inspect.__all__)
__all__.extend(py3k.__all__)
|
w3gh/ghost.py
|
plugins/join.py
|
Python
|
mit
| 716 | 0.023743 |
# -*- coding: utf-8 -*-
import hook
import bnetprotocol
from misc import *
from config import config
#settings = config[__name__.split('.')[-1]]
def message_received(bn, d):
if d.event == bnetprotocol.EID_TALK:
msg_list = str(d.message).split(' ', 1)
try:
command, payload = msg_list
except Value
|
Error:
command = msg_list[0]
payload = ''
if command == '.join' and len(payload) > 0:
'''if str(d.message).split(' ')[0] == settings['trigger'] + 'join':'''
bn.send_packet(bnetprotocol.SEND_SID_CHATCOMMAND('/join %s' % (payload)))
def install():
hook.register('after-handle_sid_chateven
|
t', message_received)
def uninstall():
hook.unregister('after-handle_sid_chatevent', message_received)
|
gi11es/thumbor
|
tests/filters/test_max_age.py
|
Python
|
mit
| 2,711 | 0.001107 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# thumbor imaging service
# https://github.com/thumbor/thumbor/wiki
# Licensed under the MIT license:
# http://www.opensource.org/licenses/mit-license
# Copyright (c) 2011 globo.com thumbor@googlegroups.com
from preggy import expect
from tornado.testing import gen_test
from tests.base import TestCase
from thumbor.config import Config
from thumbor.importer
|
import Importer
class BaseMaxAgeFilterTestCase(TestCase):
def get_fixture_path(self, name):
return "./tests/fixtures/%s" % name
def get_config(self):
return Config.load(self.get_fixture_path("max_age_conf.py"))
def get_importer(self):
importer = Importer(self.config)
importer.import_modules()
return importer
class MaxAgeFilterTestCase(BaseMaxAgeFilterTestCase):
@gen_test
|
async def test_max_age_filter_with_regular_image(self):
response = await self.async_fetch("/unsafe/smart/image.jpg", method="GET")
expect(response.code).to_equal(200)
expect(response.headers["Cache-Control"]).to_equal("max-age=2,public")
expect(response.headers).to_include("Expires")
@gen_test
async def test_max_age_url(self):
response = await self.async_fetch(
"/unsafe/filters:max_age(30)/image.jpg", method="GET"
)
expect(response.code).to_equal(200)
expect(response.headers["Cache-Control"]).to_equal("max-age=30,public")
expect(response.headers).to_include("Expires")
class MaxAgeDetectorFilterTestCase(BaseMaxAgeFilterTestCase):
def get_config(self):
config = super(MaxAgeDetectorFilterTestCase, self).get_config()
config.DETECTORS = ["tests.fixtures.prevent_result_storage_detector"]
return config
@gen_test
async def test_max_age_filter_with_non_storaged_image(self):
response = await self.async_fetch("/unsafe/smart/image.jpg", method="GET")
expect(response.code).to_equal(200)
expect(response.headers["Cache-Control"]).to_equal("max-age=1,public")
expect(response.headers).to_include("Expires")
class MaxAgeErrorDectectorFilterTestCase(BaseMaxAgeFilterTestCase):
def get_config(self):
config = super(MaxAgeErrorDectectorFilterTestCase, self).get_config()
config.DETECTORS = ["tests.fixtures.detection_error_detector"]
return config
@gen_test
async def test_with_detection_error_image(self):
response = await self.async_fetch("/unsafe/smart/image.jpg", method="GET")
expect(response.code).to_equal(200)
expect(response.headers["Cache-Control"]).to_equal("max-age=1,public")
expect(response.headers).to_include("Expires")
|
jaor/python
|
bigml/predicates.py
|
Python
|
apache-2.0
| 2,025 | 0.000494 |
# -*- coding: utf-8 -*-
#
# Copyright 2014-2021 BigML
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Predicates structure for the BigML local AnomalyTree
This module defines an auxiliary Predicates structure that is used in the
AnomalyTree to save the node's predicates info.
"""
from bigml.predicate import Predicate
class Predicates():
"""A list of predicates to be evaluated in an anomaly tree's node.
"""
def __init__(self, predicates_list):
self.predicates = []
for predicate in predicates_list:
if predicate is True:
self.predicates.append(True)
else:
self.predicates.append(
Predicate(predicate.get('op'),
predicate.get('field'),
predicate.get('value'),
predicate.get('term')))
def to_rule(self, fields, label='name'):
""" Builds rule string from a predicates
|
list
"""
return " and ".join([predicate.to_rule(fields, label=label) for
predicate in self.predicates
if not isinstance(predicate, bool)])
def apply(self, input_data, fields):
""" Applies the operators defined in each of the predicates to
the provided input data
|
"""
return all([predicate.apply(input_data, fields) for
predicate in self.predicates
if isinstance(predicate, Predicate)])
|
akatsoulas/mozillians
|
mozillians/users/views.py
|
Python
|
bsd-3-clause
| 9,979 | 0.001203 |
from functools import reduce
from operator import or_
from django.db.models import Q
from django.conf import settings
from django.contrib.auth.models import User
from django.http import JsonResponse
from cities_light.models import City, Country, Region
from dal import autocomplete
from pytz import country_timezones
from mozillians.common.templatetags.helpers import get_object_or_none
from mozillians.groups.models import GroupMembership
from mozillians.phonebook.forms import get_timezones_list
from mozillians.users.models import IdpProfile, UserProfile
class BaseProfileAdminAutocomplete(autocomplete.Select2QuerySetView):
"""Base class for django-autocomplete-light."""
def get_queryset(self):
"""Base queryset used only in admin.
R
|
eturn all the users who have completed their profile registration.
"""
if not self.request.user.is_staff:
return UserProfile.objects.none()
qs = UserProfile.objects.complete()
self.q_base_filter = (Q(ful
|
l_name__icontains=self.q)
| Q(user__email__icontains=self.q)
| Q(user__username__icontains=self.q))
if self.q:
qs = qs.filter(self.q_base_filter)
return qs
class UsersAdminAutocomplete(autocomplete.Select2QuerySetView):
"""Base class for django-autocomplete-light."""
def get_queryset(self):
"""Base queryset used only in admin.
Return all the users who have completed their profile registration.
"""
if not self.request.user.is_staff:
return User.objects.none()
qs = User.objects.all()
self.q_base_filter = (Q(userprofile__full_name__icontains=self.q)
| Q(email__icontains=self.q)
| Q(username__icontains=self.q))
if self.q:
qs = qs.filter(self.q_base_filter)
return qs
class VoucherAutocomplete(BaseProfileAdminAutocomplete):
def get_queryset(self):
"""Augment base queryset by returning only users who can vouch."""
qs = super(VoucherAutocomplete, self).get_queryset().filter(can_vouch=True)
if self.q:
qs = qs.filter(self.q_base_filter)
return qs
class VouchedAutocomplete(BaseProfileAdminAutocomplete):
def get_queryset(self):
"""Augment base queryset by returning only vouched users."""
qs = super(VouchedAutocomplete, self).get_queryset().vouched()
if self.q:
qs = qs.filter(self.q_base_filter)
return qs
class CuratorsAutocomplete(autocomplete.Select2QuerySetView):
def get_queryset(self):
"""Augment base queryset by returning only vouched users."""
# Allow only vouched users to perform this query.
if not self.request.user.userprofile.is_vouched:
return UserProfile.objects.none()
qs = UserProfile.objects.vouched()
if self.q:
qs = qs.filter(Q(full_name__icontains=self.q)
| Q(user__email__icontains=self.q)
| Q(user__username__icontains=self.q))
return qs
def get_autocomplete_location_query(qs, q):
"""Return qs if ``istartswith`` filter exists, else fallback to ``icontains``."""
startswith_qs = qs.filter(name__istartswith=q)
if startswith_qs.exists():
return startswith_qs
return qs.filter(name__icontains=q)
class StaffProfilesAutocomplete(autocomplete.Select2QuerySetView):
def get_results(self, context):
"""Modify the text in the results of the group invitation form."""
results = []
for result in context['object_list']:
pk = self.get_result_value(result)
if not pk:
continue
profile = UserProfile.objects.get(pk=pk)
idp = get_object_or_none(IdpProfile, profile=profile, primary=True)
text = self.get_result_label(result)
# Append the email used for login in the autocomplete text
if idp:
text += ' ({0})'.format(idp.email)
item = {
'id': pk,
'text': text
}
results.append(item)
return results
def get_queryset(self):
if not self.request.user.userprofile.is_vouched:
return UserProfile.objects.none()
queries = []
# Query staff profiles
for domain in settings.AUTO_VOUCH_DOMAINS:
pks = IdpProfile.objects.filter(
email__endswith='@' + domain).values_list('profile__pk', flat=True)
queries.append(Q(pk__in=pks))
query = reduce(or_, queries)
qs = UserProfile.objects.filter(query).distinct()
if self.q:
qs = qs.filter(Q(full_name__icontains=self.q)
| Q(user__email__icontains=self.q)
| Q(user__username__icontains=self.q))
return qs
class AccessGroupInvitationAutocomplete(StaffProfilesAutocomplete):
def get_queryset(self):
staff_qs = super(AccessGroupInvitationAutocomplete, self).get_queryset()
staff_ids = staff_qs.values_list('pk', flat=True)
# Query NDA memberships
nda_members_ids = (
GroupMembership.objects.filter(Q(group__name=settings.NDA_GROUP)
| Q(group__name=settings.NDA_STAFF_GROUP))
.filter(status=GroupMembership.MEMBER).distinct()
.values_list('userprofile__pk', flat=True)
)
query = Q(pk__in=staff_ids) | Q(pk__in=nda_members_ids)
qs = UserProfile.objects.filter(query).distinct()
if self.q:
qs = qs.filter(Q(full_name__icontains=self.q)
| Q(user__email__icontains=self.q)
| Q(user__username__icontains=self.q))
return qs
class NDAGroupInvitationAutocomplete(StaffProfilesAutocomplete):
def get_queryset(self):
staff_qs = super(NDAGroupInvitationAutocomplete, self).get_queryset()
staff_ids = staff_qs.values_list('pk', flat=True)
mfa_idps_query = (IdpProfile.objects.filter(primary=True)
.filter(Q(type=IdpProfile.PROVIDER_GITHUB)
| Q(type=IdpProfile.PROVIDER_FIREFOX_ACCOUNTS)
| Q(type=IdpProfile.PROVIDER_GOOGLE)
| Q(type=IdpProfile.PROVIDER_LDAP)))
mfa_idps_pks = mfa_idps_query.values_list('profile__id', flat=True)
qs = UserProfile.objects.filter(Q(pk__in=mfa_idps_pks) | Q(pk__in=staff_ids))
if self.q:
qs = qs.filter(Q(full_name__icontains=self.q)
| Q(user__email__icontains=self.q)
| Q(user__username__icontains=self.q))
return qs
class CountryAutocomplete(autocomplete.Select2QuerySetView):
def get_queryset(self):
"""Country queryset from cities_light."""
if not self.request.user.is_authenticated():
return Country.objects.none()
qs = Country.objects.all()
if self.q:
return get_autocomplete_location_query(qs, self.q)
return qs
class RegionAutocomplete(autocomplete.Select2QuerySetView):
def get_queryset(self):
"""Region queryset from cities_light."""
country_id = self.forwarded.get('country')
if not self.request.user.is_authenticated():
return Region.objects.none()
qs = Region.objects.all()
if country_id:
country = Country.objects.get(id=country_id)
qs = qs.filter(country=country)
if self.q:
return get_autocomplete_location_query(qs, self.q)
return qs
class CityAutocomplete(autocomplete.Select2QuerySetView):
def get_queryset(self):
"""City queryset from cities_light."""
region_id = self.forwarded.get('region')
country_id = self.forwarded.get('
|
ak0ska/rundeck-puppetdb-nodes
|
rundeck-puppetdb-nodes-plugin/contents/rundeck_puppetdb_nodes.py
|
Python
|
apache-2.0
| 5,881 | 0.006802 |
# Daniel Fernandez Rodriguez <danielfr@cern.ch>
from argparse import ArgumentParser
from collections import defaultdict
from requests_kerberos import HTTPKerberosAuth
import json
import requests
import subprocess
import logging
import sys
class PuppetDBNodes(object):
def __init__(self, args):
for k, v in args.items():
setattr(self, k, v)
def negociate_krb_ticket(self, keytab_path, username):
kinit = '/usr/bin/kinit'
kinit_args = [kinit, '-kt', keytab_path, username]
kinit = subprocess.Popen(kinit_args)
kinit.wait()
def destroy_krb_ticket(self):
subprocess.call(["kdestroy"])
def get_facts_puppetdb(self, apiurl, facts, hostgroup):
url ='%s/facts' % apiurl
query_base = '["and",["or",%s],["in", "certname", ["extract", "certname", ["select-facts", ["and", ["=", "name", "hostgroup"], ["~", "value", "%s"]]]]]]'
query_facts = ','.join(['["=","name","%s"]' % fact for fact in facts])
query = query_base % (query_facts, hostgroup)
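        # Illustrative example: with facts=['osfamily'] and hostgroup='web' the rendered
        # query becomes:
        #   ["and",["or",["=","name","osfamily"]],["in", "certname", ["extract", "certname",
        #    ["select-facts", ["and", ["=", "name", "hostgroup"], ["~", "value", "web"]]]]]]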
headers = {'Content-Type': 'application/json','Accept': 'application/json, version=2'}
payload = {'query': query}
logging.info("Getting facts from '%s', query: '%s'" % (url, query))
r = requests.get(url, params=payload, headers=headers, auth=HTTPKerberosAuth())
if r.status_code == requests.codes.ok:
logging.info("Request code: '%s'" % r.status_code)
return json.loads(r.text)
else:
logging.error("The request failed with code '%s'" % r.status_code)
return None
def print_puppetdb_nodes(self, apiurl, hostgroup, factlist):
'''
        Queries PuppetDB and prints out the nodes information in a supported format for Rundeck.
'''
factlist.extend(["operatingsystem", "operatingsystemrelease", "hostgroup"])
raw_data = self.get_facts_puppetdb(apiurl, factlist, hostgroup)
data = defaultdict(lambda: {})
if raw_data != None:
for entry in raw_data:
data[entry['certname']] = dict(data[entry['certname']].items() + [(entry['name'], entry['value'])])
logging.info("Printing node list using standard output...")
for node in data.keys():
print ('%s:'%node)
print (" "*4 + "hostname: " + node)
print (" "*4 + "username: root")
for fact in factlist:
if data[node].has_key(fact):
print (" "*4 + fact + ": " + data[node][fact] )
logging.info("Node list printed successfully")
else:
logging.error("Fact list empty. Check PuppetDB connection params")
def store_puppetdb_nodes(self, apiurl, hostgroup, factlist, filename):
'''
Instead of querying PuppetDB every time, saves the list of nodes on a local file
        so Rundeck can access it locally.
'''
factlist.extend(["operatingsystem", "operatingsystemrelease", "hostgroup"])
raw_data = self.get_facts_puppetdb(apiurl, factlist, hostgroup)
data = defaultdict(lambda: {})
if raw_data != None:
for entry in raw_data:
data[entry['certname']] = dict(data[entry['certname']].items() + [(entry['name'], entry['value'])])
logging.info("Saving node list in '%s'..." % filename)
with open(filename, 'w') as file:
for node in data.keys():
file.write('%s:\n'%node)
file.write(" "*4 + "hostname: " + node + '\n')
file.write(" "*4 + "username: root" + '\n')
for fact in factlist:
if data[node].has_key(fact):
file.write(" "*4 + fact + ": " + data[node][fact] + '\n')
logging.info("Node list saved successfully")
else:
logging.error("Fact list empty. Check PuppetDB connection params")
def run(self):
self.negociate_krb_ticket(self.keytab, self.username)
if self.store:
self.store_puppetdb_nodes(self.apiurl, self.hostgroup, self.factlist, self.file)
else:
self.print_puppetdb_nodes(self.apiurl, self.hostgroup, self.factlist)
def main():
parser = ArgumentParser(description="Populate Rundeck list of nodes from PuppetDB")
parser.add_argument("-v", "--verbose", help="increas
|
e output verbosity", action="store_true")
parser.add_argument("-d", "--debug", help="increase output to debug messages", action="store_true")
|
parser.add_argument("--apiurl", help="PuppetDB API url (https://<SERVER>:<PORT>/<API VERSION>)", required=True)
parser.add_argument("--hostgroup", help="Foreman hostgroup", required=True)
parser.add_argument("--keytab", help="Keytab", required=True)
parser.add_argument("--username", help="Username to connect to PuppetDB", required=True)
parser.add_argument("--factlist", nargs='*', default=[], help="List of facts to retrieve for every node")
parser.add_argument("--file", default="/tmp/nodes.yaml", help="File path where the node list info will be stored")
behaviour = parser.add_mutually_exclusive_group()
behaviour.add_argument('--store', action='store_true')
behaviour.add_argument('--print', action='store_false')
args = parser.parse_args()
    # trick to get the factlist as a list object when calling this from Rundeck
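    # e.g. (illustrative) --factlist "osfamily ipaddress" arrives from Rundeck as
    # ['osfamily ipaddress'] and is split into ['osfamily', 'ipaddress'] below.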
if len(args.factlist) == 1:
args.factlist = args.factlist[0].split()
if args.verbose:
logging.basicConfig(level=logging.INFO)
elif args.debug:
logging.basicConfig(level=logging.DEBUG)
plugin = PuppetDBNodes(args.__dict__)
plugin.run()
if __name__ == "__main__":
try:
main()
except Exception, e:
logging.error(e)
sys.exit(-1)
|
ekaradon/demihi
|
core/admin.py
|
Python
|
mit
| 348 | 0.028736 |
from django.contrib import admin
from core.models import Language
# Register your models here.
class LanguageA
|
dmin(admin.ModelAdmin):
model = Language
fieldsets = [
(''
|
, {'fields': ['name', 'locale']})
]
list_display = ['name', 'locale']
search_fields = ['name', 'locale']
ordering = ('name',)
admin.site.register(Language, LanguageAdmin)
|
nkoech/csacompendium
|
csacompendium/research/api/experimentunit/experimentunitviews.py
|
Python
|
mit
| 2,054 | 0.003408 |
from csacompendium.research.models import ExperimentUnit
from csacompendium.utils.pagination import APILimi
|
tOffsetPagination
from csacompendium.utils.permissions import IsOwnerOrReadOnly
from csacompendium.utils.viewsutils import DetailViewUpdateDelete, CreateAPIViewHook
from rest_framework.filters import DjangoFilterBackend
from rest_framework.generics import ListAPIView
from rest_framework.permissions import IsAuthenticated, IsAdminUser
from .filters import ExperimentUnitListFilter
from csacompendium.research.api.experimentun
|
it.experimentunitserializers import experiment_unit_serializers
def experiment_unit_views():
"""
Experiment unit views
:return: All experiment unit views
:rtype: Object
"""
experiment_unit_serializer = experiment_unit_serializers()
class ExperimentUnitCreateAPIView(CreateAPIViewHook):
"""
Creates a single record.
"""
queryset = ExperimentUnit.objects.all()
serializer_class = experiment_unit_serializer['ExperimentUnitDetailSerializer']
permission_classes = [IsAuthenticated]
class ExperimentUnitListAPIView(ListAPIView):
"""
        API list view. Gets all records.
"""
queryset = ExperimentUnit.objects.all()
serializer_class = experiment_unit_serializer['ExperimentUnitListSerializer']
filter_backends = (DjangoFilterBackend,)
filter_class = ExperimentUnitListFilter
pagination_class = APILimitOffsetPagination
class ExperimentUnitDetailAPIView(DetailViewUpdateDelete):
"""
Updates a record.
"""
queryset = ExperimentUnit.objects.all()
serializer_class = experiment_unit_serializer['ExperimentUnitDetailSerializer']
permission_classes = [IsAuthenticated, IsAdminUser]
lookup_field = 'slug'
return {
'ExperimentUnitListAPIView': ExperimentUnitListAPIView,
'ExperimentUnitDetailAPIView': ExperimentUnitDetailAPIView,
'ExperimentUnitCreateAPIView': ExperimentUnitCreateAPIView
}
|
cypreess/PyrateDice
|
game_server/game_server/manage.py
|
Python
|
mit
| 254 | 0 |
#!/usr/bin/env python
import os
import sys
if __name__ =
|
= "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "game_server.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv
|
)
|
Unofficial-Extend-Project-Mirror/openfoam-extend-Breeder-other-scripting-PyFoam
|
bin/pyFoamPVSnapshotMesa.py
|
Python
|
gpl-2.0
| 140 | 0.014286 |
#! /usr/bin/env python
from PyFoam.Applications.ChangePython import changePython
changePython("pv
|
python","PVSnapshot",options=["-
|
-mesa"])
|
punalpatel/st2
|
st2common/tests/unit/test_db_rule_enforcement.py
|
Python
|
apache-2.0
| 2,863 | 0 |
# Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the 'License'); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import bson
import mock
from st2common.models.db.rule_enforcement import RuleEnforcementDB
from st2common.persistence.rule_enforcement import RuleEnforcement
from st2common.transport.publishers import PoolPublisher
from st2common.exceptions.db import StackStormDBObjectNotFoundError
from st2tests import DbTestCase
SKIP_DELETE = False
@mock.patch.object(PoolPublisher, 'publish', mock.MagicMock())
class RuleEnforcementModelTest(DbTestCase):
def test_ruleenforcment_crud(self):
saved = RuleEnforcementModelTest._create_save_rule_enforcement()
retrieved = RuleEnforcement.get_by_id(saved.id)
self.assertEqual(saved.rule.ref, retrieved.rule.ref,
'Same rule enforcement was not returned.')
self.assertTrue(retrieved.enforced_at is not None)
# test update
RULE_ID = str(bson.ObjectId())
self.assertEqual(retrieved.rule.id, None)
retrieved.rule.id = RULE_ID
saved = RuleEnforcement.add_or_update(retrieved)
retrieved = RuleEnforcement.get_by_id(saved.id)
self.assertEqual(retrieved.rule.id, RULE_ID,
'Update to rule enforcement failed.')
# cleanup
RuleEnforcementModelTest._delete([retrieved])
try:
            retrieved = RuleEnforcement.get_by_id(saved.id)
except StackStormDBObjectNotFoundError:
retrieved = None
        self.assertIsNone(retrieved, 'managed to retrieve after delete.')
@staticmethod
def _create_save_rule_enforcement():
created = RuleEnforcementDB(trigger_instance_id=str(bson.ObjectId()),
rule={'ref': 'foo_pack.foo_rule',
'uid': 'rule:foo_pack:foo_rule'},
execution_id=str(bson.ObjectId()))
return RuleEnforcement.add_or_update(created)
@staticmethod
def _delete(model_objects):
global SKIP_DELETE
if SKIP_DELETE:
return
for model_object in model_objects:
model_object.delete()
|
jsafrane/openlmi-storage
|
test/test_create_lv.py
|
Python
|
lgpl-2.1
| 12,008 | 0.002498 |
#!/usr/bin/python
# -*- Coding:utf-8 -*-
#
# Copyright (C) 2012 Red Hat, Inc. All rights reserved.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# Authors: Jan Safranek <jsafrane@redhat.com>
from test_base import StorageTestBase, short_tests_only
import unittest
import pywbem
MEGABYTE = 1024 * 1024
class TestCreateLV(StorageTestBase):
"""
Test CreateOrModifyLV method.
"""
VG_CLASS = "LMI_VGStoragePool"
STYLE_GPT = 3
PARTITION_CLASS = "LMI_GenericDiskPartition"
def setUp(self):
""" Find storage service. """
super(TestCreateLV, self).setUp()
self.service = self.wbemconnection.EnumerateInstanceNames(
"LMI_StorageConfigurationService")[0]
self.part_service = self.wbemconnection.EnumerateInstanceNames(
"LMI_DiskPartitionConfigurationService")[0]
vgname = self._create_vg()
self.vg = self.wbemconnection.GetInstance(vgname)
self.lvcaps_name = self.wbemconnection.AssociatorNames(vgname,
AssocClass="LMI_LVElementCapabilities")[0]
def tearDown(self):
self._destroy_vg(self.vg.path)
super(TestCreateLV, self).tearDown()
def _create_vg(self):
"""
Create a partition and Volume Group on it and return its
CIMInstanceName.
"""
(ret, outparams) = self.wbemconnection.InvokeMethod(
"CreateOrModifyVG",
self.service,
InExtents=self.partition_names[:1],
ElementName='tstName')
self.assertEqual(ret, 0)
return outparams['pool']
def _destroy_vg(self, vgname):
""" Destroy VG and its partition. """
self.wbemconnection.DeleteInstance(vgname)
def test_create_no_pool(self):
""" Test CreateOrModifyLV without InPool."""
self.assertRaises(pywbem.CIMError, self.wbemconnection.InvokeMethod,
"CreateOrModifyLV",
self.service,
Size=pywbem.Uint64(40 * MEGABYTE))
def test_create_no_size(self):
""" Test CreateOrModifyLV without Size."""
self.assertRaises(pywbem.CIMError, self.wbemconnection.InvokeMethod,
"CreateOrModifyLV",
self.service,
InPool=self.vg.path)
def test_create_wrong_size(self):
""" Test CreateOrModifyLV with wrong Size."""
self.assertRaises(pywbem.CIMError, self.wbemconnection.InvokeMethod,
"CreateOrModifyLV",
self.service,
InPool=self.vg.path,
Size=pywbem.Uint64(0))
# TODO: test this:
# self.assertRaises(pywbem.CIMError, self.wbemconnection.InvokeMethod,
# "CreateOrModifyLV",
# self.service,
# InPool=self.vg.path,
# Size=pywbem.Uint64(self.vg['TotalManagedSpace'] * 10))
def test_create_missing_goal(self):
""" Test CreateOrModifyLV with missing Goal."""
goal_name = pywbem.CIMInstanceName(
classname="LMI_LVStorageSetting",
keybindings={
"InstanceID": "LMI:LMI_LVStorageSetting:not-existing"
})
self.assertRaises(pywbem.CIMError, self.wbemconnection.InvokeMethod,
"CreateOrModifyLV",
self.service,
InPool=self.vg.path,
Size=pywbem.Uint64(40 * MEGABYTE),
Goal=goal_name)
def _create_setting(self):
"""
Create new LMI_LVStorageSetting with default values and return
its CIMInstance.
"""
(ret, outparams) = self.wbemconnection.InvokeMethod(
"CreateLVStorageSetting",
self.lvcaps_name)
self.assertEqual(ret, 0)
setting_name = outparams['setting']
setting = self.wbemconnection.GetInstance(setting_name)
return setting
def test_create_wrong_goal(self):
""" Test CreateOrModifyLV with wrong Goal."""
setting = self._create_setting()
setting['ExtentStripeLengthMin'] = pywbem.Uint16(100)
self.wbemconnection.ModifyInstance(setting)
self.assertRaises(pywbem.CIMError, self.wbemconnection.InvokeMethod,
"CreateOrModifyLV",
self.service,
InPool=self.vg.path,
Size=pywbem.Uint64(40 * MEGABYTE),
Goal=setting.path)
self.wbemconnection.DeleteInstance(setting.path)
def test_create_no_goal(self):
""" Test CreateOrModifyLV without any Goal."""
(retval, outparams) = self.wbemconnection.InvokeMethod(
"CreateOrModifyLV",
self.service,
InPool=self.vg.path,
Size=pywbem.Uint64(10 * self.vg['ExtentSize']))
self.assertEqual(retval, 0)
self.assertEqual(len(outparams), 2)
self.assertEqual(outparams['Size'], 10 * self.vg['ExtentSize'])
lv_name = outparams['theelement']
lv = self.wbemconnection.GetInstance(lv_name)
vg_setting = self.wbemconnection.Associators(self.vg.path,
AssocClass="LMI_VGElementSettingData")[0]
lv_setting = self.wbemconnection.Associators(lv_name,
AssocClass="LMI_LVElementSettingData")[0]
self.assertEqual(
lv['BlockSize'] * lv['NumberOfBlocks'],
10 * self.vg['ExtentSize'])
self.assertEqual(
lv['NoSinglePointOfFailure'],
lv_setting['NoSinglePointOfFailure'])
self.assertEqual(
lv['NoSinglePointOfFailure'],
vg_setting['NoSinglePointOfFailure'])
self.assertEqual(
lv['DataRedundancy'],
lv_setting['DataRedundancyGoal'])
self.assertEqual(
lv['DataRedundancy'],
vg_setting['DataRedundancyGoal'])
self.assertEqual(
lv['PackageRedundancy'],
lv_setting['PackageRedundancyGoal'])
self.assertEqual(
lv['PackageRedundancy'],
vg_setting['PackageRedundancyGoal'])
self.assertEqual(
lv['ExtentStripeLength'],
lv_setting['ExtentStripeLength'])
self.assertEqual(
lv['ExtentStripeLength'],
vg_setting['ExtentStripeLength'])
# check vg is reduced
new_vg = self.wbemconnection.GetInstance(self.vg.path)
self.assertEqual(
new_vg['RemainingExtents'],
self.vg['RemainingExtents'] - 10)
self.assertEqual(
new_vg['RemainingManagedSpace'],
self.vg['RemainingManagedSpace'] - 10 * self.vg['ExtentSize'])
self.wbemconnection.DeleteInstance(lv_name)
def test_create_goal_name(self):
""" Test CreateOrModifyLV with a Goal and elementname."""
goal = self._create_setting()
(retval, outparams) = self.wbemconnection.InvokeMethod(
"CreateOrModifyLV",
self.service,
InPool=self.vg.path,
Size=pywbem.Uint64(10 * self.vg['ExtentSize']),
Goal=goal.path,
ElementName="tstNAME")
self.assertEqual(retval, 0)
self.assertEqual(len(outparams), 2)
self.assertEqual(outparams['Size'], 10 * self.vg['ExtentSize'])
lv_name = outparams['theelement']
lv = self.wbemconnecti
|
MDAnalysis/RotamerConvolveMD
|
rotcon/library.py
|
Python
|
gpl-2.0
| 5,002 | 0.003798 |
# -*- Mode: python; tab-width: 4; indent-tabs-mode:nil; coding: utf-8 -*-
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 fileencoding=utf-8
#
# Convolve MTSS rotamers with MD trajectory.
# Copyright (c) 2011-2017 Philip Fowler and AUTHORS
# Published under the GNU Public Licence, version 2 (or higher)
#
# Includes a rotamer library for MTSS at 298 K by Gunnar Jeschke,
# which is published under the same licence by permission.
"""\
Rotamer library handling
========================
:mod:`rotamers.library` contains the data (:data:`LIBRARIES`) to load
a rotamer library, represented by a :class:`RotamerLibrary`.
"""
from __future__ import absolute_import, division, print_function
import MDAnalysis, MDAnalysis.lib.util
import logging
logger = logging.getLogger("MDAnalysis.app")
import numpy as np
import os.path
import pkg_resources
#: Name of the directory in the package that contains the library data.
LIBDIR = "data"
# This could be turned into a YAML file.
#: Registry of libraries, indexed by name.
LIBRARIES = {
'MTSSL 298K 2011': {
'topology': "rotamer1_R1A_298K_2011.pdb",
'ensemble': "rotamer1_R1A_298K_2011.dcd",
'populations': "R1A_298K_populations_2011.dat",
'author': "Gunnar Jeschke",
'licence': "GPL v2",
'citation': "Polyhach Y, Bordignon E, Jeschke G. "
"Phys Chem Chem Phys. 2011; 13(6):2356-2366. doi: 10.1039/c0cp01865a",
},
'MTSSL 298K 2015': {
'topology': "rotamer1_R1A_298K_2015.pdb",
'ensemble': "rotamer1_R1A_298K_2015.dcd",
'populations': "R1A_298K_populations_2015.dat",
'author': "Gunnar Jeschke",
'licence': "GPL v2",
'citation': "Polyhach Y, Bordignon E, Jeschke G. "
"Phys Chem Chem Phys. 2011; 13(6):2356-2366. doi: 10.1039/c0cp01865a",
'information': "updated version of the MTSSL rotamer library from 2015"
},
}
def find_file(filename, pkglibdir=LIBDIR):
"""Return full path to file *filename*.
1) If the *filename* exists, return rooted canonical path.
2) Otherwise, create a path to file in the installed *pkglibdir*.
.. note::
A file name is *always* returned, even if the file does not
exist (because this is how :func:`pkg_resources.resource_filename`
works).
"""
if os.path.exists(filename):
return MDAnalysis.lib.util.realpath(filename)
return pkg_resources.resource_filename(__name__, os.path.join(pkglibdir, filename))
class RotamerLibrary(object):
"""Rotamer library
The library makes available the attributes :attr:`rotamers`, and :attr:`weights`.
.. attribute:: rotamers
:class:`MDAnalysis.core.AtomGroup.Universe` instance that
records all rotamers as a trajectory
.. attribute:: weights
       NumPy array containing the population of each rotamer.
.. attribute:: name
Name of the library.
.. attribute:: lib
Dictionary containing the file names and meta data for the library :attr:`name`.
"""
def __init__(self, name):
"""RotamerLibrary(name)
:Arguments:
*name*
name of the library (must exist in the registry of libraries, :data:`LIBRARIES`)
"""
self.name = name
self.lib = {}
try:
self.lib.update(LIBRARIES[name]) # make a copy
except KeyError:
raise ValueError("No rotamer library with name {0} known: must be one of {1}".format(name,
LIBRARIES.keys()))
logger.info("Using rotamer library '{0}' by {1[author]}".format(self.name, self.lib))
logger.info("Please cite: {0[citation]}".format(self.lib))
# adjust paths
for k in 'ensemble', 'topology', 'populations':
self.lib[k] = find_file(self.lib[k])
logger.debug("[rotamers] ensemble = {0[ensemble]} with topology = {0[topology]}".format(self.lib))
logger.debug("[rotamers] populations = {0[populations]}".format(self.lib))
        self.rotamers = MDAnalysis.Universe(self.lib['topology'], self.lib['ensemble'])
        self.weights = self.read_rotamer_weights(self.lib['populations'])
if len(self.rotamers.trajectory) != len(self.weights):
err_msg = "Discrepancy between number of rotamers ({0}) and weights ({1})".format(
len(self.rotamers.trajectory), len(self.weights))
logger.critical(err_msg)
raise ValueError(err_msg)
def read_rotamer_weights(self, filename):
"""read in the rotamer weights from *filename*
There is one weight per conformer (frame) in the trajectory.
"""
return np.loadtxt(filename)
def __repr__(self):
return "<RotamerLibrary '{0}' by {1} with {2} rotamers>".format(self.name, self.lib['author'],
len(self.weights))
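# Hypothetical usage sketch, not part of the original file: it relies only on the
# attributes documented above (rotamers, weights) and on 'MTSSL 298K 2011' being a
# registered key of LIBRARIES.
lib = RotamerLibrary('MTSSL 298K 2011')
print(lib)  # e.g. <RotamerLibrary 'MTSSL 298K 2011' by Gunnar Jeschke with N rotamers>
for ts, weight in zip(lib.rotamers.trajectory, lib.weights):
    # each frame 'ts' is one rotamer conformer, 'weight' its population
    pass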
|
nilp0inter/cpe
|
cpe/cpeset2_2.py
|
Python
|
lgpl-3.0
| 3,512 | 0.000571 |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
"""
This file is part of cpe package.
This module is an implementation of the name matching
algorithm in accordance with version 2.2 of CPE (Common Platform
Enumeration) specification.
Copyright (C) 2013 Alejandro Galindo García, Roberto Abdelkader Martínez Pérez
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
For any problems using the cpe package, or general questions and
feedback about it, please contact:
- Alejandro Galindo García: galindo.garcia.alejandro@gmail.com
- Roberto Abdelkader Martínez Pérez: robertomartinezp@gmail.com
"""
from .cpe import CPE
from .cpeset import CPESet
class CPESet2_2(CPESet):
"""
Represents a set of CPE Names.
This class allows:
- create set of CPE Names.
- match a CPE element against a set of CPE Names.
"""
###############
# CONSTANTS #
###############
#: Version of CPE set
VERSION = "2.2"
####################
# OBJECT METHODS #
####################
def append(self, cpe):
"""
Adds a CPE Name to the set if not already.
        :param CPE cpe: CPE Name to store in set
:returns: None
:exception: ValueError - invalid version of CPE Name
TEST:
>>> from .cpeset2_2 import CPESet2_2
>>> from .cpe2_2 import CPE2_2
>>> uri1 = 'cpe:/h:hp'
>>> c1 = CPE2_2(uri1)
>>> s = CPESet2_2()
>>> s.append(c1)
"""
if cpe.VERSION != CPE.VERSION_2_2:
errmsg = "CPE Name version {0} not valid, version 2.2 expected".format(
cpe.VERSION)
raise ValueError(errmsg)
for k in self.K:
if cpe.cpe_str == k.cpe_str:
return None
self.K.append(cpe)
def name_match(self, cpe):
"""
Accepts a set of known instances of CPE Names and a candidate CPE Name,
and returns 'True' if the candidate can be shown to be
an instance based on the content of the known instances.
Otherwise, it returns 'False'.
:param CPESet self: A set of m known CPE Names K = {K1, K2, …, Km}.
:param CPE cpe: A candidate CPE Name X.
:returns: True if X matches K, otherwise False.
:rtype: boolean
TEST: matching with ANY values explicit
>>> from .cpe2_2 import CPE2_2
>>> uri1 = 'cpe:/o:microsoft:windows:vista'
>>> uri2 = 'cpe:/o:cisco:ios:12.3:enterprise'
>>> c1 = CPE2_2(uri1)
>>> c2 = CPE2_2(uri2)
>>> s = CPESet2_2()
>>> s.append(c1)
>>> s.append(c2)
>>> uri3 = 'cpe:/o:microsoft::vista'
>>> c3 = CPE2_2(uri3)
>>> s.name_match(c3)
True
"""
return super(CPESet2_2, self).name_match(cpe)
if __name__ == "__main__":
import doctest
doctest.testmod()
doctest.testfile("tests/testfile_cpeset2_2.txt")
|
seecr/meresco-components
|
meresco/components/http/pathrename.py
|
Python
|
gpl-2.0
| 1,768 | 0.003394 |
## begin license ##
#
# "Meresco Components" are components to build searchengines, repositories
# and archives, based on "Meresco Core".
#
# Copyright (C) 2007-2009 SURF Foundation. http://www.surf.nl
# Copyright (C) 2007 SURFnet. http://www.surfnet.nl
# Copyright (C) 2007-2010 Seek You Too (CQ2) http://www.cq2.nl
# Copyright (C) 2007-2009 Stichting Kennisnet Ict op school. http://www.kennisnetictopschool.nl
# Copyright (C) 2012, 2017 Seecr (Seek You Too B.V.) http://seecr.nl
# Copyright (C) 2017 SURF http://www.surf.nl
# Copyright (C) 2017 Stichting Kennisnet http://www.kennisnet.nl
#
# This file is part of "Meresco Components"
#
# "Meresco Components" is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# "Meresco Components" is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with "Meresco Components"; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
## end license ##
from meresco.core import Transparent
class PathRename(Transparent):
def __init__(self, rename):
Transparent.__init__(self)
self._rename = rename
def handleRequest(self, path, *args, **kwargs):
originalPath = kwargs.pop('originalPath', path)
yield self.all.handleRequest(path=self._rename(path), originalPath=originalPath, *args, **kwargs)
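# Hypothetical usage sketch, not part of the original file: PathRename accepts any
# callable mapping the incoming path to the path passed on to its observers; the
# prefix-stripping lambda below is an illustrative assumption.
rename = PathRename(lambda path: path.replace('/old-prefix', '', 1))
# handleRequest() then forwards the rewritten path while keeping the request's
# original path available to downstream components as 'originalPath'.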
|
littlecodersh/EasierLife
|
Plugins/ChatLikeCMD/ChatLikeCMD.py
|
Python
|
mit
| 7,974 | 0.007399 |
#coding=utf8
import thread, time, sys, os, platform
try:
import termios, tty
termios.tcgetattr, termios.tcsetattr
import threading
OS = 'Linux'
except (ImportError, AttributeError):
try:
import msvcrt
OS = 'Windows'
except ImportError:
raise Exception('Mac is currently not supported')
OS = 'Mac'
else:
getch = msvcrt.getwch
else:
def fn():
try:
fd = sys.stdin.fileno()
old_settings = termios.tcgetattr(fd)
tty.setraw(fd)
ch = sys.stdin.read(1)
except:
termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
raise Exception
termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
return ch
getch = fn
CMD_HISTORY = 30
class ChatLikeCMD():
def __init__(self, header = 'LittleCoder', symbol = '>', inPip = None, inputMaintain = False):
self.strBuff = []
self.cmdBuff = []
self.historyCmd = -1
self.cursor = 0
self.inPip = [] if inPip == None else inPip
self.outPip = []
self.isLaunch = False
self.isPause = False
self.header = header
self.symbol = symbol
self.inputMaintain = inputMaintain
def reprint_input(self):
sys.stdout.write(self.header + self.symbol)
if self.strBuff:
for i in self.strBuff: sys.stdout.write(i)
sys.stdout.flush()
def getch(self):
c = getch()
return c if c != '\r' else '\n'
def get_history_command(self, direction):
if direction == 'UP':
if self.historyCmd < CMD_HISTORY - 1 and self.historyCmd < len(self.cmdBuff) - 1: self.historyCmd += 1
else:
if self.historyCmd == 0: return ''
if self.historyCmd > 0: self.historyCmd -= 1
if -1 < self.historyCmd < len(self.cmdBuff): return self.cmdBuff[self.historyCmd]
def output_command(self, s):
self.outPip.append(s if isinstance(s, unicode) else s.decode(sys.stdin.encoding))
if len(self.cmdBuff) >= CMD_HISTORY: self.cmdBuff = self.cmdBuff[::-1].pop()[::-1]
self.cmdBuff.append(s)
def print_thread(self):
while self.isLaunch:
if self.inPip:
sys.stdout.write('\r' + ' ' * 50 + '\r')
sys.stdout.flush()
                print self.inPip.pop()
# linux special
                sys.stdout.write('\r')
sys.stdout.flush()
self.reprint_input()
time.sleep(0.01)
def fast_input_test(self):
timer = threading.Timer(0.001, thread.interrupt_main)
c = None
try:
timer.start()
c = getch()
except:
pass
timer.cancel()
return c
def process_direction_char(self, c):
if OS == 'Windows':
if ord(c) == 72:
c = 'A'
elif ord(c) == 80:
c = 'B'
elif ord(c) == 77:
c = 'C'
elif ord(c) == 75:
c = 'D'
if ord(c) == 68: # LEFT
self.process_char('\b')
return
# cursor bugs
if self.cursor > 0:
if OS == 'Windows':
sys.stdout.write(chr(224) + chr(75))
else:
sys.stdout.write(chr(27) + '[C')
self.cursor -= 1
elif ord(c) == 67: # RIGHT
return
# cursor bugs
if self.cursor < len(self.strBuff):
if OS == 'Windows':
sys.stdout.write(chr(224) + chr(77))
else:
sys.stdout.write(chr(27) + '[D')
self.cursor += 1
elif ord(c) == 65: # UP
hc = self.get_history_command('UP')
if not hc is None:
self.strBuff = [i for i in hc]
self.cursor = len(hc)
sys.stdout.write('\r' + ' ' * 50 + '\r')
self.reprint_input()
elif ord(c) == 66: # DOWN
hc = self.get_history_command('DOWN')
if not hc is None:
self.strBuff = [i for i in hc]
self.cursor = len(hc)
sys.stdout.write('\r' + ' ' * 50 + '\r')
self.reprint_input()
else:
raise Exception(c)
def process_char(self, c):
if ord(c) == 27: # Esc
if OS == 'Linux':
fitc1 = self.fast_input_test()
if ord(fitc1) == 91:
fitc2 = self.fast_input_test()
if 65 <= ord(fitc2) <= 68:
self.process_direction_char(fitc2)
return
sys.stdout.write('\r' + ' ' * 50 + '\r')
sys.stdout.flush()
self.reprint_input()
self.outPip.append(c)
time.sleep(0.02)
if 'fitc1' in dir():
self.process_char(fitc1)
self.cursor += 1
if 'fitc2' in dir():
self.process_char(fitc2)
self.cursor += 1
elif ord(c) == 3: # Ctrl+C
self.stop()
self.isPause = True
if raw_input('Exit?(y) ') == 'y':
sys.stdout.write('Command Line Exit')
else:
self.start()
self.isPause = False
elif ord(c) in (8, 127): # Backspace
if self.strBuff:
if ord(self.strBuff[-1]) < 128:
sys.stdout.write('\b \b')
else:
sys.stdout.write('\b\b \b')
if OS == 'Linux':
self.strBuff.pop()
self.strBuff.pop()
self.strBuff.pop()
self.cursor -= 1
elif c == '\n':
if self.strBuff:
if self.inputMaintain:
sys.stdout.write(c)
else:
sys.stdout.write('\r' + ' ' * 50 + '\r')
sys.stdout.flush()
self.reprint_input()
self.output_command(''.join(self.strBuff))
self.strBuff = []
self.historyCmd = -1
elif ord(c) == 224: # Windows direction
if OS == 'Windows':
direction = self.getch()
self.process_direction_char(direction)
else:
sys.stdout.write(c)
sys.stdout.flush()
self.strBuff.append(c)
self.cursor += 1
def command_thread(self):
c = None
while self.isLaunch:
c = self.getch()
self.process_char(c)
time.sleep(0.01)
def start(self):
self.isLaunch = True
thread.start_new_thread(self.print_thread, ())
self.reprint_input()
thread.start_new_thread(self.command_thread, ())
def stop(self):
sys.stdout.write('\r' + ' ' * 50 + '\r')
sys.stdout.flush()
self.isLaunch = False
def print_line(self, msg = None):
self.inPip.append(msg)
def clear(self):
os.system('cls' if platform.system() == 'Windows' else 'clear')
self.reprint_input()
def get_command_pip(self):
return self.outPip
def set_header(self, header):
self.header = header
if __name__ == '__main__':
c = ChatLikeCMD()
s = c.get_command_pip()
c.start()
def loopinput(c):
while True:
c.print_line('LOOP INPUT......')
time.sleep(3)
thread.start_new_thread(loopinput, (c,))
while c.isLaunch or c.isPause:
if s:
c.print_line(s.pop())
time.sleep(0.01)
|
raycarnes/project
|
project_issue_baseuser/__openerp__.py
|
Python
|
agpl-3.0
| 1,661 | 0 |
# -*- coding: utf-8 -*-
##############################################################################
#
# Daniel Reis, 2013
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Projects Issue extensions for user roles',
'version': '1.0',
'category': 'Project Management',
    'summary': 'Extend Project user roles to support more complex use cases',
'description': """\
Also implements the Project user role extensions to the Project Issue
documents.
This module is automatically installed if the Issue Tracker is also installed.
Please refer to the ``project_baseuser`` module for more details.
""",
'author': "Daniel Reis,Odoo Community Association (OCA)",
'license': 'AGPL-3',
'depends': [
'project_issue',
'project_baseuser',
],
'data': [
'security/ir.model.access.csv',
'security/project_security.xml',
],
'installable': True,
'auto_install': True,
}
|
tommyip/zulip
|
zerver/management/commands/create_stream.py
|
Python
|
apache-2.0
| 919 | 0.002176 |
from argparse import ArgumentParser
from typing import Any
from zerver.lib.actions import create_stream_if_needed
from zerver.lib.management import ZulipBaseCommand
class Command(ZulipBaseCommand):
help = """Create a stream, and subscribe all active users (excluding bots).
This should be used for TESTING only, unless you understand the limitations of
the command."""
def add_arguments(self, parser: ArgumentParser) -> None:
self.add_realm_args(parser, True, "realm in which to create the stream")
parser.add_argument('stream_name', metavar='<stream name>', type=str,
help='name of stream to create')
def handle(self, *args: Any, **options: str) -> None:
realm = self.get_realm(options)
        assert realm is not None  # Should be ensured by parser
        stream_name = options['stream_name']
create_stream_if_needed(realm, stream_name)
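# Hypothetical invocation sketch, not part of the original file. Run as a Django
# management command from a Zulip checkout; the realm flag comes from add_realm_args
# (assumed to be -r/--realm) and the realm and stream names are illustrative:
#
#   ./manage.py create_stream -r zulip "test stream"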
|
anhstudios/swganh
|
data/scripts/templates/object/building/poi/shared_lok_nymshenchman_medium.py
|
Python
|
mit
| 454 | 0.046256 |
#### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Building()
result.template = "object/building/poi/shared_lok_nymshenchman_medium.iff"
result.attribute_template_id = -1
result.stfName("poi_n","base_poi_building")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result
|