repo_name (stringlengths 5-100) | path (stringlengths 4-231) | language (stringclasses, 1 value) | license (stringclasses, 15 values) | size (int64, 6-947k) | score (float64, 0-0.34) | prefix (stringlengths 0-8.16k) | middle (stringlengths 3-512) | suffix (stringlengths 0-8.17k)
---|---|---|---|---|---|---|---|---

PhonologicalCorpusTools/PolyglotDB | tests/test_acoustics_vot.py | Python | mit | 1,599 | 0.005003
import os
from decimal import Decimal
import pytest
from polyglotdb import CorpusContext
@pytest.mark.acoustic
def test_analyze_vot(acoustic_utt_config, vot_classifier_path):
pytest.skip()
with CorpusContext(acoustic_utt_config) as g:
g.reset_acoustics()
g.reset_vot()
stops = ['p', 't', 'k'] # , 'b', 'd', 'g']
g.encode_class(stops, 'stops')
g.analyze_vot(stop_label="stops",
classifier=vot_classifier_path,
vot_min=15,
vot_max=250,
window_min=-30,
window_max=30)
q = g.query_graph(g.phone).filter(g.phone.label.in_(stops)).columns(g.phone.label, g.phone.begin, g.phone.end,
g.phone.id, g.phone.vot.begin,
g.phone.vot.end).order_by(g.phone.begin)
        p_returns = q.all()
p_true = [(1.593, 1.649), (1.832, 1.848), (1.909, 1.98), (2.116, 2.137), (2.687, 2.703),
(2.829, 2.8440000000000003), (2.934, 2.9490000000000003), (3.351, 3.403), (5.574, 5.593999999999999),
(6.207, 6.2219999999999995), (6.736, 6.755999999999999), (7.02, 7.0489999999999995), (9.255, 9.287),
(9.498, 9.514999999999999), (11.424, 11.479999999999999),
(13.144, 13.206), (13.498, 13.523),
(25.125, 25.14)]
for t, r in zip(p_true, p_returns):
assert (r["node_vot_begin"][0], r["node_vot_end"][0]) == t

baocongchen/M101P-MONGODB-FOR-PYTHON-DEVELOPERS | week4/hw4-4/hw4-4.py | Python | mit | 595 | 0.021849
#In this problem you will analyze a profile log taken from a mongoDB instance. To start, please download sysprofile.json
#from Download Handout link and import it with the following command:
#
#mongoimport -d m101 -c profile < sysprofile.json
#Now query the profile data, looking for all queries to the students collection in the database school2, sorted in order
#of decreasing latency. What is the latency of the longest running operation to the collection, in milliseconds?
db.profile.find().pretty()
db.profile.ensureIndex({millis:-1})
db.profile.find().sort({millis:-1}).pretty().limit(1)
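
The same lookup done from Python with pymongo, as an illustrative sketch (not
part of the homework file above; it assumes a local mongod with sysprofile.json
imported into m101.profile as described in the comments):

from pymongo import MongoClient, DESCENDING

client = MongoClient()
# Slowest operation overall; add a filter such as {'ns': 'school2.students'}
# to restrict the search to the students collection named in the question.
slowest = client.m101.profile.find().sort('millis', DESCENDING).limit(1)
print(next(slowest)['millis'])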

jiahaoliang/group-based-policy | gbpservice/tests/contrib/gbpfunctests/testcases/tc_gbp_ri_func_4.py | Python | apache-2.0 | 3,891 | 0.000257
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import commands
import logging
import sys
from libs import config_libs
from libs import utils_libs
from libs import verify_libs
def main():
# Run the Testcase:
test = test_gbp_ri_func_4()
test.run()
class test_gbp_ri_func_4(object):
# Initialize logging
logging.basicConfig(
format='%(asctime)s [%(levelname)s] %(name)s - %(message)s',
level=logging.WARNING)
_log = logging.getLogger(__name__)
cmd = 'rm /tmp/test_gbp_ri_func_4.log'
commands.getoutput(cmd)
hdlr = logging.FileHandler('/tmp/test_gbp_ri_func_4.log')
formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
hdlr.setFormatter(formatter)
_log.addHandler(hdlr)
_log.setLevel(logging.INFO)
_log.setLevel(logging.DEBUG)
def __init__(self):
"""
Init def
"""
self.gbpcfg = config_libs.Gbp_Config()
self.gbpverify = verify_libs.Gbp_Verify()
self.spec_name = 'demo_sc_spec'
self.fw_name = 'demo_fw'
self.lb_name = 'demo_lb'
def cleanup(self, fail=0):
for obj in ['node', 'spec']:
self.gbpcfg.gbp_del_all_anyobj(obj)
if fail != 0:
self._log.info("\n## TESTCASE_GBP_RI_FUNC_4: FAILED")
utils_libs.report_results('test_gbp_ri_func_4', 'test_results.txt')
sys.exit(1)
def run(self):
self._log.info(
"\n## TESTCASE_GBP_RI_FUNC_4: RESOURCE INTEGRITY AMONG "
"SERVICE-CHAIN OBJECTS")
# Testcase work-flow starts
# ============ ALL POLICY OBJECTS ARE TO BE CREATED AND VERIFIED =
self._log.info("\n## Step 1: Create Service Chain Nodes LB & FW ##\n")
lb_uuid = self.gbpcfg.gbp_sc_cfg_all(1, 'node', self.lb_name)
if lb_uuid == 0:
self._log.info(
"# Step 1: Create Service Chain Loadbalance Node == Failed")
self.cleanup(fail=1)
fw_uuid = self.gbpcfg.gbp_sc_cfg_all(1, 'node', self.lb_name)
if fw_uuid == 0:
self._log.info(
"# Step 1A: Create Service Chain Firewall Node == Failed")
self.cleanup(fail=1)
######
self._log.info("\n## Step 2: Create ServiceChain Spec ##\n")
        # Ensure that node names or node uuids passed as the value of the
        # 'nodes' param are in the order FW then LB; this order is required
        # by GBP.
spec_uuid = self.gbpcfg.gbp_sc_cfg_all(
1, 'spec', self.spec_name, nodes='%s %s' %
(fw_uuid, lb_uuid))
if spec_uuid == 0:
self._log.info("# Step 2: Create ServiceChain Spec == Failed")
self.cleanup(fail=1)
######
self._log.info("\n## Step 3: Delete the Service Chain Nodes ##\n")
cnt = 0
for nodeid in [lb_uuid, fw_uuid]:
if self.gbpcfg.gbp_sc_cfg_all(0, 'node', nodeid) != 0:
self._log.info(
"# Step 4: Deletion of ServiceChain did NOT fail")
cnt += 1
if cnt > 0:
self.cleanup(fail=1)
else:
self._log.info("\n## TESTCASE_GBP_RI_FUNC_4: PASSED")
self.cleanup()
utils_libs.report_results('test_gbp_ri_func_4', 'test_results.txt')
sys.exit(1)
if __name__ == '__main__':
main()

Pirolf/Cabot | categorize.py | Python | mit | 2,623 | 0.0122
import numpy as np
import scimath as sm
import matplotlib.pyplot as plt
import json
import yaml
import sys
import os
# Make sure that caffe is on the python path:
configStream = open("FluffyHaiiro.yaml", "r")
config = yaml.load(configStream)
caffe_root = config.get(':caffe_root_path')
sys.path.insert(0, caffe_root + 'python')
import caffe
plt.rcParams['figure.figsize'] = (10, 10)
plt.rcParams['image.interpolation'] = 'nearest'
plt.rcParams['image.cmap'] = 'gray'
if not os.path.isfile(caffe_root + '/models/bvlc_reference_caffenet/bvlc_reference_caffenet.caffemodel'):
#print("Downloading pre-trained CaffeNet model...")
os.system('python ../scripts/download_model_binary.py ../models/bvlc_reference_caffenet')
caffe.set_mode_cpu()
net = caffe.Net(caffe_root + '/models/bvlc_reference_caffenet/deploy.prototxt',
caffe_root + '/models/bvlc_reference_caffenet/bvlc_reference_caffenet.caffemodel',
caffe.TEST)
# input preprocessing: 'data' is the name of the input blob == net.inputs[0]
transformer = caffe.io.Transformer({'data': net.blobs['data'].data.shape})
transformer.set_transpose('data', (2,0,1))
transformer.set_mean('data', np.load(caffe_root + '/python/caffe/imagenet/ilsvrc_2012_mean.npy').mean(1).mean(1)) # mean pixel
transformer.set_raw_scale('data', 255) # the reference model operates on images in [0,255] range instead of [0,1]
transformer.set_channel_swap('data', (2,1,0)) # the reference model has channels in BGR order instead of RGB
# set net to batch size of 50
net.blobs['data'].reshape(50,3,227,227)
#net.blobs['data'].data[...] = transformer.preprocess('data', caffe.io.load_image(caffe_root + 'examples/images/cat.jpg'))
imagePath = ""
if len(sys.argv) == 2:
imagePath = str(sys.argv[1])
else:
imagePath = '/home/mizu/Downloads/sashimi.jpg'
net.blobs['data'].data[...] = transformer.preprocess('data', caffe.io.load_image(imagePath))
out = net.forward()
#print("Predicted class is #{}.".format(out['prob'].argmax()))
imagenet_labels_filename = caffe_root + '/data/ilsvrc12/synset_words.txt'
try:
labels = np.loadtxt(imagenet_labels_filename, str, delimiter='\t')
except:
os.system('. ../data/ilsvrc12/get_ilsvrc_aux.sh')
labels = np.loadtxt(imagenet_labels_filename, str, delimiter='\t')
# sort top k predictions from softmax output
top_k = net.blobs['prob'].data[0].flatten().argsort()[-1:-6:-1]
#write to a file?
#commaSeparatedResult = labels[top_k]
#for label in commaSeparatedResult:
# print label
print json.dumps(labels[top_k].tolist())
# CPU mode: how much time used
# net.forward() # call once for allocation
# %timeit net.forward()
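# Note (added for clarity, not in the original script): %timeit is an IPython
# magic, so the commented timing lines above only work inside an IPython
# session; a plain Python script would use the standard timeit module instead.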

jorisvandenbossche/pandas | pandas/tests/io/json/test_json_table_schema.py | Python | bsd-3-clause | 28,054 | 0.000857
"""Tests for Table Schema integration."""
from collections import OrderedDict
import json
import numpy as np
import pytest
from pandas.core.dtypes.dtypes import (
CategoricalDtype,
DatetimeTZDtype,
PeriodDtype,
)
import pandas as pd
from pandas import DataFrame
import pandas._testing as tm
from pandas.io.json._table_schema import (
as_json_table_type,
build_table_schema,
convert_json_field_to_pandas_type,
convert_pandas_type_to_json_field,
set_default_names,
)
class TestBuildSchema:
def setup_method(self, method):
self.df = DataFrame(
{
"A": [1, 2, 3, 4],
"B": ["a", "b", "c", "c"],
"C": pd.date_range("2016-01-01", freq="d", periods=4),
"D": pd.timedelta_range("1H", periods=4, freq="T"),
},
index=pd.Index(range(4), name="idx"),
)
def test_build_table_schema(self):
result = build_table_schema(self.df, version=False)
expected = {
"fields": [
{"name": "idx", "type": "integer"},
{"name": "A", "type": "integer"},
{"name": "B", "type": "string"},
{"name": "C", "type": "datetime"},
{"name": "D", "type": "duration"},
],
"primaryKey": ["idx"],
}
assert result == expected
result = build_table_schema(self.df)
assert "pandas_version" in result
def test_series(self):
s = pd.Series([1, 2, 3], name="foo")
result = build_table_schema(s, version=False)
expected = {
"fields": [
{"name": "index", "type": "integer"},
{"name": "foo", "type": "integer"},
],
"primaryKey": ["index"],
}
assert result == expected
result = build_table_schema(s)
assert "pandas_version" in result
def test_series_unnamed(self):
result = build_table_schema(pd.Series([1, 2, 3]), version=False)
expected = {
"fields": [
{"name": "index", "type": "integer"},
{"name": "values", "type": "integer"},
],
"primaryKey": ["index"],
}
assert result == expected
def test_multiindex(self):
        df = self.df.copy()
idx = pd.MultiIndex.from_product([("a", "b"), (1, 2)])
df.index = idx
result = build_table_schema(df, version=False)
expected = {
"fields": [
{"name": "level_0",
|
"type": "string"},
{"name": "level_1", "type": "integer"},
{"name": "A", "type": "integer"},
{"name": "B", "type": "string"},
{"name": "C", "type": "datetime"},
{"name": "D", "type": "duration"},
],
"primaryKey": ["level_0", "level_1"],
}
assert result == expected
df.index.names = ["idx0", None]
expected["fields"][0]["name"] = "idx0"
expected["primaryKey"] = ["idx0", "level_1"]
result = build_table_schema(df, version=False)
assert result == expected
class TestTableSchemaType:
@pytest.mark.parametrize("int_type", [int, np.int16, np.int32, np.int64])
def test_as_json_table_type_int_data(self, int_type):
int_data = [1, 2, 3]
assert as_json_table_type(np.array(int_data, dtype=int_type).dtype) == "integer"
@pytest.mark.parametrize("float_type", [float, np.float16, np.float32, np.float64])
def test_as_json_table_type_float_data(self, float_type):
float_data = [1.0, 2.0, 3.0]
assert (
as_json_table_type(np.array(float_data, dtype=float_type).dtype) == "number"
)
@pytest.mark.parametrize("bool_type", [bool, np.bool_])
def test_as_json_table_type_bool_data(self, bool_type):
bool_data = [True, False]
assert (
as_json_table_type(np.array(bool_data, dtype=bool_type).dtype) == "boolean"
)
@pytest.mark.parametrize(
"date_data",
[
pd.to_datetime(["2016"]),
pd.to_datetime(["2016"], utc=True),
pd.Series(pd.to_datetime(["2016"])),
pd.Series(pd.to_datetime(["2016"], utc=True)),
pd.period_range("2016", freq="A", periods=3),
],
)
def test_as_json_table_type_date_data(self, date_data):
assert as_json_table_type(date_data.dtype) == "datetime"
@pytest.mark.parametrize("str_data", [pd.Series(["a", "b"]), pd.Index(["a", "b"])])
def test_as_json_table_type_string_data(self, str_data):
assert as_json_table_type(str_data.dtype) == "string"
@pytest.mark.parametrize(
"cat_data",
[
pd.Categorical(["a"]),
pd.Categorical([1]),
pd.Series(pd.Categorical([1])),
pd.CategoricalIndex([1]),
pd.Categorical([1]),
],
)
def test_as_json_table_type_categorical_data(self, cat_data):
assert as_json_table_type(cat_data.dtype) == "any"
# ------
# dtypes
# ------
@pytest.mark.parametrize("int_dtype", [int, np.int16, np.int32, np.int64])
def test_as_json_table_type_int_dtypes(self, int_dtype):
assert as_json_table_type(int_dtype) == "integer"
@pytest.mark.parametrize("float_dtype", [float, np.float16, np.float32, np.float64])
def test_as_json_table_type_float_dtypes(self, float_dtype):
assert as_json_table_type(float_dtype) == "number"
@pytest.mark.parametrize("bool_dtype", [bool, np.bool_])
def test_as_json_table_type_bool_dtypes(self, bool_dtype):
assert as_json_table_type(bool_dtype) == "boolean"
@pytest.mark.parametrize(
"date_dtype",
[
np.datetime64,
np.dtype("<M8[ns]"),
PeriodDtype("D"),
DatetimeTZDtype("ns", "US/Central"),
],
)
def test_as_json_table_type_date_dtypes(self, date_dtype):
# TODO: datedate.date? datetime.time?
assert as_json_table_type(date_dtype) == "datetime"
@pytest.mark.parametrize("td_dtype", [np.timedelta64, np.dtype("<m8[ns]")])
def test_as_json_table_type_timedelta_dtypes(self, td_dtype):
assert as_json_table_type(td_dtype) == "duration"
@pytest.mark.parametrize("str_dtype", [object]) # TODO
def test_as_json_table_type_string_dtypes(self, str_dtype):
assert as_json_table_type(str_dtype) == "string"
def test_as_json_table_type_categorical_dtypes(self):
# TODO: I think before is_categorical_dtype(Categorical)
# returned True, but now it's False. Figure out why or
# if it matters
assert as_json_table_type(pd.Categorical(["a"]).dtype) == "any"
assert as_json_table_type(CategoricalDtype()) == "any"
class TestTableOrient:
def setup_method(self, method):
self.df = DataFrame(
{
"A": [1, 2, 3, 4],
"B": ["a", "b", "c", "c"],
"C": pd.date_range("2016-01-01", freq="d", periods=4),
"D": pd.timedelta_range("1H", periods=4, freq="T"),
"E": pd.Series(pd.Categorical(["a", "b", "c", "c"])),
"F": pd.Series(pd.Categorical(["a", "b", "c", "c"], ordered=True)),
"G": [1.0, 2.0, 3, 4.0],
"H": pd.date_range("2016-01-01", freq="d", periods=4, tz="US/Central"),
},
index=pd.Index(range(4), name="idx"),
)
def test_build_series(self):
s = pd.Series([1, 2], name="a")
s.index.name = "id"
result = s.to_json(orient="table", date_format="iso")
result = json.loads(result, object_pairs_hook=OrderedDict)
assert "pandas_version" in result["schema"]
result["schema"].pop("pandas_version")
fields = [{"name": "id", "type": "integer"}, {"name": "a", "type": "integer"}]
schema = {"fields": fields, "primaryKey": ["id"]}
expected = OrderedDict(
[
("schema", schema),
(
"data",
[

kevinarpe/kevinarpe-rambutan3 | rambutan3/check_args/set/RSetMatcher.py | Python | gpl-3.0 | 347 | 0
from rambutan3 import RArgs
from rambutan3.check_args.base.RInstanceMatcher import RInstanceMatcher
from rambutan3.check_args.set.RSetEnum import RSetEnum
class RSetMatcher(RInstanceMatcher):
def __init__(self, set_enum: RSetEnum):
RArgs.check_is_instance(set_enum, RSetEnum, "set_enum")
super().__init__(*(set_enum.value))

cpacia/Subspace | subspace/pyelliptic/hash.py | Python | mit | 1,739 | 0
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2011 Yann GUIBET <yannguibet@gmail.com>
# See LICENSE for details.
from .openssl import OpenSSL
# For python3
def _equals_bytes(a, b):
if len(a) != len(b):
return False
result = 0
for x, y in zip(a, b):
result |= x ^ y
return result == 0
def _equals_str(a, b):
if len(a) != len(b):
return False
result = 0
for x, y in zip(a, b):
result |= ord(x) ^ ord(y)
return result == 0
def equals(a, b):
if isinstance(a, str):
return _equals_str(a, b)
else:
return _equals_bytes(a, b)
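# Note (added for clarity, not in the original module): the comparison helpers
# above OR together the XOR of every byte/character pair before checking the
# result, so the running time does not depend on where the inputs first differ
# (a constant-time comparison that avoids early-exit timing leaks).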
def hmac_sha256(k, m):
"""
Compute the key and the message with HMAC SHA5256
"""
key = OpenSSL.malloc(k, len(k))
d = OpenSSL.malloc(m, len(m))
md = OpenSSL.malloc(0, 32)
i = OpenSSL.pointer(OpenSSL.c_int(0))
OpenSSL.HMAC(OpenSSL.EVP_sha256(), key, len(k), d, len(m), md, i)
return md.raw
def hmac_sha512(k, m):
"""
Compute the key and the message with HMAC SHA512
"""
key = OpenSSL.malloc(k, len(k))
d = OpenSSL.malloc(m, len(m))
md = OpenSSL.malloc(0, 64)
i = OpenSSL.pointer(OpenSSL.c_int(0))
OpenSSL.HMAC(OpenSSL.EVP_sha512(), key, len(k), d, len(m), md, i)
return md.raw
def pbkdf2(password, salt=None, i=10000, keylen=64):
if salt is None:
salt = OpenSSL.rand(8)
p_password = OpenSSL.malloc(password, len(password))
p_salt = OpenSSL.malloc(salt, len(salt))
output = OpenSSL.malloc(0, keylen)
OpenSSL.PKCS5_PBKDF2_HMAC(p_password, len(password), p_salt,
len(p_salt), i, OpenSSL.EVP_sha256(),
keylen, output)
return salt, output.raw
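
Illustrative usage of the wrappers above (a sketch, not part of the original
module; it assumes the bundled OpenSSL bindings import cleanly):

salt, key = pbkdf2(b'passphrase', i=10000, keylen=64)
tag = hmac_sha256(key, b'message')
assert equals(tag, hmac_sha256(key, b'message'))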

jawilson/home-assistant | tests/components/fronius/test_sensor.py | Python | apache-2.0 | 29,800 | 0.004832
"""Tests for the Fronius sensor platform."""
from homeassistant.components.fronius.const import DOMAIN
from homeassistant.components.fronius.coordinator import (
FroniusInverterUpdateCoordinator,
FroniusMeterUpdateCoordinator,
FroniusPowerFlowUpdateCoordinator,
)
from homeassistant.components.sensor import DOMAIN as SENSOR_DOMAIN
from homeassistant.const import STATE_UNKNOWN
from homeassistant.helpers import device_registry as dr
from homeassistant.util import dt
from . import enable_all_entities, mock_responses, setup_fronius_integration
from tests.common import async_fire_time_changed
async def test_symo_inverter(hass, aioclient_mock):
"""Test Fronius Symo inverter entities."""
def assert_state(entity_id, expected_state):
state = hass.states.get(entity_id)
assert state.state == str(expected_state)
# Init at night
mock_responses(aioclient_mock, night=True)
config_entry = await setup_fronius_integration(hass)
assert len(hass.states.async_all(domain_filter=SENSOR_DOMAIN)) == 23
await enable_all_entities(
hass, config_entry.entry_id, FroniusInverterUpdateCoordinator.default_interval
)
assert len(hass.states.async_all(domain_filter=SENSOR_DOMAIN)) == 55
assert_state("sensor.current_dc_fronius_inverter_1_http_fronius", 0)
assert_state("sensor.energy_day_fronius_inverter_1_http_fronius", 10828)
assert_state("sensor.energy_total_fronius_inverter_1_http_fronius", 44186900)
assert_state("sensor.energy_year_fronius_inverter_1_http_fronius", 25507686)
assert_state("sensor.voltage_dc_fronius_inverter_1_http_fronius", 16)
# Second test at daytime when inverter is producing
mock_responses(aioclient_mock, night=False)
async_fire_time_changed(
hass, dt.utcnow() + FroniusInverterUpdateCoordinator.default_interval
)
await hass.async_block_till_done()
assert len(hass.states.async_all(domain_filter=SENSOR_DOMAIN)) == 57
await enable_all_entities(
hass, config_entry.entry_id, FroniusInverterUpdateCoordinator.default_interval
)
assert len(hass.states.async_all(domain_filter=SENSOR_DOMAIN)) == 59
# 4 additional AC entities
assert_state("sensor.current_dc_fronius_inverter_1_http_fronius", 2.19)
assert_state("sensor.energy_day_fronius_inverter_1_http_fronius", 1113)
assert_state("sensor.energy_total_fronius_inverter_1_http_fronius", 44188000)
assert_state("sensor.energy_year_fronius_inverter_1_http_fronius", 25508798)
assert_state("sensor.voltage_dc_fronius_inverter_1_http_fronius", 518)
assert_state("sensor.current_ac_fronius_inverter_1_http_fronius", 5.19)
assert_state("sensor.frequency_ac_fronius_inverter_1_http_fronius", 49.94)
assert_state("sensor.power_ac_fronius_inverter_1_http_fronius", 1190)
assert_state("sensor.voltage_ac_fronius_inverter_1_http_fronius", 227.90)
# Third test at nighttime - additional AC entities aren't changed
mock_responses(aioclient_mock, night=True)
async_fire_time_changed(
hass, dt.utcnow() + FroniusInverterUpdateCoordinator.default_interval
)
await hass.async_block_till_done()
assert_state("sensor.current_ac_fronius_inverter_1_http_fronius", 5.19)
assert_state("sensor.frequency_ac_fronius_inverter_1_http_fronius", 49.94)
assert_state("sensor.power_ac_fronius_inverter_1_http_fronius", 1190)
assert_state("sensor.voltage_ac_fronius_inverter_1_http_fronius", 227.90)
async def test_symo_logger(hass, aioclient_mock):
"""Test Fronius Symo logger entities."""
    def assert_state(entity_id, expected_state):
state = hass.states.get(entity_id)
assert state
assert state.state == str(expected_state)
mock_responses(aioclient_mock)
await setup_fronius_integration(hass)
assert len(hass.states.async_all(domain_filter=SENSOR_DOMAIN)) == 25
# ignored constant entities:
# hardware_platform, hardware_version, product_type
# software_version, time_zone, time_zone_location
# time_stamp, unique_identifier, utc_offset
#
    # states are rounded to 4 decimals
assert_state(
"sensor.cash_factor_fronius_logger_info_0_http_fronius",
0.078,
)
assert_state(
"sensor.co2_factor_fronius_logger_info_0_http_fronius",
0.53,
)
assert_state(
"sensor.delivery_factor_fronius_logger_info_0_http_fronius",
0.15,
)
async def test_symo_meter(hass, aioclient_mock):
"""Test Fronius Symo meter entities."""
def assert_state(entity_id, expected_state):
state = hass.states.get(entity_id)
assert state
assert state.state == str(expected_state)
mock_responses(aioclient_mock)
config_entry = await setup_fronius_integration(hass)
assert len(hass.states.async_all(domain_filter=SENSOR_DOMAIN)) == 25
await enable_all_entities(
hass, config_entry.entry_id, FroniusMeterUpdateCoordinator.default_interval
)
assert len(hass.states.async_all(domain_filter=SENSOR_DOMAIN)) == 59
# ignored entities:
# manufacturer, model, serial, enable, timestamp, visible, meter_location
#
# states are rounded to 4 decimals
assert_state("sensor.current_ac_phase_1_fronius_meter_0_http_fronius", 7.755)
assert_state("sensor.current_ac_phase_2_fronius_meter_0_http_fronius", 6.68)
assert_state("sensor.current_ac_phase_3_fronius_meter_0_http_fronius", 10.102)
assert_state(
"sensor.energy_reactive_ac_consumed_fronius_meter_0_http_fronius", 59960790
)
assert_state(
"sensor.energy_reactive_ac_produced_fronius_meter_0_http_fronius", 723160
)
assert_state("sensor.energy_real_ac_minus_fronius_meter_0_http_fronius", 35623065)
assert_state("sensor.energy_real_ac_plus_fronius_meter_0_http_fronius", 15303334)
assert_state("sensor.energy_real_consumed_fronius_meter_0_http_fronius", 15303334)
assert_state("sensor.energy_real_produced_fronius_meter_0_http_fronius", 35623065)
assert_state("sensor.frequency_phase_average_fronius_meter_0_http_fronius", 50)
assert_state("sensor.power_apparent_phase_1_fronius_meter_0_http_fronius", 1772.793)
assert_state("sensor.power_apparent_phase_2_fronius_meter_0_http_fronius", 1527.048)
assert_state("sensor.power_apparent_phase_3_fronius_meter_0_http_fronius", 2333.562)
assert_state("sensor.power_apparent_fronius_meter_0_http_fronius", 5592.57)
assert_state("sensor.power_factor_phase_1_fronius_meter_0_http_fronius", -0.99)
assert_state("sensor.power_factor_phase_2_fronius_meter_0_http_fronius", -0.99)
assert_state("sensor.power_factor_phase_3_fronius_meter_0_http_fronius", 0.99)
assert_state("sensor.power_factor_fronius_meter_0_http_fronius", 1)
assert_state("sensor.power_reactive_phase_1_fronius_meter_0_http_fronius", 51.48)
assert_state("sensor.power_reactive_phase_2_fronius_meter_0_http_fronius", 115.63)
assert_state("sensor.power_reactive_phase_3_fronius_meter_0_http_fronius", -164.24)
assert_state("sensor.power_reactive_fronius_meter_0_http_fronius", 2.87)
assert_state("sensor.power_real_phase_1_fronius_meter_0_http_fronius", 1765.55)
assert_state("sensor.power_real_phase_2_fronius_meter_0_http_fronius", 1515.8)
assert_state("sensor.power_real_phase_3_fronius_meter_0_http_fronius", 2311.22)
assert_state("sensor.power_real_fronius_meter_0_http_fronius", 5592.57)
assert_state("sensor.voltage_ac_phase_1_fronius_meter_0_http_fronius", 228.6)
assert_state("sensor.voltage_ac_phase_2_fronius_meter_0_http_fronius", 228.6)
assert_state("sensor.voltage_ac_phase_3_fronius_meter_0_http_fronius", 231)
assert_state(
"sensor.voltage_ac_phase_to_phase_12_fronius_meter_0_http_fronius", 395.9
)
assert_state(
"sensor.voltage_ac_phase_to_phase_23_fronius_meter_0_http_fronius", 398
)
assert_state(
"sensor.voltage_ac_phase_to_phase_31_fronius_meter_0_http_fronius", 398
)
async def test_symo_power_flow(hass, aioclient_mock):
"""Test Fronius Symo power flow entities."""
async_fire_time_changed(hass, dt.utcnow())
def assert_state(entity_id, expected_state)

planrich/pypy-simd-benchmark | user/add.py | Python | gpl-3.0 | 458 | 0.019651
import time
import sys
import array
# show start
def py_add(A,B,size):
i = 0
while i < size:
A[i] = A[i] + B[i]
i += 1
# show stop
if __name__ == '__main__':
s = int(sys.argv[1])
j = int(sys.argv[2])
a = array.array('d', [0.0]*s)
b = array.array('d', [1.0]*s)
for i in range(j//10):
py_add(a,b,s)
t = time.time()
for i in range(j):
py_add(a,b,s)
print "time:", (tim
|
e.time()-t)

IPVL/Tanvin-PythonWorks | pythonOOP/codes/duck_test.py | Python | mit | 722 | 0.01662
#! /usr/bin/env python
class Duck:
"""
this class implies a new way to express polymorphism using duck typing.
This class has 2 functions: quack() and fly() consisting no parameter.
"""
def quack(self):
print("Quack, quack!");
def fly(self):
print("Flap, Flap!");
class Person:
def quack(self):
        print("I'm Quackin'!");
def fly(self):
print("I'm Flyin'!");
def in_the_forest(mallard):
""" This functi
|
on is used for express polymorphism behavior except inheritance """
mallard.quack()
mallard.fly()
duck = Duck()
person = Person()
# passing object to in_the_forest() function
in_the_forest(Duck())
in_the_forest(Person())

D4rkC00d3r/locatebssid | locatebssid.py | Python | gpl-3.0 | 1,239 | 0.002421
import urllib.request
import json
import webbrowser
___author___ = 'D4rkC00d3r'
bssid = input('Enter a BSSID: ') # Mac address of AP you want to locate
api_uri = 'https://api.mylnikov.org/geolocation/wifi?v=1.1&data=open&bssid=' # Api endpoint for database.
map_url = 'http://find-wifi.mylnikov.org/#'  # Map provider for plotting results.
# Example of a MAC address; 00:0C:42:1F:65:E9 this can be used for testing.
def mappin():
while True:
confirm = input('Show on map? (Y)es or (N)o: ')
if 'Y' in confirm:
webbrowser.open(map_url + bssid)
return
else:
break
def results():
if 'desc' in data:
print(data['desc'])
else:
print('Device has last been seen at:')
print('Lat: {0}'.format(data['data']['lat']))
print('Lon: {0}'.format(data['data']['lon']))
print('Meter accuracy: {0}'.format(data['data']['range']))
mappin()
# used to write the results of the api call to a .json file.
with urllib.request.urlopen(api_uri + bssid) as url:
data = json.loads(url.read().decode())
json_str = json.dumps(data)
with open('locatebssid.json', 'w') as f:
json.dump(data, f, indent=4)
results()

everaldo/example-code | 19-dyn-attr-prop/oscon/test_schedule2.py | Python | mit | 1,753 | 0
import shelve
import pytest
import schedule2 as schedule
@pytest.yield_fixture
def db():
with shelve.open(schedule.DB_NAME) as the_db:
if schedule.CONFERENCE not in the_db:
schedule.load_db(the_db)
yield the_db
def test_record_attr_access():
rec = schedule.Record(spam=99, eggs=12)
assert rec.spam == 99
assert rec.eggs == 12
def test_record_repr():
    rec = schedule.DbRecord(spam=99, eggs=12)
assert 'DbRecord object at 0x' in repr(rec)
rec2 = schedule.DbRecord(serial=13)
assert repr(rec2) == "<DbRecord serial=13>"
def test_conference_record(db):
assert schedule.CONFERENCE in db
def test_speaker_record(db):
speaker = db['speaker.3471']
assert speaker.name == 'Anna Martelli Ravenscroft'
def test_missing_db_exception():
with pytest.raises(schedule.MissingDatabaseError):
schedule.DbRecord.fetch('venue.1585')
def test_dbrecord(db):
schedule.DbRecord.set_db(db)
venue = schedule.DbRecord.fetch('venue.1585')
assert venue.name == 'Exhibit Hall B'
def test_event_record(db):
event = db['event.33950']
assert repr(event) == "<Event 'There *Will* Be Bugs'>"
def test_event_venue(db):
schedule.Event.set_db(db)
event = db['event.33950']
assert event.venue_serial == 1449
assert event.venue == db['venue.1449']
assert event.venue.name == 'Portland 251'
def test_event_speakers(db):
schedule.Event.set_db(db)
event = db['event.33950']
assert len(event.speakers) == 2
anna_and_alex = [db['speaker.3471'], db['speaker.5199']]
assert event.speakers == anna_and_alex
def test_event_no_speakers(db):
schedule.Event.set_db(db)
event = db['event.36848']
assert len(event.speakers) == 0

henningpohl/WeMo | wemo.py | Python | mit | 327 | 0.015291
from ctypes import *
wemoDll = cdll.LoadLibrary("WeMo.dll")
def turn_on():
wemoDll.turnOn()
def turn_off():
wemoDll.turnOff()
def get_state():
return wemoDll.getState() > 0
if __name__ == '__main__':
import time
turn_on()
print get_state()
time.sleep(3.0)
turn_off()
print get_state()

valhallasw/pywikibot-core | pywikibot/backports.py | Python | mit | 6,204 | 0.000322
# -*- coding: utf-8 -*-
"""
This module contains backports to support older Python versions.
They contain the backported code originally developed for Python. It is
therefore distributed under the PSF license, as follows:
PYTHON SOFTWARE FOUNDATION LICENSE VERSION 2
--------------------------------------------
1. This LICENSE AGREEMENT is between the Python Software Foundation
("PSF"), and the Individual or Organization ("Licensee") accessing and
otherwise using this software ("Python") in source or binary form and
its associated documentation.
2. Subject to the terms and conditions of this License Agreement, PSF hereby
grants Licensee a nonexclusive, royalty-free, world-wide license to reproduce,
analyze, test, perform and/or display publicly, prepare derivative works,
distribute, and otherwise use Python alone or in any derivative version,
provided, however, that PSF's License Agreement and PSF's notice of copyright,
i.e., "Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010,
2011, 2012, 2013, 2014 Python Software Foundation; All Rights Reserved" are
retained in Python alone or in any derivative version prepared by Licensee.
3. In the event Licensee prepares a derivative work that is based on
or incorporates Python or any part thereof, and wants to make
the derivative work available to others as provided herein, then
Licensee hereby agrees to include in any such work a brief summary of
the changes made to Python.
4. PSF is making Python available to Licensee on an "AS IS"
basis. PSF MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR
IMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, PSF MAKES NO AND
DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS
FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF PYTHON WILL NOT
INFRINGE ANY THIRD PARTY RIGHTS.
5. PSF SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON
FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS
A RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON,
OR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF.
6. This License Agreement will automatically terminate upon a material
breach of its terms and conditions.
7. Nothing in this License Agreement shall be deemed to create any
relationship of agency, partnership, or joint venture between PSF and
Licensee. This License Agreement does not grant permission to use PSF
trademarks or trade name in a trademark sense to endorse or promote
products or services of Licensee, or any third party.
8. By copying, installing or otherwise using Python, Licensee
agrees to be bound by the terms and conditions of this License
Agreement.
"""
#
# (C) Python Software Foundation, 2001-2014
# (C) with modifications from Pywikibot team, 2015
#
# Distributed under the terms of the PSF license.
#
from __future__ import unicode_literals
import logging
import warnings
def format_range_unified(start, stop):
"""
Convert range to the "ed" format.
Copied from C{difflib._format_range_unified()} which was introduced in
Python 2.7.2.
@see: https://hg.python.org/cpython/file/8527427914a2/Lib/difflib.py#l1147
"""
# Per the diff spec at http://www.unix.org/single_unix_specification/
beginning = start + 1 # lines start numbering with one
length = stop - start
if length == 1:
return '{0}'.format(beginning)
if not length:
beginning -= 1 # empty ranges begin at line just before the range
return '{0},{1}'.format(beginning, length)
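# Worked examples of the "ed" range format above (added for illustration, not
# part of the original module):
#   format_range_unified(3, 5) -> '4,2'  (two lines starting at line 4)
#   format_range_unified(3, 4) -> '4'    (a single line)
#   format_range_unified(3, 3) -> '3,0'  (an empty range just before line 4)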
# Logging/Warnings integration
_warnings_showwarning = None
class NullHandler(logging.Handler):
"""
This handler does nothing.
It's intended to be used to avoid the "No handlers could be found for
logger XXX" one-off warning. This is important for library code, which
may contain code to log events. If a user of the library does not configure
logging, the one-off warning might be produced; to avoid this, the library
developer simply needs to instantiate a NullHandler and add it to the
top-level logger of the library module or package.
Copied from C{logging.NullHandler} which was introduced in Python 2.7.
@see: http://bugs.python.org/issue4384
"""
def handle(self, record):
"""Dummy handling."""
pass
def emit(self, record):
"""Dummy handling."""
pass
def createLock(self):
"""Dummy handling."""
self.lock = None
def _showwarning(message, category, filename, lineno, file=None, line=None):
"""
Implementation of showwarnings which redirects to logging.
It will first check to see if the file parameter is None. If a file is
specified, it will delegate to the original warnings implementation of
showwarning. Otherwise, it will call warnings.formatwarning and will log
the resulting string to a warnings logger named "py.warnings" with level
logging.WARNING.
Copied from C{logging._showwarning} which was introduced in Python 2.7.
@see: http://bugs.python.org/issue4384
"""
if file is not None:
if _warnings_showwarning is not None:
_warnings_showwarning(message, category, filename, lineno, file, line)
else:
s = warnings.formatwarning(message, category, filename, lineno, line)
logger = logging.getLogger("py.warnings")
if not logger.handlers:
logger.addHandler(NullHandler())
logger.warning("%s", s)
def captureWarnings(capture):
"""
Capture warnings into logging.
If capture is true, redirect all warnings to the logging package.
If capture is False, ensure that warnings are not redirected to logging
but to their original destinations.
Copied from C{logging.captureWarnings} which was introduced in Python 2.7.
@see: http://bugs.python.org/issue4384
"""
global _warnings_showwarning
if capture:
if _warnings_showwarning is None:
_warnings_showwarning = warnings.showwarning
warnings.showwarning = _showwarning
else:
if _warnings_showwarning is not None:
warnings.showwarning = _warnings_showwarning
_warnings_showwarning = None

anokata/pythonPetProjects | var_scripts/sdl_test.py | Python | mit | 1,486 | 0.003365
import sys
import sdl2.ext
def run():
resources = sdl2.ext.Resources(__file__, "platformer")
sdl2.ext.init()
window = sdl2.ext.Window("SRG", size=(200, 200))
window.show()
factory = sdl2.ext.SpriteFactory(sdl2.ext.SOFTWARE)
sprite = factory.from_image(resources.get_path("sky0.png"))
    s2 = sprite.subsprite((2, 2, 10, 10))
s2r = sdl2.ext.Renderer(s2)
tfactory = sdl2.ext.SpriteFactory(sdl2.ext.TEXTURE, renderer=s2r)
texture = tfactory.from_image(resources.get_path("sky0.png"))
#s2r.copy(texture)
spriterenderer = factory.create_sprite_render_system(window)
running = True
i = 1000
fps = 0
while running:
events = sdl2.ext.get_events()
for event in events:
if event.type == sdl2.SDL_QUIT:
                running = False
break
if event.type == sdl2.SDL_KEYDOWN:
key = event.key.keysym.sym
if key == 27:
running = False
break
if event.key.keysym.sym == sdl2.SDLK_UP:
print(key, sdl2.SDLK_q)
i -= 1
if i == 0:
i = 1000
x, y = sprite.position
sdl2.ext.fill(spriterenderer.surface, sdl2.ext.Color(0, 0, 0))
sprite.position = x+1, y+1
spriterenderer.render([sprite, s2])
window.refresh()
sdl2.ext.quit()
return 0
if __name__ == "__main__":
sys.exit(run())

trychameleon/snippet.js | examples/chameleon-python.py | Python | mit | 415 | 0.031325
{% if current_user %}<script>
// Add the snippet here with account id
// Assuming your page has loaded the current user as the object current_user
chmln.identify({
    uid: '{{ current_user.id }}',
created: '{{ current_user.created_at }}',
email: '{{ current_user.email }}',
plan: '{{ current_user.account.plan_name }}',
spend: '{{ current_user.account.plan_cost }}'
});
</script>{% endif %}

chris-wood/onpath-NFD | docs/conf.py | Python | gpl-3.0 | 9,075 | 0.00595
# -*- coding: utf-8 -*-
#
# NFD - Named Data Networking Forwarding Daemon documentation build configuration file, created by
# sphinx-quickstart on Sun Apr 6 19:58:22 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.todo',
'redmine_issue',
]
def addExtensionIfExists(extension):
try:
__import__(extension)
extensions.append(extension)
except ImportError:
sys.stderr.write("Extension '%s' in not available. "
"Some documentation may not build correctly.\n" % extension)
sys.stderr.write("To install, use \n"
" sudo pip install %s\n" % extension.replace('.', '-'))
addExtensionIfExists('sphinxcontrib.doxylink')
if os.getenv('GOOGLE_ANALYTICS', None):
addExtensionIfExists('sphinxcontrib.googleanalytics')
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'NFD - Named Data Networking Forwarding Daemon'
copyright = u'2014, Named Data Networking Project'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
# html_theme = 'default'
html_theme = 'named_data_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ['./']
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
html_file_suffix = ".html"
# Output file base name for HTML help builder.
htmlhelp_basename = 'nfd-docs'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'nfd-docs.tex', u'NFD - Named Data Networking Forwarding Daemon Documentation',
u'Named Data Networking Project', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('manpages/nfd', 'nfd', u'Named Data Networking Forwarding Daemon', None, 1),
('manpages/ndn-autoconfig-server', 'ndn-autoconfig-server',
u'NFD Auto-configuration Server', None, 1),
('manpages/ndn-autoconfig', 'ndn-autoconfig',
u'NFD Auto-c

quake0day/oj | ttt.py | Python | mit | 123 | 0.00813
prices = [2, 1, 2, 1, 2, 1]
total = 0
for i in xrange(1, len(prices), 2):
total += prices[i] - prices[i-1]
print total
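# Added note (not in the original file): the loop sums prices[i] - prices[i-1]
# for each odd index i, so with the sample list above the script prints -3.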

google-research/disentanglement_lib | disentanglement_lib/evaluation/abstract_reasoning/relational_layers_test.py | Python | apache-2.0 | 4,868 | 0.003698
# coding=utf-8
# Copyright 2018 The DisentanglementLib Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for relational_layers.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from disentanglement_lib.evaluation.abstract_reasoning import relational_layers
import numpy as np
import tensorflow.compat.v1 as tf
def _create_positional_encoding_matrices():
"""Shared input/output pair for the positional encoding tests."""
input_array = np.arange(24, dtype=np.float64).reshape((1, 4, 3, 2))
output_array = np.eye(4)
output_array = np.repeat(np.expand_dims(output_array, -1), 2, axis=-1)
output_array = np.expand_dims(output_array, 0)
return input_array, output_array
class RelationalLayersTest(tf.test.TestCase):
def test_repeat_for_tensor(self):
a = np.arange(24).reshape((1, 4, 3, 2))
shouldbe = np.concatenate([a] * 3, axis=-2)
result = self.evaluate(relational_layers.repeat(tf.constant(a), 3, axis=-2))
self.assertAllClose(shouldbe, result)
def test_pairwise_edge_embeddings_for_tensor(self):
a = np.array([[[1], [2]]])
shouldbe = np.array([[[[1, 1], [1, 2]], [[2, 1], [2, 2]]]])
layer = relational_layers.PairwiseEdgeEmbeddings()
result = self.evaluate(layer(tf.constant(a)))
self.assertAllClose(shouldbe, result)
def test_relational_layer_for_tensor(self):
a = np.array([[[1], [2]]])
shouldbe = np.array([[[2, 3], [4, 3]]])
layer = relational_layers.RelationalLayer(
tf.keras.layers.Lambda(lambda x: x),
tf.keras.layers.Lambda(lambda x: tf.reduce_sum(x, axis=-2)))
result = self.evaluate(layer(tf.constant(a)))
self.assertAllClose(shouldbe, result)
def test_positional_encoding_like_for_static_shape_tensor(self):
value, shouldbe = _create_positional_encoding_matrices()
a = tf.constant(value)
output_tensor = relational_layers.positional_encoding_like(a, -3, -2)
result = self.evaluate(output_tensor)
self.assertEqual((1, 4, 4, 2), result.shape)
self.assertAllClose(shouldbe, result)
def test_positional_encoding_like_for_dynamic_shape_tensor(self):
value, shouldbe = _create_positional_encoding_matrices()
a = tf.placeholder(tf.float32, shape=(None, 4, 3, 2))
output_tensor = relational_layers.positional_encoding_like(a, -3, -2)
# Check the static shape.
self.assertEqual([None, 4, 4, 2], output_tensor.get_shape().as_list())
# Check the solution.
with self.session() as sess:
result = sess.run(output_tensor, feed_dict={a: value})
self.assertAllClose(shouldbe, result)
def test_add_positional_encoding_layer_for_tensor(self):
value, shouldbe_positional = _create_positional_encoding_matrices()
shouldbe = np.concatenate([value, shouldbe_positional], axis=-2)
a = tf.constant(value)
output_tensor = relational_layers.AddPositionalEncoding(-3, -2)(a)
result = self.evaluate(output_tensor)
self.assertAllClose(shouldbe, result)
def test_stack_answers_for_tensors(self):
# Tensors used for testing.
context = np.arange(24).reshape((2, 3, 4))
answers = np.arange(24, 48).reshape((2, 3, 4))
# Compute the correct solutions.
results = []
for i in range(answers.shape[-1]):
results.append(
np.concatenate([context, answers[:, :, i:(i + 1)]], axis=-1))
shouldbe = np.stack(results, axis=-2)
# Compute the solution based on the layer.
layer = relational_layers.StackAnswers(answer_axis=-1, stack_axis=-2)
result = self.evaluate(layer([tf.constant(context), tf.constant(answers)]))
# Check that they are the same.
self.assertAllClose(shouldbe, result)
def test_multi_dim_batch_apply_for_tensors(self):
# Tensors used for testing.
input_tensor = np.arange(24).reshape((2, 3, 4))
    kernel = np.arange(24, 36).reshape((4, 3))
# Compute the correct solutions.
shouldbe = np.matmul(input_tensor, kernel)
# Compute the solution based on the layer.
layer = relational_layers.MultiDimBatchApply(
tf.keras.layers.Lambda(lambda x: tf.matmul(x, tf.constant(kernel))),
num_dims_to_keep=1)
    result = self.evaluate(layer(tf.constant(input_tensor)))
# Check that they are the same.
self.assertAllClose(shouldbe, result)
if __name__ == '__main__':
tf.test.main()

MattJDavidson/python-adventofcode | advent/problem_03.py | Python | bsd-2-clause | 3,424 | 0.000292
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""--- Day 3: Perfectly Spherical Houses in a Vacuum ---
Santa is delivering presents to an infinite two-dimensional grid of houses.
He begins by delivering a present to the house at his starting location, and
then an elf at the North Pole calls him via radio and tells him where to move
next. Moves are always exactly one house to the north (^), south (v), east (>),
or west (<). After each move, he delivers another present to the house at his
new location.
However, the elf back at the north pole has had a little too much eggnog, and
so his directions are a little off, and Santa ends up visiting some houses more
than once. How many houses receive at least one present?
For example:
- > delivers presents to 2 houses: one at the starting location, and one to the
east.
- ^>v< delivers presents to 4 houses in a square, including twice to the
house at his starting/ending location.
- ^v^v^v^v^v delivers a bunch of presents
to some very lucky children at only 2 houses.
--- Part Two ---
The next year, to speed up the process, Santa creates a robot version of
himself, Robo-Santa, to deliver presents with him.
Santa and Robo-Santa start at the same location (delivering two presents to the
same starting house), then take turns moving based on instructions from the
elf, who is eggnoggedly reading from the same script as the previous year.
This year, how many houses receive at least one present?
For example:
- ^v delivers presents to 3 houses, because Santa goes north, and then Robo-Santa
goes south.
- ^>v< now delivers presents to 3 houses, and Santa and Robo-Santa
end up back where they started.
- ^v^v^v^v^v now delivers presents to 11 houses,
with Santa going one direction and Robo-Santa going the other.
"""
import sys
import click
def update_point(move, point):
"""Returns new point representing position after move"""
moves = {
'^': (0, -1),
        '<': (-1, 0),
'v': (0, 1),
'>': (1, 0),
}
return (point[0]+moves.get(move, (0, 0))[0],
point[1]+moves.get(move, (0, 0))[1])
def map_single_delivery(text):
point = (0, 0)
points = set({point})
for move in text:
point = update_point(move, point)
points.add(point)
return points
def number_of_houses_covered(text, robo_santa=False):
    return len(map_single_delivery(text)) if not robo_santa else \
        len(map_multiple_deliveries(text))
def split_directions(directions):
lists = ('', '')
try:
lists = directions[0::2], directions[1::2]
except IndexError:
pass
return lists
def map_multiple_deliveries(text):
directions = split_directions(text)
points = map_single_delivery(directions[0])
return points.union(map_single_delivery(directions[1]))
def calculate_solution_1(text):
return number_of_houses_covered(text)
def calculate_solution_2(text):
return number_of_houses_covered(text, robo_santa=True)
@click.command()
@click.option('--source_file', default='data/03.txt',
help='source data file for problem')
def main(source_file):
"""Simple solution to adventofcode problem 3."""
data = ''
with open(source_file) as source:
data = source.read()
print('Santa gave at least one present to {} houses.'.format(
number_of_houses_covered(data)))
if __name__ == "__main__":
sys.exit(main())
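# Sanity check against the examples in the module docstring (added for
# illustration, not part of the original file):
#   number_of_houses_covered('^>v<')                          == 4
#   number_of_houses_covered('^v', robo_santa=True)           == 3
#   number_of_houses_covered('^v^v^v^v^v', robo_santa=True)   == 11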

absalon-james/graphite-api | graphite_api/utils.py | Python | apache-2.0 | 2,513 | 0.000398
"""Copyright 2008 Orbitz WorldWide
Licensed under the Apache License,
Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License."""
import calendar
import hashlib
import pytz
from flask import request
def is_pattern(s):
return '*' in s or '?' in s or '[' in s or '{' in s
class RequestParams(object):
"""Dict-like structure that allows accessing request params
whatever their origin (json body, form body, request args)."""
def __getitem__(self, key):
if request.json and key in request.json:
return request.json[key]
if key in request.form:
return request.form.getlist(key)[-1]
if key in request.args:
return request.args.getlist(key)[-1]
raise KeyError
def __contains__(self, key):
try:
self[key]
return True
except KeyError:
return False
def get(self, key, default=None):
try:
return self[key]
except KeyError:
return default
def getlist(self, key):
if request.json and key in request.json:
value = self[key]
if not isinstance(value, list):
value = [value]
return value
if key in request.form:
return request.form.getlist(key)
return request.args.getlist(key)
RequestParams = RequestParams()
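# Note (added for clarity, not in the original module): the class name is
# rebound here to a module-level instance, so importers use RequestParams as a
# ready-made dict-like singleton rather than instantiating the class themselves.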
def hash_request():
keys = set()
if request.json:
keys.update(request.json.keys())
if request.form:
keys.update(request.form.keys())
keys.update(request.args.keys())
params = u",".join([
u"{0}={1}".format(key, u"&".join(sorted(RequestParams.getlist(key))))
for key in sorted(keys) if not key.startswith('_')
])
md5 = hashlib.md5()
md5.update(params.encode('utf-8'))
return md5.hexdigest()
def to_seconds(delta):
return abs(delta.seconds + delta.days * 86400)
def epoch(dt):
"""
Returns the epoch timestamp of a timezone-aware datetime object.
"""
return calendar.timegm(dt.astimezone(pytz.utc).timetuple())
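
Illustrative use of epoch() above (a sketch, not part of the original module;
it assumes the function defined above is in scope):

from datetime import datetime
import pytz

assert epoch(datetime(1970, 1, 2, tzinfo=pytz.utc)) == 86400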

swegener/gruvi | tests/test_poll.py | Python | mit | 8,350 | 0.004192
#
# This file is part of Gruvi. Gruvi is free software available under the
# terms of the MIT license. See the file "LICENSE" that was provided
# together with this source file for the licensing terms.
#
# Copyright (c) 2012-2014 the Gruvi authors. See the file "AUTHORS" for a
# complete list.
from __future__ import absolute_import, print_function
import unittest
from gruvi import get_hub, Event
from gruvi.poll import MultiPoll, Poller, READABLE, WRITABLE
from gruvi.poll import check as check_mpoll
from support import UnitTest, socketpair
class TestMultiPoll(UnitTest):
def test_basic(self):
s1, s2 = socketpair()
fd = s2.fileno()
mp = MultiPoll(get_hub().loop, fd)
check_mpoll(mp)
cbargs = []
called = Event()
def callback(fd, events):
cbargs.append((fd, events))
called.set()
mp.add_callback(READABLE, callback)
check_mpoll(mp)
called.wait(0.01)
self.assertEqual(cbargs, [])
s1.send(b'x')
called.wait(0.1)
self.assertEqual(cbargs, [(fd, READABLE)])
self.assertEqual(s2.recv(10), b'x')
del cbargs[:]; called.clear()
called.wait(0.01)
self.assertEqual(cbargs, [])
mp.close()
check_mpoll(mp)
s1.close(); s2.close()
def test_multiple(self):
s1, s2 = socketpair()
fd = s2.fileno()
mp = MultiPoll(get_hub().loop, fd)
cbargs = []
called = Event()
def callback(arg=0):
def _callback(fd, events):
cbargs.append((fd, events, arg))
called.set()
return _callback
mp.add_callback(READABLE, callback(0))
check_mpoll(mp)
mp.add_callback(READABLE, callback(1))
check_mpoll(mp)
mp.add_callback(WRITABLE, callback(2))
check_mpoll(mp)
mp.add_callback(WRITABLE, callback(3))
check_mpoll(mp)
called.wait(0.1)
self.assertEqual(cbargs, [(fd, WRITABLE, 2), (fd, WRITABLE, 3)])
del cbargs[:]; called.clear()
s1.send(b'x')
called.wait(0.1)
self.assertEqual(cbargs, [(fd, READABLE, 0), (fd, READABLE, 1),
(fd, WRITABLE, 2), (fd, WRITABLE, 3)])
self.assertEqual(s2.recv(10), b'x')
mp.close()
check_mpoll(mp)
s1.close(); s2.close()
def test_remove(self):
s1, s2 = socketpair()
fd = s2.fileno()
mp = MultiPoll(get_hub().loop, fd)
cbargs = []
called = Event()
def callback(arg=0):
def _callback(fd, events):
cbargs.append((fd, events, arg))
called.set()
return _callback
h1 = mp.add_callback(READABLE, callback(0))
check_mpoll(mp)
h2 = mp.add_callback(READABLE, callback(1))
check_mpoll(mp)
s1.send(b'x')
called.wait(0.1)
self.assertEqual(cbargs, [(fd, READABLE, 0), (fd, READABLE, 1)])
del cbargs[:]; called.clear()
mp.remove_callback(h1)
check_mpoll(mp)
called.wait(0.1)
self.assertEqual(cbargs, [(fd, READABLE, 1)])
mp.remove_callback(h2)
check_mpoll(mp)
mp.close()
check_mpoll(mp)
s1.close(); s2.close()
def test_update(self):
s1, s2 = socketpair()
fd = s2.fileno()
mp = MultiPoll(get_hub().loop, fd)
cbargs = []
called = Event()
def callback(fd, events):
cbargs.append((fd, events))
called.set()
h1 = mp.add_callback(READABLE, callback)
check_mpoll(mp)
s1.send(b'x')
called.wait(0.1)
self.assertEqual(cbargs, [(fd, READABLE)])
del cbargs[:]; called.clear()
mp.update_callback(h1, READABLE|WRITABLE)
check_mpoll(mp)
s1.send(b'x')
called.wait(0.1)
self.assertEqual(cbargs, [(fd, READABLE|WRITABLE)])
del cbargs[:]; called.clear()
mp.update_callback(h1, WRITABLE)
check_mpoll(mp)
s1.send(b'x')
called.wait(0.1)
self.assertEqual(cbargs, [(fd, WRITABLE)])
del cbargs[:]; called.clear()
mp.update_callback(h1, 0)
check_mpoll(mp)
s1.send(b'x')
called.wait(0.01)
self.assertEqual(cbargs, [])
mp.close()
check_mpoll(mp)
s1.close(); s2.close()
def test_close(self):
s1, s2 = socketpair()
fd = s2.fileno()
mp = MultiPoll(get_hub().loop, fd)
cbargs = []
called = Event()
def callback(fd, events):
cbargs.append((fd, events))
called.set()
h1 = mp.add_callback(READABLE, callback)
h2 = mp.add_callback(READABLE, callback)
s1.send(b'x')
called.wait(0.1)
self.assertEqual(cbargs, [(fd, READABLE), (fd, READABLE)])
del cbargs[:]; called.clear()
mp.close()
called.wait(0.01)
self.assertEqual(cbargs, [])
self.assertRaises(RuntimeError, mp.add_callback, READABLE, callback)
self.assertRaises(RuntimeError, mp.remove_callback, h1)
self.assertRaises(RuntimeError, mp.remove_callback, h2)
self.assertRaises(RuntimeError, mp.update_callback, h1, WRITABLE)
self.assertRaises(RuntimeError, mp.update_callback, h2, WRITABLE)
s1.close(); s2.close()
class TestPoller(UnitTest):
def test_add_remove(self):
poll = Poller(get_hub().loop)
cbargs = []
called = Event()
def callback(fd, events):
cbargs.append((fd, events))
called.set()
s1, s2 = socketpair()
fd = s2.fileno()
handle = poll.add_callback(fd, READABLE, callback)
self.assertIsNotNone(handle)
called.wait(0.01)
self.assertEqual(cbargs, [])
s1.send(b'x')
called.wait(0.1)
self.assertEqual(cbargs, [(fd, READABLE)])
del cbargs[:]; called.clear()
poll.remove_callback(fd, handle)
called.wait(0.01)
self.assertEqual(cbargs, [])
poll.close()
s1.close(); s2.close()
def test_update(self):
poll = Poller(get_hub().loop)
cbargs = []
called = Event()
def callback(fd, events):
cbargs.append((fd, events))
called.set()
|
s1, s2 = socketpair()
fd = s2.fileno()
handle = poll.add_callback(fd, WRITABLE, callback)
self.assertIsNotNone(handle)
called.wait(0.1)
        self.assertEqual(cbargs, [(fd, WRITABLE)])
del cbargs[:]; called.clear()
poll.update_callback(fd, handle, READABLE|WRITABLE)
s1.send(b'x')
called.wait(0.1)
self.assertEqual(cbargs, [(fd, READABLE|WRITABLE)])
del cbargs[:]; called.clear()
poll.close()
s1.close(); s2.close()
def test_close(self):
poll = Poller(get_hub().loop)
cbargs = []
called = Event()
def callback(fd, events):
cbargs.append((fd, events))
called.set()
s1, s2 = socketpair()
fd = s2.fileno()
handle = poll.add_callback(fd, READABLE, callback)
self.assertIsNotNone(handle)
s1.send(b'x')
poll.close()
called.wait(0.01)
self.assertEqual(cbargs, [])
self.assertRaises(RuntimeError, poll.add_callback, fd, READABLE, callback)
self.assertRaises(RuntimeError, poll.remove_callback, fd, handle)
self.assertRaises(RuntimeError, poll.update_callback, fd, handle, WRITABLE)
s1.close(); s2.close()
def test_multiple_fds(self):
poll = Poller(get_hub().loop)
cbargs = []
called = Event()
def callback(fd, events):
cbargs.append((fd, events))
called.set()
s11, s12 = socketpair()
fd1 = s12.fileno()
poll.add_callback(fd1, READABLE, callback)
s21, s22 = socketpair()
fd2 = s22.fileno()
poll.add_callback(fd2, READABLE, callback)
s11.send(b'x')
s21.send(b'x')
called.wait()
self.assertEqual(cbargs, [(
|
QuLogic/burnman
|
examples/example_composition.py
|
Python
|
gpl-2.0
| 6,658 | 0.016822 |
# BurnMan - a lower mantle toolkit
# Copyright (C) 2012, 2013, Heister, T., Unterborn, C., Rose, I. and Cottaar, S.
# Released under GPL v2 or later.
"""
example_composition
-------------------
This example shows how to create different minerals, how to compute seismic
velocities, and how to compare them to a seismic reference model.
There are many different ways in BurnMan to combine minerals into a
composition. Here we present a couple of examples:
1. Two minerals mixed in simple mole fractions. Can be chosen from the BurnMan
libraries or from user defined minerals (see example_user_input_material)
2. Example with three minerals
3. Using preset solid solutions
4. Defining your own solid solution
To turn a method of mineral creation "on" the first if statement above the
method must be set to True, with all others set to False.
Note: These minerals can include a spin transition in (Mg,Fe)O, see
example_spintransition.py for explanation of how to implement this
*Uses:*
* :doc:`mineral_database`
* :class:`burnman.composite.Composite`
* :class:`burnman.minerals.Mineral`
* :class:`burnman.solidsolution.SolidSolution`
*Demonstrates:*
* Different ways to define a composite
* Using minerals and solid solutions
* Compare computations to seismic models
"""
import os, sys, numpy as np, matplotlib.pyplot as plt
#hack to allow scripts to be placed in subdirectories next to burnman:
if not os.path.exists('burnman') and os.path.exists('../burnman'):
sys.path.insert(1,os.path.abspath('..'))
import burnman
from burnman import minerals
if __name__ == "__main__":
# To compute seismic velocities and other properties, we need to supply
# burnman with a list of minerals (phases) and their molar abundances. Minerals
# are classes found in burnman.minerals and are derived from
# burnman.minerals.material.
# Here are a few ways to define phases and molar_abundances:
#Example 1: two simple fixed minerals
if True:
amount_perovskite = 0.95
rock = burnman.Composite([amount_perovskite, 1.0-amount_perovskite],
[minerals.SLB_2011.mg_perovskite(),
minerals.SLB_2011.periclase()])
#Example 2: three materials
if False:
rock = burnman.Composite([0.7, 0.2, 0.1],
[minerals.SLB_2011.fe_perovskite(),
minerals.SLB_2011.periclase(),
minerals.SLB_2011.stishovite()])
#Example 3: Mixing solid solutions
if False:
# Defining a rock using a predefined solid solution from the mineral library database.
preset_solidsolution=minerals.SLB_2011.mg_fe_perovskite()
# The line below is optional to see which endmembers (and in which order) are in the solid solution
#print preset_solidsolution.endmembers
#Set molar_fraction of mg_perovskite, fe_perovskite and al_perovskite
        preset_solidsolution.set_composition([0.9,0.1,0.]) # Set molar_fraction of mg_perovskite, fe_perovskite and al_perovskite
        rock = burnman.Composite([0.8, 0.2], phases=[preset_solidsolution, minerals.SLB_2011.periclase()])
#Example 4: Defining your own solid solution
if False:
# Define a new SolidSolution with mg and fe perovskite endmembers
new_solidsolution = burnman.SolidSolution([[minerals.SLB_2011.mg_perovskite()],
[minerals.SLB_2011.fe_perovskite()]])
# Set molar fraction of endmembers
new_solidsolution.set_composition([0.9,0.1])
rock=burnman.Composite([0.8, 0.2], [new_solidsolution, minerals.SLB_2011.periclase()])
#seismic model for comparison:
# pick from .prem() .slow() .fast() (see burnman/seismic.py)
seismic_model = burnman.seismic.PREM()
number_of_points = 20 #set on how many depth slices the computations should be done
# we will do our computation and comparison at the following depth values:
depths = np.linspace(700e3, 2800e3, number_of_points)
#alternatively, we could use the values where prem is defined:
#depths = seismic_model.internal_depth_list()
seis_p, seis_rho, seis_vp, seis_vs, seis_vphi = seismic_model.evaluate_all_at(depths)
temperature = burnman.geotherm.brown_shankland(seis_p)
print "Calculations are done for:"
rock.debug_print()
moduli_list = burnman.calculate_moduli(rock, seis_p, temperature)
moduli = burnman.average_moduli(moduli_list, burnman.averaging_schemes.VoigtReussHill())
mat_vp, mat_vs, mat_vphi = burnman.compute_velocities(moduli)
mat_K = np.array([m.K for m in moduli])
mat_G = np.array([m.G for m in moduli])
mat_rho = np.array([m.rho for m in moduli])
[vs_err, vphi_err, rho_err] = burnman.compare_chifactor (
[mat_vs,mat_vphi,mat_rho],[seis_vs,seis_vphi,seis_rho])
# PLOTTING
# plot vs
plt.subplot(2,2,1)
plt.plot(seis_p/1.e9,mat_vs/1.e3,color='b',linestyle='-',marker='o', \
markerfacecolor='b',markersize=4,label='computation')
plt.plot(seis_p/1.e9,seis_vs/1.e3,color='k',linestyle='-',marker='o', \
markerfacecolor='k',markersize=4,label='reference')
plt.title("Vs (km/s)")
plt.xlim(min(seis_p)/1.e9,max(seis_p)/1.e9)
plt.ylim(5.1,7.6)
plt.legend(loc='lower right')
plt.text(40,7.3,"misfit= %3.3f" % vs_err)
# plot Vphi
plt.subplot(2,2,2)
plt.plot(seis_p/1.e9,mat_vphi/1.e3,color='b',linestyle='-',marker='o', \
markerfacecolor='b',markersize=4)
plt.plot(seis_p/1.e9,seis_vphi/1.e3,color='k',linestyle='-',marker='o', \
markerfacecolor='k',markersize=4)
plt.title("Vphi (km/s)")
plt.xlim(min(seis_p)/1.e9,max(seis_p)/1.e9)
plt.ylim(7,12)
plt.text(40,11.5,"misfit= %3.3f" % vphi_err)
# plot density
plt.subplot(2,2,3)
plt.plot(seis_p/1.e9,mat_rho/1.e3,color='b',linestyle='-',marker='o', \
markerfacecolor='b',markersize=4)
plt.plot(seis_p/1.e9,seis_rho/1.e3,color='k',linestyle='-',marker='o', \
markerfacecolor='k',markersize=4)
plt.title("density (kg/m^3)")
plt.xlim(min(seis_p)/1.e9,max(seis_p)/1.e9)
plt.text(40,4.3,"misfit= %3.3f" % rho_err)
plt.xlabel("Pressure (GPa)")
# plot geotherm
plt.subplot(2,2,4)
plt.plot(seis_p/1e9,temperature,color='r',linestyle='-',marker='o', \
markerfacecolor='r',markersize=4)
plt.title("Geotherm (K)")
plt.xlim(min(seis_p)/1.e9,max(seis_p)/1.e9)
plt.xlabel("Pressure (GPa)")
plt.savefig("output_figures/example_composition.png")
plt.show()
|
jerodg/hackerrank-python
|
python/02.Strings/10.AlphabetRangoli/template.py
|
Python
|
mit
| 118 | 0.016949 |
def print_rangoli(size):
# your code goes here
if __name__ == '__main__':
|
n = int(input())
print_rangoli(n)
|
TheWylieStCoyote/gnuradio
|
gr-digital/python/digital/__init__.py
|
Python
|
gpl-3.0
| 1,013 | 0 |
# Copyright 2011-2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# SPDX-License-Identifier: GPL-3.0-or-later
#
#
'''
Blocks and utilities for digital modulation and demodulation.
'''
from __future__ import absolute_import
from __future__ import unicode_literals
# The presence of this file turns this directory into a Python package
import os
try:
from .digital_swig import *
except ImportError:
dirname, filename = os.path.split(os.path.abspath(__file__))
__path__.append(os.path.join(dirname, "..", "..", "swig"))
from .digital_swig import *
from .psk import *
from .qam import *
from .qamlike import *
from .bpsk import *
from .qpsk import *
from .gmsk import *
from .gfsk import *
from .cpm import *
from .crc import *
from .modulation_utils import *
from .ofdm_txrx import ofdm_tx, ofdm_rx
from .soft_dec_lut_gen import *
from .psk_constellations import *
from .qam_constellations import *
from .constellation_map_generator import *
from . import packet_utils
|
ndt93/tetris
|
scripts/agent3.py
|
Python
|
mit
| 5,234 | 0 |
import random
from datetime import datetime
from multiprocessing import Pool
import numpy as np
from scipy.optimize import minimize
def worker_func(args):
self = args[0]
m = args[1]
k = args[2]
r = args[3]
return (self.eval_func(m, k, r) -
self.eval_func(m, k, self.rt) -
self.temporal_diff_sum(m, k)) ** 2
def optimized_func_i_der(args):
"""
The derivative of the optimized function with respect to the
ith component of the vector r
"""
self = args[0]
r = args[1]
i = args[2]
result = 0
M = len(self.data)
for m in range(M):
Nm = self.data[m].shape[0] - 1
for k in range(Nm + 1):
result += ((self.eval_func(m, k, r) -
self.eval_func(m, k, self.rt) -
self.temporal_diff_sum(m, k)) * 2 *
self.eval_func_der(m, k, r, i))
return result
def worker_func_der(args):
self = args[0]
m = args[1]
k = args[2]
r = args[3]
i = args[4]
return ((self.eval_func(m, k, r) -
self.eval_func(m, k, self.rt) -
self.temporal_diff_sum(m, k)) * 2 *
self.eval_func_der(m, k, r, i))
class Agent:
num_features = 22
def __init__(self):
self.lf = 0.2 # Learning factor lambda
self.data = [] # The features' values for all the games
self.rewards = [] # Reward values for moving from 1 state to the next
self.rt = np.array([])
self.max_iter = 50
def set_learning_factor(self, learning_factor):
assert(learning_factor >= 0 and learning_factor <= 1)
self.lf = learning_factor
def set_rt(self, rt):
assert(len(rt) == self.num_features)
self.rt = rt
def set_iter(self, max_iter):
self.max_iter = max_iter
def set_data(self, data):
self.data = []
self.rewards = []
for game in data:
game = np.vstack((game, np.zeros(self.num_features + 1)))
self.data.append(game[:, :-1])
self.rewards.append(game[:, -1:])
def eval_func(self, m, k, r):
"""
The evaluation function value for the set of weights (vector) r
at the mth game and kth board state """
return np.dot(r, self.data[m][k])
def eval_func_der(self, m, k, r, i):
"""
Find the derivative of the evaluation function with respect
to the ith component of the vector r
"""
return self.data[m][k][i]
def get_reward(self, m, s):
"""
Get reward for moving from state s to state (s + 1)
"""
return self.rewards[m][s + 1][0]
def temporal_diff(self, m, s):
"""
The temporal diffence value for state s to state (s+1) in the mth game
"""
return (self.get_reward(m, s) + self.eval_func(m, s + 1, self.rt) -
self.eval_func(m, s, self.rt))
def temporal_diff_sum(self, m, k):
Nm = self.data[m].shape[0] - 1
result = 0
for s in range(k, Nm):
result += self.lf**(s - k) * self.temporal_diff(m, s)
return result
def optimized_func(self, r):
result = 0
M = len(self.data)
pool = Pool(processes=4)
for m in range(M):
Nm = self.data[m].shape[0] - 1
k_args = range(Nm + 1)
self_args = [self] * len(k_args)
m_args = [m] * len(k_args)
r_args = [r] * len(k_args)
result += sum(pool.map(worker_func,
zip(self_args, m_args, k_args, r_args)))
return result
def optimized_func_i_der(self, r, i):
"""
The derivative of the optimized function with respect to the
ith component of the vector r
"""
result = 0
M = len(self.data)
for m in range(M):
Nm = self.data[m].shape[0] - 1
for k in range(Nm + 1):
result += ((self.eval_func(m, k, r) -
self.eval_func(m, k, self.rt) -
self.temporal_diff_sum(m, k)) * 2 *
self.eval_func_der(m, k, r, i))
return result
def optimized_func_der(self, r):
p = Pool(processes=4)
self_args = [self] * len(r)
i_args = range(len(r))
r_args = [r] * len(r)
return np.array(p.map(optimized_func_i_der,
zip(self_args, r_args, i_args)))
def callback(self, r):
print("Iteration %d completed at %s" %
(self.cur_iter, datetime.now().strftime("%d/%m/%Y %H:%M:%S")))
self.cur_iter += 1
def compute_next_rt(self):
print("Start computing at %s" %
(datetime.now().strftime("%d/%m/%Y %H:%M:%S")))
self.cur_iter = 1
r0 = np.array([random.randint(-10, 10)
for i in range(self.num_features)])
res = minimize(self.optimized_func, r0, method='BFGS',
jac=self.optimized_func_der,
                       options={'maxiter': self.max_iter, 'disp': True},
                       callback=self.callback)
return res.x
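# Illustrative usage sketch (not part of the original script).  The games
# below are random dummy data; only the shape matters: each game is an
# (N+1) x (num_features + 1) array whose last column holds the rewards.
if __name__ == '__main__':
    agent = Agent()
    agent.set_learning_factor(0.3)
    agent.set_iter(5)
    agent.set_rt(np.zeros(Agent.num_features))
    dummy_games = [np.random.rand(10, Agent.num_features + 1) for _ in range(2)]
    agent.set_data(dummy_games)
    print(agent.compute_next_rt())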
|
stlemme/python-dokuwiki-export
|
entities/application.py
|
Python
|
mit
| 202 | 0.034653 |
from . import NamedEntity
class Application(NamedEntity):
def __init__(self, name, provider):
NamedEntity.__init__(self, name)
|
self.provider = provider
def get_descendants(self):
return []
|
jjas0nn/solvem
|
tensorflow/lib/python2.7/site-packages/tensorflow/contrib/slim/python/slim/learning_test.py
|
Python
|
mit
| 34,627 | 0.008981 |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for slim.learning."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tempfile
import numpy as np
from numpy import testing as np_testing
from tensorflow.contrib.framework.python.ops import variables as variables_lib2
from tensorflow.contrib.layers.python.layers import layers
from tensorflow.contrib.losses.python.losses import loss_ops
from tensorflow.contrib.slim.python.slim import learning
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.client import session
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variables as variables_lib
from tensorflow.python.platform import test
from tensorflow.python.summary import summary
from tensorflow.python.training import gradient_descent
from tensorflow.python.training import saver as saver_lib
class ClipGradientNormsTest(test.TestCase):
def clip_values(self, arr):
norm = np.sqrt(np.sum(arr**2))
if norm > self._max_norm:
return self._max_norm * arr / np.sqrt(np.sum(arr**2))
return arr
def setUp(self):
np.random.seed(0)
self._max_norm = 1.0
self._grad_vec = np.array([1., 2., 3.])
self._clipped_grad_vec = self.clip_values(self._grad_vec)
self._zero_vec = np.zeros(self._grad_vec.size)
def testOrdinaryGradIsClippedCorrectly(self):
gradient = constant_op.constant(self._grad_vec, dtype=dtypes.float32)
variable = variables_lib.Variable(self._zero_vec, dtype=dtypes.float32)
gradients_to_variables = (gradient, variable)
[gradients_to_variables] = learning.clip_gradient_norms(
[gradients_to_variables], self._max_norm)
# Ensure the variable passed through.
self.assertEqual(gradients_to_variables[1], variable)
with self.test_session() as sess:
actual_gradient = sess.run(gradients_to_variables[0])
np_testing.assert_almost_equal(actual_gradient, self._clipped_grad_vec)
def testNoneGradPassesThroughCorrectly(self):
gradient = None
variable = variables_lib.Variable(self._zero_vec, dtype=dtypes.float32)
gradients_to_variables = (gradient, variable)
[gradients_to_variables] = learning.clip_gradient_norms(
[gradients_to_variables], self._max_norm)
self.assertEqual(gradients_to_variables[0], None)
self.assertEqual(gradients_to_variables[1], variable)
def testIndexedSlicesGradIsClippedCorrectly(self):
sparse_grad_indices = np.array([0, 1, 4])
sparse_grad_dense_shape = [self._grad_vec.size]
values = constant_op.constant(self._grad_vec, dtype=dtypes.float32)
indices = constant_op.constant(sparse_grad_indices, dtype=dtypes.int32)
dense_shape = constant_op.constant(
sparse_grad_dense_shape, dtype=dtypes.int32)
gradient = ops.IndexedSlices(values, indices, dense_shape)
variable = variables_lib.Variable(self._zero_vec, dtype=dtypes.float32)
    gradients_to_variables = (gradient, variable)
    gradients_to_variables = learning.clip_gradient_norms(
        [gradients_to_variables], self._max_norm)[0]
    # Ensure the built IndexedSlice has the right form.
    self.assertEqual(gradients_to_variables[1], variable)
self.assertEqual(gradients_to_variables[0].indices, indices)
self.assertEqual(gradients_to_variables[0].dense_shape, dense_shape)
with session.Session() as sess:
actual_gradient = sess.run(gradients_to_variables[0].values)
np_testing.assert_almost_equal(actual_gradient, self._clipped_grad_vec)
class MultiplyGradientsTest(test.TestCase):
def setUp(self):
np.random.seed(0)
self._multiplier = 3.7
self._grad_vec = np.array([1., 2., 3.])
self._multiplied_grad_vec = np.multiply(self._grad_vec, self._multiplier)
def testNonListGradsRaisesError(self):
gradient = constant_op.constant(self._grad_vec, dtype=dtypes.float32)
variable = variables_lib.Variable(array_ops.zeros_like(gradient))
grad_to_var = (gradient, variable)
gradient_multipliers = {variable: self._multiplier}
with self.assertRaises(ValueError):
learning.multiply_gradients(grad_to_var, gradient_multipliers)
def testEmptyMultiplesRaisesError(self):
gradient = constant_op.constant(self._grad_vec, dtype=dtypes.float32)
variable = variables_lib.Variable(array_ops.zeros_like(gradient))
grad_to_var = (gradient, variable)
with self.assertRaises(ValueError):
learning.multiply_gradients([grad_to_var], {})
def testNonDictMultiplierRaisesError(self):
gradient = constant_op.constant(self._grad_vec, dtype=dtypes.float32)
variable = variables_lib.Variable(array_ops.zeros_like(gradient))
grad_to_var = (gradient, variable)
with self.assertRaises(ValueError):
learning.multiply_gradients([grad_to_var], 3)
def testMultipleOfNoneGradRaisesError(self):
gradient = constant_op.constant(self._grad_vec, dtype=dtypes.float32)
variable = variables_lib.Variable(array_ops.zeros_like(gradient))
grad_to_var = (None, variable)
gradient_multipliers = {variable: self._multiplier}
with self.assertRaises(ValueError):
learning.multiply_gradients(grad_to_var, gradient_multipliers)
def testMultipleGradientsWithVariables(self):
gradient = constant_op.constant(self._grad_vec, dtype=dtypes.float32)
variable = variables_lib.Variable(array_ops.zeros_like(gradient))
grad_to_var = (gradient, variable)
gradient_multipliers = {variable: self._multiplier}
[grad_to_var] = learning.multiply_gradients([grad_to_var],
gradient_multipliers)
# Ensure the variable passed through.
self.assertEqual(grad_to_var[1], variable)
with self.test_session() as sess:
actual_gradient = sess.run(grad_to_var[0])
np_testing.assert_almost_equal(actual_gradient, self._multiplied_grad_vec,
5)
def testIndexedSlicesGradIsMultiplied(self):
values = constant_op.constant(self._grad_vec, dtype=dtypes.float32)
indices = constant_op.constant([0, 1, 2], dtype=dtypes.int32)
dense_shape = constant_op.constant(
[self._grad_vec.size], dtype=dtypes.int32)
gradient = ops.IndexedSlices(values, indices, dense_shape)
variable = variables_lib.Variable(array_ops.zeros((1, 3)))
grad_to_var = (gradient, variable)
gradient_multipliers = {variable: self._multiplier}
[grad_to_var] = learning.multiply_gradients([grad_to_var],
gradient_multipliers)
# Ensure the built IndexedSlice has the right form.
self.assertEqual(grad_to_var[1], variable)
self.assertEqual(grad_to_var[0].indices, indices)
self.assertEqual(grad_to_var[0].dense_shape, dense_shape)
with self.test_session() as sess:
actual_gradient = sess.run(grad_to_var[0].values)
np_testing.assert_almost_equal(actual_gradient, self._multiplied_grad_vec,
5)
def LogisticClassifier(inputs):
return layers.fully_connected(inputs, 1, activation_fn=math_ops.sigmoid)
def BatchNormClassifier(inputs):
inputs = layers.batch_norm(inputs, decay=0.1)
return layers.fully_connected(inputs, 1, activation_fn=math_ops.sigmoid)
class TrainBNClassifierTe
|
wevote/WeVoteServer
|
follow/models.py
|
Python
|
mit
| 78,451 | 0.004015 |
# follow/models.py
# Brought to you by We Vote. Be good.
# -*- coding: UTF-8 -*-
from datetime import datetime, timedelta
from django.db import models
from election.models import ElectionManager
from exception.models import handle_exception, handle_record_found_more_than_one_exception,\
handle_record_not_found_exception, handle_record_not_saved_exception, print_to_log
from issue.models import IssueManager
from organization.models import OrganizationManager
import pytz
import wevote_functions.admin
from wevote_functions.functions import positive_value_exists
from voter.models import VoterManager
FOLLOWING = 'FOLLOWING'
STOP_FOLLOWING = 'STOP_FOLLOWING'
FOLLOW_IGNORE = 'FOLLOW_IGNORE'
STOP_IGNORING = 'STOP_IGNORING'
FOLLOWING_CHOICES = (
(FOLLOWING, 'Following'),
(STOP_FOLLOWING, 'Not Following'),
(FOLLOW_IGNORE, 'Ignoring'),
(STOP_IGNORING, 'Not Ignoring'),
)
# Kinds of lists of suggested organization
UPDATE_SUGGESTIONS_FROM_TWITTER_IDS_I_FOLLOW = 'UPDATE_SUGGESTIONS_FROM_TWITTER_IDS_I_FOLLOW'
UPDATE_SUGGESTIONS_FROM_WHAT_FRIENDS_FOLLOW = 'UPDATE_SUGGESTIONS_FROM_WHAT_FRIENDS_FOLLOW'
UPDATE_SUGGESTIONS_FROM_WHAT_FRIENDS_FOLLOW_ON_TWITTER = \
'UPDATE_SUGGESTIONS_FROM_WHAT_FRIENDS_FOLLOW_ON_TWITTER'
UPDATE_SUGGESTIONS_FROM_WHAT_FRIEND_FOLLOWS = 'UPDATE_SUGGESTIONS_FROM_WHAT_FRIEND_FOLLOWS'
UPDATE_SUGGESTIONS_FROM_WHAT_FRIEND_FOLLOWS_ON_TWITTER = \
'UPDATE_SUGGESTIONS_FROM_WHAT_FRIEND_FOLLOWS_ON_TWITTER'
UPDATE_SUGGESTIONS_ALL = 'UPDATE_SUGGESTIONS_ALL'
FOLLOW_SUGGESTIONS_FROM_TWITTER_IDS_I_FOLLOW = 'FOLLOW_SUGGESTIONS_FROM_TWITTER_IDS_I_FOLLOW'
FOLLOW_SUGGESTIONS_FROM_FRIENDS = 'FOLLOW_SUGGESTIONS_FROM_FRIENDS'
FOLLOW_SUGGESTIONS_FROM_FRIENDS_ON_TWITTER = 'FOLLOW_SUGGESTIONS_FROM_FRIENDS_ON_TWITTER'
logger = wevote_functions.admin.get_logger(__name__)
class FollowCampaignX(models.Model):
voter_we_vote_id = models.CharField(max_length=255, null=True, blank=True, unique=False, db_index=True)
organization_we_vote_id = models.CharField(max_length=255, null=True, blank=True, unique=False)
campaignx_id = models.PositiveIntegerField(null=True, blank=True)
campaignx_we_vote_id = models.CharField(max_length=255, null=True, blank=True, unique=False)
date_last_changed = models.DateTimeField(verbose_name='date last changed', null=True, auto_now=True, db_index=True)
class FollowCampaignXManager(models.Manager):
def __unicode__(self):
return "FollowCampaignXManager"
def toggle_on_follow_campaignx(self, voter_we_vote_id, issue_id, issue_we_vote_id, following_status):
follow_campaignx_on_stage_found = False
follow_campaignx_changed = False
follow_campaignx_on_stage_id = 0
follow_campaignx_on_stage = FollowIssue()
status = ''
issue_identifier_exists = positive_value_exists(issue_we_vote_id) or positive_value_exists(issue_id)
if not positive_value_exists(voter_we_vote_id) and not issue_identifier_exists:
results = {
'success': True if follow_campaignx_on_stage_found else False,
'status': 'Insufficient inputs to toggle issue link, try passing ids for voter and issue ',
'follow_campaignx_found': follow_campaignx_on_stage_found,
'follow_campaignx_id': follow_campaignx_on_stage_id,
'follow_campaignx': follow_campaignx_on_stage,
}
return results
        # Does a follow_campaignx entry from this voter already exist?
follow_campaignx_manager = FollowIssueManager()
follow_campaignx_id = 0
results = follow_campaignx_manager.retrieve_follow_campaignx(follow_campaignx_id, voter_we_vote_id, issue_id,
issue_we_vote_id)
if results['MultipleObjectsReturned']:
status += 'TOGGLE_FOLLOWING_ISSUE MultipleObjectsReturned ' + following_status
delete_results = follow_campaignx_manager.delete_follow_campaignx(
follow_campaignx_id, voter_we_vote_id, issue_id, issue_we_vote_id)
status += delete_results['status']
results = follow_campaignx_manager.retrieve_follow_campaignx(follow_campaignx_id, voter_we_vote_id, issue_id,
issue_we_vote_id)
if results['follow_campaignx_found']:
follow_campaignx_on_stage = results['follow_campaignx']
# Update this follow_campaignx entry with new values - we do not delete because we might be able to use
try:
follow_campaignx_on_stage.following_status = following_status
                # We don't need to update date_last_changed here because we set auto_now=True on the field
# follow_campaignx_on_stage.date_last_changed =
follow_campaignx_on_stage.save()
follow_campaignx_changed = True
follow_campaignx_on_stage_id = follow_campaignx_on_stage.id
follow_campaignx_on_stage_found = True
status += 'FOLLOW_STATUS_UPDATED_AS ' + following_status
except Exception as e:
status += 'FAILED_TO_UPDATE ' + following_status
handle_record_not_saved_exception(e, logger=logger, exception_message_optional=status)
elif results['DoesNotExist']:
try:
# Create new follow_campaignx entry
# First make sure that issue_id is for a valid issue
issue_manager = IssueManager()
if positive_value_exists(issue_id):
results = issue_manager.retrieve_issue(issue_id)
else:
results = issue_manager.retrieve_issue(0, issue_we_vote_id)
if results['issue_found']:
issue = results['issue']
follow_campaignx_on_stage = FollowIssue(
voter_we_vote_id=voter_we_vote_id,
issue_id=issue.id,
issue_we_vote_id=issue.we_vote_id,
following_status=following_status,
)
# if auto_followed_from_twitter_suggestion:
# follow_campaignx_on_stage.auto_followed_from_twitter_suggestion = True
follow_campaignx_on_stage.save()
follow_campaignx_changed = True
follow_campaignx_on_stage_id = follow_campaignx_on_stage.id
follow_campaignx_on_stage_found = True
status += 'CREATE ' + following_status
else:
status = 'ISSUE_NOT_FOUND_ON_CREATE ' + following_status
except Exception as e:
status += 'FAILED_TO_UPDATE ' + following_status
handle_record_not_saved_exception(e, logger=logger, exception_message_optional=status)
else:
status += results['status']
results = {
'success': True if follow_campaignx_on_stage_found else False,
'status': status,
'follow_campaignx_found': follow_campaignx_on_stage_found,
            'follow_campaignx_id':      follow_campaignx_on_stage_id,
'follow_campaignx': follow_campaignx_on_stage,
}
return results
def retrieve_follow_campaignx(self, follow_campaignx_id, voter_we_vote_id, issue_id, issue_we_vote_id):
"""
follow_campaignx_id is the identifier for records stored in this table (it is NOT the issue_id)
"""
error_result = False
exception_does_not_exist = False
        exception_multiple_object_returned = False
follow_campaignx_on_stage = FollowIssue()
follow_campaignx_on_stage_id = 0
try:
if positive_value_exists(follow_campaignx_id):
follow_campaignx_on_stage = FollowIssue.objects.get(id=follow_campaignx_id)
follow_campaignx_on_stage_id = issue_id.id
success = True
status = 'FOLLOW_ISSUE_FOUND_WITH_ID'
elif
|
aselle/tensorflow
|
tensorflow/python/estimator/keras.py
|
Python
|
apache-2.0
| 21,876 | 0.006491 |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=protected-access
"""Home of estimator related functions.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import re
from tensorflow.python.client import session
from tensorflow.python.estimator import estimator as estimator_lib
from tensorflow.python.estimator import export as export_lib
from tensorflow.python.estimator import model_fn as model_fn_lib
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.framework import sparse_tensor as sparse_tensor_lib
from tensorflow.python.framework import tensor_util
from tensorflow.python.keras import backend as K
from tensorflow.python.keras import models
from tensorflow.python.keras import optimizers
from tensorflow.python.keras.engine.base_layer import Layer
from tensorflow.python.keras.engine.network import Network
from tensorflow.python.keras.utils.generic_utils import CustomObjectScope
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import metrics as metrics_module
from tensorflow.python.platform import gfile
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.saved_model import signature_constants
from tensorflow.python.training import checkpoint_management
from tensorflow.python.training import distribute as distribute_lib
from tensorflow.python.training import saver as saver_lib
from tensorflow.python.training import training_util
from tensorflow.python.training.checkpointable import base as checkpointable
from tensorflow.python.training.checkpointable import data_structures
_DEFAULT_SERVING_KEY = signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY
def _cast_tensor_to_floatx(x):
"""Cast tensor to keras's floatx dtype if it is not already the same dtype."""
if x.dtype == K.floatx():
return x
else:
return math_ops.cast(x, K.floatx())
def _convert_tensor(x):
"""Create or cast tensor if needed."""
if not tensor_util.is_tensor(x):
# x is a numpy array
x = sparse_tensor_lib.convert_to_tensor_or_sparse_tensor(x)
if check_ops.is_numeric_tensor(x):
# is_numeric_tensor returns False if provided with a numpy array
x = _cast_tensor_to_floatx(x)
return x
def _any_weight_initialized(keras_model):
"""Check if any weights has been initialized in the Keras model.
Args:
keras_model: An instance of compiled keras model.
Returns:
boolean, True if at least one weight has been initialized, else False.
Currently keras initialize all weights at get_session().
"""
if keras_model is None:
return False
for layer in keras_model.layers:
for weight in layer.weights:
if hasattr(weight, '_keras_initialized'):
return True
return False
def _create_ordered_io(keras_model, estimator_io, is_input=True):
"""Create a list of tensors from IO dictionary based on Keras IO order.
Args:
keras_model: An instance of compiled keras model.
estimator_io: The features or labels (dict or plain array) from model_fn.
is_input: True if dictionary is for inputs.
Returns:
A list of tensors based on Keras IO order.
Raises:
ValueError: if dictionary keys cannot be found in Keras model input_names
or output_names.
"""
if isinstance(estimator_io, (list, tuple)):
# Case currently not supported by most built-in input_fn,
# but it's good to have for sanity
return [_convert_tensor(x) for x in estimator_io]
elif isinstance(estimator_io, dict):
if is_input:
if keras_model._is_graph_network:
keras_io_names = keras_model.input_names
else:
keras_io_names = [
'input_%d' % i for i in range(1, len(estimator_io) + 1)]
else:
if keras_model._is_graph_network:
keras_io_names = keras_model.output_names
else:
keras_io_names = [
            'output_%d' % i for i in range(1, len(estimator_io) + 1)]
for key in estimator_io:
if key not in keras_io_names:
raise ValueError(
'Cannot find %s with name "%s" in Keras Model. '
'It needs to match one '
            'of the following: %s' % ('input' if is_input else 'output', key,
', '.join(keras_io_names)))
tensors = [_convert_tensor(estimator_io[io_name])
for io_name in keras_io_names]
return tensors
else:
# Plain array.
return _convert_tensor(estimator_io)
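def _demo_create_ordered_io():
  """Hypothetical illustration (not part of the original module).

  Shows how the dict branch of `_create_ordered_io` reorders features to match
  the model's declared input order.  The fake model below carries only the two
  attributes the helper actually reads; all names are made up.
  """
  import numpy as np

  class _FakeGraphModel(object):
    _is_graph_network = True
    input_names = ['age', 'income']

  features = {'income': np.ones((4, 1)), 'age': np.zeros((4, 1))}
  # Returns [tensor(age), tensor(income)], each cast to K.floatx().
  return _create_ordered_io(_FakeGraphModel(), features, is_input=True)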
def _in_place_subclassed_model_reset(model):
"""Substitute for model cloning that works for subclassed models.
Subclassed models cannot be cloned because their topology is not serializable.
To "instantiate" an identical model in a new TF graph, we reuse the original
model object, but we clear its state.
After calling this function on a model instance, you can use the model
instance as if it were a model clone (in particular you can use it in a new
graph).
This method clears the state of the input model. It is thus destructive.
However the original state can be restored fully by calling
`_in_place_subclassed_model_state_restoration`.
Args:
model: Instance of a Keras model created via subclassing.
Raises:
ValueError: In case the model uses a subclassed model as inner layer.
"""
assert not model._is_graph_network # Only makes sense for subclassed networks
# Retrieve all layers tracked by the model as well as their attribute names
attributes_cache = {}
for name in dir(model):
try:
value = getattr(model, name)
except (AttributeError, ValueError, TypeError):
continue
if isinstance(value, Layer):
attributes_cache[name] = value
assert value in model._layers
elif isinstance(value, (list, tuple)) and name not in ('layers', '_layers'):
# Handle case: list/tuple of layers (also tracked by the Network API).
if value and all(isinstance(val, Layer) for val in value):
raise ValueError('We do not support the use of list-of-layers '
'attributes in subclassed models used with '
'`model_to_estimator` at this time. Found list '
'model: %s' % name)
# Replace layers on the model with fresh layers
layers_to_names = {value: key for key, value in attributes_cache.items()}
original_layers = model._layers[:]
model._layers = data_structures.NoDependency([])
for layer in original_layers: # We preserve layer order.
config = layer.get_config()
# This will not work for nested subclassed models used as layers.
# This would be theoretically possible to support, but would add complexity.
# Only do it if users complain.
if isinstance(layer, Network) and not layer._is_graph_network:
raise ValueError('We do not support the use of nested subclassed models '
'in `model_to_estimator` at this time. Found nested '
'model: %s' % layer)
fresh_layer = layer.__class__.from_config(config)
name = layers_to_names[layer]
setattr(model, name, fresh_layer)
# Cache original model build attributes (in addition to layers)
if (not hasattr(model, '_original_attributes_cache') or
model._original_attributes_cache is None):
if model.built:
attributes_to_cache = [
'inputs',
'outputs',
'_feed_outputs',
'_feed_output_names'
|
weaver-viii/h2o-3
|
h2o-py/tests/testdir_misc/pyunit_assign.py
|
Python
|
apache-2.0
| 567 | 0.012346 |
import sys
sys.path.insert(1, "../../")
import h2o
def pyunit_assign(ip,port):
    pros = h2o.import_file(h2o.locate("smalldata/prostate/prostate.csv"))
pq = pros.quantile()
    PSA_outliers = pros[(pros["PSA"] <= pq[1,1]) | (pros["PSA"] >= pq[1,9])]
PSA_outliers = h2o.assign(PSA_outliers, "PSA.outliers")
pros.head(show=True)
PSA_outliers.head(show=True)
assert PSA_outliers._id == "PSA.outliers", "Expected frame id to be PSA.outliers, but got {0}".format(PSA_outliers._id)
if __name__ == "__main__":
|
h2o.run_test(sys.argv, pyunit_assign)
|
stack-of-tasks/rbdlpy
|
tutorial/lib/python2.7/site-packages/OpenGL/raw/GL/ATI/pixel_format_float.py
|
Python
|
lgpl-3.0
| 648 | 0.023148 |
'''Autogenerated by xml_generate script, do not edit!'''
from OpenGL import platform as _p, arrays
# Code generation uses this
from OpenGL.raw.GL import _types as _cs
# End users want this...
from OpenGL.raw.GL._types import *
from OpenGL.raw.GL import _errors
from OpenGL.constant import Constant as _C
import ctypes
_EXTENSION_NAME = 'GL_ATI_pixel_format_float'
def _f( function ):
    return _p.createFunction( function,_p.PLATFORM.GL,'GL_ATI_pixel_format_float',error_checker=_errors._error_checker)
GL_COLOR_CLEAR_UNCLAMPED_VALUE_ATI=_C('GL_COLOR_CLEAR_UNCLAMPED_VALUE_ATI',0x8835)
GL_RGBA_FLOAT_MODE_ATI=_C('GL_RGBA_FLOAT_MODE_ATI',0x8820)
|
DepthDeluxe/ansible
|
lib/ansible/modules/windows/win_scheduled_task.py
|
Python
|
gpl-3.0
| 3,418 | 0.000585 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# this is a windows documentation stub. actual code lives in the .ps1
# file of the same name
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
                    'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: win_scheduled_task
author: "Peter Mounce"
version_added: "2.0"
short_description: Manage scheduled tasks
description:
- Manage scheduled tasks
notes:
- This module requires Windows Server 2012 or later.
options:
name:
description:
- Name of the scheduled task
required: true
description:
description:
- The description for the scheduled task
enabled:
    description:
- Enable/disable the task
choices:
- yes
- no
default: yes
state:
description:
- State that the task should become
required: true
choices:
- present
- absent
user:
description:
- User to run scheduled task as
executable:
description:
- Command the scheduled task should execute
aliases: [ execute ]
arguments:
description:
- Arguments to provide scheduled task action
aliases: [ argument ]
frequency:
description:
- The frequency of the command, not idempotent
- C(interval) added in Ansible 2.4
- C(hourly) added in Ansible 2.4
choices:
- once
- interval
- hourly
- daily
- weekly
time:
description:
- Time to execute scheduled task, not idempotent
days_of_week:
description:
- Days of the week to run a weekly task, not idempotent
required: false
interval:
description:
- When frequency is set to interval, time between executions, units are set by "interval_unit"
required: false
version_added: "2.4"
interval_unit:
description:
- Unit of time between interval, can be seconds, minutes, hours, days
default: minutes
version_added: "2.4"
path:
description:
- Task folder in which this task will be stored - creates a non-existent path when C(state) is C(present),
and removes an empty path when C(state) is C(absent)
default: '\'
'''
EXAMPLES = r'''
# Create a scheduled task to open a command prompt
- win_scheduled_task:
name: TaskName
description: open command prompt
executable: cmd
arguments: -opt1 -opt2
path: \example
time: 9am
frequency: daily
state: present
enabled: yes
user: SYSTEM
# create an interval task to run every 12 minutes starting at 2pm
- win_scheduled_task:
name: IntervalTask
execute: cmd
frequency: interval
interval: 12
time: 2pm
path: example
enable: yes
state: present
user: SYSTEM
'''
|
william-richard/moto
|
tests/test_swf/models/test_timeout.py
|
Python
|
apache-2.0
| 501 | 0 |
from freezegun import freeze_time
import sure # noqa
from moto.swf.models import Timeout
from ..utils import make_workflow_execution
def test_timeout_creation():
wfe = make_workflow_execution()
# epoch 1420113600 == "2015-01-01 13:00:00"
|
timeout = Timeout(wfe, 1420117200, "START_TO_CLOSE")
with freeze_time("2015-01-01 12:00:00"):
timeout.reached.should.be.falsy
with freeze_time("2015-01-01 13:00:00"):
timeout.reached.should.be.truthy
| |
xmendez/wfuzz
|
src/wfuzz/filters/simplefilter.py
|
Python
|
gpl-2.0
| 3,567 | 0.00028 |
from ..exception import FuzzExceptBadOptions
import re
import collections
from ..facade import BASELINE_CODE
class FuzzResSimpleFilter:
def __init__(self, ffilter=None):
self.hideparams = dict(
regex_show=None,
codes_show=None,
codes=[],
words=[],
lines=[],
chars=[],
regex=None,
)
if ffilter is not None:
self.hideparams = ffilter
self.stack = []
self._cache = collections.defaultdict(set)
def is_active(self):
return any(
[
self.hideparams["regex_show"] is not None,
self.hideparams["codes_show"] is not None,
]
)
def set_baseline(self, res):
if BASELINE_CODE in self.hideparams["lines"]:
self.hideparams["lines"].append(res.lines)
if BASELINE_CODE in self.hideparams["codes"]:
self.hideparams["codes"].append(res.code)
        if BASELINE_CODE in self.hideparams["words"]:
self.hideparams["words"].append(res.words)
if BASELINE_CODE in self.hideparams["chars"]:
self.hideparams["chars"].append(res.chars)
def is_visible(self, res):
if self.hideparams["codes_show"] is None:
cond1 = True
else:
            cond1 = not self.hideparams["codes_show"]
if self.hideparams["regex_show"] is None:
cond2 = True
else:
cond2 = not self.hideparams["regex_show"]
if (
res.code in self.hideparams["codes"]
or res.lines in self.hideparams["lines"]
or res.words in self.hideparams["words"]
or res.chars in self.hideparams["chars"]
):
cond1 = self.hideparams["codes_show"]
if self.hideparams["regex"]:
if self.hideparams["regex"].search(res.history.content):
cond2 = self.hideparams["regex_show"]
return cond1 and cond2
@staticmethod
def from_options(filter_options):
ffilter = FuzzResSimpleFilter()
try:
if filter_options["ss"] is not None:
ffilter.hideparams["regex_show"] = True
ffilter.hideparams["regex"] = re.compile(
filter_options["ss"], re.MULTILINE | re.DOTALL
)
elif filter_options["hs"] is not None:
ffilter.hideparams["regex_show"] = False
ffilter.hideparams["regex"] = re.compile(
filter_options["hs"], re.MULTILINE | re.DOTALL
)
except Exception as e:
raise FuzzExceptBadOptions(
"Invalid regex expression used in filter: %s" % str(e)
)
if [x for x in ["sc", "sw", "sh", "sl"] if len(filter_options[x]) > 0]:
ffilter.hideparams["codes_show"] = True
ffilter.hideparams["codes"] = filter_options["sc"]
ffilter.hideparams["words"] = filter_options["sw"]
ffilter.hideparams["lines"] = filter_options["sl"]
ffilter.hideparams["chars"] = filter_options["sh"]
elif [x for x in ["hc", "hw", "hh", "hl"] if len(filter_options[x]) > 0]:
ffilter.hideparams["codes_show"] = False
ffilter.hideparams["codes"] = filter_options["hc"]
ffilter.hideparams["words"] = filter_options["hw"]
ffilter.hideparams["lines"] = filter_options["hl"]
ffilter.hideparams["chars"] = filter_options["hh"]
return ffilter
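# Illustrative sketch (not part of the original source): building a filter from
# a wfuzz-style options dict.  The keys mirror the command-line flags read by
# from_options() above; the values are made up.
def _example_show_200_only():
    filter_options = {
        "ss": None, "hs": None,                      # show/hide by regex
        "sc": [200], "sw": [], "sl": [], "sh": [],   # show by code/words/lines/chars
        "hc": [], "hw": [], "hl": [], "hh": [],      # hide by code/words/lines/chars
    }
    ffilter = FuzzResSimpleFilter.from_options(filter_options)
    return ffilter.is_active()  # True: only responses with code 200 are shown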
|
isterin/flurry
|
examples/python/load_tests.py
|
Python
|
apache-2.0
| 252 | 0.003968 |
from multiprocessing import Pool
|
from client import FlurryClient, get_id
import time
p = Pool(10)
p.map_async(get_id, [('localhost', 9090, 10000)])
# p.map_async(get_id, [('localhost', 9091, 10000)] * 2)
# p.join()
p.close()
p.join()
# time.sleep(2)
|
neilhan/tensorflow
|
tensorflow/python/framework/ops.py
|
Python
|
apache-2.0
| 150,185 | 0.005413 |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Classes and functions used to construct graphs."""
# pylint: disable=g-bad-name
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import contextlib
import copy
import linecache
import re
import sys
import threading
import six
from tensorflow.core.framework import attr_value_pb2
from tensorflow.core.framework import function_pb2
from tensorflow.core.framework import graph_pb2
from tensorflow.core.framework import node_def_pb2
from tensorflow.core.framework import versions_pb2
from tensorflow.python.framework import device as pydev
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import op_def_registry
from tensorflow.python.framework import registry
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import versions
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import compat
def _override_helper(clazz_object, operator, func):
"""Overrides (string) operator on Tensors to call func.
Args:
clazz_object: the class to override for; either Tensor or SparseTensor.
operator: the string name of the operator to override.
func: the function that replaces the overridden operator.
Raises:
ValueError: If operator has already been overwritten,
or if operator is not allowed to be overwritten.
"""
existing = getattr(clazz_object, operator, None)
if existing is not None:
# Check to see if this is a default method-wrapper or slot wrapper which
# will be true for the comparison operators.
if not isinstance(existing, type(object.__lt__)):
raise ValueError("operator %s cannot be overwritten again on class %s." %
(operator, clazz_object))
if operator not in Tensor.OVERLOADABLE_OPERATORS:
raise ValueError("Overriding %s is disallowed" % operator)
setattr(clazz_object, operator, func)
def _convert_stack(stack):
"""Converts a stack extracted using _extract_stack() to a traceback stack.
Args:
stack: A list of n 4-tuples, (filename, lineno, name, frame_globals).
Returns:
A list of n 4-tuples (filename, lineno, name, code), where the code tuple
element is calculated from the corresponding elements of the input tuple.
"""
ret = []
for filename, lineno, name, frame_globals in stack:
linecache.checkcache(filename)
line = linecache.getline(filename, lineno, frame_globals)
if line:
line = line.strip()
else:
line = None
ret.append((filename, lineno, name, line))
return ret
# pylint: disable=line-too-long
def _extract_stack():
"""A lightweight re-implementation of traceback.extract_stack.
NOTE(mrry): traceback.extract_stack eagerly retrieves the line of code for
each stack frame using linecache, which results in an abundance of stat()
calls. This implementation does not retrieve the code, and any consumer
should apply _convert_stack to the result to obtain a traceback that can
be formatted etc. using traceback methods.
Returns:
A list of 4-tuples (filename, lineno, name, frame_globals) corresponding to
the call stack of the current thread.
"""
# pylint: enable=line-too-long
try:
raise ZeroDivisionError
except ZeroDivisionError:
f = sys.exc_info()[2].tb_frame.f_back
ret = []
while f is not None:
lineno = f.f_lineno
co = f.f_code
filename = co.co_filename
name = co.co_name
frame_globals = f.f_globals
ret.append((filename, lineno, name, frame_globals))
f = f.f_back
ret.reverse()
return ret
def _as_graph_element(obj):
"""Convert `obj` to a graph element if possible, otherwise return `None`.
Args:
obj: Object to convert.
Returns:
The result of `obj._as_graph_element()` if that method is available;
otherwise `None`.
"""
conv_fn = getattr(obj, "_as_graph_element", None)
if conv_fn and callable(conv_fn):
return conv_fn()
return None
_TENSOR_LIKE_TYPES = tuple()
def is_dense_tensor_like(t):
"""EXPERIMENTAL: Returns true if `t` implements the tensor interface.
See `register_dense_tensor_like_type()` for the current definition of a
"tensor-like type".
Args:
t: An object.
Returns:
True iff `t` is an instance of one of the registered "tensor-like" types.
"""
return isinstance(t, _TENSOR_LIKE_TYPES)
def register_dense_tensor_like_type(tensor_type):
"""EXPERIMENTAL: Registers `tensor_type` as implementing the tensor interface.
A "tensor-like type" can represent a single dense tensor, and implements
the `name` and `dtype` properties.
Args:
    tensor_type: A type implementing the tensor interface.
  Raises:
    TypeError: If `tensor_type` does not implement the tensor interface.
"""
try:
    if not isinstance(tensor_type.name, property):
      raise TypeError("Type %s does not define a `name` property" %
                      tensor_type.__name__)
  except AttributeError:
    raise TypeError("Type %s does not define a `name` property" %
                    tensor_type.__name__)
  try:
    if not isinstance(tensor_type.dtype, property):
      raise TypeError("Type %s does not define a `dtype` property" %
                      tensor_type.__name__)
  except AttributeError:
    raise TypeError("Type %s does not define a `dtype` property" %
                    tensor_type.__name__)
# We expect this list to be small, so choose quadratic complexity
# for registration, so that we have a tuple that can be used for
# more efficient `isinstance` checks later.
global _TENSOR_LIKE_TYPES
_TENSOR_LIKE_TYPES = tuple(list(_TENSOR_LIKE_TYPES) + [tensor_type])
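# Hedged example (not part of the original module): a minimal class satisfying
# the "tensor-like" contract checked above -- `name` and `dtype` must be plain
# Python properties.  The class and values are made up for illustration only.
class _DemoTensorLike(object):
  """Toy stand-in used only to illustrate register_dense_tensor_like_type."""

  def __init__(self, name, dtype):
    self._name = name
    self._dtype = dtype

  @property
  def name(self):
    return self._name

  @property
  def dtype(self):
    return self._dtype

# register_dense_tensor_like_type(_DemoTensorLike)
# is_dense_tensor_like(_DemoTensorLike("demo:0", dtypes.float32))  # -> True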
class Tensor(object):
"""Represents one of the outputs of an `Operation`.
*Note:* the `Tensor` class will be replaced by `Output` in the future.
Currently these two are aliases for each other.
A `Tensor` is a symbolic handle to one of the outputs of an
`Operation`. It does not hold the values of that operation's output,
but instead provides a means of computing those values in a
TensorFlow [`Session`](../../api_docs/python/client.md#Session).
This class has two primary purposes:
1. A `Tensor` can be passed as an input to another `Operation`.
This builds a dataflow connection between operations, which
enables TensorFlow to execute an entire `Graph` that represents a
large, multi-step computation.
2. After the graph has been launched in a session, the value of the
`Tensor` can be computed by passing it to
[`Session.run()`](../../api_docs/python/client.md#Session.run).
`t.eval()` is a shortcut for calling
`tf.get_default_session().run(t)`.
In the following example, `c`, `d`, and `e` are symbolic `Tensor`
objects, whereas `result` is a numpy array that stores a concrete
value:
```python
# Build a dataflow graph.
c = tf.constant([[1.0, 2.0], [3.0, 4.0]])
d = tf.constant([[1.0, 1.0], [0.0, 1.0]])
e = tf.matmul(c, d)
# Construct a `Session` to execute the graph.
sess = tf.Session()
# Execute the graph and store the value that `e` represents in `result`.
result = sess.run(e)
```
@@dtype
@@name
@@value_index
@@graph
@@op
@@consumers
@@eval
@@get_shape
@@set_shape
"""
# List of Python operators that we allow to override.
OVERLOADABLE_OPERATORS = {
# Binary.
"__add__",
"__radd__",
"__sub__",
"__rsub__",
"__mul__",
"__rmul__",
"__div__",
"__rdiv__",
"__truediv__",
"__rtruediv__",
"__floordiv__",
"__rfloordiv__",
"__mod__",
"__rmo
|
takeit/web-publisher
|
docs/_extensions/sensio/sphinx/bestpractice.py
|
Python
|
agpl-3.0
| 1,458 | 0.006173 |
from docutils.parsers.rst import Directive, directives
from docutils import nodes
from docutils.parsers.rst.directives.admonitions import BaseAdmonition
from sphinx.util import compat
|
compat.make_admonition = BaseAdmonition
from sphinx import addnodes
from sphinx.locale import _
class bestpractice(nodes.Admonition, nodes.Element):
pass
class BestPractice(Directive):
has_content = True
required_arguments = 0
optional_arguments = 1
    final_argument_whitespace = True
option_spec = {}
def run(self):
ret = make_admonition(
bestpractice, self.name, [_('Best Practice')], self.options,
self.content, self.lineno, self.content_offset, self.block_text,
self.state, self.state_machine)
if self.arguments:
argnodes, msgs = self.state.inline_text(self.arguments[0],
self.lineno)
para = nodes.paragraph()
para += argnodes
para += msgs
ret[0].insert(1, para)
return ret
def visit_bestpractice_node(self, node):
self.body.append(self.starttag(node, 'div', CLASS=('admonition best-practice')))
self.set_first_last(node)
def depart_bestpractice_node(self, node):
self.depart_admonition(node)
def setup(app):
app.add_node(bestpractice, html=(visit_bestpractice_node, depart_bestpractice_node))
app.add_directive('best-practice', BestPractice)
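# Usage sketch (not part of the original extension).  With this module on
# sys.path and enabled in a project's conf.py, documents can use the directive
# registered above; the paths and text are illustrative only.
#
#   # conf.py
#   extensions = ['sensio.sphinx.bestpractice']
#
#   # any .rst document
#   .. best-practice::
#
#       Inject dependencies through the constructor instead of reading them
#       from a global container.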
|
scivision/raspberrypi_raw_camera
|
pibayer/io.py
|
Python
|
mit
| 1,418 | 0 |
from pathlib import Path
import xarray
KEY = 'imgs'   # handle to write inside the output file
CLVL = 1 # ZIP compression level
def writeframes(outfn: Path, img: xarray.DataArray):
"""writes image stack to disk"""
assert img.ndim == 3
if outfn is None:
return
outfn = Path(outfn).expanduser()
print('writing', outfn)
if outfn.suffix == '.nc':
# chunksizes made only few % difference in save time and size
# fletcher32 had no noticable impact
# complvl+1 had little useful impact
        enc = {KEY: {'zlib': True, 'complevel': CLVL, 'fletcher32': True,
'chunksizes': (1, img.shape[1], img.shape[2])}}
img.to_netcdf(outfn, mode='w', encoding=enc)
elif outfn.suffix == '.h5': # HDF5
import h5py
with h5py.File(outfn, 'w') as f:
f.create_dataset(KEY,
data=img.values,
shape=img.shape,
dtype=img.dtype,
compression='gzip',
compression_opts=CLVL,
chunks=(1, img.shape[1], img.shape[2]),
shuffle=True, fletcher32=True)
for k, v in img.attrs.items():
f[k] = v
else: # assume stacked image format
import imageio
imageio.mimwrite(outfn, img.values)
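# Illustrative usage sketch (not part of the original module): write a small
# random image stack to NetCDF.  Dimension names and the output path are made
# up; any 3-D DataArray works.
def _demo_writeframes():
    import numpy as np
    img = xarray.DataArray(
        np.random.randint(0, 1024, size=(5, 8, 8)).astype('uint16'),
        dims=('frame', 'y', 'x'),
        attrs={'exposure_s': 0.01})
    writeframes('/tmp/frames.nc', img)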
|
sanguinariojoe/FreeCAD
|
src/Mod/Path/PathTests/TestPathTool.py
|
Python
|
lgpl-2.1
| 4,116 | 0.001944 |
# -*- coding: utf-8 -*-
# ***************************************************************************
# * Copyright (c) 2017 sliptonic <shopinthewoods@gmail.com> *
# * *
# * This program is free software; you can redistribute it and/or modify *
# * it under the terms of the GNU Lesser General Public License (LGPL) *
# * as published by the Free Software Foundation; either version 2 of *
# * the License, or (at your option) any later version. *
# * for detail see the LICENCE text file. *
# * *
# * This program is distributed in the hope that it will be useful, *
# * but WITHOUT ANY WARRANTY; without even the implied warranty of *
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
# * GNU Library General Public License for more details. *
# * *
# * You should have received a copy of the GNU Library General Public *
# * License along with this program; if not, write to the Free Software *
# * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 *
# * USA *
# * *
# ***************************************************************************
import Path
from PathTests.PathTestUtils import PathTestBase
class TestPathTool(PathTestBase):
def test00(self):
'''Verify templateAttrs'''
name = 'tool 1'
mat = 'Carbide'
typ = 'EndMill'
dia = 1.7
flat = 7.2
offset = 3.2
corner = 4
height = 45.3
angle = 118
tool = Path.Tool()
tool.Name = name
tool.ToolType = typ
tool.Material = mat
tool.Diameter = dia
tool.LengthOffset = offset
tool.FlatRadius = flat
tool.CornerRadius = corner
tool.CuttingEdgeAngle = angle
tool.CuttingEdgeHeight = height
attrs = tool.templateAttrs()
self.assertEqual(attrs['name'], name)
self.assertEqual(attrs['diameter'], dia)
self.assertEqual(attrs['material'], mat)
self.assertEqual(attrs['tooltype'], typ)
self.assertEqual(attrs['lengthOffset'], offset)
self.assertEqual(attrs['flatRadius'], flat)
self.assertEqual(attrs['cornerRadius'], corner)
self.assertEqual(attrs['cuttingEdgeAngle'], angle)
self.assertEqual(attrs['cuttingEdgeHeight'], height)
return tool
def test01(self):
'''Verify template roundtrip'''
t0 = self.test00()
t1 = Path.Tool()
t1.setFromTemplate(t0.templateAttrs())
self.assertEqual(t0.Name, t1.Name)
self.assertEqual(t0.ToolType, t1.ToolType)
self.assertEqual(t0.Material, t1.Material)
self.assertEqual(t0.Diameter, t1.Diameter)
self.assertEqual(t0.LengthOffset, t1.LengthOffset)
self.assertEqual(t0.FlatRadius, t1.FlatRadius)
self.assertEqual(t0.CornerRadius, t1.CornerRadius)
self.assertEqual(t0.CuttingEdgeAngle, t1.CuttingEdgeAngle)
self.assertEqual(t0.CuttingEdgeHeight, t1.CuttingEdgeHeight)
def test02(self):
'''Verify template dictionary construction'''
        t0 = self.test00()
|
t1 = Path.Tool(t0.templateAttrs())
self.assertEqual(t0.Name, t1.Name)
self.assertEqual(t0.ToolType, t1.ToolType)
        self.assertEqual(t0.Material, t1.Material)
|
self.assertEqual(t0.Diameter, t1.Diameter)
self.assertEqual(t0.LengthOffset, t1.LengthOffset)
self.assertEqual(t0.FlatRadius, t1.FlatRadius)
self.assertEqual(t0.CornerRadius, t1.CornerRadius)
self.assertEqual(t0.CuttingEdgeAngle, t1.CuttingEdgeAngle)
self.assertEqual(t0.CuttingEdgeHeight, t1.CuttingEdgeHeight)
|
goal/uwsgi
|
tests/websockets_chat_async.py
|
Python
|
gpl-2.0
| 3,284 | 0.000914 |
#!./uwsgi --http-socket :9090 --async 100 ...
# same chat example but using uwsgi async api
# for pypy + continulets just run:
# uwsgi --http-socket :9090 --pypy-home /opt/pypy --pypy-wsgi-file tests/websockets_chat_async.py --pypy-eval "uwsgi_pypy_setup_continulets()" --async 100
import uwsgi
import time
import redis
import sys
def application(env, sr):
ws_scheme = 'ws'
if 'HTTPS' in env or env['wsgi.url_scheme'] == 'https':
ws_scheme = 'wss'
if env['PATH_INFO'] == '/':
        sr('200 OK', [('Content-Type', 'text/html')])
|
output = """
<html>
<head>
<script language="Javascript">
var s = new WebSocket("%s://%s/foobar/");
s.onopen = function() {
alert("connected !!!");
s.send("ciao");
};
s.onmessage = function(e) {
|
var bb = document.getElementById('blackboard')
var html = bb.innerHTML;
bb.innerHTML = html + '<br/>' + e.data;
};
s.onerror = function(e) {
alert(e);
}
s.onclose = function(e) {
alert("connection closed");
}
function invia() {
var value = document.getElementById('testo').value;
s.send(value);
}
</script>
</head>
<body>
<h1>WebSocket</h1>
<input type="text" id="testo"/>
<input type="button" value="invia" onClick="invia();"/>
<div id="blackboard" style="width:640px;height:480px;background-color:black;color:white;border: solid 2px red;overflow:auto">
</div>
</body>
</html>
""" % (ws_scheme, env['HTTP_HOST'])
if sys.version_info[0] > 2:
return output.encode('latin1')
return output
elif env['PATH_INFO'] == '/favicon.ico':
return ""
elif env['PATH_INFO'] == '/foobar/':
uwsgi.websocket_handshake(env['HTTP_SEC_WEBSOCKET_KEY'], env.get('HTTP_ORIGIN', ''))
print("websockets...")
r = redis.StrictRedis(host='localhost', port=6379, db=0)
channel = r.pubsub()
channel.subscribe('foobar')
websocket_fd = uwsgi.connection_fd()
redis_fd = channel.connection._sock.fileno()
while True:
uwsgi.wait_fd_read(websocket_fd, 3)
uwsgi.wait_fd_read(redis_fd)
uwsgi.suspend()
fd = uwsgi.ready_fd()
if fd > -1:
if fd == websocket_fd:
msg = uwsgi.websocket_recv_nb()
if msg:
r.publish('foobar', msg)
elif fd == redis_fd:
msg = channel.parse_response()
print(msg)
# only interested in user messages
t = 'message'
if sys.version_info[0] > 2:
t = b'message'
if msg[0] == t:
uwsgi.websocket_send("[%s] %s" % (time.time(), msg))
else:
# on timeout call websocket_recv_nb again to manage ping/pong
msg = uwsgi.websocket_recv_nb()
if msg:
r.publish('foobar', msg)
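# Illustrative helper: any other process can feed the chat by publishing to the
# same Redis channel the handler above subscribes to (assumes a local Redis on
# the default port, matching the connection settings used above).
def publish_test_message(text='hello from a script'):
    r = redis.StrictRedis(host='localhost', port=6379, db=0)
    r.publish('foobar', text)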
|
jclgoodwin/bustimes.org.uk
|
busstops/management/commands/import_localities.py
|
Python
|
mpl-2.0
| 2,629 | 0.001902 |
"""
Usage:
    import_localities < Localities.csv
|
"""
from django.contrib.gis.geos import GEOSGeometry
from django.utils.text import slugify
|
from ..import_from_csv import ImportFromCSVCommand
from ...utils import parse_nptg_datetime
from ...models import Locality
class Command(ImportFromCSVCommand):
"""
Imports localities from the NPTG
"""
def handle_rows(self, rows):
existing_localities = Locality.objects.defer('search_vector', 'latlong').in_bulk()
slugs = {
locality.slug: locality for locality in existing_localities.values()
}
to_update = []
to_create = []
for row in rows:
modified_at = parse_nptg_datetime(row["ModificationDateTime"])
locality_code = row['NptgLocalityCode']
if locality_code in existing_localities:
locality = existing_localities[locality_code]
if modified_at and modified_at == locality.modified_at:
continue
else:
locality = Locality()
created_at = parse_nptg_datetime(row["CreationDateTime"])
locality.modified_at = modified_at
locality.created_at = created_at
locality.name = row['LocalityName'].replace('\'', '\u2019')
locality.short_name = row['ShortName']
if locality.name == locality.short_name:
locality.short_name = ''
locality.qualifier_name = row['QualifierName']
locality.admin_area_id = row['AdministrativeAreaCode']
locality.latlong = GEOSGeometry(f"SRID=27700;POINT({row['Easting']} {row['Northing']})")
if row['NptgDistrictCode'] == '310': # bogus code seemingly used for localities with no district
locality.district_id = None
else:
locality.district_id = row['NptgDistrictCode']
if locality.id:
to_update.append(locality)
else:
locality.id = locality_code
slug = slugify(locality.get_qualified_name())
locality.slug = slug
i = 0
while locality.slug in slugs:
i += 1
locality.slug = f"{slug}-{i}"
slugs[locality.slug] = locality
to_create.append(locality)
Locality.objects.bulk_update(to_update, fields=[
'name', 'qualifier_name', 'short_name', 'admin_area', 'latlong', 'modified_at', 'created_at', 'district'
], batch_size=100)
Locality.objects.bulk_create(to_create)
|
annoviko/pyclustering
|
pyclustering/core/mbsas_wrapper.py
|
Python
|
gpl-3.0
| 842 | 0.015439 |
"""!
@brief CCORE Wrapper for MBSAS algorithm.
@authors Andrei Novikov (pyclustering@yandex.ru)
@date 2014-2020
@copyright BSD-3-Clause
"""
from ctypes import c_double, c_size_t, POINTER;
from pyclustering.core.wrapper import ccore_library;
|
from pyclustering.core.pyclustering_package import pyclustering_package, package_extractor, package_builder;
def mbsas(sample, amount, threshold, metric_pointer):
pointer_data = package_builder(sample, c_double).create();
    ccore = ccore_library.get();
|
ccore.mbsas_algorithm.restype = POINTER(pyclustering_package);
package = ccore.mbsas_algorithm(pointer_data, c_size_t(amount), c_double(threshold), metric_pointer);
result = package_extractor(package).extract();
ccore.free_pyclustering_package(package);
return result[0], result[1];
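# For context, this ctypes-level wrapper is normally reached through the
# high-level clustering API rather than called directly. A rough sketch,
# assuming the installed version exposes pyclustering.cluster.mbsas with a
# process()/get_clusters() interface (check your version):
if __name__ == '__main__':
    from pyclustering.cluster.mbsas import mbsas as mbsas_clusterer
    sample = [[0.0, 0.1], [0.2, 0.1], [5.0, 5.2], [5.1, 5.0]]
    instance = mbsas_clusterer(sample, 2, 1.0)   # at most 2 clusters, threshold 1.0
    instance.process()
    print(instance.get_clusters())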
|
hasgeek/funnel
|
migrations/versions/3a6b2ab00e3e_session_proposal_one.py
|
Python
|
agpl-3.0
| 441 | 0.004535 |
"""Make session:proposal 1:1.
Revision ID: 3a6b2ab00e3e
|
Revises: 4dbf686f4380
Create Date: 2013-11-09 13:51:58.343243
"""
# revision identifiers, used by Alembic.
revision = '3a6b2ab00e3e'
down_revision = '4dbf686f4380'
from alembic import op
def upgrade():
    op.create_unique_constraint('session_proposal_id_key', 'session', ['proposal_id'])
|
def downgrade():
op.drop_constraint('session_proposal_id_key', 'session', 'unique')
|
google/transitfeed
|
transitfeed/farerule.py
|
Python
|
apache-2.0
| 2,778 | 0.009359 |
#!/usr/bin/python2.5
# Copyright (C) 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
|
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
|
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from .problems import default_problem_reporter
from .gtfsobjectbase import GtfsObjectBase
class FareRule(GtfsObjectBase):
"""This class represents a rule that determines which itineraries a
fare rule applies to."""
_REQUIRED_FIELD_NAMES = ['fare_id']
_FIELD_NAMES = _REQUIRED_FIELD_NAMES + ['route_id',
'origin_id',
'destination_id',
'contains_id']
_TABLE_NAME = "fare_rules"
def __init__(self, fare_id=None, route_id=None,
origin_id=None, destination_id=None, contains_id=None,
field_dict=None):
self._schedule = None
(self.fare_id, self.route_id, self.origin_id, self.destination_id,
self.contains_id) = \
(fare_id, route_id, origin_id, destination_id, contains_id)
if field_dict:
if isinstance(field_dict, self.GetGtfsFactory().FareRule):
# Special case so that we don't need to re-parse the attributes to
# native types iteritems returns all attributes that don't start with _
for k, v in field_dict.iteritems():
self.__dict__[k] = v
else:
self.__dict__.update(field_dict)
# canonicalize non-content values as None
if not self.route_id:
self.route_id = None
if not self.origin_id:
self.origin_id = None
if not self.destination_id:
self.destination_id = None
if not self.contains_id:
self.contains_id = None
def GetFieldValuesTuple(self):
return [getattr(self, fn) for fn in self._FIELD_NAMES]
def __getitem__(self, name):
return getattr(self, name)
def __eq__(self, other):
if not other:
return False
if id(self) == id(other):
return True
return self.GetFieldValuesTuple() == other.GetFieldValuesTuple()
def __ne__(self, other):
return not self.__eq__(other)
def AddToSchedule(self, schedule, problems):
self._schedule = schedule
schedule.AddFareRuleObject(self, problems)
def ValidateBeforeAdd(self, problems):
return True
def ValidateAfterAdd(self, problems):
return
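# Quick illustration of the canonicalization above (ids are made up): ids left
# empty come back as None in the field tuple.
if __name__ == '__main__':
    example_rule = FareRule(fare_id='adult', route_id='R10', origin_id='1',
                            destination_id='2')
    print(example_rule.GetFieldValuesTuple())  # ['adult', 'R10', '1', '2', None]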
|
stweil/ocropy
|
OLD/lineproc.py
|
Python
|
apache-2.0
| 6,891 | 0.026411 |
################################################################
### functions specific to text line processing
### (text line segmentation is in lineseg)
################################################################
from scipy import stats
from scipy.ndimage import interpolation,morphology,filters
from pylab import *
import morph
from toplevel import *
################################################################
### line segmentation geometry estimates based on
### segmentations
################################################################
seg_geometry_display = 0
geowin = None
geoax = None
@checks(SEGMENTATION,math=BOOL)
def seg_geometry(segmentation,math=1):
"""Given a line segmentation (either an rseg--preferably connected
component based--or a cseg, return (mh,a,b), where mh is the
medium component height, and y=a*x+b is a line equation (in
Postscript coordinates) for the center of the text line. This
function is used as a simple, standard estimator of text line
geometry. The intended use is to encode the size and centers of
bounding boxes relative to these estimates and add these as
features to the input of a character classifier, allowing it to
distinguish otherwise ambiguous pairs like ,/' and o/O."""
boxes = seg_boxes(segmentation,math=math)
heights = [(y1-y0) for (y0,y1,x0,x1) in boxes]
mh = stats.scoreatpercentile(heights,per=40)
centers = [(avg(y0,y1),avg(x0,x1)) for (y0,y1,x0,x1) in boxes]
xs = array([x for y,x in centers])
ys = array([y for y,x in centers])
a,b = polyfit(xs,ys,1)
if seg_geometry_display:
print "seggeo",math
from matplotlib import patches
global geowin,geoax
old = gca()
if geowin is None:
geowin = figure()
geoax = geowin.add_subplot(111)
geoax.cla()
geoax.imshow(segmentation!=0,cmap=cm.gray)
for (y0,y1,x0,x1) in boxes:
p = patches.Rectangle((x0,y0),x1-x0,y1-y0,edgecolor="red",fill=0)
geoax.add_patch(p)
xm = max(xs)
geoax.plot([0,xm],[b,a*xm+b],'b')
geoax.plot([0,xm],[b-mh/2,a*xm+b-mh/2],'y')
geoax.plot([0,xm],[b+mh/2,a*xm+b+mh/2],'y')
geoax.plot(xs,[y for y in ys],"g.")
sca(old)
print "mh",mh,"a",a,"b",b
return mh,a,b
def avg(*args):
return mean(args)
@deprecated
def rel_char_geom(box,params):
"""Given a character bounding box and a set of line geometry parameters,
compute relative character position and size."""
y0,y1,x0,x1 = box
assert y1>y0 and x1>x0,"%s %s"%((x0,x1),(y0,y1))
mh,a,b = params
y = avg(y0,y1)
x = avg(x0,x1)
yl = a*x+b
rel_ypos = (y-yl)/mh
rel_width = (x1-x0)*1.0/mh
rel_height = (y1-y0)*1.0/mh
# ensure some reasonable bounds
assert rel_ypos>-100 and rel_ypos<100
assert rel_width>0 and rel_width<100
assert rel_height>0 and rel_height<100
return rel_ypos,rel_width,rel_height
@deprecated
def rel_geo_normalize(rel):
"""Given a set of geometric parameters, normalize them into the
range -1...1 so that they can be used as input to a neural network."""
if rel is None: return None
if type(rel)==str:
rel = [float(x) for x in rel.split()]
ry,rw,rh = rel
if not (rw>0 and rh>0): return None
ry = clip(2*ry,-1.0,1.0)
rw = clip(log(rw),-1.0,1.0)
rh = clip(log(rh),-1.0,1.0)
geometry = array([ry,rw,rh],'f')
return geometry
@deprecated
def seg_boxes(seg,math=0):
"""Given a color segmentation, return a list of bounding boxes.
Bounding boxes are returned as tuples (y0,y1,x0,x1). With
math=0, raster coordinates are used, with math=1, Postscript
coordinates are used (however, the order of the values in the
tuple doesn't change)."""
seg = array(seg,'uint32')
slices = morph.find_objects(seg)
h = seg.shape[0]
result = []
for i in range(len(slices)):
if slices[i] is None: continue
(ys,xs) = slices[i]
if math:
result += [(h-ys.stop-1,h-ys.start-1,xs.start,xs.stop)]
else:
result += [(ys.start,ys.stop,xs.start,xs.stop)]
return result
################################################################
### image based estimation of line geometry, as well
### as dewarping
################################################################
@checks(DARKLINE)
def estimate_baseline(line,order=3):
"""Compute the baseline by fitting a polynomial to the gradient.
TODO: use robust fitting, special case very short line, limit parameter ranges"""
line = line*1.0/amax(line)
vgrad = morphology.grey_closing(line,(1,40))
vgrad = filters.gaussian_filter(vgrad,(2,60),(1,0))
if amin(vgrad)>0 or amax(vgrad)<0: raise BadImage()
h,w = vgrad.shape
ys = argmin(vgrad,axis=0)
xs = arange(w)
baseline = polyfit(xs,ys,order)
print baseline
return baseline
@checks(DARKLINE)
def dewarp_line(line,show=0,order=3):
"""Dewarp the baseline of a line based in estimate_baseline.
Returns the dewarped image."""
line = line*1.0/amax(line)
line = r_[zeros(line.shape),line]
h,w = line.shape
baseline = estimate_baseline(line,order=order)
ys = polyval(baseline,arange(w))
base = 2*h/3
temp = zeros(line.shape)
for x in range(w):
temp[:,x] = interpolation.shift(line[:,x],(base-ys[x]),order=1)
return temp
#line = line*1.0/amax(line)
@checks(DARKLINE)
def estimate_xheight(line,scale=1.0,debug=0):
"""Estimates the xheight of a line based on image processing and
filtering."""
vgrad = morphology.grey_closing(line,(1,int(scale*40)))
vgrad = filters.gaussian_filter(vgrad,(2,int(scale*60)),(1,0))
if amin(vgrad)>0 or amax(vgrad)<0: raise BadImage("bad line")
if debug: imshow(vgrad)
proj = sum(vgrad,1)
proj = filters.gaussian_filter(proj,0.5)
top = argmax(proj)
bottom = argmin(proj)
return bottom-top,bottom
@checks(DARKLINE)
def latin_mask(line,scale=1.0,r=1.2,debug=0):
"""Estimate a mask that covers letters and diacritics of a text
line for Latin alphabets."""
vgrad = morphology.grey_closing(1.0*line,(1,int(scale*40)))
vgrad = filters.gaussian_filter(vgrad,(2,int(scale*60)),(1,0))
tops = argmax(vgrad,0)
bottoms = argmin(vgrad,0)
mask = zeros(line.shape)
# xheight = mean(bottoms-tops)
for i in range(len(bottoms)):
        d = bottoms[i]-tops[i]
|
y0 = int(maximum(0,bottoms[i]-r*d))
mask[y0:bottoms[i],i] = 1
return mask
@checks(DARKLINE)
def latin_filter(line,scale=1.0,r=1.5,debug=0):
"""Filter out noise from a text line in Latin alphabets."""
bin = (line>0.5*amax(line))
mask = latin_mask(bin,scale=scale,r=r,debug=debug)
mask = morph.keep_marked(bin,mask)
mask = filters.maximum_filter(mask,3)
|
return line*mask
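# Worked example of the relative-geometry encoding (numbers are made up): with
# line parameters mh=20, a=0, b=50 and a box (y0,y1,x0,x1)=(40,60,10,25),
# rel_char_geom returns (0.0, 0.75, 1.0) -- centred on the line, 0.75 line
# heights wide and one line height tall.
def _rel_char_geom_example():
    return rel_char_geom((40, 60, 10, 25), (20.0, 0.0, 50.0))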
|
us-ignite/us_ignite
|
us_ignite/blog/urls.py
|
Python
|
bsd-3-clause
| 266 | 0 |
from django.conf.urls import patterns, url
|
urlpatterns = patterns(
'us_ignite.blog.views',
url(r'^$', 'post_list', name='blog_post_list'),
url(r'^(?P<year>\d{4})/(?P<month>\d{1,2})/(?P<slug>[-\w]+)/$',
        'post_detail', name='blog_post_detail'),
|
)
|
aplicatii-romanesti/allinclusive-kodi-pi
|
.kodi/addons/plugin.video.familyfunflix/downloader.py
|
Python
|
apache-2.0
| 588 | 0.027211 |
import xbmcgui
import urllib
def download(url, dest, dp = None):
if not dp:
dp = xbmcgui.DialogProgress()
        dp.create("XBMCHUB...","Downloading & Copying File",' ', ' ')
|
dp.update(0)
    urllib.urlretrieve(url,dest,lambda nb, bs, fs, url=url: _pbhook(nb,bs,fs,url,dp))
|
def _pbhook(numblocks, blocksize, filesize, url, dp):
try:
percent = min((numblocks*blocksize*100)/filesize, 100)
dp.update(percent)
except:
percent = 100
dp.update(percent)
if dp.iscanceled():
raise Exception("Canceled")
dp.close()
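if __name__ == '__main__':
    # Illustrative call with a placeholder URL and destination; this only runs
    # inside Kodi, where the xbmcgui module imported above is available.
    download('http://example.com/repository.zip', '/tmp/repository.zip')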
|
kelvinongtoronto/SecureCRT
|
shintdesc_legacy.py
|
Python
|
artistic-2.0
| 1,392 | 0.03592 |
# $language = "python"
# $interface = "1.0"
# for GCCLABVM1,GCCLABVM2
import os
import csv
def main():
crt.Screen.Synchronous = True
# Create an Excel compatible spreadsheet
filename = crt.Dialog.Prompt("Enter file name to write to: ", "Show Interface Description", "intdesc.csv", False)
fileobj = open(filename, 'wb')
worksheet = csv.writer(fileobj)
# Send the initial command to run and wait for the first linefeed
crt.Screen.Send("show interface description\r")
crt.Screen.WaitForString("\n")
# Create an array of strings to wait for.
waitStrs = ["\n","#"]
result = crt.Screen.WaitForStrings( waitStrs )
while True:
# Send a return to advance the chart
crt.Screen.Send(" ")
		# Wait for the linefeed at the end of each line, or the shell
|
# prompt that indicates we're done.
result = crt.Screen.WaitForStrings( waitStrs )
# If see a prompt, we're done
if result == 2:
break
# Fetch current row and read the characters from the screen
screenrow = crt.Screen.CurrentRow - 1
readline = crt.Screen.Get(screenrow, 1, screenrow, 140)
# if readline[0:2] == "Vl":
# break
# Split the line and put some fields into Excel
		items = [readline[0:31].strip(),readline[55:140].strip()]
|
worksheet.writerow(items)
fileobj.close()
#crt.Dialog.MessageBox("Done! Your file is saved in "+filename)
crt.Screen.Synchronous = False
main()
|
ask/carrot
|
carrot/messaging.py
|
Python
|
bsd-3-clause
| 37,722 | 0.000265 |
"""
Sending/Receiving Messages.
"""
from itertools import count
from carrot.utils import gen_unique_id
import warnings
from carrot import serialization
class Consumer(object):
"""Message consumer.
:param connection: see :attr:`connection`.
:param queue: see :attr:`queue`.
:param exchange: see :attr:`exchange`.
:param routing_key: see :attr:`routing_key`.
:keyword durable: see :attr:`durable`.
:keyword auto_delete: see :attr:`auto_delete`.
:keyword exclusive: see :attr:`exclusive`.
:keyword exchange_type: see :attr:`exchange_type`.
:keyword auto_ack: see :attr:`auto_ack`.
:keyword no_ack: see :attr:`no_ack`.
:keyword auto_declare: see :attr:`auto_declare`.
.. attribute:: connection
The connection to the broker.
A :class:`carrot.connection.BrokerConnection` instance.
.. attribute:: queue
Name of the queue.
.. attribute:: exchange
Name of the exchange the queue binds to.
.. attribute:: routing_key
The routing key (if any). The interpretation of the routing key
depends on the value of the :attr:`exchange_type` attribute:
* direct exchange
Matches if the routing key property of the message and
the :attr:`routing_key` attribute are identical.
* fanout exchange
Always matches, even if the binding does not have a key.
* topic exchange
Matches the routing key property of the message by a primitive
pattern matching scheme. The message routing key then consists
of words separated by dots (``"."``, like domain names), and
two special characters are available; star (``"*"``) and hash
(``"#"``). The star matches any word, and the hash matches
zero or more words. For example ``"*.stock.#"`` matches the
routing keys ``"usd.stock"`` and ``"eur.stock.db"`` but not
``"stock.nasdaq"``.
.. attribute:: durable
Durable exchanges remain active when a server restarts. Non-durable
exchanges (transient exchanges) are purged when a server restarts.
Default is ``True``.
.. attribute:: auto_delete
If set, the exchange is deleted when all queues have finished
using it. Default is ``False``.
.. attribute:: exclusive
Exclusive queues may only be consumed from by the current connection.
When :attr:`exclusive` is on, this also implies :attr:`auto_delete`.
Default is ``False``.
.. attribute:: exchange_type
AMQP defines four default exchange types (routing algorithms) that
covers most of the common messaging use cases. An AMQP broker can
also define additional exchange types, so see your message brokers
manual for more information about available exchange types.
* Direct
Direct match between the routing key in the message, and the
routing criteria used when a queue is bound to this exchange.
* Topic
Wildcard match between the routing key and the routing pattern
specified in the binding. The routing key is treated as zero
or more words delimited by ``"."`` and supports special
wildcard characters. ``"*"`` matches a single word and ``"#"``
matches zero or more words.
* Fanout
Queues are bound to this exchange with no arguments. Hence any
message sent to this exchange will be forwarded to all queues
bound to this exchange.
* Headers
Queues are bound to this exchange with a table of arguments
containing headers and values (optional). A special argument
named "x-match" determines the matching algorithm, where
            ``"all"`` implies an ``AND`` (all pairs must match) and
|
``"any"`` implies ``OR`` (at least one pair must match).
            The :attr:`routing_key` is used to specify the arguments,
            the same as when sending messages.
This description of AMQP exchange types was shamelessly stolen
        from the blog post `AMQP in 10 minutes: Part 4`_ by
|
Rajith Attapattu. Recommended reading.
.. _`AMQP in 10 minutes: Part 4`:
http://bit.ly/amqp-exchange-types
.. attribute:: callbacks
List of registered callbacks to trigger when a message is received
by :meth:`wait`, :meth:`process_next` or :meth:`iterqueue`.
.. attribute:: warn_if_exists
Emit a warning if the queue has already been declared. If a queue
already exists, and you try to redeclare the queue with new settings,
the new settings will be silently ignored, so this can be
useful if you've recently changed the :attr:`routing_key` attribute
or other settings.
.. attribute:: auto_ack
Acknowledgement is handled automatically once messages are received.
This means that the :meth:`carrot.backends.base.BaseMessage.ack` and
:meth:`carrot.backends.base.BaseMessage.reject` methods
on the message object are no longer valid.
By default :attr:`auto_ack` is set to ``False``, and the receiver is
required to manually handle acknowledgment.
.. attribute:: no_ack
Disable acknowledgement on the server-side. This is different from
:attr:`auto_ack` in that acknowledgement is turned off altogether.
This functionality increases performance but at the cost of
reliability. Messages can get lost if a client dies before it can
deliver them to the application.
    .. attribute:: auto_declare
If this is ``True`` the following will be automatically declared:
* The queue if :attr:`queue` is set.
* The exchange if :attr:`exchange` is set.
* The :attr:`queue` will be bound to the :attr:`exchange`.
This is the default behaviour.
:raises `amqplib.client_0_8.channel.AMQPChannelException`: if the queue is
exclusive and the queue already exists and is owned by another
connection.
Example Usage
>>> consumer = Consumer(connection=DjangoBrokerConnection(),
... queue="foo", exchange="foo", routing_key="foo")
>>> def process_message(message_data, message):
... print("Got message %s: %s" % (
... message.delivery_tag, message_data))
>>> consumer.register_callback(process_message)
>>> consumer.wait() # Go into receive loop
"""
queue = ""
exchange = ""
routing_key = ""
durable = True
exclusive = False
auto_delete = False
exchange_type = "direct"
channel_open = False
warn_if_exists = False
auto_declare = True
auto_ack = False
queue_arguments = None
no_ack = False
_closed = True
_init_opts = ("durable", "exclusive", "auto_delete",
"exchange_type", "warn_if_exists",
"auto_ack", "auto_declare",
"queue_arguments")
_next_consumer_tag = count(1).next
def __init__(self, connection, queue=None, exchange=None,
routing_key=None, **kwargs):
self.connection = connection
self.backend = kwargs.get("backend", None)
if not self.backend:
self.backend = self.connection.create_backend()
self.queue = queue or self.queue
# Binding.
self.queue = queue or self.queue
self.exchange = exchange or self.exchange
self.routing_key = routing_key or self.routing_key
self.callbacks = []
# Options
for opt_name in self._init_opts:
opt_value = kwargs.get(opt_name)
if opt_value is not None:
setattr(self, opt_name, opt_value)
# exclusive implies auto-delete.
if self.exclusive:
self.auto_delete = True
self.consumer_tag = self._generate_
|
Haravan/haravan_python_api
|
haravan/resources/shipping_line.py
|
Python
|
mit
| 83 | 0 |
from ..base import HaravanResource
class ShippingLine(HaravanResource):
|
|
pass
|
svperbeast/plain_data_companion
|
src/templates/renderer_result.py
|
Python
|
mit
| 320 | 0.003125 |
"""
|
RendererResult
"""
class RendererResult(object):
def __init__(self):
self.string_stream = ''
def append(self, s):
self.string_stream += s
def pop_back(self, index=1):
self.string_stream = self.string_stream[:-index]
def get_string(self):
        return self.string_stream
|
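# Quick illustration of the accumulate/trim behaviour:
if __name__ == '__main__':
    result = RendererResult()
    result.append('Hello, world!!')
    result.pop_back()              # drop the extra '!'
    print(result.get_string())     # Hello, world!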
|
AllanNozomu/tecsaladeaula
|
core/migrations/0029_auto__add_field_courseprofessor_is_course_author.py
|
Python
|
agpl-3.0
| 13,255 | 0.007922 |
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'CourseProfessor.is_course_author'
db.add_column(u'core_courseprofessor', 'is_course_author',
self.gf('django.db.models.fields.BooleanField')(default=False),
keep_default=False)
def backwards(self, orm):
# Deleting field 'CourseProfessor.is_course_author'
db.delete_column(u'core_courseprofessor', 'is_course_author')
models = {
u'accounts.timtecuser': {
'Meta': {'object_name': 'TimtecUser'},
'accepted_terms': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'biography': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'city': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'unique': 'True', 'max_length': '75'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'occupation': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'picture': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'blank': 'True'}),
'site': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'core.class': {
'Meta': {'object_name': 'Class'},
'assistant': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'professor_classes'", 'null': 'True', 'to': u"orm['accounts.TimtecUser']"}),
'course': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['core.Course']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'students': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'classes'", 'blank': 'True', 'to': u"orm['accounts.TimtecUser']"})
},
u'core.course': {
'Meta': {'object_name': 'Course'},
'abstract': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'application': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'default_class': ('django.db.models.fields.related.OneToOneField', [], {'blank': 'True', 'related_name': "'default_course'", 'unique': 'True', 'null': 'True', 'to': u"orm['core.Class']"}),
|
'home_position': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'home_published': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'home_thumbnail': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
|
'intro_video': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['core.Video']", 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'professors': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'professorcourse_set'", 'symmetrical': 'False', 'through': u"orm['core.CourseProfessor']", 'to': u"orm['accounts.TimtecUser']"}),
'pronatec': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'requirement': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '255'}),
'start_date': ('django.db.models.fields.DateField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'published'", 'max_length': '64'}),
'structure': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'students': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'studentcourse_set'", 'symmetrical': 'False', 'through': u"orm['core.CourseStudent']", 'to': u"orm['accounts.TimtecUser']"}),
'thumbnail': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'workload': ('django.db.models.fields.TextField', [], {'blank': 'True'})
},
u'core.courseprofessor': {
'Meta': {'unique_together': "(('user', 'course'),)", 'object_name': 'CourseProfessor'},
'biography': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'course': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'course_professors'", 'to': u"orm['core.Course']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_course_author': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
|
d2emon/generator-pack
|
src/fixtures/tools/outfit/scarf.py
|
Python
|
gpl-3.0
| 152 | 0.006579 |
# scarf1{background-image: url('https://rollforfantasy.com/images/clothing/nmale/scarf1.png');}
|
scarf = ["scarf{}.png".format(i) for i in range(1, 31)]
|
koebbe/homeworks
|
visit/migrations/0074_auto_20150826_2122.py
|
Python
|
mit
| 670 | 0.002985 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
|
dependencies = [
('visit', '0073_visit_student_absent_reason'),
]
|
operations = [
migrations.AlterField(
model_name='visit',
name='caregiver',
field=models.ForeignKey(related_name='visits', blank=True, to='visit.Caregiver', null=True),
),
migrations.AlterField(
model_name='visit',
name='student',
field=models.ForeignKey(related_name='visits', blank=True, to='visit.Student', null=True),
),
]
|
sassoftware/catalog-service
|
catalogService/libs/viclient_vendor/ZSI/generate/commands.py
|
Python
|
apache-2.0
| 19,840 | 0.009224 |
############################################################################
# Joshua Boverhof<JRBoverhof@lbl.gov>, LBNL
# Monte Goode <MMGoode@lbl.gov>, LBNL
# See Copyright for copyright notice!
############################################################################
import exceptions, sys, optparse, os, warnings, traceback
from os.path import isfile, join, split
#from operator import xor
import ZSI
from ConfigParser import ConfigParser
from ZSI.generate.wsdl2python import WriteServiceModule, ServiceDescription as wsdl2pyServiceDescription
from ZSI.wstools import WSDLTools, XMLSchema
from ZSI.wstools.logging import setBasicLoggerDEBUG
from ZSI.generate import containers, utility
from ZSI.generate.utility import NCName_to_ClassName as NC_to_CN, TextProtect
from ZSI.generate.wsdl2dispatch import ServiceModuleWriter as ServiceDescription
from ZSI.generate.wsdl2dispatch import WSAServiceModuleWriter as ServiceDescriptionWSA
warnings.filterwarnings('ignore', '', exceptions.UserWarning)
def SetDebugCallback(option, opt, value, parser, *args, **kwargs):
setBasicLoggerDEBUG()
warnings.resetwarnings()
def SetPyclassMetaclass(option, opt, value, parser, *args, **kwargs):
"""set up pyclass metaclass for complexTypes"""
from ZSI.generate.containers import ServiceHeaderContainer,\
TypecodeContainerBase, TypesHeaderContainer
TypecodeContainerBase.metaclass = kwargs['metaclass']
TypesHeaderContainer.imports.append(\
'from %(module)s import %(metaclass)s' %kwargs
)
ServiceHeaderContainer.imports.append(\
'from %(module)s import %(metaclass)s' %kwargs
)
def SetUpLazyEvaluation(option, opt, value, parser, *args, **kwargs):
from ZSI.generate.containers import TypecodeContainerBase
TypecodeContainerBase.lazy = True
def wsdl2py(args=None):
"""Utility for automatically generating client/service interface code from
a wsdl definition, and a set of classes representing element declarations
and type definitions. By default invoking this script produces three files,
each named after the wsdl definition name, in the current working directory.
Generated Modules Suffix:
_client.py -- client locator, rpc proxy port, messages
_types.py -- typecodes representing
_server.py -- server-side bindings
Parameters:
args -- optional can provide arguments, rather than parsing
command-line.
return:
Default behavior is to return None, if args are provided then
return names of the generated files.
"""
op = optparse.OptionParser(usage="USAGE: %wsdl2py [options] WSDL",
description=wsdl2py.__doc__)
# Basic options
op.add_option("-x", "--schema",
action="store_true", dest="schema", default=False,
help="process just the schema from an xsd file [no services]")
op.add_option("-d", "--debug",
action="callback", callback=SetDebugCallback,
help="debug output")
# WS Options
op.add_option("-a", "--address",
action="store_true", dest="address", default=False,
help="ws-addressing support, must include WS-Addressing schema.")
# pyclass Metaclass
op.add_option("-b", "--complexType",
action="callback", callback=SetPyclassMetaclass,
callback_kwargs={'module':'ZSI.generate.pyclass',
'metaclass':'pyclass_type'},
help="add convenience functions for complexTypes, including Getters, Setters, factory methods, and properties (via metaclass). *** DONT USE WITH --simple-naming ***")
# Lazy Evaluation of Typecodes (done at serialization/parsing when needed).
op.add_option("-l", "--lazy",
action="callback", callback=SetUpLazyEvaluation,
callback_kwargs={},
help="EXPERIMENTAL: recursion error solution, lazy evalution of typecodes")
# Use Twisted
op.add_option("-w", "--twisted",
action="store_true", dest='twisted', default=False,
help="generate a twisted.web client/server, dependencies python>=2.4, Twisted>=2.0.0, TwistedWeb>=0.5.0")
op.add_option("-o", "--output-dir",
action="store", dest="output_dir", default=".", type="string",
help="save files in directory")
op.add_option("-s", "--simple-naming",
action="store_true", dest="simple_naming", default=False,
help="map element names directly to python attributes")
op.add_option("-p", "--pydoc",
action="store_true", dest="pydoc", default=False,
help="top-level directory for pydoc documentation.")
is_cmdline = args is None
if is_cmdline:
(options, args) = op.parse_args()
else:
(options, args) = op.parse_args(args)
if len(args) != 1:
print>>sys.stderr, 'Expecting a file/url as argument (WSDL).'
sys.exit(os.EX_USAGE)
location = args[0]
if options.schema is True:
reader = XMLSchema.SchemaReader(base_url=location)
else:
reader = WSDLTools.WSDLReader()
load = reader.loadFromFile
if not isfile(location):
load = reader.loadFromURL
try:
wsdl = load(location)
except Exception, e:
print >> sys.stderr, "Error loading %s: \n\t%s" % (location, e)
traceback.print_exc(sys.stderr)
# exit code UNIX specific, Windows?
if hasattr(os, 'EX_NOINPUT'): sys.exit(os.EX_NOINPUT)
sys.exit("error loading %s" %location)
if isinstance(wsdl, XMLSchema.XMLSchema):
wsdl.location = location
files = _wsdl2py(options, wsdl)
else:
files = _wsdl2py(options, wsdl)
files.append(_wsdl2dispatch(options, wsdl))
if getattr(options, 'pydoc', False):
_writepydoc(os.path.join('docs', 'API'), *files)
if is_cmdline:
return
return files
#def wsdl2dispatch(args=None):
# """Deprecated: wsdl2py now generates everything
# A utility for automatically generating service skeleton code from a wsdl
# definition.
# """
# op = optparse.OptionParser()
# op.add_option("-a", "--address",
#              action="store_true", dest="address", default=False,
|
# help="ws-addressing support, must include WS-Addressing schema.")
# op.add_option("-d", "--debug",
# action="callback", callback=SetDebugCallback,
# help="debug output")
# op.add_option("-t", "--types",
#              action="store", dest="types", default=None, type="string",
|
# help="Write generated files to OUTPUT_DIR")
# op.add_option("-o", "--output-dir",
# action="store", dest="output_dir", default=".", type="string",
# help="file to load types from")
# op.add_option("-s", "--simple-naming",
# action="store_true", dest="simple_naming", default=False,
# help="Simplify generated naming.")
#
# if args is None:
# (options, args) = op.parse_args()
# else:
# (options, args) = op.parse_args(args)
#
# if len(args) != 1:
# print>>sys.stderr, 'Expecting a file/url as argument (WSDL).'
# sys.exit(os.EX_USAGE)
#
# reader = WSDLTools.WSDLReader()
# if isfile(args[0]):
# _wsdl2dispatch(options, reader.loadFromFile(args[0]))
# return
#
# _wsdl2dispatch(options, reader.loadFromURL(args[0]))
def _wsdl2py(options, wsdl):
if options.twisted:
from ZSI.generate.containers import ServiceHeaderContainer
try:
ServiceHeaderContainer.imports.remove('from ZSI import client')
except ValueError:
pass
ServiceHeaderContainer.imports.append('from ZSI.twisted import client')
if options.simple_naming:
|
cmuphyscomp/physcomp-examples
|
support/rpi/Adafruit-Raspberry-Pi-Python-Code-master/Adafruit_ADXL345/Adafruit_ADXL345.py
|
Python
|
bsd-3-clause
| 3,978 | 0.008296 |
#!/usr/bin/python
# Python library for ADXL345 accelerometer.
# Copyright 2013 Adafruit Industries
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
from Adafruit_I2C import Adafruit_I2C
class Adafruit_ADXL345(Adafruit_I2C):
# Minimal constants carried over from Arduino library
ADXL345_ADDRESS = 0x53
ADXL345_REG_DEVID = 0x00 # Device ID
ADXL345_REG_DATAX0 = 0x32 # X-axis data 0 (6 bytes for X/Y/Z)
    ADXL345_REG_POWER_CTL = 0x2D # Power-saving features control
    ADXL345_REG_DATA_FORMAT = 0x31 # Data format control (range bits), used by set/getRange below
    ADXL345_REG_BW_RATE = 0x2C # Data rate and power mode control, used by set/getDataRate below
ADXL345_DATARATE_0_10_HZ = 0x00
ADXL345_DATARATE_0_20_HZ = 0x01
ADXL345_DATARATE_0_39_HZ = 0x02
ADXL345_DATARATE_0_78_HZ = 0x03
ADXL345_DATARATE_1_56_HZ = 0x04
ADXL345_DATARATE_3_13_HZ = 0x05
ADXL345_DATARATE_6_25HZ = 0x06
ADXL345_DATARATE_12_5_HZ = 0x07
ADXL345_DATARATE_25_HZ = 0x08
ADXL345_DATARATE_50_HZ = 0x09
ADXL345_DATARATE_100_HZ = 0x0A # (default)
ADXL345_DATARATE_200_HZ = 0x0B
ADXL345_DATARATE_400_HZ = 0x0C
ADXL345_DATARATE_800_HZ = 0x0D
ADXL345_DATARATE_1600_HZ = 0x0E
ADXL345_DATARATE_3200_HZ = 0x0F
ADXL345_RANGE_2_G = 0x00 # +/- 2g (default)
ADXL345_RANGE_4_G = 0x01 # +/- 4g
ADXL345_RANGE_8_G = 0x02 # +/- 8g
ADXL345_RANGE_16_G = 0x03 # +/- 16g
def __init__(self, busnum=-1, debug=False):
|
self.accel = Adafruit_I2C(self.ADXL345_ADDRESS, busnum, debug)
        if self.accel.readU8(self.ADXL345_REG_DEVID) == 0xE5:
|
# Enable the accelerometer
self.accel.write8(self.ADXL345_REG_POWER_CTL, 0x08)
def setRange(self, range):
# Read the data format register to preserve bits. Update the data
# rate, make sure that the FULL-RES bit is enabled for range scaling
format = ((self.accel.readU8(self.ADXL345_REG_DATA_FORMAT) & ~0x0F) |
range | 0x08)
# Write the register back to the IC
        self.accel.write8(self.ADXL345_REG_DATA_FORMAT, format)
def getRange(self):
return self.accel.readU8(self.ADXL345_REG_DATA_FORMAT) & 0x03
def setDataRate(self, dataRate):
# Note: The LOW_POWER bits are currently ignored,
# we always keep the device in 'normal' mode
self.accel.write8(self.ADXL345_REG_BW_RATE, dataRate & 0x0F)
def getDataRate(self):
return self.accel.readU8(self.ADXL345_REG_BW_RATE) & 0x0F
# Read the accelerometer
def read(self):
raw = self.accel.readList(self.ADXL345_REG_DATAX0, 6)
res = []
for i in range(0, 6, 2):
g = raw[i] | (raw[i+1] << 8)
if g > 32767: g -= 65536
res.append(g)
return res
# Simple example prints accelerometer data once per second:
if __name__ == '__main__':
from time import sleep
accel = Adafruit_ADXL345()
print '[Accelerometer X, Y, Z]'
while True:
print accel.read()
sleep(1) # Output is fun to watch if this is commented out
|
noorelden/QuickBooking
|
QuickBooking/migrations/0002_auto_20150623_1913.py
|
Python
|
gpl-2.0
| 450 | 0.002222 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
|
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('QuickBooking', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='timing',
name='id',
field=models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True),
|
),
]
|
GNOME/orca
|
src/orca/orca_gui_navlist.py
|
Python
|
lgpl-2.1
| 6,822 | 0.001906 |
# Orca
#
# Copyright 2012 Igalia, S.L.
#
# Author: Joanmarie Diggs <jdiggs@igalia.com>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., Franklin Street, Fifth Floor,
# Boston MA 02110-1301 USA.
"""Displays a GUI for Orca navigation list dialogs"""
__id__ = "$Id$"
__version__ = "$Revision$"
__date__ = "$Date$"
__copyright__ = "Copyright (c) 2012 Igalia, S.L."
__license__ = "LGPL"
from gi.repository import GObject, Gdk, Gtk
from . import debug
from . import guilabels
from . import orca_state
class OrcaNavListGUI:
    def __init__(self, title, columnHeaders, rows, selectedRow):
|
self._tree = None
self._activateButton = None
        self._gui = self._createNavListDialog(columnHeaders, rows, selectedRow)
|
self._gui.set_title(title)
self._gui.set_modal(True)
self._gui.set_keep_above(True)
self._gui.set_focus_on_map(True)
self._gui.set_accept_focus(True)
self._script = orca_state.activeScript
self._document = None
def _createNavListDialog(self, columnHeaders, rows, selectedRow):
dialog = Gtk.Dialog()
dialog.set_default_size(500, 400)
grid = Gtk.Grid()
contentArea = dialog.get_content_area()
contentArea.add(grid)
scrolledWindow = Gtk.ScrolledWindow()
grid.add(scrolledWindow)
self._tree = Gtk.TreeView()
self._tree.set_hexpand(True)
self._tree.set_vexpand(True)
scrolledWindow.add(self._tree)
cols = [GObject.TYPE_OBJECT, GObject.TYPE_INT]
cols.extend(len(columnHeaders) * [GObject.TYPE_STRING])
model = Gtk.ListStore(*cols)
cell = Gtk.CellRendererText()
column = Gtk.TreeViewColumn("Accessible", cell, text=0)
column.set_visible(False)
self._tree.append_column(column)
cell = Gtk.CellRendererText()
column = Gtk.TreeViewColumn("offset", cell, text=1)
column.set_visible(False)
self._tree.append_column(column)
for i, header in enumerate(columnHeaders):
cell = Gtk.CellRendererText()
column = Gtk.TreeViewColumn(header, cell, text=i+2)
column.set_sort_column_id(i+2)
self._tree.append_column(column)
for row in rows:
rowIter = model.append(None)
for i, cell in enumerate(row):
model.set_value(rowIter, i, cell)
self._tree.set_model(model)
selection = self._tree.get_selection()
selection.select_path(selectedRow)
btn = dialog.add_button(guilabels.BTN_CANCEL, Gtk.ResponseType.CANCEL)
btn.connect('clicked', self._onCancelClicked)
btn = dialog.add_button(guilabels.BTN_JUMP_TO, Gtk.ResponseType.APPLY)
btn.grab_default()
btn.connect('clicked', self._onJumpToClicked)
self._activateButton = dialog.add_button(
guilabels.ACTIVATE, Gtk.ResponseType.OK)
self._activateButton.connect('clicked', self._onActivateClicked)
self._tree.connect('key-release-event', self._onKeyRelease)
self._tree.connect('cursor-changed', self._onCursorChanged)
self._tree.set_search_column(2)
return dialog
def showGUI(self):
self._document = self._script.utilities.documentFrame()
x, y, width, height = self._script.utilities.getBoundingBox(self._document)
if (width and height):
self._gui.move(x + 100, y + 100)
self._gui.show_all()
ts = orca_state.lastInputEvent.timestamp
if ts == 0:
ts = Gtk.get_current_event_time()
self._gui.present_with_time(ts)
def _onCursorChanged(self, widget):
obj, offset = self._getSelectedAccessibleAndOffset()
try:
action = obj.queryAction()
except:
self._activateButton.set_sensitive(False)
else:
self._activateButton.set_sensitive(action.get_nActions() > 0)
def _onKeyRelease(self, widget, event):
keycode = event.hardware_keycode
keymap = Gdk.Keymap.get_default()
entries_for_keycode = keymap.get_entries_for_keycode(keycode)
entries = entries_for_keycode[-1]
eventString = Gdk.keyval_name(entries[0])
if eventString == 'Return':
self._gui.activate_default()
def _onCancelClicked(self, widget):
self._gui.destroy()
def _onJumpToClicked(self, widget):
obj, offset = self._getSelectedAccessibleAndOffset()
self._gui.destroy()
self._script.utilities.setCaretPosition(obj, offset, self._document)
def _onActivateClicked(self, widget):
obj, offset = self._getSelectedAccessibleAndOffset()
self._gui.destroy()
if not obj:
return
self._script.utilities.setCaretPosition(obj, offset)
try:
action = obj.queryAction()
except NotImplementedError:
msg = "ERROR: Action interface not implemented for %s" % obj
debug.println(debug.LEVEL_INFO, msg, True)
except:
msg = "ERROR: Exception getting action interface for %s" % obj
debug.println(debug.LEVEL_INFO, msg, True)
else:
action.doAction(0)
def _getSelectedAccessibleAndOffset(self):
if not self._tree:
msg = "ERROR: Could not get navlist tree"
debug.println(debug.LEVEL_INFO, msg, True)
return None, -1
selection = self._tree.get_selection()
if not selection:
msg = "ERROR: Could not get selection for navlist tree"
debug.println(debug.LEVEL_INFO, msg, True)
return None, -1
model, paths = selection.get_selected_rows()
if not paths:
msg = "ERROR: Could not get paths for navlist tree"
debug.println(debug.LEVEL_INFO, msg, True)
return None, -1
obj = model.get_value(model.get_iter(paths[0]), 0)
offset = model.get_value(model.get_iter(paths[0]), 1)
return obj, max(0, offset)
def showUI(title='', columnHeaders=[], rows=[()], selectedRow=0):
gui = OrcaNavListGUI(title, columnHeaders, rows, selectedRow)
gui.showGUI()
|
yznpku/HackerRank
|
solution/practice/data-structures/arrays/array-left-rotation/solution.py
|
Python
|
mit
| 177 | 0.00565 |
# We slice the array in two parts at index d, then print the
# second part followed by the first.
n, d = map(int, input().split())
|
A = list(map(int, input().split()))
|
print(*(A[d:] + A[:d]))
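# Offline check of the slicing idea with the sample case (no stdin needed):
# rotating [1, 2, 3, 4, 5] left by d=4 yields 5 1 2 3 4.
_demo = [1, 2, 3, 4, 5]
assert _demo[4:] + _demo[:4] == [5, 1, 2, 3, 4]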
|
Tibo-R/jamplaygen
|
fabfile.py
|
Python
|
lgpl-3.0
| 2,830 | 0.006007 |
"""
REQUIREMENTS:
- install pip with distribute (http://packages.python.org/distribute/)
- sudo pip install Fabric
"""
from fabric.api import local
def lang(mode="extract"):
"""
REQUIREMENTS:
- Install before pip with distribute_setup.py (Read the environment setup document)
- sudo pip install babel
- sudo pip install jinja2
HOW TO RUN:
option 1) fab lang
option 2) fab lang:compile
"""
if mode == "compile":
local("pybabel compile -f -d ./locale")
else:
local("pybabel extract -F ./locale/babel.cfg -o ./locale/messages.pot ./ --sort-output --no-location --omit-header")
local("pybabel update -l en_US -d ./locale -i ./locale/messages.pot --previous --ignore-obsolete")
local("pybabel update -l es_ES -d ./locale -i ./locale/messages.pot --previous --ignore-obsolete")
        local("pybabel update -l it_IT -d ./locale -i ./locale/messages.pot --previous --ignore-obsolete")
|
local("pybabel update -l zh_CN -d ./locale -i ./locale/messages.pot --previous --ignore-obsolete")
local("pybabel update -l id_ID -d ./locale -i ./locale/messages.pot --previous --ignore-obsolete")
local("pybabel update -l fr_FR -d ./locale -i ./locale/messages.pot --previous --ignore-obsolete")
        local("pybabel update -l de_DE -d ./locale -i ./locale/messages.pot --previous --ignore-obsolete")
|
local("pybabel update -l ru_RU -d ./locale -i ./locale/messages.pot --previous --ignore-obsolete")
local("pybabel update -l pt_BR -d ./locale -i ./locale/messages.pot --previous --ignore-obsolete")
def start(mode="normal"):
"""
HOW TO RUN:
option 1) fab start
option 2) fab start:clear
"""
if mode == "clear":
local("dev_appserver.py . -p 8080 -a 0.0.0.0 --clear_datastore")
else:
local("dev_appserver.py . -p 8080 -a 0.0.0.0")
def deploy(app_id="sandengine", version="2-2"):
"""
app.yaml never has to be version:default
HOW TO RUN:
option 1) fab deploy
option 2) fab deploy:preview
option 3) fab deploy:prod
option 4) fab deploy:homo
"""
local("appcfg.py --no_cookies --oauth2 -A {0} -V {1} update .".format(app_id, version))
def test(os="mac"):
"""
REQUIREMENTS:
- install pip with distribute (http://packages.python.org/distribute/)
- sudo pip install mock
- sudo pip install webtest
- sudo pip install pyquery
HOW TO RUN:
option 1) fab test
option 2) fab test:mac
option 3) fab test:linux
"""
path = {
"mac": "/usr/local/google_appengine",
}[os]
local("python testrunner.py {0} ./".format(path))
|
saramic/learning
|
dojo/adventofcode.com/2021/catchup/mcpower-day19_2.py
|
Python
|
unlicense
| 4,767 | 0.004615 |
import sys; sys.dont_write_bytecode = True; from mcpower_utils import *
def do_case(inp: str, sample=False):
# READ THE PROBLEM FROM TOP TO BOTTOM OK
def sprint(*a, **k): sample and print(*a, **k)
lines: typing.List[str] = inp.splitlines()
paras: typing.List[typing.List[str]] = lmap(str.splitlines, inp.split("\n\n"))
out = 0
scanners = []
for para in paras:
points = lmap(ints, para[1:])
scanners.append(points)
        # print(len(points))
|
# assume scanner 0 is good
FACINGS = [x for i in [-1, 1] for x in [[i, 0, 0], [0, i, 0], [0, 0, i]]]
def cross(a, b):
c = [a[1]*b[2] - a[2]*b[1],
a[2]*b[0] - a[0]*b[2],
a[0]*b[1] - a[1]*b[0]]
return c
def common(a, b):
aset = set(map(tuple, a))
|
# return b's points, but now relative to a
for facing in FACINGS:
for up in [f for f in FACINGS if all(abs(x) != abs(y) for x, y in zip(f, facing) if x or y)]:
# facing's
right = cross(facing, up)
matrix = [facing, up, right]
new_b = [matvec(matrix, vec) for vec in b]
for a_point in a:
for b_point in new_b:
# assume they're the same
# add a-b to all b
delta = padd(a_point, pneg(b_point))
new_new_b = [padd(delta, b) for b in new_b]
if len(aset.intersection(map(tuple, new_new_b))) >= 12:
return new_new_b, delta
return None
# if sample:
# print(common(scanners[0], scanners[1]))
# quit()
good_scanners = [None] * len(scanners)
good_scanners[0] = scanners[0]
done_scanners = [False] * len(scanners)
deltas = [None] * len(scanners)
deltas[0] = [0, 0, 0]
while True:
for i in range(len(scanners)):
if good_scanners[i] and not done_scanners[i]:
for j in range(len(scanners)):
if i != j and good_scanners[j] is None:
test = common(good_scanners[i], scanners[j])
# sprint(test)
if test is not None:
good_scanners[j] = test[0]
deltas[j] = test[1]
done_scanners[i] = True
print(done_scanners, lmap(bool, good_scanners))
if all(done_scanners):
break
out = set(tuple(point) for points in good_scanners for point in points)
out = len(out)
out = 0
for x in deltas:
for y in deltas:
out = max(out, pdist1(x, y))
# print()
if out:
print("out: ", out)
return # RETURNED VALUE DOESN'T DO ANYTHING, PRINT THINGS INSTEAD
run_samples_and_actual([
r"""
--- scanner 0 ---
404,-588,-901
528,-643,409
-838,591,734
390,-675,-793
-537,-823,-458
-485,-357,347
-345,-311,381
-661,-816,-575
-876,649,763
-618,-824,-621
553,345,-567
474,580,667
-447,-329,318
-584,868,-557
544,-627,-890
564,392,-477
455,729,728
-892,524,684
-689,845,-530
423,-701,434
7,-33,-71
630,319,-379
443,580,662
-789,900,-551
459,-707,401
--- scanner 1 ---
686,422,578
605,423,415
515,917,-361
-336,658,858
95,138,22
-476,619,847
-340,-569,-846
567,-361,727
-460,603,-452
669,-402,600
729,430,532
-500,-761,534
-322,571,750
-466,-666,-811
-429,-592,574
-355,545,-477
703,-491,-529
-328,-685,520
413,935,-424
-391,539,-444
586,-435,557
-364,-763,-893
807,-499,-711
755,-354,-619
553,889,-390
--- scanner 2 ---
649,640,665
682,-795,504
-784,533,-524
-644,584,-595
-588,-843,648
-30,6,44
-674,560,763
500,723,-460
609,671,-379
-555,-800,653
-675,-892,-343
697,-426,-610
578,704,681
493,664,-388
-671,-858,530
-667,343,800
571,-461,-707
-138,-166,112
-889,563,-600
646,-828,498
640,759,510
-630,509,768
-681,-892,-333
673,-379,-804
-742,-814,-386
577,-820,562
--- scanner 3 ---
-589,542,597
605,-692,669
-500,565,-823
-660,373,557
-458,-679,-417
-488,449,543
-626,468,-788
338,-750,-386
528,-832,-391
562,-778,733
-938,-730,414
543,643,-506
-524,371,-870
407,773,750
-104,29,83
378,-903,-323
-778,-728,485
426,699,580
-438,-605,-362
-469,-447,-387
509,732,623
647,635,-688
-868,-804,481
614,-800,639
595,780,-596
--- scanner 4 ---
727,592,562
-293,-554,779
441,611,-461
-714,465,-776
-743,427,-804
-660,-479,-426
832,-632,460
927,-485,-438
408,393,-506
466,436,-512
110,16,151
-258,-428,682
-393,719,612
-211,-452,876
808,-476,-593
-575,615,604
-485,667,467
-680,325,-822
-627,-443,-432
872,-547,-609
833,512,582
807,604,487
839,-516,451
891,-625,532
-652,-548,-490
30,-46,-14
""",r"""
""",r"""
""",r"""
""",r"""
""",r"""
""",r"""
"""], do_case)
|
ssh0/growing-string
|
triangular_lattice/moving_string/moving_string_deadlock.py
|
Python
|
mit
| 5,500 | 0.013057 |
#!/usr/bin/env python
# -*- coding:utf-8 -*-
#
# written by Shotaro Fujimoto
# 2016-05-30
import os
import sys
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from triangular import LatticeTriangular as LT
from base import Main as base
import numpy as np
import random
from tqdm import tqdm
from multiprocessing import Pool
def print_debug(arg):
# print arg
pass
class Main(base):
def __init__(self, Lx=40, Ly=40, N=1, size=[10], plot=False):
# Create triangular lattice with given parameters
self.lattice = LT(np.zeros((Lx, Ly), dtype=np.int),
scale=10., boundary={'h': 'periodic', 'v': 'periodic'})
self.occupied = np.zeros((Lx, Ly), dtype=np.bool)
self.number_of_lines = sum(size)
# Put the strings to the lattice
self.strings = self.create_random_strings(1, size)
# Record the number of time-steps to reach the deadlocks
self.num_deadlock = 0
self.plot = plot
while True:
try:
self.update()
except StopIteration:
break
def update(self, num=0):
        # Record num to measure the time it takes until a deadlock occurs.
        # How does it vary with different string sizes and lattice sizes?
        # See the notes for the detailed parameter settings.
# move head part of string (if possible)
X = self.get_next_xy(self.strings[0].x, self.strings[0].y)
if not X:
print_debug(self.num_deadlock)
raise StopIteration
# update starting position
x, y, vec = X
rmx, rmy = self.strings[0].follow((x, y, (vec+3)%6))
self.occupied[x, y] = True
self.occupied[rmx, rmy] = False
# Record time steps
self.num_deadlock += 1
# print self.occupied
# print self.strings[0].pos, self.strings[0].vec
if self.plot:
ret = self.plot_string()
return ret
def get_next_xy(self, x, y):
        nnx, nny = self.lattice.neighbor_of(x, y)
vectors = [i for i in range(6) if not self.occupied[nnx[i], nny[i]]]
if len(vectors) == 0:
print_debug("no neighbors")
return False
        # Choose the direction stochastically
vector = random.choice(vectors)
        # Return the lattice coordinates of the chosen point
x, y = nnx[vector], nny[vector]
return x, y, vector
# trial = 100
# trial = 3000 # for npy
trial = 10000
params = dict(Lx=40, Ly=40, plot=False)
# def calc_for_each_size(size):
# summation = 0.
# for t in range(trial):
# main = Main(size=size, **params)
# summation += main.num_deadlock
# return summation / trial
def calc_for_each_size(size):
ret = []
for t in range(trial):
main = Main(size=[size], **params)
ret.append(main.num_deadlock)
return ret
if __name__ == '__main__':
# Simple observation of moving string.
# main = Main(Lx=40, Ly=40, lattice_scale=10., size=100, plot=True)
# Calcurate the deadlock time without plots.
# main = Main(Lx=40, Ly=40, lattice_scale=10., size=10, plot=False)
# print main.num_deadlock
#==========================================================================
# Create data
pool = Pool(6)
sizeset = np.unique(np.logspace(3., 8., num=50, base=2, dtype=np.int))
it = pool.imap(calc_for_each_size, sizeset)
T = []
for ret in tqdm(it, total=len(sizeset)):
T.append(ret)
T = np.array(T)
#==========================================================================
#=-========================================================================
# save the data for plotting, and so on
# np.savez("2016-05-31.npz", trial=tria
|
l, sizeset=sizeset, T=T)
# np.savez("2016-06-02.npz", trial=trial, sizeset=sizeset, T=T)
# np.savez("2016-06-03_80.npz", trial=trial, sizeset=sizeset, T=T)
# np.savez("2016-06-03_120.npz", trial=trial, sizeset=sizeset, T=T)
# np.savez("2016-06-07_40.npz", trial=trial, sizeset=sizeset, T=T)
# np.savez("2016-07-12_40.npz", trial=trial, sizeset=sizeset, T=T)
    #==========================================================================
    # Prepare the plot
# fig, ax = plt.subplots()
# ax.set_title("Deadlock time for the string size N on triangular lattice")
#=0========================================================================
    # Plot normally (linear axes)
# ax.plot(sizeset, T, marker='o')
# ax.set_xlabel("$N$")
# ax.set_ylabel("$T$")
    # Looked roughly like an inverse proportionality
#==========================================================================
#=1========================================================================
    # Plot on log-log axes
# ax.loglog(sizeset, T, marker='o')
# ax.set_xlabel("$N$")
# ax.set_ylabel("$T$")
    # Looked roughly like an inverse proportionality
#==========================================================================
#=2========================================================================
    # Looked at the relation between 1/log(N) and log(T)
# logsizeset = np.log10(sizeset)
# logT = np.log10(T)
# ax.plot(1 / logsizeset, logT, marker='o')
# ax.set_xlabel("$N$")
# ax.set_ylabel("$T$")
    # Strictly speaking, it did not look like a straight line.
#==========================================================================
# plt.show()
|
dannygoldstein/sncosmo
|
sncosmo/tests/test_salt2utils.py
|
Python
|
bsd-3-clause
| 3,927 | 0.000255 |
import os
import pickle
from astropy.extern import six
import numpy as np
from numpy.testing import assert_allclose
from scipy.interpolate import RectBivariateSpline
import sncosmo
from sncosmo.salt2utils import BicubicInterpolator, SALT2ColorLaw
# On Python 2 highest protocol is 2.
# Protocols 0 and 1 don't work on the classes here!
TEST_PICKLE_PROTOCOLS = (2,) if six.PY2 else (2, 3, 4)
def test_bicubic_interpolator_vs_snfit():
datadir = os.path.join(os.path.dirname(__file__),
"data")
# created by running generate script in `misc` directory
fname_input = os.path.join(datadir, "interpolation_test_input.dat")
    fname_evalx = os.path.join(datadir, "interpolation_test_evalx.dat")
fname_evaly = os.path.join(datadir, "interpolation_test_evaly.dat")
# result file was created by running snfit software Grid2DFunction
fname_result = os.path.join(datadir, "interpolation_test_result.dat")
# load arrays
x, y, z = sncosmo.read_griddata_ascii(fname_input)
xp = np.loadtxt(fname_evalx)
yp = np.loadtxt(fname_evaly)
result = np.loadtxt(fname_result)
f = BicubicInterpolator(x, y, z)
assert_allclose(f(xp, yp), result, rtol=1e-5)
def test_bicubic_interpolator_shapes():
"""Ensure that input shapes are handled like RectBivariateSpline"""
x = np.array([1., 2., 3., 4., 5.])
z = np.ones((len(x), len(x)))
f = BicubicInterpolator(x, x, z)
f2 = RectBivariateSpline(x, x, z)
assert f(0., [1., 2.]).shape == f2(0., [1., 2.]).shape
assert f([1., 2.], 0.).shape == f2([1., 2.], 0.).shape
assert f(0., 0.).shape == f2(0., 0.).shape
def test_bicubic_interpolator_pickle():
x = np.arange(5)
y = np.arange(10)
z = np.ones((len(x), len(y)))
f = BicubicInterpolator(x, y, z)
for protocol in TEST_PICKLE_PROTOCOLS:
f2 = pickle.loads(pickle.dumps(f, protocol=protocol))
assert f2(4., 5.5) == f(4., 5.5)
def test_salt2colorlaw_vs_python():
"""Compare SALT2ColorLaw vs python implementation"""
B_WAVELENGTH = 4302.57
V_WAVELENGTH = 5428.55
colorlaw_coeffs = [-0.504294, 0.787691, -0.461715, 0.0815619]
colorlaw_range = (2800., 7000.)
# old python implementation
def colorlaw_python(wave):
v_minus_b = V_WAVELENGTH - B_WAVELENGTH
l = (wave - B_WAVELENGTH) / v_minus_b
l_lo = (colorlaw_range[0] - B_WAVELENGTH) / v_minus_b
l_hi = (colorlaw_range[1] - B_WAVELENGTH) / v_minus_b
alpha = 1. - sum(colorlaw_coeffs)
coeffs = [0., alpha]
coeffs.extend(colorlaw_coeffs)
coeffs = np.array(coeffs)
prime_coeffs = (np.arange(len(coeffs)) * coeffs)[1:]
extinction = np.empty_like(wave)
# Blue side
idx_lo = l < l_lo
p_lo = np.polyval(np.flipud(coeffs), l_lo)
pprime_lo = np.polyval(np.flipud(prime_coeffs), l_lo)
extinction[idx_lo] = p_lo + pprime_lo * (l[idx_lo] - l_lo)
# Red side
idx_hi = l > l_hi
p_hi = np.polyval(np.flipud(coeffs), l_hi)
pprime_hi = np.polyval(np.flipud(prime_coeffs), l_hi)
extinction[idx_hi] = p_hi + pprime_hi * (l[idx_hi] - l_hi)
# In between
idx_between = np.invert(idx_lo | idx_hi)
extinction[idx_between] = np.polyval(np.flipud(coeffs), l[idx_between])
return -extinction
colorlaw = SALT2ColorLaw(colorlaw_range, colorlaw_coeffs)
wave = np.linspace(2000., 9200., 201)
assert np.all(colorlaw(wave) == colorlaw_python(wave))
def test_salt2colorlaw_pickle():
colorlaw_coeffs = [-0.504294, 0.787691, -0.461715, 0.0815619]
colorlaw_range = (2800., 7000.)
colorlaw = SALT2ColorLaw(colorlaw_range, colorlaw_coeffs)
for protocol in TEST_PICKLE_PROTOCOLS:
colorlaw2 = pickle.loads(pickle.dumps(colorlaw, protocol=protocol))
wave = np.linspace(2000., 9200., 201)
assert np.all(colorlaw(wave) == colorlaw2(wave))
|
rudyryk/python-samples
|
hello_tornado/hello_feed/core/handlers.py
|
Python
|
cc0-1.0
| 240 | 0.004167 |
import tornado.gen
import tornado.web
from .models import Feed
# Handlers in Tornado like Views in Django
class FeedHandler(tornado.web.RequestHandler):
@tornado.gen.coroutine
def get(self):
feed = yield Feed.fetch()
self.write(feed)
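# Illustrative wiring (hypothetical, not part of this sample): the handler would
# typically be mounted on an application route, e.g.
#   app = tornado.web.Application([(r"/feed", FeedHandler)])
#   app.listen(8888)
#   tornado.ioloop.IOLoop.current().start()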
|
milanlenco/vpp
|
test/vpp_gre_interface.py
|
Python
|
apache-2.0
| 2,595 | 0 |
from vpp_interface import VppInterface
import socket
class VppGreInterface(VppInterface):
"""
VPP GRE interface
"""
def __init__(self, test, src_ip, dst_ip, outer_fib_id=0, is_teb=0):
""" Create VPP loopback interface """
self._sw_if_index = 0
super(VppGreInterface, self).__init__(test)
self._test = test
self.t_src = src_ip
self.t_dst = dst_ip
self.t_outer_fib = outer_fib_id
self.t_is_teb = is_teb
def add_vpp_config(self):
s = socket.inet_pton(socket.AF_INET, self.t_src)
d = socket.inet_pton(socket.AF_INET, self.t_dst)
r = self.test.vapi.gre_tunnel_add_del(s, d,
outer_fib_id=self.t_outer_fib,
is_teb=self.t_is_teb)
self._sw_if_index = r.sw_if_index
self.generate_remote_hosts()
def remove_vpp_config(self):
s = socket.inet_pton(socket.AF_INET, self.t_src)
d = socket.inet_pton(socket.AF_INET, self.t_dst)
self.unconfig()
r = self.test.vapi.gre_tunnel_add_del(s, d,
outer_fib_id=self.t_outer_fib,
is_add=0)
class VppGre6Interface(VppInterface):
"""
VPP GRE IPv6 interface
"""
def __init__(self, test, src_ip, dst_ip, outer_fib_id=0, is_teb=0):
""" Create VPP loopback interface """
self._sw_if_index = 0
        super(VppGre6Interface, self).__init__(test)
self._test = test
self.t_src = src_ip
        self.t_dst = dst_ip
self.t_outer_fib = outer_fib_id
self.t_is_teb = is_teb
def add_vpp_config(self):
s = socket.inet_pton(socket.AF_INET6, self.t_src)
d = socket.inet_pton(socket.AF_INET6, self.t_dst)
r = self.test.vapi.gre_tunnel_add_del(s, d,
outer_fib_id=self.t_outer_fib,
is_teb=self.t_is_teb,
is_ip6=1)
self._sw_if_index = r.sw_if_index
self.generate_remote_hosts()
def remove_vpp_config(self):
s = socket.inet_pton(socket.AF_INET6, self.t_src)
d = socket.inet_pton(socket.AF_INET6, self.t_dst)
self.unconfig()
r = self.test.vapi.gre_tunnel_add_del(s, d,
outer_fib_id=self.t_outer_fib,
is_add=0,
is_ip6=1)
|
glormph/msstitch
|
src/app/readers/fasta.py
|
Python
|
mit
| 4,853 | 0.002885 |
from Bio import SeqIO
def get_proteins_for_db(fastafn, fastadelim, genefield):
"""Runs through fasta file and returns proteins accession nrs, sequences
and evidence levels for storage in lookup DB. Duplicate accessions in
fasta are accepted and removed by keeping only the last one.
"""
records = {acc: (rec, get_record_type(rec)) for acc, rec in
SeqIO.index(fastafn, 'fasta').items()}
proteins = ((x,) for x in records.keys())
sequences = ((acc, str(rec.seq)) for acc, (rec, rtype) in records.items())
desc = ((acc, get_description(rec, rtype)) for acc, (rec, rtype) in records.items() if rtype)
evid = ((acc, get_uniprot_evidence_level(rec, rtype)) for acc, (rec, rtype) in
records.items())
ensgs = [(get_ensg(rec), acc) for acc, (rec, rtype) in records.items()
if rtype == 'ensembl']
def sym_out():
symbols = ((get_symbol(rec, rtype, fastadelim, genefield), acc) for
acc, (rec, rtype) in records.items() if rtype)
othergene = ((get_other_gene(rec, fastadelim, genefield), acc) for acc, (rec, rtype) in records.items()
if not rtype and fastadelim and fastadelim in rec.description)
yield from symbols
yield from othergene
return proteins, sequences, desc, evid, ensgs, [x for x in sym_out()]
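# Illustrative use (hypothetical call, not part of the original module):
#   proteins, sequences, desc, evid, ensgs, symbols = get_proteins_for_db(
#       'proteins.fasta', fastadelim=None, genefield=None)
# Each returned value is an iterable of tuples shaped for bulk insertion into the
# lookup DB mentioned in the docstring.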
def parse_fasta(fn):
with open(fn) as fp:
for record in SeqIO.parse(fp, 'fasta'):
yield record
def get_record_type(record):
dmod = get_decoy_mod_string(record.id)
    test_name = record.id
if dmod is not None:
test_name = record.id.replace(dmod, '')
if test_name.split('|')[0] in ['sp', 'tr']:
return 'swiss'
elif test_name[:3] == 'ENS':
return 'ensembl'
else:
return False
def get_decoy_mod_string(protein):
mods = ['tryp_reverse', 'reverse', 'decoy', 'random', 'shuffle']
for mod in mods:
if mod in protein:
if protein.endswith('_{}'.format(mod)):
return '_{}'.format(mod)
elif protein.endswith('{}'.format(mod)):
return mod
elif protein.startswith('{}_'.format(mod)):
return '{}_'.format(mod)
elif protein.startswith('{}'.format(mod)):
return mod
def get_description(record, rectype):
if rectype == 'ensembl':
desc_spl = [x.split(':') for x in record.description.split()]
try:
descix = [ix for ix, x in enumerate(desc_spl) if x[0] == 'description'][0]
except IndexError:
return 'NA'
desc = ' '.join([':'.join(x) for x in desc_spl[descix:]])[12:]
return desc
elif rectype == 'swiss':
desc = []
for part in record.description.split()[1:]:
if len(part.split('=')) > 1:
break
desc.append(part)
return ' '.join(desc)
def get_other_gene(record, fastadelim, genefield):
return record.description.split(fastadelim)[genefield]
def get_genes_pickfdr(fastafn, outputtype, fastadelim, genefield):
"""Called by protein FDR module for both ENSG and e.g. Uniprot"""
for rec in parse_fasta(fastafn):
rtype = get_record_type(rec)
if rtype == 'ensembl' and outputtype == 'ensg':
yield get_ensg(rec)
elif outputtype == 'genename':
yield get_symbol(rec, rtype, fastadelim, genefield)
def get_ensg(record):
fields = [x.split(':') for x in record.description.split()]
try:
return [x[1] for x in fields if x[0] == 'gene' and len(x) == 2][0]
except IndexError:
raise RuntimeError('ENSEMBL detected but cannot find gene ENSG in fasta')
def get_symbol(record, rectype, fastadelim, genefield):
if rectype == 'ensembl':
fields = [x.split(':') for x in record.description.split()]
sym = [x[1] for x in fields if x[0] == 'gene_symbol' and len(x) == 2]
elif rectype == 'swiss':
fields = [x.split('=') for x in record.description.split()]
sym = [x[1] for x in fields if x[0] == 'GN' and len(x) == 2]
elif fastadelim and fastadelim in record.description and genefield:
return record.description.split(fastadelim)[genefield]
else:
return 'NA'
try:
return sym[0]
except IndexError:
return 'NA'
def get_uniprot_evidence_level(record, rtype):
"""Returns uniprot protein existence evidence level for a fasta header.
Evidence levels are 1-5, but we return 5 - x since sorting still demands
that higher is better."""
if rtype != 'swiss':
return -1
for item in record.description.split():
item = item.split('=')
try:
if item[0] == 'PE' and len(item) == 2:
return 5 - int(item[1])
except IndexError:
continue
return -1
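# Worked example (added note): a SwissProt header containing "PE=1" (strongest
# evidence) yields 5 - 1 = 4, while "PE=5" yields 0 and non-SwissProt records get
# -1, so sorting by this value in descending order still ranks better-supported
# proteins first.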
|
ApolloAuto/apollo
|
modules/tools/ota/create_sec_package.py
|
Python
|
apache-2.0
| 1,573 | 0 |
#!/usr/bin/env python3
###############################################################################
# Copyright 2017 The Apollo Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
import os
import sys
sys.path.append('/home/caros/secure_upgrade/python')
import secure_upgrade_export as sec_api
root_config_path = "/home/caros/secure_upgrade/config/secure_config.json"
ret = sec_api.init_secure_upgrade(root_config_path)
if ret is True:
print('Security environment init successfully!')
else:
print('Security environment init failed!')
exit(1)
homedir = os.environ['HOME']
release_tgz = homedir + '/.cache/apollo_release.tar.gz'
sec_release_tgz = homedir + '/.cache/sec_apollo_release.tar.gz'
package_token = homedir + '/.cache/package_token'
ret = sec_api.sec_upgrade_get_package(
release_tgz, sec_release_tgz, package_token)
if ret is True:
print('Security package generated successfully!')
else:
print('Security package generated failed!')
|
nburn42/tensorflow
|
tensorflow/python/training/session_manager_test.py
|
Python
|
apache-2.0
| 32,764 | 0.009095 |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for SessionManager."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from tensorflow.python.client import session as session_lib
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import gfile
from tensorflow.python.platform import test
from tensorflow.python.training import saver as saver_lib
from tensorflow.python.training import server_lib
from tensorflow.python.training import session_manager
class SessionManagerTest(test.TestCase):
def testPrepareSessionSucceeds(self):
with ops.Graph().as_default():
v = variables.Variable([1.0, 2.0, 3.0], name="v")
sm = session_manager.SessionManager(
ready_op=variables.report_uninitialized_variables())
sess = sm.prepare_session(
"", init_op=variables.global_variables_initializer())
self.assertAllClose([1.0, 2.0, 3.0], sess.run(v))
def testPrepareSessionSucceedsWithInitFeedDict(self):
with ops.Graph().as_default():
      p = array_ops.placeholder(dtypes.float32, shape=(3,))
v = variables.Variable(p, name="v")
sm = session_manager.SessionManager(
          ready_op=variables.report_uninitialized_variables())
sess = sm.prepare_session(
"",
init_op=variables.global_variables_initializer(),
init_feed_dict={p: [1.0, 2.0, 3.0]})
self.assertAllClose([1.0, 2.0, 3.0], sess.run(v))
def testPrepareSessionSucceedsWithInitFn(self):
with ops.Graph().as_default():
v = variables.Variable([125], name="v")
sm = session_manager.SessionManager(
ready_op=variables.report_uninitialized_variables())
sess = sm.prepare_session(
"", init_fn=lambda sess: sess.run(v.initializer))
self.assertAllClose([125], sess.run(v))
def testPrepareSessionFails(self):
checkpoint_dir = os.path.join(self.get_temp_dir(), "prepare_session")
checkpoint_dir2 = os.path.join(self.get_temp_dir(), "prepare_session2")
try:
gfile.DeleteRecursively(checkpoint_dir)
gfile.DeleteRecursively(checkpoint_dir2)
except errors.OpError:
pass # Ignore
gfile.MakeDirs(checkpoint_dir)
with ops.Graph().as_default():
v = variables.Variable([1.0, 2.0, 3.0], name="v")
sm = session_manager.SessionManager(
ready_op=variables.report_uninitialized_variables())
saver = saver_lib.Saver({"v": v})
sess = sm.prepare_session(
"",
init_op=variables.global_variables_initializer(),
saver=saver,
checkpoint_dir=checkpoint_dir)
self.assertAllClose([1.0, 2.0, 3.0], sess.run(v))
checkpoint_filename = os.path.join(checkpoint_dir,
"prepare_session_checkpoint")
saver.save(sess, checkpoint_filename)
# Create a new Graph and SessionManager and recover.
with ops.Graph().as_default():
# Renames the checkpoint directory.
os.rename(checkpoint_dir, checkpoint_dir2)
gfile.MakeDirs(checkpoint_dir)
v = variables.Variable([6.0, 7.0, 8.0], name="v")
with self.test_session():
self.assertEqual(False, variables.is_variable_initialized(v).eval())
      sm = session_manager.SessionManager(
ready_op=variables.report_uninitialized_variables())
saver = saver_lib.Saver({"v": v})
# This should fail as there's no checkpoint within 2 seconds.
with self.assertRaisesRegexp(
RuntimeError, "no init_op or init_fn or local_init_op was given"):
sess = sm.prepare_session(
"",
init_op=None,
saver=saver,
checkpoint_dir=checkpoint_dir,
wait_for_checkpoint=True,
max_wait_secs=2)
# Rename the checkpoint directory back.
gfile.DeleteRecursively(checkpoint_dir)
os.rename(checkpoint_dir2, checkpoint_dir)
# This should succeed as there's checkpoint.
sess = sm.prepare_session(
"",
init_op=None,
saver=saver,
checkpoint_dir=checkpoint_dir,
wait_for_checkpoint=True,
max_wait_secs=2)
self.assertEqual(
True,
variables.is_variable_initialized(
sess.graph.get_tensor_by_name("v:0")).eval(session=sess))
def _test_recovered_variable(self,
checkpoint_dir=None,
checkpoint_filename_with_path=None):
# Create a new Graph and SessionManager and recover from a checkpoint.
with ops.Graph().as_default():
v = variables.Variable(2, name="v")
with session_lib.Session():
self.assertEqual(False, variables.is_variable_initialized(v).eval())
sm2 = session_manager.SessionManager(
ready_op=variables.report_uninitialized_variables())
saver = saver_lib.Saver({"v": v})
sess, initialized = sm2.recover_session(
"",
saver=saver,
checkpoint_dir=checkpoint_dir,
checkpoint_filename_with_path=checkpoint_filename_with_path)
self.assertTrue(initialized)
self.assertEqual(
True,
variables.is_variable_initialized(
sess.graph.get_tensor_by_name("v:0")).eval(session=sess))
self.assertEquals(1, sess.run(v))
def testRecoverSession(self):
# Create a checkpoint.
checkpoint_dir = os.path.join(self.get_temp_dir(), "recover_session")
try:
gfile.DeleteRecursively(checkpoint_dir)
except errors.OpError:
pass # Ignore
gfile.MakeDirs(checkpoint_dir)
with ops.Graph().as_default():
v = variables.Variable(1, name="v")
sm = session_manager.SessionManager(
ready_op=variables.report_uninitialized_variables())
saver = saver_lib.Saver({"v": v})
sess, initialized = sm.recover_session(
"", saver=saver, checkpoint_dir=checkpoint_dir)
self.assertFalse(initialized)
sess.run(v.initializer)
self.assertEquals(1, sess.run(v))
saver.save(sess,
os.path.join(checkpoint_dir, "recover_session_checkpoint"))
self._test_recovered_variable(checkpoint_dir=checkpoint_dir)
self._test_recovered_variable(
checkpoint_filename_with_path=saver_lib.latest_checkpoint(
checkpoint_dir))
# Cannot set both checkpoint_dir and checkpoint_filename_with_path.
with self.assertRaises(ValueError):
self._test_recovered_variable(
checkpoint_dir=checkpoint_dir,
checkpoint_filename_with_path=saver_lib.latest_checkpoint(
checkpoint_dir))
def testWaitForSessionReturnsNoneAfterTimeout(self):
with ops.Graph().as_default():
variables.Variable(1, name="v")
sm = session_manager.SessionManager(
ready_op=variables.report_uninitialized_variables(),
recovery_wait_secs=1)
# Set max_wait_secs to allow us to try a few times.
with self.assertRaises(errors.DeadlineExceededError):
sm.wait_for_session(master="", max_wait_secs=3)
def testInitWithNoneLocalInitOpError(self):
# Creating a SessionManager with a None local_init_op but
# non-None ready_for_local_init_op raises ValueError
|
StephanEwen/incubator-flink
|
flink-python/pyflink/fn_execution/coder_impl_slow.py
|
Python
|
apache-2.0
| 28,735 | 0.001183 |
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
import datetime
import decimal
import pickle
from abc import ABC, abstractmethod
from typing import List
import cloudpickle
import pyarrow as pa
from pyflink.common import Row, RowKind
from pyflink.common.time import Instant
from pyflink.datastream.window import TimeWindow, CountWindow
from pyflink.fn_execution.ResettableIO import ResettableIO
from pyflink.fn_execution.stream_slow import InputStream, OutputStream
from pyflink.table.utils import pandas_to_arrow, arrow_to_pandas
ROW_KIND_BIT_SIZE = 2
class LengthPrefixBaseCoderImpl(ABC):
"""
LengthPrefixBaseCoder will be used in Operations and other coders will be the field coder of
LengthPrefixBaseCoder.
"""
def __init__(self, field_coder: 'FieldCoderImpl'):
self._field_coder = field_coder
self._data_out_stream = OutputStream()
def _write_data_to_output_stream(self, out_stream: OutputStream):
out_stream.write_var_int64(self._data_out_stream.size())
out_stream.write(self._data_out_stream.get())
self._data_out_stream.clear()
class FieldCoderImpl(ABC):
@abstractmethod
def encode_to_stream(self, value, out_stream: OutputStream):
"""
Encodes `value` to the output stream.
:param value: The output data
:param out_stream: Output Stream
"""
pass
@abstractmethod
def decode_from_stream(self, in_stream: InputStream, length: int = 0):
"""
Decodes data from the input stream.
:param in_stream: Input Stream
:param length: The `length` size data of input stream will be decoded. The default value is
0 which means the coder won't take use of the length to decode the data from input stream.
:return: The decoded Data.
"""
pass
def encode(self, value):
out = OutputStream()
self.encode_to_stream(value, out)
return out.get()
def decode(self, encoded):
return self.decode_from_stream(InputStream(encoded), len(encoded))
class IterableCoderImpl(LengthPrefixBaseCoderImpl):
"""
Encodes iterable data to output stream. The output mode will decide whether write a special end
message 0x00 to output stream after encoding data.
"""
def __init__(self, field_coder: 'FieldCoderImpl', separated_with_end_message: bool):
super(IterableCoderImpl, self).__init__(field_coder)
self._separated_with_end_message = separated_with_end_message
def encode_to_stream(self, value: List, out_stream: OutputStream):
if value:
for item in value:
self._field_coder.encode_to_stream(item, self._data_out_stream)
self._write_data_to_output_stream(out_stream)
# write end message
if self._separated_with_end_message:
out_stream.write_var_int64(1)
out_stream.write_byte(0x00)
def decode_from_stream(self, in_stream: InputStream):
while in_stream.size() > 0:
yield self._field_coder.decode_from_stream(in_stream, in_stream.read_var_int64())
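# Wire-format sketch (added note, inferred from the code above): each batch is
# written as <var-int payload length><payload bytes>; when
# separated_with_end_message is True a trailing end marker <var-int 1><byte 0x00>
# is appended, and decode_from_stream keeps yielding decoded records until the
# input stream is exhausted.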
class ValueCoderImpl(LengthPrefixBaseCoderImpl):
"""
Encodes a single data to output stream.
"""
def __init__(self, field_coder: 'FieldCoderImpl'):
super(ValueCoderImpl, self).__init__(field_coder)
def encode_to_stream(self, value, out_stream: OutputStream):
self._field_coder.encode_to_stream(value, self._data_out_stream)
self._write_data_to_output_stream(out_stream)
def decode_from_stream(self, in_stream: InputStream):
return self._field_coder.decode_from_stream(in_stream, in_stream.read_var_int64())
class MaskUtils:
"""
A util class used to encode mask value.
"""
def __init__(self, field_count):
self._field_count = field_count
# the row kind uses the first 2 bits of the bitmap, the remaining bits are used for null
# mask, for more details refer to:
# https://github.com/apache/flink/blob/master/flink-core/src/main/java/org/apache/flink/api/java/typeutils/runtime/RowSerializer.java
self._leading_complete_bytes_num = (self._field_count + ROW_KIND_BIT_SIZE) // 8
self._remaining_bits_num = (self._field_count + ROW_KIND_BIT_SIZE) % 8
self.null_mask_search_table = self.generate_null_mask_search_table()
self.null_byte_search_table = (0x80, 0x40, 0x20, 0x10, 0x08, 0x04, 0x02, 0x01)
self.row_kind_search_table = [0x00, 0x80, 0x40, 0xC0]
@staticmethod
def generate_null_mask_search_table():
"""
Each bit of one byte represents if the column at the corresponding position is None or not,
e.g. 0x84 represents the first column and the sixth column are None.
"""
null_mask = []
for b in range(256):
every_num_null_mask = [(b & 0x80) > 0, (b & 0x40) > 0, (b & 0x20) > 0, (b & 0x10) > 0,
(b & 0x08) > 0, (b & 0x04) > 0, (b & 0x02) > 0, (b & 0x01) > 0]
null_mask.append(tuple(every_num_null_mask))
return tuple(null_mask)
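    # Illustrative lookup (added note): the table above maps a mask byte to the
    # None-flags of eight columns, e.g.
    #   MaskUtils.generate_null_mask_search_table()[0x84]
    #   == (True, False, False, False, False, True, False, False)
    # i.e. the first and the sixth of those eight columns are None.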
def write_mask(self, value, row_kind_value, out_stream: OutputStream):
field_pos = 0
null_byte_search_table = self.null_byte_search_table
remaining_bits_num = self._remaining_bits_num
# first byte contains the row kind bits
b = self.row_kind_search_table[row_kind_value]
for i in range(0, 8 - ROW_KIND_BIT_SIZE):
if field_pos + i < len(value) and value[field_pos + i] is None:
b |= null_byte_search_table[i + ROW_KIND_BIT_SIZE]
field_pos += 8 - ROW_KIND_BIT_SIZE
out_stream.write_byte(b)
for _ in range(1, self._leading_complete_bytes_num):
b = 0x00
for i in range(0, 8):
if value[field_pos + i] is None:
b |= null_byte_search_table[i]
field_pos += 8
out_stream.write_byte(b)
if self._leading_complete_bytes_num >= 1 and remaining_bits_num:
b = 0x00
for i in range(remaining_bits_num):
if value[field_pos + i] is None:
b |= null_byte_search_table[i]
out_stream.write_byte(b)
def read_mask(self, in_stream: InputStream):
mask = []
mask_search_table = self.null_mask_search_table
remaining_bits_num = self._remaining_bits_num
for _ in range(self._leading_complete_bytes_num):
b = in_stream.read_byte()
mask.extend(mask_search_table[b])
if remaining_bits_num:
b = in_stream.read_byte()
mask.extend(mask_search_table[b][0:remaining_bits_num])
return mask
class FlattenRowCoderImpl(FieldCoderImpl):
"""
A coder for flatten row (List) object (without field names and row kind value is 0).
"""
def __init__(self, field_coders: List[FieldCoderImpl]):
self._field_coders = field_coders
self._field_count = len(field_coders)
self._mask_utils = MaskUtils(self._field_count)
    def encode_to_stream(self, value, out_stream: OutputStream):
# encode mask value
self._mask_utils.write_mask(value, 0, out_stream)
# enc
|
5nizza/party-elli
|
rally_elli_bool.py
|
Python
|
mit
| 3,337 | 0.002997 |
#!/usr/bin/env python3
import logging
import signal
from typing import List
import elli
from LTL_to_atm import translator_via_spot
from config import Z3_PATH
from syntcomp.rally_template import main_template
from syntcomp.task import Task
from syntcomp.task_creator import TaskCreator
from synthesis.z3_via_pipe import Z3InteractiveViaPipes
class ElliBoolRealTask(Task):
def __init__(self, name, ltl_text, part_text, is_moore, min_size, max_size, max_k:int):
super().__init__(name, True)
self.name = name
self.ltl_text = ltl_text
self.part_text = part_text
self.is_moore = is_moore
self.min_size = min_size
self.max_size = max_size
self.max_k = max_k
def do(self):
solver = Z3InteractiveViaPipes(Z3_PATH)
try:
return elli.check_real(self.ltl_text, self.part_text,
self.is_moore,
translator_via_spot.LTLToAtmViaSpot(),
solver,
self.max_k,
self.min_size, self.max_size, 0)
finally:
solver.die()
class ElliBoolUnrealTask(Task):
def __init__(self, name, ltl_text, part_text, is_moore, min_size, max_size, max_k:int, timeout):
super().__init__(name, False)
self.ltl_text = ltl_text
self.part_text = part_text
self.is_moore = is_moore
self.min_size = min_size
self.max_size = max_size
self.max_k = max_k
self.timeout = timeout
def do(self):
class TimeoutException(Exception):
pass
if self.timeout:
            logging.info('CheckUnrealTask: setting timeout to %i' % self.timeout)
def signal_handler(sig, _):
if sig == signal.SIGALRM:
raise TimeoutException("CheckUnrealTask: timeout reached")
signal.signal(signal.SIGALRM, signal_handler)
signal.alarm(self.timeout)
solver = Z3InteractiveViaPipes(Z3_PATH)
try:
return elli.check_unreal(self.ltl_text, self.part_text,
self.is_moore,
translator_via_spot.LTLToAtmViaSpot(),
solver,
self.max_k,
self.min_size, self.max_size, 0)
except TimeoutException:
return None
finally:
solver.die()
if __name__ == "__main__":
class ElliBoolTasksCreator(TaskCreator):
@staticmethod
def create(ltl_text:str, part_text:str, is_moore:bool) -> List[Task]:
elli_int_real = ElliBoolRealTask('elli.bool.real',
ltl_text, part_text, is_moore,
1, 20, 8)
elli_int_unreal = ElliBoolUnrealTask('elli.unreal.short',
ltl_text, part_text, is_moore,
1, 20, 8, timeout=1200)
return [elli_int_real, elli_int_unreal]
main_template("SMT-based bounded synthesizer, with UCW -> k-LA and thus no integer ranks",
ElliBoolTasksCreator())
|
svn2github/pyopt
|
examples/rosenbrock.py
|
Python
|
gpl-3.0
| 2,679 | 0.011945 |
#!/usr/bin/env python
'''
Solves Rosenbrock's Unconstrained Problem.
min 100*(x2-x1^2)**2 + (1-x1)^2
s.t.: -10 <= xi <= 10, i = 1,2
f* = 0 , x* = [1, 1]
'''
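# Quick check of the stated optimum (added note): at x* = [1, 1],
# f = 100*(1 - 1**2)**2 + (1 - 1)**2 = 0, which matches f* = 0 above.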
# =============================================================================
# Standard Python modules
# =============================================================================
import os, sys, time
import pdb
# =============================================================================
# Extension modules
# =============================================================================
#from pyOpt import *
from pyOpt import Optimization
from pyOpt import PSQP
from pyOpt import SLSQP
from pyOpt import CONMIN
from pyOpt import COBYLA
from pyOpt import SOLVOPT
from pyOpt import KSOPT
from pyOpt import NSGA2
from pyOpt import SDPEN
# =============================================================================
#
# =============================================================================
def objfunc(x):
f = 100*(x[1]-x[0]**2)**2+(1-x[0])**2
g = []
fail = 0
return f,g, fail
# =============================================================================
#
# =============================================================================
opt_prob = Optimization('Rosenbrock Unconstraint Problem',objfunc)
opt_prob.addVar('x1','c',lower=-10.0,upper=10.0,value=-3.0)
opt_prob.addVar('x2','c',lower=-10.0,upper=10.0,value=-4.0)
opt_prob.addObj('f')
print opt_prob
# Instantiate Optimizer (PSQP) & Solve Problem
psqp = PSQP()
psqp.setOption('IPRINT',0)
psqp(opt_prob,sens_type='FD')
print opt_prob.solution(0)
# Instantiate Optimizer (SLSQP) & Solve Problem
slsqp = SLSQP()
slsqp.setOption('IPRINT',-1)
slsqp(opt_prob,sens_type='FD')
print opt_prob.solution(1)
# Instantiate Optimizer (CONMIN) & Solve Problem
conmin = CONMIN()
conmin.setOption('IPRINT',0)
conmin(opt_prob,sens_type='CS')
print opt_prob.solution(2)
# Instantiate Optimizer (COBYLA) & Solve Problem
cobyla = COBYLA()
cobyla.setOption('IPRINT',0)
cobyla(opt_prob)
print opt_prob.solution(3)
# Instantiate Optimizer (SOLVOPT) & Solve Problem
solvopt = SOLVOPT()
solvopt.setOption('iprint',-1)
solvopt(opt_prob,sens_type='FD')
print opt_prob.solution(4)
# Instantiate Optimizer (KSOPT) & Solve Problem
ksopt = KSOPT()
ksopt.setOption('IPRINT',0)
ksopt(opt_prob,sens_type='FD')
print opt_prob.solution(5)
# Instantiate Optimizer (NSGA2) & Solve Problem
nsga2 = NSGA2()
nsga2.setOption('PrintOut',0)
nsga2(opt_prob)
print opt_prob.solution(6)
# Instantiate Optimizer (SDPEN) & Solve Problem
sdpen = SDPEN()
sdpen.setOption('iprint',-1)
sdpen(opt_prob)
print opt_prob.solution(7)
|
Meerkat007/Clothes-Shop-Website
|
server/clothes/tests.py
|
Python
|
mit
| 369 | 0.00542 |
from django.test import TestCase
from .models import Clothes
class ClothesModelTests(TestCase):
    def setUp(self):
Clothes.objects.create(clothes_type='ladies dress', price=28.50)
Clothes.objects.create(clothes_type='men tie', price=8.50)
def test_number_of_clothes_created(self):
        self.assertEqual(Clothes.objects.all().count(), 2)
|
jrabenoit/shopvec
|
shopvec.py
|
Python
|
gpl-3.0
| 1,402 | 0.017832 |
#!/usr/bin/env python3
import os
import crossval, features, estimators, estpicker, bootstrap
def Vecuum():
print('\nGROUP CHOICES (automated order: 2,1,0,4,3,5,11,10,12)')
print('\nSymptom Severity:\n')
print(' 0= control/mild\n 1= control/severe\n 2= control/very severe\n 3= mild/severe\n 4= mild/very severe\n 5= severe/very severe\n 6= control/all patients\n')
print('Treatment Response:\n')
print(' 7= control/non-responder\n 8= control/all responder\n 9= control/remitter only\n 10= non-responder/all responder\n 11= non-responder/remitter only\n 12= responder vs remitter\n 13= control/all patients\n')
groups= [2,1,0,
4,3,5,
11,10,12]
runs=5
for group in groups:
run=1
for i in range(runs):
print('BEGINNING RUN {}/{}'.format(run, runs))
crossval.OuterCV(group)
crossval.InnerCV()
features.SelKBest()
features.SelKBestOuter()
estimators.InnerFolds(group, run)
bestest= estpicker.Best(group, run)
estimators.OuterFolds(group, run, bestest)
bootstrap.Bill(group, run)
run= run + 1
print('RUN COMPLETE')
    os.system('spd-say -r -50 -p -50 -t female3 "your groups have finished running. To run more groups, you must construct additional pylons."')
return
|
istio/api
|
python/istio_api/mcp/v1alpha1/resource_pb2.py
|
Python
|
apache-2.0
| 3,416 | 0.005269 |
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: mcp/v1alpha1/resource.proto
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.protobuf import any_pb2 as google_dot_protobuf_dot_any__pb2
from gogoproto import gogo_pb2 as gogoproto_dot_gogo__pb2
from mcp.v1alpha1 import metadata_pb2 as mcp_dot_v1alpha1_dot_metadata__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='mcp/v1alpha1/resource.proto',
package='istio.mcp.v1alpha1',
syntax='proto3',
serialized_options=b'Z\031istio.io/api/mcp/v1alpha1\250\342\036\001',
create_key=_descriptor._internal_create_key,
serialized_pb=b'\n\x1bmcp/v1alpha1/resource.proto\x12\x12istio.mcp.v1alpha1\x1a\x19google/protobuf/any.proto\x1a\x14gogoproto/gogo.proto\x1a\x1bmcp/v1alpha1/metadata.proto\"n\n\x08Resource\x12\x38\n\x08metadata\x18\x01 \x01(\x0b\x32\x1c.istio.mcp.v1alpha1.MetadataR\x08metadata\x12(\n\x04\x62ody\x18\x02 \x01(\x0b\x32\x14.google.protobuf.AnyR\x04\x62odyB\x1fZ\x19istio.io/api/mcp/v1alpha1\xa8\xe2\x1e\x01\x62\x06proto3'
,
dependencies=[google_dot_protobuf_dot_any__pb2.DESCRIPTOR,gogoproto_dot_gogo__pb2.DESCRIPTOR,mcp_dot_v1alpha1_dot_metadata__pb2.DESCRIPTOR,])
_RESOURCE = _descriptor.Descriptor(
name='Resource',
full_name='istio.mcp.v1alpha1.Resource',
filename=None,
file=DESCRIPTOR,
  containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='metadata', full_name='istio.mcp.v1alpha1.Resource.metadata', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
      serialized_options=None, json_name='metadata', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='body', full_name='istio.mcp.v1alpha1.Resource.body', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, json_name='body', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=129,
serialized_end=239,
)
_RESOURCE.fields_by_name['metadata'].message_type = mcp_dot_v1alpha1_dot_metadata__pb2._METADATA
_RESOURCE.fields_by_name['body'].message_type = google_dot_protobuf_dot_any__pb2._ANY
DESCRIPTOR.message_types_by_name['Resource'] = _RESOURCE
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
Resource = _reflection.GeneratedProtocolMessageType('Resource', (_message.Message,), {
'DESCRIPTOR' : _RESOURCE,
'__module__' : 'mcp.v1alpha1.resource_pb2'
# @@protoc_insertion_point(class_scope:istio.mcp.v1alpha1.Resource)
})
_sym_db.RegisterMessage(Resource)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
|
rodxavier/open-pse-initiative
|
django_project/market/models.py
|
Python
|
mit
| 445 | 0.008989 |
from django.db import models
class NonTradingDay(models.Model):
non_trading_date = models.DateField()
class Meta:
ordering = ('non_trading_date',)
verbose_name = 'Non Trading Day'
        verbose_name_plural = 'Non Trading Days'
def __unicode__(self):
return self.non_trading_date.strftime('%B %d, %Y')
    def __str__(self):
return self.non_trading_date.strftime('%B %d, %Y')
|
Desarrollo-CeSPI/meran
|
dev-plugins/node64/lib/node/wafadmin/Utils.py
|
Python
|
gpl-3.0
| 20,212 | 0.030972 |
#!/usr/bin/env python
# Meran - MERAN UNLP is a ILS (Integrated Library System) wich provides Catalog,
# Circulation and User's Management. It's written in Perl, and uses Apache2
# Web-Server, MySQL database and Sphinx 2 indexing.
# Copyright (C) 2009-2013 Grupo de desarrollo de Meran CeSPI-UNLP
#
# This file is part of Meran.
#
# Meran is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Meran is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Meran. If not, see <http://www.gnu.org/licenses/>.
# encoding: utf-8
# Meran - MERAN UNLP is a ILS (Integrated Library System) wich provides Catalog,
# Circulation and User's Management. It's written in Perl, and uses Apache2
# Web-Server, MySQL database and Sphinx 2 indexing.
# Copyright (C) 2009-2013 Grupo de desarrollo de Meran CeSPI-UNLP
#
# This file is part of Meran.
#
# Meran is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Meran is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Meran. If not, see <http://www.gnu.org/licenses/>.
# Thomas Nagy, 2005 (ita)
"""
Utilities, the stable ones are the following:
* h_file: compute a unique value for a file (hash), it uses
the module fnv if it is installed (see waf/utils/fnv & http://code.google.com/p/waf/wiki/FAQ)
else, md5 (see the python docs)
For large projects (projects with more than 15000 files) or slow hard disks and filesystems (HFS)
it is possible to use a hashing based on the path and the size (may give broken cache results)
The method h_file MUST raise an OSError if the file is a folder
import stat
def h_file(filename):
st = os.stat(filename)
if stat.S_ISDIR(st[stat.ST_MODE]): raise IOError('not a file')
m = Utils.md5()
m.update(str(st.st_mtime))
m.update(str(st.st_size))
m.update(filename)
return m.digest()
To replace the function in your project, use something like this:
import Utils
Utils.h_file = h_file
* h_list
* h_fun
* get_term_cols
* ordered_dict
"""
import os, sys, imp, string, errno, traceback, inspect, re, shutil, datetime, gc
# In python 3.0 we can get rid of all this
try: from UserDict import UserDict
except ImportError: from collections import UserDict
if sys.hexversion >= 0x2060000 or os.name == 'java':
import subprocess as pproc
else:
import pproc
import Logs
from Constants import *
try:
from collections import deque
except ImportError:
class deque(list):
def popleft(self):
return self.pop(0)
is_win32 = sys.platform == 'win32'
try:
# defaultdict in python 2.5
from collections import defaultdict as DefaultDict
except ImportError:
class DefaultDict(dict):
def __init__(self, default_factory):
super(DefaultDict, self).__init__()
self.default_factory = default_factory
def __getitem__(self, key):
try:
return super(DefaultDict, self).__getitem__(key)
except KeyError:
value = self.default_factory()
self[key] = value
return value
class WafError(Exception):
def __init__(self, *args):
self.args = args
try:
self.stack = traceback.extract_stack()
except:
pass
Exception.__init__(self, *args)
def __str__(self):
return str(len(self.args) == 1 and self.args[0] or self.args)
class WscriptError(WafError):
def __init__(self, message, wscript_file=None):
if wscript_file:
self.wscript_file = wscript_file
self.wscript_line = None
else:
try:
(self.wscript_file, self.wscript_line) = self.locate_error()
except:
(self.wscript_file, self.wscript_line) = (None, None)
msg_file_line = ''
if self.wscript_file:
msg_file_line = "%s:" % self.wscript_file
if self.wscript_line:
msg_file_line += "%s:" % self.wscript_line
err_message = "%s error: %s" % (msg_file_line, message)
WafError.__init__(self, err_message)
def locate_error(self):
stack = traceback.extract_stack()
stack.reverse()
for frame in stack:
file_name = os.path.basename(frame[0])
is_wscript = (file_name == WSCRIPT_FILE or file_name == WSCRIPT_BUILD_FILE)
if is_wscript:
return (frame[0], frame[1])
return (None, None)
indicator = is_win32 and '\x1b[A\x1b[K%s%s%s\r' or '\x1b[K%s%s%s\r'
try:
from fnv import new as md5
import Constants
Constants.SIG_NIL = 'signofnv'
def h_file(filename):
m = md5()
try:
m.hfile(filename)
x = m.digest()
if x is None: raise OSError("not a file")
return x
except SystemError:
raise OSError("not a file" + filename)
except ImportError:
try:
try:
from hashlib import md5
except ImportError:
from md5 import md5
def h_file(filename):
f = open(filename, 'rb')
m = md5()
while (filename):
filename = f.read(100000)
m.update(filename)
f.close()
return m.digest()
except ImportError:
# portability fixes may be added elsewhere (although, md5 should be everywhere by now)
md5 = None
class ordered_dict(UserDict):
def __init__(self, dict = None):
self.allkeys = []
UserDict.__init__(self, dict)
def __delitem__(self, key):
self.allkeys.remove(key)
UserDict.__delitem__(self, key)
def __setitem__(self, key, item):
if key not in self.allkeys: self.allkeys.append(key)
UserDict.__setitem__(self, key, item)
def exec_command(s, **kw):
if 'log' in kw:
kw['stdout'] = kw['stderr'] = kw['log']
del(kw['log'])
kw['shell'] = isinstance(s, str)
try:
proc = pproc.Popen(s, **kw)
return proc.wait()
except OSError:
return -1
if is_win32:
def exec_command(s, **kw):
if 'log' in kw:
			kw['stdout'] = kw['stderr'] = kw['log']
del(kw['log'])
kw['shell'] = isinstance(s, str)
if len(s) > 2000:
startupinfo = pproc.STARTUPINFO()
startupinfo.dwFlags |= pproc.STARTF_USESHOWWINDOW
kw['startupinfo'] = startupinfo
try:
			if 'stdout' not in kw:
kw['stdout'] = pproc.PIPE
kw['stderr'] = pproc.PIPE
proc = pproc.Popen(s,**kw)
(stdout, stderr) = proc.communicate()
Logs.info(stdout)
if stderr:
Logs.error(stderr)
return proc.returncode
else:
proc = pproc.Popen(s,**kw)
return proc.wait()
except OSError:
return -1
listdir = os.listdir
if is_win32:
def listdir_win32(s):
if re.match('^[A-Za-z]:$', s):
# os.path.isdir fails if s contains only the drive name... (x:)
s += os.sep
if not os.path.isdir(s):
e = OSError()
e.errno = errno.ENOENT
raise e
return os.listdir(s)
listdir = listdir_win32
def waf_version(mini = 0x010000, maxi = 0x100000):
"Halts if the waf version is wrong"
ver = HEXVERSION
try: min_val = mini + 0
except TypeError: min_val = int(mini.replace('.', '0'), 16)
if min_val > ver:
Logs.error("waf version should be at least %s (%s found)" % (mini, ver))
sys.exit(0)
try: max_val = maxi + 0
except TypeError: max_val = int(maxi.replace('.', '0'), 16)
if max_val < ver:
Logs.error("waf version should be at most %s (%s found)" % (maxi, ver))
sys.exit(0)
def python_24_guard():
if sys.hexversion < 0x20400f0 or sys.hexversion >= 0x3000000:
raise ImportError("Waf requires Python >= 2.3 but the raw source requires Python 2.4, 2.5 or 2.6")
def ex_stack():
exc_type, exc_value, tb = sys.exc_info()
if Logs.verbose > 1:
exc_lines = traceback.format_exception(exc_type, exc_value, tb)
return ''.join(exc_lines)
return str(exc_value)
def to_list(sth):
if isinstance(sth, str):
return sth.split()
else:
return sth
g_loaded_modules = {}
"index modules by
|
alxgu/ansible
|
lib/ansible/modules/network/aci/mso_schema_site_vrf_region.py
|
Python
|
gpl-3.0
| 6,343 | 0.002207 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2019, Dag Wieers (@dagwieers) <dag@wieers.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: mso_schema_site_vrf_region
short_description: Manage site-local VRF regions in schema template
description:
- Manage site-local VRF regions in schema template on Cisco ACI Multi-Site.
author:
- Dag Wieers (@dagwieers)
version_added: '2.8'
options:
schema:
description:
- The name of the schema.
type: str
required: yes
site:
description:
- The name of the site.
type: str
required: yes
template:
description:
- The name of the template.
type: str
required: yes
vrf:
description:
- The name of the VRF.
type: str
region:
description:
- The name of the region to manage.
type: str
aliases: [ name ]
state:
description:
- Use C(present) or C(absent) for adding or removing.
- Use C(query) for listing an object or multiple objects.
type: str
choices: [ absent, present, query ]
default: present
seealso:
- module: mso_schema_site_vrf
- module: mso_schema_template_vrf
extends_documentation_fragment: mso
'''
EXAMPLES = r'''
- name: Add a new site VRF region
mso_schema_template_vrf_region:
host: mso_host
username: admin
password: SomeSecretPassword
schema: Schema1
site: Site1
template: Template1
vrf: VRF1
region: us-west-1
state: present
delegate_to: localhost
- name: Remove a site VRF region
mso_schema_template_vrf_region:
host: mso_host
username: admin
password: SomeSecretPassword
schema: Schema1
site: Site1
template: Template1
vrf: VRF1
region: us-west-1
state: absent
delegate_to: localhost
- name: Query a specific site VRF region
mso_schema_template_vrf_region:
host: mso_host
username: admin
password: SomeSecretPassword
schema: Schema1
site: Site1
template: Template1
vrf: VRF1
region: us-west-1
state: query
delegate_to: localhost
register: query_result
- name: Query all site VRF regions
mso_schema_template_vrf_region:
host: mso_host
username: admin
password: SomeSecretPassword
schema: Schema1
site: Site1
template: Template1
vrf: VRF1
state: query
delegate_to: localhost
register: query_result
'''
RETURN = r'''
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.aci.mso import MSOModule, mso_argument_spec
def main():
argument_spec = mso_argument_spec()
argument_spec.update(
schema=dict(type='str', required=True),
site=dict(type='str', required=True),
template=dict(type='str', required=True),
vrf=dict(type='str', required=True),
region=dict(type='str', aliases=['name']), # This parameter is not required for querying all objects
state=dict(type='str', default='present', choices=['absent', 'present', 'query']),
)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True,
required_if=[
['state', 'absent', ['region']],
['state', 'present', ['region']],
],
)
schema = module.params['schema']
site = module.params['site']
template = module.params['template']
vrf = module.params['vrf']
region = module.params['region']
state = module.params['state']
mso = MSOModule(module)
# Get schema_id
schema_obj = mso.get_obj('schemas', displayName=schema)
if not schema_obj:
mso.fail_json(msg="Provided schema '{0}' does not exist".format(schema))
schema_path = 'schemas/{id}'.format(**schema_obj)
schema_id = schema_obj['id']
# Get site
site_id = mso.lookup_site(site)
# Get site_idx
sites = [(s['siteId'], s['templateName']) for s in schema_obj['sites']]
if (site_id, template) not in sites:
mso.fail_json(msg="Provided site/template '{0}-{1}' does not exist. Existing sites/templates: {2}".format(site, template, ', '.join(sites)))
# Schema-access uses indexes
site_idx = sites.index((site_id, template))
# Path-based access uses site_id-template
site_template = '{0}-{1}'.format(site_id, template)
# Get VRF
vrf_ref = mso.vrf_ref(schema_id=schema_id, template=template, vrf=vrf)
vrfs = [v['vrfRef'] for v in schema_obj['sites'][site_idx]['vrfs']]
if vrf_ref not in vrfs:
mso.fail_json(msg="Provided vrf '{0}' does not exist. Existing vrfs: {1}".format(vrf, ', '.join(vrfs)))
vrf_idx = vrfs.index(vrf_ref)
# Get Region
regions = [r['name'] for r in schema_obj['sites'][site_idx]['vrfs'][vrf_idx]['regions']]
if region is not None and region in regions:
region_idx = regions.index(region)
region_path = '/sites/{0}/vrfs/{1}/regions/{2}'.format(site_template, vrf, region)
mso.existing = schema_obj['sites'][site_idx]['vrfs'][vrf_idx]['regions'][region_idx]
if state == 'query':
if region is None:
mso.existing = schema_obj['sites'][site_idx]['vrfs'][vrf_idx]['regions']
elif not mso.existing:
mso.fail_json(msg="Region '{region}' not found".format(region=region))
mso.exit_json()
    regions_path = '/sites/{0}/vrfs/{1}/regions'.format(site_template, vrf)
ops = []
mso.previous = mso.existing
if state == 'absent':
if mso.existing:
mso.sent = mso.existing = {}
            ops.append(dict(op='remove', path=region_path))
elif state == 'present':
payload = dict(
name=region,
)
mso.sanitize(payload, collate=True)
if mso.existing:
ops.append(dict(op='replace', path=region_path, value=mso.sent))
else:
ops.append(dict(op='add', path=regions_path + '/-', value=mso.sent))
mso.existing = mso.proposed
if not module.check_mode:
mso.request(schema_path, method='PATCH', data=ops)
mso.exit_json()
if __name__ == "__main__":
main()
|
pyQuest/mount-api
|
mountapi/schema.py
|
Python
|
apache-2.0
| 2,575 | 0 |
import abc
import inspect
import re
from mountapi.core import exceptions
class AbstractConverter(metaclass=abc.ABCMeta):
param_url: str = None
param_regex: str = None
@classmethod
def path_to_regex(cls, path):
return re.sub(cls.param_url, cls.param_regex, path) + '$'
class IntConverter(AbstractConverter):
param_url = r'<(?P<param>\w+):int>'
param_regex = r'(?P<\1>\\d+)'
class StrConverter(AbstractConverter):
param_url = r'<(?P<param>\w+):str>'
param_regex = r'(?P<\1>\\w+)'
class AbstractSchema(exceptions.NotImplementedMixin, metaclass=abc.ABCMeta):
@abc.abstractmethod
def build(self):
self.not_implemented()
@abc.abstractmethod
def match(self, path):
self.not_implemented()
class Schema(AbstractSchema):
_converter_map = {int: IntConverter, str: StrConverter}
def __init__(self, routes: list) -> None:
self._routes = routes
self._schema = None
def build(self) -> None:
if self._schema is None:
self._schema = self._build_schema()
def _build_schema(self):
schema = {}
        for route in self._routes:
schema[route.path] = {
'endpoint': route.endpoint,
'regex': self._get_path_regex(route.path),
**self._get_schema_http_methods(route)
}
return schema
    def _get_path_regex(self, path):
for converter in self._converter_map.values():
path = converter.path_to_regex(path)
return path
def _get_schema_http_methods(self, route):
return {
http_method: {
'handler': getattr(route.endpoint, http_method.lower()),
'params': self._get_func_params(
getattr(route.endpoint, http_method.lower())
)
} for http_method in route.endpoint.get_allowed_methods()
}
def _get_func_params(self, func):
return {
param.name: self._converter_map[param.annotation]
for param in inspect.signature(func).parameters.values()
if param.annotation != inspect.Parameter.empty
}
def match(self, path):
for route_path in self._schema:
route_match = re.match(self._schema[route_path]['regex'], path)
if route_match:
return {
'endpoint': self._schema[route_path]['endpoint'],
'kwargs': route_match.groupdict()
}
else:
raise exceptions.NotFound()
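# Illustrative sketch only (not part of the original module; the route below
# and its endpoint are made up). The converters rewrite typed path segments
# into named regex groups, and Schema.match() returns the endpoint plus the
# captured kwargs:
#
#   IntConverter.path_to_regex('/users/<user_id:int>')
#   # -> '/users/(?P<user_id>\\d+)$'
#
#   schema = Schema([some_route])   # some_route exposes .path and .endpoint
#   schema.build()
#   schema.match('/users/42')
#   # -> {'endpoint': some_route.endpoint, 'kwargs': {'user_id': '42'}}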
|
kierenbeckett/sentinel
|
sentinel/alert_plugins/rabbit_queues.py
|
Python
|
apache-2.0
| 1,275 | 0.003922 |
import requests
from requests.auth import HTTPBasicAuth
def get_data(config):
    auth = HTTPBasicAuth(config['authentication']['username'], config['authentication']['password'])
resp = requests.get(config['host'] + '/api/queues', auth=auth)
queues = resp.json()
data = {}
for queue in queues:
name = queue['name']
message_stats = queue.get('message_stats', {})
queue_size = queue.get('messages')
ack_rate = (message_stats.get('ack_details') or {}).get('rate')
nack_rate = (message_stats.get('redeliver_details') or {}).get('rate')
        (inactive_threshold, active_threshold, nack_threshold) = (50, 5000, 1)
for qs_name, qs_threshold in config['queue_sizes'].items():
if name.startswith(qs_name):
(inactive_threshold, active_threshold, nack_threshold) = qs_threshold
data[name + ' queue'] = {
'state': 'FAIL' if (queue_size > inactive_threshold and (ack_rate < 2 or ack_rate is None) or queue_size > active_threshold or nack_rate > nack_threshold) else 'OK',
'message': 'size is %d, ack rate is %.2f, nack rate is %.2f' % (queue_size if queue_size else 0, ack_rate if ack_rate else 0, nack_rate if nack_rate else 0)
}
return data
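# Shape of the config dict this plugin expects, inferred from the lookups
# above (host, credentials and thresholds are made-up examples):
#
#   config = {
#       'host': 'http://rabbit.example.com:15672',
#       'authentication': {'username': 'guest', 'password': 'guest'},
#       'queue_sizes': {
#           # name prefix: (inactive_threshold, active_threshold, nack_threshold)
#           'orders': (100, 10000, 5),
#       },
#   }
#   get_data(config)
#   # -> {'orders queue': {'state': 'OK', 'message': 'size is 3, ack rate is 1.20, nack rate is 0.00'}}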
|
LockScreen/Backend
|
venv/lib/python2.7/site-packages/awscli/customizations/cloudsearch.py
|
Python
|
mit
| 4,291 | 0.000466 |
# Copyright 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import logging
from awscli.customizations.flatten import FlattenArguments, SEP
from botocore.compat import OrderedDict
LOG = logging.getLogger(__name__)
DEFAULT_VALUE_TYPE_MAP = {
'Int': int,
'Double': float,
'IntArray': int,
'DoubleArray': float
}
def index_hydrate(params, container, cli_type, key, value):
"""
Hydrate an index-field option value to construct something like::
{
'index_field': {
'DoubleOptions': {
'DefaultValue': 0.0
}
}
}
"""
if 'IndexField' not in params:
params['IndexField'] = {}
if 'IndexFieldType' not in params['IndexField']:
raise RuntimeError('You must pass the --type option.')
# Find the type and transform it for the type options field name
# E.g: int-array => IntArray
_type = params['IndexField']['IndexFieldType']
_type = ''.join([i.capitalize() for i in _type.split('-')])
# ``index_field`` of type ``latlon`` is mapped to ``Latlon``.
# However, it is defined as ``LatLon`` in the model so it needs to
# be changed.
if _type == 'Latlon':
_type = 'LatLon'
# Transform string value to the correct type?
if key.split(SEP)[-1] == 'DefaultValue':
value = DEFAULT_VALUE_TYPE_MAP.get(_type, lambda x: x)(value)
# Set the proper options field
if _type + 'Options' not in params['IndexField']:
params['IndexField'][_type + 'Options'] = {}
params['IndexField'][_type + 'Options'][key.split(SEP)[-1]] = value
FLATTEN_CONFIG = {
"define-expression": {
"expression": {
"keep": False,
"flatten": OrderedDict([
# Order is crucial here! We're
# flattening ExpressionValue to be "expression",
# but this is the name ("expression") of the our parent
# key, the top level nested param.
("ExpressionName", {"name": "name"}),
("ExpressionValue", {"name": "expression"}),]),
}
},
"define-index-field": {
"index-field": {
"keep": False,
# We use an ordered dict because `type` needs to be parsed before
# any of the <X>Options values.
"flatten": OrderedDict([
("IndexFieldName", {"name": "name"}),
("IndexFieldType", {"name": "type"}),
("IntOptions.DefaultValue", {"name": "default-value",
"type": "string",
"hydrate": index_hydrate}),
("IntOptions.FacetEnabled", {"name": "facet-enabled",
"hydrate": index_hydrate }),
("IntOptions.SearchEnabled", {"name": "search-enabled",
"hydrate": index_hydrate}),
("IntOptions.ReturnEnabled", {"name": "return-enabled",
"hydrate": index_hydrate}),
("IntOptions.SortEnabled", {"name": "sort-enabled",
"hydrate": index_hydrate}),
("TextOptions.HighlightEnabled", {"name": "highlight-enabled",
"hydrate": index_hydrate}),
("TextOptions.AnalysisScheme", {"name": "analysis-scheme",
"hydrate": index_hydrate})
])
}
}
}
def initialize(cli):
"""
    The entry point for CloudSearch customizations.
"""
    flattened = FlattenArguments('cloudsearch', FLATTEN_CONFIG)
flattened.register(cli)
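# Rough illustration of the flattening above (assumed invocation, not an
# official example): with FLATTEN_CONFIG registered, a call along the lines of
#
#   aws cloudsearch define-index-field --name rating --type int --default-value 3
#
# is hydrated by index_hydrate into roughly
#
#   {'IndexField': {'IndexFieldName': 'rating',
#                   'IndexFieldType': 'int',
#                   'IntOptions': {'DefaultValue': 3}}}
#
# with DEFAULT_VALUE_TYPE_MAP coercing the string "3" back to an int.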
|
pantsbuild/pants
|
src/python/pants/jvm/run_deploy_jar_intergration_test.py
|
Python
|
apache-2.0
| 4,405 | 0.001135 |
# Copyright 2020 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
import textwrap
from textwrap import dedent
from pants.engine.internals.native_engine import FileDigest
from pants.jvm.resolve.common import ArtifactRequirement, Coordinate, Coordinates
from pants.jvm.resolve.coursier_fetch import CoursierLockfileEntry, CoursierResolvedLockfile
from pants.jvm.resolve.coursier_test_util import TestCoursierWrapper
from pants.testutil.pants_integration_test import run_pants, setup_tmpdir
EMPTY_RESOLVE = """
# --- BEGIN PANTS LOCKFILE METADATA: DO NOT EDIT OR REMOVE ---
# {{
# "version": 1,
# "generated_with_requirements": [
# ]
# }}
# --- END PANTS LOCKFILE METADATA ---
"""
DEFAULT_LOCKFILE = (
TestCoursierWrapper(
CoursierResolvedLockfile(
(
CoursierLockfileEntry(
coord=Coordinate(
group="org.scala-lang", artifact="scala-library", version="2.13.6"
),
file_name="org.scala-lang_scala-library_2.13.6.jar",
direct_dependencies=Coordinates(),
dependencies=Coordinates(),
file_digest=FileDigest(
"f19ed732e150d3537794fd3fe42ee18470a3f707efd499ecd05a99e727ff6c8a", 5955737
),
),
)
)
)
.serialize(
[
ArtifactRequirement(
coordinate=Coordinate(
group="org.scala-lang", artifact="scala-library", version="2.13.6"
)
|
)
]
)
.replace("{", "{{")
.replace("}", "}}")
)
DEFAULT_SCALA_LIBRARY_TARGET = textwrap.dedent(
"""\
jvm_artifact(
name="org.scala-lang_scala-library_2.13.6",
group="org.scala-lang",
artifact="scala-library",
version="2.13.6",
)
"""
)
def test_java() -> None:
sources = {
"src/org/pantsbuild/test/Hello.java": dedent(
"""\
package org.pantsbuild.test;
public class Hello {{
public static void main(String[] args) {{
System.out.println("Hello, World!");
}}
}}
"""
),
"src/org/pantsbuild/test/BUILD": dedent(
"""\
java_sources()
deploy_jar(
name="test_deploy_jar",
main="org.pantsbuild.test.Hello",
dependencies=[":test"],
)
"""
),
"lockfile": EMPTY_RESOLVE,
}
with setup_tmpdir(sources) as tmpdir:
args = [
"--backend-packages=pants.backend.experimental.java",
f"--source-root-patterns=['{tmpdir}/src']",
"--pants-ignore=__pycache__",
f'--jvm-resolves={{"empty": "{tmpdir}/lockfile"}}',
"--jvm-default-resolve=empty",
"run",
f"{tmpdir}/src/org/pantsbuild/test:test_deploy_jar",
]
result = run_pants(args)
assert result.stdout.strip() == "Hello, World!"
def test_scala() -> None:
sources = {
"src/org/pantsbuild/test/Hello.scala": dedent(
"""\
package org.pantsbuild.test;
object Hello {{
def main(args: Array[String]): Unit = {{
println("Hello, World!")
}}
}}
"""
),
"src/org/pantsbuild/test/BUILD": dedent(
"""\
scala_sources()
deploy_jar(
name="test_deploy_jar",
main="org.pantsbuild.test.Hello",
dependencies=[":test"],
)
"""
),
"BUILD": DEFAULT_SCALA_LIBRARY_TARGET,
"lockfile": DEFAULT_LOCKFILE,
}
with setup_tmpdir(sources) as tmpdir:
args = [
"--backend-packages=pants.backend.experimental.scala",
f"--source-root-patterns=['{tmpdir}/src']",
"--pants-ignore=__pycache__",
f'--jvm-resolves={{"jvm-default": "{tmpdir}/lockfile"}}',
"--jvm-default-resolve=jvm-default",
"run",
f"{tmpdir}/src/org/pantsbuild/test:test_deploy_jar",
]
result = run_pants(args)
assert result.stdout.strip() == "Hello, World!"
|
hakyimlab/MetaXcan-Postprocess
|
source/deprecated/ProcessAlleleStatistics.py
|
Python
|
mit
| 9,438 | 0.004238 |
#!/usr/bin/env python
__author__ = 'heroico'
########################################################################################################################
# Gathers statistics on allele information.
# Produces a csv with the following columns:
# rsid,chromosome,wdb_ref_allele,wdb_eff_allele,legend_ref_allele,legend_eff_allele,legend_type,gwas_ref_allele,gwas_eff_allele,gwas_OR_BETA
#
# TODO: needs maintenance
import logging
import os
import metax.ThousandGenomesUtilities as ThousandGenomesUtilities
import metax.GWASUtilities as GWASUtilities
import metax.Utilities as Utilities
import metax.WeightDBUtilities as WeightDBUtilities
import metax.Logging as Logging
class AlleleStats(object):
def __init__(self, rsid, chromosome, weight_db_entry):
self.rsid = rsid
self.chromosome = chromosome
self.weight_db_ref_allele = weight_db_entry.ref_allele
self.weight_db_eff_allele = weight_db_entry.eff_allele
self.gwas_ref_allele = "NA"
self.gwas_eff_allele = "NA"
self.gwas_OR_BETA = "NA"
self.legend_type = "NA"
self.legend_ref_allele = "NA"
self.legend_eff_allele = "NA"
def getGWASEntryData(self, gwas_entry):
if gwas_entry:
self.gwas_ref_allele = gwas_entry[GWASUtilities.GWASSNPInfoLineCollector.A1]
self.gwas_eff_allele = gwas_entry[GWASUtilities.GWASSNPInfoLineCollector.A2]
self.gwas_OR_BETA = gwas_entry[GWASUtilities.GWASSNPInfoLineCollector.OR_BETA]
else:
self.gwas_ref_allele = "NA"
self.gwas_eff_allele = "NA"
self.gwas_OR_BETA = "NA"
def getLegendData(self, type, a0, a1):
self.legend_type = type if type is not None else "NA"
self.legend_ref_allele = a0 if a0 is not None else "NA"
self.legend_eff_allele = a1 if a1 is not None else "NA"
def toCSVLine(self):
tuple = (self.rsid, self.chromosome,
self.weight_db_ref_allele, self.weight_db_eff_allele,
self.legend_ref_allele, self.legend_eff_allele, self.legend_type,
                 self.gwas_ref_allele, self.gwas_eff_allele, self.gwas_OR_BETA)
line = "%s,%s,%s,%s,%s,%s,%s,%s,%s,%s\n" % tuple
return line
@classmethod
def CSVHeader(cls):
return "rsid,chromosome,wdb_ref_allele,wdb_eff_allele,legend_ref_allele,legend_eff_allele,legend_type,gwas_ref_allele,gwas_eff_allele,gwas_OR_BETA\n"
class ProcessAlleleStatistics(object):
def __init__(self, args):
self.data_folder = args.data_folder
self.weight_db = args.weight_db
self.db_path = os.path.join(self.data_folder, args.weight_db)
self.data_folder_phase = args.phase_folder
self.data_folder_gwas_dosage = args.gwas_dosage_folder
self.output_file = args.output_file
def run(self):
if os.path.exists(self.output_file):
logging.info("File %s already exists, delete it if you want it calculated again", self.output_file)
return
logging.info("Opening %s", self.weight_db)
weight_db_logic = WeightDBUtilities.WeightDBEntryLogic(self.db_path)
CHROMOSOMES = ["chr"+str(x) for x in xrange(1, 23)]
dosage_names = Utilities.dosageNamesFromFolder(self.data_folder_gwas_dosage)
legend_names = Utilities.legendNamesFromFolder(self.data_folder_phase)
findings={}
for chromosome in CHROMOSOMES:
logging.info("Processing chromosome %s", chromosome)
dosage_name = Utilities.removeNameWithPatterns(dosage_names, [chromosome+"."])
dosage = self.loadDosageFile(self.data_folder_gwas_dosage, dosage_name)
self.processDosage(chromosome, weight_db_logic, dosage, findings)
legend_name = Utilities.removeNameEndingWith(legend_names, chromosome)
self.processLegendName(chromosome, weight_db_logic, dosage, findings, legend_name)
with open(self.output_file, "w") as file:
file.write(AlleleStats.CSVHeader())
def sortByChromosome(finding):
return finding.chromosome
entries = sorted(findings.values(), key=sortByChromosome)
for finding in entries:
line = finding.toCSVLine()
file.write(line)
def loadDosageFile(self, base_path, name):
callback = GWASUtilities.GWASSNPInfoLineCollector()
dosage_loader = GWASUtilities.GWASDosageFileLoader(base_path, name, callback)
keyed_data_set = dosage_loader.load()
return keyed_data_set
def processDosage(self, chromosome, weight_db_logic, dosage, findings):
ok = 0
for rsid, dosage_entry in dosage.values_by_key.iteritems():
weight_db_entry = weight_db_logic.anEntryWithRSID(rsid)
if not weight_db_entry:
logging.log(7, "%s in dosage not in weights", rsid)
continue
a1 = dosage_entry[GWASUtilities.GWASSNPInfoLineCollector.A1]
a2 = dosage_entry[GWASUtilities.GWASSNPInfoLineCollector.A2]
OR= dosage_entry[GWASUtilities.GWASSNPInfoLineCollector.OR]
if not weight_db_entry.ref_allele == a1 or \
not weight_db_entry.eff_allele == a2 or \
OR == "NA":
logging.log(7, "%s in dosage is problematic (%s, %s)(%s, %s, %s)", rsid, weight_db_entry.ref_allele, weight_db_entry.eff_allele, a1, a2, OR)
finding = appropriateFinding(findings, rsid, chromosome, weight_db_entry)
finding.getGWASEntryData(dosage_entry)
continue
ok += 1
logging.log(8,"After processing dosage, %d snps were found to be ok", ok)
def processLegendName(self, chromosome, weight_db_logic, dosage, findings, legend_name):
class LegendCallback(object):
def __init__(self):
pass
def __call__(self, i, comps):
id = comps[ThousandGenomesUtilities.ILTF.ID]
id_components = id.split(':')
rsid = id_components[0]
weight_db_entry = weight_db_logic.anEntryWithRSID(rsid)
if not weight_db_entry:
logging.log(8, "rsid %s from legend not in db, %s", rsid, id)
return
type = comps[ThousandGenomesUtilities.ILTF.TYPE]
a0 = comps[ThousandGenomesUtilities.ILTF.A0]
a1 = comps[ThousandGenomesUtilities.ILTF.A1]
if rsid in findings:
finding = findings[rsid]
finding.getLegendData(type, a0, a1)
move_on = True
if not type == "Biallelic_SNP":
logging.log(8, "%s %s Not biallelic: %s", chromosome, id, type)
move_on = False
else:
if (a0 == 'T' and a1 == 'A') or \
(a0 == 'A' and a1 == 'T') or \
(a0 == 'C' and a1 == 'G') or \
(a0 == 'G' and a1 == 'C'):
logging.log(8, "%s %s Problematic: %s, %s", chromosome, id, a0, a1)
move_on = False
if not weight_db_entry.ref_allele == a0 or \
not weight_db_entry.eff_allele == a1:
logging.log(8, "%s %s Different alleles %s %s", chromosome, id, a0, a1)
move_on = False
if not move_on:
finding = appropriateFinding(findings, rsid, chromosome, weight_db_entry)
finding.getLegendData(type,a0,a1)
dosage_entry = None
if rsid in dosage.values_by_key:
dosage_entry = dosage.values_by_key[rsid]
finding.getGWASEntryData(dosage_entry)
callback = LegendCallback()
loader = ThousandGenomesUtilities.LEGENDLoader(self.data_folder_phase, legend_name)
loader.iterateOverFileLegends(callback)
def appropriateFinding(findings, rsid, chromosome, weight_db_entry):
finding = None
    if rsid in findings:
        finding = findings[rsid]
    else:
        finding = AlleleStats(rsid, chromosome, weight_db_entry)
        findings[rsid] = finding
    return finding
|
viniciusfk9/LearningFlask
|
votr/models.py
|
Python
|
gpl-3.0
| 3,878 | 0.001031 |
import uuid
from flask_sqlalchemy import SQLAlchemy
from sqlalchemy import select, func
from sqlalchemy.ext.hybrid import hybrid_property
db = SQLAlchemy()
# Base model that for other models to inherit from
class Base(db.Model):
__abstract__ = True
id = db.Column(db.Integer, primary_key=True, autoincrement=True)
date_created = db.Column(db.DateTime, default=db.func.current_timestamp())
date_modified = db.Column(db.DateTime, default=db.func.current_timestamp(),
onupdate=db.func.current_timestamp())
# Model to store user details
class Users(Base):
email = db.Column(db.String(100), unique=True)
username = db.Column(db.String(50), unique=True)
password = db.Column(db.String(300)) # incase password hash becomes too long
# Model for poll topics
class Topics(Base):
title = db.Column(db.String(500))
status = db.Column(db.Boolean, default=1) # to mark poll as open or closed
create_uid = db.Column(db.ForeignKey('users.id'))
close_date = db.Column(db.DateTime)
created_by = db.relationship('Users', foreign_keys=[create_uid],
backref=db.backref('user_polls',
lazy='dynamic'))
# user friendly way to display the object
def __repr__(self):
return self.title
# returns dictionary that can easily be jsonified
def to_json(self):
return {
'title': self.title,
'options': [{'name': option.option.name,
'vote_count': option.vote_count}
for option in self.options.all()],
'close_date': self.close_date,
'status': self.status,
'total_vote_count': self.total_vote_count
}
@hybrid_property
def total_vote_count(self, total=0):
for option in self.options.all():
total += option.vote_count
return total
@total_vote_count.expression
def total_vote_count(cls):
return select([func.sum(Polls.vote_count)]).where(
Polls.topic_id == cls.id)
# Model for poll options
class Options(Base):
""" Model for poll options """
name = db.Column(db.String(200), unique=True)
def __repr__(self):
return self.name
def to_json(self):
return {
'id': uuid.uuid4(), # Generates a random uuid
'name': self.name
}
# Polls model to connect topics and options together
class Polls(Base):
# Columns declaration
topic_id = db.Column(db.Integer, db.ForeignKey('topics.id'))
    option_id = db.Column(db.Integer, db.ForeignKey('options.id'))
vote_count = db.Column(db.Integer, default=0)
    # Relationship declaration (makes it easier for us to access the polls model
# from the other models it's related to)
topic = db.relationship('Topics', foreign_keys=[topic_id],
backref=db.backref('options', lazy='dynamic'))
option = db.relationship('Options', foreign_keys=[option_id])
def __repr__(self):
# a user friendly way to view our objects in the terminal
return self.option.name
class Users(Base):
""" Model to store user details """
email = db.Column(db.String(100), unique=True)
username = db.Column(db.String(50), unique=True)
password = db.Column(db.String(200))
def __repr__(self):
return self.username
class UserPolls(Base):
topic_id = db.Column(db.Integer, db.ForeignKey('topics.id'))
user_id = db.Column(db.Integer, db.ForeignKey('users.id'))
topics = db.relationship('Topics', foreign_keys=[topic_id],
backref=db.backref('voted_on_by', lazy='dynamic'))
users = db.relationship('Users', foreign_keys=[user_id],
backref=db.backref('voted_on', lazy='dynamic'))
|
ledatelescope/bifrost
|
test/test_gunpack.py
|
Python
|
bsd-3-clause
| 4,872 | 0.007594 |
# Copyright (c) 2016, The Bifrost Authors. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of The Bifrost Authors nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import unittest
import numpy as np
import bifrost as bf
import bifrost.unpack
class UnpackTest(unittest.TestCase):
    def run_unpack_to_ci8_test(self, iarray):
oarray = bf.ndarray(shape=iarray.shape, dtype='ci8', space='cuda')
oarray_known = bf.ndarray([[(0, 1), (2, 3)],
[(4, 5), (6, 7)],
[(-8, -7), (-6, -5)]],
dtype='ci8')
bf.unpack.unpack(iarray.copy(space='cuda'), oarray)
oarray = oarray.copy(space='system')
np.testing.assert_equal(oarray, oarray_known)
def test_ci4_to_ci8(self):
iarray = bf.ndarray([[(0x10,),(0x32,)],
[(0x54,),(0x76,)],
[(0x98,),(0xBA,)]],
dtype='ci4')
self.run_unpack_to_ci8_test(iarray)
def test_ci4_to_ci8_byteswap(self):
iarray = bf.ndarray([[(0x01,),(0x23,)],
[(0x45,),(0x67,)],
[(0x89,),(0xAB,)]],
dtype='ci4')
self.run_unpack_to_ci8_test(iarray.byteswap())
def test_ci4_to_ci8_conjugate(self):
iarray = bf.ndarray([[(0xF0,),(0xD2,)],
[(0xB4,),(0x96,)],
[(0x78,),(0x5A,)]],
dtype='ci4')
self.run_unpack_to_ci8_test(iarray.conj())
def test_ci4_to_ci8_byteswap_conjugate(self):
iarray = bf.ndarray([[(0x0F,),(0x2D,)],
[(0x4B,),(0x69,)],
[(0x87,),(0xA5,)]],
dtype='ci4')
self.run_unpack_to_ci8_test(iarray.byteswap().conj())
def run_unpack_to_cf32_test(self, iarray):
oarray = bf.ndarray(shape=iarray.shape, dtype='cf32', space='cuda')
oarray_known = bf.ndarray([[ 0+1j, 2+3j],
[ 4+5j, 6+7j],
[-8-7j, -6-5j]],
dtype='cf32')
bf.unpack.unpack(iarray.copy(space='cuda'), oarray)
oarray = oarray.copy(space='system')
np.testing.assert_equal(oarray, oarray_known)
def test_ci4_to_cf32(self):
iarray = bf.ndarray([[(0x10,),(0x32,)],
[(0x54,),(0x76,)],
[(0x98,),(0xBA,)]],
dtype='ci4')
self.run_unpack_to_cf32_test(iarray)
def test_ci4_to_cf32_byteswap(self):
iarray = bf.ndarray([[(0x01,),(0x23,)],
[(0x45,),(0x67,)],
[(0x89,),(0xAB,)]],
dtype='ci4')
self.run_unpack_to_cf32_test(iarray.byteswap())
def test_ci4_to_cf32_conjugate(self):
iarray = bf.ndarray([[(0xF0,),(0xD2,)],
[(0xB4,),(0x96,)],
[(0x78,),(0x5A,)]],
dtype='ci4')
self.run_unpack_to_cf32_test(iarray.conj())
def test_ci4_to_cf32_byteswap_conjugate(self):
iarray = bf.ndarray([[(0x0F,),(0x2D,)],
[(0x4B,),(0x69,)],
[(0x87,),(0xA5,)]],
dtype='ci4')
self.run_unpack_to_cf32_test(iarray.byteswap().conj())
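# Note on the packed ci4 test data above (inferred from the expected outputs,
# not from separate documentation): each byte carries two signed 4-bit values,
# real part in the low nibble and imaginary part in the high nibble, so
# 0x10 -> 0+1j and 0x98 -> -8-7j; the byteswap() and conj() variants exercise
# the swapped-nibble and negated-imaginary encodings respectively.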
|
RCOS-Grading-Server/HWserver
|
bin/make_generated_output.py
|
Python
|
bsd-3-clause
| 2,717 | 0.009201 |
#!/usr/bin/env python3
"""
# USAGE
# make_generated_output.py <path to config file for gradeable> <assignment> <semester> <course>
"""
import argparse
import json
import os
from submitty_utils import dateutils
import sys
SUBMITTY_DATA_DIR = "/var/local/submitty"
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument("config_file_path")
parser.add_argument("assignment")
parser.add_argument("semester")
parser.add_argument("course")
return parser.parse_args()
def main():
args = parse_args()
complete_config_json_path = os.path.join(SUBMITTY_DATA_DIR,
'courses',
args.semester,
args.course,
'config',
'complete_config',
'complete_config_' + args.assignment + '.json')
if os.path.isfile(complete_config_json_path):
with open(complete_config_json_path,'r', encoding='utf-8') as infile:
config_file=json.load(infile)
else:
sys.exit(1)
required_capabilities = config_file.get('required_capabilities','default')
testcases = config_file.get('testcases',[])
graded_file = {
"semester": args.semester,
"course": args.course,
"gradeable": args.assignment,
"requ
|
ired_capabilities": required_capabilities,
"queue_time": dateutils.write_submitty_date(milliseconds=True),
"generate_output": True,
"max_possible_grading_time" : -1,
"who" : "build",
"regrade" : False,
}
should_generated_output = False
for testcase in testcases:
input_generation_commands = testcase.get('input_generation_commands',[])
solution_containers = testcase.get('solution_containers',[])
should_generate_solution = False
for solution_container in solution_containers:
if len(solution_container["commands"]) != 0 :
should_generate_solution = True
break
if should_generate_solution and not input_generation_commands:
path_grading_file = os.path.join(SUBMITTY_DATA_DIR, "to_be_graded_queue", "__".join([args.semester, args.course, args.assignment]))
if os.path.isfile(path_grading_file):
os.remove(path_grading_file)
with open(path_grading_file, 'w') as grading_file:
json.dump(graded_file, grading_file,sort_keys=True,indent=4)
print("Starting to build generated output")
break
if __name__ == "__main__":
main()
|
MassimoLauria/cnfgen
|
cnfgen/formula/linear.py
|
Python
|
gpl-3.0
| 7,042 | 0.000142 |
#!/usr/bin/env python
# -*- coding:utf-8 -*-
"""CNF formulas type with support of linear forms
This CNF formula type supports
- linear equations mod 2
- integer linear inequalities on literals (no coefficients)
for example 'atmost k'
Copyright (C) 2021 Massimo Lauria <lauria.massimo@gmail.com>
https://github.com/MassimoLauria/cnfgen.git
"""
from functools import reduce
from operator import mul
from itertools import combinations
from itertools import product
from inspect import isgenerator
from cnfgen.formula.basecnf import BaseCNF
class CNFLinear(BaseCNF):
"""CNF with linear constraints"""
def __init__(self, clauses=None, description=None):
BaseCNF.__init__(self, clauses=clauses, description=description)
###
# Various utility function for CNFs
###
def add_parity(self, lits, constant, check=True):
"""Adds the CNF encoding of a parity constraint
E.g. X1 + X2 + X3 = 1 (mod 2) is encoded as
( X1 v X2 v X3)
(~X1 v ~X2 v X3)
(~X1 v X2 v ~X3)
( X1 v ~X2 v ~X3)
Parameters
----------
variables : array-like
literals
constant : {0,1}
the constant of the linear equation
check : bool
check that the literals are valid and update the variable count
Returns
-------
None
Examples
--------
>>> C=CNFLinear()
>>> C.add_parity([-1,2],1)
>>> list(C)
[[-1, 2], [1, -2]]
>>> C=CNFLinear()
>>> C.add_parity([-1,2],0)
>>> list(C)
[[-1, -2], [1, 2]]
"""
if isgenerator(lits):
lits = list(lits)
if check:
self._check_and_update(lits)
desired_sign = 1 if constant == 1 else -1
for signs in product([1, -1], repeat=len(lits)):
# Save only the clauses with the right polarity
parity = reduce(mul, signs, 1)
if parity == desired_sign:
self.add_clause([lit*sign for lit, sign in zip(lits, signs)],
check=False)
def add_linear(self, lits, op, constant, check=True):
"""Add a linear constraint to the formula
Encodes an equality or an inequality constraint on literals (no
coeffcients) as clauses.
Parameters
----------
lits : array-like
literals
op: str
one among '<=', ">=", '<', '>', '==', '!='
constant : integer
the constant of the linear equation
check : bool
check that the literals are valid and update the variable count
Returns
-------
None
Examples
--------
>>> c = CNFLinear()
>>> c.add_linear([-1,2,-3],'>=',1)
>>> list(c)
[[-1, 2, -3]]
>>> c = CNFLinear()
>>> c.add_linear([-1,2,-3],'>=',3)
>>> list(c)
[[-1], [2], [-3]]
>>> c = CNFLinear()
>>> c.add_linear([-1,2,-3],'<',2)
>>> list(c)
[[1, -2], [1, 3], [-2, 3]]
>>> c = CNFLinear()
>>> c.add_linear([1,2,3],'<=',-1)
>>> list(c)
[[]]
>>> c = CNFLinear()
>>> c.add_linear([1,2,3],'<=',10)
>>> list(c)
[]
"""
operators = ['<=', ">=", '<', '>', '==', '!=']
if op not in operators:
raise ValueError('Invalid operator, only {} allowed'.
format(", ".join(operators)))
if isgenerator(lits):
lits = list(lits)
if check:
self._check_and_update(lits)
# We fist manage the case of !=
if op == "!=":
n = len(lits)
if constant < 0 or constant > n:
return
for flips in combinations(range(n), constant):
for i in flips:
lits[i] *= -1
self.add_clause(lits, check=False)
for i in flips:
lits[i] *= -1
return
if isgenerator(lits) and op != '<=':
lits = list(lits)
# We reduce to the case of >=
if op == "==":
self.add_linear(lits, '<=', constant, check=False)
self.add_linear(lits, '>=', constant, check=False)
return
elif op == "<":
self.add_linear(lits, '<=', constant-1, check=False)
return
elif op == ">":
self.add_linear(lits, '>=', constant+1, check=False)
return
elif op == "<=":
negated = [-lit for lit in lits]
self.add_linear(negated, '>=', len(negated) - constant, check=False)
return
# Tautologies and invalid inequalities
if constant <= 0:
return
if constant > len(lits):
self.add_clause([], check=False)
return
k = len(lits) - constant + 1
for clause in combinations(lits, k):
self.add_clause(clause, check=False)
def add_loose_majority(self, lits, check=True):
"""Clauses encoding a \"at least half\" constraint
Parameters
----------
lists : iterable(int)
literals in the constraint
check : bool
check that the literals are valid and update the variable count
"""
if isgenerator(lits):
lits = list(lits)
threshold = ((len(lits) + 1) // 2)
return self.add_linear(lits, '>=', threshold, check=check)
    def add_loose_minority(self, lits, check=True):
"""Clauses encoding a \"at most half\" constraint
Parameters
----------
lists : iterable(int)
literals in the constraint
check : bool
check that the literals are valid and update the variable count
"""
if isgenerator(lits):
lits = list(lits)
threshold = len(lits) // 2
return self.add_linear(lits, '<=', threshold, check=check)
def add_strict_majority(self, lits, check=True):
"""Clauses encoding a "strict majority" constraint
Parameters
----------
lists : iterable(int)
literals in the constraint
check : bool
check that the literals are valid and update the variable count
"""
if isgenerator(lits):
lits = list(lits)
threshold = len(lits)//2 + 1
return self.add_linear(lits, '>=', threshold, check=check)
def add_strict_minority(self, lits, check=True):
"""Clauses encoding a \"at most half\" constraint
Parameters
----------
lists : iterable(int)
literals in the constraint
check : bool
check that the literals are valid and update the variable count
"""
if isgenerator(lits):
lits = list(lits)
threshold = (len(lits) - 1) // 2
return self.add_linear(lits, '<=', threshold, check=check)
|
michaelcontento/whirlwind
|
whirlwind/view/paginator.py
|
Python
|
mit
| 1,921 | 0.008329 |
class Paginator(object):
def __init__(self, collection, page_number=0, limit=20, total=-1):
self.collection = collection
self.page_number = int(page_number)
self.limit = int(limit)
self.total = int(total)
@property
def page(self):
start = self.page_number * self.limit
end = start + self.limit
try:
return self.collection[start:end]
except Exception as detail:
print detail
return []
@property
def current_page(self):
return self.page_number + 1
@property
def page_count(self):
if self.total != -1:
pages = abs(self.total / self.limit)+1
return pages
else:
return None
@property
def has_previous(self):
return True if (self.page_number > 0) else False
@property
def has_next(self):
return True if (len(self.page) == self.limit) else False
@property
def previous_page(self):
if self.has_previous:
return self.page_number-1
@property
def next_page(self):
if self.has_next:
return self.page_number+1
def previous_page_link(self, request):
return self.__build_url(self.previous_page, request.full_url())
def next_page_link(self, request):
return self.__build_url(self.next_page, request.full_url())
def __build_url(self, page_num, url):
import re
#check if there is a query string
if url.find('?') != -1:
if re.search(r'page=\d',url) != None:
page_str = "&page=%d" % page_num
return re.sub(r'&page=\d+', page_str, url)
else:
return "%s&page=%d" % (url, page_num)
else:
return "%s?page=%d" % (url, page_num)
|
iand/pynappl
|
old/sparql_select_result.py
|
Python
|
gpl-2.0
| 1,296 | 0.026235 |
"""sparql_select_result.py
Data structure for storing the results of SPARQL SELECT queries"""
__all__ = ["SPARQLSelectResult"]
from xml.etree import ElementTree as et
class SPARQLSelectResult(object):
def __init__(self):
self.variables = []
self.results = []
def parse(self, s):
tree = et.fromstring(s)
head = tree.find("{http://www.w3.org/2005/sparql-results#}head")
self.variables = [x.get("name") for x in head.findall("{http://www.w3.org/2005/sparql-results#}variable")]
results = tree.find("{http://www.w3.org/2005/sparql-results#}results").findall("{http://www.w3.org/2005/sparql-results#}result")
self.results = []
for result in results:
d = {}
bindings = result.findall("{http://www.w3.org/2005/sparql-results#}binding")
for binding in bindings:
uri = binding.find("{http://www.w3.org/2005/sparql-results#}uri")
if uri is None:
literal = binding.find("{http://www.w3.org/2005/sparql-results#}literal")
if literal is None:
raise InvalidSPARQLSelectResultSyntax("Neither URI or Literal were found")
else:
d[binding.get("name")] = (literal.text, None, None)
else:
d[binding.get("name")] = uri.text
self.results.append(d)
def get_variables(self):
return self.variables
def get_results(self):
return self.results
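# Minimal parsing sketch (hypothetical data, not from the original module):
if __name__ == '__main__':
    example = (
        '<sparql xmlns="http://www.w3.org/2005/sparql-results#">'
        '<head><variable name="s"/></head>'
        '<results><result>'
        '<binding name="s"><uri>http://example.org/a</uri></binding>'
        '</result></results>'
        '</sparql>'
    )
    r = SPARQLSelectResult()
    r.parse(example)
    print(r.get_variables())  # ['s']
    print(r.get_results())    # [{'s': 'http://example.org/a'}]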
|
GRUPO-ES2-GJLRT/XADREZ_ES2
|
src/cython/importer.py
|
Python
|
mit
| 665 | 0 |
import platform
import importlib
from utils.pyx_replace import read_file
name = 'cython._genpyx_' + '_'.join(platform.architecture()) + '_chess0x88'
try:
chess = importlib.import_module(name)
checksum = chess.CHECKSUM
expected_checksum = read_file("chess0x88.py", [], "cython")
if checksum != expected_checksum:
print("Invalid checksum.")
raise Exception('checksum')
Board = chess.Board
Move = chess.Move
move_key = chess.move_key
except Exception as e:
print("Python Fallback. You m
|
ay want to run on cython folder:")
print("python setup.py build_ext --inplace")
    from chess0x88 import Board, Move, move_key
|
MrReN/django-oscar
|
oscar/templatetags/history_tags.py
|
Python
|
bsd-3-clause
| 1,884 | 0.000531 |
import six
from django import template
from oscar.core.loading import get_model
from django.utils.translation import ugettext_lazy as _
from django.core.urlresolvers import resolve, Resolver404
from oscar.apps.customer import history
from oscar.core.compat import urlparse
Site = get_model('sites', 'Site')
register = template.Library()
@register.inclusion_tag('customer/history/recently_viewed_products.html',
takes_context=True)
def recently_viewed_products(context):
"""
Inclusion tag listing the most recently viewed products
"""
request = context['request']
products = history.get(request)
return {'products': products,
'request': request}
@register.assignment_tag(takes_context=True)
def get_back_button(context):
"""
Show back button, custom title available for different urls, for
example 'Back to search results', no back button if user came from other
site
"""
request = context.get('request', None)
if not request:
raise Exception('Cannot get request from context')
referrer = request.META.get('HTTP_REFERER', None)
if not referrer:
return None
try:
url = urlparse.urlparse(referrer)
except:
return None
if request.get_host() != url.netloc:
try:
Site.objects.get(domain=url.netloc)
except Site.DoesNotExist:
# Came from somewhere else, don't show back button:
return None
try:
match = resolve(url.path)
except Resolver404:
return None
# This dict can be extended to link back to other browsing pages
titles = {
'search:search': _('Back to search results'),
}
title = titles.get(match.view_name, None)
if title is None:
return None
return {'url': referrer, 'title': six.text_type(title), 'match': match}
|
plin1112/pysimm
|
tests/test.py
|
Python
|
mit
| 1,809 | 0.012714 |
from __future__ import print_function
import os
import sys
import imp
import shutil
from glob import glob
from pprint import pprint
from cStringIO import StringIO
class Capturing(list):
def __enter__(self):
self._stdout = sys.stdout
sys.stdout = self._stringio = StringIO()
return self
def __exit__(self, *args):
self.extend(self._stringio.getvalue().splitlines())
del self._stringio # free up some memory
sys.stdout = self._stdout
if os.path.isdir('tmp'):
shutil.rmtree('tmp')
os.mkdir('tmp')
else:
os.mkdir('tmp')
os.chdir('tmp')
print('testing {:.<64}'.format('imports'), end='')
import pysimm
from pysimm import cli
from pysimm import amber
from pysimm import utils
from pysimm import calc
from pysimm import system
from pysimm import lmps
from pysimm import forcefield
from pysimm import apps
from pysimm import models
from pysimm.forcefield import *
from pysimm.apps import *
from pysimm.models.monomers.dreiding import *
from pysimm.models.monomers.gaff import *
from pysimm.models.monomers.gaff2 import *
print('passed')
example_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), os.pardir, os.pardir, 'Examples')
examples = glob(os.path.join(example_dir, '*'))
scripts = []
for example in examples:
scripts += glob(os.path.join(example, '*.py'))
scripts += glob(os.path.join(example, '*', '*.py'))
output = []
for script in sorted(scripts, key=lambda x: x.split('Examples/')[1]):
print('testing Examples/{:.<55}'.format(script.split('Examples/')[1]), end='')
foo = imp.load_source('create', script)
try:
with Capturing(output) as output:
foo.run(test=True)
print('passed')
except:
print('FAILED')
os.chdir('../')
shutil.rmtree('tmp')
|
M3TIOR/Tactix
|
Tactix.py
|
Python
|
mit
| 2,700 | 0.047778 |
#!/usr/bin/python
#This is the old version of Tactix befor I decided to make the C version.
#I hope you all like it.
#I wanted to include it to make sure even if you only have a monochrome display you can still play!
# * HORRAY FOR GAMERS!
#I may very well have modded this a bit too, just so it feels more like the full version.
import os #needed for screen clear call.
#I had to grab this off of stack overflow...
#WHAT?! I NEVER SAID I WAS THE BEST PROGRAMMER ON THE PLANNET!
def getTerminalSize():
import os
env = os.environ
def ioctl_GWINSZ(fd):
try:
import fcntl, termios, struct, os
cr = struct.unpack('hh', fcntl.ioctl(fd, termios.TIOCGWINSZ,'1234'))
except:
return
return cr
cr = ioctl_GWINSZ(0) or ioctl_GWINSZ(1) or ioctl_GWINSZ(2)
if not cr:
try:
fd = os.open(os.ctermid(), os.O_RDONLY)
cr = ioctl_GWINSZ(fd)
os.close(fd)
except:
pass
if not cr:
cr = (env.get('LINES', 25), env.get('COLUMNS', 80))
### Use get(key[, default]) instead of a try/catch
#try:
# cr = (env['LINES'], env['COLUMNS'])
#except:
# cr = (25, 80)
return int(cr[1]), int(cr[0])
###MY_CODE \/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/
#initiate our character size data so we can calculate how large our screen can be.
#this kindof sucks because doing screen size calculations this way means the user can't
#resize mid gameplay without screwing it up...
class window:
edge='+'
line_x='-'
line_y='|'
def __init__(self,width,height):
self.width=width
self.height=height
self.data=[None]*(width*height)
#content padding (by default it's one because we don't want to overwrite our border.)
self.c=1
#fill border.
        #fill border.
        for l in range(len(self.data)):
            if l == 0 or l == width or l == (height*width) or l == ((height*width)-(width+1)):
                self.data[l] = self.edge
        #keep working here...
    def draw(self, xoff, yoff, width, height, data):
        exit = width*height
        for l in range(exit):
            if l >= len(data) or not data[l]:
                break
            self.data[((self.c+xoff+l)*(self.c+yoff))] = data[l]
    #outputs everything to screen.
    def flush(self):
        #... stuff...
        pass
global character_field
character_field = getTerminalSize()
#text orientation.
#def t_orient(text,x,y,maxx,maxy):
#quadrants work as such
#
# +-----------------+
# | | | |
# | 0,0 | 1,0 | 2,0 |
# +-----------------+
# | | | |
# | 0,1 | 1,1 | 2,1 |
# +-----------------+
# | | | |
# | 0,2 | 1,2 | 2,2 |
# +-----------------+
class game:
main_menu = window(character_field[0],character_field[1]);
def __init__(self):
main_menu
#debug section.
print(game.main_menu.width)
print(game.main_menu.height)
#eof, cause Imma stup.
|
iModels/ffci
|
moleculeci/apps.py
|
Python
|
mit
| 215 | 0 |
from importlib import import_module
from django.apps import AppConfig as BaseAppConfig
class AppConfig(BaseAppConfig):
name = "moleculeci"
def ready(self):
import_module("moleculeci.rec
|
eivers")
|
ngageoint/gamification-server
|
gamification/core/models.py
|
Python
|
mit
| 7,022 | 0.003987 |
# -*- coding: utf-8 -*-
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, as long as
# any reuse or further development of the software attributes the
# National Geospatial-Intelligence Agency (NGA) authorship as follows:
# 'This software (gamification-server)
# is provided to the public as a courtesy of the Nationa
|
l
# Geospatial-Intelligence Agency.
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
|
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import json
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from django.utils.datastructures import SortedDict
from django.db.models.signals import post_save
from django.db import models
from gamification.badges.models import ProjectBadge, ProjectBadgeToUser
from jsonfield import JSONField
from mptt.models import MPTTModel, TreeForeignKey
TRUE_FALSE = [(0, 'False'), (1, 'True')]
class ProjectBase(models.Model):
"""
A generic model for GeoQ objects.
"""
active = models.BooleanField(default=True, help_text='If checked, this project will be listed in the active list.')
created_at = models.DateTimeField(auto_now_add=True)
name = models.CharField(max_length=200, help_text='Name of the project.')
description = models.TextField(help_text='Details of this project that will be listed on the viewing page.')
updated_at = models.DateTimeField(auto_now=True)
url = models.TextField(help_text='Project Information URL', null=True)
def __unicode__(self):
return self.name
class Meta:
abstract = True
ordering = ('-created_at',)
class Team(MPTTModel):
name = models.CharField(max_length=50)
description = models.TextField(null=True, blank=True)
members = models.ManyToManyField(User, null=True, blank=True)
parent = TreeForeignKey('self', null=True, blank=True, related_name='children')
order = models.IntegerField(default=0, null=True, blank=True, help_text='Optionally specify the order teams should appear. Lower numbers appear sooner. By default, teams appear in the order they were created.')
date_created = models.DateTimeField(auto_now_add=True)
background_color = models.CharField(max_length=50, null=True, blank=True, help_text='Optional - Color to use for background of all team badges')
icon = models.ImageField(upload_to='badge_images', null=True, blank=True, help_text='Optional - Image to show next to team names')
def __str__(self):
return "%s (%s)" % (self.name, str(len(self.members.all())))
def get_all_users(self, include_self=True):
u = []
if include_self:
u += self.members.all()
for team in self.get_descendants():
u += team.members.all()
return u
class Meta:
ordering = ['-order', '-date_created', 'id']
class MPTTMeta:
order_insertion_by = ['name']
class Project(ProjectBase):
"""
Top-level organizational object.
"""
THEMES = (
("", "None"),
("camping", "Camping"),
("camping2", "Camping Theme 2"),
("map", "Geospatial"),
)
private = models.BooleanField(default=False, help_text='If checked, hide this project from the list of projects and public badge APIs.')
supervisors = models.ManyToManyField(User, blank=True, null=True, related_name="supervisors", help_text='Anyone other than site administrators that can add badges and update the site')
teams = models.ManyToManyField(Team, blank=True, null=True)
viewing_pass_phrase = models.CharField(max_length=200, null=True, blank=True, help_text='Phrase that must be entered to view this page.')
project_closing_date = models.DateTimeField(null=True, blank=True, help_text='Date that project "closes" with countdown shown on project page. Badges can still be added after this.')
visual_theme = models.CharField(max_length=20, default="none", choices=THEMES, help_text='Visual Theme used to style the project page')
background_image = models.ImageField(upload_to='badge_images', null=True, blank=True, help_text='Optional - Override theme background with this image')
properties = JSONField(null=True, blank=True, help_text='JSON key/value pairs associated with this object, e.g. {"badges_mode":"blue"}')
query_token = models.CharField(max_length=200, null=True, blank=True, help_text='Token that must be entered by any server requesting data - not implemented yet.')
allowed_api_hosts = models.TextField(null=True, blank=True, help_text='Comma-separated list of hosts (IPs or Hostnames) that can access this project via data requests - not implemented yet')
@property
def user_count(self):
return User.objects.filter(projectbadgetouser__projectbadge__project=self).distinct().count()
@property
def badge_count(self):
return ProjectBadgeToUser.objects.filter(projectbadge__project=self).count()
def get_absolute_url(self):
return reverse('project-list', args=[self.name])
class Points(models.Model):
user = models.ForeignKey(User)
projectbadge = models.ForeignKey(ProjectBadge)
value = models.IntegerField(default=0)
date_awarded = models.DateTimeField('date awarded',auto_now=True)
description = models.CharField(max_length=200)
def get_absolute_url(self):
return reverse('points-list', args=[self.id])
class Meta:
verbose_name_plural = "Points"
class UserProfile(models.Model):
""" from http://stackoverflow.com/questions/44109/extending-the-user-model-with-custom-fields-in-django; this is one mechanism for adding extra details (currently score for badges) to the User model """
defaultScore = 1
user = models.OneToOneField(User)
score = models.IntegerField(default=defaultScore)
def __str__(self):
return "%s's profile" % self.user
def create_user_profile(sender, instance, created, **kwargs):
if created:
profile, created = UserProfile.objects.get_or_create(user=instance)
post_save.connect(create_user_profile, sender=User)
import sys
if not 'syncdb' in sys.argv[1:2] and not 'migrate' in sys.argv[1:2]:
from meta_badges import *
|
Talkdesk/graphite-web
|
webapp/graphite/browser/views.py
|
Python
|
apache-2.0
| 7,137 | 0.026482 |
"""Copyright 2008 Orbitz WorldWide
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License."""
import re
from django.shortcuts import render_to_response
from django.http import HttpResponse
from django.conf import settings
from graphite.account.models import Profile
from graphite.util import getProfile, getProfileByUsername, defaultUser, json
from graphite.logger import log
try:
from hashlib import md5
except ImportError:
from md5 import md5
try:
import cPickle as pickle
except ImportError:
import pickle
def header(request):
"View for the header frame of the browser UI"
context = {}
context['user'] = request.user
context['profile'] = getProfile(request)
context['documentation_url'] = settings.DOCUMENTATION_URL
context['login_url'] = settings.LOGIN_URL
return render_to_response("browserHeader.html", context)
def browser(request):
"View for the top-level frame of the browser UI"
context = {
'queryString' : request.GET.urlencode(),
'target' : request.GET.get('target')
}
if context['queryString']:
context['queryString'] = context['queryString'].replace('#','%23')
if context['target']:
context['target'] = context['target'].replace('#','%23') #js libs terminate a querystring on #
return render_to_response("browser.html", context)
def search(request):
query = request.POST.get('query')
if not query:
return HttpResponse("")
patterns = query.split()
regexes = [re.compile(p,re.I) for p in patterns]
def matches(s):
for regex in regexes:
if regex.search(s):
return True
return False
results = []
index_file = open(settings.INDEX_FILE)
for line in index_file:
if matches(line):
results.append( line.strip() )
if len(results) >= 100:
break
index_file.close()
result_string = ','.join(results)
return HttpResponse(result_string, mimetype='text/plain')
def myGraphLookup(request):
"View for My Graphs navigation"
profile = getProfile(request,allowDefault=False)
assert profile
nodes = []
leafNode = {
'allowChildren' : 0,
'expandable' : 0,
'leaf' : 1,
}
branchNode = {
'allowChildren' : 1,
'expandable' : 1,
'leaf' : 0,
}
try:
path = str( request.GET['path'] )
if path:
if path.endswith('.'):
userpath_prefix = path
else:
userpath_prefix = path + '.'
else:
userpath_prefix = ""
matches = [ graph for graph in profile.mygraph_set.all().order_by('name') if graph.name.startswith(userpath_prefix) ]
log.info( "myGraphLookup: username=%s, path=%s, userpath_prefix=%s, %ld graph to process" % (profile.user.username, path, userpath_prefix, len(matches)) )
branch_inserted = set()
leaf_inserted = set()
for graph in matches: #Now let's add the matching graph
isBranch = False
dotPos = graph.name.find( '.', len(userpath_prefix) )
if dotPos >= 0:
isBranch = True
name = graph.name[ len(userpath_prefix) : dotPos ]
if name in branch_inserted: continue
branch_inserted.add(name)
else:
name = graph.name[ len(userpath_prefix): ]
if name in leaf_inserted: continue
leaf_inserted.add(name)
node = {'text' : str(name) }
if isBranch:
node.update( { 'id' : str(userpath_prefix + name + '.') } )
node.update(branchNode)
else:
m = md5()
m.update(name)
node.update( { 'id' : str(userpath_prefix + m.hexdigest()), 'graphUrl' : str(graph.url) } )
node.update(leafNode)
nodes.append(node)
except:
log.exception("browser.views.myGraphLookup(): could not complete request.")
if not nodes:
no_graphs = { 'text' : "No saved graphs", 'id' : 'no-click' }
no_graphs.update(leafNode)
nodes.append(no_graphs)
return json_response(nodes, request)
def userGraphLookup(request):
"View for User Graphs navigation"
user = request.GET.get('user')
path = request.GET['path']
if user:
username = user
graphPath = path[len(username)+1:]
elif '.' in path:
username, graphPath = path.split('.', 1)
else:
username, graphPath = path, None
nodes = []
branchNode = {
'allowChildren' : 1,
'expandable' : 1,
'leaf' : 0,
}
leafNode = {
'allowChildren' : 0,
'expandable' : 0,
'leaf' : 1,
}
try:
if not username:
profiles = Profile.objects.exclude(user=defaultUser)
for profile in profiles:
if profile.mygraph_set.count():
node = {
'text' : str(profile.user.username),
'id' : str(profile.user.username)
}
node.update(branchNode)
nodes.append(node)
else:
profile = getProfileByUsername(username)
assert profile, "No profile for username '%s'" % username
if graphPath:
prefix = graphPath.rstrip('.') + '.'
else:
prefix = ''
matches = [ graph for graph in profile.mygraph_set.all().order_by('name') if graph.name.startswith(prefix) ]
inserted = set()
for graph in matches:
relativePath = graph.name[ len(prefix): ]
nodeName = relativePath.split('.')[0]
if nodeName in inserted:
continue
inserted.add(nodeName)
if '.' in relativePath: # branch
node = {
'text' : str(nodeName),
'id' : str(username + '.' + prefix + nodeName + '.'),
}
node.update(branchNode)
else: # leaf
m = md5()
m.update(nodeName)
node = {
'text' : str(nodeName ),
'id' : str(username + '.' + prefix + m.hexdigest()),
'graphUrl' : str(graph.url),
}
node.update(leafNode)
nodes.append(node)
except:
log.exception("browser.views.userLookup(): could not complete request for %s" % username)
|
if not nodes:
no_graphs = { 'text' : "No saved graphs", 'id' : 'no
|
-click' }
no_graphs.update(leafNode)
nodes.append(no_graphs)
return json_response(nodes, request)
def json_response(nodes, request=None):
if request:
jsonp = request.REQUEST.get('jsonp', False)
else:
jsonp = False
#json = str(nodes) #poor man's json encoder for simple types
json_data = json.dumps(nodes)
if jsonp:
response = HttpResponse("%s(%s)" % (jsonp, json_data),mimetype="text/javascript")
else:
response = HttpResponse(json_data,mimetype="application/json")
response['Pragma'] = 'no-cache'
response['Cache-Control'] = 'no-cache'
return response
def any(iterable): #python2.4 compatibility
for i in iterable:
if i:
return True
return False
|
mikel-egana-aranguren/SADI-Galaxy-Docker
|
galaxy-dist/doc/patch.py
|
Python
|
gpl-3.0
| 34,815 | 0.014074 |
#!/usr/bin/env python
""" Patch utility to apply unified diffs
Brute-force line-by-line non-recursive parsing
Copyright (c) 2008-2012 anatoly techtonik
Available under the terms of MIT license
Project home: http://code.google.com/p/python-patch/
$Id: patch.py 181 2012-11-23 16:03:05Z techtonik $
$HeadURL: https://python-patch.googlecode.com/svn/trunk/patch.py $
This program needs further tweaking for how we use it at Galaxy.
"""
__author__ = "anatoly techtonik <techtonik@gmail.com>"
__version__ = "1.12.11"
import copy
import logging
import re
# cStringIO doesn't support unicode in 2.5
from StringIO import StringIO
import urllib2
from os.path import exists, isfile, abspath
import os
import shutil
#------------------------------------------------
# Logging is controlled by logger named after the
# module name (e.g. 'patch' for patch.py module)
debugmode = False
logger = logging.getLogger(__name__)
debug = logger.debug
info = logger.info
warning = logger.warning
class NullHandler(logging.Handler):
""" Copied from Python 2.7 to avoid getting
`No handlers could be found for logger "patch"`
http://bugs.python.org/issue16539
"""
def handle(self, record):
pass
def emit(self, record):
pass
def createLock(self):
self.lock = None
logger.addHandler(NullHandler())
#------------------------------------------------
# Constants for Patch/PatchSet types
DIFF = PLAIN = "plain"
GIT = "git"
HG = MERCURIAL = "mercurial"
SVN = SUBVERSION = "svn"
# mixed type is only actual when PatchSet contains
# Patches of different type
MIXED = MIXED = "mixed"
#------------------------------------------------
# Helpers (these could come with Python stdlib)
# x...() function are used to work with paths in
# cross-platform manner - all paths use forward
# slashes even on Windows.
def xisabs(filename):
""" Cross-platform version of `os.path.isabs()`
Returns True if `filename` is absolute on
Linux, OS X or Windows.
"""
if filename.startswith('/'): # Linux/Unix
return True
elif filename.startswith('\\'): # Windows
return True
elif re.match(r'\w:[\\/]', filename): # Windows
return True
return False
def xnormpath(path):
""" Cross-platform version of os.path.normpath """
return os.path.normpath(path).replace(os.sep, '/')
def xstrip(filename):
""" Make relative path out of absolute by stripping
prefixes used on Linux, OS X and Windows.
This function is critical for security.
"""
while xisabs(filename):
# strip windows drive with all slashes
if re.match(r'\w:[\\/]', filename):
filename = re.sub(r'^\w+:[\\/]+', '', filename)
# strip all slashes
elif re.match(r'[\\/]', filename):
filename = re.sub(r'^[\\/]+', '', filename)
return filename
#-----------------------------------------------
# Main API functions
def fromfile(filename):
""" Parse patch file. If successful, returns
PatchSet() object. Otherwise returns False.
"""
patchset = PatchSet()
debug("reading %s" % filename)
fp = open(filename, "rb")
res = patchset.parse(fp)
fp.close()
if res == True:
return patchset
return False
def fromstring(s):
""" Parse text string and return PatchSet()
object (or False if parsing fails)
"""
ps = PatchSet( StringIO(s) )
if ps.errors == 0:
return ps
return False
def fromurl(url):
""" Parse patch from an URL, return False
if an error occured. Note that this also
can throw urlopen() exceptions.
"""
ps = PatchSet( urllib2.urlopen(url) )
if ps.errors == 0:
return ps
return False
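# A minimal usage sketch (assuming a 'fix.patch' file on disk; only names
# defined in this module are used):
#
#   ps = fromfile("fix.patch")        # PatchSet instance, or False on error
#   if ps:
#       for p in ps.items:            # each item is a Patch
#           print p.source, p.target, len(p.hunks)
#
# fromstring() works the same way for in-memory diff text, and fromurl()
# for a diff fetched over HTTP.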
# --- Utility functions ---
# [ ] reuse more universal pathsplit()
def pathstrip(path, n):
""" Strip n leading components from the given path """
pathlist = [path]
while os.path.dirname(pathlist[0]) != '':
pathlist[0:1] = os.path.split(pathlist[0])
return '/'.join(pathlist[n:])
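# For example (traced through the loop above):
#   pathstrip('a/b/c/d.txt', 2) -> 'c/d.txt'
#   pathstrip('a/b/c/d.txt', 0) -> 'a/b/c/d.txt'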
# --- /Utility function ---
class Hunk(object):
""" Parsed hunk data container (hunk starts with @@ -R +R @@) """
def __init__(self):
self.startsrc=None #: line count starts with 1
self.linessrc=None
self.starttgt=None
self.linestgt=None
self.invalid=False
self.hasplus=False # True if any "+" lines in hunk
self.hasminus=False # True if any "-" lines in hunk
self.text=[]
def originalText(self):
return("@@ -" + str(self.startsrc) +
"," + str(self.linessrc) +
" +" + str(self.starttgt) +
"," + str(self.linestgt) +
"\n" +
self.printableText())
def printableText(self):
"""Reformat text into printable text"""
# yeah, there must be a better way to do this.
printable = ""
for line in self.text:
printable += line
return printable
# def apply(self, estream):
# """ write hunk data into enumerable stream
# return strings one by one until hunk is
# over
#
# enumerable stream are tuples (lineno, line)
# where lineno starts with 0
# """
# pass
class Patch(object):
""" Patch for a single file """
def __init__(self):
self.source = None
self.target = None
self.hunks = []
self.hunkends = []
self.header = []
self.type = None
class PatchSet(object):
def __init__(self, stream=None):
# --- API accessible fields ---
# name of the PatchSet (filename or ...)
self.name = None
# patch set type - one of constants
self.type = None
# list of Patch objects
self.items = []
self.errors = 0 # fatal parsing errors
self.warnings = 0 # non-critical warnings
# --- /API ---
if stream:
self.parse(stream)
def __len__(self):
return len(self.items)
def parse(self, stream):
""" parse unified diff
return True on success
"""
lineends = dict(lf=0, crlf=0, cr=0)
nexthunkno = 0 #: even if index starts with 0 user messages number hunks from 1
p = None
hunk = None
# hunkactual variable is used to calculate hunk lines for comparison
hunkactual = dict(linessrc=None, linestgt=None)
class wrapumerate(enumerate):
"""Enumerate wrapper that uses boolean end of stream status instead of
StopIteration exception, and properties to access line information.
"""
def __init__(self, *args, **kwargs):
# we don't call parent, it is magically created by __new__ method
self._exhausted = False
self._lineno = False # after end of stream equal to the num of lines
self._line = False # will be reset to False after end of stream
def next(self):
"""Try to read the next line and return True if it is available,
False if end of stream is reached."""
if self._exhausted:
return False
try:
self._lineno, self._line = super(wrapumerate, self).next()
except StopIteration:
self._exhausted = True
self._line = False
return False
return True
@property
def is_empty(self):
return self._exhausted
@property
def line(self):
return self._line
@property
def lineno(self):
return self._lineno
# define states (possible file regions) that direct parse flow
headscan = True # start with scanning header
filenames = False # lines starting with --- and +++
hunkhead = False # @@ -R +R @@ sequence
hunkbody = False #
hunkskip = False # skipping invalid hunk mode
hunkparsed = False # state after successfully parsed hunk
# regexp to match start of hunk, used groups - 1,3,4,6
re_hunk_start = re.compile("^@@ -(\d+)(,(\d+))? \+(\d+)(,(\d+))?")
self.errors = 0
    # temp buffers for header and filenames info
header = []
srcname = None
tgtname = None
# start of main cycle
# each parsing block already has line available in fe.line
fe = wrapumerate(stream)
while fe.next():
# -- deciders: these only switch state to decide who should process
# -- line fetched at the start of this cycle
if hunk
|
mitodl/micromasters
|
dashboard/api_edx_cache.py
|
Python
|
bsd-3-clause
| 15,571 | 0.002954 |
"""
APIs that deal with the edx cached data
"""
import datetime
import logging
from collections import namedtuple
from django.db import transaction
from requests.exceptions import HTTPError
from edx_api.client import EdxApi
from backends import utils
from backends.constants import COURSEWARE_BACKEND_URL, BACKEND_EDX_ORG, BACKEND_MITX_ONLINE
from backends.exceptions import InvalidCredentialStored
from backends.utils import has_social_auth
from courses.models import CourseRun
from dashboard import models
from micromasters.utils import now_in_utc
from profiles.api import get_social_username, get_social_auth
from search import tasks
log = logging.getLogger(__name__)
UserCachedRunData = namedtuple(
'UserCachedRunData', ['edx_course_key', 'enrollment', 'certificate', 'current_grade'])
class CachedEdxUserData:
"""Represents all edX data related to a User"""
# pylint: disable=too-many-instance-attributes
def __init__(self, user, program=None):
"""
Fetches the given User's edx data and sets object properties
Args:
user (User): a User object
program (Program): an optional Program to filter on
"""
self.user = user
self.program = program
self.enrollments = models.CachedEnrollment.get_edx_data(self.user, program=self.program)
self.certificates = models.CachedCertificate.get_edx_data(self.user, program=self.program)
self.current_grades = models.CachedCurrentGrade.get_edx_data(self.user, program=self.program)
def get_run_data(self, course_id):
"""
Returns cached data for the user in a specific course run
Args:
course_id (str): a string representing the edx course key for a course run
Returns:
UserCachedRunData: a namedtuple containing the cached data for the user in the course run
"""
return UserCachedRunData(
edx_course_key=course_id,
enrollment=self.enrollments.get_enrollment_for_course(course_id),
certificate=self.certificates.get_cert(course_id),
current_grade=self.current_grades.get_current_grade(course_id),
)
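# Rough usage sketch for CachedEdxUserData (the user, program, and course id
# are hypothetical and come from the calling code):
#
#   edx_data = CachedEdxUserData(user, program=program)
#   run = edx_data.get_run_data("course-v1:MITx+6.002x+2015_T1")
#   run.enrollment, run.certificate, run.current_grade  # cached edX objects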
class CachedEdxDataApi:
"""
Class to handle the retrieval and update of the users' cached edX information
"""
ENROLLMENT = 'enrollment'
CERTIFICATE = 'certificate'
CURRENT_GRADE = 'current_grade'
ENROLLMENT_MITXONLINE = 'enrollment_mitxonline'
CURRENT_GRADE_MITXONLINE = 'current_grade_mitxonline'
# the sorting of the supported caches matters for refresh
EDX_SUPPORTED_CACHES = (ENROLLMENT, CERTIFICATE, CURRENT_GRADE,)
MITXONLINE_SUPPORTED_CACHES = (ENROLLMENT_MITXONLINE, CURRENT_GRADE_MITXONLINE,)
ALL_CACHE_TYPES = EDX_SUPPORTED_CACHES + MITXONLINE_SUPPORTED_CACHES
CACHE_TYPES_BACKEND = {
BACKEND_EDX_ORG: EDX_SUPPORTED_CACHES,
BACKEND_MITX_ONLINE: MITXONLINE_SUPPORTED_CACHES
}
CACHED_EDX_MODELS = {
ENROLLMENT: models.CachedEnrollment,
CERTIFICATE: models.CachedCertificate,
CURRENT_GRADE: models.CachedCurrentGrade,
}
CACHE_EXPIRATION_DELTAS = {
ENROLLMENT: datetime.timedelta(minutes=5),
CERTIFICATE: datetime.timedelta(hours=6),
CURRENT_GRADE: datetime.timedelta(hours=1),
ENROLLMENT_MITXONLINE: datetime.timedelta(minutes=5),
CURRENT_GRADE_MITXONLINE: datetime.timedelta(hours=1),
}
@classmethod
def get_cached_edx_data(cls, user, cache_type):
"""
Helper function to return cached data.
Args:
user (django.contrib.auth.models.User): A user
cache_type (str): a string representing one of the cached data types
Returns:
Enrollments or Certificates or CurrentGrades
"""
if cache_type not in cls.ALL_CACHE_TYPES:
raise ValueError("{} is an unsupported cache type".format(cache_type))
return cls.CACHED_EDX_MODELS[cache_type].get_edx_data(user)
@classmethod
def update_cache_last_access(cls, user, cache_type, timestamp=None):
"""
Updates the UserCacheRefreshTime model timestamp for the provided cache type
Args:
user (django.contrib.auth.models.User): A user
cache_type (str): a string representing one of the cached data types
timestamp (datetime.datetime): a timestamp
Returns:
None
"""
if cache_type not in cls.ALL_CACHE_TYPES:
raise ValueError("{} is an unsupported cache type".format(cache_type))
if timestamp is None:
timestamp = now_in_utc()
updated_values = {
'user': user,
cache_type: timestamp,
}
models.UserCacheRefreshTime.objects.update_or_create(user=user, defaults=updated_values)
@classmethod
def is_cache_fresh(cls, user, cache_type):
"""
Checks if the specified cache type is fresh.
Args:
user (django.contrib.auth.models.User): A user
cache_type (str): a string representing one of the cached data types
Returns:
bool
"""
if cache_type not in cls.ALL_CACHE_TYPES:
raise ValueError("{} is an unsupported cache type".format(cache_type))
try:
cache_timestamps = models.UserCacheRefreshTime.objects.get(user=user)
except models.UserCacheRefreshTime.DoesNotExist:
return False
cache_timestamp = getattr(cache_timestamps, cache_type)
return cache_timestamp is not None and cache_timestamp > (
now_in_utc() - cls.CACHE_EXPIRATION_DELTAS[cache_type]
)
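    # Sketch of the freshness check semantics (the user object is hypothetical;
    # the cache-type constants are defined on this class):
    #   CachedEdxDataApi.is_cache_fresh(user, CachedEdxDataApi.ENROLLMENT)
    #     -> True only while the stored timestamp is newer than now - 5 minutes
    #   CachedEdxDataApi.is_cache_fresh(user, "bogus")  -> raises ValueError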
@classmethod
def are_all_caches_fresh(cls, user):
"""
Checks if all cache types are fresh.
Args:
user (django.contrib.auth.models.User): A user
Returns:
bool
"""
edx_cache_fresh = True
mitxonline_cache_fresh = True
if has_social_auth(user, BACKEND_EDX_ORG):
edx_cache_fresh = all(cls.is_cache_fresh(user, cache_type) for cache_type in cls.EDX_SUPPORTED_CACHES)
if has_social_auth(user, BACKEND_MITX_ONLINE):
mitxonline_cache_fresh = all(
cls.is_cache_fresh(user, cache_type) for cache_type in cls.MITXONLINE_SUPPORTED_CACHES)
return edx_cache_fresh and mitxonline_cache_fresh
@classmethod
def update_cached_enrollment(cls, user, enrollment, course_id, index_user=False):
"""
Updates the cached enrollment based on an Enrollment object
Args:
user (User): A user
enrollment (Enrollment): An Enrollment object from edx_api_client
course_id (str): A course key
            index_user (bool): whether to force a user re-index.
This is only necessary if this function is called from outside the general
global user enrollments refresh.
Returns:
            None
        """
        with transaction.atomic():
# get the enrollment data
enrollment_data = enrollment.json
course_run = CourseRun.objects.get(edx_course_key=course_id)
updated_values = {
'user': user,
'course_run': course_run,
'data': enrollment_data,
}
models.CachedEnrollment.objects.update_or_create(
user=user,
course_run=course_run,
defaults=updated_values
)
if index_user:
# submit a celery task to reindex the user
tasks.index_users.delay([user.id], check_if_changed=True)
@classmethod
def update_cached_enrollments(cls, user, edx_client, provider):
"""
        Updates cached enrollment data for a user for the given courseware backend.
Args:
provider (str): name of the courseware backend
user (django.contrib.auth.models.User): A user
edx_client (EdxApi): EdX client to retrieve enrollments
Returns:
None
"""
# Fetch new data from edX
|
vga101/gaiasky
|
assets/scripts/tests/scroll-test.py
|
Python
|
mpl-2.0
| 452 | 0 |
# Test script. Tests GUI scroll movement commands.
# Created by Toni Sagrista
from gaia.cu9.ari.gaiaorbit.script import EventScriptingInterface
gs = EventScriptingInterface.instance()
gs.disableInput()
gs.cameraStop()
gs.setGuiScrollPosition(20.0)
gs.sleep(1)
gs.setGuiScrollPosition(40.0)
gs.sleep(1)
gs.setGuiScrollPosition(60.0)
gs.sleep(1)
gs.setGuiScrollPosition(80.0)
gs.sleep(1)
gs.setGuiScrollPosition(100.0)
gs.sleep(1)
gs.enableInput()
|
unt-libraries/catalog-api
|
django/sierra/sierra/solr_backend.py
|
Python
|
bsd-3-clause
| 11,551 | 0.001472 |
"""
This is a custom solr_backend for haystack. It fixes a few minor issues
with the out-of-the-box version.
1. Overrides the SolrSearchBackend._process_results() method. The
out-of-the-box haystack version uses pysolr._to_python() to parse any
results for indexes that aren't managed by haystack. This method for
some reason just pulls the first item out of lists/tuples and returns
it--so search results truncate any lists of items that come from Solr.
Here I've implemented a custom_to_python function that fixes this
problem while still calling pysolr._to_python() on flattened list
values.
2. Overrides the SolrSearchBackend.clear() method so that the Solr
index optimization isn't triggered if commit is false.
"""
import subprocess
import os
import shlex
import re
from django.apps import apps
from django.conf import settings
from haystack.backends import solr_backend, BaseEngine
from haystack.models import SearchResult
from haystack.constants import ID, DJANGO_CT, DJANGO_ID
from haystack.utils import get_model_ct
from haystack import connections
from pysolr import SolrError
def is_sequence(arg):
return (not hasattr(arg, 'strip') and
hasattr(arg, '__getitem__') or
hasattr(arg, '__iter__'))
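# Illustration (Python 2 semantics, which this backend targets):
#   is_sequence([1, 2, 3])   -> True   (no .strip, has __getitem__)
#   is_sequence((1, 2))      -> True
#   is_sequence('a string')  -> False  (strings define .strip)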
def custom_to_python(val, _to_python):
"""
Simplest way I could think of to add what we want to
pysolr._to_python. Recursively unpacks all values in any sequence
and returns the final data structure.
"""
if is_sequence(val):
ret_val = []
for i in val:
ret_val.append(custom_to_python(i, _to_python))
return ret_val
else:
return _to_python(val)
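# Rough illustration of the difference (values are hypothetical; as described
# in the module docstring, pysolr's _to_python returns only the first element
# of a list/tuple):
#   _to_python(['a', 'b', 'c'])                      -> 'a'
#   custom_to_python(['a', ['b', 'c']], _to_python)  -> ['a', ['b', 'c']]
# i.e. every element of a (possibly nested) sequence is converted instead of
# the list being truncated to its first item.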
class CustomSolrSearchBackend(solr_backend.SolrSearchBackend):
def _process_results(self, raw_results, highlight=False, result_class=None, distance_point=None):
results = []
hits = raw_results.hits
facets = {}
stats = {}
spelling_suggestion = None
if result_class is None:
result_class = SearchResult
if hasattr(raw_results,'stats'):
stats = raw_results.stats.get('stats_fields',{})
if hasattr(raw_results, 'facets'):
facets = {
'fields': raw_results.facets.get('facet_fields', {}),
'dates': raw_results.facets.get('facet_dates', {}),
'queries': raw_results.facets.get('facet_queries', {}),
}
for key in ['fields']:
for facet_field in facets[key]:
# Convert to a two-tuple, as Solr's json format returns a list of
# pairs.
facets[key][facet_field] = list(zip(facets[key][facet_field][::2], facets[key][facet_field][1::2]))
if self.include_spelling is True:
if hasattr(raw_results, 'spellcheck'):
if len(raw_results.spellcheck.get('suggestions', [])):
# For some reason, it's an array of pairs. Pull off the
# collated result from the end.
spelling_suggestion = raw_results.spellcheck.get('suggestions')[-1]
unified_index = connections[self.connection_alias].get_unified_index()
indexed_models = unified_index.get_indexed_models()
for raw_result in raw_results.docs:
app_label, model_name = raw_result[DJANGO_CT].split('.')
additional_fields = {}
model = apps.get_model(app_label, model_name)
if model and model in indexed_models:
for key, value in raw_result.items():
index = unified_index.get_index(model)
string_key = str(key)
if string_key in index.fields and hasattr(index.fields[string_key], 'convert'):
additional_fields[string_key] = index.fields[string_key].convert(value)
else:
additional_fields[string_key] = custom_to_python(value, self.conn._to_python)
del(additional_fields[DJANGO_CT])
del(additional_fields[DJANGO_ID])
del(additional_fields['score'])
if raw_result[ID] in getattr(raw_results, 'highlighting', {}):
additional_fields['highlighted'] = raw_results.highlighting[raw_result[ID]]
if distance_point:
additional_fields['_point_of_origin'] = distance_point
if raw_result.get('__dist__'):
from haystack.utils.geo import Distance
additional_fields['_distance'] = Distance(km=float(raw_result['__dist__']))
else:
additional_fields['_distance'] = None
result = result_class(app_label, model_name, raw_result[DJANGO_ID], raw_result['score'], **additional_fields)
results.append(result)
else:
hits -= 1
return {
'results': results,
'hits': hits,
'stats': stats,
'facets': facets,
'spelling_suggestion': spelling_suggestion,
}
def clear(self, models=[], commit=True):
try:
if not models:
# *:* matches all docs in Solr
self.conn.delete(q='*:*', commit=commit)
else:
models_to_delete = []
for model in models:
models_to_delete.append("%s:%s" % (DJANGO_CT, get_model_ct(model)))
self.conn.delete(q=" OR ".join(models_to_delete), commit=commit)
if commit:
# Run an optimize post-clear. http://wiki.apache.org/solr/FAQ#head-9aafb5d8dff5308e8ea4fcf4b71f19f029c4bb99
self.conn.optimize()
except (IOError, SolrError) as e:
if not self.silently_fail:
raise
if len(models):
self.log.error("Failed to clear Solr index of models '%s': %s", ','.join(models_to_delete), e)
else:
self.log.error("Failed to clear Solr index: %s", e)
class CustomSolrEngine(BaseEngine):
backend = CustomSolrSearchBackend
query = solr_backend.SolrSearchQuery
class SolrmarcIndexBackend(CustomSolrSearchBackend):
"""
This is a custom Solr backend class for Haystack(ish) indexes that
    implements index updates via Solrmarc. All of the code here
    is derived from the code that was part of the `BibsDownloadMarc` and
    `BibsToSolr` exporters (in `export.basic_exporters`). As we're
working on additional indexes fed by Solrmarc (for Blacklight),
it started to make more sense to move that into a lower-level
class for more uniformity at the index and exporter levels.
How to use this class? In Django settings, use the SolrmarcEngine
class in your HAYSTACK_CONNECTIONS definition. Ensure that you've
created the applicable Solr core and that you have an
index.properties file in the solr/solrmarc project directory for
that index. (By default you should name it <core>_index.properties,
where <core> is the name of the Solr core.) Your haystack index
class should be a `base.search_indexes.CustomQuerySetIndex` or
`SolrmarcIndex` class. There are a few class attributes you can add
to the index class to help further define how the SolrMarc process
works--without them, sensible defaults are used.
`s2marc_class` -- The S2MarcBatch (see `export.sierra2marc`) or
equivalent/derived class that does the batch conversion of Sierra
data (via the Django ORM models) to MARC records and saves them to
the filesystem so that Solrmarc can index them. Default is
S2MarcBatch.
`index_properties` -- The filename for the index.properties file
that converts the MARC files to Solr fields. As mentioned above,
    the default is '<core>_index.properties' -- where <core> is the name
of the Solr core for that index.