Dataset schema (one row per source file; ranges are the minimum and maximum observed values):

| Column | Type | Lengths / values |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 1 to 1.03M |
| ext | string | 10 distinct values |
| lang | string | 1 distinct value |
| max_stars_repo_path | string | lengths 3 to 239 |
| max_stars_repo_name | string | lengths 5 to 130 |
| max_stars_repo_head_hexsha | string | lengths 40 to 78 |
| max_stars_repo_licenses | list | lengths 1 to 10 |
| max_stars_count | int64 | 1 to 191k, nullable |
| max_stars_repo_stars_event_min_datetime | string | length 24, nullable |
| max_stars_repo_stars_event_max_datetime | string | length 24, nullable |
| max_issues_repo_path | string | lengths 3 to 239 |
| max_issues_repo_name | string | lengths 5 to 130 |
| max_issues_repo_head_hexsha | string | lengths 40 to 78 |
| max_issues_repo_licenses | list | lengths 1 to 10 |
| max_issues_count | int64 | 1 to 67k, nullable |
| max_issues_repo_issues_event_min_datetime | string | length 24, nullable |
| max_issues_repo_issues_event_max_datetime | string | length 24, nullable |
| max_forks_repo_path | string | lengths 3 to 239 |
| max_forks_repo_name | string | lengths 5 to 130 |
| max_forks_repo_head_hexsha | string | lengths 40 to 78 |
| max_forks_repo_licenses | list | lengths 1 to 10 |
| max_forks_count | int64 | 1 to 105k, nullable |
| max_forks_repo_forks_event_min_datetime | string | length 24, nullable |
| max_forks_repo_forks_event_max_datetime | string | length 24, nullable |
| content | string | lengths 1 to 1.03M |
| avg_line_length | float64 | 1 to 958k |
| max_line_length | int64 | 1 to 1.03M |
| alphanum_fraction | float64 | 0 to 1 |
hexsha: 4a1d1152a9f7b707c3c0fa399716ea848e121d19 | size: 137 | ext: py | lang: Python
max_stars: path=voltdb/datadog_checks/voltdb/__about__.py, repo=mchelen-gov/integrations-core, head=81281600b3cc7025a7a32148c59620c9592a564f, licenses=["BSD-3-Clause"], count=1, events 2021-01-28T01:45:37.000Z to 2021-01-28T01:45:37.000Z
max_issues: path=voltdb/datadog_checks/voltdb/__about__.py, repo=mchelen-gov/integrations-core, head=81281600b3cc7025a7a32148c59620c9592a564f, licenses=["BSD-3-Clause"], count=3, events 2021-01-27T04:56:40.000Z to 2021-02-26T06:29:22.000Z
max_forks: path=voltdb/datadog_checks/voltdb/__about__.py, repo=mchelen-gov/integrations-core, head=81281600b3cc7025a7a32148c59620c9592a564f, licenses=["BSD-3-Clause"], count=1, events 2021-04-07T16:58:27.000Z to 2021-04-07T16:58:27.000Z
content:
# (C) Datadog, Inc. 2020-present
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
__version__ = '1.3.1'
avg_line_length: 27.4 | max_line_length: 59 | alphanum_fraction: 0.722628
hexsha: 4a1d11781e4339ff4a2b80ea942a06bcb50216ce | size: 678 | ext: py | lang: Python
max_stars: path=problems/revc.py, repo=viadanna/rosalind-python, head=6709c683b04c2e069d73613a2844533e752030bb, licenses=["MIT"], count=null, events null
max_issues: path=problems/revc.py, repo=viadanna/rosalind-python, head=6709c683b04c2e069d73613a2844533e752030bb, licenses=["MIT"], count=null, events null
max_forks: path=problems/revc.py, repo=viadanna/rosalind-python, head=6709c683b04c2e069d73613a2844533e752030bb, licenses=["MIT"], count=null, events null
content:
'''
Complementing a Strand of DNA
http://rosalind.info/problems/revc/
Problem
In DNA strings, symbols 'A' and 'T' are complements of each other, as are
'C' and 'G'.
The reverse complement of a DNA string s is the string sc formed by
reversing the symbols of s, then taking the complement of each symbol
(e.g., the reverse complement of "GTCA" is "TGAC").
Given: A DNA string s of length at most 1000 bp.
Return: The reverse complement sc of s.
Sample Dataset
AAAACCCGGT
Sample Output
ACCGGGTTTT
'''
from lib.sequences import DNA
def run_revc(sequence):
    ''' Returns the reverse complement of a DNA sequence '''
return DNA(sequence).reverse_complement().sequence
avg_line_length: 22.6 | max_line_length: 73 | alphanum_fraction: 0.743363
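The revc.py record above delegates the actual work to lib.sequences.DNA, which is not included in this record. Below is a minimal standalone sketch of the reverse-complement logic its docstring describes; the function name and COMPLEMENT table are illustrative, not taken from that library.

```python
# Minimal reverse-complement sketch matching the revc.py docstring:
# reverse the string, then complement each base (A<->T, C<->G).
COMPLEMENT = {'A': 'T', 'T': 'A', 'C': 'G', 'G': 'C'}

def reverse_complement(seq):
    return ''.join(COMPLEMENT[base] for base in reversed(seq))

assert reverse_complement('GTCA') == 'TGAC'              # example from the docstring
assert reverse_complement('AAAACCCGGT') == 'ACCGGGTTTT'  # sample dataset -> sample output
```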
hexsha: 4a1d11819cd40ac9b8da92438a2b0a5da038656f | size: 10,997 | ext: py | lang: Python
max_stars: path=benchmarks/f3_wrong_hints_permutations/scaling_nonlinear_software/19-19_22.py, repo=EnricoMagnago/F3, head=c863215c318d7d5f258eb9be38c6962cf6863b52, licenses=["MIT"], count=3, events 2021-04-23T23:29:26.000Z to 2022-03-23T10:00:30.000Z
max_issues: path=benchmarks/f3_wrong_hints_permutations/scaling_nonlinear_software/19-19_22.py, repo=EnricoMagnago/F3, head=c863215c318d7d5f258eb9be38c6962cf6863b52, licenses=["MIT"], count=null, events null
max_forks: path=benchmarks/f3_wrong_hints_permutations/scaling_nonlinear_software/19-19_22.py, repo=EnricoMagnago/F3, head=c863215c318d7d5f258eb9be38c6962cf6863b52, licenses=["MIT"], count=1, events 2021-11-17T22:02:56.000Z to 2021-11-17T22:02:56.000Z
content:
from typing import FrozenSet, Tuple
import pysmt.typing as types
from pysmt.environment import Environment as PysmtEnv
from pysmt.fnode import FNode
from utils import symb_to_next
from hint import Hint, Location
def transition_system(env: PysmtEnv) -> Tuple[FrozenSet[FNode], FNode, FNode,
FNode]:
assert isinstance(env, PysmtEnv)
mgr = env.formula_manager
pc = mgr.Symbol("pc", types.INT)
x = mgr.Symbol("x", types.INT)
y = mgr.Symbol("y", types.INT)
z = mgr.Symbol("z", types.INT)
x_pc = symb_to_next(mgr, pc)
x_x = symb_to_next(mgr, x)
x_y = symb_to_next(mgr, y)
x_z = symb_to_next(mgr, z)
symbols = frozenset([pc, x, y, z])
n_locs = 5
int_bound = n_locs
pcs = []
x_pcs = []
ints = [mgr.Int(i) for i in range(int_bound)]
for l in range(n_locs):
n = ints[l]
pcs.append(mgr.Equals(pc, n))
x_pcs.append(mgr.Equals(x_pc, n))
m_1 = mgr.Int(-1)
pcend = mgr.Equals(pc, m_1)
x_pcend = mgr.Equals(x_pc, m_1)
# initial location.
init = pcs[0]
# control flow graph.
cfg = mgr.And(
# pc = -1 : -1,
mgr.Implies(pcend, x_pcend),
# pc = 0 & !(y >= 1) : -1,
mgr.Implies(mgr.And(pcs[0], mgr.Not(mgr.GE(y, ints[1]))), x_pcend),
# pc = 0 & y >= 1 : 1,
mgr.Implies(mgr.And(pcs[0], mgr.GE(y, ints[1])), x_pcs[1]),
# pc = 1 & !(z >= 1) : -1,
mgr.Implies(mgr.And(pcs[1], mgr.Not(mgr.GE(z, ints[1]))), x_pcend),
# pc = 1 & z >= 1 : 2,
mgr.Implies(mgr.And(pcs[1], mgr.GE(z, ints[1])), x_pcs[2]),
# pc = 2 & !(x >= 0) : -1,
mgr.Implies(mgr.And(pcs[2], mgr.Not(mgr.GE(x, ints[0]))), x_pcend),
# pc = 2 & x >= 0 : 3,
mgr.Implies(mgr.And(pcs[2], mgr.GE(x, ints[0])), x_pcs[3]),
# pc = 3 : 4,
mgr.Implies(pcs[3], x_pcs[4]),
# pc = 4 : 2,
mgr.Implies(pcs[4], x_pcs[2]))
# transition labels.
labels = mgr.And(
# (pc = -1 & pc' = -1) -> (x' = x & y' = y & z' = z),
mgr.Implies(
mgr.And(pcend, x_pcend),
mgr.And(mgr.Equals(x_x, x), mgr.Equals(x_y, y),
mgr.Equals(x_z, z))),
# (pc = 0 & pc' = -1) -> (x' = x & y' = y & z' = z),
mgr.Implies(
mgr.And(pcs[0], x_pcend),
mgr.And(mgr.Equals(x_x, x), mgr.Equals(x_y, y),
mgr.Equals(x_z, z))),
# (pc = 0 & pc' = 1) -> (x' = x & y' = y & z' = z),
mgr.Implies(
mgr.And(pcs[0], x_pcs[1]),
mgr.And(mgr.Equals(x_x, x), mgr.Equals(x_y, y),
mgr.Equals(x_z, z))),
# (pc = 1 & pc' = -1) -> (x' = x & y' = y & z' = z),
mgr.Implies(
mgr.And(pcs[1], x_pcend),
mgr.And(mgr.Equals(x_x, x), mgr.Equals(x_y, y),
mgr.Equals(x_z, z))),
# (pc = 1 & pc' = 2) -> (x' = x & y' = y & z' = z),
mgr.Implies(
mgr.And(pcs[1], x_pcs[2]),
mgr.And(mgr.Equals(x_x, x), mgr.Equals(x_y, y),
mgr.Equals(x_z, z))),
# (pc = 2 & pc' = -1) -> (x' = x & y' = y & z' = z),
mgr.Implies(
mgr.And(pcs[2], x_pcend),
mgr.And(mgr.Equals(x_x, x), mgr.Equals(x_y, y),
mgr.Equals(x_z, z))),
# (pc = 2 & pc' = 3) -> (x' = x & y' = y & z' = z),
mgr.Implies(
mgr.And(pcs[2], x_pcs[3]),
mgr.And(mgr.Equals(x_x, x), mgr.Equals(x_y, y),
mgr.Equals(x_z, z))),
# (pc = 3 & pc' = 4) -> (x' = y*z - 1 & y' = y & z' = z),
mgr.Implies(
mgr.And(pcs[3], x_pcs[4]),
mgr.And(mgr.Equals(x_x, mgr.Minus(mgr.Times(y, z), ints[1])),
mgr.Equals(x_y, y), mgr.Equals(x_z, z))),
# (pc = 4 & pc' = 2) -> (x' = x & y' = y+1 & z' = z),
mgr.Implies(
mgr.And(pcs[4], x_pcs[2]),
mgr.And(mgr.Equals(x_x, x), mgr.Equals(x_y, mgr.Plus(y, ints[1])),
mgr.Equals(x_z, z))))
# transition relation.
trans = mgr.And(cfg, labels)
# fairness.
fairness = mgr.Not(pcend)
return symbols, init, trans, fairness
def hints(env: PysmtEnv) -> FrozenSet[Hint]:
assert isinstance(env, PysmtEnv)
mgr = env.formula_manager
pc = mgr.Symbol("pc", types.INT)
x = mgr.Symbol("x", types.INT)
y = mgr.Symbol("y", types.INT)
z = mgr.Symbol("z", types.INT)
symbs = frozenset([pc, x, y, z])
x_pc = symb_to_next(mgr, pc)
x_x = symb_to_next(mgr, x)
x_y = symb_to_next(mgr, y)
x_z = symb_to_next(mgr, z)
res = []
i_0 = mgr.Int(0)
i_1 = mgr.Int(1)
i_2 = mgr.Int(2)
i_3 = mgr.Int(3)
loc0 = Location(env, mgr.GT(x, i_3), mgr.And(mgr.GT(y, i_1), mgr.GT(z, i_1)))
loc0.set_progress(1, mgr.GE(x_x, mgr.Minus(mgr.Times(y, z), i_1)))
loc1 = Location(env, mgr.GT(x, i_0), mgr.GE(y, i_1))
loc1.set_progress(2, mgr.Equals(x_x, mgr.Plus(x, y)))
loc2 = Location(env, mgr.GT(x, i_3))
loc2.set_progress(2, mgr.Equals(x_x, x))
h_x = Hint("h_x4", env, frozenset([x]), symbs)
h_x.set_locs([loc0, loc1, loc2])
res.append(h_x)
loc0 = Location(env, mgr.GE(y, i_3))
loc0.set_progress(1, mgr.Equals(x_y, mgr.Plus(y, i_1)))
loc1 = Location(env, mgr.GE(y, i_3))
loc1.set_progress(2, mgr.Equals(x_y, y))
loc2 = Location(env, mgr.GE(y, i_3))
loc2.set_progress(2, mgr.Equals(x_y, mgr.Plus(y, i_1)))
h_y = Hint("h_y4", env, frozenset([y]), symbs)
h_y.set_locs([loc0, loc1, loc2])
res.append(h_y)
loc0 = Location(env, mgr.Equals(pc, i_2))
loc0.set_progress(1, mgr.GT(x_pc, i_2))
loc1 = Location(env, mgr.GE(pc, i_3))
loc1.set_progress(0, mgr.Equals(x_pc, i_2))
h_pc = Hint("h_pc3", env, frozenset([pc]), symbs)
h_pc.set_locs([loc0, loc1])
res.append(h_pc)
stutter = mgr.Equals(x_y, y)
loc0 = Location(env, mgr.GE(y, i_0))
loc0.set_progress(1, mgr.Equals(x_y, mgr.Plus(y, i_1)))
loc1 = Location(env, mgr.GE(y, i_1))
loc1.set_progress(2, mgr.Equals(x_y, mgr.Plus(y, i_1)))
loc2 = Location(env, mgr.GE(y, i_2))
loc2.set_progress(0, mgr.Equals(x_y, y))
h_y = Hint("h_y1", env, frozenset([y]), symbs)
h_y.set_locs([loc0, loc1, loc2])
res.append(h_y)
loc0 = Location(env, mgr.GE(z, i_3), mgr.GE(y, i_0))
loc0.set_progress(1, mgr.Equals(x_z, y))
loc1 = Location(env, mgr.GE(z, i_0), mgr.GE(x, i_3))
loc1.set_progress(0, mgr.GE(x_z, mgr.Plus(z, x)))
h_z = Hint("h_z3", env, frozenset([z]), symbs)
h_z.set_locs([loc0, loc1])
res.append(h_z)
stutter = mgr.Equals(x_x, x)
loc = Location(env, mgr.GT(x, i_0), mgr.And(mgr.GT(y, i_1), mgr.GT(z, i_1)),
stutterT=stutter)
loc.set_progress(0, mgr.GE(x_x, mgr.Minus(mgr.Times(y, z), i_2)))
h_x = Hint("h_x1", env, frozenset([x]), symbs)
h_x.set_locs([loc])
res.append(h_x)
loc = Location(env, mgr.LE(z, i_0))
loc.set_progress(0, mgr.Equals(x_z, z))
h_z = Hint("h_z0", env, frozenset([z]), symbs)
h_z.set_locs([loc])
res.append(h_z)
loc0 = Location(env, mgr.GE(z, i_3))
loc0.set_progress(0, mgr.GT(x_z, z))
h_z = Hint("h_z1", env, frozenset([z]), symbs)
h_z.set_locs([loc0])
res.append(h_z)
stutter = mgr.Equals(x_x, x)
loc0 = Location(env, mgr.GT(x, i_0), mgr.And(mgr.GT(y, i_1), mgr.GT(z, i_1)))
loc0.set_progress(1, mgr.GE(x_x, mgr.Minus(mgr.Times(y, z), i_1)))
loc1 = Location(env, mgr.GT(x, i_0))
loc1.set_progress(0, mgr.Equals(x_x, mgr.Plus(x, i_1)))
h_x = Hint("h_x2", env, frozenset([x]), symbs)
h_x.set_locs([loc0, loc1])
res.append(h_x)
stutter = mgr.Equals(x_y, y)
loc0 = Location(env, mgr.GE(y, i_3))
loc0.set_progress(1, mgr.Equals(x_y, mgr.Plus(y, i_1)))
loc1 = Location(env, mgr.GE(y, i_3), mgr.GE(z, i_2))
loc1.set_progress(0, mgr.Equals(x_y, mgr.Plus(y, z)))
h_y = Hint("h_y2", env, frozenset([y]), symbs)
h_y.set_locs([loc0, loc1])
res.append(h_y)
loc0 = Location(env, mgr.GT(x, i_3), mgr.And(mgr.GT(y, i_1), mgr.GT(z, i_1)))
loc0.set_progress(1, mgr.GE(x_x, mgr.Minus(mgr.Times(y, z), i_1)))
loc1 = Location(env, mgr.GT(x, i_0), mgr.GE(y, i_1))
loc1.set_progress(0, mgr.Equals(x_x, mgr.Plus(x, y)))
h_x = Hint("h_x3", env, frozenset([x]), symbs)
h_x.set_locs([loc0, loc1])
res.append(h_x)
stutter = mgr.Equals(x_y, y)
loc = Location(env, mgr.TRUE(), mgr.LE(x, i_0), stutterT=stutter)
loc.set_progress(0, mgr.Equals(x_y, mgr.Minus(y, i_1)))
h_y = Hint("h_y0", env, frozenset([y]), symbs)
h_y.set_locs([loc])
res.append(h_y)
loc0 = Location(env, mgr.Equals(pc, i_1))
loc0.set_progress(1, mgr.Equals(x_pc, i_3))
loc1 = Location(env, mgr.Equals(pc, i_3))
loc1.set_progress(0, mgr.Equals(x_pc, i_1))
h_pc = Hint("h_pc2", env, frozenset([pc]), symbs)
h_pc.set_locs([loc0, loc1])
res.append(h_pc)
loc0 = Location(env, mgr.Equals(pc, i_1))
loc0.set_progress(1, mgr.Equals(x_pc, i_2))
loc1 = Location(env, mgr.Equals(pc, i_2))
loc1.set_progress(2, mgr.Equals(x_pc, i_3))
loc2 = Location(env, mgr.Equals(pc, i_3))
loc2.set_progress(0, mgr.Equals(x_pc, i_1))
h_pc = Hint("h_pc0", env, frozenset([pc]), symbs)
h_pc.set_locs([loc0, loc1, loc2])
res.append(h_pc)
stutter = mgr.Equals(x_x, x)
loc = Location(env, mgr.GT(x, i_0), mgr.GT(y, i_0), stutterT=stutter)
loc.set_progress(0, mgr.Equals(x_x, mgr.Times(x, y)))
h_x = Hint("h_x0", env, frozenset([x]), symbs)
h_x.set_locs([loc])
res.append(h_x)
loc0 = Location(env, mgr.GE(y, i_3))
loc0.set_progress(1, mgr.Equals(x_y, mgr.Plus(y, i_1)))
loc1 = Location(env, mgr.GE(y, i_3), mgr.GE(x, i_2))
loc1.set_progress(0, mgr.Equals(x_y, mgr.Plus(y, x)))
h_y = Hint("h_y3", env, frozenset([y]), symbs)
h_y.set_locs([loc0, loc1])
res.append(h_y)
loc0 = Location(env, mgr.GE(z, i_0))
loc0.set_progress(1, mgr.Equals(x_z, z))
loc1 = Location(env, mgr.GE(z, i_0))
loc1.set_progress(0, mgr.Equals(x_z, mgr.Plus(z, i_3)))
h_z = Hint("h_z4", env, frozenset([z]), symbs)
h_z.set_locs([loc0, loc1])
res.append(h_z)
loc0 = Location(env, mgr.Equals(pc, i_2))
loc0.set_progress(1, mgr.GT(x_pc, i_2))
loc1 = Location(env, mgr.GE(pc, i_3))
loc1.set_progress(2, mgr.GE(x_pc, i_3))
loc2 = Location(env, mgr.GE(pc, i_3))
loc2.set_progress(0, mgr.Equals(x_pc, i_2))
h_pc = Hint("h_pc4", env, frozenset([pc]), symbs)
h_pc.set_locs([loc0, loc1, loc2])
res.append(h_pc)
loc0 = Location(env, mgr.GE(z, i_3), mgr.GE(y, i_0))
loc0.set_progress(1, mgr.GE(x_z, mgr.Plus(z, y)))
loc1 = Location(env, mgr.GE(z, i_3), mgr.GE(x, i_0))
loc1.set_progress(0, mgr.GE(x_z, mgr.Plus(z, i_0)))
h_z = Hint("h_z2", env, frozenset([z]), symbs)
h_z.set_locs([loc0, loc1])
res.append(h_z)
return frozenset(res)
avg_line_length: 34.581761 | max_line_length: 81 | alphanum_fraction: 0.553878
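The cfg and labels comments inside transition_system() above encode a small imperative loop over program locations pc = 0..4, with pc = -1 as the exit location and the fairness constraint mgr.Not(pcend) asking for a run that never exits. The sketch below is a reconstruction of that encoded program read off those comments, for orientation only; it is not code from the benchmark.

```python
# Program encoded by the pc transitions in transition_system(), reconstructed
# from the cfg/labels comments (locations 0-4, exit at pc = -1).
def encoded_program(x, y, z):
    if y >= 1 and z >= 1:    # guards at pc = 0 and pc = 1
        while x >= 0:        # guard at pc = 2
            x = y * z - 1    # update at pc = 3
            y = y + 1        # update at pc = 4
    # Falling out of the loop corresponds to reaching pc = -1; the benchmark's
    # fairness condition looks for executions that never get here.
```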
hexsha: 4a1d12a6823b46d064d76c257190cda60999f3fa | size: 7,191 | ext: py | lang: Python
max_stars: path=ortools/constraint_solver/samples/vrp_pickup_delivery.py, repo=yjaeil/or-tools, head=89c1d950b757a5cc3f58e3b12715bceb24965e2c, licenses=["Apache-2.0"], count=1, events 2019-01-31T02:31:03.000Z to 2019-01-31T02:31:03.000Z
max_issues: path=ortools/constraint_solver/samples/vrp_pickup_delivery.py, repo=yjaeil/or-tools, head=89c1d950b757a5cc3f58e3b12715bceb24965e2c, licenses=["Apache-2.0"], count=null, events null
max_forks: path=ortools/constraint_solver/samples/vrp_pickup_delivery.py, repo=yjaeil/or-tools, head=89c1d950b757a5cc3f58e3b12715bceb24965e2c, licenses=["Apache-2.0"], count=null, events null
content:
# Copyright 2010-2018 Google LLC
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# [START program]
"""Simple Pickup Delivery Problem (PDP)."""
# [START import]
from __future__ import print_function
from ortools.constraint_solver import routing_enums_pb2
from ortools.constraint_solver import pywrapcp
# [END import]
# [START data_model]
def create_data_model():
"""Stores the data for the problem."""
data = {}
data['distance_matrix'] = [
[
0, 548, 776, 696, 582, 274, 502, 194, 308, 194, 536, 502, 388, 354,
468, 776, 662
],
[
548, 0, 684, 308, 194, 502, 730, 354, 696, 742, 1084, 594, 480, 674,
1016, 868, 1210
],
[
776, 684, 0, 992, 878, 502, 274, 810, 468, 742, 400, 1278, 1164,
1130, 788, 1552, 754
],
[
696, 308, 992, 0, 114, 650, 878, 502, 844, 890, 1232, 514, 628, 822,
1164, 560, 1358
],
[
582, 194, 878, 114, 0, 536, 764, 388, 730, 776, 1118, 400, 514, 708,
1050, 674, 1244
],
[
274, 502, 502, 650, 536, 0, 228, 308, 194, 240, 582, 776, 662, 628,
514, 1050, 708
],
[
502, 730, 274, 878, 764, 228, 0, 536, 194, 468, 354, 1004, 890, 856,
514, 1278, 480
],
[
194, 354, 810, 502, 388, 308, 536, 0, 342, 388, 730, 468, 354, 320,
662, 742, 856
],
[
308, 696, 468, 844, 730, 194, 194, 342, 0, 274, 388, 810, 696, 662,
320, 1084, 514
],
[
194, 742, 742, 890, 776, 240, 468, 388, 274, 0, 342, 536, 422, 388,
274, 810, 468
],
[
536, 1084, 400, 1232, 1118, 582, 354, 730, 388, 342, 0, 878, 764,
730, 388, 1152, 354
],
[
502, 594, 1278, 514, 400, 776, 1004, 468, 810, 536, 878, 0, 114,
308, 650, 274, 844
],
[
388, 480, 1164, 628, 514, 662, 890, 354, 696, 422, 764, 114, 0, 194,
536, 388, 730
],
[
354, 674, 1130, 822, 708, 628, 856, 320, 662, 388, 730, 308, 194, 0,
342, 422, 536
],
[
468, 1016, 788, 1164, 1050, 514, 514, 662, 320, 274, 388, 650, 536,
342, 0, 764, 194
],
[
776, 868, 1552, 560, 674, 1050, 1278, 742, 1084, 810, 1152, 274,
388, 422, 764, 0, 798
],
[
662, 1210, 754, 1358, 1244, 708, 480, 856, 514, 468, 354, 844, 730,
536, 194, 798, 0
],
]
data['pickups_deliveries'] = [
[1, 6],
[2, 10],
[4, 3],
[5, 9],
[7, 8],
[15, 11],
[13, 12],
[16, 14],
]
data['num_vehicles'] = 4
data['depot'] = 0
return data
# [END data_model]
# [START solution_printer]
def print_solution(data, manager, routing, assignment):
"""Prints assignment on console."""
print('Objective: {}'.format(assignment.ObjectiveValue()))
total_distance = 0
for vehicle_id in range(data['num_vehicles']):
index = routing.Start(vehicle_id)
plan_output = 'Route for vehicle {}:\n'.format(vehicle_id)
route_distance = 0
while not routing.IsEnd(index):
plan_output += ' {} -> '.format(manager.IndexToNode(index))
previous_index = index
index = assignment.Value(routing.NextVar(index))
route_distance += routing.GetArcCostForVehicle(
previous_index, index, vehicle_id)
plan_output += '{}\n'.format(manager.IndexToNode(index))
plan_output += 'Distance of the route: {}m\n'.format(route_distance)
print(plan_output)
total_distance += route_distance
print('Total Distance of all routes: {}m'.format(total_distance))
# [END solution_printer]
def main():
"""Entry point of the program."""
# Instantiate the data problem.
# [START data]
data = create_data_model()
# [END data]
# Create the routing index manager.
# [START index_manager]
manager = pywrapcp.RoutingIndexManager(
len(data['distance_matrix']), data['num_vehicles'], data['depot'])
# [END index_manager]
# Create Routing Model.
# [START routing_model]
routing = pywrapcp.RoutingModel(manager)
# [END routing_model]
# Define cost of each arc.
# [START arc_cost]
def distance_callback(from_index, to_index):
"""Returns the manhattan distance between the two nodes."""
# Convert from routing variable Index to distance matrix NodeIndex.
from_node = manager.IndexToNode(from_index)
to_node = manager.IndexToNode(to_index)
return data['distance_matrix'][from_node][to_node]
transit_callback_index = routing.RegisterTransitCallback(distance_callback)
routing.SetArcCostEvaluatorOfAllVehicles(transit_callback_index)
# [END arc_cost]
# Add Distance constraint.
# [START distance_constraint]
dimension_name = 'Distance'
routing.AddDimension(
transit_callback_index,
0, # no slack
3000, # vehicle maximum travel distance
True, # start cumul to zero
dimension_name)
distance_dimension = routing.GetDimensionOrDie(dimension_name)
distance_dimension.SetGlobalSpanCostCoefficient(100)
# [END distance_constraint]
# Define Transportation Requests.
# [START pickup_delivery]
for request in data['pickups_deliveries']:
pickup_index = manager.NodeToIndex(request[0])
delivery_index = manager.NodeToIndex(request[1])
routing.AddPickupAndDelivery(pickup_index, delivery_index)
routing.solver().Add(
routing.VehicleVar(pickup_index) ==
routing.VehicleVar(delivery_index))
routing.solver().Add(
distance_dimension.CumulVar(pickup_index) <=
distance_dimension.CumulVar(delivery_index))
# [END pickup_delivery]
# Setting first solution heuristic.
# [START parameters]
search_parameters = pywrapcp.DefaultRoutingSearchParameters()
search_parameters.first_solution_strategy = (
routing_enums_pb2.FirstSolutionStrategy.PARALLEL_CHEAPEST_INSERTION)
# [END parameters]
# Solve the problem.
# [START solve]
assignment = routing.SolveWithParameters(search_parameters)
# [END solve]
# Print solution on console.
# [START print_solution]
if assignment:
print_solution(data, manager, routing, assignment)
# [END print_solution]
if __name__ == '__main__':
main()
# [END program]
avg_line_length: 33.138249 | max_line_length: 80 | alphanum_fraction: 0.590738
hexsha: 4a1d1493fbce948a2c161af9b1a37ea7a602a714 | size: 2,330 | ext: py | lang: Python
max_stars: path=mnistnn.py, repo=gesingle/MNIST-Neural-Network, head=65501059cc89969c8aeea9aaca1aa85a49db364c, licenses=["MIT"], count=null, events null
max_issues: path=mnistnn.py, repo=gesingle/MNIST-Neural-Network, head=65501059cc89969c8aeea9aaca1aa85a49db364c, licenses=["MIT"], count=null, events null
max_forks: path=mnistnn.py, repo=gesingle/MNIST-Neural-Network, head=65501059cc89969c8aeea9aaca1aa85a49db364c, licenses=["MIT"], count=null, events null
content:
# Trains a simple neural network on the MNIST handwritten digit database
# @author Tariq Rashid
# @author Garrett Singletary
import numpy
from neuralnetwork import neuralNetwork
class mnistNN:
# number of input, hidden and output nodes
input_nodes = 784
hidden_nodes = 150
output_nodes = 10
# learning rate
learning_rate = 0.3
# create instance of neural network
n = neuralNetwork(input_nodes, hidden_nodes, output_nodes, learning_rate)
# load the mnist training data into a list
training_data_file = open("mnist_train.txt", 'r')
training_data_list = training_data_file.readlines()
training_data_file.close()
# train the neural network
    # go through all records in the training data set
for record in training_data_list:
# split the values by comma
all_values = record.split(',')
# scale and shift the inputs
inputs = (numpy.asfarray(all_values[1:]) / 255.0 * 0.99) + 0.1
# create the target output values (all 0.01 except for the desired
# label which is 0.99)
targets = numpy.zeros(output_nodes) + 0.01
# all_values[0] is the target label for this record
targets[int(all_values[0])] = 0.99
n.train(inputs, targets)
pass
    # load the mnist test data
test_data_file = open("mnist_test.txt", 'r')
test_data_list = test_data_file.readlines()
test_data_file.close()
# test the neural network
    # scorecard for how well the nn performs, initially empty
scorecard = []
# go through all the records in the test data set
for record in test_data_list:
# split the values by comma
all_values = record.split(',')
# correct answer is the first value
correct_label = int(all_values[0])
# scale and shift the inputs
inputs = (numpy.asfarray(all_values[1:]) / 255.0 * 0.99) + 0.1
# query the network
outputs = n.query(inputs)
        # the index of the highest value corresponds to the label
label = numpy.argmax(outputs)
# append correct or incorrect answer to list
if (label == correct_label):
# network's answer matches correct answer, add 1 to scorecard
scorecard.append(1)
else:
# network's answer doesn't match correct answer, add 0 to scorecard
scorecard.append(0)
pass
# calculate the performance score (fraction of correct answers)
scorecard_array = numpy.asarray(scorecard)
print ("performance = ", scorecard_array.sum() / scorecard_array.size)
avg_line_length: 29.125 | max_line_length: 74 | alphanum_fraction: 0.735193
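The input-scaling and target-vector comments in mnistnn.py above are easiest to see on a single record. The record below is made up for illustration (a real MNIST record has 784 pixel values) and reuses the same expressions as the script.

```python
import numpy

# A made-up CSV record: label first, then pixel values.
record = "5,0,128,255"
all_values = record.split(',')

# Same rescaling expression as mnistnn.py: pixel 0 -> 0.1, pixel 255 -> 1.09.
inputs = (numpy.asfarray(all_values[1:]) / 255.0 * 0.99) + 0.1
print(inputs)    # [0.1, 0.596..., 1.09]

# Target vector: 0.01 everywhere except 0.99 at the labelled digit (index 5 here).
targets = numpy.zeros(10) + 0.01
targets[int(all_values[0])] = 0.99
print(targets)
```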
hexsha: 4a1d14d7fac7d1e00184eb52aafe9a23a1a04da0 | size: 3,718 | ext: py | lang: Python
max_stars: path=toto/inputs/tryaxis.py, repo=calypso-science/Toto, head=85e90421343bf3dcf6d730767287647b5bc189bb, licenses=["MIT"], count=1, events 2022-03-24T23:41:16.000Z to 2022-03-24T23:41:16.000Z
max_issues: path=toto/inputs/tryaxis.py, repo=calypso-science/Toto, head=85e90421343bf3dcf6d730767287647b5bc189bb, licenses=["MIT"], count=12, events 2021-02-24T22:30:52.000Z to 2021-11-16T01:51:38.000Z
max_forks: path=toto/inputs/tryaxis.py, repo=calypso-science/Toto, head=85e90421343bf3dcf6d730767287647b5bc189bb, licenses=["MIT"], count=1, events 2021-09-21T11:37:09.000Z to 2021-09-21T11:37:09.000Z
content:
"""Read TRYAXIS file
This imports raw files from a TRYAXIS wave buoy.
This class returns a Pandas DataFrame with some extra attributes such as Latitude, Longitude, Units.
Parameters
~~~~~~~~~~
filename : (files,) str or list_like
A list of filename to process.
Notes
-----
This class only works with the NONDIRSPEC and WAVE files
Examples
~~~~~~~~
>>> from toto.inputs.tryaxis import TRYAXISfile
>>> nc=TRYAXISfile('filename.NONDIRSPEC')._toDataFrame()
"""
import glob,os,sys
import pandas as pd
import datetime as dt
import numpy as np
class TRYAXISfile():
@staticmethod
def defaultExtensions():
return ['.NONDIRSPEC','.WAVE']
def __init__(self,filenames):
if isinstance(filenames,str):
filenames=[filenames]
self.filenames=filenames
self.data=[]
# READ NONIRCSPCE
if self.filenames[0].endswith('NONDIRSPEC'):
self._reads_NONDIRSPEC()
# READ NONIRCSPCE
if self.filenames[0].endswith('WAVE'):
self._reads_WAVE()
def _read_WAVE(filename):
# Using readline()
wave={}
file1 = open(filename, 'r')
count = 0
while True:
count += 1
# Get next line from file
line = file1.readline()
# if line is empty
# end of file is reached
if not line:
break
if count>3:
name,value=line.split('=')
if count==4:
value=pd.to_datetime(value.rstrip(), format=' %Y-%m-%d %H:%M(UTC)')
elif count>7:
try:
value=float(value)
except:
value=np.nan
else:
value=value.rstrip()
wave[name.rstrip().replace(' ','_')]=value
file1.close()
return wave
def _reads_WAVE(self):
ds=[]
for i,file in enumerate(self.filenames):
ds.append(TRYAXISfile._read_WAVE(file))
keys=[]
for d in ds:
keys+=d.keys()
keys=list(set(keys))
di = {}
for key in keys:
di[key]=[]
for d in ds:
if key in d:
di[key].append(d[key])
else:
di[key].append(np.nan)
df=pd.DataFrame.from_dict(di,orient='columns')
df=df.rename(columns={'DATE':'time'})
df.set_index('time',inplace=True,drop=False)
df.sort_index(inplace=True)
self.data.append(df)
def _reads_NONDIRSPEC(self):
for file in self.filenames:
self._read_NONDIRSPEC(file)
def _read_NONDIRSPEC(self,filename):
try:
with open(filename,'r',encoding=None) as f:
df=pd.read_csv(f,sep=' ',skiprows=9,names=['freq','density'],engine='python')
except pd.errors.ParserError as e:
            raise ValueError('TryAxis File {}: '.format(filename) + e.args[0])
def readline(iLine):
with open(filename,'r',encoding=None) as f:
for i, line in enumerate(f):
if i==iLine:
return line.strip()
elif i>iLine:
break
time=pd.to_datetime(readline(3), format='DATE = %Y %b %d %H:%M(UTC)')
df.set_index('freq',inplace=True,drop=False)
time=np.repeat(time,len(df.index), axis = 0)
df['time']=time
self.data.append(df)
def _toDataFrame(self):
#print(self.data)
return self.data
avg_line_length: 25.292517 | max_line_length: 101 | alphanum_fraction: 0.517483
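The _read_WAVE parser above skips the first three lines, parses line 4 as "DATE = ..." with the format ' %Y-%m-%d %H:%M(UTC)', keeps lines 5-7 as strings, and converts later "NAME = value" pairs to floats where possible. The snippet below exercises those same rules on synthetic lines; the field names and values are invented for illustration and do not come from a real TRYAXIS file.

```python
import numpy as np
import pandas as pd

# Synthetic lines laid out the way _read_WAVE expects them.
lines = [
    "HEADER 1", "HEADER 2", "HEADER 3",        # skipped (count <= 3)
    "DATE = 2021-01-01 12:00(UTC)",            # count == 4: parsed as a timestamp
    "BUOY ID = 00001",                         # counts 5-7: kept as strings
    "LOCATION = somewhere",
    "MODE = normal",
    "SIGNIFICANT WAVE HEIGHT = 1.23",          # count > 7: converted to float
]
wave = {}
for count, line in enumerate(lines, start=1):
    if count > 3:
        name, value = line.split('=')
        if count == 4:
            value = pd.to_datetime(value.rstrip(), format=' %Y-%m-%d %H:%M(UTC)')
        elif count > 7:
            try:
                value = float(value)
            except ValueError:
                value = np.nan
        else:
            value = value.rstrip()
        wave[name.rstrip().replace(' ', '_')] = value
print(wave)   # {'DATE': Timestamp(...), 'BUOY_ID': ' 00001', ..., 'SIGNIFICANT_WAVE_HEIGHT': 1.23}
```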
hexsha: 4a1d150da9071483d198cd1faf0ca308eea8e0e7 | size: 13,458 | ext: py | lang: Python
max_stars: path=desktop/core/ext-py/lxml-3.3.6/setupinfo.py, repo=kokosing/hue, head=2307f5379a35aae9be871e836432e6f45138b3d9, licenses=["Apache-2.0"], count=5,079, events 2015-01-01T03:39:46.000Z to 2022-03-31T07:38:22.000Z
max_issues: path=desktop/core/ext-py/lxml-3.3.6/setupinfo.py, repo=zks888/hue, head=93a8c370713e70b216c428caa2f75185ef809deb, licenses=["Apache-2.0"], count=1,623, events 2015-01-01T08:06:24.000Z to 2022-03-30T19:48:52.000Z
max_forks: path=desktop/core/ext-py/lxml-3.3.6/setupinfo.py, repo=zks888/hue, head=93a8c370713e70b216c428caa2f75185ef809deb, licenses=["Apache-2.0"], count=2,033, events 2015-01-04T07:18:02.000Z to 2022-03-28T19:55:47.000Z
content:
import sys, os, os.path
from distutils.core import Extension
from distutils.errors import DistutilsOptionError
from versioninfo import get_base_dir, split_version
try:
from Cython.Distutils import build_ext as build_pyx
import Cython.Compiler.Version
CYTHON_INSTALLED = True
except ImportError:
CYTHON_INSTALLED = False
EXT_MODULES = ["lxml.etree", "lxml.objectify"]
PACKAGE_PATH = "src%slxml%s" % (os.path.sep, os.path.sep)
INCLUDE_PACKAGE_PATH = PACKAGE_PATH + 'includes'
if sys.version_info[0] >= 3:
_system_encoding = sys.getdefaultencoding()
if _system_encoding is None:
_system_encoding = "iso-8859-1" # :-)
def decode_input(data):
if isinstance(data, str):
return data
return data.decode(_system_encoding)
else:
def decode_input(data):
return data
def env_var(name):
value = os.getenv(name)
if value:
value = decode_input(value)
if sys.platform == 'win32' and ';' in value:
return value.split(';')
else:
return value.split()
else:
return []
def ext_modules(static_include_dirs, static_library_dirs,
static_cflags, static_binaries):
global XML2_CONFIG, XSLT_CONFIG
if OPTION_BUILD_LIBXML2XSLT:
from buildlibxml import build_libxml2xslt, get_prebuilt_libxml2xslt
if sys.platform.startswith('win'):
get_prebuilt_libxml2xslt(
OPTION_DOWNLOAD_DIR, static_include_dirs, static_library_dirs)
else:
XML2_CONFIG, XSLT_CONFIG = build_libxml2xslt(
OPTION_DOWNLOAD_DIR, 'build/tmp',
static_include_dirs, static_library_dirs,
static_cflags, static_binaries,
libiconv_version=OPTION_LIBICONV_VERSION,
libxml2_version=OPTION_LIBXML2_VERSION,
libxslt_version=OPTION_LIBXSLT_VERSION,
multicore=OPTION_MULTICORE)
if OPTION_WITHOUT_OBJECTIFY:
modules = [ entry for entry in EXT_MODULES
if 'objectify' not in entry ]
else:
modules = EXT_MODULES
c_files_exist = [ os.path.exists('%s%s.c' % (PACKAGE_PATH, module)) for module in modules ]
if CYTHON_INSTALLED and (OPTION_WITH_CYTHON or False in c_files_exist):
source_extension = ".pyx"
print("Building with Cython %s." % Cython.Compiler.Version.version)
# generate module cleanup code
from Cython.Compiler import Options
Options.generate_cleanup_code = 3
Options.clear_to_none = False
elif not OPTION_WITHOUT_CYTHON and False in c_files_exist:
for exists, module in zip(c_files_exist, modules):
if not exists:
raise RuntimeError(
"ERROR: Trying to build without Cython, but pre-generated '%s%s.c' "
"is not available (pass --without-cython to ignore this error)." % (
PACKAGE_PATH, module))
else:
if False in c_files_exist:
for exists, module in zip(c_files_exist, modules):
if not exists:
print("WARNING: Trying to build without Cython, but pre-generated "
"'%s%s.c' is not available." % (PACKAGE_PATH, module))
source_extension = ".c"
print("Building without Cython.")
lib_versions = get_library_versions()
if lib_versions[0]:
print("Using build configuration of libxml2 %s and libxslt %s" %
lib_versions)
else:
print("Using build configuration of libxslt %s" %
lib_versions[1])
_include_dirs = include_dirs(static_include_dirs)
_library_dirs = library_dirs(static_library_dirs)
_cflags = cflags(static_cflags)
_define_macros = define_macros()
_libraries = libraries()
_include_dirs.append(os.path.join(get_base_dir(), INCLUDE_PACKAGE_PATH))
if _library_dirs:
message = "Building against libxml2/libxslt in "
if len(_library_dirs) > 1:
print(message + "one of the following directories:")
for dir in _library_dirs:
print(" " + dir)
else:
print(message + "the following directory: " +
_library_dirs[0])
if OPTION_AUTO_RPATH:
runtime_library_dirs = _library_dirs
else:
runtime_library_dirs = []
if CYTHON_INSTALLED and OPTION_SHOW_WARNINGS:
from Cython.Compiler import Errors
Errors.LEVEL = 0
result = []
for module in modules:
main_module_source = PACKAGE_PATH + module + source_extension
result.append(
Extension(
module,
sources = [main_module_source],
depends = find_dependencies(module),
extra_compile_args = _cflags,
extra_objects = static_binaries,
define_macros = _define_macros,
include_dirs = _include_dirs,
library_dirs = _library_dirs,
runtime_library_dirs = runtime_library_dirs,
libraries = _libraries,
))
if CYTHON_INSTALLED and OPTION_WITH_CYTHON_GDB:
for ext in result:
ext.cython_gdb = True
if CYTHON_INSTALLED and source_extension == '.pyx':
# build .c files right now and convert Extension() objects
from Cython.Build import cythonize
result = cythonize(result)
return result
def find_dependencies(module):
if not CYTHON_INSTALLED:
return []
base_dir = get_base_dir()
package_dir = os.path.join(base_dir, PACKAGE_PATH)
includes_dir = os.path.join(base_dir, INCLUDE_PACKAGE_PATH)
pxd_files = [ os.path.join(includes_dir, filename)
for filename in os.listdir(includes_dir)
if filename.endswith('.pxd') ]
if 'etree' in module:
pxi_files = [ os.path.join(PACKAGE_PATH, filename)
for filename in os.listdir(package_dir)
if filename.endswith('.pxi')
and 'objectpath' not in filename ]
pxd_files = [ filename for filename in pxd_files
if 'etreepublic' not in filename ]
elif 'objectify' in module:
pxi_files = [ os.path.join(PACKAGE_PATH, 'objectpath.pxi') ]
else:
pxi_files = []
return pxd_files + pxi_files
def extra_setup_args():
result = {}
if CYTHON_INSTALLED:
result['cmdclass'] = {'build_ext': build_pyx}
return result
def libraries():
if sys.platform in ('win32',):
libs = ['libxslt', 'libexslt', 'libxml2', 'iconv']
if OPTION_STATIC:
libs = ['%s_a' % lib for lib in libs]
libs.extend(['zlib', 'WS2_32'])
elif OPTION_STATIC:
libs = ['z', 'm']
else:
libs = ['xslt', 'exslt', 'xml2', 'z', 'm']
return libs
def library_dirs(static_library_dirs):
if OPTION_STATIC:
if not static_library_dirs:
static_library_dirs = env_var('LIBRARY')
assert static_library_dirs, "Static build not configured, see doc/build.txt"
return static_library_dirs
# filter them from xslt-config --libs
result = []
possible_library_dirs = flags('libs')
for possible_library_dir in possible_library_dirs:
if possible_library_dir.startswith('-L'):
result.append(possible_library_dir[2:])
return result
def include_dirs(static_include_dirs):
if OPTION_STATIC:
if not static_include_dirs:
static_include_dirs = env_var('INCLUDE')
return static_include_dirs
# filter them from xslt-config --cflags
result = []
possible_include_dirs = flags('cflags')
for possible_include_dir in possible_include_dirs:
if possible_include_dir.startswith('-I'):
result.append(possible_include_dir[2:])
return result
def cflags(static_cflags):
result = []
if not OPTION_SHOW_WARNINGS:
result.append('-w')
if OPTION_DEBUG_GCC:
result.append('-g2')
if OPTION_STATIC:
if not static_cflags:
static_cflags = env_var('CFLAGS')
result.extend(static_cflags)
else:
# anything from xslt-config --cflags that doesn't start with -I
possible_cflags = flags('cflags')
for possible_cflag in possible_cflags:
if not possible_cflag.startswith('-I'):
result.append(possible_cflag)
if sys.platform in ('darwin',):
for opt in result:
if 'flat_namespace' in opt:
break
else:
result.append('-flat_namespace')
return result
def define_macros():
macros = []
if OPTION_WITHOUT_ASSERT:
macros.append(('PYREX_WITHOUT_ASSERTIONS', None))
if OPTION_WITHOUT_THREADING:
macros.append(('WITHOUT_THREADING', None))
if OPTION_WITH_REFNANNY:
macros.append(('CYTHON_REFNANNY', None))
if OPTION_WITH_UNICODE_STRINGS:
macros.append(('LXML_UNICODE_STRINGS', '1'))
return macros
_ERROR_PRINTED = False
def run_command(cmd, *args):
if not cmd:
return ''
if args:
cmd = ' '.join((cmd,) + args)
try:
import subprocess
except ImportError:
# Python 2.3
sf, rf, ef = os.popen3(cmd)
sf.close()
errors = ef.read()
stdout_data = rf.read()
else:
# Python 2.4+
p = subprocess.Popen(cmd, shell=True,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout_data, errors = p.communicate()
global _ERROR_PRINTED
if errors and not _ERROR_PRINTED:
_ERROR_PRINTED = True
print("ERROR: %s" % errors)
print("** make sure the development packages of libxml2 and libxslt are installed **\n")
return decode_input(stdout_data).strip()
def get_library_versions():
xml2_version = run_command(find_xml2_config(), "--version")
xslt_version = run_command(find_xslt_config(), "--version")
return xml2_version, xslt_version
def flags(option):
xml2_flags = run_command(find_xml2_config(), "--%s" % option)
xslt_flags = run_command(find_xslt_config(), "--%s" % option)
flag_list = xml2_flags.split()
for flag in xslt_flags.split():
if flag not in flag_list:
flag_list.append(flag)
return flag_list
XSLT_CONFIG = None
XML2_CONFIG = None
def find_xml2_config():
global XML2_CONFIG
if XML2_CONFIG:
return XML2_CONFIG
option = '--with-xml2-config='
for arg in sys.argv:
if arg.startswith(option):
sys.argv.remove(arg)
XML2_CONFIG = arg[len(option):]
return XML2_CONFIG
else:
# default: do nothing, rely only on xslt-config
XML2_CONFIG = os.getenv('XML2_CONFIG', '')
return XML2_CONFIG
def find_xslt_config():
global XSLT_CONFIG
if XSLT_CONFIG:
return XSLT_CONFIG
option = '--with-xslt-config='
for arg in sys.argv:
if arg.startswith(option):
sys.argv.remove(arg)
XSLT_CONFIG = arg[len(option):]
return XSLT_CONFIG
else:
XSLT_CONFIG = os.getenv('XSLT_CONFIG', 'xslt-config')
return XSLT_CONFIG
## Option handling:
def has_option(name):
try:
sys.argv.remove('--%s' % name)
return True
except ValueError:
pass
# allow passing all cmd line options also as environment variables
env_val = os.getenv(name.upper().replace('-', '_'), 'false').lower()
if env_val == "true":
return True
return False
def option_value(name):
for index, option in enumerate(sys.argv):
if option == '--' + name:
if index+1 >= len(sys.argv):
raise DistutilsOptionError(
'The option %s requires a value' % option)
value = sys.argv[index+1]
sys.argv[index:index+2] = []
return value
if option.startswith('--' + name + '='):
value = option[len(name)+3:]
sys.argv[index:index+1] = []
return value
env_val = os.getenv(name.upper().replace('-', '_'))
return env_val
staticbuild = bool(os.environ.get('STATICBUILD', ''))
# pick up any commandline options and/or env variables
OPTION_WITHOUT_OBJECTIFY = has_option('without-objectify')
OPTION_WITH_UNICODE_STRINGS = has_option('with-unicode-strings')
OPTION_WITHOUT_ASSERT = has_option('without-assert')
OPTION_WITHOUT_THREADING = has_option('without-threading')
OPTION_WITHOUT_CYTHON = has_option('without-cython')
OPTION_WITH_CYTHON = has_option('with-cython')
OPTION_WITH_CYTHON_GDB = has_option('cython-gdb')
OPTION_WITH_REFNANNY = has_option('with-refnanny')
if OPTION_WITHOUT_CYTHON:
CYTHON_INSTALLED = False
OPTION_STATIC = staticbuild or has_option('static')
OPTION_DEBUG_GCC = has_option('debug-gcc')
OPTION_SHOW_WARNINGS = has_option('warnings')
OPTION_AUTO_RPATH = has_option('auto-rpath')
OPTION_BUILD_LIBXML2XSLT = staticbuild or has_option('static-deps')
if OPTION_BUILD_LIBXML2XSLT:
OPTION_STATIC = True
OPTION_LIBXML2_VERSION = option_value('libxml2-version')
OPTION_LIBXSLT_VERSION = option_value('libxslt-version')
OPTION_LIBICONV_VERSION = option_value('libiconv-version')
OPTION_MULTICORE = option_value('multicore')
OPTION_DOWNLOAD_DIR = option_value('download-dir')
if OPTION_DOWNLOAD_DIR is None:
OPTION_DOWNLOAD_DIR = 'libs'
avg_line_length: 34.507692 | max_line_length: 96 | alphanum_fraction: 0.638356
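In setupinfo.py above, has_option() and option_value() consume lxml-specific flags from sys.argv (or matching environment variables) before distutils parses the command line. The demonstration below uses simplified copies of those two helpers and example argv values; it is a sketch of the mechanism, not the full implementation.

```python
import sys

def has_option(name):
    # Simplified copy of setupinfo.has_option: consume "--<name>" if present.
    try:
        sys.argv.remove('--%s' % name)
        return True
    except ValueError:
        return False

def option_value(name):
    # Simplified copy of setupinfo.option_value: consume "--<name>=<value>".
    prefix = '--' + name + '='
    for index, option in enumerate(sys.argv):
        if option.startswith(prefix):
            value = option[len(prefix):]
            sys.argv[index:index + 1] = []
            return value
    return None

sys.argv = ['setup.py', 'build', '--static-deps', '--libxml2-version=2.9.1']
print(has_option('static-deps'))        # True; the flag is removed from argv
print(option_value('libxml2-version'))  # '2.9.1'; also removed from argv
print(sys.argv)                         # ['setup.py', 'build']
```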
hexsha: 4a1d157bc7b6b1b8970a80b3b6f282798c3d894c | size: 962 | ext: py | lang: Python
max_stars: path=kubernetes/test/test_v1_replica_set_status.py, repo=fooka03/python, head=073cf4d89e532f92b57e8955b4efc3d5d5eb80cf, licenses=["Apache-2.0"], count=2, events 2020-07-02T05:47:41.000Z to 2020-07-02T05:50:34.000Z
max_issues: path=kubernetes/test/test_v1_replica_set_status.py, repo=fooka03/python, head=073cf4d89e532f92b57e8955b4efc3d5d5eb80cf, licenses=["Apache-2.0"], count=1, events 2021-03-25T23:44:49.000Z to 2021-03-25T23:44:49.000Z
max_forks: path=k8sdeployment/k8sstat/python/kubernetes/test/test_v1_replica_set_status.py, repo=JeffYFHuang/gpuaccounting, head=afa934350ebbd0634beb60b9df4a147426ea0006, licenses=["MIT"], count=1, events 2021-10-13T17:45:37.000Z to 2021-10-13T17:45:37.000Z
content:
# coding: utf-8
"""
Kubernetes
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
OpenAPI spec version: v1.15.6
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import kubernetes.client
from kubernetes.client.models.v1_replica_set_status import V1ReplicaSetStatus # noqa: E501
from kubernetes.client.rest import ApiException
class TestV1ReplicaSetStatus(unittest.TestCase):
"""V1ReplicaSetStatus unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testV1ReplicaSetStatus(self):
"""Test V1ReplicaSetStatus"""
# FIXME: construct object with mandatory attributes with example values
# model = kubernetes.client.models.v1_replica_set_status.V1ReplicaSetStatus() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
avg_line_length: 24.05 | max_line_length: 124 | alphanum_fraction: 0.724532
hexsha: 4a1d1587442fa04909d688da3be7719da0df6155 | size: 3,960 | ext: py | lang: Python
max_stars: path=readthedocs/core/templatetags/core_tags.py, repo=tkoyama010/readthedocs.org, head=aac8fb39586db902d9fbb51b639dd281c819dae2, licenses=["MIT"], count=19, events 2018-03-28T12:28:35.000Z to 2022-02-14T20:09:42.000Z
max_issues: path=readthedocs/core/templatetags/core_tags.py, repo=tkoyama010/readthedocs.org, head=aac8fb39586db902d9fbb51b639dd281c819dae2, licenses=["MIT"], count=274, events 2017-10-10T07:59:04.000Z to 2022-03-12T00:56:03.000Z
max_forks: path=readthedocs/core/templatetags/core_tags.py, repo=tkoyama010/readthedocs.org, head=aac8fb39586db902d9fbb51b639dd281c819dae2, licenses=["MIT"], count=13, events 2018-04-03T09:49:50.000Z to 2021-04-18T22:04:15.000Z
content:
"""Template tags for core app."""
import hashlib
import json
from urllib.parse import urlencode
from django import template
from django.conf import settings
from django.core.serializers.json import DjangoJSONEncoder
from django.utils.encoding import force_bytes, force_text
from django.utils.safestring import mark_safe
from readthedocs import __version__
from readthedocs.core.resolver import resolve
from readthedocs.projects.models import Project
register = template.Library()
@register.filter
def gravatar(email, size=48):
"""
    Hacked from djangosnippets.org, but basically: given an email address,
    render an img tag with the hashed up bits needed for leetness
omgwtfstillreading
"""
url = 'http://www.gravatar.com/avatar.php?%s' % urlencode({
'gravatar_id': hashlib.md5(email).hexdigest(),
'size': str(size),
})
return (
'<img src="%s" width="%s" height="%s" alt="gravatar" '
'class="gravatar" border="0" />' % (url, size, size)
)
@register.simple_tag(name='doc_url')
def make_document_url(project, version=None, page=''):
if not project:
return ''
return resolve(project=project, version_slug=version, filename=page)
@register.filter(is_safe=True)
def restructuredtext(value, short=False):
try:
from docutils.core import publish_parts
from docutils import ApplicationError
except ImportError:
if settings.DEBUG:
raise template.TemplateSyntaxError(
"Error in 'restructuredtext' filter: "
"The Python docutils library isn't installed.",
)
return force_text(value)
else:
docutils_settings = {
'raw_enabled': False,
'file_insertion_enabled': False,
}
docutils_settings.update(
settings.RESTRUCTUREDTEXT_FILTER_SETTINGS,
)
try:
parts = publish_parts(
source=force_bytes(value),
writer_name='html4css1',
settings_overrides=docutils_settings,
)
except ApplicationError:
return force_text(value)
out = force_text(parts['fragment'])
try:
if short:
out = out.split('\n')[0]
except IndexError:
pass
return mark_safe(out)
@register.filter
def get_project(slug):
try:
return Project.objects.get(slug=slug)
except Project.DoesNotExist:
return None
@register.filter
def get_version(slug):
try:
return Project.objects.get(slug=slug)
except Project.DoesNotExist:
return None
@register.simple_tag
def url_replace(request, field, value):
dict_ = request.GET.copy()
dict_[field] = value
return dict_.urlencode()
@register.filter
def key(d, key_name):
return d[key_name]
@register.filter
def get_key_or_none(d, key_name):
try:
return d[key_name]
except KeyError:
return None
@register.simple_tag
def readthedocs_version():
return __version__
@register.filter
def escapejson(data, indent=None):
"""
Escape JSON correctly for inclusion in Django templates.
This code was mostly taken from Django's implementation
https://docs.djangoproject.com/en/2.2/ref/templates/builtins/#json-script
https://github.com/django/django/blob/2.2.2/django/utils/html.py#L74-L92
After upgrading to Django 2.1+, we could replace this with Django's implementation
although the inputs and outputs are a bit different.
Example:
var jsvar = {{ dictionary_value | escapejson }}
"""
if indent:
indent = int(indent)
_json_script_escapes = {
ord('>'): '\\u003E',
ord('<'): '\\u003C',
ord('&'): '\\u0026',
}
return mark_safe(
json.dumps(
data,
cls=DjangoJSONEncoder,
indent=indent,
).translate(_json_script_escapes))
avg_line_length: 25.714286 | max_line_length: 86 | alphanum_fraction: 0.644697
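The escapejson filter above escapes '<', '>' and '&' so that serialized JSON can sit safely inside a <script> block. The short demonstration below applies the same translation table to an invented payload.

```python
import json

# Same escape table as the escapejson filter.
_json_script_escapes = {
    ord('>'): '\\u003E',
    ord('<'): '\\u003C',
    ord('&'): '\\u0026',
}
payload = {'html': '</script><script>alert(1)</script> & more'}
print(json.dumps(payload).translate(_json_script_escapes))
# {"html": "\u003C/script\u003E\u003Cscript\u003Ealert(1)\u003C/script\u003E \u0026 more"}
```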
hexsha: 4a1d15bf085ec76cad25bad754538e04c5e0a95b | size: 2,951 | ext: py | lang: Python
max_stars: path=numba/exttypes/ordering.py, repo=liuzhenhai/numba, head=855a2b262ae3d82bd6ac1c3e1c0acb36ee2e2acf, licenses=["BSD-2-Clause"], count=1, events 2015-01-29T06:52:36.000Z to 2015-01-29T06:52:36.000Z
max_issues: path=numba/exttypes/ordering.py, repo=shiquanwang/numba, head=a41c85fdd7d6abf8ea1ebe9116939ddc2217193b, licenses=["BSD-2-Clause"], count=null, events null
max_forks: path=numba/exttypes/ordering.py, repo=shiquanwang/numba, head=a41c85fdd7d6abf8ea1ebe9116939ddc2217193b, licenses=["BSD-2-Clause"], count=null, events null
content:
# -*- coding: utf-8 -*-
"""
This module defines ordering schemes for virtual methods and attributes.
If we use hash-based virtual (method/attribute) tables, we don't care about
the ordering. If we're using a C++ like virtual method/attribute table (like
normal Python extension types do for attributes), we need to have a layout
compatible with base classes (i.e. we may only add more attributes, but not
reorder any existing ones).
"""
from __future__ import print_function, division, absolute_import
from numba.traits import traits, Delegate
from numba import error
#------------------------------------------------------------------------
# Virtual Tables
#------------------------------------------------------------------------
@traits
class AbstractTable(object):
# Ordered attribute names
attributes = None
# Dict mapping attribute names to attribute entities
attrdict = None
py_class = Delegate('table')
def __init__(self, table):
self.table = table
@property
def parents(self):
cls = type(self)
return list(map(cls, self.table.parents))
@traits
class VTable(AbstractTable):
attributes = Delegate('table', 'methodnames')
attrdict = Delegate('table', 'methoddict')
@traits
class AttributeTable(AbstractTable):
attributes = Delegate('table', 'attributes')
attrdict = Delegate('table', 'attributedict')
#------------------------------------------------------------------------
# Table Entry Ordering (Virtual Method / Attribute Ordering)
#------------------------------------------------------------------------
def sort_parents(table):
"Sort parent tables by size"
return sorted(table.parents, key=lambda tab: len(tab.attrdict))
def unordered(table):
"Return table entities in a random order"
return list(table.attrdict)
def extending(table):
"""
Order the table entities according to the given parent tables, i.e.
we can only extend existing tables.
"""
if not table.parents:
return unordered(table)
parents = sort_parents(table)
biggest_table = parents[-1]
appending_attributes = set(table.attrdict) - set(biggest_table.attributes)
return biggest_table.attributes + list(appending_attributes)
# ______________________________________________________________________
# Validate Table Ordering
def validate_extending_order_compatibility(table):
parents = sort_parents(table)
tables = parents + [table]
for table_smaller, table_bigger in zip(tables, tables[1:]):
names1 = table_smaller.attributes
names2 = table_bigger.attributes[:len(table_smaller.attributes)]
if names1 != names2:
raise error.NumbaError(
"Cannot create compatible attribute or method ordering for "
"base classes '%s' and '%s'" % (
table_smaller.py_class.__name__,
table_bigger.py_class.__name__))
avg_line_length: 30.112245 | max_line_length: 78 | alphanum_fraction: 0.634361
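The extending() ordering above is the core idea of this module: a subclass table must keep the largest parent table's attribute order as a prefix and may only append new entries. The illustration below uses stand-in tables; FakeTable and the simplified extending() are invented for this sketch.

```python
# Stand-in for VTable/AttributeTable: ordered attribute names plus parent tables.
class FakeTable:
    def __init__(self, attributes, parents=()):
        self.attributes = list(attributes)
        self.attrdict = dict.fromkeys(attributes)
        self.parents = list(parents)

def extending(table):
    # Same logic as ordering.extending(): extend the biggest parent table.
    if not table.parents:
        return list(table.attrdict)
    biggest = max(table.parents, key=lambda t: len(t.attrdict))
    appended = set(table.attrdict) - set(biggest.attributes)
    return biggest.attributes + list(appended)

base = FakeTable(['foo', 'bar'])
derived = FakeTable(['foo', 'bar', 'baz'], parents=[base])
print(extending(derived))  # ['foo', 'bar', 'baz'] -- base prefix kept, 'baz' appended
```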
hexsha: 4a1d1645617654267502c9545e2ecbdcec3b1a28 | size: 1,197 | ext: py | lang: Python
max_stars: path=stdplugins/sicklet.py, repo=dqanshi/PornHub, head=162a7053ca7f2c0b3617b852559cfaf0502d94a7, licenses=["Apache-2.0"], count=55, events 2019-07-13T15:57:54.000Z to 2021-09-20T16:50:42.000Z
max_issues: path=stdplugins/sicklet.py, repo=dqanshi/PornHub, head=162a7053ca7f2c0b3617b852559cfaf0502d94a7, licenses=["Apache-2.0"], count=3, events 2020-04-15T02:08:53.000Z to 2020-06-06T13:45:18.000Z
max_forks: path=stdplugins/sicklet.py, repo=dqanshi/PornHub, head=162a7053ca7f2c0b3617b852559cfaf0502d94a7, licenses=["Apache-2.0"], count=450, events 2019-07-12T13:18:41.000Z to 2022-03-29T18:47:42.000Z
content:
import io
import textwrap
from PIL import Image, ImageDraw, ImageFont
from uniborg.util import admin_cmd
@borg.on(admin_cmd(pattern="slet (.*)"))
async def sticklet(event):
sticktext = event.pattern_match.group(1)
if not sticktext:
await event.edit("`I need text to sticklet!`")
return
await event.delete()
sticktext = textwrap.wrap(sticktext, width=10)
sticktext = '\n'.join(sticktext)
image = Image.new("RGBA", (512, 512), (255, 255, 255, 0))
draw = ImageDraw.Draw(image)
fontsize = 230
font = ImageFont.truetype("/usr/share/fonts/truetype/dejavu/DejaVuSansMono.ttf", size=fontsize)
while draw.multiline_textsize(sticktext, font=font) > (512, 512):
fontsize -= 3
font = ImageFont.truetype("/usr/share/fonts/truetype/dejavu/DejaVuSansMono.ttf", size=fontsize)
width, height = draw.multiline_textsize(sticktext, font=font)
draw.multiline_text(((512-width)/2,(512-height)/2), sticktext, font=font, fill="white")
image_stream = io.BytesIO()
image_stream.name = "sticker.webp"
image.save(image_stream, "WebP")
image_stream.seek(0)
await event.client.send_file(event.chat_id, image_stream)
avg_line_length: 30.692308 | max_line_length: 103 | alphanum_fraction: 0.691729
hexsha: 4a1d16bb15c7f1564e610cbaa2318bde12c6dbd2 | size: 15,834 | ext: py | lang: Python
max_stars: path=samples/client/petstore/python/petstore_api/models/format_test.py, repo=vithu30/openapi-generator, head=609b7cb21a9673cc1dfddcc3187679ed25fc0662, licenses=["Apache-2.0"], count=null, events null
max_issues: path=samples/client/petstore/python/petstore_api/models/format_test.py, repo=vithu30/openapi-generator, head=609b7cb21a9673cc1dfddcc3187679ed25fc0662, licenses=["Apache-2.0"], count=null, events null
max_forks: path=samples/client/petstore/python/petstore_api/models/format_test.py, repo=vithu30/openapi-generator, head=609b7cb21a9673cc1dfddcc3187679ed25fc0662, licenses=["Apache-2.0"], count=null, events null
content:
# coding: utf-8
"""
OpenAPI Petstore
This spec is mainly for testing Petstore server and contains fake endpoints, models. Please do not use this for any other purpose. Special characters: \" \\ # noqa: E501
The version of the OpenAPI document: 1.0.0
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from petstore_api.configuration import Configuration
class FormatTest(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'integer': 'int',
'int32': 'int',
'int64': 'int',
'number': 'float',
'float': 'float',
'double': 'float',
'string': 'str',
'byte': 'str',
'binary': 'file',
'date': 'date',
'date_time': 'datetime',
'uuid': 'str',
'password': 'str',
'big_decimal': 'Decimal'
}
attribute_map = {
'integer': 'integer',
'int32': 'int32',
'int64': 'int64',
'number': 'number',
'float': 'float',
'double': 'double',
'string': 'string',
'byte': 'byte',
'binary': 'binary',
'date': 'date',
'date_time': 'dateTime',
'uuid': 'uuid',
'password': 'password',
'big_decimal': 'BigDecimal'
}
def __init__(self, integer=None, int32=None, int64=None, number=None, float=None, double=None, string=None, byte=None, binary=None, date=None, date_time=None, uuid=None, password=None, big_decimal=None, local_vars_configuration=None): # noqa: E501
"""FormatTest - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._integer = None
self._int32 = None
self._int64 = None
self._number = None
self._float = None
self._double = None
self._string = None
self._byte = None
self._binary = None
self._date = None
self._date_time = None
self._uuid = None
self._password = None
self._big_decimal = None
self.discriminator = None
if integer is not None:
self.integer = integer
if int32 is not None:
self.int32 = int32
if int64 is not None:
self.int64 = int64
self.number = number
if float is not None:
self.float = float
if double is not None:
self.double = double
if string is not None:
self.string = string
self.byte = byte
if binary is not None:
self.binary = binary
self.date = date
if date_time is not None:
self.date_time = date_time
if uuid is not None:
self.uuid = uuid
self.password = password
if big_decimal is not None:
self.big_decimal = big_decimal
@property
def integer(self):
"""Gets the integer of this FormatTest. # noqa: E501
:return: The integer of this FormatTest. # noqa: E501
:rtype: int
"""
return self._integer
@integer.setter
def integer(self, integer):
"""Sets the integer of this FormatTest.
:param integer: The integer of this FormatTest. # noqa: E501
:type integer: int
"""
if (self.local_vars_configuration.client_side_validation and
integer is not None and integer > 100): # noqa: E501
raise ValueError("Invalid value for `integer`, must be a value less than or equal to `100`") # noqa: E501
if (self.local_vars_configuration.client_side_validation and
integer is not None and integer < 10): # noqa: E501
raise ValueError("Invalid value for `integer`, must be a value greater than or equal to `10`") # noqa: E501
self._integer = integer
@property
def int32(self):
"""Gets the int32 of this FormatTest. # noqa: E501
:return: The int32 of this FormatTest. # noqa: E501
:rtype: int
"""
return self._int32
@int32.setter
def int32(self, int32):
"""Sets the int32 of this FormatTest.
:param int32: The int32 of this FormatTest. # noqa: E501
:type int32: int
"""
if (self.local_vars_configuration.client_side_validation and
int32 is not None and int32 > 200): # noqa: E501
raise ValueError("Invalid value for `int32`, must be a value less than or equal to `200`") # noqa: E501
if (self.local_vars_configuration.client_side_validation and
int32 is not None and int32 < 20): # noqa: E501
raise ValueError("Invalid value for `int32`, must be a value greater than or equal to `20`") # noqa: E501
self._int32 = int32
@property
def int64(self):
"""Gets the int64 of this FormatTest. # noqa: E501
:return: The int64 of this FormatTest. # noqa: E501
:rtype: int
"""
return self._int64
@int64.setter
def int64(self, int64):
"""Sets the int64 of this FormatTest.
:param int64: The int64 of this FormatTest. # noqa: E501
:type int64: int
"""
self._int64 = int64
@property
def number(self):
"""Gets the number of this FormatTest. # noqa: E501
:return: The number of this FormatTest. # noqa: E501
:rtype: float
"""
return self._number
@number.setter
def number(self, number):
"""Sets the number of this FormatTest.
:param number: The number of this FormatTest. # noqa: E501
:type number: float
"""
if self.local_vars_configuration.client_side_validation and number is None: # noqa: E501
raise ValueError("Invalid value for `number`, must not be `None`") # noqa: E501
if (self.local_vars_configuration.client_side_validation and
number is not None and number > 543.2): # noqa: E501
raise ValueError("Invalid value for `number`, must be a value less than or equal to `543.2`") # noqa: E501
if (self.local_vars_configuration.client_side_validation and
number is not None and number < 32.1): # noqa: E501
raise ValueError("Invalid value for `number`, must be a value greater than or equal to `32.1`") # noqa: E501
self._number = number
@property
def float(self):
"""Gets the float of this FormatTest. # noqa: E501
:return: The float of this FormatTest. # noqa: E501
:rtype: float
"""
return self._float
@float.setter
def float(self, float):
"""Sets the float of this FormatTest.
:param float: The float of this FormatTest. # noqa: E501
:type float: float
"""
if (self.local_vars_configuration.client_side_validation and
float is not None and float > 987.6): # noqa: E501
raise ValueError("Invalid value for `float`, must be a value less than or equal to `987.6`") # noqa: E501
if (self.local_vars_configuration.client_side_validation and
float is not None and float < 54.3): # noqa: E501
raise ValueError("Invalid value for `float`, must be a value greater than or equal to `54.3`") # noqa: E501
self._float = float
@property
def double(self):
"""Gets the double of this FormatTest. # noqa: E501
:return: The double of this FormatTest. # noqa: E501
:rtype: float
"""
return self._double
@double.setter
def double(self, double):
"""Sets the double of this FormatTest.
:param double: The double of this FormatTest. # noqa: E501
:type double: float
"""
if (self.local_vars_configuration.client_side_validation and
double is not None and double > 123.4): # noqa: E501
raise ValueError("Invalid value for `double`, must be a value less than or equal to `123.4`") # noqa: E501
if (self.local_vars_configuration.client_side_validation and
double is not None and double < 67.8): # noqa: E501
raise ValueError("Invalid value for `double`, must be a value greater than or equal to `67.8`") # noqa: E501
self._double = double
@property
def string(self):
"""Gets the string of this FormatTest. # noqa: E501
:return: The string of this FormatTest. # noqa: E501
:rtype: str
"""
return self._string
@string.setter
def string(self, string):
"""Sets the string of this FormatTest.
:param string: The string of this FormatTest. # noqa: E501
:type string: str
"""
if (self.local_vars_configuration.client_side_validation and
string is not None and not re.search(r'[a-z]', string, flags=re.IGNORECASE)): # noqa: E501
raise ValueError(r"Invalid value for `string`, must be a follow pattern or equal to `/[a-z]/i`") # noqa: E501
self._string = string
@property
def byte(self):
"""Gets the byte of this FormatTest. # noqa: E501
:return: The byte of this FormatTest. # noqa: E501
:rtype: str
"""
return self._byte
@byte.setter
def byte(self, byte):
"""Sets the byte of this FormatTest.
:param byte: The byte of this FormatTest. # noqa: E501
:type byte: str
"""
if self.local_vars_configuration.client_side_validation and byte is None: # noqa: E501
raise ValueError("Invalid value for `byte`, must not be `None`") # noqa: E501
if (self.local_vars_configuration.client_side_validation and
byte is not None and not re.search(r'^(?:[A-Za-z0-9+\/]{4})*(?:[A-Za-z0-9+\/]{2}==|[A-Za-z0-9+\/]{3}=)?$', byte)): # noqa: E501
raise ValueError(r"Invalid value for `byte`, must be a follow pattern or equal to `/^(?:[A-Za-z0-9+\/]{4})*(?:[A-Za-z0-9+\/]{2}==|[A-Za-z0-9+\/]{3}=)?$/`") # noqa: E501
self._byte = byte
@property
def binary(self):
"""Gets the binary of this FormatTest. # noqa: E501
:return: The binary of this FormatTest. # noqa: E501
:rtype: file
"""
return self._binary
@binary.setter
def binary(self, binary):
"""Sets the binary of this FormatTest.
:param binary: The binary of this FormatTest. # noqa: E501
:type binary: file
"""
self._binary = binary
@property
def date(self):
"""Gets the date of this FormatTest. # noqa: E501
:return: The date of this FormatTest. # noqa: E501
:rtype: date
"""
return self._date
@date.setter
def date(self, date):
"""Sets the date of this FormatTest.
:param date: The date of this FormatTest. # noqa: E501
:type date: date
"""
if self.local_vars_configuration.client_side_validation and date is None: # noqa: E501
raise ValueError("Invalid value for `date`, must not be `None`") # noqa: E501
self._date = date
@property
def date_time(self):
"""Gets the date_time of this FormatTest. # noqa: E501
:return: The date_time of this FormatTest. # noqa: E501
:rtype: datetime
"""
return self._date_time
@date_time.setter
def date_time(self, date_time):
"""Sets the date_time of this FormatTest.
:param date_time: The date_time of this FormatTest. # noqa: E501
:type date_time: datetime
"""
self._date_time = date_time
@property
def uuid(self):
"""Gets the uuid of this FormatTest. # noqa: E501
:return: The uuid of this FormatTest. # noqa: E501
:rtype: str
"""
return self._uuid
@uuid.setter
def uuid(self, uuid):
"""Sets the uuid of this FormatTest.
:param uuid: The uuid of this FormatTest. # noqa: E501
:type uuid: str
"""
self._uuid = uuid
@property
def password(self):
"""Gets the password of this FormatTest. # noqa: E501
:return: The password of this FormatTest. # noqa: E501
:rtype: str
"""
return self._password
@password.setter
def password(self, password):
"""Sets the password of this FormatTest.
:param password: The password of this FormatTest. # noqa: E501
:type password: str
"""
if self.local_vars_configuration.client_side_validation and password is None: # noqa: E501
raise ValueError("Invalid value for `password`, must not be `None`") # noqa: E501
if (self.local_vars_configuration.client_side_validation and
password is not None and len(password) > 64):
raise ValueError("Invalid value for `password`, length must be less than or equal to `64`") # noqa: E501
if (self.local_vars_configuration.client_side_validation and
password is not None and len(password) < 10):
raise ValueError("Invalid value for `password`, length must be greater than or equal to `10`") # noqa: E501
self._password = password
@property
def big_decimal(self):
"""Gets the big_decimal of this FormatTest. # noqa: E501
:return: The big_decimal of this FormatTest. # noqa: E501
:rtype: Decimal
"""
return self._big_decimal
@big_decimal.setter
def big_decimal(self, big_decimal):
"""Sets the big_decimal of this FormatTest.
:param big_decimal: The big_decimal of this FormatTest. # noqa: E501
:type big_decimal: Decimal
"""
self._big_decimal = big_decimal
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, FormatTest):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, FormatTest):
return True
return self.to_dict() != other.to_dict()
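# Illustrative sketch (constructor arguments assumed from the generated
# petstore model and not shown in this excerpt): with client-side validation
# enabled, the setters above reject out-of-range values immediately.
#
#   ft = FormatTest(number=100.0, byte='Zm9v',
#                   date=datetime.date(2020, 1, 1),
#                   password='secretpassword')
#   ft.number = 600.0      # ValueError: must be <= 543.2
#   ft.password = 'short'  # ValueError: length must be >= 10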
| 31.354455 | 252 | 0.58267 |
4a1d1706f3af8c8e3e53e38be9ce291cd5d26919
| 6,623 |
py
|
Python
|
google/cloud/aiplatform_v1/services/prediction_service/transports/base.py
|
conankun/python-aiplatform
|
d6c1bce7e00186aa5ee3cd0e7b8712b21bd06f2a
|
[
"Apache-2.0"
] | 180 |
2020-09-23T17:21:15.000Z
|
2022-03-30T17:25:47.000Z
|
google/cloud/aiplatform_v1/services/prediction_service/transports/base.py
|
conankun/python-aiplatform
|
d6c1bce7e00186aa5ee3cd0e7b8712b21bd06f2a
|
[
"Apache-2.0"
] | 601 |
2020-09-23T16:23:44.000Z
|
2022-03-31T19:08:23.000Z
|
google/cloud/aiplatform_v1/services/prediction_service/transports/base.py
|
conankun/python-aiplatform
|
d6c1bce7e00186aa5ee3cd0e7b8712b21bd06f2a
|
[
"Apache-2.0"
] | 109 |
2020-09-23T16:22:04.000Z
|
2022-03-28T21:18:29.000Z
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import abc
from typing import Awaitable, Callable, Dict, Optional, Sequence, Union
import pkg_resources
import google.auth # type: ignore
import google.api_core
from google.api_core import exceptions as core_exceptions
from google.api_core import gapic_v1
from google.api_core import retry as retries
from google.auth import credentials as ga_credentials # type: ignore
from google.oauth2 import service_account # type: ignore
from google.api import httpbody_pb2 # type: ignore
from google.cloud.aiplatform_v1.types import prediction_service
try:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
gapic_version=pkg_resources.get_distribution(
"google-cloud-aiplatform",
).version,
)
except pkg_resources.DistributionNotFound:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
class PredictionServiceTransport(abc.ABC):
"""Abstract transport class for PredictionService."""
AUTH_SCOPES = ("https://www.googleapis.com/auth/cloud-platform",)
DEFAULT_HOST: str = "aiplatform.googleapis.com"
def __init__(
self,
*,
host: str = DEFAULT_HOST,
credentials: ga_credentials.Credentials = None,
credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
quota_project_id: Optional[str] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
always_use_jwt_access: Optional[bool] = False,
**kwargs,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is mutually exclusive with credentials.
scopes (Optional[Sequence[str]]): A list of scopes.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
always_use_jwt_access (Optional[bool]): Whether self signed JWT should
be used for service account credentials.
"""
# Save the hostname. Default to port 443 (HTTPS) if none is specified.
if ":" not in host:
host += ":443"
self._host = host
scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES}
# Save the scopes.
self._scopes = scopes
# If no credentials are provided, then determine the appropriate
# defaults.
if credentials and credentials_file:
raise core_exceptions.DuplicateCredentialArgs(
"'credentials_file' and 'credentials' are mutually exclusive"
)
if credentials_file is not None:
credentials, _ = google.auth.load_credentials_from_file(
credentials_file, **scopes_kwargs, quota_project_id=quota_project_id
)
elif credentials is None:
credentials, _ = google.auth.default(
**scopes_kwargs, quota_project_id=quota_project_id
)
# If the credentials are service account credentials, then always try to use self signed JWT.
if (
always_use_jwt_access
and isinstance(credentials, service_account.Credentials)
and hasattr(service_account.Credentials, "with_always_use_jwt_access")
):
credentials = credentials.with_always_use_jwt_access(True)
# Save the credentials.
self._credentials = credentials
def _prep_wrapped_messages(self, client_info):
# Precompute the wrapped methods.
self._wrapped_methods = {
self.predict: gapic_v1.method.wrap_method(
self.predict, default_timeout=None, client_info=client_info,
),
self.raw_predict: gapic_v1.method.wrap_method(
self.raw_predict, default_timeout=None, client_info=client_info,
),
self.explain: gapic_v1.method.wrap_method(
self.explain, default_timeout=None, client_info=client_info,
),
}
def close(self):
"""Closes resources associated with the transport.
.. warning::
Only call this method if the transport is NOT shared
with other clients - this may cause errors in other clients!
"""
raise NotImplementedError()
@property
def predict(
self,
) -> Callable[
[prediction_service.PredictRequest],
Union[
prediction_service.PredictResponse,
Awaitable[prediction_service.PredictResponse],
],
]:
raise NotImplementedError()
@property
def raw_predict(
self,
) -> Callable[
[prediction_service.RawPredictRequest],
Union[httpbody_pb2.HttpBody, Awaitable[httpbody_pb2.HttpBody]],
]:
raise NotImplementedError()
@property
def explain(
self,
) -> Callable[
[prediction_service.ExplainRequest],
Union[
prediction_service.ExplainResponse,
Awaitable[prediction_service.ExplainResponse],
],
]:
raise NotImplementedError()
__all__ = ("PredictionServiceTransport",)
| 36.794444 | 101 | 0.65499 |
4a1d1761a5bbe94aa73645d015fdefd47718bce7
| 557 |
py
|
Python
|
currency_api/apps/currency/management/commands/get_currencies.py
|
robypomoni/currency_api
|
987e5536179bb3db22f63b756730f72fabcdc619
|
[
"MIT"
] | null | null | null |
currency_api/apps/currency/management/commands/get_currencies.py
|
robypomoni/currency_api
|
987e5536179bb3db22f63b756730f72fabcdc619
|
[
"MIT"
] | 9 |
2020-02-11T21:48:21.000Z
|
2021-09-22T17:52:46.000Z
|
currency_api/apps/currency/management/commands/get_currencies.py
|
robypomoni/currency_api
|
987e5536179bb3db22f63b756730f72fabcdc619
|
[
"MIT"
] | null | null | null |
import requests
from django.core.management.base import BaseCommand
from ...models import Currency
class Command(BaseCommand):
help = 'Download currencies list from https://openexchangerates.org'
def handle(self, *args, **options):
response = requests.get('https://openexchangerates.org/api/currencies.json')
data = response.json()
for key, value in data.items():
currency = Currency(code=key, name=value)
currency.save()
            self.stdout.write('created {}'.format(key))
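# Illustrative note (assumes a configured Django project): besides running
# `manage.py get_currencies`, the command can be invoked programmatically,
# e.g. from a data migration or a test:
#
#   from django.core.management import call_command
#   call_command('get_currencies')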
| 29.315789 | 84 | 0.651706 |
4a1d1810bcc8007135a00a0bb0be9d40bc1c5694
| 487 |
py
|
Python
|
config.py
|
eno17/nbutcs_wechat_server
|
0673fc8250dc6477b305c49c12c65aa012304dc6
|
[
"Apache-2.0"
] | null | null | null |
config.py
|
eno17/nbutcs_wechat_server
|
0673fc8250dc6477b305c49c12c65aa012304dc6
|
[
"Apache-2.0"
] | null | null | null |
config.py
|
eno17/nbutcs_wechat_server
|
0673fc8250dc6477b305c49c12c65aa012304dc6
|
[
"Apache-2.0"
] | null | null | null |
#coding=utf8
import os
class Config:
SECRET_KEY = 'hard to guess string' # os.environ.get('SECRET_KEY' )
SQLALCHEMY_TRACK_MODIFICATIONS = True
SQLALCHEMY_COMMIT_ON_TEARDOWN = True
@staticmethod
def init_app(app):
pass
class DevelopmentConfig(Config):
DEBUG = True
    SQLALCHEMY_DATABASE_URI = 'mysql+pymysql://waimai:Chihuo2016@127.0.0.1/takeout'  # on iMac this must be 127.0.0.1, not localhost
config = {
'development':DevelopmentConfig,
'default':DevelopmentConfig
}
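# Illustrative sketch (assumes a Flask application factory elsewhere in the
# project; not part of this file): the `config` mapping above is typically
# consumed like this:
#
#   from flask import Flask
#
#   def create_app(config_name='default'):
#       app = Flask(__name__)
#       app.config.from_object(config[config_name])
#       config[config_name].init_app(app)
#       return app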
| 23.190476 | 103 | 0.718686 |
4a1d18fe31084610c968cc469caface69065a464
| 3,273 |
py
|
Python
|
src/collectors/logstash/logstash.py
|
Affirm/Diamond
|
d7482f03848e0f272a52a13ddb7a37894f740de3
|
[
"MIT"
] | null | null | null |
src/collectors/logstash/logstash.py
|
Affirm/Diamond
|
d7482f03848e0f272a52a13ddb7a37894f740de3
|
[
"MIT"
] | 5 |
2017-06-27T20:03:45.000Z
|
2020-03-01T21:06:28.000Z
|
src/collectors/logstash/logstash.py
|
Affirm/Diamond
|
d7482f03848e0f272a52a13ddb7a37894f740de3
|
[
"MIT"
] | 1 |
2017-06-30T20:55:15.000Z
|
2017-06-30T20:55:15.000Z
|
# coding=utf-8
"""
Collect the logstash stats for the local node
#### Dependencies
* urllib2
* json
"""
import urllib2
try:
import json
json # workaround for pyflakes issue #13
except ImportError:
import simplejson as json
import diamond.collector
class LogstashCollector(diamond.collector.Collector):
metrics = {}
def get_default_config_help(self):
config_help = super(LogstashCollector,
self).get_default_config_help()
config_help.update({
'host': "",
'port': "",
})
return config_help
def get_default_config(self):
"""
Returns the default collector settings
"""
config = super(LogstashCollector, self).get_default_config()
config.update({
'host': '127.0.0.1',
'port': 9600,
})
return config
def _get(self, path, expect_json=True):
url = 'http://%s:%i/%s' % (
self.config['host'], int(self.config['port']), path)
try:
response = urllib2.urlopen(url)
except urllib2.HTTPError, err:
self.log.error("%s: %s", url, err)
return False
if not expect_json:
return response.read()
try:
return json.load(response)
except (TypeError, ValueError):
self.log.error("Unable to parse response from elasticsearch as a"
+ " json object")
return False
def _parse_stats(self, data, prefix=None):
for key, value in data.iteritems():
if type(value) == dict:
name = '.'.join([prefix, key]) if prefix else key
self._parse_stats(value, name)
elif type(value) in [int, float, long]:
name = '.'.join([prefix, key.replace('.', '_')]) if prefix else key.replace('.', '_')
self.metrics[name] = value
else:
self.log.debug('Type %s not handled for %s', type(value), key)
def collect(self):
if json is None:
self.log.error('Unable to import json')
return {}
result = self._get('_node/stats')
if not result:
self.log.error('Could not load node stats')
return
subtrees_to_collect = ['jvm', 'process', 'pipeline']
        result = {k: v for k, v in result.iteritems() if k in subtrees_to_collect}
# convert pipeline.plugins array into hash
plugins_hash = {}
for plugin_type,plugins_array in result['pipeline']['plugins'].iteritems():
plugins_hash[plugin_type] = {}
for plugin in plugins_array:
if 'events' in plugin:
plugins_hash[plugin_type].update({ plugin['id']: plugin['events'] })
# keep only events and plugins subtrees in resulting pipeline hash
result['pipeline'] = {
'events': result['pipeline']['events'],
'plugins': plugins_hash,
}
self._parse_stats(result)
for key in self.metrics:
self.log.debug('%s: %s', key, self.metrics[key])
if key in self.metrics:
self.publish(key, self.metrics[key])
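# Illustrative note (not part of the collector logic): _parse_stats() flattens
# the nested node-stats document into dotted metric names before publishing,
# e.g.
#
#   {'jvm': {'mem': {'heap_used_in_bytes': 1024}}}
#
# becomes the single metric 'jvm.mem.heap_used_in_bytes' with value 1024;
# dots inside individual keys are replaced with underscores first.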
| 29.223214 | 101 | 0.55026 |
4a1d190755d1d790d62d9e906ceab12cecea0742
| 9,164 |
py
|
Python
|
tests/collections/asr/numba/spec_augment/test_spec_aug_numba.py
|
PatrykNeubauer/NeMo
|
3ada744b884dba5f233f22c6991fc6092c6ca8d0
|
[
"Apache-2.0"
] | 2 |
2021-06-23T19:16:59.000Z
|
2022-02-23T18:49:07.000Z
|
tests/collections/asr/numba/spec_augment/test_spec_aug_numba.py
|
PatrykNeubauer/NeMo
|
3ada744b884dba5f233f22c6991fc6092c6ca8d0
|
[
"Apache-2.0"
] | 1 |
2021-07-19T05:36:38.000Z
|
2021-07-29T15:44:19.000Z
|
tests/collections/asr/numba/spec_augment/test_spec_aug_numba.py
|
PatrykNeubauer/NeMo
|
3ada744b884dba5f233f22c6991fc6092c6ca8d0
|
[
"Apache-2.0"
] | 1 |
2021-11-30T15:13:15.000Z
|
2021-11-30T15:13:15.000Z
|
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import torch
from omegaconf import OmegaConf
from nemo.collections.asr.parts.numba.spec_augment import spec_aug_numba
from nemo.core.utils import numba_utils
from nemo.core.utils.numba_utils import __NUMBA_MINIMUM_VERSION__
def get_cfg(seed=0, dtype='float32'):
# fmt: off
cfg = OmegaConf.create(
dict(b=2, f=80, t=20, device='cuda',
freq_masks=2, time_masks=2, freq_width=27, time_width=0.05, mask_value=0.0,
seed=seed, dtype=dtype)
)
# fmt: on
return cfg
# fmt: off
def prepare_data(b, f, t, device='cuda', freq_masks=0, time_masks=0, freq_width=10, time_width=0.1,
seed=0, dtype='float32',
**kwargs):
torch.manual_seed(seed)
if dtype == 'float16':
dtype = torch.float16
else:
dtype = torch.float
x = torch.randn([b, f, t], dtype=dtype, device=device)
x_len = torch.randint(t, size=[b], device=x.device)
sh = x.shape
bs = sh[0]
if isinstance(time_width, int):
adaptive_temporal_width = False
else:
if time_width > 1.0 or time_width < 0.0:
raise ValueError('If `time_width` is a float value, must be in range [0, 1]')
adaptive_temporal_width = True
    original_time_width = time_width
# Construct the freq and time masks as well as start positions
if freq_masks > 0:
freq_starts = torch.randint(0, sh[1] - freq_width + 1, size=[bs, freq_masks], device=x.device)
freq_lengths = torch.randint(0, freq_width + 1, size=[bs, freq_masks], device=x.device)
else:
freq_starts = torch.zeros([bs, 1], dtype=torch.int64, device=x.device)
freq_lengths = torch.zeros([bs, 1], dtype=torch.int64, device=x.device)
if time_masks > 0:
if adaptive_temporal_width:
            time_width = (x_len * original_time_width).int().clamp(min=1)
else:
time_width = (
                torch.tensor(original_time_width, dtype=torch.int32, device=x.device)
.unsqueeze(0)
.repeat(sh[0])
)
time_starts = []
time_lengths = []
for idx in range(sh[0]):
time_starts.append(
torch.randint(
0, max(1, x_len[idx] - time_width[idx]), size=[1, time_masks], device=x.device
)
)
time_lengths.append(
torch.randint(0, time_width[idx] + 1, size=[1, time_masks], device=x.device)
)
        time_starts = torch.cat(time_starts, 0)
time_lengths = torch.cat(time_lengths, 0)
else:
time_starts = torch.zeros([bs, 1], dtype=torch.int64, device=x.device)
time_lengths = torch.zeros([bs, 1], dtype=torch.int64, device=x.device)
output = dict(
x=x,
x_len=x_len,
freq_starts=freq_starts,
freq_lengths=freq_lengths,
time_starts=time_starts,
time_lengths=time_lengths,
sh=sh,
)
return output
# fmt: on
def launch_kernel(data, cfg):
# Launch CUDA kernel
# fmt: off
data['x'] = spec_aug_numba.launch_spec_augment_kernel(
x=data['x'], x_len=data['x_len'],
freq_starts=data['freq_starts'], freq_lengths=data['freq_lengths'],
time_starts=data['time_starts'], time_lengths=data['time_lengths'],
freq_masks=cfg.freq_masks, time_masks=cfg.time_masks, mask_value=cfg.mask_value
)
# fmt: on
def freq_mask_check(x, x_len, f_start, f_len, mask_value, bidx):
check_result = True
for fidx in range(f_start, f_start + f_len):
if not (x[bidx, fidx, :] == mask_value).all():
check_result = False
break
assert check_result
def time_mask_check(x, x_len, t_start, t_len, mask_value, bidx):
check_result = True
for tidx in range(t_start, t_start + t_len):
if tidx >= x_len[bidx]:
# this sample has smaller length than the time index of mask, ignore
continue
if not (x[bidx, :, tidx] == mask_value).all():
check_result = False
break
assert check_result
class TestSpecAugmentNumba:
@pytest.mark.unit
@pytest.mark.run_only_on('GPU')
@pytest.mark.parametrize('dtype', ['float16', 'float32'])
def test_spec_aug_kernel(self, dtype):
numba_utils.skip_numba_cuda_test_if_unsupported(__NUMBA_MINIMUM_VERSION__)
cfg = get_cfg(seed=0, dtype=dtype)
cfg.freq_masks = 2
cfg.time_masks = 10
data = prepare_data(**cfg)
launch_kernel(data, cfg)
x, x_len, sh = data['x'], data['x_len'], data['sh']
# Assert freq masks are correct
for bidx in range(sh[0]):
for f_start, f_len in zip(data['freq_starts'][bidx], data['freq_lengths'][bidx]):
freq_mask_check(x, x_len, f_start, f_len, mask_value=cfg.mask_value, bidx=bidx)
# Assert time masks are correct
for bidx in range(sh[0]):
for t_start, t_len in zip(data['time_starts'][bidx], data['time_lengths'][bidx]):
time_mask_check(x, x_len, t_start, t_len, mask_value=cfg.mask_value, bidx=bidx)
@pytest.mark.unit
@pytest.mark.run_only_on('GPU')
def test_spec_aug_kernel_mask_value(self):
numba_utils.skip_numba_cuda_test_if_unsupported(__NUMBA_MINIMUM_VERSION__)
cfg = get_cfg(seed=0)
cfg.freq_masks = 2
cfg.time_masks = 10
cfg.mask_value = -1.0
data = prepare_data(**cfg)
launch_kernel(data, cfg)
x, x_len, sh = data['x'], data['x_len'], data['sh']
# Assert freq masks are correct
for bidx in range(sh[0]):
for f_start, f_len in zip(data['freq_starts'][bidx], data['freq_lengths'][bidx]):
freq_mask_check(x, x_len, f_start, f_len, mask_value=cfg.mask_value, bidx=bidx)
# Assert time masks are correct
for bidx in range(sh[0]):
for t_start, t_len in zip(data['time_starts'][bidx], data['time_lengths'][bidx]):
time_mask_check(x, x_len, t_start, t_len, mask_value=cfg.mask_value, bidx=bidx)
@pytest.mark.unit
@pytest.mark.run_only_on('GPU')
def test_spec_aug_kernel_grad(self):
numba_utils.skip_numba_cuda_test_if_unsupported(__NUMBA_MINIMUM_VERSION__)
cfg = get_cfg(seed=0)
cfg.freq_masks = 2
cfg.time_masks = 10
data = prepare_data(**cfg)
launch_kernel(data, cfg)
result = data['x'] # inplace modification via kernel
y = torch.ones_like(result, requires_grad=True)
z = y + result
z.mean().backward()
assert y.grad is not None
@pytest.mark.unit
@pytest.mark.run_only_on('GPU')
def test_spec_aug_kernel_no_freq_mask(self):
numba_utils.skip_numba_cuda_test_if_unsupported(__NUMBA_MINIMUM_VERSION__)
cfg = get_cfg(seed=0)
cfg.freq_masks = 0
cfg.time_masks = 10
data = prepare_data(**cfg)
launch_kernel(data, cfg)
x, x_len, sh = data['x'], data['x_len'], data['sh']
# Assert time masks are correct
for bidx in range(sh[0]):
for t_start, t_len in zip(data['time_starts'][bidx], data['time_lengths'][bidx]):
time_mask_check(x, x_len, t_start, t_len, mask_value=cfg.mask_value, bidx=bidx)
@pytest.mark.unit
@pytest.mark.run_only_on('GPU')
def test_spec_aug_kernel_no_time_mask(self):
numba_utils.skip_numba_cuda_test_if_unsupported(__NUMBA_MINIMUM_VERSION__)
cfg = get_cfg(seed=0)
cfg.freq_masks = 2
cfg.time_masks = 0
data = prepare_data(**cfg)
launch_kernel(data, cfg)
x, x_len, sh = data['x'], data['x_len'], data['sh']
# Assert freq masks are correct
for bidx in range(sh[0]):
for f_start, f_len in zip(data['freq_starts'][bidx], data['freq_lengths'][bidx]):
freq_mask_check(x, x_len, f_start, f_len, mask_value=cfg.mask_value, bidx=bidx)
@pytest.mark.unit
@pytest.mark.run_only_on('GPU')
def test_spec_aug_kernel_no_freq_time_mask(self):
numba_utils.skip_numba_cuda_test_if_unsupported(__NUMBA_MINIMUM_VERSION__)
cfg = get_cfg(seed=0)
cfg.freq_masks = 0
cfg.time_masks = 0
data = prepare_data(**cfg)
x, x_len, sh = data['x'], data['x_len'], data['sh']
x_copy = x.clone()
launch_kernel(data, cfg)
        # Assert no data edits occurred
assert (data['x'] - x_copy).abs().mean() <= 1e-9
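# Hedged addition (not part of the original suite): a CPU-only sanity check of
# prepare_data() itself, so the mask bookkeeping can be exercised on machines
# without a GPU. The expected shapes follow directly from get_cfg().
def test_prepare_data_shapes_cpu():
    cfg = get_cfg(seed=0)
    cfg.device = 'cpu'
    data = prepare_data(**cfg)
    assert data['x'].shape == (cfg.b, cfg.f, cfg.t)
    assert data['freq_starts'].shape == (cfg.b, cfg.freq_masks)
    assert data['time_starts'].shape == (cfg.b, cfg.time_masks)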
| 33.815498 | 102 | 0.627346 |
4a1d1978a374bb0daad0591e0ba274a58dce606c
| 18,676 |
py
|
Python
|
os_brick/tests/local_dev/test_brick_lvm.py
|
Mionsz/os-brick
|
7a6a09fc84a779c3ee08d122664f941195eeab8f
|
[
"Apache-2.0"
] | 61 |
2015-02-17T16:18:45.000Z
|
2021-09-16T18:46:01.000Z
|
os_brick/tests/local_dev/test_brick_lvm.py
|
Mionsz/os-brick
|
7a6a09fc84a779c3ee08d122664f941195eeab8f
|
[
"Apache-2.0"
] | 2 |
2016-06-17T19:46:03.000Z
|
2022-02-16T02:29:02.000Z
|
os_brick/tests/local_dev/test_brick_lvm.py
|
Mionsz/os-brick
|
7a6a09fc84a779c3ee08d122664f941195eeab8f
|
[
"Apache-2.0"
] | 62 |
2015-04-14T19:32:25.000Z
|
2022-03-31T16:32:53.000Z
|
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from unittest import mock
from oslo_concurrency import processutils
from os_brick import exception
from os_brick import executor as os_brick_executor
from os_brick.local_dev import lvm as brick
from os_brick.privileged import rootwrap as priv_rootwrap
from os_brick.tests import base
class BrickLvmTestCase(base.TestCase):
def setUp(self):
super(BrickLvmTestCase, self).setUp()
if not hasattr(self, 'configuration'):
self.configuration = mock.Mock()
self.configuration.lvm_suppress_fd_warnings = False
self.volume_group_name = 'fake-vg'
# Stub processutils.execute for static methods
self.mock_object(priv_rootwrap, 'execute',
self.fake_execute)
self.vg = brick.LVM(
self.volume_group_name,
'sudo',
create_vg=False,
physical_volumes=None,
lvm_type='default',
executor=self.fake_execute,
suppress_fd_warn=self.configuration.lvm_suppress_fd_warnings)
def failed_fake_execute(obj, *cmd, **kwargs):
return ("\n", "fake-error")
def fake_pretend_lvm_version(obj, *cmd, **kwargs):
return (" LVM version: 2.03.00 (2012-03-06)\n", "")
def fake_old_lvm_version(obj, *cmd, **kwargs):
# Does not support thin prov or snap activation
return (" LVM version: 2.02.65(2) (2012-03-06)\n", "")
def fake_customised_lvm_version(obj, *cmd, **kwargs):
return (" LVM version: 2.02.100(2)-RHEL6 (2013-09-12)\n", "")
def fake_f23_lvm_version(obj, *cmd, **kwargs):
return (" LVM version: 2.02.132(2) (2015-09-22)\n", "")
def fake_execute(obj, *cmd, **kwargs):
# TODO(eharney): remove this and move to per-test mocked execute calls
if obj.configuration.lvm_suppress_fd_warnings:
_lvm_prefix = 'env, LC_ALL=C, LVM_SUPPRESS_FD_WARNINGS=1, '
else:
_lvm_prefix = 'env, LC_ALL=C, '
cmd_string = ', '.join(cmd)
data = "\n"
if (_lvm_prefix + 'vgs, --noheadings, --unit=g, -o, name' ==
cmd_string):
data = " fake-vg\n"
data += " some-other-vg\n"
elif (_lvm_prefix + 'vgs, --noheadings, -o, name, fake-vg' ==
cmd_string):
data = " fake-vg\n"
elif _lvm_prefix + 'vgs, --version' in cmd_string:
data = " LVM version: 2.02.95(2) (2012-03-06)\n"
elif (_lvm_prefix + 'vgs, --noheadings, -o, uuid, fake-vg' in
cmd_string):
data = " kVxztV-dKpG-Rz7E-xtKY-jeju-QsYU-SLG6Z1\n"
elif _lvm_prefix + 'vgs, --noheadings, --unit=g, ' \
'-o, name,size,free,lv_count,uuid, ' \
'--separator, :, --nosuffix' in cmd_string:
data = (" test-prov-cap-vg-unit:10.00:10.00:0:"
"mXzbuX-dKpG-Rz7E-xtKY-jeju-QsYU-SLG8Z4\n")
if 'test-prov-cap-vg-unit' in cmd_string:
return (data, "")
data = (" test-prov-cap-vg-no-unit:10.00:10.00:0:"
"mXzbuX-dKpG-Rz7E-xtKY-jeju-QsYU-SLG8Z4\n")
if 'test-prov-cap-vg-no-unit' in cmd_string:
return (data, "")
data = " fake-vg:10.00:10.00:0:"\
"kVxztV-dKpG-Rz7E-xtKY-jeju-QsYU-SLG6Z1\n"
if 'fake-vg' in cmd_string:
return (data, "")
data += " fake-vg-2:10.00:10.00:0:"\
"lWyauW-dKpG-Rz7E-xtKY-jeju-QsYU-SLG7Z2\n"
data += " fake-vg-3:10.00:10.00:0:"\
"mXzbuX-dKpG-Rz7E-xtKY-jeju-QsYU-SLG8Z3\n"
elif (_lvm_prefix + 'lvs, --noheadings, '
'--unit=g, -o, vg_name,name,size, --nosuffix, '
'fake-vg/lv-nothere' in cmd_string):
raise processutils.ProcessExecutionError(
stderr="One or more specified logical volume(s) not found.")
elif (_lvm_prefix + 'lvs, --noheadings, '
'--unit=g, -o, vg_name,name,size, --nosuffix, '
'fake-vg/lv-newerror' in cmd_string):
raise processutils.ProcessExecutionError(
stderr="Failed to find logical volume \"fake-vg/lv-newerror\"")
elif (_lvm_prefix + 'lvs, --noheadings, '
'--unit=g, -o, vg_name,name,size' in cmd_string):
if 'fake-unknown' in cmd_string:
raise processutils.ProcessExecutionError(
stderr="One or more volume(s) not found."
)
if 'test-prov-cap-vg-unit' in cmd_string:
data = " fake-vg test-prov-cap-pool-unit 9.50g\n"
data += " fake-vg fake-volume-1 1.00g\n"
data += " fake-vg fake-volume-2 2.00g\n"
elif 'test-prov-cap-vg-no-unit' in cmd_string:
data = " fake-vg test-prov-cap-pool-no-unit 9.50\n"
data += " fake-vg fake-volume-1 1.00\n"
data += " fake-vg fake-volume-2 2.00\n"
elif 'test-found-lv-name' in cmd_string:
data = " fake-vg test-found-lv-name 9.50\n"
else:
data = " fake-vg fake-1 1.00g\n"
data += " fake-vg fake-2 1.00g\n"
elif (_lvm_prefix + 'lvdisplay, --noheading, -C, -o, Attr' in
cmd_string):
if 'test-volumes' in cmd_string:
data = ' wi-a-'
else:
data = ' owi-a-'
elif _lvm_prefix + 'pvs, --noheadings' in cmd_string:
data = " fake-vg|/dev/sda|10.00|1.00\n"
data += " fake-vg|/dev/sdb|10.00|1.00\n"
data += " fake-vg|/dev/sdc|10.00|8.99\n"
data += " fake-vg-2|/dev/sdd|10.00|9.99\n"
elif _lvm_prefix + 'lvs, --noheadings, --unit=g' \
', -o, size,data_percent, --separator, :' in cmd_string:
if 'test-prov-cap-pool' in cmd_string:
data = " 9.5:20\n"
else:
data = " 9:12\n"
elif 'lvcreate, -T, -L, ' in cmd_string:
pass
elif 'lvcreate, -T, -l, 100%FREE' in cmd_string:
pass
elif 'lvcreate, -T, -V, ' in cmd_string:
pass
elif 'lvcreate, -n, ' in cmd_string:
pass
elif 'lvcreate, --name, ' in cmd_string:
pass
elif 'lvextend, -L, ' in cmd_string:
pass
else:
raise AssertionError('unexpected command called: %s' % cmd_string)
return (data, "")
def test_create_lv_snapshot(self):
self.assertIsNone(self.vg.create_lv_snapshot('snapshot-1', 'fake-1'))
with mock.patch.object(self.vg, 'get_volume', return_value=None):
try:
self.vg.create_lv_snapshot('snapshot-1', 'fake-non-existent')
except exception.VolumeDeviceNotFound as e:
self.assertEqual('fake-non-existent', e.kwargs['device'])
else:
self.fail("Exception not raised")
def test_vg_exists(self):
self.assertTrue(self.vg._vg_exists())
def test_get_vg_uuid(self):
self.assertEqual('kVxztV-dKpG-Rz7E-xtKY-jeju-QsYU-SLG6Z1',
self.vg._get_vg_uuid()[0])
def test_get_all_volumes(self):
out = self.vg.get_volumes()
self.assertEqual('fake-1', out[0]['name'])
self.assertEqual('1.00g', out[0]['size'])
self.assertEqual('fake-vg', out[0]['vg'])
def test_get_volume(self):
self.assertEqual('fake-1', self.vg.get_volume('fake-1')['name'])
def test_get_volume_none(self):
self.assertIsNone(self.vg.get_volume('fake-unknown'))
def test_get_lv_info_notfound(self):
# lv-nothere will raise lvm < 2.102.112 exception
self.assertEqual(
[],
self.vg.get_lv_info(
'sudo', vg_name='fake-vg', lv_name='lv-nothere')
)
# lv-newerror will raise lvm > 2.102.112 exception
self.assertEqual(
[],
self.vg.get_lv_info(
'sudo', vg_name='fake-vg', lv_name='lv-newerror')
)
def test_get_lv_info_found(self):
lv_info = [{'size': '9.50', 'name': 'test-found-lv-name',
'vg': 'fake-vg'}]
self.assertEqual(
lv_info,
self.vg.get_lv_info(
'sudo', vg_name='fake-vg',
lv_name='test-found-lv-name')
)
def test_get_lv_info_no_lv_name(self):
lv_info = [{'name': 'fake-1', 'size': '1.00g', 'vg': 'fake-vg'},
{'name': 'fake-2', 'size': '1.00g', 'vg': 'fake-vg'}]
self.assertEqual(
lv_info,
self.vg.get_lv_info(
'sudo', vg_name='fake-vg')
)
@mock.patch('tenacity.nap.sleep', mock.Mock())
@mock.patch.object(brick.putils, 'execute')
def test_get_lv_info_retry(self, exec_mock):
exec_mock.side_effect = (
processutils.ProcessExecutionError('', '', exit_code=139),
('vg name size', ''),
)
self.assertEqual(
[{'name': 'fake-1', 'size': '1.00g', 'vg': 'fake-vg'},
{'name': 'fake-2', 'size': '1.00g', 'vg': 'fake-vg'}],
self.vg.get_lv_info('sudo', vg_name='vg', lv_name='name')
)
@mock.patch('tenacity.nap.sleep', mock.Mock())
@mock.patch.object(os_brick_executor.Executor, '_execute')
def test_get_thin_pool_free_space_retry(self, exec_mock):
exec_mock.side_effect = (
processutils.ProcessExecutionError('', '', exit_code=139),
('15.84:50', ''),
)
self.assertEqual(
7.92,
self.vg._get_thin_pool_free_space('vg', 'thinpool')
)
self.assertEqual(2, exec_mock.call_count)
args = ['env', 'LC_ALL=C', 'lvs', '--noheadings', '--unit=g', '-o',
'size,data_percent', '--separator', ':', '--nosuffix',
'/dev/vg/thinpool']
if self.configuration.lvm_suppress_fd_warnings:
args.insert(2, 'LVM_SUPPRESS_FD_WARNINGS=1')
lvs_call = mock.call(*args, root_helper='sudo', run_as_root=True)
exec_mock.assert_has_calls([lvs_call, lvs_call])
def test_get_all_physical_volumes(self):
# Filtered VG version
pvs = self.vg.get_all_physical_volumes('sudo', 'fake-vg')
self.assertEqual(3, len(pvs))
# Non-Filtered, all VG's
pvs = self.vg.get_all_physical_volumes('sudo')
self.assertEqual(4, len(pvs))
def test_get_physical_volumes(self):
pvs = self.vg.get_physical_volumes()
self.assertEqual(3, len(pvs))
def test_get_volume_groups(self):
self.assertEqual(3, len(self.vg.get_all_volume_groups('sudo')))
self.assertEqual(1,
len(self.vg.get_all_volume_groups('sudo', 'fake-vg')))
def test_thin_support(self):
# lvm.supports_thin() is a static method and doesn't
# use the self._executor fake we pass in on init
# so we need to stub processutils.execute appropriately
with mock.patch.object(priv_rootwrap, 'execute',
side_effect=self.fake_execute):
self.assertTrue(self.vg.supports_thin_provisioning('sudo'))
with mock.patch.object(priv_rootwrap, 'execute',
side_effect=self.fake_pretend_lvm_version):
self.assertTrue(self.vg.supports_thin_provisioning('sudo'))
with mock.patch.object(priv_rootwrap, 'execute',
side_effect=self.fake_old_lvm_version):
self.assertFalse(self.vg.supports_thin_provisioning('sudo'))
with mock.patch.object(priv_rootwrap, 'execute',
side_effect=self.fake_customised_lvm_version):
self.assertTrue(self.vg.supports_thin_provisioning('sudo'))
def test_snapshot_lv_activate_support(self):
self.vg._supports_snapshot_lv_activation = None
with mock.patch.object(priv_rootwrap, 'execute',
side_effect=self.fake_execute):
self.assertTrue(self.vg.supports_snapshot_lv_activation)
self.vg._supports_snapshot_lv_activation = None
with mock.patch.object(priv_rootwrap, 'execute',
side_effect=self.fake_old_lvm_version):
self.assertFalse(self.vg.supports_snapshot_lv_activation)
self.vg._supports_snapshot_lv_activation = None
def test_lvchange_ignskipact_support_yes(self):
"""Tests if lvchange -K is available via a lvm2 version check."""
self.vg._supports_lvchange_ignoreskipactivation = None
with mock.patch.object(priv_rootwrap, 'execute',
side_effect=self.fake_pretend_lvm_version):
self.assertTrue(self.vg.supports_lvchange_ignoreskipactivation)
self.vg._supports_lvchange_ignoreskipactivation = None
with mock.patch.object(priv_rootwrap, 'execute',
side_effect=self.fake_old_lvm_version):
self.assertFalse(self.vg.supports_lvchange_ignoreskipactivation)
self.vg._supports_lvchange_ignoreskipactivation = None
def test_thin_pool_creation_manual(self):
# The size of fake-vg volume group is 10g, so the calculated thin
# pool size should be 9.5g (95% of 10g).
self.vg.create_thin_pool()
def test_thin_pool_provisioned_capacity(self):
self.vg.vg_thin_pool = "test-prov-cap-pool-unit"
self.vg.vg_name = 'test-prov-cap-vg-unit'
self.assertIsNone(self.vg.create_thin_pool(name=self.vg.vg_thin_pool))
self.assertEqual(9.50, self.vg.vg_thin_pool_size)
self.assertEqual(7.6, self.vg.vg_thin_pool_free_space)
self.assertEqual(3.0, self.vg.vg_provisioned_capacity)
self.vg.vg_thin_pool = "test-prov-cap-pool-no-unit"
self.vg.vg_name = 'test-prov-cap-vg-no-unit'
self.assertIsNone(self.vg.create_thin_pool(name=self.vg.vg_thin_pool))
self.assertEqual(9.50, self.vg.vg_thin_pool_size)
self.assertEqual(7.6, self.vg.vg_thin_pool_free_space)
self.assertEqual(3.0, self.vg.vg_provisioned_capacity)
def test_thin_pool_free_space(self):
# The size of fake-vg-pool is 9g and the allocated data sums up to
# 12% so the calculated free space should be 7.92
self.assertEqual(float("7.92"),
self.vg._get_thin_pool_free_space("fake-vg",
"fake-vg-pool"))
def test_volume_create_after_thin_creation(self):
"""Test self.vg.vg_thin_pool is set to pool_name
See bug #1220286 for more info.
"""
vg_name = "vg-name"
pool_name = vg_name + "-pool"
pool_path = "%s/%s" % (vg_name, pool_name)
def executor(obj, *cmd, **kwargs):
self.assertEqual(pool_path, cmd[-1])
self.vg._executor = executor
self.vg.create_thin_pool(pool_name)
self.vg.create_volume("test", "1G", lv_type='thin')
self.assertEqual(pool_name, self.vg.vg_thin_pool)
def test_lv_has_snapshot(self):
self.assertTrue(self.vg.lv_has_snapshot('fake-vg'))
self.assertFalse(self.vg.lv_has_snapshot('test-volumes'))
def test_activate_lv(self):
self.vg._supports_lvchange_ignoreskipactivation = True
with mock.patch.object(self.vg, '_execute') as mock_exec:
self.vg.activate_lv('my-lv')
expected = [mock.call('lvchange', '-a', 'y', '--yes', '-K',
'fake-vg/my-lv', root_helper='sudo',
run_as_root=True)]
self.assertEqual(expected, mock_exec.call_args_list)
def test_get_mirrored_available_capacity(self):
self.assertEqual(2.0, self.vg.vg_mirror_free_space(1))
def test_lv_extend(self):
self.vg.deactivate_lv = mock.MagicMock()
# Extend lv with snapshot and make sure deactivate called
self.vg.create_volume("test", "1G")
self.vg.extend_volume("test", "2G")
self.vg.deactivate_lv.assert_called_once_with('test')
self.vg.deactivate_lv.reset_mock()
# Extend lv without snapshot so deactivate should not be called
self.vg.create_volume("test", "1G")
self.vg.vg_name = "test-volumes"
self.vg.extend_volume("test", "2G")
self.assertFalse(self.vg.deactivate_lv.called)
def test_lv_deactivate(self):
with mock.patch.object(self.vg, '_execute', return_value=(0, 0)):
is_active_mock = mock.Mock()
is_active_mock.return_value = False
self.vg._lv_is_active = is_active_mock
self.vg.create_volume('test', '1G')
self.vg.deactivate_lv('test')
@mock.patch('os_brick.utils._time_sleep')
def test_lv_deactivate_timeout(self, mock_sleep):
with mock.patch.object(self.vg, '_execute', return_value=(0, 0)):
is_active_mock = mock.Mock()
is_active_mock.return_value = True
self.vg._lv_is_active = is_active_mock
self.vg.create_volume('test', '1G')
self.assertRaises(exception.VolumeNotDeactivated,
self.vg.deactivate_lv, 'test')
def test_lv_is_active(self):
self.vg.create_volume('test', '1G')
with mock.patch.object(self.vg, '_execute',
return_value=['owi-a---', '']):
self.assertTrue(self.vg._lv_is_active('test'))
with mock.patch.object(self.vg, '_execute',
return_value=['owi-----', '']):
self.assertFalse(self.vg._lv_is_active('test'))
class BrickLvmTestCaseIgnoreFDWarnings(BrickLvmTestCase):
def setUp(self):
self.configuration = mock.Mock()
self.configuration.lvm_suppress_fd_warnings = True
super(BrickLvmTestCaseIgnoreFDWarnings, self).setUp()
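# Note (illustrative, not part of the test logic): the subclass above reruns
# every test with lvm_suppress_fd_warnings=True, so fake_execute expects each
# command to start with
#   env, LC_ALL=C, LVM_SUPPRESS_FD_WARNINGS=1, ...
# instead of
#   env, LC_ALL=C, ...
# mirroring how os-brick builds the real LVM command line in both modes.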
| 42.253394 | 79 | 0.589473 |
4a1d1c2cc14a181da3e8fd4a2121a4423b2140e5
| 3,864 |
py
|
Python
|
packages/micropython-official/v1.15/esp32s2/stubs/machine.py
|
TheVinhLuong102/micropy-stubs
|
55ff1773008f7c4dfc3d70a403986486226eb6b3
|
[
"MIT"
] | 18 |
2019-07-11T13:31:09.000Z
|
2022-01-27T06:38:40.000Z
|
packages/micropython-official/v1.15/esp32s2/stubs/machine.py
|
TheVinhLuong102/micropy-stubs
|
55ff1773008f7c4dfc3d70a403986486226eb6b3
|
[
"MIT"
] | 9 |
2019-09-01T21:44:49.000Z
|
2022-02-04T20:55:08.000Z
|
packages/micropython-official/v1.15/esp32s2/stubs/machine.py
|
TheVinhLuong102/micropy-stubs
|
55ff1773008f7c4dfc3d70a403986486226eb6b3
|
[
"MIT"
] | 6 |
2019-10-08T05:31:21.000Z
|
2021-04-22T10:21:01.000Z
|
"""
Module: 'machine' on micropython-esp32-1.15
"""
# MCU: {'ver': '1.15', 'port': 'esp32', 'arch': 'xtensawin', 'sysname': 'esp32', 'release': '1.15.0', 'name': 'micropython', 'mpy': 10757, 'version': '1.15.0', 'machine': 'ESP32-S2-SOALA-1 with ESP32S2', 'build': '', 'nodename': 'esp32', 'platform': 'esp32', 'family': 'micropython'}
# Stubber: 1.3.9
class ADC:
''
ATTN_0DB = 0
ATTN_11DB = 3
ATTN_2_5DB = 1
ATTN_6DB = 2
WIDTH_13BIT = 4
def atten():
pass
def read():
pass
def read_u16():
pass
def width():
pass
class DAC:
''
def write():
pass
DEEPSLEEP = 4
DEEPSLEEP_RESET = 4
EXT0_WAKE = 2
EXT1_WAKE = 3
HARD_RESET = 2
class I2C:
''
def init():
pass
def readfrom():
pass
def readfrom_into():
pass
def readfrom_mem():
pass
def readfrom_mem_into():
pass
def readinto():
pass
def scan():
pass
def start():
pass
def stop():
pass
def write():
pass
def writeto():
pass
def writeto_mem():
pass
def writevto():
pass
PIN_WAKE = 2
class PWM:
''
def deinit():
pass
def duty():
pass
def freq():
pass
def init():
pass
PWRON_RESET = 1
class Pin:
''
IN = 1
IRQ_FALLING = 2
IRQ_RISING = 1
OPEN_DRAIN = 7
OUT = 3
PULL_DOWN = 1
PULL_HOLD = 4
PULL_UP = 2
WAKE_HIGH = 5
WAKE_LOW = 4
def init():
pass
def irq():
pass
def off():
pass
def on():
pass
def value():
pass
class RTC:
''
def datetime():
pass
def init():
pass
def memory():
pass
SLEEP = 2
SOFT_RESET = 5
class SPI:
''
LSB = 1
MSB = 0
def deinit():
pass
def init():
pass
def read():
pass
def readinto():
pass
def write():
pass
def write_readinto():
pass
class Signal:
''
def off():
pass
def on():
pass
def value():
pass
class SoftI2C:
''
def init():
pass
def readfrom():
pass
def readfrom_into():
pass
def readfrom_mem():
pass
def readfrom_mem_into():
pass
def readinto():
pass
def scan():
pass
def start():
pass
def stop():
pass
def write():
pass
def writeto():
pass
def writeto_mem():
pass
def writevto():
pass
class SoftSPI:
''
LSB = 1
MSB = 0
def deinit():
pass
def init():
pass
def read():
pass
def readinto():
pass
def write():
pass
def write_readinto():
pass
TIMER_WAKE = 4
TOUCHPAD_WAKE = 5
class Timer:
''
ONE_SHOT = 0
PERIODIC = 1
def deinit():
pass
def init():
pass
def value():
pass
class UART:
''
INV_CTS = 8
INV_RTS = 64
INV_RX = 4
INV_TX = 32
def any():
pass
def deinit():
pass
def init():
pass
def read():
pass
def readinto():
pass
def readline():
pass
def sendbreak():
pass
def write():
pass
ULP_WAKE = 6
class WDT:
''
def feed():
pass
WDT_RESET = 3
def deepsleep():
pass
def disable_irq():
pass
def enable_irq():
pass
def freq():
pass
def idle():
pass
def lightsleep():
pass
mem16 = None
mem32 = None
mem8 = None
def reset():
pass
def reset_cause():
pass
def sleep():
pass
def soft_reset():
pass
def time_pulse_us():
pass
def unique_id():
pass
def wake_reason():
pass
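# Illustrative note (not part of the generated stub): these classes only mirror
# the on-device `machine` API so linters and IDEs can resolve names; typical
# device-side code they are meant to cover looks like:
#
#   from machine import Pin, ADC
#   led = Pin(2, Pin.OUT)
#   led.on()
#   adc = ADC(Pin(3))
#   adc.atten(ADC.ATTN_11DB)
#   reading = adc.read()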
| 11.534328 | 283 | 0.476708 |
4a1d1cdcad5cd74c50e6f38649593284483fbcd9
| 8,395 |
py
|
Python
|
src/vdf.py
|
Rusty-Helper/steamcmd
|
0c81a07918c60423ccfb6fdb96aae65a2b829f5b
|
[
"MIT"
] | 23 |
2020-02-12T07:32:59.000Z
|
2022-03-19T15:32:00.000Z
|
src/vdf.py
|
Rusty-Helper/steamcmd
|
0c81a07918c60423ccfb6fdb96aae65a2b829f5b
|
[
"MIT"
] | 26 |
2020-05-13T12:03:23.000Z
|
2022-02-21T22:01:05.000Z
|
src/vdf.py
|
Rusty-Helper/steamcmd
|
0c81a07918c60423ccfb6fdb96aae65a2b829f5b
|
[
"MIT"
] | 7 |
2020-07-20T13:54:44.000Z
|
2022-02-03T19:55:48.000Z
|
#!/usr/bin/env python3
"""
Valve key/value manipulation functions.
"""
# imports
import sys
import shlex
import json
def surround_quotes(obj):
"""
Surround input (string)
with double quotes.
"""
# add double quotes around string
obj = '"' + obj + '"'
# return quoted string
return obj
def read(data):
"""
Parent function to call Valve key/value
format read function.
"""
# parse and return valve key/value data
try:
data = read_json_method(data)
except:
try:
data = read_dict_method(data)
except:
data = False
# return parsed data or False bool
return data
def read_json_method(data):
"""
Reformatting Valve key/value format
to JSON and returning JSON.
"""
# default vars
parent = []
depth = 0
vdict = {}
# init json and add opening bracket
vjson = "{"
# replace tabs with spaces
    data = data.replace("\t", " ")
# split into lines
data = data.splitlines()
# loop through vdf
for index, line in enumerate(data):
# split line string
split = shlex.split(line)
count = len(split)
# set key vars
key = split[0]
# error if unexpected word count of current line
if count > 2:
print(
"The line: "
+ line
+ " contains an invalid number of words. This must be 1 or 2!"
)
sys.exit(1)
# parse next line if not last line
if index == (len(data) - 1):
# set next to false
nextkey = False
# flag this line as last
lastline = True
else:
# get next line
nextline = data[index + 1]
nextsplit = shlex.split(nextline)
nextkey = nextsplit[0]
# flag this line as not last
lastline = False
# check for object start lines
if count == 1 and not key in ["{", "}"]:
# add colon to define object
line = line + " : "
# check for closing bracket and
if key == "}" and nextkey != "}" and not lastline:
# add colon to define object
line = line + ","
# check for key value lines
if count == 2:
# set value var
val = split[1]
# add colon between key/value
line = surround_quotes(key) + " : " + surround_quotes(val)
# check for brackets on next line
if not nextkey in ["{", "}"]:
# add comma to line
line = line + ","
# add edited line to json dict
vjson = vjson + line
# add closing bracket
vjson = vjson + "}"
# parse json to dict
try:
vdict = json.loads(vjson)
except Exception as parse_error:
# print json parse error and set dict to false
print(
"The following error occured while trying to parse the VDF app"
+ "info returned from steamcmd: the query string: \n > "
+ str(parse_error)
)
vdict = False
# set error exit code
sys.exit(1)
return vdict
# read vdf and return dict
def read_dict_method(data):
"""
Parse Valve key/value format
and return dictionary.
"""
# default vars
parent = []
depth = 0
vdict = {}
# loop through vdf
for line in data.splitlines():
# split line string
split = shlex.split(line)
count = len(split)
# set key value vars
key = split[0]
if count == 2:
val = split[1]
# error if unexpected word count of current line
if count > 2:
print(
"The line: "
+ line
+ " contains an invalid number of words. This must be 1 or 2!"
)
sys.exit(1)
# increase / decrease depth to track dict level
if key == "{":
# increase depth
depth += 1
elif key == "}":
# decrease depth
depth -= 1
# remove last added parent from list
parent.pop(-1)
else:
# add object to dict / root level
if depth == 0:
# add current line
if count == 2:
# add key value
vdict[key] = val
if count == 1:
# add dict of key
vdict[key] = {}
# set key as new parent
parent.append(key)
# add object to dict / first level
if depth == 1:
# add current line
if count == 2:
# add key value
vdict[parent[0]][key] = val
if count == 1:
# add dict of key
vdict[parent[0]][key] = {}
# set key as new parent
parent.append(key)
# add object to dict / second level
if depth == 2:
# add current line
if count == 2:
# add key value
vdict[parent[0]][parent[1]][key] = val
if count == 1:
# add dict of key
vdict[parent[0]][parent[1]][key] = {}
# set key as new parent
parent.append(key)
# add object to dict / third level
if depth == 3:
# add current line
if count == 2:
# add key value
vdict[parent[0]][parent[1]][parent[2]][key] = val
if count == 1:
# add dict of key
vdict[parent[0]][parent[1]][parent[2]][key] = {}
# set key as new parent
parent.append(key)
# add object to dict / fourth level
if depth == 4:
# add current line
if count == 2:
# add key value
vdict[parent[0]][parent[1]][parent[2]][parent[3]][key] = val
if count == 1:
# add dict of key
vdict[parent[0]][parent[1]][parent[2]][parent[3]][key] = {}
# set key as new parent
parent.append(key)
# add object to dict / fifth level
if depth == 5:
# add current line
if count == 2:
# add key value
vdict[parent[0]][parent[1]][parent[2]][parent[3]][parent[4]][
key
] = val
if count == 1:
# add dict of key
vdict[parent[0]][parent[1]][parent[2]][parent[3]][parent[4]][
key
] = {}
# set key as new parent
parent.append(key)
# add object to dict / sixth level
if depth == 6:
# add current line
if count == 2:
# add key value
vdict[parent[0]][parent[1]][parent[2]][parent[3]][parent[4]][
parent[5]
][key] = val
if count == 1:
# add dict of key
vdict[parent[0]][parent[1]][parent[2]][parent[3]][parent[4]][
parent[5]
][key] = {}
# set key as new parent
parent.append(key)
# add object to dict / seventh level
if depth == 7:
# add current line
if count == 2:
# add key value
                vdict[parent[0]][parent[1]][parent[2]][parent[3]][parent[4]][
                    parent[5]
                ][parent[6]][key] = val
if count == 1:
# add dict of key
                vdict[parent[0]][parent[1]][parent[2]][parent[3]][parent[4]][
                    parent[5]
                ][parent[6]][key] = {}
# set key as new parent
parent.append(key)
return vdict
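if __name__ == "__main__":
    # Minimal illustrative example (the snippet mimics the shape of steamcmd
    # appinfo output; keys and values are made up): read() turns the Valve
    # key/value text into a nested dict.
    sample = "\n".join([
        '"730"',
        "{",
        '"common"',
        "{",
        '"name" "Counter-Strike"',
        "}",
        "}",
    ])
    print(read(sample))
    # -> {'730': {'common': {'name': 'Counter-Strike'}}}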
| 28.076923 | 81 | 0.438594 |
4a1d1d310ed1372591df6e5a13ef3a7998ed5bb7
| 1,676 |
py
|
Python
|
src/focal_loss.py
|
tbachlechner/Heuristik
|
4eca32b9e32453439a9987257698dd88781d0a1f
|
[
"MIT"
] | 25 |
2020-07-01T15:56:26.000Z
|
2020-07-10T19:12:39.000Z
|
src/focal_loss.py
|
tbachlechner/Heuristik
|
4eca32b9e32453439a9987257698dd88781d0a1f
|
[
"MIT"
] | null | null | null |
src/focal_loss.py
|
tbachlechner/Heuristik
|
4eca32b9e32453439a9987257698dd88781d0a1f
|
[
"MIT"
] | 2 |
2020-07-13T09:07:17.000Z
|
2020-11-29T21:21:13.000Z
|
""" Implements focal loss, see https://arxiv.org/pdf/1708.02002.pdf """
# Load packages
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
class FocalLoss(nn.Module):
"""
The focal loss function emphasizes wrong predictions of the model.
It is superior to CEL when the data is unbalanced.
"""
    def __init__(self, class_num=2, alpha=None, gamma=2, size_average=True):
super(FocalLoss, self).__init__()
if alpha is None:
self.alpha = Variable(torch.ones(class_num, 1))
else:
if isinstance(alpha, Variable):
self.alpha = alpha
else:
self.alpha = Variable(alpha)
self.gamma = gamma
self.class_num = class_num
self.size_average = size_average
def forward(self, inputs, targets):
N = inputs.size(0)
C = inputs.size(1)
P = F.softmax(inputs,dim=1)
class_mask = inputs.data.new(N, C).fill_(0)
class_mask = Variable(class_mask)
ids = targets.view(-1, 1)
class_mask.scatter_(1, ids.data, 1.)
if inputs.is_cuda and not self.alpha.is_cuda:
self.alpha = self.alpha.to(device)
alpha = self.alpha[ids.data.view(-1)]
probs = (P*class_mask).sum(1).view(-1,1)
log_p = probs.log()
batch_loss = -alpha*(torch.pow((1-probs), self.gamma))*log_p
if self.size_average:
loss = batch_loss.mean()
else:
loss = batch_loss.sum()
return loss
| 32.230769 | 77 | 0.591289 |
4a1d1d5bc587f37ebddec59a6dad9f2795578ccc
| 15 |
py
|
Python
|
geminidata/__init__.py
|
RileyR387/gemini-data-feed
|
9784dfd1c9ea976fbe9c93a08f5ba641ef858234
|
[
"MIT"
] | 6 |
2019-05-24T04:41:46.000Z
|
2022-02-14T01:46:54.000Z
|
geminidata/__init__.py
|
RileyR387/gemini-data-feed
|
9784dfd1c9ea976fbe9c93a08f5ba641ef858234
|
[
"MIT"
] | 7 |
2018-12-04T15:33:04.000Z
|
2020-09-11T06:17:15.000Z
|
catframes/__init__.py
|
georgy7/catframes
|
e65eb40a6d98b72a9d6609c057254a7ede3a0959
|
[
"CC0-1.0"
] | 2 |
2019-09-25T19:50:15.000Z
|
2020-02-03T12:59:22.000Z
|
__all__ = []
| 3.75 | 12 | 0.466667 |
4a1d1eb02e6daec380735410d721e76027b99047
| 19,198 |
py
|
Python
|
sklearn/datasets/_svmlight_format_io.py
|
emarkou/scikit-learn
|
d73822f84f2832dcc25f0ff58769f60871a78025
|
[
"BSD-3-Clause"
] | 1 |
2021-11-19T06:21:43.000Z
|
2021-11-19T06:21:43.000Z
|
sklearn/datasets/_svmlight_format_io.py
|
emarkou/scikit-learn
|
d73822f84f2832dcc25f0ff58769f60871a78025
|
[
"BSD-3-Clause"
] | 2 |
2021-04-13T12:48:43.000Z
|
2021-04-13T16:17:58.000Z
|
sklearn/datasets/_svmlight_format_io.py
|
emarkou/scikit-learn
|
d73822f84f2832dcc25f0ff58769f60871a78025
|
[
"BSD-3-Clause"
] | 1 |
2021-11-19T06:21:34.000Z
|
2021-11-19T06:21:34.000Z
|
"""This module implements a loader and dumper for the svmlight format
This format is a text-based format, with one sample per line. It does
not store zero valued features hence is suitable for sparse dataset.
The first element of each line can be used to store a target variable to
predict.
This format is used as the default format for both svmlight and the
libsvm command line programs.
"""
# Authors: Mathieu Blondel <mathieu@mblondel.org>
# Lars Buitinck
# Olivier Grisel <olivier.grisel@ensta.org>
# License: BSD 3 clause
from contextlib import closing
import io
import os.path
import numpy as np
import scipy.sparse as sp
from .. import __version__
from ..utils import check_array, IS_PYPY
if not IS_PYPY:
from ._svmlight_format_fast import _load_svmlight_file
else:
def _load_svmlight_file(*args, **kwargs):
raise NotImplementedError(
'load_svmlight_file is currently not '
'compatible with PyPy (see '
'https://github.com/scikit-learn/scikit-learn/issues/11543 '
'for the status updates).')
def load_svmlight_file(f, *, n_features=None, dtype=np.float64,
multilabel=False, zero_based="auto", query_id=False,
offset=0, length=-1):
"""Load datasets in the svmlight / libsvm format into sparse CSR matrix
This format is a text-based format, with one sample per line. It does
not store zero valued features hence is suitable for sparse dataset.
The first element of each line can be used to store a target variable
to predict.
This format is used as the default format for both svmlight and the
libsvm command line programs.
    Parsing a text-based source can be expensive. When working
    repeatedly on the same dataset, it is recommended to wrap this
loader with joblib.Memory.cache to store a memmapped backup of the
CSR results of the first call and benefit from the near instantaneous
loading of memmapped structures for the subsequent calls.
In case the file contains a pairwise preference constraint (known
as "qid" in the svmlight format) these are ignored unless the
query_id parameter is set to True. These pairwise preference
constraints can be used to constraint the combination of samples
when using pairwise loss functions (as is the case in some
learning to rank problems) so that only pairs with the same
query_id value are considered.
This implementation is written in Cython and is reasonably fast.
However, a faster API-compatible loader is also available at:
https://github.com/mblondel/svmlight-loader
Parameters
----------
f : str, file-like or int
(Path to) a file to load. If a path ends in ".gz" or ".bz2", it will
be uncompressed on the fly. If an integer is passed, it is assumed to
be a file descriptor. A file-like or file descriptor will not be closed
by this function. A file-like object must be opened in binary mode.
n_features : int, default=None
The number of features to use. If None, it will be inferred. This
argument is useful to load several files that are subsets of a
bigger sliced dataset: each subset might not have examples of
every feature, hence the inferred shape might vary from one
slice to another.
n_features is only required if ``offset`` or ``length`` are passed a
non-default value.
dtype : numpy data type, default=np.float64
Data type of dataset to be loaded. This will be the data type of the
output numpy arrays ``X`` and ``y``.
multilabel : bool, default=False
Samples may have several labels each (see
https://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/multilabel.html)
zero_based : bool or "auto", default="auto"
Whether column indices in f are zero-based (True) or one-based
(False). If column indices are one-based, they are transformed to
zero-based to match Python/NumPy conventions.
If set to "auto", a heuristic check is applied to determine this from
the file contents. Both kinds of files occur "in the wild", but they
are unfortunately not self-identifying. Using "auto" or True should
always be safe when no ``offset`` or ``length`` is passed.
If ``offset`` or ``length`` are passed, the "auto" mode falls back
to ``zero_based=True`` to avoid having the heuristic check yield
inconsistent results on different segments of the file.
query_id : bool, default=False
If True, will return the query_id array for each file.
offset : int, default=0
Ignore the offset first bytes by seeking forward, then
discarding the following bytes up until the next new line
character.
length : int, default=-1
If strictly positive, stop reading any new line of data once the
position in the file has reached the (offset + length) bytes threshold.
Returns
-------
X : scipy.sparse matrix of shape (n_samples, n_features)
    y : ndarray of shape (n_samples,), or, in the multilabel case, a list of
tuples of length n_samples.
query_id : array of shape (n_samples,)
query_id for each sample. Only returned when query_id is set to
True.
See Also
--------
load_svmlight_files : Similar function for loading multiple files in this
format, enforcing the same number of features/columns on all of them.
Examples
--------
To use joblib.Memory to cache the svmlight file::
from joblib import Memory
        from sklearn.datasets import load_svmlight_file
mem = Memory("./mycache")
@mem.cache
def get_data():
data = load_svmlight_file("mysvmlightfile")
return data[0], data[1]
X, y = get_data()
"""
return tuple(load_svmlight_files([f], n_features=n_features,
dtype=dtype,
multilabel=multilabel,
zero_based=zero_based,
query_id=query_id,
offset=offset,
length=length))
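# Illustrative sketch (the file path and its contents are hypothetical): a tiny
# two-sample file in svmlight format and how it loads. With the default
# zero_based="auto" heuristic the one-based indices below are shifted down,
# so X comes back with shape (2, 3).
#
#   with open("/tmp/tiny.svmlight", "wb") as fh:
#       fh.write(b"1 1:0.5 3:2.0\n-1 2:1.5\n")
#   X, y = load_svmlight_file("/tmp/tiny.svmlight")
#   # X.shape == (2, 3); y.tolist() == [1.0, -1.0]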
def _gen_open(f):
if isinstance(f, int): # file descriptor
return io.open(f, "rb", closefd=False)
elif not isinstance(f, str):
raise TypeError("expected {str, int, file-like}, got %s" % type(f))
_, ext = os.path.splitext(f)
if ext == ".gz":
import gzip
return gzip.open(f, "rb")
elif ext == ".bz2":
from bz2 import BZ2File
return BZ2File(f, "rb")
else:
return open(f, "rb")
def _open_and_load(f, dtype, multilabel, zero_based, query_id,
offset=0, length=-1):
if hasattr(f, "read"):
actual_dtype, data, ind, indptr, labels, query = \
_load_svmlight_file(f, dtype, multilabel, zero_based, query_id,
offset, length)
else:
with closing(_gen_open(f)) as f:
actual_dtype, data, ind, indptr, labels, query = \
_load_svmlight_file(f, dtype, multilabel, zero_based, query_id,
offset, length)
# convert from array.array, give data the right dtype
if not multilabel:
labels = np.frombuffer(labels, np.float64)
data = np.frombuffer(data, actual_dtype)
indices = np.frombuffer(ind, np.longlong)
indptr = np.frombuffer(indptr, dtype=np.longlong) # never empty
query = np.frombuffer(query, np.int64)
data = np.asarray(data, dtype=dtype) # no-op for float{32,64}
return data, indices, indptr, labels, query
def load_svmlight_files(files, *, n_features=None, dtype=np.float64,
multilabel=False, zero_based="auto", query_id=False,
offset=0, length=-1):
"""Load dataset from multiple files in SVMlight format
This function is equivalent to mapping load_svmlight_file over a list of
files, except that the results are concatenated into a single, flat list
and the samples vectors are constrained to all have the same number of
features.
In case the file contains a pairwise preference constraint (known
as "qid" in the svmlight format) these are ignored unless the
query_id parameter is set to True. These pairwise preference
    constraints can be used to constrain the combination of samples
when using pairwise loss functions (as is the case in some
learning to rank problems) so that only pairs with the same
query_id value are considered.
Parameters
----------
files : array-like, dtype=str, file-like or int
(Paths of) files to load. If a path ends in ".gz" or ".bz2", it will
be uncompressed on the fly. If an integer is passed, it is assumed to
be a file descriptor. File-likes and file descriptors will not be
closed by this function. File-like objects must be opened in binary
mode.
n_features : int, default=None
The number of features to use. If None, it will be inferred from the
maximum column index occurring in any of the files.
This can be set to a higher value than the actual number of features
in any of the input files, but setting it to a lower value will cause
an exception to be raised.
dtype : numpy data type, default=np.float64
Data type of dataset to be loaded. This will be the data type of the
output numpy arrays ``X`` and ``y``.
multilabel : bool, default=False
Samples may have several labels each (see
https://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/multilabel.html)
zero_based : bool or "auto", default="auto"
Whether column indices in f are zero-based (True) or one-based
(False). If column indices are one-based, they are transformed to
zero-based to match Python/NumPy conventions.
If set to "auto", a heuristic check is applied to determine this from
the file contents. Both kinds of files occur "in the wild", but they
are unfortunately not self-identifying. Using "auto" or True should
always be safe when no offset or length is passed.
If offset or length are passed, the "auto" mode falls back
to zero_based=True to avoid having the heuristic check yield
inconsistent results on different segments of the file.
query_id : bool, default=False
If True, will return the query_id array for each file.
offset : int, default=0
Ignore the offset first bytes by seeking forward, then
discarding the following bytes up until the next new line
character.
length : int, default=-1
If strictly positive, stop reading any new line of data once the
position in the file has reached the (offset + length) bytes threshold.
Returns
-------
[X1, y1, ..., Xn, yn]
where each (Xi, yi) pair is the result from load_svmlight_file(files[i]).
If query_id is set to True, this will return instead [X1, y1, q1,
..., Xn, yn, qn] where (Xi, yi, qi) is the result from
load_svmlight_file(files[i])
Notes
-----
When fitting a model to a matrix X_train and evaluating it against a
matrix X_test, it is essential that X_train and X_test have the same
number of features (X_train.shape[1] == X_test.shape[1]). This may not
be the case if you load the files individually with load_svmlight_file.
See Also
--------
load_svmlight_file
"""
if (offset != 0 or length > 0) and zero_based == "auto":
# disable heuristic search to avoid getting inconsistent results on
# different segments of the file
zero_based = True
if (offset != 0 or length > 0) and n_features is None:
raise ValueError(
"n_features is required when offset or length is specified.")
r = [_open_and_load(f, dtype, multilabel, bool(zero_based), bool(query_id),
offset=offset, length=length)
for f in files]
if (zero_based is False or
zero_based == "auto" and all(len(tmp[1]) and np.min(tmp[1]) > 0
for tmp in r)):
for _, indices, _, _, _ in r:
indices -= 1
n_f = max(ind[1].max() if len(ind[1]) else 0 for ind in r) + 1
if n_features is None:
n_features = n_f
elif n_features < n_f:
raise ValueError("n_features was set to {},"
" but input file contains {} features"
.format(n_features, n_f))
result = []
for data, indices, indptr, y, query_values in r:
shape = (indptr.shape[0] - 1, n_features)
X = sp.csr_matrix((data, indices, indptr), shape)
X.sort_indices()
result += X, y
if query_id:
result.append(query_values)
return result
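# A minimal usage sketch for the function above (illustrative only; the file
# names are placeholders). Loading a train and a test split in one call keeps
# the two matrices on the same column count, which two separate calls to
# load_svmlight_file would not guarantee:
#
#     X_train, y_train, X_test, y_test = load_svmlight_files(
#         ("train.svmlight", "test.svmlight"))
#     assert X_train.shape[1] == X_test.shape[1]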
def _dump_svmlight(X, y, f, multilabel, one_based, comment, query_id):
X_is_sp = int(hasattr(X, "tocsr"))
y_is_sp = int(hasattr(y, "tocsr"))
if X.dtype.kind == 'i':
value_pattern = "%d:%d"
else:
value_pattern = "%d:%.16g"
if y.dtype.kind == 'i':
label_pattern = "%d"
else:
label_pattern = "%.16g"
line_pattern = "%s"
if query_id is not None:
line_pattern += " qid:%d"
line_pattern += " %s\n"
if comment:
f.write(("# Generated by dump_svmlight_file from scikit-learn %s\n"
% __version__).encode())
f.write(("# Column indices are %s-based\n"
% ["zero", "one"][one_based]).encode())
f.write(b"#\n")
f.writelines(b"# %s\n" % line for line in comment.splitlines())
for i in range(X.shape[0]):
if X_is_sp:
span = slice(X.indptr[i], X.indptr[i + 1])
row = zip(X.indices[span], X.data[span])
else:
nz = X[i] != 0
row = zip(np.where(nz)[0], X[i, nz])
s = " ".join(value_pattern % (j + one_based, x) for j, x in row)
if multilabel:
if y_is_sp:
nz_labels = y[i].nonzero()[1]
else:
nz_labels = np.where(y[i] != 0)[0]
labels_str = ",".join(label_pattern % j for j in nz_labels)
else:
if y_is_sp:
labels_str = label_pattern % y.data[i]
else:
labels_str = label_pattern % y[i]
if query_id is not None:
feat = (labels_str, query_id[i], s)
else:
feat = (labels_str, s)
f.write((line_pattern % feat).encode('ascii'))
def dump_svmlight_file(X, y, f, *, zero_based=True, comment=None,
query_id=None,
multilabel=False):
"""Dump the dataset in svmlight / libsvm file format.
This format is a text-based format, with one sample per line. It does
not store zero valued features hence is suitable for sparse dataset.
The first element of each line can be used to store a target variable
to predict.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
y : {array-like, sparse matrix}, shape = [n_samples (, n_labels)]
Target values. Class labels must be an
integer or float, or array-like objects of integer or float for
multilabel classifications.
f : string or file-like in binary mode
If string, specifies the path that will contain the data.
If file-like, data will be written to f. f should be opened in binary
mode.
zero_based : boolean, default=True
Whether column indices should be written zero-based (True) or one-based
(False).
comment : string, default=None
Comment to insert at the top of the file. This should be either a
Unicode string, which will be encoded as UTF-8, or an ASCII byte
string.
If a comment is given, then it will be preceded by one that identifies
the file as having been dumped by scikit-learn. Note that not all
tools grok comments in SVMlight files.
query_id : array-like of shape (n_samples,), default=None
Array containing pairwise preference constraints (qid in svmlight
format).
multilabel : boolean, default=False
Samples may have several labels each (see
https://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/multilabel.html)
.. versionadded:: 0.17
parameter *multilabel* to support multilabel datasets.
"""
if comment is not None:
# Convert comment string to list of lines in UTF-8.
# If a byte string is passed, then check whether it's ASCII;
# if a user wants to get fancy, they'll have to decode themselves.
# Avoid mention of str and unicode types for Python 3.x compat.
if isinstance(comment, bytes):
comment.decode("ascii") # just for the exception
else:
comment = comment.encode("utf-8")
if b"\0" in comment:
raise ValueError("comment string contains NUL byte")
yval = check_array(y, accept_sparse='csr', ensure_2d=False)
if sp.issparse(yval):
if yval.shape[1] != 1 and not multilabel:
raise ValueError("expected y of shape (n_samples, 1),"
" got %r" % (yval.shape,))
else:
if yval.ndim != 1 and not multilabel:
raise ValueError("expected y of shape (n_samples,), got %r"
% (yval.shape,))
Xval = check_array(X, accept_sparse='csr')
if Xval.shape[0] != yval.shape[0]:
raise ValueError(
"X.shape[0] and y.shape[0] should be the same, got"
" %r and %r instead." % (Xval.shape[0], yval.shape[0])
)
# We had some issues with CSR matrices with unsorted indices (e.g. #1501),
# so sort them here, but first make sure we don't modify the user's X.
# TODO We can do this cheaper; sorted_indices copies the whole matrix.
if yval is y and hasattr(yval, "sorted_indices"):
y = yval.sorted_indices()
else:
y = yval
if hasattr(y, "sort_indices"):
y.sort_indices()
if Xval is X and hasattr(Xval, "sorted_indices"):
X = Xval.sorted_indices()
else:
X = Xval
if hasattr(X, "sort_indices"):
X.sort_indices()
if query_id is not None:
query_id = np.asarray(query_id)
if query_id.shape[0] != y.shape[0]:
raise ValueError("expected query_id of shape (n_samples,), got %r"
% (query_id.shape,))
one_based = not zero_based
if hasattr(f, "write"):
_dump_svmlight(X, y, f, multilabel, one_based, comment, query_id)
else:
with open(f, "wb") as f:
_dump_svmlight(X, y, f, multilabel, one_based, comment, query_id)
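# Illustrative round-trip sketch (not part of the library API surface): write a
# tiny dense matrix with dump_svmlight_file and read it back with
# load_svmlight_file through an in-memory binary buffer. It relies only on the
# numpy (np) and io imports already used in this module.
if __name__ == "__main__":
    _X_demo = np.array([[0.0, 1.5], [2.0, 0.0]])
    _y_demo = np.array([0.0, 1.0])
    _buf = io.BytesIO()
    dump_svmlight_file(_X_demo, _y_demo, _buf, zero_based=True)
    _buf.seek(0)
    # n_features is passed explicitly so the shape matches the original matrix.
    _X_back, _y_back = load_svmlight_file(_buf, n_features=2, zero_based=True)
    assert np.allclose(_X_back.toarray(), _X_demo)
    assert np.allclose(_y_back, _y_demo)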
| 38.705645 | 80 | 0.629128 |
4a1d1f14ae3d51b487e09d8b4d117f42105843f3
| 565 |
py
|
Python
|
setup.py
|
markjarzynski/slitscan
|
2f61b8161d45382593c807ba65e2753c1a995847
|
[
"MIT"
] | null | null | null |
setup.py
|
markjarzynski/slitscan
|
2f61b8161d45382593c807ba65e2753c1a995847
|
[
"MIT"
] | null | null | null |
setup.py
|
markjarzynski/slitscan
|
2f61b8161d45382593c807ba65e2753c1a995847
|
[
"MIT"
] | null | null | null |
from setuptools import setup
def readme():
with open('README.md') as f:
return f.read()
setup(name='slitscan',
version='0.3.1',
description='Slit-scan photography.',
url='http://github.com/markjarzynski/slitscan',
author='Mark Jarzynski',
author_email='mark.jarzynski@gmail.com',
license='MIT',
packages=['slitscan'],
install_requires=[
'numpy',
'imageio'
],
entry_points={
'console_scripts': ['slitscan=slitscan.__main__:main'],
},
zip_safe=False)
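# Illustrative usage sketch, not part of the package metadata above. Assuming a
# checkout of the project root and a working pip, the console script declared
# in entry_points becomes available after installation:
#
#     pip install .
#     slitscan --help
#
# The `--help` flag is an assumption about the CLI; the actual options are
# defined in slitscan/__main__.py.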
| 24.565217 | 65 | 0.587611 |
4a1d208864bbca039bdc1a40c81359b943a6188d
| 12,126 |
py
|
Python
|
fstxt/train.py
|
akramkohansal/pytorch-yolo3
|
21a78afbf097e4f17f62e9a76974ea2f6d7c65dd
|
[
"MIT"
] | null | null | null |
fstxt/train.py
|
akramkohansal/pytorch-yolo3
|
21a78afbf097e4f17f62e9a76974ea2f6d7c65dd
|
[
"MIT"
] | null | null | null |
fstxt/train.py
|
akramkohansal/pytorch-yolo3
|
21a78afbf097e4f17f62e9a76974ea2f6d7c65dd
|
[
"MIT"
] | null | null | null |
from __future__ import print_function
import sys
import time
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torch.backends.cudnn as cudnn
from torchvision import datasets, transforms
from torch.autograd import Variable
import tensorflow as tf
from pyfasttext import FastText
import dataset
import random
import math
import os
from utils import *
from cfg import parse_cfg
from region_loss import RegionLoss
from darknet import Darknet
from models.tiny_yolo import TinyYoloNet
# cmd = 'python train.py cfg/voc.data cfg/yolo-voc.cfg darknet19_448.conv.23'
# cmd_detection = 'python train.py cfg/detection.data cfg/yolo-detection.cfg darknet19_448.conv.23'
'''
# STANDARD TRAINING PASCAL VOC:
datacfg = 'cfg/voc.data'
cfgfile = 'cfg/yolo-voc.cfg'
weightfile = 'darknet19_448.conv.23'
# TRAINING FOR TEXT DETECTION on SYNTH:
datacfg = 'cfg/small_detection.data'
cfgfile = 'cfg/yolo-detection.cfg'
weightfile = 'darknet19_448.conv.23'
# TRAINING FOR TEXT RECOGNITION on SMALL SYNTH:
datacfg = 'cfg/small_recognition.data'
cfgfile = 'cfg/yolo-recognition.cfg'
weightfile = 'backup/002940.weights'
#weightfile = 'darknet19_448.conv.23'
# TRAINING ON COMPLETE MAFLA COLOR SYNTH:
datacfg = 'cfg/mafladataset_recognition.data'
cfgfile = 'cfg/yolo-recognition-13anchors.cfg'
weightfile = 'backup/000001.weights'
if len(sys.argv) != 4:
print('Usage:')
print('python train.py datacfg cfgfile weightfile')
exit()
# Training settings
datacfg = sys.argv[1]
cfgfile = sys.argv[2]
weightfile = sys.argv[3]
'''
# TRAINING FOR TEXT RECOGNITION on COMPLETE SYNTH:
#datacfg = 'cfg/distributed_mixed_recognition.data'
datacfg = 'cfg/small_overfit.data'
#datacfg = 'cfg/distributed_mixed_recognition.data'
cfgfile = 'cfg/yolo-fasttext-13anchors.cfg'
weightfile = 'backup_overfit/001360.weights'
#weightfile = 'backup/darknet19_448.conv.23'
data_options = read_data_cfg(datacfg)
net_options = parse_cfg(cfgfile)[0]
trainlist = data_options['train']
testlist = data_options['valid']
backupdir = data_options['backup']
nsamples = file_lines(trainlist)
gpus = data_options['gpus'] # e.g. 0,1,2,3
ngpus = len(gpus.split(','))
num_workers = int(data_options['num_workers'])
batch_size = int(net_options['batch'])
max_batches = int(net_options['max_batches'])
learning_rate = float(net_options['learning_rate'])
momentum = float(net_options['momentum'])
decay = float(net_options['decay'])
steps = [float(step) for step in net_options['steps'].split(',')]
scales = [float(scale) for scale in net_options['scales'].split(',')]
#Train parameters
max_epochs = max_batches*batch_size//nsamples+1  # integer division: range() below needs ints
use_cuda = True
seed = int(time.time())
eps = 1e-5
save_interval = 40 # epochs
dot_interval = 70 # batches
# Test parameters
conf_thresh = 0.25
nms_thresh = 0.4
iou_thresh = 0.5
if not os.path.exists(backupdir):
os.mkdir(backupdir)
print("Loading Fasttext Model...")
print("...")
fasttext_model = FastText('/home/amafla/Downloads/wiki.en.bin')
# WORK WITH NORMALIZED VECTORS:
# self.model.numpy_normalized_vectors
###############
torch.manual_seed(seed)
if use_cuda:
os.environ['CUDA_VISIBLE_DEVICES'] = gpus
torch.cuda.manual_seed(seed)
# CREATE THE MODEL (LAYERS, ROUTE, REORG AND LOSS)
model = Darknet(cfgfile)
region_loss = model.loss
model.load_weights(weightfile)
model.print_network()
region_loss.seen = model.seen
processed_batches = model.seen//batch_size
init_width = model.width
init_height = model.height
init_epoch = model.seen//nsamples
kwargs = {'num_workers': num_workers, 'pin_memory': True} if use_cuda else {}
test_loader = torch.utils.data.DataLoader(
dataset.listDataset(testlist, fasttext_model, shape=(init_width, init_height),
shuffle=False,
transform=transforms.Compose([
transforms.ToTensor(),
]), train=False),
batch_size=batch_size, shuffle=False, **kwargs)
if use_cuda:
if ngpus > 1:
model = torch.nn.DataParallel(model).cuda()
else:
model = model.cuda()
params_dict = dict(model.named_parameters())
params = []
for key, value in params_dict.items():
if key.find('.bn') >= 0 or key.find('.bias') >= 0:
params += [{'params': [value], 'weight_decay': 0.0}]
else:
params += [{'params': [value], 'weight_decay': decay*batch_size}]
""" SETS THE OPTIMIZER TO USE:"""
#optimizer = optim.SGD(model.parameters(), lr=learning_rate/batch_size, momentum=momentum, dampening=0, weight_decay=decay*batch_size)
optimizer = optim.Adam(model.parameters(), lr=learning_rate/batch_size, betas=(0.9, 0.999), eps=1e-08, weight_decay=decay*batch_size)
def adjust_learning_rate(optimizer, batch):
"""Sets the learning rate to the initial LR decayed by 10 every 30 epochs"""
lr = learning_rate
for i in range(len(steps)):
scale = scales[i] if i < len(scales) else 1
if batch >= steps[i]:
lr = lr * scale
if batch == steps[i]:
break
else:
break
for param_group in optimizer.param_groups:
#param_group['lr'] = lr/batch_size
param_group['lr'] = lr
return lr
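# Worked example of the schedule implemented above (illustrative numbers, not
# the values parsed from the cfg): with learning_rate = 1e-3, steps = [100,
# 25000] and scales = [10, 0.1], a call with batch = 50 leaves lr at 1e-3,
# batch = 100 scales it to 1e-2 and stops at that step, and batch = 30000 has
# passed both steps, so the result is 1e-3 * 10 * 0.1 = 1e-3.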
def train(epoch):
# Graph in Tensorboard
tf_summary_writer = tf and tf.summary.FileWriter('tensorboard/')
global processed_batches
t0 = time.time()
if ngpus > 1:
cur_model = model.module
else:
cur_model = model
train_loader = torch.utils.data.DataLoader(
dataset.listDataset(trainlist, fasttext_model, shape=(init_width, init_height),
shuffle=True,
transform=transforms.Compose([
transforms.ToTensor(),
]),
train=True,
seen=cur_model.seen,
batch_size=batch_size,
num_workers=num_workers),
batch_size=batch_size, shuffle=False, **kwargs)
lr = adjust_learning_rate(optimizer, processed_batches)
logging('epoch %d, processed %d samples, lr %f' % (epoch, epoch * len(train_loader.dataset), lr))
model.train()
t1 = time.time()
avg_time = torch.zeros(9)
for batch_idx, (data, target, embedding_matrix) in enumerate(train_loader): # GENERATES BATCH IDX, BATCH (BxCxWxH) AND GROUND TRUTH (VECTOR TARGET (250X1))
t2 = time.time()
adjust_learning_rate(optimizer, processed_batches)
processed_batches = processed_batches + 1
#if (batch_idx+1) % dot_interval == 0:
# sys.stdout.write('.')
if use_cuda:
data = data.cuda()
#target= target.cuda()
t3 = time.time()
data, target, embedding_matrix = Variable(data), Variable(target), Variable(embedding_matrix)
t4 = time.time()
        # do not accumulate gradients across batches
model.zero_grad()
optimizer.zero_grad()
t5 = time.time()
output = model(data)
t6 = time.time()
region_loss.seen = region_loss.seen + data.data.size(0)
loss = region_loss(output, target, embedding_matrix)
t7 = time.time()
loss.backward()
t8 = time.time()
optimizer.step()
t9 = time.time()
# Graph in Tensorboard
if (batch_idx % 10 == 0):
if tf is not None:
add_summary_value(tf_summary_writer, 'train_loss', loss.data[0],batch_idx+1)
tf_summary_writer.flush()
if False and batch_idx > 1:
avg_time[0] = avg_time[0] + (t2-t1)
avg_time[1] = avg_time[1] + (t3-t2)
avg_time[2] = avg_time[2] + (t4-t3)
avg_time[3] = avg_time[3] + (t5-t4)
avg_time[4] = avg_time[4] + (t6-t5)
avg_time[5] = avg_time[5] + (t7-t6)
avg_time[6] = avg_time[6] + (t8-t7)
avg_time[7] = avg_time[7] + (t9-t8)
avg_time[8] = avg_time[8] + (t9-t1)
print('-------------------------------')
print(' load data : %f' % (avg_time[0]/(batch_idx)))
print(' cpu to cuda : %f' % (avg_time[1]/(batch_idx)))
print('cuda to variable : %f' % (avg_time[2]/(batch_idx)))
print(' zero_grad : %f' % (avg_time[3]/(batch_idx)))
print(' forward feature : %f' % (avg_time[4]/(batch_idx)))
print(' forward loss : %f' % (avg_time[5]/(batch_idx)))
print(' backward : %f' % (avg_time[6]/(batch_idx)))
print(' step : %f' % (avg_time[7]/(batch_idx)))
print(' total : %f' % (avg_time[8]/(batch_idx)))
t1 = time.time()
t1 = time.time()
logging('training with %f samples/s' % (len(train_loader.dataset)/(t1-t0)))
if (epoch+1) % save_interval == 0:
logging('save weights to %s/%06d.weights' % (backupdir, epoch+1))
cur_model.seen = (epoch + 1) * len(train_loader.dataset)
cur_model.save_weights('%s/%06d.weights' % (backupdir, epoch+1))
def test(epoch):
def truths_length(truths):
for i in range(50):
if truths[i][1] == 0:
return i
model.eval()
if ngpus > 1:
cur_model = model.module
else:
cur_model = model
num_classes = cur_model.num_classes
anchors = cur_model.anchors
num_anchors = cur_model.num_anchors
total = 0.0
proposals = 0.0
correct = 0.0
#with torch.no_grad()
for batch_idx, (data, target, phoc_matrix) in enumerate(test_loader):
if use_cuda:
data = data.cuda()
data = Variable(data, volatile=True)
output = model(data).data
# GETS LIST all_boxes OF PREDICTIONS WITH CONFIDENCE ABOVE A THRESHOLD
all_boxes = get_region_boxes(output, conf_thresh, num_classes, anchors, num_anchors)
for i in range(output.size(0)):
boxes = all_boxes[i] # SELECT BOXES ACCORDING TO EACH IMAGE IN THE BATCH
boxes = nms(boxes, nms_thresh)
truths = target[i].view(-1, 5)
num_gts = truths_length(truths)
phoc_batch = phoc_matrix[i][:]
total = total + num_gts
for i in range(len(boxes)):
if boxes[i][4] > conf_thresh:
proposals = proposals+1
for i in range(num_gts):
# box_gt = [truths[i][1], truths[i][2], truths[i][3], truths[i][4], 1.0, 1.0, truths[i][0]] # set true to PHOC_MATRIX
box_gt = [truths[i][1], truths[i][2], truths[i][3], truths[i][4], 1.0, 1.0, phoc_batch[i][:]]
best_iou = 0
best_j = -1
for j in range(len(boxes)):
iou = bbox_iou(box_gt, boxes[j], x1y1x2y2=False)
if iou > best_iou:
best_j = j
best_iou = iou
#prediction = Variable (boxes[best_j][5])
#gt_phoc = Variable(box_gt[6].type(torch.FloatTensor))
                if best_j >= 0:  # -1 means no overlapping box was found; index 0 is a valid match
prediction = boxes[best_j][5]
else:
prediction = torch.ones(604)
gt_phoc = box_gt[6].type(torch.FloatTensor)
phoc_similarity = torch.nn.functional.binary_cross_entropy(prediction, gt_phoc)
phoc_similarity = phoc_similarity.data.numpy()
if best_iou > iou_thresh and (phoc_similarity < 0.2):
correct = correct+1
precision = 1.0*correct/(proposals+eps)
recall = 1.0*correct/(total+eps)
fscore = 2.0*precision*recall/(precision+recall+eps)
logging("precision: %f, recall: %f, fscore: %f" % (precision, recall, fscore))
evaluate = False
if evaluate:
logging('evaluating ...')
test(0)
else:
for epoch in range(init_epoch, max_epochs):
train(epoch)
#test(epoch)
| 35.046243 | 159 | 0.606053 |
4a1d20eac3a3ebba2a4bed2e8b773a9d1f0d045b
| 6,363 |
py
|
Python
|
word_language_model/model.py
|
kyox24/examples
|
3d63fe18c5016baedaf0170ba339f2412e4cc4f2
|
[
"BSD-3-Clause"
] | null | null | null |
word_language_model/model.py
|
kyox24/examples
|
3d63fe18c5016baedaf0170ba339f2412e4cc4f2
|
[
"BSD-3-Clause"
] | null | null | null |
word_language_model/model.py
|
kyox24/examples
|
3d63fe18c5016baedaf0170ba339f2412e4cc4f2
|
[
"BSD-3-Clause"
] | null | null | null |
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
class RNNModel(nn.Module):
"""Container module with an encoder, a recurrent module, and a decoder."""
def __init__(self, rnn_type, ntoken, ninp, nhid, nlayers, dropout=0.5, tie_weights=False):
super(RNNModel, self).__init__()
self.ntoken = ntoken
self.drop = nn.Dropout(dropout)
self.encoder = nn.Embedding(ntoken, ninp)
if rnn_type in ['LSTM', 'GRU']:
self.rnn = getattr(nn, rnn_type)(ninp, nhid, nlayers, dropout=dropout)
else:
try:
nonlinearity = {'RNN_TANH': 'tanh', 'RNN_RELU': 'relu'}[rnn_type]
except KeyError:
raise ValueError( """An invalid option for `--model` was supplied,
options are ['LSTM', 'GRU', 'RNN_TANH' or 'RNN_RELU']""")
self.rnn = nn.RNN(ninp, nhid, nlayers, nonlinearity=nonlinearity, dropout=dropout)
self.decoder = nn.Linear(nhid, ntoken)
# Optionally tie weights as in:
# "Using the Output Embedding to Improve Language Models" (Press & Wolf 2016)
# https://arxiv.org/abs/1608.05859
# and
# "Tying Word Vectors and Word Classifiers: A Loss Framework for Language Modeling" (Inan et al. 2016)
# https://arxiv.org/abs/1611.01462
if tie_weights:
if nhid != ninp:
raise ValueError('When using the tied flag, nhid must be equal to emsize')
self.decoder.weight = self.encoder.weight
self.init_weights()
self.rnn_type = rnn_type
self.nhid = nhid
self.nlayers = nlayers
def init_weights(self):
initrange = 0.1
nn.init.uniform_(self.encoder.weight, -initrange, initrange)
nn.init.zeros_(self.decoder.bias)
nn.init.uniform_(self.decoder.weight, -initrange, initrange)
def forward(self, input, hidden):
emb = self.drop(self.encoder(input))
output, hidden = self.rnn(emb, hidden)
output = self.drop(output)
decoded = self.decoder(output)
decoded = decoded.view(-1, self.ntoken)
return F.log_softmax(decoded, dim=1), hidden
def init_hidden(self, bsz):
weight = next(self.parameters())
if self.rnn_type == 'LSTM':
return (weight.new_zeros(self.nlayers, bsz, self.nhid),
weight.new_zeros(self.nlayers, bsz, self.nhid))
else:
return weight.new_zeros(self.nlayers, bsz, self.nhid)
# Temporarily leave PositionalEncoding module here. Will be moved somewhere else.
class PositionalEncoding(nn.Module):
r"""Inject some information about the relative or absolute position of the tokens
in the sequence. The positional encodings have the same dimension as
the embeddings, so that the two can be summed. Here, we use sine and cosine
functions of different frequencies.
.. math::
        \text{PosEncoder}(pos, 2i) = \sin(pos / 10000^{2i / d_{model}})
        \text{PosEncoder}(pos, 2i+1) = \cos(pos / 10000^{2i / d_{model}})
    where pos is the word position and i is the embed idx.
Args:
d_model: the embed dim (required).
dropout: the dropout value (default=0.1).
max_len: the max. length of the incoming sequence (default=5000).
Examples:
>>> pos_encoder = PositionalEncoding(d_model)
"""
def __init__(self, d_model, dropout=0.1, max_len=5000):
super(PositionalEncoding, self).__init__()
self.dropout = nn.Dropout(p=dropout)
pe = torch.zeros(max_len, d_model)
position = torch.arange(0, max_len, dtype=torch.float).unsqueeze(1)
div_term = torch.exp(torch.arange(0, d_model, 2).float() * (-math.log(10000.0) / d_model))
pe[:, 0::2] = torch.sin(position * div_term)
pe[:, 1::2] = torch.cos(position * div_term)
pe = pe.unsqueeze(0).transpose(0, 1)
self.register_buffer('pe', pe)
def forward(self, x):
r"""Inputs of forward function
Args:
x: the sequence fed to the positional encoder model (required).
Shape:
x: [sequence length, batch size, embed dim]
output: [sequence length, batch size, embed dim]
Examples:
>>> output = pos_encoder(x)
"""
x = x + self.pe[:x.size(0), :]
return self.dropout(x)
class TransformerModel(nn.Module):
"""Container module with an encoder, a recurrent or transformer module, and a decoder."""
def __init__(self, ntoken, ninp, nhead, nhid, nlayers, dropout=0.5):
super(TransformerModel, self).__init__()
try:
from torch.nn import TransformerEncoder, TransformerEncoderLayer
        except ImportError:
raise ImportError('TransformerEncoder module does not exist in PyTorch 1.1 or lower.')
self.model_type = 'Transformer'
self.src_mask = None
self.pos_encoder = PositionalEncoding(ninp, dropout)
encoder_layers = TransformerEncoderLayer(ninp, nhead, nhid, dropout)
self.transformer_encoder = TransformerEncoder(encoder_layers, nlayers)
self.encoder = nn.Embedding(ntoken, ninp)
self.ninp = ninp
self.decoder = nn.Linear(ninp, ntoken)
self.init_weights()
def _generate_square_subsequent_mask(self, sz):
mask = (torch.triu(torch.ones(sz, sz)) == 1).transpose(0, 1)
mask = mask.float().masked_fill(mask == 0, float('-inf')).masked_fill(mask == 1, float(0.0))
return mask
def init_weights(self):
initrange = 0.1
nn.init.uniform_(self.encoder.weight, -initrange, initrange)
nn.init.zeros_(self.decoder.bias)
nn.init.uniform_(self.decoder.weight, -initrange, initrange)
def forward(self, src, has_mask=True):
if has_mask:
device = src.device
if self.src_mask is None or self.src_mask.size(0) != len(src):
mask = self._generate_square_subsequent_mask(len(src)).to(device)
self.src_mask = mask
else:
self.src_mask = None
src = self.encoder(src) * math.sqrt(self.ninp)
src = self.pos_encoder(src)
output = self.transformer_encoder(src, self.src_mask)
output = self.decoder(output)
return F.log_softmax(output, dim=-1)
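if __name__ == "__main__":
    # Minimal smoke-test sketch (illustrative only, not part of the example
    # project): the sizes below are arbitrary and only exercise the shapes
    # implied by the constructors defined above.
    ntoken, ninp, nhid, nlayers, nhead = 100, 16, 16, 2, 2
    seq_len, bsz = 5, 3
    dummy = torch.randint(0, ntoken, (seq_len, bsz))
    rnn = RNNModel('LSTM', ntoken, ninp, nhid, nlayers)
    hidden = rnn.init_hidden(bsz)
    log_probs, hidden = rnn(dummy, hidden)
    assert log_probs.shape == (seq_len * bsz, ntoken)
    transformer = TransformerModel(ntoken, ninp, nhead, nhid, nlayers)
    out = transformer(dummy)
    assert out.shape == (seq_len, bsz, ntoken)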
| 41.588235 | 110 | 0.624234 |
4a1d22684ec4525792d563193689eca6ad2205b0
| 617 |
py
|
Python
|
sdk/python/pulumi_azure_native/desktopvirtualization/v20201102preview/__init__.py
|
sebtelko/pulumi-azure-native
|
711ec021b5c73da05611c56c8a35adb0ce3244e4
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure_native/desktopvirtualization/v20201102preview/__init__.py
|
sebtelko/pulumi-azure-native
|
711ec021b5c73da05611c56c8a35adb0ce3244e4
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure_native/desktopvirtualization/v20201102preview/__init__.py
|
sebtelko/pulumi-azure-native
|
711ec021b5c73da05611c56c8a35adb0ce3244e4
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
from ... import _utilities
import typing
# Export this package's modules as members:
from ._enums import *
from .application import *
from .application_group import *
from .get_application import *
from .get_application_group import *
from .get_host_pool import *
from .get_msix_package import *
from .get_workspace import *
from .host_pool import *
from .msix_package import *
from .workspace import *
from ._inputs import *
from . import outputs
| 29.380952 | 80 | 0.756888 |
4a1d25bbc5882327434365e0944aed6ecc4728cc
| 37,579 |
py
|
Python
|
test/functional/wallet_basic.py
|
timmywheels/bitcoin
|
55a156fca08713b020aafef91f40df8ce4bc3cae
|
[
"MIT"
] | 2 |
2021-05-18T06:24:12.000Z
|
2021-05-18T07:21:03.000Z
|
test/functional/wallet_basic.py
|
timmywheels/bitcoin
|
55a156fca08713b020aafef91f40df8ce4bc3cae
|
[
"MIT"
] | 1 |
2021-05-20T02:45:54.000Z
|
2021-05-20T02:45:54.000Z
|
test/functional/wallet_basic.py
|
timmywheels/bitcoin
|
55a156fca08713b020aafef91f40df8ce4bc3cae
|
[
"MIT"
] | 1 |
2021-05-21T13:12:57.000Z
|
2021-05-21T13:12:57.000Z
|
#!/usr/bin/env python3
# Copyright (c) 2014-2020 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the wallet."""
from decimal import Decimal
from itertools import product
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_array_result,
assert_equal,
assert_fee_amount,
assert_raises_rpc_error,
)
from test_framework.wallet_util import test_address
NOT_A_NUMBER_OR_STRING = "Amount is not a number or string"
OUT_OF_RANGE = "Amount out of range"
class WalletTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 4
self.extra_args = [[
"-acceptnonstdtxn=1",
]] * self.num_nodes
self.setup_clean_chain = True
self.supports_cli = False
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def setup_network(self):
self.setup_nodes()
# Only need nodes 0-2 running at start of test
self.stop_node(3)
self.connect_nodes(0, 1)
self.connect_nodes(1, 2)
self.connect_nodes(0, 2)
self.sync_all(self.nodes[0:3])
def check_fee_amount(self, curr_balance, balance_with_fee, fee_per_byte, tx_size):
"""Return curr_balance after asserting the fee was in range"""
fee = balance_with_fee - curr_balance
assert_fee_amount(fee, tx_size, fee_per_byte * 1000)
return curr_balance
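    # Worked example of the fee bookkeeping above (illustrative numbers): with
    # fee_per_byte = Decimal('0.001') / 1000 BTC/vB and a 200 vB transaction,
    # the expected fee is roughly 200 * 0.000001 = 0.0002 BTC, which is what
    # balance_with_fee - curr_balance is asserted to be close to.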
def get_vsize(self, txn):
return self.nodes[0].decoderawtransaction(txn)['vsize']
def run_test(self):
# Check that there's no UTXO on none of the nodes
assert_equal(len(self.nodes[0].listunspent()), 0)
assert_equal(len(self.nodes[1].listunspent()), 0)
assert_equal(len(self.nodes[2].listunspent()), 0)
self.log.info("Mining blocks...")
self.nodes[0].generate(1)
walletinfo = self.nodes[0].getwalletinfo()
assert_equal(walletinfo['immature_balance'], 50)
assert_equal(walletinfo['balance'], 0)
self.sync_all(self.nodes[0:3])
self.nodes[1].generate(101)
self.sync_all(self.nodes[0:3])
assert_equal(self.nodes[0].getbalance(), 50)
assert_equal(self.nodes[1].getbalance(), 50)
assert_equal(self.nodes[2].getbalance(), 0)
# Check that only first and second nodes have UTXOs
utxos = self.nodes[0].listunspent()
assert_equal(len(utxos), 1)
assert_equal(len(self.nodes[1].listunspent()), 1)
assert_equal(len(self.nodes[2].listunspent()), 0)
self.log.info("Test gettxout")
confirmed_txid, confirmed_index = utxos[0]["txid"], utxos[0]["vout"]
# First, outputs that are unspent both in the chain and in the
# mempool should appear with or without include_mempool
txout = self.nodes[0].gettxout(txid=confirmed_txid, n=confirmed_index, include_mempool=False)
assert_equal(txout['value'], 50)
txout = self.nodes[0].gettxout(txid=confirmed_txid, n=confirmed_index, include_mempool=True)
assert_equal(txout['value'], 50)
# Send 21 BTC from 0 to 2 using sendtoaddress call.
self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 11)
mempool_txid = self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 10)
self.log.info("Test gettxout (second part)")
# utxo spent in mempool should be visible if you exclude mempool
# but invisible if you include mempool
txout = self.nodes[0].gettxout(confirmed_txid, confirmed_index, False)
assert_equal(txout['value'], 50)
txout = self.nodes[0].gettxout(confirmed_txid, confirmed_index) # by default include_mempool=True
assert txout is None
txout = self.nodes[0].gettxout(confirmed_txid, confirmed_index, True)
assert txout is None
# new utxo from mempool should be invisible if you exclude mempool
# but visible if you include mempool
txout = self.nodes[0].gettxout(mempool_txid, 0, False)
assert txout is None
txout1 = self.nodes[0].gettxout(mempool_txid, 0, True)
txout2 = self.nodes[0].gettxout(mempool_txid, 1, True)
# note the mempool tx will have randomly assigned indices
# but 10 will go to node2 and the rest will go to node0
balance = self.nodes[0].getbalance()
assert_equal(set([txout1['value'], txout2['value']]), set([10, balance]))
walletinfo = self.nodes[0].getwalletinfo()
assert_equal(walletinfo['immature_balance'], 0)
# Have node0 mine a block, thus it will collect its own fee.
self.nodes[0].generate(1)
self.sync_all(self.nodes[0:3])
# Exercise locking of unspent outputs
unspent_0 = self.nodes[2].listunspent()[0]
unspent_0 = {"txid": unspent_0["txid"], "vout": unspent_0["vout"]}
assert_raises_rpc_error(-8, "Invalid parameter, expected locked output", self.nodes[2].lockunspent, True, [unspent_0])
self.nodes[2].lockunspent(False, [unspent_0])
assert_raises_rpc_error(-8, "Invalid parameter, output already locked", self.nodes[2].lockunspent, False, [unspent_0])
assert_raises_rpc_error(-6, "Insufficient funds", self.nodes[2].sendtoaddress, self.nodes[2].getnewaddress(), 20)
assert_equal([unspent_0], self.nodes[2].listlockunspent())
self.nodes[2].lockunspent(True, [unspent_0])
assert_equal(len(self.nodes[2].listlockunspent()), 0)
assert_raises_rpc_error(-8, "txid must be of length 64 (not 34, for '0000000000000000000000000000000000')",
self.nodes[2].lockunspent, False,
[{"txid": "0000000000000000000000000000000000", "vout": 0}])
assert_raises_rpc_error(-8, "txid must be hexadecimal string (not 'ZZZ0000000000000000000000000000000000000000000000000000000000000')",
self.nodes[2].lockunspent, False,
[{"txid": "ZZZ0000000000000000000000000000000000000000000000000000000000000", "vout": 0}])
assert_raises_rpc_error(-8, "Invalid parameter, unknown transaction",
self.nodes[2].lockunspent, False,
[{"txid": "0000000000000000000000000000000000000000000000000000000000000000", "vout": 0}])
assert_raises_rpc_error(-8, "Invalid parameter, vout index out of bounds",
self.nodes[2].lockunspent, False,
[{"txid": unspent_0["txid"], "vout": 999}])
# The lock on a manually selected output is ignored
unspent_0 = self.nodes[1].listunspent()[0]
self.nodes[1].lockunspent(False, [unspent_0])
tx = self.nodes[1].createrawtransaction([unspent_0], { self.nodes[1].getnewaddress() : 1 })
self.nodes[1].fundrawtransaction(tx,{"lockUnspents": True})
# fundrawtransaction can lock an input
self.nodes[1].lockunspent(True, [unspent_0])
assert_equal(len(self.nodes[1].listlockunspent()), 0)
tx = self.nodes[1].fundrawtransaction(tx,{"lockUnspents": True})['hex']
assert_equal(len(self.nodes[1].listlockunspent()), 1)
# Send transaction
tx = self.nodes[1].signrawtransactionwithwallet(tx)["hex"]
self.nodes[1].sendrawtransaction(tx)
assert_equal(len(self.nodes[1].listlockunspent()), 0)
# Have node1 generate 100 blocks (so node0 can recover the fee)
self.nodes[1].generate(100)
self.sync_all(self.nodes[0:3])
# node0 should end up with 100 btc in block rewards plus fees, but
# minus the 21 plus fees sent to node2
assert_equal(self.nodes[0].getbalance(), 100 - 21)
assert_equal(self.nodes[2].getbalance(), 21)
# Node0 should have two unspent outputs.
# Create a couple of transactions to send them to node2, submit them through
# node1, and make sure both node0 and node2 pick them up properly:
node0utxos = self.nodes[0].listunspent(1)
assert_equal(len(node0utxos), 2)
# create both transactions
txns_to_send = []
for utxo in node0utxos:
inputs = []
outputs = {}
inputs.append({"txid": utxo["txid"], "vout": utxo["vout"]})
outputs[self.nodes[2].getnewaddress()] = utxo["amount"] - 3
raw_tx = self.nodes[0].createrawtransaction(inputs, outputs)
txns_to_send.append(self.nodes[0].signrawtransactionwithwallet(raw_tx))
# Have node 1 (miner) send the transactions
self.nodes[1].sendrawtransaction(hexstring=txns_to_send[0]["hex"], maxfeerate=0)
self.nodes[1].sendrawtransaction(hexstring=txns_to_send[1]["hex"], maxfeerate=0)
# Have node1 mine a block to confirm transactions:
self.nodes[1].generate(1)
self.sync_all(self.nodes[0:3])
assert_equal(self.nodes[0].getbalance(), 0)
assert_equal(self.nodes[2].getbalance(), 94)
# Verify that a spent output cannot be locked anymore
spent_0 = {"txid": node0utxos[0]["txid"], "vout": node0utxos[0]["vout"]}
assert_raises_rpc_error(-8, "Invalid parameter, expected unspent output", self.nodes[0].lockunspent, False, [spent_0])
# Send 10 BTC normal
address = self.nodes[0].getnewaddress("test")
fee_per_byte = Decimal('0.001') / 1000
self.nodes[2].settxfee(fee_per_byte * 1000)
txid = self.nodes[2].sendtoaddress(address, 10, "", "", False)
self.nodes[2].generate(1)
self.sync_all(self.nodes[0:3])
node_2_bal = self.check_fee_amount(self.nodes[2].getbalance(), Decimal('84'), fee_per_byte, self.get_vsize(self.nodes[2].gettransaction(txid)['hex']))
assert_equal(self.nodes[0].getbalance(), Decimal('10'))
# Send 10 BTC with subtract fee from amount
txid = self.nodes[2].sendtoaddress(address, 10, "", "", True)
self.nodes[2].generate(1)
self.sync_all(self.nodes[0:3])
node_2_bal -= Decimal('10')
assert_equal(self.nodes[2].getbalance(), node_2_bal)
node_0_bal = self.check_fee_amount(self.nodes[0].getbalance(), Decimal('20'), fee_per_byte, self.get_vsize(self.nodes[2].gettransaction(txid)['hex']))
self.log.info("Test sendmany")
# Sendmany 10 BTC
txid = self.nodes[2].sendmany('', {address: 10}, 0, "", [])
self.nodes[2].generate(1)
self.sync_all(self.nodes[0:3])
node_0_bal += Decimal('10')
node_2_bal = self.check_fee_amount(self.nodes[2].getbalance(), node_2_bal - Decimal('10'), fee_per_byte, self.get_vsize(self.nodes[2].gettransaction(txid)['hex']))
assert_equal(self.nodes[0].getbalance(), node_0_bal)
# Sendmany 10 BTC with subtract fee from amount
txid = self.nodes[2].sendmany('', {address: 10}, 0, "", [address])
self.nodes[2].generate(1)
self.sync_all(self.nodes[0:3])
node_2_bal -= Decimal('10')
assert_equal(self.nodes[2].getbalance(), node_2_bal)
node_0_bal = self.check_fee_amount(self.nodes[0].getbalance(), node_0_bal + Decimal('10'), fee_per_byte, self.get_vsize(self.nodes[2].gettransaction(txid)['hex']))
self.log.info("Test sendmany with fee_rate param (explicit fee rate in sat/vB)")
fee_rate_sat_vb = 2
fee_rate_btc_kvb = fee_rate_sat_vb * 1e3 / 1e8
explicit_fee_rate_btc_kvb = Decimal(fee_rate_btc_kvb) / 1000
# Test passing fee_rate as a string
txid = self.nodes[2].sendmany(amounts={address: 10}, fee_rate=str(fee_rate_sat_vb))
self.nodes[2].generate(1)
self.sync_all(self.nodes[0:3])
balance = self.nodes[2].getbalance()
node_2_bal = self.check_fee_amount(balance, node_2_bal - Decimal('10'), explicit_fee_rate_btc_kvb, self.get_vsize(self.nodes[2].gettransaction(txid)['hex']))
assert_equal(balance, node_2_bal)
node_0_bal += Decimal('10')
assert_equal(self.nodes[0].getbalance(), node_0_bal)
# Test passing fee_rate as an integer
amount = Decimal("0.0001")
txid = self.nodes[2].sendmany(amounts={address: amount}, fee_rate=fee_rate_sat_vb)
self.nodes[2].generate(1)
self.sync_all(self.nodes[0:3])
balance = self.nodes[2].getbalance()
node_2_bal = self.check_fee_amount(balance, node_2_bal - amount, explicit_fee_rate_btc_kvb, self.get_vsize(self.nodes[2].gettransaction(txid)['hex']))
assert_equal(balance, node_2_bal)
node_0_bal += amount
assert_equal(self.nodes[0].getbalance(), node_0_bal)
for key in ["totalFee", "feeRate"]:
assert_raises_rpc_error(-8, "Unknown named parameter key", self.nodes[2].sendtoaddress, address=address, amount=1, fee_rate=1, key=1)
# Test setting explicit fee rate just below the minimum.
self.log.info("Test sendmany raises 'fee rate too low' if fee_rate of 0.99999999 is passed")
assert_raises_rpc_error(-6, "Fee rate (0.999 sat/vB) is lower than the minimum fee rate setting (1.000 sat/vB)",
self.nodes[2].sendmany, amounts={address: 10}, fee_rate=0.999)
self.log.info("Test sendmany raises if an invalid fee_rate is passed")
# Test fee_rate with zero values.
msg = "Fee rate (0.000 sat/vB) is lower than the minimum fee rate setting (1.000 sat/vB)"
for zero_value in [0, 0.000, 0.00000000, "0", "0.000", "0.00000000"]:
assert_raises_rpc_error(-6, msg, self.nodes[2].sendmany, amounts={address: 1}, fee_rate=zero_value)
msg = "Invalid amount"
# Test fee_rate values that don't pass fixed-point parsing checks.
for invalid_value in ["", 0.000000001, 1e-09, 1.111111111, 1111111111111111, "31.999999999999999999999"]:
assert_raises_rpc_error(-3, msg, self.nodes[2].sendmany, amounts={address: 1.0}, fee_rate=invalid_value)
# Test fee_rate values that cannot be represented in sat/vB.
for invalid_value in [0.0001, 0.00000001, 0.00099999, 31.99999999, "0.0001", "0.00000001", "0.00099999", "31.99999999"]:
assert_raises_rpc_error(-3, msg, self.nodes[2].sendmany, amounts={address: 10}, fee_rate=invalid_value)
# Test fee_rate out of range (negative number).
assert_raises_rpc_error(-3, OUT_OF_RANGE, self.nodes[2].sendmany, amounts={address: 10}, fee_rate=-1)
# Test type error.
for invalid_value in [True, {"foo": "bar"}]:
assert_raises_rpc_error(-3, NOT_A_NUMBER_OR_STRING, self.nodes[2].sendmany, amounts={address: 10}, fee_rate=invalid_value)
self.log.info("Test sendmany raises if an invalid conf_target or estimate_mode is passed")
for target, mode in product([-1, 0, 1009], ["economical", "conservative"]):
assert_raises_rpc_error(-8, "Invalid conf_target, must be between 1 and 1008", # max value of 1008 per src/policy/fees.h
self.nodes[2].sendmany, amounts={address: 1}, conf_target=target, estimate_mode=mode)
for target, mode in product([-1, 0], ["btc/kb", "sat/b"]):
assert_raises_rpc_error(-8, 'Invalid estimate_mode parameter, must be one of: "unset", "economical", "conservative"',
self.nodes[2].sendmany, amounts={address: 1}, conf_target=target, estimate_mode=mode)
self.start_node(3, self.nodes[3].extra_args)
self.connect_nodes(0, 3)
self.sync_all()
# check if we can list zero value tx as available coins
# 1. create raw_tx
# 2. hex-changed one output to 0.0
# 3. sign and send
# 4. check if recipient (node0) can list the zero value tx
usp = self.nodes[1].listunspent(query_options={'minimumAmount': '49.998'})[0]
inputs = [{"txid": usp['txid'], "vout": usp['vout']}]
outputs = {self.nodes[1].getnewaddress(): 49.998, self.nodes[0].getnewaddress(): 11.11}
raw_tx = self.nodes[1].createrawtransaction(inputs, outputs).replace("c0833842", "00000000") # replace 11.11 with 0.0 (int32)
signed_raw_tx = self.nodes[1].signrawtransactionwithwallet(raw_tx)
decoded_raw_tx = self.nodes[1].decoderawtransaction(signed_raw_tx['hex'])
zero_value_txid = decoded_raw_tx['txid']
self.nodes[1].sendrawtransaction(signed_raw_tx['hex'])
self.sync_all()
self.nodes[1].generate(1) # mine a block
self.sync_all()
unspent_txs = self.nodes[0].listunspent() # zero value tx must be in listunspents output
found = False
for uTx in unspent_txs:
if uTx['txid'] == zero_value_txid:
found = True
assert_equal(uTx['amount'], Decimal('0'))
assert found
self.log.info("Test -walletbroadcast")
self.stop_nodes()
self.start_node(0, ["-walletbroadcast=0"])
self.start_node(1, ["-walletbroadcast=0"])
self.start_node(2, ["-walletbroadcast=0"])
self.connect_nodes(0, 1)
self.connect_nodes(1, 2)
self.connect_nodes(0, 2)
self.sync_all(self.nodes[0:3])
txid_not_broadcast = self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 2)
tx_obj_not_broadcast = self.nodes[0].gettransaction(txid_not_broadcast)
self.nodes[1].generate(1) # mine a block, tx should not be in there
self.sync_all(self.nodes[0:3])
assert_equal(self.nodes[2].getbalance(), node_2_bal) # should not be changed because tx was not broadcasted
# now broadcast from another node, mine a block, sync, and check the balance
self.nodes[1].sendrawtransaction(tx_obj_not_broadcast['hex'])
self.nodes[1].generate(1)
self.sync_all(self.nodes[0:3])
node_2_bal += 2
tx_obj_not_broadcast = self.nodes[0].gettransaction(txid_not_broadcast)
assert_equal(self.nodes[2].getbalance(), node_2_bal)
# create another tx
self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 2)
# restart the nodes with -walletbroadcast=1
self.stop_nodes()
self.start_node(0)
self.start_node(1)
self.start_node(2)
self.connect_nodes(0, 1)
self.connect_nodes(1, 2)
self.connect_nodes(0, 2)
self.sync_blocks(self.nodes[0:3])
self.nodes[0].generate(1)
self.sync_blocks(self.nodes[0:3])
node_2_bal += 2
# tx should be added to balance because after restarting the nodes tx should be broadcast
assert_equal(self.nodes[2].getbalance(), node_2_bal)
# send a tx with value in a string (PR#6380 +)
txid = self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), "2")
tx_obj = self.nodes[0].gettransaction(txid)
assert_equal(tx_obj['amount'], Decimal('-2'))
txid = self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), "0.0001")
tx_obj = self.nodes[0].gettransaction(txid)
assert_equal(tx_obj['amount'], Decimal('-0.0001'))
# check if JSON parser can handle scientific notation in strings
txid = self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), "1e-4")
tx_obj = self.nodes[0].gettransaction(txid)
assert_equal(tx_obj['amount'], Decimal('-0.0001'))
# General checks for errors from incorrect inputs
# This will raise an exception because the amount is negative
assert_raises_rpc_error(-3, OUT_OF_RANGE, self.nodes[0].sendtoaddress, self.nodes[2].getnewaddress(), "-1")
# This will raise an exception because the amount type is wrong
assert_raises_rpc_error(-3, "Invalid amount", self.nodes[0].sendtoaddress, self.nodes[2].getnewaddress(), "1f-4")
# This will raise an exception since generate does not accept a string
assert_raises_rpc_error(-1, "not an integer", self.nodes[0].generate, "2")
if not self.options.descriptors:
# This will raise an exception for the invalid private key format
assert_raises_rpc_error(-5, "Invalid private key encoding", self.nodes[0].importprivkey, "invalid")
            # This will raise an exception for importing an address with the P2SH flag
temp_address = self.nodes[1].getnewaddress("", "p2sh-segwit")
assert_raises_rpc_error(-5, "Cannot use the p2sh flag with an address - use a script instead", self.nodes[0].importaddress, temp_address, "label", False, True)
# This will raise an exception for attempting to dump the private key of an address you do not own
assert_raises_rpc_error(-3, "Address does not refer to a key", self.nodes[0].dumpprivkey, temp_address)
# This will raise an exception for attempting to get the private key of an invalid Bitcoin address
assert_raises_rpc_error(-5, "Invalid Bitcoin address", self.nodes[0].dumpprivkey, "invalid")
# This will raise an exception for attempting to set a label for an invalid Bitcoin address
assert_raises_rpc_error(-5, "Invalid Bitcoin address", self.nodes[0].setlabel, "invalid address", "label")
# This will raise an exception for importing an invalid address
assert_raises_rpc_error(-5, "Invalid Bitcoin address or script", self.nodes[0].importaddress, "invalid")
# This will raise an exception for attempting to import a pubkey that isn't in hex
assert_raises_rpc_error(-5, "Pubkey must be a hex string", self.nodes[0].importpubkey, "not hex")
# This will raise an exception for importing an invalid pubkey
assert_raises_rpc_error(-5, "Pubkey is not a valid public key", self.nodes[0].importpubkey, "5361746f736869204e616b616d6f746f")
# Import address and private key to check correct behavior of spendable unspents
# 1. Send some coins to generate new UTXO
address_to_import = self.nodes[2].getnewaddress()
txid = self.nodes[0].sendtoaddress(address_to_import, 1)
self.nodes[0].generate(1)
self.sync_all(self.nodes[0:3])
self.log.info("Test sendtoaddress with fee_rate param (explicit fee rate in sat/vB)")
prebalance = self.nodes[2].getbalance()
assert prebalance > 2
address = self.nodes[1].getnewaddress()
amount = 3
fee_rate_sat_vb = 2
fee_rate_btc_kvb = fee_rate_sat_vb * 1e3 / 1e8
# Test passing fee_rate as an integer
txid = self.nodes[2].sendtoaddress(address=address, amount=amount, fee_rate=fee_rate_sat_vb)
tx_size = self.get_vsize(self.nodes[2].gettransaction(txid)['hex'])
self.nodes[0].generate(1)
self.sync_all(self.nodes[0:3])
postbalance = self.nodes[2].getbalance()
fee = prebalance - postbalance - Decimal(amount)
assert_fee_amount(fee, tx_size, Decimal(fee_rate_btc_kvb))
prebalance = self.nodes[2].getbalance()
amount = Decimal("0.001")
fee_rate_sat_vb = 1.23
fee_rate_btc_kvb = fee_rate_sat_vb * 1e3 / 1e8
# Test passing fee_rate as a string
txid = self.nodes[2].sendtoaddress(address=address, amount=amount, fee_rate=str(fee_rate_sat_vb))
tx_size = self.get_vsize(self.nodes[2].gettransaction(txid)['hex'])
self.nodes[0].generate(1)
self.sync_all(self.nodes[0:3])
postbalance = self.nodes[2].getbalance()
fee = prebalance - postbalance - amount
assert_fee_amount(fee, tx_size, Decimal(fee_rate_btc_kvb))
for key in ["totalFee", "feeRate"]:
assert_raises_rpc_error(-8, "Unknown named parameter key", self.nodes[2].sendtoaddress, address=address, amount=1, fee_rate=1, key=1)
# Test setting explicit fee rate just below the minimum.
self.log.info("Test sendtoaddress raises 'fee rate too low' if fee_rate of 0.99999999 is passed")
assert_raises_rpc_error(-6, "Fee rate (0.999 sat/vB) is lower than the minimum fee rate setting (1.000 sat/vB)",
self.nodes[2].sendtoaddress, address=address, amount=1, fee_rate=0.999)
self.log.info("Test sendtoaddress raises if an invalid fee_rate is passed")
# Test fee_rate with zero values.
msg = "Fee rate (0.000 sat/vB) is lower than the minimum fee rate setting (1.000 sat/vB)"
for zero_value in [0, 0.000, 0.00000000, "0", "0.000", "0.00000000"]:
assert_raises_rpc_error(-6, msg, self.nodes[2].sendtoaddress, address=address, amount=1, fee_rate=zero_value)
msg = "Invalid amount"
# Test fee_rate values that don't pass fixed-point parsing checks.
for invalid_value in ["", 0.000000001, 1e-09, 1.111111111, 1111111111111111, "31.999999999999999999999"]:
assert_raises_rpc_error(-3, msg, self.nodes[2].sendtoaddress, address=address, amount=1.0, fee_rate=invalid_value)
# Test fee_rate values that cannot be represented in sat/vB.
for invalid_value in [0.0001, 0.00000001, 0.00099999, 31.99999999, "0.0001", "0.00000001", "0.00099999", "31.99999999"]:
assert_raises_rpc_error(-3, msg, self.nodes[2].sendtoaddress, address=address, amount=10, fee_rate=invalid_value)
# Test fee_rate out of range (negative number).
assert_raises_rpc_error(-3, OUT_OF_RANGE, self.nodes[2].sendtoaddress, address=address, amount=1.0, fee_rate=-1)
# Test type error.
for invalid_value in [True, {"foo": "bar"}]:
assert_raises_rpc_error(-3, NOT_A_NUMBER_OR_STRING, self.nodes[2].sendtoaddress, address=address, amount=1.0, fee_rate=invalid_value)
self.log.info("Test sendtoaddress raises if an invalid conf_target or estimate_mode is passed")
for target, mode in product([-1, 0, 1009], ["economical", "conservative"]):
assert_raises_rpc_error(-8, "Invalid conf_target, must be between 1 and 1008", # max value of 1008 per src/policy/fees.h
self.nodes[2].sendtoaddress, address=address, amount=1, conf_target=target, estimate_mode=mode)
for target, mode in product([-1, 0], ["btc/kb", "sat/b"]):
assert_raises_rpc_error(-8, 'Invalid estimate_mode parameter, must be one of: "unset", "economical", "conservative"',
self.nodes[2].sendtoaddress, address=address, amount=1, conf_target=target, estimate_mode=mode)
# 2. Import address from node2 to node1
self.nodes[1].importaddress(address_to_import)
# 3. Validate that the imported address is watch-only on node1
assert self.nodes[1].getaddressinfo(address_to_import)["iswatchonly"]
# 4. Check that the unspents after import are not spendable
assert_array_result(self.nodes[1].listunspent(),
{"address": address_to_import},
{"spendable": False})
# 5. Import private key of the previously imported address on node1
priv_key = self.nodes[2].dumpprivkey(address_to_import)
self.nodes[1].importprivkey(priv_key)
# 6. Check that the unspents are now spendable on node1
assert_array_result(self.nodes[1].listunspent(),
{"address": address_to_import},
{"spendable": True})
# Mine a block from node0 to an address from node1
coinbase_addr = self.nodes[1].getnewaddress()
block_hash = self.nodes[0].generatetoaddress(1, coinbase_addr)[0]
coinbase_txid = self.nodes[0].getblock(block_hash)['tx'][0]
self.sync_all(self.nodes[0:3])
# Check that the txid and balance is found by node1
self.nodes[1].gettransaction(coinbase_txid)
# check if wallet or blockchain maintenance changes the balance
self.sync_all(self.nodes[0:3])
blocks = self.nodes[0].generate(2)
self.sync_all(self.nodes[0:3])
balance_nodes = [self.nodes[i].getbalance() for i in range(3)]
block_count = self.nodes[0].getblockcount()
# Check modes:
# - True: unicode escaped as \u....
# - False: unicode directly as UTF-8
for mode in [True, False]:
self.nodes[0].rpc.ensure_ascii = mode
# unicode check: Basic Multilingual Plane, Supplementary Plane respectively
for label in [u'рыба', u'𝅘𝅥𝅯']:
addr = self.nodes[0].getnewaddress()
self.nodes[0].setlabel(addr, label)
test_address(self.nodes[0], addr, labels=[label])
assert label in self.nodes[0].listlabels()
self.nodes[0].rpc.ensure_ascii = True # restore to default
# maintenance tests
maintenance = [
'-rescan',
'-reindex',
]
chainlimit = 6
for m in maintenance:
self.log.info("Test " + m)
self.stop_nodes()
# set lower ancestor limit for later
self.start_node(0, [m, "-limitancestorcount=" + str(chainlimit)])
self.start_node(1, [m, "-limitancestorcount=" + str(chainlimit)])
self.start_node(2, [m, "-limitancestorcount=" + str(chainlimit)])
if m == '-reindex':
# reindex will leave rpc warm up "early"; Wait for it to finish
self.wait_until(lambda: [block_count] * 3 == [self.nodes[i].getblockcount() for i in range(3)])
assert_equal(balance_nodes, [self.nodes[i].getbalance() for i in range(3)])
# Exercise listsinceblock with the last two blocks
coinbase_tx_1 = self.nodes[0].listsinceblock(blocks[0])
assert_equal(coinbase_tx_1["lastblock"], blocks[1])
assert_equal(len(coinbase_tx_1["transactions"]), 1)
assert_equal(coinbase_tx_1["transactions"][0]["blockhash"], blocks[1])
assert_equal(len(self.nodes[0].listsinceblock(blocks[1])["transactions"]), 0)
# ==Check that wallet prefers to use coins that don't exceed mempool limits =====
# Get all non-zero utxos together
chain_addrs = [self.nodes[0].getnewaddress(), self.nodes[0].getnewaddress()]
singletxid = self.nodes[0].sendtoaddress(chain_addrs[0], self.nodes[0].getbalance(), "", "", True)
self.nodes[0].generate(1)
node0_balance = self.nodes[0].getbalance()
# Split into two chains
rawtx = self.nodes[0].createrawtransaction([{"txid": singletxid, "vout": 0}], {chain_addrs[0]: node0_balance / 2 - Decimal('0.01'), chain_addrs[1]: node0_balance / 2 - Decimal('0.01')})
signedtx = self.nodes[0].signrawtransactionwithwallet(rawtx)
singletxid = self.nodes[0].sendrawtransaction(hexstring=signedtx["hex"], maxfeerate=0)
self.nodes[0].generate(1)
# Make a long chain of unconfirmed payments without hitting mempool limit
# Each tx we make leaves only one output of change on a chain 1 longer
# Since the amount to send is always much less than the outputs, we only ever need one output
# So we should be able to generate exactly chainlimit txs for each original output
sending_addr = self.nodes[1].getnewaddress()
txid_list = []
for _ in range(chainlimit * 2):
txid_list.append(self.nodes[0].sendtoaddress(sending_addr, Decimal('0.0001')))
assert_equal(self.nodes[0].getmempoolinfo()['size'], chainlimit * 2)
assert_equal(len(txid_list), chainlimit * 2)
# Without walletrejectlongchains, we will still generate a txid
# The tx will be stored in the wallet but not accepted to the mempool
extra_txid = self.nodes[0].sendtoaddress(sending_addr, Decimal('0.0001'))
assert extra_txid not in self.nodes[0].getrawmempool()
assert extra_txid in [tx["txid"] for tx in self.nodes[0].listtransactions()]
self.nodes[0].abandontransaction(extra_txid)
total_txs = len(self.nodes[0].listtransactions("*", 99999))
# Try with walletrejectlongchains
# Double chain limit but require combining inputs, so we pass SelectCoinsMinConf
self.stop_node(0)
extra_args = ["-walletrejectlongchains", "-limitancestorcount=" + str(2 * chainlimit)]
self.start_node(0, extra_args=extra_args)
# wait until the wallet has submitted all transactions to the mempool
self.wait_until(lambda: len(self.nodes[0].getrawmempool()) == chainlimit * 2)
# Prevent potential race condition when calling wallet RPCs right after restart
self.nodes[0].syncwithvalidationinterfacequeue()
node0_balance = self.nodes[0].getbalance()
# With walletrejectlongchains we will not create the tx and store it in our wallet.
assert_raises_rpc_error(-6, "Transaction has too long of a mempool chain", self.nodes[0].sendtoaddress, sending_addr, node0_balance - Decimal('0.01'))
# Verify nothing new in wallet
assert_equal(total_txs, len(self.nodes[0].listtransactions("*", 99999)))
# Test getaddressinfo on external address. Note that these addresses are taken from disablewallet.py
assert_raises_rpc_error(-5, "Invalid prefix for Base58-encoded address", self.nodes[0].getaddressinfo, "3J98t1WpEZ73CNmQviecrnyiWrnqRhWNLy")
address_info = self.nodes[0].getaddressinfo("mneYUmWYsuk7kySiURxCi3AGxrAqZxLgPZ")
assert_equal(address_info['address'], "mneYUmWYsuk7kySiURxCi3AGxrAqZxLgPZ")
assert_equal(address_info["scriptPubKey"], "76a9144e3854046c7bd1594ac904e4793b6a45b36dea0988ac")
assert not address_info["ismine"]
assert not address_info["iswatchonly"]
assert not address_info["isscript"]
assert not address_info["ischange"]
# Test getaddressinfo 'ischange' field on change address.
self.nodes[0].generate(1)
destination = self.nodes[1].getnewaddress()
txid = self.nodes[0].sendtoaddress(destination, 0.123)
tx = self.nodes[0].decoderawtransaction(self.nodes[0].gettransaction(txid)['hex'])
output_addresses = [vout['scriptPubKey']['address'] for vout in tx["vout"]]
assert len(output_addresses) > 1
for address in output_addresses:
ischange = self.nodes[0].getaddressinfo(address)['ischange']
assert_equal(ischange, address != destination)
if ischange:
change = address
self.nodes[0].setlabel(change, 'foobar')
assert_equal(self.nodes[0].getaddressinfo(change)['ischange'], False)
# Test gettransaction response with different arguments.
self.log.info("Testing gettransaction response with different arguments...")
self.nodes[0].setlabel(change, 'baz')
baz = self.nodes[0].listtransactions(label="baz", count=1)[0]
expected_receive_vout = {"label": "baz",
"address": baz["address"],
"amount": baz["amount"],
"category": baz["category"],
"vout": baz["vout"]}
expected_fields = frozenset({'amount', 'bip125-replaceable', 'confirmations', 'details', 'fee',
'hex', 'time', 'timereceived', 'trusted', 'txid', 'walletconflicts'})
verbose_field = "decoded"
expected_verbose_fields = expected_fields | {verbose_field}
self.log.debug("Testing gettransaction response without verbose")
tx = self.nodes[0].gettransaction(txid=txid)
assert_equal(set([*tx]), expected_fields)
assert_array_result(tx["details"], {"category": "receive"}, expected_receive_vout)
self.log.debug("Testing gettransaction response with verbose set to False")
tx = self.nodes[0].gettransaction(txid=txid, verbose=False)
assert_equal(set([*tx]), expected_fields)
assert_array_result(tx["details"], {"category": "receive"}, expected_receive_vout)
self.log.debug("Testing gettransaction response with verbose set to True")
tx = self.nodes[0].gettransaction(txid=txid, verbose=True)
assert_equal(set([*tx]), expected_verbose_fields)
assert_array_result(tx["details"], {"category": "receive"}, expected_receive_vout)
assert_equal(tx[verbose_field], self.nodes[0].decoderawtransaction(tx["hex"]))
self.log.info("Test send* RPCs with verbose=True")
address = self.nodes[0].getnewaddress("test")
txid_feeReason_one = self.nodes[2].sendtoaddress(address=address, amount=5, verbose=True)
assert_equal(txid_feeReason_one["fee_reason"], "Fallback fee")
txid_feeReason_two = self.nodes[2].sendmany(dummy='', amounts={address: 5}, verbose=True)
assert_equal(txid_feeReason_two["fee_reason"], "Fallback fee")
self.log.info("Test send* RPCs with verbose=False")
txid_feeReason_three = self.nodes[2].sendtoaddress(address=address, amount=5, verbose=False)
assert_equal(self.nodes[2].gettransaction(txid_feeReason_three)['txid'], txid_feeReason_three)
txid_feeReason_four = self.nodes[2].sendmany(dummy='', amounts={address: 5}, verbose=False)
assert_equal(self.nodes[2].gettransaction(txid_feeReason_four)['txid'], txid_feeReason_four)
if __name__ == '__main__':
WalletTest().main()
| 54.700146 | 193 | 0.651082 |
4a1d26aaf0002aa50b78afe1b6f261b013c80c0a
| 15,013 |
py
|
Python
|
helm/dagster/schema/schema_tests/test_instance.py
|
dbatten5/dagster
|
d76e50295054ffe5a72f9b292ef57febae499528
|
[
"Apache-2.0"
] | null | null | null |
helm/dagster/schema/schema_tests/test_instance.py
|
dbatten5/dagster
|
d76e50295054ffe5a72f9b292ef57febae499528
|
[
"Apache-2.0"
] | null | null | null |
helm/dagster/schema/schema_tests/test_instance.py
|
dbatten5/dagster
|
d76e50295054ffe5a72f9b292ef57febae499528
|
[
"Apache-2.0"
] | 1 |
2021-09-26T07:29:17.000Z
|
2021-09-26T07:29:17.000Z
|
import pytest
import yaml
from dagster.core.run_coordinator import QueuedRunCoordinator
from dagster_aws.s3.compute_log_manager import S3ComputeLogManager
from dagster_azure.blob.compute_log_manager import AzureBlobComputeLogManager
from dagster_gcp.gcs.compute_log_manager import GCSComputeLogManager
from kubernetes.client import models
from schema.charts.dagster.subschema.compute_log_manager import (
AzureBlobComputeLogManager as AzureBlobComputeLogManagerModel,
)
from schema.charts.dagster.subschema.compute_log_manager import (
ComputeLogManager,
ComputeLogManagerConfig,
ComputeLogManagerType,
)
from schema.charts.dagster.subschema.compute_log_manager import (
GCSComputeLogManager as GCSComputeLogManagerModel,
)
from schema.charts.dagster.subschema.compute_log_manager import (
S3ComputeLogManager as S3ComputeLogManagerModel,
)
from schema.charts.dagster.subschema.daemon import (
ConfigurableClass,
Daemon,
QueuedRunCoordinatorConfig,
RunCoordinator,
RunCoordinatorConfig,
RunCoordinatorType,
TagConcurrencyLimit,
)
from schema.charts.dagster.subschema.postgresql import PostgreSQL, Service
from schema.charts.dagster.subschema.run_launcher import (
K8sRunLauncherConfig,
RunLauncher,
RunLauncherConfig,
RunLauncherType,
)
from schema.charts.dagster.values import DagsterHelmValues
from schema.utils.helm_template import HelmTemplate
def to_camel_case(s: str) -> str:
components = s.split("_")
return components[0] + "".join(x.title() for x in components[1:])
@pytest.fixture(name="template")
def helm_template() -> HelmTemplate:
return HelmTemplate(
helm_dir_path="helm/dagster",
subchart_paths=["charts/dagster-user-deployments"],
output="templates/configmap-instance.yaml",
model=models.V1ConfigMap,
)
@pytest.mark.parametrize("storage", ["schedule_storage", "run_storage", "event_log_storage"])
def test_storage_postgres_db_config(template: HelmTemplate, storage: str):
postgresql_username = "username"
postgresql_host = "1.1.1.1"
postgresql_database = "database"
postgresql_params = {
"connect_timeout": 10,
"application_name": "myapp",
"options": "-c synchronous_commit=off",
}
postgresql_port = 8080
helm_values = DagsterHelmValues.construct(
postgresql=PostgreSQL.construct(
postgresqlUsername=postgresql_username,
postgresqlHost=postgresql_host,
postgresqlDatabase=postgresql_database,
postgresqlParams=postgresql_params,
service=Service(port=postgresql_port),
)
)
configmaps = template.render(helm_values)
assert len(configmaps) == 1
instance = yaml.full_load(configmaps[0].data["dagster.yaml"])
assert instance[storage]
postgres_db = instance[storage]["config"]["postgres_db"]
assert postgres_db["username"] == postgresql_username
assert postgres_db["password"] == {"env": "DAGSTER_PG_PASSWORD"}
assert postgres_db["hostname"] == postgresql_host
assert postgres_db["db_name"] == postgresql_database
assert postgres_db["port"] == postgresql_port
assert postgres_db["params"] == postgresql_params
def test_k8s_run_launcher_config(template: HelmTemplate):
job_namespace = "namespace"
image_pull_policy = "Always"
load_incluster_config = True
env_config_maps = [{"name": "env_config_map"}]
env_secrets = [{"name": "secret"}]
env_vars = ["ENV_VAR"]
helm_values = DagsterHelmValues.construct(
runLauncher=RunLauncher.construct(
type=RunLauncherType.K8S,
config=RunLauncherConfig.construct(
k8sRunLauncher=K8sRunLauncherConfig.construct(
jobNamespace=job_namespace,
imagePullPolicy=image_pull_policy,
loadInclusterConfig=load_incluster_config,
envConfigMaps=env_config_maps,
envSecrets=env_secrets,
envVars=env_vars,
)
),
)
)
configmaps = template.render(helm_values)
instance = yaml.full_load(configmaps[0].data["dagster.yaml"])
run_launcher_config = instance["run_launcher"]
assert run_launcher_config["module"] == "dagster_k8s"
assert run_launcher_config["class"] == "K8sRunLauncher"
assert run_launcher_config["config"]["job_namespace"] == job_namespace
assert run_launcher_config["config"]["load_incluster_config"] == load_incluster_config
assert run_launcher_config["config"]["image_pull_policy"] == image_pull_policy
assert run_launcher_config["config"]["env_config_maps"][1:] == [
configmap["name"] for configmap in env_config_maps
]
assert run_launcher_config["config"]["env_secrets"] == [
secret["name"] for secret in env_secrets
]
assert run_launcher_config["config"]["env_vars"] == env_vars
@pytest.mark.parametrize("enabled", [True, False])
def test_queued_run_coordinator_config(template: HelmTemplate, enabled: bool):
max_concurrent_runs = 50
tag_concurrency_limits = [TagConcurrencyLimit(key="key", value="value", limit=10)]
dequeue_interval_seconds = 50
helm_values = DagsterHelmValues.construct(
dagsterDaemon=Daemon.construct(
runCoordinator=RunCoordinator.construct(
enabled=enabled,
type=RunCoordinatorType.QUEUED,
config=RunCoordinatorConfig.construct(
queuedRunCoordinator=QueuedRunCoordinatorConfig.construct(
maxConcurrentRuns=max_concurrent_runs,
tagConcurrencyLimits=tag_concurrency_limits,
dequeueIntervalSeconds=dequeue_interval_seconds,
)
),
)
)
)
configmaps = template.render(helm_values)
assert len(configmaps) == 1
instance = yaml.full_load(configmaps[0].data["dagster.yaml"])
assert ("run_coordinator" in instance) == enabled
if enabled:
assert instance["run_coordinator"]["module"] == "dagster.core.run_coordinator"
assert instance["run_coordinator"]["class"] == "QueuedRunCoordinator"
assert instance["run_coordinator"]["config"]
run_coordinator_config = instance["run_coordinator"]["config"]
assert run_coordinator_config["max_concurrent_runs"] == max_concurrent_runs
assert run_coordinator_config["dequeue_interval_seconds"] == dequeue_interval_seconds
assert len(run_coordinator_config["tag_concurrency_limits"]) == len(tag_concurrency_limits)
assert run_coordinator_config["tag_concurrency_limits"] == [
tag_concurrency_limit.dict() for tag_concurrency_limit in tag_concurrency_limits
]
def test_custom_run_coordinator_config(template: HelmTemplate):
module = "a_module"
class_ = "Class"
config_field_one = "1"
config_field_two = "two"
config = {"config_field_one": config_field_one, "config_field_two": config_field_two}
helm_values = DagsterHelmValues.construct(
dagsterDaemon=Daemon.construct(
runCoordinator=RunCoordinator.construct(
enabled=True,
type=RunCoordinatorType.CUSTOM,
config=RunCoordinatorConfig.construct(
customRunCoordinator=ConfigurableClass.construct(
module=module,
class_=class_,
config=config,
)
),
)
)
)
configmaps = template.render(helm_values)
assert len(configmaps) == 1
instance = yaml.full_load(configmaps[0].data["dagster.yaml"])
assert instance["run_coordinator"]["module"] == module
assert instance["run_coordinator"]["class"] == class_
assert instance["run_coordinator"]["config"] == config
@pytest.mark.parametrize(
"compute_log_manager_type",
[ComputeLogManagerType.NOOP, ComputeLogManagerType.LOCAL],
ids=["noop", "local compute log manager becomes noop"],
)
def test_noop_compute_log_manager(
template: HelmTemplate, compute_log_manager_type: ComputeLogManagerType
):
helm_values = DagsterHelmValues.construct(
computeLogManager=ComputeLogManager.construct(type=compute_log_manager_type)
)
configmaps = template.render(helm_values)
instance = yaml.full_load(configmaps[0].data["dagster.yaml"])
compute_logs_config = instance["compute_logs"]
assert compute_logs_config["module"] == "dagster.core.storage.noop_compute_log_manager"
assert compute_logs_config["class"] == "NoOpComputeLogManager"
def test_azure_blob_compute_log_manager(template: HelmTemplate):
storage_account = "account"
container = "container"
secret_key = "secret_key"
local_dir = "/dir"
prefix = "prefix"
helm_values = DagsterHelmValues.construct(
computeLogManager=ComputeLogManager.construct(
type=ComputeLogManagerType.AZURE,
config=ComputeLogManagerConfig.construct(
azureBlobComputeLogManager=AzureBlobComputeLogManagerModel(
storageAccount=storage_account,
container=container,
secretKey=secret_key,
localDir=local_dir,
prefix=prefix,
)
),
)
)
configmaps = template.render(helm_values)
instance = yaml.full_load(configmaps[0].data["dagster.yaml"])
compute_logs_config = instance["compute_logs"]
assert compute_logs_config["module"] == "dagster_azure.blob.compute_log_manager"
assert compute_logs_config["class"] == "AzureBlobComputeLogManager"
assert compute_logs_config["config"] == {
"storage_account": storage_account,
"container": container,
"secret_key": secret_key,
"local_dir": local_dir,
"prefix": prefix,
}
# Test all config fields in configurable class
assert compute_logs_config["config"].keys() == AzureBlobComputeLogManager.config_type().keys()
def test_gcs_compute_log_manager(template: HelmTemplate):
bucket = "bucket"
local_dir = "/dir"
prefix = "prefix"
helm_values = DagsterHelmValues.construct(
computeLogManager=ComputeLogManager.construct(
type=ComputeLogManagerType.GCS,
config=ComputeLogManagerConfig.construct(
gcsComputeLogManager=GCSComputeLogManagerModel(
bucket=bucket, localDir=local_dir, prefix=prefix
)
),
)
)
configmaps = template.render(helm_values)
instance = yaml.full_load(configmaps[0].data["dagster.yaml"])
compute_logs_config = instance["compute_logs"]
assert compute_logs_config["module"] == "dagster_gcp.gcs.compute_log_manager"
assert compute_logs_config["class"] == "GCSComputeLogManager"
assert compute_logs_config["config"] == {
"bucket": bucket,
"local_dir": local_dir,
"prefix": prefix,
}
# Test all config fields in configurable class
assert compute_logs_config["config"].keys() == GCSComputeLogManager.config_type().keys()
def test_s3_compute_log_manager(template: HelmTemplate):
bucket = "bucket"
local_dir = "/dir"
prefix = "prefix"
use_ssl = True
verify = True
verify_cert_path = "/path"
endpoint_url = "endpoint.com"
skip_empty_files = True
helm_values = DagsterHelmValues.construct(
computeLogManager=ComputeLogManager.construct(
type=ComputeLogManagerType.S3,
config=ComputeLogManagerConfig.construct(
s3ComputeLogManager=S3ComputeLogManagerModel(
bucket=bucket,
localDir=local_dir,
prefix=prefix,
useSsl=use_ssl,
verify=verify,
verifyCertPath=verify_cert_path,
endpointUrl=endpoint_url,
skipEmptyFiles=skip_empty_files,
)
),
)
)
configmaps = template.render(helm_values)
instance = yaml.full_load(configmaps[0].data["dagster.yaml"])
compute_logs_config = instance["compute_logs"]
assert compute_logs_config["module"] == "dagster_aws.s3.compute_log_manager"
assert compute_logs_config["class"] == "S3ComputeLogManager"
assert compute_logs_config["config"] == {
"bucket": bucket,
"local_dir": local_dir,
"prefix": prefix,
"use_ssl": use_ssl,
"verify": verify,
"verify_cert_path": verify_cert_path,
"endpoint_url": endpoint_url,
"skip_empty_files": skip_empty_files,
}
# Test all config fields in configurable class
assert compute_logs_config["config"].keys() == S3ComputeLogManager.config_type().keys()
def test_custom_compute_log_manager_config(template: HelmTemplate):
module = "a_module"
class_ = "Class"
config_field_one = "1"
config_field_two = "two"
config = {"config_field_one": config_field_one, "config_field_two": config_field_two}
helm_values = DagsterHelmValues.construct(
computeLogManager=ComputeLogManager.construct(
type=ComputeLogManagerType.CUSTOM,
config=ComputeLogManagerConfig.construct(
customComputeLogManager=ConfigurableClass.construct(
module=module,
class_=class_,
config=config,
)
),
)
)
configmaps = template.render(helm_values)
instance = yaml.full_load(configmaps[0].data["dagster.yaml"])
compute_logs_config = instance["compute_logs"]
assert compute_logs_config["module"] == module
assert compute_logs_config["class"] == class_
assert compute_logs_config["config"] == config
@pytest.mark.parametrize(
argnames=["json_schema_model", "compute_log_manager_class"],
argvalues=[
(AzureBlobComputeLogManagerModel, AzureBlobComputeLogManager),
(GCSComputeLogManagerModel, GCSComputeLogManager),
(S3ComputeLogManagerModel, S3ComputeLogManager),
],
)
def test_compute_log_manager_has_schema(json_schema_model, compute_log_manager_class):
json_schema_fields = json_schema_model.schema()["properties"].keys()
compute_log_manager_fields = set(
map(to_camel_case, compute_log_manager_class.config_type().keys())
)
assert json_schema_fields == compute_log_manager_fields
@pytest.mark.parametrize(
argnames=["json_schema_model", "run_coordinator_class"],
argvalues=[
(QueuedRunCoordinatorConfig, QueuedRunCoordinator),
],
)
def test_run_coordinator_has_schema(json_schema_model, run_coordinator_class):
json_schema_fields = json_schema_model.schema()["properties"].keys()
run_coordinator_fields = set(map(to_camel_case, run_coordinator_class.config_type().keys()))
assert json_schema_fields == run_coordinator_fields
| 37.160891 | 99 | 0.686139 |
4a1d2711b755c3e1656563c25462f1c7b5485e26
| 15,942 |
py
|
Python
|
run_tests.py
|
blobfish/meson
|
d0c7b5169303fb0a394201d90be1e74426d7b2d9
|
[
"Apache-2.0"
] | 1 |
2020-01-21T21:57:32.000Z
|
2020-01-21T21:57:32.000Z
|
run_tests.py
|
blobfish/meson
|
d0c7b5169303fb0a394201d90be1e74426d7b2d9
|
[
"Apache-2.0"
] | null | null | null |
run_tests.py
|
blobfish/meson
|
d0c7b5169303fb0a394201d90be1e74426d7b2d9
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
# Copyright 2012-2017 The Meson development team
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import time
import shutil
import subprocess
import tempfile
import platform
import argparse
from io import StringIO
from enum import Enum
from glob import glob
from pathlib import Path
from mesonbuild import compilers
from mesonbuild import dependencies
from mesonbuild import mesonlib
from mesonbuild import mesonmain
from mesonbuild import mtest
from mesonbuild import mlog
from mesonbuild.environment import Environment, detect_ninja
from mesonbuild.coredata import backendlist
NINJA_1_9_OR_NEWER = False
NINJA_CMD = None
# If we're on CI, just assume we have ninja in PATH and it's new enough because
# we provide that. This avoids having to detect ninja for every subprocess unit
# test that we run.
if 'CI' in os.environ:
NINJA_1_9_OR_NEWER = True
NINJA_CMD = 'ninja'
else:
# Look for 1.9 to see if https://github.com/ninja-build/ninja/issues/1219
# is fixed, else require 1.6 for -w dupbuild=err
for v in ('1.9', '1.6'):
NINJA_CMD = detect_ninja(v)
if NINJA_CMD is not None:
if mesonlib.version_compare(v, '>=1.9'):
NINJA_1_9_OR_NEWER = True
else:
mlog.warning('Found ninja <1.9, tests will run slower', once=True)
break
if NINJA_CMD is None:
raise RuntimeError('Could not find Ninja v1.6 or newer')
def guess_backend(backend, msbuild_exe: str):
# Auto-detect backend if unspecified
backend_flags = []
if backend is None:
if msbuild_exe is not None and (mesonlib.is_windows() and not _using_intelcl()):
backend = 'vs' # Meson will auto-detect VS version to use
else:
backend = 'ninja'
# Set backend arguments for Meson
if backend.startswith('vs'):
backend_flags = ['--backend=' + backend]
backend = Backend.vs
elif backend == 'xcode':
backend_flags = ['--backend=xcode']
backend = Backend.xcode
elif backend == 'ninja':
backend_flags = ['--backend=ninja']
backend = Backend.ninja
else:
raise RuntimeError('Unknown backend: {!r}'.format(backend))
return (backend, backend_flags)
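# Hedged usage sketch (added for clarity; not part of the original runner):
# outside a Visual Studio environment the call below is expected to return
# (Backend.ninja, ['--backend=ninja']); on Windows with msbuild on PATH and no
# Intel-Cl environment it picks the VS backend instead. Illustrative only,
# never called by the test runner.
def _guess_backend_example():
    return guess_backend(None, shutil.which('msbuild'))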
def _using_intelcl() -> bool:
"""
    Detect whether the user intends to use Intel-Cl compilers (Intel compilers on Windows).
    Sufficient evidence of intent is that the user is working in the Intel compiler
    shell environment; otherwise this function returns False.
"""
if not mesonlib.is_windows():
return False
# handle where user tried to "blank" MKLROOT and left space(s)
if not os.environ.get('MKLROOT', '').strip():
return False
if (os.environ.get('CC') == 'icl' or
os.environ.get('CXX') == 'icl' or
os.environ.get('FC') == 'ifort'):
return True
# Intel-Cl users might not have the CC,CXX,FC envvars set,
# but because they're in Intel shell, the exe's below are on PATH
if shutil.which('icl') or shutil.which('ifort'):
return True
    mlog.warning('It appears you might be intending to use the Intel compiler on Windows '
                 'since the non-empty environment variable MKLROOT is set to {}. '
                 'However, Meson cannot find the Intel Windows compiler executables (icl, ifort). '
                 'Please try using the Intel shell.'.format(os.environ.get('MKLROOT')))
return False
# Fake classes and objects for mocking
class FakeBuild:
def __init__(self, env):
self.environment = env
class FakeCompilerOptions:
def __init__(self):
self.value = []
def get_fake_options(prefix=''):
opts = argparse.Namespace()
opts.native_file = []
opts.cross_file = None
opts.wrap_mode = None
opts.prefix = prefix
opts.cmd_line_options = {}
return opts
def get_fake_env(sdir='', bdir=None, prefix='', opts=None):
if opts is None:
opts = get_fake_options(prefix)
env = Environment(sdir, bdir, opts)
env.coredata.compiler_options.host['c_args'] = FakeCompilerOptions()
env.machines.host.cpu_family = 'x86_64' # Used on macOS inside find_library
return env
Backend = Enum('Backend', 'ninja vs xcode')
if 'MESON_EXE' in os.environ:
meson_exe = mesonlib.split_args(os.environ['MESON_EXE'])
else:
meson_exe = None
if mesonlib.is_windows() or mesonlib.is_cygwin():
exe_suffix = '.exe'
else:
exe_suffix = ''
def get_meson_script():
'''
Guess the meson that corresponds to the `mesonbuild` that has been imported
so we can run configure and other commands in-process, since mesonmain.run
needs to know the meson_command to use.
Also used by run_unittests.py to determine what meson to run when not
running in-process (which is the default).
'''
# Is there a meson.py next to the mesonbuild currently in use?
mesonbuild_dir = Path(mesonmain.__file__).resolve().parent.parent
meson_script = mesonbuild_dir / 'meson.py'
if meson_script.is_file():
return str(meson_script)
# Then if mesonbuild is in PYTHONPATH, meson must be in PATH
mlog.warning('Could not find meson.py next to the mesonbuild module. '
'Trying system meson...')
meson_cmd = shutil.which('meson')
if meson_cmd:
return meson_cmd
raise RuntimeError('Could not find {!r} or a meson in PATH'.format(meson_script))
def get_backend_args_for_dir(backend, builddir):
'''
Visual Studio backend needs to be given the solution to build
'''
if backend is Backend.vs:
sln_name = glob(os.path.join(builddir, '*.sln'))[0]
return [os.path.split(sln_name)[-1]]
return []
def find_vcxproj_with_target(builddir, target):
import re, fnmatch
t, ext = os.path.splitext(target)
if ext:
p = r'<TargetName>{}</TargetName>\s*<TargetExt>\{}</TargetExt>'.format(t, ext)
else:
p = r'<TargetName>{}</TargetName>'.format(t)
for _, _, files in os.walk(builddir):
for f in fnmatch.filter(files, '*.vcxproj'):
f = os.path.join(builddir, f)
with open(f, 'r', encoding='utf-8') as o:
if re.search(p, o.read(), flags=re.MULTILINE):
return f
raise RuntimeError('No vcxproj matching {!r} in {!r}'.format(p, builddir))
def get_builddir_target_args(backend, builddir, target):
dir_args = []
if not target:
dir_args = get_backend_args_for_dir(backend, builddir)
if target is None:
return dir_args
if backend is Backend.vs:
vcxproj = find_vcxproj_with_target(builddir, target)
target_args = [vcxproj]
elif backend is Backend.xcode:
target_args = ['-target', target]
elif backend is Backend.ninja:
target_args = [target]
else:
raise AssertionError('Unknown backend: {!r}'.format(backend))
return target_args + dir_args
def get_backend_commands(backend, debug=False):
install_cmd = []
uninstall_cmd = []
if backend is Backend.vs:
cmd = ['msbuild']
clean_cmd = cmd + ['/target:Clean']
test_cmd = cmd + ['RUN_TESTS.vcxproj']
elif backend is Backend.xcode:
cmd = ['xcodebuild']
# In Xcode9 new build system's clean command fails when using a custom build directory.
# Maybe use it when CI uses Xcode10 we can remove '-UseNewBuildSystem=FALSE'
clean_cmd = cmd + ['-alltargets', 'clean', '-UseNewBuildSystem=FALSE']
test_cmd = cmd + ['-target', 'RUN_TESTS']
elif backend is Backend.ninja:
global NINJA_CMD
cmd = [NINJA_CMD, '-w', 'dupbuild=err', '-d', 'explain']
if debug:
cmd += ['-v']
clean_cmd = cmd + ['clean']
test_cmd = cmd + ['test', 'benchmark']
install_cmd = cmd + ['install']
uninstall_cmd = cmd + ['uninstall']
else:
raise AssertionError('Unknown backend: {!r}'.format(backend))
return cmd, clean_cmd, test_cmd, install_cmd, uninstall_cmd
def ensure_backend_detects_changes(backend):
global NINJA_1_9_OR_NEWER
if backend is not Backend.ninja:
return
need_workaround = False
# We're not running on HFS+ which only stores dates in seconds:
# https://developer.apple.com/legacy/library/technotes/tn/tn1150.html#HFSPlusDates
# XXX: Upgrade Travis image to Apple FS when that becomes available
# TODO: Detect HFS+ vs APFS
if mesonlib.is_osx():
mlog.warning('Running on HFS+, enabling timestamp resolution workaround', once=True)
need_workaround = True
# We're using ninja >= 1.9 which has QuLogic's patch for sub-1s resolution
# timestamps
if not NINJA_1_9_OR_NEWER:
mlog.warning('Don\'t have ninja >= 1.9, enabling timestamp resolution workaround', once=True)
need_workaround = True
# Increase the difference between build.ninja's timestamp and the timestamp
# of whatever you changed: https://github.com/ninja-build/ninja/issues/371
if need_workaround:
time.sleep(1)
def run_mtest_inprocess(commandlist):
old_stdout = sys.stdout
sys.stdout = mystdout = StringIO()
old_stderr = sys.stderr
sys.stderr = mystderr = StringIO()
try:
returncode = mtest.run_with_args(commandlist)
finally:
sys.stdout = old_stdout
sys.stderr = old_stderr
return returncode, mystdout.getvalue(), mystderr.getvalue()
def clear_meson_configure_class_caches():
compilers.CCompiler.library_dirs_cache = {}
compilers.CCompiler.program_dirs_cache = {}
compilers.CCompiler.find_library_cache = {}
compilers.CCompiler.find_framework_cache = {}
dependencies.PkgConfigDependency.pkgbin_cache = {}
dependencies.PkgConfigDependency.class_pkgbin = mesonlib.PerMachine(None, None)
def run_configure_inprocess(commandlist, env=None):
old_stdout = sys.stdout
sys.stdout = mystdout = StringIO()
old_stderr = sys.stderr
sys.stderr = mystderr = StringIO()
old_environ = os.environ.copy()
if env is not None:
os.environ.update(env)
try:
returncode = mesonmain.run(commandlist, get_meson_script())
finally:
sys.stdout = old_stdout
sys.stderr = old_stderr
clear_meson_configure_class_caches()
os.environ.clear()
os.environ.update(old_environ)
return returncode, mystdout.getvalue(), mystderr.getvalue()
def run_configure_external(full_command, env=None):
pc, o, e = mesonlib.Popen_safe(full_command, env=env)
return pc.returncode, o, e
def run_configure(commandlist, env=None):
global meson_exe
if meson_exe:
return run_configure_external(meson_exe + commandlist, env=env)
return run_configure_inprocess(commandlist, env=env)
def print_system_info():
print(mlog.bold('System information.').get_text(mlog.colorize_console))
print('Architecture:', platform.architecture())
print('Machine:', platform.machine())
print('Platform:', platform.system())
print('Processor:', platform.processor())
print('System:', platform.system())
print('')
def main():
print_system_info()
parser = argparse.ArgumentParser()
parser.add_argument('--cov', action='store_true')
parser.add_argument('--backend', default=None, dest='backend',
choices=backendlist)
parser.add_argument('--cross', default=False, dest='cross', action='store_true')
parser.add_argument('--failfast', action='store_true')
parser.add_argument('--no-unittests', action='store_true', default=False)
(options, _) = parser.parse_known_args()
# Enable coverage early...
enable_coverage = options.cov
if enable_coverage:
os.makedirs('.coverage', exist_ok=True)
sys.argv.remove('--cov')
import coverage
coverage.process_startup()
returncode = 0
cross = options.cross
backend, _ = guess_backend(options.backend, shutil.which('msbuild'))
no_unittests = options.no_unittests
# Running on a developer machine? Be nice!
if not mesonlib.is_windows() and not mesonlib.is_haiku() and 'CI' not in os.environ:
os.nice(20)
# Appveyor sets the `platform` environment variable which completely messes
# up building with the vs2010 and vs2015 backends.
#
# Specifically, MSBuild reads the `platform` environment variable to set
# the configured value for the platform (Win32/x64/arm), which breaks x86
# builds.
#
# Appveyor setting this also breaks our 'native build arch' detection for
# Windows in environment.py:detect_windows_arch() by overwriting the value
# of `platform` set by vcvarsall.bat.
#
# While building for x86, `platform` should be unset.
if 'APPVEYOR' in os.environ and os.environ['arch'] == 'x86':
os.environ.pop('platform')
# Run tests
print(mlog.bold('Running unittests.').get_text(mlog.colorize_console))
print(flush=True)
# Can't pass arguments to unit tests, so set the backend to use in the environment
env = os.environ.copy()
env['MESON_UNIT_TEST_BACKEND'] = backend.name
with tempfile.TemporaryDirectory() as temp_dir:
# Enable coverage on all subsequent processes.
if enable_coverage:
Path(temp_dir, 'usercustomize.py').open('w').write(
'import coverage\n'
'coverage.process_startup()\n')
env['COVERAGE_PROCESS_START'] = '.coveragerc'
if 'PYTHONPATH' in env:
env['PYTHONPATH'] = os.pathsep.join([temp_dir, env.get('PYTHONPATH')])
else:
env['PYTHONPATH'] = temp_dir
if not cross:
cmd = mesonlib.python_command + ['run_meson_command_tests.py', '-v']
if options.failfast:
cmd += ['--failfast']
returncode += subprocess.call(cmd, env=env)
if options.failfast and returncode != 0:
return returncode
if no_unittests:
print('Skipping all unit tests.')
returncode = 0
else:
cmd = mesonlib.python_command + ['run_unittests.py', '-v']
if options.failfast:
cmd += ['--failfast']
returncode += subprocess.call(cmd, env=env)
if options.failfast and returncode != 0:
return returncode
cmd = mesonlib.python_command + ['run_project_tests.py'] + sys.argv[1:]
returncode += subprocess.call(cmd, env=env)
else:
cross_test_args = mesonlib.python_command + ['run_cross_test.py']
print(mlog.bold('Running armhf cross tests.').get_text(mlog.colorize_console))
print(flush=True)
cmd = cross_test_args + ['cross/ubuntu-armhf.txt']
if options.failfast:
cmd += ['--failfast']
returncode += subprocess.call(cmd, env=env)
if options.failfast and returncode != 0:
return returncode
print(mlog.bold('Running mingw-w64 64-bit cross tests.')
.get_text(mlog.colorize_console))
print(flush=True)
cmd = cross_test_args + ['cross/linux-mingw-w64-64bit.txt']
if options.failfast:
cmd += ['--failfast']
returncode += subprocess.call(cmd, env=env)
return returncode
if __name__ == '__main__':
sys.exit(main())
| 38.788321 | 101 | 0.657446 |
4a1d271b665767d4cda7db035c3dd83b19fce1a1
| 3,757 |
py
|
Python
|
alipay/aop/api/domain/AlipayDataBillTransferaccountbookQueryModel.py
|
antopen/alipay-sdk-python-all
|
8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c
|
[
"Apache-2.0"
] | 213 |
2018-08-27T16:49:32.000Z
|
2021-12-29T04:34:12.000Z
|
alipay/aop/api/domain/AlipayDataBillTransferaccountbookQueryModel.py
|
antopen/alipay-sdk-python-all
|
8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c
|
[
"Apache-2.0"
] | 29 |
2018-09-29T06:43:00.000Z
|
2021-09-02T03:27:32.000Z
|
alipay/aop/api/domain/AlipayDataBillTransferaccountbookQueryModel.py
|
antopen/alipay-sdk-python-all
|
8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c
|
[
"Apache-2.0"
] | 59 |
2018-08-27T16:59:26.000Z
|
2022-03-25T10:08:15.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class AlipayDataBillTransferaccountbookQueryModel(object):
def __init__(self):
self._agreement_no = None
self._end_time = None
self._page_no = None
self._page_size = None
self._start_time = None
self._store_no = None
self._type = None
@property
def agreement_no(self):
return self._agreement_no
@agreement_no.setter
def agreement_no(self, value):
self._agreement_no = value
@property
def end_time(self):
return self._end_time
@end_time.setter
def end_time(self, value):
self._end_time = value
@property
def page_no(self):
return self._page_no
@page_no.setter
def page_no(self, value):
self._page_no = value
@property
def page_size(self):
return self._page_size
@page_size.setter
def page_size(self, value):
self._page_size = value
@property
def start_time(self):
return self._start_time
@start_time.setter
def start_time(self, value):
self._start_time = value
@property
def store_no(self):
return self._store_no
@store_no.setter
def store_no(self, value):
self._store_no = value
@property
def type(self):
return self._type
@type.setter
def type(self, value):
self._type = value
def to_alipay_dict(self):
params = dict()
if self.agreement_no:
if hasattr(self.agreement_no, 'to_alipay_dict'):
params['agreement_no'] = self.agreement_no.to_alipay_dict()
else:
params['agreement_no'] = self.agreement_no
if self.end_time:
if hasattr(self.end_time, 'to_alipay_dict'):
params['end_time'] = self.end_time.to_alipay_dict()
else:
params['end_time'] = self.end_time
if self.page_no:
if hasattr(self.page_no, 'to_alipay_dict'):
params['page_no'] = self.page_no.to_alipay_dict()
else:
params['page_no'] = self.page_no
if self.page_size:
if hasattr(self.page_size, 'to_alipay_dict'):
params['page_size'] = self.page_size.to_alipay_dict()
else:
params['page_size'] = self.page_size
if self.start_time:
if hasattr(self.start_time, 'to_alipay_dict'):
params['start_time'] = self.start_time.to_alipay_dict()
else:
params['start_time'] = self.start_time
if self.store_no:
if hasattr(self.store_no, 'to_alipay_dict'):
params['store_no'] = self.store_no.to_alipay_dict()
else:
params['store_no'] = self.store_no
if self.type:
if hasattr(self.type, 'to_alipay_dict'):
params['type'] = self.type.to_alipay_dict()
else:
params['type'] = self.type
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = AlipayDataBillTransferaccountbookQueryModel()
if 'agreement_no' in d:
o.agreement_no = d['agreement_no']
if 'end_time' in d:
o.end_time = d['end_time']
if 'page_no' in d:
o.page_no = d['page_no']
if 'page_size' in d:
o.page_size = d['page_size']
if 'start_time' in d:
o.start_time = d['start_time']
if 'store_no' in d:
o.store_no = d['store_no']
if 'type' in d:
o.type = d['type']
return o
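# Hedged usage sketch (added for illustration; the field values below are
# assumptions, not real API data): round-trip between the model object and the
# plain dict sent to the OpenAPI gateway. Illustrative only, never called.
def _example_round_trip():
    model = AlipayDataBillTransferaccountbookQueryModel()
    model.agreement_no = "20210000000000"
    model.start_time = "2021-01-01 00:00:00"
    model.end_time = "2021-01-31 23:59:59"
    params = model.to_alipay_dict()
    return AlipayDataBillTransferaccountbookQueryModel.from_alipay_dict(params)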
| 28.679389 | 75 | 0.571467 |
4a1d29dc19aa9032b3587283770a2a34826fffa6
| 24,186 |
py
|
Python
|
andes/models/agc.py
|
mhdella/andes
|
5b0d2e3e101a38fe0fd2d27343a0ce3f900be45e
|
[
"Apache-2.0"
] | 1 |
2019-12-24T12:57:05.000Z
|
2019-12-24T12:57:05.000Z
|
andes/models/agc.py
|
mhdella/andes
|
5b0d2e3e101a38fe0fd2d27343a0ce3f900be45e
|
[
"Apache-2.0"
] | null | null | null |
andes/models/agc.py
|
mhdella/andes
|
5b0d2e3e101a38fe0fd2d27343a0ce3f900be45e
|
[
"Apache-2.0"
] | null | null | null |
import sys
import logging
import numpy as np
import importlib
import platform
from cvxopt import mul, div, matrix, sparse, spdiag, spmatrix
from cvxopt.modeling import variable, op # NOQA
from andes.consts import Gx, Fy0, Gy0
from andes.models.base import ModelBase
from andes.utils.math import zeros, index
from andes.utils.solver import Solver
logger = logging.getLogger(__name__)
cp = None
class BArea(ModelBase):
"""
    Balancing area class. This class defines a power balancing area on top of the `Area` class for calculating
    the center-of-inertia (COI) frequency, total inertia, expected export power and area control error (ACE).
"""
def __init__(self, system, name):
super(BArea, self).__init__(system, name)
self._group = 'Calculation'
self._data.update({
'area': None,
'syn': None,
'beta': 0,
})
self._descr.update({'area': 'Idx of Area',
'beta': 'Beta coefficient to multiply by the pu freq. deviation',
'syn': 'Indices of generators for computing COI'
})
self._units.update({'syn': 'list'})
self._mandatory.extend(['area', 'syn', 'beta'])
self._algebs.extend(['Pexp', 'fcoi', 'ace'])
self.calls.update({
'gcall': True,
'init1': True,
'jac0': True,
})
self._service.extend(['P0', 'Mtot', 'M', 'usyn', 'wsyn'])
self._fnamey.extend(['P_{exp}', 'f_{coi}', 'ace'])
self._params.extend(['beta'])
self._init()
def init1(self, dae):
for item in self._service:
self.__dict__[item] = [[]] * self.n
# Start with frequency
for idx, item in enumerate(self.syn):
self.M[idx] = self.read_data_ext('Synchronous', field='M', idx=item)
self.Mtot[idx] = sum(self.M[idx])
self.usyn[idx] = self.read_data_ext('Synchronous', field='u', idx=item)
self.wsyn[idx] = self.read_data_ext('Synchronous', field='omega', idx=item)
dae.y[self.fcoi[idx]] = sum(mul(self.M[idx], dae.x[self.wsyn[idx]])) / self.Mtot[idx]
# Get BA Export Power
self.copy_data_ext('Area', field='area_P0', dest='P0', idx=self.area)
dae.y[self.Pexp] = self.P0
dae.y[self.ace] = 0
def gcall(self, dae):
# the value below gets updated at each iteration in `seriesflow`
P = self.read_data_ext('Area', field='area_P0', idx=self.area)
dae.g[self.Pexp] = dae.y[self.Pexp] - P
for idx, item in enumerate(self.syn):
self.wsyn[idx] = self.read_data_ext('Synchronous', field='omega', idx=item)
dae.g[self.fcoi[idx]] = dae.y[self.fcoi[idx]] - \
sum(mul(self.M[idx], dae.x[self.wsyn[idx]])) / self.Mtot[idx]
ACE = (P - self.P0) - mul(self.beta, (1 - dae.y[self.fcoi]))
dae.g[self.ace] = dae.y[self.ace] + ACE
def jac0(self, dae):
dae.add_jac(Gy0, 1, self.Pexp, self.Pexp)
dae.add_jac(Gy0, 1, self.fcoi, self.fcoi)
dae.add_jac(Gy0, 1, self.ace, self.ace)
dae.add_jac(Gy0, 1, self.ace, self.Pexp)
dae.add_jac(Gy0, self.beta, self.ace, self.fcoi)
class AGCBase(ModelBase):
"""
    Base AGC class. The allocation of Pagc among units is based on inverse droop (iR = 1/R).
"""
def __init__(self, system, name):
super(AGCBase, self).__init__(system, name)
self._group = 'AGCGroup'
self._data.update({'BArea': None,
'Ki': 0.05,
})
self._descr.update({'BArea': 'Idx of BArea',
'Ki': 'Integral gain of ACE',
})
self._mandatory.extend(['BArea', 'Ki'])
self._states.extend(['Pagc'])
self.calls.update({'init1': True,
'gcall': True,
'fcall': True,
'jac0': True,
'gycall': True
})
self._service.extend(['ace', 'iR', 'iRtot'])
self._fnamex.extend(['P_{agc}^{total}'])
self._params.extend(['Ki'])
def init1(self, dae):
self.copy_data_ext('BArea', field='ace', idx=self.BArea)
def fcall(self, dae):
dae.f[self.Pagc] = mul(self.Ki, dae.y[self.ace])
def gcall(self, dae):
pass
def jac0(self, dae):
dae.add_jac(Fy0, self.Ki, self.Pagc, self.ace)
def gycall(self, dae):
pass
class AGCSyn(AGCBase):
"""AGC for synchronous generators. This class changes the setpoints by modifying the generator pm."""
def __init__(self, system, name):
super(AGCSyn, self).__init__(system, name)
self._data.update({'syn': None})
self._descr.update({'syn': 'Indices of synchronous generators for AGC'})
self._units.update({'syn': 'list'})
self._mandatory.extend(['syn'])
self._service.extend(['pm', 'usyn'])
self._init()
def init1(self, dae):
super(AGCSyn, self).init1(dae)
self.pm = [[]] * self.n
self.iR = [[]] * self.n
self.usyn = [[]] * self.n
self.iRtot = [[]] * self.n
for idx, item in enumerate(self.syn):
self.pm[idx] = self.read_data_ext('Synchronous', field='pm', idx=item)
self.usyn[idx] = self.read_data_ext('Synchronous', field='u', idx=item)
self.iR[idx] = self.read_data_ext('Synchronous', field='M', idx=item)
self.iRtot[idx] = sum(mul(self.usyn[idx], self.iR[idx]))
def gcall(self, dae):
super(AGCSyn, self).gcall(dae)
# Kgen and each item in `self.pm`, `self.usyn`, and `self.Pagc` is a list
# Do not get rid of the `for` loop, since each of them is a matrix operation
for idx, item in enumerate(self.syn):
Kgen = div(self.iR[idx], self.iRtot[idx])
dae.g[self.pm[idx]] -= mul(self.usyn[idx], Kgen, dae.x[self.Pagc[idx]])
def gycall(self, dae):
super(AGCSyn, self).gycall(dae)
# Do not get rid of the for loop; for each `idx` it is a matrix operation
for idx, item in enumerate(self.syn):
Kgen = div(self.iR[idx], self.iRtot[idx])
dae.add_jac(Gx, -mul(self.usyn[idx], Kgen), self.pm[idx], self.Pagc[idx])
class AGC(AGCSyn):
"""Alias for class <AGCSyn>"""
pass
class AGCTG(AGCBase):
"""AGC class that modifies the turbine governor power reference. Links to TG1 only."""
def __init__(self, system, name):
super(AGCTG, self).__init__(system, name)
self._data.update({'tg': None})
self._mandatory.extend(['tg'])
self._descr.update({'tg': 'Indices of turbine governors for AGC'})
self._units.update({'tg': 'list'})
self._service.extend(['pin', 'R', 'iR', 'iRtot'])
self._init()
def init1(self, dae):
super(AGCTG, self).init1(dae)
self.pin = [[]] * self.n
self.R = [[]] * self.n
self.iR = [[]] * self.n
self.iRtot = [[]] * self.n
for idx, item in enumerate(self.tg):
self.pin[idx] = self.read_data_ext(model='Governor', field='pin', idx=item)
self.R[idx] = self.read_data_ext(model='Governor', field='R', idx=item)
self.iR[idx] = div(1, self.R[idx])
self.iRtot[idx] = sum(self.iR[idx])
def gcall(self, dae):
super(AGCTG, self).gcall(dae)
for idx, item in enumerate(self.tg):
Ktg = div(self.iR[idx], self.iRtot[idx])
dae.g[self.pin[idx]] += mul(Ktg, dae.x[self.Pagc[idx]])
def gycall(self, dae):
super(AGCTG, self).gycall(dae)
for idx, item in enumerate(self.tg):
Ktg = div(self.iR[idx], self.iRtot[idx])
dae.add_jac(Gx, Ktg, self.pin[idx], self.Pagc[idx])
class AGCVSCBase(object):
"""
    Base class for AGC using VSC. Modifies ref1 of PV- or PQ-controlled VSCs. This class must be
    combined, via multiple inheritance, with subclasses of AGCBase.
"""
def __init__(self, system, name):
self.system = system
self._data.update({'vsc': None,
'Rvsc': None,
})
self._descr.update({'vsc': 'Indices of VSCs to control',
'Rvsc': 'Droop coefficients for the VSCs'})
self._units.update({'tg': 'list',
'Rvsc': 'list'})
self._mandatory.extend(['vsc', 'Rvsc'])
self._service.extend(['uvsc', 'ref1'])
self._init()
def init1(self, dae):
self.ref1 = [[]] * self.n
self.uvsc = [[]] * self.n
# manually convert self.Rvsc to a list of matrices
self.Rvsc = [matrix(item) for item in self.Rvsc]
self.iRvsc = [div(1, item) for item in self.Rvsc]
# Only PV or PQ-controlled VSCs are acceptable
for agc_idx, item in enumerate(self.vsc[:]):
pv_or_pq = self.read_data_ext('VSCgroup', field="PV", idx=item) + \
self.read_data_ext('VSCgroup', field='PQ', idx=item)
valid_vsc_list = list()
valid_vsc_R = list()
for i, (vsc_idx, valid) in enumerate(zip(item, pv_or_pq)):
if valid:
valid_vsc_list.append(vsc_idx)
# TODO: fix the hard-coded `vsc_Idx` below
valid_vsc_R.append(self.Rvsc[agc_idx][i])
else:
logger.warning('VSC <{}> is not a PV or PQ type, thus cannot be used for AGC.'.format(vsc_idx))
self.vsc[agc_idx] = valid_vsc_list
for agc_idx, item in enumerate(self.vsc):
# skip elements that contain no valid VSC index
if len(item) == 0:
continue
# retrieve status `uvsc`
self.uvsc[agc_idx] = self.read_data_ext('VSCgroup', field='u', idx=item)
self.ref1[agc_idx] = self.read_data_ext('VSCgroup', field='ref1', idx=item)
# Add `Rvsc` to Mtot
self.iRtot[agc_idx] += sum(mul(self.uvsc[agc_idx], self.iRvsc[agc_idx]))
def gcall(self, dae):
for agc_idx, item in enumerate(self.vsc):
if len(item) == 0:
continue
Kvsc = div(self.iRvsc[agc_idx], self.iRtot[agc_idx])
dae.g[self.ref1[agc_idx]] -= mul(self.uvsc[agc_idx], Kvsc, dae.x[self.Pagc[agc_idx]])
def gycall(self, dae):
for agc_idx, item in enumerate(self.vsc):
if len(item) == 0:
continue
Kvsc = div(self.iRvsc[agc_idx], self.iRtot[agc_idx])
dae.add_jac(Gx, -mul(self.uvsc[agc_idx], Kvsc), self.ref1[agc_idx], self.Pagc[agc_idx])
class AGCTGVSC(AGCTG, AGCVSCBase):
"""AGC class that modifies the turbine governor and VSC pref"""
def __init__(self, system, name):
AGCTG.__init__(self, system, name)
AGCVSCBase.__init__(self, system, name)
self._init()
def init1(self, dae):
AGCTG.init1(self, dae)
AGCVSCBase.init1(self, dae)
def jac0(self, dae):
AGCTG.jac0(self, dae)
def gcall(self, dae):
AGCTG.gcall(self, dae)
AGCVSCBase.gcall(self, dae)
def gycall(self, dae):
AGCTG.gycall(self, dae)
AGCVSCBase.gycall(self, dae)
def fcall(self, dae):
AGCTG.fcall(self, dae)
class AGCMPC(ModelBase):
"""MPC based AGC using TG and VSC"""
def __init__(self, system, name):
super(AGCMPC, self).__init__(system, name)
if platform.system() == 'Darwin':
logger.error("** AGCMPC optimization does not work correctly on macOS!!!")
self._group = "AGCGroup"
self._name = "AGCMPC"
self.param_remove('Vn')
self.param_remove('Sn')
self._data.update({'tg': None,
'avr': None,
'vsc': None,
'qw': 15000,
'qu': 10,
})
self._params.extend(['qw', 'qu'])
self._descr.update({'tg': 'idx for turbine governors',
'vsc': 'idx for VSC dynamic models',
'qw': 'the coeff for minimizing frequency deviation',
'qu': 'the coeff for minimizing input deviation'
})
self._units.update({'tg': 'list', 'vsc': 'list'})
self._mandatory.extend(['tg', 'avr'])
self.calls.update({'init1': True,
'gcall': True,
'jac0': True,
'fxcall': True})
        self._service.extend(['xg10', 'pin0', 'delta0', 'omega0', 't', 'dpin0', 'x0', 'xlast',
                              'xidx', 'uidx', 'yxidx', 'sfx', 'sfu', 'sfy', 'sgx', 'sgu', 'sgy',
                              'A', 'B', 'Aa', 'Ba',
                              'obj', 'domega', 'du', 'dx', 'x', 'xpred',
                              'xa'])
self._algebs.extend(['dpin'])
        self._fnamey.extend([r'\Delta P_{in}'])
self.solver = Solver(system.config.sparselib)
self.H = 6
self.uvar = None
self.op = None
self._linearized = False
self._interval = 0 # AGC apply interval in seconds. 0 - continuous
self._init()
def init1(self, dae):
if globals()['cp'] is None:
try:
globals()['cp'] = importlib.import_module('cvxpy')
except ImportError:
logger.error('CVXPY import error. Install optional package `cvxpy` to use AGCMPC')
sys.exit(1)
self.t = -1
self.tlast = -1
# state array x = [delta, omega, xg1]
# input array u = [dpin]
self.copy_data_ext('Governor', field='gen', dest='syn', idx=self.tg)
self.copy_data_ext('Synchronous', field='delta', dest='delta', idx=self.syn)
self.copy_data_ext('Synchronous', field='omega', dest='omega', idx=self.syn)
self.copy_data_ext('Synchronous', field='e1d', dest='e1d', idx=self.syn)
self.copy_data_ext('Synchronous', field='e1q', dest='e1q', idx=self.syn)
self.copy_data_ext('Synchronous', field='e2d', dest='e2d', idx=self.syn)
self.copy_data_ext('Synchronous', field='e2q', dest='e2q', idx=self.syn)
self.copy_data_ext('Governor', field='xg1', dest='xg1', idx=self.tg)
self.copy_data_ext('Governor', field='xg2', dest='xg2', idx=self.tg)
self.copy_data_ext('Governor', field='xg3', dest='xg3', idx=self.tg)
self.copy_data_ext('Governor', field='pin', dest='pin', idx=self.tg)
self.copy_data_ext('AVR', field='vm', dest='vm', idx=self.avr)
self.copy_data_ext('AVR', field='vr1', dest='vr1', idx=self.avr)
self.copy_data_ext('AVR', field='vr2', dest='vr2', idx=self.avr)
self.copy_data_ext('AVR', field='vfout', dest='vfout', idx=self.avr)
dae.y[self.dpin] = 0
self.dpin0 = zeros(self.n, 1)
# build state/ input /other algebraic idx array
self.xidx = matrix([self.delta, self.omega, self.e1d, self.e1q, self.e2d, self.e2q, self.xg1, self.xg2,
self.xg3, self.vm, self.vr1, self.vr2, self.vfout])
self.x0 = dae.x[self.xidx]
self.x = zeros(len(self.xidx), 1)
self.dx = zeros(len(self.xidx), 1)
self.xlast = dae.x[self.xidx]
self.uidx = matrix([self.dpin])
self.ulast = zeros(self.n, 1)
self.dpin_calc = zeros(self.n, 1)
self.widx = self.system.PQ.a
self.w0 = self.system.PQ.p0
self.wlast = matrix(self.w0)
self.yidx = self.omega
self.yidx_in_x = [index(self.xidx, y)[0] for y in self.yidx]
yidx = np.delete(np.arange(dae.m), np.array(self.uidx))
self.yxidx = matrix(yidx)
# optimization problem
self.uvar = cp.Variable((len(self.uidx), self.H+1), 'u')
self.uzero = cp.Parameter((len(self.uidx), ), 'u0')
self.xazero = cp.Parameter((2 * len(self.xidx), 1), 'xa')
self.prob = None
self.t_store = []
self.xpred_store = []
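    # Hedged sketch (added for clarity; not part of the original model): gcall()
    # below predicts with the augmented linear system
    #     xa = [dx; x],    xa(k+1) = Aa * xa(k) + Ba * du(k)
    # where dx is the state deviation from the previous step and du the change
    # in the governor reference dpin. The helper restates that one-step update;
    # it is illustrative only and never called.
    @staticmethod
    def _predict_one_step_sketch(Aa, Ba, xa, du):
        return matrix(Aa) * matrix(xa) + matrix(Ba) * matrix(du)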
def gcall(self, dae):
if self.t == -1:
self.t = dae.t
return
if not self._linearized:
# update the linearization points
self._linearized = True
self.t = dae.t
self.tlast = dae.t
self.sfx = dae.Fx[self.xidx, self.xidx]
self.sfu = dae.Fy[self.xidx, self.uidx]
self.sfy = dae.Fy[self.xidx, self.yxidx]
self.sgx = dae.Gx[self.yxidx, self.xidx]
self.sgu = dae.Gy[self.yxidx, self.uidx]
self.sgw = spmatrix(1, self.widx, list(range(len(self.widx))), (len(self.yxidx), len(self.widx)))
self.sgy = dae.Gy[self.yxidx, self.yxidx]
# create state matrices
self.gyigx = matrix(self.sgx)
self.gyigu = matrix(self.sgu)
self.gyigw = matrix(self.sgw)
self.solver.linsolve(self.sgy, self.gyigx)
self.solver.linsolve(self.sgy, self.gyigu)
self.solver.linsolve(self.sgy, self.gyigw)
self.A = (self.sfx - self.sfy * self.gyigx)
self.B = (self.sfu - self.sfy * self.gyigu)
self.C = -(self.sfy * self.gyigw)
self.A = self.system.tds.h * self.A
self.Aa = sparse([[self.A, self.A],
[spmatrix([], [], [], (self.A.size[0], self.A.size[1])),
spdiag([1] * len(self.xidx))]])
self.Ba = sparse([self.B, self.B])
self.Ca = sparse([self.C, self.C])
# formulate optimization problem
nx = len(self.xidx)
nu = len(self.uidx)
obj_x = 0
xa_0 = self.xazero
for i in range(self.H):
# calculate Xa for each step in horizon H
# du = cp.reshape(self.uvar[:, i+1], (nu, 1)) - self.uvar[:,i]
du = cp.reshape(self.uvar[:, i+1] - self.uvar[:, i], (nu, 1))
xa_i = matrix(self.Aa) * xa_0 + matrix(self.Ba) * du
obj_x += cp.multiply(self.qw, cp.square(xa_i[nx:][self.yidx_in_x] - self.x0[self.yidx_in_x]))
xa_0 = xa_i
# construct the optimization problem
self.obj_x = cp.sum(obj_x)
self.obj_u = 0
self.obj_u += cp.sum(
cp.multiply(
np.array(self.qu).reshape((nu, )),
cp.sum(cp.square(self.uvar[:, 1:] - self.uvar[:, :-1]), axis=1)
)
)
constraints = [self.uvar[:, 0] == self.uzero,
self.uvar[:, 1:] - self.uvar[:, :-1] <= 0.5,
self.uvar[:, 1:] - self.uvar[:, :-1] >= -0.5
]
self.prob = cp.Problem(cp.Minimize(self.obj_x + self.obj_u), constraints)
if dae.t != self.t:
self.t = dae.t
nx = len(self.xidx)
nu = len(self.uidx)
# # update Delta x and x for current step
self.x = dae.x[self.xidx]
self.dx = self.x - self.xlast
self.xa = matrix([self.dx, self.x])
# assign values to self.uzero and self.xazero
self.uzero.value = np.array(self.ulast).reshape((-1, ))
self.xazero.value = np.array(self.xa).reshape((-1, 1))
# use warm_start when possible
if dae.t == 0:
self.prob.solve()
else:
self.prob.solve(warm_start=1)
self.dpin_calc = matrix(self.uvar.value[:, 1])
# update every interval
if (self.t - self.tlast) >= self._interval:
self.tlast = self.t
self.dpin0 = self.dpin_calc
opt_val = self.prob.solution.opt_val
logger.debug("t={:.4f}, obj={:.6f}, u[0]={:.6f}".format(dae.t, opt_val, self.uvar.value[0, 0]))
self.t_store.append(self.t)
xa_post = matrix(self.Aa) * self.xa + matrix(self.Ba) * (matrix(self.uvar.value[:, 0]) - self.ulast)
self.xpred_store.append(xa_post[nx:][self.yidx_in_x][0])
# # post-optimization evaluator
# # u_val = matrix([[0, 0], [0, 0], [0, 0]])
# u_val = matrix(self.uvar.value)
# u_val = zeros(2, self.H)
# obj_x = 0
# xa_0 = self.xa
# u_0 = self.ulast
# for i in range(self.H):
# # calculate Xa for each step in horizon H
# du = np.reshape(u_val[:, i], (-1, 1)) - u_0
# xa_i = matrix(self.Aa) * xa_0 + matrix(self.Ba) * matrix(du) #+ matrix(self.Ca) * self.dw
# obj_x += mul(self.qw, (xa_i[nx:][self.yidx_in_x] - self.x0[self.yidx_in_x]) ** 2)
# xa_0 = xa_i
# u_0 = np.reshape(u_val[:, i], (-1, 1))
# self.obj_x = sum(obj_x)
# u2 = np.array(mul(u_val, u_val))
# self.obj_u = sum(mul(self.qu, matrix(np.sum(u2, 1))))
#
# eval_obj = self.obj_x + self.obj_u
# print("Post eval, t={:.4f} obj = {:.6f}, u = {:.6f}, {:.6f}".format(self.t, eval_obj, u_val[0, 0],
# u_val[1, 0]))
# print(" obj_x = {}, obj_u = {}".format(self.obj_x, self.obj_u))
# record data for the current step
self.ulast = self.dpin_calc
self.xlast = dae.x[self.xidx]
dae.g[self.dpin] = dae.y[self.dpin] - self.dpin0
dae.g[self.pin] += dae.y[self.dpin] # positive `dpin` increases the `pin` reference
def jac0(self, dae):
dae.add_jac(Gy0, 1, self.dpin, self.dpin)
dae.add_jac(Gy0, 1, self.pin, self.dpin)
class AGCSynVSC(AGCSyn, AGCVSCBase):
"""AGC class that modifies Synchronous pm and VSC pref"""
def __init__(self, system, name):
AGCSyn.__init__(self, system, name)
AGCVSCBase.__init__(self, system, name)
self._init()
def init1(self, dae):
AGCSyn.init1(self, dae)
AGCVSCBase.init1(self, dae)
def jac0(self, dae):
AGCSyn.jac0(self, dae)
def gcall(self, dae):
AGCSyn.gcall(self, dae)
AGCVSCBase.gcall(self, dae)
def gycall(self, dae):
AGCSyn.gycall(self, dae)
AGCVSCBase.gycall(self, dae)
def fcall(self, dae):
AGCSyn.fcall(self, dae)
class eAGC(ModelBase):
def __init__(self, system, name):
super(eAGC, self).__init__(system, name)
self._group = 'Control'
self._data.update({
'cl': None,
'tl': 0,
'Pl': None,
'BA': None,
})
self._descr.update({
'cl': 'Loss sharing coefficient (vector)',
'tl': 'Time of generator loss',
'Pl': 'Loss of power generation in pu (vector)',
'BA': 'Balancing Area that support the Gen loss',
})
self._mandatory.extend(['cl', 'tl', 'Pl', 'BA'])
self.calls.update({
'gcall': True,
'init1': True,
'jac0': False,
'fcall': False,
})
self._service.extend(['ace', 'en'])
self._params.extend(['cl', 'tl', 'Pl'])
self._init()
def init1(self, dae):
self.ace = [[]] * self.n
for idx, item in enumerate(self.BA):
self.ace[idx] = self.read_data_ext('BArea', field='ace', idx=item)
self.en = zeros(self.n, 1)
def switch(self):
"""Switch if time for eAgc has come"""
t = self.system.dae.t
for idx in range(0, self.n):
if t >= self.tl[idx]:
if self.en[idx] == 0:
self.en[idx] = 1
logger.info('Extended ACE <{}> activated at t = {}.'.format(self.idx[idx], t))
def gcall(self, dae):
self.switch()
for idx in range(0, self.n):
dae.g[self.ace[idx]] -= mul(self.en[idx], self.cl[:, idx],
self.Pl[idx])
| 37.731669 | 115 | 0.527206 |
4a1d2ad95bf6bb003dfd153049645be9a35a5b14
| 5,733 |
py
|
Python
|
cirq/ops/phased_iswap_gate_test.py
|
gnperdue/Cirq
|
15c142df5b1d27bdd42bd5b3f9330cf0c5af4e9b
|
[
"Apache-2.0"
] | null | null | null |
cirq/ops/phased_iswap_gate_test.py
|
gnperdue/Cirq
|
15c142df5b1d27bdd42bd5b3f9330cf0c5af4e9b
|
[
"Apache-2.0"
] | null | null | null |
cirq/ops/phased_iswap_gate_test.py
|
gnperdue/Cirq
|
15c142df5b1d27bdd42bd5b3f9330cf0c5af4e9b
|
[
"Apache-2.0"
] | 1 |
2020-12-18T16:36:41.000Z
|
2020-12-18T16:36:41.000Z
|
# Copyright 2019 The Cirq Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import itertools
import numpy as np
import pytest
import scipy
import sympy
import cirq
np.set_printoptions(linewidth=300)
def test_phased_iswap_init():
p = -0.25
t = 0.75
gate = cirq.PhasedISwapPowGate(phase_exponent=p, exponent=t)
assert gate.phase_exponent == p
assert gate.exponent == t
def test_phased_iswap_equality():
assert (cirq.PhasedISwapPowGate(phase_exponent=0,
exponent=0.4) == cirq.ISWAP**0.4)
def test_repr():
p = -0.25
t = 0.75
gate = cirq.PhasedISwapPowGate(phase_exponent=p, exponent=t)
cirq.testing.assert_equivalent_repr(gate)
def test_phased_iswap_unitary():
p = 0.3
t = 0.4
actual = cirq.unitary(cirq.PhasedISwapPowGate(phase_exponent=p, exponent=t))
c = np.cos(np.pi * t / 2)
s = np.sin(np.pi * t / 2) * 1j
f = np.exp(2j * np.pi * p)
# yapf: disable
expected = np.array([[1, 0, 0, 0],
[0, c, s * f, 0],
[0, s * f.conjugate(), c, 0],
[0, 0, 0, 1]])
# yapf: enable
assert np.allclose(actual, expected)
def test_phased_iswap_equivalent_circuit():
p = 0.7
t = -0.4
gate = cirq.PhasedISwapPowGate(phase_exponent=p, exponent=t)
q0, q1 = cirq.LineQubit.range(2)
equivalent_circuit = cirq.Circuit([
cirq.Z(q0)**p,
cirq.Z(q1)**-p,
cirq.ISWAP(q0, q1)**t,
cirq.Z(q0)**-p,
cirq.Z(q1)**p,
])
assert np.allclose(cirq.unitary(gate), cirq.unitary(equivalent_circuit))
def test_phased_iswap_str():
assert str(cirq.PhasedISwapPowGate(exponent=1)) == 'PhasedISWAP'
assert str(cirq.PhasedISwapPowGate(exponent=0.5)) == 'PhasedISWAP**0.5'
def test_phased_iswap_pow():
gate1 = cirq.PhasedISwapPowGate(phase_exponent=0.1, exponent=0.25)
gate2 = cirq.PhasedISwapPowGate(phase_exponent=0.1, exponent=0.5)
assert gate1**2 == gate2
u1 = cirq.unitary(gate1)
u2 = cirq.unitary(gate2)
assert np.allclose(u1 @ u1, u2)
def test_decompose_invalid_qubits():
qs = cirq.LineQubit.range(3)
with pytest.raises(ValueError):
cirq.protocols.decompose_once_with_qubits(cirq.PhasedISwapPowGate(), qs)
@pytest.mark.parametrize('phase_exponent, exponent',
itertools.product(
(-0.3, 0, 0.1, 0.5, 1, 2, sympy.Symbol('p')),
(-0.1, 0, 0.1, 1, sympy.Symbol('t')),
))
def test_phased_iswap_has_consistent_protocols(phase_exponent, exponent):
cirq.testing.assert_implements_consistent_protocols(
cirq.PhasedISwapPowGate(phase_exponent=phase_exponent,
exponent=exponent),
ignoring_global_phase=False)
def test_diagram():
q0, q1 = cirq.LineQubit.range(2)
c = cirq.Circuit(
cirq.PhasedISwapPowGate(phase_exponent=sympy.Symbol('p'),
exponent=sympy.Symbol('t')).on(q0, q1),
cirq.PhasedISwapPowGate(phase_exponent=2 * sympy.Symbol('p'),
exponent=1 - sympy.Symbol('t')).on(q0, q1),
cirq.PhasedISwapPowGate(phase_exponent=0.2, exponent=1).on(q0, q1),
cirq.PhasedISwapPowGate(phase_exponent=0.3, exponent=0.4).on(q0, q1),
)
cirq.testing.assert_has_diagram(
c, """
0: ───PhISwap(p)─────PhISwap(2*p)───────────PhISwap(0.2)───PhISwap(0.3)───────
│ │ │ │
1: ───PhISwap(p)^t───PhISwap(2*p)^(1 - t)───PhISwap(0.2)───PhISwap(0.3)^0.4───
""")
@pytest.mark.parametrize('angle_rads', (-np.pi, -np.pi / 3, -0.1, np.pi / 5))
def test_givens_rotation_unitary(angle_rads):
actual = cirq.unitary(cirq.GivensRotation(angle_rads))
c = np.cos(angle_rads)
s = np.sin(angle_rads)
# yapf: disable
expected = np.array([[1, 0, 0, 0],
[0, c, -s, 0],
[0, s, c, 0],
[0, 0, 0, 1]])
# yapf: enable
assert np.allclose(actual, expected)
@pytest.mark.parametrize('angle_rads', (-2 * np.pi / 3, -0.2, 0.4, np.pi / 4))
def test_givens_rotation_hamiltonian(angle_rads):
actual = cirq.unitary(cirq.GivensRotation(angle_rads))
x = np.array([[0, 1], [1, 0]])
y = np.array([[0, -1j], [1j, 0]])
yx = np.kron(y, x)
xy = np.kron(x, y)
expected = scipy.linalg.expm(-0.5j * angle_rads * (yx - xy))
assert np.allclose(actual, expected)
def test_givens_rotation_equivalent_circuit():
angle_rads = 3 * np.pi / 7
t = 2 * angle_rads / np.pi
gate = cirq.GivensRotation(angle_rads)
q0, q1 = cirq.LineQubit.range(2)
equivalent_circuit = cirq.Circuit([
cirq.T(q0),
cirq.T(q1)**-1,
cirq.ISWAP(q0, q1)**t,
cirq.T(q0)**-1,
cirq.T(q1),
])
assert np.allclose(cirq.unitary(gate), cirq.unitary(equivalent_circuit))
@pytest.mark.parametrize('angle_rads', (-np.pi / 5, 0.4, 2, np.pi))
def test_givens_rotation_has_consistent_protocols(angle_rads):
cirq.testing.assert_implements_consistent_protocols(
cirq.GivensRotation(angle_rads), ignoring_global_phase=False)
| 33.138728 | 80 | 0.614338 |
4a1d2afa2d9638c2015b5f846be1524a1c3af96d
| 227 |
py
|
Python
|
Darlington/phase1/python Basic 2/day 21 solution/qtn2.py
|
CodedLadiesInnovateTech/-python-challenge-solutions
|
430cd3eb84a2905a286819eef384ee484d8eb9e7
|
[
"MIT"
] | 6 |
2020-05-23T19:53:25.000Z
|
2021-05-08T20:21:30.000Z
|
Darlington/phase1/python Basic 2/day 21 solution/qtn2.py
|
CodedLadiesInnovateTech/-python-challenge-solutions
|
430cd3eb84a2905a286819eef384ee484d8eb9e7
|
[
"MIT"
] | 8 |
2020-05-14T18:53:12.000Z
|
2020-07-03T00:06:20.000Z
|
Darlington/phase1/python Basic 2/day 21 solution/qtn2.py
|
CodedLadiesInnovateTech/-python-challenge-solutions
|
430cd3eb84a2905a286819eef384ee484d8eb9e7
|
[
"MIT"
] | 39 |
2020-05-10T20:55:02.000Z
|
2020-09-12T17:40:59.000Z
|
#program that accepts six numbers as input and sorts them in descending order.
print("Input six integers:")
nums = list(map(int, input().split()))
nums.sort()
nums.reverse()
print("After sorting the said ntegers:")
print(*nums)
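# Illustrative run (the six input values below are arbitrary, not from the task):
#   Input six integers:
#   3 1 4 1 5 9
#   After sorting the said integers:
#   9 5 4 3 1 1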
| 32.428571 | 78 | 0.740088 |
4a1d2b2c5e672adacbe709a1b4bf99051e818983
| 573 |
py
|
Python
|
scripts/general/feather_to_excel.py
|
morrislab/plos-medicine-joint-patterns
|
cfdc6dd4854ec33e7e2efbf36d648b65d278df33
|
[
"MIT"
] | null | null | null |
scripts/general/feather_to_excel.py
|
morrislab/plos-medicine-joint-patterns
|
cfdc6dd4854ec33e7e2efbf36d648b65d278df33
|
[
"MIT"
] | null | null | null |
scripts/general/feather_to_excel.py
|
morrislab/plos-medicine-joint-patterns
|
cfdc6dd4854ec33e7e2efbf36d648b65d278df33
|
[
"MIT"
] | 1 |
2021-04-05T22:21:58.000Z
|
2021-04-05T22:21:58.000Z
|
"""
Converts a Feather file to an Excel file.
"""
from click import *
from logging import *
import pandas as pd
@command()
@option("--input", required=True, help="the Feather file to read input from")
@option("--output", required=True, help="the Excel file to write output to")
def main(input, output):
basicConfig(level=DEBUG)
# Load data.
info("Loading data")
X = pd.read_feather(input)
debug(f"Result: {X.shape}")
# Write output.
info("Writing output")
X.to_excel(output, index=False)
if __name__ == "__main__":
main()
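# Illustrative invocation (file names are placeholders, not from this script):
#   python feather_to_excel.py --input table.feather --output table.xlsx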
| 15.916667 | 77 | 0.652705 |
4a1d2c0bc35ea4780434bf367a04185345d34f8f
| 3,067 |
py
|
Python
|
app/dashapp1/callbacks.py
|
credwood/bitplayers
|
4ca6b6c6a21bb21d7cd963c64028415559c3dcc4
|
[
"MIT"
] | 1 |
2020-06-26T21:49:14.000Z
|
2020-06-26T21:49:14.000Z
|
app/dashapp1/callbacks.py
|
credwood/bitplayers
|
4ca6b6c6a21bb21d7cd963c64028415559c3dcc4
|
[
"MIT"
] | 2 |
2020-03-31T11:11:04.000Z
|
2021-12-13T20:38:48.000Z
|
app/dashapp1/callbacks.py
|
credwood/bitplayers
|
4ca6b6c6a21bb21d7cd963c64028415559c3dcc4
|
[
"MIT"
] | null | null | null |
from datetime import datetime as dt
from dash.dependencies import Input
from dash.dependencies import Output
import dash_core_components as dcc
import dash_html_components as html
import pandas as pd
import plotly
import plotly.graph_objs as go
import mysql.connector as sql
def register_callbacks(dashapp):
@dashapp.callback(Output('live-graph', 'figure'),
[Input('graph-update', 'n_intervals')])
def update_graph_call(n):
try:
db_connection = sql.connect(user='root', password='',
host='127.0.0.1',
database='tweets')
db_cursor = db_connection.cursor()
df = pd.read_sql('SELECT * FROM sent_trump ORDER BY id DESC LIMIT 1000', con=db_connection, index_col='id')
#df.sort_values('date_time', inplace=True)
db_cursor.close()
db_connection.close()
df["rolling_textblob_ave"] = df["sentiment_textblob"].rolling(int(len(df)/2)).mean()
df["rolling_vader_ave"] = df["sentiment_vader"].rolling(int(len(df)/2)).mean()
df['date_time'] = pd.to_datetime(df['date_time'])
df.set_index('date_time', inplace=True)
df = df.resample('1S').mean()
df.dropna(inplace=True)
X = df.index
Y1 = df.rolling_textblob_ave
Y2 = df.rolling_vader_ave
max_r = max(max(Y1), max(Y2))
min_r = min(min(Y1), min(Y2))
data = [go.Scatter(
x=X,
y=Y1,
name='textblob',
mode= 'lines+markers'
), go.Scatter(
x=X,
y=Y2,
name='vader',
mode= 'lines+markers'
)]
return {'data': data, 'layout' : go.Layout(xaxis=dict(range=[min(X),max(X)]),
yaxis=dict(range=[min_r,max_r]))}
except Exception as e:
with open('errors.txt','a') as f:
f.write(str(e))
f.write('\n')
@dashapp.callback(Output('datatable-row-ids', 'data'),
[Input('datatable-row-ids', "page_current"),Input('datatable-row-ids', 'page_size')])
def generate_table(page_current,page_size):
try:
db_connection= sql.connect(user='root', password='', host='127.0.0.1', database='tweets')
cur = db_connection.cursor()
df = pd.read_sql('SELECT * FROM sent_trump ORDER BY id DESC LIMIT 15', con=db_connection, index_col='id')
#df.sort_values('date_time', inplace=True)
            cur.close()
db_connection.close()
except Exception as e:
with open('errors.txt','a') as f:
f.write(str(e))
f.write('\n')
return df.iloc[page_current*page_size:(page_current+ 1)*page_size].to_dict('records')
| 38.822785 | 119 | 0.529508 |
4a1d2c0c1c6cac5e7a8fef918cf6e33d095126fe
| 5,021 |
py
|
Python
|
ks_main.py
|
almyki/Kitchen-Scraps-v2
|
f7770eb755caa03ea7db39f0fa2661dec7de21c1
|
[
"Unlicense"
] | null | null | null |
ks_main.py
|
almyki/Kitchen-Scraps-v2
|
f7770eb755caa03ea7db39f0fa2661dec7de21c1
|
[
"Unlicense"
] | null | null | null |
ks_main.py
|
almyki/Kitchen-Scraps-v2
|
f7770eb755caa03ea7db39f0fa2661dec7de21c1
|
[
"Unlicense"
] | null | null | null |
"""Main module for the game Kitchen Scraps."""
#### OUTLINE - MODULES
# ks_main.py
"""Contains the active gameplay loop. The \'central hub\' that puts to use the rest of the modules/classes. This should
not contain any classes or functions of its own."""
# ks_settings.py
"""One huge class that contains all the actual variables and handles a lot of the core mechanisms."""
# TODO Split this into multiple classes. 'Variables' container and 'mechanics' container.
# ks_environment.py
"""Classes to handle the game's -screen elements- like background images and grids/organization."""
# ks_menus.py
"""Classes to handle -menu screens- like the play menu, save/file menu, pause/options menu, and levels menu."""
# ks_buttons.py
"""Classes to handle -interactive- images (with rollovers), Frame images, Buttons, and specifically Food buttons."""
#### JUNK MODULES
# ks_extras_dumping_ground.py
"""A file meant to contain any bits of code that you aren't using anymore, but want to archive just in case."""
# ks_library.py
"""A currently-defunct file that originally held game data about foods/recipes. Turn this into metadata instead?"""
# ks_settings_misc.py
"""I don't exactly remember why this is split off or its purpose. I think I orphaned some functions out when I wasn't sure what to do with them for the moment."""
# ks_testing.py
"""Empty as of this writing. I use it to test out little bits and bobs isolated sometimes."""
import sys
import pygame
from ks_environment import Grid, Button, MessageDisplay
from ks_settings import Settings
from ks_menus import LevelMenu
ks = Settings('ks_bg')
# Set up the game and level.
pygame.init()
pygame.mixer.music.load('sounds/' + ks.music + '.mp3')
pygame.mixer.music.set_volume(0.1)
pygame.mixer.music.play(-1)
""" ks.start_game()
btn_new_game = Button('new_game', self.bg, origin='center')
def start_new_game():
new_game = btn_new_game.check_collide()
if new_game == True:
self.state == 'play' """
ks.set_level()
ks.make_level_menu()
test_msg = MessageDisplay('carrot', ks.bg, 'a carrot!', 'jupiterc.ttf', 16, (80, 40, 20))
center_screen = (ks.bg.rect[2]/2, ks.bg.rect[3]/2)
test_msg.place_image(center_screen, 'center')
# TODO Testing
while True:
ks.refresh_screen()
# TODO Show Level Prompt Card.
# Detect user events. If mouse-click, return the clicked element and act on it.
if ks.state == 'menu':
pass
elif ks.state == 'play':
clicked_button = ks.check_buttons()
# If button is food item, switch grid if possible.
if clicked_button:
if clicked_button.name in ks.current_foods and clicked_button.active:
ks.switch_grid(clicked_button)
# If button is 'Mix' and 'Mix' is active, try the mix. Activate and return O/X for Result Box.
elif clicked_button == ks.mix_button and ks.mix_button.active:
ks.big_box.result = ks.mix_ingredients()
ks.big_box.disable_all_except_self(ks.buttons)
# If Result Box is active, proceed on user input based on success or failure.
elif clicked_button == ks.big_box and ks.big_box.active:
# If Result is Success, show result food in Result Box and wait for another input.
if ks.big_box.success:
ks.sfx_click.play()
ks.big_box.fill_big_box(ks.big_box.result)
ks.buttons.append(ks.big_box.result)
# If Result is Failure, return food to pantry.
else:
ks.sfx_denied.play()
for material in ks.mixing_grid.grid.values():
ks.switch_grid(material)
for button in ks.buttons:
button.active = True
ks.big_box.result = ''
ks.big_box.active = False
# If Result Product is displayed, wait for user input before continuing the game.
elif clicked_button == ks.big_box.result:
ks.erase_mix_materials()
if clicked_button.name != ks.current_goal:
ks.confirm_result_and_cont()
# TODO If player wins, show Win Card and wait for input. Level up and reset screen when user proceeds.
elif clicked_button.name == ks.current_goal:
ks.level += 1
if ks.level < len(ks.goals):
ks.set_level()
else:
print('Hey, congrats, you win! I don\'t have any more levels yet. Thanks for playing =3= !')
ks.refresh_screen()
# TODO This displays food names on hover. Clean it up.
mouse_xy = pygame.mouse.get_pos()
for cell in ks.pantry_grid.grid.values():
if cell:
test_msg.img_srf = test_msg.font.render(cell.name.upper(), False, test_msg.color)
test_msg.show_on_collide(mouse_xy, cell)
pygame.display.flip()
####
| 39.226563 | 162 | 0.641705 |
4a1d2c1074ce635c02d908254fd5b5512c3c1b2e
| 2,164 |
py
|
Python
|
elit/datasets/parsing/semeval15.py
|
emorynlp/stem-cell-hypothesis
|
48a628093d93d653865fbac6409d179cddd99293
|
[
"Apache-2.0"
] | 4 |
2021-09-17T15:23:31.000Z
|
2022-02-28T10:18:04.000Z
|
elit/datasets/parsing/semeval15.py
|
emorynlp/stem-cell-hypothesis
|
48a628093d93d653865fbac6409d179cddd99293
|
[
"Apache-2.0"
] | null | null | null |
elit/datasets/parsing/semeval15.py
|
emorynlp/stem-cell-hypothesis
|
48a628093d93d653865fbac6409d179cddd99293
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding:utf-8 -*-
# Author: hankcs
# Date: 2020-07-28 14:40
# from elit.datasets.parsing.conll_dataset import CoNLLParsingDataset
#
#
# class SemEval15Dataset(CoNLLParsingDataset):
# def load_file(self, filepath: str):
# pass
import warnings
from hanlp_common.constant import ROOT, PAD
from hanlp_common.conll import CoNLLSentence
def unpack_deps_to_head_deprel(sample: dict, pad_rel=None, arc_key='arc', rel_key='rel'):
if 'DEPS' in sample:
deps = ['_'] + sample['DEPS']
sample[arc_key] = arc = []
sample[rel_key] = rel = []
for each in deps:
arc_per_token = [False] * len(deps)
rel_per_token = [None] * len(deps)
if each != '_':
for ar in each.split('|'):
a, r = ar.split(':')
a = int(a)
arc_per_token[a] = True
rel_per_token[a] = r
if not pad_rel:
pad_rel = r
arc.append(arc_per_token)
rel.append(rel_per_token)
if not pad_rel:
pad_rel = PAD
for i in range(len(rel)):
rel[i] = [r if r else pad_rel for r in rel[i]]
return sample
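# Illustrative sketch (not part of the original module): for a sample such as
# {'DEPS': ['2:nsubj', '0:root', '2:obj']}, unpack_deps_to_head_deprel(sample)
# prepends a ROOT slot, so sample['arc'] holds four rows of four booleans
# (row 1 is [False, False, True, False]) and sample['rel'] holds the matching
# labels, with empty cells padded by the first relation seen ('nsubj' here).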
def append_bos_to_form_pos(sample, pos_key='CPOS'):
sample['token'] = [ROOT] + sample['FORM']
if pos_key in sample:
sample['pos'] = [ROOT] + sample[pos_key]
return sample
def merge_head_deprel_with_2nd(sample: dict):
if 'arc' in sample:
arc_2nd = sample['arc_2nd']
rel_2nd = sample['rel_2nd']
for i, (arc, rel) in enumerate(zip(sample['arc'], sample['rel'])):
if i:
if arc_2nd[i][arc] and rel_2nd[i][arc] != rel:
sample_str = CoNLLSentence.from_dict(sample, conllu=True).to_markdown()
warnings.warn(f'The main dependency conflicts with 2nd dependency at ID={i}, ' \
'which means joint mode might not be suitable. ' \
f'The sample is\n{sample_str}')
arc_2nd[i][arc] = True
rel_2nd[i][arc] = rel
return sample
| 34.903226 | 100 | 0.544362 |
4a1d2c87cb84ffc9da569d74b59033e26486b165
| 2,499 |
py
|
Python
|
sdk/synapse/azure-synapse-monitoring/setup.py
|
sammiee5311/azure-sdk-for-python
|
bc99c220bcada3aa7187e915f9df65f4fa0669c5
|
[
"MIT"
] | null | null | null |
sdk/synapse/azure-synapse-monitoring/setup.py
|
sammiee5311/azure-sdk-for-python
|
bc99c220bcada3aa7187e915f9df65f4fa0669c5
|
[
"MIT"
] | null | null | null |
sdk/synapse/azure-synapse-monitoring/setup.py
|
sammiee5311/azure-sdk-for-python
|
bc99c220bcada3aa7187e915f9df65f4fa0669c5
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
#-------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#--------------------------------------------------------------------------
import re
import os.path
from io import open
from setuptools import find_packages, setup
# Change the PACKAGE_NAME only to change folder and different name
PACKAGE_NAME = "azure-synapse-monitoring"
PACKAGE_PPRINT_NAME = "Synapse Monitoring"
# a-b-c => a/b/c
package_folder_path = PACKAGE_NAME.replace('-', '/')
# a-b-c => a.b.c
namespace_name = PACKAGE_NAME.replace('-', '.')
# Version extraction inspired from 'requests'
with open(os.path.join(package_folder_path, 'version.py')
if os.path.exists(os.path.join(package_folder_path, 'version.py'))
else os.path.join(package_folder_path, '_version.py'), 'r') as fd:
version = re.search(r'^VERSION\s*=\s*[\'"]([^\'"]*)[\'"]',
fd.read(), re.MULTILINE).group(1)
if not version:
raise RuntimeError('Cannot find version information')
with open('README.md', encoding='utf-8') as f:
readme = f.read()
with open('CHANGELOG.md', encoding='utf-8') as f:
changelog = f.read()
setup(
name=PACKAGE_NAME,
version=version,
include_package_data=True,
description='Microsoft Azure {} Client Library for Python'.format(PACKAGE_PPRINT_NAME),
long_description=readme + '\n\n' + changelog,
long_description_content_type='text/markdown',
license='MIT License',
author='Microsoft Corporation',
author_email='azpysdkhelp@microsoft.com',
url='https://github.com/Azure/azure-sdk-for-python',
classifiers=[
'Development Status :: 4 - Beta',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Programming Language :: Python :: 3.10',
'License :: OSI Approved :: MIT License',
],
zip_safe=False,
packages=find_packages(exclude=[
'tests',
# Exclude packages that will be covered by PEP420 or nspkg
'azure',
'azure.synapse',
]),
python_requires=">=3.7",
install_requires=[
'msrest>=0.6.21',
'azure-common~=1.1',
'azure-core>=1.20.0,<2.0.0',
],
)
| 34.232877 | 91 | 0.609444 |
4a1d2c91f945fd9d1c396d58932d8e06696825f0
| 21,484 |
py
|
Python
|
zuul.d/octavia/amphorae/drivers/haproxy/rest_api_driver.py
|
yi-cloud/octavia
|
b7f5cfa4c3c454925a90c24984049539228806d7
|
[
"Apache-2.0"
] | null | null | null |
zuul.d/octavia/amphorae/drivers/haproxy/rest_api_driver.py
|
yi-cloud/octavia
|
b7f5cfa4c3c454925a90c24984049539228806d7
|
[
"Apache-2.0"
] | null | null | null |
zuul.d/octavia/amphorae/drivers/haproxy/rest_api_driver.py
|
yi-cloud/octavia
|
b7f5cfa4c3c454925a90c24984049539228806d7
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2015 Hewlett-Packard Development Company, L.P.
# Copyright (c) 2015 Rackspace
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import functools
import hashlib
import time
import warnings
from oslo_log import log as logging
import requests
import simplejson
import six
from stevedore import driver as stevedore_driver
from octavia.amphorae.driver_exceptions import exceptions as driver_except
from octavia.amphorae.drivers import driver_base
from octavia.amphorae.drivers.haproxy import exceptions as exc
from octavia.amphorae.drivers.keepalived import vrrp_rest_driver
from octavia.common.config import cfg
from octavia.common import constants as consts
from octavia.common.jinja.haproxy import jinja_cfg
from octavia.common.jinja.lvs import jinja_cfg as jinja_udp_cfg
from octavia.common.tls_utils import cert_parser
from octavia.common import utils
LOG = logging.getLogger(__name__)
API_VERSION = consts.API_VERSION
OCTAVIA_API_CLIENT = (
"Octavia HaProxy Rest Client/{version} "
"(https://wiki.openstack.org/wiki/Octavia)").format(version=API_VERSION)
CONF = cfg.CONF
class HaproxyAmphoraLoadBalancerDriver(
driver_base.AmphoraLoadBalancerDriver,
vrrp_rest_driver.KeepalivedAmphoraDriverMixin):
def __init__(self):
super(HaproxyAmphoraLoadBalancerDriver, self).__init__()
self.client = AmphoraAPIClient()
self.cert_manager = stevedore_driver.DriverManager(
namespace='octavia.cert_manager',
name=CONF.certificates.cert_manager,
invoke_on_load=True,
).driver
self.jinja = jinja_cfg.JinjaTemplater(
base_amp_path=CONF.haproxy_amphora.base_path,
base_crt_dir=CONF.haproxy_amphora.base_cert_dir,
haproxy_template=CONF.haproxy_amphora.haproxy_template,
connection_logging=CONF.haproxy_amphora.connection_logging)
self.udp_jinja = jinja_udp_cfg.LvsJinjaTemplater()
def update_amphora_listeners(self, listeners, amphora_index,
amphorae, timeout_dict=None):
"""Update the amphora with a new configuration.
:param listeners: List of listeners to update.
        :type listeners: list
:param amphora_index: The index of the amphora to update
:type amphora_index: integer
:param amphorae: List of amphorae
:type amphorae: list
:param timeout_dict: Dictionary of timeout values for calls to the
amphora. May contain: req_conn_timeout,
req_read_timeout, conn_max_retries,
conn_retry_interval
:returns: None
Updates the configuration of the listeners on a single amphora.
"""
# if the amphora does not yet have listeners, no need to update them.
if not listeners:
LOG.debug('No listeners found to update.')
return
amp = amphorae[amphora_index]
if amp is None or amp.status == consts.DELETED:
return
# TODO(johnsom) remove when we don't have a process per listener
for listener in listeners:
LOG.debug("%s updating listener %s on amphora %s",
self.__class__.__name__, listener.id, amp.id)
if listener.protocol == 'UDP':
# Generate Keepalived LVS configuration from listener object
config = self.udp_jinja.build_config(listener=listener)
self.client.upload_udp_config(amp, listener.id, config,
timeout_dict=timeout_dict)
self.client.reload_listener(amp, listener.id,
timeout_dict=timeout_dict)
else:
certs = self._process_tls_certificates(listener)
# Generate HaProxy configuration from listener object
config = self.jinja.build_config(
host_amphora=amp,
listener=listener,
tls_cert=certs['tls_cert'])
self.client.upload_config(amp, listener.id, config,
timeout_dict=timeout_dict)
self.client.reload_listener(amp, listener.id,
timeout_dict=timeout_dict)
def _udp_update(self, listener, vip):
LOG.debug("Amphora %s keepalivedlvs, updating "
"listener %s, vip %s",
self.__class__.__name__, listener.protocol_port,
vip.ip_address)
for amp in listener.load_balancer.amphorae:
if amp.status != consts.DELETED:
# Generate Keepalived LVS configuration from listener object
config = self.udp_jinja.build_config(listener=listener)
self.client.upload_udp_config(amp, listener.id, config)
self.client.reload_listener(amp, listener.id)
def update(self, listener, vip):
if listener.protocol == 'UDP':
self._udp_update(listener, vip)
else:
LOG.debug("Amphora %s haproxy, updating listener %s, "
"vip %s", self.__class__.__name__,
listener.protocol_port,
vip.ip_address)
# Process listener certificate info
certs = self._process_tls_certificates(listener)
for amp in listener.load_balancer.amphorae:
if amp.status != consts.DELETED:
# Generate HaProxy configuration from listener object
config = self.jinja.build_config(
host_amphora=amp,
listener=listener,
tls_cert=certs['tls_cert'])
self.client.upload_config(amp, listener.id, config)
self.client.reload_listener(amp, listener.id)
def upload_cert_amp(self, amp, pem):
LOG.debug("Amphora %s updating cert in REST driver "
"with amphora id %s,",
self.__class__.__name__, amp.id)
self.client.update_cert_for_rotation(amp, pem)
def _apply(self, func, listener=None, amphora=None, *args):
if amphora is None:
for amp in listener.load_balancer.amphorae:
if amp.status != consts.DELETED:
func(amp, listener.id, *args)
else:
if amphora.status != consts.DELETED:
func(amphora, listener.id, *args)
def stop(self, listener, vip):
self._apply(self.client.stop_listener, listener)
def start(self, listener, vip, amphora=None):
self._apply(self.client.start_listener, listener, amphora)
def delete(self, listener, vip):
self._apply(self.client.delete_listener, listener)
def get_info(self, amphora):
return self.client.get_info(amphora)
def get_diagnostics(self, amphora):
pass
def finalize_amphora(self, amphora):
pass
def post_vip_plug(self, amphora, load_balancer, amphorae_network_config):
if amphora.status != consts.DELETED:
subnet = amphorae_network_config.get(amphora.id).vip_subnet
# NOTE(blogan): using the vrrp port here because that
# is what the allowed address pairs network driver sets
# this particular port to. This does expose a bit of
# tight coupling between the network driver and amphora
# driver. We will need to revisit this to try and remove
# this tight coupling.
# NOTE (johnsom): I am loading the vrrp_ip into the
# net_info structure here so that I don't break
# compatibility with old amphora agent versions.
port = amphorae_network_config.get(amphora.id).vrrp_port
LOG.debug("Post-VIP-Plugging with vrrp_ip %s vrrp_port %s",
amphora.vrrp_ip, port.id)
host_routes = [{'nexthop': hr.nexthop,
'destination': hr.destination}
for hr in subnet.host_routes]
net_info = {'subnet_cidr': subnet.cidr,
'gateway': subnet.gateway_ip,
'mac_address': port.mac_address,
'vrrp_ip': amphora.vrrp_ip,
'mtu': port.network.mtu,
'host_routes': host_routes}
try:
self.client.plug_vip(amphora,
load_balancer.vip.ip_address,
net_info)
except exc.Conflict:
LOG.warning('VIP with MAC %(mac)s already exists on amphora, '
'skipping post_vip_plug',
{'mac': port.mac_address})
def post_network_plug(self, amphora, port):
fixed_ips = []
for fixed_ip in port.fixed_ips:
host_routes = [{'nexthop': hr.nexthop,
'destination': hr.destination}
for hr in fixed_ip.subnet.host_routes]
ip = {'ip_address': fixed_ip.ip_address,
'subnet_cidr': fixed_ip.subnet.cidr,
'host_routes': host_routes}
fixed_ips.append(ip)
port_info = {'mac_address': port.mac_address,
'fixed_ips': fixed_ips,
'mtu': port.network.mtu}
try:
self.client.plug_network(amphora, port_info)
except exc.Conflict:
LOG.warning('Network with MAC %(mac)s already exists on amphora, '
'skipping post_network_plug',
{'mac': port.mac_address})
def _process_tls_certificates(self, listener):
"""Processes TLS data from the listener.
        Converts and uploads PEM data to the Amphora API.
        Returns TLS_CERT and SNI_CERTS.
"""
tls_cert = None
sni_certs = []
certs = []
data = cert_parser.load_certificates_data(
self.cert_manager, listener)
if data['tls_cert'] is not None:
tls_cert = data['tls_cert']
certs.append(tls_cert)
if data['sni_certs']:
sni_certs = data['sni_certs']
certs.extend(sni_certs)
for cert in certs:
pem = cert_parser.build_pem(cert)
md5 = hashlib.md5(pem).hexdigest() # nosec
name = '{id}.pem'.format(id=cert.id)
self._apply(self._upload_cert, listener, None, pem, md5, name)
return {'tls_cert': tls_cert, 'sni_certs': sni_certs}
def _upload_cert(self, amp, listener_id, pem, md5, name):
try:
if self.client.get_cert_md5sum(
amp, listener_id, name, ignore=(404,)) == md5:
return
except exc.NotFound:
pass
self.client.upload_cert_pem(
amp, listener_id, name, pem)
# Check a custom hostname
class CustomHostNameCheckingAdapter(requests.adapters.HTTPAdapter):
def cert_verify(self, conn, url, verify, cert):
conn.assert_hostname = self.uuid
return super(CustomHostNameCheckingAdapter,
self).cert_verify(conn, url, verify, cert)
class AmphoraAPIClient(object):
def __init__(self):
super(AmphoraAPIClient, self).__init__()
self.secure = False
self.get = functools.partial(self.request, 'get')
self.post = functools.partial(self.request, 'post')
self.put = functools.partial(self.request, 'put')
self.delete = functools.partial(self.request, 'delete')
self.head = functools.partial(self.request, 'head')
self.start_listener = functools.partial(self._action,
consts.AMP_ACTION_START)
self.stop_listener = functools.partial(self._action,
consts.AMP_ACTION_STOP)
self.reload_listener = functools.partial(self._action,
consts.AMP_ACTION_RELOAD)
self.start_vrrp = functools.partial(self._vrrp_action,
consts.AMP_ACTION_START)
self.stop_vrrp = functools.partial(self._vrrp_action,
consts.AMP_ACTION_STOP)
self.reload_vrrp = functools.partial(self._vrrp_action,
consts.AMP_ACTION_RELOAD)
self.session = requests.Session()
self.session.cert = CONF.haproxy_amphora.client_cert
self.ssl_adapter = CustomHostNameCheckingAdapter()
self.session.mount('https://', self.ssl_adapter)
def _base_url(self, ip):
if utils.is_ipv6_lla(ip):
ip = '[{ip}%{interface}]'.format(
ip=ip,
interface=CONF.haproxy_amphora.lb_network_interface)
elif utils.is_ipv6(ip):
ip = '[{ip}]'.format(ip=ip)
return "https://{ip}:{port}/{version}/".format(
ip=ip,
port=CONF.haproxy_amphora.bind_port,
version=API_VERSION)
def request(self, method, amp, path='/', timeout_dict=None, **kwargs):
cfg_ha_amp = CONF.haproxy_amphora
if timeout_dict is None:
timeout_dict = {}
req_conn_timeout = timeout_dict.get(
consts.REQ_CONN_TIMEOUT, cfg_ha_amp.rest_request_conn_timeout)
req_read_timeout = timeout_dict.get(
consts.REQ_READ_TIMEOUT, cfg_ha_amp.rest_request_read_timeout)
conn_max_retries = timeout_dict.get(
consts.CONN_MAX_RETRIES, cfg_ha_amp.connection_max_retries)
conn_retry_interval = timeout_dict.get(
consts.CONN_RETRY_INTERVAL, cfg_ha_amp.connection_retry_interval)
LOG.debug("request url %s", path)
_request = getattr(self.session, method.lower())
_url = self._base_url(amp.lb_network_ip) + path
LOG.debug("request url %s", _url)
reqargs = {
'verify': CONF.haproxy_amphora.server_ca,
'url': _url,
'timeout': (req_conn_timeout, req_read_timeout), }
reqargs.update(kwargs)
headers = reqargs.setdefault('headers', {})
headers['User-Agent'] = OCTAVIA_API_CLIENT
self.ssl_adapter.uuid = amp.id
exception = None
# Keep retrying
for a in six.moves.xrange(conn_max_retries):
try:
with warnings.catch_warnings():
warnings.filterwarnings(
"ignore",
message="A true SSLContext object is not available"
)
r = _request(**reqargs)
LOG.debug('Connected to amphora. Response: %(resp)s',
{'resp': r})
content_type = r.headers.get('content-type', '')
# Check the 404 to see if it is just that the network in the
# amphora is not yet up, in which case retry.
# Otherwise return the response quickly.
if r.status_code == 404:
LOG.debug('Got a 404 (content-type: %(content_type)s) -- '
'connection data: %(content)s',
{'content_type': content_type,
'content': r.content})
if content_type.find("application/json") == -1:
LOG.debug("Amphora agent not ready.")
raise requests.ConnectionError
try:
json_data = r.json().get('details', '')
if 'No suitable network interface found' in json_data:
LOG.debug("Amphora network interface not found.")
raise requests.ConnectionError
except simplejson.JSONDecodeError: # if r.json() fails
pass # TODO(rm_work) Should we do something?
return r
except (requests.ConnectionError, requests.Timeout) as e:
exception = e
LOG.warning("Could not connect to instance. Retrying.")
time.sleep(conn_retry_interval)
LOG.error("Connection retries (currently set to %(max_retries)s) "
"exhausted. The amphora is unavailable. Reason: "
"%(exception)s",
{'max_retries': conn_max_retries,
'exception': exception})
raise driver_except.TimeOutException()
def upload_config(self, amp, listener_id, config, timeout_dict=None):
r = self.put(
amp,
'listeners/{amphora_id}/{listener_id}/haproxy'.format(
amphora_id=amp.id, listener_id=listener_id), timeout_dict,
data=config)
return exc.check_exception(r)
def get_listener_status(self, amp, listener_id):
r = self.get(
amp,
'listeners/{listener_id}'.format(listener_id=listener_id))
if exc.check_exception(r):
return r.json()
return None
def _action(self, action, amp, listener_id, timeout_dict=None):
r = self.put(amp, 'listeners/{listener_id}/{action}'.format(
listener_id=listener_id, action=action), timeout_dict=timeout_dict)
return exc.check_exception(r)
def upload_cert_pem(self, amp, listener_id, pem_filename, pem_file):
r = self.put(
amp,
'listeners/{listener_id}/certificates/{filename}'.format(
listener_id=listener_id, filename=pem_filename),
data=pem_file)
return exc.check_exception(r)
def update_cert_for_rotation(self, amp, pem_file):
r = self.put(amp, 'certificate', data=pem_file)
return exc.check_exception(r)
def get_cert_md5sum(self, amp, listener_id, pem_filename, ignore=tuple()):
r = self.get(amp,
'listeners/{listener_id}/certificates/{filename}'.format(
listener_id=listener_id, filename=pem_filename))
if exc.check_exception(r, ignore):
return r.json().get("md5sum")
return None
def delete_listener(self, amp, listener_id):
r = self.delete(
amp, 'listeners/{listener_id}'.format(listener_id=listener_id))
return exc.check_exception(r, (404,))
def get_info(self, amp):
r = self.get(amp, "info")
if exc.check_exception(r):
return r.json()
return None
def get_details(self, amp):
r = self.get(amp, "details")
if exc.check_exception(r):
return r.json()
return None
def get_all_listeners(self, amp):
r = self.get(amp, "listeners")
if exc.check_exception(r):
return r.json()
return None
def delete_cert_pem(self, amp, listener_id, pem_filename):
r = self.delete(
amp,
'listeners/{listener_id}/certificates/{filename}'.format(
listener_id=listener_id, filename=pem_filename))
return exc.check_exception(r, (404,))
def plug_network(self, amp, port):
r = self.post(amp, 'plug/network',
json=port)
return exc.check_exception(r)
def plug_vip(self, amp, vip, net_info):
r = self.post(amp,
'plug/vip/{vip}'.format(vip=vip),
json=net_info)
return exc.check_exception(r)
def upload_vrrp_config(self, amp, config):
r = self.put(amp, 'vrrp/upload', data=config)
return exc.check_exception(r)
def _vrrp_action(self, action, amp):
r = self.put(amp, 'vrrp/{action}'.format(action=action))
return exc.check_exception(r)
def get_interface(self, amp, ip_addr, timeout_dict=None):
r = self.get(amp, 'interface/{ip_addr}'.format(ip_addr=ip_addr),
timeout_dict=timeout_dict)
if exc.check_exception(r):
return r.json()
return None
def upload_udp_config(self, amp, listener_id, config, timeout_dict=None):
r = self.put(
amp,
'listeners/{amphora_id}/{listener_id}/udp_listener'.format(
amphora_id=amp.id, listener_id=listener_id), timeout_dict,
data=config)
return exc.check_exception(r)
| 42.968 | 80 | 0.577686 |
4a1d2cee5713369fd75c04e1ddc60f8fc3c9bfe1
| 8,047 |
py
|
Python
|
tests/transitfeed/testfrequency.py
|
Bertware/transitfeed
|
699986d54744d85613c7b1ec6c08e0c333a50dbd
|
[
"Apache-2.0"
] | 647 |
2015-01-03T05:35:57.000Z
|
2022-03-28T03:45:21.000Z
|
tests/transitfeed/testfrequency.py
|
Bertware/transitfeed
|
699986d54744d85613c7b1ec6c08e0c333a50dbd
|
[
"Apache-2.0"
] | 111 |
2015-01-20T21:03:14.000Z
|
2022-01-21T17:30:35.000Z
|
tests/transitfeed/testfrequency.py
|
Bertware/transitfeed
|
699986d54744d85613c7b1ec6c08e0c333a50dbd
|
[
"Apache-2.0"
] | 286 |
2015-01-07T05:25:17.000Z
|
2022-03-24T15:21:14.000Z
|
# Copyright (C) 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Unit tests for the frequency module.
from __future__ import absolute_import
from tests import util
import transitfeed
class FrequencyValidationTestCase(util.ValidationTestCase):
def setUp(self):
util.ValidationTestCase.setUp(self)
self.schedule = self.SimpleSchedule()
trip = transitfeed.Trip()
trip.route_id = '054C'
trip.service_id = 'WEEK'
trip.trip_id = '054C-00'
trip.trip_headsign = 'via Polish Hill'
trip.direction_id = '0'
trip.block_id = None
trip.shape_id = None
self.schedule.AddTripObject(trip, self.problems, True)
self.trip = trip
def testNonOverlappingPeriods(self):
headway_period1 = transitfeed.Frequency({'trip_id': '054C-00',
'start_time': '06:00:00',
'end_time': '12:00:00',
'headway_secs': 600,
})
headway_period2 = transitfeed.Frequency({'trip_id': '054C-00',
'start_time': '01:00:00',
'end_time': '02:00:00',
'headway_secs': 1200,
})
headway_period3 = transitfeed.Frequency({'trip_id': '054C-00',
'start_time': '04:00:00',
'end_time': '05:00:00',
'headway_secs': 1000,
})
headway_period4 = transitfeed.Frequency({'trip_id': '054C-00',
'start_time': '12:00:00',
'end_time': '19:00:00',
'headway_secs': 700,
})
# expect no problems for non-overlapping periods
headway_period1.AddToSchedule(self.schedule, self.problems)
headway_period2.AddToSchedule(self.schedule, self.problems)
headway_period3.AddToSchedule(self.schedule, self.problems)
headway_period4.AddToSchedule(self.schedule, self.problems)
self.trip.Validate(self.problems)
self.accumulator.AssertNoMoreExceptions()
self.trip.ClearFrequencies()
def testOverlappingPeriods(self):
# overlapping headway periods
headway_period1 = transitfeed.Frequency({'trip_id': '054C-00',
'start_time': '00:00:00',
'end_time': '12:00:00',
'headway_secs': 600,
})
headway_period2 = transitfeed.Frequency({'trip_id': '054C-00',
'start_time': '06:00:00',
'end_time': '18:00:00',
'headway_secs': 1200,
})
headway_period1.AddToSchedule(self.schedule, self.problems)
headway_period2.AddToSchedule(self.schedule, self.problems)
self.ValidateAndExpectOtherProblem(self.trip)
self.trip.ClearFrequencies()
self.accumulator.AssertNoMoreExceptions()
def testPeriodWithInvalidTripId(self):
headway_period1 = transitfeed.Frequency({'trip_id': 'foo',
'start_time': '00:00:00',
'end_time': '12:00:00',
'headway_secs': 600,
})
headway_period1.AddToSchedule(self.schedule, self.problems)
e = self.accumulator.PopException('InvalidValue')
self.assertEqual('trip_id', e.column_name)
self.trip.ClearFrequencies()
def testExactTimesStringValueConversion(self):
# Test that no exact_times converts to 0
frequency = transitfeed.Frequency(
field_dict={"trip_id": "AB1,10", "start_time": "10:00:00",
"end_time": "23:01:00", "headway_secs": "1800"})
frequency.ValidateBeforeAdd(self.problems)
self.assertEquals(frequency.ExactTimes(), 0)
# Test that empty exact_times converts to 0
frequency = transitfeed.Frequency(
field_dict={"trip_id": "AB1,10", "start_time": "10:00:00",
"end_time": "23:01:00", "headway_secs": "1800",
"exact_times": ""})
frequency.ValidateBeforeAdd(self.problems)
self.assertEquals(frequency.ExactTimes(), 0)
# Test that exact_times "0" converts to 0
frequency = transitfeed.Frequency(
field_dict={"trip_id": "AB1,10", "start_time": "10:00:00",
"end_time": "23:01:00", "headway_secs": "1800",
"exact_times": "0"})
frequency.ValidateBeforeAdd(self.problems)
self.assertEquals(frequency.ExactTimes(), 0)
# Test that exact_times "1" converts to 1
frequency = transitfeed.Frequency(
field_dict={"trip_id": "AB1,10", "start_time": "10:00:00",
"end_time": "23:01:00", "headway_secs": "1800",
"exact_times": "1"})
frequency.ValidateBeforeAdd(self.problems)
self.assertEquals(frequency.ExactTimes(), 1)
self.accumulator.AssertNoMoreExceptions()
def testExactTimesAsIntValue(self):
# Test that exact_times None converts to 0
frequency = transitfeed.Frequency(
field_dict={"trip_id": "AB1,10", "start_time": "10:00:00",
"end_time": "23:01:00", "headway_secs": "1800",
"exact_times": None})
frequency.ValidateBeforeAdd(self.problems)
self.assertEquals(frequency.ExactTimes(), 0)
# Test that exact_times 0 remains 0
frequency = transitfeed.Frequency(
field_dict={"trip_id": "AB1,10", "start_time": "10:00:00",
"end_time": "23:01:00", "headway_secs": "1800",
"exact_times": 0})
frequency.ValidateBeforeAdd(self.problems)
self.assertEquals(frequency.ExactTimes(), 0)
# Test that exact_times 1 remains 1
frequency = transitfeed.Frequency(
field_dict={"trip_id": "AB1,10", "start_time": "10:00:00",
"end_time": "23:01:00", "headway_secs": "1800",
"exact_times": 1})
frequency.ValidateBeforeAdd(self.problems)
self.assertEquals(frequency.ExactTimes(), 1)
self.accumulator.AssertNoMoreExceptions()
def testExactTimesInvalidValues(self):
# Test that exact_times 15 raises error
frequency = transitfeed.Frequency(
field_dict={"trip_id": "AB1,10", "start_time": "10:00:00",
"end_time": "23:01:00", "headway_secs": "1800",
"exact_times": 15})
frequency.ValidateBeforeAdd(self.problems)
self.accumulator.PopInvalidValue("exact_times")
self.accumulator.AssertNoMoreExceptions()
# Test that exact_times "yes" raises error
frequency = transitfeed.Frequency(
field_dict={"trip_id": "AB1,10", "start_time": "10:00:00",
"end_time": "23:01:00", "headway_secs": "1800",
"exact_times": "yes"})
frequency.ValidateBeforeAdd(self.problems)
self.accumulator.PopInvalidValue("exact_times")
self.accumulator.AssertNoMoreExceptions()
| 47.89881 | 74 | 0.562446 |
4a1d2da58f693ab49d5cd09a5fa055cb47d8fe92
| 758 |
py
|
Python
|
backend/backend/urls.py
|
Alex-code-01/gap-service
|
b63f9cb1ad6d1fbfdf09f75fc8c51ca8ed952592
|
[
"MIT"
] | null | null | null |
backend/backend/urls.py
|
Alex-code-01/gap-service
|
b63f9cb1ad6d1fbfdf09f75fc8c51ca8ed952592
|
[
"MIT"
] | 8 |
2021-03-27T18:30:40.000Z
|
2021-04-23T17:34:58.000Z
|
backend/backend/urls.py
|
Alex-code-01/gap-service
|
b63f9cb1ad6d1fbfdf09f75fc8c51ca8ed952592
|
[
"MIT"
] | 4 |
2021-05-08T08:57:34.000Z
|
2021-05-21T19:05:21.000Z
|
"""backend URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
urlpatterns = [
path('admin/', admin.site.urls),
]
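# Illustrative sketch only ('myapp' is a placeholder, not an app in this
# project): following the steps in the module docstring, an app's routes would
# be wired in as
#   from django.urls import include, path
#   urlpatterns += [path('myapp/', include('myapp.urls'))]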
| 34.454545 | 77 | 0.709763 |
4a1d2ee44806b242fb175561d905d1bd6bed521f
| 2,701 |
py
|
Python
|
metalibm_core/utility/arg_utils.py
|
metalibm/metalibm-clone
|
d04839e58950a156b79b763b9f45cb874e21ebfe
|
[
"MIT"
] | 27 |
2018-03-12T16:49:36.000Z
|
2021-12-15T06:53:55.000Z
|
metalibm_core/utility/arg_utils.py
|
nibrunie/metalibm
|
776b044f5f323ef907a8724d9ce9a27a482f6cc5
|
[
"MIT"
] | 57 |
2018-03-12T16:49:56.000Z
|
2021-03-04T15:25:39.000Z
|
metalibm_core/utility/arg_utils.py
|
nibrunie/metalibm
|
776b044f5f323ef907a8724d9ce9a27a482f6cc5
|
[
"MIT"
] | 4 |
2018-03-12T15:40:22.000Z
|
2018-11-28T14:34:54.000Z
|
# -*- coding: utf-8 -*-
###############################################################################
# This file is part of metalibm (https://github.com/kalray/metalibm)
###############################################################################
# MIT License
#
# Copyright (c) 2018 Kalray
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
###############################################################################
import sys
from .log_report import Log
def test_flag_option(flag_name, flag_value, default_value, parse_arg = None, help_map = None, help_str = ""):
if help_map != None:
help_map[flag_name] = "[yes=%s|no=%s] %s" % (flag_value, default_value, help_str)
if flag_name in sys.argv and parse_arg:
parse_arg.append(sys.argv.index(flag_name))
return flag_value if flag_name in sys.argv else default_value
def extract_option_value(option_name, default_value, help_map = None, help_str = "", processing = lambda x: x, parse_arg = None):
if help_map != None:
help_map[option_name] = "[%s] %s" % (default_value, help_str)
if option_name in sys.argv:
option_index = sys.argv.index(option_name)
if option_index + 1 >= len(sys.argv):
Log.report(Log.Error, "missing value for option argument: %s" % option_name)
elif parse_arg:
parse_arg.append(option_index)
parse_arg.append(option_index+1)
return processing(sys.argv[sys.argv.index(option_name)+1] if option_name in sys.argv else default_value)
def extract_option_list_value(option_name, default_value):
    return sys.argv[sys.argv.index(option_name) + 1].split(",") if option_name in sys.argv else default_value
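# Illustrative sketch (the option names below are hypothetical, not taken from
# metalibm itself): with sys.argv == ['metalibm.py', '--precision', 'binary64'],
#   extract_option_value('--precision', 'binary32') returns 'binary64', while
#   test_flag_option('--fuse-fma', True, False) returns False because the flag
#   is absent from sys.argv.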
| 50.018519 | 129 | 0.668271 |
4a1d2f0b3b8e0242b9c8dcef4123452d18d05dad
| 1,289 |
py
|
Python
|
django-classic-leads/setup.py
|
kailashchandra92/django-classic-user-account
|
f3a4e5c0b424d8ed63d4f393bfa141a2de526c9a
|
[
"BSD-2-Clause"
] | null | null | null |
django-classic-leads/setup.py
|
kailashchandra92/django-classic-user-account
|
f3a4e5c0b424d8ed63d4f393bfa141a2de526c9a
|
[
"BSD-2-Clause"
] | null | null | null |
django-classic-leads/setup.py
|
kailashchandra92/django-classic-user-account
|
f3a4e5c0b424d8ed63d4f393bfa141a2de526c9a
|
[
"BSD-2-Clause"
] | null | null | null |
import os
from setuptools import find_packages, setup
with open(os.path.join(os.path.dirname(__file__), 'README.rst')) as readme:
README = readme.read()
# allow setup.py to be run from any path
os.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir)))
setup(
name='django-classic-user-accounts',
version='1.0.20',
packages=find_packages(),
include_package_data=True,
license='BSD License', # example license
description='A extended user profile for Django.',
long_description=README,
url='https://www.example.com/',
author='Suman Kumar',
author_email='sumankumar72@gmail.com',
install_requires=[
'Pillow',
'sorl-thumbnail',
],
classifiers=[
'Environment :: Web Environment',
'Framework :: Django',
'Framework :: Django :: 2.1', # replace "X.Y" as appropriate
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License', # example license
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
],
)
| 33.051282 | 78 | 0.631497 |
4a1d2fc6eda877e92101d99b44846437ac6790fd
| 785 |
py
|
Python
|
examples/datetimecol.py
|
nullptrT/flask_table
|
d4577307bf3b790fb1d91238019577beb477ee4a
|
[
"BSD-3-Clause"
] | 215 |
2015-01-09T12:18:19.000Z
|
2022-01-31T00:18:29.000Z
|
examples/datetimecol.py
|
nullptrT/flask_table
|
d4577307bf3b790fb1d91238019577beb477ee4a
|
[
"BSD-3-Clause"
] | 93 |
2015-02-03T22:39:02.000Z
|
2022-01-26T04:12:16.000Z
|
examples/datetimecol.py
|
nullptrT/flask_table
|
d4577307bf3b790fb1d91238019577beb477ee4a
|
[
"BSD-3-Clause"
] | 48 |
2015-04-29T09:23:34.000Z
|
2022-01-21T13:50:39.000Z
|
import os
from datetime import datetime
# Run this example with LC_TIME=[other locale] to use a different
# locale's datetime formatting, eg:
#
# LC_TIME=en_US python examples/datetimecol.py
# or
# LC_TIME=en_GB python examples/datetimecol.py
os.environ.setdefault('LC_TIME', 'en_GB') # noqa
from flask_table import Table, Col, DatetimeCol
class Item(object):
def __init__(self, name, dt):
self.name = name
self.dt = dt
class ItemTable(Table):
name = Col('Name')
dt = DatetimeCol('Datetime')
def main():
items = [
Item('Name1', datetime.now()),
Item('Name2', datetime(2018, 1, 1, 12, 34, 56)),
]
table = ItemTable(items)
# or {{ table }} in jinja
print(table.__html__())
if __name__ == '__main__':
main()
| 20.128205 | 65 | 0.647134 |
4a1d2ff5691231775cc7de87fcf96f892391ea28
| 12,262 |
py
|
Python
|
venv/lib/python3.6/site-packages/thinc/neural/_classes/model.py
|
lumierra/project-flask
|
6e27148299a283c92f5d758d269f3b5fc6e2163e
|
[
"MIT"
] | 1 |
2018-10-30T07:19:27.000Z
|
2018-10-30T07:19:27.000Z
|
venv/lib/python3.6/site-packages/thinc/neural/_classes/model.py
|
lumierra/project-flask
|
6e27148299a283c92f5d758d269f3b5fc6e2163e
|
[
"MIT"
] | 4 |
2020-07-26T02:10:42.000Z
|
2021-03-31T18:48:58.000Z
|
venv/lib/python3.6/site-packages/thinc/neural/_classes/model.py
|
lumierra/project-flask
|
6e27148299a283c92f5d758d269f3b5fc6e2163e
|
[
"MIT"
] | 1 |
2020-11-18T06:18:15.000Z
|
2020-11-18T06:18:15.000Z
|
from __future__ import division, unicode_literals
from numpy import prod
import numpy
import contextlib
import msgpack
import msgpack_numpy
from collections import OrderedDict
msgpack_numpy.patch()
from .. import util
from ..train import Trainer
from ..ops import NumpyOps, CupyOps
from ..mem import Memory
from ..util import get_ops, copy_array, ensure_path
from ... import check
from ... import describe
from ...check import equal_length, has_shape, is_sequence, is_float, is_array
class Model(object):
'''Model base class.'''
name = 'model'
id = 0
lsuv = False
ops = NumpyOps()
Ops = NumpyOps
Trainer = Trainer
drop_factor = 1.0
descriptions = []
on_data_hooks = []
on_init_hooks = [] # Use this to add layers
_operators = {}
@classmethod
@contextlib.contextmanager
def define_operators(cls, operators):
'''Bind operators to specified functions for the scope of the context:
Example
-------
model = Model()
other = Model()
with Model.define_operators({"+": lambda self, other: "plus"}):
print(model + other)
# "plus"
print(model + other)
# Raises TypeError --- binding limited to scope of with block.
'''
old_ops = dict(cls._operators)
for op, func in operators.items():
cls._operators[op] = func
yield
cls._operators = old_ops
@classmethod
@contextlib.contextmanager
def use_device(cls, device):
'''Change the device to execute on for the scope of the block.'''
if device == cls.ops.device:
yield
else:
curr_Ops, curr_ops = (cls.Ops, cls.ops)
cls.Ops = get_ops(device)
cls.ops = cls.Ops()
yield
cls.Ops = curr_Ops
cls.ops = curr_ops
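    # Illustrative only (not in the original source): the device string is
    # handed straight to util.get_ops(), so -- assuming 'cpu' is a name that
    # get_ops() recognizes -- a temporary switch looks like
    #   with Model.use_device('cpu'):
    #       ...run on NumpyOps...
    # and the previous ops are restored when the block exits.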
@property
def input_shape(self):
raise NotImplementedError
@property
def output_shape(self):
raise NotImplementedError
def __init__(self, *args, **kwargs):
self.name = self.__class__.name
self.ops = self.Ops()
kwargs = self._update_defaults(args, kwargs)
self._mem = Memory(self.ops)
self._dims = {}
if not hasattr(self, '_layers'):
self._layers = []
self.descriptions = dict(self.descriptions)
self.on_init_hooks = list(self.on_init_hooks)
self.on_data_hooks = list(self.on_data_hooks)
for attr, install in self.descriptions.items():
install(attr, self)
for hook in self.on_init_hooks:
hook(self, *args, **kwargs)
self.set_id()
def _update_defaults(self, args, kwargs):
new_kwargs = {}
for key, value in kwargs.items():
if hasattr(self, key):
setattr(self, key, value)
else:
new_kwargs[key] = value
return new_kwargs
def set_id(self):
Model.id += 1
self.id = Model.id
for layer in self._layers:
layer.set_id()
#@check.args(equal_length)
@check.arg(1, is_sequence)
def begin_training(self, train_X, train_y=None, **trainer_cfg):
for hook in self.on_data_hooks:
hook(self, train_X, train_y)
return self.Trainer(self, **trainer_cfg)
@check.arg(2, is_float)
@check.arg(1, has_shape(('nB', 'nI')))
def begin_update(self, X, drop=0.0):
raise NotImplementedError
def predict(self, X):
y, _ = self.begin_update(X)
return y
def predict_one(self, x):
X = self.ops.expand_dims(x, axis=0)
return self.predict(X)[0]
@contextlib.contextmanager
def use_params(self, params): # pragma: no cover
backup = None
weights = self._mem.weights
if self.id in params:
param = params[self.id]
backup = weights.copy()
copy_array(weights, param)
if hasattr(self, '_layers'):
contexts = [layer.use_params(params) for layer in self._layers]
for context in contexts:
next(context.gen)
yield
if backup is not None:
copy_array(self._mem.weights, backup)
for i, context in enumerate(contexts):
# This is ridiculous, but apparently it's what you
# have to do to make this work across Python 2/3?
try:
next(context.gen)
except StopIteration:
pass
def __call__(self, x):
'''
x
Must match expected type
Must match expected shape
'''
return self.predict(x)
def pipe(self, stream, batch_size=128):
for batch in util.minibatch(stream, batch_size):
ys = self.predict(batch)
for y in ys:
yield y
def update(self, stream, batch_size=1000):
for X, y in util.minibatch(stream, batch_size=batch_size):
output, finish_update = self.begin_update(X)
gradient = finish_update(y)
yield gradient
def to_gpu(self, device_num):
import cupy.cuda.device
device = cupy.cuda.device.Device(device_num)
device.use()
queue = [self]
for layer in queue:
layer.ops = CupyOps()
layer.Ops = CupyOps
if hasattr(layer, u'_mem'):
layer._mem._mem = self.ops.xp.asarray(layer._mem._mem)
layer._mem.ops = layer.ops
if hasattr(layer, u'_layers'):
queue.extend(layer._layers)
return device
def to_cpu(self):
queue = [self]
for layer in queue:
layer.ops = NumpyOps()
layer.Ops = NumpyOps
if hasattr(layer, u'_mem'):
if hasattr(layer._mem._mem, 'get'):
layer._mem._mem = layer._mem._mem.get()
layer._mem.ops = layer.ops
if hasattr(layer, u'_layers'):
queue.extend(layer._layers)
def evaluate(self, X, y):
'''
x
Must match expected type
Must match expected shape
y
Must match expected type
'''
scores = self.ops.flatten(list(self.pipe(X)))
if not hasattr(y, 'shape'):
y = self.ops.flatten(y)
scores = scores.reshape(y.shape)
if len(scores.shape) == 1:
correct = ((scores >= 0.5) == (y >= 0.5)).sum()
else:
correct = (scores.argmax(axis=1) == y.argmax(axis=1)).sum()
return correct / y.shape[0]
def evaluate_logloss(self, X, y, minimum=None, maximum=None):
yh = self.ops.xp.vstack(self.pipe(X))
yh = yh.reshape(y.shape)
if minimum is not None:
yh = self.ops.xp.maximum(yh, minimum)
if maximum is not None:
yh = self.ops.xp.minimum(yh, maximum)
assert len(yh.shape) == 1
losses = -y * self.ops.xp.log(yh + 1e-8) - (1-y) * self.ops.xp.log((1-yh)+1e-8)
return losses.mean()
@check.operator_is_defined('+')
def __add__(self, other):
'''Apply the function bound to the '+' operator.'''
return self._operators['+'](self, other)
@check.operator_is_defined('-')
def __sub__(self, other):
'''Apply the function bound to the '-' operator.'''
return self._operators['-'](self, other)
@check.operator_is_defined('*')
def __mul__(self, other):
'''Apply the function bound to the '*' operator.'''
return self._operators['*'](self, other)
@check.operator_is_defined('@')
def __matmul__(self, other):
'''Apply the function bound to the '@' operator.'''
return self._operators['@'](self, other)
@check.operator_is_defined('/')
def __div__(self, other):
'''Apply the function bound to the '/' operator.'''
return self._operators['/'](self, other)
@check.operator_is_defined('/')
def __truediv__(self, other): # pragma: no cover
'''Apply the function bound to the '/' operator.'''
return self._operators['/'](self, other)
@check.operator_is_defined('//')
def __floordiv__(self, other):
'''Apply the function bound to the '//' operator.'''
return self._operators['//'](self, other)
@check.operator_is_defined('%')
def __mod__(self, other):
'''Apply the function bound to the '%' operator.'''
return self._operators['%'](self, other)
@check.operator_is_defined('**')
def __pow__(self, other, modulo=None):
'''Apply the function bound to the '**' operator.'''
return self._operators['**'](self, other)
@check.operator_is_defined('<<')
def __lshift__(self, other):
'''Apply the function bound to the '<<' operator.'''
return self._operators['<<'](self, other)
@check.operator_is_defined('>>')
def __rshift__(self, other):
'''Apply the function bound to the '>>' operator.'''
return self._operators['>>'](self, other)
@check.operator_is_defined('&')
def __and__(self, other):
'''Apply the function bound to the '&' operator.'''
return self._operators['&'](self, other)
@check.operator_is_defined('^')
def __xor__(self, other):
'''Apply the function bound to the '^' operator.'''
return self._operators['^'](self, other)
@check.operator_is_defined('|')
def __or__(self, other):
'''Apply the function bound to the '|' operator.'''
return self._operators['|'](self, other)
def to_bytes(self):
weights = []
queue = [self]
i = 0
for layer in queue:
if hasattr(layer, u'_mem'):
weights.append(OrderedDict((
(b'dims', OrderedDict(sorted(layer._dims.items()))),
(b'params', []))))
if hasattr(layer, u'seed'):
weights[-1][b'seed'] = layer.seed
offsets = sorted(layer._mem._offsets.items())
for (id_, name), (start, row, shape) in offsets:
if row == 1:
continue
param = layer._mem.get((id_, name))
if not isinstance(layer._mem.weights, numpy.ndarray):
param = param.get()
weights[-1][b'params'].append(
OrderedDict((
(b'name', name),
(b'offset', start),
(b'shape', shape),
(b'value', param),
))
)
i += 1
if hasattr(layer, u'_layers'):
queue.extend(layer._layers)
return msgpack.dumps({b'weights': weights}, use_bin_type=True,
encoding='utf8')
def from_bytes(self, bytes_data):
data = msgpack.loads(bytes_data, encoding='utf8')
weights = data[b'weights']
queue = [self]
i = 0
for layer in queue:
if hasattr(layer, '_mem'):
if b'seed' in weights[i]:
layer.seed = weights[i][b'seed']
for dim, value in weights[i][b'dims'].items():
if isinstance(dim, bytes):
dim = dim.decode('utf8')
setattr(layer, dim, value)
for param in weights[i][b'params']:
name = param[b'name']
if isinstance(name, bytes):
name = name.decode('utf8')
dest = getattr(layer, name)
copy_array(dest, param[b'value'])
i += 1
if hasattr(layer, '_layers'):
queue.extend(layer._layers)
return self
def to_disk(self, path):
path = util.ensure_path(path)
with path.open('wb') as file_:
file_.write(self.to_bytes())
def from_disk(self, path):
path = util.ensure_path(path)
with path.open('rb') as file_:
bytes_data = file_.read()
return self.from_bytes(bytes_data)
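# --- Illustrative sketch (not part of the original source): it exercises the
# serialization API defined above. `model` is assumed to be any instance of
# this model class; the path is an arbitrary example location.
def _serialization_roundtrip_example(model, path='/tmp/model.bin'):
    data = model.to_bytes()        # msgpack payload of dims and weights
    model.from_bytes(data)         # restore in place from the same payload
    model.to_disk(path)            # the disk helpers wrap the byte protocol
    return model.from_disk(path)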
| 33.320652 | 87 | 0.545833 |
4a1d30ef98ff9d39fdf6083a1ef1274006c1c684 | 32,950 | py | Python | rlssm/fits_race.py | birgovanandrei/rlssm | 32f3deaa2b69b6cf974a4334ab39b0d3822cf29f | ["MIT"] | 11 | 2021-02-25T11:02:13.000Z | 2021-11-09T00:49:05.000Z | rlssm/fits_race.py | birgovanandrei/rlssm | 32f3deaa2b69b6cf974a4334ab39b0d3822cf29f | ["MIT"] | null | null | null | rlssm/fits_race.py | birgovanandrei/rlssm | 32f3deaa2b69b6cf974a4334ab39b0d3822cf29f | ["MIT"] | 10 | 2021-03-27T13:14:45.000Z | 2022-03-19T15:58:23.000Z |
from __future__ import absolute_import, division, print_function
import os
import re
import pickle
import warnings
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from rlssm import plotting
from .utils import list_individual_variables
from .stan_utility import check_all_diagnostics
from .random import random_rdm_2A, random_rdm_nA, random_lba_2A
from .fits import FittedModel, ModelResults
class raceFittedModel_2A(FittedModel):
def __init__(self,
stan_model,
data,
hierarchical_levels,
model_label,
family,
n_parameters_individual,
n_parameters_trial,
print_diagnostics,
priors):
self.family = family
super().__init__(stan_model,
data,
hierarchical_levels,
model_label,
family,
n_parameters_individual,
n_parameters_trial,
print_diagnostics,
priors)
def extract_results(self, include_rhat, include_waic, pointwise_waic, include_last_values):
if include_rhat:
rhat = self.get_rhat()
else:
rhat = None
if include_waic:
waic = self.calculate_waic(pointwise_waic)
else:
waic = None
if include_last_values:
last_values = self.get_last_values()
else:
last_values = None
# main parameters
if self.parameters_info['hierarchical_levels'] == 2:
main_parameters = self.parameters_info['group_parameters_names_transf']
for p in self.parameters_info['individual_parameters_names']:
main_parameters = np.append(main_parameters, list_individual_variables(p, self.data_info['L']))
else:
main_parameters = self.parameters_info['parameters_names_transf']
par_to_display = list(np.append(['chain', 'draw'], main_parameters))
samples = self.stan_model.to_dataframe(pars=list(main_parameters),
permuted=True,
diagnostics=False,
inc_warmup=False)[par_to_display].reset_index(drop=True)
# trial parameters
f_label = self.family.split('_')[0]
if f_label == 'LBA' or f_label == 'ALBA' or f_label == 'RLLBA' or f_label == 'RLALBA':
trial_samples = self.stan_model.extract(['k_t',
'A_t',
'tau_t',
'drift_cor_t',
'drift_inc_t'])
else:
trial_samples = self.stan_model.extract(['drift_cor_t',
'drift_inc_t',
'threshold_t',
'ndt_t'])
res = raceModelResults_2A(self.model_label,
self.data_info,
self.parameters_info,
self.priors,
rhat,
waic,
last_values,
samples,
trial_samples,
self.family)
return res
class raceModelResults_2A(ModelResults):
def __init__(self,
model_label,
data_info,
parameters_info,
priors,
rhat,
waic,
last_values,
samples,
trial_samples,
family):
self.family = family
super().__init__(model_label,
data_info,
parameters_info,
priors,
rhat,
waic,
last_values,
samples,
trial_samples)
def get_posterior_predictives(self, n_posterior_predictives=500, **kwargs):
if n_posterior_predictives > self.parameters_info['n_posterior_samples']:
warnings.warn("Cannot have more posterior predictive samples than posterior samples. " \
"Will continue with n_posterior_predictives=%s" % self.parameters_info['n_posterior_samples'],
UserWarning,
stacklevel=2)
n_posterior_predictives = self.parameters_info['n_posterior_samples']
f_label = self.family.split('_')[0]
if f_label == 'LBA' or f_label == 'ALBA' or f_label == 'RLLBA' or f_label == 'RLALBA':
k_t = self.trial_samples['k_t'][:n_posterior_predictives, :]
A_t = self.trial_samples['A_t'][:n_posterior_predictives, :]
tau_t = self.trial_samples['tau_t'][:n_posterior_predictives, :]
drift_cor_t = self.trial_samples['drift_cor_t'][:n_posterior_predictives, :]
drift_inc_t = self.trial_samples['drift_inc_t'][:n_posterior_predictives, :]
pp_rt, pp_acc = random_lba_2A(k_t, A_t, tau_t, drift_cor_t, drift_inc_t, **kwargs)
else:
drift_cor_t = self.trial_samples['drift_cor_t'][:n_posterior_predictives, :]
drift_inc_t = self.trial_samples['drift_inc_t'][:n_posterior_predictives, :]
threshold_t = self.trial_samples['threshold_t'][:n_posterior_predictives, :]
ndt_t = self.trial_samples['ndt_t'][:n_posterior_predictives, :]
pp_rt, pp_acc = random_rdm_2A(drift_cor_t, drift_inc_t, threshold_t, ndt_t, **kwargs)
return pp_rt, pp_acc
def get_posterior_predictives_df(self, n_posterior_predictives=500, **kwargs):
"""Calculates posterior predictives of choices and response times.
Parameters
----------
n_posterior_predictives : int
Number of posterior samples to use for posterior predictives calculation.
If n_posterior_predictives is bigger than the posterior samples,
then calculation will continue with the total number of posterior samples.
Other Parameters
----------------
noise_constant : float
Scaling factor of the diffusion decision model.
If changed, drift and threshold would be scaled accordingly.
Not to be changed in most applications.
rt_max : float
Controls the maximum rts that can be predicted.
Making this higher might make the function a bit slower.
dt : float
Controls the time resolution of the diffusion decision model. Default is 1 msec.
Lower values of dt make the function more precise but much slower.
Returns
-------
out : DataFrame
Data frame of shape (n_samples, n_trials*2).
            Response times and accuracy are provided as hierarchical column indices.
"""
pp_rt, pp_acc = self.get_posterior_predictives(n_posterior_predictives, **kwargs)
tmp1 = pd.DataFrame(pp_rt,
index=pd.Index(np.arange(1, len(pp_rt)+1), name='sample'),
columns=pd.MultiIndex.from_product((['rt'],
np.arange(pp_rt.shape[1])+1),
names=['variable', 'trial']))
tmp2 = pd.DataFrame(pp_acc,
index=pd.Index(np.arange(1, len(pp_acc)+1), name='sample'),
columns=pd.MultiIndex.from_product((['accuracy'],
np.arange(pp_acc.shape[1])+1),
names=['variable', 'trial']))
out = pd.concat((tmp1, tmp2), axis=1)
return out
def get_posterior_predictives_summary(self,
n_posterior_predictives=500,
quantiles=None,
**kwargs):
"""Calculates summary of posterior predictives of choices and response times.
The mean proportion of choices (in this case coded as accuracy) is calculated
for each posterior sample across all trials.
Response times are summarized using mean, skewness, and quantiles.
Parameters
----------
n_posterior_predictives : int
Number of posterior samples to use for posterior predictives calculation.
If n_posterior_predictives is bigger than the posterior samples,
then calculation will continue with the total number of posterior samples.
quantiles : list of floats
Quantiles to summarize response times distributions
(separately for correct/incorrect) with.
            Defaults to [.1, .3, .5, .7, .9].
Other Parameters
----------------
noise_constant : float
Scaling factor of the diffusion decision model.
If changed, drift and threshold would be scaled accordingly.
Not to be changed in most applications.
rt_max : float
Controls the maximum rts that can be predicted.
Making this higher might make the function a bit slower.
dt : float
Controls the time resolution of the diffusion decision model. Default is 1 msec.
Lower values of dt make the function more precise but much slower.
Returns
-------
out : DataFrame
Pandas DataFrame, where every row corresponds to a posterior sample.
The columns contains the mean accuracy for each posterior sample,
as well as mean response times, response times skewness and response times quantiles.
"""
if quantiles is None:
quantiles = [.1, .3, .5, .7, .9]
pp = self.get_posterior_predictives_df(
n_posterior_predictives=n_posterior_predictives,
**kwargs)
tmp = pd.DataFrame({'mean_accuracy': pp['accuracy'].mean(axis=1),
'mean_rt': pp['rt'].mean(axis=1),
'skewness': pp['rt'].skew(axis=1, skipna=True)})
pp_rt_inc = pp['rt'][pp['accuracy'] == 0]
pp_rt_cor = pp['rt'][pp['accuracy'] == 1]
q_inc = pp_rt_inc.quantile(q=quantiles, axis=1).T
q_cor = pp_rt_cor.quantile(q=quantiles, axis=1).T
q_inc.columns = ['quant_{}_rt_incorrect'.format(int(c*100)) for c in q_inc.columns]
q_cor.columns = ['quant_{}_rt_correct'.format(int(c*100)) for c in q_cor.columns]
out = pd.concat([tmp, q_inc, q_cor], axis=1)
return out
def plot_mean_posterior_predictives(self,
n_posterior_predictives,
figsize=(20, 8),
post_pred_kws=None,
**kwargs):
"""Plots the mean posterior predictives of choices and response times.
The mean proportion of choices (in this case coded as accuracy) is calculated
for each posterior sample across all trials,
and then it's plotted as a distribution.
        The mean accuracy in the data is plotted as a vertical line.
        This allows comparing the real mean with the BCI or HDI of the predictions.
The same is done for response times, and are plotted one next to each other.
Parameters
----------
n_posterior_predictives : int
Number of posterior samples to use for posterior predictives calculation.
If n_posterior_predictives is bigger than the posterior samples,
then calculation will continue with the total number of posterior samples.
figsize : tuple
figure size of the matplotlib figure
Other Parameters
----------------
show_data : bool
Whether to show a vertical line for the mean data. Set to False to not show it.
color : matplotlib color
Color for both the mean data and intervals.
ax : matplotlib axis, optional
If provided, plot on this axis.
Default is set to current Axes.
gridsize : int
Resolution of the kernel density estimation function, default to 100.
clip : tuple
Range for the kernel density estimation function.
Default is min and max values of the distribution.
show_intervals : either "HDI", "BCI", or None
            HDI is better when the distribution is not symmetrical.
If None, then no intervals are shown.
alpha_intervals : float
Alpha level for the intervals.
Default is 5 percent which gives 95 percent BCIs and HDIs.
intervals_kws : dictionary
Additional arguments for the matplotlib fill_between function
that shows shaded intervals.
By default, they are 50 percent transparent.
post_pred_kws : dictionary
Additional parameters to get_posterior_predictives_summary.
Returns
-------
fig : matplotlib.figure.Figure
"""
if post_pred_kws is None:
post_pred_kws = {}
pp_df = self.get_posterior_predictives_summary(n_posterior_predictives, **post_pred_kws)
fig, axes = plt.subplots(1, 2, figsize=figsize)
plotting.plot_mean_prediction(pp_df,
self.data_info['data'],
y_data='accuracy',
y_predictions='mean_accuracy',
ax=axes[0],
**kwargs)
plotting.plot_mean_prediction(pp_df,
self.data_info['data'],
y_data='rt',
y_predictions='mean_rt',
ax=axes[1],
**kwargs)
axes[0].set_xlabel('Mean accuracy')
axes[1].set_xlabel('Mean RTs')
axes[0].set_ylabel('Density')
sns.despine()
return fig
def plot_quantiles_posterior_predictives(self,
n_posterior_predictives,
quantiles=None,
figsize=(20, 8),
post_pred_kws=None,
**kwargs):
"""Plots the quantiles of the posterior predictives of response times,
separately for correct/incorrect responses.
Parameters
----------
n_posterior_predictives : int
Number of posterior samples to use for posterior predictives calculation.
If n_posterior_predictives is bigger than the posterior samples,
then calculation will continue with the total number of posterior samples.
quantiles : list of floats
Quantiles to summarize response times distributions
(separately for correct/incorrect) with.
figsize : tuple
figure size of the matplotlib figure
Other Parameters
----------------
show_data : bool
Whether to show the quantiles of the data. Set to False to not show it.
show_intervals : either "HDI", "BCI", or None
            HDI is better when the distribution is not symmetrical.
If None, then no intervals are shown.
alpha_intervals : float
Alpha level for the intervals.
Default is 5 percent which gives 95 percent BCIs and HDIs.
kind : either 'lines' or 'shades'
Two different styles to plot quantile distributions.
color : matplotlib color
Color for both the data and intervals.
scatter_kws : dictionary
Additional plotting parameters to change how the data points are shown.
intervals_kws : dictionary
Additional plotting parameters to change how the quantile distributions are shown.
post_pred_kws : dictionary
Additional parameters to get_posterior_predictives_summary.
Returns
-------
fig : matplotlib.figure.Figure
"""
if post_pred_kws is None:
post_pred_kws = {}
pp_summary = self.get_posterior_predictives_summary(
n_posterior_predictives=n_posterior_predictives,
quantiles=quantiles,
**post_pred_kws)
fig = plotting.plot_quantiles_prediction(pp_summary,
self.data_info['data'],
'rdm',
quantiles=quantiles,
figsize=figsize,
**kwargs)
return fig
def get_grouped_posterior_predictives_summary(self,
grouping_vars,
n_posterior_predictives=500,
quantiles=None,
**kwargs):
"""Calculates summary of posterior predictives of choices and response times,
separately for a list of grouping variables.
The mean proportion of choices (in this case coded as accuracy) is calculated
for each posterior sample across all trials
        in each combination of conditions.
Response times are summarized using mean, skewness, and quantiles.
For example, if grouping_vars=['reward', 'difficulty'],
posterior predictives will be collapsed
for all combinations of levels of the reward and difficulty variables.
Parameters
----------
grouping_vars : list of strings
They should be existing grouping variables in the data.
n_posterior_predictives : int
Number of posterior samples to use for posterior predictives calculation.
If n_posterior_predictives is bigger than the posterior samples,
then calculation will continue with the total number of posterior samples.
quantiles : list of floats
Quantiles to summarize response times distributions
(separately for correct/incorrect) with.
Other Parameters
----------------
noise_constant : float
Scaling factor of the diffusion decision model.
If changed, drift and threshold would be scaled accordingly.
Not to be changed in most applications.
rt_max : float
Controls the maximum rts that can be predicted.
Making this higher might make the function a bit slower.
dt : float
Controls the time resolution of the diffusion decision model. Default is 1 msec.
Lower values of dt make the function more precise but much slower.
Returns
-------
out : DataFrame
Pandas DataFrame.
The columns contains the mean accuracy for each posterior sample,
as well as mean response times, response times skewness and response times quantiles.
The row index is a pandas.MultIndex, with the grouping variables as higher level
and number of samples as lower level.
"""
if quantiles is None:
quantiles = [.1, .3, .5, .7, .9]
data_copy = self.data_info['data'].copy()
data_copy['trial'] = np.arange(1, self.data_info['N']+ 1)
data_copy.set_index('trial', inplace=True)
pp = self.get_posterior_predictives_df(n_posterior_predictives=n_posterior_predictives,
**kwargs)
tmp = pp.copy().T.reset_index().set_index('trial')
tmp = pd.merge(tmp,
data_copy[grouping_vars],
left_index=True,
right_index=True).reset_index()
tmp_rt = tmp[tmp.variable == 'rt'].drop('variable', axis=1)
tmp_accuracy = tmp[tmp.variable == 'accuracy'].drop('variable', axis=1)
out = pd.concat([tmp_accuracy.groupby(grouping_vars).mean().drop('trial',
axis=1).stack().to_frame('mean_accuracy'),
tmp_rt.groupby(grouping_vars).mean().drop('trial',
axis=1).stack().to_frame('mean_rt'),
tmp_rt.groupby(grouping_vars).skew().drop('trial',
axis=1).stack().to_frame('skewness')],
axis=1)
tmp_accuracy.set_index(list(np.append(grouping_vars, 'trial')), inplace=True)
tmp_rt.set_index(list(np.append(grouping_vars, 'trial')), inplace=True)
pp_rt_low = tmp_rt[tmp_accuracy == 0] # lower boundary (usually incorrect)
pp_rt_up = tmp_rt[tmp_accuracy == 1] # upper boundary (usually correct)
for q in quantiles:
new_col = 'quant_{}_rt_incorrect'.format(int(q*100))
out[new_col] = pp_rt_low.reset_index().groupby(grouping_vars).quantile(q).drop('trial',
axis=1).stack().to_frame('quant')
new_col = 'quant_{}_rt_correct'.format(int(q*100))
out[new_col] = pp_rt_up.reset_index().groupby(grouping_vars).quantile(q).drop('trial',
axis=1).stack().to_frame('quant')
out.index.rename(np.append(grouping_vars, 'sample'), inplace=True)
return out
def plot_mean_grouped_posterior_predictives(self,
grouping_vars,
n_posterior_predictives,
figsize=(20, 8),
post_pred_kws=None,
**kwargs):
"""Plots the mean posterior predictives of choices and response times,
separately for either 1 or 2 grouping variables.
The first grouping variable will be plotted on the x-axis.
The second grouping variable, if provided, will be showed
with a different color per variable level.
Parameters
----------
grouping_vars : list of strings
They should be existing grouping variables in the data.
            The list should be of length 1 or 2.
n_posterior_predictives : int
Number of posterior samples to use for posterior predictives calculation.
If n_posterior_predictives is bigger than the posterior samples,
then calculation will continue with the total number of posterior samples.
Other Parameters
----------------
x_order : list of strings
Order to plot the levels of the first grouping variable in,
otherwise the levels are inferred from the data objects.
hue_order : lists of strings
Order to plot the levels of the second grouping variable (when provided) in,
otherwise the levels are inferred from the data objects.
hue_labels : list of strings
Labels corresponding to hue_order in the legend.
Advised to specify hue_order when using this to avoid confusion.
Only makes sense when the second grouping variable is provided.
show_data : bool
Whether to show a vertical line for the mean data. Set to False to not show it.
show_intervals : either "HDI", "BCI", or None
HDI is better when the distribution is not simmetrical.
If None, then no intervals are shown.
alpha_intervals : float
Alpha level for the intervals.
Default is 5 percent which gives 95 percent BCIs and HDIs.
palette : palette name, list, or dict
Colors to use for the different levels of the second grouping variable (when provided).
Should be something that can be interpreted by color_palette(),
or a dictionary mapping hue levels to matplotlib colors.
color : matplotlib color
Color for both the mean data and intervals.
Only used when there is 1 grouping variable.
ax : matplotlib axis, optional
If provided, plot on this axis.
Default is set to current Axes.
intervals_kws : dictionary
Additional arguments for the matplotlib fill_between function
that shows shaded intervals.
By default, they are 50 percent transparent.
post_pred_kws : dictionary
Additional parameters to get_grouped_posterior_predictives_summary.
Returns
-------
fig : matplotlib.figure.Figure
"""
if np.sum(len(grouping_vars) == np.array([1, 2])) < 1:
            raise ValueError("must be a list of either 1 or 2 values")
if post_pred_kws is None:
post_pred_kws = {}
pp = self.get_grouped_posterior_predictives_summary(grouping_vars,
n_posterior_predictives,
**post_pred_kws)
if len(grouping_vars) == 1:
fig, axes = plt.subplots(1, 2, figsize=figsize)
plotting.plot_grouped_mean_prediction(x=grouping_vars[0],
y_data='accuracy',
y_predictions='mean_accuracy',
predictions=pp,
data=self.data_info['data'],
ax=axes[0],
**kwargs)
plotting.plot_grouped_mean_prediction(x=grouping_vars[0],
y_data='rt',
y_predictions='mean_rt',
predictions=pp,
data=self.data_info['data'],
ax=axes[1],
**kwargs)
else:
fig, axes = plt.subplots(1, 2, figsize=figsize)
plotting.plot_grouped_mean_prediction(x=grouping_vars[0],
y_data='accuracy',
y_predictions='mean_accuracy',
predictions=pp,
data=self.data_info['data'],
hue=grouping_vars[1],
ax=axes[0],
**kwargs)
plotting.plot_grouped_mean_prediction(x=grouping_vars[0],
y_data='rt',
y_predictions='mean_rt',
predictions=pp,
data=self.data_info['data'],
hue=grouping_vars[1],
ax=axes[1],
**kwargs)
axes[0].get_legend().remove()
axes[1].legend(bbox_to_anchor=(1, 1))
axes[0].set_ylabel('Mean accuracy')
axes[1].set_ylabel('Mean RTs')
sns.despine()
return fig
def plot_quantiles_grouped_posterior_predictives(self,
n_posterior_predictives,
grouping_var,
quantiles=None,
figsize=(20, 8),
post_pred_kws=None,
**kwargs):
"""Plots the quantiles of the posterior predictives of response times,
separately for correct/incorrect responses, and 1 grouping variable.
Parameters
----------
n_posterior_predictives : int
Number of posterior samples to use for posterior predictives calculation.
If n_posterior_predictives is bigger than the posterior samples,
then calculation will continue with the total number of posterior samples.
        grouping_var : string
Should be an existing grouping variable in the data.
quantiles : list of floats
Quantiles to summarize response times distributions
(separately for correct/incorrect) with.
figsize : tuple
figure size of the matplotlib figure
Other Parameters
----------------
show_data : bool
Whether to show the quantiles of the data. Set to False to not show it.
show_intervals : either "HDI", "BCI", or None
            HDI is better when the distribution is not symmetrical.
If None, then no intervals are shown.
alpha_intervals : float
Alpha level for the intervals.
Default is 5 percent which gives 95 percent BCIs and HDIs.
kind : either 'lines' or 'shades'
Two different styles to plot quantile distributions.
palette : palette name, list, or dict
            Colors to use for the different levels of the grouping variable.
Should be something that can be interpreted by color_palette(),
or a dictionary mapping hue levels to matplotlib colors.
hue_order : lists of strings
Order to plot the levels of the grouping variable in,
otherwise the levels are inferred from the data objects.
hue_labels : list of strings
Labels corresponding to hue_order in the legend.
Advised to specify hue_order when using this to avoid confusion.
jitter: float
Amount to jitter the grouping variable's levels for better visualization.
scatter_kws : dictionary
Additional plotting parameters to change how the data points are shown.
intervals_kws : dictionary
Additional plotting parameters to change how the quantile distributions are shown.
post_pred_kws : dictionary
Additional parameters to get_grouped_posterior_predictives_summary.
Returns
-------
fig : matplotlib.figure.Figure
"""
if post_pred_kws is None:
post_pred_kws = {}
pp_summary = self.get_grouped_posterior_predictives_summary(
n_posterior_predictives=n_posterior_predictives,
grouping_vars=[grouping_var],
quantiles=quantiles,
**post_pred_kws)
fig = plotting.plot_grouped_quantiles_prediction(pp_summary,
self.data_info['data'],
'rdm',
quantiles=quantiles,
grouping_var=grouping_var,
figsize=figsize,
**kwargs)
return fig
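# --- Illustrative sketch (not part of the original module): outlines how a
# fitted race model is typically checked. `res` is assumed to be a
# raceModelResults_2A instance returned by the package's fitting routine, and
# the sample count is an arbitrary example value.
def _posterior_predictive_check_example(res, n_samples=200):
    summary = res.get_posterior_predictives_summary(n_posterior_predictives=n_samples)
    fig = res.plot_mean_posterior_predictives(n_posterior_predictives=n_samples)
    return summary, fig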
| 42.571059 | 124 | 0.528225 |
4a1d3120cff02dd8ed932177fb132e751e39b658 | 33,801 | py | Python | python/dgl/distributed/rpc.py | marwage/dgl | 15e3ff878c3d8927b6f6fac702e4f74eaee7607a | ["Apache-2.0"] | null | null | null | python/dgl/distributed/rpc.py | marwage/dgl | 15e3ff878c3d8927b6f6fac702e4f74eaee7607a | ["Apache-2.0"] | null | null | null | python/dgl/distributed/rpc.py | marwage/dgl | 15e3ff878c3d8927b6f6fac702e4f74eaee7607a | ["Apache-2.0"] | null | null | null |
"""RPC components. They are typically functions or utilities used by both
server and clients."""
import abc
import pickle
import random
import numpy as np
from .._ffi.object import register_object, ObjectBase
from .._ffi.function import _init_api
from ..base import DGLError
from .. import backend as F
__all__ = ['set_rank', 'get_rank', 'Request', 'Response', 'register_service', \
'create_sender', 'create_receiver', 'finalize_sender', 'finalize_receiver', \
'receiver_wait', 'add_receiver_addr', 'sender_connect', 'read_ip_config', \
'get_num_machines', 'set_num_machines', 'get_machine_id', 'set_machine_id', \
'send_request', 'recv_request', 'send_response', 'recv_response', 'remote_call', \
'send_request_to_machine', 'remote_call_to_machine', 'fast_pull', \
'get_num_client', 'set_num_client', 'client_barrier', 'copy_data_to_shared_memory']
REQUEST_CLASS_TO_SERVICE_ID = {}
RESPONSE_CLASS_TO_SERVICE_ID = {}
SERVICE_ID_TO_PROPERTY = {}
DEFUALT_PORT = 30050
def read_ip_config(filename, num_servers):
"""Read network configuration information of server from file.
    For example, the following TXT shows a 4-machine configuration:
172.31.40.143
172.31.36.140
172.31.47.147
172.31.30.180
Users can also set user-specified port for this network configuration. For example:
172.31.40.143 20090
172.31.36.140 20090
172.31.47.147 20090
172.31.30.180 20090
    Note that DGL supports multiple backup servers that share data with each other
    on the same machine via shared-memory tensors. num_servers should be >= 1. For example,
if we set num_servers to 5, it means that we have 1 main server and 4 backup servers on
current machine.
Parameters
----------
filename : str
Path of IP configuration file.
num_servers : int
Server count on each machine.
Returns
-------
dict
server namebook.
The key is server_id (int)
The value is [machine_id, ip, port, num_servers] ([int, str, int, int])
e.g.,
{0:[0, '172.31.40.143', 30050, 2],
1:[0, '172.31.40.143', 30051, 2],
2:[1, '172.31.36.140', 30050, 2],
3:[1, '172.31.36.140', 30051, 2],
4:[2, '172.31.47.147', 30050, 2],
5:[2, '172.31.47.147', 30051, 2],
6:[3, '172.31.30.180', 30050, 2],
7:[3, '172.31.30.180', 30051, 2]}
"""
assert len(filename) > 0, 'filename cannot be empty.'
assert num_servers > 0, 'num_servers (%d) must be a positive number.' % num_servers
server_namebook = {}
try:
server_id = 0
machine_id = 0
lines = [line.rstrip('\n') for line in open(filename)]
for line in lines:
result = line.split()
if len(result) == 2:
port = int(result[1])
elif len(result) == 1:
port = DEFUALT_PORT
else:
raise RuntimeError('length of result can only be 1 or 2.')
ip_addr = result[0]
for s_count in range(num_servers):
server_namebook[server_id] = [machine_id, ip_addr, port+s_count, num_servers]
server_id += 1
machine_id += 1
except RuntimeError:
print("Error: data format on each line should be: [ip] [port]")
return server_namebook
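# --- Illustrative sketch (not part of the original module): builds a toy
# two-machine configuration file and parses it with read_ip_config. The file
# name, addresses and port below are made up for the example.
def _read_ip_config_example(path='toy_ip_config.txt'):
    with open(path, 'w') as f:
        f.write('172.31.40.143 20090\n172.31.36.140 20090\n')
    # With num_servers=2 this returns 4 entries: one main and one backup
    # server per machine, listening on ports 20090 and 20091.
    return read_ip_config(path, num_servers=2)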
def reset():
"""Reset the rpc context
"""
_CAPI_DGLRPCReset()
def create_sender(max_queue_size, net_type):
"""Create rpc sender of this process.
Parameters
----------
max_queue_size : int
Maximal size (bytes) of network queue buffer.
net_type : str
Networking type. Current options are: 'socket'.
"""
_CAPI_DGLRPCCreateSender(int(max_queue_size), net_type)
def create_receiver(max_queue_size, net_type):
"""Create rpc receiver of this process.
Parameters
----------
max_queue_size : int
Maximal size (bytes) of network queue buffer.
net_type : str
Networking type. Current options are: 'socket'.
"""
_CAPI_DGLRPCCreateReceiver(int(max_queue_size), net_type)
def finalize_sender():
"""Finalize rpc sender of this process.
"""
_CAPI_DGLRPCFinalizeSender()
def finalize_receiver():
"""Finalize rpc receiver of this process.
"""
_CAPI_DGLRPCFinalizeReceiver()
def receiver_wait(ip_addr, port, num_senders):
"""Wait all of the senders' connections.
    This API blocks until all of the senders have connected to the receiver.
Parameters
----------
ip_addr : str
        receiver's IP address, e.g., '192.168.8.12'
port : int
receiver's port
num_senders : int
total number of senders
"""
_CAPI_DGLRPCReceiverWait(ip_addr, int(port), int(num_senders))
def add_receiver_addr(ip_addr, port, recv_id):
"""Add Receiver's IP address to sender's namebook.
Parameters
----------
ip_addr : str
        receiver's IP address, e.g., '192.168.8.12'
port : int
receiver's listening port
recv_id : int
receiver's ID
"""
_CAPI_DGLRPCAddReceiver(ip_addr, int(port), int(recv_id))
def sender_connect():
"""Connect to all the receivers.
"""
_CAPI_DGLRPCSenderConnect()
def set_rank(rank):
"""Set the rank of this process.
If the process is a client, this is equal to client ID. Otherwise, the process
is a server and this is equal to server ID.
Parameters
----------
rank : int
Rank value
"""
_CAPI_DGLRPCSetRank(int(rank))
def get_rank():
"""Get the rank of this process.
If the process is a client, this is equal to client ID. Otherwise, the process
is a server and this is equal to server ID.
Returns
-------
int
Rank value
"""
return _CAPI_DGLRPCGetRank()
def set_machine_id(machine_id):
"""Set current machine ID
Parameters
----------
machine_id : int
Current machine ID
"""
_CAPI_DGLRPCSetMachineID(int(machine_id))
def get_machine_id():
"""Get current machine ID
Returns
-------
int
machine ID
"""
return _CAPI_DGLRPCGetMachineID()
def set_num_machines(num_machines):
"""Set number of machine
Parameters
----------
num_machines : int
Number of machine
"""
_CAPI_DGLRPCSetNumMachines(int(num_machines))
def get_num_machines():
"""Get number of machines
Returns
-------
int
number of machines
"""
return _CAPI_DGLRPCGetNumMachines()
def set_num_server(num_server):
    """Set the total number of servers.
    """
    _CAPI_DGLRPCSetNumServer(int(num_server))
def get_num_server():
    """Get the total number of servers.
    """
    return _CAPI_DGLRPCGetNumServer()
def set_num_client(num_client):
    """Set the total number of clients.
    """
    _CAPI_DGLRPCSetNumClient(int(num_client))
def get_num_client():
    """Get the total number of clients.
    """
    return _CAPI_DGLRPCGetNumClient()
def set_num_server_per_machine(num_server):
    """Set the number of servers per machine.
    """
    _CAPI_DGLRPCSetNumServerPerMachine(num_server)
def get_num_server_per_machine():
    """Get the number of servers per machine.
    """
    return _CAPI_DGLRPCGetNumServerPerMachine()
def incr_msg_seq():
"""Increment the message sequence number and return the old one.
Returns
-------
long
Message sequence number
"""
return _CAPI_DGLRPCIncrMsgSeq()
def get_msg_seq():
"""Get the current message sequence number.
Returns
-------
long
Message sequence number
"""
return _CAPI_DGLRPCGetMsgSeq()
def set_msg_seq(msg_seq):
"""Set the current message sequence number.
Parameters
----------
msg_seq : int
sequence number of current rpc message.
"""
_CAPI_DGLRPCSetMsgSeq(int(msg_seq))
def register_service(service_id, req_cls, res_cls=None):
"""Register a service to RPC.
    Parameters
    ----------
service_id : int
Service ID.
req_cls : class
Request class.
res_cls : class, optional
Response class. If none, the service has no response.
"""
REQUEST_CLASS_TO_SERVICE_ID[req_cls] = service_id
if res_cls is not None:
RESPONSE_CLASS_TO_SERVICE_ID[res_cls] = service_id
SERVICE_ID_TO_PROPERTY[service_id] = (req_cls, res_cls)
def get_service_property(service_id):
"""Get service property.
Parameters
----------
service_id : int
Service ID.
Returns
-------
(class, class)
(Request class, Response class)
"""
return SERVICE_ID_TO_PROPERTY[service_id]
class Request:
"""Base request class"""
@abc.abstractmethod
def __getstate__(self):
"""Get serializable states.
Must be inherited by subclasses. For array members, return them as
individual return values (i.e., do not put them in containers like
dictionary or list).
"""
@abc.abstractmethod
def __setstate__(self, state):
"""Construct the request object from serialized states.
Must be inherited by subclasses.
"""
@abc.abstractmethod
def process_request(self, server_state):
"""Server-side function to process the request.
Must be inherited by subclasses.
Parameters
----------
server_state : ServerState
Server state data.
Returns
-------
Response
Response of this request or None if no response.
"""
@property
def service_id(self):
"""Get service ID."""
cls = self.__class__
sid = REQUEST_CLASS_TO_SERVICE_ID.get(cls, None)
if sid is None:
raise DGLError('Request class {} has not been registered as a service.'.format(cls))
return sid
class Response:
"""Base response class"""
@abc.abstractmethod
def __getstate__(self):
"""Get serializable states.
Must be inherited by subclasses. For array members, return them as
individual return values (i.e., do not put them in containers like
dictionary or list).
"""
@abc.abstractmethod
def __setstate__(self, state):
"""Construct the response object from serialized states.
Must be inherited by subclasses.
"""
@property
def service_id(self):
"""Get service ID."""
cls = self.__class__
sid = RESPONSE_CLASS_TO_SERVICE_ID.get(cls, None)
if sid is None:
raise DGLError('Response class {} has not been registered as a service.'.format(cls))
return sid
def serialize_to_payload(serializable):
"""Serialize an object to payloads.
The object must have implemented the __getstate__ function.
Parameters
----------
serializable : object
Any serializable object.
Returns
-------
bytearray
Serialized payload buffer.
list[Tensor]
A list of tensor payloads.
"""
state = serializable.__getstate__()
if not isinstance(state, tuple):
state = (state,)
nonarray_pos = []
nonarray_state = []
array_state = []
for i, arr_state in enumerate(state):
if F.is_tensor(arr_state):
array_state.append(arr_state)
else:
nonarray_state.append(arr_state)
nonarray_pos.append(i)
data = bytearray(pickle.dumps((nonarray_pos, nonarray_state)))
return data, array_state
class PlaceHolder:
"""PlaceHolder object for deserialization"""
_PLACEHOLDER = PlaceHolder()
def deserialize_from_payload(cls, data, tensors):
"""Deserialize and reconstruct the object from payload.
The object must have implemented the __setstate__ function.
Parameters
----------
cls : class
The object class.
data : bytearray
Serialized data buffer.
tensors : list[Tensor]
A list of tensor payloads.
Returns
-------
object
De-serialized object of class cls.
"""
pos, nonarray_state = pickle.loads(data)
    # Use _PLACEHOLDER to distinguish from other deserialized elements
state = [_PLACEHOLDER] * (len(nonarray_state) + len(tensors))
for i, no_state in zip(pos, nonarray_state):
state[i] = no_state
if len(tensors) != 0:
j = 0
state_len = len(state)
for i in range(state_len):
if state[i] is _PLACEHOLDER:
state[i] = tensors[j]
j += 1
if len(state) == 1:
state = state[0]
else:
state = tuple(state)
obj = cls.__new__(cls)
obj.__setstate__(state)
return obj
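# --- Illustrative sketch (not part of the original module): a minimal
# round-trip through the payload helpers above. The toy class and its single
# string field are invented for the example.
class _EchoStateExample:
    def __init__(self, msg):
        self.msg = msg
    def __getstate__(self):
        return self.msg
    def __setstate__(self, state):
        self.msg = state
def _payload_roundtrip_example():
    data, tensors = serialize_to_payload(_EchoStateExample('hello'))
    restored = deserialize_from_payload(_EchoStateExample, data, tensors)
    return restored.msg  # 'hello'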
@register_object('rpc.RPCMessage')
class RPCMessage(ObjectBase):
"""Serialized RPC message that can be sent to remote processes.
This class can be used as argument or return value for C API.
Attributes
----------
service_id : int
The remote service ID the message wishes to invoke.
msg_seq : int
Sequence number of this message.
client_id : int
The client ID.
server_id : int
The server ID.
data : bytearray
Payload buffer carried by this request.
tensors : list[tensor]
Extra payloads in the form of tensors.
"""
def __init__(self, service_id, msg_seq, client_id, server_id, data, tensors):
self.__init_handle_by_constructor__(
_CAPI_DGLRPCCreateRPCMessage,
int(service_id),
int(msg_seq),
int(client_id),
int(server_id),
data,
[F.zerocopy_to_dgl_ndarray(tsor) for tsor in tensors])
@property
def service_id(self):
"""Get service ID."""
return _CAPI_DGLRPCMessageGetServiceId(self)
@property
def msg_seq(self):
"""Get message sequence number."""
return _CAPI_DGLRPCMessageGetMsgSeq(self)
@property
def client_id(self):
"""Get client ID."""
return _CAPI_DGLRPCMessageGetClientId(self)
@property
def server_id(self):
"""Get server ID."""
return _CAPI_DGLRPCMessageGetServerId(self)
@property
def data(self):
"""Get payload buffer."""
return _CAPI_DGLRPCMessageGetData(self)
@property
def tensors(self):
"""Get tensor payloads."""
rst = _CAPI_DGLRPCMessageGetTensors(self)
return [F.zerocopy_from_dgl_ndarray(tsor) for tsor in rst]
def send_request(target, request):
"""Send one request to the target server.
Serialize the given request object to an :class:`RPCMessage` and send it
out.
The operation is non-blocking -- it does not guarantee the payloads have
reached the target or even have left the sender process. However,
all the payloads (i.e., data and arrays) can be safely freed after this
function returns.
Parameters
----------
target : int
ID of target server.
request : Request
The request to send.
Raises
------
ConnectionError if there is any problem with the connection.
"""
service_id = request.service_id
msg_seq = incr_msg_seq()
client_id = get_rank()
server_id = target
data, tensors = serialize_to_payload(request)
msg = RPCMessage(service_id, msg_seq, client_id, server_id, data, tensors)
send_rpc_message(msg, server_id)
def send_request_to_machine(target, request):
"""Send one request to the target machine, which will randomly
select a server node to process this request.
The operation is non-blocking -- it does not guarantee the payloads have
reached the target or even have left the sender process. However,
all the payloads (i.e., data and arrays) can be safely freed after this
function returns.
Parameters
----------
target : int
ID of target machine.
request : Request
The request to send.
Raises
------
ConnectionError if there is any problem with the connection.
"""
service_id = request.service_id
msg_seq = incr_msg_seq()
client_id = get_rank()
server_id = random.randint(target*get_num_server_per_machine(),
(target+1)*get_num_server_per_machine()-1)
data, tensors = serialize_to_payload(request)
msg = RPCMessage(service_id, msg_seq, client_id, server_id, data, tensors)
send_rpc_message(msg, server_id)
def send_response(target, response):
"""Send one response to the target client.
Serialize the given response object to an :class:`RPCMessage` and send it
out.
The operation is non-blocking -- it does not guarantee the payloads have
reached the target or even have left the sender process. However,
all the payloads (i.e., data and arrays) can be safely freed after this
function returns.
Parameters
----------
target : int
ID of target client.
response : Response
The response to send.
Raises
------
ConnectionError if there is any problem with the connection.
"""
service_id = response.service_id
msg_seq = get_msg_seq()
client_id = target
server_id = get_rank()
data, tensors = serialize_to_payload(response)
msg = RPCMessage(service_id, msg_seq, client_id, server_id, data, tensors)
send_rpc_message(msg, client_id)
def recv_request(timeout=0):
"""Receive one request.
Receive one :class:`RPCMessage` and de-serialize it into a proper Request object.
The operation is blocking -- it returns when it receives any message
or it times out.
Parameters
----------
timeout : int, optional
The timeout value in milliseconds. If zero, wait indefinitely.
Returns
-------
req : request
One request received from the target, or None if it times out.
client_id : int
        Client ID received from the target.
Raises
------
ConnectionError if there is any problem with the connection.
"""
# TODO(chao): handle timeout
msg = recv_rpc_message(timeout)
if msg is None:
return None
set_msg_seq(msg.msg_seq)
req_cls, _ = SERVICE_ID_TO_PROPERTY[msg.service_id]
if req_cls is None:
raise DGLError('Got request message from service ID {}, '
'but no request class is registered.'.format(msg.service_id))
req = deserialize_from_payload(req_cls, msg.data, msg.tensors)
if msg.server_id != get_rank():
raise DGLError('Got request sent to server {}, '
'different from my rank {}!'.format(msg.server_id, get_rank()))
return req, msg.client_id
def recv_response(timeout=0):
"""Receive one response.
Receive one :class:`RPCMessage` and de-serialize it into a proper Response object.
The operation is blocking -- it returns when it receives any message
or it times out.
Parameters
----------
timeout : int, optional
The timeout value in milliseconds. If zero, wait indefinitely.
Returns
-------
res : Response
One response received from the target, or None if it times out.
Raises
------
ConnectionError if there is any problem with the connection.
"""
# TODO(chao): handle timeout
msg = recv_rpc_message(timeout)
if msg is None:
return None
_, res_cls = SERVICE_ID_TO_PROPERTY[msg.service_id]
if res_cls is None:
raise DGLError('Got response message from service ID {}, '
'but no response class is registered.'.format(msg.service_id))
res = deserialize_from_payload(res_cls, msg.data, msg.tensors)
if msg.client_id != get_rank() and get_rank() != -1:
        raise DGLError('Got response of request sent by client {}, '
'different from my rank {}!'.format(msg.client_id, get_rank()))
return res
def remote_call(target_and_requests, timeout=0):
"""Invoke registered services on remote servers and collect responses.
The operation is blocking -- it returns when it receives all responses
or it times out.
If the target server state is available locally, it invokes local computation
to calculate the response.
Parameters
----------
target_and_requests : list[(int, Request)]
A list of requests and the server they should be sent to.
timeout : int, optional
The timeout value in milliseconds. If zero, wait indefinitely.
Returns
-------
list[Response]
Responses for each target-request pair. If the request does not have
response, None is placed.
Raises
------
ConnectionError if there is any problem with the connection.
"""
# TODO(chao): handle timeout
all_res = [None] * len(target_and_requests)
msgseq2pos = {}
num_res = 0
myrank = get_rank()
for pos, (target, request) in enumerate(target_and_requests):
# send request
service_id = request.service_id
msg_seq = incr_msg_seq()
client_id = get_rank()
server_id = random.randint(target*get_num_server_per_machine(),
(target+1)*get_num_server_per_machine()-1)
data, tensors = serialize_to_payload(request)
msg = RPCMessage(service_id, msg_seq, client_id, server_id, data, tensors)
send_rpc_message(msg, server_id)
# check if has response
res_cls = get_service_property(service_id)[1]
if res_cls is not None:
num_res += 1
msgseq2pos[msg_seq] = pos
while num_res != 0:
# recv response
msg = recv_rpc_message(timeout)
num_res -= 1
_, res_cls = SERVICE_ID_TO_PROPERTY[msg.service_id]
if res_cls is None:
raise DGLError('Got response message from service ID {}, '
'but no response class is registered.'.format(msg.service_id))
res = deserialize_from_payload(res_cls, msg.data, msg.tensors)
if msg.client_id != myrank:
            raise DGLError('Got response of request sent by client {}, '
'different from my rank {}!'.format(msg.client_id, myrank))
# set response
all_res[msgseq2pos[msg.msg_seq]] = res
return all_res
def send_requests_to_machine(target_and_requests):
""" Send requests to the remote machines.
    This operation isn't blocking. It returns immediately once all requests have been sent.
Parameters
----------
target_and_requests : list[(int, Request)]
A list of requests and the machine they should be sent to.
timeout : int, optional
The timeout value in milliseconds. If zero, wait indefinitely.
Returns
-------
msgseq2pos : dict
map the message sequence number to its position in the input list.
"""
msgseq2pos = {}
for pos, (target, request) in enumerate(target_and_requests):
# send request
service_id = request.service_id
msg_seq = incr_msg_seq()
client_id = get_rank()
server_id = random.randint(target*get_num_server_per_machine(),
(target+1)*get_num_server_per_machine()-1)
data, tensors = serialize_to_payload(request)
msg = RPCMessage(service_id, msg_seq, client_id, server_id, data, tensors)
send_rpc_message(msg, server_id)
# check if has response
res_cls = get_service_property(service_id)[1]
if res_cls is not None:
msgseq2pos[msg_seq] = pos
return msgseq2pos
def recv_responses(msgseq2pos, timeout=0):
""" Receive responses
It returns the responses in the same order as the requests. The order of requests
    is stored in msgseq2pos.
The operation is blocking -- it returns when it receives all responses
or it times out.
Parameters
----------
msgseq2pos : dict
map the message sequence number to its position in the input list.
timeout : int, optional
The timeout value in milliseconds. If zero, wait indefinitely.
Returns
-------
list[Response]
Responses for each target-request pair. If the request does not have
response, None is placed.
"""
myrank = get_rank()
size = np.max(list(msgseq2pos.values())) + 1
all_res = [None] * size
num_res = len(msgseq2pos)
while num_res != 0:
# recv response
msg = recv_rpc_message(timeout)
num_res -= 1
_, res_cls = SERVICE_ID_TO_PROPERTY[msg.service_id]
if res_cls is None:
raise DGLError('Got response message from service ID {}, '
'but no response class is registered.'.format(msg.service_id))
res = deserialize_from_payload(res_cls, msg.data, msg.tensors)
if msg.client_id != myrank:
            raise DGLError('Got response of request sent by client {}, '
'different from my rank {}!'.format(msg.client_id, myrank))
# set response
all_res[msgseq2pos[msg.msg_seq]] = res
return all_res
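# --- Illustrative sketch (not part of the original module): the asynchronous
# request pattern split into its two halves, mirroring remote_call_to_machine.
# `pairs` is assumed to be a list of (machine_id, Request) tuples for services
# already registered on a live cluster.
def _async_remote_call_example(pairs, timeout=0):
    msgseq2pos = send_requests_to_machine(pairs)  # non-blocking sends
    return recv_responses(msgseq2pos, timeout)    # blocks for all replies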
def remote_call_to_machine(target_and_requests, timeout=0):
"""Invoke registered services on remote machine
    (which will randomly select a server to process the request) and collect responses.
The operation is blocking -- it returns when it receives all responses
or it times out.
If the target server state is available locally, it invokes local computation
to calculate the response.
Parameters
----------
target_and_requests : list[(int, Request)]
A list of requests and the machine they should be sent to.
timeout : int, optional
The timeout value in milliseconds. If zero, wait indefinitely.
Returns
-------
list[Response]
Responses for each target-request pair. If the request does not have
response, None is placed.
Raises
------
ConnectionError if there is any problem with the connection.
"""
# TODO(chao): handle timeout
msgseq2pos = send_requests_to_machine(target_and_requests)
return recv_responses(msgseq2pos, timeout)
def send_rpc_message(msg, target):
"""Send one message to the target server.
The operation is non-blocking -- it does not guarantee the payloads have
reached the target or even have left the sender process. However,
all the payloads (i.e., data and arrays) can be safely freed after this
function returns.
    The data buffer in the request will be copied to an internal buffer for actual
    transmission, while tensor payloads are not copied (a.k.a. zero-copy).
The underlying sending threads will hold references to the tensors until
the contents have been transmitted.
Parameters
----------
msg : RPCMessage
The message to send.
target : int
target ID
Raises
------
ConnectionError if there is any problem with the connection.
"""
_CAPI_DGLRPCSendRPCMessage(msg, int(target))
def recv_rpc_message(timeout=0):
"""Receive one message.
The operation is blocking -- it returns when it receives any message
or it times out.
Parameters
----------
timeout : int, optional
The timeout value in milliseconds. If zero, wait indefinitely.
Returns
-------
msg : RPCMessage
One rpc message received from the target, or None if it times out.
Raises
------
ConnectionError if there is any problem with the connection.
"""
msg = _CAPI_DGLRPCCreateEmptyRPCMessage()
_CAPI_DGLRPCRecvRPCMessage(timeout, msg)
return msg
def client_barrier():
"""Barrier all client processes"""
req = ClientBarrierRequest()
send_request(0, req)
res = recv_response()
assert res.msg == 'barrier'
def finalize_server():
"""Finalize resources of current server
"""
finalize_sender()
finalize_receiver()
print("Server (%d) shutdown." % get_rank())
def fast_pull(name, id_tensor, part_id, service_id,
machine_count, group_count, machine_id,
client_id, local_data, policy):
"""Fast-pull api used by kvstore.
Parameters
----------
name : str
data name
id_tensor : tensor
data ID
part_id : tensor
partition ID of id_tensor
service_id : int
service_id of pull request
machine_count : int
total number of machine
group_count : int
total number of server inside machine
machine_id : int
current machine ID
client_id : int
current client ID
local_data : tensor
local data tensor
policy : PartitionPolicy
store the partition information
"""
msg_seq = incr_msg_seq()
pickle_data = bytearray(pickle.dumps(([0], [name])))
global_id = _CAPI_DGLRPCGetGlobalIDFromLocalPartition(F.zerocopy_to_dgl_ndarray(id_tensor),
F.zerocopy_to_dgl_ndarray(part_id),
machine_id)
global_id = F.zerocopy_from_dgl_ndarray(global_id)
g2l_id = policy.to_local(global_id)
res_tensor = _CAPI_DGLRPCFastPull(name,
int(machine_id),
int(machine_count),
int(group_count),
int(client_id),
int(service_id),
int(msg_seq),
pickle_data,
F.zerocopy_to_dgl_ndarray(id_tensor),
F.zerocopy_to_dgl_ndarray(part_id),
F.zerocopy_to_dgl_ndarray(g2l_id),
F.zerocopy_to_dgl_ndarray(local_data))
return F.zerocopy_from_dgl_ndarray(res_tensor)
def register_ctrl_c():
"""HandleCtrlC Register for handling Ctrl+C event.
"""
_CAPI_DGLRPCHandleCtrlC()
def copy_data_to_shared_memory(source, dst):
"""Copy tensor data to shared-memory tensor
"""
_CAPI_DGLCopyDataToSharedMemory(F.zerocopy_to_dgl_ndarray(source),
F.zerocopy_to_dgl_ndarray(dst))
############### Some basic services will be defined here #############
CLIENT_REGISTER = 22451
class ClientRegisterRequest(Request):
"""This request will send client's ip to server.
Parameters
----------
ip_addr : str
client's IP address
"""
def __init__(self, ip_addr):
self.ip_addr = ip_addr
def __getstate__(self):
return self.ip_addr
def __setstate__(self, state):
self.ip_addr = state
def process_request(self, server_state):
return None # do nothing
class ClientRegisterResponse(Response):
"""This response will send assigned ID to client.
Parameters
----------
ID : int
client's ID
"""
def __init__(self, client_id):
self.client_id = client_id
def __getstate__(self):
return self.client_id
def __setstate__(self, state):
self.client_id = state
SHUT_DOWN_SERVER = 22452
class ShutDownRequest(Request):
"""Client send this request to shut-down a server.
This request has no response.
Parameters
----------
client_id : int
client's ID
"""
def __init__(self, client_id):
self.client_id = client_id
def __getstate__(self):
return self.client_id
def __setstate__(self, state):
self.client_id = state
def process_request(self, server_state):
assert self.client_id == 0
finalize_server()
return 'exit'
GET_NUM_CLIENT = 22453
class GetNumberClientsResponse(Response):
"""This reponse will send total number of clients.
Parameters
----------
num_client : int
total number of clients
"""
def __init__(self, num_client):
self.num_client = num_client
def __getstate__(self):
return self.num_client
def __setstate__(self, state):
self.num_client = state
class GetNumberClientsRequest(Request):
"""Client send this request to get the total number of client.
Parameters
----------
client_id : int
client's ID
"""
def __init__(self, client_id):
self.client_id = client_id
def __getstate__(self):
return self.client_id
def __setstate__(self, state):
self.client_id = state
def process_request(self, server_state):
res = GetNumberClientsResponse(get_num_client())
return res
CLIENT_BARRIER = 22454
class ClientBarrierResponse(Response):
"""Send the barrier confirmation to client
Parameters
----------
msg : str
string msg
"""
def __init__(self, msg='barrier'):
self.msg = msg
def __getstate__(self):
return self.msg
def __setstate__(self, state):
self.msg = state
class ClientBarrierRequest(Request):
"""Send the barrier information to server
Parameters
----------
msg : str
string msg
"""
def __init__(self, msg='barrier'):
self.msg = msg
def __getstate__(self):
return self.msg
def __setstate__(self, state):
self.msg = state
def process_request(self, server_state):
_CAPI_DGLRPCSetBarrierCount(_CAPI_DGLRPCGetBarrierCount()+1)
if _CAPI_DGLRPCGetBarrierCount() == get_num_client():
_CAPI_DGLRPCSetBarrierCount(0)
res_list = []
for target_id in range(get_num_client()):
res_list.append((target_id, ClientBarrierResponse()))
return res_list
return None
_init_api("dgl.distributed.rpc")
| 29.264935 | 97 | 0.632762 |
4a1d3267318da7677d4f7747ef98194aefe18464 | 1,249 | py | Python | server/__init__.py | michaelscales88/irrigation-site | 847bfb5b874d3813b71ea2939fd83459a6a1f991 | ["MIT"] | null | null | null | server/__init__.py | michaelscales88/irrigation-site | 847bfb5b874d3813b71ea2939fd83459a6a1f991 | ["MIT"] | null | null | null | server/__init__.py | michaelscales88/irrigation-site | 847bfb5b874d3813b71ea2939fd83459a6a1f991 | ["MIT"] | null | null | null |
# server/__init__.py
from flask import Flask, Blueprint
from flask_restful import Api
from .server import build_server
def create_server(*cfg):
"""
Creates the server that the html pages interact with.
"""
server_instance = Flask(
__name__,
instance_relative_config=True,
template_folder='../static/templates',
static_folder="../static",
)
# Settings
# This program could take multiple different settings files by name.
server_instance.config.from_object('server.default_config')
for config_file in cfg:
server_instance.config.from_pyfile(config_file, silent=True)
api_bp = Blueprint('backend', __name__)
api = Api(api_bp)
# This is where the API are configured so that you can access them
# with a url.
from .data import DataAPI
from .login import LoginAPI, RefreshTokenAPI, AuthenticateTokenAPI
api.add_resource(LoginAPI, '/', '/login')
api.add_resource(RefreshTokenAPI, '/refresh-token')
api.add_resource(AuthenticateTokenAPI, '/authenticate-token')
api.add_resource(DataAPI, '/data')
# Register all the API rules with the server
server_instance.register_blueprint(api_bp)
return build_server(server_instance)
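# --- Illustrative sketch (not part of the original module): how the factory is
# typically used from a launcher script. The config file name, host and port
# are assumptions, and build_server is assumed to return the configured Flask
# app.
def _run_example():
    app = create_server('local_config.py')
    app.run(host='0.0.0.0', port=5000, debug=True)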
| 30.463415 | 72 | 0.713371 |
4a1d326b147d3757792c79302153014122111c85 | 1,284 | py | Python | scipy/odr/setup.py | opoplawski/scipy | 582d59caabb4a2a6fcdd06b512dcd14daf7ca6b2 | ["BSD-3-Clause"] | null | null | null | scipy/odr/setup.py | opoplawski/scipy | 582d59caabb4a2a6fcdd06b512dcd14daf7ca6b2 | ["BSD-3-Clause"] | null | null | null | scipy/odr/setup.py | opoplawski/scipy | 582d59caabb4a2a6fcdd06b512dcd14daf7ca6b2 | ["BSD-3-Clause"] | null | null | null |
#!/usr/bin/env python
from __future__ import division, print_function, absolute_import
from os.path import join
def configuration(parent_package='', top_path=None):
import warnings
from numpy.distutils.misc_util import Configuration
from numpy.distutils.system_info import get_info, BlasNotFoundError
config = Configuration('odr', parent_package, top_path)
libodr_files = ['d_odr.f',
'd_mprec.f',
'dlunoc.f']
blas_info = get_info('blas_opt')
if blas_info:
libodr_files.append('d_lpk.f')
else:
warnings.warn(BlasNotFoundError.__doc__)
libodr_files.append('d_lpkbls.f')
libodr = [join('odrpack', x) for x in libodr_files]
config.add_library('odrpack', sources=libodr)
sources = ['__odrpack.c']
libraries = ['odrpack'] + blas_info.pop('libraries', [])
include_dirs = ['.'] + blas_info.pop('include_dirs', [])
config.add_extension('__odrpack',
sources=sources,
libraries=libraries,
include_dirs=include_dirs,
depends=['odrpack.h'],
**blas_info
)
config.add_data_dir('tests')
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(**configuration(top_path='').todict())
| 29.860465 | 71 | 0.659657 |
4a1d32d25d068ea2e2c56e38484e227346f35670 | 2,820 | py | Python | Kaggle/Playgroud/RiskPrediction/Home-Credit-Default-Risk-master/py/404_aggregate-3y.py | hehuanlin123/DeepLearning | 6b7feabbbde9ac9489f76da4c06eeb6703fb165a | ["MIT"] | 1 | 2020-02-28T12:03:39.000Z | 2020-02-28T12:03:39.000Z | Kaggle/Playgroud/RiskPrediction/Home-Credit-Default-Risk-master/py/404_aggregate-3y.py | hehuanlin123/DeepLearning | 6b7feabbbde9ac9489f76da4c06eeb6703fb165a | ["MIT"] | null | null | null | Kaggle/Playgroud/RiskPrediction/Home-Credit-Default-Risk-master/py/404_aggregate-3y.py | hehuanlin123/DeepLearning | 6b7feabbbde9ac9489f76da4c06eeb6703fb165a | ["MIT"] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Jun 20 10:30:13 2018
@author: Kazuki
based on
https://www.kaggle.com/jsaguiar/updated-0-792-lb-lightgbm-with-simple-features/code
"""
import numpy as np
import pandas as pd
import gc
import os
from multiprocessing import Pool, cpu_count
NTHREAD = cpu_count()
import utils_agg
import utils
utils.start(__file__)
#==============================================================================
PREF = 'f404_'
KEY = 'SK_ID_CURR'
month_start = -12*3 # -36
month_end = -12*2 # -24
os.system(f'rm ../feature/t*_{PREF}*')
# =============================================================================
#
# =============================================================================
cre = utils.read_pickles('../data/credit_card_balance')
cre = cre[cre['MONTHS_BALANCE'].between(month_start, month_end)].drop('SK_ID_PREV', axis=1)
col_cat = ['NAME_CONTRACT_STATUS']
train = utils.load_train([KEY])
test = utils.load_test([KEY])
# =============================================================================
#
# =============================================================================
def aggregate():
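    # One-hot encode the categorical columns, aggregate per SK_ID_CURR using the
    # shared numeric aggregations plus mean/sum for every dummy column, derive
    # std/mean and max/min ratio features, then left-join onto train and test.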
df = utils.get_dummies(cre)
li = []
for c1 in df.columns:
for c2 in col_cat:
if c1.startswith(c2+'_'):
li.append(c1)
break
cat_aggregations = {}
for cat in li:
cat_aggregations[cat] = ['mean', 'sum']
df_agg = df.groupby(KEY).agg({**utils_agg.cre_num_aggregations, **cat_aggregations})
df_agg.columns = pd.Index([e[0] + "_" + e[1] for e in df_agg.columns.tolist()])
# std / mean
col_std = [c for c in df_agg.columns if c.endswith('_std')]
for c in col_std:
df_agg[f'{c}-d-mean'] = df_agg[c]/df_agg[c.replace('_std', '_mean')]
# max / min
col_max = [c for c in df_agg.columns if c.endswith('_max')]
for c in col_max:
try:
df_agg[f'{c}-d-min'] = df_agg[c]/df_agg[c.replace('_max', '_min')]
df_agg[f'{c}-m-min'] = df_agg[c]-df_agg[c.replace('_max', '_min')]
except:
pass
df_agg['CRE_COUNT'] = df.groupby(KEY).size()
df_agg.reset_index(inplace=True)
tmp = pd.merge(train, df_agg, on=KEY, how='left').drop(KEY, axis=1)
utils.to_feature(tmp.add_prefix(PREF), '../feature/train')
tmp = pd.merge(test, df_agg, on=KEY, how='left').drop(KEY, axis=1)
utils.to_feature(tmp.add_prefix(PREF), '../feature/test')
return
# =============================================================================
# main
# =============================================================================
aggregate()
#==============================================================================
utils.end(__file__)
| 28.484848 | 91 | 0.475177 |
4a1d3480cf28bea4d86ba312237dc486433525f9 | 1,461 | py | Python | tools/contourlet_transform/tools/nsdfbdec.py | yilinshao/CoT-Contourlet-Transformer | 44d36a05f81ec168e3ccd8b9438ddaee6283189e | ["MIT"] | 4 | 2021-12-21T07:45:01.000Z | 2021-12-21T09:15:47.000Z | tools/contourlet_transform/tools/nsdfbdec.py | yilinshao/CoT-Contourlet-Transformer | 44d36a05f81ec168e3ccd8b9438ddaee6283189e | ["MIT"] | null | null | null | tools/contourlet_transform/tools/nsdfbdec.py | yilinshao/CoT-Contourlet-Transformer | 44d36a05f81ec168e3ccd8b9438ddaee6283189e | ["MIT"] | null | null | null |
import numpy as np  # np is used directly below; made explicit (the star import may also provide it)
from .nssfbdec import *
from tqdm import tqdm
def nsdfbdec(x, dfilter, clevels, gpu_mode=False):
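    # Nonsubsampled directional filter bank decomposition: dfilter packs the
    # first-level filter pair (k1, k2) and the filter lists (f1, f2) used at
    # deeper levels; the input x is split into 2**clevels directional subbands
    # that are returned in the list y.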
k1 = dfilter[0]
k2 = dfilter[1]
f1 = dfilter[2]
f2 = dfilter[3]
q1 = [[1, -1], [1, 1]]
y = []
if clevels == 1:
y1, y2 = nssfbdec(x, k1, k2, gpu_mode=gpu_mode)
y.append(y1)
y.append(y2)
else:
x1, x2 = nssfbdec(x, k1, k2, gpu_mode=gpu_mode)
y1, y2 = nssfbdec(x1, k1, k2, mup=q1, gpu_mode=gpu_mode)
y3, y4 = nssfbdec(x2, k1, k2, mup=q1, gpu_mode=gpu_mode)
y.append(y1)
y.append(y2)
y.append(y3)
y.append(y4)
for l in range(3, clevels + 1):
y_old = y
y = [None for i in range(2**l)]
for k in range(1, 2**(l-2) + 1):
slk = 2 * int(np.floor((k-1)/2)) - 2**(l-3) + 1
mkl = 2 * np.dot(np.asarray([[2**(l - 3), 0], [0, 1]]), np.asarray([[1, 0], [-slk, 1]]))
i = np.remainder(k - 1, 2) + 1
y[2*k-2], y[2*k-1] = nssfbdec(y_old[k-1], f1[i-1], f2[i-1], mkl)
for k in range(2**(l-2) + 1, 2**(l-1) + 1):
slk = 2 * int(np.floor((k-2**(l-2)-1) / 2)) - 2**(l-3) + 1
            mkl = 2 * np.dot(np.asarray([[1, 0], [0, 2 ** (l - 3)]]), np.asarray([[1, -slk], [0, 1]]))
i = np.remainder(k - 1, 2) + 3
y[2 * k - 2], y[2 * k - 1] = nssfbdec(y_old[k - 1], f1[i - 1], f2[i - 1], mkl)
return y
| 33.204545 | 104 | 0.44011 |
4a1d35781e822d848ed32add7b83a3be0b925a90 | 1,373 | py | Python | nuplan/planning/nuboard/base/test/test_nuboard_file.py | motional/nuplan-devkit | e39029e788b17f47f2fcadb774098ef8fbdd0d67 | ["Apache-2.0"] | 128 | 2021-12-06T15:41:14.000Z | 2022-03-29T13:16:32.000Z | nuplan/planning/nuboard/base/test/test_nuboard_file.py | motional/nuplan-devkit | e39029e788b17f47f2fcadb774098ef8fbdd0d67 | ["Apache-2.0"] | 28 | 2021-12-11T08:11:31.000Z | 2022-03-25T02:35:43.000Z | nuplan/planning/nuboard/base/test/test_nuboard_file.py | motional/nuplan-devkit | e39029e788b17f47f2fcadb774098ef8fbdd0d67 | ["Apache-2.0"] | 14 | 2021-12-11T04:12:26.000Z | 2022-03-24T06:38:30.000Z |
import os
import tempfile
import unittest
from pathlib import Path
from nuplan.planning.nuboard.base.data_class import NuBoardFile
class TestNuBoardFile(unittest.TestCase):
"""Test NuBoardFile functionality."""
def setUp(self) -> None:
"""Set up a nuBoard file class."""
self.tmp_dir = tempfile.TemporaryDirectory()
self.nuboard_file = NuBoardFile(
simulation_main_path=self.tmp_dir.name,
metric_main_path=self.tmp_dir.name,
metric_folder="metrics",
simulation_folder="simulations",
aggregator_metric_folder="aggregator_metric",
)
self.nuboard_file_name = Path(self.tmp_dir.name) / ("nuboard_file" + self.nuboard_file.extension())
def test_nuboard_save_and_load_file(self) -> None:
"""Test saving and loading a nuboard file."""
self.nuboard_file.save_nuboard_file(self.nuboard_file_name)
self.assertTrue(os.path.exists(self.nuboard_file_name))
self.assertEqual(self.nuboard_file_name.suffix, self.nuboard_file.extension())
nuboard_file = NuBoardFile.load_nuboard_file(self.nuboard_file_name)
self.assertEqual(nuboard_file, self.nuboard_file)
def tearDown(self) -> None:
"""Clean up temporary folder and files."""
self.tmp_dir.cleanup()
if __name__ == "__main__":
unittest.main()
| 33.487805 | 107 | 0.694829 |
4a1d3697605a91b8572c4d1a413625e8869e6338 | 7,788 | py | Python | backend/model/__init__.py | deti/boss | bc0cfe3067bf1cbf26789f7443a36e7cdd2ac869 | ["Apache-2.0"] | 7 | 2018-05-20T08:56:08.000Z | 2022-03-11T15:50:54.000Z | backend/model/__init__.py | deti/boss | bc0cfe3067bf1cbf26789f7443a36e7cdd2ac869 | ["Apache-2.0"] | 2 | 2021-06-08T21:12:51.000Z | 2022-01-13T01:25:27.000Z | backend/model/__init__.py | deti/boss | bc0cfe3067bf1cbf26789f7443a36e7cdd2ac869 | ["Apache-2.0"] | 5 | 2016-10-09T14:52:09.000Z | 2020-12-25T01:04:35.000Z |
import conf
import arrow
import logbook
import warnings
from collections.abc import Iterable
from sqlalchemy.exc import IntegrityError
from model.sa import SQLAlchemy
from attrdict import AttrDict
from functools import wraps
from sqlalchemy.orm import joinedload
warnings.filterwarnings("ignore", "Data truncated for column")
def database_config():
config = AttrDict(conf.database.copy())
test = config.pop("test")
if conf.test:
for k, v in test.items():
config[k] = v
return config
db = SQLAlchemy(database_config())
class DisplayedModel:
display_fields = None
display_fields_short = None
expand_fields = frozenset()
extract_fields = {}
sorting_fields = None
def display(self, short=True):
fields = self.display_fields_short or self.display_fields if short else self.display_fields
def get_value(field):
value = getattr(self, field)
if field in self.expand_fields:
value = display(value)
elif field in self.extract_fields:
value = self.extract_fields[field](value)
return value
return {f: get_value(f) for f in fields}
def __repr__(self):
return str(self)
class BaseModel(DisplayedModel):
id_field = None
unique_field = None
@classmethod
def get_by_id(cls, model_id):
return cls.query.get(model_id)
@classmethod
def filter_by_id(cls, model_id):
assert cls.id_field
return cls.query.filter_by(**{cls.id_field: model_id})
def mark_removed(self):
if self.deleted:
return False
self.deleted = arrow.utcnow().datetime
return True
@classmethod
def sort_query(cls, query, sort):
fields = []
for field in sort:
if field.startswith('-'):
field = getattr(cls, field[1:]).desc()
else:
field = getattr(cls, field)
fields.append(field)
query = query.order_by(*fields)
return query
@classmethod
def api_filter(cls, query_parameters, exact=None, query=None, extract_by_id=False, visibility=None):
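        # Pop the paging/sorting parameters first, optionally restrict by deletion
        # state, then turn the remaining query parameters into exact, ilike or
        # before/after range filters and return a paginated query.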
limit = query_parameters.pop("limit")
page = query_parameters.pop("page")
sort = query_parameters.pop("sort", None)
if query is None:
query = cls.query
if visibility:
query_parameters.pop("visibility", None)
if visibility == "all":
pass
elif visibility == "visible":
query = query.filter(cls.deleted == None)
elif visibility == "deleted":
query = query.filter(cls.deleted != None)
for k, v in query_parameters.items():
if k in cls.__table__.columns:
if k in (exact or {}):
query = query.filter_by(**{k: v})
elif v is not None:
column = cls.__table__.columns[k]
query = query.filter(column.ilike("%{}%".format(v)))
elif k.endswith('_before'):
query = query.filter(getattr(cls, k.partition('_before')[0]) < v)
elif k.endswith('_after'):
query = query.filter(getattr(cls, k.partition('_after')[0]) > v)
if extract_by_id:
subquery = query.with_entities(cls.id_field).subquery()
query = cls.query.filter(cls.__table__.columns[cls.id_field].in_(subquery)).\
options(joinedload('localized_name'))
if sort:
query = cls.sort_query(query, sort)
return query.paginate(page, limit)
@classmethod
def delete_by_prefix(cls, prefix, field=None):
field = field or cls.unique_field
if not field:
raise Exception("Field for removing is not set")
member = getattr(cls, field)
return cls.query.filter(member.like(prefix + "%")).delete(False)
def update(self, parameters):
logbook.debug("Update {} with parameters: {}", self, parameters)
for key, value in parameters.items():
assert key in self.__table__.columns
setattr(self, key, value)
def __str__(self):
# noinspection PyUnresolvedReferences
try:
fields = self.__table__.columns.keys()
columns = ", ".join("%s=%s" % (k, self.__dict__.get(k, "<Unknown field %s>" % k)) for k in fields)
return "<%s %s>" % (self.__class__.__name__, columns)
except Exception as e:
logbook.error("__str__ failed for {}: {}", type(self), e)
return str(type(self))
def to_dict(self):
result = {}
for key in self.__mapper__.c.keys():
result[key] = getattr(self, key)
return result
class AccountDb(BaseModel):
pass
class FitterDb(BaseModel):
__bind_key__ = 'fitter'
def duplicate_handle(duplicate_exception):
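    # Decorator factory: flush the session after the wrapped call and translate
    # an IntegrityError (duplicate key) into the supplied domain exception.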
def outer(fn):
@wraps(fn)
def inner(*args, **kwargs):
try:
res = fn(*args, **kwargs)
db.session.flush()
return res
except IntegrityError as e:
                # After an exception all models become expired. Representing an object as a
                # string would require a new db request to refresh it, but that is impossible
                # because the previous sql command failed. So we exclude any model from the
                # debug output.
args = tuple(value for value in args if not isinstance(value, db.Model))
logbook.debug("Integrity error for {}({}, {}): {}", fn.__qualname__, args, kwargs, e)
raise duplicate_exception()
return inner
return outer
def autocommit(fn):
import errors
@wraps(fn)
def wrap(*args, **kwargs):
try:
res = fn(*args, **kwargs)
db.session.commit()
return res
except errors.BadRequest:
db.session.rollback()
raise
except Exception:
logbook.exception("Exception in function {}:", fn)
db.session.rollback()
raise
return wrap
def display(value, short=False, expand_references_in_list=None):
if value is None:
return value
if hasattr(value, "display"):
result = value.display(short)
if expand_references_in_list is not None:
expand_references_in_list([result])
return result
if isinstance(value, str):
return value
if isinstance(value, Iterable):
result = [display(l, short) for l in value]
if expand_references_in_list is not None:
expand_references_in_list(result)
return result
raise Exception("Incorrect type for display %s (%s)" % (value, type(value)))
from model.account.message_template import MessageTemplate
from model.account.scheduled_task import ScheduledTask
from model.account.customer import Customer, Subscription, SubscriptionSwitch, Quote, CustomerCard, PromoCode
from model.account.customer_info import PrivateCustomerInfo, EntityCustomerInfo
from model.account.user import User
from model.account.service import FixedService, Measure, Category, Service, ServiceLocalization, ServiceDescription,\
Flavor
from model.account.tariff import TariffLocalization, Tariff, TariffHistory, ServicePrice
from model.account.news import News
from model.fitter.service_usage import ServiceUsage
from model.account.tenant import Tenant
from model.account.deferred import Deferred
from model.account.account import Account, AccountHistory
from model.account.customer_history import CustomerHistory
from model.account.option import Option
from model.account.time_state import TimeState, TimeMachine
| 33 | 117 | 0.624037 |
4a1d3716bdfbb96b8199cdf85b8f9ba20fc70936 | 96 | py | Python | tests/data/test_02/test_03_expected.py | astahlman/format-sql | 0c93acbf5517ac8e05394621cc0507cac3d75e66 | ["BSD-2-Clause"] | 56 | 2015-03-20T13:10:31.000Z | 2022-01-05T11:23:02.000Z | tests/data/test_02/test_03_expected.py | astahlman/format-sql | 0c93acbf5517ac8e05394621cc0507cac3d75e66 | ["BSD-2-Clause"] | 9 | 2015-08-20T21:03:42.000Z | 2021-07-22T02:34:48.000Z | tests/data/test_02/test_03_expected.py | astahlman/format-sql | 0c93acbf5517ac8e05394621cc0507cac3d75e66 | ["BSD-2-Clause"] | 16 | 2016-04-24T02:09:44.000Z | 2020-12-05T02:07:03.000Z |
def args():
X.objects.raw("""
SELECT
*
FROM
k; """)
| 13.714286 | 21 | 0.302083 |
4a1d37588aab3dae832a9edd3914a4d8cc645d0e | 1,100 | py | Python | scrapyd/runner.py | cazana/scrapyd | b4c80e2b95162b8c06dbf11e8964cdcd0bcde7da | ["BSD-3-Clause"] | 1 | 2020-08-14T10:29:12.000Z | 2020-08-14T10:29:12.000Z | scrapyd/runner.py | cazana/scrapyd | b4c80e2b95162b8c06dbf11e8964cdcd0bcde7da | ["BSD-3-Clause"] | 3 | 2021-11-17T11:22:25.000Z | 2021-12-15T16:27:39.000Z | scrapyd/runner.py | cazana/scrapyd | b4c80e2b95162b8c06dbf11e8964cdcd0bcde7da | ["BSD-3-Clause"] | null | null | null |
import sys
import os
import shutil
import tempfile
from contextlib import contextmanager
from scrapyd import get_application
from scrapyd.interfaces import IEggStorage
from scrapyd.eggutils import activate_egg
@contextmanager
def project_environment(project):
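    # Fetch the project's egg (optionally pinned by EGG_VERSION), copy it to a
    # temporary file and activate it for the duration of the context, removing
    # the temporary egg afterwards.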
app = get_application()
eggstorage = app.getComponent(IEggStorage)
eggversion = os.environ.get('EGG_VERSION', None)
version, eggfile = eggstorage.get(project, eggversion)
if eggfile:
prefix = '%s-%s-' % (project, version)
fd, eggpath = tempfile.mkstemp(prefix=prefix, suffix='.egg')
lf = os.fdopen(fd, 'wb')
shutil.copyfileobj(eggfile, lf)
lf.close()
activate_egg(eggpath)
else:
eggpath = None
try:
assert 'scrapy.conf' not in sys.modules, "Scrapy settings already loaded"
yield
finally:
if eggpath:
os.remove(eggpath)
def main():
project = os.environ['SCRAPY_PROJECT']
with project_environment(project):
from scrapy.cmdline import execute
execute()
if __name__ == '__main__':
main()
| 26.829268 | 81 | 0.674545 |
4a1d3786208582f3961b1bb3c7456dfd4fedc12e | 325 | py | Python | SeleniumWrapper_JE/test/start_webdriver_test/get_webdriver.py | JE-Chen/je_old_repo | a8b2f1ac2eec25758bd15b71c64b59b27e0bcda5 | ["MIT"] | null | null | null | SeleniumWrapper_JE/test/start_webdriver_test/get_webdriver.py | JE-Chen/je_old_repo | a8b2f1ac2eec25758bd15b71c64b59b27e0bcda5 | ["MIT"] | null | null | null | SeleniumWrapper_JE/test/start_webdriver_test/get_webdriver.py | JE-Chen/je_old_repo | a8b2f1ac2eec25758bd15b71c64b59b27e0bcda5 | ["MIT"] | null | null | null |
from selenium_wrapper import get_webdriver
if __name__ == "__main__":
driver_wrapper = get_webdriver(
"chrome"
)
driver_wrapper.set_webdriver_options_capability({"test": "test"})
driver_wrapper.open_browser("http://www.python.org")
print(driver_wrapper.webdriver.title)
driver_wrapper.quit()
| 29.545455 | 69 | 0.729231 |
4a1d379bf2207f2996a4541b1d55e42a3290f787 | 4,364 | py | Python | tests/serialization/test_run_configs.py | suryatmodulus/prefect | e4ac9f6aa831140c7fba0397f3e5e0884b1b9e42 | ["Apache-2.0"] | 8,633 | 2019-03-23T17:51:03.000Z | 2022-03-31T22:17:42.000Z | tests/serialization/test_run_configs.py | suryatmodulus/prefect | e4ac9f6aa831140c7fba0397f3e5e0884b1b9e42 | ["Apache-2.0"] | 3,903 | 2019-03-23T19:11:21.000Z | 2022-03-31T23:21:23.000Z | tests/serialization/test_run_configs.py | suryatmodulus/prefect | e4ac9f6aa831140c7fba0397f3e5e0884b1b9e42 | ["Apache-2.0"] | 937 | 2019-03-23T18:49:44.000Z | 2022-03-31T21:45:13.000Z |
import pytest
from prefect.run_configs import KubernetesRun, LocalRun, DockerRun, ECSRun, UniversalRun
from prefect.serialization.run_config import RunConfigSchema, RunConfigSchemaBase
def test_serialized_run_config_sorts_labels():
assert RunConfigSchemaBase().dump({"labels": ["b", "c", "a"]})["labels"] == [
"a",
"b",
"c",
]
@pytest.mark.parametrize(
"config", [UniversalRun(), UniversalRun(env={"FOO": "BAR"}, labels=["a", "b"])]
)
def test_serialize_universal_run(config):
msg = RunConfigSchema().dump(config)
config2 = RunConfigSchema().load(msg)
assert (config.env) == config2.env
assert sorted(config.labels) == sorted(config2.labels)
@pytest.mark.parametrize(
"config",
[
KubernetesRun(),
KubernetesRun(
job_template_path="s3://bucket/test.yaml",
image="myimage",
env={"test": "foo"},
cpu_limit=2,
cpu_request="500m",
memory_limit="4G",
memory_request="2G",
service_account_name="my-account",
image_pull_secrets=["secret-1", "secret-2"],
labels=["a", "b"],
image_pull_policy="Always",
),
KubernetesRun(
job_template={
"apiVersion": "batch/v1",
"kind": "Job",
"metadata": {"labels": {"example": "foo"}},
}
),
],
)
def test_serialize_kubernetes_run(config):
msg = RunConfigSchema().dump(config)
config2 = RunConfigSchema().load(msg)
assert sorted(config.labels) == sorted(config2.labels)
fields = [
"job_template",
"job_template_path",
"image",
"env",
"cpu_limit",
"cpu_request",
"memory_limit",
"memory_request",
"service_account_name",
"image_pull_secrets",
"image_pull_policy",
]
for field in fields:
assert getattr(config, field) == getattr(config2, field)
@pytest.mark.parametrize(
"config",
[
LocalRun(),
LocalRun(
env={"test": "foo"},
working_dir="/path/to/dir",
labels=["a", "b"],
),
],
)
def test_serialize_local_run(config):
msg = RunConfigSchema().dump(config)
config2 = RunConfigSchema().load(msg)
assert sorted(config.labels) == sorted(config2.labels)
fields = ["env", "working_dir"]
for field in fields:
assert getattr(config, field) == getattr(config2, field)
@pytest.mark.parametrize(
"config",
[
DockerRun(),
DockerRun(
env={"test": "foo"}, image="testing", labels=["a", "b"], ports=[12001]
),
],
)
def test_serialize_docker_run(config):
msg = RunConfigSchema().dump(config)
config2 = RunConfigSchema().load(msg)
assert sorted(config.labels) == sorted(config2.labels)
fields = ["env", "image", "ports"]
for field in fields:
assert getattr(config, field) == getattr(config2, field)
@pytest.mark.parametrize(
"config",
[
ECSRun(),
ECSRun(
task_definition_path="s3://bucket/test.yaml",
image="myimage",
env={"test": "foo"},
cpu="1 vcpu",
memory="1 GB",
task_role_arn="my-task-role",
execution_role_arn="execution-role",
run_task_kwargs={"overrides": {"taskRoleArn": "example"}},
labels=["a", "b"],
),
ECSRun(
task_definition={
"containerDefinitions": [
{
"name": "flow",
"environment": [{"name": "TEST", "value": "VALUE"}],
}
]
}
),
ECSRun(task_definition_arn="my-task-definition"),
],
)
def test_serialize_ecs_run(config):
msg = RunConfigSchema().dump(config)
config2 = RunConfigSchema().load(msg)
assert sorted(config.labels) == sorted(config2.labels)
fields = [
"task_definition",
"task_definition_path",
"task_definition_arn",
"image",
"env",
"cpu",
"memory",
"task_role_arn",
"execution_role_arn",
"run_task_kwargs",
]
for field in fields:
assert getattr(config, field) == getattr(config2, field)
| 27.796178 | 88 | 0.547663 |
4a1d37fbfcb0eba68f110458b9d18806cbb9cfd7 | 34,445 | py | Python | salt/modules/btrfs.py | tomdoherty/salt | f87d5d7abbf9777773c4d91fdafecb8b1a728e76 | ["Apache-2.0"] | 1 | 2022-03-12T00:03:19.000Z | 2022-03-12T00:03:19.000Z | salt/modules/btrfs.py | tomdoherty/salt | f87d5d7abbf9777773c4d91fdafecb8b1a728e76 | ["Apache-2.0"] | 2 | 2022-03-02T16:11:35.000Z | 2022-03-03T08:04:30.000Z | salt/modules/btrfs.py | tomdoherty/salt | f87d5d7abbf9777773c4d91fdafecb8b1a728e76 | ["Apache-2.0"] | null | null | null |
#
# Copyright 2014 SUSE LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Module for managing BTRFS file systems.
"""
import itertools
import os
import re
import subprocess
import uuid
import salt.utils.fsutils
import salt.utils.platform
from salt.exceptions import CommandExecutionError
def __virtual__():
"""
Only work on POSIX-like systems
"""
return not salt.utils.platform.is_windows() and __grains__.get("kernel") == "Linux"
def version():
"""
Return BTRFS version.
CLI Example:
.. code-block:: bash
salt '*' btrfs.version
"""
out = __salt__["cmd.run_all"]("btrfs --version")
if out.get("stderr"):
raise CommandExecutionError(out["stderr"])
return {"version": out["stdout"].split(" ", 1)[-1]}
def _parse_btrfs_info(data):
"""
Parse BTRFS device info data.
"""
ret = {}
for line in [line for line in data.split("\n") if line][:-1]:
if line.startswith("Label:"):
line = re.sub(r"Label:\s+", "", line)
label, uuid_ = (tkn.strip() for tkn in line.split("uuid:"))
ret["label"] = label != "none" and label or None
ret["uuid"] = uuid_
continue
if line.startswith("\tdevid"):
dev_data = re.split(r"\s+", line.strip())
dev_id = dev_data[-1]
ret[dev_id] = {
"device_id": dev_data[1],
"size": dev_data[3],
"used": dev_data[5],
}
return ret
def info(device):
"""
Get BTRFS filesystem information.
CLI Example:
.. code-block:: bash
salt '*' btrfs.info /dev/sda1
"""
out = __salt__["cmd.run_all"]("btrfs filesystem show {}".format(device))
salt.utils.fsutils._verify_run(out)
return _parse_btrfs_info(out["stdout"])
def devices():
"""
Get known BTRFS formatted devices on the system.
CLI Example:
.. code-block:: bash
salt '*' btrfs.devices
"""
out = __salt__["cmd.run_all"]("blkid -o export")
salt.utils.fsutils._verify_run(out)
return salt.utils.fsutils._blkid_output(out["stdout"], fs_type="btrfs")
def _defragment_mountpoint(mountpoint):
"""
Defragment only one BTRFS mountpoint.
"""
out = __salt__["cmd.run_all"](
"btrfs filesystem defragment -f {}".format(mountpoint)
)
return {
"mount_point": mountpoint,
"passed": not out["stderr"],
"log": out["stderr"] or False,
"range": False,
}
def defragment(path):
"""
Defragment mounted BTRFS filesystem.
    In order to defragment a filesystem, the device should be properly mounted and writable.
    If passed a device name, the whole filesystem mounted on it is defragmented.
    If passed a mount point of the filesystem, then only this mount point is defragmented.
CLI Example:
.. code-block:: bash
salt '*' btrfs.defragment /dev/sda1
salt '*' btrfs.defragment /path/on/filesystem
"""
is_device = salt.utils.fsutils._is_device(path)
mounts = salt.utils.fsutils._get_mounts("btrfs")
if is_device and not mounts.get(path):
raise CommandExecutionError('Device "{}" is not mounted'.format(path))
result = []
if is_device:
for mount_point in mounts[path]:
result.append(_defragment_mountpoint(mount_point["mount_point"]))
else:
is_mountpoint = False
for mountpoints in mounts.values():
for mpnt in mountpoints:
if path == mpnt["mount_point"]:
is_mountpoint = True
break
d_res = _defragment_mountpoint(path)
if (
not is_mountpoint
and not d_res["passed"]
and "range ioctl not supported" in d_res["log"]
):
d_res[
"log"
] = "Range ioctl defragmentation is not supported in this kernel."
if not is_mountpoint:
d_res["mount_point"] = False
d_res["range"] = os.path.exists(path) and path or False
result.append(d_res)
return result
def features():
"""
List currently available BTRFS features.
CLI Example:
.. code-block:: bash
salt '*' btrfs.mkfs_features
"""
out = __salt__["cmd.run_all"]("mkfs.btrfs -O list-all")
salt.utils.fsutils._verify_run(out)
ret = {}
for line in [
re.sub(r"\s+", " ", line) for line in out["stderr"].split("\n") if " - " in line
]:
option, description = line.split(" - ", 1)
ret[option] = description
return ret
def _usage_overall(raw):
"""
Parse usage/overall.
"""
data = {}
for line in raw.split("\n")[1:]:
keyset = [
item.strip()
for item in re.sub(r"\s+", " ", line).split(":", 1)
if item.strip()
]
if len(keyset) == 2:
key = re.sub(r"[()]", "", keyset[0]).replace(" ", "_").lower()
if key in ["free_estimated", "global_reserve"]: # An extra field
subk = keyset[1].split("(")
data[key] = subk[0].strip()
subk = subk[1].replace(")", "").split(": ")
data["{}_{}".format(key, subk[0])] = subk[1]
else:
data[key] = keyset[1]
return data
def _usage_specific(raw):
"""
Parse usage/specific.
"""
get_key = lambda val: dict([tuple(val.split(":"))])
raw = raw.split("\n")
section, size, used = raw[0].split(" ")
section = section.replace(",", "_").replace(":", "").lower()
data = {}
data[section] = {}
for val in [size, used]:
data[section].update(get_key(val.replace(",", "")))
for devices in raw[1:]:
data[section].update(get_key(re.sub(r"\s+", ":", devices.strip())))
return data
def _usage_unallocated(raw):
"""
Parse usage/unallocated.
"""
ret = {}
for line in raw.split("\n")[1:]:
keyset = re.sub(r"\s+", " ", line.strip()).split(" ")
if len(keyset) == 2:
ret[keyset[0]] = keyset[1]
return ret
def usage(path):
"""
Show in which disk the chunks are allocated.
CLI Example:
.. code-block:: bash
salt '*' btrfs.usage /your/mountpoint
"""
out = __salt__["cmd.run_all"]("btrfs filesystem usage {}".format(path))
salt.utils.fsutils._verify_run(out)
ret = {}
for section in out["stdout"].split("\n\n"):
if section.startswith("Overall:\n"):
ret["overall"] = _usage_overall(section)
elif section.startswith("Unallocated:\n"):
ret["unallocated"] = _usage_unallocated(section)
else:
ret.update(_usage_specific(section))
return ret
def mkfs(*devices, **kwargs):
"""
Create a file system on the specified device. By default wipes out with force.
General options:
* **allocsize**: Specify the BTRFS offset from the start of the device.
* **bytecount**: Specify the size of the resultant filesystem.
* **nodesize**: Node size.
* **leafsize**: Specify the nodesize, the tree block size in which btrfs stores data.
* **noforce**: Prevent force overwrite when an existing filesystem is detected on the device.
* **sectorsize**: Specify the sectorsize, the minimum data block allocation unit.
* **nodiscard**: Do not perform whole device TRIM operation by default.
* **uuid**: Pass UUID or pass True to generate one.
Options:
* **dto**: (raid0|raid1|raid5|raid6|raid10|single|dup)
Specify how the data must be spanned across the devices specified.
* **mto**: (raid0|raid1|raid5|raid6|raid10|single|dup)
Specify how metadata must be spanned across the devices specified.
* **fts**: Features (call ``salt <host> btrfs.features`` for full list of available features)
See the ``mkfs.btrfs(8)`` manpage for a more complete description of corresponding options description.
CLI Example:
.. code-block:: bash
salt '*' btrfs.mkfs /dev/sda1
salt '*' btrfs.mkfs /dev/sda1 noforce=True
"""
if not devices:
raise CommandExecutionError("No devices specified")
mounts = salt.utils.fsutils._get_mounts("btrfs")
for device in devices:
if mounts.get(device):
raise CommandExecutionError(
'Device "{}" should not be mounted'.format(device)
)
cmd = ["mkfs.btrfs"]
dto = kwargs.get("dto")
mto = kwargs.get("mto")
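    # With a single device only the "single" profile makes sense, so any
    # requested data/metadata profile is overridden below.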
if len(devices) == 1:
if dto:
cmd.append("-d single")
if mto:
cmd.append("-m single")
else:
if dto:
cmd.append("-d {}".format(dto))
if mto:
cmd.append("-m {}".format(mto))
for key, option in [
("-l", "leafsize"),
("-L", "label"),
("-O", "fts"),
("-A", "allocsize"),
("-b", "bytecount"),
("-n", "nodesize"),
("-s", "sectorsize"),
]:
if option == "label" and option in kwargs:
kwargs["label"] = "'{}'".format(kwargs["label"])
if kwargs.get(option):
cmd.append("{} {}".format(key, kwargs.get(option)))
if kwargs.get("uuid"):
cmd.append(
"-U {}".format(
kwargs.get("uuid") is True and uuid.uuid1() or kwargs.get("uuid")
)
)
if kwargs.get("nodiscard"):
cmd.append("-K")
if not kwargs.get("noforce"):
cmd.append("-f")
cmd.extend(devices)
out = __salt__["cmd.run_all"](" ".join(cmd))
salt.utils.fsutils._verify_run(out)
ret = {"log": out["stdout"]}
ret.update(__salt__["btrfs.info"](devices[0]))
return ret
def resize(mountpoint, size):
"""
Resize filesystem.
General options:
* **mountpoint**: Specify the BTRFS mountpoint to resize.
* **size**: ([+/-]<newsize>[kKmMgGtTpPeE]|max) Specify the new size of the target.
CLI Example:
.. code-block:: bash
salt '*' btrfs.resize /mountpoint size=+1g
salt '*' btrfs.resize /dev/sda1 size=max
"""
if size == "max":
if not salt.utils.fsutils._is_device(mountpoint):
raise CommandExecutionError(
'Mountpoint "{}" should be a valid device'.format(mountpoint)
)
if not salt.utils.fsutils._get_mounts("btrfs").get(mountpoint):
raise CommandExecutionError(
'Device "{}" should be mounted'.format(mountpoint)
)
elif (
len(size) < 3
or size[0] not in "-+"
or size[-1] not in "kKmMgGtTpPeE"
or re.sub(r"\d", "", size[1:][:-1])
):
raise CommandExecutionError(
'Unknown size: "{}". Expected: [+/-]<newsize>[kKmMgGtTpPeE]|max'.format(
size
)
)
out = __salt__["cmd.run_all"](
"btrfs filesystem resize {} {}".format(size, mountpoint)
)
salt.utils.fsutils._verify_run(out)
ret = {"log": out["stdout"]}
ret.update(__salt__["btrfs.info"](mountpoint))
return ret
def _fsck_ext(device):
"""
Check an ext2/ext3/ext4 file system.
    This is a forced check to determine whether a filesystem is clean or not.
    NOTE: Maybe this function needs to be moved as a standard method to the extfs module in the future.
"""
msgs = {
0: "No errors",
1: "Filesystem errors corrected",
2: "System should be rebooted",
4: "Filesystem errors left uncorrected",
8: "Operational error",
16: "Usage or syntax error",
32: "Fsck canceled by user request",
128: "Shared-library error",
}
return msgs.get(
__salt__["cmd.run_all"]("fsck -f -n {}".format(device))["retcode"],
"Unknown error",
)
def convert(device, permanent=False, keeplf=False):
"""
Convert ext2/3/4 to BTRFS. Device should be mounted.
    The filesystem can be converted temporarily, so that further processing and rollback are possible,
    or permanently, in which case the previous extended filesystem image gets deleted. Please note,
    permanent conversion takes a while as the BTRFS filesystem needs to be properly rebalanced afterwards.
General options:
* **permanent**: Specify if the migration should be permanent (false by default)
* **keeplf**: Keep ``lost+found`` of the partition (removed by default,
but still in the image, if not permanent migration)
CLI Example:
.. code-block:: bash
salt '*' btrfs.convert /dev/sda1
salt '*' btrfs.convert /dev/sda1 permanent=True
"""
out = __salt__["cmd.run_all"]("blkid -o export")
salt.utils.fsutils._verify_run(out)
devices = salt.utils.fsutils._blkid_output(out["stdout"])
if not devices.get(device):
        raise CommandExecutionError('The device "{}" was not found.'.format(device))
if not devices[device]["type"] in ["ext2", "ext3", "ext4"]:
raise CommandExecutionError(
'The device "{}" is a "{}" file system.'.format(
device, devices[device]["type"]
)
)
mountpoint = (
salt.utils.fsutils._get_mounts(devices[device]["type"])
.get(device, [{"mount_point": None}])[0]
.get("mount_point")
)
if mountpoint == "/":
raise CommandExecutionError(
"""One does not simply converts a root filesystem!
Converting an extended root filesystem to BTRFS is a careful
and lengthy process, among other steps including the following
requirements:
1. Proper verified backup.
2. System outage.
3. Offline system access.
For further details, please refer to your OS vendor
documentation regarding this topic.
"""
)
salt.utils.fsutils._verify_run(__salt__["cmd.run_all"]("umount {}".format(device)))
ret = {
"before": {
"fsck_status": _fsck_ext(device),
"mount_point": mountpoint,
"type": devices[device]["type"],
}
}
salt.utils.fsutils._verify_run(
__salt__["cmd.run_all"]("btrfs-convert {}".format(device))
)
salt.utils.fsutils._verify_run(
__salt__["cmd.run_all"]("mount {} {}".format(device, mountpoint))
)
# Refresh devices
out = __salt__["cmd.run_all"]("blkid -o export")
salt.utils.fsutils._verify_run(out)
devices = salt.utils.fsutils._blkid_output(out["stdout"])
ret["after"] = {
"fsck_status": "N/A", # ToDO
"mount_point": mountpoint,
"type": devices[device]["type"],
}
# Post-migration procedures
image_path = "{}/ext2_saved".format(mountpoint)
orig_fstype = ret["before"]["type"]
if not os.path.exists(image_path):
raise CommandExecutionError(
'BTRFS migration went wrong: the image "{}" not found!'.format(image_path)
)
if not permanent:
ret["after"]["{}_image".format(orig_fstype)] = image_path
image_info_proc = subprocess.run(
["file", "{}/image".format(image_path)], check=True, stdout=subprocess.PIPE
)
ret["after"][
"{}_image_info".format(orig_fstype)
] = image_info_proc.stdout.strip()
else:
ret["after"]["{}_image".format(orig_fstype)] = "removed"
ret["after"]["{}_image_info".format(orig_fstype)] = "N/A"
salt.utils.fsutils._verify_run(
__salt__["cmd.run_all"]("btrfs subvolume delete {}".format(image_path))
)
out = __salt__["cmd.run_all"]("btrfs filesystem balance {}".format(mountpoint))
salt.utils.fsutils._verify_run(out)
ret["after"]["balance_log"] = out["stdout"]
lost_found = "{}/lost+found".format(mountpoint)
if os.path.exists(lost_found) and not keeplf:
salt.utils.fsutils._verify_run(
__salt__["cmd.run_all"]("rm -rf {}".format(lost_found))
)
return ret
def _restripe(mountpoint, direction, *devices, **kwargs):
"""
Restripe BTRFS: add or remove devices from the particular mounted filesystem.
"""
fs_log = []
if salt.utils.fsutils._is_device(mountpoint):
raise CommandExecutionError(
            'Mountpoint expected, while device "{}" specified'.format(mountpoint)
)
mounted = False
for device, mntpoints in salt.utils.fsutils._get_mounts("btrfs").items():
for mntdata in mntpoints:
if mntdata["mount_point"] == mountpoint:
mounted = True
break
if not mounted:
raise CommandExecutionError(
'No BTRFS device mounted on "{}" mountpoint'.format(mountpoint)
)
if not devices:
raise CommandExecutionError("No devices specified.")
available_devices = __salt__["btrfs.devices"]()
for device in devices:
if device not in available_devices.keys():
raise CommandExecutionError('Device "{}" is not recognized'.format(device))
cmd = ["btrfs device {}".format(direction)]
for device in devices:
cmd.append(device)
if direction == "add":
if kwargs.get("nodiscard"):
cmd.append("-K")
if kwargs.get("force"):
cmd.append("-f")
cmd.append(mountpoint)
out = __salt__["cmd.run_all"](" ".join(cmd))
salt.utils.fsutils._verify_run(out)
if out["stdout"]:
fs_log.append(out["stdout"])
if direction == "add":
out = None
data_conversion = kwargs.get("dc")
meta_conversion = kwargs.get("mc")
if data_conversion and meta_conversion:
out = __salt__["cmd.run_all"](
"btrfs balance start -dconvert={} -mconvert={} {}".format(
data_conversion, meta_conversion, mountpoint
)
)
else:
out = __salt__["cmd.run_all"](
"btrfs filesystem balance {}".format(mountpoint)
)
salt.utils.fsutils._verify_run(out)
if out["stdout"]:
fs_log.append(out["stdout"])
# Summarize the result
ret = {}
if fs_log:
ret.update({"log": "\n".join(fs_log)})
ret.update(__salt__["btrfs.info"](mountpoint))
return ret
def add(mountpoint, *devices, **kwargs):
"""
Add a devices to a BTRFS filesystem.
General options:
* **nodiscard**: Do not perform whole device TRIM
* **force**: Force overwrite existing filesystem on the disk
CLI Example:
.. code-block:: bash
salt '*' btrfs.add /mountpoint /dev/sda1 /dev/sda2
"""
return _restripe(mountpoint, "add", *devices, **kwargs)
def delete(mountpoint, *devices, **kwargs):
"""
Remove devices from a BTRFS filesystem.
CLI Example:
.. code-block:: bash
salt '*' btrfs.delete /mountpoint /dev/sda1 /dev/sda2
"""
return _restripe(mountpoint, "delete", *devices, **kwargs)
def _parse_proplist(data):
"""
Parse properties list.
"""
out = {}
for line in data.split("\n"):
line = re.split(r"\s+", line, 1)
if len(line) == 2:
out[line[0]] = line[1]
return out
def properties(obj, type=None, set=None):
"""
List properties for given btrfs object. The object can be path of BTRFS device,
mount point, or any directories/files inside the BTRFS filesystem.
General options:
* **type**: Possible types are s[ubvol], f[ilesystem], i[node] and d[evice].
* **force**: Force overwrite existing filesystem on the disk
* **set**: <key=value,key1=value1...> Options for a filesystem properties.
CLI Example:
.. code-block:: bash
salt '*' btrfs.properties /mountpoint
salt '*' btrfs.properties /dev/sda1 type=subvol set='ro=false,label="My Storage"'
"""
if type and type not in [
"s",
"subvol",
"f",
"filesystem",
"i",
"inode",
"d",
"device",
]:
raise CommandExecutionError(
'Unknown property type: "{}" specified'.format(type)
)
cmd = ["btrfs"]
cmd.append("property")
cmd.append(set and "set" or "list")
if type:
cmd.append("-t{}".format(type))
cmd.append(obj)
if set:
try:
for key, value in [
[item.strip() for item in keyset.split("=")]
for keyset in set.split(",")
]:
cmd.append(key)
cmd.append(value)
except Exception as ex: # pylint: disable=broad-except
raise CommandExecutionError(ex)
out = __salt__["cmd.run_all"](" ".join(cmd))
salt.utils.fsutils._verify_run(out)
if not set:
ret = {}
for prop, descr in _parse_proplist(out["stdout"]).items():
ret[prop] = {"description": descr}
value = __salt__["cmd.run_all"](
"btrfs property get {} {}".format(obj, prop)
)["stdout"]
ret[prop]["value"] = value and value.split("=")[-1] or "N/A"
return ret
def subvolume_exists(path):
"""
Check if a subvolume is present in the filesystem.
path
Mount point for the subvolume (full path)
CLI Example:
.. code-block:: bash
salt '*' btrfs.subvolume_exists /mnt/var
"""
cmd = ["btrfs", "subvolume", "show", path]
return __salt__["cmd.retcode"](cmd, ignore_retcode=True) == 0
def subvolume_create(name, dest=None, qgroupids=None):
"""
Create subvolume `name` in `dest`.
    Return True if the subvolume is created, False if the subvolume is
already there.
name
Name of the new subvolume
dest
If not given, the subvolume will be created in the current
directory, if given will be in /dest/name
qgroupids
Add the newly created subcolume to a qgroup. This parameter
is a list
CLI Example:
.. code-block:: bash
salt '*' btrfs.subvolume_create var
salt '*' btrfs.subvolume_create var dest=/mnt
salt '*' btrfs.subvolume_create var qgroupids='[200]'
"""
if qgroupids and type(qgroupids) is not list:
raise CommandExecutionError("Qgroupids parameter must be a list")
if dest:
name = os.path.join(dest, name)
# If the subvolume is there, we are done
if subvolume_exists(name):
return False
cmd = ["btrfs", "subvolume", "create"]
if type(qgroupids) is list:
cmd.append("-i")
cmd.extend(qgroupids)
cmd.append(name)
res = __salt__["cmd.run_all"](cmd)
salt.utils.fsutils._verify_run(res)
return True
def subvolume_delete(name=None, names=None, commit=None):
"""
Delete the subvolume(s) from the filesystem
    The user can remove one single subvolume (name) or multiple of
    them at the same time (names). One of the two parameters needs to
    be specified.
Please, refer to the documentation to understand the implication
on the transactions, and when the subvolume is really deleted.
    Return True if the subvolume is deleted, False if the subvolume
was already missing.
name
Name of the subvolume to remove
names
List of names of subvolumes to remove
commit
* 'after': Wait for transaction commit at the end
* 'each': Wait for transaction commit after each delete
CLI Example:
.. code-block:: bash
salt '*' btrfs.subvolume_delete /var/volumes/tmp
salt '*' btrfs.subvolume_delete /var/volumes/tmp commit=after
"""
if not name and not (names and type(names) is list):
raise CommandExecutionError("Provide a value for the name parameter")
if commit and commit not in ("after", "each"):
raise CommandExecutionError("Value for commit not recognized")
# Filter the names and take the ones that are still there
names = [
n for n in itertools.chain([name], names or []) if n and subvolume_exists(n)
]
# If the subvolumes are gone, we are done
if not names:
return False
cmd = ["btrfs", "subvolume", "delete"]
if commit == "after":
cmd.append("--commit-after")
elif commit == "each":
cmd.append("--commit-each")
cmd.extend(names)
res = __salt__["cmd.run_all"](cmd)
salt.utils.fsutils._verify_run(res)
return True
def subvolume_find_new(name, last_gen):
"""
List the recently modified files in a subvolume
name
Name of the subvolume
last_gen
Last transid marker from where to compare
CLI Example:
.. code-block:: bash
salt '*' btrfs.subvolume_find_new /var/volumes/tmp 1024
"""
cmd = ["btrfs", "subvolume", "find-new", name, last_gen]
res = __salt__["cmd.run_all"](cmd)
salt.utils.fsutils._verify_run(res)
lines = res["stdout"].splitlines()
# Filenames are at the end of each inode line
files = [l.split()[-1] for l in lines if l.startswith("inode")]
# The last transid is in the last line
transid = lines[-1].split()[-1]
return {
"files": files,
"transid": transid,
}
def subvolume_get_default(path):
"""
Get the default subvolume of the filesystem path
path
Mount point for the subvolume
CLI Example:
.. code-block:: bash
salt '*' btrfs.subvolume_get_default /var/volumes/tmp
"""
cmd = ["btrfs", "subvolume", "get-default", path]
res = __salt__["cmd.run_all"](cmd)
salt.utils.fsutils._verify_run(res)
line = res["stdout"].strip()
# The ID is the second parameter, and the name the last one, or
# '(FS_TREE)'
#
# When the default one is set:
# ID 5 (FS_TREE)
#
# When we manually set a different one (var):
# ID 257 gen 8 top level 5 path var
#
id_ = line.split()[1]
name = line.split()[-1]
return {
"id": id_,
"name": name,
}
def _pop(line, key, use_rest):
"""
Helper for the line parser.
If key is a prefix of line, will remove ir from the line and will
extract the value (space separation), and the rest of the line.
If use_rest is True, the value will be the rest of the line.
Return a tuple with the value and the rest of the line.
"""
value = None
if line.startswith(key):
line = line[len(key) :].strip()
if use_rest:
value = line
line = ""
else:
value, line = line.split(" ", 1)
return value, line.strip()
def subvolume_list(
path,
parent_id=False,
absolute=False,
ogeneration=False,
generation=False,
subvolumes=False,
uuid=False,
parent_uuid=False,
sent_subvolume_uuid=False,
snapshots=False,
readonly=False,
deleted=False,
generation_cmp=None,
ogeneration_cmp=None,
sort=None,
):
"""
List the subvolumes present in the filesystem.
path
Mount point for the subvolume
parent_id
Print parent ID
absolute
Print all the subvolumes in the filesystem and distinguish
between absolute and relative path with respect to the given
<path>
ogeneration
Print the ogeneration of the subvolume
generation
Print the generation of the subvolume
subvolumes
Print only subvolumes below specified <path>
uuid
Print the UUID of the subvolume
parent_uuid
Print the parent uuid of subvolumes (and snapshots)
sent_subvolume_uuid
Print the UUID of the sent subvolume, where the subvolume is
the result of a receive operation
snapshots
Only snapshot subvolumes in the filesystem will be listed
readonly
Only readonly subvolumes in the filesystem will be listed
deleted
        Only deleted subvolumes that are not yet cleaned
generation_cmp
List subvolumes in the filesystem that its generation is >=,
<= or = value. '+' means >= value, '-' means <= value, If
there is neither '+' nor '-', it means = value
ogeneration_cmp
List subvolumes in the filesystem that its ogeneration is >=,
<= or = value
sort
List subvolumes in order by specified items. Possible values:
* rootid
* gen
* ogen
* path
        You can add '+' or '-' in front of each item, '+' means
        ascending, '-' means descending. The default is ascending. You
        can combine them in a list.
CLI Example:
.. code-block:: bash
salt '*' btrfs.subvolume_list /var/volumes/tmp
salt '*' btrfs.subvolume_list /var/volumes/tmp path=True
salt '*' btrfs.subvolume_list /var/volumes/tmp sort='[-rootid]'
"""
if sort and type(sort) is not list:
raise CommandExecutionError("Sort parameter must be a list")
valid_sorts = [
"".join((order, attrib))
for order, attrib in itertools.product(
("-", "", "+"), ("rootid", "gen", "ogen", "path")
)
]
if sort and not all(s in valid_sorts for s in sort):
raise CommandExecutionError("Value for sort not recognized")
cmd = ["btrfs", "subvolume", "list"]
params = (
(parent_id, "-p"),
(absolute, "-a"),
(ogeneration, "-c"),
(generation, "-g"),
(subvolumes, "-o"),
(uuid, "-u"),
(parent_uuid, "-q"),
(sent_subvolume_uuid, "-R"),
(snapshots, "-s"),
(readonly, "-r"),
(deleted, "-d"),
)
cmd.extend(p[1] for p in params if p[0])
if generation_cmp:
cmd.extend(["-G", generation_cmp])
if ogeneration_cmp:
cmd.extend(["-C", ogeneration_cmp])
# We already validated the content of the list
if sort:
cmd.append("--sort={}".format(",".join(sort)))
cmd.append(path)
res = __salt__["cmd.run_all"](cmd)
salt.utils.fsutils._verify_run(res)
# Parse the output. ID and gen are always at the beginning, and
# path is always at the end. There is only one column that
# contains space (top level), and the path value can also have
# spaces. The issue is that we do not know how many spaces do we
# have in the path name, so any classic solution based on split
# will fail.
#
# This list is in order.
columns = (
"ID",
"gen",
"cgen",
"parent",
"top level",
"otime",
"parent_uuid",
"received_uuid",
"uuid",
"path",
)
result = []
for line in res["stdout"].splitlines():
table = {}
for key in columns:
value, line = _pop(line, key, key == "path")
if value:
table[key.lower()] = value
# If line is not empty here, we are not able to parse it
if not line:
result.append(table)
return result
def subvolume_set_default(subvolid, path):
"""
Set the subvolume as default
subvolid
ID of the new default subvolume
path
Mount point for the filesystem
CLI Example:
.. code-block:: bash
salt '*' btrfs.subvolume_set_default 257 /var/volumes/tmp
"""
cmd = ["btrfs", "subvolume", "set-default", subvolid, path]
res = __salt__["cmd.run_all"](cmd)
salt.utils.fsutils._verify_run(res)
return True
def subvolume_show(path):
"""
Show information of a given subvolume
path
Mount point for the filesystem
CLI Example:
.. code-block:: bash
salt '*' btrfs.subvolume_show /var/volumes/tmp
"""
cmd = ["btrfs", "subvolume", "show", path]
res = __salt__["cmd.run_all"](cmd)
salt.utils.fsutils._verify_run(res)
result = {}
table = {}
# The real name is the first line, later there is a table of
# values separated with colon.
stdout = res["stdout"].splitlines()
key = stdout.pop(0)
result[key.strip()] = table
for line in stdout:
key, value = line.split(":", 1)
table[key.lower().strip()] = value.strip()
return result
def subvolume_snapshot(source, dest=None, name=None, read_only=False):
"""
Create a snapshot of a source subvolume
source
Source subvolume from where to create the snapshot
dest
If only dest is given, the subvolume will be named as the
basename of the source
name
Name of the snapshot
read_only
Create a read only snapshot
CLI Example:
.. code-block:: bash
salt '*' btrfs.subvolume_snapshot /var/volumes/tmp dest=/.snapshots
salt '*' btrfs.subvolume_snapshot /var/volumes/tmp name=backup
"""
if not dest and not name:
raise CommandExecutionError("Provide parameter dest, name, or both")
cmd = ["btrfs", "subvolume", "snapshot"]
if read_only:
cmd.append("-r")
if dest and not name:
cmd.append(dest)
if dest and name:
name = os.path.join(dest, name)
if name:
cmd.append(name)
res = __salt__["cmd.run_all"](cmd)
salt.utils.fsutils._verify_run(res)
return True
def subvolume_sync(path, subvolids=None, sleep=None):
"""
    Wait until the given subvolumes are completely removed from the
filesystem after deletion.
path
Mount point for the filesystem
subvolids
List of IDs of subvolumes to wait for
sleep
        Sleep N seconds between checks (default: 1)
CLI Example:
.. code-block:: bash
salt '*' btrfs.subvolume_sync /var/volumes/tmp
salt '*' btrfs.subvolume_sync /var/volumes/tmp subvolids='[257]'
"""
if subvolids and type(subvolids) is not list:
raise CommandExecutionError("Subvolids parameter must be a list")
cmd = ["btrfs", "subvolume", "sync"]
if sleep:
cmd.extend(["-s", sleep])
cmd.append(path)
if subvolids:
cmd.extend(subvolids)
res = __salt__["cmd.run_all"](cmd)
salt.utils.fsutils._verify_run(res)
return True
| 27.05813 | 107 | 0.593787 |
4a1d39b1baec2384febc21fe2f329b5504f2d503 | 17,151 | py | Python | pysnmp/H3C-VOICE-DIAL-CONTROL-MIB.py | agustinhenze/mibs.snmplabs.com | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | ["Apache-2.0"] | 11 | 2021-02-02T16:27:16.000Z | 2021-08-31T06:22:49.000Z | pysnmp/H3C-VOICE-DIAL-CONTROL-MIB.py | agustinhenze/mibs.snmplabs.com | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | ["Apache-2.0"] | 75 | 2021-02-24T17:30:31.000Z | 2021-12-08T00:01:18.000Z | pysnmp/H3C-VOICE-DIAL-CONTROL-MIB.py | agustinhenze/mibs.snmplabs.com | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | ["Apache-2.0"] | 10 | 2019-04-30T05:51:36.000Z | 2022-02-16T03:33:41.000Z |
#
# PySNMP MIB module H3C-VOICE-DIAL-CONTROL-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/H3C-VOICE-DIAL-CONTROL-MIB
# Produced by pysmi-0.3.4 at Mon Apr 29 19:11:21 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
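# Note: 'mibBuilder' is expected to be injected by the pysnmp MIB loader, which
# executes pysmi-generated modules with it already in scope, hence no import here.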
ObjectIdentifier, OctetString, Integer = mibBuilder.importSymbols("ASN1", "ObjectIdentifier", "OctetString", "Integer")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
SingleValueConstraint, ConstraintsIntersection, ConstraintsUnion, ValueRangeConstraint, ValueSizeConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "SingleValueConstraint", "ConstraintsIntersection", "ConstraintsUnion", "ValueRangeConstraint", "ValueSizeConstraint")
AbsoluteCounter32, = mibBuilder.importSymbols("DIAL-CONTROL-MIB", "AbsoluteCounter32")
h3cVoice, = mibBuilder.importSymbols("HUAWEI-3COM-OID-MIB", "h3cVoice")
InetAddress, InetAddressType = mibBuilder.importSymbols("INET-ADDRESS-MIB", "InetAddress", "InetAddressType")
ModuleCompliance, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "NotificationGroup")
Integer32, MibIdentifier, iso, TimeTicks, Counter32, NotificationType, IpAddress, Unsigned32, Gauge32, Counter64, Bits, ObjectIdentity, MibScalar, MibTable, MibTableRow, MibTableColumn, ModuleIdentity = mibBuilder.importSymbols("SNMPv2-SMI", "Integer32", "MibIdentifier", "iso", "TimeTicks", "Counter32", "NotificationType", "IpAddress", "Unsigned32", "Gauge32", "Counter64", "Bits", "ObjectIdentity", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "ModuleIdentity")
DisplayString, RowStatus, TextualConvention, TruthValue = mibBuilder.importSymbols("SNMPv2-TC", "DisplayString", "RowStatus", "TextualConvention", "TruthValue")
h3cVoiceEntityControl = ModuleIdentity((1, 3, 6, 1, 4, 1, 2011, 10, 2, 39, 14))
h3cVoiceEntityControl.setRevisions(('2009-04-16 00:00',))
if mibBuilder.loadTexts: h3cVoiceEntityControl.setLastUpdated('200904160000Z')
if mibBuilder.loadTexts: h3cVoiceEntityControl.setOrganization('Hangzhou H3C Technologies Co., Ltd.')
class H3cCodecType(TextualConvention, Integer32):
status = 'current'
subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12))
namedValues = NamedValues(("g711a", 1), ("g711u", 2), ("g723r53", 3), ("g723r63", 4), ("g729r8", 5), ("g729a", 6), ("g726r16", 7), ("g726r24", 8), ("g726r32", 9), ("g726r40", 10), ("unknown", 11), ("g729br8", 12))
class H3cOutBandMode(TextualConvention, Integer32):
status = 'current'
subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6))
namedValues = NamedValues(("voice", 1), ("h245AlphaNumeric", 2), ("h225", 3), ("sip", 4), ("nte", 5), ("vofr", 6))
class H3cFaxProtocolType(TextualConvention, Integer32):
status = 'current'
subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))
namedValues = NamedValues(("t38", 1), ("standardt38", 2), ("pcmG711alaw", 3), ("pcmG711ulaw", 4))
class H3cFaxBaudrateType(TextualConvention, Integer32):
status = 'current'
subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6))
namedValues = NamedValues(("disable", 1), ("voice", 2), ("b2400", 3), ("b4800", 4), ("b9600", 5), ("b14400", 6))
class H3cFaxTrainMode(TextualConvention, Integer32):
status = 'current'
subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2))
namedValues = NamedValues(("local", 1), ("ppp", 2))
class H3cRegisterdStatus(TextualConvention, Integer32):
status = 'current'
subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5))
namedValues = NamedValues(("other", 1), ("offline", 2), ("online", 3), ("login", 4), ("logout", 5))
h3cVoEntityObjects = MibIdentifier((1, 3, 6, 1, 4, 1, 2011, 10, 2, 39, 14, 1))
h3cVoEntityCreateTable = MibTable((1, 3, 6, 1, 4, 1, 2011, 10, 2, 39, 14, 1, 1), )
if mibBuilder.loadTexts: h3cVoEntityCreateTable.setStatus('current')
h3cVoEntityCreateEntry = MibTableRow((1, 3, 6, 1, 4, 1, 2011, 10, 2, 39, 14, 1, 1, 1), ).setIndexNames((0, "H3C-VOICE-DIAL-CONTROL-MIB", "h3cVoEntityIndex"))
if mibBuilder.loadTexts: h3cVoEntityCreateEntry.setStatus('current')
h3cVoEntityIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 10, 2, 39, 14, 1, 1, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 2147483647)))
if mibBuilder.loadTexts: h3cVoEntityIndex.setStatus('current')
h3cVoEntityType = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 10, 2, 39, 14, 1, 1, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("pots", 1), ("voip", 2), ("vofr", 3)))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: h3cVoEntityType.setStatus('current')
h3cVoEntityRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 10, 2, 39, 14, 1, 1, 1, 3), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: h3cVoEntityRowStatus.setStatus('current')
h3cVoEntityCommonConfigTable = MibTable((1, 3, 6, 1, 4, 1, 2011, 10, 2, 39, 14, 1, 2), )
if mibBuilder.loadTexts: h3cVoEntityCommonConfigTable.setStatus('current')
h3cVoEntityCommonConfigEntry = MibTableRow((1, 3, 6, 1, 4, 1, 2011, 10, 2, 39, 14, 1, 2, 1), ).setIndexNames((0, "H3C-VOICE-DIAL-CONTROL-MIB", "h3cVoEntityCfgIndex"))
if mibBuilder.loadTexts: h3cVoEntityCommonConfigEntry.setStatus('current')
h3cVoEntityCfgIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 10, 2, 39, 14, 1, 2, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 2147483647)))
if mibBuilder.loadTexts: h3cVoEntityCfgIndex.setStatus('current')
h3cVoEntityCfgCodec1st = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 10, 2, 39, 14, 1, 2, 1, 2), H3cCodecType()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: h3cVoEntityCfgCodec1st.setStatus('current')
h3cVoEntityCfgCodec2nd = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 10, 2, 39, 14, 1, 2, 1, 3), H3cCodecType()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: h3cVoEntityCfgCodec2nd.setStatus('current')
h3cVoEntityCfgCodec3rd = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 10, 2, 39, 14, 1, 2, 1, 4), H3cCodecType()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: h3cVoEntityCfgCodec3rd.setStatus('current')
h3cVoEntityCfgCodec4th = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 10, 2, 39, 14, 1, 2, 1, 5), H3cCodecType()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: h3cVoEntityCfgCodec4th.setStatus('current')
h3cVoEntityCfgDSCP = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 10, 2, 39, 14, 1, 2, 1, 6), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 63))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: h3cVoEntityCfgDSCP.setStatus('current')
h3cVoEntityCfgVADEnable = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 10, 2, 39, 14, 1, 2, 1, 7), TruthValue()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: h3cVoEntityCfgVADEnable.setStatus('current')
h3cVoEntityCfgOutbandMode = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 10, 2, 39, 14, 1, 2, 1, 8), H3cOutBandMode()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: h3cVoEntityCfgOutbandMode.setStatus('current')
h3cVoEntityCfgFaxLevel = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 10, 2, 39, 14, 1, 2, 1, 9), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-60, -3))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: h3cVoEntityCfgFaxLevel.setStatus('current')
h3cVoEntityCfgFaxBaudrate = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 10, 2, 39, 14, 1, 2, 1, 10), H3cFaxBaudrateType()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: h3cVoEntityCfgFaxBaudrate.setStatus('current')
h3cVoEntityCfgFaxLocalTrainPara = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 10, 2, 39, 14, 1, 2, 1, 11), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 100))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: h3cVoEntityCfgFaxLocalTrainPara.setStatus('current')
h3cVoEntityCfgFaxProtocol = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 10, 2, 39, 14, 1, 2, 1, 12), H3cFaxProtocolType()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: h3cVoEntityCfgFaxProtocol.setStatus('current')
h3cVoEntityCfgFaxHRPackNum = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 10, 2, 39, 14, 1, 2, 1, 13), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 2))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: h3cVoEntityCfgFaxHRPackNum.setStatus('current')
h3cVoEntityCfgFaxLRPackNum = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 10, 2, 39, 14, 1, 2, 1, 14), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 5))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: h3cVoEntityCfgFaxLRPackNum.setStatus('current')
h3cVoEntityCfgFaxSendNSFEnable = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 10, 2, 39, 14, 1, 2, 1, 15), TruthValue()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: h3cVoEntityCfgFaxSendNSFEnable.setStatus('current')
h3cVoEntityCfgFaxTrainMode = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 10, 2, 39, 14, 1, 2, 1, 16), H3cFaxTrainMode()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: h3cVoEntityCfgFaxTrainMode.setStatus('current')
h3cVoEntityCfgFaxEcm = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 10, 2, 39, 14, 1, 2, 1, 17), TruthValue()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: h3cVoEntityCfgFaxEcm.setStatus('current')
h3cVoEntityCfgPriority = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 10, 2, 39, 14, 1, 2, 1, 18), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 10))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: h3cVoEntityCfgPriority.setStatus('current')
h3cVoEntityCfgDescription = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 10, 2, 39, 14, 1, 2, 1, 19), OctetString().subtype(subtypeSpec=ValueSizeConstraint(0, 80))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: h3cVoEntityCfgDescription.setStatus('current')
h3cVoPOTSEntityConfigTable = MibTable((1, 3, 6, 1, 4, 1, 2011, 10, 2, 39, 14, 1, 3), )
if mibBuilder.loadTexts: h3cVoPOTSEntityConfigTable.setStatus('current')
h3cVoPOTSEntityConfigEntry = MibTableRow((1, 3, 6, 1, 4, 1, 2011, 10, 2, 39, 14, 1, 3, 1), ).setIndexNames((0, "H3C-VOICE-DIAL-CONTROL-MIB", "h3cVoPOTSEntityConfigIndex"))
if mibBuilder.loadTexts: h3cVoPOTSEntityConfigEntry.setStatus('current')
h3cVoPOTSEntityConfigIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 10, 2, 39, 14, 1, 3, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 2147483647)))
if mibBuilder.loadTexts: h3cVoPOTSEntityConfigIndex.setStatus('current')
h3cVoPOTSEntityConfigPrefix = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 10, 2, 39, 14, 1, 3, 1, 2), OctetString().subtype(subtypeSpec=ValueSizeConstraint(0, 31))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: h3cVoPOTSEntityConfigPrefix.setStatus('current')
h3cVoPOTSEntityConfigSubLine = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 10, 2, 39, 14, 1, 3, 1, 3), OctetString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: h3cVoPOTSEntityConfigSubLine.setStatus('current')
h3cVoPOTSEntityConfigSendNum = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 10, 2, 39, 14, 1, 3, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(ValueRangeConstraint(0, 31), ValueRangeConstraint(65534, 65534), ValueRangeConstraint(65535, 65535), ))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: h3cVoPOTSEntityConfigSendNum.setStatus('current')
h3cVoVoIPEntityConfigTable = MibTable((1, 3, 6, 1, 4, 1, 2011, 10, 2, 39, 14, 1, 4), )
if mibBuilder.loadTexts: h3cVoVoIPEntityConfigTable.setStatus('current')
h3cVoVoIPEntityConfigEntry = MibTableRow((1, 3, 6, 1, 4, 1, 2011, 10, 2, 39, 14, 1, 4, 1), ).setIndexNames((0, "H3C-VOICE-DIAL-CONTROL-MIB", "h3cVoVoIPEntityCfgIndex"))
if mibBuilder.loadTexts: h3cVoVoIPEntityConfigEntry.setStatus('current')
h3cVoVoIPEntityCfgIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 10, 2, 39, 14, 1, 4, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 2147483647)))
if mibBuilder.loadTexts: h3cVoVoIPEntityCfgIndex.setStatus('current')
h3cVoVoIPEntityCfgTargetType = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 10, 2, 39, 14, 1, 4, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5))).clone(namedValues=NamedValues(("unknown", 1), ("ras", 2), ("h323IpAddress", 3), ("sipIpAddress", 4), ("sipProxy", 5)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: h3cVoVoIPEntityCfgTargetType.setStatus('current')
h3cVoVoIPEntityCfgTargetAddrType = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 10, 2, 39, 14, 1, 4, 1, 3), InetAddressType()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: h3cVoVoIPEntityCfgTargetAddrType.setStatus('current')
h3cVoVoIPEntityCfgTargetAddr = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 10, 2, 39, 14, 1, 4, 1, 4), InetAddress()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: h3cVoVoIPEntityCfgTargetAddr.setStatus('current')
h3cVoEntityNumberTable = MibTable((1, 3, 6, 1, 4, 1, 2011, 10, 2, 39, 14, 1, 5), )
if mibBuilder.loadTexts: h3cVoEntityNumberTable.setStatus('current')
h3cVoEntityNumberEntry = MibTableRow((1, 3, 6, 1, 4, 1, 2011, 10, 2, 39, 14, 1, 5, 1), ).setIndexNames((0, "H3C-VOICE-DIAL-CONTROL-MIB", "h3cVoEntityIndex"))
if mibBuilder.loadTexts: h3cVoEntityNumberEntry.setStatus('current')
h3cVoEntityNumberAuthUser = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 10, 2, 39, 14, 1, 5, 1, 1), OctetString().subtype(subtypeSpec=ValueSizeConstraint(0, 63))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: h3cVoEntityNumberAuthUser.setStatus('current')
h3cVoEntityNumberPasswordType = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 10, 2, 39, 14, 1, 5, 1, 2), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: h3cVoEntityNumberPasswordType.setStatus('current')
h3cVoEntityNumberPassword = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 10, 2, 39, 14, 1, 5, 1, 3), OctetString().subtype(subtypeSpec=ConstraintsUnion(ValueSizeConstraint(0, 16), ValueSizeConstraint(24, 24), ))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: h3cVoEntityNumberPassword.setStatus('current')
h3cVoEntityNumberStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 10, 2, 39, 14, 1, 5, 1, 4), H3cRegisterdStatus()).setMaxAccess("readonly")
if mibBuilder.loadTexts: h3cVoEntityNumberStatus.setStatus('current')
h3cVoEntityNumberExpires = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 10, 2, 39, 14, 1, 5, 1, 5), Integer32()).setUnits('seconds').setMaxAccess("readonly")
if mibBuilder.loadTexts: h3cVoEntityNumberExpires.setStatus('current')
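# Note: the exportSymbols call below registers every managed object defined
# above under the module name "H3C-VOICE-DIAL-CONTROL-MIB" in the MIB builder,
# so other pysnmp-generated modules can resolve them by (module, symbol) name.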
mibBuilder.exportSymbols("H3C-VOICE-DIAL-CONTROL-MIB", h3cVoEntityCfgFaxEcm=h3cVoEntityCfgFaxEcm, h3cVoEntityCfgFaxHRPackNum=h3cVoEntityCfgFaxHRPackNum, h3cVoEntityCommonConfigTable=h3cVoEntityCommonConfigTable, H3cCodecType=H3cCodecType, h3cVoEntityCfgDSCP=h3cVoEntityCfgDSCP, h3cVoPOTSEntityConfigPrefix=h3cVoPOTSEntityConfigPrefix, h3cVoVoIPEntityCfgTargetAddrType=h3cVoVoIPEntityCfgTargetAddrType, h3cVoVoIPEntityCfgIndex=h3cVoVoIPEntityCfgIndex, h3cVoEntityCfgPriority=h3cVoEntityCfgPriority, h3cVoEntityCfgCodec2nd=h3cVoEntityCfgCodec2nd, h3cVoEntityNumberPasswordType=h3cVoEntityNumberPasswordType, H3cRegisterdStatus=H3cRegisterdStatus, h3cVoPOTSEntityConfigIndex=h3cVoPOTSEntityConfigIndex, H3cOutBandMode=H3cOutBandMode, h3cVoEntityCfgCodec1st=h3cVoEntityCfgCodec1st, h3cVoEntityRowStatus=h3cVoEntityRowStatus, H3cFaxProtocolType=H3cFaxProtocolType, h3cVoEntityCfgOutbandMode=h3cVoEntityCfgOutbandMode, h3cVoEntityCfgFaxLRPackNum=h3cVoEntityCfgFaxLRPackNum, h3cVoEntityCfgFaxLevel=h3cVoEntityCfgFaxLevel, h3cVoEntityNumberTable=h3cVoEntityNumberTable, h3cVoPOTSEntityConfigSubLine=h3cVoPOTSEntityConfigSubLine, h3cVoiceEntityControl=h3cVoiceEntityControl, h3cVoEntityCfgFaxSendNSFEnable=h3cVoEntityCfgFaxSendNSFEnable, h3cVoEntityCfgCodec3rd=h3cVoEntityCfgCodec3rd, h3cVoVoIPEntityCfgTargetAddr=h3cVoVoIPEntityCfgTargetAddr, h3cVoEntityCfgFaxProtocol=h3cVoEntityCfgFaxProtocol, h3cVoEntityCfgFaxTrainMode=h3cVoEntityCfgFaxTrainMode, h3cVoEntityCfgDescription=h3cVoEntityCfgDescription, h3cVoPOTSEntityConfigSendNum=h3cVoPOTSEntityConfigSendNum, h3cVoVoIPEntityConfigTable=h3cVoVoIPEntityConfigTable, h3cVoVoIPEntityCfgTargetType=h3cVoVoIPEntityCfgTargetType, h3cVoEntityCfgCodec4th=h3cVoEntityCfgCodec4th, h3cVoEntityType=h3cVoEntityType, h3cVoEntityCfgIndex=h3cVoEntityCfgIndex, h3cVoEntityCreateEntry=h3cVoEntityCreateEntry, h3cVoPOTSEntityConfigEntry=h3cVoPOTSEntityConfigEntry, h3cVoVoIPEntityConfigEntry=h3cVoVoIPEntityConfigEntry, h3cVoPOTSEntityConfigTable=h3cVoPOTSEntityConfigTable, H3cFaxTrainMode=H3cFaxTrainMode, h3cVoEntityCfgFaxLocalTrainPara=h3cVoEntityCfgFaxLocalTrainPara, h3cVoEntityNumberExpires=h3cVoEntityNumberExpires, h3cVoEntityNumberEntry=h3cVoEntityNumberEntry, H3cFaxBaudrateType=H3cFaxBaudrateType, h3cVoEntityCfgVADEnable=h3cVoEntityCfgVADEnable, h3cVoEntityNumberStatus=h3cVoEntityNumberStatus, h3cVoEntityObjects=h3cVoEntityObjects, h3cVoEntityCommonConfigEntry=h3cVoEntityCommonConfigEntry, PYSNMP_MODULE_ID=h3cVoiceEntityControl, h3cVoEntityCfgFaxBaudrate=h3cVoEntityCfgFaxBaudrate, h3cVoEntityCreateTable=h3cVoEntityCreateTable, h3cVoEntityNumberPassword=h3cVoEntityNumberPassword, h3cVoEntityNumberAuthUser=h3cVoEntityNumberAuthUser, h3cVoEntityIndex=h3cVoEntityIndex)
| 119.937063 | 2,713 | 0.770159 |
4a1d3a16b75116f404e8951e5669211dbd110f25
| 5,504 |
py
|
Python
|
leasing/viewsets/invoice.py
|
suutari-ai/mvj
|
c39dbc692afcb3b26366783414c2d5a88a57b25a
|
[
"MIT"
] | 1 |
2021-01-12T08:14:10.000Z
|
2021-01-12T08:14:10.000Z
|
leasing/viewsets/invoice.py
|
suutari-ai/mvj
|
c39dbc692afcb3b26366783414c2d5a88a57b25a
|
[
"MIT"
] | 249 |
2017-04-18T14:00:13.000Z
|
2022-03-30T12:18:03.000Z
|
leasing/viewsets/invoice.py
|
suutari-ai/mvj
|
c39dbc692afcb3b26366783414c2d5a88a57b25a
|
[
"MIT"
] | 7 |
2017-04-18T08:43:54.000Z
|
2021-07-28T07:29:30.000Z
|
from django.utils.translation import ugettext_lazy as _
from django_filters.rest_framework import DjangoFilterBackend
from django_filters.widgets import BooleanWidget
from rest_framework.exceptions import ValidationError
from rest_framework.viewsets import ReadOnlyModelViewSet
from field_permissions.viewsets import FieldPermissionsViewsetMixin
from leasing.enums import InvoiceState, InvoiceType
from leasing.filters import (
CoalesceOrderingFilter,
InvoiceFilter,
InvoiceNoteFilter,
InvoiceRowFilter,
InvoiceSetFilter,
)
from leasing.models import Invoice, Lease
from leasing.models.invoice import InvoiceNote, InvoiceRow, InvoiceSet, ReceivableType
from leasing.serializers.invoice import (
CreditNoteUpdateSerializer,
GeneratedInvoiceUpdateSerializer,
InvoiceCreateSerializer,
InvoiceNoteCreateUpdateSerializer,
InvoiceNoteSerializer,
InvoiceRowSerializer,
InvoiceSerializer,
InvoiceSerializerWithSuccinctLease,
InvoiceSetSerializer,
InvoiceUpdateSerializer,
ReceivableTypeSerializer,
SentToSapInvoiceUpdateSerializer,
)
from .utils import AtomicTransactionModelViewSet
class InvoiceViewSet(FieldPermissionsViewsetMixin, AtomicTransactionModelViewSet):
queryset = Invoice.objects.all()
serializer_class = InvoiceSerializer
filterset_class = InvoiceFilter
filter_backends = (DjangoFilterBackend, CoalesceOrderingFilter)
ordering_fields = (
"sent_to_sap_at",
"recipient_name",
"number",
"due_date",
"total_amount",
"billed_amount",
"lease__identifier__type__identifier",
"lease__identifier__municipality__identifier",
"lease__identifier__district__identifier",
"lease__identifier__sequence",
)
coalesce_ordering = {"recipient_name": ("recipient__name", "recipient__last_name")}
def get_queryset(self):
queryset = Invoice.objects.select_related("recipient").prefetch_related(
"rows__receivable_type",
"rows",
"rows__tenant",
"rows__tenant__tenantcontact_set",
"rows__tenant__tenantcontact_set__contact",
"payments",
"credit_invoices",
"interest_invoices",
)
return queryset
def get_serializer_class(self):
if self.action == "create":
return InvoiceCreateSerializer
if self.action in ("update", "partial_update", "metadata"):
if "pk" in self.kwargs:
instance = self.get_object()
if instance:
if instance.sent_to_sap_at:
return SentToSapInvoiceUpdateSerializer
if instance.type == InvoiceType.CREDIT_NOTE:
return CreditNoteUpdateSerializer
if instance.generated:
return GeneratedInvoiceUpdateSerializer
return InvoiceUpdateSerializer
if self.request.query_params.get("going_to_sap"):
boolean_widget = BooleanWidget()
# check passed value against widget's truthy values
if boolean_widget.value_from_datadict(
self.request.query_params, None, "going_to_sap"
):
return InvoiceSerializerWithSuccinctLease
return InvoiceSerializer
def create(self, request, *args, **kwargs):
lease = Lease.objects.get(pk=request.data.get("lease"))
if not lease.is_invoicing_enabled:
raise ValidationError(
_("Can't create invoices if invoicing is not enabled.")
)
return super().create(request, *args, **kwargs)
def update(self, request, *args, **kwargs):
instance = self.get_object()
if (
instance.sent_to_sap_at
and self.get_serializer_class() is not SentToSapInvoiceUpdateSerializer
):
raise ValidationError(_("Can't edit invoices that have been sent to SAP"))
if instance.state == InvoiceState.REFUNDED:
raise ValidationError(_("Can't edit fully refunded invoices"))
return super().update(request, *args, **kwargs)
def destroy(self, request, *args, **kwargs):
instance = self.get_object()
if instance.number:
raise ValidationError(_("Can't delete numbered invoices"))
if instance.sent_to_sap_at:
raise ValidationError(_("Can't delete invoices that have been sent to SAP"))
return super().destroy(request, *args, **kwargs)
class InvoiceNoteViewSet(FieldPermissionsViewsetMixin, AtomicTransactionModelViewSet):
queryset = InvoiceNote.objects.all()
serializer_class = InvoiceNoteSerializer
filterset_class = InvoiceNoteFilter
def get_serializer_class(self):
if self.action in ("create", "update", "partial_update", "metadata"):
return InvoiceNoteCreateUpdateSerializer
return InvoiceNoteSerializer
class InvoiceRowViewSet(FieldPermissionsViewsetMixin, ReadOnlyModelViewSet):
queryset = InvoiceRow.objects.all()
serializer_class = InvoiceRowSerializer
filterset_class = InvoiceRowFilter
class InvoiceSetViewSet(ReadOnlyModelViewSet):
queryset = InvoiceSet.objects.all()
serializer_class = InvoiceSetSerializer
filterset_class = InvoiceSetFilter
class ReceivableTypeViewSet(ReadOnlyModelViewSet):
queryset = ReceivableType.objects.all()
serializer_class = ReceivableTypeSerializer
| 34.186335 | 88 | 0.695131 |
4a1d3af5fcd49cbee30ad103df8b678188f0e46f
| 1,198 |
py
|
Python
|
apps/adopcion/forms.py
|
freygeth/refugiobeta
|
6f350b7577a816e028de93cbc187fbf17ce056d3
|
[
"Unlicense"
] | null | null | null |
apps/adopcion/forms.py
|
freygeth/refugiobeta
|
6f350b7577a816e028de93cbc187fbf17ce056d3
|
[
"Unlicense"
] | null | null | null |
apps/adopcion/forms.py
|
freygeth/refugiobeta
|
6f350b7577a816e028de93cbc187fbf17ce056d3
|
[
"Unlicense"
] | null | null | null |
# -*- encoding:utf-8 -*-
from django import forms
from adopcion.models import Persona, Solicitud
class PersonaForm(forms.ModelForm):
class Meta:
model = Persona
fields = [
'nombre',
'apellidos',
'edad',
'telefono',
'email',
'domicilio',
]
labels = {
'nombre':'Nombre',
'apellidos':'Apellidos',
'edad':'Edad',
'telefono':'Teléfono',
'email':'Correo Electrónico',
'domicilio':'Domicilio',
}
widgets= {
'nombre':forms.TextInput(attrs={'class':'form-control'}),
'apellidos':forms.TextInput(attrs={'class':'form-control'}),
'edad':forms.TextInput(attrs={'class':'form-control'}),
'telefono':forms.TextInput(attrs={'class':'form-control'}),
'email':forms.TextInput(attrs={'class':'form-control'}),
'domicilio':forms.Textarea(attrs={'class':'form-control'}),
}
class SolicitudForm(forms.ModelForm):
class Meta:
model = Solicitud
fields = [
'numero_mascotas',
'razones',
]
labels = {
'numero_mascotas':'Numero de mascotas',
'razones' : 'Razones para adoptar'
}
widgets = {
'numero_mascotas':forms.TextInput(attrs={'class':'form-control'}),
'razones':forms.Textarea(attrs={'class':'form-control'}),
}
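# Hedged usage sketch (view code assumed, not part of this file): both forms
# are typically bound to the same request data and validated together, e.g.
#   persona_form = PersonaForm(request.POST or None)
#   solicitud_form = SolicitudForm(request.POST or None)
#   if persona_form.is_valid() and solicitud_form.is_valid():
#       persona = persona_form.save()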
| 24.44898 | 69 | 0.643573 |
4a1d3b6c207c7ff1c680842e4a121c75adfa4e03
| 1,293 |
py
|
Python
|
home/migrations/0001_initial.py
|
mhdSharuk/CodePanda
|
55ce8e271378df5d27ee7b3d681cca69e0c2ca83
|
[
"MIT"
] | null | null | null |
home/migrations/0001_initial.py
|
mhdSharuk/CodePanda
|
55ce8e271378df5d27ee7b3d681cca69e0c2ca83
|
[
"MIT"
] | null | null | null |
home/migrations/0001_initial.py
|
mhdSharuk/CodePanda
|
55ce8e271378df5d27ee7b3d681cca69e0c2ca83
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.2 on 2021-04-12 09:51
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Question',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=100)),
('question', models.TextField()),
('date_posted', models.DateTimeField(default=django.utils.timezone.now)),
('votes', models.IntegerField(default=0)),
],
),
migrations.CreateModel(
name='Answer',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('answer', models.TextField()),
('date_posted', models.DateTimeField(default=django.utils.timezone.now)),
('votes', models.IntegerField(default=0)),
('answer_for_ques', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='home.question')),
],
),
]
| 34.945946 | 120 | 0.58778 |
4a1d3c6845dccb0a3e07e3f1d3a38007c373368d
| 7,278 |
py
|
Python
|
keras/optimizers/optimizer_experimental/sgd.py
|
shraddhazpy/keras
|
21a78464c191c40a90ed4e3ddfed747ae994703e
|
[
"Apache-2.0"
] | 1 |
2020-02-02T04:43:33.000Z
|
2020-02-02T04:43:33.000Z
|
keras/optimizers/optimizer_experimental/sgd.py
|
CloudboySolutions/keras
|
b96518a22bfd92a29811e507dec0b34248a8a3f5
|
[
"Apache-2.0"
] | null | null | null |
keras/optimizers/optimizer_experimental/sgd.py
|
CloudboySolutions/keras
|
b96518a22bfd92a29811e507dec0b34248a8a3f5
|
[
"Apache-2.0"
] | 1 |
2020-12-13T22:14:48.000Z
|
2020-12-13T22:14:48.000Z
|
# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""SGD optimizer implementation."""
from keras.optimizers.optimizer_experimental import optimizer
from keras.utils import generic_utils
import tensorflow.compat.v2 as tf
# pylint: disable=g-direct-tensorflow-import
from tensorflow.python.util.tf_export import keras_export
@generic_utils.register_keras_serializable()
@keras_export('keras.optimizers.experimental.SGD', v1=[])
class SGD(optimizer.Optimizer):
r"""Gradient descent (with momentum) optimizer.
Update rule for parameter `w` with gradient `g` when `momentum` is 0:
```python
w = w - learning_rate * g
```
Update rule when `momentum` is larger than 0:
```python
velocity = momentum * velocity - learning_rate * g
w = w + velocity
```
When `nesterov=True`, this rule becomes:
```python
velocity = momentum * velocity - learning_rate * g
w = w + momentum * velocity - learning_rate * g
```
Attributes:
learning_rate: A `Tensor`, floating point value, or a schedule that is a
`tf.keras.optimizers.schedules.LearningRateSchedule`, or a callable
that takes no arguments and returns the actual value to use. The
      learning rate. Defaults to 0.01.
    momentum: float hyperparameter >= 0 that accelerates gradient descent in
      the relevant direction and dampens oscillations. Defaults to 0, i.e.,
      vanilla gradient descent.
nesterov: boolean. Whether to apply Nesterov momentum.
Defaults to `False`.
clipnorm: see the `clipnorm` argument of `optimizer_experimental.Optimizer`.
clipvalue: see the `clipvalue` argument of
`optimizer_experimental.Optimizer`.
global_clipnorm: see the `global_clipnorm` argument of
`optimizer_experimental.Optimizer`.
use_ema: see the `use_ema` argument of `optimizer_experimental.Optimizer`.
ema_momentum: see the `ema_momentum` argument of
`optimizer_experimental.Optimizer`.
ema_overwrite_frequency: see the `ema_overwrite_frequency` argument of
`optimizer_experimental.Optimizer`.
jit_compile: see the `jit_compile` argument of
`optimizer_experimental.Optimizer`.
name: Optional name prefix for the operations created when applying
gradients. Defaults to `"SGD"`.
**kwargs: see the `**kwargs` argument of `optimizer_experimental.Optimizer`.
Usage:
>>> opt = tf.keras.optimizers.SGD(learning_rate=0.1)
>>> var = tf.Variable(1.0)
>>> loss = lambda: (var ** 2)/2.0 # d(loss)/d(var1) = var1
>>> step_count = opt.minimize(loss, [var]).numpy()
>>> # Step is `- learning_rate * grad`
>>> var.numpy()
0.9
>>> opt = tf.keras.optimizers.SGD(learning_rate=0.1, momentum=0.9)
>>> var = tf.Variable(1.0)
>>> val0 = var.value()
>>> loss = lambda: (var ** 2)/2.0 # d(loss)/d(var1) = var1
>>> # First step is `- learning_rate * grad`
>>> step_count = opt.minimize(loss, [var]).numpy()
>>> val1 = var.value()
>>> (val0 - val1).numpy()
0.1
>>> # On later steps, step-size increases because of momentum
>>> step_count = opt.minimize(loss, [var]).numpy()
>>> val2 = var.value()
>>> (val1 - val2).numpy()
0.18
Reference:
- For `nesterov=True`, See [Sutskever et al., 2013](
http://jmlr.org/proceedings/papers/v28/sutskever13.pdf).
"""
def __init__(self,
learning_rate=0.01,
momentum=0.0,
nesterov=False,
amsgrad=False,
clipnorm=None,
clipvalue=None,
global_clipnorm=None,
use_ema=False,
ema_momentum=0.99,
ema_overwrite_frequency=None,
jit_compile=False,
name='SGD',
**kwargs):
super(SGD, self).__init__(
name=name,
clipnorm=clipnorm,
clipvalue=clipvalue,
global_clipnorm=global_clipnorm,
use_ema=use_ema,
ema_momentum=ema_momentum,
ema_overwrite_frequency=ema_overwrite_frequency,
jit_compile=jit_compile,
**kwargs)
self._learning_rate = self._build_learning_rate(learning_rate)
self.momentum = momentum
self.nesterov = nesterov
if isinstance(momentum, (int, float)) and (momentum < 0 or momentum > 1):
raise ValueError('`momentum` must be between [0, 1].')
def build(self, var_list):
"""Initialize optimizer variables.
SGD optimizer has one variable `momentums`, only set if `self.momentum`
is not 0.
Args:
var_list: list of model variables to build SGD variables on.
"""
super().build(var_list)
if hasattr(self, '_built') and self._built:
return
self.momentums = []
if self.momentum != 0:
for var in var_list:
self.momentums.append(
self.add_variable_from_reference(
model_variable=var, variable_name='m'))
self._built = True
def update_step(self, gradient, variable):
"""Update step given gradient and the associated model variable."""
if self._var_key(variable) not in self._index_dict:
raise KeyError(f'Optimizer cannot recognize variable {variable.name}, '
f'this usually means you are calling an optimizer '
f'previously used on a different model. Please try '
f'creating a new optimizer instance.')
lr = tf.cast(self.learning_rate, variable.dtype)
m = None
var_key = self._var_key(variable)
if self.momentum != 0:
momentum = tf.cast(self.momentum, variable.dtype)
m = self.momentums[self._index_dict[var_key]]
# TODO(b/204321487): Add nesterov acceleration.
if isinstance(gradient, tf.IndexedSlices):
# Sparse gradients.
add_value = tf.IndexedSlices(-gradient.values * lr, gradient.indices)
if m is not None:
m.assign(m * momentum)
m.scatter_add(add_value)
if self.nesterov:
variable.scatter_add(add_value)
variable.assign_add(m * momentum)
else:
variable.assign_add(m)
else:
variable.scatter_add(add_value)
else:
# Dense gradients
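      # Dense update mirrors the docstring formulas:
      #   m <- momentum * m - lr * g          (velocity)
      #   w <- w + m                          (plain momentum)
      #   w <- w + momentum * m - lr * g      (nesterov)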
if m is not None:
m.assign(-gradient * lr + m * momentum)
if self.nesterov:
variable.assign_add(-gradient * lr + m * momentum)
else:
variable.assign_add(m)
else:
variable.assign_add(-gradient * lr)
def get_config(self):
config = super(SGD, self).get_config()
config.update({
'learning_rate': self._serialize_hyperparameter(self._learning_rate),
'momentum': self.momentum,
'nesterov': self.nesterov,
})
return config
| 35.502439 | 80 | 0.650316 |
4a1d3cb18d162cda3365aef674d3e6b03f1b48bd
| 706 |
py
|
Python
|
yamlconf/tests/test_propagate_defaults.py
|
halfak/yamlconf
|
6b84ca8a7f738a1fe6b3b67b30b55acaaa8a56a8
|
[
"MIT"
] | 1 |
2019-01-20T23:03:08.000Z
|
2019-01-20T23:03:08.000Z
|
yamlconf/tests/test_propagate_defaults.py
|
halfak/yamlconf
|
6b84ca8a7f738a1fe6b3b67b30b55acaaa8a56a8
|
[
"MIT"
] | 3 |
2015-12-16T21:05:23.000Z
|
2020-05-27T19:18:39.000Z
|
yamlconf/tests/test_propagate_defaults.py
|
halfak/yamlconf
|
6b84ca8a7f738a1fe6b3b67b30b55acaaa8a56a8
|
[
"MIT"
] | 2 |
2015-09-15T04:38:09.000Z
|
2020-05-27T17:58:26.000Z
|
from nose.tools import eq_
from ..propagate_defaults import propagate_defaults
def test_propagate_defaults():
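    # propagate_defaults is expected to copy every key from a sibling
    # "defaults" mapping into each other entry of the same section, keeping
    # any value an entry already overrides (see '2_foo' -> 'baz': 3 below).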
input = {
'foos': {
'defaults': {
'bar': 1,
'baz': 2
},
'1_foo': {},
'2_foo': {
'baz': 3
}
}
}
expected = {
'foos': {
'defaults': {
'bar': 1,
'baz': 2
},
'1_foo': {
'bar': 1,
'baz': 2
},
'2_foo': {
'bar': 1,
'baz': 3
}
}
}
eq_(propagate_defaults(input), expected)
| 19.081081 | 51 | 0.313031 |
4a1d3d706b3fe6c8f029c6fc3a5ad124a49684c6
| 558 |
py
|
Python
|
var/spack/repos/builtin/packages/sparsehash/package.py
|
adrianjhpc/spack
|
0a9e4fcee57911f2db586aa50c8873d9cca8de92
|
[
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 2 |
2020-10-15T01:08:42.000Z
|
2021-10-18T01:28:18.000Z
|
var/spack/repos/builtin/packages/sparsehash/package.py
|
adrianjhpc/spack
|
0a9e4fcee57911f2db586aa50c8873d9cca8de92
|
[
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 2 |
2019-07-30T10:12:28.000Z
|
2019-12-17T09:02:27.000Z
|
var/spack/repos/builtin/packages/sparsehash/package.py
|
adrianjhpc/spack
|
0a9e4fcee57911f2db586aa50c8873d9cca8de92
|
[
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 5 |
2019-07-30T09:42:14.000Z
|
2021-01-25T05:39:20.000Z
|
# Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Sparsehash(AutotoolsPackage):
"""Sparse and dense hash-tables for C++ by Google"""
homepage = "https://github.com/sparsehash/sparsehash"
url = "https://github.com/sparsehash/sparsehash/archive/sparsehash-2.0.3.tar.gz"
version('2.0.3', sha256='05e986a5c7327796dad742182b2d10805a8d4f511ad090da0490f146c1ff7a8c')
| 37.2 | 95 | 0.752688 |
4a1d3e7a06edf0a09632027365e615554cf24415
| 7,388 |
py
|
Python
|
tests/runTest.py
|
movie-travel-code/cppinsights
|
0cc79032db5e7fad06f3523771ff4a9111a0beef
|
[
"MIT"
] | null | null | null |
tests/runTest.py
|
movie-travel-code/cppinsights
|
0cc79032db5e7fad06f3523771ff4a9111a0beef
|
[
"MIT"
] | null | null | null |
tests/runTest.py
|
movie-travel-code/cppinsights
|
0cc79032db5e7fad06f3523771ff4a9111a0beef
|
[
"MIT"
] | null | null | null |
#! /usr/bin/python
#------------------------------------------------------------------------------
import os
import sys
import subprocess
import re
import argparse
import tempfile
#------------------------------------------------------------------------------
mypath = '.'
def testCompare(tmpFileName, stdout, expectFile, f, args):
expect = open(expectFile, 'r').read()
if args['docker']:
expect = re.sub( r'instantiated from: .*?.cpp:', r'instantiated from: x.cpp:', expect)
if stdout != expect:
print '[FAILED] %s' %(f)
cmd = ['/usr/bin/diff', expectFile, tmpFileName]
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
print stdout
else:
print '[PASSED] %s' %(f)
return True
return False
#------------------------------------------------------------------------------
def testCompile(tmpFileName, f, args, fileName):
cmd = [args['cxx'], '-std=c++1z', '-c', tmpFileName]
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
compileErrorFile = os.path.join(mypath, fileName + '.cerr')
if 0 != p.returncode:
if os.path.isfile(compileErrorFile):
ce = open(compileErrorFile, 'r').read()
stderr = stderr.replace(tmpFileName, '.tmp.cpp')
if ce == stderr:
print '[PASSED] Compile: %s' %(f)
return True
compileErrorFile = os.path.join(mypath, fileName + '.ccerr')
if os.path.isfile(compileErrorFile):
ce = open(compileErrorFile, 'r').read()
stderr = stderr.replace(tmpFileName, '.tmp.cpp')
if ce == stderr:
print '[PASSED] Compile: %s' %(f)
return True
print '[ERROR] Compile failed: %s' %(f)
print stderr
ret = 1
else:
if os.path.isfile(compileErrorFile):
print 'unused file: %s' %(compileErrorFile)
print '[PASSED] Compile: %s' %(f)
return True
return False
#------------------------------------------------------------------------------
def getDefaultIncludeDirs(cxx):
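    # Ask the compiler for its builtin system include directories by
    # preprocessing an empty C++ translation unit in verbose mode and scraping
    # the indented paths it prints to stderr.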
cmd = [cxx, '-E', '-x', 'c++', '-v', '/dev/null']
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
m = re.findall('\n (/.*)', stderr)
includes = []
for x in m:
if -1 != x.find('(framework directory)'):
continue
includes.append('-isystem%s' %(x))
return includes
#------------------------------------------------------------------------------
def main():
parser = argparse.ArgumentParser(description='Description of your program')
parser.add_argument('--insights', help='C++ Insights binary', required=True)
parser.add_argument('--cxx', help='C++ compiler to used', default='/usr/local/clang-current/bin/clang++')
parser.add_argument('--docker', help='Run tests in docker container', action='store_true')
parser.add_argument('--docker-image', help='Docker image name', default='cppinsights-runtime')
parser.add_argument('--failure-is-ok', help='Failing tests are ok', default=False, action='store_true')
parser.add_argument('args', nargs=argparse.REMAINDER)
args = vars(parser.parse_args())
insightsPath = args['insights']
remainingArgs = args['args']
bFailureIsOk = args['failure_is_ok']
if 0 == len(remainingArgs):
cppFiles = [f for f in os.listdir(mypath) if (os.path.isfile(os.path.join(mypath, f)) and f.endswith('.cpp'))]
else:
cppFiles = remainingArgs
if args['docker']:
print 'Running tests in docker'
filesPassed = 0
missingExpected = 0
ret = 0
defaultIncludeDirs = getDefaultIncludeDirs(args['cxx'])
for f in sorted(cppFiles):
fileName = os.path.splitext(f)[0]
expectFile = os.path.join(mypath, fileName + '.expect')
if not os.path.isfile(expectFile):
print 'Missing expect for: %s' %(f)
missingExpected += 1
continue
if args['docker']:
data = open(f, 'r').read()
cmd = ['docker', 'run', '-i', args['docker_image'], insightsPath, '-stdin', 'x.cpp', '--', '-std=c++1z', '-isystem/usr/include/c++/v1/']
p = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = p.communicate(input=data)
else:
cmd = [insightsPath, f, '--', '-std=c++1z'] + defaultIncludeDirs
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
if 0 != p.returncode:
compileErrorFile = os.path.join(mypath, fileName + '.cerr')
if os.path.isfile(compileErrorFile):
ce = open(compileErrorFile, 'r').read()
# Linker errors name the tmp file and not the .tmp.cpp, replace the name here to be able to suppress
# these errors.
ce = re.sub('(.*).cpp:', '.tmp:', ce)
if ce == stderr:
print '[PASSED] Compile: %s' %(f)
filesPassed += 1
continue
else:
print '[ERROR] Compile: %s' %(f)
ret = 1
print 'Insight crashed for: %s with: %d' %(f, p.returncode)
print stderr
continue
fd, tmpFileName = tempfile.mkstemp('.cpp')
try:
with os.fdopen(fd, 'w') as tmp:
# stupid replacements for clang 6.0. With 7.0 they added a 1.
stdout = stdout.replace('__range ', '__range1 ')
stdout = stdout.replace('__range.', '__range1.')
stdout = stdout.replace('__range)', '__range1)')
stdout = stdout.replace('__range;', '__range1;')
stdout = stdout.replace('__begin ', '__begin1 ')
stdout = stdout.replace('__begin.', '__begin1.')
stdout = stdout.replace('__begin,', '__begin1,')
stdout = stdout.replace('__begin;', '__begin1;')
stdout = stdout.replace('__end ', '__end1 ')
stdout = stdout.replace('__end.', '__end1.')
stdout = stdout.replace('__end;', '__end1;')
stdout = stdout.replace('__end)', '__end1)')
# write the data to the temp file
tmp.write(stdout)
equal = testCompare(tmpFileName, stdout, expectFile, f, args)
if testCompile(tmpFileName, f, args, fileName) and equal:
filesPassed += 1
finally:
os.remove(tmpFileName)
expectedToPass = len(cppFiles)-missingExpected
print '-----------------------------------------------------------------'
print 'Tests passed: %d/%d' %(filesPassed, expectedToPass)
if bFailureIsOk:
return 0
return expectedToPass != filesPassed # note bash expects 0 for ok
#------------------------------------------------------------------------------
sys.exit(main())
#------------------------------------------------------------------------------
| 35.690821 | 152 | 0.514348 |
4a1d3fa2d343712d70873e8247660aa7840c2930
| 58 |
py
|
Python
|
tests/package1/subpackage1/__init__.py
|
sizrailev/py2reqs
|
f09f8b808b310c27860a273660dedd50d3c7bea3
|
[
"MIT"
] | null | null | null |
tests/package1/subpackage1/__init__.py
|
sizrailev/py2reqs
|
f09f8b808b310c27860a273660dedd50d3c7bea3
|
[
"MIT"
] | null | null | null |
tests/package1/subpackage1/__init__.py
|
sizrailev/py2reqs
|
f09f8b808b310c27860a273660dedd50d3c7bea3
|
[
"MIT"
] | null | null | null |
from .module3 import foo3 as bar3
def foo():
bar3()
| 9.666667 | 33 | 0.637931 |
4a1d3fc986f3a46d1484f2a58fc10f34c73e3fb0
| 425 |
py
|
Python
|
apps/ableton/locate.py
|
thinium/knausj_talon
|
bdf222f702605cc60242d235c86fc9503fbc65d9
|
[
"MIT"
] | 1 |
2021-04-15T13:35:06.000Z
|
2021-04-15T13:35:06.000Z
|
apps/ableton/locate.py
|
thinium/knausj_talon
|
bdf222f702605cc60242d235c86fc9503fbc65d9
|
[
"MIT"
] | null | null | null |
apps/ableton/locate.py
|
thinium/knausj_talon
|
bdf222f702605cc60242d235c86fc9503fbc65d9
|
[
"MIT"
] | null | null | null |
from talon.experimental.locate import locate_hover
from talon import Module, ctrl
mod = Module()
@mod.action_class
class Actions:
def locate(name: str):
"""Find an image on the screen and put the mouse in the center"""
locate_hover(name, threshold=0.95)
def nudge_mouse(x: int, y: int):
"""Move the mouse relatively"""
_x, _y = ctrl.mouse_pos()
ctrl.mouse_move(_x + x, _y + y)
| 32.692308 | 73 | 0.654118 |
4a1d43072806b5f290b9c30679a4396d505777a2
| 27,202 |
py
|
Python
|
sdk/network/azure-mgmt-network/azure/mgmt/network/v2021_05_01/operations/_ip_allocations_operations.py
|
xolve/azure-sdk-for-python
|
9f5baa19c392f77f811d936ee43450e4ea524002
|
[
"MIT"
] | 2,728 |
2015-01-09T10:19:32.000Z
|
2022-03-31T14:50:33.000Z
|
sdk/network/azure-mgmt-network/azure/mgmt/network/v2021_05_01/operations/_ip_allocations_operations.py
|
v-xuto/azure-sdk-for-python
|
9c6296d22094c5ede410bc83749e8df8694ccacc
|
[
"MIT"
] | 17,773 |
2015-01-05T15:57:17.000Z
|
2022-03-31T23:50:25.000Z
|
sdk/network/azure-mgmt-network/azure/mgmt/network/v2021_05_01/operations/_ip_allocations_operations.py
|
v-xuto/azure-sdk-for-python
|
9c6296d22094c5ede410bc83749e8df8694ccacc
|
[
"MIT"
] | 1,916 |
2015-01-19T05:05:41.000Z
|
2022-03-31T19:36:44.000Z
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class IpAllocationsOperations(object):
"""IpAllocationsOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2021_05_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def _delete_initial(
self,
resource_group_name, # type: str
ip_allocation_name, # type: str
**kwargs # type: Any
):
# type: (...) -> None
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-05-01"
accept = "application/json"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'ipAllocationName': self._serialize.url("ip_allocation_name", ip_allocation_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/IpAllocations/{ipAllocationName}'} # type: ignore
def begin_delete(
self,
resource_group_name, # type: str
ip_allocation_name, # type: str
**kwargs # type: Any
):
# type: (...) -> LROPoller[None]
"""Deletes the specified IpAllocation.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param ip_allocation_name: The name of the IpAllocation.
:type ip_allocation_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._delete_initial(
resource_group_name=resource_group_name,
ip_allocation_name=ip_allocation_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'ipAllocationName': self._serialize.url("ip_allocation_name", ip_allocation_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/IpAllocations/{ipAllocationName}'} # type: ignore
def get(
self,
resource_group_name, # type: str
ip_allocation_name, # type: str
expand=None, # type: Optional[str]
**kwargs # type: Any
):
# type: (...) -> "_models.IpAllocation"
"""Gets the specified IpAllocation by resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param ip_allocation_name: The name of the IpAllocation.
:type ip_allocation_name: str
:param expand: Expands referenced resources.
:type expand: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: IpAllocation, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2021_05_01.models.IpAllocation
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.IpAllocation"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-05-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'ipAllocationName': self._serialize.url("ip_allocation_name", ip_allocation_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('IpAllocation', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/IpAllocations/{ipAllocationName}'} # type: ignore
def _create_or_update_initial(
self,
resource_group_name, # type: str
ip_allocation_name, # type: str
parameters, # type: "_models.IpAllocation"
**kwargs # type: Any
):
# type: (...) -> "_models.IpAllocation"
cls = kwargs.pop('cls', None) # type: ClsType["_models.IpAllocation"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-05-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'ipAllocationName': self._serialize.url("ip_allocation_name", ip_allocation_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'IpAllocation')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('IpAllocation', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('IpAllocation', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/IpAllocations/{ipAllocationName}'} # type: ignore
def begin_create_or_update(
self,
resource_group_name, # type: str
ip_allocation_name, # type: str
parameters, # type: "_models.IpAllocation"
**kwargs # type: Any
):
# type: (...) -> LROPoller["_models.IpAllocation"]
"""Creates or updates an IpAllocation in the specified resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param ip_allocation_name: The name of the IpAllocation.
:type ip_allocation_name: str
:param parameters: Parameters supplied to the create or update virtual network operation.
:type parameters: ~azure.mgmt.network.v2021_05_01.models.IpAllocation
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either IpAllocation or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2021_05_01.models.IpAllocation]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.IpAllocation"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._create_or_update_initial(
resource_group_name=resource_group_name,
ip_allocation_name=ip_allocation_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('IpAllocation', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'ipAllocationName': self._serialize.url("ip_allocation_name", ip_allocation_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/IpAllocations/{ipAllocationName}'} # type: ignore
def update_tags(
self,
resource_group_name, # type: str
ip_allocation_name, # type: str
parameters, # type: "_models.TagsObject"
**kwargs # type: Any
):
# type: (...) -> "_models.IpAllocation"
"""Updates a IpAllocation tags.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param ip_allocation_name: The name of the IpAllocation.
:type ip_allocation_name: str
:param parameters: Parameters supplied to update IpAllocation tags.
:type parameters: ~azure.mgmt.network.v2021_05_01.models.TagsObject
:keyword callable cls: A custom type or function that will be passed the direct response
:return: IpAllocation, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2021_05_01.models.IpAllocation
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.IpAllocation"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-05-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.update_tags.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'ipAllocationName': self._serialize.url("ip_allocation_name", ip_allocation_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'TagsObject')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('IpAllocation', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
update_tags.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/IpAllocations/{ipAllocationName}'} # type: ignore
def list(
self,
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.IpAllocationListResult"]
"""Gets all IpAllocations in a subscription.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either IpAllocationListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2021_05_01.models.IpAllocationListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.IpAllocationListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-05-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('IpAllocationListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/IpAllocations'} # type: ignore
def list_by_resource_group(
self,
resource_group_name, # type: str
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.IpAllocationListResult"]
"""Gets all IpAllocations in a resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either IpAllocationListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2021_05_01.models.IpAllocationListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.IpAllocationListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-05-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_by_resource_group.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('IpAllocationListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/IpAllocations'} # type: ignore
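    # Usage sketch (assumption: this operations group is exposed as `ip_allocations` on an
    # instantiated NetworkManagementClient for api-version 2021-05-01; names are placeholders):
    #   for alloc in network_client.ip_allocations.list_by_resource_group('my-rg'):
    #       print(alloc.name)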
| 48.749104 | 195 | 0.661238 |
4a1d449f10c97ef7c66f9fc3a7d70318dad11edf
| 144 |
py
|
Python
|
tests/core/utils.py
|
uditagarwal/tastypie
|
ece398310040e9ddfeeacee6a699beb1dee6dad6
|
[
"BSD-3-Clause"
] | 22 |
2015-01-16T01:36:32.000Z
|
2020-06-08T00:46:18.000Z
|
tests/core/utils.py
|
uditagarwal/tastypie
|
ece398310040e9ddfeeacee6a699beb1dee6dad6
|
[
"BSD-3-Clause"
] | 9 |
2019-03-15T11:39:32.000Z
|
2019-04-30T00:59:50.000Z
|
tests/core/utils.py
|
uditagarwal/tastypie
|
ece398310040e9ddfeeacee6a699beb1dee6dad6
|
[
"BSD-3-Clause"
] | 13 |
2015-01-13T20:56:22.000Z
|
2022-02-23T06:01:17.000Z
|
import logging
class SimpleHandler(logging.Handler):
logged = []
def emit(self, record):
SimpleHandler.logged.append(record)
| 16 | 43 | 0.6875 |
4a1d44ad02bcf47239ba48495a5830f6883256c3
| 3,617 |
py
|
Python
|
tensorflow_io/python/ops/mnist_dataset_ops.py
|
vanshhhhh/io
|
2c8204e7a2fb8704a0843bdfd624d785d17c58d6
|
[
"Apache-2.0"
] | 1 |
2018-11-18T06:10:45.000Z
|
2018-11-18T06:10:45.000Z
|
tensorflow_io/python/ops/mnist_dataset_ops.py
|
vanshhhhh/io
|
2c8204e7a2fb8704a0843bdfd624d785d17c58d6
|
[
"Apache-2.0"
] | 1 |
2019-01-23T06:16:57.000Z
|
2019-01-23T06:16:57.000Z
|
tensorflow_io/python/ops/mnist_dataset_ops.py
|
yongtang/io
|
2ea1121e944629c2b462773c2d8d805da427311c
|
[
"Apache-2.0"
] | 1 |
2020-12-13T22:13:03.000Z
|
2020-12-13T22:13:03.000Z
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""MNISTIODataset."""
import tensorflow as tf
from tensorflow_io.python.ops import core_ops
class MNISTLabelIODataset(tf.data.Dataset):
"""A MNISTLabelIODataset"""
def __init__(self, filename):
"""Create a MNISTLabelDataset.
Args:
filename: A `tf.string` tensor containing filename.
"""
_, compression = core_ops.io_file_info(filename)
dataset = tf.data.FixedLengthRecordDataset(
filename, 1, header_bytes=8, compression_type=compression
)
dataset = dataset.map(lambda e: tf.io.decode_raw(e, tf.uint8))
dataset = dataset.unbatch()
self._dataset = dataset
super().__init__(
self._dataset._variant_tensor
) # pylint: disable=protected-access
def _inputs(self):
return []
@property
def element_spec(self):
return self._dataset.element_spec
class MNISTImageIODataset(tf.data.Dataset):
"""A MNISTImageIODataset"""
def __init__(self, filename):
"""Create a MNISTImageDataset.
Args:
filename: A `tf.string` tensor containing filename.
"""
_, compression = core_ops.io_file_info(filename)
rows = tf.io.decode_raw(
core_ops.io_file_read(filename, 8, 4, compression=compression),
tf.int32,
little_endian=False,
)
cols = tf.io.decode_raw(
core_ops.io_file_read(filename, 12, 4, compression=compression),
tf.int32,
little_endian=False,
)
lens = rows[0] * cols[0]
dataset = tf.data.FixedLengthRecordDataset(
filename,
tf.cast(lens, tf.int64),
header_bytes=16,
compression_type=compression,
)
dataset = dataset.map(lambda e: tf.io.decode_raw(e, tf.uint8))
dataset = dataset.map(lambda e: tf.reshape(e, tf.concat([rows, cols], axis=0)))
self._dataset = dataset
super().__init__(
self._dataset._variant_tensor
) # pylint: disable=protected-access
def _inputs(self):
return []
@property
def element_spec(self):
return self._dataset.element_spec
def MNISTIODataset(images=None, labels=None, internal=True):
"""MNISTIODataset"""
assert internal, (
"MNISTIODataset constructor is private; please use one "
"of the factory methods instead (e.g., "
"IODataset.from_mnist())"
)
assert (
images is not None or labels is not None
), "images and labels could not be all None"
images_dataset = MNISTImageIODataset(images) if images is not None else None
labels_dataset = MNISTLabelIODataset(labels) if labels is not None else None
if images is None:
return labels_dataset
if labels is None:
return images_dataset
return tf.data.Dataset.zip((images_dataset, labels_dataset))
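# Usage sketch (assumption: the idx file names below are placeholders; end users normally reach
# this factory through tensorflow_io's IODataset.from_mnist, which delegates here):
#   images = MNISTImageIODataset("train-images-idx3-ubyte.gz")
#   labels = MNISTLabelIODataset("train-labels-idx1-ubyte.gz")
#   dataset = tf.data.Dataset.zip((images, labels)).batch(32)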
| 31.181034 | 87 | 0.634227 |
4a1d456240c4dbb55ba57166919117b1588c2e6f
| 1,934 |
py
|
Python
|
src/products/models/base.py
|
tlgtaa/education-backend
|
86f8af315f9cff2c1fd19406899d593fc0852124
|
[
"MIT"
] | 1 |
2021-03-03T19:51:24.000Z
|
2021-03-03T19:51:24.000Z
|
src/products/models/base.py
|
tlgtaa/education-backend
|
86f8af315f9cff2c1fd19406899d593fc0852124
|
[
"MIT"
] | null | null | null |
src/products/models/base.py
|
tlgtaa/education-backend
|
86f8af315f9cff2c1fd19406899d593fc0852124
|
[
"MIT"
] | null | null | null |
from typing import Optional
from decimal import Decimal
from django.apps import apps
from django.utils.translation import gettext_lazy as _
from app.models import TimestampedModel, models
from app.pricing import format_old_price, format_price
from orders.models import Order
from shipping import factory as ShippingFactory
from users.models import User
class Shippable(TimestampedModel):
"""Add this to every shippable item"""
name = models.CharField(max_length=255)
name_receipt = models.CharField(_('Name for receipts'), max_length=255, help_text='«посещение мастер-класса по TDD» или «Доступ к записи курсов кройки и шитья»')
full_name = models.CharField(
_('Full name for letters'), max_length=255,
help_text='Билет на мастер-класс о TDD или «запись курсов кройки и шитья»',
)
slug = models.SlugField()
price = models.DecimalField(max_digits=8, decimal_places=2)
old_price = models.DecimalField(max_digits=8, decimal_places=2, blank=True, null=True)
class Meta:
abstract = True
def get_price_display(self):
return format_price(self.price)
def get_old_price_display(self):
return format_price(self.old_price)
def get_formatted_price_display(self):
return format_old_price(self.old_price, self.price)
def ship(self, to: User, order: Optional[Order] = None):
return ShippingFactory.ship(self, to=to, order=order)
def get_price(self, promocode=None) -> Decimal:
promocode = apps.get_model('orders.PromoCode').objects.get_or_nothing(name=promocode)
if promocode is not None:
return promocode.apply(self.price)
return self.price
def get_template_id(self):
"""Get custom per-item template_id"""
if not hasattr(self, 'template_id'):
return
if self.template_id is not None and len(self.template_id):
return self.template_id
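# Usage sketch (assumption: `Course` is a hypothetical concrete subclass of Shippable, and
# `user`/`order` come from the calling code):
#   course = Course.objects.get(slug='tdd-workshop')
#   price = course.get_price(promocode='LAUNCH10')  # falls back to course.price for unknown codes
#   course.ship(to=user, order=order)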
| 33.929825 | 165 | 0.713547 |
4a1d45b3a53d1f78c07986524185d5584772dabf
| 2,888 |
py
|
Python
|
psrl_experiments_2016/chain_experiment_nsamp.py
|
orenpeer12/randomized_value_functions
|
a10fe99a9a3a92fea02b38740753da7de7db8f1a
|
[
"MIT"
] | 1 |
2020-04-26T14:09:48.000Z
|
2020-04-26T14:09:48.000Z
|
psrl_experiments_2016/chain_experiment_nsamp.py
|
orenpeer12/randomized_value_functions
|
a10fe99a9a3a92fea02b38740753da7de7db8f1a
|
[
"MIT"
] | null | null | null |
psrl_experiments_2016/chain_experiment_nsamp.py
|
orenpeer12/randomized_value_functions
|
a10fe99a9a3a92fea02b38740753da7de7db8f1a
|
[
"MIT"
] | 2 |
2020-05-10T08:03:50.000Z
|
2021-09-08T11:58:35.000Z
|
'''
Script to run tabular experiments in batch mode.
author: iosband@stanford.edu
'''
import numpy as np
import pandas as pd
import argparse
import sys
from src import environment
from src import finite_tabular_agents
from src.feature_extractor import FeatureTrueState
from src.experiment import run_finite_tabular_experiment
if __name__ == '__main__':
'''
Run a tabular experiment according to command line arguments
'''
# Take in command line flags
parser = argparse.ArgumentParser(description='Run tabular RL experiment')
parser.add_argument('chainLen', help='length of chain', type=int)
parser.add_argument('alg', help='Agent constructor', type=str)
parser.add_argument('nSamp', help='nSamp', type=int)
parser.add_argument('seed', help='random seed', type=int)
parser.add_argument('nEps', help='number of episodes', type=int)
args = parser.parse_args()
# Make a filename to identify flags
fileName = ('chainLen'
+ '_len=' + '%03.f' % args.chainLen
+ '_alg=' + str(args.alg)
+ '_nSamp=' + '%03.2f' % args.nSamp
+ '_seed=' + str(args.seed)
+ '.csv')
folderName = './'
targetPath = folderName + fileName
print('******************************************************************')
print(fileName)
print('******************************************************************')
# Make the environment
env = environment.make_stochasticChain(args.chainLen)
# Make the feature extractor
f_ext = FeatureTrueState(env.epLen, env.nState, env.nAction, env.nState)
# Make the agent
alg_dict = {'PSRL': finite_tabular_agents.PSRL,
'PSRLunif': finite_tabular_agents.PSRLunif,
'OptimisticPSRL': finite_tabular_agents.OptimisticPSRL,
'GaussianPSRL': finite_tabular_agents.GaussianPSRL,
'UCBVI': finite_tabular_agents.UCBVI,
'BEB': finite_tabular_agents.BEB,
'BOLT': finite_tabular_agents.BOLT,
'UCRL2': finite_tabular_agents.UCRL2,
'UCRL2_GP': finite_tabular_agents.UCRL2_GP,
'UCRL2_GP_RTDP': finite_tabular_agents.UCRL2_GP_RTDP,
'EULER': finite_tabular_agents.EULER,
'EULER_GP': finite_tabular_agents.EULER_GP,
'EULER_GP_RTDP': finite_tabular_agents.EULER_GP_RTDP,
'UCFH': finite_tabular_agents.UCFH,
'EpsilonGreedy': finite_tabular_agents.EpsilonGreedy}
agent_constructor = alg_dict[args.alg]
agent = agent_constructor(env.nState, env.nAction, env.epLen,
nSamp=args.nSamp)
# Run the experiment
run_finite_tabular_experiment(agent, env, f_ext, args.nEps, args.seed,
recFreq=100, fileFreq=1000, targetPath=targetPath)
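# Example invocation (assumption: run from the repository root so that `src` is importable):
#   python chain_experiment_nsamp.py 10 PSRL 20 1 1000
# which writes results to ./chainLen_len=010_alg=PSRL_nSamp=20.00_seed=1.csv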
| 36.1 | 79 | 0.616343 |
4a1d45c9845a17364312a64632dd64e9c710aa3a
| 2,402 |
py
|
Python
|
ambari-agent/src/main/python/ambari_agent/hostname.py
|
flipkart-incubator/incubator-ambari
|
bf747346312170834c6beb89a60c8624b47aa288
|
[
"Apache-2.0"
] | 2 |
2015-07-29T22:50:10.000Z
|
2021-11-10T16:05:59.000Z
|
ambari-agent/src/main/python/ambari_agent/hostname.py
|
boydos/incubator-ambari
|
e10d85756dd55729c20aeda2baa0d6c93c4ca31d
|
[
"Apache-2.0"
] | 1 |
2021-11-04T13:31:30.000Z
|
2021-11-04T13:31:30.000Z
|
ambari-agent/src/main/python/ambari_agent/hostname.py
|
isabella232/incubator-ambari
|
bf747346312170834c6beb89a60c8624b47aa288
|
[
"Apache-2.0"
] | 9 |
2016-01-08T21:11:06.000Z
|
2021-11-10T16:05:51.000Z
|
#!/usr/bin/env python2.6
'''
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
import socket
import subprocess
import urllib2
import AmbariConfig
import logging
import traceback
logger = logging.getLogger()
def hostname():
config = AmbariConfig.config
try:
scriptname = config.get('agent', 'hostname_script')
try:
osStat = subprocess.Popen([scriptname], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = osStat.communicate()
if (0 == osStat.returncode and 0 != len(out.strip())):
return out.strip()
else:
return socket.getfqdn()
except:
return socket.getfqdn()
except:
return socket.getfqdn()
def public_hostname():
config = AmbariConfig.config
out = ''
err = ''
try:
if config.has_option('agent', 'public_hostname_script'):
scriptname = config.get('agent', 'public_hostname_script')
output = subprocess.Popen([scriptname], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = output.communicate()
if (0 == output.returncode and 0 != len(out.strip())):
return out.strip()
except:
#ignore for now.
trace_info = traceback.format_exc()
logger.info("Error using the scriptname:" + trace_info
+ " :out " + out + " :err " + err)
logger.info("Defaulting to fqdn.")
# future - do an agent entry for this too
try:
handle = urllib2.urlopen('http://169.254.169.254/latest/meta-data/public-hostname', '', 2)
str = handle.read()
handle.close()
return str
except Exception, e:
return socket.getfqdn()
def main(argv=None):
print hostname()
print public_hostname()
if __name__ == '__main__':
main()
| 30.405063 | 94 | 0.697752 |
4a1d461af01cc11cae24d2b3da0921b3efed71eb
| 10,325 |
py
|
Python
|
docs/generate_modules.py
|
cfobel/dropbot-chip-qc
|
e5944b88c0d423163f55a3f49ebf84bb27e229bc
|
[
"BSD-3-Clause"
] | null | null | null |
docs/generate_modules.py
|
cfobel/dropbot-chip-qc
|
e5944b88c0d423163f55a3f49ebf84bb27e229bc
|
[
"BSD-3-Clause"
] | 5 |
2019-04-02T11:10:45.000Z
|
2019-07-17T20:31:18.000Z
|
docs/generate_modules.py
|
cfobel/dropbot-chip-qc
|
e5944b88c0d423163f55a3f49ebf84bb27e229bc
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
sphinx-autopackage-script
This script parses a directory tree looking for python modules and packages and
creates ReST files appropriately to create code documentation with Sphinx.
It also creates a modules index (named modules.<suffix>).
"""
# Copyright 2008 Société des arts technologiques (SAT), http://www.sat.qc.ca/
# Copyright 2010 Thomas Waldmann <tw AT waldmann-edv DOT de>
# All rights reserved.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from __future__ import absolute_import
from __future__ import print_function
import os
import optparse
# automodule options
OPTIONS = ['members',
'undoc-members',
# 'inherited-members', # disabled because there's a bug in sphinx
'show-inheritance',
]
INIT = '__init__.py'
def makename(package, module):
"""Join package and module with a dot."""
# Both package and module can be None/empty.
if package:
name = package
if module:
name += '.' + module
else:
name = module
return name
def write_file(name, text, opts):
"""Write the output file for module/package <name>."""
if opts.dryrun:
return
fname = os.path.join(opts.destdir, "%s.%s" % (name, opts.suffix))
if not opts.force and os.path.isfile(fname):
print('File %s already exists, skipping.' % fname)
else:
print('Creating file %s.' % fname)
f = open(fname, 'w')
f.write(text)
f.close()
def format_heading(level, text):
"""Create a heading of <level> [1, 2 or 3 supported]."""
underlining = ['=', '-', '~', ][level-1] * len(text)
return '%s\n%s\n\n' % (text, underlining)
def format_directive(module, package=None):
"""Create the automodule directive and add the options."""
directive = '.. automodule:: %s\n' % makename(package, module)
for option in OPTIONS:
directive += ' :%s:\n' % option
return directive
def create_module_file(package, module, opts):
"""Build the text of the file and write the file."""
text = format_heading(1, '%s Module' % module)
text += format_heading(2, ':mod:`%s` Module' % module)
text += format_directive(module, package)
write_file(makename(package, module), text, opts)
def create_package_file(root, master_package, subroot, py_files, opts, subs):
"""Build the text of the file and write the file."""
package = os.path.split(root)[-1]
text = format_heading(1, '%s Package' % package)
# add each package's module
for py_file in py_files:
if shall_skip(os.path.join(root, py_file)):
continue
is_package = py_file == INIT
py_file = os.path.splitext(py_file)[0]
py_path = makename(subroot, py_file)
if is_package:
heading = ':mod:`%s` Package' % package
else:
heading = ':mod:`%s` Module' % py_file
text += format_heading(2, heading)
text += format_directive(is_package and subroot or py_path, master_package)
text += '\n'
# build a list of directories that are packages (they contain an INIT file)
subs = [sub for sub in subs if os.path.isfile(os.path.join(root, sub, INIT))]
    # if there are some package directories, add a TOC for these subpackages
if subs:
text += format_heading(2, 'Subpackages')
text += '.. toctree::\n\n'
for sub in subs:
text += ' %s.%s\n' % (makename(master_package, subroot), sub)
text += '\n'
write_file(makename(master_package, subroot), text, opts)
def create_modules_toc_file(master_package, modules, opts, name='modules'):
"""
Create the module's index.
"""
text = format_heading(1, '%s Modules' % opts.header)
text += '.. toctree::\n'
text += ' :maxdepth: %s\n\n' % opts.maxdepth
modules.sort()
prev_module = ''
for module in modules:
# look if the module is a subpackage and, if yes, ignore it
if module.startswith(prev_module + '.'):
continue
prev_module = module
text += ' %s\n' % module
write_file(name, text, opts)
def shall_skip(module):
"""
Check if we want to skip this module.
"""
# skip it, if there is nothing (or just \n or \r\n) in the file
return os.path.getsize(module) < 3
def recurse_tree(path, excludes, opts):
"""
Look for every file in the directory tree and create the corresponding
ReST files.
"""
# use absolute path for root, as relative paths like '../../foo' cause
# 'if "/." in root ...' to filter out *all* modules otherwise
path = os.path.abspath(path)
# check if the base directory is a package and get is name
if INIT in os.listdir(path):
package_name = path.split(os.path.sep)[-1]
else:
package_name = None
toc = []
tree = os.walk(path, False)
for root, subs, files in tree:
# keep only the Python script files
py_files = sorted([f for f in files if os.path.splitext(f)[1] == '.py'])
if INIT in py_files:
py_files.remove(INIT)
py_files.insert(0, INIT)
# remove hidden ('.') and private ('_') directories
subs = sorted([sub for sub in subs if sub[0] not in ['.', '_']])
# check if there are valid files to process
# TODO: could add check for windows hidden files
if "/." in root or "/_" in root \
or not py_files \
or is_excluded(root, excludes):
continue
if INIT in py_files:
# we are in package ...
if (# ... with subpackage(s)
subs
or
# ... with some module(s)
len(py_files) > 1
or
# ... with a not-to-be-skipped INIT file
not shall_skip(os.path.join(root, INIT))
):
subroot = root[len(path):].lstrip(os.path.sep).replace(os.path.sep, '.')
create_package_file(root, package_name, subroot, py_files, opts, subs)
toc.append(makename(package_name, subroot))
elif root == path:
# if we are at the root level, we don't require it to be a package
for py_file in py_files:
if not shall_skip(os.path.join(path, py_file)):
module = os.path.splitext(py_file)[0]
create_module_file(package_name, module, opts)
toc.append(makename(package_name, module))
# create the module's index
if not opts.notoc:
create_modules_toc_file(package_name, toc, opts)
def normalize_excludes(rootpath, excludes):
"""
Normalize the excluded directory list:
* must be either an absolute path or start with rootpath,
* otherwise it is joined with rootpath
* with trailing slash
"""
sep = os.path.sep
f_excludes = []
for exclude in excludes:
if not os.path.isabs(exclude) and not exclude.startswith(rootpath):
exclude = os.path.join(rootpath, exclude)
if not exclude.endswith(sep):
exclude += sep
f_excludes.append(exclude)
return f_excludes
def is_excluded(root, excludes):
"""
Check if the directory is in the exclude list.
Note: by having trailing slashes, we avoid common prefix issues, like
    e.g. an exclude "foo" also accidentally excluding "foobar".
"""
sep = os.path.sep
if not root.endswith(sep):
root += sep
for exclude in excludes:
if root.startswith(exclude):
return True
return False
def main():
"""
Parse and check the command line arguments.
"""
parser = optparse.OptionParser(usage="""usage: %prog [options] <package path> [exclude paths, ...]
Note: By default this script will not overwrite already created files.""")
parser.add_option("-n", "--doc-header", action="store", dest="header", help="Documentation Header (default=Project)", default="Project")
parser.add_option("-d", "--dest-dir", action="store", dest="destdir", help="Output destination directory", default="")
parser.add_option("-s", "--suffix", action="store", dest="suffix", help="module suffix (default=txt)", default="txt")
parser.add_option("-m", "--maxdepth", action="store", dest="maxdepth", help="Maximum depth of submodules to show in the TOC (default=4)", type="int", default=4)
parser.add_option("-r", "--dry-run", action="store_true", dest="dryrun", help="Run the script without creating the files")
parser.add_option("-f", "--force", action="store_true", dest="force", help="Overwrite all the files")
parser.add_option("-t", "--no-toc", action="store_true", dest="notoc", help="Don't create the table of content file")
(opts, args) = parser.parse_args()
if not args:
parser.error("package path is required.")
else:
rootpath, excludes = args[0], args[1:]
if os.path.isdir(rootpath):
# check if the output destination is a valid directory
if opts.destdir and os.path.isdir(opts.destdir):
excludes = normalize_excludes(rootpath, excludes)
recurse_tree(rootpath, excludes, opts)
else:
print('%s is not a valid output destination directory.' % opts.destdir)
else:
print('%s is not a valid directory.' % rootpath)
if __name__ == '__main__':
main()
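# Example invocation (assumption: the paths are illustrative and the destination directory exists):
#   python generate_modules.py --dest-dir=docs/api --suffix=rst --force ../dropbot_chip_qc
# creates one ReST file per package plus a modules.rst index.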
| 38.962264 | 165 | 0.606877 |
4a1d4629799981459bf152c9d0b4883a1afe7b8e
| 7,584 |
py
|
Python
|
dbt_gen/py3env/lib/python3.5/site-packages/azure/storage/table/models.py
|
norton120/dbt_gen
|
712fc8698a77c3372f5a403a5ae50711d0cb3c7d
|
[
"MIT"
] | null | null | null |
dbt_gen/py3env/lib/python3.5/site-packages/azure/storage/table/models.py
|
norton120/dbt_gen
|
712fc8698a77c3372f5a403a5ae50711d0cb3c7d
|
[
"MIT"
] | null | null | null |
dbt_gen/py3env/lib/python3.5/site-packages/azure/storage/table/models.py
|
norton120/dbt_gen
|
712fc8698a77c3372f5a403a5ae50711d0cb3c7d
|
[
"MIT"
] | null | null | null |
#-------------------------------------------------------------------------
# Copyright (c) Microsoft. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#--------------------------------------------------------------------------
from azure.common import (
AzureException,
AzureHttpError,
)
from ._error import (
_ERROR_ATTRIBUTE_MISSING,
)
class AzureBatchValidationError(AzureException):
'''
Indicates that a batch operation cannot proceed due to invalid input.
:ivar str message:
A detailed error message indicating the reason for the failure.
'''
class AzureBatchOperationError(AzureHttpError):
'''
Indicates that a batch operation failed.
:ivar str message:
A detailed error message indicating the index of the batch
request which failed and the reason for the failure. For example,
'0:One of the request inputs is out of range.' indicates the 0th batch
request failed as one of its property values was out of range.
:ivar int status_code:
The HTTP status code of the batch request. For example, 400.
:ivar str batch_code:
The batch status code. For example, 'OutOfRangeInput'.
'''
def __init__(self, message, status_code, batch_code):
super(AzureBatchOperationError, self).__init__(message, status_code)
self.code = batch_code
class Entity(dict):
'''
An entity object. Can be accessed as a dict or as an obj. The attributes of
the entity will be created dynamically. For example, the following are both
valid::
entity = Entity()
entity.a = 'b'
entity['x'] = 'y'
'''
def __getattr__(self, name):
try:
return self[name]
except KeyError:
raise AttributeError(_ERROR_ATTRIBUTE_MISSING.format('Entity', name))
__setattr__ = dict.__setitem__
def __delattr__(self, name):
try:
del self[name]
except KeyError:
raise AttributeError(_ERROR_ATTRIBUTE_MISSING.format('Entity', name))
def __dir__(self):
return dir({}) + list(self.keys())
class EntityProperty(object):
'''
An entity property. Used to explicitly set :class:`~EdmType` when necessary.
Values which require explicit typing are GUID, INT32, and BINARY. Other EdmTypes
may be explicitly create as EntityProperty objects but need not be. For example,
the below with both create STRING typed properties on the entity::
entity = Entity()
entity.a = 'b'
entity.x = EntityProperty(EdmType.STRING, 'y')
'''
def __init__(self, type=None, value=None, encrypt=False):
'''
        Constructs an entity property with an explicitly set type.
        :param str type: The type of the property (an :class:`~EdmType` constant).
        :param value: The value of the property.
:param bool encrypt: Indicates whether or not the property should be encrypted.
'''
self.type = type
self.value = value
self.encrypt = encrypt
class Table(object):
'''
Represents an Azure Table. Returned by list_tables.
:ivar str name: The name of the table.
'''
pass
class TablePayloadFormat(object):
'''
Specifies the accepted content type of the response payload. More information
can be found here: https://msdn.microsoft.com/en-us/library/azure/dn535600.aspx
'''
JSON_NO_METADATA = 'application/json;odata=nometadata'
'''Returns no type information for the entity properties.'''
JSON_MINIMAL_METADATA = 'application/json;odata=minimalmetadata'
'''Returns minimal type information for the entity properties.'''
JSON_FULL_METADATA = 'application/json;odata=fullmetadata'
'''Returns minimal type information for the entity properties plus some extra odata properties.'''
class EdmType(object):
'''
Used by :class:`~.EntityProperty` to represent the type of the entity property
to be stored by the Table service.
'''
BINARY = 'Edm.Binary'
''' Represents byte data. Must be specified. '''
INT64 = 'Edm.Int64'
    ''' Represents a number between -(2^63) and 2^63 - 1. This is the default type for Python numbers. '''
GUID = 'Edm.Guid'
''' Represents a GUID. Must be specified. '''
DATETIME = 'Edm.DateTime'
''' Represents a date. This type will be inferred for Python datetime objects. '''
STRING = 'Edm.String'
''' Represents a string. This type will be inferred for Python strings. '''
INT32 = 'Edm.Int32'
    ''' Represents a number between -(2^31) and 2^31 - 1. Must be specified or numbers will default to INT64. '''
DOUBLE = 'Edm.Double'
''' Represents a double. This type will be inferred for Python floating point numbers. '''
BOOLEAN = 'Edm.Boolean'
''' Represents a boolean. This type will be inferred for Python bools. '''
class TablePermissions(object):
'''
TablePermissions class to be used with the :func:`~azure.storage.table.tableservice.TableService.generate_table_shared_access_signature`
method and for the AccessPolicies used with :func:`~azure.storage.table.tableservice.TableService.set_table_acl`.
:ivar TablePermissions TablePermissions.QUERY: Get entities and query entities.
:ivar TablePermissions TablePermissions.ADD: Add entities.
:ivar TablePermissions TablePermissions.UPDATE: Update entities.
:ivar TablePermissions TablePermissions.DELETE: Delete entities.
'''
def __init__(self, query=False, add=False, update=False, delete=False, _str=None):
'''
:param bool query:
Get entities and query entities.
:param bool add:
Add entities. Add and Update permissions are required for upsert operations.
:param bool update:
Update entities. Add and Update permissions are required for upsert operations.
:param bool delete:
Delete entities.
:param str _str:
A string representing the permissions.
'''
if not _str:
_str = ''
self.query = query or ('r' in _str)
self.add = add or ('a' in _str)
self.update = update or ('u' in _str)
self.delete = delete or ('d' in _str)
def __or__(self, other):
return TablePermissions(_str=str(self) + str(other))
def __add__(self, other):
return TablePermissions(_str=str(self) + str(other))
def __str__(self):
return (('r' if self.query else '') +
('a' if self.add else '') +
('u' if self.update else '') +
('d' if self.delete else ''))
TablePermissions.QUERY = TablePermissions(query=True)
TablePermissions.ADD = TablePermissions(add=True)
TablePermissions.UPDATE = TablePermissions(update=True)
TablePermissions.DELETE = TablePermissions(delete=True)
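# Usage sketch (illustrative only): permissions compose with | or +, e.g.
#   str(TablePermissions.QUERY | TablePermissions.ADD)  # -> 'ra'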
| 36.815534 | 141 | 0.638186 |
4a1d49091ca5e9518f740777f3ccc1e42d267ac6
| 6,255 |
py
|
Python
|
chapter6_嵌入与表示学习/denoise_autoencoder.py
|
Tisword/pytorch-in-action
|
193745dc0b45b4c292ad9276eac0023c4ac85ae8
|
[
"MIT"
] | 164 |
2018-05-16T13:10:52.000Z
|
2022-03-15T12:18:30.000Z
|
chapter6_嵌入与表示学习/denoise_autoencoder.py
|
Tisword/pytorch-in-action
|
193745dc0b45b4c292ad9276eac0023c4ac85ae8
|
[
"MIT"
] | 20 |
2019-01-21T12:16:06.000Z
|
2022-03-11T23:33:48.000Z
|
chapter6_嵌入与表示学习/denoise_autoencoder.py
|
Tisword/pytorch-in-action
|
193745dc0b45b4c292ad9276eac0023c4ac85ae8
|
[
"MIT"
] | 86 |
2018-11-06T05:42:58.000Z
|
2022-02-28T01:05:52.000Z
|
# Simple Convolutional Autoencoder
import torch
import torch.nn as nn
import torch.utils as utils
from torch.autograd import Variable
import torchvision.datasets as dset
import torchvision.transforms as transforms
import numpy as np
import matplotlib.pyplot as plt
# Configuration parameters
torch.manual_seed(1)  # set the random seed so results are reproducible
n_epoch = 200  # number of training epochs
batch_size = 100  # batch size
learning_rate = 0.0002  # learning rate
# Download the MNIST handwritten-digit training set
mnist_train = dset.MNIST("./", train=True, transform=transforms.ToTensor(), target_transform=None, download=True)
train_loader = torch.utils.data.DataLoader(dataset=mnist_train,batch_size=batch_size,shuffle=True)
# Encoder model definition
class Encoder(nn.Module):
def __init__(self):
super(Encoder,self).__init__()
self.layer1 = nn.Sequential(
nn.Conv2d(1,32,3,padding=1), # batch x 32 x 28 x 28
nn.ReLU(),
nn.BatchNorm2d(32),
nn.Conv2d(32,32,3,padding=1), # batch x 32 x 28 x 28
nn.ReLU(),
nn.BatchNorm2d(32),
nn.Conv2d(32,64,3,padding=1), # batch x 64 x 28 x 28
nn.ReLU(),
nn.BatchNorm2d(64),
nn.Conv2d(64,64,3,padding=1), # batch x 64 x 28 x 28
nn.ReLU(),
nn.BatchNorm2d(64),
nn.MaxPool2d(2,2) # batch x 64 x 14 x 14
)
self.layer2 = nn.Sequential(
nn.Conv2d(64,128,3,padding=1), # batch x 128 x 14 x 14
nn.ReLU(),
nn.BatchNorm2d(128),
nn.Conv2d(128,128,3,padding=1), # batch x 128 x 14 x 14
nn.ReLU(),
nn.BatchNorm2d(128),
nn.MaxPool2d(2,2),
nn.Conv2d(128,256,3,padding=1), # batch x 256 x 7 x 7
nn.ReLU()
)
def forward(self,x):
out = self.layer1(x)
out = self.layer2(out)
out = out.view(batch_size, -1)
return out
#encoder = Encoder().cuda()
encoder = Encoder()
# Decoder model definition
class Decoder(nn.Module):
def __init__(self):
super(Decoder,self).__init__()
self.layer1 = nn.Sequential(
nn.ConvTranspose2d(256,128,3,2,1,1), # batch x 128 x 14 x 14
nn.ReLU(),
nn.BatchNorm2d(128),
nn.ConvTranspose2d(128,128,3,1,1), # batch x 128 x 14 x 14
nn.ReLU(),
nn.BatchNorm2d(128),
nn.ConvTranspose2d(128,64,3,1,1), # batch x 64 x 14 x 14
nn.ReLU(),
nn.BatchNorm2d(64),
nn.ConvTranspose2d(64,64,3,1,1), # batch x 64 x 14 x 14
nn.ReLU(),
nn.BatchNorm2d(64)
)
self.layer2 = nn.Sequential(
nn.ConvTranspose2d(64,32,3,1,1), # batch x 32 x 14 x 14
nn.ReLU(),
nn.BatchNorm2d(32),
nn.ConvTranspose2d(32,32,3,1,1), # batch x 32 x 14 x 14
nn.ReLU(),
nn.BatchNorm2d(32),
nn.ConvTranspose2d(32,1,3,2,1,1), # batch x 1 x 28 x 28
nn.ReLU()
)
def forward(self,x):
out = x.view(batch_size,256,7,7)
out = self.layer1(out)
out = self.layer2(out)
return out
#decoder = Decoder().cuda()
decoder = Decoder()
parameters = list(encoder.parameters())+ list(decoder.parameters())
loss_func = nn.MSELoss()
optimizer = torch.optim.Adam(parameters, lr=learning_rate)
# Noise used to corrupt the inputs
noise = torch.rand(batch_size,1,28,28)
for i in range(n_epoch):
for image,label in train_loader:
image_n = torch.mul(image+0.25, 0.1 * noise)
#image = Variable(image).cuda()
image = Variable(image)
#image_n = Variable(image_n).cuda()
image_n = Variable(image_n)
optimizer.zero_grad()
output = encoder(image_n)
output = decoder(output)
loss = loss_func(output,image)
loss.backward()
optimizer.step()
break
print('epoch [{}/{}], loss:{:.4f}'
.format(i + 1, n_epoch, loss.data.item()))
img = image[0].cpu()
input_img = image_n[0].cpu()
output_img = output[0].cpu()
origin = img.data.numpy()
inp = input_img.data.numpy()
out = output_img.data.numpy()
plt.figure('denoising autodecoder')
plt.subplot(131)
plt.imshow(origin[0],cmap='gray')
plt.subplot(132)
plt.imshow(inp[0],cmap='gray')
plt.subplot(133)
plt.imshow(out[0],cmap="gray")
plt.show()
print(label[0])
| 36.366279 | 113 | 0.48729 |
4a1d4a718dfcc02f2eb407105f8979a055e61294
| 98 |
py
|
Python
|
hello/tests/test_hello.py
|
mkostich/RIT_retreat_CICD
|
5c88d272cbb3d3eeef8074ff6d68bc73d20c924c
|
[
"MIT"
] | null | null | null |
hello/tests/test_hello.py
|
mkostich/RIT_retreat_CICD
|
5c88d272cbb3d3eeef8074ff6d68bc73d20c924c
|
[
"MIT"
] | null | null | null |
hello/tests/test_hello.py
|
mkostich/RIT_retreat_CICD
|
5c88d272cbb3d3eeef8074ff6d68bc73d20c924c
|
[
"MIT"
] | null | null | null |
import pytest
from hello.hello_world import hello_world
def test_hello_world():
hello_world()
| 19.6 | 41 | 0.806122 |
4a1d4ab4121e2b7d4dabcfb68c2eefc2ff9bfe0c
| 1,527 |
py
|
Python
|
UnityEngine/ParticleSystem/ColorOverLifetimeModule/__init__.py
|
Grim-es/udon-pie-auto-completion
|
c2cd86554ed615cdbbb01e19fa40665eafdfaedc
|
[
"MIT"
] | null | null | null |
UnityEngine/ParticleSystem/ColorOverLifetimeModule/__init__.py
|
Grim-es/udon-pie-auto-completion
|
c2cd86554ed615cdbbb01e19fa40665eafdfaedc
|
[
"MIT"
] | null | null | null |
UnityEngine/ParticleSystem/ColorOverLifetimeModule/__init__.py
|
Grim-es/udon-pie-auto-completion
|
c2cd86554ed615cdbbb01e19fa40665eafdfaedc
|
[
"MIT"
] | null | null | null |
from UdonPie import System
from UdonPie import UnityEngine
from UdonPie.Undefined import *
class ColorOverLifetimeModule:
def __new__(cls, arg1=None):
'''
:returns: ColorOverLifetimeModule
:rtype: UnityEngine.ParticleSystem.ColorOverLifetimeModule
'''
pass
@staticmethod
def set_enabled(arg1):
'''
:param arg1: Boolean
:type arg1: System.Boolean or bool
'''
pass
@staticmethod
def get_enabled():
'''
:returns: Boolean
:rtype: System.Boolean
'''
pass
@staticmethod
def set_color(arg1):
'''
:param arg1: MinMaxGradient
:type arg1: UnityEngine.MinMaxGradient
'''
pass
@staticmethod
def get_color():
'''
:returns: ParticleSystem+MinMaxGradient
:rtype: UnityEngine.ParticleSystem+MinMaxGradient
'''
pass
@staticmethod
def Equals(arg1):
'''
:param arg1: Object
:type arg1: System.Object
:returns: Boolean
:rtype: System.Boolean
'''
pass
@staticmethod
def ToString():
'''
:returns: String
:rtype: System.String
'''
pass
@staticmethod
def GetHashCode():
'''
:returns: Int32
:rtype: System.Int32
'''
pass
@staticmethod
def GetType():
'''
:returns: Type
:rtype: System.Type
'''
pass
| 19.329114 | 66 | 0.530452 |
4a1d4c2b2d573a115c8be55cfb157dc0dd5ea181
| 11,446 |
py
|
Python
|
custom_components/xiaomi_cloud_map_extractor/image_handler.py
|
horse315/Home-Assistant-custom-components-Xiaomi-Cloud-Map-Extractor
|
29cd242310f94d0895f396c9e6d605545b88d06b
|
[
"MIT"
] | 3 |
2019-03-12T21:27:56.000Z
|
2019-05-03T06:18:48.000Z
|
custom_components/xiaomi_cloud_map_extractor/image_handler.py
|
horse315/Home-Assistant-custom-components-Xiaomi-Cloud-Map-Extractor
|
29cd242310f94d0895f396c9e6d605545b88d06b
|
[
"MIT"
] | 32 |
2019-11-11T22:13:08.000Z
|
2020-12-15T18:18:27.000Z
|
custom_components/xiaomi_cloud_map_extractor/image_handler.py
|
horse315/Home-Assistant-custom-components-Xiaomi-Cloud-Map-Extractor
|
29cd242310f94d0895f396c9e6d605545b88d06b
|
[
"MIT"
] | null | null | null |
import logging
from typing import Callable
from PIL import Image, ImageDraw, ImageFont
from .const import *
_LOGGER = logging.getLogger(__name__)
class ImageHandler:
MAP_OUTSIDE = 0x00
MAP_WALL = 0x01
MAP_INSIDE = 0xFF
MAP_SCAN = 0x07
COLORS = {
COLOR_MAP_INSIDE: (32, 115, 185),
COLOR_MAP_OUTSIDE: (19, 87, 148),
COLOR_MAP_WALL: (100, 196, 254),
COLOR_MAP_WALL_V2: (93, 109, 126),
COLOR_GREY_WALL: (93, 109, 126),
COLOR_PATH: (147, 194, 238),
COLOR_GOTO_PATH: (0, 255, 0),
COLOR_PREDICTED_PATH: (255, 255, 0),
COLOR_ZONES: (0xAD, 0xD8, 0xFF, 0x8F),
COLOR_ZONES_OUTLINE: (0xAD, 0xD8, 0xFF),
COLOR_VIRTUAL_WALLS: (255, 0, 0),
COLOR_NO_GO_ZONES: (255, 33, 55, 127),
COLOR_NO_GO_ZONES_OUTLINE: (255, 0, 0),
COLOR_NO_MOPPING_ZONES: (163, 130, 211, 127),
COLOR_NO_MOPPING_ZONES_OUTLINE: (163, 130, 211),
COLOR_CHARGER: (0x66, 0xfe, 0xda, 0x7f),
COLOR_ROBO: (75, 235, 149),
COLOR_UNKNOWN: (0, 0, 0),
COLOR_SCAN: (0xDF, 0xDF, 0xDF),
COLOR_ROOM_1: (240, 178, 122),
COLOR_ROOM_2: (133, 193, 233),
COLOR_ROOM_3: (217, 136, 128),
COLOR_ROOM_4: (52, 152, 219),
COLOR_ROOM_5: (205, 97, 85),
COLOR_ROOM_6: (243, 156, 18),
COLOR_ROOM_7: (88, 214, 141),
COLOR_ROOM_8: (245, 176, 65),
COLOR_ROOM_9: (252, 212, 81),
COLOR_ROOM_10: (72, 201, 176),
COLOR_ROOM_11: (84, 153, 199),
COLOR_ROOM_12: (133, 193, 233),
COLOR_ROOM_13: (245, 176, 65),
COLOR_ROOM_14: (82, 190, 128),
COLOR_ROOM_15: (72, 201, 176),
COLOR_ROOM_16: (165, 105, 189)
}
ROOM_COLORS = [COLOR_ROOM_1, COLOR_ROOM_2, COLOR_ROOM_3, COLOR_ROOM_4, COLOR_ROOM_5, COLOR_ROOM_6, COLOR_ROOM_7,
COLOR_ROOM_8, COLOR_ROOM_9, COLOR_ROOM_10, COLOR_ROOM_11, COLOR_ROOM_12, COLOR_ROOM_13,
COLOR_ROOM_14, COLOR_ROOM_15, COLOR_ROOM_16]
@staticmethod
def parse(raw_data: bytes, width, height, colors, image_config):
rooms = {}
scale = image_config[CONF_SCALE]
trim_left = int(image_config[CONF_TRIM][CONF_LEFT] * width / 100)
trim_right = int(image_config[CONF_TRIM][CONF_RIGHT] * width / 100)
trim_top = int(image_config[CONF_TRIM][CONF_TOP] * height / 100)
trim_bottom = int(image_config[CONF_TRIM][CONF_BOTTOM] * height / 100)
trimmed_height = height - trim_top - trim_bottom
trimmed_width = width - trim_left - trim_right
image = Image.new('RGBA', (trimmed_width, trimmed_height))
if width == 0 or height == 0:
return ImageHandler.create_empty_map(colors)
pixels = image.load()
for img_y in range(trimmed_height):
for img_x in range(trimmed_width):
pixel_type = raw_data[img_x + trim_left + width * (img_y + trim_bottom)]
x = img_x
y = trimmed_height - img_y - 1
if pixel_type == ImageHandler.MAP_OUTSIDE:
pixels[x, y] = ImageHandler.__get_color__(COLOR_MAP_OUTSIDE, colors)
elif pixel_type == ImageHandler.MAP_WALL:
pixels[x, y] = ImageHandler.__get_color__(COLOR_MAP_WALL, colors)
elif pixel_type == ImageHandler.MAP_INSIDE:
pixels[x, y] = ImageHandler.__get_color__(COLOR_MAP_INSIDE, colors)
elif pixel_type == ImageHandler.MAP_SCAN:
pixels[x, y] = ImageHandler.__get_color__(COLOR_SCAN, colors)
else:
obstacle = pixel_type & 0x07
if obstacle == 0:
pixels[x, y] = ImageHandler.__get_color__(COLOR_GREY_WALL, colors)
elif obstacle == 1:
pixels[x, y] = ImageHandler.__get_color__(COLOR_MAP_WALL_V2, colors)
elif obstacle == 7:
room_number = (pixel_type & 0xFF) >> 3
room_x = img_x + trim_left
room_y = img_y + trim_bottom
if room_number not in rooms:
rooms[room_number] = (room_x, room_y, room_x, room_y)
else:
rooms[room_number] = (min(rooms[room_number][0], room_x),
min(rooms[room_number][1], room_y),
max(rooms[room_number][2], room_x),
max(rooms[room_number][3], room_y))
default = ImageHandler.ROOM_COLORS[room_number >> 1]
pixels[x, y] = ImageHandler.__get_color__(f"{COLOR_ROOM_PREFIX}{room_number}", colors, default)
else:
pixels[x, y] = ImageHandler.__get_color__(COLOR_UNKNOWN, colors)
if image_config["scale"] != 1 and width != 0 and height != 0:
image = image.resize((int(trimmed_width * scale), int(trimmed_height * scale)), resample=Image.NEAREST)
return image, rooms
@staticmethod
def create_empty_map(colors):
color = ImageHandler.__get_color__(COLOR_MAP_OUTSIDE, colors)
image = Image.new('RGBA', (100, 100), color=color)
if sum(color[0:3]) > 382:
text_color = (0, 0, 0)
else:
text_color = (255, 255, 255)
draw = ImageDraw.Draw(image, "RGBA")
text = "NO MAP"
w, h = draw.textsize(text)
draw.text((50 - w / 2, 50 - h / 2), text, fill=text_color)
return image, {}
@staticmethod
def get_room_at_pixel(raw_data: bytes, width, x, y):
room_number = None
pixel_type = raw_data[x + width * y]
if pixel_type not in [ImageHandler.MAP_INSIDE, ImageHandler.MAP_SCAN]:
if pixel_type & 0x07 == 7:
room_number = (pixel_type & 0xFF) >> 3
return room_number
@staticmethod
def draw_path(image, path, colors):
ImageHandler.__draw_path__(image, path, ImageHandler.__get_color__(COLOR_PATH, colors))
@staticmethod
def draw_goto_path(image, path, colors):
ImageHandler.__draw_path__(image, path, ImageHandler.__get_color__(COLOR_GOTO_PATH, colors))
@staticmethod
def draw_predicted_path(image, path, colors):
ImageHandler.__draw_path__(image, path, ImageHandler.__get_color__(COLOR_PREDICTED_PATH, colors))
@staticmethod
def draw_no_go_areas(image, areas, colors):
ImageHandler.__draw_areas__(image, areas,
ImageHandler.__get_color__(COLOR_NO_GO_ZONES, colors),
ImageHandler.__get_color__(COLOR_NO_GO_ZONES_OUTLINE, colors))
@staticmethod
def draw_no_mopping_areas(image, areas, colors):
ImageHandler.__draw_areas__(image, areas,
ImageHandler.__get_color__(COLOR_NO_MOPPING_ZONES, colors),
ImageHandler.__get_color__(COLOR_NO_MOPPING_ZONES_OUTLINE, colors))
@staticmethod
def draw_walls(image, walls, colors):
draw = ImageDraw.Draw(image.data, 'RGBA')
for wall in walls:
draw.line(wall.to_img(image.dimensions).as_list(),
ImageHandler.__get_color__(COLOR_VIRTUAL_WALLS, colors), width=2)
@staticmethod
def draw_zones(image, zones, colors):
areas = list(map(lambda z: z.as_area(), zones))
ImageHandler.__draw_areas__(image, areas,
ImageHandler.__get_color__(COLOR_ZONES, colors),
ImageHandler.__get_color__(COLOR_ZONES_OUTLINE, colors))
@staticmethod
def draw_charger(image, charger, radius, colors):
color = ImageHandler.__get_color__(COLOR_CHARGER, colors)
ImageHandler.__draw_circle__(image, charger, radius, color, color)
@staticmethod
def draw_vacuum_position(image, vacuum_position, radius, colors):
color = ImageHandler.__get_color__(COLOR_ROBO, colors)
ImageHandler.__draw_circle__(image, vacuum_position, radius, color, color)
@staticmethod
def rotate(image):
if image.dimensions.rotation == 90:
image.data = image.data.transpose(Image.ROTATE_90)
if image.dimensions.rotation == 180:
image.data = image.data.transpose(Image.ROTATE_180)
if image.dimensions.rotation == 270:
image.data = image.data.transpose(Image.ROTATE_270)
@staticmethod
def draw_texts(image, texts):
for text_config in texts:
x = text_config[CONF_X] * image.data.size[0] / 100
y = text_config[CONF_Y] * image.data.size[1] / 100
ImageHandler.__draw_text__(image, text_config[CONF_TEXT], x, y, text_config[CONF_COLOR],
text_config[CONF_FONT], text_config[CONF_FONT_SIZE])
@staticmethod
def __draw_circle__(image, center, r, outline, fill):
def draw_func(draw: ImageDraw):
point = center.to_img(image.dimensions)
coords = [point.x - r, point.y - r, point.x + r, point.y + r]
draw.ellipse(coords, outline=outline, fill=fill)
ImageHandler.__draw_on_new_layer__(image, draw_func)
@staticmethod
def __draw_areas__(image, areas, fill, outline):
if len(areas) == 0:
return
def draw_func(draw: ImageDraw):
for area in areas:
draw.polygon(area.to_img(image.dimensions).as_list(), fill, outline)
ImageHandler.__draw_on_new_layer__(image, draw_func)
@staticmethod
def __draw_path__(image, path, color):
if len(path.path) < 2:
return
def draw_func(draw: ImageDraw):
s = path.path[0].to_img(image.dimensions)
for point in path.path[1:]:
e = point.to_img(image.dimensions)
draw.line([s.x, s.y, e.x, e.y], fill=color)
s = e
ImageHandler.__draw_on_new_layer__(image, draw_func)
@staticmethod
def __draw_text__(image, text, x, y, color, font_file=None, font_size=None):
def draw_func(draw: ImageDraw):
font = ImageFont.load_default()
try:
if font_file is not None and font_size > 0:
font = ImageFont.truetype(font_file, font_size)
except OSError:
_LOGGER.warning("Unable to find font file: %s", font_file)
except ImportError:
_LOGGER.warning("Unable to open font: %s", font_file)
finally:
w, h = draw.textsize(text, font)
draw.text((x - w / 2, y - h / 2), text, font=font, fill=color)
ImageHandler.__draw_on_new_layer__(image, draw_func)
@staticmethod
def __get_color__(name, colors, default_name=None):
if name in colors:
return colors[name]
if default_name is None:
return ImageHandler.COLORS[name]
return ImageHandler.COLORS[default_name]
@staticmethod
def __draw_on_new_layer__(image, draw_function: Callable):
layer = Image.new("RGBA", image.data.size, (255, 255, 255, 0))
draw = ImageDraw.Draw(layer, "RGBA")
draw_function(draw)
image.data = Image.alpha_composite(image.data, layer)
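# Usage sketch (assumption: `raw_data`, `width`, `height`, `colors` and the CONF_*-keyed
# `image_config` dict are produced by the map-parsing code elsewhere in this integration):
#   image, rooms = ImageHandler.parse(raw_data, width, height, colors, image_config)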
| 43.854406 | 119 | 0.595841 |
4a1d4c47aeb7e2baacd1ef341fc620353217ace4
| 395 |
py
|
Python
|
TrekBot_WS/build/zed-ros-wrapper/tutorials/zed_tracking_sub_tutorial/catkin_generated/pkg.develspace.context.pc.py
|
Rafcin/RescueRoboticsLHMV
|
d3dc63e6c16a040b16170f143556ef358018b7da
|
[
"Unlicense"
] | 1 |
2018-10-04T14:37:00.000Z
|
2018-10-04T14:37:00.000Z
|
TrekBot_WS/build/zed-ros-wrapper/tutorials/zed_tracking_sub_tutorial/catkin_generated/pkg.develspace.context.pc.py
|
Rafcin/TrekBot
|
d3dc63e6c16a040b16170f143556ef358018b7da
|
[
"Unlicense"
] | null | null | null |
TrekBot_WS/build/zed-ros-wrapper/tutorials/zed_tracking_sub_tutorial/catkin_generated/pkg.develspace.context.pc.py
|
Rafcin/TrekBot
|
d3dc63e6c16a040b16170f143556ef358018b7da
|
[
"Unlicense"
] | null | null | null |
# generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "".split(';') if "" != "" else []
PROJECT_CATKIN_DEPENDS = "".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "zed_tracking_sub_tutorial"
PROJECT_SPACE_DIR = "/xavier_ssd/TrekBot/TrekBot_WS/devel"
PROJECT_VERSION = "2.6.0"
| 43.888889 | 68 | 0.721519 |
4a1d4c6ff45c848008923c2e37eaacb7715a5997
| 868 |
py
|
Python
|
var/spack/repos/builtin/packages/py-azure-mgmt-servicebus/package.py
|
MiddelkoopT/spack
|
4d94c4c4600f42a7a3bb3d06ec879140bc259304
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | null | null | null |
var/spack/repos/builtin/packages/py-azure-mgmt-servicebus/package.py
|
MiddelkoopT/spack
|
4d94c4c4600f42a7a3bb3d06ec879140bc259304
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | null | null | null |
var/spack/repos/builtin/packages/py-azure-mgmt-servicebus/package.py
|
MiddelkoopT/spack
|
4d94c4c4600f42a7a3bb3d06ec879140bc259304
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | null | null | null |
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyAzureMgmtServicebus(PythonPackage):
"""Microsoft Azure Service Bus Management Client Library for Python."""
homepage = "https://github.com/Azure/azure-sdk-for-python"
pypi = "azure-mgmt-servicebus/azure-mgmt-servicebus-0.6.0.zip"
version('0.6.0', sha256='f20920b8fb119ef4abeda4d2dac765a4fc48cd0bcf30c27f8c4cc6d890bc08b1')
depends_on('py-setuptools', type='build')
depends_on('py-msrest@0.5.0:', type=('build', 'run'))
depends_on('py-msrestazure@0.4.32:1.999', type=('build', 'run'))
depends_on('py-azure-common@1.1:1.999', type=('build', 'run'))
depends_on('py-azure-mgmt-nspkg', when='^python@:2', type=('build', 'run'))
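    # Usage sketch (assumption: a working Spack installation on PATH):
    #   spack install py-azure-mgmt-servicebus@0.6.0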
| 43.4 | 95 | 0.711982 |
4a1d4c80a264b58d345d179fd6df9d56870950c0
| 41,918 |
py
|
Python
|
nipype/interfaces/spm/model.py
|
PAmcconnell/nipype
|
39fbd5411a844ce7c023964d3295eb7643b95af5
|
[
"Apache-2.0"
] | null | null | null |
nipype/interfaces/spm/model.py
|
PAmcconnell/nipype
|
39fbd5411a844ce7c023964d3295eb7643b95af5
|
[
"Apache-2.0"
] | 2 |
2018-04-26T12:09:32.000Z
|
2018-04-27T06:36:49.000Z
|
nipype/interfaces/spm/model.py
|
PAmcconnell/nipype
|
39fbd5411a844ce7c023964d3295eb7643b95af5
|
[
"Apache-2.0"
] | 1 |
2019-11-14T14:16:57.000Z
|
2019-11-14T14:16:57.000Z
|
# -*- coding: utf-8 -*-
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""The spm module provides basic functions for interfacing with matlab
and spm to access spm tools.
"""
# Standard library imports
import os
from glob import glob
# Third-party imports
import numpy as np
# Local imports
from ... import logging
from ...utils.filemanip import (ensure_list, simplify_list,
split_filename)
from ..base import (Bunch, traits, TraitedSpec, File, Directory,
OutputMultiPath, InputMultiPath, isdefined)
from .base import (SPMCommand, SPMCommandInputSpec, scans_for_fnames,
ImageFileSPM)
__docformat__ = 'restructuredtext'
iflogger = logging.getLogger('nipype.interface')
class Level1DesignInputSpec(SPMCommandInputSpec):
spm_mat_dir = Directory(
exists=True, field='dir', desc='directory to store SPM.mat file (opt)')
timing_units = traits.Enum(
'secs',
'scans',
field='timing.units',
desc='units for specification of onsets',
mandatory=True)
interscan_interval = traits.Float(
field='timing.RT', desc='Interscan interval in secs', mandatory=True)
microtime_resolution = traits.Int(
field='timing.fmri_t',
        desc=('Number of time-bins per scan (opt)'))
microtime_onset = traits.Float(
field='timing.fmri_t0',
desc=('The onset/time-bin in seconds for '
'alignment (opt)'))
session_info = traits.Any(
field='sess',
desc=('Session specific information generated '
'by ``modelgen.SpecifyModel``'),
mandatory=True)
factor_info = traits.List(
traits.Dict(traits.Enum('name', 'levels')),
field='fact',
desc=('Factor specific information '
'file (opt)'))
bases = traits.Dict(
traits.Enum('hrf', 'fourier', 'fourier_han', 'gamma', 'fir'),
field='bases',
desc="""
dict {'name':{'basesparam1':val,...}}
name : string
Name of basis function (hrf, fourier, fourier_han,
gamma, fir)
hrf :
derivs : 2-element list
Model HRF Derivatives. No derivatives: [0,0],
Time derivatives : [1,0], Time and Dispersion
derivatives: [1,1]
fourier, fourier_han, gamma, fir:
length : int
Post-stimulus window length (in seconds)
order : int
Number of basis functions
""",
mandatory=True)
volterra_expansion_order = traits.Enum(
        1, 2, field='volt', desc=('Model interactions - '
                                  'no:1, yes:2'))
global_intensity_normalization = traits.Enum(
'none',
'scaling',
field='global',
desc=('Global intensity '
'normalization - '
'scaling or none'))
mask_image = File(
exists=True,
field='mask',
desc='Image for explicitly masking the analysis')
mask_threshold = traits.Either(
traits.Enum('-Inf'),
traits.Float(),
desc="Thresholding for the mask",
default='-Inf',
usedefault=True)
model_serial_correlations = traits.Enum(
'AR(1)',
'FAST',
'none',
field='cvi',
desc=('Model serial correlations '
'AR(1), FAST or none. FAST '
'is available in SPM12'))
flags = traits.Dict(
desc='Additional arguments to the job, e.g., a common SPM operation is to '
'modify the default masking threshold (mthresh)')
class Level1DesignOutputSpec(TraitedSpec):
spm_mat_file = File(exists=True, desc='SPM mat file')
class Level1Design(SPMCommand):
"""Generate an SPM design matrix
http://www.fil.ion.ucl.ac.uk/spm/doc/manual.pdf#page=59
Examples
--------
>>> level1design = Level1Design()
>>> level1design.inputs.timing_units = 'secs'
>>> level1design.inputs.interscan_interval = 2.5
>>> level1design.inputs.bases = {'hrf':{'derivs': [0,0]}}
>>> level1design.inputs.session_info = 'session_info.npz'
>>> level1design.inputs.flags = {'mthresh': 0.4}
>>> level1design.run() # doctest: +SKIP
"""
input_spec = Level1DesignInputSpec
output_spec = Level1DesignOutputSpec
_jobtype = 'stats'
_jobname = 'fmri_spec'
def _format_arg(self, opt, spec, val):
"""Convert input to appropriate format for spm
"""
if opt in ['spm_mat_dir', 'mask_image']:
return np.array([str(val)], dtype=object)
if opt in ['session_info']: # , 'factor_info']:
if isinstance(val, dict):
return [val]
else:
return val
return super(Level1Design, self)._format_arg(opt, spec, val)
def _parse_inputs(self):
"""validate spm realign options if set to None ignore
"""
einputs = super(Level1Design,
self)._parse_inputs(skip=('mask_threshold', 'flags'))
if isdefined(self.inputs.flags):
einputs[0].update(
{flag: val
for (flag, val) in self.inputs.flags.items()})
for sessinfo in einputs[0]['sess']:
sessinfo['scans'] = scans_for_fnames(
ensure_list(sessinfo['scans']), keep4d=False)
if not isdefined(self.inputs.spm_mat_dir):
einputs[0]['dir'] = np.array([str(os.getcwd())], dtype=object)
return einputs
def _make_matlab_command(self, content):
"""validates spm options and generates job structure
if mfile is True uses matlab .m file
else generates a job structure and saves in .mat
"""
if isdefined(self.inputs.mask_image):
# SPM doesn't handle explicit masking properly, especially
# when you want to use the entire mask image
postscript = "load SPM;\n"
postscript += ("SPM.xM.VM = spm_vol('%s');\n" % simplify_list(
self.inputs.mask_image))
postscript += "SPM.xM.I = 0;\n"
postscript += "SPM.xM.T = [];\n"
postscript += ("SPM.xM.TH = ones(size(SPM.xM.TH))*(%s);\n" %
self.inputs.mask_threshold)
postscript += ("SPM.xM.xs = struct('Masking', "
"'explicit masking only');\n")
postscript += "save SPM SPM;\n"
else:
postscript = None
return super(Level1Design, self)._make_matlab_command(
content, postscript=postscript)
def _list_outputs(self):
outputs = self._outputs().get()
spm = os.path.join(os.getcwd(), 'SPM.mat')
outputs['spm_mat_file'] = spm
return outputs
class EstimateModelInputSpec(SPMCommandInputSpec):
spm_mat_file = File(
exists=True,
field='spmmat',
copyfile=True,
mandatory=True,
desc='Absolute path to SPM.mat')
estimation_method = traits.Dict(
traits.Enum('Classical', 'Bayesian2', 'Bayesian'),
field='method',
mandatory=True,
desc=('Dictionary of either Classical: 1, Bayesian: 1, '
'or Bayesian2: 1 (dict)'))
write_residuals = traits.Bool(
field='write_residuals', desc="Write individual residual images")
flags = traits.Dict(desc='Additional arguments')
class EstimateModelOutputSpec(TraitedSpec):
mask_image = ImageFileSPM(
exists=True, desc='binary mask to constrain estimation')
beta_images = OutputMultiPath(
ImageFileSPM(exists=True), desc='design parameter estimates')
residual_image = ImageFileSPM(
exists=True, desc='Mean-squared image of the residuals')
residual_images = OutputMultiPath(
ImageFileSPM(exists=True),
desc="individual residual images (requires `write_residuals`")
RPVimage = ImageFileSPM(exists=True, desc='Resels per voxel image')
spm_mat_file = File(exists=True, desc='Updated SPM mat file')
labels = ImageFileSPM(exists=True, desc="label file")
SDerror = OutputMultiPath(
ImageFileSPM(exists=True),
desc="Images of the standard deviation of the error")
ARcoef = OutputMultiPath(
ImageFileSPM(exists=True), desc="Images of the AR coefficient")
Cbetas = OutputMultiPath(
ImageFileSPM(exists=True), desc="Images of the parameter posteriors")
SDbetas = OutputMultiPath(
ImageFileSPM(exists=True),
desc="Images of the standard deviation of parameter posteriors")
class EstimateModel(SPMCommand):
"""Use spm_spm to estimate the parameters of a model
http://www.fil.ion.ucl.ac.uk/spm/doc/manual.pdf#page=69
Examples
--------
>>> est = EstimateModel()
>>> est.inputs.spm_mat_file = 'SPM.mat'
>>> est.inputs.estimation_method = {'Classical': 1}
>>> est.run() # doctest: +SKIP
"""
input_spec = EstimateModelInputSpec
output_spec = EstimateModelOutputSpec
_jobtype = 'stats'
_jobname = 'fmri_est'
def _format_arg(self, opt, spec, val):
"""Convert input to appropriate format for spm
"""
if opt == 'spm_mat_file':
return np.array([str(val)], dtype=object)
if opt == 'estimation_method':
if isinstance(val, (str, bytes)):
return {'{}'.format(val): 1}
else:
return val
return super(EstimateModel, self)._format_arg(opt, spec, val)
def _parse_inputs(self):
"""validate spm realign options if set to None ignore
"""
einputs = super(EstimateModel, self)._parse_inputs(skip=('flags'))
if isdefined(self.inputs.flags):
einputs[0].update(
{flag: val
for (flag, val) in self.inputs.flags.items()})
return einputs
def _list_outputs(self):
import scipy.io as sio
outputs = self._outputs().get()
pth = os.path.dirname(self.inputs.spm_mat_file)
outtype = 'nii' if '12' in self.version.split('.')[0] else 'img'
spm = sio.loadmat(self.inputs.spm_mat_file, struct_as_record=False)
betas = [vbeta.fname[0] for vbeta in spm['SPM'][0, 0].Vbeta[0]]
if ('Bayesian' in self.inputs.estimation_method.keys()
or 'Bayesian2' in self.inputs.estimation_method.keys()):
outputs['labels'] = os.path.join(pth, 'labels.{}'.format(outtype))
outputs['SDerror'] = glob(os.path.join(pth, 'Sess*_SDerror*'))
outputs['ARcoef'] = glob(os.path.join(pth, 'Sess*_AR_*'))
if betas:
outputs['Cbetas'] = [
os.path.join(pth, 'C{}'.format(beta)) for beta in betas
]
outputs['SDbetas'] = [
os.path.join(pth, 'SD{}'.format(beta)) for beta in betas
]
if 'Classical' in self.inputs.estimation_method.keys():
outputs['residual_image'] = os.path.join(
pth, 'ResMS.{}'.format(outtype))
outputs['RPVimage'] = os.path.join(pth, 'RPV.{}'.format(outtype))
if self.inputs.write_residuals:
outputs['residual_images'] = glob(os.path.join(pth, 'Res_*'))
if betas:
outputs['beta_images'] = [
os.path.join(pth, beta) for beta in betas
]
outputs['mask_image'] = os.path.join(pth, 'mask.{}'.format(outtype))
outputs['spm_mat_file'] = os.path.join(pth, 'SPM.mat')
return outputs
class EstimateContrastInputSpec(SPMCommandInputSpec):
spm_mat_file = File(
exists=True,
field='spmmat',
desc='Absolute path to SPM.mat',
copyfile=True,
mandatory=True)
contrasts = traits.List(
traits.Either(
traits.Tuple(traits.Str, traits.Enum('T'), traits.List(traits.Str),
traits.List(traits.Float)),
traits.Tuple(traits.Str, traits.Enum('T'), traits.List(traits.Str),
traits.List(traits.Float), traits.List(traits.Float)),
traits.Tuple(traits.Str, traits.Enum('F'),
traits.List(
traits.Either(
traits.Tuple(traits.Str, traits.Enum('T'),
traits.List(traits.Str),
traits.List(traits.Float)),
traits.Tuple(traits.Str, traits.Enum('T'),
traits.List(traits.Str),
traits.List(traits.Float),
traits.List(traits.Float)))))),
desc="""List of contrasts with each contrast being a list of the form:
[('name', 'stat', [condition list], [weight list], [session list])]
If session list is None or not provided, all sessions are used. For
F contrasts, the condition list should contain previously defined
T-contrasts.""",
mandatory=True)
beta_images = InputMultiPath(
File(exists=True),
desc=('Parameter estimates of the '
'design matrix'),
copyfile=False,
mandatory=True)
residual_image = File(
exists=True,
desc='Mean-squared image of the residuals',
copyfile=False,
mandatory=True)
use_derivs = traits.Bool(
desc='use derivatives for estimation', xor=['group_contrast'])
group_contrast = traits.Bool(
desc='higher level contrast', xor=['use_derivs'])
class EstimateContrastOutputSpec(TraitedSpec):
con_images = OutputMultiPath(
File(exists=True), desc='contrast images from a t-contrast')
spmT_images = OutputMultiPath(
File(exists=True), desc='stat images from a t-contrast')
ess_images = OutputMultiPath(
File(exists=True), desc='contrast images from an F-contrast')
spmF_images = OutputMultiPath(
File(exists=True), desc='stat images from an F-contrast')
spm_mat_file = File(exists=True, desc='Updated SPM mat file')
class EstimateContrast(SPMCommand):
"""Use spm_contrasts to estimate contrasts of interest
Examples
--------
>>> import nipype.interfaces.spm as spm
>>> est = spm.EstimateContrast()
>>> est.inputs.spm_mat_file = 'SPM.mat'
>>> cont1 = ('Task>Baseline','T', ['Task-Odd','Task-Even'],[0.5,0.5])
>>> cont2 = ('Task-Odd>Task-Even','T', ['Task-Odd','Task-Even'],[1,-1])
>>> contrasts = [cont1,cont2]
>>> est.inputs.contrasts = contrasts
>>> est.run() # doctest: +SKIP
"""
input_spec = EstimateContrastInputSpec
output_spec = EstimateContrastOutputSpec
_jobtype = 'stats'
_jobname = 'con'
def _make_matlab_command(self, _):
"""validates spm options and generates job structure
"""
contrasts = []
cname = []
for i, cont in enumerate(self.inputs.contrasts):
cname.insert(i, cont[0])
contrasts.insert(i,
Bunch(
name=cont[0],
stat=cont[1],
conditions=cont[2],
weights=None,
sessions=None))
if len(cont) >= 4:
contrasts[i].weights = cont[3]
if len(cont) >= 5:
contrasts[i].sessions = cont[4]
script = "% generated by nipype.interfaces.spm\n"
script += "spm_defaults;\n"
script += ("jobs{1}.stats{1}.con.spmmat = {'%s'};\n" %
self.inputs.spm_mat_file)
script += "load(jobs{1}.stats{1}.con.spmmat{:});\n"
script += "SPM.swd = '%s';\n" % os.getcwd()
script += "save(jobs{1}.stats{1}.con.spmmat{:},'SPM');\n"
script += "names = SPM.xX.name;\n"
# get names for columns
if (isdefined(self.inputs.group_contrast)
and self.inputs.group_contrast):
script += "condnames=names;\n"
else:
if self.inputs.use_derivs:
script += "pat = 'Sn\([0-9]*\) (.*)';\n"
else:
script += ("pat = 'Sn\([0-9]*\) (.*)\*bf\(1\)|Sn\([0-9]*\) "
".*\*bf\([2-9]\)|Sn\([0-9]*\) (.*)';\n")
script += "t = regexp(names,pat,'tokens');\n"
# get sessidx for columns
script += "pat1 = 'Sn\(([0-9].*)\)\s.*';\n"
script += "t1 = regexp(names,pat1,'tokens');\n"
script += ("for i0=1:numel(t),condnames{i0}='';condsess(i0)=0;if "
"~isempty(t{i0}{1}),condnames{i0} = t{i0}{1}{1};"
"condsess(i0)=str2num(t1{i0}{1}{1});end;end;\n")
# BUILD CONTRAST SESSION STRUCTURE
for i, contrast in enumerate(contrasts):
if contrast.stat == 'T':
script += ("consess{%d}.tcon.name = '%s';\n" %
(i + 1, contrast.name))
script += (
"consess{%d}.tcon.convec = zeros(1,numel(names));\n" %
(i + 1))
for c0, cond in enumerate(contrast.conditions):
script += ("idx = strmatch('%s',condnames,'exact');\n" %
(cond))
script += (("if isempty(idx), throw(MException("
"'CondName:Chk', sprintf('Condition %%s not "
"found in design','%s'))); end;\n") % cond)
if contrast.sessions:
for sno, sw in enumerate(contrast.sessions):
script += ("sidx = find(condsess(idx)==%d);\n" %
(sno + 1))
script += (("consess{%d}.tcon.convec(idx(sidx)) "
"= %f;\n") %
(i + 1, sw * contrast.weights[c0]))
else:
script += ("consess{%d}.tcon.convec(idx) = %f;\n" %
(i + 1, contrast.weights[c0]))
for i, contrast in enumerate(contrasts):
if contrast.stat == 'F':
script += ("consess{%d}.fcon.name = '%s';\n" %
(i + 1, contrast.name))
for cl0, fcont in enumerate(contrast.conditions):
try:
tidx = cname.index(fcont[0])
                    except ValueError:
                        raise Exception("Contrast Estimate: could not get "
                                        "index of T contrast. Probably not "
                                        "defined prior to the F contrasts")
script += (("consess{%d}.fcon.convec{%d} = "
"consess{%d}.tcon.convec;\n") %
(i + 1, cl0 + 1, tidx + 1))
script += "jobs{1}.stats{1}.con.consess = consess;\n"
script += ("if strcmp(spm('ver'),'SPM8'), spm_jobman('initcfg');"
"jobs=spm_jobman('spm5tospm8',{jobs});end\n")
script += "spm_jobman('run',jobs);"
return script
def _list_outputs(self):
import scipy.io as sio
outputs = self._outputs().get()
pth, _ = os.path.split(self.inputs.spm_mat_file)
spm = sio.loadmat(self.inputs.spm_mat_file, struct_as_record=False)
con_images = []
spmT_images = []
for con in spm['SPM'][0, 0].xCon[0]:
con_images.append(str(os.path.join(pth, con.Vcon[0, 0].fname[0])))
spmT_images.append(str(os.path.join(pth, con.Vspm[0, 0].fname[0])))
if con_images:
outputs['con_images'] = con_images
outputs['spmT_images'] = spmT_images
spm12 = '12' in self.version.split('.')[0]
if spm12:
ess = glob(os.path.join(pth, 'ess*.nii'))
else:
ess = glob(os.path.join(pth, 'ess*.img'))
if len(ess) > 0:
outputs['ess_images'] = sorted(ess)
if spm12:
spmf = glob(os.path.join(pth, 'spmF*.nii'))
else:
spmf = glob(os.path.join(pth, 'spmF*.img'))
if len(spmf) > 0:
outputs['spmF_images'] = sorted(spmf)
outputs['spm_mat_file'] = self.inputs.spm_mat_file
return outputs
class ThresholdInputSpec(SPMCommandInputSpec):
spm_mat_file = File(
exists=True,
desc='absolute path to SPM.mat',
copyfile=True,
mandatory=True)
stat_image = File(
exists=True, desc='stat image', copyfile=False, mandatory=True)
contrast_index = traits.Int(
mandatory=True, desc='which contrast in the SPM.mat to use')
use_fwe_correction = traits.Bool(
True,
usedefault=True,
desc=('whether to use FWE (Bonferroni) '
'correction for initial threshold '
'(height_threshold_type has to be '
'set to p-value)'))
use_topo_fdr = traits.Bool(
True,
usedefault=True,
desc=('whether to use FDR over cluster extent '
'probabilities'))
height_threshold = traits.Float(
0.05,
usedefault=True,
desc=('value for initial thresholding '
'(defining clusters)'))
height_threshold_type = traits.Enum(
'p-value',
'stat',
usedefault=True,
desc=('Is the cluster forming '
'threshold a stat value or '
'p-value?'))
extent_fdr_p_threshold = traits.Float(
0.05,
usedefault=True,
desc=('p threshold on FDR corrected '
'cluster size probabilities'))
extent_threshold = traits.Int(
0, usedefault=True, desc='Minimum cluster size in voxels')
force_activation = traits.Bool(
False,
usedefault=True,
desc=('In case no clusters survive the '
'topological inference step this '
              'will pick a cluster with the highest '
'sum of t-values. Use with care.'))
class ThresholdOutputSpec(TraitedSpec):
thresholded_map = File(exists=True)
n_clusters = traits.Int()
pre_topo_fdr_map = File(exists=True)
pre_topo_n_clusters = traits.Int()
activation_forced = traits.Bool()
cluster_forming_thr = traits.Float()
class Threshold(SPMCommand):
"""Topological FDR thresholding based on cluster extent/size. Smoothness is
estimated from GLM residuals but is assumed to be the same for all of the
voxels.
Examples
--------
>>> thresh = Threshold()
>>> thresh.inputs.spm_mat_file = 'SPM.mat'
>>> thresh.inputs.stat_image = 'spmT_0001.img'
>>> thresh.inputs.contrast_index = 1
>>> thresh.inputs.extent_fdr_p_threshold = 0.05
>>> thresh.run() # doctest: +SKIP
"""
input_spec = ThresholdInputSpec
output_spec = ThresholdOutputSpec
def _gen_thresholded_map_filename(self):
_, fname, ext = split_filename(self.inputs.stat_image)
return os.path.abspath(fname + "_thr" + ext)
def _gen_pre_topo_map_filename(self):
_, fname, ext = split_filename(self.inputs.stat_image)
return os.path.abspath(fname + "_pre_topo_thr" + ext)
def _make_matlab_command(self, _):
script = "con_index = %d;\n" % self.inputs.contrast_index
script += "cluster_forming_thr = %f;\n" % self.inputs.height_threshold
if self.inputs.use_fwe_correction:
script += "thresDesc = 'FWE';\n"
else:
script += "thresDesc = 'none';\n"
if self.inputs.use_topo_fdr:
script += "use_topo_fdr = 1;\n"
else:
script += "use_topo_fdr = 0;\n"
if self.inputs.force_activation:
script += "force_activation = 1;\n"
else:
script += "force_activation = 0;\n"
script += ("cluster_extent_p_fdr_thr = %f;\n" %
self.inputs.extent_fdr_p_threshold)
script += "stat_filename = '%s';\n" % self.inputs.stat_image
script += ("height_threshold_type = '%s';\n" %
self.inputs.height_threshold_type)
script += "extent_threshold = %d;\n" % self.inputs.extent_threshold
script += "load %s;\n" % self.inputs.spm_mat_file
script += """
FWHM = SPM.xVol.FWHM;
df = [SPM.xCon(con_index).eidf SPM.xX.erdf];
STAT = SPM.xCon(con_index).STAT;
R = SPM.xVol.R;
S = SPM.xVol.S;
n = 1;
switch thresDesc
case 'FWE'
cluster_forming_thr = spm_uc(cluster_forming_thr,df,STAT,R,n,S);
case 'none'
if strcmp(height_threshold_type, 'p-value')
cluster_forming_thr = spm_u(cluster_forming_thr^(1/n),df,STAT);
end
end
stat_map_vol = spm_vol(stat_filename);
[stat_map_data, stat_map_XYZmm] = spm_read_vols(stat_map_vol);
Z = stat_map_data(:)';
[x,y,z] = ind2sub(size(stat_map_data),(1:numel(stat_map_data))');
XYZ = cat(1, x', y', z');
XYZth = XYZ(:, Z >= cluster_forming_thr);
Zth = Z(Z >= cluster_forming_thr);
"""
script += (("spm_write_filtered(Zth,XYZth,stat_map_vol.dim',"
"stat_map_vol.mat,'thresholded map', '%s');\n") %
self._gen_pre_topo_map_filename())
script += """
max_size = 0;
max_size_index = 0;
th_nclusters = 0;
nclusters = 0;
if isempty(XYZth)
thresholded_XYZ = [];
thresholded_Z = [];
else
if use_topo_fdr
V2R = 1/prod(FWHM(stat_map_vol.dim > 1));
[uc,Pc,ue] = spm_uc_clusterFDR(cluster_extent_p_fdr_thr,df,STAT,R,n,Z,XYZ,V2R,cluster_forming_thr);
end
voxel_labels = spm_clusters(XYZth);
nclusters = max(voxel_labels);
thresholded_XYZ = [];
thresholded_Z = [];
for i = 1:nclusters
cluster_size = sum(voxel_labels==i);
if cluster_size > extent_threshold && (~use_topo_fdr || (cluster_size - uc) > -1)
thresholded_XYZ = cat(2, thresholded_XYZ, XYZth(:,voxel_labels == i));
thresholded_Z = cat(2, thresholded_Z, Zth(voxel_labels == i));
th_nclusters = th_nclusters + 1;
end
if force_activation
cluster_sum = sum(Zth(voxel_labels == i));
if cluster_sum > max_size
max_size = cluster_sum;
max_size_index = i;
end
end
end
end
activation_forced = 0;
if isempty(thresholded_XYZ)
if force_activation && max_size ~= 0
thresholded_XYZ = XYZth(:,voxel_labels == max_size_index);
thresholded_Z = Zth(voxel_labels == max_size_index);
th_nclusters = 1;
activation_forced = 1;
else
thresholded_Z = [0];
thresholded_XYZ = [1 1 1]';
th_nclusters = 0;
end
end
fprintf('activation_forced = %d\\n',activation_forced);
fprintf('pre_topo_n_clusters = %d\\n',nclusters);
fprintf('n_clusters = %d\\n',th_nclusters);
fprintf('cluster_forming_thr = %f\\n',cluster_forming_thr);
"""
script += (("spm_write_filtered(thresholded_Z,thresholded_XYZ,"
"stat_map_vol.dim',stat_map_vol.mat,'thresholded map',"
" '%s');\n") % self._gen_thresholded_map_filename())
return script
def aggregate_outputs(self, runtime=None):
outputs = self._outputs()
setattr(outputs, 'thresholded_map',
self._gen_thresholded_map_filename())
setattr(outputs, 'pre_topo_fdr_map', self._gen_pre_topo_map_filename())
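        # The MATLAB script prints one "name = value" line per output; parse them back out of stdout.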
for line in runtime.stdout.split('\n'):
if line.startswith("activation_forced = "):
setattr(outputs, 'activation_forced',
line[len("activation_forced = "):].strip() == "1")
elif line.startswith("n_clusters = "):
setattr(outputs, 'n_clusters',
int(line[len("n_clusters = "):].strip()))
elif line.startswith("pre_topo_n_clusters = "):
setattr(outputs, 'pre_topo_n_clusters',
int(line[len("pre_topo_n_clusters = "):].strip()))
elif line.startswith("cluster_forming_thr = "):
setattr(outputs, 'cluster_forming_thr',
float(line[len("cluster_forming_thr = "):].strip()))
return outputs
def _list_outputs(self):
outputs = self._outputs().get()
outputs['thresholded_map'] = self._gen_thresholded_map_filename()
outputs['pre_topo_fdr_map'] = self._gen_pre_topo_map_filename()
return outputs
class ThresholdStatisticsInputSpec(SPMCommandInputSpec):
spm_mat_file = File(
exists=True,
desc='absolute path to SPM.mat',
copyfile=True,
mandatory=True)
stat_image = File(
exists=True, desc='stat image', copyfile=False, mandatory=True)
contrast_index = traits.Int(
mandatory=True, desc='which contrast in the SPM.mat to use')
height_threshold = traits.Float(
desc=('stat value for initial '
'thresholding (defining clusters)'),
mandatory=True)
extent_threshold = traits.Int(
0, usedefault=True, desc="Minimum cluster size in voxels")
class ThresholdStatisticsOutputSpec(TraitedSpec):
voxelwise_P_Bonf = traits.Float()
voxelwise_P_RF = traits.Float()
voxelwise_P_uncor = traits.Float()
voxelwise_P_FDR = traits.Float()
clusterwise_P_RF = traits.Float()
clusterwise_P_FDR = traits.Float()
class ThresholdStatistics(SPMCommand):
"""Given height and cluster size threshold calculate theoretical
probabilities concerning false positives
Examples
--------
>>> thresh = ThresholdStatistics()
>>> thresh.inputs.spm_mat_file = 'SPM.mat'
>>> thresh.inputs.stat_image = 'spmT_0001.img'
>>> thresh.inputs.contrast_index = 1
>>> thresh.inputs.height_threshold = 4.56
>>> thresh.run() # doctest: +SKIP
"""
input_spec = ThresholdStatisticsInputSpec
output_spec = ThresholdStatisticsOutputSpec
def _make_matlab_command(self, _):
script = "con_index = %d;\n" % self.inputs.contrast_index
script += "cluster_forming_thr = %f;\n" % self.inputs.height_threshold
script += "stat_filename = '%s';\n" % self.inputs.stat_image
script += "extent_threshold = %d;\n" % self.inputs.extent_threshold
script += "load '%s'\n" % self.inputs.spm_mat_file
script += """
FWHM = SPM.xVol.FWHM;
df = [SPM.xCon(con_index).eidf SPM.xX.erdf];
STAT = SPM.xCon(con_index).STAT;
R = SPM.xVol.R;
S = SPM.xVol.S;
n = 1;
voxelwise_P_Bonf = spm_P_Bonf(cluster_forming_thr,df,STAT,S,n)
voxelwise_P_RF = spm_P_RF(1,0,cluster_forming_thr,df,STAT,R,n)
stat_map_vol = spm_vol(stat_filename);
[stat_map_data, stat_map_XYZmm] = spm_read_vols(stat_map_vol);
Z = stat_map_data(:);
Zum = Z;
switch STAT
case 'Z'
VPs = (1-spm_Ncdf(Zum)).^n;
voxelwise_P_uncor = (1-spm_Ncdf(cluster_forming_thr)).^n
case 'T'
VPs = (1 - spm_Tcdf(Zum,df(2))).^n;
voxelwise_P_uncor = (1 - spm_Tcdf(cluster_forming_thr,df(2))).^n
case 'X'
VPs = (1-spm_Xcdf(Zum,df(2))).^n;
voxelwise_P_uncor = (1-spm_Xcdf(cluster_forming_thr,df(2))).^n
case 'F'
VPs = (1 - spm_Fcdf(Zum,df)).^n;
voxelwise_P_uncor = (1 - spm_Fcdf(cluster_forming_thr,df)).^n
end
VPs = sort(VPs);
voxelwise_P_FDR = spm_P_FDR(cluster_forming_thr,df,STAT,n,VPs)
V2R = 1/prod(FWHM(stat_map_vol.dim > 1));
clusterwise_P_RF = spm_P_RF(1,extent_threshold*V2R,cluster_forming_thr,df,STAT,R,n)
[x,y,z] = ind2sub(size(stat_map_data),(1:numel(stat_map_data))');
XYZ = cat(1, x', y', z');
[u, CPs, ue] = spm_uc_clusterFDR(0.05,df,STAT,R,n,Z,XYZ,V2R,cluster_forming_thr);
clusterwise_P_FDR = spm_P_clusterFDR(extent_threshold*V2R,df,STAT,R,n,cluster_forming_thr,CPs')
"""
return script
def aggregate_outputs(self, runtime=None, needed_outputs=None):
outputs = self._outputs()
cur_output = ""
for line in runtime.stdout.split('\n'):
if cur_output != "" and len(line.split()) != 0:
setattr(outputs, cur_output, float(line))
cur_output = ""
continue
if (len(line.split()) != 0 and line.split()[0] in [
"clusterwise_P_FDR", "clusterwise_P_RF",
"voxelwise_P_Bonf", "voxelwise_P_FDR", "voxelwise_P_RF",
"voxelwise_P_uncor"
]):
cur_output = line.split()[0]
continue
return outputs
class FactorialDesignInputSpec(SPMCommandInputSpec):
spm_mat_dir = Directory(
exists=True, field='dir', desc='directory to store SPM.mat file (opt)')
# Need to make an alias of InputMultiPath; the inputs below are not Path
covariates = InputMultiPath(
traits.Dict(
key_trait=traits.Enum('vector', 'name', 'interaction',
'centering')),
field='cov',
desc=('covariate dictionary {vector, name, '
'interaction, centering}'))
threshold_mask_none = traits.Bool(
field='masking.tm.tm_none',
xor=['threshold_mask_absolute', 'threshold_mask_relative'],
desc='do not use threshold masking')
threshold_mask_absolute = traits.Float(
field='masking.tm.tma.athresh',
xor=['threshold_mask_none', 'threshold_mask_relative'],
desc='use an absolute threshold')
threshold_mask_relative = traits.Float(
field='masking.tm.tmr.rthresh',
xor=['threshold_mask_absolute', 'threshold_mask_none'],
desc=('threshold using a '
'proportion of the global '
'value'))
use_implicit_threshold = traits.Bool(
field='masking.im',
desc=('use implicit mask NaNs or '
'zeros to threshold'))
explicit_mask_file = File(
field='masking.em', # requires cell
        desc='use an explicit mask file to threshold')
global_calc_omit = traits.Bool(
field='globalc.g_omit',
xor=['global_calc_mean', 'global_calc_values'],
desc='omit global calculation')
global_calc_mean = traits.Bool(
field='globalc.g_mean',
xor=['global_calc_omit', 'global_calc_values'],
desc='use mean for global calculation')
global_calc_values = traits.List(
traits.Float,
field='globalc.g_user.global_uval',
xor=['global_calc_mean', 'global_calc_omit'],
        desc='user-specified values for global calculation')
no_grand_mean_scaling = traits.Bool(
field='globalm.gmsca.gmsca_no',
desc=('do not perform grand mean '
'scaling'))
global_normalization = traits.Enum(
1,
2,
3,
field='globalm.glonorm',
desc=('global normalization None-1, '
'Proportional-2, ANCOVA-3'))
class FactorialDesignOutputSpec(TraitedSpec):
spm_mat_file = File(exists=True, desc='SPM mat file')
class FactorialDesign(SPMCommand):
"""Base class for factorial designs
http://www.fil.ion.ucl.ac.uk/spm/doc/manual.pdf#page=77
"""
input_spec = FactorialDesignInputSpec
output_spec = FactorialDesignOutputSpec
_jobtype = 'stats'
_jobname = 'factorial_design'
def _format_arg(self, opt, spec, val):
"""Convert input to appropriate format for spm
"""
if opt in ['spm_mat_dir', 'explicit_mask_file']:
return np.array([str(val)], dtype=object)
if opt in ['covariates']:
outlist = []
mapping = {
'name': 'cname',
'vector': 'c',
'interaction': 'iCFI',
'centering': 'iCC'
}
for dictitem in val:
outdict = {}
for key, keyval in list(dictitem.items()):
outdict[mapping[key]] = keyval
outlist.append(outdict)
return outlist
return super(FactorialDesign, self)._format_arg(opt, spec, val)
def _parse_inputs(self):
"""validate spm realign options if set to None ignore
"""
einputs = super(FactorialDesign, self)._parse_inputs()
if not isdefined(self.inputs.spm_mat_dir):
einputs[0]['dir'] = np.array([str(os.getcwd())], dtype=object)
return einputs
def _list_outputs(self):
outputs = self._outputs().get()
spm = os.path.join(os.getcwd(), 'SPM.mat')
outputs['spm_mat_file'] = spm
return outputs
class OneSampleTTestDesignInputSpec(FactorialDesignInputSpec):
in_files = traits.List(
File(exists=True),
field='des.t1.scans',
mandatory=True,
minlen=2,
desc='input files')
class OneSampleTTestDesign(FactorialDesign):
"""Create SPM design for one sample t-test
Examples
--------
>>> ttest = OneSampleTTestDesign()
>>> ttest.inputs.in_files = ['cont1.nii', 'cont2.nii']
>>> ttest.run() # doctest: +SKIP
"""
input_spec = OneSampleTTestDesignInputSpec
def _format_arg(self, opt, spec, val):
"""Convert input to appropriate format for spm
"""
if opt in ['in_files']:
return np.array(val, dtype=object)
return super(OneSampleTTestDesign, self)._format_arg(opt, spec, val)
class TwoSampleTTestDesignInputSpec(FactorialDesignInputSpec):
# very unlikely that you will have a single image in one group, so setting
# parameters to require at least two files in each group [SG]
group1_files = traits.List(
File(exists=True),
field='des.t2.scans1',
mandatory=True,
minlen=2,
desc='Group 1 input files')
group2_files = traits.List(
File(exists=True),
field='des.t2.scans2',
mandatory=True,
minlen=2,
desc='Group 2 input files')
dependent = traits.Bool(
field='des.t2.dept',
desc=('Are the measurements dependent between '
'levels'))
unequal_variance = traits.Bool(
field='des.t2.variance',
desc=('Are the variances equal or unequal '
'between groups'))
class TwoSampleTTestDesign(FactorialDesign):
"""Create SPM design for two sample t-test
Examples
--------
>>> ttest = TwoSampleTTestDesign()
>>> ttest.inputs.group1_files = ['cont1.nii', 'cont2.nii']
>>> ttest.inputs.group2_files = ['cont1a.nii', 'cont2a.nii']
>>> ttest.run() # doctest: +SKIP
"""
input_spec = TwoSampleTTestDesignInputSpec
def _format_arg(self, opt, spec, val):
"""Convert input to appropriate format for spm
"""
if opt in ['group1_files', 'group2_files']:
return np.array(val, dtype=object)
return super(TwoSampleTTestDesign, self)._format_arg(opt, spec, val)
class PairedTTestDesignInputSpec(FactorialDesignInputSpec):
paired_files = traits.List(
traits.List(File(exists=True), minlen=2, maxlen=2),
field='des.pt.pair',
mandatory=True,
minlen=2,
desc='List of paired files')
grand_mean_scaling = traits.Bool(
field='des.pt.gmsca', desc='Perform grand mean scaling')
ancova = traits.Bool(
field='des.pt.ancova', desc='Specify ancova-by-factor regressors')
class PairedTTestDesign(FactorialDesign):
"""Create SPM design for paired t-test
Examples
--------
>>> pttest = PairedTTestDesign()
>>> pttest.inputs.paired_files = [['cont1.nii','cont1a.nii'],['cont2.nii','cont2a.nii']]
>>> pttest.run() # doctest: +SKIP
"""
input_spec = PairedTTestDesignInputSpec
def _format_arg(self, opt, spec, val):
"""Convert input to appropriate format for spm
"""
if opt in ['paired_files']:
return [dict(scans=np.array(files, dtype=object)) for files in val]
return super(PairedTTestDesign, self)._format_arg(opt, spec, val)
class MultipleRegressionDesignInputSpec(FactorialDesignInputSpec):
in_files = traits.List(
File(exists=True),
field='des.mreg.scans',
mandatory=True,
minlen=2,
desc='List of files')
include_intercept = traits.Bool(
True,
field='des.mreg.incint',
usedefault=True,
desc='Include intercept in design')
user_covariates = InputMultiPath(
traits.Dict(key_trait=traits.Enum('vector', 'name', 'centering')),
field='des.mreg.mcov',
desc=('covariate dictionary {vector, '
'name, centering}'))
class MultipleRegressionDesign(FactorialDesign):
"""Create SPM design for multiple regression
Examples
--------
>>> mreg = MultipleRegressionDesign()
>>> mreg.inputs.in_files = ['cont1.nii','cont2.nii']
>>> mreg.run() # doctest: +SKIP
"""
input_spec = MultipleRegressionDesignInputSpec
def _format_arg(self, opt, spec, val):
"""Convert input to appropriate format for spm
"""
if opt in ['in_files']:
return np.array(val, dtype=object)
if opt in ['user_covariates']:
outlist = []
mapping = {'name': 'cname', 'vector': 'c', 'centering': 'iCC'}
for dictitem in val:
outdict = {}
for key, keyval in list(dictitem.items()):
outdict[mapping[key]] = keyval
outlist.append(outdict)
return outlist
return (super(MultipleRegressionDesign, self)._format_arg(
opt, spec, val))
| 37.128432 | 107 | 0.576006 |
4a1d4d1cc62acfa6ba8124c151c63918e7190bf9
| 95 |
py
|
Python
|
pkr/version.py
|
rlacazel/pkr
|
3575b634286409cdbc7799fce254fbd9848b066a
|
[
"Apache-2.0"
] | null | null | null |
pkr/version.py
|
rlacazel/pkr
|
3575b634286409cdbc7799fce254fbd9848b066a
|
[
"Apache-2.0"
] | null | null | null |
pkr/version.py
|
rlacazel/pkr
|
3575b634286409cdbc7799fce254fbd9848b066a
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright© 1986-2019 Altair Engineering Inc.
__version__ = "1.0.10"
| 19 | 46 | 0.652632 |
4a1d4d3442b9a97395597bc48850f2a3d41ab391
| 9,034 |
py
|
Python
|
tests/test_read.py
|
NHPatterson/bfio
|
0891721b316ccaef6d4bb434e1c80c93fa8fcd69
|
[
"MIT"
] | 6 |
2021-09-09T01:27:13.000Z
|
2021-12-16T13:52:15.000Z
|
tests/test_read.py
|
NHPatterson/bfio
|
0891721b316ccaef6d4bb434e1c80c93fa8fcd69
|
[
"MIT"
] | 15 |
2021-07-20T13:16:34.000Z
|
2022-03-04T13:34:16.000Z
|
tests/test_read.py
|
NHPatterson/bfio
|
0891721b316ccaef6d4bb434e1c80c93fa8fcd69
|
[
"MIT"
] | 2 |
2021-11-01T18:08:18.000Z
|
2022-01-26T19:23:12.000Z
|
import unittest
import requests, io, pathlib, shutil, logging, sys
import bfio
import numpy as np
TEST_IMAGES = {
'1884807.ome.zarr': 'https://s3.embassy.ebi.ac.uk/idr/zarr/v0.1/1884807.zarr/',
'Plate1-Blue-A-12-Scene-3-P3-F2-03.czi': 'https://downloads.openmicroscopy.org/images/Zeiss-CZI/idr0011/Plate1-Blue-A_TS-Stinger/Plate1-Blue-A-12-Scene-3-P3-F2-03.czi',
'0.tif': 'https://osf.io/j6aer/download',
'img_r001_c001.ome.tif': 'https://github.com/usnistgov/WIPP/raw/master/data/PyramidBuilding/inputCollection/img_r001_c001.ome.tif'
}
TEST_DIR = pathlib.Path(__file__).with_name('data')
logging.basicConfig(format='%(asctime)s - %(name)-8s - %(levelname)-8s - %(message)s',
datefmt='%d-%b-%y %H:%M:%S')
logger = logging.getLogger("bfio.test")
if '-v' in sys.argv:
logger.setLevel(logging.INFO)
def setUpModule():
""" Download images for testing """
TEST_DIR.mkdir(exist_ok=True)
for file,url in TEST_IMAGES.items():
logger.info(f'setup - Downloading: {file}')
if not file.endswith('.ome.zarr'):
r = requests.get(url)
with open(TEST_DIR.joinpath(file),'wb') as fw:
fw.write(r.content)
else:
base_path = TEST_DIR.joinpath(file)
base_path.mkdir()
base_path.joinpath('0').mkdir()
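            # Minimal set of zarr metadata and chunk files needed to rebuild the test image locally.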
units = [
'.zattrs',
'.zgroup',
'0/.zarray',
'0/0.0.0.0.0',
'0/0.1.0.0.0',
'0/0.2.0.0.0',
]
for u in units:
with open(base_path.joinpath(u),'wb') as fw:
fw.write(requests.get(url+u).content)
# def tearDownModule():
# """ Remove test images """
# logger.info('teardown - Removing test images...')
# shutil.rmtree(TEST_DIR)
class TestSimpleRead(unittest.TestCase):
@classmethod
def tearDownClass(self):
""" Load the czi image, and save as a npy file for further testing. """
with bfio.BioReader(TEST_DIR.joinpath('Plate1-Blue-A-12-Scene-3-P3-F2-03.czi')) as br:
np.save(TEST_DIR.joinpath('4d_array.npy'),br[:])
def test_java(self):
"""test_java - Fails if Java/JPype improperly configured """
bfio.start()
def test_read_czi(self):
"""test_read_czi
This test will fail if JPype and Java are not installed or improperly
configured.
"""
with bfio.BioReader(TEST_DIR.joinpath('Plate1-Blue-A-12-Scene-3-P3-F2-03.czi')) as br:
self.assertEqual(br._backend_name, 'java')
I = br[:]
np.save(TEST_DIR.joinpath('4d_array.npy'),br[:])
self.assertEqual(I.shape[0],512)
self.assertEqual(I.shape[1],672)
self.assertEqual(I.shape[2],21)
self.assertEqual(I.shape[3],3)
self.assertEqual(I.shape[4],1)
self.assertEqual(I.dtype,np.uint16)
def test_read_tif_strip_auto(self):
"""test_read_tif_strip_auto - Read tiff saved in strips, should load java backend """
with bfio.BioReader(TEST_DIR.joinpath('0.tif')) as br:
self.assertEqual(br._backend_name, 'java')
I = br[:]
def test_read_zarr_auto(self):
"""test_read_zarr_auto - Read ome zarr, should load zarr backend """
with bfio.BioReader(TEST_DIR.joinpath('1884807.ome.zarr')) as br:
self.assertEqual(br._backend_name, 'zarr')
I = br[:]
logger.info(I.shape)
@unittest.expectedFailure
def test_read_ome_tif_strip_auto(self):
"""test_read_ome_tif_strip_auto - Expected failure, should load python backend """
with bfio.BioReader(TEST_DIR.joinpath('img_r001_c001.ome.tif')) as br:
I = br[:]
def test_read_tif_strip_java(self):
"""test_read_tif_strip_java - Read tiff using Java backend """
with bfio.BioReader(TEST_DIR.joinpath('img_r001_c001.ome.tif'),backend='java') as br:
self.assertEqual(br._backend_name,'java')
I = br[:]
@unittest.expectedFailure
def test_read_tif_strip_python(self):
"""test_read_tif_strip_python - Expected failure, read tiff saved in strips """
with bfio.BioReader(TEST_DIR.joinpath('img_r001_c001.ome.tif'),backend='python') as br:
I = br[:]
""" Metadata tests to run on each backend """
def get_dims(reader):
""" Get all dimension attributes """
for dim in 'xyzct':
logger.info('image.{} = {}'.format(dim,getattr(reader,dim)))
for dim in 'xyzct'.upper():
logger.info('image.{} = {}'.format(dim,getattr(reader,dim)))
logger.info('image.shape = {}'.format(reader.shape))
def get_pixel_size(reader):
""" Get all pixel size attributes """
for dim in 'xyz':
attribute = 'physical_size_{}'.format(dim)
logger.info('image.physical_size_{} = {}'.format(dim,
getattr(reader,attribute)))
for dim in 'xyz':
attribute = 'ps_{}'.format(dim)
logger.info('image.ps_{} = {}'.format(dim,
getattr(reader,attribute)))
def get_pixel_info(reader):
""" Get pixel information (type, samples per pixel, etc) """
logger.info('image.samples_per_pixel={}'.format(reader.samples_per_pixel))
logger.info('image.spp={}'.format(reader.spp))
logger.info('image.bytes_per_pixel={}'.format(reader.bytes_per_pixel))
logger.info('image.bpp={}'.format(reader.bpp))
logger.info('image.dtype={}'.format(reader.dtype))
def get_channel_names(reader):
""" Get channel names attribute """
logger.info('image.channel_names={}'.format(reader.channel_names))
logger.info('image.cnames={}'.format(reader.cnames))
""" Test classes (where the testing actually happens) """
class TestVersion(unittest.TestCase):
def test_bfio_version(self):
""" Ensure bfio version is properly loaded """
logger.info('__version__ = {}'.format(bfio.__version__))
assert bfio.__version__ != '0.0.0'
def test_jar_version(self):
""" Load loci-tools.jar and get version """
logger.info('JAR_VERSION = {}'.format(bfio.JAR_VERSION))
        assert bfio.JAR_VERSION != None
class TestJavaReader(unittest.TestCase):
def test_get_dims(self):
""" Testing metadata dimension attributes """
with bfio.BioReader(TEST_DIR.joinpath('0.tif')) as br:
get_dims(br)
def test_get_pixel_size(self):
""" Testing metadata pixel sizes """
with bfio.BioReader(TEST_DIR.joinpath('0.tif')) as br:
get_pixel_size(br)
def test_get_pixel_info(self):
""" Testing metadata pixel information """
with bfio.BioReader(TEST_DIR.joinpath('0.tif')) as br:
get_pixel_info(br)
def test_get_channel_names(self):
""" Testing metadata channel names """
with bfio.BioReader(TEST_DIR.joinpath('0.tif')) as br:
get_channel_names(br)
class TestZarrReader(unittest.TestCase):
def test_get_dims(self):
""" Testing metadata dimension attributes """
with bfio.BioReader(TEST_DIR.joinpath('1884807.ome.zarr')) as br:
get_dims(br)
def test_get_pixel_size(self):
""" Testing metadata pixel sizes """
with bfio.BioReader(TEST_DIR.joinpath('1884807.ome.zarr')) as br:
get_pixel_size(br)
def test_get_pixel_info(self):
""" Testing metadata pixel information """
with bfio.BioReader(TEST_DIR.joinpath('1884807.ome.zarr')) as br:
get_pixel_info(br)
def test_get_channel_names(self):
""" Testing metadata channel names """
with bfio.BioReader(TEST_DIR.joinpath('1884807.ome.zarr')) as br:
get_channel_names(br)
class TestZarrMetadata(unittest.TestCase):
def test_set_metadata(self):
""" Testing metadata dimension attributes """
cname = ['test']
image = np.load(TEST_DIR.joinpath('4d_array.npy'))
with bfio.BioWriter(TEST_DIR.joinpath('test_cname.ome.zarr')) as bw:
bw.cnames = cname
bw.ps_x = (100,'nm')
bw.shape = image.shape
bw[:] = image
with bfio.BioReader(TEST_DIR.joinpath('test_cname.ome.zarr')) as br:
logger.info(br.cnames)
logger.info(br.ps_x)
self.assertEqual(br.cnames[0],cname[0])
| 36.427419 | 172 | 0.576157 |
4a1d4d6f1c19820e0818ed0731da2bb752ecb002
| 7,594 |
py
|
Python
|
edb/server/main.py
|
mcaramma/edgedb
|
53b18dbaf7407617ca135d1f8a5047bda6414654
|
[
"Apache-2.0"
] | null | null | null |
edb/server/main.py
|
mcaramma/edgedb
|
53b18dbaf7407617ca135d1f8a5047bda6414654
|
[
"Apache-2.0"
] | null | null | null |
edb/server/main.py
|
mcaramma/edgedb
|
53b18dbaf7407617ca135d1f8a5047bda6414654
|
[
"Apache-2.0"
] | null | null | null |
#
# This source file is part of the EdgeDB open source project.
#
# Copyright 2016-present MagicStack Inc. and the EdgeDB authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import asyncio
import getpass
import ipaddress
import logging
import os
import os.path
import setproctitle
import signal
import socket
import sys
import click
from asyncpg import cluster as pg_cluster
from edb.lang.common import exceptions
from . import cluster as edgedb_cluster
from . import daemon
from . import defines
from . import logsetup
logger = logging.getLogger('edb.server')
def abort(msg, *args):
logger.critical(msg, *args)
sys.exit(1)
def terminate_server(server, loop):
loop.stop()
def _init_cluster(cluster, args):
loop = asyncio.get_event_loop()
from edb.server import pgsql as backend
bootstrap_args = {
'default_database': (args['default_database'] or
args['default_database_user']),
'default_database_user': args['default_database_user'],
}
loop.run_until_complete(backend.bootstrap(
cluster, bootstrap_args, loop=loop))
def _sd_notify(message):
notify_socket = os.environ.get('NOTIFY_SOCKET')
if not notify_socket:
return
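    # A NOTIFY_SOCKET starting with '@' is an abstract-namespace socket; the kernel expects a leading NUL byte instead.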
if notify_socket[0] == '@':
notify_socket = '\0' + notify_socket[1:]
sd_sock = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM)
sd_sock.connect(notify_socket)
try:
sd_sock.sendall(message.encode())
finally:
sd_sock.close()
def _run_server(cluster, args):
loop = asyncio.get_event_loop()
srv = None
_init_cluster(cluster, args)
from edb.server import protocol as edgedb_protocol
def protocol_factory():
return edgedb_protocol.Protocol(cluster, loop=loop)
try:
srv = loop.run_until_complete(
loop.create_server(
protocol_factory,
host=args['bind_address'], port=args['port']))
loop.add_signal_handler(signal.SIGTERM, terminate_server, srv, loop)
logger.info('Serving on %s:%s', args['bind_address'], args['port'])
# Notify systemd that we've started up.
_sd_notify('READY=1')
loop.run_forever()
except KeyboardInterrupt:
logger.info('Shutting down.')
_sd_notify('STOPPING=1')
srv.close()
loop.run_until_complete(srv.wait_closed())
srv = None
finally:
if srv is not None:
logger.info('Shutting down.')
srv.close()
def run_server(args):
if edgedb_cluster.is_in_dev_mode():
logger.info('EdgeDB server starting in DEV mode.')
else:
logger.info('EdgeDB server starting.')
pg_cluster_started_by_us = False
if args['data_dir']:
server_settings = {
'log_connections': 'yes',
'log_statement': 'all',
'log_disconnections': 'yes',
'log_min_messages': 'INFO',
'client_min_messages': 'INFO',
}
if args['timezone']:
server_settings['TimeZone'] = args['timezone']
cluster = edgedb_cluster.get_pg_cluster(args['data_dir'])
cluster_status = cluster.get_status()
if cluster_status == 'not-initialized':
logger.info(
'Initializing database cluster in %s', args['data_dir'])
initdb_output = cluster.init(
username='postgres', locale='C', encoding='UTF8')
for line in initdb_output.splitlines():
logger.debug('initdb: %s', line)
cluster.reset_hba()
cluster.add_hba_entry(
type='local',
database='all', user='all',
auth_method='trust'
)
cluster.add_hba_entry(
type='local', address=ipaddress.ip_network('127.0.0.0/24'),
database='all', user='all',
auth_method='trust'
)
cluster_status = cluster.get_status()
if cluster_status == 'stopped':
cluster.start(
port=edgedb_cluster.find_available_port(),
server_settings=server_settings)
pg_cluster_started_by_us = True
elif cluster_status != 'running':
abort('Could not start database cluster in %s', args['data_dir'])
cluster.override_connection_spec(
user='postgres', database='template1')
else:
cluster = pg_cluster.RunningCluster(dsn=args['postgres'])
if args['bootstrap']:
_init_cluster(cluster, args)
else:
_run_server(cluster, args)
if pg_cluster_started_by_us:
cluster.stop()
@click.command('EdgeDB Server')
@click.option(
'-D', '--data-dir', type=str, envvar='EDGEDB_DATADIR',
help='database cluster directory')
@click.option(
'-P', '--postgres', type=str,
help='address of Postgres backend server in DSN format')
@click.option(
'-l', '--log-level',
help=('Logging level. Possible values: (d)ebug, (i)nfo, (w)arn, '
'(e)rror, (s)ilent'),
default='i', envvar='EDGEDB_LOG_LEVEL')
@click.option(
'--log-to',
help=('send logs to DEST, where DEST can be a file name, "syslog", '
'or "stderr"'),
type=str, metavar='DEST', default='stderr')
@click.option(
'--bootstrap', is_flag=True,
help='bootstrap the database cluster and exit')
@click.option(
'--default-database', type=str, default=getpass.getuser(),
help='the name of the default database to create')
@click.option(
'--default-database-user', type=str, default=getpass.getuser(),
help='the name of the default database owner')
@click.option(
'-I', '--bind-address', type=str, default='127.0.0.1',
help='IP address to listen on', envvar='EDGEDB_BIND_ADDRESS')
@click.option(
'-p', '--port', type=int, default=defines.EDGEDB_PORT,
help='port to listen on')
@click.option(
'-b', '--background', is_flag=True, help='daemonize')
@click.option(
'--pidfile', type=str, default='/run/edgedb/',
help='path to PID file directory')
@click.option(
'--timezone', type=str,
help='timezone for displaying and interpreting timestamps')
@click.option(
'--daemon-user', type=int)
@click.option(
'--daemon-group', type=int)
def main(**kwargs):
logsetup.setup_logging(kwargs['log_level'], kwargs['log_to'])
exceptions.install_excepthook()
if kwargs['background']:
daemon_opts = {'detach_process': True}
pidfile = os.path.join(
kwargs['pidfile'], '.s.EDGEDB.{}.lock'.format(kwargs['port']))
daemon_opts['pidfile'] = pidfile
if kwargs['daemon_user']:
daemon_opts['uid'] = kwargs['daemon_user']
if kwargs['daemon_group']:
daemon_opts['gid'] = kwargs['daemon_group']
with daemon.DaemonContext(**daemon_opts):
setproctitle.setproctitle(
'edgedb-server-{}'.format(kwargs['port']))
run_server(kwargs)
else:
run_server(kwargs)
def main_dev():
edgedb_cluster.enable_dev_mode()
main()
| 29.207692 | 77 | 0.631288 |
4a1d4d8c617c52aaa536b8d67df6213b644c4e93
| 8,452 |
py
|
Python
|
airflow/dags/polygonetl_airflow/build_parse_dag.py
|
cffls/polygon-etl
|
6f1c0b1adc8066afd7b6f653944947801c0d2b92
|
[
"MIT"
] | 28 |
2021-04-30T08:50:49.000Z
|
2022-03-23T17:58:40.000Z
|
airflow/dags/polygonetl_airflow/build_parse_dag.py
|
cffls/polygon-etl
|
6f1c0b1adc8066afd7b6f653944947801c0d2b92
|
[
"MIT"
] | 7 |
2021-04-19T06:33:32.000Z
|
2022-03-14T05:55:12.000Z
|
airflow/dags/polygonetl_airflow/build_parse_dag.py
|
cffls/polygon-etl
|
6f1c0b1adc8066afd7b6f653944947801c0d2b92
|
[
"MIT"
] | 18 |
2021-06-23T14:36:14.000Z
|
2022-03-24T21:46:52.000Z
|
from __future__ import print_function
import collections
import logging
import os
from datetime import datetime, timedelta
from glob import glob
from airflow import models
from airflow.operators.bash_operator import BashOperator
from airflow.operators.email_operator import EmailOperator
from airflow.operators.python_operator import PythonOperator
from airflow.operators.sensors import ExternalTaskSensor
from google.cloud import bigquery
from polygonetl_airflow.bigquery_utils import create_view
from polygonetl_airflow.common import read_json_file, read_file
from polygonetl_airflow.parse.parse_logic import ref_regex, parse, create_dataset
from utils.error_handling import handle_dag_failure
logging.basicConfig()
logging.getLogger().setLevel(logging.DEBUG)
dags_folder = os.environ.get('DAGS_FOLDER', '/home/airflow/gcs/dags')
def build_parse_dag(
dag_id,
dataset_folder,
parse_destination_dataset_project_id,
notification_emails=None,
parse_start_date=datetime(2020, 5, 30),
schedule_interval='0 0 * * *',
parse_all_partitions=None,
):
logging.info('parse_all_partitions is {}'.format(parse_all_partitions))
if parse_all_partitions:
dag_id = dag_id + '_FULL'
SOURCE_PROJECT_ID = 'public-data-finance'
SOURCE_DATASET_NAME = 'crypto_polygon'
PARTITION_DAG_ID = 'polygon_partition_dag'
default_dag_args = {
'depends_on_past': True,
'start_date': parse_start_date,
'email_on_failure': True,
'email_on_retry': False,
'retries': 5,
'retry_delay': timedelta(minutes=5),
'on_failure_callback': handle_dag_failure,
}
if notification_emails and len(notification_emails) > 0:
default_dag_args['email'] = [email.strip() for email in notification_emails.split(',')]
dag = models.DAG(
dag_id,
catchup=False,
schedule_interval=schedule_interval,
default_args=default_dag_args)
validation_error = None
try:
validate_definition_files(dataset_folder)
except ValueError as e:
validation_error = e
# This prevents failing all dags as they are constructed in a loop in ethereum_parse_dag.py
if validation_error is not None:
def raise_validation_error(ds, **kwargs):
raise validation_error
validation_error_operator = PythonOperator(
task_id='validation_error',
python_callable=raise_validation_error,
provide_context=True,
execution_timeout=timedelta(minutes=10),
dag=dag
)
return dag
def create_parse_task(table_definition):
def parse_task(ds, **kwargs):
client = bigquery.Client()
parse(
bigquery_client=client,
table_definition=table_definition,
ds=ds,
source_project_id=SOURCE_PROJECT_ID,
source_dataset_name=SOURCE_DATASET_NAME,
destination_project_id=parse_destination_dataset_project_id,
sqls_folder=os.path.join(dags_folder, 'resources/stages/parse/sqls'),
parse_all_partitions=parse_all_partitions
)
table_name = table_definition['table']['table_name']
parsing_operator = PythonOperator(
task_id=table_name,
python_callable=parse_task,
provide_context=True,
execution_timeout=timedelta(minutes=60),
dag=dag
)
contract_address = table_definition['parser']['contract_address']
if contract_address is not None:
ref_dependencies = ref_regex.findall(table_definition['parser']['contract_address'])
else:
ref_dependencies = []
return parsing_operator, ref_dependencies
def create_add_view_task(dataset_name, view_name, sql):
def create_view_task(ds, **kwargs):
client = bigquery.Client()
dest_table_name = view_name
dest_table_ref = create_dataset(client, dataset_name, parse_destination_dataset_project_id).table(dest_table_name)
print('View sql: \n' + sql)
create_view(client, sql, dest_table_ref)
create_view_operator = PythonOperator(
task_id=f'create_view_{view_name}',
python_callable=create_view_task,
provide_context=True,
execution_timeout=timedelta(minutes=10),
dag=dag
)
return create_view_operator
wait_for_ethereum_load_dag_task = ExternalTaskSensor(
task_id='wait_for_polygon_partition_dag',
external_dag_id=PARTITION_DAG_ID,
external_task_id='done',
execution_delta=timedelta(minutes=30),
priority_weight=0,
mode='reschedule',
retries=20,
poke_interval=5 * 60,
timeout=60 * 60 * 30,
dag=dag)
json_files = get_list_of_files(dataset_folder, '*.json')
logging.info(json_files)
all_parse_tasks = {}
task_dependencies = {}
for json_file in json_files:
table_definition = read_json_file(json_file)
task, dependencies = create_parse_task(table_definition)
wait_for_ethereum_load_dag_task >> task
all_parse_tasks[task.task_id] = task
task_dependencies[task.task_id] = dependencies
checkpoint_task = BashOperator(
task_id='parse_all_checkpoint',
bash_command='echo parse_all_checkpoint',
priority_weight=1000,
dag=dag
)
for task, dependencies in task_dependencies.items():
for dependency in dependencies:
if dependency not in all_parse_tasks:
raise ValueError(
                    'Table {} is not found in the dataset. Check your ref() in the contract_address field.'.format(
dependency))
all_parse_tasks[dependency] >> all_parse_tasks[task]
all_parse_tasks[task] >> checkpoint_task
final_tasks = [checkpoint_task]
sql_files = get_list_of_files(dataset_folder, '*.sql')
logging.info(sql_files)
# TODO: Use folder name as dataset name and remove dataset_name in JSON definitions.
dataset_name = os.path.basename(dataset_folder)
full_dataset_name = 'polygon_' + dataset_name
for sql_file in sql_files:
sql = read_file(sql_file)
base_name = os.path.basename(sql_file)
view_name = os.path.splitext(base_name)[0]
create_view_task = create_add_view_task(full_dataset_name, view_name, sql)
checkpoint_task >> create_view_task
final_tasks.append(create_view_task)
return dag
def get_list_of_files(dataset_folder, filter='*.json'):
logging.info('get_list_of_files')
logging.info(dataset_folder)
logging.info(os.path.join(dataset_folder, filter))
return [f for f in glob(os.path.join(dataset_folder, filter))]
def validate_definition_files(dataset_folder):
json_files = get_list_of_files(dataset_folder, '*.json')
dataset_folder_name = dataset_folder.split('/')[-1]
all_lowercase_table_names = []
for json_file in json_files:
file_name = json_file.split('/')[-1].replace('.json', '')
table_definition = read_json_file(json_file)
table = table_definition.get('table')
if not table:
raise ValueError(f'table is empty in file {json_file}')
dataset_name = table.get('dataset_name')
if not dataset_name:
raise ValueError(f'dataset_name is empty in file {json_file}')
if dataset_folder_name != dataset_name:
raise ValueError(f'dataset_name {dataset_name} is not equal to dataset_folder_name {dataset_folder_name}')
table_name = table.get('table_name')
if not table_name:
raise ValueError(f'table_name is empty in file {json_file}')
if file_name != table_name:
            raise ValueError(f'file_name {file_name} does not match the table_name {table_name}')
all_lowercase_table_names.append(table_name.lower())
table_name_counts = collections.defaultdict(lambda: 0)
for table_name in all_lowercase_table_names:
table_name_counts[table_name] += 1
non_unique_table_names = [name for name, count in table_name_counts.items() if count > 1]
if len(non_unique_table_names) > 0:
raise ValueError(f'The following table names are not unique {",".join(non_unique_table_names)}')
| 34.92562 | 126 | 0.682679 |
4a1d4fc5af6a726e482a075b2721529629f1f608
| 1,015 |
py
|
Python
|
create-serverless-clusters.py
|
mistwire/2019_AWS_Boto3_vBrownBag
|
8680a7a366a3a5a067d3e1cc7dc84010653b3c51
|
[
"BSD-3-Clause"
] | 2 |
2020-07-04T01:11:14.000Z
|
2021-12-04T06:08:56.000Z
|
create-serverless-clusters.py
|
mistwire/2019_AWS_Boto3_vBrownBag
|
8680a7a366a3a5a067d3e1cc7dc84010653b3c51
|
[
"BSD-3-Clause"
] | 1 |
2021-07-06T02:35:02.000Z
|
2021-07-06T02:35:02.000Z
|
create-serverless-clusters.py
|
calvinhp/2019_AWS_Boto3_vBrownBag
|
8680a7a366a3a5a067d3e1cc7dc84010653b3c51
|
[
"BSD-3-Clause"
] | 4 |
2019-02-28T14:27:21.000Z
|
2020-08-27T05:34:55.000Z
|
import csv
import boto3
from pwgen import pwgen
client = boto3.client('rds')
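# Build ten cluster identifiers (saas00 .. saas09); a password is generated for each below.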
new_dbs = {'saas{:0>2}'.format(db): '' for db in range(10)}
for db in new_dbs.keys():
new_dbs[db] = pwgen(20)
client.create_db_cluster(
AvailabilityZones=[
"us-east-1b",
"us-east-1c"
],
BackupRetentionPeriod=1,
DBClusterIdentifier=db,
VpcSecurityGroupIds=[
'sg-0aa5e07ebdabfb0d1'
],
DBSubnetGroupName="default-vpc-0f38b2e75ac4e5349",
Engine="aurora",
MasterUsername="root",
MasterUserPassword=new_dbs[db],
StorageEncrypted=True,
EngineMode="serverless",
ScalingConfiguration={
"MinCapacity": 2,
"MaxCapacity": 64,
"AutoPause": True,
"SecondsUntilAutoPause": 300
},
DeletionProtection=True
)
with open('newdbs.csv', 'w', newline='') as f:
writer = csv.writer(f)
[writer.writerow(db) for db in new_dbs.items()]
| 25.375 | 59 | 0.581281 |
4a1d500768464ade5c5bde87bfd4fe909b8354fb
| 4,511 |
py
|
Python
|
server.py
|
TonyJR/image-sharpening
|
ec135cbbc6aba5754e4df0a01d8d0e03ec415152
|
[
"MIT"
] | 1 |
2020-03-18T03:49:00.000Z
|
2020-03-18T03:49:00.000Z
|
server.py
|
TonyJR/image-sharpening
|
ec135cbbc6aba5754e4df0a01d8d0e03ec415152
|
[
"MIT"
] | null | null | null |
server.py
|
TonyJR/image-sharpening
|
ec135cbbc6aba5754e4df0a01d8d0e03ec415152
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
# -*-coding=utf-8 -*-
import sys
import traceback
import datetime
from StringIO import StringIO
import requests
import tornado
import tornado.ioloop
import tornado.web
import tornado.gen
from tornado.escape import json_encode
import image
import imageColor
import os
import time
from concurrent.futures import ThreadPoolExecutor
reload(sys)
sys.setdefaultencoding('utf8')
path = "/tmp/com.shiqichuban.image-sharpening"
if not os.path.exists(path):
os.makedirs(path)
class Executor(ThreadPoolExecutor):
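    # Simple singleton: every handler shares one thread pool (max 30 workers).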
_instance = None
def __new__(cls, *args, **kwargs):
if not getattr(cls, '_instance', None):
cls._instance = ThreadPoolExecutor(max_workers=30)
return cls._instance
class ImageHandler(tornado.web.RequestHandler):
executor = Executor()
def set_default_headers(self):
self.set_header("Access-Control-Allow-Origin", "*") # 这个地方可以写域名
self.set_header("Access-Control-Allow-Headers", "Content-Type, Content-MD5, Accept, Accept-Encoding, X-Shiqi-Content-Type, X-Shiqi-Content-Disposition, X-Shiqi-Content-Md5, X-Shiqi-Ctime, X-Shiqi-Filename, X-Shiqi-Position, Refer, User-Agent, Origin, Authorization")
self.set_header("Access-Control-Allow-Methods", "POST, GET, OPTIONS")
self.set_header("Access-Control-Allow-Max-Age","1728000")
self.set_header("Cache-Control","max-age=2628000")
@tornado.concurrent.run_on_executor
def get(self, *args, **kwargs):
image_url = self.get_argument("image_url", default="")
width = int(self.get_argument("width", default=0))
height = int(self.get_argument("height", default=0))
force = float(self.get_argument("force", default=0))
smoth = int(self.get_argument("smoth", default=0))
if not image_url:
result = {}
result["msg"] = "error"
self.write(json_encode(result))
elif image_url.endswith(('png','PNG','gif','GIF')):
print("png图片重定向"+image_url)
self.redirect(image_url)
else:
response = self.converImage(image_url,width,height,force,smoth)
self.set_header("Content-type", "image/jpeg")
self.write(response)
def converImage(self,image_url,width,height,force,smoth):
bytes = image.convertURLImage(image_url,width,height,2,force,smoth)
return bytes
def process(self, image_url):
print image_url
return ""
class ColorHandler(tornado.web.RequestHandler):
executor = Executor()
def set_default_headers(self):
self.set_header("Access-Control-Allow-Origin", "*") # 这个地方可以写域名
self.set_header("Access-Control-Allow-Headers", "Content-Type, Content-MD5, Accept, Accept-Encoding, X-Shiqi-Content-Type, X-Shiqi-Content-Disposition, X-Shiqi-Content-Md5, X-Shiqi-Ctime, X-Shiqi-Filename, X-Shiqi-Position, Refer, User-Agent, Origin, Authorization")
self.set_header("Access-Control-Allow-Methods", "POST, GET, OPTIONS")
self.set_header("Access-Control-Allow-Max-Age","1728000")
self.set_header("Cache-Control","max-age=2628000")
@tornado.concurrent.run_on_executor
def get(self, *args, **kwargs):
image_url = self.get_argument("image_url", default="")
color = int(self.get_argument("color", default="0x0"),16)
red = color >> 16 & 0xff
green = color >> 8 & 0xff
blue = color & 0xff
print color,red,green,blue
if not image_url:
result = {}
result["msg"] = "error"
self.write(json_encode(result))
else:
response = self.converImage(image_url,red,green,blue)
self.set_header("Content-type", "image/x-png")
self.write(response)
def converImage(self,image_url,red,green,blue):
bytes = imageColor.convertURLToData(image_url,red,green,blue)
return bytes
def process(self, image_url):
print image_url
return ""
class ImageServer(object):
def __init__(self, port):
self.port = port
def process(self,server_port):
app = tornado.web.Application([(r"/color?", ColorHandler),(r"/image?", ImageHandler)], )
app.listen(server_port)
tornado.ioloop.IOLoop.current().start()
if __name__ == "__main__":
if len(sys.argv)>1:
server_port = sys.argv[1]
else:
server_port = 80
server = ImageServer(server_port)
print "begin server"
server.process(server_port)
| 33.917293 | 274 | 0.65174 |
4a1d505bd13055869946af70fa68afaebd59eb5f
| 583 |
py
|
Python
|
src/mongo.py
|
guve4e/db-populator
|
981d35ed5e43b5c434d57bc962bfeeb07c841bdf
|
[
"Apache-2.0"
] | null | null | null |
src/mongo.py
|
guve4e/db-populator
|
981d35ed5e43b5c434d57bc962bfeeb07c841bdf
|
[
"Apache-2.0"
] | null | null | null |
src/mongo.py
|
guve4e/db-populator
|
981d35ed5e43b5c434d57bc962bfeeb07c841bdf
|
[
"Apache-2.0"
] | null | null | null |
import pymongo
class Mongo(object):
def __init__(self, connection_str: str) -> None:
super().__init__()
self.__connection = pymongo.MongoClient(connection_str)
def drop_database(self, db: str):
self.__connection.drop_database(db)
def get_database(self, db: str):
return self.__connection[db]
    def list_databases(self) -> list:
return self.__connection.list_database_names()
def insert_one(self, obj):
self.__connection.insert_one(obj)
def insert_many(self, obj: {}):
self.__connection.insert_many(obj)
| 26.5 | 63 | 0.670669 |
4a1d50b60224cecbfe15de09758d27935ef83d12
| 5,998 |
py
|
Python
|
parser.py
|
inutza/spaceflight-parser
|
d723d45d4d1a6c484f2324644d5b14387bec6b39
|
[
"MIT"
] | null | null | null |
parser.py
|
inutza/spaceflight-parser
|
d723d45d4d1a6c484f2324644d5b14387bec6b39
|
[
"MIT"
] | null | null | null |
parser.py
|
inutza/spaceflight-parser
|
d723d45d4d1a6c484f2324644d5b14387bec6b39
|
[
"MIT"
] | null | null | null |
import urllib2
import re
from HTMLParser import HTMLParser
from datetime import datetime, date, time, timedelta
import pytz
from icalendar import Calendar, Event, vDatetime
# Tags which help extract each section
DATETAG = '<div class="datename"><span class="launchdate">'
SPANENDTAG = '</span>'
SPANSTARTTAG = '<span class="strong">'
DIVENDTAG = '</div>'
MISSIONTAG = '<span class="mission">'
LAUNCHWINDOWTAG = '<div class="missiondata"><span class="strong">Launch window:</span> '
GMT = ' GMT'
LOCTAG = 'Launch site:</span> '
DESCTAG = '<div class="missdescrip">'
UPDATETAG = '. ['
LAUNCHREGEX = '[0-9]+(-?[0-9]+)'
# Short-hand months and full name months -- hope they don't change
SH_MTH = ['Jan.', 'Feb.', 'Mar.', 'Apr.', 'May', 'Jun.', 'Jul.', 'Aug.', 'Sept.', 'Oct.', 'Nov.', 'Dec.']
FL_MTH = ['January', 'February', 'March', 'April', 'May', 'June', 'July', 'August', 'September', 'October', 'November', 'December']
def parser():
req = urllib2.Request('https://spaceflightnow.com/launch-schedule/')
response = urllib2.urlopen(req)
the_page = response.read()
d = datetime.utcnow()
h = HTMLParser()
cal = Calendar()
cal.add('version', 2.0)
cal.add('prodid', '-//madkat//SpaceX feed//EN')
# Get all DATETAG indexes
date_group = [m.start() for m in re.finditer(DATETAG, the_page)]
# For each date index in date_group, extract the other data
for _idx in range(len(date_group)):
date_idx = date_group[_idx]
if _idx + 1 == len(date_group):
block_end = len(the_page)
else:
block_end = date_group[_idx + 1]
date_start_idx = date_idx + len(DATETAG)
date_end_idx = the_page[date_start_idx:block_end].find(SPANENDTAG) + date_start_idx
date = the_page[date_start_idx:date_end_idx]
if '/' in date:
_idx = date.find('/')
date=date[_idx+1:]
found_month = False
mth_idx = 0
while not found_month and mth_idx < 12:
if SH_MTH[mth_idx] in date:
_idx = date.find(SH_MTH[mth_idx])
day = date[_idx + len(SH_MTH[mth_idx]) + 1:]
found_month = True
break
if FL_MTH[mth_idx] in date:
_idx = date.find(FL_MTH[mth_idx])
day = date[_idx + len(FL_MTH[mth_idx]) + 1:]
found_month = True
break
mth_idx += 1
# If I find a day, or month, start building datetime object
# Otherwise, I just skip the event
if found_month and day != '':
event = Event()
# Check if day has '/' in it
year = d.year
_idx = day.find('/')
if _idx != -1:
day = day[_idx+1:]
mth = mth_idx + 1
if mth < d.month:
year += 1
# Get event title
mission_start_idx = the_page[date_end_idx:block_end].find(MISSIONTAG) + len(MISSIONTAG) + date_end_idx
mission_end_idx = the_page[mission_start_idx:block_end].find(SPANENDTAG) + mission_start_idx
mission = the_page[mission_start_idx:mission_end_idx]
mission = re.sub(r'[^\x00-\x7F]+','-', mission)
# Escape all sorts of weird characters
mission = mission.decode("ascii", errors="ignore").encode()
# Escape HTML characters & add summary
event.add('summary', h.unescape(mission))
# Get launch window
launch_win_start_idx = the_page[mission_end_idx:block_end].find(LAUNCHWINDOWTAG) + len(LAUNCHWINDOWTAG) + mission_end_idx
launch_win_end_idx = the_page[launch_win_start_idx:block_end].find(SPANSTARTTAG) + launch_win_start_idx
launch_win_raw = the_page[launch_win_start_idx:launch_win_end_idx]
is_gmt_idx = launch_win_raw.find(GMT)
# If there is no launch window yet, just make it a 24hr event (all day equivalent?)
if is_gmt_idx == -1:
launch_win = "0000-2359"
else:
launch_win = re.search(LAUNCHREGEX, launch_win_raw[:is_gmt_idx]).group(0)
# Parse launch window
if '-' in launch_win:
# I have a launch window!
ev_date = datetime(year, mth, int(day), int(launch_win[:2]), int(launch_win[2:4]), 0, 0, tzinfo=pytz.utc)
ev_date_end = datetime(year, mth, int(day), int(launch_win[5:7]), int(launch_win[7:]), 0, 0, tzinfo=pytz.utc)
else:
ev_date = datetime(year, mth, int(day), int(launch_win[:2]), int(launch_win[2:4]), 0, 0, tzinfo=pytz.utc)
ev_date_end = ev_date + timedelta(hours=1)
event.add('dtstart', ev_date)
event.add('dtend', ev_date_end)
# Get event location
loc_start_idx = the_page[launch_win_end_idx:block_end].find(LOCTAG) + len(LOCTAG) + launch_win_end_idx
loc_end_idx = the_page[loc_start_idx:block_end].find(DIVENDTAG) + loc_start_idx
location = the_page[loc_start_idx:loc_end_idx]
event.add('location', location)
# Get event description
desc_start_idx = the_page[launch_win_end_idx:block_end].find(DESCTAG) + launch_win_end_idx + len(DESCTAG)
desc_end_idx = the_page[desc_start_idx:block_end].find(UPDATETAG) + desc_start_idx
desc = the_page[desc_start_idx:desc_end_idx].decode("ascii", errors="ignore").encode()
desc_filtered = h.unescape(desc)
# If it didn't have a launch window, write a comment in description
if launch_win == "0000-2359":
desc_filtered = "Launch window currently unavailable. Please check at a later time. " + desc_filtered
event.add('description', desc_filtered)
# Add event to calendar
cal.add_component(event)
# Return calendar
return cal.to_ical()
if __name__ == '__main__':
print parser()
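# Optional helper sketch; the output file name is an assumption. Instead of
# printing, the generated iCalendar bytes can be written to disk so a calendar
# client can import the launch feed.
def save_feed(path='launches.ics'):
    with open(path, 'wb') as ics:
        ics.write(parser())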
avg_line_length: 41.652778 | max_line_length: 133 | alphanum_fraction: 0.603201
hexsha: 4a1d50d195faf37c7e0967cf27e30758708f08f5 | size: 7946 | ext: py | lang: Python
max_stars/max_issues/max_forks repo: lorenzob123/HMLF | path: hmlf/common/atari_wrappers.py | head_hexsha: 3577c61b8f2bae7959de81dfd3981c3a8e26d8b6 | licenses: ["MIT"]
max_stars_count: 1 (2021-05-05T05:59:55.000Z) | max_issues_count: 1 (2021-05-18T07:51:46.000Z) | max_forks_count: null
import gym
import numpy as np
from hmlf import spaces
try:
import cv2 # pytype:disable=import-error
cv2.ocl.setUseOpenCL(False)
except ImportError:
cv2 = None
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from hmlf.common.type_aliases import GymObs, GymStepReturn
class NoopResetEnv(gym.Wrapper):
"""
Sample initial states by taking random number of no-ops on reset.
No-op is assumed to be action 0.
:param env: the environment to wrap
:param noop_max: the maximum value of no-ops to run
"""
def __init__(self, env: gym.Env, noop_max: int = 30):
gym.Wrapper.__init__(self, env)
self.noop_max = noop_max
self.override_num_noops = None
self.noop_action = 0
assert env.unwrapped.get_action_meanings()[0] == "NOOP"
def reset(self, **kwargs) -> np.ndarray:
self.env.reset(**kwargs)
if self.override_num_noops is not None:
noops = self.override_num_noops
else:
noops = self.unwrapped.np_random.randint(1, self.noop_max + 1)
assert noops > 0
obs = np.zeros(0)
for _ in range(noops):
obs, _, done, _ = self.env.step(self.noop_action)
if done:
obs = self.env.reset(**kwargs)
return obs
class FireResetEnv(gym.Wrapper):
"""
Take action on reset for environments that are fixed until firing.
:param env: the environment to wrap
"""
def __init__(self, env: gym.Env):
gym.Wrapper.__init__(self, env)
assert env.unwrapped.get_action_meanings()[1] == "FIRE"
assert len(env.unwrapped.get_action_meanings()) >= 3
def reset(self, **kwargs) -> np.ndarray:
self.env.reset(**kwargs)
obs, _, done, _ = self.env.step(1)
if done:
self.env.reset(**kwargs)
obs, _, done, _ = self.env.step(2)
if done:
self.env.reset(**kwargs)
return obs
class EpisodicLifeEnv(gym.Wrapper):
"""
Make end-of-life == end-of-episode, but only reset on true game over.
Done by DeepMind for the DQN and co. since it helps value estimation.
:param env: the environment to wrap
"""
def __init__(self, env: gym.Env):
gym.Wrapper.__init__(self, env)
self.lives = 0
self.was_real_done = True
def step(self, action: int) -> "GymStepReturn":
obs, reward, done, info = self.env.step(action)
self.was_real_done = done
# check current lives, make loss of life terminal,
# then update lives to handle bonus lives
lives = self.env.unwrapped.ale.lives()
if 0 < lives < self.lives:
# for Qbert sometimes we stay in lives == 0 condtion for a few frames
# so its important to keep lives > 0, so that we only reset once
# the environment advertises done.
done = True
self.lives = lives
return obs, reward, done, info
def reset(self, **kwargs) -> np.ndarray:
"""
Calls the Gym environment reset, only when lives are exhausted.
This way all states are still reachable even though lives are episodic,
and the learner need not know about any of this behind-the-scenes.
:param kwargs: Extra keywords passed to env.reset() call
:return: the first observation of the environment
"""
if self.was_real_done:
obs = self.env.reset(**kwargs)
else:
# no-op step to advance from terminal/lost life state
obs, _, _, _ = self.env.step(0)
self.lives = self.env.unwrapped.ale.lives()
return obs
class MaxAndSkipEnv(gym.Wrapper):
"""
Return only every ``skip``-th frame (frameskipping)
:param env: the environment
:param skip: number of ``skip``-th frame
"""
def __init__(self, env: gym.Env, skip: int = 4):
gym.Wrapper.__init__(self, env)
# most recent raw observations (for max pooling across time steps)
self._obs_buffer = np.zeros((2,) + env.observation_space.shape, dtype=env.observation_space.dtype)
self._skip = skip
def step(self, action: int) -> "GymStepReturn":
"""
Step the environment with the given action
Repeat action, sum reward, and max over last observations.
:param action: the action
:return: observation, reward, done, information
"""
total_reward = 0.0
done = None
for i in range(self._skip):
obs, reward, done, info = self.env.step(action)
if i == self._skip - 2:
self._obs_buffer[0] = obs
if i == self._skip - 1:
self._obs_buffer[1] = obs
total_reward += reward
if done:
break
# Note that the observation on the done=True frame
# doesn't matter
max_frame = self._obs_buffer.max(axis=0)
return max_frame, total_reward, done, info
def reset(self, **kwargs) -> "GymObs":
return self.env.reset(**kwargs)
class ClipRewardEnv(gym.RewardWrapper):
"""
Clips the reward to {+1, 0, -1} by its sign.
:param env: the environment
"""
def __init__(self, env: gym.Env):
gym.RewardWrapper.__init__(self, env)
def reward(self, reward: float) -> float:
"""
Bin reward to {+1, 0, -1} by its sign.
:param reward:
:return:
"""
return np.sign(reward)
class WarpFrame(gym.ObservationWrapper):
"""
Convert to grayscale and warp frames to 84x84 (default)
as done in the Nature paper and later work.
:param env: the environment
:param width:
:param height:
"""
def __init__(self, env: gym.Env, width: int = 84, height: int = 84):
gym.ObservationWrapper.__init__(self, env)
self.width = width
self.height = height
self.observation_space = spaces.Box(
low=0, high=255, shape=(self.height, self.width, 1), dtype=env.observation_space.dtype
)
def observation(self, frame: np.ndarray) -> np.ndarray:
"""
returns the current observation from a frame
:param frame: environment frame
:return: the observation
"""
frame = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)
frame = cv2.resize(frame, (self.width, self.height), interpolation=cv2.INTER_AREA)
return frame[:, :, None]
class AtariWrapper(gym.Wrapper):
"""
Atari 2600 preprocessings
Specifically:
* NoopReset: obtain initial state by taking random number of no-ops on reset.
* Frame skipping: 4 by default
* Max-pooling: most recent two observations
* Termination signal when a life is lost.
* Resize to a square image: 84x84 by default
* Grayscale observation
* Clip reward to {-1, 0, 1}
:param env: gym environment
:param noop_max: max number of no-ops
:param frame_skip: the frequency at which the agent experiences the game.
:param screen_size: resize Atari frame
:param terminal_on_life_loss: if True, then step() returns done=True whenever a life is lost.
:param clip_reward: If True (default), the reward is clip to {-1, 0, 1} depending on its sign.
"""
def __init__(
self,
env: gym.Env,
noop_max: int = 30,
frame_skip: int = 4,
screen_size: int = 84,
terminal_on_life_loss: bool = True,
clip_reward: bool = True,
):
env = NoopResetEnv(env, noop_max=noop_max)
env = MaxAndSkipEnv(env, skip=frame_skip)
if terminal_on_life_loss:
env = EpisodicLifeEnv(env)
if "FIRE" in env.unwrapped.get_action_meanings():
env = FireResetEnv(env)
env = WarpFrame(env, width=screen_size, height=screen_size)
if clip_reward:
env = ClipRewardEnv(env)
super(AtariWrapper, self).__init__(env)
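# Minimal usage sketch; the environment id is an assumption, any Atari env that
# exposes ale and get_action_meanings() should work with these wrappers.
if __name__ == "__main__":
    env = AtariWrapper(gym.make("BreakoutNoFrameskip-v4"))
    obs = env.reset()
    obs, reward, done, info = env.step(env.action_space.sample())
    print(obs.shape, reward, done)  # expect an (84, 84, 1) grayscale frame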
avg_line_length: 31.407115 | max_line_length: 106 | alphanum_fraction: 0.613894
hexsha: 4a1d50ec7fd9669fb5abec4151c2b5135efbe9c0 | size: 9432 | ext: py | lang: Python
max_stars/max_issues/max_forks repo: andfoy/py-distributed-dot-product | path: benchmark.py | head_hexsha: 70ede0b3b07294d1c00dfe64187d8fa1c6c8d848 | licenses: ["MIT"]
max_stars_count: null | max_issues_count: null | max_forks_count: null
# -*- coding: utf-8 -*-
"""
Distributed multiplication benchmark (time/memory).
The following benchmarks were run on a machine with 3 Quadro RTX 6000 GPUs,
each one with 24 GB of RAM.
"""
import os
import time
import json
import humanize
import argparse
import functools
import os.path as osp
import torch
from mpi4py import MPI
from distributed_dot_product.utils.comm import (
synchronize, is_main_process, get_rank)
from distributed_dot_product.multiplication.functions import (
distributed_matmul_all, distributed_matmul_nt, distributed_matmul_tn)
comm = MPI.COMM_WORLD
parser = argparse.ArgumentParser(
description='Benchmark suit for distributed operations')
parser.add_argument('--mode', type=str, default='nt',
help='Benchmark mode')
parser.add_argument('--offset', type=int, default=1000,
help='Offset used to balance time/memory')
parser.add_argument('--scale', type=int, default=1,
help='Scale factor used for reduction')
parser.add_argument('--file', type=str, default="nt_benchmark.json",
help='JSON file used to append results')
args = parser.parse_args()
values = []
if osp.exists(args.file):
values = json.load(open(args.file, 'r'))
torch.set_grad_enabled(False)
torch.manual_seed(111)
device = torch.device('cpu')
if torch.cuda.is_available():
torch.cuda.set_device(get_rank())
device = torch.device('cuda')
def measure(function, *args, **kwargs):
torch.cuda.reset_max_memory_allocated()
start_memory = torch.cuda.max_memory_allocated()
start_time = time.time()
y = function(*args, **kwargs)
total_time = time.time() - start_time
end_memory = torch.cuda.max_memory_allocated()
print(f'{function.__name__} - Total time elapsed: {total_time}s')
print(f'{function.__name__} - '
f'Memory consumption: '
f'{humanize.naturalsize(end_memory - start_memory)}')
return y, total_time, end_memory - start_memory
def nt_benchmark():
# Benchmark NT multiplication (local node)
if is_main_process():
xlarge = torch.rand(1, 75000 // args.scale, 768, device=device)
y = xlarge.transpose(-1, -2)
input_memory = torch.cuda.memory_allocated()
print(f'Memory allocated by xlarge/y: '
f'{humanize.naturalsize(input_memory)}')
result, op_time, peak_memory = measure(torch.matmul, xlarge, y)
del xlarge
del y
torch.cuda.empty_cache()
output_memory = torch.cuda.memory_allocated()
print(f'matmul_nt - Output memory consumption: '
f'{humanize.naturalsize(output_memory)}')
del result
torch.cuda.empty_cache()
# Benchmark TN multiplication (distributed)
xsmall = torch.rand(1, 75000 // (3 * args.scale), 768, device=device)
dist_input_size = torch.cuda.memory_allocated()
print(f'Memory allocated by xsmall: '
f'{humanize.naturalsize(dist_input_size)}')
synchronize()
result, dop_time, dpeak_memory = measure(
distributed_matmul_nt, xsmall, xsmall, offset=1000)
del xsmall
torch.cuda.empty_cache()
doutput_memory = torch.cuda.memory_allocated()
print(f'distributed_matmul_nt - Output memory consumption: '
f'{humanize.naturalsize(doutput_memory)}')
del result
torch.cuda.empty_cache()
all_input_size = comm.gather(dist_input_size, root=0)
all_op_time = comm.gather(dop_time, root=0)
all_peak_memory = comm.gather(dpeak_memory, root=0)
all_output_memory = comm.gather(doutput_memory, root=0)
if is_main_process():
avg_input_size = sum(all_input_size) / len(all_input_size)
avg_op_time = sum(all_op_time) / len(all_op_time)
avg_peak_memory = sum(all_peak_memory) / len(all_peak_memory)
avg_output_memory = sum(all_output_memory) / len(all_output_memory)
return (input_memory, output_memory, op_time, peak_memory,
avg_input_size, avg_op_time, avg_peak_memory,
avg_output_memory)
def all_benchmark():
# Benchmark all multiplication (local node)
if is_main_process():
xlarge = torch.rand(1, 75000 // args.scale, 75000 // args.scale,
device=device)
y = torch.rand(1, 75000 // args.scale, 768, device=device)
input_memory = torch.cuda.memory_allocated()
print(f'Memory allocated by xlarge/y: '
f'{humanize.naturalsize(input_memory)}')
result, op_time, peak_memory = measure(torch.matmul, xlarge, y)
del xlarge
del y
torch.cuda.empty_cache()
output_memory = torch.cuda.memory_allocated()
print(f'matmul_nt - Output memory consumption: '
f'{humanize.naturalsize(output_memory)}')
del result
torch.cuda.empty_cache()
# Benchmark all multiplication (distributed)
xsmall = torch.rand(1, 75000 // (3 * args.scale), 75000 // args.scale,
device=device)
ysmall = torch.rand(1, 75000 // (3 * args.scale), 768, device=device)
dist_input_size = torch.cuda.memory_allocated()
print(f'Memory allocated by xsmall/ysmall: '
f'{humanize.naturalsize(dist_input_size)}')
synchronize()
result, dop_time, dpeak_memory = measure(
distributed_matmul_all, xsmall, ysmall, offset=args.offset)
del xsmall
del ysmall
torch.cuda.empty_cache()
doutput_memory = torch.cuda.memory_allocated()
print(f'distributed_matmul_all - Output memory consumption: '
f'{humanize.naturalsize(doutput_memory)}')
del result
torch.cuda.empty_cache()
all_input_size = comm.gather(dist_input_size, root=0)
all_op_time = comm.gather(dop_time, root=0)
all_peak_memory = comm.gather(dpeak_memory, root=0)
all_output_memory = comm.gather(doutput_memory, root=0)
if is_main_process():
avg_input_size = sum(all_input_size) / len(all_input_size)
avg_op_time = sum(all_op_time) / len(all_op_time)
avg_peak_memory = sum(all_peak_memory) / len(all_peak_memory)
avg_output_memory = sum(all_output_memory) / len(all_output_memory)
return (input_memory, output_memory, op_time, peak_memory,
avg_input_size, avg_op_time, avg_peak_memory,
avg_output_memory)
def tn_benchmark():
# Benchmark tn multiplication (local node)
if is_main_process():
xlarge = torch.rand(1, 75000 // args.scale, 75000 // args.scale,
device=device)
y = torch.rand(1, 75000 // args.scale, 768, device=device)
input_memory = torch.cuda.memory_allocated()
print(f'Memory allocated by xlarge/y: '
f'{humanize.naturalsize(input_memory)}')
result, op_time, peak_memory = measure(torch.matmul, xlarge, y)
del xlarge
del y
torch.cuda.empty_cache()
output_memory = torch.cuda.memory_allocated()
print(f'matmul_nt - Output memory consumption: '
f'{humanize.naturalsize(output_memory)}')
del result
torch.cuda.empty_cache()
# Benchmark tn multiplication (distributed)
xsmall = torch.rand(1, 75000 // (3 * args.scale), 75000 // args.scale,
device=device)
ysmall = torch.rand(1, 75000 // (3 * args.scale), 768, device=device)
dist_input_size = torch.cuda.memory_allocated()
print(f'Memory allocated by xsmall/ysmall: '
f'{humanize.naturalsize(dist_input_size)}')
synchronize()
result, dop_time, dpeak_memory = measure(
distributed_matmul_tn, xsmall, ysmall)
del xsmall
del ysmall
torch.cuda.empty_cache()
doutput_memory = torch.cuda.memory_allocated()
print(f'distributed_matmul_all - Output memory consumption: '
f'{humanize.naturalsize(doutput_memory)}')
del result
torch.cuda.empty_cache()
all_input_size = comm.gather(dist_input_size, root=0)
all_op_time = comm.gather(dop_time, root=0)
all_peak_memory = comm.gather(dpeak_memory, root=0)
all_output_memory = comm.gather(doutput_memory, root=0)
if is_main_process():
avg_input_size = sum(all_input_size) / len(all_input_size)
avg_op_time = sum(all_op_time) / len(all_op_time)
avg_peak_memory = sum(all_peak_memory) / len(all_peak_memory)
avg_output_memory = sum(all_output_memory) / len(all_output_memory)
return (input_memory, output_memory, op_time, peak_memory,
avg_input_size, avg_op_time, avg_peak_memory,
avg_output_memory)
def main():
test_funcs = {
'nt': nt_benchmark,
'all': all_benchmark,
'tn': tn_benchmark
}
output = test_funcs[args.mode]()
if is_main_process():
(input_memory, output_memory, op_time, peak_memory, avg_input_size,
avg_op_time, avg_peak_memory, avg_output_memory) = output
output = {
'input_memory': input_memory,
'total_time': op_time,
'peak_memory': peak_memory,
'output_memory': output_memory,
'distributed_input_memory': avg_input_size,
'distributed_time': avg_op_time,
'distributed_peak_memory': avg_peak_memory,
'distributed_output_memory': avg_output_memory
}
values.append(output)
json.dump(values, open(args.file, 'w'))
synchronize()
if __name__ == '__main__':
main()
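# Example invocation (assumed; adjust ranks and arguments to the actual cluster):
#   mpirun -np 3 python benchmark.py --mode nt --scale 1 --offset 1000 --file nt_benchmark.json
# Each rank pins one GPU via get_rank(), so -np should match the number of GPUs.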
avg_line_length: 36.416988 | max_line_length: 75 | alphanum_fraction: 0.668575
hexsha: 4a1d50fbd1c2f5ed164b8541ab1efa9f5da8441a | size: 3632 | ext: py | lang: Python
max_stars/max_issues/max_forks repo: YingTing04/Neural-Networks | path: a3/Assignment 3 LVQ1.py | head_hexsha: 3923a53026f390091796cab54938af62ff7da22a | licenses: ["MIT"]
max_stars_count: null | max_issues_count: null | max_forks_count: 1 (2020-11-04T08:50:38.000Z)
#implements the LVQ algorithm
import numpy as np
#reading of data from files
def reading_data(string):
with open(string, 'r') as f:
data = f.readlines()
all_data = []
all_label = []
for item in data:
x = item.split(',')
#obtaining label from data
label = x[-1].rstrip()
#obtaining features from data
indv_data = x[:-1]
indv_data = np.array(indv_data).astype(np.float)
all_data.append(indv_data)
#converting labels into one-hot vectors
if label == 'Iris-setosa':
label_vec = 0
all_label.append(label_vec)
elif label == 'Iris-versicolor':
label_vec = 1
all_label.append(label_vec)
elif label == 'Iris-virginica':
label_vec = 2
all_label.append(label_vec)
return (all_data, all_label)
#calculating euclidean distance
def euclidean_distance(input_vec, weights):
distance = 0
for i in range(len(weights)):
distance += (input_vec[i] - weights[i])**2
return np.sqrt(distance)
#shuffling of data and labels
def shuffle(data, labels):
index = [i for i in range(len(data))]
np.random.shuffle(index)
data = np.array(data)
labels = np.array(labels)
return data[index], labels[index]
f = open("LVQ1.txt", "w")
all_data, all_label = reading_data('iris_train.txt')
all_data, all_label = shuffle(all_data, all_label)
test_data, test_label = reading_data('iris_test.txt')
test_data, test_label = shuffle(test_data, test_label)
#initialising weights and other parameters
np.random.seed(4)
weights = np.random.rand(3,4)
print(weights)
alpha = 0.3
n_epochs = 50
for epoch in range(n_epochs):
cluster_error = np.zeros(3)
lr = alpha * (1 - epoch/n_epochs)
for i in range(len(all_data)):
#calculating euclidean distance between data and each node
distances = []
for j in range(3):
distances.append(euclidean_distance(all_data[i], weights[j]))
#obtaining the predicted label
predicted_label = int(distances.index(min(distances)))
actual_label = int(all_label[i])
#calculation of mean squared error
error = all_data[i] - weights[predicted_label]
mean_squared_error = np.dot(error, error)
cluster_error[predicted_label] += mean_squared_error
#performing weight change
if (predicted_label == actual_label):
delta_w = lr * error
else:
delta_w = -lr * error
weights[predicted_label] += delta_w
#weights normalisation
weights[predicted_label] /= np.linalg.norm(weights[predicted_label])
print("epoch:", epoch, "error:", cluster_error)
f.write("epoch: " + str(epoch) + " error: " + str(cluster_error) + "\n")
print("weights", weights)
f.write("codebook vector 0: " + str(weights[0]) + "\n")
f.write("codebook vector 1: " + str(weights[1]) + "\n")
f.write("codebook vector 2: " + str(weights[2]) + "\n")
#calculation of confusion matrix
confusion_matrix = np.zeros([3,3])
correct_predictions = 0
#testing on test data
for i in range(len(test_data)):
distances = []
for j in range(3):
distances.append(euclidean_distance(test_data[i], weights[j]))
predicted_label = int(distances.index(min(distances)))
actual_label = int(test_label[i])
if (int(predicted_label) == int(actual_label)):
correct_predictions += 1
confusion_matrix[predicted_label][actual_label] += 1
print(confusion_matrix)
print("accuracy", correct_predictions/len(test_data))
f.close()
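#optional sketch: classify one new sample with the trained codebook vectors
#(the feature values below are placeholders, not measurements from the dataset)
def classify(sample, codebook):
    distances = [euclidean_distance(sample, codebook[j]) for j in range(len(codebook))]
    return int(distances.index(min(distances)))
print("predicted class:", classify([5.1, 3.5, 1.4, 0.2], weights))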
avg_line_length: 29.528455 | max_line_length: 76 | alphanum_fraction: 0.642346
hexsha: 4a1d5199ab74f6c0f312f73f61df227b8d9eb263 | size: 15482 | ext: py | lang: Python
max_stars/max_issues/max_forks repo: anthonyjpratti/pants | path: tests/python/pants_test/engine/test_build_files.py | head_hexsha: d98e53af6ddd877861231bce8343f8204da0a9d1 | licenses: ["Apache-2.0"]
max_stars_count: null | max_issues_count: null | max_forks_count: null
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
import os
import re
import unittest
from pants.base.project_tree import Dir
from pants.base.specs import SiblingAddresses, SingleAddress, Specs
from pants.build_graph.address import Address
from pants.engine.addressable import addressable, addressable_dict
from pants.engine.build_files import (
ResolvedTypeMismatchError,
create_graph_rules,
parse_address_family,
provenanced_addresses_from_address_families,
remove_provenance,
)
from pants.engine.fs import Digest, FileContent, FilesContent, PathGlobs, Snapshot, create_fs_rules
from pants.engine.legacy.structs import TargetAdaptor
from pants.engine.mapper import AddressFamily, AddressMapper, ResolveError
from pants.engine.nodes import Return, Throw
from pants.engine.parser import HydratedStruct, SymbolTable
from pants.engine.rules import rule
from pants.engine.struct import Struct, StructWithDeps
from pants.testutil.engine.util import MockGet, Target, run_rule
from pants.util.objects import Exactly
from pants_test.engine.examples.parsers import (
JsonParser,
PythonAssignmentsParser,
PythonCallbacksParser,
)
from pants_test.engine.scheduler_test_base import SchedulerTestBase
class ParseAddressFamilyTest(unittest.TestCase):
def test_empty(self):
"""Test that parsing an empty BUILD file results in an empty AddressFamily."""
address_mapper = AddressMapper(JsonParser(TEST_TABLE))
af = run_rule(
parse_address_family,
rule_args=[address_mapper, Dir('/dev/null')],
mock_gets=[
MockGet(
product_type=Snapshot,
subject_type=PathGlobs,
mock=lambda _: Snapshot(Digest('abc', 10), ('/dev/null/BUILD',), ()),
),
MockGet(
product_type=FilesContent,
subject_type=Digest,
mock=lambda _: FilesContent([FileContent(path='/dev/null/BUILD', content=b'')]),
),
],
)
self.assertEqual(len(af.objects_by_name), 0)
class AddressesFromAddressFamiliesTest(unittest.TestCase):
def _address_mapper(self):
return AddressMapper(JsonParser(TEST_TABLE))
def _snapshot(self):
return Snapshot(Digest('xx', 2), ('root/BUILD',), ())
def _resolve_build_file_addresses(self, specs, address_family, snapshot, address_mapper):
pbfas = run_rule(
provenanced_addresses_from_address_families,
rule_args=[address_mapper, specs],
mock_gets=[
MockGet(
product_type=Snapshot,
subject_type=PathGlobs,
mock=lambda _: snapshot,
),
MockGet(
product_type=AddressFamily,
subject_type=Dir,
mock=lambda _: address_family,
),
],
)
return run_rule(remove_provenance, rule_args=[pbfas])
def test_duplicated(self):
"""Test that matching the same Spec twice succeeds."""
address = SingleAddress('a', 'a')
snapshot = Snapshot(Digest('xx', 2), ('a/BUILD',), ())
address_family = AddressFamily('a', {'a': ('a/BUILD', 'this is an object!')})
specs = Specs([address, address])
bfas = self._resolve_build_file_addresses(
specs, address_family, snapshot, self._address_mapper())
self.assertEqual(len(bfas.dependencies), 1)
self.assertEqual(bfas.dependencies[0].spec, 'a:a')
def test_tag_filter(self):
"""Test that targets are filtered based on `tags`."""
specs = Specs([SiblingAddresses('root')], tags=['+integration'])
address_family = AddressFamily('root',
{'a': ('root/BUILD', TargetAdaptor()),
'b': ('root/BUILD', TargetAdaptor(tags={'integration'})),
'c': ('root/BUILD', TargetAdaptor(tags={'not_integration'}))
}
)
targets = self._resolve_build_file_addresses(
specs, address_family, self._snapshot(), self._address_mapper())
self.assertEqual(len(targets.dependencies), 1)
self.assertEqual(targets.dependencies[0].spec, 'root:b')
def test_fails_on_nonexistent_specs(self):
"""Test that specs referring to nonexistent targets raise a ResolveError."""
address_family = AddressFamily('root', {'a': ('root/BUILD', TargetAdaptor())})
specs = Specs([SingleAddress('root', 'b'), SingleAddress('root', 'a')])
expected_rx_str = re.escape(
""""b" was not found in namespace "root". Did you mean one of:
:a""")
with self.assertRaisesRegexp(ResolveError, expected_rx_str):
self._resolve_build_file_addresses(
specs, address_family, self._snapshot(), self._address_mapper())
# Ensure that we still catch nonexistent targets later on in the list of command-line specs.
specs = Specs([SingleAddress('root', 'a'), SingleAddress('root', 'b')])
with self.assertRaisesRegexp(ResolveError, expected_rx_str):
self._resolve_build_file_addresses(
specs, address_family, self._snapshot(), self._address_mapper())
def test_exclude_pattern(self):
"""Test that targets are filtered based on exclude patterns."""
specs = Specs([SiblingAddresses('root')], exclude_patterns=tuple(['.exclude*']))
address_family = AddressFamily('root',
{'exclude_me': ('root/BUILD', TargetAdaptor()),
'not_me': ('root/BUILD', TargetAdaptor()),
}
)
targets = self._resolve_build_file_addresses(
specs, address_family, self._snapshot(), self._address_mapper())
self.assertEqual(len(targets.dependencies), 1)
self.assertEqual(targets.dependencies[0].spec, 'root:not_me')
def test_exclude_pattern_with_single_address(self):
"""Test that single address targets are filtered based on exclude patterns."""
specs = Specs([SingleAddress('root', 'not_me')], exclude_patterns=tuple(['root.*']))
address_family = AddressFamily('root',
{
'not_me': ('root/BUILD', TargetAdaptor()),
}
)
targets = self._resolve_build_file_addresses(
specs, address_family, self._snapshot(), self._address_mapper())
self.assertEqual(len(targets.dependencies), 0)
class ApacheThriftConfiguration(StructWithDeps):
# An example of a mixed-mode object - can be directly embedded without a name or else referenced
# via address if both top-level and carrying a name.
#
# Also an example of a more constrained config object that has an explicit set of allowed fields
# and that can have pydoc hung directly off the constructor to convey a fully accurate BUILD
# dictionary entry.
def __init__(self, name=None, version=None, strict=None, lang=None, options=None, **kwargs):
super().__init__(
name=name, version=version, strict=strict, lang=lang, options=options, **kwargs
)
# An example of a validatable bit of config.
def validate_concrete(self):
if not self.version:
self.report_validation_error('A thrift `version` is required.')
if not self.lang:
self.report_validation_error('A thrift gen `lang` is required.')
class PublishConfiguration(Struct):
# An example of addressable and addressable_mapping field wrappers.
def __init__(self, default_repo, repos, name=None, **kwargs):
super().__init__(name=name, **kwargs)
self.default_repo = default_repo
self.repos = repos
@addressable(Exactly(Struct))
def default_repo(self):
""""""
@addressable_dict(Exactly(Struct))
def repos(self):
""""""
TEST_TABLE = SymbolTable({
'ApacheThriftConfig': ApacheThriftConfiguration,
'Struct': Struct,
'StructWithDeps': StructWithDeps,
'PublishConfig': PublishConfiguration,
'Target': Target,
})
class GraphTestBase(unittest.TestCase, SchedulerTestBase):
def create(self, build_patterns=None, parser=None):
address_mapper = AddressMapper(build_patterns=build_patterns,
parser=parser)
@rule
def symbol_table_singleton() -> SymbolTable:
return TEST_TABLE
rules = create_fs_rules() + create_graph_rules(address_mapper) + [symbol_table_singleton]
project_tree = self.mk_fs_tree(os.path.join(os.path.dirname(__file__), 'examples'))
scheduler = self.mk_scheduler(rules=rules, project_tree=project_tree)
return scheduler
def create_json(self):
return self.create(build_patterns=('*.BUILD.json',), parser=JsonParser(TEST_TABLE))
def _populate(self, scheduler, address):
"""Perform an ExecutionRequest to parse the given Address into a Struct."""
request = scheduler.execution_request([HydratedStruct], [address])
returns, throws = scheduler.execute(request)
if returns:
state = returns[0][1]
else:
state = throws[0][1]
return request, state
def resolve_failure(self, scheduler, address):
_, state = self._populate(scheduler, address)
self.assertEqual(type(state), Throw, f'{state} is not a Throw.')
return state.exc
def resolve(self, scheduler, address):
_, state = self._populate(scheduler, address)
self.assertEqual(type(state), Return, f'{state} is not a Return.')
return state.value.value
class InlinedGraphTest(GraphTestBase):
def do_test_codegen_simple(self, scheduler):
def address(name):
return Address(spec_path='graph_test', target_name=name)
resolved_java1 = self.resolve(scheduler, address('java1'))
nonstrict = ApacheThriftConfiguration(type_alias='ApacheThriftConfig',
address=address('nonstrict'),
version='0.10.0',
strict=False,
lang='java')
public = Struct(type_alias='Struct',
address=address('public'),
url='https://oss.sonatype.org/#stagingRepositories')
thrift1 = Target(address=address('thrift1'))
thrift2 = Target(address=address('thrift2'), dependencies=[thrift1])
expected_java1 = Target(address=address('java1'),
configurations=[
PublishConfiguration(
type_alias='PublishConfig',
default_repo=public,
repos={
'jake':
Struct(type_alias='Struct', url='https://dl.bintray.com/pantsbuild/maven'),
'jane': public
}
),
nonstrict,
ApacheThriftConfiguration(type_alias='ApacheThriftConfig',
version='0.10.0',
strict=True,
dependencies=[address('thrift2')],
lang='java'),
],
dependencies=[thrift2],
type_alias='Target')
self.assertEqual(expected_java1.configurations, resolved_java1.configurations)
def test_json(self):
scheduler = self.create_json()
self.do_test_codegen_simple(scheduler)
def test_python(self):
scheduler = self.create(build_patterns=('*.BUILD.python',),
parser=PythonAssignmentsParser(TEST_TABLE))
self.do_test_codegen_simple(scheduler)
def test_python_classic(self):
scheduler = self.create(build_patterns=('*.BUILD',),
parser=PythonCallbacksParser(TEST_TABLE))
self.do_test_codegen_simple(scheduler)
def test_resolve_cache(self):
scheduler = self.create_json()
nonstrict_address = Address.parse('graph_test:nonstrict')
nonstrict = self.resolve(scheduler, nonstrict_address)
self.assertEqual(nonstrict, self.resolve(scheduler, nonstrict_address))
# The already resolved `nonstrict` interior node should be re-used by `java1`.
java1_address = Address.parse('graph_test:java1')
java1 = self.resolve(scheduler, java1_address)
self.assertEqual(nonstrict, java1.configurations[1])
self.assertEqual(java1, self.resolve(scheduler, java1_address))
def do_test_trace_message(self, scheduler, parsed_address, expected_regex=None):
# Confirm that the root failed, and that a cycle occurred deeper in the graph.
request, state = self._populate(scheduler, parsed_address)
self.assertEqual(type(state), Throw)
trace_message = '\n'.join(scheduler.trace(request))
self.assert_throws_are_leaves(trace_message, Throw.__name__)
if expected_regex:
print(trace_message)
self.assertRegex(trace_message, expected_regex)
def do_test_cycle(self, address_str, cyclic_address_str):
scheduler = self.create_json()
parsed_address = Address.parse(address_str)
self.do_test_trace_message(
scheduler,
parsed_address,
f'(?ms)Dep graph contained a cycle:.*{cyclic_address_str}.* <-.*{cyclic_address_str}.* <-'
)
def assert_throws_are_leaves(self, error_msg, throw_name):
def indent_of(s):
return len(s) - len(s.lstrip())
def assert_equal_or_more_indentation(more_indented_line, less_indented_line):
self.assertTrue(indent_of(more_indented_line) >= indent_of(less_indented_line),
'\n"{}"\nshould have more equal or more indentation than\n"{}"\n{}'.format(more_indented_line,
less_indented_line, error_msg))
lines = error_msg.splitlines()
line_indices_of_throws = [i for i, v in enumerate(lines) if throw_name in v]
for idx in line_indices_of_throws:
# Make sure lines with Throw have more or equal indentation than its neighbors.
current_line = lines[idx]
line_above = lines[max(0, idx - 1)]
assert_equal_or_more_indentation(current_line, line_above)
def test_cycle_self(self):
self.do_test_cycle('graph_test:self_cycle', 'graph_test:self_cycle')
def test_cycle_direct(self):
self.do_test_cycle('graph_test:direct_cycle', 'graph_test:direct_cycle')
def test_cycle_indirect(self):
self.do_test_cycle('graph_test:indirect_cycle', 'graph_test:one')
def test_type_mismatch_error(self):
scheduler = self.create_json()
mismatch = Address.parse('graph_test:type_mismatch')
self.assert_resolve_failure_type(ResolvedTypeMismatchError, mismatch, scheduler)
self.do_test_trace_message(scheduler, mismatch)
def test_not_found_but_family_exists(self):
scheduler = self.create_json()
dne = Address.parse('graph_test:this_addressable_does_not_exist')
self.assert_resolve_failure_type(ResolveError, dne, scheduler)
self.do_test_trace_message(scheduler, dne)
def test_not_found_and_family_does_not_exist(self):
scheduler = self.create_json()
dne = Address.parse('this/dir/does/not/exist')
self.assert_resolve_failure_type(ResolveError, dne, scheduler)
self.do_test_trace_message(scheduler, dne)
def assert_resolve_failure_type(self, expected_type, mismatch, scheduler):
failure = self.resolve_failure(scheduler, mismatch)
self.assertEqual(type(failure),
expected_type,
f'type was not {expected_type.__name__}. Instead was {type(failure).__name__}, {failure!r}')
avg_line_length: 40.005168 | max_line_length: 124 | alphanum_fraction: 0.664643
hexsha: 4a1d53ac3e6b9fffd9cab550451b80fdc53b0785 | size: 5057 | ext: py | lang: Python
max_stars/max_issues/max_forks repo: davidcawork/P4Tutorial | path: utils/p4runtime_switch.py | head_hexsha: 297c663f9516a769fb40fb28c2271d582de7a01e | licenses: ["Apache-2.0"]
max_stars_count: null | max_issues_count: null | max_forks_count: 1 (2021-09-09T15:11:34.000Z)
# Copyright 2017-present Barefoot Networks, Inc.
# Copyright 2017-present Open Networking Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys, os, tempfile, socket
from time import sleep
from mininet.node import Switch
from mininet.moduledeps import pathCheck
from mininet.log import info, error, debug
from p4_mininet import P4Switch, SWITCH_START_TIMEOUT
from netstat import check_listening_on_port, check_listening_on_port_pid
class P4RuntimeSwitch(P4Switch):
"BMv2 switch with gRPC support"
next_grpc_port = 50051
next_thrift_port = 9090
def __init__(self, name, sw_path = None, json_path = None,
grpc_port = None,
thrift_port = None,
pcap_dump = False,
log_console = False,
verbose = False,
device_id = None,
enable_debugger = False,
log_file = None,
**kwargs):
Switch.__init__(self, name, **kwargs)
assert (sw_path)
self.sw_path = sw_path
# make sure that the provided sw_path is valid
pathCheck(sw_path)
if json_path is not None:
# make sure that the provided JSON file exists
if not os.path.isfile(json_path):
error("Invalid JSON file: {}\n".format(json_path))
exit(1)
self.json_path = json_path
else:
self.json_path = None
if grpc_port is not None:
self.grpc_port = grpc_port
else:
self.grpc_port = P4RuntimeSwitch.next_grpc_port
P4RuntimeSwitch.next_grpc_port += 1
if thrift_port is not None:
self.thrift_port = thrift_port
else:
self.thrift_port = P4RuntimeSwitch.next_thrift_port
P4RuntimeSwitch.next_thrift_port += 1
if check_listening_on_port(self.grpc_port):
error('%s cannot bind port %d because it is bound by another process\n' % (self.name, self.grpc_port))
exit(1)
self.verbose = verbose
logfile = "/tmp/p4s.{}.log".format(self.name)
self.output = open(logfile, 'w')
self.pcap_dump = pcap_dump
self.enable_debugger = enable_debugger
self.log_console = log_console
if log_file is not None:
self.log_file = log_file
else:
self.log_file = "/tmp/p4s.{}.log".format(self.name)
if device_id is not None:
self.device_id = device_id
P4Switch.device_id = max(P4Switch.device_id, device_id)
else:
self.device_id = P4Switch.device_id
P4Switch.device_id += 1
self.nanomsg = "ipc:///tmp/bm-{}-log.ipc".format(self.device_id)
def check_switch_started(self, pid):
for _ in range(SWITCH_START_TIMEOUT * 2):
if not os.path.exists(os.path.join("/proc", str(pid))):
return False
if check_listening_on_port_pid(self.grpc_port, pid):
return True
sleep(0.5)
def start(self, controllers):
info("Starting P4 switch {}.\n".format(self.name))
args = [self.sw_path]
for port, intf in self.intfs.items():
if not intf.IP():
args.extend(['-i', str(port) + "@" + intf.name])
if self.pcap_dump:
args.append("--pcap %s" % self.pcap_dump)
if self.nanomsg:
args.extend(['--nanolog', self.nanomsg])
args.extend(['--device-id', str(self.device_id)])
P4Switch.device_id += 1
if self.json_path:
args.append(self.json_path)
else:
args.append("--no-p4")
if self.enable_debugger:
args.append("--debugger")
if self.log_console:
args.append("--log-console")
if self.thrift_port:
args.append('--thrift-port ' + str(self.thrift_port))
if self.grpc_port:
args.append("-- --grpc-server-addr 0.0.0.0:" + str(self.grpc_port))
cmd = ' '.join(args)
info(cmd + "\n")
pid = None
with tempfile.NamedTemporaryFile() as f:
self.cmd(cmd + ' >' + self.log_file + ' 2>&1 & echo $! >> ' + f.name)
pid = int(f.read())
debug("P4 switch {} PID is {}.\n".format(self.name, pid))
if not self.check_switch_started(pid):
error("P4 switch {} did not start correctly.\n".format(self.name))
exit(1)
info("P4 switch {} has been started.\n".format(self.name))
avg_line_length: 36.644928 | max_line_length: 114 | alphanum_fraction: 0.598378
hexsha: 4a1d54bd2caec63f28c91aa939f6b29d0bb96db9 | size: 10089 | ext: py | lang: Python
max_stars/max_issues/max_forks repo: geojeff/kivy | path: kivy/utils.py | head_hexsha: 25ab20e5b0e87269531abe1f8cc76bf270bcc755 | licenses: ["MIT"]
max_stars_count: 1 (2017-11-15T08:59:23.000Z) | max_issues_count: null | max_forks_count: 3 (2015-07-18T11:03:59.000Z to 2018-03-17T01:32:42.000Z)
# pylint: disable=W0611
'''
Utils
=====
.. versionchanged:: 1.6.0
OrderedDict class has been removed. Use the collections.OrderedDict.
'''
__all__ = ('intersection', 'difference', 'strtotuple',
'get_color_from_hex', 'get_hex_from_color', 'get_random_color',
'is_color_transparent', 'boundary',
'deprecated', 'SafeList',
'interpolate', 'QueryDict',
'platform', 'escape_markup', 'reify')
from os import environ
from sys import platform as _sys_platform
from re import match, split
try:
from UserDict import UserDict
from UserDict import DictMixin
except ImportError:
from collections import UserDict
from collections import MutableMapping as DictMixin
_platform_android = None
_platform_ios = None
def boundary(value, minvalue, maxvalue):
'''Limit a value between a minvalue and maxvalue'''
return min(max(value, minvalue), maxvalue)
def intersection(set1, set2):
'''Return intersection between 2 list'''
return [s for s in set1 if s in set2]
def difference(set1, set2):
'''Return difference between 2 list'''
return [s for s in set1 if s not in set2]
def interpolate(value_from, value_to, step=10):
'''Interpolate a value to another. Can be useful to smooth some transition.
For example::
# instead of setting directly
self.pos = pos
# use interpolate, and you'll have a nice transition
self.pos = interpolate(self.pos, new_pos)
.. warning::
        This interpolation works only on lists/tuples/doubles with the same
        dimension. No check is done to verify that the dimensions match.
'''
if type(value_from) in (list, tuple):
out = []
for x, y in zip(value_from, value_to):
out.append(interpolate(x, y, step))
return out
else:
return value_from + (value_to - value_from) / float(step)
def strtotuple(s):
'''Convert a tuple string into tuple,
with some security check. Designed to be used
with eval() function::
a = (12, 54, 68)
b = str(a) # return '(12, 54, 68)'
c = strtotuple(b) # return (12, 54, 68)
'''
# security
if not match('^[,.0-9 ()\[\]]*$', s):
raise Exception('Invalid characters in string for tuple conversion')
# fast syntax check
if s.count('(') != s.count(')'):
raise Exception('Invalid count of ( and )')
if s.count('[') != s.count(']'):
raise Exception('Invalid count of [ and ]')
r = eval(s)
if type(r) not in (list, tuple):
raise Exception('Conversion failed')
return r
def get_color_from_hex(s):
'''Transform from hex string color to kivy color'''
if s.startswith('#'):
return get_color_from_hex(s[1:])
value = [int(x, 16) / 255. for x in split('([0-9a-f]{2})', s) if x != '']
if len(value) == 3:
value.append(1)
return value
def get_hex_from_color(color):
'''Transform from kivy color to hex::
>>> get_hex_from_color((0, 1, 0))
'#00ff00'
>>> get_hex_from_color((.25, .77, .90, .5))
'#3fc4e57f'
.. versionadded:: 1.5.0
'''
return '#' + ''.join(['{0:02x}'.format(int(x * 255)) for x in color])
def get_random_color(alpha=1.0):
''' Returns a random color (4 tuple)
:Parameters:
`alpha` : float, default to 1.0
if alpha == 'random' a random alpha value is generated
'''
from random import random
if alpha == 'random':
return [random(), random(), random(), random()]
else:
return [random(), random(), random(), alpha]
def is_color_transparent(c):
'''Return true if alpha channel is 0'''
if len(c) < 4:
return False
if float(c[3]) == 0.:
return True
return False
DEPRECATED_CALLERS = []
def deprecated(func):
'''This is a decorator which can be used to mark functions
as deprecated. It will result in a warning being emitted the first time
the function is used.'''
import inspect
import functools
@functools.wraps(func)
def new_func(*args, **kwargs):
file, line, caller = inspect.stack()[1][1:4]
caller_id = "%s:%s:%s" % (file, line, caller)
# We want to print deprecated warnings only once:
if caller_id not in DEPRECATED_CALLERS:
DEPRECATED_CALLERS.append(caller_id)
warning = (
'Call to deprecated function %s in %s line %d.'
'Called from %s line %d'
' by %s().') % (
func.__name__,
func.__code__.co_filename,
func.__code__.co_firstlineno + 1,
file, line, caller)
from kivy.logger import Logger
Logger.warn(warning)
if func.__doc__:
Logger.warn(func.__doc__)
return func(*args, **kwargs)
return new_func
class SafeList(list):
'''List with clear() method
.. warning::
Usage of iterate() function will decrease your performance.
'''
def clear(self):
del self[:]
@deprecated
def iterate(self, reverse=False):
if reverse:
return reversed(iter(self))
return iter(self)
class QueryDict(dict):
'''QueryDict is a dict() that can be queried with dot.
.. versionadded:: 1.0.4
::
d = QueryDict()
# create a key named toto, with the value 1
d.toto = 1
# it's the same as
d['toto'] = 1
'''
def __getattr__(self, attr):
try:
return self.__getitem__(attr)
except KeyError:
return super(QueryDict, self).__getattr__(attr)
def __setattr__(self, attr, value):
self.__setitem__(attr, value)
def format_bytes_to_human(size, precision=2):
'''Format a bytes number to human size (B, KB, MB...)
.. versionadded:: 1.0.8
:Parameters:
`size`: int
            Number that represents a number of bytes
`precision`: int
Precision after the comma
Examples::
>>> format_bytes_to_human(6463)
'6.31 KB'
>>> format_bytes_to_human(646368746541)
'601.98 GB'
'''
size = int(size)
fmt = '%%1.%df %%s' % precision
for unit in ['B', 'KB', 'MB', 'GB', 'TB']:
if size < 1024.0:
return fmt % (size, unit)
size /= 1024.0
class Platform(object):
# refactored to class to allow module function to be replaced
# with module variable
_platform = None
@deprecated
def __call__(self):
return self._get_platform()
def __eq__(self, other):
return other == self._get_platform()
def __ne__(self, other):
return other != self._get_platform()
def __str__(self):
return self._get_platform()
def __repr__(self):
return 'platform name: \'{platform}\' from: \n{instance}'.format(
platform=self._get_platform(),
instance=super(Platform, self).__repr__()
)
def __hash__(self):
return self._get_platform().__hash__()
def _get_platform(self):
if self._platform is not None:
return self._platform
global _platform_ios, _platform_android
if _platform_android is None:
# ANDROID_ARGUMENT and ANDROID_PRIVATE are 2 environment variables
# from python-for-android project
_platform_android = 'ANDROID_ARGUMENT' in environ
if _platform_ios is None:
_platform_ios = (environ.get('KIVY_BUILD', '') == 'ios')
# On android, _sys_platform return 'linux2', so prefer to check the
# import of Android module than trying to rely on _sys_platform.
if _platform_android is True:
return 'android'
elif _platform_ios is True:
return 'ios'
elif _sys_platform in ('win32', 'cygwin'):
return 'win'
elif _sys_platform == 'darwin':
return 'macosx'
elif _sys_platform[:5] == 'linux':
return 'linux'
return 'unknown'
platform = Platform()
'''
.. versionadded:: 1.3.0
Deprecated since 1.8.0: Use platform as a variable instead of a function.\n
Calling platform() will return one of: *win*, *linux*, *android*, *macosx*,
*ios*, or *unknown*.
.. versionchanged:: 1.8.0
`platform` also behaves like a regular variable in comparisons like so::
from kivy import platform
if platform == 'linux':
do_linux_things()
if platform() == 'linux': # triggers deprecation warning
do_more_linux_things()
foo = {'linux' : do_linux_things}
foo[platform]() # calls do_linux_things
p = platform # assigns to a module object
if p is 'android':
do_android_things()
p += 'some string' # error!
'''
def escape_markup(text):
'''
Escape markup characters found in the text. Intended to be used when markup
text is activated on the Label::
untrusted_text = escape_markup('Look at the example [1]')
text = '[color=ff0000]' + untrusted_text + '[/color]'
w = Label(text=text, markup=True)
.. versionadded:: 1.3.0
'''
    return text.replace('&', '&amp;').replace('[', '&bl;').replace(']', '&br;')
class reify(object):
'''
Put the result of a method which uses this (non-data) descriptor decorator
in the instance dict after the first call, effectively replacing the
decorator with an instance variable.
It acts like @property, except that the function is only ever called once;
after that, the value is cached as a regular attribute. This gives you lazy
attribute creation on objects that are meant to be immutable.
Taken from Pyramid project.
'''
def __init__(self, func):
self.func = func
self.__doc__ = func.__doc__
def __get__(self, inst, cls):
if inst is None:
return self
retval = self.func(inst)
setattr(inst, self.func.__name__, retval)
return retval
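if __name__ == '__main__':
    # Small illustration of reify; the class below is made up for the example.
    # The first attribute access runs the method, later accesses return the
    # value cached in the instance dict.
    class _Example(object):
        @reify
        def answer(self):
            return 41 + 1
    e = _Example()
    assert e.answer == 42 and 'answer' in e.__dict__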
avg_line_length: 27.793388 | max_line_length: 79 | alphanum_fraction: 0.599068
hexsha: 4a1d54f6306f2a250a0b34859c9362db10e0c863 | size: 4327 | ext: py | lang: Python
max_stars/max_issues/max_forks repo: nbishdev/tgen | path: tgen/config.py | head_hexsha: 3c43c0e29faa7ea3857a6e490d9c28a8daafc7d0 | licenses: ["Apache-2.0"]
max_stars_count: 222 (2015-06-15T14:39:41.000Z to 2022-03-12T03:45:32.000Z) | max_issues_count: 40 (2015-12-02T10:42:44.000Z to 2021-12-05T17:31:11.000Z) | max_forks_count: 72 (2015-07-27T08:11:48.000Z to 2022-03-24T14:25:37.000Z)
# -*- coding: utf-8 -*-
"""
Load Python code as configuration files. A copy from Alex (http://github.com/UFAL-DSG/alex)
"""
from builtins import object
from importlib import import_module
import os
import os.path
import sys
import tempfile
import codecs
import yaml
config = None
def _expand_file_var(text, path):
# This method has clear limitations, since it ignores the whole Python
# syntax.
return text.replace('__file__', "'{p}'".format(p=path))
def load_as_module(path, force=False):
"""Loads a file pointed to by `path' as a Python module with minimal impact
on the global program environment. The file name should end in '.py'.
Arguments:
path -- path towards the file
force -- whether to load the file even if its name does not end in
'.py'
Returns the loaded module object.
"""
do_delete_temp = False
if not path.endswith('.py'):
if force:
happy = False
while not happy:
temp_fd, temp_path = tempfile.mkstemp(suffix='.py')
dirname, basename = os.path.split(temp_path)
modname = basename[:-3]
if modname not in sys.modules:
happy = True
temp_file = os.fdopen(temp_fd, 'wb')
temp_file.write(_expand_file_var(open(path, 'rb').read(), path))
temp_file.close()
path = temp_path
do_delete_temp = True
else:
raise ValueError(("Path `{path}' should be loaded as module but "
"does not end in '.py' and `force' wasn't set.")
.format(path=path))
else:
dirname, basename = os.path.split(path)
modname = basename[:-3]
sys.path.insert(0, dirname)
mod = import_module(modname)
sys.path.pop(0)
if do_delete_temp:
os.unlink(temp_path)
del sys.modules[modname]
return mod
class Config(object):
"""
Config handles configuration data necessary for all the components
in Alex. It is implemented using a dictionary so that any component can use
arbitrarily structured configuration data.
When the configuration file is loaded, several automatic transformations
are applied:
1. '{cfg_abs_path}' as a substring of atomic attributes is replaced by
an absolute path of the configuration files. This can be used to
make the configuration file independent of the location of programs
using the configuration file.
"""
# TODO: Enable setting requirements on the configuration variables and
# checking that they are met (i.e., 2 things:
# - requirements = property(get_reqs, set_reqs)
# - def check_requirements_are_met(self)
def __init__(self, file_name=None, config={}):
self.config = config
if file_name:
self.load(file_name)
def get(self, i, default=None):
return self.config.get(i, default)
def __delitem__(self, i):
del self.config[i]
def __len__(self):
return len(self.config)
def __getitem__(self, i):
return self.config[i]
def __setitem__(self, key, val):
self.config[key] = val
def __iter__(self):
for i in self.config:
yield i
def contains(self, *path):
"""Check if configuration contains given keys (= path in config tree)."""
curr = self.config
for path_part in path:
if path_part in curr:
curr = curr[path_part]
else:
return False
return True
def load(self, file_name):
if file_name.endswith('.yaml'):
with codecs.open(file_name, 'r', encoding='UTF-8') as fh:
self.config = yaml.load(fh, Loader=yaml.FullLoader)
else:
# pylint: disable-msg=E0602
global config
# config = None
self.config = config = load_as_module(file_name, force=True).config
# execfile(file_name, globals())
# assert config is not None
# self.config = config
cfg_abs_dirname = os.path.dirname(os.path.abspath(file_name))
self.config_replace('{cfg_abs_path}', cfg_abs_dirname)
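# Minimal usage sketch; the keys and values below are made-up placeholders.
if __name__ == '__main__':
    cfg = Config(config={'generator': {'model': 'seq2seq'}})
    if cfg.contains('generator', 'model'):
        print(cfg['generator']['model'])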
avg_line_length: 30.907143 | max_line_length: 91 | alphanum_fraction: 0.606425
hexsha: 4a1d556819cd12ae60afd128fb1be94e4a30cc56 | size: 10341 | ext: py | lang: Python
max_stars/max_issues/max_forks repo: ucphhpc/SwarmSpawner | path: tests/test_service.py | head_hexsha: a1f4f2fdae111832210e9bdc9ef5323a6108e739 | licenses: ["BSD-3-Clause"]
max_stars_count: 1 (2021-11-23T11:43:28.000Z) | max_issues_count: null | max_forks_count: null
import docker
import json
import time
import requests
import logging
import pytest
from random import SystemRandom
from docker.types import EndpointSpec
from os.path import dirname, join, realpath
from urllib.parse import urljoin
from util import (
get_service,
get_task_image,
get_service_labels,
wait_for_site,
wait_for_service_task,
get_service_user,
delete,
)
HUB_IMAGE_TAG = "hub:test"
MOUNT_IMAGE_TAG = "nielsbohr/ssh-mount-dummy"
NETWORK_NAME = "jh_test"
HUB_SERVICE_NAME = "jupyterhub"
MOUNT_SERVICE_NAME = "mount_target"
PORT = 8000
JHUB_URL = "http://127.0.0.1:{}".format(PORT)
# Logger
logging.basicConfig(level=logging.INFO)
test_logger = logging.getLogger()
# Test data
rand_key = "".join(SystemRandom().choice("0123456789abcdef") for _ in range(32))
# root dir
hub_path = dirname(dirname(__file__))
hub_image = {"path": hub_path, "tag": HUB_IMAGE_TAG, "rm": True, "pull": False}
swarm_config = {}
network_config = {
"name": NETWORK_NAME,
"driver": "overlay",
"options": {"subnet": "192.168.0.0/24"},
"attachable": True,
}
hub_config = join(dirname(realpath(__file__)), "configs", "jupyterhub_config.py")
hub_service = {
"image": HUB_IMAGE_TAG,
"name": HUB_SERVICE_NAME,
"mounts": [
":".join(["/var/run/docker.sock", "/var/run/docker.sock", "rw"]),
":".join([hub_config, "/etc/jupyterhub/jupyterhub_config.py", "ro"]),
],
"networks": [NETWORK_NAME],
"endpoint_spec": EndpointSpec(ports={PORT: PORT}),
"env": ["JUPYTERHUB_CRYPT_KEY=" + rand_key],
"command": ["jupyterhub", "-f", "/etc/jupyterhub/jupyterhub_config.py"],
}
@pytest.mark.parametrize("image", [hub_image], indirect=["image"])
@pytest.mark.parametrize("swarm", [swarm_config], indirect=["swarm"])
@pytest.mark.parametrize("network", [network_config], indirect=["network"])
def test_creates_service(image, swarm, network, make_service):
"""Test that logging in as a new user creates a new docker service."""
test_logger.info("Start of service testing")
make_service(hub_service)
client = docker.from_env()
# jupyterhub service should be running at this point
services_before_spawn = client.services.list()
test_logger.info("Pre test services: {}".format(services_before_spawn))
username = "a-new-user"
password = "just magnets"
test_logger.info("Authenticating with user: {}".format(username))
assert wait_for_site(JHUB_URL) is True
with requests.Session() as s:
# login
test_logger.info("Authenticating with user: {}".format(username))
login_response = s.post(
JHUB_URL + "/hub/login?next=",
data={"username": username, "password": password},
)
test_logger.info("Login response message: {}".format(login_response.text))
assert login_response.status_code == 200
# Spawn a notebook
spawn_form_resp = s.get(JHUB_URL + "/hub/spawn")
test_logger.info("Spawn page message: {}".format(spawn_form_resp.text))
assert spawn_form_resp.status_code == 200
assert "Select a notebook image" in spawn_form_resp.text
payload = {"dockerimage": "nielsbohr/base-notebook:latest"}
spawn_resp = s.post(JHUB_URL + "/hub/spawn", data=payload)
test_logger.info("Spawn POST response message: {}".format(spawn_resp.text))
assert spawn_resp.status_code == 200
services = client.services.list()
test_logger.info("Post spawn services: {}".format(services))
# New services are there
assert len(services) > 0
for service in services:
while (
service.tasks() and service.tasks()[0]["Status"]["State"] != "running"
):
time.sleep(5)
state = service.tasks()[0]["Status"]["State"]
assert state != "failed"
# wait for user home
home_resp = s.get(JHUB_URL + "/user/{}/tree?".format(username))
assert home_resp.status_code == 200
        # At least one new service was created compared to the pre-spawn list
services_after_spawn = set(client.services.list()) - set(services_before_spawn)
assert len(services_after_spawn) > 0
        # Remove via the web interface
        # Keep retrying the delete until the spawn has finished and the hub
        # confirms removal (HTTP 204), giving up after max_wait attempts.
        pending = True
        num_wait, max_wait = 0, 15
        while pending and num_wait < max_wait:
num_wait += 1
resp = s.delete(
JHUB_URL + "/hub/api/users/{}/server".format(username),
headers={"Referer": "127.0.0.1:{}/hub/".format(PORT)},
)
test_logger.info(
"Response from removing the user server: {}".format(resp.text)
)
if resp.status_code == 204:
pending = False
time.sleep(1)
assert resp.status_code == 204
# double check it is gone
services_after_remove = client.services.list()
assert len((set(services_before_spawn) - set(services_after_remove))) == 0
test_logger.info("End of test service")
@pytest.mark.parametrize("image", [hub_image], indirect=["image"])
@pytest.mark.parametrize("swarm", [swarm_config], indirect=["swarm"])
@pytest.mark.parametrize("network", [network_config], indirect=["network"])
def test_image_selection(image, swarm, network, make_service):
"""Test that the spawner allows for dynamic image selection"""
test_logger.info("Start of the image selection test")
make_service(hub_service)
client = docker.from_env()
# jupyterhub service should be running at this point
services_before_spawn = client.services.list()
test_logger.info("Pre test services: {}".format(services_before_spawn))
username = "a-new-user"
password = "just magnets"
test_logger.info("Authenticating with user: {}".format(username))
assert wait_for_site(JHUB_URL) is True
with requests.Session() as s:
# login
test_logger.info("Authenticating with user: {}".format(username))
login_response = s.post(
urljoin(JHUB_URL, "/hub/login"),
data={"username": username, "password": password},
)
test_logger.info("Login response message: {}".format(login_response.text))
assert login_response.status_code == 200
# Spawn a notebook
spawn_form_resp = s.get(JHUB_URL + "/hub/spawn")
test_logger.info("Spawn page message: {}".format(spawn_form_resp.text))
assert spawn_form_resp.status_code == 200
assert "Select a notebook image" in spawn_form_resp.text
user_image = "nielsbohr/base-notebook:latest"
user_image_name = "Basic Python Notebook"
payload = {"name": user_image_name, "image": user_image}
json_payload = json.dumps(payload)
spawn_resp = s.post(
JHUB_URL + "/hub/spawn/{}".format(username),
files={
"select_image": (
None,
json_payload,
)
},
)
test_logger.info("Spawn POST response message: {}".format(spawn_resp.text))
assert spawn_resp.status_code == 200
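        # The spawner is expected to name the spawned service "jupyter-<username>-<server index>"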
target_service_name = "{}-{}-{}".format("jupyter", username, "1")
spawned_service = get_service(client, target_service_name)
assert spawned_service is not None
        # Verify that a task is successfully running
running_task = wait_for_service_task(
client, spawned_service, filters={"desired-state": "running"}
)
assert running_task
# Verify that the image is correct
service_image = get_task_image(running_task)
assert service_image == user_image
service_labels = get_service_labels(spawned_service)
assert service_labels is not None
assert service_labels["image_name"] == user_image_name
# Delete the spawned service
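        # Send Referer/Origin headers matching the hub so the API treats this as a legitimate browser request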
delete_headers = {"Referer": urljoin(JHUB_URL, "/hub/home"), "Origin": JHUB_URL}
jhub_user = get_service_user(spawned_service)
delete_url = urljoin(JHUB_URL, "/hub/api/users/{}/server".format(jhub_user))
deleted = delete(s, delete_url, headers=delete_headers)
assert deleted
deleted_service = get_service(client, target_service_name)
assert deleted_service is None
        # Spawn a second service with a different name but the same image
# Spawn a notebook
second_spawn_form_resp = s.get(JHUB_URL + "/hub/spawn")
test_logger.info("Spawn page message: {}".format(second_spawn_form_resp.text))
assert second_spawn_form_resp.status_code == 200
assert "Select a notebook image" in second_spawn_form_resp.text
second_image_name = "Basic Python Notebook 2"
selection_payload = {"name": second_image_name, "image": user_image}
json_second_payload = json.dumps(selection_payload)
spawn_resp = s.post(
JHUB_URL + "/hub/spawn/{}".format(username),
files={
"select_image": (
None,
json_second_payload,
)
},
)
test_logger.info("Spawn POST response message: {}".format(spawn_resp.text))
assert spawn_resp.status_code == 200
second_target_service_name = "{}-{}-{}".format("jupyter", username, "1")
second_spawned_service = get_service(client, second_target_service_name)
assert second_spawned_service is not None
        # Verify that a task is successfully running
second_running_task = wait_for_service_task(
client, second_spawned_service, filters={"desired-state": "running"}
)
assert second_running_task
# Verify that the image is correct
second_service_image = get_task_image(second_running_task)
assert second_service_image == user_image
second_service_labels = get_service_labels(second_spawned_service)
assert second_service_labels is not None
assert second_service_labels["image_name"] == second_image_name
# Delete the second spawned service
deleted = delete(s, delete_url, headers=delete_headers)
assert deleted
deleted_service = get_service(client, second_target_service_name)
assert deleted_service is None
| 36.800712 | 88 | 0.64839 |