Dataset schema (one record per source file; ⌀ marks nullable columns):
hexsha: string, length 40
size: int64, 1 to 1.03M
ext: string, 10 classes
lang: string, 1 class
max_stars_repo_path: string, length 3 to 239
max_stars_repo_name: string, length 5 to 130
max_stars_repo_head_hexsha: string, length 40 to 78
max_stars_repo_licenses: list, length 1 to 10
max_stars_count: int64, 1 to 191k, ⌀
max_stars_repo_stars_event_min_datetime: string, length 24, ⌀
max_stars_repo_stars_event_max_datetime: string, length 24, ⌀
max_issues_repo_path: string, length 3 to 239
max_issues_repo_name: string, length 5 to 130
max_issues_repo_head_hexsha: string, length 40 to 78
max_issues_repo_licenses: list, length 1 to 10
max_issues_count: int64, 1 to 67k, ⌀
max_issues_repo_issues_event_min_datetime: string, length 24, ⌀
max_issues_repo_issues_event_max_datetime: string, length 24, ⌀
max_forks_repo_path: string, length 3 to 239
max_forks_repo_name: string, length 5 to 130
max_forks_repo_head_hexsha: string, length 40 to 78
max_forks_repo_licenses: list, length 1 to 10
max_forks_count: int64, 1 to 105k, ⌀
max_forks_repo_forks_event_min_datetime: string, length 24, ⌀
max_forks_repo_forks_event_max_datetime: string, length 24, ⌀
content: string, length 1 to 1.03M
avg_line_length: float64, 1 to 958k
max_line_length: int64, 1 to 1.03M
alphanum_fraction: float64, 0 to 1
hexsha: 4a1b741be39e1f7212ec19b42652116e1072e5e2 | size: 6,694 | ext: py | lang: Python
max_stars:  dmb/modeling/stereo/losses/stereo_focal_loss.py | jiaw-z/DenseMatchingBenchmark | 177c56ca1952f54d28e6073afa2c16981113a2af | ["MIT"] | count: 160 | 2019-11-16T13:59:21.000Z to 2022-03-28T07:52:59.000Z
max_issues: dmb/modeling/stereo/losses/stereo_focal_loss.py | jiaw-z/DenseMatchingBenchmark | 177c56ca1952f54d28e6073afa2c16981113a2af | ["MIT"] | count: 22 | 2019-11-22T02:14:18.000Z to 2022-01-24T10:16:14.000Z
max_forks:  dmb/modeling/stereo/losses/stereo_focal_loss.py | jiaw-z/DenseMatchingBenchmark | 177c56ca1952f54d28e6073afa2c16981113a2af | ["MIT"] | count: 38 | 2019-12-27T14:01:01.000Z to 2022-03-12T11:40:11.000Z
content:
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from dmb.modeling.stereo.losses.utils import LaplaceDisp2Prob, GaussianDisp2Prob, OneHotDisp2Prob
class StereoFocalLoss(object):
"""
    Computes the loss of all estimated cost volumes that share the same start disparity
    and maximum disparity.
    Args:
        max_disp (int): the maximum disparity. default: 192
        start_disp (int): the first disparity index to search from, usually 0
        dilation (optional, int): the step between neighbouring disparity indices,
            mainly used when generating the ground-truth probability volume
        weights (list of float or None): loss weight for each scale of estCost.
        focal_coefficient (float): the focal coefficient of the stereo focal loss,
            see the paper for details. default: 0.0
        sparse (bool): whether the ground-truth disparity is sparse,
            e.g. KITTI is sparse but SceneFlow is not. default: False
    Inputs:
        estCost (Tensor or list of Tensor): the estimated cost volume,
            in [BatchSize, disp_sample_number, Height, Width] layout, where
            disp_sample_number is (max_disp + dilation - 1) / dilation or disp_index.shape[1]
        gtDisp (Tensor): the ground-truth disparity map,
            in [BatchSize, 1, Height, Width] layout.
        variance (int, Tensor or list of Tensor): the variance of the distribution,
            see the paper for details, in [BatchSize, 1, Height, Width] layout.
        disp_sample (optional, (Tensor or list of Tensor)):
            if not None, directly provides the disparity samples for each pixel,
            in [BatchSize, disp_sample_number, Height, Width] layout
    Outputs:
        weighted_loss_all_level (Tensor), the weighted loss over all levels
    Note:
        estCost should not be normalized before computing the loss,
        because softmax is applied here for normalization
"""
def __init__(
self, max_disp, start_disp=0,
dilation=1, weights=None,
focal_coefficient=0.0,
sparse=False
):
self.max_disp = max_disp
self.start_disp = start_disp
self.end_disp = self.max_disp + self.start_disp - 1
self.dilation = dilation
self.weights = weights
self.focal_coefficient = focal_coefficient
self.sparse = sparse
if sparse:
# sparse disparity ==> max_pooling
self.scale_func = F.adaptive_max_pool2d
else:
# dense disparity ==> avg_pooling
self.scale_func = F.adaptive_avg_pool2d
def loss_per_level(self, estCost, gtDisp, variance, dilation, disp_sample):
B, C, H, W = estCost.shape
scaled_gtDisp = gtDisp.clone()
scale = 1.0
if gtDisp.shape[-2] != H or gtDisp.shape[-1] != W:
# compute scale factor for per level and scale gtDisp
scale = gtDisp.shape[-1] / (W * 1.0)
scaled_gtDisp = gtDisp.clone() / scale
scaled_gtDisp = self.scale_func(scaled_gtDisp, (H, W))
# mask for valid disparity
# (start_disp, max disparity / scale)
# Attention: the invalid disparity of KITTI is set as 0, be sure to mask it out
lower_bound = self.start_disp
upper_bound = lower_bound + int(self.max_disp / scale)
mask = (scaled_gtDisp > lower_bound) & (scaled_gtDisp < upper_bound)
mask = mask.detach_().type_as(scaled_gtDisp)
if mask.sum() < 1.0:
            print('Stereo focal loss: no point has a disparity '
                  'within [{},{})!'.format(lower_bound, upper_bound))
scaled_gtProb = torch.zeros_like(estCost) # let this sample have loss with 0
else:
# transfer disparity map to probability map
mask_scaled_gtDisp = scaled_gtDisp * mask
scaled_gtProb = LaplaceDisp2Prob(
mask_scaled_gtDisp, max_disp=int(self.max_disp / scale), variance=variance,
start_disp=self.start_disp, dilation=dilation, disp_sample=disp_sample
).getProb()
# stereo focal loss
valid_pixel_number = mask.float().sum()
if valid_pixel_number < 1.0:
valid_pixel_number = 1.0
estLogProb = F.log_softmax(estCost, dim=1)
weight = (1.0 - scaled_gtProb).pow(-self.focal_coefficient).type_as(scaled_gtProb)
loss = -((scaled_gtProb * estLogProb) * weight * mask.float()).sum() / valid_pixel_number
return loss
def __call__(self, estCost, gtDisp, variance, disp_sample=None):
if not isinstance(estCost, (list, tuple)):
estCost = [estCost]
if self.weights is None:
self.weights = 1.0
if not isinstance(self.weights, (list, tuple)):
self.weights = [self.weights] * len(estCost)
if not isinstance(self.dilation, (list, tuple)):
self.dilation = [self.dilation] * len(estCost)
if not isinstance(variance, (list, tuple)):
variance = [variance] * len(estCost)
if disp_sample is None:
disp_sample = [disp_sample] * len(estCost)
else:
if not isinstance(disp_sample, (list, tuple)):
# Use same disparity samples for each estimated cost volume
disp_sample = [disp_sample] * len(estCost)
# compute loss for per level
loss_all_level = []
for est_cost_per_lvl, var, dt, ds in zip(estCost, variance, self.dilation, disp_sample):
loss_all_level.append(
self.loss_per_level(est_cost_per_lvl, gtDisp, var, dt, ds))
# re-weight loss per level
weighted_loss_all_level = dict()
for i, loss_per_level in enumerate(loss_all_level):
name = "stereo_focal_loss_lvl{}".format(i)
weighted_loss_all_level[name] = self.weights[i] * loss_per_level
return weighted_loss_all_level
def __repr__(self):
repr_str = '{}\n'.format(self.__class__.__name__)
repr_str += ' ' * 4 + 'Max Disparity: {}\n'.format(self.max_disp)
repr_str += ' ' * 4 + 'Start disparity: {}\n'.format(self.start_disp)
repr_str += ' ' * 4 + 'Dilation rate: {}\n'.format(self.dilation)
repr_str += ' ' * 4 + 'Loss weight: {}\n'.format(self.weights)
repr_str += ' ' * 4 + 'Focal coefficient: {}\n'.format(self.focal_coefficient)
repr_str += ' ' * 4 + 'Disparity is sparse: {}\n'.format(self.sparse)
return repr_str
@property
def name(self):
return 'StereoFocalLoss'
avg_line_length: 43.467532 | max_line_length: 106 | alphanum_fraction: 0.617867
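A minimal usage sketch for the StereoFocalLoss class above (not part of the original file): it assumes the DenseMatchingBenchmark package is importable, keeps the default start_disp=0 and dilation=1 so the cost volume has max_disp channels, and uses toy tensor sizes and an illustrative focal coefficient purely for demonstration.

# Hedged sketch: toy shapes, scalar variance, illustrative focal coefficient.
import torch
from dmb.modeling.stereo.losses.stereo_focal_loss import StereoFocalLoss

B, D, H, W = 2, 192, 64, 128                       # batch, disparity samples, height, width
est_cost = torch.randn(B, D, H, W)                 # unnormalised cost volume (softmax is applied inside)
gt_disp = torch.rand(B, 1, H, W) * (D - 2) + 1.0   # dense ground-truth disparities in (1, D-1)

loss_evaluator = StereoFocalLoss(max_disp=D, weights=[1.0], focal_coefficient=5.0, sparse=False)
loss_dict = loss_evaluator(est_cost, gt_disp, variance=1.0)
print(loss_dict)                                   # {'stereo_focal_loss_lvl0': tensor(...)}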
hexsha: 4a1b749194ddbad9d7ee4d0e131d2b82149d3d7c | size: 208 | ext: py | lang: Python
max_stars:  does_my_number_look_big_in_this.py | Kunalpod/codewars | 8dc1af2f3c70e209471045118fd88b3ea1e627e5 | ["MIT"] | count: null | null to null
max_issues: does_my_number_look_big_in_this.py | Kunalpod/codewars | 8dc1af2f3c70e209471045118fd88b3ea1e627e5 | ["MIT"] | count: null | null to null
max_forks:  does_my_number_look_big_in_this.py | Kunalpod/codewars | 8dc1af2f3c70e209471045118fd88b3ea1e627e5 | ["MIT"] | count: null | null to null
content:
#Kunal Gautam
#Codewars : @Kunalpod
#Problem name: Does my number look big in this?
#Problem level: 6 kyu
def narcissistic( value ):
return value==int(sum([int(x)**len(str(value)) for x in str(value)]))
avg_line_length: 26 | max_line_length: 73 | alphanum_fraction: 0.697115
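A quick sanity check for the solution above, assuming the narcissistic function from that file is in scope; 153 and 1652 are the standard Armstrong-number examples from the kata description.

# 153 is narcissistic: 1**3 + 5**3 + 3**3 == 153; 1652 is not: 1**4 + 6**4 + 5**4 + 2**4 == 1938.
assert narcissistic(153) is True
assert narcissistic(1652) is False
print("narcissistic checks passed")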
hexsha: 4a1b754e95f3d98a2944416cf559a599fcf4565a | size: 2,523 | ext: py | lang: Python
max_stars:  examples/networking/tc_perf_event.py | Birch-san/bcc | b374be886b555ead8feaad9ec2d86ccd39d748dd | ["Apache-2.0"] | count: 4 | 2018-01-29T13:38:50.000Z to 2021-06-30T07:28:47.000Z
max_issues: examples/networking/tc_perf_event.py | Birch-san/bcc | b374be886b555ead8feaad9ec2d86ccd39d748dd | ["Apache-2.0"] | count: 13 | 2018-02-09T22:24:29.000Z to 2018-06-18T22:33:29.000Z
max_forks:  examples/networking/tc_perf_event.py | Birch-san/bcc | b374be886b555ead8feaad9ec2d86ccd39d748dd | ["Apache-2.0"] | count: 5 | 2018-01-31T05:04:19.000Z to 2018-06-12T00:45:21.000Z
content:
#!/usr/bin/python
#
# tc_perf_event.py Output skb and meta data through perf event
#
# Copyright (c) 2016-present, Facebook, Inc.
# Licensed under the Apache License, Version 2.0 (the "License")
from bcc import BPF
import ctypes as ct
import pyroute2
import socket
bpf_txt = """
#include <uapi/linux/if_ether.h>
#include <uapi/linux/in6.h>
#include <uapi/linux/ipv6.h>
#include <uapi/linux/pkt_cls.h>
#include <uapi/linux/bpf.h>
BPF_PERF_OUTPUT(skb_events);
struct eth_hdr {
unsigned char h_dest[ETH_ALEN];
unsigned char h_source[ETH_ALEN];
unsigned short h_proto;
};
int handle_egress(struct __sk_buff *skb)
{
void *data = (void *)(long)skb->data;
void *data_end = (void *)(long)skb->data_end;
struct eth_hdr *eth = data;
struct ipv6hdr *ip6h = data + sizeof(*eth);
u32 magic = 0xfaceb00c;
/* single length check */
if (data + sizeof(*eth) + sizeof(*ip6h) > data_end)
return TC_ACT_OK;
if (eth->h_proto == htons(ETH_P_IPV6) &&
ip6h->nexthdr == IPPROTO_ICMPV6)
skb_events.perf_submit_skb(skb, skb->len, &magic, sizeof(magic));
return TC_ACT_OK;
}"""
def print_skb_event(cpu, data, size):
class SkbEvent(ct.Structure):
_fields_ = [ ("magic", ct.c_uint32),
("raw", ct.c_ubyte * (size - ct.sizeof(ct.c_uint32))) ]
skb_event = ct.cast(data, ct.POINTER(SkbEvent)).contents
icmp_type = int(skb_event.raw[54])
# Only print for echo request
if icmp_type == 128:
src_ip = bytes(bytearray(skb_event.raw[22:38]))
dst_ip = bytes(bytearray(skb_event.raw[38:54]))
print("%-3s %-32s %-12s 0x%08x" %
(cpu, socket.inet_ntop(socket.AF_INET6, src_ip),
socket.inet_ntop(socket.AF_INET6, dst_ip),
skb_event.magic))
try:
b = BPF(text=bpf_txt)
fn = b.load_func("handle_egress", BPF.SCHED_CLS)
ipr = pyroute2.IPRoute()
ipr.link("add", ifname="me", kind="veth", peer="you")
me = ipr.link_lookup(ifname="me")[0]
you = ipr.link_lookup(ifname="you")[0]
for idx in (me, you):
ipr.link('set', index=idx, state='up')
ipr.tc("add", "clsact", me)
ipr.tc("add-filter", "bpf", me, ":1", fd=fn.fd, name=fn.name,
parent="ffff:fff3", classid=1, direct_action=True)
b["skb_events"].open_perf_buffer(print_skb_event)
print('Try: "ping6 ff02::1%me"\n')
print("%-3s %-32s %-12s %-10s" % ("CPU", "SRC IP", "DST IP", "Magic"))
while True:
b.perf_buffer_poll()
finally:
if "me" in locals(): ipr.link("del", index=me)
avg_line_length: 29.337209 | max_line_length: 77 | alphanum_fraction: 0.637337
hexsha: 4a1b761debd1f6d2085801af94d1ae58ec0a77d0 | size: 1,011 | ext: py | lang: Python
max_stars:  test_project/many_to_one/alchemy.py | pctSW1/django-url-filter | 42fc1c3aa71f51081cafdb5fcf39f082538eea1a | ["MIT"] | count: null | null to null
max_issues: test_project/many_to_one/alchemy.py | pctSW1/django-url-filter | 42fc1c3aa71f51081cafdb5fcf39f082538eea1a | ["MIT"] | count: null | null to null
max_forks:  test_project/many_to_one/alchemy.py | pctSW1/django-url-filter | 42fc1c3aa71f51081cafdb5fcf39f082538eea1a | ["MIT"] | count: null | null to null
content:
# -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function, unicode_literals
from sqlalchemy import Column, Date, Integer, String
from sqlalchemy.orm import backref, relationship
from ..alchemy import Base
class Reporter(Base):
__tablename__ = 'many_to_one_reporter'
id = Column(Integer, primary_key=True)
first_name = Column(String(30))
last_name = Column(String(30))
email = Column(String(254))
@property
def pk(self):
return self.id
class Article(Base):
__tablename__ = 'many_to_one_article'
id = Column(Integer, primary_key=True)
reporter_id = Column(Integer)
headline = Column(String(100))
pub_date = Column(Date)
reporter = relationship(
Reporter,
backref=backref('articles', uselist=True),
uselist=False,
primaryjoin='test_project.many_to_one.alchemy.Article.reporter_id == Reporter.id',
foreign_keys=reporter_id,
)
@property
def pk(self):
return self.id
avg_line_length: 25.275 | max_line_length: 90 | alphanum_fraction: 0.686449
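A rough usage sketch for the Reporter/Article models above. It assumes the test_project package (and the shared declarative Base it imports) is importable and uses an in-memory SQLite engine; the names and values are illustrative only, and SQLAlchemy 1.4+ is assumed for the Session context manager.

# Hedged sketch: in-memory SQLite, illustrative data.
from sqlalchemy import create_engine
from sqlalchemy.orm import Session
from test_project.many_to_one.alchemy import Base, Reporter, Article

engine = create_engine("sqlite:///:memory:")
Base.metadata.create_all(engine)              # create both tables defined above

with Session(engine) as session:
    reporter = Reporter(first_name="Jane", last_name="Doe", email="jane@example.com")
    session.add(reporter)
    session.flush()                           # assigns reporter.id
    session.add(Article(headline="Hello", reporter_id=reporter.id))
    session.commit()
    print(reporter.articles[0].headline)      # 'articles' backref declared on Article.reporter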
hexsha: 4a1b775180c681879ce94636b12e1198b252f79a | size: 4,698 | ext: py | lang: Python
max_stars:  tests/sn3218.py | cscashby/pi-showcontrol | 2cc9b2b34ec8eeebede7609535b3c1e937b700cb | ["MIT"] | count: 3 | 2017-05-07T18:13:09.000Z to 2017-08-25T09:35:26.000Z
max_issues: tests/sn3218.py | cscashby/pi-showcontrol | 2cc9b2b34ec8eeebede7609535b3c1e937b700cb | ["MIT"] | count: 6 | 2017-05-07T11:36:45.000Z to 2017-07-31T15:30:20.000Z
max_forks:  tests/sn3218.py | cscashby/pi-showcontrol | 2cc9b2b34ec8eeebede7609535b3c1e937b700cb | ["MIT"] | count: null | null to null
content:
import sys
try:
from smbus import SMBus
except ImportError:
if sys.version_info[0] < 3:
sys.exit("This library requires python-smbus\nInstall with: sudo apt-get install python-smbus")
elif sys.version_info[0] == 3:
sys.exit("This library requires python3-smbus\nInstall with: sudo apt-get install python3-smbus")
__version__ = '1.2.7'
I2C_ADDRESS = 0x54
CMD_ENABLE_OUTPUT = 0x00
CMD_SET_PWM_VALUES = 0x01
CMD_ENABLE_LEDS = 0x13
CMD_UPDATE = 0x16
CMD_RESET = 0x17
def i2c_bus_id():
revision = ([l[12:-1] for l in open('/proc/cpuinfo', 'r').readlines() if l[:8] == "Revision"]+['0000'])[0]
return 1 if int(revision, 16) >= 4 else 0
def enable():
"""
Enables output.
"""
i2c.write_i2c_block_data(I2C_ADDRESS, CMD_ENABLE_OUTPUT, [0x01])
def disable():
"""
Disables output.
"""
i2c.write_i2c_block_data(I2C_ADDRESS, CMD_ENABLE_OUTPUT, [0x00])
def reset():
"""
Resets all internal registers.
"""
i2c.write_i2c_block_data(I2C_ADDRESS, CMD_RESET, [0xFF])
def enable_leds(enable_mask):
"""
Enables or disables each LED channel. The first 18 bit values are
used to determine the state of each channel (1=on, 0=off) if fewer
than 18 bits are provided the remaining channels are turned off.
Args:
enable_mask (int): up to 18 bits of data
Raises:
TypeError: if enable_mask is not an integer.
"""
if type(enable_mask) is not int:
raise TypeError("enable_mask must be an integer")
i2c.write_i2c_block_data(I2C_ADDRESS, CMD_ENABLE_LEDS,
[enable_mask & 0x3F, (enable_mask >> 6) & 0x3F, (enable_mask >> 12) & 0X3F])
i2c.write_i2c_block_data(I2C_ADDRESS, CMD_UPDATE, [0xFF])
def channel_gamma(channel, gamma_table):
"""
Overrides the gamma table for a single channel.
Args:
channel (int): channel number
gamma_table (list): list of 256 gamma correction values
Raises:
TypeError: if channel is not an integer.
ValueError: if channel is not in the range 0..17.
TypeError: if gamma_table is not a list.
"""
global channel_gamma_table
if type(channel) is not int:
raise TypeError("channel must be an integer")
if channel not in range(18):
raise ValueError("channel be an integer in the range 0..17")
if type(gamma_table) is not list or len(gamma_table) != 256:
raise TypeError("gamma_table must be a list of 256 integers")
channel_gamma_table[channel] = gamma_table
def output(values):
"""
Outputs a new set of values to the driver
Args:
values (list): channel number
Raises:
TypeError: if values is not a list.
"""
if type(values) is not list or len(values) != 18:
raise TypeError("values must be a list of 18 integers")
i2c.write_i2c_block_data(I2C_ADDRESS, CMD_SET_PWM_VALUES, [channel_gamma_table[i][values[i]] for i in range(18)])
i2c.write_i2c_block_data(I2C_ADDRESS, CMD_UPDATE, [0xFF])
i2c = SMBus(i2c_bus_id())
# generate a good default gamma table
default_gamma_table = [int(pow(255, float(i - 1) / 255)) for i in range(256)]
channel_gamma_table = [default_gamma_table] * 18
enable_leds(0b111111111111111111)
if __name__ == "__main__":
print("sn3218 test cycles")
import time
import math
# enable output
enable()
enable_leds(0b111111111111111111)
print(">> test enable mask (on/off)")
enable_mask = 0b000000000000000000
output([0x10] * 18)
for i in range(10):
enable_mask = ~enable_mask
enable_leds(enable_mask)
time.sleep(0.15)
print(">> test enable mask (odd/even)")
enable_mask = 0b101010101010101010
output([0x10] * 18)
for i in range(10):
enable_mask = ~enable_mask
enable_leds(enable_mask)
time.sleep(0.15)
print(">> test enable mask (rotate)")
enable_mask = 0b100000100000100000
output([0x10] * 18)
for i in range(10):
enable_mask = ((enable_mask & 0x01) << 18) | enable_mask >> 1
enable_leds(enable_mask)
time.sleep(0.15)
print(">> test gamma gradient")
enable_mask = 0b111111111111111111
enable_leds(enable_mask)
for i in range(256):
output([((j * (256//18)) + (i * (256//18))) % 256 for j in range(18)])
time.sleep(0.01)
print(">> test gamma fade")
enable_mask = 0b111111111111111111
enable_leds(enable_mask)
for i in range(512):
output([int((math.sin(float(i)/64.0) + 1.0) * 128.0)]*18)
time.sleep(0.01)
# turn everything off and disable output
output([0 for i in range(18)])
disable()
avg_line_length: 28.131737 | max_line_length: 117 | alphanum_fraction: 0.650915
hexsha: 4a1b7994d6d8eed53ab543ddfcf74b1e16aa3032 | size: 12,015 | ext: py | lang: Python
max_stars:  networks/generator.py | Puneet-G/Impersonator-NNProject | 980cfc260feebbc873b4150326791340f6526c42 | ["MIT"] | count: 1 | 2020-05-11T19:10:27.000Z to 2020-05-11T19:10:27.000Z
max_issues: networks/generator.py | Puneet-G/Impersonator-NNProject | 980cfc260feebbc873b4150326791340f6526c42 | ["MIT"] | count: 4 | 2020-05-11T19:12:18.000Z to 2021-10-12T22:52:12.000Z
max_forks:  networks/generator.py | Puneet-G/Impersonator-NNProject | 980cfc260feebbc873b4150326791340f6526c42 | ["MIT"] | count: 1 | 2020-05-27T01:59:41.000Z to 2020-05-27T01:59:41.000Z
content:
import torch.nn as nn
import torch.nn.functional as F
from .networks import NetworkBase
from utils.util import Swish
import torch
import ipdb
class ResidualBlock(nn.Module):
"""Residual Block."""
def __init__(self, dim_in, dim_out):
super(ResidualBlock, self).__init__()
self.main = nn.Sequential(
nn.Conv2d(dim_in, dim_out, kernel_size=3, stride=1, padding=1, bias=False),
nn.InstanceNorm2d(dim_out, affine=True),
nn.ReLU(inplace=True), #Swish(), #nn.ReLU(inplace=True),
nn.Conv2d(dim_out, dim_out, kernel_size=3, stride=1, padding=1, bias=False),
nn.InstanceNorm2d(dim_out, affine=True))
def forward(self, x):
return x + self.main(x)
class ResNetGenerator(NetworkBase):
"""Generator. Encoder-Decoder Architecture."""
def __init__(self, conv_dim=64, c_dim=5, repeat_num=9, k_size=4, n_down=2):
super(ResNetGenerator, self).__init__()
self._name = 'resnet_generator'
print('init: ', self._name)
layers = []
layers.append(nn.Conv2d(c_dim, conv_dim, kernel_size=7, stride=1, padding=3, bias=False))
layers.append(nn.InstanceNorm2d(conv_dim, affine=True))
layers.append(nn.ReLU(inplace=True))
# Down-Sampling
curr_dim = conv_dim
for i in range(n_down):
layers.append(nn.Conv2d(curr_dim, curr_dim*2, kernel_size=k_size, stride=2, padding=1, bias=False))
layers.append(nn.InstanceNorm2d(curr_dim*2, affine=True))
layers.append(nn.ReLU(inplace=True)) #Swish()) #nn.ReLU(inplace=True))
curr_dim = curr_dim * 2
# Bottleneck
for i in range(repeat_num):
layers.append(ResidualBlock(dim_in=curr_dim, dim_out=curr_dim))
# Up-Sampling
for i in range(n_down):
layers.append(nn.ConvTranspose2d(curr_dim, curr_dim//2, kernel_size=k_size, stride=2, padding=1,
output_padding=1, bias=False))
layers.append(nn.InstanceNorm2d(curr_dim//2, affine=True))
layers.append(nn.ReLU(inplace=True)) #Swish()) #nn.ReLU(inplace=True))
curr_dim = curr_dim // 2
layers.append(nn.Conv2d(curr_dim, 3, kernel_size=7, stride=1, padding=3, bias=False))
layers.append(nn.Tanh())
self.model = nn.Sequential(*layers)
def forward(self, x, c=None):
if c is not None:
# replicate spatially and concatenate domain information
c = c.unsqueeze(2).unsqueeze(3)
c = c.expand(c.size(0), c.size(1), x.size(2), x.size(3))
x = torch.cat([x, c], dim=1)
return self.model(x)
class ResUnetGenerator(NetworkBase):
"""Generator. Encoder-Decoder Architecture."""
def __init__(self, conv_dim=64, c_dim=5, repeat_num=6, k_size=4, n_down=2):
super(ResUnetGenerator, self).__init__()
self._name = 'resunet_generator'
self.repeat_num = repeat_num
self.n_down = n_down
encoders = []
encoders.append(nn.Sequential(
nn.Conv2d(c_dim, conv_dim, kernel_size=7, stride=1, padding=3, bias=False),
nn.InstanceNorm2d(conv_dim, affine=True),
nn.ReLU(inplace=True)
))
# Down-Sampling
curr_dim = conv_dim
for i in range(n_down):
encoders.append(nn.Sequential(
nn.Conv2d(curr_dim, curr_dim*2, kernel_size=k_size, stride=2, padding=1, bias=False),
nn.InstanceNorm2d(curr_dim*2, affine=True),
nn.ReLU(inplace=True)
))
curr_dim = curr_dim * 2
self.encoders = nn.Sequential(*encoders)
# Bottleneck
resnets = []
for i in range(repeat_num):
resnets.append(ResidualBlock(dim_in=curr_dim, dim_out=curr_dim))
self.resnets = nn.Sequential(*resnets)
# Up-Sampling
decoders = []
skippers = []
for i in range(n_down):
decoders.append(nn.Sequential(
nn.ConvTranspose2d(curr_dim, curr_dim//2, kernel_size=k_size, stride=2, padding=1, output_padding=1, bias=False),
nn.InstanceNorm2d(curr_dim//2, affine=True),
nn.ReLU(inplace=True)
))
skippers.append(nn.Sequential(
nn.Conv2d(curr_dim, curr_dim//2, kernel_size=k_size, stride=1, padding=1, bias=False),
nn.InstanceNorm2d(curr_dim//2, affine=True),
nn.ReLU(inplace=True)
))
curr_dim = curr_dim // 2
self.decoders = nn.Sequential(*decoders)
self.skippers = nn.Sequential(*skippers)
layers = []
layers.append(nn.Conv2d(curr_dim, 3, kernel_size=7, stride=1, padding=3, bias=False))
layers.append(nn.Tanh())
self.img_reg = nn.Sequential(*layers)
layers = []
layers.append(nn.Conv2d(curr_dim, 1, kernel_size=7, stride=1, padding=3, bias=False))
layers.append(nn.Sigmoid())
self.attetion_reg = nn.Sequential(*layers)
def inference(self, x):
# encoder, 0, 1, 2, 3 -> [256, 128, 64, 32]
encoder_outs = self.encode(x)
# resnet, 32
resnet_outs = []
src_x = encoder_outs[-1]
for i in range(self.repeat_num):
src_x = self.resnets[i](src_x)
resnet_outs.append(src_x)
return encoder_outs, resnet_outs
def forward(self, x):
# encoder, 0, 1, 2, 3 -> [256, 128, 64, 32]
encoder_outs = self.encode(x)
# resnet, 32
resnet_outs = self.resnets(encoder_outs[-1])
# decoder, 0, 1, 2 -> [64, 128, 256]
d_out = self.decode(resnet_outs, encoder_outs)
img_outs, mask_outs = self.regress(d_out)
return img_outs, mask_outs
def encode(self, x):
e_out = self.encoders[0](x)
encoder_outs = [e_out]
for i in range(1, self.n_down + 1):
e_out = self.encoders[i](e_out)
encoder_outs.append(e_out)
#print(i, e_out.shape)
return encoder_outs
def decode(self, x, encoder_outs):
d_out = x
for i in range(self.n_down):
d_out = self.decoders[i](d_out) # x * 2
skip = encoder_outs[self.n_down - 1 - i]
d_out = torch.cat([skip, d_out], dim=1)
d_out = self.skippers[i](d_out)
# print(i, d_out.shape)
return d_out
def regress(self, x):
return self.img_reg(x), self.attetion_reg(x)
class ImpersonatorGenerator(NetworkBase):
"""Generator. Encoder-Decoder Architecture."""
def __init__(self, bg_dim, src_dim, tsf_dim, conv_dim=64, repeat_num=6):
super(ImpersonatorGenerator, self).__init__()
self._name = 'impersonator_generator'
self.n_down = 3
self.repeat_num = repeat_num
# background generator
print('background generator')
self.bg_model = ResNetGenerator(conv_dim=conv_dim, c_dim=bg_dim, repeat_num=repeat_num, k_size=3, n_down=self.n_down)
# source generator
self.src_model = ResUnetGenerator(conv_dim=conv_dim, c_dim=src_dim, repeat_num=repeat_num, k_size=3, n_down=self.n_down)
# transfer generator
self.tsf_model = ResUnetGenerator(conv_dim=conv_dim, c_dim=tsf_dim, repeat_num=repeat_num, k_size=3, n_down=self.n_down)
def forward(self, bg_inputs, src_inputs, tsf_inputs, T):
img_bg = self.bg_model(bg_inputs)
src_img, src_mask, tsf_img, tsf_mask = self.infer_front(src_inputs, tsf_inputs, T)
# print(front_rgb.shape, front_mask.shape)
return img_bg, src_img, src_mask, tsf_img, tsf_mask
def encode_src(self, src_inputs):
return self.src_model.inference(src_inputs)
def infer_front(self, src_inputs, tsf_inputs, T):
# encoder
src_x = self.src_model.encoders[0](src_inputs)
tsf_x = self.tsf_model.encoders[0](tsf_inputs)
src_encoder_outs = [src_x]
tsf_encoder_outs = [tsf_x]
for i in range(1, self.n_down + 1):
src_x = self.src_model.encoders[i](src_x)
warp = self.transform(src_x, T)
tsf_x = self.tsf_model.encoders[i](tsf_x) + warp
src_encoder_outs.append(src_x)
tsf_encoder_outs.append(tsf_x)
# resnets
T_scale = self.resize_trans(src_x, T)
for i in range(self.repeat_num):
src_x = self.src_model.resnets[i](src_x)
warp = self.stn(src_x, T_scale)
tsf_x = self.tsf_model.resnets[i](tsf_x) + warp
# decoders
src_img, src_mask = self.src_model.regress(self.src_model.decode(src_x, src_encoder_outs))
tsf_img, tsf_mask = self.tsf_model.regress(self.tsf_model.decode(tsf_x, tsf_encoder_outs))
# print(front_rgb.shape, front_mask.shape)
return src_img, src_mask, tsf_img, tsf_mask
def swap(self, tsf_inputs, src_encoder_outs12, src_encoder_outs21, src_resnet_outs12, src_resnet_outs21, T12, T21):
# encoder
src_x12 = src_encoder_outs12[0]
src_x21 = src_encoder_outs21[0]
tsf_x = self.tsf_model.encoders[0](tsf_inputs)
tsf_encoder_outs = [tsf_x]
for i in range(1, self.n_down + 1):
src_x12 = src_encoder_outs12[i]
src_x21 = src_encoder_outs21[i]
warp12 = self.transform(src_x12, T12)
warp21 = self.transform(src_x21, T21)
tsf_x = self.tsf_model.encoders[i](tsf_x) + warp12 + warp21
tsf_encoder_outs.append(tsf_x)
# resnets
T_scale12 = self.resize_trans(src_x12, T12)
T_scale21 = self.resize_trans(src_x21, T21)
for i in range(self.repeat_num):
src_x12 = src_resnet_outs12[i]
src_x21 = src_resnet_outs21[i]
warp12 = self.stn(src_x12, T_scale12)
warp21 = self.stn(src_x21, T_scale21)
tsf_x = self.tsf_model.resnets[i](tsf_x) + warp12 + warp21
# decoders
tsf_img, tsf_mask = self.tsf_model.regress(self.tsf_model.decode(tsf_x, tsf_encoder_outs))
# print(front_rgb.shape, front_mask.shape)
return tsf_img, tsf_mask
def inference(self, src_encoder_outs, src_resnet_outs, tsf_inputs, T):
# encoder
src_x = src_encoder_outs[0]
tsf_x = self.tsf_model.encoders[0](tsf_inputs)
tsf_encoder_outs = [tsf_x]
for i in range(1, self.n_down + 1):
src_x = src_encoder_outs[i]
warp = self.transform(src_x, T)
tsf_x = self.tsf_model.encoders[i](tsf_x) + warp
tsf_encoder_outs.append(tsf_x)
# resnets
T_scale = self.resize_trans(src_x, T)
for i in range(self.repeat_num):
src_x = src_resnet_outs[i]
warp = self.stn(src_x, T_scale)
tsf_x = self.tsf_model.resnets[i](tsf_x) + warp
# decoders
tsf_img, tsf_mask = self.tsf_model.regress(self.tsf_model.decode(tsf_x, tsf_encoder_outs))
# print(front_rgb.shape, front_mask.shape)
return tsf_img, tsf_mask
def resize_trans(self, x, T):
_, _, h, w = x.shape
T_scale = T.permute(0, 3, 1, 2) # (bs, 2, h, w)
T_scale = F.interpolate(T_scale, size=(h, w), mode='bilinear', align_corners=True)
T_scale = T_scale.permute(0, 2, 3, 1) # (bs, h, w, 2)
return T_scale
def stn(self, x, T):
x_trans = F.grid_sample(x, T)
return x_trans
def transform(self, x, T):
T_scale = self.resize_trans(x, T)
x_trans = self.stn(x, T_scale)
return x_trans
if __name__ == '__main__':
imitator = ImpersonatorGenerator(bg_dim=4, src_dim=6, tsf_dim=6, conv_dim=64, repeat_num=6)
bg_x = torch.rand(2, 4, 256, 256)
src_x = torch.rand(2, 6, 256, 256)
tsf_x = torch.rand(2, 6, 256, 256)
T = torch.rand(2, 256, 256, 2)
img_bg, src_img, src_mask, tsf_img, tsf_mask = imitator(bg_x, src_x, tsf_x, T)
ipdb.set_trace()
avg_line_length: 35.758929 | max_line_length: 129 | alphanum_fraction: 0.612318
hexsha: 4a1b79ba6bbbe35d7b76127bdea690a6c98b8443 | size: 32,210 | ext: py | lang: Python
max_stars:  effmass/inputs.py | musicmrman99/effmass | 77e75e0cb5688bdcd2e3cb170d47300ef04cec06 | ["MIT"] | count: 34 | 2018-10-05T14:52:42.000Z to 2022-03-29T14:23:11.000Z
max_issues: effmass/inputs.py | musicmrman99/effmass | 77e75e0cb5688bdcd2e3cb170d47300ef04cec06 | ["MIT"] | count: 43 | 2018-07-06T16:31:27.000Z to 2022-03-29T11:00:31.000Z
max_forks:  effmass/inputs.py | musicmrman99/effmass | 77e75e0cb5688bdcd2e3cb170d47300ef04cec06 | ["MIT"] | count: 31 | 2018-06-26T12:10:00.000Z to 2022-03-29T14:23:12.000Z
content:
#! /usr/bin/env python3
"""
A module for storing electronic structure data and user settings. Currently supported codes are VASP and FHI-Aims (with limited functionality).
The module contains a :class:`Data` class which parses OUTCAR and PROCAR files using the `vasppy <https://github.com/bjmorgan/vasppy>`_ package.
A function for parsing DOSCAR files is also provided.
The module contains a :class:`DataAims` class which parses and stores the `geometry.in`/`calculation.out` files generated for/from a FHI-AIMS calculation.
The module contains a :class:`DataOctopus` class which parses and stores the `bandstructure`, `eigenvalues`, `info`, and `results.out` files generated by the Octopus DFT software.
A :class:`Settings` class stores analysis parameters set by the user.
"""
from octopuspy import bandstructure, info, results
from vasppy import procar, outcar
from effmass import extrema
from ase.calculators.castep import Castep
from ase import io
import ase.io
import math
import warnings
import numpy as np
from pymatgen.io.vasp.outputs import BSVasprun
from pymatgen.electronic_structure.bandstructure import get_reconstructed_band_structure
import os
class Settings:
"""Class for setting analysis parameters.
    Attributes:
        energy_range (float): energy in kT over which the segment extends.
        extrema_search_depth (float): energy in kT from bandedge over which to
            search for extrema.
        degree_bandfit (int): the degree of the polynomial which is used to fit
            to dispersion data when calculating the transport mass.
"""
def __init__(self,
energy_range=0.25,
extrema_search_depth=0.025,
bandfit=6):
"""Initialises an instance of the Settings class and checks input using
:meth:`check_settings()`.
Args:
energy_range (float): energy in eV over which the segment extends. Defaults to 0.25 eV.
extrema_search_depth (float): energy in eV from bandedge over which to search for extrema. Defaults to 0.025 eV.
degree_bandfit (int): the degree of the polynomial which is used to fit to dispersion data when calculating the transport mass.
Returns:
None.
"""
self.energy_range = energy_range
self.extrema_search_depth = extrema_search_depth
self.degree_bandfit = bandfit
self.check_settings()
def check_settings(self):
"""Check that Settings class attributes are sane.
Args:
None.
Returns:
None.
"""
assert (self.energy_range >
0), "The energy range must be a positive number"
assert (self.extrema_search_depth >
0), "The energy depth must be a positive number"
assert (
type(self.degree_bandfit) == int and self.degree_bandfit > 1
), "The bandfit degree must be a positive integer greater than 1"
class Data():
r"""Parent class for parsing and storing data from bandstructure calculations. Contains a :meth:`check_data` method for basic checks on bandstructure data.
Attributes:
spin_channels (int): 1 (non-spin-polarised), 2 (spin-polarised), 4 (spin-orbit coupling).
number_of_kpoints (int): the number of k-points per band.
number_of_bands (int): the number of bands.
kpoints (array(float)): 2-dimensional array with shape (number_of_kpoints, 3). Each row contains the fractional coordinates of a kpoint [kx,ky,kz].
energies (array(float)): 2-dimensional array with shape (number_of_bands,number_of_kpoints). Each row contains energies of eigenstates in eV for a particular band.
occupancy (array(float)): 2-dimensional array with shape (number_of_bands,number_of_kpoints). Each row contains occupation number of the eigenstates for a particular band. Values range from 0-1 (spin-polarised) or 0-2 (non-spin-polarised).
reciprocal_lattice (list(float)): the reciprocal lattice vectors in format [[x1,y1,z1],[x2,y2,z2],[x3,y3,z3]], units Angstrom :math:`^{-1}`.
CBM (float): the conduction band minimum energy in eV.
VBM (float): the valence band maximum in eV.
fermi_energy (float): the fermi energy in eV."""
def __init__(self):
r"""
Initialises an instance of the :class:`~effmass.inputs.Data` class. All attributes are None until set by the derived class.
Args:
None.
Returns:
None.
"""
self.spin_channels = None
self.number_of_bands = None
self.number_of_kpoints = None
self.energies = None
self.occupancy = None
self.kpoints = None
self.fermi_energy = None
self.reciprocal_lattice = None
self.CBM = None
self.VBM = None
def check_data(self, spin_channels, number_of_kpoints, number_of_bands, CBM,
VBM, fermi_energy, occupancy):
"""Check that Data class attributes make basic sense.
Args:
None.
Returns:
None.
Notes:
There is a similar method that runs automatically when reading data in using the `vasppy.procar <http://vasppy.readthedocs.io/en/latest/vasppy.html#module-vasppy.procar>`_ module.
"""
assert (
((spin_channels == 1) | (spin_channels == 2) |
(spin_channels == 4)) is True
), "Spin channels must have value 1 (non spin-polarised) or 2 (spin-polarised)"
assert (type(number_of_kpoints) == int
and number_of_kpoints > 0
), "The number of kpoints is not a positive integer"
assert (type(number_of_bands) == int and number_of_bands > 0
), "The number of bands is not a positive integer"
assert (CBM >
VBM), "The CBM energy is lower than than the VBM energy"
if fermi_energy < VBM:
warnings.warn("The fermi energy is lower than the VBM")
if fermi_energy > CBM:
warnings.warn("The fermi energy is higher than the CBM")
if occupancy is not None:
            # `.all()` returns a numpy bool, so test with `not` rather than `is False`
            if not ((occupancy == 0) | (occupancy == 1) |
                    (occupancy == 2)).all():
warnings.warn("You have partial occupancy of bands")
def find_cbm_vbm(self):
self.CBM, self.VBM = extrema.calc_CBM_VBM_from_Fermi(self,CBMVBM_search_depth=4.0)
class DataASE(Data):
r"""
Class for interfacing with the ASE bandstructure object. Inherits attributes and methods from the :class:`~effmass.inputs.Data` class, and extends
with a method for inferring the CBM/VBM from Fermi level.
Note: DataASE.fermi_energy is taken from the seedname.out file.
Note: The DataASE class does not parse eigenstate occupancy data. The Fermi energy will \
be used to infer which bands are occupied (below the fermi energy) and which are unoccupied (above \
the fermi energy). You should independently confirm that the fermi energy is in the band gap of \
    your material. Note that you can manually set the `fermi_energy` attribute and find the CBM and VBM using the method `find_cbm_vbm`.
"""
def __init__(self, bs, atoms):
r"""
Initialises an instance of the :class:`~effmass.inputs.DataASE` class and infers which bands are occupied and unoccupied from the fermi level.
Args:
bs (ase.spectrum.band_structure.BandStructure): An instance of the ase.spectrum.band_structure.BandStructure object.
Returns:
None.
"""
warnings.warn("The DataASE class does not parse eigenstate occupancy data. The Fermi energy will \
be used to infer which bands are occupied (below the fermi energy) and which are unoccupied (above \
the fermi energy). You should independently confirm that the fermi energy is in the band gap of \
your material. Note that you can manually set the DataASE.fermi_energy attribute and then re-find the CBM and VBM using the method `DataASE.find_cbm_vbm`. ")
super().__init__()
self.spin_channels = bs.energies.shape[0]
self.number_of_kpoints = bs.energies.shape[1]
self.number_of_bands = bs.energies.shape[2]*bs.energies.shape[0]
self.energies = bs.energies.transpose(1,0,2).reshape(self.number_of_kpoints,-1).transpose()
self.kpoints = bs.path.kpts
self.reciprocal_lattice = atoms.cell.reciprocal()*2*math.pi
self.fermi_energy = bs.reference
self.find_cbm_vbm()
self.check_data(self.spin_channels, self.number_of_kpoints, self.number_of_bands, self.CBM,
self.VBM, self.fermi_energy, self.occupancy)
class DataCastep(DataASE):
r"""Class for parsing and storing data from a Castep bandstructure calculation. Inherits attributes and methods from the :class:`~effmass.inputs.DataASE` class."""
def __init__(self,directory_path,seedname):
r"""
Initialises an instance of the :class:`~effmass.inputs.DataCastep` class.
Args:
directory_path (str): The path to a directory containing seedname.cell, seedname.out and seedname.bands
seedname (str): The name (without suffix) of the input and output files
Returns:
None.
"""
Castep_calculator = Castep(directory_path)
Castep_calculator.atoms = io.read(directory_path+"./"+seedname+".cell", format='castep-cell')
ASE_bandstructure = Castep_calculator.band_structure(directory_path+"./"+seedname+".bands")
ASE_atoms = Castep_calculator.atoms
super().__init__(ASE_bandstructure, ASE_atoms)
# class DataQE(DataASE):
# r"""Class for parsing and storing data from a Quantum Espresso bandstructure calculation. Inherits attributes and methods from the :class:`~effmass.inputs.DataASE` class."""
# def __init__(self,directory_path,seedname):
# r"""
# Initialises an instance of the :class:`~effmass.inputs.DataQE` class.
# Args:
# Returns:
# None.
# """
# QE_calculator = ase.calculators.espresso.Espresso()
# QE_calculator.atoms = ase.io.espresso.read_espresso_out()
# ASE_bandstructure = QE_calculator.band_structure()
# super().__init__(self, ASE_bandstructure)
class DataVasprun(Data):
r"""
Class for parsing and storing data from a VASP calculation using vasprun.xml.
Works for parsing calculations with split k-point paths
    Note: occupancies are set to 1 below the fermi level and 0 above it
"""
def __init__(self, path):
r"""
Initialises an instance of the :class:`~effmass.inputs.Data` class and
checks data using :meth:`check_data`.
Args:
path (str): Path to vasprun.xml. If the calculation was split along
the k-path, the path should be to the folder which contains the
splits. i.e. for mapi/split-01/vasprun.xml, mapi/split-02/vasprun.xml
you would specify path=mapi
Returns:
None.
"""
super().__init__()
# read in vasprun
if path.endswith('vasprun.xml'):
if os.path.exists(path):
vr = BSVasprun(path)
bs = vr.get_band_structure(line_mode=True)
# read in vaspruns from multiple splits, parse_potcar is false because
# it generates useless warnings, parse_projected is false because we
# don't need projected eigenstates
else:
filenames = []
for fol in sorted(os.listdir(path)):
vr_file = os.path.join(path, fol, "vasprun.xml")
if os.path.exists(vr_file):
filenames.append(vr_file)
bandstructures = []
for vr_file in filenames:
vr = BSVasprun(vr_file, parse_projected_eigen=False,
parse_potcar_file=False)
bs = vr.get_band_structure(line_mode=True)
bandstructures.append(bs)
bs = get_reconstructed_band_structure(bandstructures)
bs_dict = bs.as_dict()
        # set occupancies below fermi to 1 (occupied), above fermi to 0 (unoccupied)
occupancy = np.array(bs_dict['bands']['1'])
occupancy[occupancy < bs_dict['efermi']] = 1
occupancy[occupancy > bs_dict['efermi']] = 0
# set spin channels
spin = 2 if bs_dict['is_spin_polarized'] else 1
self.spin_channels = spin
self.number_of_bands = len(bs_dict['bands']['1'])
self.number_of_kpoints = len(bs_dict['kpoints'])
self.energies = np.array(bs_dict['bands']['1'])
self.occupancy = occupancy
self.kpoints = np.array(bs_dict['kpoints'])
self.fermi_energy = bs_dict['efermi']
self.reciprocal_lattice = bs_dict['lattice_rec']['matrix']
self.CBM = bs_dict['cbm']['energy']
self.VBM = bs_dict['vbm']['energy']
class DataVasp(Data):
r"""
Class for parsing and storing data from a vasp calculation. Extends the :class:`~effmass.inputs.Data` class to include support for analysing DOSCAR data"
Additional attributes:
dos (array): 2-dimensional array. Each row contains density of states data (units "number of states / unit cell") at a given energy: [energy(float),dos(float)].
integrated_dos: 2-dimensional array. Each row contains integrated density of states data at a given energy: [energy(float),integrated_dos(float)].
Note: DataVasp.fermi_energy is automatically set to the mean of DataVasp.CBM and DataVasp.VBM.
"""
def __init__(self, outcar_path, procar_path, ignore=0, **kwargs):
r"""
Initialises an instance of the :class:`~effmass.inputs.Data` class and checks data using :meth:`check_data`.
Args:
outcar_path (str): The path to the OUTCAR file
procar_path (:obj:`str` or :obj:`list`): The path(s) to one or more PROCAR files.
ignore (int): The number of kpoints to ignore at the beginning of the bandstructure slice through kspace (useful for hybrid calculations where zero weightings are appended to a previous self-consistent calculation).
**kwargs: Additional keyword arguments for reading the PROCAR file(s).
Returns:
None.
"""
super().__init__()
assert (type(outcar_path) == str), "The OUTCAR path must be a string"
assert (type(ignore) == int and ignore >= 0
), "The number of kpoints to ignore must be a positive integer"
reciprocal_lattice = outcar.reciprocal_lattice_from_outcar(outcar_path)
if isinstance(procar_path, list):
vasp_data = procar.Procar.from_files(procar_path, **kwargs)
elif isinstance(procar_path, str):
vasp_data = procar.Procar.from_file(procar_path, **kwargs)
else:
raise TypeError('procar_path must be a string or list of strings')
self.spin_channels = vasp_data.spin_channels
self.number_of_bands = vasp_data.number_of_bands
number_of_kpoints = vasp_data.number_of_k_points
vasp_data_energies = np.array( [ band.energy for band in np.ravel( vasp_data.bands ) ] )
vasp_data_occupancies = np.array( [ band.occupancy for band in np.ravel( vasp_data.bands ) ] )
if vasp_data.calculation['spin_polarised']: # to account for the change in PROCAR format for calculations with 2 spin channels (1 k-point block ---> 2 k-point blocks)
energies = np.zeros([self.number_of_bands*2,number_of_kpoints]) # This is a very ugly way to slice 'n' dice. Should avoid creating new array and use array methods instead. But it does the job so will keep for now.
for i in range(self.number_of_bands):
energies[i] = vasp_data_energies.reshape(
number_of_kpoints*2, # factor of 2 for each kpoint block
self.number_of_bands).T[i][:number_of_kpoints]
energies[self.number_of_bands+i] = vasp_data_energies.reshape(
number_of_kpoints*2,
self.number_of_bands).T[i][number_of_kpoints:]
occupancy = np.zeros([self.number_of_bands*2,number_of_kpoints])
for i in range(self.number_of_bands):
occupancy[i] = vasp_data_occupancies.reshape(
number_of_kpoints*2,
self.number_of_bands).T[i][:number_of_kpoints]
occupancy[self.number_of_bands+i] = vasp_data_occupancies.reshape(
number_of_kpoints*2,
self.number_of_bands).T[i][number_of_kpoints:]
else:
energies = vasp_data_energies.reshape(
number_of_kpoints,
self.number_of_bands).T
occupancy = vasp_data_occupancies.reshape(
number_of_kpoints,
self.number_of_bands).T
# remove values which are from the self-consistent calculation prior to the bandstructure calculation (workflow for hybrid functionals)
self.energies = np.delete(energies,list(range(ignore)),1)
self.occupancy = np.delete(occupancy,list(range(ignore)),1)
self.number_of_kpoints = number_of_kpoints - ignore
# handle negative occupancy values
if np.any(self.occupancy < 0):
warnings.warn("One or more occupancies in your PROCAR file are negative. All negative occupancies will be set to zero.")
self.occupancy[ self.occupancy < 0 ] = 0.0
self.kpoints = np.array( [ kp.frac_coords
for kp in vasp_data.k_points[ignore:vasp_data.number_of_k_points] ] )
self.reciprocal_lattice = reciprocal_lattice * 2 * math.pi
self.CBM = extrema._calc_CBM(self.occupancy, self.energies)
self.VBM = extrema._calc_VBM(self.occupancy, self.energies)
self.fermi_energy = (self.CBM + self.VBM) / 2
self.dos = []
self.integrated_dos = []
self.check_data(self.spin_channels, self.number_of_kpoints, self.number_of_bands,
self.CBM, self.VBM, self.fermi_energy, self.occupancy)
def parse_DOSCAR(self, filename='./DOSCAR'):
"""Parses the DOS and integrated DOS from a vasp DOSCAR file.
Args:
filename (str, optional): The location and filename of the DOSCAR to read in. Defaults to `'./DOSCAR'`.
Returns:
None.
Notes:
If the DOS has been sampled at more than 10000 points then this function will break at the expression for `num_data_points`.
In this case, edit your DOSCAR file so that in the header there is a space preceding the number of points.
"""
with open(filename, 'r') as f:
lines = f.readlines()
num_data_points = int(lines[5].split()[2])
if len(lines[6].split()) == 5:
self.dos = np.array([[
float(x.split()[0]),
float(x.split()[1]) + float(x.split()[2])
] for x in lines[6:num_data_points + 6]])
self.integrated_dos = np.array([[
float(x.split()[0]),
float(x.split()[3]) + float(x.split()[4])
] for x in lines[6:num_data_points + 6]])
elif len(lines[6].split()) == 3:
self.dos = np.array([[float(x.split()[0]),
float(x.split()[1])]
for x in lines[6:num_data_points + 6]])
self.integrated_dos = np.array(
[[float(x.split()[0]),
float(x.split()[2])] for x in lines[6:num_data_points + 6]])
else:
print("problem parsing DOSCAR")
return
class DataAims(Data):
r"""
Class for parsing and storing data from a FHI-AIMS calculation.
Attributes:
spin_channels (int): 1 (non-spin-polarised), 2 (spin-polarised), 4 (spin-orbit coupling).
number_of_kpoints (int): the number of k-points per band.
number_of_bands (int): the number of bands.
kpoints (array(float)): 2-dimensional array with shape (number_of_kpoints, 3). Each row contains the fractional coordinates of a kpoint [kx,ky,kz].
energies (array(float)): 2-dimensional array with shape (number_of_bands,number_of_kpoints). Each row contains energies of eigenstates in eV for a particular band.
occupancy (array(float)): 2-dimensional array with shape (number_of_bands,number_of_kpoints). Each row contains occupation number of the eigenstates for a particular band. Values range from 0-1 (spin-polarised) or 0-2 (non-spin-polarised).
reciprocal_lattice (list(float)): the reciprocal lattice vectors in format [[x1,y1,z1],[x2,y2,z2],[x3,y3,z3]], units Angstrom :math:`^{-1}`.
CBM (float): the conduction band minimum energy in eV.
VBM (float): the valence band maximum in eV.
fermi_energy (float): the fermi energy in eV. Automatically set to the mean of Data.CBM and Data.VBM.
"""
def __init__(self, directory_path, output_name='calculation.out'):
r"""
Initialises an instance of the :class:`~effmass.inputs.DataAims` class and checks data using :meth:`check_data`.
Args:
directory_path (str): The path to the directory containing output, geometry.in, control.in and bandstructure files
output_name (str): Name of the output file - contrary to the rest of the files, this is chosen by the user during an Aims run. Defaults to 'aims.out'.
Returns:
None.
"""
super().__init__()
assert (type(directory_path) == str), "The file path must be a string"
"Finding reciprocal lattice vectors"
latvec = []
for line in open("{}/geometry.in".format(directory_path)):
line = line.split("\t")[0]
words = line.split()
if len(words) == 0:
continue
if words[0] == "lattice_vector":
if len(words) != 4:
raise Exception("geometry.in: Syntax error in line '"+line+"'")
latvec.append(np.array(words[1:4]))
if len(latvec) != 3:
raise Exception("geometry.in: Must contain exactly 3 lattice vectors")
latvec = np.asarray(latvec)
        latvec = latvec.astype(float)  # np.float is deprecated; use the builtin float
#Calculate reciprocal lattice vectors
rlatvec = []
volume = (np.dot(latvec[0,:],np.cross(latvec[1,:],latvec[2,:])))
rlatvec.append(np.array(2*math.pi*np.cross(latvec[1,:],latvec[2,:])/volume))
rlatvec.append(np.array(2*math.pi*np.cross(latvec[2,:],latvec[0,:])/volume))
rlatvec.append(np.array(2*math.pi*np.cross(latvec[0,:],latvec[1,:])/volume))
reciprocal_lattice = np.asarray(rlatvec)
self.reciprocal_lattice = reciprocal_lattice
"Finding spin channels"
spin_channels = 0
for line in open("{}/{}".format(directory_path, output_name)):
line = line.split("\t")[0]
if "include_spin_orbit" in line:
spin_channels = 4
break
elif "Number of spin channels" in line:
words = line.split()
spin_channels = int(words[-1])
break
self.spin_channels = spin_channels
"Finding number of bands"
number_of_bands = 0
for line in open("{}/{}".format(directory_path, output_name)):
line = line.split("\t")[0]
if "Number of Kohn-Sham" in line:
words = line.split()
number_of_bands = int(words[-1])
break
if spin_channels == 2 or spin_channels == 4: #Doubling for spin-polarised calculation
number_of_bands = 2*number_of_bands
self.number_of_bands = number_of_bands
"Finding number of kpoints and determining number of BZ paths"
number_of_kpoints = 0
number_of_BZ_paths = 0
path_list = []
for line in open("{}/{}".format(directory_path, output_name)):
line = line.split("\n")[0]
if not line.startswith("#") and "output" in line:
if "band" in line:
words = line.split()
if words[0]=="output" and words[1]=="band":
path_list.append(int(words[8]))
number_of_BZ_paths += 1
number_of_kpoints = sum(path_list)
"Reading out bandstructure files to determine kpoint, energy and occupation matrices"
kpoints = np.zeros([number_of_kpoints,3])
energies = np.zeros([number_of_bands,number_of_kpoints])
occupancy = np.zeros([number_of_bands,number_of_kpoints])
path_counter = 0
if spin_channels == 1 or spin_channels == 4:
kpoint_counter = 0
while path_counter<number_of_BZ_paths:
kpoint_counter = sum(path_list[:path_counter])
for line in open("{}/band1{:03d}.out".format(directory_path, path_counter+1)):
line = line.split("\t")[0]
words = line.split()
kpoints[int(kpoint_counter),0] = float(words[1])
kpoints[int(kpoint_counter),1] = float(words[2])
kpoints[int(kpoint_counter),2] = float(words[3])
for i in range(number_of_bands):
energies[i,int(kpoint_counter)] = float(words[5+2*i])
occupancy[i,int(kpoint_counter)] = float(words[4+2*i])
kpoint_counter += 1
path_counter +=1
if spin_channels == 2:
while path_counter<number_of_BZ_paths:
kpoint_counter = int(sum(path_list[:path_counter]))
for line in open("{}/band1{:03d}.out".format(directory_path, path_counter+1)):
line = line.split("\t")[0]
words = line.split()
kpoints[int(kpoint_counter),0] = float(words[1])
kpoints[int(kpoint_counter),1] = float(words[2])
kpoints[int(kpoint_counter),2] = float(words[3])
for i in range(number_of_bands//2):
energies[i,int(kpoint_counter)] = float(words[5+2*i])
occupancy[i,int(kpoint_counter)] = float(words[4+2*i])
kpoint_counter += 1
kpoint_counter = int(sum(path_list[:path_counter]))
for line in open("{}/band2{:03d}.out".format(directory_path, path_counter+1)):
line = line.split("\t")[0]
words = line.split()
for i in range(number_of_bands//2):
energies[number_of_bands//2+i,kpoint_counter] = float(words[5+2*i])
occupancy[number_of_bands//2+i,kpoint_counter] = float(words[4+2*i])
kpoint_counter += 1
path_counter += 1
"Delete double kpoints at path edges"
index_count = len(kpoints)
index = 0
while index < index_count-1:
if np.array_equal(kpoints[index],kpoints[index+1]):
kpoints = np.delete(kpoints,index+1,axis=0)
energies = np.delete(energies,index+1,axis=1)
occupancy = np.delete(occupancy,index+1,axis=1)
index_count = len(kpoints)
index += 1
self.number_of_kpoints = len(kpoints)
self.CBM = extrema._calc_CBM(occupancy, energies)
self.VBM = extrema._calc_VBM(occupancy, energies)
self.fermi_energy = (self.CBM + self.VBM) / 2
"Cutting energy values in a range of 30 eV above and below the Fermi level. FHI AIMS is all electron, but not all states are needed for a meaningful effmass calculation"
index_count = len(occupancy)
index = 0
while index < index_count-1:
if all(item < self.fermi_energy - 30 for item in energies[index]):
energies = np.delete(energies, index, axis = 0)
occupancy = np.delete(occupancy, index, axis = 0)
index_count = len(occupancy)
elif all(item > self.fermi_energy + 30 for item in energies[index]):
energies = np.delete(energies, index, axis = 0)
occupancy = np.delete(occupancy, index, axis = 0)
index_count = len(occupancy)
else:
index += 1
self.energies = energies
self.occupancy = occupancy
self.kpoints = kpoints
self.check_data(self.spin_channels, self.number_of_kpoints, self.number_of_bands,
self.CBM, self.VBM, self.fermi_energy, self.occupancy)
class DataOctopus(Data):
r"""
Class for parsing and storing data from a octopus calculation. Extends the :class:`~effmass.inputs.Data` class"
Note: DataOctopus.fermi_energy is automatically set to the mean of DataOctopus.CBM and DataOctopus.VBM.
"""
def __init__(self, bandstructure_path, info_path, results_path):
r"""
Initialises an instance of the :class:`~effmass.inputs.Data` class and checks data using :meth:`check_data`.
Args:
bandStructure_path (str): The path to the bandstructure file.
info_path (str): The path to the info file.
results_path (str): The path to the results.out file.
Returns:
None.
"""
super().__init__()
assert (type(bandstructure_path) == str), "The bandStructure path must be a string"
assert (type(info_path) == str), "The info path must be a string"
assert (type(results_path) == str), "The results path must be a string"
# load the Octopus data
band_struct = bandstructure.Bandstructure(bandstructure_path)
self.number_of_kpoints = band_struct.num_kpoints
info_data = info.Info(info_path)
results_data = results.Results(results_path, self.number_of_kpoints)
# unpack the reciprocal lattice vector
lattice_vectors = info_data.get_lattice_vectors()
lattice_vector, reciprocal_lattice = zip(*lattice_vectors)
reciprocal_lattice_split = [lattice.split() for lattice in reciprocal_lattice]
self.reciprocal_lattice = [[float(v) for v in vector] for vector in reciprocal_lattice_split]
self.spin_channels = 1
self.number_of_bands = band_struct.num_bands
# load energies and occupancies from bandstructure file
octo_data_energies, octo_data_occupancies = band_struct.get_eigenvalues()
# change to shape (num kpoints, num bands)
self.energies = octo_data_energies.T
self.occupancy = octo_data_occupancies.T
# handle negative occupancy values
if np.any(self.occupancy < 0):
warnings.warn("One or more occupancies in your data are negative. All negative occupancies will be set to zero.")
self.occupancy[ self.occupancy < 0 ] = 0.0
# load kpoints from bandstructure file and into an numpy array
kpoints = band_struct.kpoints
kx, ky, kz = zip(*kpoints)
kpoints = np.array([kx, ky, kz])
# change to shape (number_of_kpoints, 3)
self.kpoints = kpoints.T
self.CBM = extrema._calc_CBM(self.occupancy, self.energies)
self.VBM = extrema._calc_VBM(self.occupancy, self.energies)
self.fermi_energy = (self.CBM + self.VBM) / 2
self.check_data(self.spin_channels, self.number_of_kpoints, self.number_of_bands,
self.CBM, self.VBM, self.fermi_energy, self.occupancy)
avg_line_length: 45.238764 | max_line_length: 251 | alphanum_fraction: 0.620863
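A brief sketch of how the readers in effmass/inputs.py above are typically driven. The OUTCAR/PROCAR paths are placeholders, and the Settings object is consumed by the downstream effmass modules rather than by this file, as its module docstring notes.

# Hedged sketch: placeholder paths, default analysis parameters.
from effmass import inputs

settings = inputs.Settings(energy_range=0.25, extrema_search_depth=0.025, bandfit=6)
data = inputs.DataVasp("./OUTCAR", "./PROCAR", ignore=0)   # ignore > 0 for hybrid workflows
print(data.number_of_bands, data.number_of_kpoints)
print("VBM: {:.3f} eV  CBM: {:.3f} eV".format(data.VBM, data.CBM))
# settings would then be passed on to the extrema/analysis steps of an effmass run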
hexsha: 4a1b79d601680ac93f9dbf8a0d0049c576ea0c96 | size: 520 | ext: py | lang: Python
max_stars:  saleor/lib/python3.7/site-packages/measurement/measures/__init__.py | cxsper/saleor | 5566ddcdaf8f72ba872eca869798e66eb9cdae44 | ["BSD-3-Clause"] | count: 1 | 2019-07-18T13:16:09.000Z to 2019-07-18T13:16:09.000Z
max_issues: saleor/lib/python3.7/site-packages/measurement/measures/__init__.py | cxsper/saleor | 5566ddcdaf8f72ba872eca869798e66eb9cdae44 | ["BSD-3-Clause"] | count: 13 | 2020-03-24T17:53:51.000Z to 2022-02-10T20:01:14.000Z
max_forks:  saleor/lib/python3.7/site-packages/measurement/measures/__init__.py | cxsper/saleor | 5566ddcdaf8f72ba872eca869798e66eb9cdae44 | ["BSD-3-Clause"] | count: 1 | 2019-01-21T10:30:30.000Z to 2019-01-21T10:30:30.000Z
content:
from measurement.measures.distance import *
from measurement.measures.energy import *
from measurement.measures.temperature import *
from measurement.measures.volume import *
from measurement.measures.mass import *
from measurement.measures.speed import *
from measurement.measures.time import *
from measurement.measures.voltage import *
from measurement.measures.resistance import *
from measurement.measures.capacitance import *
from measurement.measures.frequency import *
from measurement.measures.current import *
avg_line_length: 40 | max_line_length: 46 | alphanum_fraction: 0.838462
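A short illustration of the measure classes this __init__ re-exports, assuming the standard python-measurement API; the quantities are just examples.

# Hedged example: unit conversion via attribute access on the re-exported measures.
from measurement.measures import Distance, Weight

print(Distance(mi=1).km)    # approximately 1.609344
print(Weight(lb=135).kg)    # approximately 61.23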
hexsha: 4a1b7a5043849bab051e9613aeb0af2df05218cb | size: 335 | ext: py | lang: Python
max_stars:  chat/views.py | tilcara/ChatProject | 61ad0fcf38601c063ff7cb714f8f11ec671dae21 | ["MIT", "Unlicense"] | count: null | null to null
max_issues: chat/views.py | tilcara/ChatProject | 61ad0fcf38601c063ff7cb714f8f11ec671dae21 | ["MIT", "Unlicense"] | count: 6 | 2021-03-19T01:12:03.000Z to 2021-09-22T18:48:05.000Z
max_forks:  chat/views.py | tilcara/ChatProject | 61ad0fcf38601c063ff7cb714f8f11ec671dae21 | ["MIT", "Unlicense"] | count: null | null to null
content:
from django.shortcuts import render
from django.contrib.auth.decorators import login_required
from .models import Message
@login_required()
def room(request):
messages = list(Message.objects.values().order_by('-timestamp')[:10])
context = {
'messages': messages
}
return render(request, 'chat/room.html', context)
avg_line_length: 30.454545 | max_line_length: 73 | alphanum_fraction: 0.731343
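A hypothetical chat/urls.py showing how the room view above could be wired up; the project's real URL configuration is not included in this record, so the route name and prefix are illustrative.

# Hypothetical URL wiring for the view above (illustrative names only).
from django.urls import path
from . import views

app_name = 'chat'
urlpatterns = [
    path('', views.room, name='room'),
]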
hexsha: 4a1b7a597efc83d0b419e7fa388c5b28f77cfac4 | size: 989 | ext: py | lang: Python
max_stars:  solutions/03.py | brocki/adventofcode2021 | d54acdd1a25fd78211f4a45cdb7f96009c7faa4f | ["MIT"] | count: null | null to null
max_issues: solutions/03.py | brocki/adventofcode2021 | d54acdd1a25fd78211f4a45cdb7f96009c7faa4f | ["MIT"] | count: null | null to null
max_forks:  solutions/03.py | brocki/adventofcode2021 | d54acdd1a25fd78211f4a45cdb7f96009c7faa4f | ["MIT"] | count: null | null to null
content:
def evaluate(inputs: list) -> dict:
tmp = find_most_common(inputs)
output = ""
for char in tmp:
if char > 0:
output += "1"
else:
output += "0"
bitmask = int("".ljust(len(tmp), "1"), 2)
gamma = int(output, 2)
epsilon = int(format(~gamma & bitmask, "0"+str(len(tmp))+"b"), 2)
power = gamma * epsilon
    return {'gamma': gamma, 'gamma_bin': output, 'epsilon': epsilon, 'epsilon_bin': format(~gamma & bitmask, "0"+str(len(tmp))+"b"), 'power': power}
def find_most_common(lines: list) -> list:
tmp = []
for char in lines[0]:
tmp.append(0)
for line in lines:
for idx, char in enumerate(line):
if char == "1":
tmp[idx] += 1
else:
tmp[idx] += -1
return tmp
def main():
inputs = []
while True:
try:
input_ = input("")
if input_ == "":
break
inputs.append(input_)
except EOFError:
break
print(evaluate(inputs))
if __name__ == '__main__':
main()
| 21.042553 | 146 | 0.548028 |
4a1b7a657eed48003bbc8a2a9529f61006932e72
| 1,519 |
py
|
Python
|
setup.py
|
caserec2018/CaseRecommender
|
1b63fe79aa26786c99f35e6b8f0a0dd9e591811b
|
[
"MIT"
] | 16 |
2018-09-19T07:29:24.000Z
|
2022-03-30T07:32:36.000Z
|
setup.py
|
caserec2018/CaseRecommender
|
1b63fe79aa26786c99f35e6b8f0a0dd9e591811b
|
[
"MIT"
] | 1 |
2018-09-10T17:43:56.000Z
|
2018-09-10T17:43:56.000Z
|
setup.py
|
caserec2018/CaseRecommender
|
1b63fe79aa26786c99f35e6b8f0a0dd9e591811b
|
[
"MIT"
] | 2 |
2019-07-11T10:13:24.000Z
|
2020-03-12T10:09:39.000Z
|
""""
Setup for Case Recommender
"""
# © 2018. Case Recommender (MIT License)
from distutils.core import setup
from setuptools import find_packages
from os import path
here = path.abspath(path.dirname(__file__))
__author__ = 'removed for double blind review'
# Get the long description from the README file
with open(path.join(here, 'README.rst'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='CaseRecommender',
packages=find_packages(),
version='1.0.13',
license='MIT License',
description='A recommender systems framework for Python',
long_description=long_description,
author='removed for double blind review',
author_email='fortes.arthur@gmail.com',
url='https://github.com/caserec/CaseRecommender',
download_url='https://github.com/caserec/CaseRecommender/archive/master.zip',
keywords=['recommender systems', 'framework', 'collaborative filtering', 'content-based filtering',
'recommendation'],
classifiers=[
# Indicate who your project is intended for
'Intended Audience :: Developers',
'Topic :: Software Development :: Build Tools',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Intended Audience :: Developers',
'Topic :: Software Development :: Build Tools',
],
)
| 29.211538 | 103 | 0.669519 |
4a1b7bf4cdb2745aa22cff35f2a58bdf417a83de
| 3,340 |
py
|
Python
|
test/annotation_ontology_api_server_test.py
|
kbaseapps/annotation_ontology_api
|
4b3b268524c31446926aa276ff2b09cc9cf99625
|
[
"MIT"
] | null | null | null |
test/annotation_ontology_api_server_test.py
|
kbaseapps/annotation_ontology_api
|
4b3b268524c31446926aa276ff2b09cc9cf99625
|
[
"MIT"
] | 1 |
2022-03-14T22:28:16.000Z
|
2022-03-15T02:34:19.000Z
|
test/annotation_ontology_api_server_test.py
|
kbaseapps/annotation_ontology_api
|
4b3b268524c31446926aa276ff2b09cc9cf99625
|
[
"MIT"
] | 2 |
2021-02-18T02:08:53.000Z
|
2022-03-09T17:37:18.000Z
|
# -*- coding: utf-8 -*-
import unittest
import os # noqa: F401
import json # noqa: F401
import time
import requests
from os import environ
try:
from ConfigParser import ConfigParser # py2
except:
from configparser import ConfigParser # py3
from pprint import pprint # noqa: F401
from biokbase.workspace.client import Workspace as workspaceService
from annotation_ontology_api.annotation_ontology_apiImpl import annotation_ontology_api
from annotation_ontology_api.annotation_ontology_apiServer import MethodContext
from annotation_ontology_api.authclient import KBaseAuth as _KBaseAuth
class annotation_ontology_apiTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
token = environ.get('KB_AUTH_TOKEN', None)
config_file = environ.get('KB_DEPLOYMENT_CONFIG', None)
cls.cfg = {}
config = ConfigParser()
config.read(config_file)
for nameval in config.items('annotation_ontology_api'):
cls.cfg[nameval[0]] = nameval[1]
# Getting username from Auth profile for token
authServiceUrl = cls.cfg['auth-service-url']
auth_client = _KBaseAuth(authServiceUrl)
user_id = auth_client.get_user(token)
# WARNING: don't call any logging methods on the context object,
# it'll result in a NoneType error
cls.ctx = MethodContext(None)
cls.ctx.update({'token': token,
'user_id': user_id,
'provenance': [
{'service': 'annotation_ontology_api',
'method': 'please_never_use_it_in_production',
'method_params': []
}],
'authenticated': 1})
cls.wsURL = cls.cfg['workspace-url']
cls.wsClient = workspaceService(cls.wsURL)
cls.serviceImpl = annotation_ontology_api(cls.cfg)
cls.scratch = cls.cfg['scratch']
cls.callback_url = os.environ['SDK_CALLBACK_URL']
@classmethod
def tearDownClass(cls):
if hasattr(cls, 'wsName'):
cls.wsClient.delete_workspace({'workspace': cls.wsName})
print('Test workspace was deleted')
def getWsClient(self):
return self.__class__.wsClient
def getWsName(self):
if hasattr(self.__class__, 'wsName'):
return self.__class__.wsName
suffix = int(time.time() * 1000)
wsName = "test_annotation_ontology_api_" + str(suffix)
ret = self.getWsClient().create_workspace({'workspace': wsName}) # noqa
self.__class__.wsName = wsName
return wsName
def getImpl(self):
return self.__class__.serviceImpl
def getContext(self):
return self.__class__.ctx
# NOTE: According to Python unittest naming rules test method names should start from 'test'. # noqa
def test_your_method(self):
# Prepare test objects in workspace if needed using
# self.getWsClient().save_objects({'workspace': self.getWsName(),
# 'objects': []})
#
# Run your method by
# ret = self.getImpl().your_method(self.getContext(), parameters...)
#
# Check returned data with
# self.assertEqual(ret[...], ...) or other unittest methods
pass
| 37.111111 | 104 | 0.631138 |
4a1b7c78cc48bd29a567cdc35ed1232fdd8396a5
| 17,624 |
py
|
Python
|
lite/pylite/megenginelite/tensor.py
|
kagome1007/MegEngine
|
6814cf1cd7bfae05f1ca22a5b79a09244028c7fd
|
[
"Apache-2.0"
] | null | null | null |
lite/pylite/megenginelite/tensor.py
|
kagome1007/MegEngine
|
6814cf1cd7bfae05f1ca22a5b79a09244028c7fd
|
[
"Apache-2.0"
] | null | null | null |
lite/pylite/megenginelite/tensor.py
|
kagome1007/MegEngine
|
6814cf1cd7bfae05f1ca22a5b79a09244028c7fd
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
from ctypes import *
import numpy as np
from .base import _Ctensor, _lib, _LiteCObjBase
from .struct import LiteDataType, LiteDeviceType, LiteIOType, Structure
MAX_DIM = 7
_lite_type_to_nptypes = {
LiteDataType.LITE_INT: np.int32,
LiteDataType.LITE_FLOAT: np.float32,
LiteDataType.LITE_UINT8: np.uint8,
LiteDataType.LITE_INT8: np.int8,
LiteDataType.LITE_INT16: np.int16,
LiteDataType.LITE_UINT16: np.uint16,
LiteDataType.LITE_HALF: np.float16,
}
_nptype_to_lite_type = {val: key for key, val in _lite_type_to_nptypes.items()}
_str_nptypes_to_lite_nptypes = {
np.dtype("int32"): LiteDataType.LITE_INT,
np.dtype("float32"): LiteDataType.LITE_FLOAT,
np.dtype("uint8"): LiteDataType.LITE_UINT8,
np.dtype("int8"): LiteDataType.LITE_INT8,
np.dtype("int16"): LiteDataType.LITE_INT16,
np.dtype("uint16"): LiteDataType.LITE_UINT16,
np.dtype("float16"): LiteDataType.LITE_HALF,
}
ctype_to_lite_dtypes = {
c_int: LiteDataType.LITE_INT,
c_uint: LiteDataType.LITE_INT,
c_float: LiteDataType.LITE_FLOAT,
c_ubyte: LiteDataType.LITE_UINT8,
c_byte: LiteDataType.LITE_INT8,
c_short: LiteDataType.LITE_INT16,
c_ushort: LiteDataType.LITE_UINT16,
}
_lite_dtypes_to_ctype = {
LiteDataType.LITE_INT: c_int,
LiteDataType.LITE_FLOAT: c_float,
LiteDataType.LITE_UINT8: c_ubyte,
LiteDataType.LITE_INT8: c_byte,
LiteDataType.LITE_INT16: c_short,
LiteDataType.LITE_UINT16: c_ushort,
}
class LiteLayout(Structure):
"""
the simple layout description
"""
_fields_ = [
("_shapes", c_size_t * MAX_DIM),
("ndim", c_size_t),
("data_type", c_int),
]
def __init__(self, shape=None, dtype=None):
if shape:
shape = list(shape)
assert len(shape) <= MAX_DIM, "Layout max dim is 7."
self._shapes = (c_size_t * MAX_DIM)(*shape)
self.ndim = len(shape)
else:
self._shapes = (c_size_t * MAX_DIM)()
self.ndim = 0
if not dtype:
self.data_type = LiteDataType.LITE_FLOAT
elif isinstance(dtype, LiteDataType):
self.data_type = dtype
elif type(dtype) == str:
self.data_type = _str_nptypes_to_lite_nptypes[np.dtype(dtype)]
elif isinstance(dtype, np.dtype):
ctype = np.ctypeslib.as_ctypes_type(dtype)
self.data_type = ctype_to_lite_dtypes[ctype]
elif isinstance(dtype, type):
self.data_type = _nptype_to_lite_type[dtype]
else:
            raise RuntimeError("unknown data type")
@property
def dtype(self):
return _lite_type_to_nptypes[LiteDataType(self.data_type)]
@property
def shapes(self):
return list(self._shapes)[0 : self.ndim]
@shapes.setter
def shapes(self, shape):
shape = list(shape)
assert len(shape) <= MAX_DIM, "Layout max dim is 7."
self._shapes = (c_size_t * MAX_DIM)(*shape)
self.ndim = len(shape)
def __repr__(self):
data = {
"shapes": self.shapes,
"ndim": self.ndim,
"data_type": _lite_type_to_nptypes[LiteDataType(self.data_type)],
}
return data.__repr__()
class _LiteTensorDesc(Structure):
"""
    wrapper of the MegEngine Tensor
    :is_pinned_host: when set, the storage memory of the tensor is pinned memory,
    which is used to optimize the H2D or D2H memory copy; if the device or layout
    is not set, when copying from another device (CUDA) tensor, this tensor
    will automatically be set to a pinned tensor
"""
_fields_ = [
("is_pinned_host", c_int),
("layout", LiteLayout),
("device_type", c_int),
("device_id", c_int),
]
def __init__(self):
self.layout = LiteLayout()
self.device_type = LiteDeviceType.LITE_CPU
self.is_pinned_host = False
self.device_id = 0
def __repr__(self):
data = {
"is_pinned_host": self.is_pinned_host,
"layout": LiteLayout(self.layout),
"device_type": LiteDeviceType(self.device_type.value),
"device_id": self.device_id,
}
return data.__repr__()
class _TensorAPI(_LiteCObjBase):
"""
get the api from the lib
"""
_api_ = [
("LITE_make_tensor", [_LiteTensorDesc, POINTER(_Ctensor)]),
("LITE_set_tensor_layout", [_Ctensor, LiteLayout]),
("LITE_reset_tensor_memory", [_Ctensor, c_void_p, c_size_t]),
("LITE_reset_tensor", [_Ctensor, LiteLayout, c_void_p]),
("LITE_tensor_reshape", [_Ctensor, POINTER(c_int), c_int]),
(
"LITE_tensor_slice",
[
_Ctensor,
POINTER(c_size_t),
POINTER(c_size_t),
POINTER(c_size_t),
c_size_t,
POINTER(_Ctensor),
],
),
(
"LITE_tensor_concat",
[POINTER(_Ctensor), c_int, c_int, c_int, c_int, POINTER(_Ctensor),],
),
("LITE_tensor_fill_zero", [_Ctensor]),
("LITE_tensor_copy", [_Ctensor, _Ctensor]),
("LITE_tensor_share_memory_with", [_Ctensor, _Ctensor]),
("LITE_get_tensor_memory", [_Ctensor, POINTER(c_void_p)]),
("LITE_get_tensor_total_size_in_byte", [_Ctensor, POINTER(c_size_t)]),
("LITE_get_tensor_layout", [_Ctensor, POINTER(LiteLayout)]),
("LITE_get_tensor_device_type", [_Ctensor, POINTER(c_int)]),
("LITE_get_tensor_device_id", [_Ctensor, POINTER(c_int)]),
("LITE_destroy_tensor", [_Ctensor]),
("LITE_is_pinned_host", [_Ctensor, POINTER(c_int)]),
]
class LiteTensor(object):
"""
the tensor to hold a block of data
"""
_api = _TensorAPI()._lib
def __init__(
self,
layout=None,
device_type=LiteDeviceType.LITE_CPU,
device_id=0,
is_pinned_host=False,
shapes=None,
dtype=None,
):
"""
create a Tensor with layout, device, is_pinned_host or shapes, dtype,
device_type, device_id, is_pinned_host param
"""
self._tensor = _Ctensor()
self._layout = LiteLayout()
if layout is not None:
self._layout = layout
elif shapes is not None:
shapes = list(shapes)
self._layout = LiteLayout(shapes, dtype)
self._device_type = device_type
self._device_id = device_id
self._is_pinned_host = is_pinned_host
tensor_desc = _LiteTensorDesc()
tensor_desc.layout = self._layout
tensor_desc.device_type = device_type
tensor_desc.device_id = device_id
tensor_desc.is_pinned_host = is_pinned_host
self._api.LITE_make_tensor(tensor_desc, byref(self._tensor))
self.update()
def __del__(self):
self._api.LITE_destroy_tensor(self._tensor)
def fill_zero(self):
"""
fill the buffer memory with zero
"""
self._api.LITE_tensor_fill_zero(self._tensor)
self.update()
def share_memory_with(self, src_tensor):
"""
        share the same memory with the src_tensor; the memory of self will be freed
"""
assert isinstance(src_tensor, LiteTensor)
self._api.LITE_tensor_share_memory_with(self._tensor, src_tensor._tensor)
self.update()
@property
def layout(self):
self._api.LITE_get_tensor_layout(self._tensor, byref(self._layout))
return self._layout
@layout.setter
def layout(self, layout):
if isinstance(layout, LiteLayout):
self._layout = layout
elif isinstance(layout, list):
self._layout.shapes = layout
self._api.LITE_set_tensor_layout(self._tensor, self._layout)
@property
def is_pinned_host(self):
"""
        whether the tensor is a pinned tensor
"""
pinned = c_int()
self._api.LITE_is_pinned_host(self._tensor, byref(pinned))
self._is_pinned_host = pinned
return bool(self._is_pinned_host)
@property
def device_type(self):
"""
get device of the tensor
"""
device_type = c_int()
self._api.LITE_get_tensor_device_type(self._tensor, byref(device_type))
self._device_type = device_type
return LiteDeviceType(device_type.value)
@property
def device_id(self):
"""
get device id of the tensor
"""
device_id = c_int()
self._api.LITE_get_tensor_device_id(self._tensor, byref(device_id))
self._device_id = device_id.value
return device_id.value
@property
def is_continue(self):
"""
        whether the tensor memory is contiguous
"""
is_continue = c_int()
self._api.LITE_is_memory_continue(self._tensor, byref(is_continue))
return bool(is_continue.value)
@property
def nbytes(self):
"""
        get the length of the memory in bytes
"""
length = c_size_t()
self._api.LITE_get_tensor_total_size_in_byte(self._tensor, byref(length))
return length.value
def update(self):
"""
        update the members from C; this is used automatically after slice and share
"""
pinned = c_int()
self._api.LITE_is_pinned_host(self._tensor, byref(pinned))
self._is_pinned_host = pinned
device_type = c_int()
self._api.LITE_get_tensor_device_type(self._tensor, byref(device_type))
self._device_type = device_type
self._api.LITE_get_tensor_layout(self._tensor, byref(self._layout))
c_types = _lite_dtypes_to_ctype[self._layout.data_type]
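        # ctypes ndarray type matching the current layout; used to wrap raw memory in get_data_by_share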
self.np_array_type = np.ctypeslib._ctype_ndarray(
c_types, list(self._layout.shapes)[0 : self._layout.ndim]
)
def copy_from(self, src_tensor):
"""
        copy memory from the src_tensor
"""
assert isinstance(src_tensor, LiteTensor)
self._api.LITE_tensor_copy(self._tensor, src_tensor._tensor)
self.update()
def reshape(self, shape):
"""
        reshape the tensor without changing the data, only the shape
        :param shape: int array of dst_shape
"""
shape = list(shape)
length = len(shape)
c_shape = (c_int * length)(*shape)
self._api.LITE_tensor_reshape(self._tensor, c_shape, length)
self.update()
def slice(self, start, end, step=None):
"""
        slice the tensor with given start, end, step
        :param start: slice begin index of each dim
        :param end: slice end index of each dim
        :param step: slice step of each dim
"""
start = list(start)
end = list(end)
length = len(start)
assert length == len(end), "slice with different length of start and end."
if step:
assert length == len(step), "slice with different length of start and step."
step = list(step)
else:
step = [1 for i in range(length)]
c_start = (c_size_t * length)(*start)
c_end = (c_size_t * length)(*end)
c_step = (c_size_t * length)(*step)
slice_tensor = LiteTensor()
self._api.LITE_tensor_slice(
self._tensor, c_start, c_end, c_step, length, byref(slice_tensor._tensor)
)
slice_tensor.update()
return slice_tensor
def get_ctypes_memory(self):
"""
get the memory of the tensor, return c_void_p of the tensor memory
"""
mem = c_void_p()
self._api.LITE_get_tensor_memory(self._tensor, byref(mem))
return mem
def set_data_by_share(self, data, length=0, layout=None):
"""
        share the data with the tensor
        param data: the data to be shared with the tensor, it should be a
numpy.ndarray or ctypes data
"""
if isinstance(data, np.ndarray):
assert (
self.is_continue
), "set_data_by_share can only apply in continue tensor."
assert (
self.is_pinned_host or self.device_type == LiteDeviceType.LITE_CPU
), "set_data_by_share can only apply in cpu tensor or pinned tensor."
c_type = _lite_dtypes_to_ctype[LiteDataType(self._layout.data_type)]
if self.nbytes != data.nbytes:
self.layout = LiteLayout(data.shape, ctype_to_lite_dtypes[c_type])
self._shared_data = data
data = data.ctypes.data_as(POINTER(c_type))
if layout is not None:
self.layout = layout
else:
assert length == 0 or length == self.nbytes, "the data length is not match."
self._api.LITE_reset_tensor_memory(self._tensor, data, self.nbytes)
def set_data_by_copy(self, data, data_length=0, layout=None):
"""
        copy the data to the tensor, the memory of the tensor must be contiguous
        param data: the data to copy to the tensor, it should be a list,
        numpy.ndarray or ctypes data with length
"""
if layout is not None:
self.layout = layout
assert self.is_continue, "set_data_by_copy can only apply in continue tensor."
c_type = _lite_dtypes_to_ctype[LiteDataType(self._layout.data_type)]
cpu_tensor = LiteTensor(self._layout)
tensor_length = self.nbytes
if type(data) == list:
length = len(data)
assert (
length * sizeof(c_type) <= tensor_length
), "the length of input data to set to the tensor is too large."
cdata = (c_type * length)(*data)
self._api.LITE_reset_tensor_memory(cpu_tensor._tensor, cdata, tensor_length)
self.copy_from(cpu_tensor)
elif type(data) == np.ndarray:
self.layout = LiteLayout(data.shape, data.dtype)
cpu_tensor.layout = LiteLayout(data.shape, data.dtype)
cdata = data.ctypes.data_as(POINTER(c_type))
self._api.LITE_reset_tensor_memory(cpu_tensor._tensor, cdata, self.nbytes)
self.copy_from(cpu_tensor)
else:
assert (
data_length == self.nbytes or layout is not None
), "when input data is ctypes, the length of input data or layout must set"
self._api.LITE_reset_tensor_memory(cpu_tensor._tensor, data, tensor_length)
self.copy_from(cpu_tensor)
def get_data_by_share(self):
"""
        get the data in the tensor and share it with a new numpy array, then
        return that numpy array; be careful, the data in numpy is only valid until
        the tensor memory is written again, e.g. by the next LiteNetwork forward.
"""
self.update()
buffer = c_void_p()
self._api.LITE_get_tensor_memory(self._tensor, byref(buffer))
buffer = self.np_array_type.from_address(buffer.value)
return np.ctypeslib.as_array(buffer)
def to_numpy(self):
"""
get the buffer of the tensor
"""
self.update()
if self.nbytes <= 0:
np_type = _lite_type_to_nptypes[LiteDataType(self._layout.data_type)]
return np.array([], dtype=np_type)
if self.is_continue and (
self.is_pinned_host or self.device_type == LiteDeviceType.LITE_CPU
):
ptr = c_void_p()
self._api.LITE_get_tensor_memory(self._tensor, byref(ptr))
np_type = _lite_type_to_nptypes[LiteDataType(self._layout.data_type)]
shape = [self._layout.shapes[i] for i in range(self._layout.ndim)]
np_arr = np.zeros(shape, np_type)
if np_arr.nbytes:
memmove(np_arr.ctypes.data_as(c_void_p), ptr, np_arr.nbytes)
return np_arr
else:
tmp_tensor = LiteTensor(self.layout)
tmp_tensor.copy_from(self)
return tmp_tensor.to_numpy()
def __repr__(self):
self.update()
data = {
"layout": self._layout,
"device_type": LiteDeviceType(self._device_type.value),
"device_id": int(self.device_id),
"is_pinned_host": bool(self._is_pinned_host),
}
return data.__repr__()
def LiteTensorConcat(
tensors, dim, device_type=LiteDeviceType.LITE_DEVICE_DEFAULT, device_id=-1
):
"""
    concat the input tensors along dim into one tensor
    dim : the dim along which to concat
device_type: the result tensor device type
device_id: the result tensor device id
"""
api = _TensorAPI()._lib
length = len(tensors)
c_tensors = [t._tensor for t in tensors]
c_tensors = (_Ctensor * length)(*c_tensors)
result_tensor = LiteTensor()
api.LITE_tensor_concat(
cast(byref(c_tensors), POINTER(c_void_p)),
length,
dim,
device_type,
device_id,
byref(result_tensor._tensor),
)
result_tensor.update()
return result_tensor
def lite_dtype_2_numpy(dtype):
"""
convert lite dtype to corresponding numpy dtype
"""
assert isinstance(
dtype, LiteDataType
), "input must be LiteDataType when using lite_dtype_2_numpy."
return _lite_type_to_nptypes[dtype]
| 33.25283 | 88 | 0.622617 |
4a1b7ca9ab3b4befe5bdbb8377688bc9d72d1002
| 4,490 |
py
|
Python
|
ephypype/interfaces/mne/LF_computation.py
|
EtienneCmb/ephypype
|
fdc13efb79545f072585d1e180d03702efd9e326
|
[
"BSD-3-Clause"
] | null | null | null |
ephypype/interfaces/mne/LF_computation.py
|
EtienneCmb/ephypype
|
fdc13efb79545f072585d1e180d03702efd9e326
|
[
"BSD-3-Clause"
] | 1 |
2018-09-03T20:08:46.000Z
|
2018-09-03T21:00:55.000Z
|
ephypype/interfaces/mne/LF_computation.py
|
EtienneCmb/ephypype
|
fdc13efb79545f072585d1e180d03702efd9e326
|
[
"BSD-3-Clause"
] | null | null | null |
"""LF computation.
Created on Mon May 2 17:24:00 2016
@author: pasca
"""
import os.path as op
from nipype.utils.filemanip import split_filename as split_f
from nipype.interfaces.base import BaseInterface, BaseInterfaceInputSpec
from nipype.interfaces.base import traits, File, TraitedSpec
from ephypype.compute_fwd_problem import create_mixed_source_space
from ephypype.compute_fwd_problem import create_bem_sol, create_src_space
from ephypype.compute_fwd_problem import is_trans, compute_fwd_sol
class LFComputationConnInputSpec(BaseInterfaceInputSpec):
"""LF computation conn input spec."""
sbj_id = traits.String(desc='subject id', mandatory=True)
sbj_dir = traits.String(exists=True, desc='Freesurfer main directory',
mandatory=True)
raw_info = traits.Any(desc='raw info', mandatory=True)
raw_fname = traits.String(desc='raw file name', mandatory=True)
spacing = traits.String(desc='spacing to use to setup a source space',
mandatory=False)
aseg = traits.Bool(desc='if true sub structures will be considered',
mandatory=False)
aseg_labels = traits.List(desc='list of substructures in the src space',
mandatory=False)
save_mixed_src_space = traits.Bool(False, desc='if true save src space',
usedefault=True,
mandatory=False)
class LFComputationConnOutputSpec(TraitedSpec):
"""LF computation conn output spec."""
fwd_filename = File(exists=False, desc='LF matrix')
class LFComputation(BaseInterface):
"""Compute the Lead Field matrix using MNE Python functions.
Parameters
----------
sbj_id : str
subject name
sbj_dir : str
Freesurfer directory
raw_info : dict
information dictionary of the raw data
raw_filename : str
filename of the raw data
spacing : str (default 'ico-5')
spacing to use to setup a source space
    aseg: bool (default False)
if True a mixed source space will be created and the sub cortical
regions defined in aseg_labels will be added to the source space
aseg_labels: list (default [])
list of substructures we want to include in the mixed source space
save_mixed_src_space: bool (default False)
if True save the mixed src space
"""
input_spec = LFComputationConnInputSpec
output_spec = LFComputationConnOutputSpec
def _get_fwd_filename(self, raw_info, aseg, spacing):
data_path, raw_fname, ext = split_f(raw_info)
fwd_filename = '%s-%s' % (raw_fname, spacing)
if aseg:
fwd_filename += '-aseg'
fwd_filename = op.join(data_path, fwd_filename + '-fwd.fif')
print(('\n *** fwd_filename %s ***\n' % fwd_filename))
return fwd_filename
def _run_interface(self, runtime):
sbj_id = self.inputs.sbj_id
sbj_dir = self.inputs.sbj_dir
raw_info = self.inputs.raw_info
raw_fname = self.inputs.raw_fname
aseg = self.inputs.aseg
spacing = self.inputs.spacing
aseg_labels = self.inputs.aseg_labels
save_mixed_src_space = self.inputs.save_mixed_src_space
self.fwd_filename = self._get_fwd_filename(raw_fname, aseg,
spacing)
# check if we have just created the fwd matrix
if not op.isfile(self.fwd_filename):
bem = create_bem_sol(sbj_dir, sbj_id) # bem solution
src = create_src_space(sbj_dir, sbj_id, spacing) # src space
if aseg:
src = create_mixed_source_space(sbj_dir, sbj_id, spacing,
aseg_labels, src,
save_mixed_src_space)
n = sum(src[i]['nuse'] for i in range(len(src)))
                print(('the src space contains %d spaces and %d vertices'
% (len(src), n)))
trans_fname = is_trans(raw_fname)
            # TODO: does it make sense to have a function with a single cmd?
compute_fwd_sol(raw_info, trans_fname, src, bem, self.fwd_filename)
else:
print(('\n*** FWD file %s exists!!!\n' % self.fwd_filename))
return runtime
def _list_outputs(self):
outputs = self._outputs().get()
outputs['fwd_filename'] = self.fwd_filename
return outputs
| 33.507463 | 79 | 0.630067 |
4a1b7cea9bb43bdeda90f0fca320aa7b47abece3
| 1,805 |
py
|
Python
|
tf_quant_finance/math/__init__.py
|
cottrell/tf-quant-finance
|
e05664ed13933812446e95f8a05fccab86b8909c
|
[
"Apache-2.0"
] | 1 |
2019-11-19T00:10:07.000Z
|
2019-11-19T00:10:07.000Z
|
tf_quant_finance/math/__init__.py
|
SeptumCapital/tf-quant-finance
|
5aba5ddab3a4dd1efa87d5a12fec403315d2ac98
|
[
"Apache-2.0"
] | null | null | null |
tf_quant_finance/math/__init__.py
|
SeptumCapital/tf-quant-finance
|
5aba5ddab3a4dd1efa87d5a12fec403315d2ac98
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TensorFlow Quantitative Finance general math functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tf_quant_finance.math import integration
from tf_quant_finance.math import interpolation
from tf_quant_finance.math import optimizer
from tf_quant_finance.math import pde
from tf_quant_finance.math import piecewise
from tf_quant_finance.math import random_ops as random
from tf_quant_finance.math import root_search
from tf_quant_finance.math import segment_ops
from tf_quant_finance.math.diff_ops import diff
from tf_quant_finance.math.gradient import fwd_gradient
from tf_quant_finance.math.gradient import gradients
from tf_quant_finance.math.gradient import make_val_and_grad_fn
from tf_quant_finance.math.gradient import value_and_gradient
from tensorflow.python.util.all_util import remove_undocumented # pylint: disable=g-direct-tensorflow-import
_allowed_symbols = [
'fwd_gradient',
'integration',
'interpolation',
'optimizer',
'pde',
'piecewise',
'random',
'root_search',
'diff',
'segment_ops',
'value_and_gradient',
'make_val_and_grad_fn',
]
remove_undocumented(__name__, _allowed_symbols)
| 34.711538 | 109 | 0.798892 |
4a1b7dad03ea5755d0807172989f83010f07a8df
| 9,423 |
py
|
Python
|
utility/Upload.py
|
wuye999/Y2B
|
6441ff0d9a9e67ae58a2845d4da0c783d91904e1
|
[
"MIT"
] | 29 |
2020-02-24T00:00:09.000Z
|
2021-12-13T07:56:56.000Z
|
utility/Upload.py
|
ghc7046/Y2B
|
6441ff0d9a9e67ae58a2845d4da0c783d91904e1
|
[
"MIT"
] | 6 |
2021-06-13T05:53:08.000Z
|
2022-01-13T03:12:48.000Z
|
utility/Upload.py
|
ghc7046/Y2B
|
6441ff0d9a9e67ae58a2845d4da0c783d91904e1
|
[
"MIT"
] | 10 |
2020-05-17T05:37:04.000Z
|
2022-01-24T10:45:11.000Z
|
import json
import os
import math
import base64
import time
from utility import tool
import threading
import re
def uploadFile(cookie: dict, videoPath: str, enableParallel=False) -> str:
logger = tool.getLogger()
logger.info(f"start {videoPath}")
file_size = os.path.getsize(videoPath)
s = tool.Session()
s.cookies.update(cookie)
s.headers.update({
"Origin": "https://member.bilibili.com",
"Referer": "https://member.bilibili.com/video/upload.html",
})
limit: threading.Semaphore = None
limitCnt = 0
upos: str = None
upcdn: str = None
cost: float = 99999999
rs = s.get("https://member.bilibili.com/preupload?r=probe",
wantStatusCode=200).json()
testContent = b'\0' * 1048576
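    # probe each candidate upload line with a 1 MiB PUT and keep the fastest responder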
for i in rs["lines"]:
testURL = f"https:{i['probe_url']}"
start = time.time()
tRs = s.put(testURL, data=testContent)
LCost = time.time() - start
logger.debug(
f"url:{i['probe_url']};code:{tRs.status_code};cost:{LCost}")
if tRs.status_code == 200 and "NGINX_OK" in tRs.text and LCost < cost:
cost = LCost
upos = i["os"]
upcdn = i["query"]
del testContent
    if upos is None or upcdn is None:
return False, ""
upcdn = re.findall("upcdn=([^&]+)", upcdn)[0]
logger.debug(f"upos[{upos}],cdn[{upcdn}]")
ext = videoPath.split(".")[-1]
param = {
"name": "{}.{}".format(int(time.time()), ext),
"size": file_size,
"r": upos,
"profile": "ugcupos/bup",
"ssl": "0",
"version": "2.7.1",
"build": "2070100",
"upcdn": upcdn,
"probe_version": "20200427",
}
url = "https://member.bilibili.com/preupload"
_data = s.get(url=url, params=param).text
logger.debug(_data)
_data = json.loads(_data)
upload_size = _data["chunk_size"]
upos_uri = _data["upos_uri"].replace("upos:/", "").replace("/ugc/", "")
biz_id = _data["biz_id"]
endpoint = _data["endpoint"]
auth = _data["auth"]
if enableParallel:
limit = threading.Semaphore(_data["threads"])
limitCnt = _data["threads"]
logger.info("use parallel upload, count:{}".format(_data["threads"]))
logger.info("preupload done")
# get upload id
data_url = f"https:{endpoint}/ugc/{upos_uri}?uploads&output=json"
s.headers.update({"X-Upos-Auth": auth})
# while True:
# try:
# _data = s.post(url=data_url).json()
# upload_id = _data["upload_id"]
# break
# except (IndexError, KeyError):
# time.sleep(2)
# continue
_data = s.post(url=data_url).json()
upload_id = _data["upload_id"]
logger.debug(json.dumps(_data))
logger.info("get upload id done")
# start upload
# upload_size = 8 * 1024 * 1024
upload_url = f"https:{endpoint}/ugc/{upos_uri}"
total_chunk = math.ceil(file_size / upload_size)
index = 1
now_size = 0
restore = {"parts": []}
file = open(videoPath, "rb")
    # read and upload the file in chunks
while now_size < file_size:
new_end = min(now_size + upload_size, file_size - 1)
part = file.read(upload_size)
size = len(part)
param = {
"total": file_size,
"partNumber": index,
"uploadId": upload_id,
"chunk": index - 1,
"chunks": total_chunk,
"size": size,
"start": now_size,
"end": new_end
}
now_size = new_end + 1
index += 1
def threadUpload(url, param, part, s):
logger = tool.getLogger()
res = s.put(url=upload_url, params=param,
data=part, wantStatusCode=200)
logger.info(f"{param['partNumber']}/{param['chunks']}:{res.text}")
limit.release()
if enableParallel:
limit.acquire()
tool.Thread(target=threadUpload, args=(
upload_url, param.copy(), part, s)).start()
else:
res = s.put(url=upload_url, params=param,
data=part, wantStatusCode=200)
logger.info(f"{index - 1}/{total_chunk}:{res.text}")
restore["parts"].append({"partNumber": index, "eTag": "etag"})
file.close()
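    # wait for all parallel chunk uploads to finish by re-acquiring every semaphore slot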
for _ in range(limitCnt):
if not limit.acquire(timeout=60 * 20):
return False, ""
del limit
    # upload finished
param = {
'output': 'json',
'name': time.ctime() + "." + ext,
'profile': 'ugcupos/bup',
'uploadId': upload_id,
'biz_id': biz_id,
}
_data = s.post(upload_url, params=param, json=restore).text
logger.info(f"upload file done: {upos_uri}")
logger.debug(_data)
return True, upos_uri
def uploadWithOldBvid(cookie: dict, uploadInfo: dict, videoPath: str):
logger = tool.getLogger()
enableParallel = uploadInfo.get("enableParallel", True)
success, upos_uri = uploadFile(
cookie, videoPath, enableParallel=enableParallel)
if not success:
return False, "", ""
s = tool.Session()
s.cookies.update(cookie)
url = f"https://member.bilibili.com/x/vu/web/edit?csrf={cookie['bili_jct']}"
# s.headers.pop("X-Upos-Auth")
_rs = s.get(
f"https://member.bilibili.com/x/web/archive/view?bvid={uploadInfo['bvid']}"
).json()["data"]
# logger.debug(json.dumps(_rs["videos"]))
videos = []
for i in _rs["videos"]:
        if len(i['reject_reason']) > 0:  # check whether the video has errors, e.g. duplicate upload, decode errors or rule violations
logger.debug(
"{}-{}:{}".format(i["aid"], i["cid"], i["reject_reason"]))
continue
videos.append({"filename": i["filename"], "title": i["title"]})
videos.append({"filename": upos_uri.split(".")[0],
"title": uploadInfo["title"][0:min(79, len(uploadInfo["title"]))],
"desc": uploadInfo["id"]
})
send_data = {"copyright": 2,
"videos": videos,
"source": _rs["archive"]["source"],
"tid": _rs["archive"]["tid"],
"cover": _rs["archive"]["cover"].split(":")[-1],
"title": _rs["archive"]["title"],
"tag": _rs["archive"]["tag"],
"desc_format_id": 0,
"desc": _rs["archive"]["desc"],
"dynamic": _rs["archive"]["dynamic"],
"subtitle": {
"open": 0,
"lan": ""
},
"bvid": uploadInfo["bvid"],
"handle_staff": False,
}
logger.debug(json.dumps(send_data))
# s.headers.update({"Content-Type": "application/json;charset=UTF-8"})
res = s.post(url=url, json=send_data).text
logger.debug(res)
return True, res, upos_uri
def uploadWithNewBvid(cookie: dict, uploadInfo: dict, videoPath: str):
logger = tool.getLogger()
enableParallel = uploadInfo.get("enableParallel", True)
success, upos_uri = uploadFile(
cookie, videoPath, enableParallel=enableParallel)
if not success:
return False, "", ""
s = tool.Session()
s.cookies.update(cookie)
csrf = cookie["bili_jct"]
def cover(csrf, uploadInfo):
vid = uploadInfo["id"]
__url = "https://member.bilibili.com/x/vu/web/cover/up"
__imgURL = f"https://i1.ytimg.com/vi/{vid}/maxresdefault.jpg"
__imgURL2 = f"https://i1.ytimg.com/vi/{vid}/hqdefault.jpg"
__rs = s.get(__imgURL, useProxy=True, wantStatusCode=200)
if __rs is None:
__rs = s.get(__imgURL2, useProxy=True, wantStatusCode=200)
__send = {"cover": "data:image/jpeg;base64," +
base64.b64encode(__rs.content).decode(),
"csrf": csrf
}
__res = s.post(url=__url, data=__send).json()
return __res["data"]["url"].replace("http:", "").replace("https:", "")
url = "https://member.bilibili.com/x/vu/web/add?csrf=" + csrf
# s.headers.pop("X-Upos-Auth")
_data = s.get("https://member.bilibili.com/x/geetest/pre/add").text
logger.debug(_data)
send_data = {"copyright": 2,
"videos": [{"filename": upos_uri.split(".")[0],
"title": uploadInfo["title"],
"desc": ""}],
"source": "https://www.youtube.com/watch?v=" + uploadInfo["id"],
"tid": int(uploadInfo["tid"]),
"cover": cover(csrf, uploadInfo),
"title": uploadInfo["ptitle"],
"tag": ','.join(uploadInfo["tags"]),
"desc_format_id": 0,
"desc": uploadInfo["desc"],
"dynamic": "#" + "##".join(uploadInfo["tags"]) + "#",
"subtitle": {
"open": 0,
"lan": ""}
}
logger.debug(json.dumps(send_data))
# s.headers.update({"Content-Type": "application/json;charset=UTF-8"})
while True:
res = s.post(url=url, json=send_data).text
logger.debug(res)
code = json.loads(res)["code"]
if code == 0:
break
if code == 21070:
time.sleep(20)
else:
            logger.error(f"unknown status code {code}")
s.close()
return True, res, upos_uri
# if __name__ == "__main__":
# get_youtube_url2("iCfr8N0Q8IA")
| 35.693182 | 85 | 0.537621 |
4a1b7db7c79b0df13ab1f5965c32e3e2ca8f100b
| 15,416 |
py
|
Python
|
rep/metaml/folding.py
|
vkuznet/rep
|
71939da79b45c28aa130680a11c13ede08f48a0e
|
[
"Apache-2.0"
] | null | null | null |
rep/metaml/folding.py
|
vkuznet/rep
|
71939da79b45c28aa130680a11c13ede08f48a0e
|
[
"Apache-2.0"
] | null | null | null |
rep/metaml/folding.py
|
vkuznet/rep
|
71939da79b45c28aa130680a11c13ede08f48a0e
|
[
"Apache-2.0"
] | null | null | null |
"""
This is specific meta-algorithm based on the idea of cross-validation.
"""
from __future__ import division, print_function, absolute_import
import numpy
from sklearn import clone
from six.moves import zip
from . import utils
from sklearn.cross_validation import KFold
from sklearn.utils.validation import check_random_state
from .factory import train_estimator
from ..estimators.interface import Classifier, Regressor
from ..estimators.utils import check_inputs
import pandas
__author__ = 'Tatiana Likhomanenko, Alex Rogozhnikov'
__all__ = ['FoldingClassifier', 'FoldingRegressor']
from .utils import get_classifier_probabilities, get_classifier_staged_proba, get_regressor_prediction, \
get_regressor_staged_predict
class FoldingBase(object):
"""
Base class for FoldingClassifier and FoldingRegressor
"""
def __init__(self,
base_estimator,
n_folds=2,
random_state=None,
features=None,
parallel_profile=None):
self.estimators = []
self.parallel_profile = parallel_profile
self.n_folds = n_folds
self.base_estimator = base_estimator
self._folds_indices = None
self.random_state = random_state
self._random_number = None
# setting features directly
self.features = features
def _get_folds_column(self, length):
"""
Return special column with indices of folds for all events.
"""
if self._random_number is None:
self._random_number = check_random_state(self.random_state).randint(0, 100000)
folds_column = numpy.zeros(length)
for fold_number, (_, folds_indices) in enumerate(
KFold(length, self.n_folds, shuffle=True, random_state=self._random_number)):
folds_column[folds_indices] = fold_number
return folds_column
def _prepare_data(self, X, y, sample_weight):
raise NotImplementedError('To be implemented in descendant')
def fit(self, X, y, sample_weight=None):
"""
        Train the classifier; this will train several base classifiers on overlapping
        subsets of the training dataset.
:param X: pandas.DataFrame of shape [n_samples, n_features]
:param y: labels of events - array-like of shape [n_samples]
:param sample_weight: weight of events,
array-like of shape [n_samples] or None if all weights are equal
"""
if hasattr(self.base_estimator, 'features'):
assert self.base_estimator.features is None, \
'Base estimator must have None features! Use features parameter in Folding instead'
self.train_length = len(X)
X, y, sample_weight = self._prepare_data(X, y, sample_weight)
folds_column = self._get_folds_column(len(X))
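        # train one clone of the base estimator per fold, each on the remaining n-1 folds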
for _ in range(self.n_folds):
self.estimators.append(clone(self.base_estimator))
if sample_weight is None:
weights_iterator = [None] * self.n_folds
else:
weights_iterator = (sample_weight[folds_column != index] for index in range(self.n_folds))
result = utils.map_on_cluster(self.parallel_profile, train_estimator,
range(len(self.estimators)),
self.estimators,
(X.iloc[folds_column != index, :].copy() for index in range(self.n_folds)),
(y[folds_column != index] for index in range(self.n_folds)),
weights_iterator)
for status, data in result:
if status == 'success':
name, classifier, spent_time = data
self.estimators[name] = classifier
else:
print('Problem while training on the node, report:\n', data)
return self
def _folding_prediction(self, X, prediction_function, vote_function=None):
"""
Supplementary function to predict (labels, probabilities, values)
:param X: dataset to predict
:param prediction_function: function(classifier, X) -> prediction
:param vote_function: if using averaging over predictions of folds, this function shall be passed.
For instance: lambda x: numpy.mean(x, axis=0), which means averaging result over all folds.
Another useful option is lambda x: numpy.median(x, axis=0)
"""
X = self._get_features(X)
if vote_function is not None:
print('KFold prediction with voting function')
results = []
for estimator in self.estimators:
results.append(prediction_function(estimator, X))
# results: [n_classifiers, n_samples, n_dimensions], reduction over 0th axis
results = numpy.array(results)
return vote_function(results)
else:
if len(X) != self.train_length:
print('KFold prediction using random classifier (length of data passed not equal to length of train)')
else:
print('KFold prediction using folds column')
folds_column = self._get_folds_column(len(X))
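            # each sample is routed to the estimator of its own fold, i.e. the one that did not train on it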
parts = []
for fold in range(self.n_folds):
parts.append(prediction_function(self.estimators[fold], X.iloc[folds_column == fold, :]))
result_shape = [len(X)] + list(numpy.shape(parts[0])[1:])
results = numpy.zeros(shape=result_shape)
folds_indices = [numpy.where(folds_column == fold)[0] for fold in range(self.n_folds)]
for fold, part in enumerate(parts):
results[folds_indices[fold]] = part
return results
def _staged_folding_prediction(self, X, prediction_function, vote_function=None):
X = self._get_features(X)
if vote_function is not None:
print('Using voting KFold prediction')
iterators = [prediction_function(estimator, X) for estimator in self.estimators]
for fold_prob in zip(*iterators):
result = numpy.array(fold_prob)
yield vote_function(result)
else:
if len(X) != self.train_length:
print('KFold prediction using random classifier (length of data passed not equal to length of train)')
else:
print('KFold prediction using folds column')
folds_column = self._get_folds_column(len(X))
iterators = [prediction_function(self.estimators[fold], X.iloc[folds_column == fold, :])
for fold in range(self.n_folds)]
folds_indices = [numpy.where(folds_column == fold)[0] for fold in range(self.n_folds)]
for stage_results in zip(*iterators):
result_shape = [len(X)] + list(numpy.shape(stage_results[0])[1:])
result = numpy.zeros(result_shape)
for fold in range(self.n_folds):
result[folds_indices[fold]] = stage_results[fold]
yield result
def _get_feature_importances(self):
"""
Get features importance
:return: pandas.DataFrame with column effect and `index=features`
"""
importances = numpy.sum([est.feature_importances_ for est in self.estimators], axis=0)
# to get train_features, not features
one_importances = self.estimators[0].get_feature_importances()
return pandas.DataFrame({'effect': importances / numpy.max(importances)}, index=one_importances.index)
class FoldingRegressor(FoldingBase, Regressor):
"""
This meta-regressor implements folding algorithm:
    * training data is split into n equal parts;
* we train n regressors, each one is trained using n-1 folds
To build unbiased predictions for data, pass the **same** dataset (with same order of events)
as in training to `predict` or `staged_predict`, in which case
classifier will use to predict each event that base classifier which didn't use that event during training.
To use information from not one, but several estimators during predictions,
provide appropriate voting function. Examples of voting function:
>>> voting = lambda x: numpy.mean(x, axis=0)
>>> voting = lambda x: numpy.median(x, axis=0)
Parameters:
-----------
:param sklearn.BaseEstimator base_estimator: base classifier, which will be used for training
:param int n_folds: count of folds
:param features: features used in training
:type features: None or list[str]
:param parallel_profile: profile for IPython cluster, None to compute locally.
:type parallel_profile: None or str
:param random_state: random state for reproducibility
:type random_state: None or int or RandomState
"""
def _prepare_data(self, X, y, sample_weight):
X = self._get_features(X)
y_shape = numpy.shape(y)
self.n_outputs_ = 1 if len(y_shape) < 2 else y_shape[1]
return check_inputs(X, y, sample_weight=sample_weight, allow_multiple_targets=True)
def predict(self, X, vote_function=None):
"""
Get predictions. To get unbiased predictions on training dataset, pass training data
(with same order of events) and vote_function=None.
:param X: pandas.DataFrame of shape [n_samples, n_features]
:param vote_function: function to combine prediction of folds' estimators.
If None then folding scheme is used. Parameters: numpy.ndarray [n_classifiers, n_samples]
:type vote_function: None or function
:rtype: numpy.array of shape [n_samples, n_outputs]
"""
return self._folding_prediction(X, prediction_function=get_regressor_prediction,
vote_function=vote_function)
def staged_predict(self, X, vote_function=None):
"""
Get predictions after each iteration of base estimator.
To get unbiased predictions on training dataset, pass training data
(with same order of events) and vote_function=None.
:param X: pandas.DataFrame of shape [n_samples, n_features]
:param vote_function: function to combine prediction of folds' estimators.
If None then folding scheme is used. Parameters: numpy.ndarray [n_classifiers, n_samples]
:type vote_function: None or function
:rtype: sequence of numpy.array of shape [n_samples, n_outputs]
"""
        return self._staged_folding_prediction(X, prediction_function=get_regressor_staged_predict,
                                               vote_function=vote_function)
def get_feature_importances(self):
"""
Get features importance
:rtype: pandas.DataFrame with column effect and `index=features`
"""
return self._get_feature_importances()
@property
def feature_importances_(self):
"""Sklearn-way of returning feature importance.
        This is returned as a numpy.array, assuming that train_features=None was passed initially """
return self.get_feature_importances().ix[self.features, 'effect'].values
class FoldingClassifier(FoldingBase, Classifier):
"""
This meta-classifier implements folding algorithm:
    * training data is split into n equal parts;
* we train n classifiers, each one is trained using n-1 folds
To build unbiased predictions for data, pass the **same** dataset (with same order of events)
as in training to `predict`, `predict_proba` or `staged_predict_proba`, in which case
classifier will use to predict each event that base classifier which didn't use that event during training.
To use information from not one, but several estimators during predictions,
provide appropriate voting function. Examples of voting function:
>>> voting = lambda x: numpy.mean(x, axis=0)
>>> voting = lambda x: numpy.median(x, axis=0)
Parameters:
-----------
:param sklearn.BaseEstimator base_estimator: base classifier, which will be used for training
:param int n_folds: count of folds
:param features: features used in training
:type features: None or list[str]
:param parallel_profile: profile for IPython cluster, None to compute locally.
:type parallel_profile: None or str
:param random_state: random state for reproducibility
:type random_state: None or int or RandomState
"""
def _prepare_data(self, X, y, sample_weight):
X = self._get_features(X)
self._set_classes(y)
return check_inputs(X, y, sample_weight=sample_weight, allow_multiple_targets=True)
def predict(self, X, vote_function=None):
"""
Predict labels. To get unbiased predictions on training dataset, pass training data
(with same order of events) and vote_function=None.
:param X: pandas.DataFrame of shape [n_samples, n_features]
:param vote_function: function to combine prediction of folds' estimators.
If None then folding scheme is used.
:type vote_function: None or function
:rtype: numpy.array of shape [n_samples]
"""
return numpy.argmax(self.predict_proba(X, vote_function=vote_function), axis=1)
def predict_proba(self, X, vote_function=None):
"""
Predict probabilities. To get unbiased predictions on training dataset, pass training data
(with same order of events) and vote_function=None.
:param X: pandas.DataFrame of shape [n_samples, n_features]
:param vote_function: function to combine prediction of folds' estimators.
If None then folding scheme is used.
:type vote_function: None or function
:rtype: numpy.array of shape [n_samples, n_classes]
"""
result = self._folding_prediction(X, prediction_function=get_classifier_probabilities,
vote_function=vote_function)
return result / numpy.sum(result, axis=1, keepdims=True)
def staged_predict_proba(self, X, vote_function=None):
"""
Predict probabilities after each stage of base_estimator.
To get unbiased predictions on training dataset, pass training data
(with same order of events) and vote_function=None.
:param X: pandas.DataFrame of shape [n_samples, n_features]
:param vote_function: function to combine prediction of folds' estimators.
If None then folding scheme is used.
:type vote_function: None or function
:rtype: sequence of numpy.arrays of shape [n_samples, n_classes]
"""
for proba in self._staged_folding_prediction(X, prediction_function=get_classifier_staged_proba,
vote_function=vote_function):
yield proba / numpy.sum(proba, axis=1, keepdims=True)
def get_feature_importances(self):
"""
Get features importance
:rtype: pandas.DataFrame with column effect and `index=features`
"""
return self._get_feature_importances()
@property
def feature_importances_(self):
"""Sklearn-way of returning feature importance.
        This is returned as a numpy.array, assuming that train_features=None was passed initially """
return self.get_feature_importances().ix[self.features, 'effect'].values
| 44.813953 | 118 | 0.659639 |
4a1b8142d6513fa2b1dbe4aba1485dae793082e6
| 6,643 |
py
|
Python
|
accelbyte_py_sdk/api/social/models/bulk_user_stat_item_update.py
|
encyphered/accelbyte-python-sdk
|
09c1e989d7251de308150fdcd3119d662ca2d205
|
[
"MIT"
] | null | null | null |
accelbyte_py_sdk/api/social/models/bulk_user_stat_item_update.py
|
encyphered/accelbyte-python-sdk
|
09c1e989d7251de308150fdcd3119d662ca2d205
|
[
"MIT"
] | null | null | null |
accelbyte_py_sdk/api/social/models/bulk_user_stat_item_update.py
|
encyphered/accelbyte-python-sdk
|
09c1e989d7251de308150fdcd3119d662ca2d205
|
[
"MIT"
] | null | null | null |
# Auto-generated at 2021-09-27T17:12:34.256874+08:00
# from: Justice Social Service (1.17.1)
# Copyright (c) 2018 - 2021 AccelByte Inc. All Rights Reserved.
# This is licensed software from AccelByte Inc, for limitations
# and restrictions contact your company contract manager.
# pylint: disable=duplicate-code
# pylint: disable=line-too-long
# pylint: disable=missing-function-docstring
# pylint: disable=missing-module-docstring
# pylint: disable=too-many-arguments
# pylint: disable=too-many-branches
# pylint: disable=too-many-instance-attributes
# pylint: disable=too-many-lines
# pylint: disable=too-many-locals
# pylint: disable=too-many-public-methods
# pylint: disable=too-many-return-statements
# pylint: disable=too-many-statements
# pylint: disable=unused-import
from __future__ import annotations
from typing import Any, Dict, List, Optional, Tuple, Union
from ....core import Model
class BulkUserStatItemUpdate(Model):
"""Bulk user stat item update
Properties:
user_id: (userId) REQUIRED str
additional_key: (additionalKey) OPTIONAL str
stat_code: (statCode) REQUIRED str
update_strategy: (updateStrategy) REQUIRED str
value: (value) REQUIRED float
additional_data: (additionalData) OPTIONAL Dict[str, Any]
"""
# region fields
user_id: str # REQUIRED
additional_key: str # OPTIONAL
stat_code: str # REQUIRED
update_strategy: str # REQUIRED
value: float # REQUIRED
additional_data: Dict[str, Any] # OPTIONAL
# endregion fields
# region with_x methods
def with_user_id(self, value: str) -> BulkUserStatItemUpdate:
self.user_id = value
return self
def with_additional_key(self, value: str) -> BulkUserStatItemUpdate:
self.additional_key = value
return self
def with_stat_code(self, value: str) -> BulkUserStatItemUpdate:
self.stat_code = value
return self
def with_update_strategy(self, value: str) -> BulkUserStatItemUpdate:
self.update_strategy = value
return self
def with_value(self, value: float) -> BulkUserStatItemUpdate:
self.value = value
return self
def with_additional_data(self, value: Dict[str, Any]) -> BulkUserStatItemUpdate:
self.additional_data = value
return self
# endregion with_x methods
# region to methods
def to_dict(self, include_empty: bool = False) -> dict:
result = {}
if hasattr(self, "user_id") and self.user_id:
result["userId"] = str(self.user_id)
elif include_empty:
result["userId"] = str()
if hasattr(self, "additional_key") and self.additional_key:
result["additionalKey"] = str(self.additional_key)
elif include_empty:
result["additionalKey"] = str()
if hasattr(self, "stat_code") and self.stat_code:
result["statCode"] = str(self.stat_code)
elif include_empty:
result["statCode"] = str()
if hasattr(self, "update_strategy") and self.update_strategy:
result["updateStrategy"] = str(self.update_strategy)
elif include_empty:
result["updateStrategy"] = str()
if hasattr(self, "value") and self.value:
result["value"] = float(self.value)
elif include_empty:
result["value"] = float()
if hasattr(self, "additional_data") and self.additional_data:
result["additionalData"] = {str(k0): v0 for k0, v0 in self.additional_data.items()}
elif include_empty:
result["additionalData"] = {}
return result
# endregion to methods
# region static methods
@classmethod
def create(
cls,
user_id: str,
stat_code: str,
update_strategy: str,
value: float,
additional_key: Optional[str] = None,
additional_data: Optional[Dict[str, Any]] = None,
) -> BulkUserStatItemUpdate:
instance = cls()
instance.user_id = user_id
instance.stat_code = stat_code
instance.update_strategy = update_strategy
instance.value = value
if additional_key is not None:
instance.additional_key = additional_key
if additional_data is not None:
instance.additional_data = additional_data
return instance
@classmethod
def create_from_dict(cls, dict_: dict, include_empty: bool = False) -> BulkUserStatItemUpdate:
instance = cls()
if not dict_:
return instance
if "userId" in dict_ and dict_["userId"] is not None:
instance.user_id = str(dict_["userId"])
elif include_empty:
instance.user_id = str()
if "additionalKey" in dict_ and dict_["additionalKey"] is not None:
instance.additional_key = str(dict_["additionalKey"])
elif include_empty:
instance.additional_key = str()
if "statCode" in dict_ and dict_["statCode"] is not None:
instance.stat_code = str(dict_["statCode"])
elif include_empty:
instance.stat_code = str()
if "updateStrategy" in dict_ and dict_["updateStrategy"] is not None:
instance.update_strategy = str(dict_["updateStrategy"])
elif include_empty:
instance.update_strategy = str()
if "value" in dict_ and dict_["value"] is not None:
instance.value = float(dict_["value"])
elif include_empty:
instance.value = float()
if "additionalData" in dict_ and dict_["additionalData"] is not None:
instance.additional_data = {str(k0): v0 for k0, v0 in dict_["additionalData"].items()}
elif include_empty:
instance.additional_data = {}
return instance
@staticmethod
def get_field_info() -> Dict[str, str]:
return {
"userId": "user_id",
"additionalKey": "additional_key",
"statCode": "stat_code",
"updateStrategy": "update_strategy",
"value": "value",
"additionalData": "additional_data",
}
# endregion static methods
| 36.5 | 109 | 0.59732 |
4a1b8173d80b00b2a0a2a9f60b007879c7ad459f
| 2,139 |
py
|
Python
|
sis/terms.py
|
ryanlovett/sis-cli
|
5efe5b9344b547c3f1365ef63a0ad33ec013fcca
|
[
"Apache-2.0"
] | null | null | null |
sis/terms.py
|
ryanlovett/sis-cli
|
5efe5b9344b547c3f1365ef63a0ad33ec013fcca
|
[
"Apache-2.0"
] | null | null | null |
sis/terms.py
|
ryanlovett/sis-cli
|
5efe5b9344b547c3f1365ef63a0ad33ec013fcca
|
[
"Apache-2.0"
] | null | null | null |
# vim:set et sw=4 ts=4:
import logging
import sys
from . import sis
# logging
logging.basicConfig(stream=sys.stdout, level=logging.WARNING)
logger = logging.getLogger(__name__)
# SIS endpoint
terms_uri = "https://apis.berkeley.edu/sis/v1/terms"
async def get_term_name(app_id, app_key, term_id):
'''Given a term id, return the term's friendly name.'''
uri = f'{terms_uri}/{term_id}'
headers = {
"Accept": "application/json",
"app_id": app_id, "app_key": app_key
}
    # no extra query parameters are needed when looking up a single term by id
    params = {}
    terms = await sis.get_items(uri, params, headers, 'terms')
if len(terms) == 0: # if we are between terms
return None
return terms[0]['name']
async def get_term_id(app_id, app_key, position='Current'):
'''Given a temporal position of Current, Previous, or Next, return
the corresponding term's ID.'''
uri = terms_uri
headers = {
"Accept": "application/json",
"app_id": app_id, "app_key": app_key
}
params = { "temporal-position": position }
terms = await sis.get_items(uri, params, headers, 'terms')
logger.debug(f"terms: {terms}")
if len(terms) == 0: # if we are between terms
return None
return terms[0]['id']
async def get_term_id_from_year_sem(app_id, app_key, year, semester):
'''Given a year and Berkeley semester, return the corresponding
term's ID.'''
headers = {
"Accept": "application/json",
"app_id": app_id, "app_key": app_key
}
if semester == 'spring':
mm_dd = '02-01'
elif semester == 'summer':
mm_dd = '07-01'
elif semester == 'fall':
mm_dd = '10-01'
else:
raise Exception(f"No such semester: {semester}")
params = { "as-of-date": f"{year}-{mm_dd}" }
uri = terms_uri
terms = await sis.get_items(uri, params, headers, 'terms')
return terms[0]['id']
async def normalize_term_id(app_id, app_key, term_id):
'''Convert temporal position (current, next, previous) to a numeric term id,
or passthrough a numeric term id.'''
if term_id.isalpha():
term_id = await get_term_id(app_id, app_key, term_id)
return term_id
| 31.455882 | 80 | 0.635344 |
4a1b81854597a10ae125307527d20961311fee42
| 960 |
py
|
Python
|
isi_sdk_8_2_0/test/test_groupnet_subnet_extended.py
|
mohitjain97/isilon_sdk_python
|
a371f438f542568edb8cda35e929e6b300b1177c
|
[
"Unlicense"
] | 24 |
2018-06-22T14:13:23.000Z
|
2022-03-23T01:21:26.000Z
|
isi_sdk_8_2_0/test/test_groupnet_subnet_extended.py
|
mohitjain97/isilon_sdk_python
|
a371f438f542568edb8cda35e929e6b300b1177c
|
[
"Unlicense"
] | 46 |
2018-04-30T13:28:22.000Z
|
2022-03-21T21:11:07.000Z
|
isi_sdk_8_2_0/test/test_groupnet_subnet_extended.py
|
mohitjain97/isilon_sdk_python
|
a371f438f542568edb8cda35e929e6b300b1177c
|
[
"Unlicense"
] | 29 |
2018-06-19T00:14:04.000Z
|
2022-02-08T17:51:19.000Z
|
# coding: utf-8
"""
Isilon SDK
Isilon SDK - Language bindings for the OneFS API # noqa: E501
OpenAPI spec version: 7
Contact: sdk@isilon.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import isi_sdk_8_2_0
from isi_sdk_8_2_0.models.groupnet_subnet_extended import GroupnetSubnetExtended # noqa: E501
from isi_sdk_8_2_0.rest import ApiException
class TestGroupnetSubnetExtended(unittest.TestCase):
"""GroupnetSubnetExtended unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testGroupnetSubnetExtended(self):
"""Test GroupnetSubnetExtended"""
# FIXME: construct object with mandatory attributes with example values
# model = isi_sdk_8_2_0.models.groupnet_subnet_extended.GroupnetSubnetExtended() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| 23.414634 | 102 | 0.722917 |
4a1b81f0e2e61e39548eb91722cdc593e4c9e1f4
| 1,395 |
py
|
Python
|
folio/urls.py
|
KieranSweeden/fol.io
|
a6f231e3f9fb96841387b04d72131470c5fc3239
|
[
"OLDAP-2.5",
"OLDAP-2.4",
"OLDAP-2.3"
] | null | null | null |
folio/urls.py
|
KieranSweeden/fol.io
|
a6f231e3f9fb96841387b04d72131470c5fc3239
|
[
"OLDAP-2.5",
"OLDAP-2.4",
"OLDAP-2.3"
] | null | null | null |
folio/urls.py
|
KieranSweeden/fol.io
|
a6f231e3f9fb96841387b04d72131470c5fc3239
|
[
"OLDAP-2.5",
"OLDAP-2.4",
"OLDAP-2.3"
] | null | null | null |
"""folio URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/4.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from django.conf import settings
from django.conf.urls.static import static
urlpatterns = [
path('admin/', admin.site.urls),
path('accounts/', include('allauth.urls')),
path('', include('home.urls')),
path('library/', include('library.urls')),
path('suite/', include('suite.urls')),
path('license/', include('license.urls')),
path('account/', include('account.urls')),
path('showcase/', include('showcase.urls')),
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
handler400 = 'folio.views.bad_request_400'
handler403 = 'folio.views.user_forbidden_403'
handler404 = 'folio.views.page_not_found_404'
handler500 = 'folio.views.server_error_500'
| 36.710526 | 77 | 0.711828 |
4a1b822d02b7891aabc174ef83fe38a5ec6ba4ec
| 3,322 |
py
|
Python
|
KNN/mnistparser.py
|
ab3llini/MachineLearning
|
2ca8529782ed800a6463e35509e646d094dc42a1
|
[
"MIT"
] | null | null | null |
KNN/mnistparser.py
|
ab3llini/MachineLearning
|
2ca8529782ed800a6463e35509e646d094dc42a1
|
[
"MIT"
] | null | null | null |
KNN/mnistparser.py
|
ab3llini/MachineLearning
|
2ca8529782ed800a6463e35509e646d094dc42a1
|
[
"MIT"
] | null | null | null |
from scipy.io import loadmat
import random
import sys
import math
import numpy as np
# This class parses the mnist dataset and returns x and y vectors for training
class MnistParser:
def __init__(self, target=0):
# Load the matlab file
self.df = loadmat('mnist.mat')['mnist'][0][0]
# Convert data from uint8 to int32 to avoid overflows and unexpected behaviour during computation
self.tr_images = np.array(self.df[target]).astype(np.int32)
self.tr_labels = np.array(self.df[target + 2]).astype(np.int32)
# Returns an image at index idx
def fetch_img(self, idx):
return self.tr_images[:, :, idx], self.tr_labels[idx]
# Select size samples from the dataset, shuffle is turned on by default
def select(self, seed=None, shuffle=True, size=1000):
if seed is not None:
random.seed(seed)
x, y = [], []
# Size of the dataset
ltr = len(self.tr_labels)
# Used as a helper to shuffle the images preserving proper label
container = []
# Fetch all the images
for idx in range(ltr):
img, label = self.fetch_img(idx)
container.append([img, label])
# If wanted, shuffle
if shuffle:
random.shuffle(container)
# Select randomly a subset of container and use it as training set
start_idx = random.randint(0, ltr - 1)
# Method used to select a portion of a data frame, given start and end index
x, y = self._subsplit(container, start_idx, size)
return x, y
# This method creates and returns k folds for the x and y vectors
def k_fold(self, x, y, k):
size = len(y)
fold_size = math.floor(size / k)
# Depending on size and k, we might need to account even for non equal folds
rest = size % k
x_folds = []
y_folds = []
# Creates and append elements to each fold
for i in range(k):
idx = i * fold_size
if i == k - 1:
x_folds.append(x[idx:idx + fold_size + rest])
y_folds.append(y[idx:idx + fold_size + rest])
else:
x_folds.append(x[idx:idx + fold_size])
y_folds.append(y[idx:idx + fold_size])
return x_folds, y_folds
@staticmethod
def _subsplit(data, start, size):
# Containers for x and y (either train, validation or test)
x, y = [], []
# Starting index
idx = start
data_size = len(data)
# Counter to keep track of how many samples have been added to the list
added = 0
while added < size:
if idx == data_size:
idx = 0
# This is the current sample we are analyzing
sample = data[idx]
x.append(sample[0])
y.append(sample[1][0])
# Increment both counter and index
added += 1
idx += 1
return x, y
# Aux method used for debug to print an image on console
def print_img_repr(self, img):
for r in img:
for c in r:
if c > 0:
sys.stdout.write("*")
else:
sys.stdout.write("-")
sys.stdout.write("\n")
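# --- Hedged usage sketch, not part of the original module ---
# Shows how select() and k_fold() compose; it assumes the 'mnist.mat' file
# loaded in __init__ is present in the working directory.
if __name__ == '__main__':
    parser = MnistParser()
    x, y = parser.select(seed=42, shuffle=True, size=100)
    x_folds, y_folds = parser.k_fold(x, y, k=5)
    print('samples:', len(y), 'folds:', len(y_folds), 'fold size:', len(y_folds[0]))
    parser.print_img_repr(x[0])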
| 28.152542 | 105 | 0.562914 |
4a1b830a324209f7abbb9bebca002c79d2c0da4e
| 5,803 |
py
|
Python
|
homeassistant/components/opencv/image_processing.py
|
itewk/home-assistant
|
769cf19052f8c9ef374d8ba8ae7705ccc7bf4cf4
|
[
"Apache-2.0"
] | 23 |
2017-11-15T21:03:53.000Z
|
2021-03-29T21:33:48.000Z
|
homeassistant/components/opencv/image_processing.py
|
itewk/home-assistant
|
769cf19052f8c9ef374d8ba8ae7705ccc7bf4cf4
|
[
"Apache-2.0"
] | 9 |
2022-01-27T06:32:10.000Z
|
2022-03-31T07:07:51.000Z
|
homeassistant/components/opencv/image_processing.py
|
itewk/home-assistant
|
769cf19052f8c9ef374d8ba8ae7705ccc7bf4cf4
|
[
"Apache-2.0"
] | 10 |
2018-01-01T00:12:51.000Z
|
2021-12-21T23:08:05.000Z
|
"""Support for OpenCV classification on images."""
from datetime import timedelta
import logging
import numpy
import requests
import voluptuous as vol
from homeassistant.components.image_processing import (
CONF_ENTITY_ID,
CONF_NAME,
CONF_SOURCE,
PLATFORM_SCHEMA,
ImageProcessingEntity,
)
from homeassistant.core import split_entity_id
import homeassistant.helpers.config_validation as cv
try:
# Verify that the OpenCV python package is pre-installed
import cv2
CV2_IMPORTED = True
except ImportError:
CV2_IMPORTED = False
_LOGGER = logging.getLogger(__name__)
ATTR_MATCHES = "matches"
ATTR_TOTAL_MATCHES = "total_matches"
CASCADE_URL = (
"https://raw.githubusercontent.com/opencv/opencv/master/data/"
+ "lbpcascades/lbpcascade_frontalface.xml"
)
CONF_CLASSIFIER = "classifier"
CONF_FILE = "file"
CONF_MIN_SIZE = "min_size"
CONF_NEIGHBORS = "neighbors"
CONF_SCALE = "scale"
DEFAULT_CLASSIFIER_PATH = "lbp_frontalface.xml"
DEFAULT_MIN_SIZE = (30, 30)
DEFAULT_NEIGHBORS = 4
DEFAULT_SCALE = 1.1
DEFAULT_TIMEOUT = 10
SCAN_INTERVAL = timedelta(seconds=2)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Optional(CONF_CLASSIFIER): {
cv.string: vol.Any(
cv.isfile,
vol.Schema(
{
vol.Required(CONF_FILE): cv.isfile,
vol.Optional(CONF_SCALE, DEFAULT_SCALE): float,
vol.Optional(
CONF_NEIGHBORS, DEFAULT_NEIGHBORS
): cv.positive_int,
vol.Optional(CONF_MIN_SIZE, DEFAULT_MIN_SIZE): vol.Schema(
(int, int)
),
}
),
)
}
}
)
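# --- Illustrative only, not part of the original integration ---
# A sketch of the two accepted shapes for the "classifier" option, matching the
# schema above; the cascade file paths are hypothetical.
_EXAMPLE_CLASSIFIER_CONFIG = {
    # simple form: classifier name -> cascade file path
    "Face": "/config/lbp_frontalface.xml",
    # extended form: classifier name -> per-classifier tuning
    "Profile": {
        CONF_FILE: "/config/lbp_profileface.xml",
        CONF_SCALE: 1.2,
        CONF_NEIGHBORS: 4,
        CONF_MIN_SIZE: (40, 40),
    },
}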
def _create_processor_from_config(hass, camera_entity, config):
"""Create an OpenCV processor from configuration."""
classifier_config = config.get(CONF_CLASSIFIER)
name = "{} {}".format(
config[CONF_NAME], split_entity_id(camera_entity)[1].replace("_", " ")
)
processor = OpenCVImageProcessor(hass, camera_entity, name, classifier_config)
return processor
def _get_default_classifier(dest_path):
"""Download the default OpenCV classifier."""
_LOGGER.info("Downloading default classifier")
req = requests.get(CASCADE_URL, stream=True)
with open(dest_path, "wb") as fil:
for chunk in req.iter_content(chunk_size=1024):
if chunk: # filter out keep-alive new chunks
fil.write(chunk)
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the OpenCV image processing platform."""
if not CV2_IMPORTED:
_LOGGER.error(
"No OpenCV library found! Install or compile for your system "
"following instructions here: http://opencv.org/releases.html"
)
return
entities = []
if CONF_CLASSIFIER not in config:
dest_path = hass.config.path(DEFAULT_CLASSIFIER_PATH)
_get_default_classifier(dest_path)
config[CONF_CLASSIFIER] = {"Face": dest_path}
for camera in config[CONF_SOURCE]:
entities.append(
OpenCVImageProcessor(
hass,
camera[CONF_ENTITY_ID],
camera.get(CONF_NAME),
config[CONF_CLASSIFIER],
)
)
add_entities(entities)
class OpenCVImageProcessor(ImageProcessingEntity):
"""Representation of an OpenCV image processor."""
def __init__(self, hass, camera_entity, name, classifiers):
"""Initialize the OpenCV entity."""
self.hass = hass
self._camera_entity = camera_entity
if name:
self._name = name
else:
self._name = "OpenCV {0}".format(split_entity_id(camera_entity)[1])
self._classifiers = classifiers
self._matches = {}
self._total_matches = 0
self._last_image = None
@property
def camera_entity(self):
"""Return camera entity id from process pictures."""
return self._camera_entity
@property
def name(self):
"""Return the name of the image processor."""
return self._name
@property
def state(self):
"""Return the state of the entity."""
return self._total_matches
@property
def state_attributes(self):
"""Return device specific state attributes."""
return {ATTR_MATCHES: self._matches, ATTR_TOTAL_MATCHES: self._total_matches}
def process_image(self, image):
"""Process the image."""
cv_image = cv2.imdecode(numpy.asarray(bytearray(image)), cv2.IMREAD_UNCHANGED)
for name, classifier in self._classifiers.items():
scale = DEFAULT_SCALE
neighbors = DEFAULT_NEIGHBORS
min_size = DEFAULT_MIN_SIZE
if isinstance(classifier, dict):
path = classifier[CONF_FILE]
scale = classifier.get(CONF_SCALE, scale)
neighbors = classifier.get(CONF_NEIGHBORS, neighbors)
min_size = classifier.get(CONF_MIN_SIZE, min_size)
else:
path = classifier
cascade = cv2.CascadeClassifier(path)
detections = cascade.detectMultiScale(
cv_image, scaleFactor=scale, minNeighbors=neighbors, minSize=min_size
)
matches = {}
total_matches = 0
regions = []
# pylint: disable=invalid-name
for (x, y, w, h) in detections:
regions.append((int(x), int(y), int(w), int(h)))
total_matches += 1
matches[name] = regions
self._matches = matches
self._total_matches = total_matches
| 29.912371 | 86 | 0.619335 |
4a1b830fc255c9c999aec0f224cbc85e4e04ebe7
| 397 |
py
|
Python
|
venv/Scripts/pip-script.py
|
libingluan/UI-test
|
3deba2931203aa8bfc564f6a162e82b6649e6606
|
[
"MIT"
] | null | null | null |
venv/Scripts/pip-script.py
|
libingluan/UI-test
|
3deba2931203aa8bfc564f6a162e82b6649e6606
|
[
"MIT"
] | null | null | null |
venv/Scripts/pip-script.py
|
libingluan/UI-test
|
3deba2931203aa8bfc564f6a162e82b6649e6606
|
[
"MIT"
] | null | null | null |
#!F:\Python\engage-ui-test\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==10.0.1','console_scripts','pip'
__requires__ = 'pip==10.0.1'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==10.0.1', 'console_scripts', 'pip')()
)
| 30.538462 | 69 | 0.657431 |
4a1b83baef9aae8296c8a804e89e74606272081c
| 562 |
py
|
Python
|
manage.py
|
bartongroup/slivka-bio
|
049aee943503963ce5c9b14267fe001edd8e0125
|
[
"Apache-2.0"
] | null | null | null |
manage.py
|
bartongroup/slivka-bio
|
049aee943503963ce5c9b14267fe001edd8e0125
|
[
"Apache-2.0"
] | 3 |
2021-09-01T16:47:02.000Z
|
2022-02-09T09:01:31.000Z
|
manage.py
|
bartongroup/slivka-bio
|
049aee943503963ce5c9b14267fe001edd8e0125
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
import os
import sys
home = os.path.dirname(os.path.abspath(__file__))
if __name__ == "__main__":
home = os.environ.get('SLIVKA_HOME', home)
os.environ.setdefault('SLIVKA_HOME', home)
sys.path.append(home)
try:
import slivka.cli
except ImportError:
raise ImportError(
"Couldn't import slivka. Make sure it's installed corectly "
"and available on you PYTHONPATH environment variable. "
"Check if you activated virtual environment."
)
slivka.cli.main()
| 26.761905 | 72 | 0.649466 |
4a1b847094437e89e761dd4222f6ccc6a83dca94
| 1,795 |
py
|
Python
|
fortnite.py
|
laynebergstrom5505/TechnoAyanBOT
|
684901d2754ecf187cd7b419f7409c469f84ea66
|
[
"MIT"
] | 3 |
2020-06-05T21:21:55.000Z
|
2020-06-06T19:45:17.000Z
|
fortnite.py
|
laynebergstrom5505/TechnoAyanBOT
|
684901d2754ecf187cd7b419f7409c469f84ea66
|
[
"MIT"
] | null | null | null |
fortnite.py
|
laynebergstrom5505/TechnoAyanBOT
|
684901d2754ecf187cd7b419f7409c469f84ea66
|
[
"MIT"
] | 1 |
2021-01-09T11:32:05.000Z
|
2021-01-09T11:32:05.000Z
|
"""Emoji
Available Commands:
.fortnite
built by @r4v4n4, feel free to take this one too"""
from telethon import events
import asyncio
from uniborg.util import admin_cmd
@borg.on(admin_cmd(pattern=r"fortnite"))
async def _(event):
if event.fwd_from:
return
animation_interval = 1
animation_ttl = range(0, 5)
#input_str = event.pattern_match.group(1)
# if input_str == "isro":
await event.edit("Connecting..")
animation_chars = [
"⠀⠀⠀⣶⣿⣶
⠀⠀⠀⣿⣿⣿⣀
⠀⣀⣿⣿⣿⣿⣿⣿
⣶⣿⠛⣭⣿⣿⣿⣿
⠛⠛⠛⣿⣿⣿⣿⠿
⠀⠀⠀⠀⣿⣿⣿
⠀⠀⣀⣭⣿⣿⣿⣿⣀
⠀⠤⣿⣿⣿⣿⣿⣿⠉
⠀⣿⣿⣿⣿⣿⣿⠉
⣿⣿⣿⣿⣿⣿
⣿⣿⣶⣿⣿
⠉⠛⣿⣿⣶⣤
⠀⠀⠉⠿⣿⣿⣤
⠀⠀⣀⣤⣿⣿⣿
⠀⠒⠿⠛⠉⠿⣿
⠀⠀⠀⠀⠀⣀⣿⣿
⠀⠀⠀⠀⣶⠿⠿⠛""",
"⠀⠀⠀⠀⠀⠀⠀⠀⠀⣤⣤
⠀⠀⠀⠀⠀⠀⠀⠀⠀⣿⣿⣿
⠀⠀⣶⠀⠀⣀⣤⣶⣤⣉⣿⣿⣤⣀
⠤⣤⣿⣤⣿⠿⠿⣿⣿⣿⣿⣿⣿⣿⣿⣀
⠀⠛⠿⠀⠀⠀⠀⠉⣿⣿⣿⣿⣿⠉⠛⠿⣿⣤
⠀⠀⠀⠀⠀⠀⠀⠀⠿⣿⣿⣿⠛⠀⠀⠀⣶⠿
⠀⠀⠀⠀⠀⠀⠀⠀⣀⣿⣿⣿⣿⣤⠀⣿⠿
⠀⠀⠀⠀⠀⠀⠀⣶⣿⣿⣿⣿⣿⣿⣿⣿
⠀⠀⠀⠀⠀⠀⠀⠿⣿⣿⣿⣿⣿⠿⠉⠉
⠀⠀⠀⠀⠀⠀⠀⠉⣿⣿⣿⣿⠿
⠀⠀⠀⠀⠀⠀⠀⠀⣿⣿⣿⠉
⠀⠀⠀⠀⠀⠀⠀⠀⣛⣿⣭⣶⣀
⠀⠀⠀⠀⠀⠀⠀⠀⣿⣿⣿⣿⣿
⠀⠀⠀⠀⠀⠀⠀⠀⠀⣿⣿⠉⠛⣿
⠀⠀⠀⠀⠀⠀⠀⠀⠀⣿⣿⠀⠀⣿⣿
⠀⠀⠀⠀⠀⠀⠀⠀⠀⣿⣉⠀⣶⠿
⠀⠀⠀⠀⠀⠀⠀⠀⣶⣿⠿
⠀⠀⠀⠀⠀⠀⠀⠛⠿⠛""",
"⠀⠀⠀⠀⠀⠀⠀⠀⠀⣤⣶
⠀⠀⠀⠀⠀⣀⣀⠀⣶⣿⣿⠶
⣶⣿⠿⣿⣿⣿⣿⣿⣿⣿⣿⣤⣤
⠀⠉⠶⣶⣀⣿⣿⣿⣿⣿⣿⣿⠿⣿⣤⣀
⠀⠀⠀⣿⣿⠿⠉⣿⣿⣿⣿⣭⠀⠶⠿⠿
⠀⠀⠛⠛⠿⠀⠀⣿⣿⣿⣉⠿⣿⠶
⠀⠀⠀⠀⠀⣤⣶⣿⣿⣿⣿⣿
⠀⠀⠀⠀⠀⣿⣿⣿⣿⣿⣿⣿⠒
⠀⠀⠀⠀⣀⣿⣿⣿⣿⣿⣿⣿
⠀⠀⠀⠀⠀⣿⣿⣿⠛⣭⣭⠉
⠀⠀⠀⠀⠀⣿⣿⣭⣤⣿⠛
⠀⠀⠀⠀⠀⠛⠿⣿⣿⣿⣭
⠀⠀⠀⠀⠀⠀⠀⣿⣿⠉⠛⠿⣶⣤
⠀⠀⠀⠀⠀⠀⣀⣿⠀⠀⣶⣶⠿⠿⠿
⠀⠀⠀⠀⠀⠀⣿⠛
⠀⠀⠀⠀⠀⠀⣭⣶""",
"⠀⠀⠀⠀⠀⠀⣶⣿⣶
⠀⠀⠀⣤⣤⣤⣿⣿⣿
⠀⠀⣶⣿⣿⣿⣿⣿⣿⣿⣶
⠀⠀⣿⣿⣿⣿⣿⣿⣿⣿⣿
⠀⠀⣿⣉⣿⣿⣿⣿⣉⠉⣿⣶
⠀⠀⣿⣿⣿⣿⣿⣿⣿⣿⠿⣿
⠀⣤⣿⣿⣿⣿⣿⣿⣿⠿⠀⣿⣶
⣤⣿⠿⣿⣿⣿⣿⣿⠿⠀⠀⣿⣿⣤
⠉⠉⠀⣿⣿⣿⣿⣿⠀⠀⠒⠛⠿⠿⠿
⠀⠀⠀⠉⣿⣿⣿⠀⠀⠀⠀⠀⠀⠉
⠀⠀⠀⣿⣿⣿⣿⣿⣶
⠀⠀⠀⠀⣿⠉⠿⣿⣿
⠀⠀⠀⠀⣿⣤⠀⠛⣿⣿
⠀⠀⠀⠀⣶⣿⠀⠀⠀⣿⣶
⠀⠀⠀⠀⠀⠀⠀⠀⠀⣭⣿⣿
⠀⠀⠀⠀⠀⠀⠀⠀⣤⣿⣿⠉""",
"⠀⠀⠀⠀⣀
⠀⠀⣶⣿⠿⠀⠀⠀⣀⠀⣤⣤
⠀⣶⣿⠀⠀⠀⠀⣿⣿⣿⠛⠛⠿⣤⣀
⣶⣿⣤⣤⣤⣤⣤⣿⣿⣿⣀⣤⣶⣭⣿⣶⣀
⠉⠉⠉⠛⠛⠿⣿⣿⣿⣿⣿⣿⣿⠛⠛⠿⠿
⠀⠀⠀⠀⠀⠀⠀⣿⣿⣿⣿⣿⠿
⠀⠀⠀⠀⠀⠀⠀⠿⣿⣿⣿⣿
⠀⠀⠀⠀⠀⠀⠀⠀⣭⣿⣿⣿⣿⣿
⠀⠀⠀⠀⠀⠀⠀⣤⣿⣿⣿⣿⣿⣿
⠀⠀⠀⠀⠀⠀⠀⣿⣿⣿⣿⣿⣿⠿
⠀⠀⠀⠀⠀⠀⠀⣿⣿⣿⣿⣿⠿
⠀⠀⠀⠀⠀⠀⠀⣿⣿⣿⣿⣿
⠀⠀⠀⠀⠀⠀⠀⠉⣿⣿⣿⣿
⠀⠀⠀⠀⠀⠀⠀⠀⠉⣿⣿⣿⣿
⠀⠀⠀⠀⠀⠀⠀⠀⠀⠉⣿⠛⠿⣿⣤
⠀⠀⠀⠀⠀⠀⠀⠀⠀⣀⣿⠀⠀⠀⣿⣿⣤
⠀⠀⠀⠀⠀⠀⠀⠀⠀⣿⠀⠀⠀⣶⣿⠛⠉
⠀⠀⠀⠀⠀⠀⠀⠀⣤⣿⣿⠀⠀⠉
⠀⠀⠀⠀⠀⠀⠀⠀⠀⠉""",
]
for i in animation_ttl:
await asyncio.sleep(animation_interval)
await event.edit(animation_chars[i % 5])
| 13.496241 | 48 | 0.22507 |
4a1b85e0ae71dd959553748c119218db7fc5350a
| 6,622 |
py
|
Python
|
src/gerenciador_de_trajetos.py
|
denielfer/Malha-aeria---pbl-conectvidade-problema3
|
55fe417e797b2ecadb6dcf971191d328aaf9582a
|
[
"MIT"
] | null | null | null |
src/gerenciador_de_trajetos.py
|
denielfer/Malha-aeria---pbl-conectvidade-problema3
|
55fe417e797b2ecadb6dcf971191d328aaf9582a
|
[
"MIT"
] | null | null | null |
src/gerenciador_de_trajetos.py
|
denielfer/Malha-aeria---pbl-conectvidade-problema3
|
55fe417e797b2ecadb6dcf971191d328aaf9582a
|
[
"MIT"
] | null | null | null |
from trecho import Trecho
class Gerenciador_de_trajetos():
def __init__(self, trechos:list[Trecho]):
self.trechos = trechos
self.trajetos = {trecho.saida:{} for trecho in trechos}
for i, trecho in enumerate(trechos):
if(trecho.destino not in self.trajetos[trecho.saida]):
self.trajetos[trecho.saida][trecho.destino] = []
self.trajetos[trecho.saida][trecho.destino].append({
'companhia': trecho.empresa,
'index': i,
'opção': len(self.trajetos[trecho.saida][trecho.destino]) + 1,
'custo': trecho.custo,
'tempo': trecho.tempo,
'vagas_totais': trecho.quantidade_maxima_de_vagas})
def find_from(self, saida:str) -> dict:
'''
        Find the cities connected to the given city ({saida}).
        @param saida: string indicating which city to query
        @return dict with information about the connected cities and the flights that connect them
                (the dict is keyed by the connected city's name; each value is a list of
                flight records connecting the two cities)
'''
if (saida in self.trajetos):
return self.trajetos[saida]
return None
def find_from_to(self, saida:str, destino:str) -> list[dict]:
'''
        Find the flights that connect city {saida} to city {destino}.
        @param saida: string indicating the departure city
        @param destino: string indicating the destination city
        @return list of dicts with information about the flights connecting these cities
'''
if (saida in self.trajetos):
if(destino in self.trajetos[saida]):
return self.trajetos[saida][destino]
return None
def make_all_trajetos(self, saida:str, destino:str):
'''
        Search for every route that links {saida} to {destino}.
        @param saida: string indicating the departure city
        @param destino: string indicating the destination city
        @return: tuple (success, search_result) where
                success is a bool holding True if search_result contains
                at least one route
                search_result is a list of routes; a route is a list of
                city names, in the order that leads from
                {saida} to {destino}
'''
resultado = []
success = False
search = {f'_{saida}':[saida]}
        while search != {}:  # while there are still partial routes to expand
next_iteraration = {}
for id in search:
cidade = id.split('_')[1]
if(cidade in self.trajetos):
vizinhos = search[id]
for cidade_conectada in self.trajetos[cidade]:
if(cidade_conectada not in vizinhos):
if cidade_conectada == destino:
resultado.append(vizinhos + [cidade_conectada])
success = True
else:
                                # the key '{cidade}_{cidade_conectada}' is used because in-progress
                                # searches are kept in a dict: if two cities on the same level both
                                # connect to the same third city, keying by the city name alone
                                # would overwrite one of the two searches
                                next_iteraration[f'{cidade}_{cidade_conectada}'] = vizinhos + [cidade_conectada]
search = next_iteraration
return success, resultado
def get_vagas(self, index:int):
'''
        Report how many seats are still available on the specified leg.
        @param index: int index of the requested flight in the flight list
        @return int number of remaining seats on the requested flight
'''
trecho = self.trechos[index]
return trecho.quantidade_maxima_de_vagas - trecho.quantidade_de_vagas_ocupadas
def add_voo(self, voo:Trecho):
'''
        Add a flight to the manager.
        @param voo: Trecho (flight leg) to be added
'''
self.trechos.append(voo)
if(voo.saida not in self.trajetos):
self.trajetos[voo.saida] = {}
if(voo.destino not in self.trajetos[voo.saida]):
self.trajetos[voo.saida][voo.destino] = []
self.trajetos[voo.saida][voo.destino].append({
'companhia': voo.empresa,
'index': len(self.trechos) - 1,
'opção': len(self.trajetos[voo.saida][voo.destino]) + 1,
'custo': voo.custo,
'tempo': voo.tempo,
'vagas_totais': voo.quantidade_maxima_de_vagas})
if(__name__=='__main__'):
saidas = 'asdfasaeqfef'
destinos ='sdqefasdsqwd'
trechos = []
for i in range(len(saidas)):
trechos.append(Trecho(saida = saidas[i], destino = destinos[i], custo = 1, tempo = 1, empresa = 'A', quantidade_maxima_de_vagas = 10))
gerenciador = Gerenciador_de_trajetos(trechos)
# print(f"Esperado :(True, [['a', 's', 'd'], ['a', 'f', 'd'], ['a', 'f', 'e', 'd'], ['a', 'f', 'q', 's', 'd']]) ->")
print(f"Resultado busca de 'a' para 'd':{gerenciador.make_all_trajetos('a','d')}")
assert (True, [['a', 's', 'd'], ['a', 'f', 'd'], ['a', 'f', 'e', 'd'], ['a', 'f', 'q', 's', 'd']]) == gerenciador.make_all_trajetos('a','d')
# print('passed test: 1')
# print(f"Esperado :(True, [['f', 'q', 's', 'a'], ['f', 'e', 'd', 'q', 's', 'a'], ['f', 'e', 'd', 'q', 's', 'a']]) -> ")
print(f"Resultado busca de 's' para 'a':{gerenciador.make_all_trajetos('f','a')}")
assert (True, [['f', 'q', 's', 'a'], ['f', 'd', 'q', 's', 'a'], ['f', 'e', 'd', 'q', 's', 'a']]) == gerenciador.make_all_trajetos('f','a')
# print('passed test: 2')
# del(trechos[8])
# print(f"Esperado :(True, [['a', 's', 'd'], ['a', 'f', 'd'], ['a', 'f', 'e', 'd'], ['a', 'f', 'q', 's', 'd']]) -> ")
print(f"Resultado busca de 'a' para 'q':{gerenciador.make_all_trajetos('a','q')}")
assert (True, [['a', 'f', 'q'], ['a', 's', 'd', 'q'], ['a', 'f', 'd', 'q'], ['a', 'f', 'e', 'd', 'q']]) == gerenciador.make_all_trajetos('a','q')
# print('passed test: 3')
# print(f"Esperado :(False, []) -> ")
print(f"Resultado busca de 'w' para 'a':{gerenciador.make_all_trajetos('w','a')}")
assert (False, []) == gerenciador.make_all_trajetos('w','a')
# print('passed test: 4')
| 50.938462 | 345 | 0.561915 |
4a1b86a97e14ddaa5b69880886592f618d3b3342
| 8,707 |
py
|
Python
|
seleniumwire/proxy/client.py
|
nck/selenium-wire
|
be2cca0dc556ebf84daac84a3a0315378f871f48
|
[
"MIT"
] | 5 |
2020-03-11T06:08:08.000Z
|
2021-09-22T02:46:51.000Z
|
seleniumwire/proxy/client.py
|
nck/selenium-wire
|
be2cca0dc556ebf84daac84a3a0315378f871f48
|
[
"MIT"
] | 3 |
2020-08-18T09:35:55.000Z
|
2021-11-15T17:49:55.000Z
|
seleniumwire/proxy/client.py
|
nck/selenium-wire
|
be2cca0dc556ebf84daac84a3a0315378f871f48
|
[
"MIT"
] | 1 |
2021-03-04T08:39:21.000Z
|
2021-03-04T08:39:21.000Z
|
import http.client
import json
import logging
import threading
from urllib.parse import quote_plus
from .handler import ADMIN_PATH, CaptureRequestHandler, create_custom_capture_request_handler
from .server import ProxyHTTPServer
log = logging.getLogger(__name__)
class AdminClient:
"""Provides an API for sending commands to a remote proxy server."""
def __init__(self, proxy_mgr_addr=None, proxy_mgr_port=None):
# The address of the proxy manager if set
self._proxy_mgr_addr = proxy_mgr_addr
self._proxy_mgr_port = proxy_mgr_port
# Reference to a created proxy instance and its address/port
self._proxy = None
self._proxy_addr = None
self._proxy_port = None
self._capture_request_handler = None
def create_proxy(self, addr='127.0.0.1', port=0, proxy_config=None, options=None):
"""Creates a new proxy server and returns the address and port number that the
server was started on.
Args:
addr: The address the proxy server will listen on. Default 127.0.0.1.
port: The port the proxy server will listen on. Default 0 - which means
use the first available port.
proxy_config: The configuration for any upstream proxy server. Default
is None.
options: Additional options to configure the proxy.
Returns:
A tuple of the address and port number of the created proxy server.
"""
if self._proxy_mgr_addr is not None and self._proxy_mgr_port is not None:
# TODO: ask the proxy manager to create a proxy and return that
pass
if options is None:
options = {}
custom_response_handler = options.get('custom_response_handler')
if custom_response_handler is not None:
self._capture_request_handler = create_custom_capture_request_handler(custom_response_handler)
else:
self._capture_request_handler = CaptureRequestHandler
self._capture_request_handler.protocol_version = 'HTTP/1.1'
self._capture_request_handler.timeout = options.get('connection_timeout', 5)
self._proxy = ProxyHTTPServer((addr, port), self._capture_request_handler,
proxy_config=proxy_config, options=options)
t = threading.Thread(name='Selenium Wire Proxy Server', target=self._proxy.serve_forever)
t.daemon = not options.get('standalone')
t.start()
socketname = self._proxy.socket.getsockname()
self._proxy_addr = socketname[0]
self._proxy_port = socketname[1]
log.info('Created proxy listening on {}:{}'.format(self._proxy_addr, self._proxy_port))
return self._proxy_addr, self._proxy_port
def destroy_proxy(self):
"""Stops the proxy server and performs any clean up actions."""
log.info('Destroying proxy')
# If proxy manager set, we would ask it to do this
self._proxy.shutdown()
self._proxy.server_close() # Closes the server socket
def get_requests(self):
"""Returns the requests currently captured by the proxy server.
The data is returned as a list of dictionaries in the format:
[{
'id': 'request id',
'method': 'GET',
'path': 'http://www.example.com/some/path',
'headers': {
'Accept': '*/*',
'Host': 'www.example.com'
},
'response': {
'status_code': 200,
'reason': 'OK',
'headers': {
'Content-Type': 'text/plain',
'Content-Length': '15012'
}
}
}, ...]
Note that the value of the 'response' key may be None where no response
is associated with a given request.
Returns:
A list of request dictionaries.
"""
return self._make_request('GET', '/requests')
def get_last_request(self):
"""Returns the last request captured by the proxy server.
This is more efficient than running get_requests()[-1]
Returns:
The last request as a dictionary or None if no requests have been
made.
"""
return self._make_request('GET', '/last_request')
def clear_requests(self):
"""Clears any previously captured requests from the proxy server."""
self._make_request('DELETE', '/requests')
def find(self, path):
"""Find the first request that contains the specified path.
Requests are searched in chronological order.
Args:
path: The request path which can be any part of the request URL.
"""
return self._make_request('GET', '/find?path={}'.format(quote_plus(str(path))))
def get_request_body(self, request_id):
"""Returns the body of the request with the specified request_id.
Args:
request_id: The request identifier.
Returns:
The binary request body, or None if the request has no body.
"""
return self._make_request('GET', '/request_body?request_id={}'.format(request_id)) or None
def get_response_body(self, request_id):
"""Returns the body of the response associated with the request with the
specified request_id.
Args:
request_id: The request identifier.
Returns:
The binary response body, or None if the response has no body.
"""
return self._make_request('GET', '/response_body?request_id={}'.format(request_id)) or None
def set_header_overrides(self, headers):
"""Sets the header overrides.
Args:
headers: A dictionary of headers to be used as overrides. Where the value
of a header is set to None, this header will be filtered out.
"""
self._make_request('POST', '/header_overrides', data=headers)
def clear_header_overrides(self):
"""Clears any previously set header overrides."""
self._make_request('DELETE', '/header_overrides')
def get_header_overrides(self):
"""Gets any previously set header overrides"""
return self._make_request('GET', '/header_overrides')
def set_rewrite_rules(self, rewrite_rules):
"""Sets the rewrite rules.
Args:
rewrite_rules: A list of rewrite rules. Each rule is a sublist (or 2-tuple)
containing the pattern and replacement.
"""
self._make_request('POST', '/rewrite_rules', data=rewrite_rules)
def clear_rewrite_rules(self):
"""Clears any previously set rewrite rules."""
self._make_request('DELETE', '/rewrite_rules')
def get_rewrite_rules(self):
"""Gets any previously set rewrite rules"""
return self._make_request('GET', '/rewrite_rules')
def set_scopes(self, scopes):
"""Sets the scopes for the seleniumwire to log/modify request and response.
Args:
scopes: a regex string or list of regex string.
"""
self._make_request('POST', '/scopes', data=scopes)
def reset_scopes(self):
"""Reset scopes to let proxy capture all requests."""
self._make_request('DELETE', '/scopes')
def get_scopes(self):
"""Gets any previously set scopes"""
return self._make_request('GET', '/scopes')
def _make_request(self, command, path, data=None):
url = '{}{}'.format(ADMIN_PATH, path)
conn = http.client.HTTPConnection(self._proxy_addr, self._proxy_port)
args = {}
if data is not None:
args['body'] = json.dumps(data).encode('utf-8')
conn.request(command, url, **args)
try:
response = conn.getresponse()
if response.status != 200:
raise ProxyException('Proxy returned status code {} for {}'.format(response.status, url))
data = response.read()
try:
if response.getheader('Content-Type') == 'application/json':
data = json.loads(data.decode(encoding='utf-8'))
except (UnicodeDecodeError, ValueError):
pass
return data
except ProxyException:
raise
except Exception as e:
raise ProxyException('Unable to retrieve data from proxy: {}'.format(e))
finally:
try:
conn.close()
except ConnectionError:
pass
class ProxyException(Exception):
"""Raised when there is a problem communicating with the proxy server."""
| 36.279167 | 106 | 0.618008 |
4a1b88ce65de32150bf2e8ceef8c499e16db5c81
| 14,246 |
py
|
Python
|
mutations/__init__.py
|
jkobject/JKBio
|
b25c0e3fb28a5088aacfa487b7500b786762dd7d
|
[
"Apache-2.0"
] | 1 |
2021-04-06T18:04:51.000Z
|
2021-04-06T18:04:51.000Z
|
mutations/__init__.py
|
jkobject/JKBio
|
b25c0e3fb28a5088aacfa487b7500b786762dd7d
|
[
"Apache-2.0"
] | 3 |
2021-01-08T15:09:32.000Z
|
2021-02-02T13:23:37.000Z
|
mutations/__init__.py
|
jkobject/JKBio
|
b25c0e3fb28a5088aacfa487b7500b786762dd7d
|
[
"Apache-2.0"
] | 2 |
2021-01-08T15:01:45.000Z
|
2021-11-12T18:30:22.000Z
|
# Jeremie Kalfon
# for BroadInsitute
# in 2019
from __future__ import print_function
import pandas as pd
import numpy as np
from JKBio.utils import helper as h
import gzip
import seaborn as sns
from taigapy import TaigaClient
tc = TaigaClient()
def vcf_to_df(path, hasfilter=False, samples=['sample'], additional_cols=[]):
"""
    transforms a vcf file into a dataframe as best as it can
Args:
-----
path: str filepath to the vcf file
hasfilter: bool whether or not the vcf has a filter column
samples: list[str] colnames of the sample names.
    additional_cols: list[str] additional flag colnames to parse from the vcf, on top of the ones already looked for: 'DB', 'SOMATIC', 'GERMLINE', "OVERLAP", "IN_PON", "STR", "ReverseComplementedAlleles"
Returns:
--------
    a dataframe of the vcf
a dict associating each column with its description (gathered from the vcf header)
"""
uniqueargs = ['DB', 'SOMATIC', 'GERMLINE', "OVERLAP", "IN_PON",
"STR", "ReverseComplementedAlleles"] + additional_cols
def read_comments(f):
fields = {}
description = {}
for l in f:
l = l.decode("utf-8") if type(l) is not str else l
if l.startswith('##'):
if 'FORMAT' in l[:20]:
res = l.split('ID=')[1].split(',')[0]
desc = l.split('Description=')[1][:-2]
description.update({res: desc})
if 'INFO' in l[:20]:
res = l.split('ID=')[1].split(',')[0]
desc = l.split('Description=')[1][:-2]
description.update({res: desc})
fields.update({res: []})
else:
break
return fields, description
if path.endswith('.gz'):
with gzip.open(path, 'r') as f:
fields, description = read_comments(f)
else:
with open(path, 'r') as f:
fields, description = read_comments(f)
names = ['chr', 'pos', 'id', 'ref', 'alt', 'qual']
names += ['filter'] if hasfilter else ['strand']
names += ['data', 'format'] + samples
a = pd.read_csv(path, sep='\t', comment="#", header=None,
names=names, index_col=False)
print(description)
try:
for j, val in enumerate(a.data.str.split(';').values.tolist()):
res = dict([(v, True) if v in uniqueargs else tuple(
v.split('=')) for v in val])
for k in fields.keys():
fields[k].append(res.get(k, None))
except ValueError:
print(val)
raise ValueError('unknown field')
a = pd.concat([a.drop(columns='data'), pd.DataFrame(
data=fields, index=a.index)], axis=1)
for sample in samples:
sorting = a.format[0].split(':')
res = a[sample].str.split(':').values.tolist()
maxcols = max([len(v) for v in res])
if maxcols - len(sorting) > 0:
for i in range(maxcols - len(sorting)):
sorting.append(sorting[-1] + '_' + str(i + 1))
if len(samples) > 1:
sorting = [sample + '_' + v for v in sorting]
a = pd.concat([a.drop(columns=sample), pd.DataFrame(
data=res, columns=sorting, index=a.index)], axis=1)
return a.drop(columns='format'), description
def mafToMat(maf, boolify=False, freqcol='tumor_f', samplesCol="DepMap_ID", mutNameCol="Hugo_Symbol"):
"""
    turns a maf file into a matrix of mutations x samples (works with multiple sample files)
Args:
-----
maf: dataframe of the maf file
    samplesCol: str colname for samples
boolify: bool whether or not to convert the matrix into a boolean (mut/no mut)
freqcol: str colname where ref/alt frequencies are stored
mutNameCol: str colname where mutation names are stored
Returns:
--------
the dataframe matrix
"""
maf = maf.sort_values(by=mutNameCol)
samples = set(maf[samplesCol])
mut = pd.DataFrame(data=np.zeros((len(set(maf[mutNameCol])), 1)), columns=[
'fake'], index=set(maf[mutNameCol])).astype(float)
for i, val in enumerate(samples):
h.showcount(i, len(samples))
mut = mut.join(maf[maf[samplesCol] == val].drop_duplicates(
mutNameCol).set_index(mutNameCol)[freqcol].rename(val))
return mut.fillna(0).astype(bool if boolify else float).drop(columns=['fake'])
def mergeAnnotations(firstmaf, additionalmaf, Genome_Change="Genome_Change",
Start_position="Start_position", Chromosome="Chromosome", samplename="DepMap_ID",
useSecondForConflict=True, dry_run=False):
"""
    merges two maf files, taking care of duplicate samples and duplicate entries (works with multiple sample files)
Args:
-----
firstmaf: dataframe the first maf file
additionalmaf: dataframe the second maf file (need to contain same colnames)
Genome_Change: str colnames of the Genome_Change column
Start_position: str colnames of the Start_position column
Chromosome: str colnames of the Chromosome column
samplename: str colnames of the samplename column (for multiple samples, even if one, needs to have this column)
useSecondForConflict: bool if false use the first df as reference else use the second one
dry_run: if true, will just output conflict regions and not merge the dataframes
Returns:
-------
dataframe of the maf file if not dryrun, else an np array of the merge issues
"""
mutations = firstmaf.copy()
mutations['ind'] = mutations[samplename]+"_"+mutations[Genome_Change]
mutations['loci'] = mutations[samplename] + "_" + \
mutations[Chromosome] + "_" + mutations[Start_position].astype(str)
additionalmaf['ind'] = additionalmaf[samplename] + \
"_"+additionalmaf[Genome_Change]
additionalmaf['loci'] = additionalmaf[samplename] + "_" + \
additionalmaf[Chromosome] + "_" + \
additionalmaf[Start_position].astype(str)
inboth = set(additionalmaf['loci']) & set(mutations['loci'])
notineach = set(additionalmaf['ind']) ^ set(mutations['ind'])
submut = mutations[mutations.loci.isin(
inboth) & mutations.ind.isin(notineach)]
subother = additionalmaf[additionalmaf.loci.isin(
inboth) & additionalmaf.ind.isin(notineach)]
issues = None
if len(submut) > 0:
print("found " + str(len(submut)) + " nonmatching mutations")
issues = np.vstack([submut.sort_values(by='loci')[
Genome_Change].values, subother.sort_values(by='loci')[Genome_Change].values]).T
if dry_run:
print(issues)
if not dry_run:
if issues is not None:
if useSecondForConflict:
mutations = mutations[~mutations.ind.isin(set(submut.ind))]
else:
additionalmaf = additionalmaf[~additionalmaf.ind.isin(
set(subother.ind))]
mutations = mutations.append(additionalmaf[additionalmaf['ind'].isin(
set(additionalmaf['ind']) - set(mutations['ind']))])
return mutations.drop(columns=['loci', 'ind']).sort_values(by=[samplename, Chromosome, Start_position])
else:
return issues
def filterAllelicFraction(maf, loc=['CGA_WES_AC'], sep=':', frac=0.1):
"""
    filters a MAF file based on allelic fraction (works with multiple sample files)
Args:
-----
maf: dataframe of the maf file
loc: list[str] colnames with the ref:alt
    sep: str separator between ref:alt
frac: float min fraction
Returns:
-------
dataframe of the maf file
"""
muts = np.zeros((len(maf), 2))
for val in loc:
muts += np.array([[v[0], 0] if 'NA' in v else v for v in maf[val].fillna(
'0'+sep+'0').astype(str).str.split(sep).tolist()]).astype(int)
muts = muts[:, 0]/(muts[:, 0]+muts[:, 1])
return maf[muts >= frac]
def filterCoverage(maf, loc=['CGA_WES_AC'], sep=':', cov=4, altloc=0):
"""
filters a MAF file based on read coverage (works with multiple sample file)
Args:
-----
maf: dataframe of the maf file
loc: list[str] colnames with the ref:alt
    sep: str separator between ref:alt
cov: min coverage
altloc: 0 to filter on alt and 1 to filter on ref
Returns:
-------
dataframe of the maf file
"""
muts = np.zeros((len(maf), 2))
for val in loc:
muts += np.array([[v[0], 0] if 'NA' in v else v for v in maf[val].fillna(
'0'+sep+'0').astype(str).str.split(sep).tolist()]).astype(int)
return maf[muts[:, altloc] >= cov]
def manageGapsInSegments(segtocp, Chromosome='Chromosome', End="End", Start="Start", cyto=None):
"""
    extends the ends of segments in a segment file from GATK so as to remove all gaps over the genome (works with multiple sample files)
Args:
----
segtocp: dataframe of segments from GATK CN pipeline
Chromosome: str the value for the Chromosome columns
End: str the value for the End columns
Start: str the value for the Start columns
cyto: dataframe with chrom;end; columns giving the size of each chromosome (else puts last segment to 1000000000)
"""
prevchr = ''
prevend = 0
count = 0
l = []
segments = segtocp.copy()
le = len(segments)
for k, val in segments.iterrows():
h.showcount(count, le)
count += 1
if val[Chromosome] != prevchr: # we changed chromosome
# we extend the previous segment (last of the prev chrom) to.. way enough
if len(l) > 0:
l[-1][2] = 1000000000 if cyto is None else cyto[cyto['chrom']
== prevchr]['end'].values[-1]
# we extend the first segment to 0
l.append([val[Chromosome], 0, val[End]])
else:
if val[Start] > prevend + 1: # we have a gap in the same chrom
sizeofgap = val[Start] - prevend
# we add to the previous one half of the gap
l[-1][2] += int(sizeofgap /
2) if sizeofgap % 2 == 0 else int(sizeofgap / 2) + 1
# the rest to the other
l.append([val[Chromosome], val[Start] - int(sizeofgap / 2), val[End]])
elif val[Start] < prevend: # this should never happen
raise ValueError("start comes after end")
else:
l.append([val[Chromosome], val[Start], val[End]])
prevchr = val[Chromosome]
prevend = val[End]
# we extend the last one
l[-1][2] = 1000000000 if cyto is None else cyto[cyto['chrom']
== prevchr]['end'].values[-1]
segments[[Chromosome, Start, End]] = l
return segments
def toGeneMatrix(segments, gene_mapping, style='weighted', missingchrom=['Y']):
"""
    makes a geneXsample matrix from segment level copy number (works with multiple sample files)
Args:
----
style: str one of "weighted","mean","closest"
segments: dataframe of segments containing: [Chromosome, Segment_Mean, Chromosome, start, end] columns
gene_mapping: dataframe with symbol, ensembl_id columns for each gene
missingchrom: list[str] chromosomes not to look into
Returns:
-------
pd.dataframe: the matrix
"""
samples = list(set(segments.DepMap_ID))
data = np.zeros((len(samples), len(gene_mapping)))
for i, sample in enumerate(samples):
segs = segments[segments.DepMap_ID == sample][[
'Chromosome', 'Start', 'End', "Segment_Mean"]].values
hasmissing = set(missingchrom) - set(segs[:, 0])
j = 0
h.showcount(i, len(samples))
for k, gene in enumerate(gene_mapping[['Chromosome', 'start', 'end']].values):
if gene[0] in hasmissing:
data[i, k] = np.nan
continue
while gene[0] != segs[j][0] or gene[1] >= segs[j][2]:
#print("went beyong",gene, segs[j])
j += 1
# some genes are within other genes, we need to go back in the list of segment in that case
while gene[1] < segs[j][1]:
j -= 1
#print("decrease gene",gene)
# we are entirely within the segment
c = 1
if gene[2] <= segs[j][2]:
data[i, k] = segs[j][3]
else:
# how much of the gene is covered by the segment
coef = (segs[j][2] - gene[1]) / (gene[2] - gene[1])
# print('coef',coef)
val = segs[j][3] * coef if style == "weighted" else segs[j][3]
end = segs[j][2]
                # until the end of a segment goes beyond the end of the gene (say if we have X segments within the gene)
while end < gene[2]:
# pdb.set_trace()
j += 1
c += 1
nextend = segs[j][2] if segs[j][2] < gene[2] else gene[2]
# here, end (of prevsegment) is the next segment's start
ncoef = (nextend - end) / (gene[2] - gene[1])
# print('multi',gene, ncoef)
if style == "closest":
if ncoef > coef:
val = segs[j][3]
else:
# we switch it back (see line 894)
ncoef = coef
else:
val += segs[j][3] * ncoef if style == "weighted" else segs[j][3]
end = segs[j][2]
coef = ncoef
data[i, k] = val if style == "weighted" else val / c
return pd.DataFrame(data=data, index=samples, columns=[i['symbol'] + ' (' + str(i['ensembl_id']) + ')' for _, i in gene_mapping.iterrows()])
def checkAmountOfSegments(segmentcn, thresh=850, samplecol="DepMap_ID"):
"""
    if there are too many segments, something might be wrong (works with multiple sample files)
will compute the number of segments for each samples from a df of segments from RSEM
Args:
----
segmentcn: segment dataframe
thresh: max ok amount
"""
failed = []
celllines = set(segmentcn[samplecol].tolist())
amounts = []
for cellline in celllines:
val = segmentcn[segmentcn[samplecol] == cellline].shape[0]
amounts.append(val)
if val > thresh:
failed.append(cellline)
print(cellline, val)
sns.kdeplot(amounts)
return failed
def checkGeneChangeAccrossAll(genecn, thresh=0.2):
"""
    used to find poor quality genes in CN data (works with multiple sample files)
    given a df of gene x sample CN counts, computes how much change there is across samples for
    the same gene and returns the genes that are below the threshold
Args:
-----
genecn: gene cn data frame
    thresh: threshold in log-fold change across all of them
"""
return genecn.columns[genecn.var()<thresh].tolist()
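# --- Hedged usage sketch, not part of the original module ---
# A tiny synthetic MAF showing the coverage and allelic-fraction filters; the
# sample and gene names are made up, and the filtered frame would typically be
# passed on to mafToMat afterwards.
if __name__ == '__main__':
    toy_maf = pd.DataFrame({
        'DepMap_ID': ['ACH-000001', 'ACH-000001', 'ACH-000002'],
        'Hugo_Symbol': ['TP53', 'KRAS', 'TP53'],
        'tumor_f': [0.45, 0.05, 0.30],
        'CGA_WES_AC': ['40:60', '2:98', '30:70'],
    })
    filtered = filterAllelicFraction(filterCoverage(toy_maf), frac=0.1)
    print(filtered)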
| 37.989333 | 170 | 0.616945 |
4a1b89915f976f58166ac5862ff5ebe43c44a19d
| 3,365 |
py
|
Python
|
word_clustering.py
|
brannondorsey/GloVe-experiments
|
8dafbeaa20784282f8b339ce80b8fd4c81d3431c
|
[
"MIT"
] | 59 |
2017-10-29T23:12:07.000Z
|
2022-03-11T04:21:28.000Z
|
word_clustering.py
|
brannondorsey/GloVe-experiments
|
8dafbeaa20784282f8b339ce80b8fd4c81d3431c
|
[
"MIT"
] | 2 |
2018-07-06T12:47:59.000Z
|
2020-05-27T13:38:27.000Z
|
word_clustering.py
|
brannondorsey/GloVe-experiments
|
8dafbeaa20784282f8b339ce80b8fd4c81d3431c
|
[
"MIT"
] | 20 |
2017-10-30T22:16:57.000Z
|
2022-02-19T03:18:20.000Z
|
# Notes for extension of script:
# - Use readline() to interactively search for word groups
# - On a word miss, use L2 or cosine distance to select the nearest word vector
#   - This would require all 6B tokens to be loaded in RAM (but not clustered)
#   - Or use levenshtein distance, assuming the miss is just a slightly different spelling.
# - Provide an interface to perform basic arithmetic on words (king - man + woman = queen)
# Look at this result from 2014 English Wikipedia:
# 'islamic', 'militant', 'islam', 'radical', 'extremists', 'islamist', 'extremist', 'outlawed'
# 'war' - 'violence' + 'peace' = 'treaty' | 300d
from sklearn.cluster import KMeans
from numbers import Number
from pandas import DataFrame
import numpy as np
import os, sys, codecs, argparse, pprint, time
from utils import *
from word_arithmetic import *
def find_word_clusters(labels_array, cluster_labels):
cluster_to_words = autovivify_list()
for c, i in enumerate(cluster_labels):
cluster_to_words[i].append(labels_array[c])
return cluster_to_words
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('--vector_dim', '-d',
type=int,
choices=[50, 100, 200, 300],
default=100,
help='What vector GloVe vector dimension to use '
'(default: 100).')
parser.add_argument('--num_words', '-n',
type=int,
default=10000,
help='The number of lines to read from the GloVe '
'vector file (default: 10000).')
parser.add_argument('--num_clusters', '-k',
default=1000,
type=int,
help='Number of resulting word clusters. '
'The number of K in K-Means (default: 1000).')
parser.add_argument('--n_jobs', '-j',
type=int,
default=-1,
help='Number of cores to use when fitting K-Means. '
'-1 = all cores. '
'More cores = less time, more memory (default: -1).')
parser.add_argument('--glove_path', '-i',
default='data/glove',
help='GloVe vector file path (default: data/glove)')
return parser.parse_args()
if __name__ == '__main__':
args = parse_args()
filename = path = 'data/cache/{}'.format(get_cache_filename_from_args(args))
cluster_to_words = None
start_time = time.time()
vector_file = args.glove_path + '/' + 'glove.6B.' + str(args.vector_dim) + 'd.txt'
df, labels_array = build_word_vector_matrix(vector_file, args.num_words)
# if these are clustering parameters we've never seen before
if not os.path.isfile(filename):
print('No cached cluster found. Clustering using K-Means... ')
kmeans_model = KMeans(init='k-means++', n_clusters=args.num_clusters, n_jobs=args.n_jobs, n_init=10)
kmeans_model.fit(df)
cluster_labels = kmeans_model.labels_
# cluster_inertia = kmeans_model.inertia_
cluster_to_words = list(find_word_clusters(labels_array, cluster_labels).values())
# cache these clustering results
save_json(path, cluster_to_words)
print('Saved {} clusters to {}. Cached for later use.'.format(len(cluster_to_words), path))
# if this kmeans fitting has already been cached
else:
print('Cached K-Means cluster found, loading from disk.')
cluster_to_words = load_json(filename)
for i, words in enumerate(cluster_to_words):
print('CLUSTER {}: {}'.format(i + 1, ', '.join(words)))
if start_time != None:
print("--- {:.2f} seconds ---".format((time.time() - start_time)))
| 37.388889 | 102 | 0.692422 |
4a1b89fcd318a738f02a055985aa241526fe3abb
| 2,115 |
py
|
Python
|
cisco-ios-xr/ydk/models/cisco_ios_xr/SNMPv2_TC.py
|
CiscoDevNet/ydk-py
|
073731fea50694d0bc6cd8ebf10fec308dcc0aa9
|
[
"ECL-2.0",
"Apache-2.0"
] | 177 |
2016-03-15T17:03:51.000Z
|
2022-03-18T16:48:44.000Z
|
cisco-ios-xr/ydk/models/cisco_ios_xr/SNMPv2_TC.py
|
CiscoDevNet/ydk-py
|
073731fea50694d0bc6cd8ebf10fec308dcc0aa9
|
[
"ECL-2.0",
"Apache-2.0"
] | 18 |
2016-03-30T10:45:22.000Z
|
2020-07-14T16:28:13.000Z
|
cisco-ios-xr/ydk/models/cisco_ios_xr/SNMPv2_TC.py
|
CiscoDevNet/ydk-py
|
073731fea50694d0bc6cd8ebf10fec308dcc0aa9
|
[
"ECL-2.0",
"Apache-2.0"
] | 85 |
2016-03-16T20:38:57.000Z
|
2022-02-22T04:26:02.000Z
|
""" SNMPv2_TC
"""
import sys
from collections import OrderedDict
from ydk.types import Entity as _Entity_
from ydk.types import EntityPath, Identity, Enum, YType, YLeaf, YLeafList, YList, LeafDataList, Bits, Empty, Decimal64
from ydk.types import Entity, EntityPath, Identity, Enum, YType, YLeaf, YLeafList, YList, LeafDataList, Bits, Empty, Decimal64
from ydk.filters import YFilter
from ydk.errors import YError, YModelError
from ydk.errors.error_handler import handle_type_error as _handle_type_error
class RowStatus(Enum):
"""
RowStatus (Enum Class)
.. data:: active = 1
.. data:: notInService = 2
.. data:: notReady = 3
.. data:: createAndGo = 4
.. data:: createAndWait = 5
.. data:: destroy = 6
"""
active = Enum.YLeaf(1, "active")
notInService = Enum.YLeaf(2, "notInService")
notReady = Enum.YLeaf(3, "notReady")
createAndGo = Enum.YLeaf(4, "createAndGo")
createAndWait = Enum.YLeaf(5, "createAndWait")
destroy = Enum.YLeaf(6, "destroy")
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _SNMPv2_TC as meta
return meta._meta_table['RowStatus']
class StorageType(Enum):
"""
StorageType (Enum Class)
.. data:: other = 1
.. data:: volatile = 2
.. data:: nonVolatile = 3
.. data:: permanent = 4
.. data:: readOnly = 5
"""
other = Enum.YLeaf(1, "other")
volatile = Enum.YLeaf(2, "volatile")
nonVolatile = Enum.YLeaf(3, "nonVolatile")
permanent = Enum.YLeaf(4, "permanent")
readOnly = Enum.YLeaf(5, "readOnly")
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _SNMPv2_TC as meta
return meta._meta_table['StorageType']
class TruthValue(Enum):
"""
TruthValue (Enum Class)
.. data:: true = 1
.. data:: false = 2
"""
true = Enum.YLeaf(1, "true")
false = Enum.YLeaf(2, "false")
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _SNMPv2_TC as meta
return meta._meta_table['TruthValue']
| 19.40367 | 126 | 0.644917 |
4a1b8aa00e01755ab622116bb0978e9ffeeef1f2
| 9,548 |
py
|
Python
|
gitops_server/workers/deployer/deploy.py
|
uptick/gitops
|
f65057ef337a6fe34f3510c889c2284c1fdb7218
|
[
"BSD-2-Clause"
] | 6 |
2020-05-12T14:58:20.000Z
|
2022-02-01T09:40:55.000Z
|
gitops_server/workers/deployer/deploy.py
|
uptick/gitops
|
f65057ef337a6fe34f3510c889c2284c1fdb7218
|
[
"BSD-2-Clause"
] | 22 |
2019-09-04T13:33:12.000Z
|
2022-03-17T06:05:16.000Z
|
gitops_server/workers/deployer/deploy.py
|
uptick/gitops
|
f65057ef337a6fe34f3510c889c2284c1fdb7218
|
[
"BSD-2-Clause"
] | null | null | null |
import asyncio
import json
import logging
import os
import tempfile
import uuid
from typing import List, Optional
from gitops.common.app import App
from gitops_server import settings
from gitops_server.types import AppDefinitions, UpdateAppResult
from gitops_server.utils import get_repo_name_from_url, github, run, slack
from gitops_server.utils.git import temp_repo
BASE_REPO_DIR = "/var/gitops/repos"
ROLE_ARN = f"arn:aws:iam::{settings.ACCOUNT_ID}:role/GitopsAccess"
logger = logging.getLogger("gitops")
GITOPS_MAX_PARALLEL_DEPLOYS = os.environ.get("GITOPS_MAX_PARALLEL_DEPLOYS", "5")
async def post_init_summary(
source, username, added_apps, updated_apps, removed_apps, commit_message
):
deltas = ""
for typ, d in [("Adding", added_apps), ("Updating", updated_apps), ("Removing", removed_apps)]:
if d:
deltas += f"\n\t• {typ}: {', '.join(f'`{app}`' for app in sorted(d))}"
await slack.post(
f"A deployment from `{source}` has been initiated by *{username}* for cluster"
f" `{settings.CLUSTER_NAME}`, the following apps will be updated:{deltas}\nCommit Message:"
f" {commit_message}"
)
async def post_result(app: App, source: str, result: UpdateAppResult):
github_deployment_url = str(app.values.get("github/deployment_url", ""))
if result["exit_code"] != 0:
await github.update_deployment(
github_deployment_url,
status=github.STATUSES.failure,
description=f"Failed to deploy app. {result['output']}",
)
await slack.post(
f"Failed to deploy app `{result['app_name']}` from `{source}` for cluster"
f" `{settings.CLUSTER_NAME}`:\n>>>{result['output']}"
)
else:
await github.update_deployment(
github_deployment_url,
status=github.STATUSES.in_progress,
description="Helm installed app into cluster. Waiting for pods to deploy.",
)
async def post_result_summary(source: str, results: List[UpdateAppResult]):
n_success = sum([r["exit_code"] == 0 for r in results])
n_failed = sum([r["exit_code"] != 0 for r in results])
await slack.post(
f"Deployment from `{source}` for `{settings.CLUSTER_NAME}` results summary:\n"
f"\t• {n_success} succeeded\n"
f"\t• {n_failed} failed"
)
async def load_app_definitions(url: str, sha: str) -> AppDefinitions:
logger.info(f'Loading app definitions at "{sha}".')
async with temp_repo(url, sha=sha) as repo:
app_definitions = AppDefinitions(name=get_repo_name_from_url(url))
app_definitions.from_path(repo)
return app_definitions
class Deployer:
def __init__(
self,
pusher: str,
commit_message: str,
current_app_definitions: AppDefinitions,
previous_app_definitions: AppDefinitions,
skip_migrations: bool = False,
):
self.pusher = pusher
self.commit_message = commit_message
self.current_app_definitions = current_app_definitions
self.previous_app_definitions = previous_app_definitions
self.deploy_id = str(uuid.uuid4())
self.skip_migrations = skip_migrations
# Max parallel helm installs at a time
# Kube api may rate limit otherwise
self.semaphore = asyncio.Semaphore(int(GITOPS_MAX_PARALLEL_DEPLOYS))
@classmethod
async def from_push_event(cls, push_event):
url = push_event["repository"]["clone_url"]
pusher = push_event["pusher"]["name"]
commit_message = push_event.get("head_commit", {}).get("message")
skip_migrations = "--skip-migrations" in commit_message
logger.info(f'Initialising deployer for "{url}".')
before = push_event["before"]
after = push_event["after"]
current_app_definitions = await load_app_definitions(url, sha=after)
# TODO: Handle case where there is no previous commit.
previous_app_definitions = await load_app_definitions(url, sha=before)
return cls(
pusher,
commit_message,
current_app_definitions,
previous_app_definitions,
skip_migrations,
)
async def deploy(self):
added_apps, updated_apps, removed_apps = self.calculate_app_deltas()
if not (added_apps | updated_apps | removed_apps):
logger.info("No deltas; aborting.")
return
logger.info(
f"Running deployment for these deltas: A{list(added_apps)}, U{list(updated_apps)},"
f" R{list(removed_apps)}"
)
await post_init_summary(
self.current_app_definitions.name,
self.pusher,
added_apps=added_apps,
updated_apps=updated_apps,
removed_apps=removed_apps,
commit_message=self.commit_message,
)
update_results = await asyncio.gather(
*[
self.update_app_deployment(self.current_app_definitions.apps[app_name])
for app_name in (added_apps | updated_apps)
]
)
uninstall_results = await asyncio.gather(
*[
self.uninstall_app(self.previous_app_definitions.apps[app_name])
for app_name in removed_apps
]
)
await post_result_summary(
self.current_app_definitions.name, update_results + uninstall_results
)
async def uninstall_app(self, app: App) -> UpdateAppResult:
async with self.semaphore:
logger.info(f"Uninstalling app {app.name!r}.")
result = await run(
f"helm uninstall {app.name} -n {app.values['namespace']}", suppress_errors=True
)
update_result = UpdateAppResult(app_name=app.name, **result)
await post_result(app, self.current_app_definitions.name, update_result)
return update_result
async def update_app_deployment(self, app: App) -> Optional[UpdateAppResult]:
app.set_value("deployment.labels.gitops/deploy_id", self.deploy_id)
app.set_value("deployment.labels.gitops/status", github.STATUSES.in_progress)
if github_deployment_url := app.values.get("github/deployment_url"):
app.set_value("deployment.annotations.github/deployment_url", github_deployment_url)
async with self.semaphore:
logger.info(f"Deploying app {app.name!r}.")
if app.chart.type == "git":
async with temp_repo(
app.chart.git_repo_url, sha=app.chart.git_sha
) as chart_folder_path:
await run(f"cd {chart_folder_path}; helm dependency build")
with tempfile.NamedTemporaryFile(suffix=".yml") as cfg:
cfg.write(json.dumps(app.values).encode())
cfg.flush()
os.fsync(cfg.fileno())
result = await run(
"helm upgrade"
" --install"
" --timeout=600s"
f"{' --set skip_migrations=true' if self.skip_migrations else ''}"
f" -f {cfg.name}"
f" --namespace={app.values['namespace']}"
f" {app.name}"
f" {chart_folder_path}",
suppress_errors=True,
)
elif app.chart.type == "helm":
with tempfile.NamedTemporaryFile(suffix=".yml") as cfg:
cfg.write(json.dumps(app.values).encode())
cfg.flush()
os.fsync(cfg.fileno())
chart_version_arguments = (
f" --version={app.chart.version}" if app.chart.version else ""
)
await run(f"helm repo add {app.chart.helm_repo} {app.chart.helm_repo_url}")
result = await run(
"helm upgrade"
" --install"
" --timeout=600s"
f"{' --set skip_migrations=true' if self.skip_migrations else ''}"
f" -f {cfg.name}"
f" --namespace={app.values['namespace']}"
f" {app.name}"
f" {app.chart.helm_chart} {chart_version_arguments}",
suppress_errors=True,
)
else:
logger.warning("Local is not implemented yet")
return None
update_result = UpdateAppResult(app_name=app.name, **result)
await post_result(app, self.current_app_definitions.name, update_result)
return update_result
def calculate_app_deltas(self):
cur = self.current_app_definitions.apps.keys()
prev = self.previous_app_definitions.apps.keys()
added = cur - prev
common = cur & prev
removed = prev - cur
updated = set()
for app_name in common:
cur_app = self.current_app_definitions.apps[app_name]
prev_app = self.previous_app_definitions.apps[app_name]
if cur_app != prev_app:
if cur_app.is_inactive():
logger.info(f"Skipping changes in app {app_name!r}: marked inactive.")
continue
updated.add(app_name)
return added, updated, removed
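# --- Illustrative only, not part of the original module ---
# The subset of a GitHub push-event payload that Deployer.from_push_event reads;
# the repository URL and commit SHAs below are made up.
_EXAMPLE_PUSH_EVENT = {
    "repository": {"clone_url": "https://github.com/example-org/cluster-apps.git"},
    "pusher": {"name": "alice"},
    "head_commit": {"message": "Bump api image tag"},
    "before": "0000000000000000000000000000000000000000",
    "after": "1111111111111111111111111111111111111111",
}
# e.g. deployer = await Deployer.from_push_event(_EXAMPLE_PUSH_EVENT); await deployer.deploy()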
| 41.513043 | 99 | 0.593632 |
4a1b8ced5d17906823e74441ca4fa96f1e9b02f0
| 4,344 |
py
|
Python
|
2020-02/three_way/train.py
|
kimwooglae/keraspp
|
0c052c0e1cd693c852122bdeee743c841c369f85
|
[
"MIT"
] | null | null | null |
2020-02/three_way/train.py
|
kimwooglae/keraspp
|
0c052c0e1cd693c852122bdeee743c841c369f85
|
[
"MIT"
] | null | null | null |
2020-02/three_way/train.py
|
kimwooglae/keraspp
|
0c052c0e1cd693c852122bdeee743c841c369f85
|
[
"MIT"
] | null | null | null |
# USAGE
# python train.py --model sequential --plot output/sequential.png
# python train.py --model functional --plot output/functional.png
# python train.py --model class --plot output/class.png
# set the matplotlib backend so figures can be saved in the background
import matplotlib
matplotlib.use("Agg")
# there seems to be an issue with TensorFlow 2.0 throwing non-critical
# warnings regarding gradients when using the model sub-classing
# feature -- I found that by setting the logging level I can suppress
# the warnings from showing up (likely won't be required in future
# releases of TensorFlow)
import logging
logging.getLogger("tensorflow").setLevel(logging.CRITICAL)
# import the necessary packages
from model import MiniVGGNetModel
from model import minigooglenet_functional
from model import shallownet_sequential
from sklearn.preprocessing import LabelBinarizer
from sklearn.metrics import classification_report
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.optimizers import SGD
from tensorflow.keras.datasets import cifar10
import matplotlib.pyplot as plt
import numpy as np
import argparse
# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-m", "--model", type=str, default="sequential",
choices=["sequential", "functional", "class"],
help="type of model architecture")
ap.add_argument("-p", "--plot", type=str, required=True,
help="path to output plot file")
args = vars(ap.parse_args())
# initialize the initial learning rate, batch size, and number of
# epochs to train for
INIT_LR = 1e-2
BATCH_SIZE = 128
NUM_EPOCHS = 60
# initialize the label names for the CIFAR-10 dataset
labelNames = ["airplane", "automobile", "bird", "cat", "deer", "dog",
"frog", "horse", "ship", "truck"]
# load the CIFAR-10 dataset
print("[INFO] loading CIFAR-10 dataset...")
((trainX, trainY), (testX, testY)) = cifar10.load_data()
# scale the data to the range [0, 1]
trainX = trainX.astype("float32") / 255.0
testX = testX.astype("float32") / 255.0
# convert the labels from integers to vectors
lb = LabelBinarizer()
trainY = lb.fit_transform(trainY)
testY = lb.transform(testY)
# construct the image generator for data augmentation
aug = ImageDataGenerator(rotation_range=18, zoom_range=0.15,
width_shift_range=0.2, height_shift_range=0.2, shear_range=0.15,
horizontal_flip=True, fill_mode="nearest")
# check to see if we are using a Keras Sequential model
if args["model"] == "sequential":
# instantiate a Keras Sequential model
print("[INFO] using sequential model...")
model = shallownet_sequential(32, 32, 3, len(labelNames))
# check to see if we are using a Keras Functional model
elif args["model"] == "functional":
# instantiate a Keras Functional model
print("[INFO] using functional model...")
model = minigooglenet_functional(32, 32, 3, len(labelNames))
# check to see if we are using a Keras Model class
elif args["model"] == "class":
# instantiate a Keras Model sub-class model
print("[INFO] using model sub-classing...")
model = MiniVGGNetModel(len(labelNames))
# initialize the optimizer and compile the model
opt = SGD(lr=INIT_LR, momentum=0.9, decay=INIT_LR / NUM_EPOCHS)
print("[INFO] training network...")
model.compile(loss="categorical_crossentropy", optimizer=opt,
metrics=["accuracy"])
# train the network
H = model.fit_generator(
aug.flow(trainX, trainY, batch_size=BATCH_SIZE),
validation_data=(testX, testY),
steps_per_epoch=trainX.shape[0] // BATCH_SIZE,
epochs=NUM_EPOCHS,
verbose=1)
# evaluate the network
print("[INFO] evaluating network...")
predictions = model.predict(testX, batch_size=BATCH_SIZE)
print(classification_report(testY.argmax(axis=1),
predictions.argmax(axis=1), target_names=labelNames))
# determine the number of epochs and then construct the plot title
N = np.arange(0, NUM_EPOCHS)
title = "Training Loss and Accuracy on CIFAR-10 ({})".format(
args["model"])
# plot the training loss and accuracy
plt.style.use("ggplot")
plt.figure()
plt.plot(N, H.history["loss"], label="train_loss")
plt.plot(N, H.history["val_loss"], label="val_loss")
plt.plot(N, H.history["accuracy"], label="train_acc")
plt.plot(N, H.history["val_accuracy"], label="val_acc")
plt.title(title)
plt.xlabel("Epoch #")
plt.ylabel("Loss/Accuracy")
plt.legend()
plt.savefig(args["plot"])
| 35.317073 | 70 | 0.755525 |
4a1b8d3c9b87305626c844701a94d02473bf4082
| 1,653 |
py
|
Python
|
scripts/interf.py
|
mkarim2017/insarzd
|
e7d05f836e7ca044166e38bad549629ed00d71f1
|
[
"ECL-2.0",
"Apache-2.0"
] | 28 |
2019-10-04T01:18:29.000Z
|
2022-02-15T11:18:18.000Z
|
scripts/interf.py
|
mkarim2017/insarzd
|
e7d05f836e7ca044166e38bad549629ed00d71f1
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
scripts/interf.py
|
mkarim2017/insarzd
|
e7d05f836e7ca044166e38bad549629ed00d71f1
|
[
"ECL-2.0",
"Apache-2.0"
] | 11 |
2019-10-04T08:36:54.000Z
|
2021-06-21T08:47:28.000Z
|
#!/usr/bin/env python3
import os
import sys
import shutil
import argparse
import isce
import isceobj
from crlpac import getWidth
from crlpac import getLength
from crlpac import runCmd
from crlpac import create_xml
def cmdLineParse():
'''
Command line parser.
'''
parser = argparse.ArgumentParser( description='interferometry')
parser.add_argument('-m', '--master', dest='master', type=str, required=True,
help = 'master SLC')
parser.add_argument('-s', '--slave', dest='slave', type=str, required=True,
help = 'resampled slave SLC')
parser.add_argument('-i', '--intf', dest='intf', type=str, required=True,
help = '(output) interferogram')
parser.add_argument('-a', '--amp', dest='amp', type=str, required=True,
help = '(output) amplitudes of master and slave SLCs')
if len(sys.argv) <= 1:
print('')
parser.print_help()
sys.exit(1)
else:
return parser.parse_args()
if __name__ == '__main__':
inps = cmdLineParse()
#get information
masterWidth = getWidth(inps.master + '.xml')
masterLength = getLength(inps.master + '.xml')
#run interf
cmd = "$INSAR_ZERODOP_BIN/interf {} {} {} {} {}".format(inps.master, inps.slave, inps.intf, inps.amp, masterWidth)
#print("{}".format(cmd))
runCmd(cmd)
#get xml file for interferogram
create_xml(inps.intf, masterWidth, masterLength, 'int')
#get xml file for amplitude image
create_xml(inps.amp, masterWidth, masterLength, 'amp')
#./interf.py -m 20130927.slc -s 20141211.slc -i 20130927-20141211.int -a 20130927-20141211.amp
| 25.828125 | 118 | 0.646098 |
4a1b8e3ee29b5aa7eee417fe30560a6ac7150289
| 13,324 |
py
|
Python
|
manager/test/test_workers.py
|
mrandrey228/Naumachia
|
93cacfd44eea0f004c630fb37de2f4529f9699ad
|
[
"MIT"
] | 79 |
2017-07-10T16:19:53.000Z
|
2022-03-19T17:57:40.000Z
|
manager/test/test_workers.py
|
mrandrey228/Naumachia
|
93cacfd44eea0f004c630fb37de2f4529f9699ad
|
[
"MIT"
] | 33 |
2018-01-11T06:18:02.000Z
|
2022-02-26T05:22:48.000Z
|
manager/test/test_workers.py
|
mrandrey228/Naumachia
|
93cacfd44eea0f004c630fb37de2f4529f9699ad
|
[
"MIT"
] | 11 |
2018-02-10T12:20:55.000Z
|
2021-12-04T23:46:41.000Z
|
from .common import ensure_redis_is_online
from app.db import DB, Address
from functools import wraps
from os import path
from redis import Redis
import app.manager as sut
import unittest
import weakref
test_dir = path.dirname(path.realpath(__file__))
sut.CHALLENGE_FODLER = test_dir
class Shim:
"""A simple shim class to allow test isolation
Attributes:
        real: An object or module this Shim will proxy access to
        overrides (dict[str, object]): A dictionary of attributes overridden by the Shim and the original values
        active (bool): If true, this Shim will forward its sets and gets
Args:
real: Sets the ``real`` attribute
"""
def __init__(self, real):
super(Shim, self).__setattr__('real', real)
super(Shim, self).__setattr__('overrides', dict())
super(Shim, self).__setattr__('active', False)
def __getattr__(self, attr):
try:
return super(Shim, self).__getattribute__(attr)
except AttributeError:
if not self.active:
raise
return getattr(self.real, attr)
def __setattr__(self, attr, val):
if self.active:
self.overrides[attr] = getattr(self.real, attr)
setattr(self.real, attr, val)
else:
super(Shim, self).__setattr__(attr, val)
def __enter__(self):
super(Shim, self).__setattr__('active', True)
return self
def __exit__(self, *args, **kwargs):
super(Shim, self).__setattr__('active', False)
for attr, val in self.overrides.items():
setattr(self.real, attr, val)
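# Minimal usage sketch for Shim (hypothetical module and attribute names):
#
#   with Shim(some_module) as shim:
#       shim.do_work = fake_do_work   # the set is forwarded onto some_module
#       run_code_under_test()
#   # on __exit__ the original some_module.do_work is restored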
def assertCalled(test, times=None):
def decorator(fn):
fn._called = 0
def finalize(fn):
if times is not None:
test.assertEqual(fn._called, times, "Function '{}' was not called the correct number of times".format(fn.__name__))
else:
test.assertTrue(fn._called, "Function '{}' was never called".format(fn.__name__))
weakref.finalize(fn, finalize, fn)
@wraps(fn)
def wrapper(*args, **kwargs):
fn._called += 1
fn(*args, **kwargs)
return wrapper
return decorator
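# Usage sketch for assertCalled inside a test method (hypothetical names);
# the finalize hook registered above asserts on the call count afterwards:
#
#   @assertCalled(self, times=2)      # `self` is the running TestCase
#   def fake_handler(*args, **kwargs):
#       pass
#   worker.handler = fake_handler
#   worker.run(); worker.run()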
def assertNotCalled(test, name=None):
def do_not_call(*args, **kwargs):
if name:
test.assertTrue(False, "Function {} should not be called".format(name))
else:
test.assertTrue(False, "Function should not be called")
return do_not_call
class OnlineWorkerTests(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.container_token = ensure_redis_is_online()
cls.redis = Redis(host='localhost', port=6379, db=0)
DB.redis = cls.redis
def setUp(self):
self.redis.flushall()
self.vpn = DB.Vpn('abc')
self.vpn.update(
veth = "fakeveth",
veth_state = "down",
chal = DB.Challenge('chalfoo')
)
self.vpn.chal.files.extend(["test-compose.yml"])
self.user = DB.User('auserid')
self.user.update(
vlan = 4000,
cn = 'ausername',
status = "active"
)
self.addr = Address('127.0.0.1', 42)
self.connection = DB.Connection(self.addr)
self.connection.update(
addr = self.addr,
alive = True,
user = self.user,
vpn = self.vpn
)
self.cluster = None
self.user.connections.add(self.connection)
self.vpn.links[4000] = 'bridged'
def test_veth_worker_run(self):
with Shim(sut) as manager:
@assertCalled(self, 1)
def ensure_vlan_up(vpn, verbose):
self.assertEqual(self.vpn.id, vpn.id)
manager.ensure_veth_up = ensure_vlan_up
uut = manager.VethWorker(DB.Vpn.veth.key(self.vpn), 'set')
uut.run()
uut = manager.VethWorker(DB.Vpn.veth.key(self.vpn), 'del')
uut.run()
def test_vlan_worker_run(self):
with Shim(sut) as manager:
with Shim(manager.VlanWorker) as Worker:
@assertCalled(self, 3)
def ensure_veth_up(vpn, verbose=False):
self.assertEqual(self.vpn.id, vpn.id)
manager.ensure_veth_up = ensure_veth_up
@assertCalled(self, 1)
def bring_up_link(worker, vpn, user):
self.assertEqual(self.vpn.id, vpn.id)
self.assertEqual(self.user.id, user.id)
Worker.bring_up_link = bring_up_link
@assertCalled(self, 2)
def bridge_cluster(worker, vpn, user):
self.assertEqual(self.vpn.id, vpn.id)
self.assertEqual(self.user.id, user.id)
Worker.bridge_cluster = bridge_cluster
self.vpn.links[self.user.vlan] = 'down'
uut = manager.VlanWorker(DB.Connection.alive.key(self.connection), 'set')
uut.run()
Worker.bring_up_link = assertNotCalled(self, 'bring_up_link')
self.vpn.links[self.user.vlan] = 'up'
uut.run()
Worker.bridge_cluster = assertNotCalled(self, 'bridge_cluster')
self.vpn.links[self.user.vlan] = 'bridged'
uut.run()
manager.ensure_veth_up = assertNotCalled(self, 'ensure_veth_up')
uut = manager.VlanWorker(DB.Connection.alive.key(self.connection), 'del')
uut.run()
def test_cluster_worker_run(self):
with Shim(sut.ClusterWorker) as Worker:
@assertCalled(self, 1)
def ensure_cluster_up(worker, user, vpn, cluster, connection):
self.assertEqual(self.vpn.id, vpn.id)
self.assertEqual(self.user.id, user.id)
self.assertEqual(DB.Cluster(user, vpn).id, cluster.id)
self.assertEqual(self.connection.id, connection.id)
Worker.ensure_cluster_up = ensure_cluster_up
Worker.ensure_cluster_stopped = assertNotCalled(self, 'ensure_cluster_stopped')
uut = sut.ClusterWorker(DB.Connection.alive.key(self.connection), 'set')
uut.run()
Worker.ensure_cluster_up = assertNotCalled(self, 'ensure_cluster_up')
self.connection.alive = False
uut = sut.ClusterWorker(DB.Connection.alive.key(self.connection), 'set')
uut.run()
self.assertFalse(self.connection.exists())
self.connection.update(
addr = self.addr,
alive = False,
user = self.user,
vpn = self.vpn
)
uut = sut.ClusterWorker(DB.Connection.alive.key(self.connection), 'del')
uut.run()
@assertCalled(self, 1)
def ensure_cluster_stopped(worker, user, vpn, cluster):
self.assertEqual(self.vpn.id, vpn.id)
self.assertEqual(self.user.id, user.id)
self.assertEqual(DB.Cluster(user, vpn).id, cluster.id)
Worker.ensure_cluster_stopped = ensure_cluster_stopped
self.user.status = 'disconnected'
uut = sut.ClusterWorker(DB.Connection.alive.key(self.connection), 'set')
uut.run()
self.assertFalse(self.connection.exists())
def test_ensure_veth_up(self):
with Shim(sut) as manager:
class FakeCmd:
@assertCalled(self, 1)
def __init__(this, veth):
self.assertEqual(veth, self.vpn.veth)
@assertCalled(self, 1)
def run(self):
pass
manager.LinkUpCmd = FakeCmd
manager.ensure_veth_up(self.vpn)
manager.LinkUpCmd = assertNotCalled(self, "LinkUpCmd")
self.assertEqual(self.vpn.veth_state, 'up')
manager.ensure_veth_up(self.vpn)
def test_vlan_worker_bring_up_link(self):
with Shim(sut) as manager:
class FakeCmd:
ADD = object()
@assertCalled(self, 1)
def __init__(this, action, veth, vlan):
self.assertEqual(veth, self.vpn.veth)
self.assertEqual(vlan, self.user.vlan)
@assertCalled(self, 1)
def run(self):
pass
manager.VlanCmd = FakeCmd
worker = manager.VlanWorker('foo', 'bar')
worker.bring_up_link(self.vpn, self.user)
self.assertEquals(self.vpn.links[self.user.vlan], 'up')
def test_vlan_worker_bridge_cluster(self):
with Shim(sut) as manager:
class FakeCmd:
ADDIF = object()
@assertCalled(self, 1)
def __init__(this, action, bridge_id, vlan_if):
pass
@assertCalled(self, 1)
def run(self):
pass
manager.BrctlCmd = FakeCmd
manager.get_bridge_id = lambda cluster_id: "bogus" # The real function requires docker
cluster = DB.Cluster(self.user, self.vpn)
self.vpn.links[self.user.vlan] = 'up'
worker = manager.VlanWorker('foo', 'bar')
worker.bridge_cluster(self.vpn, self.user)
self.assertNotEqual(self.vpn.links[self.user.vlan], 'bridged')
cluster.status = "up"
worker.bridge_cluster(self.vpn, self.user)
self.assertEqual(self.vpn.links[self.user.vlan], 'bridged')
def test_cluster_worker_ensure_cluster_up(self):
with Shim(sut) as manager:
with Shim(sut.ClusterWorker) as Worker:
class FakeCmd:
UP = object()
@assertCalled(self, 1)
def __init__(this, action, project, files):
pass
@assertCalled(self, 1)
def run(self):
pass
@assertCalled(self, 1)
def bridge_link_if_ready(this, user, vpn, cluster):
self.assertEqual(self.vpn.id, vpn.id)
self.assertEqual(self.user.id, user.id)
self.assertEqual(DB.Cluster(user, vpn).id, cluster.id)
Worker.bridge_link_if_ready = bridge_link_if_ready
manager.ComposeCmd = FakeCmd
cluster = DB.Cluster(self.user, self.vpn)
cluster.delete()
worker = manager.ClusterWorker('foo', 'bar')
worker.ensure_cluster_up(self.user, self.vpn, cluster, self.connection)
worker.ensure_cluster_up(self.user, self.vpn, cluster, self.connection)
self.assertEqual(cluster.status, 'up')
def test_cluster_worker_ensure_cluster_stopped(self):
with Shim(sut) as manager:
class FakeCmd:
STOP = object()
@assertCalled(self, 1)
def __init__(this, action, project, files):
pass
@assertCalled(self, 1)
def run(self):
pass
manager.ComposeCmd = FakeCmd
cluster = DB.Cluster(self.user, self.vpn)
cluster.delete()
worker = manager.ClusterWorker('foo', 'bar')
worker.ensure_cluster_stopped(self.user, self.vpn, cluster)
cluster.status = 'up'
worker.ensure_cluster_stopped(self.user, self.vpn, cluster)
self.assertEqual(cluster.status, 'stopped')
worker.ensure_cluster_stopped(self.user, self.vpn, cluster)
self.assertEqual(cluster.status, 'stopped')
def test_cluster_worker_bridge_link_if_ready(self):
with Shim(sut) as manager:
class FakeBrctlCmd:
ADDIF = object()
@assertCalled(self, 1)
def __init__(this, action, bridge_id, vlan_if):
pass
@assertCalled(self, 1)
def run(self):
pass
manager.BrctlCmd = FakeBrctlCmd
class FakeIpFlushCmd:
@assertCalled(self)
def __init__(this, bridge_id):
pass
@assertCalled(self)
def run(self):
pass
manager.IpFlushCmd = FakeIpFlushCmd
manager.get_bridge_id = lambda cluster_id: "bogus" # The real function requires docker
cluster = DB.Cluster(self.user, self.vpn)
cluster.delete()
self.vpn.links[self.user.vlan] = 'down'
worker = manager.ClusterWorker('foo', 'bar')
worker.bridge_link_if_ready(self.user, self.vpn, cluster)
self.assertEqual(self.vpn.links[self.user.vlan], 'down')
self.vpn.links[self.user.vlan] = 'up'
worker.bridge_link_if_ready(self.user, self.vpn, cluster)
self.assertEqual(self.vpn.links[self.user.vlan], 'bridged')
worker.bridge_link_if_ready(self.user, self.vpn, cluster)
self.assertEqual(self.vpn.links[self.user.vlan], 'bridged')
| 33.062035 | 131 | 0.561693 |
4a1b8e7ed603d50aa125e307dacad440773900f8
| 2,226 |
py
|
Python
|
clitool/_unicodecsv.py
|
skitazaki/python-clitool
|
4971f8d093d51c6fd0e6cc536bbb597f78b570ab
|
[
"Apache-2.0"
] | 1 |
2016-10-24T14:19:40.000Z
|
2016-10-24T14:19:40.000Z
|
clitool/_unicodecsv.py
|
skitazaki/python-clitool
|
4971f8d093d51c6fd0e6cc536bbb597f78b570ab
|
[
"Apache-2.0"
] | null | null | null |
clitool/_unicodecsv.py
|
skitazaki/python-clitool
|
4971f8d093d51c6fd0e6cc536bbb597f78b570ab
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
This file is for Python 2.7.
And all classes are from `Python Official Document`_.
.. _`Python Official Document`: http://docs.python.org/library/csv.html
More sophisticated implementation is available on "csvkit_" module.
.. _csvkit: http://pypi.python.org/pypi/csvkit
"""
import codecs
import csv
from six import PY3
if PY3:
import io as cStringIO
else:
import cStringIO
class UTF8Recoder:
"""
Iterator that reads an encoded stream and reencodes the input to UTF-8
"""
def __init__(self, f, encoding):
self.reader = codecs.getreader(encoding)(f)
def __iter__(self):
return self
def next(self):
return self.reader.next().encode("utf-8")
class UnicodeReader:
"""
A CSV reader which will iterate over lines in the CSV file "f",
which is encoded in the given encoding.
"""
def __init__(self, f, dialect=csv.excel, encoding="utf-8", **kwds):
f = UTF8Recoder(f, encoding)
self.reader = csv.reader(f, dialect=dialect, **kwds)
def next(self):
row = self.reader.next()
return [unicode(s, "utf-8") for s in row]
def __iter__(self):
return self
class UnicodeWriter:
"""
A CSV writer which will write rows to CSV file "f",
which is encoded in the given encoding.
"""
def __init__(self, f, dialect=csv.excel, encoding="utf-8", **kwds):
# Redirect output to a queue
self.queue = cStringIO.StringIO()
self.writer = csv.writer(self.queue, dialect=dialect, **kwds)
self.stream = f
self.encoder = codecs.getincrementalencoder(encoding)()
def writerow(self, row):
self.writer.writerow([s.encode("utf-8") for s in row])
# Fetch UTF-8 output from the queue ...
data = self.queue.getvalue()
data = data.decode("utf-8")
# ... and reencode it into the target encoding
data = self.encoder.encode(data)
# write to the target stream
self.stream.write(data)
# empty queue
self.queue.truncate(0)
def writerows(self, rows):
for row in rows:
self.writerow(row)
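# Usage sketch (Python 2, hypothetical file name):
#
#   with open("out.csv", "wb") as f:
#       UnicodeWriter(f, encoding="utf-8").writerow([u"name", u"caf\xe9"])
#   with open("out.csv", "rb") as f:
#       rows = list(UnicodeReader(f, encoding="utf-8"))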
# vim: set et ts=4 sw=4 cindent fileencoding=utf-8 :
| 25.883721 | 74 | 0.62938 |
4a1b8edb8e85ba2d17d692f0bbeae2cde32aae2c
| 12 |
py
|
Python
|
python/testData/console/ipython/psi/shell2.py
|
jnthn/intellij-community
|
8fa7c8a3ace62400c838e0d5926a7be106aa8557
|
[
"Apache-2.0"
] | 2 |
2019-04-28T07:48:50.000Z
|
2020-12-11T14:18:08.000Z
|
python/testData/console/ipython/psi/shell2.py
|
Cyril-lamirand/intellij-community
|
60ab6c61b82fc761dd68363eca7d9d69663cfa39
|
[
"Apache-2.0"
] | 173 |
2018-07-05T13:59:39.000Z
|
2018-08-09T01:12:03.000Z
|
python/testData/console/ipython/psi/shell2.py
|
Cyril-lamirand/intellij-community
|
60ab6c61b82fc761dd68363eca7d9d69663cfa39
|
[
"Apache-2.0"
] | 2 |
2020-03-15T08:57:37.000Z
|
2020-04-07T04:48:14.000Z
|
!cd /var/etc
| 12 | 12 | 0.666667 |
4a1b908f38eb338741885ed85eb1dc51241590d5
| 22,293 |
py
|
Python
|
jdma_control/backends/ElasticTapeBackend.py
|
cedadev/django-jdma_control
|
9223aa2f8773e78c6f78197ae89289452d1285c9
|
[
"BSD-3-Clause"
] | null | null | null |
jdma_control/backends/ElasticTapeBackend.py
|
cedadev/django-jdma_control
|
9223aa2f8773e78c6f78197ae89289452d1285c9
|
[
"BSD-3-Clause"
] | 23 |
2018-03-21T14:36:24.000Z
|
2020-04-17T07:58:33.000Z
|
jdma_control/backends/ElasticTapeBackend.py
|
cedadev/django-jdma_control
|
9223aa2f8773e78c6f78197ae89289452d1285c9
|
[
"BSD-3-Clause"
] | null | null | null |
"""Class for a JASMIN Data Migration App backend which targets Elastic Tape.
Note that this is the simplified version, which (simply) uses functions from
the Elastic Tape client to GET and PUT the data.
Transport is handled by the script ET_transfer_mp, which is a version of the
et_transfer_mp script which runs on et1.ceda.ac.uk."""
import os
from django.db.models import Q
import requests
from bs4 import BeautifulSoup
from time import sleep
import subprocess
import logging
from jdma_control.backends.Backend import Backend
from jdma_control.backends.ConnectionPool import ConnectionPool
from jdma_control.scripts.common import get_ip_address
from jdma_control.scripts.config import read_backend_config
# Import elastic_tape client library
import elastic_tape.client as ET_client
import jdma_site.settings as settings
# create the connection pool - these are needed for the get transfers, as each
# transfer thread requires a connection that is kept up
et_connection_pool = ConnectionPool()
class ETException(Exception):
pass
def get_completed_puts(backend_object):
"""Get all the completed puts for the Elastic Tape"""
# avoiding a circular dependency
from jdma_control.models import MigrationRequest, Migration, StorageQuota
# get the storage id
storage_id = StorageQuota.get_storage_index("elastictape")
# list of completed PUTs to return
completed_PUTs = []
ET_Settings = backend_object.ET_Settings
# now loop over the PUT requests
put_reqs = MigrationRequest.objects.filter(
(Q(request_type=MigrationRequest.PUT)
| Q(request_type=MigrationRequest.MIGRATE))
& Q(stage=MigrationRequest.PUTTING)
& Q(migration__stage=Migration.PUTTING)
& Q(migration__storage__storage=storage_id)
)
for pr in put_reqs:
# form the url and get the response, parse the document using bs4
holdings_url = "{}?batch={}".format(
ET_Settings["ET_INPUT_BATCH_SUMMARY_URL"],
pr.migration.external_id
)
sleep(0.1) # 100 ms delay to avoid overloading the server
r = requests.get(holdings_url)
if r.status_code == 200:
bs = BeautifulSoup(r.content, "xml")
else:
# log error rather than raising exception
logging.error("Error in ET monitor:{} is unreachable".format(str(holdings_url)))
continue
# get the 2nd table - 1st is just a heading table
table = bs.find_all("table")[1]
if len(table) == 0:
continue
# get the first row
rows = table.find_all("tr")
if len(rows) < 2:
continue
row_1 = table.find_all("tr")[1]
# the status is the first column
cols = row_1.find_all("td")
if len(cols) < 3:
continue
transfer_id = cols[0].get_text()
status = cols[0].get_text()
# check for completion
if status in ["SYNCED", "TAPED"]:
completed_PUTs.append(pr.migration.external_id)
return completed_PUTs
def get_completed_gets(backend_object):
# avoiding a circular dependency
from jdma_control.models import MigrationRequest, StorageQuota, MigrationArchive
# get the storage id
storage_id = StorageQuota.get_storage_index("elastictape")
ET_Settings = backend_object.ET_Settings
# list of completed GETs to return
completed_GETs = []
# now loop over the GET requests
get_reqs = MigrationRequest.objects.filter(
(Q(stage=MigrationRequest.GETTING)
| Q(stage=MigrationRequest.VERIFY_GETTING))
& Q(migration__storage__storage=storage_id)
)
#
backend = ElasticTapeBackend()
for gr in get_reqs:
# get a list of synced files for this workspace and user and batch
retrieval_url = "{}?rr_id={};workspace={}".format(
ET_Settings["ET_RETRIEVAL_URL"],
gr.transfer_id,
gr.migration.workspace.workspace,
)
# use requests to fetch the URL
sleep(0.1) # 100 ms delay to avoid overloading the server
r = requests.get(retrieval_url)
if r.status_code == 200:
bs = BeautifulSoup(r.content, "xml")
else:
logging.error("Error in ET monitor:{} is unreachable".format(str(holdings_url)))
continue
# get the 2nd table from beautiful soup
table = bs.find_all("table")[1]
# check that a table has been found - there might be a slight
# synchronisation difference between jdma_transfer and jdma_monitor
# i.e. the entry might be in the database but not updated on the
# RETRIEVAL_URL
if len(table) == 0:
continue
# get the first row
rows = table.find_all("tr")
if len(rows) < 2:
continue
row_1 = table.find_all("tr")[1]
# the transfer id is the first column, the status is the third
cols = row_1.find_all("td")
if len(cols) < 3:
continue
transfer_id = cols[0].get_text()
status = cols[2].get_text()
# this is a paranoid check - this really shouldn't happen!
if (transfer_id != gr.transfer_id):
raise ETException("Transfer id mismatch")
# check for completion
if status == "COMPLETED":
completed_GETs.append(gr.transfer_id)
return completed_GETs
def get_completed_deletes(backend_object):
"""Get all the completed deletes for the Elastic Tape"""
# avoiding a circular dependency
    from jdma_control.models import MigrationRequest, Migration, StorageQuota
    # get the storage id
storage_id = StorageQuota.get_storage_index("elastictape")
ET_Settings = backend_object.ET_Settings
# list of completed DELETEs to return
completed_DELETEs = []
# now loop over the PUT requests
del_reqs = MigrationRequest.objects.filter(
(Q(request_type=MigrationRequest.DELETE))
& Q(stage=MigrationRequest.DELETING)
& Q(migration__storage__storage=storage_id)
)
for dr in del_reqs:
# assume deleted
deleted = True
# get a list of synced batches for this workspace and user
holdings_url = "{}?workspace={};caller={};level=batch".format(
ET_Settings["ET_HOLDINGS_URL"],
dr.migration.workspace.workspace,
dr.migration.user.name
)
# use requests to fetch the URL
sleep(0.1) # 100 ms delay to avoid overloading the server
r = requests.get(holdings_url)
if r.status_code == 200:
bs = BeautifulSoup(r.content, "xml")
else:
logging.error("Error in ET monitor:{} is unreachable".format(str(holdings_url)))
continue
# if the dr.migration.external_id is not in the list of batches
# then the delete has completed
batches = bs.select("batch")
for b in batches:
batch_id = b.find("batch_id").text.strip()
if batch_id == dr.migration.external_id:
deleted = False
if deleted:
# it's been deleted so add to the returned list of completed DELETEs
completed_DELETEs.append(dr.migration.external_id)
return completed_DELETEs
def user_in_workspace(jdma_user, jdma_workspace, ET_Settings):
"""Determine whether a user is in a workspace by using requests to fetch
a URL and beautifulsoup to parse the table returned.
We'll ask Kevin O'Neill to provide a JSON version of this."""
# get from requests
sleep(0.1) # 100 ms delay to avoid overloading the server
r = requests.get(ET_Settings["ET_ROLE_URL"])
if r.status_code == 200:
bs = BeautifulSoup(r.content, "html5lib")
else:
raise ETException(ET_Settings["ET_ROLE_URL"] + " is unreachable.")
# parse into dictionary from table
gws_roles = {}
current_gws = ""
for row in bs.select("tr"):
if row is not None:
cells = row.findAll("td")
if len(cells) >= 4:
# get the group workspace
gws = cells[0].text.strip()
user = cells[2].text.strip()
if len(gws) > 0:
current_gws = gws
gws_roles[current_gws] = [user]
else:
gws_roles[current_gws].append(user)
# no roles were returned
if gws_roles == {}:
raise ETException(
ET_Settings["ET_ROLE_URL"] + " did not return a valid list of roles"
)
# check if workspace exists
if jdma_workspace not in gws_roles:
return False
else:
return jdma_user in gws_roles[jdma_workspace]
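# Sketch of the mapping built from the roles table (hypothetical names):
#
#   gws_roles = {"my_gws": ["alice", "bob"], "other_gws": ["carol"]}
#
# so the membership test reduces to:
#   jdma_user in gws_roles.get(jdma_workspace, [])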
def workspace_quota_remaining(jdma_user, jdma_workspace, ET_Settings):
"""Get the workspace quota by using requests to fetch a URL. Unfortunately,
the JSON version of this URL returns ill-formatted JSON with a XML header!
So we can't just parse that, and we use the regular HTML table view and
parse using beautifulsoup again."""
# form the URL
url = "{}{}{}{}{}".format(ET_Settings["ET_QUOTA_URL"],
"?workspace=", jdma_workspace,
";caller=", jdma_user)
# fetch using requests
sleep(0.1) # 100 ms delay to avoid overloading the server
r = requests.get(url)
if r.status_code == 200:
# success, so parse the json
bs = BeautifulSoup(r.content, "html5lib")
else:
raise ETException(url + " is unreachable.")
quota_allocated = -1
quota_used = -1
for row in bs.select("tr"):
if row is not None:
cells = row.findAll("td")
# quota_allocated is position 4, quota_used is position 5 (both in bytes)
if len(cells) == 7:
quota_allocated = int(cells[4].text.strip())
quota_used = int(cells[5].text.strip())
# check that valid quotas were returned
if quota_allocated == -1 or quota_used == -1:
raise ETException(url + " did not return a quota.")
return quota_allocated - quota_used
class ElasticTapeBackend(Backend):
"""Class for a JASMIN Data Migration App backend which targets Elastic Tape.
Inherits from Backend class and overloads inherited functions."""
def __init__(self):
"""Need to set the verification directory and archive staging directory"""
self.ET_Settings = read_backend_config(self.get_id())
self.VERIFY_DIR = self.ET_Settings["VERIFY_DIR"]
self.ARCHIVE_STAGING_DIR = self.ET_Settings["ARCHIVE_STAGING_DIR"]
def exit(self):
"""Shutdown the backend. Do nothing for ET."""
return
def available(self, credentials):
"""Return whether the elastic tape is available or not"""
try:
return "available"
except Exception:
return "not available"
def monitor(self, thread_number=None):
"""Determine which batches have completed."""
completed_PUTs = []
completed_GETs = []
completed_DELETEs = []
try:
completed_PUTs = get_completed_puts(self)
except SystemExit:
completed_PUTs = []
except ETException as e:
logging.error("Error in ET monitor: {}".format(str(e)))
try:
completed_GETs = get_completed_gets(self)
except SystemExit:
completed_GETs = []
except ETException as e:
logging.error("Error in ET monitor: {}".format(str(e)))
try:
completed_DELETEs = get_completed_deletes(self)
except SystemExit:
completed_DELETEs = []
except ETException as e:
logging.error("Error in ET monitor: {}".format(str(e)))
return completed_PUTs, completed_GETs, completed_DELETEs
def pack_data(self):
"""Should the data be packed into a tarfile for this backend?"""
return False
def piecewise(self):
"""For elastic tape the data shouldn't be uploaded archive by archive
but uploaded all at once."""
return False
def create_connection(self, user, workspace, credentials, mode="upload"):
"""Create connection to Elastic Tape, using the supplied credentials.
(There are no required credentials!)
"""
if mode == "upload" or mode == "delete":
conn = ET_client.client.client(
self.ET_Settings["PUT_HOST"],
self.ET_Settings["PORT"]
)
elif mode == "download":
conn = ET_client.client.client(
self.ET_Settings["GET_HOST"],
self.ET_Settings["PORT"]
)
conn.connect()
# save the user and workspace
conn.jdma_user = user
conn.jdma_workspace = workspace
return conn
def close_connection(self, conn):
"""Close the connection to the backend.
"""
conn.close()
return
def download_files(self, conn, get_req, file_list, target_dir):
"""Create a download batch for the elastic tape.
This will also instigate the transfer, as ET requires the conn to stay
up during the transfer.
"""
# the ET client interface is contained in ET_conn
try:
# don't do anything if filelist length is 0
if len(file_list) == 0:
return
# get the external id
external_id = get_req.migration.external_id
# Get the ip address of the sender
ip = get_ip_address()
# create a new batch
batch = conn.newBatch(conn.jdma_workspace, None)
# override the requester in the Batch
batch.requestor = conn.jdma_user
# override the ip address
batch.PI = ip
# overwrite any files
batch.override = 1
# get the common_path
cp = get_req.migration.common_path
# add the files to the batch
for f in file_list:
fname = os.path.join(cp,f)
batch.addFile(fname)
"""The code below here is replicated from elastic_tape.client.client
We need to replicate it as we need to get the transfer id for
monitoring purposes."""
# get the files from the ET client
retrieve_batch = batch.retrieve()
# get the request id and store it in the migration request
reqID = conn.msgIface.retrieveBatch(retrieve_batch)
get_req.transfer_id = reqID
get_req.save()
conn.msgIface.sendStartRetrieve(reqID)
downloadThreads = []
handler = ET_client.client.DownloadThread
for i in range(self.ET_Settings["THREADS"]):
t = handler()
t.daemon = True
downloadThreads.append(t)
t.setup(reqID, target_dir, conn.host, conn.port)
t.start()
while not conn.msgIface.checkRRComplete(reqID):
sleep(5)
bad_files = conn.msgIface.FinishRR(reqID)
for t in downloadThreads:
t.stop()
t.join()
            # raise the list of bad files as an exception - mark migration as
# FAILED
if len(bad_files) > 0:
raise Exception(
"Could not download files: {}".format(bad_files)
)
# elastic tape copies these files to a directory that looks like:
# /target_dir/group_workspaces/jasmin4/gws_name/user_name/original_directory
# whereas what we want them to look like :
# /target_dir/original_directory
            # This can be achieved by using the original path of the migration
# and moving the files from the /target_dir/common_path... to just
# the target dir
# we have to trim the first character from the common path (which is
# a / to get os.path.join to join the paths correctly)
source_dir_cp = os.path.join(target_dir, cp[1:])
# get a list of all the files in the source directory
for f in os.listdir(source_dir_cp):
full_source_path = "{}/{}".format(source_dir_cp, f)
subprocess.call(["/bin/mv", full_source_path, target_dir])
# we now want to delete the empty directories that are left after the move
# this is everything beneath /target_dir/first_directory_of_common_path
dir_to_remove = os.path.join(target_dir, cp.split("/")[1])
subprocess.call(["/bin/rm", "-r", dir_to_remove])
except Exception as e:
raise Exception(str(e))
return str(external_id)
def upload_files(self, conn, put_req, prefix, file_list):
"""Create a batch on the elastic tape and upload the filenames.
The batch id will be created and saved to the Migration.
"""
try:
# don't do anything if filelist length is 0
if len(file_list) == 0:
return
# Get the ip address of the sender
ip = get_ip_address()
batch_name = put_req.migration.label
# create a new batch
batch = conn.newBatch(conn.jdma_workspace, batch_name)
# override the requester in the Batch
batch.requestor = conn.jdma_user
# override the ip address
batch.PI = ip
# override the override
batch.override = 0
# add the files to the batch
for f in file_list:
batch.addFile(f)
# close the batch and get the batch id - convert to string on return
# from function
batch_id = batch.register()
# register the batch_id as the external id
put_req.migration.external_id = batch_id
put_req.migration.save()
except Exception as e:
batch_id = None
raise ETException(str(e))
return str(batch_id)
def delete_batch(self, conn, del_req, batch_id):
"""Delete a archive of files from the elastic tape"""
conn.deleteBatchByID(conn.jdma_workspace,
conn.jdma_user,
int(batch_id))
def user_has_put_permission(self, conn):
"""Check whether the user has permission to access the elastic tape,
and whether they have permission from the groupworkspace
"""
# groupworkspace permission
gws_permission = Backend._user_has_put_permission(
self, conn.jdma_user, conn.jdma_workspace.workspace
)
# elastic tape permission - fetch from URL and use beautifulsoup to
# parse the returned table into something meaningful
et_permission = user_in_workspace(
conn.jdma_user,
conn.jdma_workspace.workspace,
self.ET_Settings
)
return gws_permission & et_permission
def user_has_get_permission(self, batch_id, conn):
"""Check whether the user has permission to access the elastic tape,
and whether they have permission from the groupworkspace
"""
gws_permission = Backend._user_has_get_permission(
self, conn.jdma_user, conn.jdma_workspace.workspace
)
# elastic tape permission
et_permission = user_in_workspace(
conn.jdma_user,
conn.jdma_workspace.workspace,
self.ET_Settings
)
return gws_permission & et_permission
def user_has_delete_permission(self, batch_id, conn):
"""Check whether the user has permission to delete the object from the
elastic tape, and whether they have permission from the groupworkspace
LDAP.
"""
# check from the groupworkspace
gws_permission = Backend._user_has_delete_permission(
self, conn.jdma_user, conn.jdma_workspace.workspace, batch_id
)
# elastic tape permission
et_permission = user_in_workspace(
conn.jdma_user,
conn.jdma_workspace.workspace,
self.ET_Settings
)
return gws_permission & et_permission
def user_has_put_quota(self, conn):
"""Check the remaining quota for the user in the workspace.
We just check the database here, i.e. check that we are not over
quota.
When jdma_lock calculates the file sizes we can check the quota again
and flag the transfer as FAILED if it goes over the quota.
"""
from jdma_control.models import StorageQuota
# get the storage id
storage_id = StorageQuota.get_storage_index("elastictape")
storage_quota = StorageQuota.objects.filter(
storage=storage_id,
workspace__workspace=conn.jdma_workspace
)[0]
jdma_quota_remaining = storage_quota.quota_size - storage_quota.quota_used
# get the quota from the elastic tape feed
et_quota_remaining = workspace_quota_remaining(
conn.jdma_user,
conn.jdma_workspace.workspace,
self.ET_Settings,
)
return (jdma_quota_remaining > 0) & (et_quota_remaining > 0)
def get_name(self):
return "Elastic Tape"
def get_id(self):
return "elastictape"
def required_credentials(self):
"""Get the keys of the required credentials to use this backend.
These keys, along with their values, will be stored in a hidden file
in the user's home directory.
They will be encrypted and stored in the MigrationRequest so that
the daemon processes can carry out the Migrations on behalf of the
user.
"""
return []
def minimum_object_size(self):
"""Minimum recommend size for elastic tape = 2GB? (check with Kevin
O'Neil)
"""
return int(self.ET_Settings["OBJECT_SIZE"])
def maximum_object_count(self):
"""Maximum number of objects in an archive"""
return (int(self.ET_Settings["OBJECT_COUNT"]))
| 36.666118 | 101 | 0.619656 |
4a1b90919674baf22f530bd53a63514f1f8244f6
| 9,009 |
py
|
Python
|
code/model.py
|
Salingo/CTF-Net
|
39c87b271ea0d263c31e43439997cc4f5f8602b3
|
[
"MIT"
] | 9 |
2021-05-21T13:38:09.000Z
|
2022-03-11T09:56:11.000Z
|
code/model.py
|
Salingo/CTF-Net
|
39c87b271ea0d263c31e43439997cc4f5f8602b3
|
[
"MIT"
] | null | null | null |
code/model.py
|
Salingo/CTF-Net
|
39c87b271ea0d263c31e43439997cc4f5f8602b3
|
[
"MIT"
] | null | null | null |
import torch
from torch.autograd import Variable
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import trans
from pointnet_util import PointNetEncoder
class Convlayer(nn.Module):
def __init__(self, point_scales, nchannel=3):
super(Convlayer, self).__init__()
self.point_scales = point_scales
self.conv1 = torch.nn.Conv2d(1, 64, (1, nchannel))
self.conv2 = torch.nn.Conv2d(64, 64, 1)
self.conv3 = torch.nn.Conv2d(64, 128, 1)
self.conv4 = torch.nn.Conv2d(128, 256, 1)
self.conv5 = torch.nn.Conv2d(256, 512, 1)
self.conv6 = torch.nn.Conv2d(512, 1024, 1)
self.maxpool = torch.nn.MaxPool2d((self.point_scales, 1), 1)
self.bn1 = nn.BatchNorm2d(64)
self.bn2 = nn.BatchNorm2d(64)
self.bn3 = nn.BatchNorm2d(128)
self.bn4 = nn.BatchNorm2d(256)
self.bn5 = nn.BatchNorm2d(512)
self.bn6 = nn.BatchNorm2d(1024)
def forward(self, x):
x = torch.unsqueeze(x, 1)
x = F.relu(self.bn1(self.conv1(x)))
x = F.relu(self.bn2(self.conv2(x)))
x_128 = F.relu(self.bn3(self.conv3(x)))
x_256 = F.relu(self.bn4(self.conv4(x_128)))
x_512 = F.relu(self.bn5(self.conv5(x_256)))
x_1024 = F.relu(self.bn6(self.conv6(x_512)))
x_128 = torch.squeeze(self.maxpool(x_128), 2)
x_256 = torch.squeeze(self.maxpool(x_256), 2)
x_512 = torch.squeeze(self.maxpool(x_512), 2)
x_1024 = torch.squeeze(self.maxpool(x_1024), 2)
L = [x_1024, x_512, x_256, x_128]
x = torch.cat(L, 1)
return x
class STN3d(nn.Module):
def __init__(self, nchannel=3):
super(STN3d, self).__init__()
self.conv1 = torch.nn.Conv1d(nchannel, 64, 1)
self.conv2 = torch.nn.Conv1d(64, 128, 1)
self.conv3 = torch.nn.Conv1d(128, 1024, 1)
self.fc1 = nn.Linear(1024, 512)
self.fc2 = nn.Linear(512, 256)
self.fc3 = nn.Linear(256, 9)
self.relu = nn.ReLU()
self.bn1 = nn.BatchNorm1d(64)
self.bn2 = nn.BatchNorm1d(128)
self.bn3 = nn.BatchNorm1d(1024)
self.bn4 = nn.BatchNorm1d(512)
self.bn5 = nn.BatchNorm1d(256)
def forward(self, x):
x = x.transpose(2, 1)
batchsize = x.size()[0]
x = F.relu(self.bn1(self.conv1(x)))
x = F.relu(self.bn2(self.conv2(x)))
x = F.relu(self.bn3(self.conv3(x)))
x = torch.max(x, 2, keepdim=True)[0]
x = x.view(-1, 1024)
x = F.relu(self.bn4(self.fc1(x)))
x = F.relu(self.bn5(self.fc2(x)))
x = self.fc3(x)
iden = Variable(torch.from_numpy(np.array([1,0,0,0,1,0,0,0,1]).astype(np.float32))).view(1, 9).repeat(batchsize, 1)
if x.is_cuda:
iden = iden.cuda()
x = x + iden
x = x.view(-1, 3, 3)
return x
class Latentfeature(nn.Module):
def __init__(self, num_resolution, input_scale_list, nchannel=3, use_stn=False):
super(Latentfeature, self).__init__()
self.num_resolution = num_resolution
self.input_scale_list = input_scale_list
self.num_channels = nchannel
self.use_stn = use_stn
if use_stn:
self.stn = STN3d(nchannel)
self.Convlayers = nn.ModuleList([Convlayer(point_scales=self.input_scale_list[i], nchannel=nchannel)for i in range(self.num_resolution)])
self.conv1 = torch.nn.Conv1d(num_resolution, 1, 1)
self.bn1 = nn.BatchNorm1d(1)
def forward(self, x):
outs = []
for i in range(self.num_resolution):
if self.use_stn:
trans = self.stn(x[i])
if self.num_channels > 3:
x[i], feature = x[i].split(3, dim=2)
x[i] = torch.bmm(x[i], trans)
if self.num_channels > 3:
x[i] = torch.cat([x[i], feature], dim=2)
outs.append(self.Convlayers[i](x[i]))
latentfeature = torch.cat(outs, 2)
latentfeature = latentfeature.transpose(1, 2)
latentfeature = F.relu(self.bn1(self.conv1(latentfeature)))
latentfeature = torch.squeeze(latentfeature, 1)
return latentfeature
class comp_model(nn.Module):
def __init__(self, input_scale_list, output_scale_list, nchannel=3):
super(comp_model, self).__init__()
num_resolution = 3
self.output_scale_list = output_scale_list
self.latentfeature = Latentfeature(num_resolution, input_scale_list, nchannel)
self.fc_fine_1 = nn.Linear(1920, 1024)
self.fc_mid_1 = nn.Linear(1024, 512)
self.fc_low_1 = nn.Linear(512, 256)
self.fc_fine_2 = nn.Linear(1024, self.output_scale_list[2] * 16)
self.fc_mid_2 = nn.Linear(512, self.output_scale_list[1] * 16)
self.fc_low_2 = nn.Linear(256, self.output_scale_list[0] * 3)
self.conv_mid_1 = torch.nn.Conv1d(int(self.output_scale_list[1] / self.output_scale_list[0] * 16), int(self.output_scale_list[1] / self.output_scale_list[0] * 3), 1)
self.conv_fine_1 = torch.nn.Conv1d(int(self.output_scale_list[2] / self.output_scale_list[1] * 16), int(self.output_scale_list[2] / self.output_scale_list[1] * 16), 1)
self.conv_fine_2 = torch.nn.Conv1d(int(self.output_scale_list[2] / self.output_scale_list[1] * 16), int(self.output_scale_list[2] / self.output_scale_list[1] * 8), 1)
self.conv_fine_3 = torch.nn.Conv1d(int(self.output_scale_list[2] / self.output_scale_list[1] * 8), int(self.output_scale_list[2] / self.output_scale_list[1] * 3), 1)
def forward(self, x1, x2, x3):
x = self.latentfeature([x1,x2,x3])
x_fine = F.relu(self.fc_fine_1(x)) #1024
x_mid = F.relu(self.fc_mid_1(x_fine)) #512
x_low = F.relu(self.fc_low_1(x_mid)) #256
pc_low_feat = self.fc_low_2(x_low)
pc_low_xyz = pc_low_feat.reshape(-1, self.output_scale_list[0], 3) # scale_0 x 3 (output_0)
pc_mid_feat = F.relu(self.fc_mid_2(x_mid))
pc_mid_feat = pc_mid_feat.reshape(-1, int(self.output_scale_list[1] / self.output_scale_list[0] * 16), self.output_scale_list[0])
pc_mid_xyz = self.conv_mid_1(pc_mid_feat) # ((scale_1 / scale_0) * 3) x scale_0
pc_fine_feat = F.relu(self.fc_fine_2(x_fine))
pc_fine_feat = pc_fine_feat.reshape(-1, int(self.output_scale_list[2] / self.output_scale_list[1] * 16), self.output_scale_list[1])
pc_fine_feat = F.relu(self.conv_fine_1(pc_fine_feat))
pc_fine_feat = F.relu(self.conv_fine_2(pc_fine_feat))
pc_fine_xyz = self.conv_fine_3(pc_fine_feat) # ((scale_2 / scale_1) * 3) x scale_1
pc_low_xyz_expand = torch.unsqueeze(pc_low_xyz, 2)
pc_mid_xyz = pc_mid_xyz.transpose(1, 2)
pc_mid_xyz = pc_mid_xyz.reshape(-1, self.output_scale_list[0], int(self.output_scale_list[1] / self.output_scale_list[0]), 3)
pc_mid_xyz = pc_low_xyz_expand + pc_mid_xyz
pc_mid_xyz = pc_mid_xyz.reshape(-1, self.output_scale_list[1], 3) # scale_1 x 3 (output_1)
pc_mid_xyz_expand = torch.unsqueeze(pc_mid_xyz, 2)
pc_fine_xyz = pc_fine_xyz.transpose(1, 2)
pc_fine_xyz = pc_fine_xyz.reshape(-1, self.output_scale_list[1], int(self.output_scale_list[2] / self.output_scale_list[1]), 3)
pc_fine_xyz = pc_mid_xyz_expand + pc_fine_xyz
pc_fine_xyz = pc_fine_xyz.reshape(-1, self.output_scale_list[2], 3) # scale_2 x 3 (output_2)
return pc_low_xyz, pc_mid_xyz, pc_fine_xyz
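# Shape sketch of the coarse-to-fine decoding above, assuming a batch size B
# and a hypothetical output_scale_list = [512, 1024, 2048]:
#
#   pc_low_xyz  : (B,  512, 3)                          from fc_low_2
#   pc_mid_xyz  : (B,  512, 1, 3) + (B,  512, 2, 3)  -> reshape (B, 1024, 3)
#   pc_fine_xyz : (B, 1024, 1, 3) + (B, 1024, 2, 3)  -> reshape (B, 2048, 3)
#
# i.e. each finer level predicts offsets around the points of the level above.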
class orient_model(nn.Module):
def __init__(self):
super(orient_model, self).__init__()
self.encoder = PointNetEncoder(global_feat=True, feature_transform=False, channel=3)
self.fc0 = nn.Linear(1024, 1024)
self.bn0 = nn.BatchNorm1d(1024)
self.fc1 = nn.Linear(1024, 512)
self.bn1 = nn.BatchNorm1d(512)
self.drop1 = nn.Dropout(0.4)
self.fc2 = nn.Linear(512, 128)
self.bn2 = nn.BatchNorm1d(128)
self.drop2 = nn.Dropout(0.5)
self.fc3 = nn.Linear(128, 4) # para_rotation
def forward(self, xyz):
xr = self.encoder(xyz.transpose(2, 1))
xr = F.relu(self.bn0(self.fc0(xr)))
xr = self.drop1(F.relu(self.bn1(self.fc1(xr))))
xr = self.drop2(F.relu(self.bn2(self.fc2(xr))))
para_r = F.tanh(self.fc3(xr))
R = trans.quaternion2matrix_torch(para_r)
return para_r
class regi_model(nn.Module):
def __init__(self):
super(regi_model, self).__init__()
self.encoder = PointNetEncoder(global_feat=True, feature_transform=True, channel=3)
self.fc0 = nn.Linear(1024, 512)
self.bn0 = nn.BatchNorm1d(512)
self.fc1 = nn.Linear(1024, 512)
self.bn1 = nn.BatchNorm1d(512)
self.drop1 = nn.Dropout(0.4)
self.fc2 = nn.Linear(512, 128)
self.bn2 = nn.BatchNorm1d(128)
self.drop2 = nn.Dropout(0.5)
self.fc3 = nn.Linear(128, 4) # para_rotation
self.fc4 = nn.Linear(1024, 512)
self.bn4 = nn.BatchNorm1d(512)
self.drop4 = nn.Dropout(0.4)
self.fc5 = nn.Linear(512, 128)
self.bn5 = nn.BatchNorm1d(128)
self.drop5 = nn.Dropout(0.5)
self.fc6 = nn.Linear(128, 3) #para_translation
def forward(self, xyz1, xyz2):
fea1 = self.encoder(xyz1.transpose(2, 1))
fea1 = F.relu(self.bn0(self.fc0(fea1)))
fea2 = self.encoder(xyz2.transpose(2, 1))
fea2 = F.relu(self.bn0(self.fc0(fea2)))
xr = torch.cat((fea1, fea2), 1)
xr = self.drop1(F.relu(self.bn1(self.fc1(xr))))
xr = self.drop2(F.relu(self.bn2(self.fc2(xr))))
para_r21 = F.tanh(self.fc3(xr))
R = trans.quaternion2matrix_torch(para_r21)
transformed_xyz2 = trans.transform_pts_torch(xyz2, R)
fea3 = self.encoder(transformed_xyz2.transpose(2, 1))
fea3 = F.relu(self.bn0(self.fc0(fea3)))
xt = torch.cat((fea1, fea3), 1)
xt = self.drop4(F.relu(self.bn4(self.fc4(xt))))
xt = self.drop5(F.relu(self.bn5(self.fc5(xt))))
para_t21 = F.tanh(self.fc6(xt))
T = trans.translation2matrix_torch(para_t21)
transformed_xyz2 = trans.transform_pts_torch(transformed_xyz2, T)
return para_r21, para_t21, transformed_xyz2
| 43.3125 | 169 | 0.70807 |
4a1b90968192a4c4a3fd3cf3dbe8f335c3118f39
| 2,506 |
py
|
Python
|
tempest/services/compute/json/quotas_client.py
|
KiranPawar72/tempest
|
1fef3dd92b083055793065dd0693454735ec2c01
|
[
"Apache-2.0"
] | null | null | null |
tempest/services/compute/json/quotas_client.py
|
KiranPawar72/tempest
|
1fef3dd92b083055793065dd0693454735ec2c01
|
[
"Apache-2.0"
] | null | null | null |
tempest/services/compute/json/quotas_client.py
|
KiranPawar72/tempest
|
1fef3dd92b083055793065dd0693454735ec2c01
|
[
"Apache-2.0"
] | 1 |
2020-07-21T02:18:23.000Z
|
2020-07-21T02:18:23.000Z
|
# Copyright 2012 NTT Data
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_serialization import jsonutils as json
from tempest.api_schema.response.compute.v2_1 import quotas as schema
from tempest.common import service_client
class QuotasClient(service_client.ServiceClient):
def show_quota_set(self, tenant_id, user_id=None):
"""List the quota set for a tenant."""
url = 'os-quota-sets/%s' % tenant_id
if user_id:
url += '?user_id=%s' % user_id
resp, body = self.get(url)
body = json.loads(body)
self.validate_response(schema.get_quota_set, resp, body)
return service_client.ResponseBody(resp, body)
def show_default_quota_set(self, tenant_id):
"""List the default quota set for a tenant."""
url = 'os-quota-sets/%s/defaults' % tenant_id
resp, body = self.get(url)
body = json.loads(body)
self.validate_response(schema.get_quota_set, resp, body)
return service_client.ResponseBody(resp, body)
def update_quota_set(self, tenant_id, user_id=None, **kwargs):
"""
Updates the tenant's quota limits for one or more resources
"""
post_body = json.dumps({'quota_set': kwargs})
if user_id:
resp, body = self.put('os-quota-sets/%s?user_id=%s' %
(tenant_id, user_id), post_body)
else:
resp, body = self.put('os-quota-sets/%s' % tenant_id,
post_body)
body = json.loads(body)
self.validate_response(schema.update_quota_set, resp, body)
return service_client.ResponseBody(resp, body)
def delete_quota_set(self, tenant_id):
"""Delete the tenant's quota set."""
resp, body = self.delete('os-quota-sets/%s' % tenant_id)
self.validate_response(schema.delete_quota, resp, body)
return service_client.ResponseBody(resp, body)
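# Usage sketch (hypothetical client construction and quota keys):
#
#   quotas = QuotasClient(auth_provider, 'compute', 'regionOne')
#   quotas.update_quota_set(tenant_id, cores=20, instances=10)
#   quotas.show_quota_set(tenant_id, user_id=user_id)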
| 37.969697 | 78 | 0.654828 |
4a1b91c599f0023ee5ae0c23c87fe71617b0ec03
| 9,210 |
py
|
Python
|
tasks/utils.py
|
hostcc/datadog-agent
|
34cbcd79040936169b445fb579898e292c0c8198
|
[
"Apache-2.0"
] | null | null | null |
tasks/utils.py
|
hostcc/datadog-agent
|
34cbcd79040936169b445fb579898e292c0c8198
|
[
"Apache-2.0"
] | null | null | null |
tasks/utils.py
|
hostcc/datadog-agent
|
34cbcd79040936169b445fb579898e292c0c8198
|
[
"Apache-2.0"
] | null | null | null |
"""
Miscellaneous functions, no tasks here
"""
from __future__ import print_function
import os
import platform
import re
import sys
import json
from subprocess import check_output
import invoke
# constants
ORG_PATH = "github.com/DataDog"
REPO_PATH = "{}/datadog-agent".format(ORG_PATH)
def bin_name(name, android=False):
"""
Generate platform dependent names for binaries
"""
if android:
return "{}.aar".format(name)
if sys.platform == 'win32':
return "{}.exe".format(name)
return name
def get_gopath(ctx):
gopath = os.environ.get("GOPATH")
if not gopath:
gopath = ctx.run("go env GOPATH", hide=True).stdout.strip()
return gopath
def get_multi_python_location(embedded_path=None, rtloader_root=None):
if rtloader_root is None:
rtloader_lib = "{}/lib".format(rtloader_root or embedded_path)
rtloader_headers = "{}/include".format(rtloader_root or embedded_path)
rtloader_common_headers = "{}/common".format(rtloader_root or embedded_path)
# if rtloader_root is specified we're working in dev mode from the rtloader folder
else:
rtloader_lib = "{}/rtloader".format(rtloader_root)
rtloader_headers = "{}/include".format(rtloader_root)
rtloader_common_headers = "{}/common".format(rtloader_root)
return rtloader_lib, rtloader_headers, rtloader_common_headers
def get_build_flags(ctx, static=False, prefix=None, embedded_path=None,
rtloader_root=None, python_home_2=None, python_home_3=None, arch="x64"):
"""
Build the common value for both ldflags and gcflags, and return an env accordingly.
    We need to invoke external processes here so this function needs the
Context object.
"""
gcflags = ""
ldflags = get_version_ldflags(ctx, prefix)
env = {}
if sys.platform == 'win32':
env["CGO_LDFLAGS_ALLOW"] = "-Wl,--allow-multiple-definition"
if embedded_path is None:
# fall back to local dev path
embedded_path = "{}/src/github.com/DataDog/datadog-agent/dev".format(get_gopath(ctx))
rtloader_lib, rtloader_headers, rtloader_common_headers = \
get_multi_python_location(embedded_path, rtloader_root)
# setting python homes in the code
if python_home_2:
ldflags += "-X {}/pkg/collector/python.pythonHome2={} ".format(REPO_PATH, python_home_2)
if python_home_3:
ldflags += "-X {}/pkg/collector/python.pythonHome3={} ".format(REPO_PATH, python_home_3)
# adding rtloader libs and headers to the env
env['DYLD_LIBRARY_PATH'] = os.environ.get('DYLD_LIBRARY_PATH', '') + ":{}".format(rtloader_lib) # OSX
env['LD_LIBRARY_PATH'] = os.environ.get('LD_LIBRARY_PATH', '') + ":{}".format(rtloader_lib) # linux
env['CGO_LDFLAGS'] = os.environ.get('CGO_LDFLAGS', '') + " -L{}".format(rtloader_lib)
env['CGO_CFLAGS'] = os.environ.get('CGO_CFLAGS', '') + " -w -I{} -I{}".format(rtloader_headers,
rtloader_common_headers)
# if `static` was passed ignore setting rpath, even if `embedded_path` was passed as well
if static:
ldflags += "-s -w -linkmode=external '-extldflags=-static' "
else:
ldflags += "-r {}/lib ".format(embedded_path)
if os.environ.get("DELVE"):
gcflags = "-N -l"
if sys.platform == 'win32':
# On windows, need to build with the extra argument -ldflags="-linkmode internal"
# if you want to be able to use the delve debugger.
ldflags += "-linkmode internal "
elif os.environ.get("NO_GO_OPT"):
gcflags = "-N -l"
return ldflags, gcflags, env
def get_payload_version():
"""
Return the Agent payload version found in the Gopkg.toml file.
"""
current = {}
# parse the TOML file line by line
with open("Gopkg.lock") as toml:
for line in toml.readlines():
# skip empty lines and comments
if not line or line[0] == "#":
continue
# change the parser "state" when we find a [[projects]] section
if "[[projects]]" in line:
# see if the current section is what we're searching for
if current.get("name") == "github.com/DataDog/agent-payload":
return current.get("version")
# if not, reset the "state" and proceed with the next line
current = {}
continue
# search for an assignment, ignore subsequent `=` chars
toks = line.split('=', 2)
if len(toks) == 2:
# strip whitespaces
key = toks[0].strip()
# strip whitespaces and quotes
value = toks[-1].replace('"', '').strip()
current[key] = value
return ""
def get_version_ldflags(ctx, prefix=None):
"""
Compute the version from the git tags, and set the appropriate compiler
flags
"""
payload_v = get_payload_version()
commit = get_git_commit()
ldflags = "-X {}/pkg/version.Commit={} ".format(REPO_PATH, commit)
ldflags += "-X {}/pkg/version.AgentVersion={} ".format(REPO_PATH, get_version(ctx, include_git=True, prefix=prefix))
ldflags += "-X {}/pkg/serializer.AgentPayloadVersion={} ".format(REPO_PATH, payload_v)
return ldflags
def get_git_commit():
"""
Get the current commit
"""
return check_output(['git', 'rev-parse', '--short', 'HEAD']).decode('utf-8').strip()
def get_go_version():
"""
Get the version of Go used
"""
return check_output(['go', 'version']).decode('utf-8').strip()
def get_root():
"""
Get the root of the Go project
"""
return check_output(['git', 'rev-parse', '--show-toplevel']).decode('utf-8').strip()
def get_git_branch_name():
"""
Return the name of the current git branch
"""
return check_output(["git", "rev-parse", "--abbrev-ref", "HEAD"]).decode('utf-8').strip()
def query_version(ctx, git_sha_length=7, prefix=None):
# The string that's passed in will look something like this: 6.0.0-beta.0-1-g4f19118
# if the tag is 6.0.0-beta.0, it has been one commit since the tag and that commit hash is g4f19118
cmd = "git describe --tags --candidates=50"
if prefix and type(prefix) == str:
cmd += " --match \"{}-*\"".format(prefix)
else:
cmd += " --match \"[0-9]*\""
if git_sha_length and type(git_sha_length) == int:
cmd += " --abbrev={}".format(git_sha_length)
described_version = ctx.run(cmd, hide=True).stdout.strip()
# for the example above, 6.0.0-beta.0-1-g4f19118, this will be 1
commit_number_match = re.match(r"^.*-(?P<commit_number>\d+)-g[0-9a-f]+$", described_version)
commit_number = 0
if commit_number_match:
commit_number = int(commit_number_match.group('commit_number'))
version_re = r"v?(?P<version>\d+\.\d+\.\d+)(?:(?:-|\.)(?P<pre>[0-9A-Za-z.-]+))?"
if prefix and type(prefix) == str:
version_re = r"^(?:{}-)?".format(prefix) + version_re
else:
version_re = r"^" + version_re
if commit_number == 0:
version_re += r"(?P<git_sha>)$"
else:
version_re += r"-\d+-g(?P<git_sha>[0-9a-f]+)$"
version_match = re.match(
version_re,
described_version)
if not version_match:
raise Exception("Could not query valid version from tags of local git repository")
# version: for the tag 6.0.0-beta.0, this will match 6.0.0
# pre: for the output, 6.0.0-beta.0-1-g4f19118, this will match beta.0
# if there have been no commits since, it will be just 6.0.0-beta.0,
# and it will match beta.0
# git_sha: for the output, 6.0.0-beta.0-1-g4f19118, this will match g4f19118
version, pre, git_sha = version_match.group('version', 'pre', 'git_sha')
return version, pre, commit_number, git_sha
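# Worked example for the regexes above (no prefix, one commit since the tag):
#
#   described_version = "6.0.0-beta.0-1-g4f19118"
#   -> version = "6.0.0", pre = "beta.0", commit_number = 1, git_sha = "4f19118"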
def get_version(ctx, include_git=False, url_safe=False, git_sha_length=7, prefix=None):
# we only need the git info for the non omnibus builds, omnibus includes all this information by default
version = ""
version, pre, commits_since_version, git_sha = query_version(ctx, git_sha_length, prefix)
if pre:
version = "{0}-{1}".format(version, pre)
if commits_since_version and include_git:
if url_safe:
version = "{0}.git.{1}.{2}".format(version, commits_since_version,git_sha)
else:
version = "{0}+git.{1}.{2}".format(version, commits_since_version,git_sha)
# version could be unicode as it comes from `query_version`
return str(version)
def get_version_numeric_only(ctx):
version, _, _, _ = query_version(ctx)
return version
def load_release_versions(ctx, target_version):
with open("release.json", "r") as f:
versions = json.load(f)
if target_version in versions:
        # windows runners don't accept anything other than strings in the
        # environment when running a subprocess.
return {str(k):str(v) for k, v in versions[target_version].items()}
raise Exception("Could not find '{}' version in release.json".format(target_version))
| 37.137097 | 120 | 0.631813 |
4a1b925ca99b2beb2fe047d327fc0dbda79bacf3
| 5,938 |
py
|
Python
|
pymongo/server_api.py
|
ldennis/mongo-python-driver
|
cc029a1e6208863eaab453777363d3935b927f32
|
[
"Apache-2.0"
] | 2,593 |
2015-01-02T10:53:55.000Z
|
2022-03-28T15:42:47.000Z
|
pymongo/server_api.py
|
ldennis/mongo-python-driver
|
cc029a1e6208863eaab453777363d3935b927f32
|
[
"Apache-2.0"
] | 356 |
2015-02-05T15:57:18.000Z
|
2022-03-31T19:12:30.000Z
|
pymongo/server_api.py
|
ldennis/mongo-python-driver
|
cc029a1e6208863eaab453777363d3935b927f32
|
[
"Apache-2.0"
] | 774 |
2015-01-05T09:30:07.000Z
|
2022-03-30T03:36:25.000Z
|
# Copyright 2020-present MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you
# may not use this file except in compliance with the License. You
# may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License.
"""Support for MongoDB Versioned API.
.. _versioned-api-ref:
MongoDB Versioned API
=====================
Starting in MongoDB 5.0, applications can specify the server API version
to use when creating a :class:`~pymongo.mongo_client.MongoClient`. Doing so
ensures that the driver behaves in a manner compatible with that server API
version, regardless of the server's actual release version.
Declaring an API Version
````````````````````````
.. attention:: Versioned API requires MongoDB >=5.0.
To configure MongoDB Versioned API, pass the ``server_api`` keyword option to
:class:`~pymongo.mongo_client.MongoClient`::
>>> from pymongo.mongo_client import MongoClient
>>> from pymongo.server_api import ServerApi
>>>
>>> # Declare API version "1" for MongoClient "client"
>>> server_api = ServerApi('1')
>>> client = MongoClient(server_api=server_api)
The declared API version is applied to all commands run through ``client``,
including those sent through the generic
:meth:`~pymongo.database.Database.command` helper.
.. note:: Declaring an API version on the
:class:`~pymongo.mongo_client.MongoClient` **and** specifying versioned
API options in :meth:`~pymongo.database.Database.command` command document
is not supported and will lead to undefined behaviour.
To run any command without declaring a server API version or using a different
API version, create a separate :class:`~pymongo.mongo_client.MongoClient`
instance.
Strict Mode
```````````
Configuring ``strict`` mode will cause the MongoDB server to reject all
commands that are not part of the declared :attr:`ServerApi.version`. This
includes command options and aggregation pipeline stages.
For example::
>>> server_api = ServerApi('1', strict=True)
>>> client = MongoClient(server_api=server_api)
>>> client.test.command('count', 'test')
Traceback (most recent call last):
...
pymongo.errors.OperationFailure: Provided apiStrict:true, but the command count is not in API Version 1, full error: {'ok': 0.0, 'errmsg': 'Provided apiStrict:true, but the command count is not in API Version 1', 'code': 323, 'codeName': 'APIStrictError'
Detecting API Deprecations
``````````````````````````
The ``deprecationErrors`` option can be used to enable command failures
when using functionality that is deprecated from the configured
:attr:`ServerApi.version`. For example::
>>> server_api = ServerApi('1', deprecation_errors=True)
>>> client = MongoClient(server_api=server_api)
Note that at the time of this writing, no deprecated APIs exist.
Classes
=======
"""
class ServerApiVersion:
"""An enum that defines values for :attr:`ServerApi.version`.
.. versionadded:: 3.12
"""
V1 = "1"
"""Server API version "1"."""
class ServerApi(object):
"""MongoDB Versioned API."""
def __init__(self, version, strict=None, deprecation_errors=None):
"""Options to configure MongoDB Versioned API.
:Parameters:
- `version`: The API version string. Must be one of the values in
:class:`ServerApiVersion`.
- `strict` (optional): Set to ``True`` to enable API strict mode.
Defaults to ``None`` which means "use the server's default".
- `deprecation_errors` (optional): Set to ``True`` to enable
deprecation errors. Defaults to ``None`` which means "use the
server's default".
.. versionadded:: 3.12
"""
if version != ServerApiVersion.V1:
raise ValueError("Unknown ServerApi version: %s" % (version,))
if strict is not None and not isinstance(strict, bool):
raise TypeError(
"Wrong type for ServerApi strict, value must be an instance "
"of bool, not %s" % (type(strict),))
if (deprecation_errors is not None and
not isinstance(deprecation_errors, bool)):
raise TypeError(
"Wrong type for ServerApi deprecation_errors, value must be "
"an instance of bool, not %s" % (type(deprecation_errors),))
self._version = version
self._strict = strict
self._deprecation_errors = deprecation_errors
@property
def version(self):
"""The API version setting.
This value is sent to the server in the "apiVersion" field.
"""
return self._version
@property
def strict(self):
"""The API strict mode setting.
When set, this value is sent to the server in the "apiStrict" field.
"""
return self._strict
@property
def deprecation_errors(self):
"""The API deprecation errors setting.
When set, this value is sent to the server in the
"apiDeprecationErrors" field.
"""
return self._deprecation_errors
def _add_to_command(cmd, server_api):
"""Internal helper which adds API versioning options to a command.
:Parameters:
- `cmd`: The command.
- `server_api` (optional): A :class:`ServerApi` or ``None``.
"""
if not server_api:
return
cmd['apiVersion'] = server_api.version
if server_api.strict is not None:
cmd['apiStrict'] = server_api.strict
if server_api.deprecation_errors is not None:
cmd['apiDeprecationErrors'] = server_api.deprecation_errors
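# A minimal runnable sketch (not part of the upstream module) showing what
# _add_to_command() injects into a command document; no MongoDB server is
# needed, and the "ping" command below is just an arbitrary example.
if __name__ == "__main__":
    example_api = ServerApi(ServerApiVersion.V1, strict=True, deprecation_errors=True)
    example_cmd = {"ping": 1}
    _add_to_command(example_cmd, example_api)
    # Expected: {'ping': 1, 'apiVersion': '1', 'apiStrict': True,
    #            'apiDeprecationErrors': True}
    print(example_cmd)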
| 35.136095 | 258 | 0.675648 |
4a1b95b5a8bcf3c8fa27bc2724ccfed28ab72513
| 880 |
py
|
Python
|
calculadora.py
|
MarceloPorfirio/Calculadora_Pintura
|
abd3d72f9742db31592ecb460daf056d3b10d0a8
|
[
"MIT"
] | null | null | null |
calculadora.py
|
MarceloPorfirio/Calculadora_Pintura
|
abd3d72f9742db31592ecb460daf056d3b10d0a8
|
[
"MIT"
] | null | null | null |
calculadora.py
|
MarceloPorfirio/Calculadora_Pintura
|
abd3d72f9742db31592ecb460daf056d3b10d0a8
|
[
"MIT"
] | null | null | null |
class Calculadora:
    __area_paredes: float ## the double leading underscore limits access: it can only be reached by attributes and methods inside this class
    __area_teto: float ## both attributes are declared as private
def calcular_area_paredes(self, comodo):
self.__area_paredes = 2*(comodo.largura + comodo.profundidade)* comodo.altura
return self.__area_paredes
def calcular_area_teto(self, comodo):
self.__area_teto = comodo.largura * comodo.profundidade
return self.__area_teto
def litragem_necessaria(self):
        if self.__area_paredes <= 0 or self.__area_teto <= 0: ## check whether the wall or ceiling area is less than or equal to zero
print('Não é possível calcular a litragem com os valores digitados.')
            exit() ## exit and terminate the application
return (self.__area_paredes * self.__area_teto)/10
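# Usage sketch (not part of the original file). The real room ("comodo") class
# lives elsewhere in the project, so a SimpleNamespace stands in for it here,
# assuming it only needs the attributes largura, profundidade and altura.
if __name__ == "__main__":
    from types import SimpleNamespace
    comodo_exemplo = SimpleNamespace(largura=4.0, profundidade=3.0, altura=2.8)  # hypothetical room
    calc = Calculadora()
    print(calc.calcular_area_paredes(comodo_exemplo))  # 2*(4.0+3.0)*2.8 = 39.2
    print(calc.calcular_area_teto(comodo_exemplo))     # 4.0*3.0 = 12.0
    print(calc.litragem_necessaria())                  # (39.2*12.0)/10 = 47.04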
| 51.764706 | 147 | 0.706818 |
4a1b95c63a53367ada44e69b6e851e31b78b835c
| 3,037 |
py
|
Python
|
contrib/linearize/linearize-hashes.py
|
MNOSIO/mnos-wallet
|
07bf0b6d1477daa895319be4b6a69a42aa2110ac
|
[
"MIT"
] | null | null | null |
contrib/linearize/linearize-hashes.py
|
MNOSIO/mnos-wallet
|
07bf0b6d1477daa895319be4b6a69a42aa2110ac
|
[
"MIT"
] | null | null | null |
contrib/linearize/linearize-hashes.py
|
MNOSIO/mnos-wallet
|
07bf0b6d1477daa895319be4b6a69a42aa2110ac
|
[
"MIT"
] | 4 |
2018-07-17T13:48:13.000Z
|
2018-08-20T04:32:42.000Z
|
#!/usr/bin/python
#
# linearize-hashes.py: List blocks in a linear, no-fork version of the chain.
#
# Copyright (c) 2013-2014 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
from __future__ import print_function
import json
import struct
import re
import base64
import httplib
import sys
settings = {}
class BitcoinRPC:
def __init__(self, host, port, username, password):
authpair = "%s:%s" % (username, password)
self.authhdr = "Basic %s" % (base64.b64encode(authpair))
self.conn = httplib.HTTPConnection(host, port, False, 30)
def execute(self, obj):
self.conn.request('POST', '/', json.dumps(obj),
{ 'Authorization' : self.authhdr,
'Content-type' : 'application/json' })
resp = self.conn.getresponse()
if resp is None:
print("JSON-RPC: no response", file=sys.stderr)
return None
body = resp.read()
resp_obj = json.loads(body)
return resp_obj
@staticmethod
def build_request(idx, method, params):
obj = { 'version' : '1.1',
'method' : method,
'id' : idx }
if params is None:
obj['params'] = []
else:
obj['params'] = params
return obj
@staticmethod
def response_is_error(resp_obj):
return 'error' in resp_obj and resp_obj['error'] is not None
def get_block_hashes(settings, max_blocks_per_call=10000):
rpc = BitcoinRPC(settings['host'], settings['port'],
settings['rpcuser'], settings['rpcpassword'])
height = settings['min_height']
while height < settings['max_height']+1:
num_blocks = min(settings['max_height']+1-height, max_blocks_per_call)
batch = []
for x in range(num_blocks):
batch.append(rpc.build_request(x, 'getblockhash', [height + x]))
reply = rpc.execute(batch)
for x,resp_obj in enumerate(reply):
if rpc.response_is_error(resp_obj):
print('JSON-RPC: error at height', height+x, ': ', resp_obj['error'], file=sys.stderr)
exit(1)
assert(resp_obj['id'] == x) # assume replies are in-sequence
print(resp_obj['result'])
height += num_blocks
if __name__ == '__main__':
if len(sys.argv) != 2:
print("Usage: linearize-hashes.py CONFIG-FILE")
sys.exit(1)
f = open(sys.argv[1])
for line in f:
# skip comment lines
                m = re.search(r'^\s*#', line)
if m:
continue
# parse key=value lines
                m = re.search(r'^(\w+)\s*=\s*(\S.*)$', line)
if m is None:
continue
settings[m.group(1)] = m.group(2)
f.close()
if 'host' not in settings:
settings['host'] = '127.0.0.1'
if 'port' not in settings:
settings['port'] = 6556
if 'min_height' not in settings:
settings['min_height'] = 0
if 'max_height' not in settings:
settings['max_height'] = 313000
if 'rpcuser' not in settings or 'rpcpassword' not in settings:
print("Missing username and/or password in cfg file", file=stderr)
sys.exit(1)
settings['port'] = int(settings['port'])
settings['min_height'] = int(settings['min_height'])
settings['max_height'] = int(settings['max_height'])
get_block_hashes(settings)
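# Illustrative configuration file for this script (not part of the original
# source). The file name and credentials are placeholders; the keys are the
# ones parsed above, with the same defaults the script falls back to.
#
#   host=127.0.0.1
#   port=6556
#   rpcuser=someuser
#   rpcpassword=somepassword
#   min_height=0
#   max_height=313000
#
# Example invocation: python linearize-hashes.py hashlist.cfg > blockhashes.txt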
| 26.640351 | 90 | 0.682581 |
4a1b9677bd6d2e51564c0529961043a0396a6bff
| 979 |
py
|
Python
|
conj1.py
|
kishlaya/random_graph
|
076e002b9a44ad554bd161c72d781fd66e3a89e2
|
[
"MIT"
] | null | null | null |
conj1.py
|
kishlaya/random_graph
|
076e002b9a44ad554bd161c72d781fd66e3a89e2
|
[
"MIT"
] | null | null | null |
conj1.py
|
kishlaya/random_graph
|
076e002b9a44ad554bd161c72d781fd66e3a89e2
|
[
"MIT"
] | null | null | null |
import main as graph
def check(adjacency_matrix):
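    """Sort the vertices by the absolute value of their leading-eigenvector
    component and increment the global fail_count if that ordering is not
    also non-decreasing in vertex degree."""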
global fail_count
mu, phi = graph.eigen(adjacency_matrix)
n = len(mu)
max_index = 0
for i in range(0,n):
if mu[i] > mu[max_index]:
max_index = i
mu_1 = mu[max_index]
phi_1 = phi[:, max_index]
test = []
for i in range(0,n):
x = abs(phi_1[i])
y = graph.find_degree(adjacency_matrix, i)
test.append((i,x,y))
test.sort(key=lambda x: x[1])
for i in range(0,n-1):
d_u = test[i][2]
d_v = test[i+1][2]
if d_u > d_v:
fail_count = fail_count + 1
break
# print(adjacency_matrix)
# print()
# print(test)
# print()
# graph.show_graph(adjacency_matrix)
# exit()
fail_count = 0
n = 50
trials = 10000
for j in range(0,trials):
g = graph.random_graph(n)
if graph.is_connected(g):
check(g)
print(fail_count/trials)
| 21.282609 | 50 | 0.531154 |
4a1b969ebe2890f32a98df6c81d72242d236a93a
| 4,683 |
py
|
Python
|
einsum_function.py
|
ronekko/capsnet
|
28dc11945ecf2d65f45834f7bc52cb9c5f079af4
|
[
"MIT"
] | null | null | null |
einsum_function.py
|
ronekko/capsnet
|
28dc11945ecf2d65f45834f7bc52cb9c5f079af4
|
[
"MIT"
] | null | null | null |
einsum_function.py
|
ronekko/capsnet
|
28dc11945ecf2d65f45834f7bc52cb9c5f079af4
|
[
"MIT"
] | null | null | null |
from collections import OrderedDict
import chainer.functions as F
def _parse_subscripts(subscripts):
if '->' in subscripts:
has_arrow = True
else:
subscripts = subscripts + '->'
has_arrow = False
in_subs, out_sub = subscripts.split('->')
in_subs = in_subs.split(',')
out_sub = out_sub.strip()
in_subs = [in_sub.strip() for in_sub in in_subs]
if not has_arrow:
out_sub = ''
shared_chars = set.intersection(*[set(sub) for sub in in_subs])
for subs in in_subs:
for char in subs:
if char not in shared_chars:
out_sub += char
return in_subs, out_sub
def einsum(subscripts, *operands):
"""Computs the einsum for Chainer's Variable.
Note that this is a very early implementation and supports only a few
functionalities of original numpy.einsum function.
"""
if not len(operands) == 2:
raise ValueError(
'Currently, `*operands` only supports the case of 2 arguments.')
a, b = operands
in_subs, out_subs = _parse_subscripts(subscripts)
a_subs, b_subs = in_subs
a_shape = OrderedDict()
for sub, length in zip(a_subs, a.shape):
a_shape[sub] = length
b_shape = OrderedDict()
for sub, length in zip(b_subs, b.shape):
b_shape[sub] = length
# Classify subscripts in a_subs and b_subs into 4 classes.
# There are 4 sets of subscripts that are:
# set_1: included in a_subs and b_subs and out_subs
# set_2: included in a_subs and b_subs but not in out_subs
# set_3a, set_3d: included in only a_subs or b_subs, and in out_subs
# set_4a, set_4d: included in only a_subs or b_subs, and not in out_subs
set_a = set(a_subs)
set_b = set(b_subs)
set_out = set(out_subs)
set_1 = set_a & set_b
set_3a = set_a - set_1
set_3b = set_b - set_1
set_2 = set_1 - set_out
set_1 = set_1 - set_2
set_4a = set_3a - set_out
set_3a = set_3a - set_4a
set_4b = set_3b - set_out
set_3b = set_3b - set_4b
#####################################################
    # Arrange a and b so they can be fed to the batched F.matmul below
# For the array a
a_new_subs_class = ['', '', '']
a_new_shape = [1, 1, 1]
for sub, length in a_shape.items():
if sub in set_1:
a_new_subs_class[0] += sub
a_new_shape[0] *= length
elif sub in set_2:
a_new_subs_class[2] += sub
a_new_shape[2] *= length
else:
a_new_subs_class[1] += sub
a_new_shape[1] *= length
transpose_axes = []
for sub in ''.join(a_new_subs_class):
transpose_axes.append(a_subs.index(sub))
a_ = F.transpose(a, transpose_axes)
a_ = a_.reshape(a_new_shape)
# For the array b
b_new_subs_class = ['', '', '']
b_new_shape = [1, 1, 1]
for sub, length in b_shape.items():
if sub in set_1:
b_new_subs_class[0] += sub
b_new_shape[0] *= length
elif sub in set_2:
b_new_subs_class[1] += sub
b_new_shape[1] *= length
else:
b_new_subs_class[2] += sub
b_new_shape[2] *= length
transpose_axes = []
for sub in ''.join(b_new_subs_class):
transpose_axes.append(b_subs.index(sub))
b_ = F.transpose(b, transpose_axes)
b_ = b_.reshape(b_new_shape)
#################
    # Target axes are reduced by the batched F.matmul and F.sum
c_ = F.matmul(a_, b_)
c_subs_class = [
a_new_subs_class[0], a_new_subs_class[1], b_new_subs_class[2]]
if set_4a:
size_sum = 1
for sub in set_4a:
size_sum *= a_shape[sub]
c_subs_class[1] = c_subs_class[1].replace(sub, '')
size_kept = a_new_shape[1] // size_sum
c_shape = c_.shape
c_ = c_.reshape(c_shape[0], size_kept, size_sum, c_shape[2])
c_ = F.sum(c_, axis=2)
if set_4b:
size_sum = 1
for sub in set_4b:
size_sum *= b_shape[sub]
c_subs_class[2] = c_subs_class[2].replace(sub, '')
size_kept = b_new_shape[2] // size_sum
c_shape = c_.shape
c_ = c_.reshape(c_shape[0], c_shape[1], size_kept, size_sum)
c_ = F.sum(c_, axis=3)
############################
# The output array is rearranged to output shape
c_subs = ''.join(c_subs_class)
sub_to_size = {**a_shape, **b_shape} # merging two dicts
c_shape = [sub_to_size[sub] for sub in c_subs]
c = c_.reshape(c_shape)
transpose_axes = []
for sub in out_subs:
transpose_axes.append(c_subs.index(sub))
c = c.transpose(transpose_axes)
return c
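# Minimal self-check sketch (not part of the original module). It assumes
# Chainer and NumPy are installed and that Chainer functions accept plain
# ndarrays; for 'ij,jk->ik' the result should match an ordinary matrix product.
if __name__ == "__main__":
    import numpy as np
    a = np.random.rand(2, 3).astype(np.float32)
    b = np.random.rand(3, 4).astype(np.float32)
    c = einsum('ij,jk->ik', a, b)  # returns a chainer.Variable
    assert np.allclose(c.data, np.einsum('ij,jk->ik', a, b), atol=1e-5)
    print('einsum check passed, output shape:', c.shape)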
| 31.013245 | 76 | 0.592569 |
4a1b9707b924c8fea9e04a043d626d7f2f5014f8
| 189 |
py
|
Python
|
Core/Null_Factory.py
|
BernardoB95/Extrator_SPEDFiscal
|
10b4697833c561d24654251da5f22d044f03fc16
|
[
"MIT"
] | 1 |
2021-04-25T13:53:20.000Z
|
2021-04-25T13:53:20.000Z
|
Core/Null_Factory.py
|
BernardoB95/Extrator_SPEDFiscal
|
10b4697833c561d24654251da5f22d044f03fc16
|
[
"MIT"
] | null | null | null |
Core/Null_Factory.py
|
BernardoB95/Extrator_SPEDFiscal
|
10b4697833c561d24654251da5f22d044f03fc16
|
[
"MIT"
] | null | null | null |
from .IFactory import IFactory
from Regs import NullReg
class Null_Factory(IFactory):
def create_block_object(self, line):
self.null = _null = NullReg()
return _null
| 18.9 | 40 | 0.703704 |
4a1b97b62fc3394d7f87f867bbd8e5b86a1d0a9e
| 11,550 |
py
|
Python
|
py/p4/v1/p4runtime_pb2_grpc.py
|
kishanps/p4runtime
|
235fbd62cc9e806f4d3e1b8ef448bab4ad91f031
|
[
"Apache-2.0"
] | null | null | null |
py/p4/v1/p4runtime_pb2_grpc.py
|
kishanps/p4runtime
|
235fbd62cc9e806f4d3e1b8ef448bab4ad91f031
|
[
"Apache-2.0"
] | null | null | null |
py/p4/v1/p4runtime_pb2_grpc.py
|
kishanps/p4runtime
|
235fbd62cc9e806f4d3e1b8ef448bab4ad91f031
|
[
"Apache-2.0"
] | null | null | null |
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
"""Client and server classes corresponding to protobuf-defined services."""
import grpc
from p4.v1 import p4runtime_pb2 as p4_dot_v1_dot_p4runtime__pb2
class P4RuntimeStub(object):
"""Missing associated documentation comment in .proto file."""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.Write = channel.unary_unary(
'/p4.v1.P4Runtime/Write',
request_serializer=p4_dot_v1_dot_p4runtime__pb2.WriteRequest.SerializeToString,
response_deserializer=p4_dot_v1_dot_p4runtime__pb2.WriteResponse.FromString,
)
self.Read = channel.unary_stream(
'/p4.v1.P4Runtime/Read',
request_serializer=p4_dot_v1_dot_p4runtime__pb2.ReadRequest.SerializeToString,
response_deserializer=p4_dot_v1_dot_p4runtime__pb2.ReadResponse.FromString,
)
self.SetForwardingPipelineConfig = channel.unary_unary(
'/p4.v1.P4Runtime/SetForwardingPipelineConfig',
request_serializer=p4_dot_v1_dot_p4runtime__pb2.SetForwardingPipelineConfigRequest.SerializeToString,
response_deserializer=p4_dot_v1_dot_p4runtime__pb2.SetForwardingPipelineConfigResponse.FromString,
)
self.GetForwardingPipelineConfig = channel.unary_unary(
'/p4.v1.P4Runtime/GetForwardingPipelineConfig',
request_serializer=p4_dot_v1_dot_p4runtime__pb2.GetForwardingPipelineConfigRequest.SerializeToString,
response_deserializer=p4_dot_v1_dot_p4runtime__pb2.GetForwardingPipelineConfigResponse.FromString,
)
self.StreamChannel = channel.stream_stream(
'/p4.v1.P4Runtime/StreamChannel',
request_serializer=p4_dot_v1_dot_p4runtime__pb2.StreamMessageRequest.SerializeToString,
response_deserializer=p4_dot_v1_dot_p4runtime__pb2.StreamMessageResponse.FromString,
)
self.Capabilities = channel.unary_unary(
'/p4.v1.P4Runtime/Capabilities',
request_serializer=p4_dot_v1_dot_p4runtime__pb2.CapabilitiesRequest.SerializeToString,
response_deserializer=p4_dot_v1_dot_p4runtime__pb2.CapabilitiesResponse.FromString,
)
class P4RuntimeServicer(object):
"""Missing associated documentation comment in .proto file."""
def Write(self, request, context):
"""Update one or more P4 entities on the target.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def Read(self, request, context):
"""Read one or more P4 entities from the target.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def SetForwardingPipelineConfig(self, request, context):
"""Sets the P4 forwarding-pipeline config.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def GetForwardingPipelineConfig(self, request, context):
"""Gets the current P4 forwarding-pipeline config.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def StreamChannel(self, request_iterator, context):
"""Represents the bidirectional stream between the controller and the
switch (initiated by the controller), and is managed for the following
purposes:
- connection initiation through client arbitration
- indicating switch session liveness: the session is live when switch
sends a positive client arbitration update to the controller, and is
considered dead when either the stream breaks or the switch sends a
negative update for client arbitration
- the controller sending/receiving packets to/from the switch
- streaming of notifications from the switch
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def Capabilities(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_P4RuntimeServicer_to_server(servicer, server):
rpc_method_handlers = {
'Write': grpc.unary_unary_rpc_method_handler(
servicer.Write,
request_deserializer=p4_dot_v1_dot_p4runtime__pb2.WriteRequest.FromString,
response_serializer=p4_dot_v1_dot_p4runtime__pb2.WriteResponse.SerializeToString,
),
'Read': grpc.unary_stream_rpc_method_handler(
servicer.Read,
request_deserializer=p4_dot_v1_dot_p4runtime__pb2.ReadRequest.FromString,
response_serializer=p4_dot_v1_dot_p4runtime__pb2.ReadResponse.SerializeToString,
),
'SetForwardingPipelineConfig': grpc.unary_unary_rpc_method_handler(
servicer.SetForwardingPipelineConfig,
request_deserializer=p4_dot_v1_dot_p4runtime__pb2.SetForwardingPipelineConfigRequest.FromString,
response_serializer=p4_dot_v1_dot_p4runtime__pb2.SetForwardingPipelineConfigResponse.SerializeToString,
),
'GetForwardingPipelineConfig': grpc.unary_unary_rpc_method_handler(
servicer.GetForwardingPipelineConfig,
request_deserializer=p4_dot_v1_dot_p4runtime__pb2.GetForwardingPipelineConfigRequest.FromString,
response_serializer=p4_dot_v1_dot_p4runtime__pb2.GetForwardingPipelineConfigResponse.SerializeToString,
),
'StreamChannel': grpc.stream_stream_rpc_method_handler(
servicer.StreamChannel,
request_deserializer=p4_dot_v1_dot_p4runtime__pb2.StreamMessageRequest.FromString,
response_serializer=p4_dot_v1_dot_p4runtime__pb2.StreamMessageResponse.SerializeToString,
),
'Capabilities': grpc.unary_unary_rpc_method_handler(
servicer.Capabilities,
request_deserializer=p4_dot_v1_dot_p4runtime__pb2.CapabilitiesRequest.FromString,
response_serializer=p4_dot_v1_dot_p4runtime__pb2.CapabilitiesResponse.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'p4.v1.P4Runtime', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
# This class is part of an EXPERIMENTAL API.
class P4Runtime(object):
"""Missing associated documentation comment in .proto file."""
@staticmethod
def Write(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/p4.v1.P4Runtime/Write',
p4_dot_v1_dot_p4runtime__pb2.WriteRequest.SerializeToString,
p4_dot_v1_dot_p4runtime__pb2.WriteResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def Read(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_stream(request, target, '/p4.v1.P4Runtime/Read',
p4_dot_v1_dot_p4runtime__pb2.ReadRequest.SerializeToString,
p4_dot_v1_dot_p4runtime__pb2.ReadResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def SetForwardingPipelineConfig(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/p4.v1.P4Runtime/SetForwardingPipelineConfig',
p4_dot_v1_dot_p4runtime__pb2.SetForwardingPipelineConfigRequest.SerializeToString,
p4_dot_v1_dot_p4runtime__pb2.SetForwardingPipelineConfigResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def GetForwardingPipelineConfig(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/p4.v1.P4Runtime/GetForwardingPipelineConfig',
p4_dot_v1_dot_p4runtime__pb2.GetForwardingPipelineConfigRequest.SerializeToString,
p4_dot_v1_dot_p4runtime__pb2.GetForwardingPipelineConfigResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def StreamChannel(request_iterator,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.stream_stream(request_iterator, target, '/p4.v1.P4Runtime/StreamChannel',
p4_dot_v1_dot_p4runtime__pb2.StreamMessageRequest.SerializeToString,
p4_dot_v1_dot_p4runtime__pb2.StreamMessageResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def Capabilities(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/p4.v1.P4Runtime/Capabilities',
p4_dot_v1_dot_p4runtime__pb2.CapabilitiesRequest.SerializeToString,
p4_dot_v1_dot_p4runtime__pb2.CapabilitiesResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
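# Illustrative client-side sketch (not generated code): how the stub above is
# typically constructed. The target address and port below are placeholders.
#
#   import grpc
#   from p4.v1 import p4runtime_pb2, p4runtime_pb2_grpc
#
#   channel = grpc.insecure_channel('localhost:9559')
#   stub = p4runtime_pb2_grpc.P4RuntimeStub(channel)
#   reply = stub.Capabilities(p4runtime_pb2.CapabilitiesRequest())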
| 46.95122 | 123 | 0.68 |
4a1b97f9467bf7a99a0b32a14bd0850c7c0ec253
| 3,069 |
py
|
Python
|
mmaction/datasets/audio_visual_dataset.py
|
Naoki-Wake/mmaction2
|
a2032605db82509744a18d993c94a06feb1efd15
|
[
"Apache-2.0"
] | 47 |
2021-09-02T10:42:29.000Z
|
2022-03-31T01:37:49.000Z
|
mmaction/datasets/audio_visual_dataset.py
|
xumingze0308/mmaction2
|
777546f27f8f5a3c83e10d966e2149be2fc9fa31
|
[
"Apache-2.0"
] | 2 |
2021-12-05T02:28:42.000Z
|
2022-01-05T06:46:10.000Z
|
mmaction/datasets/audio_visual_dataset.py
|
xumingze0308/mmaction2
|
777546f27f8f5a3c83e10d966e2149be2fc9fa31
|
[
"Apache-2.0"
] | 6 |
2021-09-19T16:31:32.000Z
|
2022-03-03T06:57:34.000Z
|
import os.path as osp
from .rawframe_dataset import RawframeDataset
from .registry import DATASETS
@DATASETS.register_module()
class AudioVisualDataset(RawframeDataset):
"""Dataset that reads both audio and visual data, supporting both rawframes
and videos. The annotation file is same as that of the rawframe dataset,
such as:
.. code-block:: txt
some/directory-1 163 1
some/directory-2 122 1
some/directory-3 258 2
some/directory-4 234 2
some/directory-5 295 3
some/directory-6 121 3
Args:
ann_file (str): Path to the annotation file.
pipeline (list[dict | callable]): A sequence of data transforms.
audio_prefix (str): Directory of the audio files.
kwargs (dict): Other keyword args for `RawframeDataset`. `video_prefix`
is also allowed if pipeline is designed for videos.
"""
def __init__(self, ann_file, pipeline, audio_prefix, **kwargs):
self.audio_prefix = audio_prefix
self.video_prefix = kwargs.pop('video_prefix', None)
self.data_prefix = kwargs.get('data_prefix', None)
super().__init__(ann_file, pipeline, **kwargs)
def load_annotations(self):
video_infos = []
with open(self.ann_file, 'r') as fin:
for line in fin:
line_split = line.strip().split()
video_info = {}
idx = 0
# idx for frame_dir
frame_dir = line_split[idx]
if self.audio_prefix is not None:
audio_path = osp.join(self.audio_prefix,
frame_dir + '.npy')
video_info['audio_path'] = audio_path
if self.video_prefix:
video_path = osp.join(self.video_prefix,
frame_dir + '.mp4')
video_info['filename'] = video_path
if self.data_prefix is not None:
frame_dir = osp.join(self.data_prefix, frame_dir)
video_info['frame_dir'] = frame_dir
idx += 1
if self.with_offset:
# idx for offset and total_frames
video_info['offset'] = int(line_split[idx])
video_info['total_frames'] = int(line_split[idx + 1])
idx += 2
else:
# idx for total_frames
video_info['total_frames'] = int(line_split[idx])
idx += 1
# idx for label[s]
label = [int(x) for x in line_split[idx:]]
assert len(label), f'missing label in line: {line}'
if self.multi_class:
assert self.num_classes is not None
video_info['label'] = label
else:
assert len(label) == 1
video_info['label'] = label[0]
video_infos.append(video_info)
return video_infos
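# Illustrative sketch (not part of the upstream module): how this dataset
# might be instantiated directly. Paths and the (empty) pipeline are
# placeholders; only the constructor arguments come from the class above.
#
#   dataset = AudioVisualDataset(
#       ann_file='data/annotations.txt',
#       pipeline=[],                        # normally a list of transform dicts
#       audio_prefix='data/audio_features',
#       video_prefix='data/videos',
#       data_prefix='data/rawframes')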
| 39.857143 | 79 | 0.539264 |
4a1b980d338f6f43158e458ec3cf43a49a62e188
| 89 |
py
|
Python
|
exercicios/ex024.py
|
Matheus1199/python
|
c87859d4bf63ba0edea43d864fcbce4915da7e6a
|
[
"MIT"
] | null | null | null |
exercicios/ex024.py
|
Matheus1199/python
|
c87859d4bf63ba0edea43d864fcbce4915da7e6a
|
[
"MIT"
] | null | null | null |
exercicios/ex024.py
|
Matheus1199/python
|
c87859d4bf63ba0edea43d864fcbce4915da7e6a
|
[
"MIT"
] | null | null | null |
cidade = input('Em qual cidade você nasceu? ').strip()
print(cidade[:5].upper() == 'SANTO')
| 44.5 | 54 | 0.674157 |
4a1b99127012512bde3b1a15ffa974c55820beb4
| 10,409 |
py
|
Python
|
src/genie/libs/parser/ios/tests/test_show_spanning_tree.py
|
Drey/genieparser
|
f16649efabf1f3c892bcaad340ae24ce5403ba6b
|
[
"Apache-2.0"
] | null | null | null |
src/genie/libs/parser/ios/tests/test_show_spanning_tree.py
|
Drey/genieparser
|
f16649efabf1f3c892bcaad340ae24ce5403ba6b
|
[
"Apache-2.0"
] | 1 |
2019-04-02T16:51:56.000Z
|
2019-04-02T16:51:56.000Z
|
src/genie/libs/parser/ios/tests/test_show_spanning_tree.py
|
Drey/genieparser
|
f16649efabf1f3c892bcaad340ae24ce5403ba6b
|
[
"Apache-2.0"
] | 1 |
2021-01-29T17:31:33.000Z
|
2021-01-29T17:31:33.000Z
|
#!/usr/bin/env python
import unittest
from unittest.mock import Mock
from ats.topology import Device
from genie.metaparser.util.exceptions import SchemaEmptyParserError,\
SchemaMissingKeyError
from genie.libs.parser.ios.show_spanning_tree import ShowSpanningTreeDetail, \
ShowSpanningTreeMstDetail, \
ShowSpanningTreeSummary, \
ShowErrdisableRecovery, \
ShowSpanningTree, \
ShowSpanningTreeMstConfiguration
from genie.libs.parser.iosxe.tests.test_show_spanning_tree import \
test_show_spanning_tree_summary as test_show_spanning_tree_summary_iosxe,\
test_show_spanning_tree_mst_configuration as test_show_spanning_tree_mst_configuration_iosxe,\
test_show_spanning_tree_detail as test_show_spanning_tree_detail_iosxe,\
test_show_spanning_tree_mst_detail as test_show_spanning_tree_mst_detail_iosxe,\
test_show_errdisable_recovery as test_show_errdisable_recovery_iosxe,\
test_show_spanning_tree as test_show_spanning_tree_iosxe
class test_show_spanning_tree_summary(test_show_spanning_tree_summary_iosxe):
golden_parsed_output_ios = {
"total_statistics": {
"forwardings": 0,
"listenings": 0,
"stp_actives": 0,
"learnings": 0,
"blockings": 0
},
"root_bridge_for": "none",
"bpdu_guard": False,
"uplink_fast": False,
"backbone_fast": False,
}
golden_output_ios = {'execute.return_value': '''\
Root bridge for: none.
PortFast BPDU Guard is disabled
UplinkFast is disabled
BackboneFast is disabled
Name Blocking Listening Learning Forwarding STP Active
-------------------- -------- --------- -------- ---------- ----------
Total 0 0 0 0 0
'''
}
def test_empty(self):
self.dev1 = Mock(**self.empty_output)
obj = ShowSpanningTreeSummary(device=self.dev1)
with self.assertRaises(SchemaEmptyParserError):
parsed_output = obj.parse()
def test_golden_mst(self):
self.maxDiff = None
self.dev_c3850 = Mock(**self.golden_output_mstp)
obj = ShowSpanningTreeSummary(device=self.dev_c3850)
parsed_output = obj.parse()
self.assertEqual(parsed_output,self.golden_parsed_output_mstp)
def test_golden_ios(self):
self.maxDiff = None
self.dev_c3850 = Mock(**self.golden_output_ios)
obj = ShowSpanningTreeSummary(device=self.dev_c3850)
parsed_output = obj.parse()
self.assertEqual(parsed_output,self.golden_parsed_output_ios)
def test_golden_single_mst(self):
self.maxDiff = None
self.dev_c3850 = Mock(**self.golden_output_mstp_single_mst)
obj = ShowSpanningTreeSummary(device=self.dev_c3850)
parsed_output = obj.parse()
self.assertEqual(parsed_output,self.golden_parsed_output_mstp_single_mst)
def test_golden_pvst(self):
self.maxDiff = None
self.dev_c3850 = Mock(**self.golden_output_pvst)
obj = ShowSpanningTreeSummary(device=self.dev_c3850)
parsed_output = obj.parse()
self.assertEqual(parsed_output,self.golden_parsed_output_pvst)
def test_golden_rpvst(self):
self.maxDiff = None
self.dev_c3850 = Mock(**self.golden_output_rpvst)
obj = ShowSpanningTreeSummary(device=self.dev_c3850)
parsed_output = obj.parse()
self.assertEqual(parsed_output,self.golden_parsed_output_rpvst)
class test_show_spanning_tree_detail(test_show_spanning_tree_detail_iosxe):
def test_empty(self):
self.dev1 = Mock(**self.empty_output)
obj = ShowSpanningTreeDetail(device=self.dev1)
with self.assertRaises(SchemaEmptyParserError):
parsed_output = obj.parse()
def test_golden_mstp(self):
self.dev_c3850 = Mock(**self.golden_output_mstp)
obj = ShowSpanningTreeDetail(device=self.dev_c3850)
parsed_output = obj.parse()
self.assertEqual(parsed_output,self.golden_parsed_output_mstp)
def test_golden_pvst(self):
self.maxDiff = None
self.dev_c3850 = Mock(**self.golden_output_pvst)
obj = ShowSpanningTreeDetail(device=self.dev_c3850)
parsed_output = obj.parse()
self.assertEqual(parsed_output,self.golden_parsed_output_pvst)
def test_golden_rapid_pvst(self):
self.maxDiff = None
self.dev_c3850 = Mock(**self.golden_output_rapid_pvst)
obj = ShowSpanningTreeDetail(device=self.dev_c3850)
parsed_output = obj.parse()
self.assertEqual(parsed_output,self.golden_parsed_output_rapid_pvst)
class test_show_spanning_tree_mst_detail(test_show_spanning_tree_mst_detail_iosxe):
def test_empty(self):
self.dev1 = Mock(**self.empty_output)
obj = ShowSpanningTreeMstDetail(device=self.dev1)
with self.assertRaises(SchemaEmptyParserError):
parsed_output = obj.parse()
def test_golden(self):
self.maxDiff = None
self.dev_c3850 = Mock(**self.golden_output)
obj = ShowSpanningTreeMstDetail(device=self.dev_c3850)
parsed_output = obj.parse()
self.assertEqual(parsed_output,self.golden_parsed_output)
class test_show_errdisable_recovery(test_show_errdisable_recovery_iosxe):
def test_empty(self):
self.dev1 = Mock(**self.empty_output)
obj = ShowErrdisableRecovery(device=self.dev1)
with self.assertRaises(SchemaEmptyParserError):
parsed_output = obj.parse()
def test_golden(self):
self.maxDiff = None
self.dev_c3850 = Mock(**self.golden_output)
obj = ShowErrdisableRecovery(device=self.dev_c3850)
parsed_output = obj.parse()
self.assertEqual(parsed_output,self.golden_parsed_output)
class test_show_spanning_tree(test_show_spanning_tree_iosxe):
golden_parsed_output_vlan = {
"mstp": {
"mst_instances": {
3: {
"root": {
"max_age": 20,
"interface": "GigabitEthernet3/8",
"forward_delay": 15,
"priority": 32771,
"cost": 20000,
"port": 136,
"address": "0050.14bb.6000",
"hello_time": 2
},
"bridge": {
"max_age": 20,
"priority": 32771,
"forward_delay": 15,
"configured_bridge_priority": 32768,
"sys_id_ext": 3,
"address": "00d0.003f.8800",
"hello_time": 2
},
"interfaces": {
"GigabitEthernet3/8": {
"port_num": 136,
"role": "root",
"port_state": "forwarding",
"type": "P2p",
"port_priority": 128,
"cost": 20000
},
"Port-channel1": {
"port_num": 833,
"role": "designated",
"port_state": "forwarding",
"type": "P2p",
"port_priority": 128,
"cost": 20000
}
}
}
}
}
}
golden_output_vlan = {'execute.return_value': '''
cat# show spanning-tree vlan 333
MST03
Spanning tree enabled protocol mstp
Root ID Priority 32771
Address 0050.14bb.6000
Cost 20000
Port 136 (GigabitEthernet3/8)
Hello Time 2 sec Max Age 20 sec Forward Delay 15 sec
Bridge ID Priority 32771 (priority 32768 sys-id-ext 3)
Address 00d0.003f.8800
Hello Time 2 sec Max Age 20 sec Forward Delay 15 sec
Interface Role Sts Cost Prio.Nbr Status
---------------- ---- --- --------- -------- ------------------------
Gi3/8 Root FWD 20000 128.136 P2p
Po1 Desg FWD 20000 128.833 P2p
'''}
def test_empty(self):
self.dev1 = Mock(**self.empty_output)
obj = ShowSpanningTree(device=self.dev1)
with self.assertRaises(SchemaEmptyParserError):
parsed_output = obj.parse()
def test_golden_mst(self):
self.maxDiff = None
self.dev_c3850 = Mock(**self.golden_output_mstp)
obj = ShowSpanningTree(device=self.dev_c3850)
parsed_output = obj.parse()
self.assertEqual(parsed_output,self.golden_parsed_output_mstp)
def test_golden_rstp(self):
self.maxDiff = None
self.dev_c3850 = Mock(**self.golden_output_rstp)
obj = ShowSpanningTree(device=self.dev_c3850)
parsed_output = obj.parse()
self.assertEqual(parsed_output,self.golden_parsed_output_rstp)
def test_golden_vlan(self):
self.maxDiff = None
self.dev_c3850 = Mock(**self.golden_output_vlan)
obj = ShowSpanningTree(device=self.dev_c3850)
parsed_output = obj.parse()
self.assertEqual(parsed_output,self.golden_parsed_output_vlan)
class test_show_spanning_tree_mst_configuration(test_show_spanning_tree_mst_configuration_iosxe):
def test_empty(self):
self.dev1 = Mock(**self.empty_output)
obj = ShowSpanningTreeMstConfiguration(device=self.dev1)
with self.assertRaises(SchemaEmptyParserError):
parsed_output = obj.parse()
def test_golden(self):
self.maxDiff = None
self.dev_c3850 = Mock(**self.golden_output)
obj = ShowSpanningTreeMstConfiguration(device=self.dev_c3850)
parsed_output = obj.parse()
self.assertEqual(parsed_output,self.golden_parsed_output)
if __name__ == '__main__':
unittest.main()
| 39.42803 | 98 | 0.591603 |
4a1b9961f1aa3acb809d7813feee8332b87add36
| 17,390 |
py
|
Python
|
ryu/base/app_manager.py
|
hiArvin/ryu
|
b568088f8fe1d2334d9773f6ddaac8674f2a0f61
|
[
"Apache-2.0"
] | 269 |
2015-03-08T11:32:45.000Z
|
2022-03-30T11:18:16.000Z
|
ryu/base/app_manager.py
|
hiArvin/ryu
|
b568088f8fe1d2334d9773f6ddaac8674f2a0f61
|
[
"Apache-2.0"
] | null | null | null |
ryu/base/app_manager.py
|
hiArvin/ryu
|
b568088f8fe1d2334d9773f6ddaac8674f2a0f61
|
[
"Apache-2.0"
] | 205 |
2015-01-13T04:52:25.000Z
|
2022-03-30T13:37:33.000Z
|
# Copyright (C) 2011-2014 Nippon Telegraph and Telephone Corporation.
# Copyright (C) 2011 Isaku Yamahata <yamahata at valinux co jp>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
The central management of Ryu applications.
- Load Ryu applications
- Provide `contexts` to Ryu applications
- Route messages among Ryu applications
"""
import inspect
import itertools
import logging
import sys
import os
from ryu import cfg
from ryu import utils
from ryu.app import wsgi
from ryu.controller.handler import register_instance, get_dependent_services
from ryu.controller.controller import Datapath
from ryu.controller import event
from ryu.controller.event import EventRequestBase, EventReplyBase
from ryu.lib import hub
from ryu.ofproto import ofproto_protocol
LOG = logging.getLogger('ryu.base.app_manager')
SERVICE_BRICKS = {}
def lookup_service_brick(name):
# LOG.info('lookup_service_brick %s', name)
return SERVICE_BRICKS.get(name)
def _lookup_service_brick_by_ev_cls(ev_cls):
return _lookup_service_brick_by_mod_name(ev_cls.__module__)
def _lookup_service_brick_by_mod_name(mod_name):
return lookup_service_brick(mod_name.split('.')[-1])
def register_app(app):
assert isinstance(app, RyuApp)
# print "SERVICE_BRICKS:", SERVICE_BRICKS
assert app.name not in SERVICE_BRICKS
SERVICE_BRICKS[app.name] = app
register_instance(app)
def unregister_app(app):
SERVICE_BRICKS.pop(app.name)
def require_app(app_name, api_style=False):
"""
Request the application to be automatically loaded.
    If this is used for "api" style modules, which are imported by a client
    application, set api_style=True.
If this is used for client application module, set api_style=False.
"""
if api_style:
frm = inspect.stack()[2] # skip a frame for "api" module
else:
frm = inspect.stack()[1]
m = inspect.getmodule(frm[0]) # client module
m._REQUIRED_APP = getattr(m, '_REQUIRED_APP', [])
m._REQUIRED_APP.append(app_name)
LOG.debug('require_app: %s is required by %s', app_name, m.__name__)
class RyuApp(object):
"""
The base class for Ryu applications.
    RyuApp subclasses are instantiated after ryu-manager has loaded
    all requested Ryu application modules.
__init__ should call RyuApp.__init__ with the same arguments.
It's illegal to send any events in __init__.
The instance attribute 'name' is the name of the class used for
message routing among Ryu applications. (Cf. send_event)
It's set to __class__.__name__ by RyuApp.__init__.
It's discouraged for subclasses to override this.
"""
_CONTEXTS = {}
"""
A dictionary to specify contexts which this Ryu application wants to use.
Its key is a name of context and its value is an ordinary class
which implements the context. The class is instantiated by app_manager
and the instance is shared among RyuApp subclasses which has _CONTEXTS
member with the same key. A RyuApp subclass can obtain a reference to
the instance via its __init__'s kwargs as the following.
Example::
_CONTEXTS = {
'network': network.Network
}
def __init__(self, *args, *kwargs):
self.network = kwargs['network']
"""
_EVENTS = []
"""
A list of event classes which this RyuApp subclass would generate.
    This should be specified if and only if event classes are defined in
    a different python module from the one the RyuApp subclass is defined in.
"""
OFP_VERSIONS = None
"""
A list of supported OpenFlow versions for this RyuApp.
The default is all versions supported by the framework.
Examples::
OFP_VERSIONS = [ofproto_v1_0.OFP_VERSION,
ofproto_v1_2.OFP_VERSION]
If multiple Ryu applications are loaded in the system,
the intersection of their OFP_VERSIONS is used.
"""
@classmethod
def context_iteritems(cls):
"""
        Return an iterator over the (key, context class) pairs of the application contexts
"""
return cls._CONTEXTS.iteritems()
def __init__(self, *_args, **_kwargs):
super(RyuApp, self).__init__()
self.name = self.__class__.__name__
self.event_handlers = {} # ev_cls -> handlers:list
self.observers = {} # ev_cls -> observer-name -> states:set
self.threads = []
self.events = hub.Queue(128)
if hasattr(self.__class__, 'LOGGER_NAME'):
self.logger = logging.getLogger(self.__class__.LOGGER_NAME)
else:
self.logger = logging.getLogger(self.name)
self.CONF = cfg.CONF
# prevent accidental creation of instances of this class outside RyuApp
class _EventThreadStop(event.EventBase):
pass
self._event_stop = _EventThreadStop()
self.is_active = True
def start(self):
"""
Hook that is called after startup initialization is done.
"""
self.threads.append(hub.spawn(self._event_loop))
def stop(self):
self.is_active = False
self._send_event(self._event_stop, None)
hub.joinall(self.threads)
def register_handler(self, ev_cls, handler):
assert callable(handler)
self.event_handlers.setdefault(ev_cls, [])
self.event_handlers[ev_cls].append(handler)
def unregister_handler(self, ev_cls, handler):
assert callable(handler)
self.event_handlers[ev_cls].remove(handler)
if not self.event_handlers[ev_cls]:
del self.event_handlers[ev_cls]
def register_observer(self, ev_cls, name, states=None):
states = states or set()
ev_cls_observers = self.observers.setdefault(ev_cls, {})
ev_cls_observers.setdefault(name, set()).update(states)
def unregister_observer(self, ev_cls, name):
observers = self.observers.get(ev_cls, {})
observers.pop(name)
def unregister_observer_all_event(self, name):
for observers in self.observers.values():
observers.pop(name, None)
def observe_event(self, ev_cls, states=None):
brick = _lookup_service_brick_by_ev_cls(ev_cls)
if brick is not None:
brick.register_observer(ev_cls, self.name, states)
def unobserve_event(self, ev_cls):
brick = _lookup_service_brick_by_ev_cls(ev_cls)
if brick is not None:
brick.unregister_observer(ev_cls, self.name)
def get_handlers(self, ev, state=None):
"""Returns a list of handlers for the specific event.
:param ev: The event to handle.
:param state: The current state. ("dispatcher")
If None is given, returns all handlers for the event.
Otherwise, returns only handlers that are interested
in the specified state.
The default is None.
"""
ev_cls = ev.__class__
handlers = self.event_handlers.get(ev_cls, [])
if state is None:
return handlers
def test(h):
if not hasattr(h, 'callers') or ev_cls not in h.callers:
                # dynamically registered handlers do not have an
# h.callers element for the event.
return True
states = h.callers[ev_cls].dispatchers
if not states:
# empty states means all states
return True
return state in states
return filter(test, handlers)
def get_observers(self, ev, state):
observers = []
for k, v in self.observers.get(ev.__class__, {}).iteritems():
if not state or not v or state in v:
observers.append(k)
return observers
def send_request(self, req):
"""
Make a synchronous request.
Set req.sync to True, send it to a Ryu application specified by
req.dst, and block until receiving a reply.
Returns the received reply.
The argument should be an instance of EventRequestBase.
"""
assert isinstance(req, EventRequestBase)
req.sync = True
req.reply_q = hub.Queue()
self.send_event(req.dst, req)
# going to sleep for the reply
return req.reply_q.get()
def _event_loop(self):
while self.is_active or not self.events.empty():
ev, state = self.events.get()
if ev == self._event_stop:
continue
handlers = self.get_handlers(ev, state)
for handler in handlers:
handler(ev)
def _send_event(self, ev, state):
self.events.put((ev, state))
def send_event(self, name, ev, state=None):
"""
Send the specified event to the RyuApp instance specified by name.
"""
if name in SERVICE_BRICKS:
if isinstance(ev, EventRequestBase):
ev.src = self.name
LOG.debug("EVENT %s->%s %s" %
(self.name, name, ev.__class__.__name__))
SERVICE_BRICKS[name]._send_event(ev, state)
else:
LOG.debug("EVENT LOST %s->%s %s" %
(self.name, name, ev.__class__.__name__))
def send_event_to_observers(self, ev, state=None):
"""
Send the specified event to all observers of this RyuApp.
"""
for observer in self.get_observers(ev, state):
self.send_event(observer, ev, state)
def reply_to_request(self, req, rep):
"""
Send a reply for a synchronous request sent by send_request.
The first argument should be an instance of EventRequestBase.
The second argument should be an instance of EventReplyBase.
"""
assert isinstance(req, EventRequestBase)
assert isinstance(rep, EventReplyBase)
rep.dst = req.src
if req.sync:
req.reply_q.put(rep)
else:
self.send_event(rep.dst, rep)
def close(self):
"""
teardown method.
The method name, close, is chosen for python context manager
"""
pass
class AppManager(object):
    # singleton
_instance = None
@staticmethod
def run_apps(app_lists):
"""Run a set of Ryu applications
A convenient method to load and instantiate apps.
This blocks until all relevant apps stop.
"""
app_mgr = AppManager.get_instance()
app_mgr.load_apps(app_lists)
contexts = app_mgr.create_contexts()
services = app_mgr.instantiate_apps(**contexts)
webapp = wsgi.start_service(app_mgr)
if webapp:
services.append(hub.spawn(webapp))
try:
hub.joinall(services)
finally:
app_mgr.close()
@staticmethod
def get_instance():
if not AppManager._instance:
AppManager._instance = AppManager()
return AppManager._instance
def __init__(self):
self.applications_cls = {}
self.applications = {}
self.contexts_cls = {}
self.contexts = {}
def load_app(self, name):
mod = utils.import_module(name)
clses = inspect.getmembers(mod,
lambda cls: (inspect.isclass(cls) and
issubclass(cls, RyuApp) and
mod.__name__ ==
cls.__module__))
if clses:
return clses[0][1]
return None
def load_apps(self, app_lists):
app_lists = [app for app
in itertools.chain.from_iterable(app.split(',')
for app in app_lists)]
while len(app_lists) > 0:
app_cls_name = app_lists.pop(0)
context_modules = map(lambda x: x.__module__,
self.contexts_cls.values())
if app_cls_name in context_modules:
continue
LOG.info('loading app %s', app_cls_name)
cls = self.load_app(app_cls_name)
if cls is None:
continue
self.applications_cls[app_cls_name] = cls
services = []
for key, context_cls in cls.context_iteritems():
v = self.contexts_cls.setdefault(key, context_cls)
assert v == context_cls
context_modules.append(context_cls.__module__)
if issubclass(context_cls, RyuApp):
services.extend(get_dependent_services(context_cls))
                # we can't load an app that will be initiated for
                # contexts.
for i in get_dependent_services(cls):
if i not in context_modules:
services.append(i)
if services:
app_lists.extend([s for s in set(services)
if s not in app_lists])
def create_contexts(self):
for key, cls in self.contexts_cls.items():
if issubclass(cls, RyuApp):
# hack for dpset
context = self._instantiate(None, cls)
else:
context = cls()
LOG.info('creating context %s', key)
assert key not in self.contexts
self.contexts[key] = context
return self.contexts
def _update_bricks(self):
for i in SERVICE_BRICKS.values():
for _k, m in inspect.getmembers(i, inspect.ismethod):
if not hasattr(m, 'callers'):
continue
for ev_cls, c in m.callers.iteritems():
if not c.ev_source:
continue
brick = _lookup_service_brick_by_mod_name(c.ev_source)
if brick:
brick.register_observer(ev_cls, i.name,
c.dispatchers)
# allow RyuApp and Event class are in different module
for brick in SERVICE_BRICKS.itervalues():
if ev_cls in brick._EVENTS:
brick.register_observer(ev_cls, i.name,
c.dispatchers)
@staticmethod
def _report_brick(name, app):
LOG.debug("BRICK %s" % name)
for ev_cls, list_ in app.observers.items():
LOG.debug(" PROVIDES %s TO %s" % (ev_cls.__name__, list_))
for ev_cls in app.event_handlers.keys():
LOG.debug(" CONSUMES %s" % (ev_cls.__name__,))
@staticmethod
def report_bricks():
for brick, i in SERVICE_BRICKS.items():
AppManager._report_brick(brick, i)
def _instantiate(self, app_name, cls, *args, **kwargs):
# for now, only single instance of a given module
# Do we need to support multiple instances?
# Yes, maybe for slicing.
#LOG.info('instantiating app %s of %s', app_name, cls.__name__)
if hasattr(cls, 'OFP_VERSIONS') and cls.OFP_VERSIONS is not None:
ofproto_protocol.set_app_supported_versions(cls.OFP_VERSIONS)
if app_name is not None:
assert app_name not in self.applications
app = cls(*args, **kwargs)
register_app(app)
assert app.name not in self.applications
self.applications[app.name] = app
return app
def instantiate(self, cls, *args, **kwargs):
app = self._instantiate(None, cls, *args, **kwargs)
self._update_bricks()
self._report_brick(app.name, app)
return app
def instantiate_apps(self, *args, **kwargs):
for app_name, cls in self.applications_cls.items():
self._instantiate(app_name, cls, *args, **kwargs)
self._update_bricks()
self.report_bricks()
threads = []
for app in self.applications.values():
t = app.start()
if t is not None:
threads.append(t)
return threads
@staticmethod
def _close(app):
close_method = getattr(app, 'close', None)
if callable(close_method):
close_method()
def uninstantiate(self, name):
app = self.applications.pop(name)
unregister_app(app)
for app_ in SERVICE_BRICKS.values():
app_.unregister_observer_all_event(name)
app.stop()
self._close(app)
events = app.events
if not events.empty():
            app.logger.debug('%s still has %d events in its queue', app.name, events.qsize())
def close(self):
def close_all(close_dict):
for app in close_dict.values():
self._close(app)
close_dict.clear()
close_all(self.applications)
close_all(self.contexts)
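# Illustrative sketch (not part of the original module): a minimal RyuApp that
# relies on the machinery above. ofp_event and set_ev_cls come from the
# ryu.controller package; the handler body is a placeholder.
#
#   from ryu.base import app_manager
#   from ryu.controller import ofp_event
#   from ryu.controller.handler import CONFIG_DISPATCHER, set_ev_cls
#
#   class HelloApp(app_manager.RyuApp):
#       @set_ev_cls(ofp_event.EventOFPSwitchFeatures, CONFIG_DISPATCHER)
#       def switch_features_handler(self, ev):
#           self.logger.info('switch connected: %016x', ev.msg.datapath.id)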
| 33.442308 | 79 | 0.608166 |
4a1b9987fbab6e4449e88811a6e14ba2c29acc01
| 3,043 |
py
|
Python
|
tpdatasrc/co8infra/scr/Spell432 - Shout.py
|
edoipi/TemplePlus
|
f0e552289822fea908f16daa379fa568b1bd286d
|
[
"MIT"
] | 69 |
2015-05-05T14:09:25.000Z
|
2022-02-15T06:13:04.000Z
|
tpdatasrc/co8infra/scr/Spell432 - Shout.py
|
edoipi/TemplePlus
|
f0e552289822fea908f16daa379fa568b1bd286d
|
[
"MIT"
] | 457 |
2015-05-01T22:07:45.000Z
|
2022-03-31T02:19:10.000Z
|
tpdatasrc/co8infra/scr/Spell432 - Shout.py
|
edoipi/TemplePlus
|
f0e552289822fea908f16daa379fa568b1bd286d
|
[
"MIT"
] | 25 |
2016-02-04T21:19:53.000Z
|
2021-11-15T23:14:51.000Z
|
from toee import *
def OnBeginSpellCast( spell ):
print "Shout OnBeginSpellCast"
print "spell.target_list=", spell.target_list
print "spell.caster=", spell.caster, " caster.level= ", spell.caster_level
game.particles( "sp-evocation-conjure", spell.caster )
def OnSpellEffect( spell ):
print "Shout OnSpellEffect"
remove_list = []
damage_dice = dice_new( '5d6' )
duration_dice = dice_new( '2d6' )
spell.duration = duration_dice.roll()
earth_dam = dice_new ( '1d6' )
earth_dam.number = min( 15, spell.caster_level )
game.particles( 'sp-Shout', spell.caster )
# get all targets in a 25ft + 2ft/level cone (60')
npc = spell.caster ## added so NPC's can target Shout
#### Caster is NOT in game party
if npc.type != obj_t_pc and npc.leader_get() == OBJ_HANDLE_NULL:
# range = 25 + 2 * int(spell.caster_level/2)
range = 30
target_list = list(game.obj_list_cone( spell.caster, OLC_CRITTERS, range, -30, 90 ))
target_list.remove(spell.caster)
for obj in target_list:
if not obj.saving_throw_spell( spell.dc, D20_Save_Fortitude, D20STD_F_NONE, spell.caster, spell.id ):
# saving throw unsuccessful
obj.spell_damage( spell.caster, D20DT_SONIC, damage_dice, D20DAP_UNSPECIFIED, D20A_CAST_SPELL, spell.id )
obj.condition_add_with_args( 'sp-Shout', spell.id, spell.duration, 0 )
# obj.partsys_id = game.particles( 'sp-Shout-Hit', obj )
game.particles( 'sp-Shout-Hit', obj )
else:
# saving throw successful, apply half damage
obj.float_mesfile_line( 'mes\\spell.mes', 30001 )
obj.spell_damage_with_reduction( spell.caster, D20DT_SONIC, damage_dice, D20DAP_UNSPECIFIED, DAMAGE_REDUCTION_HALF, D20A_CAST_SPELL, spell.id )
# obj.partsys_id = game.particles( 'sp-Shout-Hit', obj )
game.particles( 'sp-Shout-Hit', obj )
game.particles( 'Fizzle', obj )
#### Caster is in game party
if npc.type == obj_t_pc or npc.leader_get() != OBJ_HANDLE_NULL:
for target_item in spell.target_list:
if not target_item.obj.saving_throw_spell( spell.dc, D20_Save_Fortitude, D20STD_F_NONE, spell.caster, spell.id ):
# saving throw unsuccessful
target_item.obj.spell_damage( spell.caster, D20DT_SONIC, damage_dice, D20DAP_UNSPECIFIED, D20A_CAST_SPELL, spell.id )
target_item.obj.condition_add_with_args( 'sp-Shout', spell.id, spell.duration, 0 )
target_item.partsys_id = game.particles( 'sp-Shout-Hit', target_item.obj )
else:
# saving throw successful, apply half damage
target_item.obj.float_mesfile_line( 'mes\\spell.mes', 30001 )
target_item.obj.spell_damage_with_reduction( spell.caster, D20DT_SONIC, damage_dice, D20DAP_UNSPECIFIED, DAMAGE_REDUCTION_HALF, D20A_CAST_SPELL, spell.id )
target_item.partsys_id = game.particles( 'sp-Shout-Hit', target_item.obj )
game.particles( 'Fizzle', target_item.obj )
remove_list.append( target_item.obj )
spell.target_list.remove_list( remove_list )
spell.spell_end( spell.id )
def OnBeginRound( spell ):
print "Shout OnBeginRound"
def OnEndSpellCast( spell ):
print "Shout OnEndSpellCast"
| 40.039474 | 159 | 0.734473 |
4a1b99e7edeaaddd9feabf3859830c9bcaab5ca1
| 1,032 |
py
|
Python
|
jp.atcoder/abc111/arc103_b/9362830.py
|
kagemeka/atcoder-submissions
|
91d8ad37411ea2ec582b10ba41b1e3cae01d4d6e
|
[
"MIT"
] | 1 |
2022-02-09T03:06:25.000Z
|
2022-02-09T03:06:25.000Z
|
jp.atcoder/abc111/arc103_b/9362830.py
|
kagemeka/atcoder-submissions
|
91d8ad37411ea2ec582b10ba41b1e3cae01d4d6e
|
[
"MIT"
] | 1 |
2022-02-05T22:53:18.000Z
|
2022-02-09T01:29:30.000Z
|
jp.atcoder/abc111/arc103_b/9362830.py
|
kagemeka/atcoder-submissions
|
91d8ad37411ea2ec582b10ba41b1e3cae01d4d6e
|
[
"MIT"
] | null | null | null |
import sys
n = int(sys.stdin.readline().rstrip())
xy = list(zip(*[map(int, sys.stdin.read().split())] * 2))
mask = (1 << 31) - 1
def solve(x, y, mode):
u = x + y
v = x - y
if mode == 0:
u -= 1
v -= 1
s = (u + mask) >> 1
t = (v + mask) >> 1
l = (~(s | t)) & mask
r = s & t & mask
d = (~s) & t & mask
u = s & (~t) & mask
res = ''
for i in range(30, -1, -1):
if l >> i & 1:
res += 'L'
elif r >> i & 1:
res += 'R'
elif d >> i & 1:
res += 'D'
elif u >> i & 1:
res += 'U'
if mode == 0:
res += 'R'
return [res]
def main():
oe = (xy[0][0] + xy[0][1]) % 2
for x, y in xy:
if (x + y) % 2 != oe:
return [-1]
yield [31]
yield [2 ** i for i in range(31)]
for x, y in xy:
yield solve(x, y, oe)
if __name__ == '__main__':
ans = main()
for i in ans:
print(*i, sep=' ')
| 19.846154 | 58 | 0.351744 |
4a1b99f4b9bc9adbb608c4a914229d206f5f1be5
| 1,877 |
py
|
Python
|
homeassistant/components/binary_sensor/verisure.py
|
veresspeter/home-assistant
|
a3c22c6eec2c77a28a8fa859e899984269708666
|
[
"Apache-2.0"
] | 37 |
2018-05-22T07:17:26.000Z
|
2022-03-03T13:14:46.000Z
|
homeassistant/components/binary_sensor/verisure.py
|
veresspeter/home-assistant
|
a3c22c6eec2c77a28a8fa859e899984269708666
|
[
"Apache-2.0"
] | 125 |
2018-12-11T07:31:20.000Z
|
2021-07-27T08:20:03.000Z
|
homeassistant/components/binary_sensor/verisure.py
|
y1ngyang/home-assistant
|
7e39a5c4d50cf5754f5f32a84870ca57a5778b02
|
[
"Apache-2.0"
] | 8 |
2018-05-30T20:05:26.000Z
|
2021-02-19T14:17:05.000Z
|
"""
Interfaces with Verisure sensors.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/binary_sensor.verisure/
"""
import logging
from homeassistant.components.binary_sensor import BinarySensorDevice
from homeassistant.components.verisure import CONF_DOOR_WINDOW
from homeassistant.components.verisure import HUB as hub
_LOGGER = logging.getLogger(__name__)
def setup_platform(hass, config, add_devices, discovery_info=None):
"""Set up the Verisure binary sensors."""
sensors = []
hub.update_overview()
if int(hub.config.get(CONF_DOOR_WINDOW, 1)):
sensors.extend([
VerisureDoorWindowSensor(device_label)
for device_label in hub.get(
"$.doorWindow.doorWindowDevice[*].deviceLabel")])
add_devices(sensors)
class VerisureDoorWindowSensor(BinarySensorDevice):
"""Representation of a Verisure door window sensor."""
def __init__(self, device_label):
"""Initialize the Verisure door window sensor."""
self._device_label = device_label
@property
def name(self):
"""Return the name of the binary sensor."""
return hub.get_first(
"$.doorWindow.doorWindowDevice[?(@.deviceLabel=='%s')].area",
self._device_label)
@property
def is_on(self):
"""Return the state of the sensor."""
return hub.get_first(
"$.doorWindow.doorWindowDevice[?(@.deviceLabel=='%s')].state",
self._device_label) == "OPEN"
@property
def available(self):
"""Return True if entity is available."""
return hub.get_first(
"$.doorWindow.doorWindowDevice[?(@.deviceLabel=='%s')]",
self._device_label) is not None
def update(self):
"""Update the state of the sensor."""
hub.update_overview()
| 31.283333 | 74 | 0.667022 |
4a1b9a7ed076a140db7cc0775024fc8634e48c2c
| 28 |
py
|
Python
|
tlbpy/rl_algos/__init__.py
|
Guitheg/tlbpy
|
67f87012bbca67142a9810bbeb0e5ba413993d41
|
[
"MIT"
] | null | null | null |
tlbpy/rl_algos/__init__.py
|
Guitheg/tlbpy
|
67f87012bbca67142a9810bbeb0e5ba413993d41
|
[
"MIT"
] | null | null | null |
tlbpy/rl_algos/__init__.py
|
Guitheg/tlbpy
|
67f87012bbca67142a9810bbeb0e5ba413993d41
|
[
"MIT"
] | null | null | null |
from .rl_algo import Rl_algo
| 28 | 28 | 0.857143 |
4a1b9c399dd2f094c9c86f1571a16dd8f9da4fe5
| 1,715 |
py
|
Python
|
Tools/ProjectGenerator/CreateProject.py
|
uberpixel/Rayne
|
94f601561aedfc3200e67ff9522f64fbc5ca4d8c
|
[
"MIT"
] | 13 |
2020-08-08T11:57:05.000Z
|
2022-03-10T17:29:19.000Z
|
Tools/ProjectGenerator/CreateProject.py
|
uberpixel/Rayne
|
94f601561aedfc3200e67ff9522f64fbc5ca4d8c
|
[
"MIT"
] | 1 |
2022-03-10T17:35:28.000Z
|
2022-03-10T18:21:57.000Z
|
Tools/ProjectGenerator/CreateProject.py
|
uberpixel/Rayne
|
94f601561aedfc3200e67ff9522f64fbc5ca4d8c
|
[
"MIT"
] | 3 |
2020-08-08T14:22:34.000Z
|
2021-05-15T21:12:17.000Z
|
import os
import sys
import datetime
def main():
templateName = input("Template name? ")
projectNameString = input("Project name? ")
projectName = projectNameString.encode()
projectTarget = projectNameString.replace(" ", "").encode()
prefixString = input("Project prefix? ")
prefix = prefixString.encode()
companyName = input("Company name? ").encode()
bundleID = input("Bundle ID? ").encode()
year = str(datetime.datetime.now().year).encode()
fromdir = os.path.join(os.path.dirname(sys.argv[0]), "Templates")
fromdir = os.path.join(fromdir, templateName)
for root, subdirs, files in os.walk(fromdir):
relativeRoot = os.path.relpath(root, fromdir)
for subdir in subdirs:
os.makedirs(os.path.join(relativeRoot, subdir))
for filename in files:
if filename == ".DS_Store":
continue
readFilePath = os.path.join(root, filename)
if filename == "gitattributes":
filename = ".gitattributes"
filename = filename.replace("__TMP__", prefixString)
writeFilePath = os.path.join(relativeRoot, filename)
with open(readFilePath, 'rb') as readFile:
fileContent = readFile.read()
fileContent = fileContent.replace("__TMP__".encode(), prefix)
fileContent = fileContent.replace("__TMP_BUNDLE_ID__".encode(), bundleID)
fileContent = fileContent.replace("__TMP_APPLICATION_NAME__".encode(), projectName)
fileContent = fileContent.replace("__TMP_APPLICATION_TARGET__".encode(), projectTarget)
fileContent = fileContent.replace("__TMP_COMPANY__".encode(), companyName)
fileContent = fileContent.replace("__TMP_YEAR__".encode(), year)
with open(writeFilePath, 'wb') as writeFile:
writeFile.write(fileContent)
if __name__ == '__main__':
main()
| 35 | 91 | 0.724781 |
4a1b9f76326f96b5e172977b7e2c29700d0ab015
| 2,654 |
py
|
Python
|
tests/test_unasync.py
|
nsidnev/unasyncer
|
e48f9104b123707f621adce87721f1028912094d
|
[
"MIT"
] | null | null | null |
tests/test_unasync.py
|
nsidnev/unasyncer
|
e48f9104b123707f621adce87721f1028912094d
|
[
"MIT"
] | null | null | null |
tests/test_unasync.py
|
nsidnev/unasyncer
|
e48f9104b123707f621adce87721f1028912094d
|
[
"MIT"
] | null | null | null |
import os
import pathlib
import pytest
from unasyncer.unasync import unasync_path
@pytest.fixture
def expected_unasynced_code(sources: pathlib.Path) -> str:
with open(sources / "right_sync.py") as right_sync_file:
return right_sync_file.read()
def test_create_right_sync_structure_for_dir(sources: pathlib.Path) -> None:
async_folder = sources / "_async"
sync_folder = sources / "_sync"
[_ for _ in unasync_path(async_folder, sync_folder)] # consume iterator
required_paths = [
sync_folder / "__init__.py",
sync_folder / "inner_package",
sync_folder / "inner_package" / "__init__.py",
sync_folder / "inner_package" / "logic.py",
]
for path in required_paths:
assert path.exists()
def test_create_unasynced_single_file(sources: pathlib.Path) -> None:
async_file_path = sources / "_async" / "inner_package" / "logic.py"
sync_file_path = sources / "_async" / "inner_package" / "sync.py"
next(unasync_path(async_file_path, sync_file_path))
assert sync_file_path.exists()
def test_unasynced_file_content(
sources: pathlib.Path, expected_unasynced_code: str
) -> None:
async_file_path = sources / "_async" / "inner_package" / "logic.py"
sync_file_path = sources / "_async" / "inner_package" / "sync.py"
next(unasync_path(async_file_path, sync_file_path))
with open(sync_file_path) as sync_file:
content = sync_file.read()
assert content == expected_unasynced_code
def test_skiping_not_files_or_dirs(sources: pathlib.Path) -> None:
async_folder = sources / "_async"
sync_folder = sources / "_sync"
fifo_path = async_folder / "fifo"
os.mkfifo(fifo_path)
[_ for _ in unasync_path(async_folder, sync_folder)] # consume iterator
assert not (sync_folder / "fifo").exists()
def test_raising_error_for_not_files_or_dirs_in_unasync_path(
sources: pathlib.Path,
) -> None:
async_folder = sources / "_async"
sync_folder = sources / "_sync"
fifo_path = async_folder / "fifo"
os.mkfifo(fifo_path)
with pytest.raises(ValueError):
next(unasync_path(fifo_path, sync_folder / "fifo"))
def test_raising_error_if_dir_does_not_exist_and_creation_disabled(
sources: pathlib.Path,
) -> None:
async_folder = sources / "_async"
sync_folder = sources / "_sync"
with pytest.raises(RuntimeError):
next(unasync_path(async_folder, sync_folder, create_missed_paths=False))
def test_raising_error_if_path_does_not_exist() -> None:
with pytest.raises(FileNotFoundError):
next(unasync_path(pathlib.Path("error_path"), pathlib.Path("_sync")))
| 28.234043 | 80 | 0.712509 |
4a1b9fce96b9c9acdff3098e6234608f93bcd4a1
| 27,024 |
py
|
Python
|
src/pretix/base/signals.py
|
NicsTr/pretix
|
e6d2380d9ed1836cc64a688b2be20d00a8500eab
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
src/pretix/base/signals.py
|
NicsTr/pretix
|
e6d2380d9ed1836cc64a688b2be20d00a8500eab
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
src/pretix/base/signals.py
|
NicsTr/pretix
|
e6d2380d9ed1836cc64a688b2be20d00a8500eab
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
import warnings
from typing import Any, Callable, List, Tuple
import django.dispatch
from django.apps import apps
from django.conf import settings
from django.dispatch.dispatcher import NO_RECEIVERS
from .models import Event
app_cache = {}
def _populate_app_cache():
global app_cache
apps.check_apps_ready()
for ac in apps.app_configs.values():
app_cache[ac.name] = ac
class EventPluginSignal(django.dispatch.Signal):
"""
This is an extension to Django's built-in signals. It differs in that it sends
its events only to receivers that belong to plugins enabled for the given
Event.
"""
def _is_active(self, sender, receiver):
if sender is None:
# Send to all events!
return True
# Find the Django application this belongs to
searchpath = receiver.__module__
core_module = any([searchpath.startswith(cm) for cm in settings.CORE_MODULES])
app = None
if not core_module:
while True:
app = app_cache.get(searchpath)
if "." not in searchpath or app:
break
searchpath, _ = searchpath.rsplit(".", 1)
# Only fire receivers from active plugins and core modules
excluded = settings.PRETIX_PLUGINS_EXCLUDE
if core_module or (sender and app and app.name in sender.get_plugins() and app.name not in excluded):
if not hasattr(app, 'compatibility_errors') or not app.compatibility_errors:
return True
return False
def send(self, sender: Event, **named) -> List[Tuple[Callable, Any]]:
"""
Send signal from sender to all connected receivers that belong to
plugins enabled for the given Event.
sender is required to be an instance of ``pretix.base.models.Event``.
"""
if sender and not isinstance(sender, Event):
raise ValueError("Sender needs to be an event.")
responses = []
if not self.receivers or self.sender_receivers_cache.get(sender) is NO_RECEIVERS:
return responses
if not app_cache:
_populate_app_cache()
for receiver in self._sorted_receivers(sender):
if self._is_active(sender, receiver):
response = receiver(signal=self, sender=sender, **named)
responses.append((receiver, response))
return responses
def send_chained(self, sender: Event, chain_kwarg_name, **named) -> List[Tuple[Callable, Any]]:
"""
Send signal from sender to all connected receivers. The return value of the first receiver
will be used as the keyword argument specified by ``chain_kwarg_name`` in the input to the
second receiver and so on. The return value of the last receiver is returned by this method.
sender is required to be an instance of ``pretix.base.models.Event``.
"""
if sender and not isinstance(sender, Event):
raise ValueError("Sender needs to be an event.")
response = named.get(chain_kwarg_name)
if not self.receivers or self.sender_receivers_cache.get(sender) is NO_RECEIVERS:
return response
if not app_cache:
_populate_app_cache()
for receiver in self._sorted_receivers(sender):
if self._is_active(sender, receiver):
named[chain_kwarg_name] = response
response = receiver(signal=self, sender=sender, **named)
return response
def send_robust(self, sender: Event, **named) -> List[Tuple[Callable, Any]]:
"""
Send signal from sender to all connected receivers. If a receiver raises an exception
instead of returning a value, the exception is included as the result instead of
stopping the response chain at the offending receiver.
sender is required to be an instance of ``pretix.base.models.Event``.
"""
if sender and not isinstance(sender, Event):
raise ValueError("Sender needs to be an event.")
responses = []
if (
not self.receivers
or self.sender_receivers_cache.get(sender) is NO_RECEIVERS
):
return []
if not app_cache:
_populate_app_cache()
for receiver in self._sorted_receivers(sender):
if self._is_active(sender, receiver):
try:
response = receiver(signal=self, sender=sender, **named)
except Exception as err:
responses.append((receiver, err))
else:
responses.append((receiver, response))
return responses
def _sorted_receivers(self, sender):
orig_list = self._live_receivers(sender)
sorted_list = sorted(
orig_list,
key=lambda receiver: (
0 if any(receiver.__module__.startswith(m) for m in settings.CORE_MODULES) else 1,
receiver.__module__,
receiver.__name__,
)
)
return sorted_list
class GlobalSignal(django.dispatch.Signal):
def send_chained(self, sender: Event, chain_kwarg_name, **named) -> List[Tuple[Callable, Any]]:
"""
Send signal from sender to all connected receivers. The return value of the first receiver
will be used as the keyword argument specified by ``chain_kwarg_name`` in the input to the
second receiver and so on. The return value of the last receiver is returned by this method.
"""
response = named.get(chain_kwarg_name)
if not self.receivers or self.sender_receivers_cache.get(sender) is NO_RECEIVERS:
return response
for receiver in self._live_receivers(sender):
named[chain_kwarg_name] = response
response = receiver(signal=self, sender=sender, **named)
return response
class DeprecatedSignal(django.dispatch.Signal):
def connect(self, receiver, sender=None, weak=True, dispatch_uid=None):
warnings.warn('This signal is deprecated and will soon be removed', stacklevel=3)
super().connect(receiver, sender=None, weak=True, dispatch_uid=None)
event_live_issues = EventPluginSignal(
providing_args=[]
)
"""
This signal is sent out to determine whether an event can be taken live. If you want to
prevent the event from going live, return a string that will be displayed to the user
as the error message. If you don't, your receiver should return ``None``.
As with all event-plugin signals, the ``sender`` keyword argument will contain the event.
"""
register_payment_providers = EventPluginSignal(
providing_args=[]
)
"""
This signal is sent out to get all known payment providers. Receivers should return a
subclass of pretix.base.payment.BasePaymentProvider or a list of these
As with all event-plugin signals, the ``sender`` keyword argument will contain the event.
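A minimal sketch of a receiver in a plugin module (``MyPaymentProvider`` is a
hypothetical subclass of ``BasePaymentProvider``, not part of pretix itself)::

    from django.dispatch import receiver
    from pretix.base.signals import register_payment_providers

    @receiver(register_payment_providers, dispatch_uid="payment_myprovider")
    def register_payment_provider(sender, **kwargs):
        # return the provider class; pretix instantiates it per event
        from .payment import MyPaymentProvider
        return MyPaymentProvider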
"""
register_mail_placeholders = EventPluginSignal(
providing_args=[]
)
"""
This signal is sent out to get all known email text placeholders. Receivers should return
an instance of a subclass of pretix.base.email.BaseMailTextPlaceholder or a list of these.
As with all event-plugin signals, the ``sender`` keyword argument will contain the event.
"""
register_html_mail_renderers = EventPluginSignal(
providing_args=[]
)
"""
This signal is sent out to get all known HTML email renderers. Receivers should return a
subclass of pretix.base.email.BaseHTMLMailRenderer or a list of these
As with all event-plugin signals, the ``sender`` keyword argument will contain the event.
"""
register_invoice_renderers = EventPluginSignal(
providing_args=[]
)
"""
This signal is sent out to get all known invoice renderers. Receivers should return a
subclass of pretix.base.invoice.BaseInvoiceRenderer or a list of these
As with all event-plugin signals, the ``sender`` keyword argument will contain the event.
"""
register_ticket_secret_generators = EventPluginSignal(
providing_args=[]
)
"""
This signal is sent out to get all known ticket secret generators. Receivers should return a
subclass of ``pretix.base.secrets.BaseTicketSecretGenerator`` or a list of these
As with all event-plugin signals, the ``sender`` keyword argument will contain the event.
"""
register_data_shredders = EventPluginSignal(
providing_args=[]
)
"""
This signal is sent out to get all known data shredders. Receivers should return a
subclass of pretix.base.shredder.BaseDataShredder or a list of these
As with all event-plugin signals, the ``sender`` keyword argument will contain the event.
"""
register_ticket_outputs = EventPluginSignal(
providing_args=[]
)
"""
This signal is sent out to get all known ticket outputs. Receivers should return a
subclass of pretix.base.ticketoutput.BaseTicketOutput
As with all event-plugin signals, the ``sender`` keyword argument will contain the event.
"""
register_notification_types = EventPluginSignal(
providing_args=[]
)
"""
This signal is sent out to get all known notification types. Receivers should return an
instance of a subclass of pretix.base.notifications.NotificationType or a list of such
instances.
As with all event-plugin signals, the ``sender`` keyword argument will contain the event,
however for this signal, the ``sender`` **may also be None** to allow creating the general
notification settings!
"""
register_sales_channels = django.dispatch.Signal(
providing_args=[]
)
"""
This signal is sent out to get all known sales channels types. Receivers should return an
instance of a subclass of ``pretix.base.channels.SalesChannel`` or a list of such
instances.
"""
register_data_exporters = EventPluginSignal(
providing_args=[]
)
"""
This signal is sent out to get all known data exporters. Receivers should return a
subclass of pretix.base.exporter.BaseExporter
As with all event-plugin signals, the ``sender`` keyword argument will contain the event.
"""
register_multievent_data_exporters = django.dispatch.Signal(
providing_args=["event"]
)
"""
This signal is sent out to get all known data exporters, which support exporting data for
multiple events. Receivers should return a subclass of pretix.base.exporter.BaseExporter
The ``sender`` keyword argument will contain an organizer.
"""
validate_order = EventPluginSignal(
providing_args=["payment_provider", "positions", "email", "locale", "invoice_address",
"meta_info"]
)
"""
This signal is sent out when the user tries to confirm the order, before we actually create
the order. It allows you to inspect the cart positions. Your return value will be ignored,
but you can raise an OrderError with an appropriate exception message if you want to block
the order. We strongly discourage making changes to the order here.
As with all event-plugin signals, the ``sender`` keyword argument will contain the event.
"""
validate_cart = EventPluginSignal(
providing_args=["positions"]
)
"""
This signal is sent out before the user starts checkout. It includes an iterable
with the current CartPosition objects.
The response of receivers will be ignored, but you can raise a CartError with an
appropriate exception message.
As with all event-plugin signals, the ``sender`` keyword argument will contain the event.
"""
validate_cart_addons = EventPluginSignal(
providing_args=["addons", "base_position", "iao"]
)
"""
This signal is sent when a user tries to select a combination of addons. In contrast to
``validate_cart``, this is executed before the cart is actually modified. You are passed
an argument ``addons`` containing a dict of ``(item, variation or None) → count`` tuples as well
as the ``ItemAddOn`` object as the argument ``iao`` and the base cart position as
``base_position``.
The response of receivers will be ignored, but you can raise a CartError with an
appropriate exception message.
As with all event-plugin signals, the ``sender`` keyword argument will contain the event.
"""
order_placed = EventPluginSignal(
providing_args=["order"]
)
"""
This signal is sent out every time an order is placed. The order object is given
as the first argument. This signal is *not* sent out if an order is created through
splitting an existing order, so you can not expect to see all orders by listening
to this signal.
As with all event-plugin signals, the ``sender`` keyword argument will contain the event.
"""
order_paid = EventPluginSignal(
providing_args=["order"]
)
"""
This signal is sent out every time an order is paid. The order object is given
as the first argument. This signal is *not* sent out if an order is marked as paid
because an already-paid order has been split.
As with all event-plugin signals, the ``sender`` keyword argument will contain the event.
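An illustrative receiver sketch (the ``logger`` name and the plugin module are
assumptions, not pretix code)::

    @receiver(order_paid, dispatch_uid="myplugin_order_paid")
    def on_order_paid(sender, order, **kwargs):
        # sender is the Event, order is the paid Order
        logger.info("Order %s of event %s was paid", order.code, sender.slug)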
"""
order_canceled = EventPluginSignal(
providing_args=["order"]
)
"""
This signal is sent out every time an order is canceled. The order object is given
as the first argument.
As with all event-plugin signals, the ``sender`` keyword argument will contain the event.
"""
order_reactivated = EventPluginSignal(
providing_args=["order"]
)
"""
This signal is sent out every time a canceled order is reactivated. The order object is given
as the first argument.
As with all event-plugin signals, the ``sender`` keyword argument will contain the event.
"""
order_expired = EventPluginSignal(
providing_args=["order"]
)
"""
This signal is sent out every time an order is marked as expired. The order object is given
as the first argument.
As with all event-plugin signals, the ``sender`` keyword argument will contain the event.
"""
order_modified = EventPluginSignal(
providing_args=["order"]
)
"""
This signal is sent out every time an order's information is modified. The order object is given
as the first argument.
As with all event-plugin signals, the ``sender`` keyword argument will contain the event.
"""
order_changed = EventPluginSignal(
providing_args=["order"]
)
"""
This signal is sent out every time an order's content is changed. The order object is given
as the first argument.
As with all event-plugin signals, the ``sender`` keyword argument will contain the event.
"""
order_approved = EventPluginSignal(
providing_args=["order"]
)
"""
This signal is sent out every time an order is being approved. The order object is given
as the first argument.
As with all event-plugin signals, the ``sender`` keyword argument will contain the event.
"""
order_denied = EventPluginSignal(
providing_args=["order"]
)
"""
This signal is sent out every time an order is being denied. The order object is given
as the first argument.
As with all event-plugin signals, the ``sender`` keyword argument will contain the event.
"""
order_gracefully_delete = EventPluginSignal(
providing_args=["order"]
)
"""
This signal is sent out every time a test-mode order is being deleted. The order object
is given as the first argument.
Any plugin receiving this signals is supposed to perform any cleanup necessary at this
point, so that the underlying order has no more external constraints that would inhibit
the deletion of the order.
As with all event-plugin signals, the ``sender`` keyword argument will contain the event.
"""
checkin_created = EventPluginSignal(
providing_args=["checkin"],
)
"""
This signal is sent out every time a check-in is created (i.e. an order position is marked as
checked in). It is not sent if the position was already checked in and is force-checked-in a second time.
The check-in object is given as the first argument.
As with all event-plugin signals, the ``sender`` keyword argument will contain the event.
"""
logentry_display = EventPluginSignal(
providing_args=["logentry"]
)
"""
To display an instance of the ``LogEntry`` model to a human user,
``pretix.base.signals.logentry_display`` will be sent out with a ``logentry`` argument.
The first received response that is not ``None`` will be used to display the log entry
to the user. The receivers are expected to return plain text.
As with all event-plugin signals, the ``sender`` keyword argument will contain the event.
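A minimal, hypothetical receiver (the ``action_type`` value is made up for
illustration)::

    @receiver(logentry_display, dispatch_uid="myplugin_logentry_display")
    def display_logentry(sender, logentry, **kwargs):
        # return plain text for log entries this plugin knows about
        if logentry.action_type == "myplugin.widget.changed":
            return "The widget configuration was changed."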
"""
logentry_object_link = EventPluginSignal(
providing_args=["logentry"]
)
"""
To display the relationship of an instance of the ``LogEntry`` model to another model
to a human user, ``pretix.base.signals.logentry_object_link`` will be sent out with a
``logentry`` argument.
The first received response that is not ``None`` will be used to display the related object
to the user. The receivers are expected to return a HTML link. The internal implementation
builds the links like this::
a_text = _('Tax rule {val}')
a_map = {
'href': reverse('control:event.settings.tax.edit', kwargs={
'event': sender.slug,
'organizer': sender.organizer.slug,
'rule': logentry.content_object.id
}),
'val': escape(logentry.content_object.name),
}
a_map['val'] = '<a href="{href}">{val}</a>'.format_map(a_map)
return a_text.format_map(a_map)
Make sure that any user content in the HTML code you return is properly escaped!
As with all event-plugin signals, the ``sender`` keyword argument will contain the event.
"""
requiredaction_display = EventPluginSignal(
providing_args=["action", "request"]
)
"""
To display an instance of the ``RequiredAction`` model to a human user,
``pretix.base.signals.requiredaction_display`` will be sent out with an ``action`` argument.
You will also get the current ``request`` in a different argument.
The first received response that is not ``None`` will be used to display the log entry
to the user. The receivers are expected to return HTML code.
As with all event-plugin signals, the ``sender`` keyword argument will contain the event.
"""
event_copy_data = EventPluginSignal(
providing_args=["other", "tax_map", "category_map", "item_map", "question_map", "variation_map", "checkin_list_map"]
)
"""
This signal is sent out when a new event is created as a clone of an existing event, i.e.
the settings from the older event are copied to the newer one. You can listen to this
signal to copy data or configuration stored within your plugin's models as well.
You don't need to copy data inside the general settings storage which is cloned automatically,
but you might need to modify that data.
The ``sender`` keyword argument will contain the event of the **new** event. The ``other``
keyword argument will contain the event to **copy from**. The keyword arguments
``tax_map``, ``category_map``, ``item_map``, ``question_map``, ``variation_map`` and
``checkin_list_map`` contain mappings from object IDs in the original event to objects
in the new event of the respective types.
"""
item_copy_data = EventPluginSignal(
providing_args=["source", "target"]
)
"""
This signal is sent out when a new product is created as a clone of an existing product, i.e.
the settings from the older product are copied to the newer one. You can listen to this
signal to copy data or configuration stored within your plugin's models as well.
The ``sender`` keyword argument will contain the event. The ``target`` will contain the item to
copy to, the ``source`` keyword argument will contain the product to **copy from**.
"""
periodic_task = django.dispatch.Signal()
"""
This is a regular django signal (no pretix event signal) that we send out every
time the periodic task cronjob runs. This interval is not sharply defined; it can
be anything between a minute and a day. The actions you perform should be
idempotent, i.e. it should not make a difference if this is sent out more often
than expected.
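A sketch of an idempotent receiver (``MyCacheEntry`` is a hypothetical plugin
model, not part of pretix)::

    from django.dispatch import receiver
    from django.utils.timezone import now

    @receiver(periodic_task, dispatch_uid="myplugin_periodic")
    def cleanup_expired_entries(sender, **kwargs):
        # safe to run any number of times: only removes already-expired rows
        MyCacheEntry.objects.filter(expires__lt=now()).delete()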
"""
register_global_settings = django.dispatch.Signal()
"""
All plugins that are installed may send fields for the global settings form, as
an OrderedDict of (setting name, form field).
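A hedged example (the setting name and form field are illustrative only)::

    from collections import OrderedDict
    from django import forms

    @receiver(register_global_settings, dispatch_uid="myplugin_global_settings")
    def global_settings(sender, **kwargs):
        # map setting names in the global settings store to form fields
        return OrderedDict([
            ("myplugin_api_key", forms.CharField(label="My plugin API key", required=False)),
        ])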
"""
order_fee_calculation = EventPluginSignal(
providing_args=['positions', 'invoice_address', 'meta_info', 'total', 'gift_cards']
)
"""
This signal allows you to add fees to an order while it is being created. You are expected to
return a list of ``OrderFee`` objects that are not yet saved to the database
(because there is no order yet).
As with all plugin signals, the ``sender`` keyword argument will contain the event. A ``positions``
argument will contain the cart positions and ``invoice_address`` the invoice address (useful for
tax calculation). The argument ``meta_info`` contains the order's meta dictionary. The ``total``
keyword argument will contain the total cart sum without any fees. You should not rely on this
``total`` value for fee calculations as other fees might interfere. The ``gift_cards`` argument lists
the gift cards in use.
"""
order_fee_type_name = EventPluginSignal(
providing_args=['request', 'fee']
)
"""
This signal allows you to return a human-readable description for a fee type based on the ``fee_type``
and ``internal_type`` attributes of the ``OrderFee`` model that you get as keyword arguments. You are
expected to return a string or None, if you don't know about this fee.
As with all plugin signals, the ``sender`` keyword argument will contain the event.
"""
allow_ticket_download = EventPluginSignal(
providing_args=['order']
)
"""
This signal is sent out to check if tickets for an order can be downloaded. If any receiver returns false,
a download will not be offered.
As with all event-plugin signals, the ``sender`` keyword argument will contain the event.
"""
email_filter = EventPluginSignal(
providing_args=['message', 'order', 'user']
)
"""
This signal allows you to implement a middleware-style filter on all outgoing emails. You are expected to
return a (possibly modified) copy of the message object passed to you.
As with all event-plugin signals, the ``sender`` keyword argument will contain the event.
The ``message`` argument will contain an ``EmailMultiAlternatives`` object.
If the email is associated with a specific order, the ``order`` argument will be passed as well, otherwise
it will be ``None``.
If the email is associated with a specific user, e.g. a notification email, the ``user`` argument will be passed as
well, otherwise it will be ``None``.
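A minimal sketch of a filter receiver (the footer text is an assumption)::

    @receiver(email_filter, dispatch_uid="myplugin_email_filter")
    def add_footer(sender, message, order=None, user=None, **kwargs):
        # modify and return the EmailMultiAlternatives object
        message.body += " -- sent for event {}".format(sender.slug)
        return message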
"""
global_email_filter = GlobalSignal(
providing_args=['message', 'order', 'user']
)
"""
This signal allows you to implement a middleware-style filter on all outgoing emails. You are expected to
return a (possibly modified) copy of the message object passed to you.
This signal is called on all events and even if there is no known event. ``sender`` is an event or None.
The ``message`` argument will contain an ``EmailMultiAlternatives`` object.
If the email is associated with a specific order, the ``order`` argument will be passed as well, otherwise
it will be ``None``.
If the email is associated with a specific user, e.g. a notification email, the ``user`` argument will be passed as
well, otherwise it will be ``None``.
"""
layout_text_variables = EventPluginSignal()
"""
This signal is sent out to collect variables that can be used to display text in ticket-related PDF layouts.
Receivers are expected to return a dictionary with globally unique identifiers as keys and more
dictionaries as values that contain keys like in the following example::
return {
"product": {
"label": _("Product name"),
"editor_sample": _("Sample product"),
"evaluate": lambda orderposition, order, event: str(orderposition.item)
}
}
The evaluate member will be called with the order position, order and event as arguments. The event might
also be a subevent, if applicable.
"""
timeline_events = EventPluginSignal()
"""
This signal is sent out to collect events for the timeline shown on event dashboards. You are passed
a ``subevent`` argument which might be none and you are expected to return a list of instances of
``pretix.base.timeline.TimelineEvent``, which is a ``namedtuple`` with the fields ``event``, ``subevent``,
``datetime``, ``description`` and ``edit_url``.
"""
quota_availability = EventPluginSignal(
providing_args=['quota', 'result', 'count_waitinglist']
)
"""
This signal allows you to modify the availability of a quota. You are passed the ``quota`` and an
``availability`` result calculated by pretix code or other plugins. ``availability`` is a tuple
with the first entry being one of the ``Quota.AVAILABILITY_*`` constants and the second entry being
the number of available tickets (or ``None`` for unlimited). You are expected to return a value
of the same type. The parameter ``count_waitinglists`` specifies whether waiting lists should be taken
into account.
**Warning: Use this signal with great caution, it allows you to screw up the performance of the
system really bad.** Also, keep in mind that your response is subject to caching and out-of-date
quotas might be used for display (not for actual order processing).
"""
order_split = EventPluginSignal(
providing_args=["original", "split_order"]
)
"""
This signal is sent out when an order is split into two orders and allows you to copy related models
to the new order. You will be passed the old order as ``original`` and the new order as ``split_order``.
"""
invoice_line_text = EventPluginSignal(
providing_args=["position"]
)
"""
This signal is sent out when an invoice is built for an order. You can return additional text that
should be shown on the invoice for the given ``position``.
"""
order_import_columns = EventPluginSignal(
providing_args=[]
)
"""
This signal is sent out if the user performs an import of orders from an external source. You can use this
to define additional columns that can be read during import. You are expected to return a list of instances of
``ImportColumn`` subclasses.
As with all event-plugin signals, the ``sender`` keyword argument will contain the event.
"""
validate_event_settings = EventPluginSignal(
providing_args=["settings_dict"]
)
"""
This signal is sent out if the user performs an update of event settings through the API or web interface.
You are passed a ``settings_dict`` dictionary with the new state of the event settings object and are expected
to raise a ``django.core.exceptions.ValidationError`` if the new state is not valid.
You can not modify the dictionary. This is only recommended to use if you have multiple settings
that can only be validated together. To validate individual settings, pass a validator to the
serializer field instead.
As with all event-plugin signals, the ``sender`` keyword argument will contain the event.
"""
api_event_settings_fields = EventPluginSignal(
providing_args=[]
)
"""
This signal is sent out to collect serializable settings fields for the API. You are expected to
return a dictionary mapping names of attributes in the settings store to DRF serializer field instances.
As with all event-plugin signals, the ``sender`` keyword argument will contain the event.
"""
| 38.061972 | 120 | 0.729315 |
4a1b9ff8969405142293748d3e19d2f62807d220
| 7,168 |
py
|
Python
|
packages/python/plotly/plotly/graph_objs/scattergl/_line.py
|
eranws/plotly.py
|
5b0e8d3ccab55fe1a6e4ba123cfc9d718a9ffc5a
|
[
"MIT"
] | null | null | null |
packages/python/plotly/plotly/graph_objs/scattergl/_line.py
|
eranws/plotly.py
|
5b0e8d3ccab55fe1a6e4ba123cfc9d718a9ffc5a
|
[
"MIT"
] | null | null | null |
packages/python/plotly/plotly/graph_objs/scattergl/_line.py
|
eranws/plotly.py
|
5b0e8d3ccab55fe1a6e4ba123cfc9d718a9ffc5a
|
[
"MIT"
] | null | null | null |
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Line(_BaseTraceHierarchyType):
# class properties
# --------------------
_parent_path_str = "scattergl"
_path_str = "scattergl.line"
_valid_props = {"color", "dash", "shape", "width"}
# color
# -----
@property
def color(self):
"""
Sets the line color.
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
Returns
-------
str
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
# dash
# ----
@property
def dash(self):
"""
Sets the style of the lines.
The 'dash' property is an enumeration that may be specified as:
- One of the following enumeration values:
['solid', 'dot', 'dash', 'longdash', 'dashdot',
'longdashdot']
Returns
-------
Any
"""
return self["dash"]
@dash.setter
def dash(self, val):
self["dash"] = val
# shape
# -----
@property
def shape(self):
"""
Determines the line shape. The values correspond to step-wise
line shapes.
The 'shape' property is an enumeration that may be specified as:
- One of the following enumeration values:
['linear', 'hv', 'vh', 'hvh', 'vhv']
Returns
-------
Any
"""
return self["shape"]
@shape.setter
def shape(self, val):
self["shape"] = val
# width
# -----
@property
def width(self):
"""
Sets the line width (in px).
The 'width' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["width"]
@width.setter
def width(self, val):
self["width"] = val
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
color
Sets the line color.
dash
Sets the style of the lines.
shape
Determines the line shape. The values correspond to
step-wise line shapes.
width
Sets the line width (in px).
"""
def __init__(
self, arg=None, color=None, dash=None, shape=None, width=None, **kwargs
):
"""
Construct a new Line object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.scattergl.Line`
color
Sets the line color.
dash
Sets the style of the lines.
shape
Determines the line shape. The values correspond to
step-wise line shapes.
width
Sets the line width (in px).
Returns
-------
Line
"""
super(Line, self).__init__("line")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.scattergl.Line
constructor must be a dict or
an instance of :class:`plotly.graph_objs.scattergl.Line`"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("color", None)
_v = color if color is not None else _v
if _v is not None:
self["color"] = _v
_v = arg.pop("dash", None)
_v = dash if dash is not None else _v
if _v is not None:
self["dash"] = _v
_v = arg.pop("shape", None)
_v = shape if shape is not None else _v
if _v is not None:
self["shape"] = _v
_v = arg.pop("width", None)
_v = width if width is not None else _v
if _v is not None:
self["width"] = _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
| 31.165217 | 82 | 0.527623 |
4a1ba0697069f81bec8ee3fd2f3e7ae3c9e8051e
| 4,943 |
py
|
Python
|
cirq-core/cirq/interop/quirk/cells/control_cells_test.py
|
dabacon/Cirq
|
54286063f679d67501ff1b905cd16b879feaae27
|
[
"Apache-2.0"
] | 1 |
2021-04-29T15:30:32.000Z
|
2021-04-29T15:30:32.000Z
|
cirq-core/cirq/interop/quirk/cells/control_cells_test.py
|
bradyb/Cirq
|
610b0d4ea3a7862169610797266734c844ddcc1f
|
[
"Apache-2.0"
] | 1 |
2020-04-03T20:23:20.000Z
|
2020-04-03T20:23:20.000Z
|
cirq-core/cirq/interop/quirk/cells/control_cells_test.py
|
bradyb/Cirq
|
610b0d4ea3a7862169610797266734c844ddcc1f
|
[
"Apache-2.0"
] | 2 |
2021-09-22T11:16:46.000Z
|
2021-09-23T12:55:22.000Z
|
# Copyright 2019 The Cirq Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import cirq
from cirq.interop.quirk.cells.testing import assert_url_to_circuit_returns
def test_controls():
a, b = cirq.LineQubit.range(2)
assert_url_to_circuit_returns(
'{"cols":[["•","X"]]}',
cirq.Circuit(
cirq.X(b).controlled_by(a),
),
)
assert_url_to_circuit_returns(
'{"cols":[["◦","X"]]}',
cirq.Circuit(
cirq.X(a),
cirq.X(b).controlled_by(a),
cirq.X(a),
),
)
assert_url_to_circuit_returns(
'{"cols":[["⊕","X"]]}',
cirq.Circuit(
cirq.Y(a) ** 0.5,
cirq.X(b).controlled_by(a),
cirq.Y(a) ** -0.5,
),
output_amplitudes_from_quirk=[
{"r": 0.5, "i": 0},
{"r": -0.5, "i": 0},
{"r": 0.5, "i": 0},
{"r": 0.5, "i": 0},
],
)
assert_url_to_circuit_returns(
'{"cols":[["⊖","X"]]}',
cirq.Circuit(
cirq.Y(a) ** -0.5,
cirq.X(b).controlled_by(a),
cirq.Y(a) ** +0.5,
),
output_amplitudes_from_quirk=[
{"r": 0.5, "i": 0},
{"r": 0.5, "i": 0},
{"r": 0.5, "i": 0},
{"r": -0.5, "i": 0},
],
)
assert_url_to_circuit_returns(
'{"cols":[["⊗","X"]]}',
cirq.Circuit(
cirq.X(a) ** -0.5,
cirq.X(b).controlled_by(a),
cirq.X(a) ** +0.5,
),
output_amplitudes_from_quirk=[
{"r": 0.5, "i": 0},
{"r": 0, "i": -0.5},
{"r": 0.5, "i": 0},
{"r": 0, "i": 0.5},
],
)
assert_url_to_circuit_returns(
'{"cols":[["(/)","X"]]}',
cirq.Circuit(
cirq.X(a) ** +0.5,
cirq.X(b).controlled_by(a),
cirq.X(a) ** -0.5,
),
output_amplitudes_from_quirk=[
{"r": 0.5, "i": 0},
{"r": 0, "i": 0.5},
{"r": 0.5, "i": 0},
{"r": 0, "i": -0.5},
],
)
qs = cirq.LineQubit.range(8)
assert_url_to_circuit_returns(
'{"cols":[["X","•","◦","⊕","⊖","⊗","(/)","Z"]]}',
cirq.Circuit(
cirq.X(qs[2]),
cirq.Y(qs[3]) ** 0.5,
cirq.Y(qs[4]) ** -0.5,
cirq.X(qs[5]) ** -0.5,
cirq.X(qs[6]) ** 0.5,
cirq.X(qs[0]).controlled_by(*qs[1:7]),
cirq.Z(qs[7]).controlled_by(*qs[1:7]),
cirq.X(qs[6]) ** -0.5,
cirq.X(qs[5]) ** 0.5,
cirq.Y(qs[4]) ** 0.5,
cirq.Y(qs[3]) ** -0.5,
cirq.X(qs[2]),
),
)
def test_parity_controls():
a, b, c, d, e = cirq.LineQubit.range(5)
assert_url_to_circuit_returns(
'{"cols":[["Y","xpar","ypar","zpar","Z"]]}',
cirq.Circuit(
cirq.Y(b) ** 0.5,
cirq.X(c) ** -0.5,
cirq.CNOT(c, b),
cirq.CNOT(d, b),
cirq.Y(a).controlled_by(b),
cirq.Z(e).controlled_by(b),
cirq.CNOT(d, b),
cirq.CNOT(c, b),
cirq.X(c) ** 0.5,
cirq.Y(b) ** -0.5,
),
)
def test_control_with_line_qubits_mapped_to():
a, b = cirq.LineQubit.range(2)
a2, b2 = cirq.NamedQubit.range(2, prefix='q')
cell = cirq.interop.quirk.cells.control_cells.ControlCell(a, [cirq.Y(b) ** 0.5])
mapped_cell = cirq.interop.quirk.cells.control_cells.ControlCell(a2, [cirq.Y(b2) ** 0.5])
assert cell != mapped_cell
assert cell.with_line_qubits_mapped_to([a2, b2]) == mapped_cell
def test_parity_control_with_line_qubits_mapped_to():
a, b, c = cirq.LineQubit.range(3)
a2, b2, c2 = cirq.NamedQubit.range(3, prefix='q')
cell = cirq.interop.quirk.cells.control_cells.ParityControlCell([a, b], [cirq.Y(c) ** 0.5])
mapped_cell = cirq.interop.quirk.cells.control_cells.ParityControlCell(
[a2, b2], [cirq.Y(c2) ** 0.5]
)
assert cell != mapped_cell
assert cell.with_line_qubits_mapped_to([a2, b2, c2]) == mapped_cell
def test_repr():
a, b, c = cirq.LineQubit.range(3)
cirq.testing.assert_equivalent_repr(
cirq.interop.quirk.cells.control_cells.ControlCell(a, [cirq.Y(b) ** 0.5])
)
cirq.testing.assert_equivalent_repr(
cirq.interop.quirk.cells.control_cells.ParityControlCell([a, b], [cirq.Y(c) ** 0.5])
)
| 30.325153 | 95 | 0.500303 |
4a1ba0b5463d6f10801673a6153e02189f5026f9
| 4,573 |
py
|
Python
|
src/analyzeScenario.py
|
shinshio/AC2C
|
3d7d5ce33a6d2e77a8ae2cd697bd6db4518b0bf8
|
[
"OLDAP-2.2.1"
] | null | null | null |
src/analyzeScenario.py
|
shinshio/AC2C
|
3d7d5ce33a6d2e77a8ae2cd697bd6db4518b0bf8
|
[
"OLDAP-2.2.1"
] | null | null | null |
src/analyzeScenario.py
|
shinshio/AC2C
|
3d7d5ce33a6d2e77a8ae2cd697bd6db4518b0bf8
|
[
"OLDAP-2.2.1"
] | null | null | null |
# coding: utf-8
'''
Analyze the scenario file and extract each item.
Items: cover, scenario, CAN, relay information, ...
TODO:
No Item.
'''
# import from std
import sys
# import from pypi
import pandas as pd
def takeInCover(filename: str) -> list:
"""
Take in cover information from scenario file
Parameters
----------
filename: str
full path of scenario file
Returns
----------
lst_cover: list
0: title, 1: author, 2: ECU type, 3: ECU code,
4: summary(1), 5: summary(2), 6: summary(3), 7: summary(4)
"""
snHeader = 3
snIndexCol = 0
snSname = 'cover'
df_cover = pd.read_excel(filename,header=snHeader,index_col=snIndexCol,sheet_name=snSname)
lst_cover = sum(df_cover.fillna('').values.tolist(), [])
return lst_cover
def takeInScenario(filename: str) -> list:
"""
Take in scenario information from scenario file
Parameters
----------
filename: str
full path of scenario file
Returns
----------
lst_scenario: list
dimension 1: scenarios
dimension 2:
0: numbers, 1: scenario's items (orders and recipes), 2: judges
"""
snHeader = 0
snIndexCol = None
snSname = 'scenario'
df_scenario = pd.read_excel(filename,header=snHeader,index_col=snIndexCol,sheet_name=snSname)
lst_scenario = df_scenario.fillna('').values.tolist()
return lst_scenario
def takeInCanInfo(filename: str) -> list:
"""
Take in CAN information from scenario file
Parameters
----------
filename: str
full path of scenario file
Returns
----------
lst_can: list
dimension 1: keywords
0: ID for send, 1: ID for response, 2: message of DTC read,
3: message of DTC clear, 4: message of additional request,
5, 6, 7: reserved
dimension 2: value
0: keyword, 1: value
"""
snHeader = 0
snIndexCol = None
snSname = 'can'
df_can = pd.read_excel(filename,header=snHeader,index_col=snIndexCol,sheet_name=snSname)
lst_can = df_can.fillna('').values.tolist()
return lst_can
def takeInRyDefInfo(filename: str) -> list:
"""
Take in Relay default information from scenario file
Parameters
----------
filename: str
full path of scenario file
Returns
----------
lst_ry: list
dimension 1: relay number
dimension 2: value
0: number, 1: value (on, off, na)
"""
snHeader = 0
snIndexCol = None
snSname = 'relay'
df_ry = pd.read_excel(filename,header=snHeader,index_col=snIndexCol,sheet_name=snSname)
lst_ry = df_ry.fillna('').values.tolist()
return lst_ry
def sn2numZ3(scenario: list) -> str:
"""
Convert scenario number to str (zero padding of 3)
Parameters
----------
scenario: list
use only index 0
0: number, 1: items (order and recipe), 2:judge
Returns
----------
str
str of scenario number (zero padding of 3)
"""
# return result
return str(scenario[0]).zfill(3)
def sn2order(scenario: list) -> str:
"""
Extract scenario item to order
Parameters
----------
scenario: list
use only index 1
0: number, 1: items (order and recipe), 2:judge
Returns
----------
str
order of scenario
"""
return str(scenario[1].split('<')[0])
def sn2recipe(scenario: list) -> list:
"""
Extract scenario item to recipe
Parameters
----------
scenario: list
use only index 1
0: number, 1: items (order and recipe), 2:judge
Returns
----------
subOrders: list
0: mainOrder, 1-end: subOrders
"""
# slice main task
allOrder = str(scenario[1].split('<')[1])
mainOrder = allOrder.split('_')[0]
subOrders = str(allOrder.split('_',1)[1]).replace('>','').split('_')
subOrders.insert(0, mainOrder)
# return result
return subOrders
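# Illustrative usage (the item string below is hypothetical, assuming the
# "order<main_sub1_sub2_...>" convention handled above):
#   sn2order([1, 'power<on_ry1_wait>', 'OK'])  -> 'power'
#   sn2recipe([1, 'power<on_ry1_wait>', 'OK']) -> ['on', 'ry1', 'wait']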
def sn2judge(scenario: list) -> str:
"""
Extract scenario judge to recipe
Parameters
----------
scenario: list
use only index 2
0: number, 1: items (order and recipe), 2:judge
Returns
----------
str
judge of scenario
"""
# return result
return str(scenario[2])
'''
----------
main
----------
'''
if __name__ == '__main__':
pass
| 23.572165 | 97 | 0.563306 |
4a1ba38fbd883569b92f720b0c94418d8328c3cf
| 4,615 |
bzl
|
Python
|
tensorflow/core/platform/build_config.bzl
|
jraman/tensorflow
|
41c6bf7c6215bea9bfb9bf0a9b63f2084e6f3058
|
[
"Apache-2.0"
] | 1 |
2020-02-15T14:00:01.000Z
|
2020-02-15T14:00:01.000Z
|
tensorflow/core/platform/build_config.bzl
|
rakeshacharya-d/tensorflow
|
9028828d3b8a2a622f7203a317002cc749531695
|
[
"Apache-2.0"
] | 1 |
2022-02-10T01:08:48.000Z
|
2022-02-10T01:08:48.000Z
|
tensorflow/core/platform/build_config.bzl
|
rakeshacharya-d/tensorflow
|
9028828d3b8a2a622f7203a317002cc749531695
|
[
"Apache-2.0"
] | null | null | null |
"""Provides a redirection point for platform specific implementations of starlark utilities."""
load(
"//tensorflow/core/platform/default:build_config.bzl",
_if_llvm_aarch64_available = "if_llvm_aarch64_available",
_pyx_library = "pyx_library",
_tf_additional_all_protos = "tf_additional_all_protos",
_tf_additional_binary_deps = "tf_additional_binary_deps",
_tf_additional_core_deps = "tf_additional_core_deps",
_tf_additional_cupti_utils_cuda_deps = "tf_additional_cupti_utils_cuda_deps",
_tf_additional_device_tracer_srcs = "tf_additional_device_tracer_srcs",
_tf_additional_env_hdrs = "tf_additional_env_hdrs",
_tf_additional_lib_deps = "tf_additional_lib_deps",
_tf_additional_lib_hdrs = "tf_additional_lib_hdrs",
_tf_additional_monitoring_hdrs = "tf_additional_monitoring_hdrs",
_tf_additional_rpc_deps = "tf_additional_rpc_deps",
_tf_additional_tensor_coding_deps = "tf_additional_tensor_coding_deps",
_tf_additional_test_deps = "tf_additional_test_deps",
_tf_additional_test_srcs = "tf_additional_test_srcs",
_tf_fingerprint_deps = "tf_fingerprint_deps",
_tf_google_mobile_srcs_no_runtime = "tf_google_mobile_srcs_no_runtime",
_tf_google_mobile_srcs_only_runtime = "tf_google_mobile_srcs_only_runtime",
_tf_jspb_proto_library = "tf_jspb_proto_library",
_tf_kernel_tests_linkstatic = "tf_kernel_tests_linkstatic",
_tf_lib_proto_parsing_deps = "tf_lib_proto_parsing_deps",
_tf_logging_deps = "tf_logging_deps",
_tf_monitoring_deps = "tf_monitoring_deps",
_tf_platform_alias = "tf_platform_alias",
_tf_platform_deps = "tf_platform_deps",
_tf_portable_deps_no_runtime = "tf_portable_deps_no_runtime",
_tf_proto_library = "tf_proto_library",
_tf_proto_library_cc = "tf_proto_library_cc",
_tf_proto_library_py = "tf_proto_library_py",
_tf_protobuf_compiler_deps = "tf_protobuf_compiler_deps",
_tf_protobuf_deps = "tf_protobuf_deps",
_tf_protobuf_full_deps = "tf_protobuf_full_deps",
_tf_protobuf_lite_deps = "tf_protobuf_lite_deps",
_tf_protos_all = "tf_protos_all",
_tf_protos_all_impl = "tf_protos_all_impl",
_tf_protos_grappler = "tf_protos_grappler",
_tf_protos_grappler_impl = "tf_protos_grappler_impl",
_tf_protos_profiler_impl = "tf_protos_profiler_impl",
_tf_py_clif_cc = "tf_py_clif_cc",
_tf_pyclif_proto_library = "tf_pyclif_proto_library",
_tf_resource_deps = "tf_resource_deps",
_tf_windows_aware_platform_deps = "tf_windows_aware_platform_deps",
)
if_llvm_aarch64_available = _if_llvm_aarch64_available
pyx_library = _pyx_library
tf_additional_all_protos = _tf_additional_all_protos
tf_additional_binary_deps = _tf_additional_binary_deps
tf_additional_core_deps = _tf_additional_core_deps
tf_additional_cupti_utils_cuda_deps = _tf_additional_cupti_utils_cuda_deps
tf_additional_device_tracer_srcs = _tf_additional_device_tracer_srcs
tf_additional_env_hdrs = _tf_additional_env_hdrs
tf_additional_lib_deps = _tf_additional_lib_deps
tf_additional_lib_hdrs = _tf_additional_lib_hdrs
tf_additional_monitoring_hdrs = _tf_additional_monitoring_hdrs
tf_additional_rpc_deps = _tf_additional_rpc_deps
tf_additional_tensor_coding_deps = _tf_additional_tensor_coding_deps
tf_additional_test_deps = _tf_additional_test_deps
tf_additional_test_srcs = _tf_additional_test_srcs
tf_fingerprint_deps = _tf_fingerprint_deps
tf_google_mobile_srcs_no_runtime = _tf_google_mobile_srcs_no_runtime
tf_google_mobile_srcs_only_runtime = _tf_google_mobile_srcs_only_runtime
tf_jspb_proto_library = _tf_jspb_proto_library
tf_kernel_tests_linkstatic = _tf_kernel_tests_linkstatic
tf_lib_proto_parsing_deps = _tf_lib_proto_parsing_deps
tf_logging_deps = _tf_logging_deps
tf_monitoring_deps = _tf_monitoring_deps
tf_platform_alias = _tf_platform_alias
tf_platform_deps = _tf_platform_deps
tf_portable_deps_no_runtime = _tf_portable_deps_no_runtime
tf_proto_library = _tf_proto_library
tf_proto_library_cc = _tf_proto_library_cc
tf_proto_library_py = _tf_proto_library_py
tf_protobuf_compiler_deps = _tf_protobuf_compiler_deps
tf_protobuf_deps = _tf_protobuf_deps
tf_protobuf_full_deps = _tf_protobuf_full_deps
tf_protobuf_lite_deps = _tf_protobuf_lite_deps
tf_protos_all = _tf_protos_all
tf_protos_all_impl = _tf_protos_all_impl
tf_protos_grappler = _tf_protos_grappler
tf_protos_grappler_impl = _tf_protos_grappler_impl
tf_protos_profiler_impl = _tf_protos_profiler_impl
tf_py_clif_cc = _tf_py_clif_cc
tf_pyclif_proto_library = _tf_pyclif_proto_library
tf_resource_deps = _tf_resource_deps
tf_windows_aware_platform_deps = _tf_windows_aware_platform_deps
| 50.714286 | 95 | 0.855038 |
4a1ba5d3141bc005f7093dc1de3b73f0218e0f47
| 274 |
py
|
Python
|
ex_08_05/ex_08_05.py
|
rovelee/py4e
|
32125f5d62b6c7b6a56c8e1a250c1d81c6d54006
|
[
"MIT"
] | null | null | null |
ex_08_05/ex_08_05.py
|
rovelee/py4e
|
32125f5d62b6c7b6a56c8e1a250c1d81c6d54006
|
[
"MIT"
] | null | null | null |
ex_08_05/ex_08_05.py
|
rovelee/py4e
|
32125f5d62b6c7b6a56c8e1a250c1d81c6d54006
|
[
"MIT"
] | null | null | null |
fname = 'mbox-short.txt'
fh = open(fname)
count = 0
for line in fh:
if not line.startswith('From:'):
continue
lsl = line.split()
eml = lsl[1]
print(eml)
count = count + 1
print('There were %d lines in the file with From as the first word'%count)
| 22.833333 | 74 | 0.620438 |
4a1ba72bc45ec4c707293b985e89da042358b4f0
| 23,478 |
py
|
Python
|
Fill.py
|
ThreePendants/OoT-Randomizer
|
001d7823281bc8e9684058203df339aac1263062
|
[
"MIT"
] | null | null | null |
Fill.py
|
ThreePendants/OoT-Randomizer
|
001d7823281bc8e9684058203df339aac1263062
|
[
"MIT"
] | null | null | null |
Fill.py
|
ThreePendants/OoT-Randomizer
|
001d7823281bc8e9684058203df339aac1263062
|
[
"MIT"
] | null | null | null |
import random
import logging
from State import State
from Rules import set_shop_rules
from Location import DisableType
from LocationList import location_groups
from ItemPool import songlist, get_junk_item, item_groups, remove_junk_items
from ItemList import item_table
from Item import ItemFactory
from Search import Search
from functools import reduce
logger = logging.getLogger('')
class ShuffleError(RuntimeError):
pass
class FillError(ShuffleError):
pass
# Places all items into the world
def distribute_items_restrictive(window, worlds, fill_locations=None):
song_locations = [world.get_location(location) for world in worlds for location in
['Song from Composers Grave', 'Song from Impa', 'Song from Malon', 'Song from Saria',
'Song from Ocarina of Time', 'Song from Windmill', 'Sheik in Forest', 'Sheik at Temple',
'Sheik in Crater', 'Sheik in Ice Cavern', 'Sheik in Kakariko', 'Sheik at Colossus']]
shop_locations = [location for world in worlds for location in world.get_unfilled_locations() if location.type == 'Shop' and location.price == None]
# If not passed in, then get a shuffled list of locations to fill in
if not fill_locations:
fill_locations = [location for world in worlds for location in world.get_unfilled_locations() \
if location not in song_locations and \
location not in shop_locations and \
location.type != 'GossipStone']
world_states = [world.state for world in worlds]
window.locationcount = len(fill_locations) + len(song_locations) + len(shop_locations)
window.fillcount = 0
# Generate the itempools
shopitempool = [item for world in worlds for item in world.itempool if item.type == 'Shop']
songitempool = [item for world in worlds for item in world.itempool if item.type == 'Song']
itempool = [item for world in worlds for item in world.itempool if item.type != 'Shop' and item.type != 'Song']
if worlds[0].shuffle_song_items:
itempool.extend(songitempool)
fill_locations.extend(song_locations)
songitempool = []
song_locations = []
# add unrestricted dungeon items to main item pool
itempool.extend([item for world in worlds for item in world.get_unrestricted_dungeon_items()])
dungeon_items = [item for world in worlds for item in world.get_restricted_dungeon_items()]
random.shuffle(itempool) # randomize item placement order. this ordering can greatly affect the location accessibility bias
progitempool = [item for item in itempool if item.advancement]
prioitempool = [item for item in itempool if not item.advancement and item.priority]
restitempool = [item for item in itempool if not item.advancement and not item.priority]
cloakable_locations = shop_locations + song_locations + fill_locations
all_models = shopitempool + dungeon_items + songitempool + itempool
worlds[0].settings.distribution.fill(window, worlds, [shop_locations, song_locations, fill_locations], [shopitempool, dungeon_items, songitempool, progitempool, prioitempool, restitempool])
itempool = progitempool + prioitempool + restitempool
# set ice traps to have the appearance of other random items in the item pool
ice_traps = [item for item in itempool if item.name == 'Ice Trap']
# Extend with ice traps manually placed in plandomizer
ice_traps.extend(
location.item for location in cloakable_locations
if (location.has_preview()
and location.item is not None
and location.item.name == 'Ice Trap'
and location.item.looks_like_item is None))
junk_items = remove_junk_items.copy()
junk_items.remove('Ice Trap')
major_items = [item for (item, data) in item_table.items() if data[0] == 'Item' and data[1] and data[2] is not None]
fake_items = []
if worlds[0].settings.ice_trap_appearance == 'major_only':
model_items = [item for item in itempool if item.majoritem]
if len(model_items) == 0: # All major items were somehow removed from the pool (can happen in plando)
model_items = ItemFactory(major_items)
elif worlds[0].settings.ice_trap_appearance == 'junk_only':
model_items = [item for item in itempool if item.name in junk_items]
if len(model_items) == 0: # All junk was removed
model_items = ItemFactory(junk_items)
    else: # worlds[0].settings.ice_trap_appearance == 'anything':
model_items = [item for item in itempool if item.name != 'Ice Trap']
if len(model_items) == 0: # All major items and junk were somehow removed from the pool (can happen in plando)
model_items = ItemFactory(major_items) + ItemFactory(junk_items)
while len(ice_traps) > len(fake_items):
# if there are more ice traps than model items, then double up on model items
fake_items.extend(model_items)
for random_item in random.sample(fake_items, len(ice_traps)):
ice_trap = ice_traps.pop(0)
ice_trap.looks_like_item = random_item
# Start a search cache here.
search = Search([world.state for world in worlds])
# We place all the shop items first. Like songs, they have a more limited
# set of locations that they can be placed in, so placing them first will
# reduce the odds of creating unbeatable seeds. This also avoids needing
# to create item rules for every location for whether they are a shop item
    # or not. This shouldn't have much effect on item bias.
if shop_locations:
logger.info('Placing shop items.')
fill_ownworld_restrictive(window, worlds, search, shop_locations, shopitempool, itempool + songitempool + dungeon_items, "shop")
# Update the shop item access rules
for world in worlds:
set_shop_rules(world)
search.collect_locations()
# If there are dungeon items that are restricted to their original dungeon,
# we must place them first to make sure that there is always a location to
# place them. This could probably be replaced for more intelligent item
# placement, but will leave as is for now
if dungeon_items:
logger.info('Placing dungeon items.')
fill_dungeons_restrictive(window, worlds, search, fill_locations, dungeon_items, itempool + songitempool)
search.collect_locations()
# places the songs into the world
    # Currently places songs only at song locations. If there's an option
    # to allow them at other locations then they should be in the main pool.
    # Placing songs on their own since they have a relatively high chance
    # of failing compared to other item types. This way we only have to
    # retry the song locations.
if not worlds[0].shuffle_song_items:
logger.info('Placing song items.')
fill_ownworld_restrictive(window, worlds, search, song_locations, songitempool, progitempool, "song")
search.collect_locations()
fill_locations += [location for location in song_locations if location.item is None]
# Put one item in every dungeon, needs to be done before other items are
# placed to ensure there is a spot available for them
if worlds[0].one_item_per_dungeon:
logger.info('Placing one major item per dungeon.')
fill_dungeon_unique_item(window, worlds, search, fill_locations, progitempool)
search.collect_locations()
# Place all progression items. This will include keys in keysanity.
# Items in this group will check for reachability and will be placed
# such that the game is guaranteed beatable.
logger.info('Placing progression items.')
fill_restrictive(window, worlds, search, fill_locations, progitempool)
search.collect_locations()
# Place all priority items.
# These items are items that only check if the item is allowed to be
# placed in the location, not checking reachability. This is important
# for things like Ice Traps that can't be found at some locations
logger.info('Placing priority items.')
fill_restrictive_fast(window, worlds, fill_locations, prioitempool)
# Place the rest of the items.
# No restrictions at all. Places them completely randomly. Since they
# cannot affect the beatability, we don't need to check them
logger.info('Placing the rest of the items.')
fast_fill(window, fill_locations, restitempool)
# Log unplaced item/location warnings
for item in progitempool + prioitempool + restitempool:
logger.error('Unplaced Items: %s [World %d]' % (item.name, item.world.id))
for location in fill_locations:
logger.error('Unfilled Locations: %s [World %d]' % (location.name, location.world.id))
if progitempool + prioitempool + restitempool:
raise FillError('Not all items are placed.')
if fill_locations:
raise FillError('Not all locations have an item.')
if not search.can_beat_game():
raise FillError('Cannot beat game!')
worlds[0].settings.distribution.cloak(worlds, [cloakable_locations], [all_models])
for world in worlds:
for location in world.get_filled_locations():
# Get the maximum amount of wallets required to purchase an advancement item.
if world.maximum_wallets < 3 and location.price and location.item.advancement:
if location.price > 500:
world.maximum_wallets = 3
elif world.maximum_wallets < 2 and location.price > 200:
world.maximum_wallets = 2
elif world.maximum_wallets < 1 and location.price > 99:
world.maximum_wallets = 1
# Get Light Arrow location for later usage.
if location.item and location.item.name == 'Light Arrows':
location.item.world.light_arrow_location = location
# Places restricted dungeon items into the worlds. To ensure there is room for them,
# they are placed first, assuming all other items are reachable.
def fill_dungeons_restrictive(window, worlds, search, shuffled_locations, dungeon_items, itempool):
# List of states with all non-key items
base_search = search.copy()
base_search.collect_all(itempool)
base_search.collect_locations()
# shuffle this list to avoid placement bias
random.shuffle(dungeon_items)
# sort in the order Other, Small Key, Boss Key before placing dungeon items
# python sort is stable, so the ordering is still random within groups
# fill_restrictive processes the resulting list backwards so the Boss Keys will actually be placed first
sort_order = {"BossKey": 3, "SmallKey": 2}
dungeon_items.sort(key=lambda item: sort_order.get(item.type, 1))
# place dungeon items
fill_restrictive(window, worlds, base_search, shuffled_locations, dungeon_items)
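# --- Illustrative sketch (not part of the original randomizer code) ---
# The sort above relies on Python's sort being stable: within each sort_order
# group the previously shuffled order is preserved, and Boss Keys end up at
# the back of the list, which fill_restrictive pops from first. A minimal,
# hypothetical demo of the same trick:
def _stable_sort_order_demo():
    items = ['Map', 'SmallKey A', 'BossKey', 'Compass', 'SmallKey B']
    random.shuffle(items)                 # random order within each group
    rank = {'BossKey': 3, 'SmallKey': 2}  # everything else defaults to 1
    items.sort(key=lambda name: rank.get(name.split()[0], 1))
    return items                          # Boss Key sorts last, so it is popped first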
# Places items into dungeon locations. This is used when there should be exactly
# one progression item per dungeon. This should be run before all the progression
# items are placed to ensure there is space to place them.
def fill_dungeon_unique_item(window, worlds, search, fill_locations, itempool):
# We should make sure that we don't count event items, shop items,
# token items, or dungeon items as a major item. itempool at this
# point should only be able to have tokens of those restrictions
# since the rest are already placed.
major_items = [item for item in itempool if item.majoritem]
minor_items = [item for item in itempool if not item.majoritem]
dungeons = [dungeon for world in worlds for dungeon in world.dungeons]
double_dungeons = []
for dungeon in dungeons:
# we will count spirit temple twice so that it gets 2 items to match vanilla
if dungeon.name == 'Spirit Temple':
double_dungeons.append(dungeon)
dungeons.extend(double_dungeons)
random.shuffle(dungeons)
random.shuffle(itempool)
base_search = search.copy()
base_search.collect_all(minor_items)
base_search.collect_locations()
all_dungeon_locations = []
    # iterate over all the dungeons in a random order, placing the item there
for dungeon in dungeons:
dungeon_locations = [location for region in dungeon.regions for location in region.locations if location in fill_locations]
# cache this list to flag afterwards
all_dungeon_locations.extend(dungeon_locations)
# place 1 item into the dungeon
fill_restrictive(window, worlds, base_search, dungeon_locations, major_items, 1)
# update the location and item pool, removing any placed items and filled locations
# the fact that you can remove items from a list you're iterating over is python magic
for item in itempool:
if item.location != None:
fill_locations.remove(item.location)
itempool.remove(item)
# flag locations to not place further major items. it's important we do it on the
# locations instead of the dungeon because some locations are not in the dungeon
for location in all_dungeon_locations:
location.minor_only = True
logger.info("Unique dungeon items placed")
# Places items restricting placement to the recipient player's own world
def fill_ownworld_restrictive(window, worlds, search, locations, ownpool, itempool, description="Unknown", attempts=15):
# get the locations for each world
# look for preplaced items
placed_prizes = [loc.item.name for loc in locations if loc.item is not None]
unplaced_prizes = [item for item in ownpool if item.name not in placed_prizes]
empty_locations = [loc for loc in locations if loc.item is None]
prizepool_dict = {world.id: [item for item in unplaced_prizes if item.world.id == world.id] for world in worlds}
prize_locs_dict = {world.id: [loc for loc in empty_locations if loc.world.id == world.id] for world in worlds}
    # Shop items being sent into this method are tied to their own world.
# Therefore, let's do this one world at a time. We do this to help
# increase the chances of successfully placing songs
for world in worlds:
# List of states with all items
unplaced_prizes = [item for item in unplaced_prizes if item not in prizepool_dict[world.id]]
base_search = search.copy()
base_search.collect_all(itempool + unplaced_prizes)
world_attempts = attempts
while world_attempts:
world_attempts -= 1
try:
prizepool = list(prizepool_dict[world.id])
prize_locs = list(prize_locs_dict[world.id])
random.shuffle(prizepool)
fill_restrictive(window, worlds, base_search, prize_locs, prizepool)
logger.info("Placed %s items for world %s.", description, (world.id+1))
except FillError as e:
logger.info("Failed to place %s items for world %s. Will retry %s more times.", description, (world.id+1), world_attempts)
for location in prize_locs_dict[world.id]:
location.item = None
if location.disabled == DisableType.DISABLED:
location.disabled = DisableType.PENDING
logger.info('\t%s' % str(e))
continue
break
else:
raise FillError('Unable to place %s items in world %d' % (description, (world.id+1)))
# Places items in the itempool into locations.
# worlds is a list of worlds and is redundant with the worlds in the base_state_list
# base_state_list is a list of world states prior to placing items in the item pool
# items and locations have pointers to the world that they belong to
#
# The algorithm places items in the world in reverse.
# This means we first assume we have every item in the item pool and
# remove an item and try to place it somewhere that is still reachable
# This method helps distribution of items locked behind many requirements
#
# count is the number of items to place. If count is negative, then it will place
# every item. Raises an error if specified count of items are not placed.
#
# This function will modify the location and itempool arguments. placed items and
# filled locations will be removed. If this returns and error, then the state of
# those two lists cannot be guaranteed.
def fill_restrictive(window, worlds, base_search, locations, itempool, count=-1):
unplaced_items = []
# don't run over this search, just keep it as an item collection
items_search = base_search.copy()
items_search.collect_all(itempool)
# loop until there are no items or locations
while itempool and locations:
# if remaining count is 0, return. Negative means unbounded.
if count == 0:
break
# get an item and remove it from the itempool
item_to_place = itempool.pop()
if item_to_place.majoritem:
l2cations = [l for l in locations if not l.minor_only]
else:
l2cations = locations
random.shuffle(l2cations)
# generate the max search with every remaining item
# this will allow us to place this item in a reachable location
items_search.uncollect(item_to_place)
max_search = items_search.copy()
max_search.collect_locations()
# perform_access_check checks location reachability
perform_access_check = True
if worlds[0].check_beatable_only:
            # if any world can no longer be beaten with the remaining items
# then we must check for reachability no matter what.
# This way the reachability test is monotonic. If we were to later
# stop checking, then we could place an item needed in one world
# in an unreachable place in another world.
# scan_for_items would cause an unnecessary copy+collect
perform_access_check = not max_search.can_beat_game(scan_for_items=False)
        # find a location where the item can be placed. It must be a valid location
        # in the world we are placing it in (possibly checking for reachability)
spot_to_fill = None
for location in l2cations:
if location.can_fill(max_search.state_list[location.world.id], item_to_place, perform_access_check):
# for multiworld, make it so that the location is also reachable
# in the world the item is for. This is to prevent early restrictions
# in one world being placed late in another world. If this is not
# done then one player may be waiting a long time for other players.
if location.world.id != item_to_place.world.id:
try:
source_location = item_to_place.world.get_location(location.name)
if not source_location.can_fill(max_search.state_list[item_to_place.world.id], item_to_place, perform_access_check):
# location wasn't reachable in item's world, so skip it
continue
except KeyError:
# This location doesn't exist in the other world, let's look elsewhere.
# Check access to whatever parent region exists in the other world.
can_reach = True
parent_region = location.parent_region
while parent_region:
try:
source_region = item_to_place.world.get_region(parent_region.name)
can_reach = max_search.can_reach(source_region)
break
except KeyError:
parent_region = parent_region.entrances[0].parent_region
if not can_reach:
continue
if location.disabled == DisableType.PENDING:
if not max_search.can_beat_game(False):
continue
location.disabled = DisableType.DISABLED
# location is reachable (and reachable in item's world), so place item here
spot_to_fill = location
break
# if we failed to find a suitable location
if spot_to_fill is None:
# if we specify a count, then we only want to place a subset, so a miss might be ok
if count > 0:
# don't decrement count, we didn't place anything
unplaced_items.append(item_to_place)
items_search.collect(item_to_place)
continue
else:
# we expect all items to be placed
raise FillError('Game unbeatable: No more spots to place %s [World %d] from %d locations (%d total); %d other items left to place, plus %d skipped' % (item_to_place, item_to_place.world.id + 1, len(l2cations), len(locations), len(itempool), len(unplaced_items)))
# Place the item in the world and continue
spot_to_fill.world.push_item(spot_to_fill, item_to_place)
locations.remove(spot_to_fill)
window.fillcount += 1
window.update_progress(5 + ((window.fillcount / window.locationcount) * 30))
# decrement count
count -= 1
# assert that the specified number of items were placed
if count > 0:
        raise FillError('Could not place the specified number of items. %d remaining to be placed.' % count)
if count < 0 and len(itempool) > 0:
        raise FillError('Could not place all the items. %d remaining to be placed.' % len(itempool))
# re-add unplaced items that were skipped
itempool.extend(unplaced_items)
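# --- Illustrative sketch (not part of the original randomizer code) ---
# A toy, heavily simplified model of the reverse-fill idea documented above
# fill_restrictive: assume every remaining item is already collected,
# un-collect one item at a time, and only place it at a spot that is still
# reachable with what is left. All names here are hypothetical; the real
# function also handles multiworld checks, counts and beatable-only mode.
def _toy_reverse_fill(locations, items, reachable):
    """reachable(spot, owned_items) -> bool; returns a {spot: item} placement."""
    placement = {}
    remaining = list(items)
    while remaining:
        item = remaining.pop()                # pretend this item is not owned yet
        for spot in list(locations):
            if reachable(spot, remaining):    # reachable with the items still owned
                placement[spot] = item
                locations.remove(spot)
                break
        else:
            raise FillError('no reachable spot for %s' % item)
    return placement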
# This places items in the itempool into the locations
# It does not check for reachability, only that the item is
# allowed in the location
def fill_restrictive_fast(window, worlds, locations, itempool):
while itempool and locations:
item_to_place = itempool.pop()
random.shuffle(locations)
# get location that allows this item
spot_to_fill = None
for location in locations:
if location.can_fill_fast(item_to_place):
spot_to_fill = location
break
# if we failed to find a suitable location, then stop placing items
# we don't need to check beatability since world must be beatable
# at this point
if spot_to_fill is None:
if not worlds[0].check_beatable_only:
logger.debug('Not all items placed. Game beatable anyway.')
break
# Place the item in the world and continue
spot_to_fill.world.push_item(spot_to_fill, item_to_place)
locations.remove(spot_to_fill)
window.fillcount += 1
window.update_progress(5 + ((window.fillcount / window.locationcount) * 30))
# this places items from the itempool completely randomly into
# fill_locations. There are no checks for validity since
# none are needed for these remaining items
def fast_fill(window, locations, itempool):
random.shuffle(locations)
while itempool and locations:
spot_to_fill = locations.pop()
item_to_place = itempool.pop()
spot_to_fill.world.push_item(spot_to_fill, item_to_place)
window.fillcount += 1
window.update_progress(5 + ((window.fillcount / window.locationcount) * 30))
| 48.810811 | 278 | 0.683534 |
4a1ba830c98d6428367b858c1f35638a26e76c1e | 84,150 | py | Python | Lib/test/test_math.py | gerph/cpython | 98813cb03c2371789669c3d8debf8fca2a344de9 | ["CNRI-Python-GPL-Compatible"] | 2,441 | 2020-07-31T06:45:53.000Z | 2022-03-30T15:56:49.000Z | Lib/test/test_math.py | gerph/cpython | 98813cb03c2371789669c3d8debf8fca2a344de9 | ["CNRI-Python-GPL-Compatible"] | 238 | 2020-10-21T04:54:00.000Z | 2022-03-31T21:49:03.000Z | Lib/test/test_math.py | gerph/cpython | 98813cb03c2371789669c3d8debf8fca2a344de9 | ["CNRI-Python-GPL-Compatible"] | 93 | 2020-08-09T12:00:17.000Z | 2022-03-25T07:57:24.000Z |
# Python test set -- math module
# XXXX Should not do tests around zero only
from test.support import run_unittest, verbose, requires_IEEE_754
from test import support
import unittest
import itertools
import decimal
import math
import os
import platform
import random
import struct
import sys
eps = 1E-05
NAN = float('nan')
INF = float('inf')
NINF = float('-inf')
FLOAT_MAX = sys.float_info.max
FLOAT_MIN = sys.float_info.min
# detect evidence of double-rounding: fsum is not always correctly
# rounded on machines that suffer from double rounding.
x, y = 1e16, 2.9999 # use temporary values to defeat peephole optimizer
HAVE_DOUBLE_ROUNDING = (x + y == 1e16 + 4)
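# Worked example (added for illustration, not part of the original file):
# the exact sum above is roughly 1e16 + 2.9999, whose correctly rounded double
# is 1e16 + 2. On x87-style hardware the sum is first rounded to 80-bit
# extended precision, landing on exactly 1e16 + 3; rounding that to a double
# is a tie between 1e16 + 2 and 1e16 + 4, and round-half-to-even picks
# 1e16 + 4, which is precisely what the HAVE_DOUBLE_ROUNDING check above
# looks for.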
# locate file with test values
if __name__ == '__main__':
file = sys.argv[0]
else:
file = __file__
test_dir = os.path.dirname(file) or os.curdir
math_testcases = os.path.join(test_dir, 'math_testcases.txt')
test_file = os.path.join(test_dir, 'cmath_testcases.txt')
def to_ulps(x):
"""Convert a non-NaN float x to an integer, in such a way that
adjacent floats are converted to adjacent integers. Then
abs(ulps(x) - ulps(y)) gives the difference in ulps between two
floats.
The results from this function will only make sense on platforms
where native doubles are represented in IEEE 754 binary64 format.
Note: 0.0 and -0.0 are converted to 0 and -1, respectively.
"""
n = struct.unpack('<q', struct.pack('<d', x))[0]
if n < 0:
n = ~(n+2**63)
return n
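# --- Illustrative helper (not part of the original test suite) ---
# A hedged sketch of how to_ulps() is typically used: the gap between two
# floats, measured in units in the last place. The name _ulps_between is
# made up for this example only.
def _ulps_between(a, b):
    """Return how many representable doubles lie between a and b."""
    return abs(to_ulps(a) - to_ulps(b))
# On an IEEE 754 platform, _ulps_between(1.0, 1.0 + 2.0**-52) == 1.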
def ulp(x):
"""Return the value of the least significant bit of a
float x, such that the first float bigger than x is x+ulp(x).
Then, given an expected result x and a tolerance of n ulps,
the result y should be such that abs(y-x) <= n * ulp(x).
The results from this function will only make sense on platforms
where native doubles are represented in IEEE 754 binary64 format.
"""
x = abs(float(x))
if math.isnan(x) or math.isinf(x):
return x
# Find next float up from x.
n = struct.unpack('<q', struct.pack('<d', x))[0]
x_next = struct.unpack('<d', struct.pack('<q', n + 1))[0]
if math.isinf(x_next):
# Corner case: x was the largest finite float. Then it's
# not an exact power of two, so we can take the difference
# between x and the previous float.
x_prev = struct.unpack('<d', struct.pack('<q', n - 1))[0]
return x - x_prev
else:
return x_next - x
# Here's a pure Python version of the math.factorial algorithm, for
# documentation and comparison purposes.
#
# Formula:
#
# factorial(n) = factorial_odd_part(n) << (n - count_set_bits(n))
#
# where
#
# factorial_odd_part(n) = product_{i >= 0} product_{0 < j <= n >> i; j odd} j
#
# The outer product above is an infinite product, but once i >= n.bit_length,
# (n >> i) < 1 and the corresponding term of the product is empty. So only the
# finitely many terms for 0 <= i < n.bit_length() contribute anything.
#
# We iterate downwards from i == n.bit_length() - 1 to i == 0. The inner
# product in the formula above starts at 1 for i == n.bit_length(); for each i
# < n.bit_length() we get the inner product for i from that for i + 1 by
# multiplying by all j in {n >> i+1 < j <= n >> i; j odd}. In Python terms,
# this set is range((n >> i+1) + 1 | 1, (n >> i) + 1 | 1, 2).
def count_set_bits(n):
"""Number of '1' bits in binary expansion of a nonnnegative integer."""
return 1 + count_set_bits(n & n - 1) if n else 0
def partial_product(start, stop):
"""Product of integers in range(start, stop, 2), computed recursively.
start and stop should both be odd, with start <= stop.
"""
numfactors = (stop - start) >> 1
if not numfactors:
return 1
elif numfactors == 1:
return start
else:
mid = (start + numfactors) | 1
return partial_product(start, mid) * partial_product(mid, stop)
def py_factorial(n):
"""Factorial of nonnegative integer n, via "Binary Split Factorial Formula"
described at http://www.luschny.de/math/factorial/binarysplitfact.html
"""
inner = outer = 1
for i in reversed(range(n.bit_length())):
inner *= partial_product((n >> i + 1) + 1 | 1, (n >> i) + 1 | 1)
outer *= inner
return outer << (n - count_set_bits(n))
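# Worked example of the formula above (added for illustration, not part of
# the original file): for n = 10, count_set_bits(10) == 2, so the power of
# two in 10! is 10 - 2 = 8; the odd part evaluates to 14175, and indeed
# 14175 << 8 == 14175 * 256 == 3628800 == math.factorial(10).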
def ulp_abs_check(expected, got, ulp_tol, abs_tol):
"""Given finite floats `expected` and `got`, check that they're
approximately equal to within the given number of ulps or the
given absolute tolerance, whichever is bigger.
Returns None on success and an error message on failure.
"""
ulp_error = abs(to_ulps(expected) - to_ulps(got))
abs_error = abs(expected - got)
# Succeed if either abs_error <= abs_tol or ulp_error <= ulp_tol.
if abs_error <= abs_tol or ulp_error <= ulp_tol:
return None
else:
fmt = ("error = {:.3g} ({:d} ulps); "
"permitted error = {:.3g} or {:d} ulps")
return fmt.format(abs_error, ulp_error, abs_tol, ulp_tol)
def parse_mtestfile(fname):
"""Parse a file with test values
-- starts a comment
blank lines, or lines containing only a comment, are ignored
other lines are expected to have the form
id fn arg -> expected [flag]*
"""
with open(fname) as fp:
for line in fp:
# strip comments, and skip blank lines
if '--' in line:
line = line[:line.index('--')]
if not line.strip():
continue
lhs, rhs = line.split('->')
id, fn, arg = lhs.split()
rhs_pieces = rhs.split()
exp = rhs_pieces[0]
flags = rhs_pieces[1:]
yield (id, fn, float(arg), float(exp), flags)
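# Illustrative example (the test id, value and flag below are made up, not
# taken from the real data files): a line of the form
#
#   atan0001 atan 0.5 -> 0.46364760900080609 ignore-sign
#
# would be yielded by parse_mtestfile() as
#
#   ('atan0001', 'atan', 0.5, 0.46364760900080609, ['ignore-sign'])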
def parse_testfile(fname):
"""Parse a file with test values
Empty lines or lines starting with -- are ignored
yields id, fn, arg_real, arg_imag, exp_real, exp_imag
"""
with open(fname) as fp:
for line in fp:
# skip comment lines and blank lines
if line.startswith('--') or not line.strip():
continue
lhs, rhs = line.split('->')
id, fn, arg_real, arg_imag = lhs.split()
rhs_pieces = rhs.split()
exp_real, exp_imag = rhs_pieces[0], rhs_pieces[1]
flags = rhs_pieces[2:]
yield (id, fn,
float(arg_real), float(arg_imag),
float(exp_real), float(exp_imag),
flags)
def result_check(expected, got, ulp_tol=5, abs_tol=0.0):
# Common logic of MathTests.(ftest, test_testcases, test_mtestcases)
"""Compare arguments expected and got, as floats, if either
is a float, using a tolerance expressed in multiples of
ulp(expected) or absolutely (if given and greater).
As a convenience, when neither argument is a float, and for
non-finite floats, exact equality is demanded. Also, nan==nan
as far as this function is concerned.
Returns None on success and an error message on failure.
"""
# Check exactly equal (applies also to strings representing exceptions)
if got == expected:
return None
failure = "not equal"
# Turn mixed float and int comparison (e.g. floor()) to all-float
if isinstance(expected, float) and isinstance(got, int):
got = float(got)
elif isinstance(got, float) and isinstance(expected, int):
expected = float(expected)
if isinstance(expected, float) and isinstance(got, float):
if math.isnan(expected) and math.isnan(got):
# Pass, since both nan
failure = None
elif math.isinf(expected) or math.isinf(got):
# We already know they're not equal, drop through to failure
pass
else:
# Both are finite floats (now). Are they close enough?
failure = ulp_abs_check(expected, got, ulp_tol, abs_tol)
# arguments are not equal, and if numeric, are too far apart
if failure is not None:
fail_fmt = "expected {!r}, got {!r}"
fail_msg = fail_fmt.format(expected, got)
fail_msg += ' ({})'.format(failure)
return fail_msg
else:
return None
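# Worked example (illustration only, not part of the original file): with the
# default tolerances, result_check(2.0, 2.0 + ulp(2.0)) returns None, since the
# values differ by a single ulp (within ulp_tol=5), while result_check(2.0, 2.1)
# returns an error message because the absolute and ulp errors both exceed the
# permitted tolerances.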
class IntSubclass(int):
pass
# Class providing an __index__ method.
class MyIndexable(object):
def __init__(self, value):
self.value = value
def __index__(self):
return self.value
class MathTests(unittest.TestCase):
def ftest(self, name, got, expected, ulp_tol=5, abs_tol=0.0):
"""Compare arguments expected and got, as floats, if either
is a float, using a tolerance expressed in multiples of
ulp(expected) or absolutely, whichever is greater.
As a convenience, when neither argument is a float, and for
non-finite floats, exact equality is demanded. Also, nan==nan
in this function.
"""
failure = result_check(expected, got, ulp_tol, abs_tol)
if failure is not None:
self.fail("{}: {}".format(name, failure))
def testConstants(self):
# Ref: Abramowitz & Stegun (Dover, 1965)
self.ftest('pi', math.pi, 3.141592653589793238462643)
self.ftest('e', math.e, 2.718281828459045235360287)
self.assertEqual(math.tau, 2*math.pi)
def testAcos(self):
self.assertRaises(TypeError, math.acos)
self.ftest('acos(-1)', math.acos(-1), math.pi)
self.ftest('acos(0)', math.acos(0), math.pi/2)
self.ftest('acos(1)', math.acos(1), 0)
self.assertRaises(ValueError, math.acos, INF)
self.assertRaises(ValueError, math.acos, NINF)
self.assertRaises(ValueError, math.acos, 1 + eps)
self.assertRaises(ValueError, math.acos, -1 - eps)
self.assertTrue(math.isnan(math.acos(NAN)))
def testAcosh(self):
self.assertRaises(TypeError, math.acosh)
self.ftest('acosh(1)', math.acosh(1), 0)
self.ftest('acosh(2)', math.acosh(2), 1.3169578969248168)
self.assertRaises(ValueError, math.acosh, 0)
self.assertRaises(ValueError, math.acosh, -1)
self.assertEqual(math.acosh(INF), INF)
self.assertRaises(ValueError, math.acosh, NINF)
self.assertTrue(math.isnan(math.acosh(NAN)))
def testAsin(self):
self.assertRaises(TypeError, math.asin)
self.ftest('asin(-1)', math.asin(-1), -math.pi/2)
self.ftest('asin(0)', math.asin(0), 0)
self.ftest('asin(1)', math.asin(1), math.pi/2)
self.assertRaises(ValueError, math.asin, INF)
self.assertRaises(ValueError, math.asin, NINF)
self.assertRaises(ValueError, math.asin, 1 + eps)
self.assertRaises(ValueError, math.asin, -1 - eps)
self.assertTrue(math.isnan(math.asin(NAN)))
def testAsinh(self):
self.assertRaises(TypeError, math.asinh)
self.ftest('asinh(0)', math.asinh(0), 0)
self.ftest('asinh(1)', math.asinh(1), 0.88137358701954305)
self.ftest('asinh(-1)', math.asinh(-1), -0.88137358701954305)
self.assertEqual(math.asinh(INF), INF)
self.assertEqual(math.asinh(NINF), NINF)
self.assertTrue(math.isnan(math.asinh(NAN)))
def testAtan(self):
self.assertRaises(TypeError, math.atan)
self.ftest('atan(-1)', math.atan(-1), -math.pi/4)
self.ftest('atan(0)', math.atan(0), 0)
self.ftest('atan(1)', math.atan(1), math.pi/4)
self.ftest('atan(inf)', math.atan(INF), math.pi/2)
self.ftest('atan(-inf)', math.atan(NINF), -math.pi/2)
self.assertTrue(math.isnan(math.atan(NAN)))
def testAtanh(self):
        self.assertRaises(TypeError, math.atanh)
self.ftest('atanh(0)', math.atanh(0), 0)
self.ftest('atanh(0.5)', math.atanh(0.5), 0.54930614433405489)
self.ftest('atanh(-0.5)', math.atanh(-0.5), -0.54930614433405489)
self.assertRaises(ValueError, math.atanh, 1)
self.assertRaises(ValueError, math.atanh, -1)
self.assertRaises(ValueError, math.atanh, INF)
self.assertRaises(ValueError, math.atanh, NINF)
self.assertTrue(math.isnan(math.atanh(NAN)))
def testAtan2(self):
self.assertRaises(TypeError, math.atan2)
self.ftest('atan2(-1, 0)', math.atan2(-1, 0), -math.pi/2)
self.ftest('atan2(-1, 1)', math.atan2(-1, 1), -math.pi/4)
self.ftest('atan2(0, 1)', math.atan2(0, 1), 0)
self.ftest('atan2(1, 1)', math.atan2(1, 1), math.pi/4)
self.ftest('atan2(1, 0)', math.atan2(1, 0), math.pi/2)
# math.atan2(0, x)
self.ftest('atan2(0., -inf)', math.atan2(0., NINF), math.pi)
self.ftest('atan2(0., -2.3)', math.atan2(0., -2.3), math.pi)
self.ftest('atan2(0., -0.)', math.atan2(0., -0.), math.pi)
self.assertEqual(math.atan2(0., 0.), 0.)
self.assertEqual(math.atan2(0., 2.3), 0.)
self.assertEqual(math.atan2(0., INF), 0.)
self.assertTrue(math.isnan(math.atan2(0., NAN)))
# math.atan2(-0, x)
self.ftest('atan2(-0., -inf)', math.atan2(-0., NINF), -math.pi)
self.ftest('atan2(-0., -2.3)', math.atan2(-0., -2.3), -math.pi)
self.ftest('atan2(-0., -0.)', math.atan2(-0., -0.), -math.pi)
self.assertEqual(math.atan2(-0., 0.), -0.)
self.assertEqual(math.atan2(-0., 2.3), -0.)
self.assertEqual(math.atan2(-0., INF), -0.)
self.assertTrue(math.isnan(math.atan2(-0., NAN)))
# math.atan2(INF, x)
self.ftest('atan2(inf, -inf)', math.atan2(INF, NINF), math.pi*3/4)
self.ftest('atan2(inf, -2.3)', math.atan2(INF, -2.3), math.pi/2)
self.ftest('atan2(inf, -0.)', math.atan2(INF, -0.0), math.pi/2)
self.ftest('atan2(inf, 0.)', math.atan2(INF, 0.0), math.pi/2)
self.ftest('atan2(inf, 2.3)', math.atan2(INF, 2.3), math.pi/2)
self.ftest('atan2(inf, inf)', math.atan2(INF, INF), math.pi/4)
self.assertTrue(math.isnan(math.atan2(INF, NAN)))
# math.atan2(NINF, x)
self.ftest('atan2(-inf, -inf)', math.atan2(NINF, NINF), -math.pi*3/4)
self.ftest('atan2(-inf, -2.3)', math.atan2(NINF, -2.3), -math.pi/2)
self.ftest('atan2(-inf, -0.)', math.atan2(NINF, -0.0), -math.pi/2)
self.ftest('atan2(-inf, 0.)', math.atan2(NINF, 0.0), -math.pi/2)
self.ftest('atan2(-inf, 2.3)', math.atan2(NINF, 2.3), -math.pi/2)
self.ftest('atan2(-inf, inf)', math.atan2(NINF, INF), -math.pi/4)
self.assertTrue(math.isnan(math.atan2(NINF, NAN)))
# math.atan2(+finite, x)
self.ftest('atan2(2.3, -inf)', math.atan2(2.3, NINF), math.pi)
self.ftest('atan2(2.3, -0.)', math.atan2(2.3, -0.), math.pi/2)
self.ftest('atan2(2.3, 0.)', math.atan2(2.3, 0.), math.pi/2)
self.assertEqual(math.atan2(2.3, INF), 0.)
self.assertTrue(math.isnan(math.atan2(2.3, NAN)))
# math.atan2(-finite, x)
self.ftest('atan2(-2.3, -inf)', math.atan2(-2.3, NINF), -math.pi)
self.ftest('atan2(-2.3, -0.)', math.atan2(-2.3, -0.), -math.pi/2)
self.ftest('atan2(-2.3, 0.)', math.atan2(-2.3, 0.), -math.pi/2)
self.assertEqual(math.atan2(-2.3, INF), -0.)
self.assertTrue(math.isnan(math.atan2(-2.3, NAN)))
# math.atan2(NAN, x)
self.assertTrue(math.isnan(math.atan2(NAN, NINF)))
self.assertTrue(math.isnan(math.atan2(NAN, -2.3)))
self.assertTrue(math.isnan(math.atan2(NAN, -0.)))
self.assertTrue(math.isnan(math.atan2(NAN, 0.)))
self.assertTrue(math.isnan(math.atan2(NAN, 2.3)))
self.assertTrue(math.isnan(math.atan2(NAN, INF)))
self.assertTrue(math.isnan(math.atan2(NAN, NAN)))
def testCeil(self):
self.assertRaises(TypeError, math.ceil)
self.assertEqual(int, type(math.ceil(0.5)))
self.ftest('ceil(0.5)', math.ceil(0.5), 1)
self.ftest('ceil(1.0)', math.ceil(1.0), 1)
self.ftest('ceil(1.5)', math.ceil(1.5), 2)
self.ftest('ceil(-0.5)', math.ceil(-0.5), 0)
self.ftest('ceil(-1.0)', math.ceil(-1.0), -1)
self.ftest('ceil(-1.5)', math.ceil(-1.5), -1)
#self.assertEqual(math.ceil(INF), INF)
#self.assertEqual(math.ceil(NINF), NINF)
#self.assertTrue(math.isnan(math.ceil(NAN)))
class TestCeil:
def __ceil__(self):
return 42
class TestNoCeil:
pass
self.ftest('ceil(TestCeil())', math.ceil(TestCeil()), 42)
self.assertRaises(TypeError, math.ceil, TestNoCeil())
t = TestNoCeil()
t.__ceil__ = lambda *args: args
self.assertRaises(TypeError, math.ceil, t)
self.assertRaises(TypeError, math.ceil, t, 0)
@requires_IEEE_754
def testCopysign(self):
self.assertEqual(math.copysign(1, 42), 1.0)
self.assertEqual(math.copysign(0., 42), 0.0)
self.assertEqual(math.copysign(1., -42), -1.0)
self.assertEqual(math.copysign(3, 0.), 3.0)
self.assertEqual(math.copysign(4., -0.), -4.0)
self.assertRaises(TypeError, math.copysign)
# copysign should let us distinguish signs of zeros
self.assertEqual(math.copysign(1., 0.), 1.)
self.assertEqual(math.copysign(1., -0.), -1.)
self.assertEqual(math.copysign(INF, 0.), INF)
self.assertEqual(math.copysign(INF, -0.), NINF)
self.assertEqual(math.copysign(NINF, 0.), INF)
self.assertEqual(math.copysign(NINF, -0.), NINF)
# and of infinities
self.assertEqual(math.copysign(1., INF), 1.)
self.assertEqual(math.copysign(1., NINF), -1.)
self.assertEqual(math.copysign(INF, INF), INF)
self.assertEqual(math.copysign(INF, NINF), NINF)
self.assertEqual(math.copysign(NINF, INF), INF)
self.assertEqual(math.copysign(NINF, NINF), NINF)
self.assertTrue(math.isnan(math.copysign(NAN, 1.)))
self.assertTrue(math.isnan(math.copysign(NAN, INF)))
self.assertTrue(math.isnan(math.copysign(NAN, NINF)))
self.assertTrue(math.isnan(math.copysign(NAN, NAN)))
# copysign(INF, NAN) may be INF or it may be NINF, since
# we don't know whether the sign bit of NAN is set on any
# given platform.
self.assertTrue(math.isinf(math.copysign(INF, NAN)))
# similarly, copysign(2., NAN) could be 2. or -2.
self.assertEqual(abs(math.copysign(2., NAN)), 2.)
def testCos(self):
self.assertRaises(TypeError, math.cos)
self.ftest('cos(-pi/2)', math.cos(-math.pi/2), 0, abs_tol=ulp(1))
self.ftest('cos(0)', math.cos(0), 1)
self.ftest('cos(pi/2)', math.cos(math.pi/2), 0, abs_tol=ulp(1))
self.ftest('cos(pi)', math.cos(math.pi), -1)
try:
self.assertTrue(math.isnan(math.cos(INF)))
self.assertTrue(math.isnan(math.cos(NINF)))
except ValueError:
self.assertRaises(ValueError, math.cos, INF)
self.assertRaises(ValueError, math.cos, NINF)
self.assertTrue(math.isnan(math.cos(NAN)))
def testCosh(self):
self.assertRaises(TypeError, math.cosh)
self.ftest('cosh(0)', math.cosh(0), 1)
self.ftest('cosh(2)-2*cosh(1)**2', math.cosh(2)-2*math.cosh(1)**2, -1) # Thanks to Lambert
self.assertEqual(math.cosh(INF), INF)
self.assertEqual(math.cosh(NINF), INF)
self.assertTrue(math.isnan(math.cosh(NAN)))
def testDegrees(self):
self.assertRaises(TypeError, math.degrees)
self.ftest('degrees(pi)', math.degrees(math.pi), 180.0)
self.ftest('degrees(pi/2)', math.degrees(math.pi/2), 90.0)
self.ftest('degrees(-pi/4)', math.degrees(-math.pi/4), -45.0)
self.ftest('degrees(0)', math.degrees(0), 0)
def testExp(self):
self.assertRaises(TypeError, math.exp)
self.ftest('exp(-1)', math.exp(-1), 1/math.e)
self.ftest('exp(0)', math.exp(0), 1)
self.ftest('exp(1)', math.exp(1), math.e)
self.assertEqual(math.exp(INF), INF)
self.assertEqual(math.exp(NINF), 0.)
self.assertTrue(math.isnan(math.exp(NAN)))
self.assertRaises(OverflowError, math.exp, 1000000)
def testFabs(self):
self.assertRaises(TypeError, math.fabs)
self.ftest('fabs(-1)', math.fabs(-1), 1)
self.ftest('fabs(0)', math.fabs(0), 0)
self.ftest('fabs(1)', math.fabs(1), 1)
def testFactorial(self):
self.assertEqual(math.factorial(0), 1)
self.assertEqual(math.factorial(0.0), 1)
total = 1
for i in range(1, 1000):
total *= i
self.assertEqual(math.factorial(i), total)
self.assertEqual(math.factorial(float(i)), total)
self.assertEqual(math.factorial(i), py_factorial(i))
self.assertRaises(ValueError, math.factorial, -1)
self.assertRaises(ValueError, math.factorial, -1.0)
self.assertRaises(ValueError, math.factorial, -10**100)
self.assertRaises(ValueError, math.factorial, -1e100)
self.assertRaises(ValueError, math.factorial, math.pi)
def testFactorialNonIntegers(self):
self.assertRaises(TypeError, math.factorial, decimal.Decimal(5.2))
self.assertRaises(TypeError, math.factorial, "5")
# Other implementations may place different upper bounds.
@support.cpython_only
def testFactorialHugeInputs(self):
# Currently raises OverflowError for inputs that are too large
# to fit into a C long.
self.assertRaises(OverflowError, math.factorial, 10**100)
self.assertRaises(OverflowError, math.factorial, 1e100)
def testFloor(self):
self.assertRaises(TypeError, math.floor)
self.assertEqual(int, type(math.floor(0.5)))
self.ftest('floor(0.5)', math.floor(0.5), 0)
self.ftest('floor(1.0)', math.floor(1.0), 1)
self.ftest('floor(1.5)', math.floor(1.5), 1)
self.ftest('floor(-0.5)', math.floor(-0.5), -1)
self.ftest('floor(-1.0)', math.floor(-1.0), -1)
self.ftest('floor(-1.5)', math.floor(-1.5), -2)
# pow() relies on floor() to check for integers
# This fails on some platforms - so check it here
self.ftest('floor(1.23e167)', math.floor(1.23e167), 1.23e167)
self.ftest('floor(-1.23e167)', math.floor(-1.23e167), -1.23e167)
#self.assertEqual(math.ceil(INF), INF)
#self.assertEqual(math.ceil(NINF), NINF)
#self.assertTrue(math.isnan(math.floor(NAN)))
class TestFloor:
def __floor__(self):
return 42
class TestNoFloor:
pass
self.ftest('floor(TestFloor())', math.floor(TestFloor()), 42)
self.assertRaises(TypeError, math.floor, TestNoFloor())
t = TestNoFloor()
t.__floor__ = lambda *args: args
self.assertRaises(TypeError, math.floor, t)
self.assertRaises(TypeError, math.floor, t, 0)
def testFmod(self):
self.assertRaises(TypeError, math.fmod)
self.ftest('fmod(10, 1)', math.fmod(10, 1), 0.0)
self.ftest('fmod(10, 0.5)', math.fmod(10, 0.5), 0.0)
self.ftest('fmod(10, 1.5)', math.fmod(10, 1.5), 1.0)
self.ftest('fmod(-10, 1)', math.fmod(-10, 1), -0.0)
self.ftest('fmod(-10, 0.5)', math.fmod(-10, 0.5), -0.0)
self.ftest('fmod(-10, 1.5)', math.fmod(-10, 1.5), -1.0)
self.assertTrue(math.isnan(math.fmod(NAN, 1.)))
self.assertTrue(math.isnan(math.fmod(1., NAN)))
self.assertTrue(math.isnan(math.fmod(NAN, NAN)))
self.assertRaises(ValueError, math.fmod, 1., 0.)
self.assertRaises(ValueError, math.fmod, INF, 1.)
self.assertRaises(ValueError, math.fmod, NINF, 1.)
self.assertRaises(ValueError, math.fmod, INF, 0.)
self.assertEqual(math.fmod(3.0, INF), 3.0)
self.assertEqual(math.fmod(-3.0, INF), -3.0)
self.assertEqual(math.fmod(3.0, NINF), 3.0)
self.assertEqual(math.fmod(-3.0, NINF), -3.0)
self.assertEqual(math.fmod(0.0, 3.0), 0.0)
self.assertEqual(math.fmod(0.0, NINF), 0.0)
def testFrexp(self):
self.assertRaises(TypeError, math.frexp)
def testfrexp(name, result, expected):
(mant, exp), (emant, eexp) = result, expected
if abs(mant-emant) > eps or exp != eexp:
self.fail('%s returned %r, expected %r'%\
(name, result, expected))
testfrexp('frexp(-1)', math.frexp(-1), (-0.5, 1))
testfrexp('frexp(0)', math.frexp(0), (0, 0))
testfrexp('frexp(1)', math.frexp(1), (0.5, 1))
testfrexp('frexp(2)', math.frexp(2), (0.5, 2))
self.assertEqual(math.frexp(INF)[0], INF)
self.assertEqual(math.frexp(NINF)[0], NINF)
self.assertTrue(math.isnan(math.frexp(NAN)[0]))
@requires_IEEE_754
@unittest.skipIf(HAVE_DOUBLE_ROUNDING,
"fsum is not exact on machines with double rounding")
def testFsum(self):
# math.fsum relies on exact rounding for correct operation.
# There's a known problem with IA32 floating-point that causes
# inexact rounding in some situations, and will cause the
# math.fsum tests below to fail; see issue #2937. On non IEEE
# 754 platforms, and on IEEE 754 platforms that exhibit the
# problem described in issue #2937, we simply skip the whole
# test.
# Python version of math.fsum, for comparison. Uses a
# different algorithm based on frexp, ldexp and integer
# arithmetic.
from sys import float_info
mant_dig = float_info.mant_dig
etiny = float_info.min_exp - mant_dig
def msum(iterable):
"""Full precision summation. Compute sum(iterable) without any
intermediate accumulation of error. Based on the 'lsum' function
at http://code.activestate.com/recipes/393090/
"""
tmant, texp = 0, 0
for x in iterable:
mant, exp = math.frexp(x)
mant, exp = int(math.ldexp(mant, mant_dig)), exp - mant_dig
if texp > exp:
tmant <<= texp-exp
texp = exp
else:
mant <<= exp-texp
tmant += mant
# Round tmant * 2**texp to a float. The original recipe
# used float(str(tmant)) * 2.0**texp for this, but that's
# a little unsafe because str -> float conversion can't be
# relied upon to do correct rounding on all platforms.
tail = max(len(bin(abs(tmant)))-2 - mant_dig, etiny - texp)
if tail > 0:
h = 1 << (tail-1)
tmant = tmant // (2*h) + bool(tmant & h and tmant & 3*h-1)
texp += tail
return math.ldexp(tmant, texp)
test_values = [
([], 0.0),
([0.0], 0.0),
([1e100, 1.0, -1e100, 1e-100, 1e50, -1.0, -1e50], 1e-100),
([2.0**53, -0.5, -2.0**-54], 2.0**53-1.0),
([2.0**53, 1.0, 2.0**-100], 2.0**53+2.0),
([2.0**53+10.0, 1.0, 2.0**-100], 2.0**53+12.0),
([2.0**53-4.0, 0.5, 2.0**-54], 2.0**53-3.0),
([1./n for n in range(1, 1001)],
float.fromhex('0x1.df11f45f4e61ap+2')),
([(-1.)**n/n for n in range(1, 1001)],
float.fromhex('-0x1.62a2af1bd3624p-1')),
([1e16, 1., 1e-16], 10000000000000002.0),
([1e16-2., 1.-2.**-53, -(1e16-2.), -(1.-2.**-53)], 0.0),
# exercise code for resizing partials array
([2.**n - 2.**(n+50) + 2.**(n+52) for n in range(-1074, 972, 2)] +
[-2.**1022],
float.fromhex('0x1.5555555555555p+970')),
]
# Telescoping sum, with exact differences (due to Sterbenz)
terms = [1.7**i for i in range(1001)]
test_values.append((
[terms[i+1] - terms[i] for i in range(1000)] + [-terms[1000]],
-terms[0]
))
for i, (vals, expected) in enumerate(test_values):
try:
actual = math.fsum(vals)
except OverflowError:
self.fail("test %d failed: got OverflowError, expected %r "
"for math.fsum(%.100r)" % (i, expected, vals))
except ValueError:
self.fail("test %d failed: got ValueError, expected %r "
"for math.fsum(%.100r)" % (i, expected, vals))
self.assertEqual(actual, expected)
from random import random, gauss, shuffle
for j in range(1000):
vals = [7, 1e100, -7, -1e100, -9e-20, 8e-20] * 10
s = 0
for i in range(200):
v = gauss(0, random()) ** 7 - s
s += v
vals.append(v)
shuffle(vals)
s = msum(vals)
self.assertEqual(msum(vals), math.fsum(vals))
def testGcd(self):
gcd = math.gcd
self.assertEqual(gcd(0, 0), 0)
self.assertEqual(gcd(1, 0), 1)
self.assertEqual(gcd(-1, 0), 1)
self.assertEqual(gcd(0, 1), 1)
self.assertEqual(gcd(0, -1), 1)
self.assertEqual(gcd(7, 1), 1)
self.assertEqual(gcd(7, -1), 1)
self.assertEqual(gcd(-23, 15), 1)
self.assertEqual(gcd(120, 84), 12)
self.assertEqual(gcd(84, -120), 12)
self.assertEqual(gcd(1216342683557601535506311712,
436522681849110124616458784), 32)
c = 652560
x = 434610456570399902378880679233098819019853229470286994367836600566
y = 1064502245825115327754847244914921553977
a = x * c
b = y * c
self.assertEqual(gcd(a, b), c)
self.assertEqual(gcd(b, a), c)
self.assertEqual(gcd(-a, b), c)
self.assertEqual(gcd(b, -a), c)
self.assertEqual(gcd(a, -b), c)
self.assertEqual(gcd(-b, a), c)
self.assertEqual(gcd(-a, -b), c)
self.assertEqual(gcd(-b, -a), c)
c = 576559230871654959816130551884856912003141446781646602790216406874
a = x * c
b = y * c
self.assertEqual(gcd(a, b), c)
self.assertEqual(gcd(b, a), c)
self.assertEqual(gcd(-a, b), c)
self.assertEqual(gcd(b, -a), c)
self.assertEqual(gcd(a, -b), c)
self.assertEqual(gcd(-b, a), c)
self.assertEqual(gcd(-a, -b), c)
self.assertEqual(gcd(-b, -a), c)
self.assertRaises(TypeError, gcd, 120.0, 84)
self.assertRaises(TypeError, gcd, 120, 84.0)
self.assertEqual(gcd(MyIndexable(120), MyIndexable(84)), 12)
def testHypot(self):
from decimal import Decimal
from fractions import Fraction
hypot = math.hypot
# Test different numbers of arguments (from zero to five)
# against a straightforward pure python implementation
args = math.e, math.pi, math.sqrt(2.0), math.gamma(3.5), math.sin(2.1)
for i in range(len(args)+1):
self.assertAlmostEqual(
hypot(*args[:i]),
math.sqrt(sum(s**2 for s in args[:i]))
)
# Test allowable types (those with __float__)
self.assertEqual(hypot(12.0, 5.0), 13.0)
self.assertEqual(hypot(12, 5), 13)
self.assertEqual(hypot(Decimal(12), Decimal(5)), 13)
self.assertEqual(hypot(Fraction(12, 32), Fraction(5, 32)), Fraction(13, 32))
self.assertEqual(hypot(bool(1), bool(0), bool(1), bool(1)), math.sqrt(3))
# Test corner cases
self.assertEqual(hypot(0.0, 0.0), 0.0) # Max input is zero
self.assertEqual(hypot(-10.5), 10.5) # Negative input
        self.assertEqual(hypot(), 0.0)             # No arguments
self.assertEqual(1.0,
math.copysign(1.0, hypot(-0.0)) # Convert negative zero to positive zero
)
self.assertEqual( # Handling of moving max to the end
hypot(1.5, 1.5, 0.5),
hypot(1.5, 0.5, 1.5),
)
# Test handling of bad arguments
with self.assertRaises(TypeError): # Reject keyword args
hypot(x=1)
with self.assertRaises(TypeError): # Reject values without __float__
hypot(1.1, 'string', 2.2)
int_too_big_for_float = 10 ** (sys.float_info.max_10_exp + 5)
with self.assertRaises((ValueError, OverflowError)):
hypot(1, int_too_big_for_float)
# Any infinity gives positive infinity.
self.assertEqual(hypot(INF), INF)
self.assertEqual(hypot(0, INF), INF)
self.assertEqual(hypot(10, INF), INF)
self.assertEqual(hypot(-10, INF), INF)
self.assertEqual(hypot(NAN, INF), INF)
self.assertEqual(hypot(INF, NAN), INF)
self.assertEqual(hypot(NINF, NAN), INF)
self.assertEqual(hypot(NAN, NINF), INF)
self.assertEqual(hypot(-INF, INF), INF)
self.assertEqual(hypot(-INF, -INF), INF)
self.assertEqual(hypot(10, -INF), INF)
# If no infinity, any NaN gives a NaN.
self.assertTrue(math.isnan(hypot(NAN)))
self.assertTrue(math.isnan(hypot(0, NAN)))
self.assertTrue(math.isnan(hypot(NAN, 10)))
self.assertTrue(math.isnan(hypot(10, NAN)))
self.assertTrue(math.isnan(hypot(NAN, NAN)))
self.assertTrue(math.isnan(hypot(NAN)))
# Verify scaling for extremely large values
fourthmax = FLOAT_MAX / 4.0
for n in range(32):
self.assertEqual(hypot(*([fourthmax]*n)), fourthmax * math.sqrt(n))
# Verify scaling for extremely small values
for exp in range(32):
scale = FLOAT_MIN / 2.0 ** exp
self.assertEqual(math.hypot(4*scale, 3*scale), 5*scale)
def testDist(self):
from decimal import Decimal as D
from fractions import Fraction as F
dist = math.dist
sqrt = math.sqrt
# Simple exact cases
self.assertEqual(dist((1.0, 2.0, 3.0), (4.0, 2.0, -1.0)), 5.0)
self.assertEqual(dist((1, 2, 3), (4, 2, -1)), 5.0)
# Test different numbers of arguments (from zero to nine)
# against a straightforward pure python implementation
for i in range(9):
for j in range(5):
p = tuple(random.uniform(-5, 5) for k in range(i))
q = tuple(random.uniform(-5, 5) for k in range(i))
self.assertAlmostEqual(
dist(p, q),
sqrt(sum((px - qx) ** 2.0 for px, qx in zip(p, q)))
)
# Test non-tuple inputs
self.assertEqual(dist([1.0, 2.0, 3.0], [4.0, 2.0, -1.0]), 5.0)
self.assertEqual(dist(iter([1.0, 2.0, 3.0]), iter([4.0, 2.0, -1.0])), 5.0)
# Test allowable types (those with __float__)
self.assertEqual(dist((14.0, 1.0), (2.0, -4.0)), 13.0)
self.assertEqual(dist((14, 1), (2, -4)), 13)
self.assertEqual(dist((D(14), D(1)), (D(2), D(-4))), D(13))
self.assertEqual(dist((F(14, 32), F(1, 32)), (F(2, 32), F(-4, 32))),
F(13, 32))
self.assertEqual(dist((True, True, False, True, False),
(True, False, True, True, False)),
sqrt(2.0))
# Test corner cases
self.assertEqual(dist((13.25, 12.5, -3.25),
(13.25, 12.5, -3.25)),
0.0) # Distance with self is zero
self.assertEqual(dist((), ()), 0.0) # Zero-dimensional case
self.assertEqual(1.0, # Convert negative zero to positive zero
math.copysign(1.0, dist((-0.0,), (0.0,)))
)
self.assertEqual(1.0, # Convert negative zero to positive zero
math.copysign(1.0, dist((0.0,), (-0.0,)))
)
self.assertEqual( # Handling of moving max to the end
dist((1.5, 1.5, 0.5), (0, 0, 0)),
dist((1.5, 0.5, 1.5), (0, 0, 0))
)
# Verify tuple subclasses are allowed
class T(tuple):
pass
self.assertEqual(dist(T((1, 2, 3)), ((4, 2, -1))), 5.0)
# Test handling of bad arguments
with self.assertRaises(TypeError): # Reject keyword args
dist(p=(1, 2, 3), q=(4, 5, 6))
with self.assertRaises(TypeError): # Too few args
dist((1, 2, 3))
with self.assertRaises(TypeError): # Too many args
dist((1, 2, 3), (4, 5, 6), (7, 8, 9))
with self.assertRaises(TypeError): # Scalars not allowed
dist(1, 2)
with self.assertRaises(TypeError): # Reject values without __float__
dist((1.1, 'string', 2.2), (1, 2, 3))
with self.assertRaises(ValueError): # Check dimension agree
dist((1, 2, 3, 4), (5, 6, 7))
with self.assertRaises(ValueError): # Check dimension agree
dist((1, 2, 3), (4, 5, 6, 7))
with self.assertRaises(TypeError): # Rejects invalid types
dist("abc", "xyz")
int_too_big_for_float = 10 ** (sys.float_info.max_10_exp + 5)
with self.assertRaises((ValueError, OverflowError)):
dist((1, int_too_big_for_float), (2, 3))
with self.assertRaises((ValueError, OverflowError)):
dist((2, 3), (1, int_too_big_for_float))
# Verify that the one dimensional case is equivalent to abs()
for i in range(20):
p, q = random.random(), random.random()
self.assertEqual(dist((p,), (q,)), abs(p - q))
# Test special values
values = [NINF, -10.5, -0.0, 0.0, 10.5, INF, NAN]
for p in itertools.product(values, repeat=3):
for q in itertools.product(values, repeat=3):
diffs = [px - qx for px, qx in zip(p, q)]
if any(map(math.isinf, diffs)):
# Any infinite difference gives positive infinity.
self.assertEqual(dist(p, q), INF)
elif any(map(math.isnan, diffs)):
# If no infinity, any NaN gives a NaN.
self.assertTrue(math.isnan(dist(p, q)))
# Verify scaling for extremely large values
fourthmax = FLOAT_MAX / 4.0
for n in range(32):
p = (fourthmax,) * n
q = (0.0,) * n
self.assertEqual(dist(p, q), fourthmax * math.sqrt(n))
self.assertEqual(dist(q, p), fourthmax * math.sqrt(n))
# Verify scaling for extremely small values
for exp in range(32):
scale = FLOAT_MIN / 2.0 ** exp
p = (4*scale, 3*scale)
q = (0.0, 0.0)
self.assertEqual(math.dist(p, q), 5*scale)
self.assertEqual(math.dist(q, p), 5*scale)
def testIsqrt(self):
# Test a variety of inputs, large and small.
test_values = (
list(range(1000))
+ list(range(10**6 - 1000, 10**6 + 1000))
+ [2**e + i for e in range(60, 200) for i in range(-40, 40)]
+ [3**9999, 10**5001]
)
for value in test_values:
with self.subTest(value=value):
s = math.isqrt(value)
self.assertIs(type(s), int)
self.assertLessEqual(s*s, value)
self.assertLess(value, (s+1)*(s+1))
# Negative values
with self.assertRaises(ValueError):
math.isqrt(-1)
# Integer-like things
s = math.isqrt(True)
self.assertIs(type(s), int)
self.assertEqual(s, 1)
s = math.isqrt(False)
self.assertIs(type(s), int)
self.assertEqual(s, 0)
class IntegerLike(object):
def __init__(self, value):
self.value = value
def __index__(self):
return self.value
s = math.isqrt(IntegerLike(1729))
self.assertIs(type(s), int)
self.assertEqual(s, 41)
with self.assertRaises(ValueError):
math.isqrt(IntegerLike(-3))
# Non-integer-like things
bad_values = [
3.5, "a string", decimal.Decimal("3.5"), 3.5j,
100.0, -4.0,
]
for value in bad_values:
with self.subTest(value=value):
with self.assertRaises(TypeError):
math.isqrt(value)
def testLdexp(self):
self.assertRaises(TypeError, math.ldexp)
self.ftest('ldexp(0,1)', math.ldexp(0,1), 0)
self.ftest('ldexp(1,1)', math.ldexp(1,1), 2)
self.ftest('ldexp(1,-1)', math.ldexp(1,-1), 0.5)
self.ftest('ldexp(-1,1)', math.ldexp(-1,1), -2)
self.assertRaises(OverflowError, math.ldexp, 1., 1000000)
self.assertRaises(OverflowError, math.ldexp, -1., 1000000)
self.assertEqual(math.ldexp(1., -1000000), 0.)
self.assertEqual(math.ldexp(-1., -1000000), -0.)
self.assertEqual(math.ldexp(INF, 30), INF)
self.assertEqual(math.ldexp(NINF, -213), NINF)
self.assertTrue(math.isnan(math.ldexp(NAN, 0)))
# large second argument
for n in [10**5, 10**10, 10**20, 10**40]:
self.assertEqual(math.ldexp(INF, -n), INF)
self.assertEqual(math.ldexp(NINF, -n), NINF)
self.assertEqual(math.ldexp(1., -n), 0.)
self.assertEqual(math.ldexp(-1., -n), -0.)
self.assertEqual(math.ldexp(0., -n), 0.)
self.assertEqual(math.ldexp(-0., -n), -0.)
self.assertTrue(math.isnan(math.ldexp(NAN, -n)))
self.assertRaises(OverflowError, math.ldexp, 1., n)
self.assertRaises(OverflowError, math.ldexp, -1., n)
self.assertEqual(math.ldexp(0., n), 0.)
self.assertEqual(math.ldexp(-0., n), -0.)
self.assertEqual(math.ldexp(INF, n), INF)
self.assertEqual(math.ldexp(NINF, n), NINF)
self.assertTrue(math.isnan(math.ldexp(NAN, n)))
def testLog(self):
self.assertRaises(TypeError, math.log)
self.ftest('log(1/e)', math.log(1/math.e), -1)
self.ftest('log(1)', math.log(1), 0)
self.ftest('log(e)', math.log(math.e), 1)
self.ftest('log(32,2)', math.log(32,2), 5)
self.ftest('log(10**40, 10)', math.log(10**40, 10), 40)
self.ftest('log(10**40, 10**20)', math.log(10**40, 10**20), 2)
self.ftest('log(10**1000)', math.log(10**1000),
2302.5850929940457)
self.assertRaises(ValueError, math.log, -1.5)
self.assertRaises(ValueError, math.log, -10**1000)
self.assertRaises(ValueError, math.log, NINF)
self.assertEqual(math.log(INF), INF)
self.assertTrue(math.isnan(math.log(NAN)))
def testLog1p(self):
self.assertRaises(TypeError, math.log1p)
for n in [2, 2**90, 2**300]:
self.assertAlmostEqual(math.log1p(n), math.log1p(float(n)))
self.assertRaises(ValueError, math.log1p, -1)
self.assertEqual(math.log1p(INF), INF)
@requires_IEEE_754
def testLog2(self):
self.assertRaises(TypeError, math.log2)
# Check some integer values
self.assertEqual(math.log2(1), 0.0)
self.assertEqual(math.log2(2), 1.0)
self.assertEqual(math.log2(4), 2.0)
# Large integer values
self.assertEqual(math.log2(2**1023), 1023.0)
self.assertEqual(math.log2(2**1024), 1024.0)
self.assertEqual(math.log2(2**2000), 2000.0)
self.assertRaises(ValueError, math.log2, -1.5)
self.assertRaises(ValueError, math.log2, NINF)
self.assertTrue(math.isnan(math.log2(NAN)))
@requires_IEEE_754
# log2() is not accurate enough on Mac OS X Tiger (10.4)
@support.requires_mac_ver(10, 5)
def testLog2Exact(self):
# Check that we get exact equality for log2 of powers of 2.
actual = [math.log2(math.ldexp(1.0, n)) for n in range(-1074, 1024)]
expected = [float(n) for n in range(-1074, 1024)]
self.assertEqual(actual, expected)
def testLog10(self):
self.assertRaises(TypeError, math.log10)
self.ftest('log10(0.1)', math.log10(0.1), -1)
self.ftest('log10(1)', math.log10(1), 0)
self.ftest('log10(10)', math.log10(10), 1)
self.ftest('log10(10**1000)', math.log10(10**1000), 1000.0)
self.assertRaises(ValueError, math.log10, -1.5)
self.assertRaises(ValueError, math.log10, -10**1000)
self.assertRaises(ValueError, math.log10, NINF)
        self.assertEqual(math.log10(INF), INF)
self.assertTrue(math.isnan(math.log10(NAN)))
def testModf(self):
self.assertRaises(TypeError, math.modf)
def testmodf(name, result, expected):
(v1, v2), (e1, e2) = result, expected
if abs(v1-e1) > eps or abs(v2-e2):
self.fail('%s returned %r, expected %r'%\
(name, result, expected))
testmodf('modf(1.5)', math.modf(1.5), (0.5, 1.0))
testmodf('modf(-1.5)', math.modf(-1.5), (-0.5, -1.0))
self.assertEqual(math.modf(INF), (0.0, INF))
self.assertEqual(math.modf(NINF), (-0.0, NINF))
modf_nan = math.modf(NAN)
self.assertTrue(math.isnan(modf_nan[0]))
self.assertTrue(math.isnan(modf_nan[1]))
def testPow(self):
self.assertRaises(TypeError, math.pow)
self.ftest('pow(0,1)', math.pow(0,1), 0)
self.ftest('pow(1,0)', math.pow(1,0), 1)
self.ftest('pow(2,1)', math.pow(2,1), 2)
self.ftest('pow(2,-1)', math.pow(2,-1), 0.5)
self.assertEqual(math.pow(INF, 1), INF)
self.assertEqual(math.pow(NINF, 1), NINF)
self.assertEqual((math.pow(1, INF)), 1.)
self.assertEqual((math.pow(1, NINF)), 1.)
self.assertTrue(math.isnan(math.pow(NAN, 1)))
self.assertTrue(math.isnan(math.pow(2, NAN)))
self.assertTrue(math.isnan(math.pow(0, NAN)))
self.assertEqual(math.pow(1, NAN), 1)
# pow(0., x)
self.assertEqual(math.pow(0., INF), 0.)
self.assertEqual(math.pow(0., 3.), 0.)
self.assertEqual(math.pow(0., 2.3), 0.)
self.assertEqual(math.pow(0., 2.), 0.)
self.assertEqual(math.pow(0., 0.), 1.)
self.assertEqual(math.pow(0., -0.), 1.)
self.assertRaises(ValueError, math.pow, 0., -2.)
self.assertRaises(ValueError, math.pow, 0., -2.3)
self.assertRaises(ValueError, math.pow, 0., -3.)
self.assertRaises(ValueError, math.pow, 0., NINF)
self.assertTrue(math.isnan(math.pow(0., NAN)))
# pow(INF, x)
self.assertEqual(math.pow(INF, INF), INF)
self.assertEqual(math.pow(INF, 3.), INF)
self.assertEqual(math.pow(INF, 2.3), INF)
self.assertEqual(math.pow(INF, 2.), INF)
self.assertEqual(math.pow(INF, 0.), 1.)
self.assertEqual(math.pow(INF, -0.), 1.)
self.assertEqual(math.pow(INF, -2.), 0.)
self.assertEqual(math.pow(INF, -2.3), 0.)
self.assertEqual(math.pow(INF, -3.), 0.)
self.assertEqual(math.pow(INF, NINF), 0.)
self.assertTrue(math.isnan(math.pow(INF, NAN)))
# pow(-0., x)
self.assertEqual(math.pow(-0., INF), 0.)
self.assertEqual(math.pow(-0., 3.), -0.)
self.assertEqual(math.pow(-0., 2.3), 0.)
self.assertEqual(math.pow(-0., 2.), 0.)
self.assertEqual(math.pow(-0., 0.), 1.)
self.assertEqual(math.pow(-0., -0.), 1.)
self.assertRaises(ValueError, math.pow, -0., -2.)
self.assertRaises(ValueError, math.pow, -0., -2.3)
self.assertRaises(ValueError, math.pow, -0., -3.)
self.assertRaises(ValueError, math.pow, -0., NINF)
self.assertTrue(math.isnan(math.pow(-0., NAN)))
# pow(NINF, x)
self.assertEqual(math.pow(NINF, INF), INF)
self.assertEqual(math.pow(NINF, 3.), NINF)
self.assertEqual(math.pow(NINF, 2.3), INF)
self.assertEqual(math.pow(NINF, 2.), INF)
self.assertEqual(math.pow(NINF, 0.), 1.)
self.assertEqual(math.pow(NINF, -0.), 1.)
self.assertEqual(math.pow(NINF, -2.), 0.)
self.assertEqual(math.pow(NINF, -2.3), 0.)
self.assertEqual(math.pow(NINF, -3.), -0.)
self.assertEqual(math.pow(NINF, NINF), 0.)
self.assertTrue(math.isnan(math.pow(NINF, NAN)))
# pow(-1, x)
self.assertEqual(math.pow(-1., INF), 1.)
self.assertEqual(math.pow(-1., 3.), -1.)
self.assertRaises(ValueError, math.pow, -1., 2.3)
self.assertEqual(math.pow(-1., 2.), 1.)
self.assertEqual(math.pow(-1., 0.), 1.)
self.assertEqual(math.pow(-1., -0.), 1.)
self.assertEqual(math.pow(-1., -2.), 1.)
self.assertRaises(ValueError, math.pow, -1., -2.3)
self.assertEqual(math.pow(-1., -3.), -1.)
self.assertEqual(math.pow(-1., NINF), 1.)
self.assertTrue(math.isnan(math.pow(-1., NAN)))
# pow(1, x)
self.assertEqual(math.pow(1., INF), 1.)
self.assertEqual(math.pow(1., 3.), 1.)
self.assertEqual(math.pow(1., 2.3), 1.)
self.assertEqual(math.pow(1., 2.), 1.)
self.assertEqual(math.pow(1., 0.), 1.)
self.assertEqual(math.pow(1., -0.), 1.)
self.assertEqual(math.pow(1., -2.), 1.)
self.assertEqual(math.pow(1., -2.3), 1.)
self.assertEqual(math.pow(1., -3.), 1.)
self.assertEqual(math.pow(1., NINF), 1.)
self.assertEqual(math.pow(1., NAN), 1.)
# pow(x, 0) should be 1 for any x
self.assertEqual(math.pow(2.3, 0.), 1.)
self.assertEqual(math.pow(-2.3, 0.), 1.)
self.assertEqual(math.pow(NAN, 0.), 1.)
self.assertEqual(math.pow(2.3, -0.), 1.)
self.assertEqual(math.pow(-2.3, -0.), 1.)
self.assertEqual(math.pow(NAN, -0.), 1.)
# pow(x, y) is invalid if x is negative and y is not integral
self.assertRaises(ValueError, math.pow, -1., 2.3)
self.assertRaises(ValueError, math.pow, -15., -3.1)
# pow(x, NINF)
self.assertEqual(math.pow(1.9, NINF), 0.)
self.assertEqual(math.pow(1.1, NINF), 0.)
self.assertEqual(math.pow(0.9, NINF), INF)
self.assertEqual(math.pow(0.1, NINF), INF)
self.assertEqual(math.pow(-0.1, NINF), INF)
self.assertEqual(math.pow(-0.9, NINF), INF)
self.assertEqual(math.pow(-1.1, NINF), 0.)
self.assertEqual(math.pow(-1.9, NINF), 0.)
# pow(x, INF)
self.assertEqual(math.pow(1.9, INF), INF)
self.assertEqual(math.pow(1.1, INF), INF)
self.assertEqual(math.pow(0.9, INF), 0.)
self.assertEqual(math.pow(0.1, INF), 0.)
self.assertEqual(math.pow(-0.1, INF), 0.)
self.assertEqual(math.pow(-0.9, INF), 0.)
self.assertEqual(math.pow(-1.1, INF), INF)
self.assertEqual(math.pow(-1.9, INF), INF)
# pow(x, y) should work for x negative, y an integer
self.ftest('(-2.)**3.', math.pow(-2.0, 3.0), -8.0)
self.ftest('(-2.)**2.', math.pow(-2.0, 2.0), 4.0)
self.ftest('(-2.)**1.', math.pow(-2.0, 1.0), -2.0)
self.ftest('(-2.)**0.', math.pow(-2.0, 0.0), 1.0)
self.ftest('(-2.)**-0.', math.pow(-2.0, -0.0), 1.0)
self.ftest('(-2.)**-1.', math.pow(-2.0, -1.0), -0.5)
self.ftest('(-2.)**-2.', math.pow(-2.0, -2.0), 0.25)
self.ftest('(-2.)**-3.', math.pow(-2.0, -3.0), -0.125)
self.assertRaises(ValueError, math.pow, -2.0, -0.5)
self.assertRaises(ValueError, math.pow, -2.0, 0.5)
# the following tests have been commented out since they don't
# really belong here: the implementation of ** for floats is
# independent of the implementation of math.pow
#self.assertEqual(1**NAN, 1)
#self.assertEqual(1**INF, 1)
#self.assertEqual(1**NINF, 1)
#self.assertEqual(1**0, 1)
#self.assertEqual(1.**NAN, 1)
#self.assertEqual(1.**INF, 1)
#self.assertEqual(1.**NINF, 1)
#self.assertEqual(1.**0, 1)
def testRadians(self):
self.assertRaises(TypeError, math.radians)
self.ftest('radians(180)', math.radians(180), math.pi)
self.ftest('radians(90)', math.radians(90), math.pi/2)
self.ftest('radians(-45)', math.radians(-45), -math.pi/4)
self.ftest('radians(0)', math.radians(0), 0)
@requires_IEEE_754
def testRemainder(self):
from fractions import Fraction
def validate_spec(x, y, r):
"""
Check that r matches remainder(x, y) according to the IEEE 754
specification. Assumes that x, y and r are finite and y is nonzero.
"""
fx, fy, fr = Fraction(x), Fraction(y), Fraction(r)
# r should not exceed y/2 in absolute value
self.assertLessEqual(abs(fr), abs(fy/2))
# x - r should be an exact integer multiple of y
n = (fx - fr) / fy
self.assertEqual(n, int(n))
if abs(fr) == abs(fy/2):
# If |r| == |y/2|, n should be even.
self.assertEqual(n/2, int(n/2))
# triples (x, y, remainder(x, y)) in hexadecimal form.
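        # (Reader aid, not in the original file: the tokens are hex floats, e.g.
        #  float.fromhex('0.c') == 0.75 and float.fromhex('0.4') == 0.25, so the
        #  entry ' 1 0.c 0.4' asserts that remainder(1.0, 0.75) == 0.25.)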
testcases = [
# Remainders modulo 1, showing the ties-to-even behaviour.
'-4.0 1 -0.0',
'-3.8 1 0.8',
'-3.0 1 -0.0',
'-2.8 1 -0.8',
'-2.0 1 -0.0',
'-1.8 1 0.8',
'-1.0 1 -0.0',
'-0.8 1 -0.8',
'-0.0 1 -0.0',
' 0.0 1 0.0',
' 0.8 1 0.8',
' 1.0 1 0.0',
' 1.8 1 -0.8',
' 2.0 1 0.0',
' 2.8 1 0.8',
' 3.0 1 0.0',
' 3.8 1 -0.8',
' 4.0 1 0.0',
# Reductions modulo 2*pi
'0x0.0p+0 0x1.921fb54442d18p+2 0x0.0p+0',
'0x1.921fb54442d18p+0 0x1.921fb54442d18p+2 0x1.921fb54442d18p+0',
'0x1.921fb54442d17p+1 0x1.921fb54442d18p+2 0x1.921fb54442d17p+1',
'0x1.921fb54442d18p+1 0x1.921fb54442d18p+2 0x1.921fb54442d18p+1',
'0x1.921fb54442d19p+1 0x1.921fb54442d18p+2 -0x1.921fb54442d17p+1',
'0x1.921fb54442d17p+2 0x1.921fb54442d18p+2 -0x0.0000000000001p+2',
'0x1.921fb54442d18p+2 0x1.921fb54442d18p+2 0x0p0',
'0x1.921fb54442d19p+2 0x1.921fb54442d18p+2 0x0.0000000000001p+2',
'0x1.2d97c7f3321d1p+3 0x1.921fb54442d18p+2 0x1.921fb54442d14p+1',
'0x1.2d97c7f3321d2p+3 0x1.921fb54442d18p+2 -0x1.921fb54442d18p+1',
'0x1.2d97c7f3321d3p+3 0x1.921fb54442d18p+2 -0x1.921fb54442d14p+1',
'0x1.921fb54442d17p+3 0x1.921fb54442d18p+2 -0x0.0000000000001p+3',
'0x1.921fb54442d18p+3 0x1.921fb54442d18p+2 0x0p0',
'0x1.921fb54442d19p+3 0x1.921fb54442d18p+2 0x0.0000000000001p+3',
'0x1.f6a7a2955385dp+3 0x1.921fb54442d18p+2 0x1.921fb54442d14p+1',
'0x1.f6a7a2955385ep+3 0x1.921fb54442d18p+2 0x1.921fb54442d18p+1',
'0x1.f6a7a2955385fp+3 0x1.921fb54442d18p+2 -0x1.921fb54442d14p+1',
'0x1.1475cc9eedf00p+5 0x1.921fb54442d18p+2 0x1.921fb54442d10p+1',
'0x1.1475cc9eedf01p+5 0x1.921fb54442d18p+2 -0x1.921fb54442d10p+1',
# Symmetry with respect to signs.
' 1 0.c 0.4',
'-1 0.c -0.4',
' 1 -0.c 0.4',
'-1 -0.c -0.4',
' 1.4 0.c -0.4',
'-1.4 0.c 0.4',
' 1.4 -0.c -0.4',
'-1.4 -0.c 0.4',
# Huge modulus, to check that the underlying algorithm doesn't
# rely on 2.0 * modulus being representable.
'0x1.dp+1023 0x1.4p+1023 0x0.9p+1023',
'0x1.ep+1023 0x1.4p+1023 -0x0.ap+1023',
'0x1.fp+1023 0x1.4p+1023 -0x0.9p+1023',
]
for case in testcases:
with self.subTest(case=case):
x_hex, y_hex, expected_hex = case.split()
x = float.fromhex(x_hex)
y = float.fromhex(y_hex)
expected = float.fromhex(expected_hex)
validate_spec(x, y, expected)
actual = math.remainder(x, y)
# Cheap way of checking that the floats are
# as identical as we need them to be.
self.assertEqual(actual.hex(), expected.hex())
# Test tiny subnormal modulus: there's potential for
# getting the implementation wrong here (for example,
# by assuming that modulus/2 is exactly representable).
tiny = float.fromhex('1p-1074') # min +ve subnormal
for n in range(-25, 25):
if n == 0:
continue
y = n * tiny
for m in range(100):
x = m * tiny
actual = math.remainder(x, y)
validate_spec(x, y, actual)
actual = math.remainder(-x, y)
validate_spec(-x, y, actual)
# Special values.
# NaNs should propagate as usual.
for value in [NAN, 0.0, -0.0, 2.0, -2.3, NINF, INF]:
self.assertIsNaN(math.remainder(NAN, value))
self.assertIsNaN(math.remainder(value, NAN))
# remainder(x, inf) is x, for non-nan non-infinite x.
for value in [-2.3, -0.0, 0.0, 2.3]:
self.assertEqual(math.remainder(value, INF), value)
self.assertEqual(math.remainder(value, NINF), value)
# remainder(x, 0) and remainder(infinity, x) for non-NaN x are invalid
# operations according to IEEE 754-2008 7.2(f), and should raise.
for value in [NINF, -2.3, -0.0, 0.0, 2.3, INF]:
with self.assertRaises(ValueError):
math.remainder(INF, value)
with self.assertRaises(ValueError):
math.remainder(NINF, value)
with self.assertRaises(ValueError):
math.remainder(value, 0.0)
with self.assertRaises(ValueError):
math.remainder(value, -0.0)
def testSin(self):
self.assertRaises(TypeError, math.sin)
self.ftest('sin(0)', math.sin(0), 0)
self.ftest('sin(pi/2)', math.sin(math.pi/2), 1)
self.ftest('sin(-pi/2)', math.sin(-math.pi/2), -1)
try:
self.assertTrue(math.isnan(math.sin(INF)))
self.assertTrue(math.isnan(math.sin(NINF)))
except ValueError:
self.assertRaises(ValueError, math.sin, INF)
self.assertRaises(ValueError, math.sin, NINF)
self.assertTrue(math.isnan(math.sin(NAN)))
def testSinh(self):
self.assertRaises(TypeError, math.sinh)
self.ftest('sinh(0)', math.sinh(0), 0)
self.ftest('sinh(1)**2-cosh(1)**2', math.sinh(1)**2-math.cosh(1)**2, -1)
self.ftest('sinh(1)+sinh(-1)', math.sinh(1)+math.sinh(-1), 0)
self.assertEqual(math.sinh(INF), INF)
self.assertEqual(math.sinh(NINF), NINF)
self.assertTrue(math.isnan(math.sinh(NAN)))
def testSqrt(self):
self.assertRaises(TypeError, math.sqrt)
self.ftest('sqrt(0)', math.sqrt(0), 0)
self.ftest('sqrt(1)', math.sqrt(1), 1)
self.ftest('sqrt(4)', math.sqrt(4), 2)
self.assertEqual(math.sqrt(INF), INF)
self.assertRaises(ValueError, math.sqrt, -1)
self.assertRaises(ValueError, math.sqrt, NINF)
self.assertTrue(math.isnan(math.sqrt(NAN)))
def testTan(self):
self.assertRaises(TypeError, math.tan)
self.ftest('tan(0)', math.tan(0), 0)
self.ftest('tan(pi/4)', math.tan(math.pi/4), 1)
self.ftest('tan(-pi/4)', math.tan(-math.pi/4), -1)
try:
self.assertTrue(math.isnan(math.tan(INF)))
self.assertTrue(math.isnan(math.tan(NINF)))
        except ValueError:
self.assertRaises(ValueError, math.tan, INF)
self.assertRaises(ValueError, math.tan, NINF)
self.assertTrue(math.isnan(math.tan(NAN)))
def testTanh(self):
self.assertRaises(TypeError, math.tanh)
self.ftest('tanh(0)', math.tanh(0), 0)
self.ftest('tanh(1)+tanh(-1)', math.tanh(1)+math.tanh(-1), 0,
abs_tol=ulp(1))
self.ftest('tanh(inf)', math.tanh(INF), 1)
self.ftest('tanh(-inf)', math.tanh(NINF), -1)
self.assertTrue(math.isnan(math.tanh(NAN)))
@requires_IEEE_754
def testTanhSign(self):
# check that tanh(-0.) == -0. on IEEE 754 systems
self.assertEqual(math.tanh(-0.), -0.)
self.assertEqual(math.copysign(1., math.tanh(-0.)),
math.copysign(1., -0.))
def test_trunc(self):
self.assertEqual(math.trunc(1), 1)
self.assertEqual(math.trunc(-1), -1)
self.assertEqual(type(math.trunc(1)), int)
self.assertEqual(type(math.trunc(1.5)), int)
self.assertEqual(math.trunc(1.5), 1)
self.assertEqual(math.trunc(-1.5), -1)
self.assertEqual(math.trunc(1.999999), 1)
self.assertEqual(math.trunc(-1.999999), -1)
self.assertEqual(math.trunc(-0.999999), -0)
self.assertEqual(math.trunc(-100.999), -100)
class TestTrunc(object):
def __trunc__(self):
return 23
class TestNoTrunc(object):
pass
self.assertEqual(math.trunc(TestTrunc()), 23)
self.assertRaises(TypeError, math.trunc)
self.assertRaises(TypeError, math.trunc, 1, 2)
self.assertRaises(TypeError, math.trunc, TestNoTrunc())
def testIsfinite(self):
self.assertTrue(math.isfinite(0.0))
self.assertTrue(math.isfinite(-0.0))
self.assertTrue(math.isfinite(1.0))
self.assertTrue(math.isfinite(-1.0))
self.assertFalse(math.isfinite(float("nan")))
self.assertFalse(math.isfinite(float("inf")))
self.assertFalse(math.isfinite(float("-inf")))
def testIsnan(self):
self.assertTrue(math.isnan(float("nan")))
self.assertTrue(math.isnan(float("-nan")))
self.assertTrue(math.isnan(float("inf") * 0.))
self.assertFalse(math.isnan(float("inf")))
self.assertFalse(math.isnan(0.))
self.assertFalse(math.isnan(1.))
def testIsinf(self):
self.assertTrue(math.isinf(float("inf")))
self.assertTrue(math.isinf(float("-inf")))
self.assertTrue(math.isinf(1E400))
self.assertTrue(math.isinf(-1E400))
self.assertFalse(math.isinf(float("nan")))
self.assertFalse(math.isinf(0.))
self.assertFalse(math.isinf(1.))
@requires_IEEE_754
def test_nan_constant(self):
self.assertTrue(math.isnan(math.nan))
@requires_IEEE_754
def test_inf_constant(self):
self.assertTrue(math.isinf(math.inf))
self.assertGreater(math.inf, 0.0)
self.assertEqual(math.inf, float("inf"))
self.assertEqual(-math.inf, float("-inf"))
# RED_FLAG 16-Oct-2000 Tim
# While 2.0 is more consistent about exceptions than previous releases, it
# still fails this part of the test on some platforms. For now, we only
# *run* test_exceptions() in verbose mode, so that this isn't normally
# tested.
@unittest.skipUnless(verbose, 'requires verbose mode')
def test_exceptions(self):
try:
x = math.exp(-1000000000)
except:
# mathmodule.c is failing to weed out underflows from libm, or
# we've got an fp format with huge dynamic range
self.fail("underflowing exp() should not have raised "
"an exception")
if x != 0:
self.fail("underflowing exp() should have returned 0")
# If this fails, probably using a strict IEEE-754 conforming libm, and x
# is +Inf afterwards. But Python wants overflows detected by default.
try:
x = math.exp(1000000000)
except OverflowError:
pass
else:
self.fail("overflowing exp() didn't trigger OverflowError")
# If this fails, it could be a puzzle. One odd possibility is that
# mathmodule.c's macros are getting confused while comparing
# Inf (HUGE_VAL) to a NaN, and artificially setting errno to ERANGE
# as a result (and so raising OverflowError instead).
try:
x = math.sqrt(-1.0)
except ValueError:
pass
else:
self.fail("sqrt(-1) didn't raise ValueError")
@requires_IEEE_754
def test_testfile(self):
# Some tests need to be skipped on ancient OS X versions.
# See issue #27953.
SKIP_ON_TIGER = {'tan0064'}
osx_version = None
if sys.platform == 'darwin':
version_txt = platform.mac_ver()[0]
try:
osx_version = tuple(map(int, version_txt.split('.')))
except ValueError:
pass
fail_fmt = "{}: {}({!r}): {}"
failures = []
for id, fn, ar, ai, er, ei, flags in parse_testfile(test_file):
# Skip if either the input or result is complex
if ai != 0.0 or ei != 0.0:
continue
if fn in ['rect', 'polar']:
# no real versions of rect, polar
continue
# Skip certain tests on OS X 10.4.
if osx_version is not None and osx_version < (10, 5):
if id in SKIP_ON_TIGER:
continue
func = getattr(math, fn)
if 'invalid' in flags or 'divide-by-zero' in flags:
er = 'ValueError'
elif 'overflow' in flags:
er = 'OverflowError'
try:
result = func(ar)
except ValueError:
result = 'ValueError'
except OverflowError:
result = 'OverflowError'
# Default tolerances
ulp_tol, abs_tol = 5, 0.0
failure = result_check(er, result, ulp_tol, abs_tol)
if failure is None:
continue
msg = fail_fmt.format(id, fn, ar, failure)
failures.append(msg)
if failures:
self.fail('Failures in test_testfile:\n ' +
'\n '.join(failures))
@requires_IEEE_754
def test_mtestfile(self):
fail_fmt = "{}: {}({!r}): {}"
failures = []
for id, fn, arg, expected, flags in parse_mtestfile(math_testcases):
func = getattr(math, fn)
if 'invalid' in flags or 'divide-by-zero' in flags:
expected = 'ValueError'
elif 'overflow' in flags:
expected = 'OverflowError'
try:
got = func(arg)
except ValueError:
got = 'ValueError'
except OverflowError:
got = 'OverflowError'
# Default tolerances
ulp_tol, abs_tol = 5, 0.0
# Exceptions to the defaults
if fn == 'gamma':
# Experimental results on one platform gave
# an accuracy of <= 10 ulps across the entire float
# domain. We weaken that to require 20 ulp accuracy.
ulp_tol = 20
elif fn == 'lgamma':
# we use a weaker accuracy test for lgamma;
# lgamma only achieves an absolute error of
# a few multiples of the machine accuracy, in
# general.
abs_tol = 1e-15
elif fn == 'erfc' and arg >= 0.0:
# erfc has less-than-ideal accuracy for large
# arguments (x ~ 25 or so), mainly due to the
# error involved in computing exp(-x*x).
#
# Observed between CPython and mpmath at 25 dp:
# x < 0 : err <= 2 ulp
# 0 <= x < 1 : err <= 10 ulp
# 1 <= x < 10 : err <= 100 ulp
# 10 <= x < 20 : err <= 300 ulp
# 20 <= x : < 600 ulp
#
if arg < 1.0:
ulp_tol = 10
elif arg < 10.0:
ulp_tol = 100
else:
ulp_tol = 1000
failure = result_check(expected, got, ulp_tol, abs_tol)
if failure is None:
continue
msg = fail_fmt.format(id, fn, arg, failure)
failures.append(msg)
if failures:
self.fail('Failures in test_mtestfile:\n ' +
'\n '.join(failures))
def test_prod(self):
prod = math.prod
self.assertEqual(prod([]), 1)
self.assertEqual(prod([], start=5), 5)
self.assertEqual(prod(list(range(2,8))), 5040)
self.assertEqual(prod(iter(list(range(2,8)))), 5040)
self.assertEqual(prod(range(1, 10), start=10), 3628800)
self.assertEqual(prod([1, 2, 3, 4, 5]), 120)
self.assertEqual(prod([1.0, 2.0, 3.0, 4.0, 5.0]), 120.0)
self.assertEqual(prod([1, 2, 3, 4.0, 5.0]), 120.0)
self.assertEqual(prod([1.0, 2.0, 3.0, 4, 5]), 120.0)
# Test overflow in fast-path for integers
self.assertEqual(prod([1, 1, 2**32, 1, 1]), 2**32)
# Test overflow in fast-path for floats
self.assertEqual(prod([1.0, 1.0, 2**32, 1, 1]), float(2**32))
self.assertRaises(TypeError, prod)
self.assertRaises(TypeError, prod, 42)
self.assertRaises(TypeError, prod, ['a', 'b', 'c'])
self.assertRaises(TypeError, prod, ['a', 'b', 'c'], '')
self.assertRaises(TypeError, prod, [b'a', b'c'], b'')
values = [bytearray(b'a'), bytearray(b'b')]
self.assertRaises(TypeError, prod, values, bytearray(b''))
self.assertRaises(TypeError, prod, [[1], [2], [3]])
self.assertRaises(TypeError, prod, [{2:3}])
self.assertRaises(TypeError, prod, [{2:3}]*2, {2:3})
self.assertRaises(TypeError, prod, [[1], [2], [3]], [])
with self.assertRaises(TypeError):
prod([10, 20], [30, 40]) # start is a keyword-only argument
self.assertEqual(prod([0, 1, 2, 3]), 0)
self.assertEqual(prod([1, 0, 2, 3]), 0)
self.assertEqual(prod([1, 2, 3, 0]), 0)
def _naive_prod(iterable, start=1):
for elem in iterable:
start *= elem
return start
# Big integers
iterable = range(1, 10000)
self.assertEqual(prod(iterable), _naive_prod(iterable))
iterable = range(-10000, -1)
self.assertEqual(prod(iterable), _naive_prod(iterable))
iterable = range(-1000, 1000)
self.assertEqual(prod(iterable), 0)
# Big floats
iterable = [float(x) for x in range(1, 1000)]
self.assertEqual(prod(iterable), _naive_prod(iterable))
iterable = [float(x) for x in range(-1000, -1)]
self.assertEqual(prod(iterable), _naive_prod(iterable))
iterable = [float(x) for x in range(-1000, 1000)]
self.assertIsNaN(prod(iterable))
# Float tests
self.assertIsNaN(prod([1, 2, 3, float("nan"), 2, 3]))
self.assertIsNaN(prod([1, 0, float("nan"), 2, 3]))
self.assertIsNaN(prod([1, float("nan"), 0, 3]))
self.assertIsNaN(prod([1, float("inf"), float("nan"),3]))
self.assertIsNaN(prod([1, float("-inf"), float("nan"),3]))
self.assertIsNaN(prod([1, float("nan"), float("inf"),3]))
self.assertIsNaN(prod([1, float("nan"), float("-inf"),3]))
self.assertEqual(prod([1, 2, 3, float('inf'),-3,4]), float('-inf'))
self.assertEqual(prod([1, 2, 3, float('-inf'),-3,4]), float('inf'))
self.assertIsNaN(prod([1,2,0,float('inf'), -3, 4]))
self.assertIsNaN(prod([1,2,0,float('-inf'), -3, 4]))
self.assertIsNaN(prod([1, 2, 3, float('inf'), -3, 0, 3]))
self.assertIsNaN(prod([1, 2, 3, float('-inf'), -3, 0, 2]))
# Type preservation
self.assertEqual(type(prod([1, 2, 3, 4, 5, 6])), int)
self.assertEqual(type(prod([1, 2.0, 3, 4, 5, 6])), float)
self.assertEqual(type(prod(range(1, 10000))), int)
self.assertEqual(type(prod(range(1, 10000), start=1.0)), float)
self.assertEqual(type(prod([1, decimal.Decimal(2.0), 3, 4, 5, 6])),
decimal.Decimal)
def testPerm(self):
perm = math.perm
factorial = math.factorial
# Test if factorial definition is satisfied
for n in range(100):
for k in range(n + 1):
self.assertEqual(perm(n, k),
factorial(n) // factorial(n - k))
# Test for Pascal's identity
for n in range(1, 100):
for k in range(1, n):
self.assertEqual(perm(n, k), perm(n - 1, k - 1) * k + perm(n - 1, k))
# Test corner cases
for n in range(1, 100):
self.assertEqual(perm(n, 0), 1)
self.assertEqual(perm(n, 1), n)
self.assertEqual(perm(n, n), factorial(n))
# Test one argument form
for n in range(20):
self.assertEqual(perm(n), factorial(n))
self.assertEqual(perm(n, None), factorial(n))
# Raises TypeError if any argument is non-integer or argument count is
# not 1 or 2
self.assertRaises(TypeError, perm, 10, 1.0)
self.assertRaises(TypeError, perm, 10, decimal.Decimal(1.0))
self.assertRaises(TypeError, perm, 10, "1")
self.assertRaises(TypeError, perm, 10.0, 1)
self.assertRaises(TypeError, perm, decimal.Decimal(10.0), 1)
self.assertRaises(TypeError, perm, "10", 1)
self.assertRaises(TypeError, perm)
self.assertRaises(TypeError, perm, 10, 1, 3)
self.assertRaises(TypeError, perm)
        # Raises ValueError if k or n is negative
self.assertRaises(ValueError, perm, -1, 1)
self.assertRaises(ValueError, perm, -2**1000, 1)
self.assertRaises(ValueError, perm, 1, -1)
self.assertRaises(ValueError, perm, 1, -2**1000)
# Returns zero if k is greater than n
self.assertEqual(perm(1, 2), 0)
self.assertEqual(perm(1, 2**1000), 0)
n = 2**1000
self.assertEqual(perm(n, 0), 1)
self.assertEqual(perm(n, 1), n)
self.assertEqual(perm(n, 2), n * (n-1))
if support.check_impl_detail(cpython=True):
self.assertRaises(OverflowError, perm, n, n)
for n, k in (True, True), (True, False), (False, False):
self.assertEqual(perm(n, k), 1)
self.assertIs(type(perm(n, k)), int)
self.assertEqual(perm(IntSubclass(5), IntSubclass(2)), 20)
self.assertEqual(perm(MyIndexable(5), MyIndexable(2)), 20)
for k in range(3):
self.assertIs(type(perm(IntSubclass(5), IntSubclass(k))), int)
self.assertIs(type(perm(MyIndexable(5), MyIndexable(k))), int)
def testComb(self):
comb = math.comb
factorial = math.factorial
# Test if factorial definition is satisfied
for n in range(100):
for k in range(n + 1):
self.assertEqual(comb(n, k), factorial(n)
// (factorial(k) * factorial(n - k)))
# Test for Pascal's identity
for n in range(1, 100):
for k in range(1, n):
self.assertEqual(comb(n, k), comb(n - 1, k - 1) + comb(n - 1, k))
# Test corner cases
for n in range(100):
self.assertEqual(comb(n, 0), 1)
self.assertEqual(comb(n, n), 1)
for n in range(1, 100):
self.assertEqual(comb(n, 1), n)
self.assertEqual(comb(n, n - 1), n)
# Test Symmetry
for n in range(100):
for k in range(n // 2):
self.assertEqual(comb(n, k), comb(n, n - k))
# Raises TypeError if any argument is non-integer or argument count is
# not 2
self.assertRaises(TypeError, comb, 10, 1.0)
self.assertRaises(TypeError, comb, 10, decimal.Decimal(1.0))
self.assertRaises(TypeError, comb, 10, "1")
self.assertRaises(TypeError, comb, 10.0, 1)
self.assertRaises(TypeError, comb, decimal.Decimal(10.0), 1)
self.assertRaises(TypeError, comb, "10", 1)
self.assertRaises(TypeError, comb, 10)
self.assertRaises(TypeError, comb, 10, 1, 3)
self.assertRaises(TypeError, comb)
        # Raises ValueError if k or n is negative
self.assertRaises(ValueError, comb, -1, 1)
self.assertRaises(ValueError, comb, -2**1000, 1)
self.assertRaises(ValueError, comb, 1, -1)
self.assertRaises(ValueError, comb, 1, -2**1000)
# Returns zero if k is greater than n
self.assertEqual(comb(1, 2), 0)
self.assertEqual(comb(1, 2**1000), 0)
n = 2**1000
self.assertEqual(comb(n, 0), 1)
self.assertEqual(comb(n, 1), n)
self.assertEqual(comb(n, 2), n * (n-1) // 2)
self.assertEqual(comb(n, n), 1)
self.assertEqual(comb(n, n-1), n)
self.assertEqual(comb(n, n-2), n * (n-1) // 2)
if support.check_impl_detail(cpython=True):
self.assertRaises(OverflowError, comb, n, n//2)
for n, k in (True, True), (True, False), (False, False):
self.assertEqual(comb(n, k), 1)
self.assertIs(type(comb(n, k)), int)
self.assertEqual(comb(IntSubclass(5), IntSubclass(2)), 10)
self.assertEqual(comb(MyIndexable(5), MyIndexable(2)), 10)
for k in range(3):
self.assertIs(type(comb(IntSubclass(5), IntSubclass(k))), int)
self.assertIs(type(comb(MyIndexable(5), MyIndexable(k))), int)
def test_issue39871(self):
# A SystemError should not be raised if the first arg to atan2(),
# copysign(), or remainder() cannot be converted to a float.
class F:
def __float__(self):
self.converted = True
1/0
for func in math.atan2, math.copysign, math.remainder:
y = F()
with self.assertRaises(TypeError):
func("not a number", y)
# There should not have been any attempt to convert the second
# argument to a float.
self.assertFalse(getattr(y, "converted", False))
# Custom assertions.
def assertIsNaN(self, value):
if not math.isnan(value):
self.fail("Expected a NaN, got {!r}.".format(value))
class IsCloseTests(unittest.TestCase):
isclose = math.isclose # subclasses should override this
def assertIsClose(self, a, b, *args, **kwargs):
self.assertTrue(self.isclose(a, b, *args, **kwargs),
msg="%s and %s should be close!" % (a, b))
def assertIsNotClose(self, a, b, *args, **kwargs):
self.assertFalse(self.isclose(a, b, *args, **kwargs),
msg="%s and %s should not be close!" % (a, b))
def assertAllClose(self, examples, *args, **kwargs):
for a, b in examples:
self.assertIsClose(a, b, *args, **kwargs)
def assertAllNotClose(self, examples, *args, **kwargs):
for a, b in examples:
self.assertIsNotClose(a, b, *args, **kwargs)
def test_negative_tolerances(self):
# ValueError should be raised if either tolerance is less than zero
with self.assertRaises(ValueError):
self.assertIsClose(1, 1, rel_tol=-1e-100)
with self.assertRaises(ValueError):
self.assertIsClose(1, 1, rel_tol=1e-100, abs_tol=-1e10)
def test_identical(self):
# identical values must test as close
identical_examples = [(2.0, 2.0),
(0.1e200, 0.1e200),
(1.123e-300, 1.123e-300),
(12345, 12345.0),
(0.0, -0.0),
(345678, 345678)]
self.assertAllClose(identical_examples, rel_tol=0.0, abs_tol=0.0)
def test_eight_decimal_places(self):
# examples that are close to 1e-8, but not 1e-9
eight_decimal_places_examples = [(1e8, 1e8 + 1),
(-1e-8, -1.000000009e-8),
(1.12345678, 1.12345679)]
self.assertAllClose(eight_decimal_places_examples, rel_tol=1e-8)
self.assertAllNotClose(eight_decimal_places_examples, rel_tol=1e-9)
def test_near_zero(self):
# values close to zero
near_zero_examples = [(1e-9, 0.0),
(-1e-9, 0.0),
(-1e-150, 0.0)]
# these should not be close to any rel_tol
self.assertAllNotClose(near_zero_examples, rel_tol=0.9)
# these should be close to abs_tol=1e-8
self.assertAllClose(near_zero_examples, abs_tol=1e-8)
def test_identical_infinite(self):
# these are close regardless of tolerance -- i.e. they are equal
self.assertIsClose(INF, INF)
self.assertIsClose(INF, INF, abs_tol=0.0)
self.assertIsClose(NINF, NINF)
self.assertIsClose(NINF, NINF, abs_tol=0.0)
def test_inf_ninf_nan(self):
# these should never be close (following IEEE 754 rules for equality)
not_close_examples = [(NAN, NAN),
(NAN, 1e-100),
(1e-100, NAN),
(INF, NAN),
(NAN, INF),
(INF, NINF),
(INF, 1.0),
(1.0, INF),
(INF, 1e308),
(1e308, INF)]
# use largest reasonable tolerance
self.assertAllNotClose(not_close_examples, abs_tol=0.999999999999999)
def test_zero_tolerance(self):
# test with zero tolerance
zero_tolerance_close_examples = [(1.0, 1.0),
(-3.4, -3.4),
(-1e-300, -1e-300)]
self.assertAllClose(zero_tolerance_close_examples, rel_tol=0.0)
zero_tolerance_not_close_examples = [(1.0, 1.000000000000001),
(0.99999999999999, 1.0),
(1.0e200, .999999999999999e200)]
self.assertAllNotClose(zero_tolerance_not_close_examples, rel_tol=0.0)
def test_asymmetry(self):
# test the asymmetry example from PEP 485
self.assertAllClose([(9, 10), (10, 9)], rel_tol=0.1)
def test_integers(self):
# test with integer values
integer_examples = [(100000001, 100000000),
(123456789, 123456788)]
self.assertAllClose(integer_examples, rel_tol=1e-8)
self.assertAllNotClose(integer_examples, rel_tol=1e-9)
def test_decimals(self):
# test with Decimal values
from decimal import Decimal
decimal_examples = [(Decimal('1.00000001'), Decimal('1.0')),
(Decimal('1.00000001e-20'), Decimal('1.0e-20')),
(Decimal('1.00000001e-100'), Decimal('1.0e-100')),
(Decimal('1.00000001e20'), Decimal('1.0e20'))]
self.assertAllClose(decimal_examples, rel_tol=1e-8)
self.assertAllNotClose(decimal_examples, rel_tol=1e-9)
def test_fractions(self):
# test with Fraction values
from fractions import Fraction
fraction_examples = [
(Fraction(1, 100000000) + 1, Fraction(1)),
(Fraction(100000001), Fraction(100000000)),
(Fraction(10**8 + 1, 10**28), Fraction(1, 10**20))]
self.assertAllClose(fraction_examples, rel_tol=1e-8)
self.assertAllNotClose(fraction_examples, rel_tol=1e-9)
def test_main():
from doctest import DocFileSuite
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(MathTests))
suite.addTest(unittest.makeSuite(IsCloseTests))
suite.addTest(DocFileSuite("ieee754.txt"))
run_unittest(suite)
if __name__ == '__main__':
test_main()
| 41.310751 | 98 | 0.56574 |
4a1ba9e317be7c16062368d40d123f917b95afb4
| 929 |
py
|
Python
|
object_detection/parser.py
|
tugot17/Pytorch-Lightning-Templates-
|
36ec7ac8f1699e4e266f0d4f08df67a2873ba0f8
|
[
"MIT"
] | 2 |
2020-11-04T19:14:51.000Z
|
2021-03-04T13:14:17.000Z
|
object_detection/parser.py
|
tugot17/Pytorch-Lightning-Templates-
|
36ec7ac8f1699e4e266f0d4f08df67a2873ba0f8
|
[
"MIT"
] | null | null | null |
object_detection/parser.py
|
tugot17/Pytorch-Lightning-Templates-
|
36ec7ac8f1699e4e266f0d4f08df67a2873ba0f8
|
[
"MIT"
] | null | null | null |
import json
from pathlib import Path
from typing import List, Union
from icevision.all import parsers, IDMap, ClassMap, BBox
class ArtifactParser(parsers.FasterRCNN, parsers.FilepathMixin, parsers.SizeMixin):
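    # (Reader note, not in the original) Each dataframe row is one annotation; the parser
    # exposes the image id, file path, image size, label and a single xyxy bounding box
    # to icevision.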
def __init__(self, df):
self.df = df
self.imageid_map = IDMap()
self.class_map = ClassMap(["Port", "Necklace", "Endo", "ICD"])
def __iter__(self):
yield from self.df.itertuples()
def __len__(self):
return len(self.df)
def imageid(self, o) -> int:
return self.imageid_map[o.image_id]
def filepath(self, o) -> Union[str, Path]:
return o.img_path
def height(self, o) -> int:
return o.height
def width(self, o) -> int:
return o.width
def labels(self, o) -> List[int]:
return [self.class_map.get_name(o.label)]
def bboxes(self, o) -> List[BBox]:
return [BBox.from_xyxy(*json.loads(o.bbox))]
| 25.805556 | 83 | 0.630786 |
4a1baa6eccd0dd667225fbec048fbb7ac6a19eac
| 577 |
py
|
Python
|
tests/rst/__init__.py
|
LudditeLabs/autodoc-tool
|
b4ae7e3b61907e7e9c3a1b534fce055e5860ffab
|
[
"Apache-2.0"
] | null | null | null |
tests/rst/__init__.py
|
LudditeLabs/autodoc-tool
|
b4ae7e3b61907e7e9c3a1b534fce055e5860ffab
|
[
"Apache-2.0"
] | null | null | null |
tests/rst/__init__.py
|
LudditeLabs/autodoc-tool
|
b4ae7e3b61907e7e9c3a1b534fce055e5860ffab
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2018 Luddite Labs Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| 41.214286 | 74 | 0.767764 |
4a1baaf2b4f6eb30b14993dc570d8d4c542fecc9
| 1,386 |
py
|
Python
|
templates/drawDifferentialMissingnessPlot.py
|
sickle-in-africa/h3agwas
|
b0d7eccc48f4a2c70f9d606ca9f9589a32e27924
|
[
"MIT"
] | null | null | null |
templates/drawDifferentialMissingnessPlot.py
|
sickle-in-africa/h3agwas
|
b0d7eccc48f4a2c70f9d606ca9f9589a32e27924
|
[
"MIT"
] | null | null | null |
templates/drawDifferentialMissingnessPlot.py
|
sickle-in-africa/h3agwas
|
b0d7eccc48f4a2c70f9d606ca9f9589a32e27924
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
from matplotlib.ticker import MaxNLocator
import pandas as pd
import numpy as np
import argparse
import sys
import math
import bisect
from scipy.stats import gaussian_kde
def parseArguments():
if len(sys.argv)<=1:
sys.argv=\
"drawDifferentialMissingnessPlot.py $input $output".split()
parser=argparse.ArgumentParser()
    parser.add_argument('input', type=str, metavar='imiss')
    parser.add_argument('output', type=str, metavar='output')
args = parser.parse_args()
return args
args = parseArguments()
frq = pd.read_csv(args.input,delim_whitespace=True)
if len(frq) >= 1:
frq["logP"] = np.log10(frq["P"])
fig, ax = plt.subplots(figsize=(9,8))
    font = {'family' : 'sans-serif','weight' : 'bold','size' : 13}
matplotlib.rc('font', **font)
matplotlib.rcParams['xtick.labelsize']=15
matplotlib.rcParams['ytick.labelsize']=15
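    # Empirical CDF (reader aid, not in the original script): sort the logP values
    # (dropping the smallest) and plot the cumulative fraction of SNPs at or below each value.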
miss=np.sort(frq["logP"])[1:]
    n = np.arange(1,len(miss)+1) / float(len(miss))
ax.step(miss,n)
ax.set_xlabel("logP differential missingness",fontsize=14)
ax.set_ylabel("Fraction of SNPs",fontsize=14)
ax.set_title("Cumulative proportion of SNPs with given differential missingness.")
fig.tight_layout()
plt.savefig(args.output)
else:
g=open(args.output,"w")
g.close()
| 29.489362 | 84 | 0.715007 |
4a1babca2ca0a07d11c138954674dd72309b2d9e
| 24,651 |
py
|
Python
|
Packs/CrowdStrikeFalconSandbox/Integrations/CrowdstrikeFalconSandboxV2/CrowdStrikeFalconSandboxV2.py
|
cstone112/content
|
7f039931b8cfc20e89df52d895440b7321149a0d
|
[
"MIT"
] | 2 |
2021-12-06T21:38:24.000Z
|
2022-01-13T08:23:36.000Z
|
Packs/CrowdStrikeFalconSandbox/Integrations/CrowdstrikeFalconSandboxV2/CrowdStrikeFalconSandboxV2.py
|
cstone112/content
|
7f039931b8cfc20e89df52d895440b7321149a0d
|
[
"MIT"
] | 87 |
2022-02-23T12:10:53.000Z
|
2022-03-31T11:29:05.000Z
|
Packs/CrowdStrikeFalconSandbox/Integrations/CrowdstrikeFalconSandboxV2/CrowdStrikeFalconSandboxV2.py
|
cstone112/content
|
7f039931b8cfc20e89df52d895440b7321149a0d
|
[
"MIT"
] | 2 |
2022-01-05T15:27:01.000Z
|
2022-02-01T19:27:43.000Z
|
from requests import Response
import demistomock as demisto
from CommonServerPython import * # noqa # pylint: disable=unused-wildcard-import
import requests
import traceback
from typing import Dict, Any, Callable
# Disable insecure warnings
requests.packages.urllib3.disable_warnings() # pylint: disable=no-member
SERVER_URL = 'https://www.hybrid-analysis.com/api/v2'
SEARCH_TERM_QUERY_ARGS = ('filename', 'filetype', 'filetype_desc', 'env_id', 'country', 'verdict', 'av_detect',
'vx_family', 'tag', 'date_from', 'date_to', 'port', 'host', 'domain', 'url', 'similar_to',
'context', 'imp_hash', 'ssdeep', 'authentihash')
# envid repeated for bw compatibility. Must be in this order so old overrides new default
SUBMISSION_PARAMETERS = ('environmentID', 'environmentId', 'no_share_third_party', 'allow_community_access',
'no_hash_lookup',
'action_script', 'hybrid_analysis', 'experimental_anti_evasion', 'script_logging',
'input_sample_tampering', 'network_settings', 'email', 'comment', 'custom_cmd_line',
'custom_run_time', 'submit_name', 'priority', 'document_password', 'environment_variable',
)
class Client(BaseClient):
def get_environments(self) -> List[Dict]:
return self._http_request(method='GET', url_suffix='/system/environments')
def get_screenshots(self, key: str) -> List[Dict[str, Any]]:
return self._http_request(method='GET', url_suffix=f'/report/{key}/screenshots')
def search(self, query_args: Dict[str, Any]):
self._headers['Content-Type'] = 'application/x-www-form-urlencoded'
return self._http_request(method='POST', url_suffix='/search/terms', data=query_args)
def scan(self, files: List[str]) -> List[Dict[str, Any]]:
self._headers['Content-Type'] = 'application/x-www-form-urlencoded'
return self._http_request(method='POST', url_suffix='/search/hashes', data={'hashes[]': files})
def analysis_overview(self, sha256hash: str) -> Dict[str, Any]:
return self._http_request(method='GET', url_suffix=f'/overview/{sha256hash}')
def analysis_overview_summary(self, sha256hash: str) -> Dict[str, Any]:
return self._http_request(method='GET', url_suffix=f'/overview/{sha256hash}/summary')
def analysis_overview_refresh(self, sha256hash: str):
self._http_request(method='GET', url_suffix=f'/overview/{sha256hash}/refresh')
def get_report(self, key: str, filetype: str) -> Response:
return self._http_request(method='GET', url_suffix=f'/report/{key}/report/{filetype}', resp_type='response',
ok_codes=(200, 404))
def get_state(self, key: str) -> Dict[str, Any]:
return self._http_request(method='GET', url_suffix=f'/report/{key}/state')
def submit_url(self, url: str, params: Dict[str, Any]) -> Dict[str, Any]:
return self._http_request(method='POST', data={'url': url, **params},
url_suffix='/submit/url')
def submit_file(self, file_contents: Dict, params: Dict[str, Any]) -> Dict:
return self._http_request(method='POST', data=params, url_suffix='/submit/file',
files={'file': (file_contents['name'], open(file_contents['path'], 'rb'))})
def download_sample(self, sha256hash: str) -> Response:
return self._http_request(method='GET', url_suffix=f'/overview/{sha256hash}/sample', resp_type='response')
# for BW compatibility with v1 we need to return same object keys
def map_dict_keys(obj: Dict, map_rules: Dict[str, str], only_given_fields: bool = False) -> Dict[Any, Any]:
"""
This function will switch the keys of a dictionary according to the provided map_rules.
Example: map_dict_keys( {'a' : 'b', 'c' : 'd' }, {'a' : 'y', 'c' : 'z' }) -> {'y' : 'b' , 'z' : 'd'}
Args:
obj: The original dictionary
map_rules: The mapping the keys should be changed by
only_given_fields: whether fields besides for those in map_rules should be used
Returns : a Dict according to the map_rules
"""
return {map_rules.get(key, key): obj[key] for key in obj.keys() if not only_given_fields or key in map_rules}
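# Illustrative usage (not part of the original module): with only_given_fields=True,
# keys missing from map_rules are dropped, e.g.
# map_dict_keys({'a': 1, 'c': 2}, {'a': 'y'}, only_given_fields=True) -> {'y': 1}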
def translate_verdict(param: str) -> int:
return {
'Whitelisted': 1,
'NoVerdict': 2,
'NoSpecificThreat': 3,
'Suspicious': 4,
'Malicious': 5
}[param]
def split_query_to_term_args(query: str) -> Dict[str, Any]:
def get_value(term: str) -> str:
return term[term.index(':') + 1:].strip()
def get_key(term: str) -> str:
return term[:term.index(':')].strip()
return {get_key(term): get_value(term) for term in query.split(',') if get_value(term)}
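# Illustrative usage (not part of the original module):
# split_query_to_term_args("verdict:Malicious, country:USA")
#     -> {'verdict': 'Malicious', 'country': 'USA'}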
def validated_term(key: str, val: Any) -> Any:
if key == 'verdict':
return translate_verdict(val)
if key == 'country' and len(val) != 3:
raise ValueError('Country ISO code should be 3 characters long')
return val
def validated_search_terms(query_args: Dict[str, Any]) -> Dict[str, Any]:
if len(query_args) == 0:
raise ValueError('Must have at least one search term')
return {key: validated_term(key, query_args[key]) for key in query_args}
def get_search_term_args(args: Dict[str, Any]) -> Dict[str, Any]:
if args.get('query'):
return split_query_to_term_args(args['query'])
else:
return {term: args[term] for term in SEARCH_TERM_QUERY_ARGS if args.get(term, None)}
def get_api_id(args: Dict[str, Any]) -> str:
    # must first check environmentId (deprecated) to override default args of environmentID
if args.get('file') and (args.get('environmentId') or args.get('environmentID')):
return f"{args['file']}:{args.get('environmentId') or args.get('environmentID')}"
elif args.get('JobID'):
return args['JobID']
else:
raise ValueError('Must supply JobID or environmentID and file')
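# Illustrative usage (not part of the original module):
# get_api_id({'file': '<sha256>', 'environmentID': 100}) -> '<sha256>:100'
# get_api_id({'JobID': '<job id>'}) -> '<job id>'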
def test_module(client: Client, _) -> str:
"""Tests API connectivity and authentication'
:return: 'ok' if test passed, anything else will fail the test.
:rtype: ``str``
"""
try:
client.get_environments()
message = 'ok'
except DemistoException as e:
if 'Forbidden' in str(e) or 'Authorization' in str(e):
message = 'Authorization Error: make sure API Key is correctly set'
else:
raise e
return message
def get_default_file_name(filetype: str) -> str:
return f'CrowdStrike_report_{round(time.time())}.{get_file_suffix(filetype)}'
def get_file_suffix(filetype: str = 'bin') -> str: # check this
if filetype in ('pcap', 'bin', 'xml', 'html'):
return 'gz'
if filetype == 'json':
return 'json'
if filetype in ('misp', 'stix'):
return 'xml'
return filetype
def has_error_state(client: Client, key: str) -> bool:
state = client.get_state(key)
demisto.debug(f'state to check if should poll response: {state}')
if state['state'] == 'ERROR':
raise Exception(f'Got Error state from server: {state}')
return False
class BWCFile(Common.File):
def __init__(self, bwc_fields: Dict, key_change_map: Dict, only_given_fields, *args, **kwargs):
super(BWCFile, self).__init__(*args, **kwargs)
self.bwc_fields = bwc_fields
self.key_change_map = key_change_map
self.only_given_fields = only_given_fields
def to_context(self) -> Any:
super_ret = super().to_context()
for key in super_ret.keys():
if key.startswith('File'):
super_ret[key].update(map_dict_keys(self.bwc_fields, self.key_change_map, self.only_given_fields))
return super_ret
def create_scan_results_readable_output(scan_response) -> Any:
table_field_dict = {
'submit_name': 'submit name',
'threat_level': 'threat level',
'threat_score': 'threat score',
'verdict': 'verdict',
'total_network_connections': 'total network connections',
'target_url': 'target url',
'classification_tags': 'classification tags',
'total_processes': 'total processes',
'environment_description': 'environment description',
'interesting': 'interesting',
'environment_id': 'environment id',
'url_analysis': 'url analysis',
'analysis_start_time': 'analysis start time',
'total_signatures': 'total signatures',
'type': 'type',
'type_short': 'type short',
'vx_family': 'Malware Family',
'sha256': 'sha256'
}
return tableToMarkdown('Scan Results:', scan_response, headers=list(table_field_dict.keys()),
headerTransform=lambda x: table_field_dict.get(x, x), removeNull=True)
def get_dbot_score(filehash, threat_score: int) -> Common.DBotScore:
def calc_score() -> int:
return {3: 0,
2: 3,
1: 2,
0: 1}.get(threat_score, 0)
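    # (Reader aid, not in the original) threat level values 0/1/2 map to DBot scores 1/2/3
    # (good/suspicious/bad); a value of 3 or anything unknown falls back to 0 (unknown).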
return Common.DBotScore(indicator=filehash, integration_name='CrowdStrike Falcon Sandbox V2',
indicator_type=DBotScoreType.FILE, score=calc_score(),
malicious_description=f'Score of {calc_score()}',
reliability=DBotScoreReliability.get_dbot_score_reliability_from_str(
demisto.params().get('integrationReliability')))
def get_submission_arguments(args: Dict[str, Any]) -> Dict[str, Any]:
return {camel_case_to_underscore(arg): args[arg] for arg in SUBMISSION_PARAMETERS if args.get(arg)}
def submission_response(client, response, polling) -> List[CommandResults]:
context = {'CrowdStrike.Submit(val.submission_id && val.submission_id === obj.submission_id)': response,
'CrowdStrike.JobID': response.get('job_id'),
'CrowdStrike.EnvironmentID': response.get('environment_id')}
submission_res = CommandResults(outputs_prefix='', outputs_key_field='',
raw_response=response, outputs=context,
readable_output=tableToMarkdown('Submission Data:', response,
headerTransform=underscore_to_space))
if not polling:
return [submission_res]
else:
return_results(submission_res) # return early
return crowdstrike_scan_command(client, {'file': response.get('sha256'), 'JobID': response.get('job_id'),
'polling': True})
def crowdstrike_submit_url_command(client: Client, args: Dict[str, Any]) -> List[CommandResults]:
submission_args = get_submission_arguments(args)
url = args['url']
response = client.submit_url(url, submission_args)
return submission_response(client, response, args.get('polling'))
def crowdstrike_submit_sample_command(client: Client, args: Dict[str, Any]) -> List[CommandResults]:
file_contents = demisto.getFilePath(args['entryId'])
submission_args = get_submission_arguments(args)
response = client.submit_file(file_contents, submission_args)
return submission_response(client, response, args.get('polling'))
def crowdstrike_analysis_overview_command(client: Client, args: Dict[str, Any]) -> CommandResults:
result = client.analysis_overview(args['file'])
file = Common.File(Common.DBotScore.NONE, sha256=result.get('sha256'), size=result.get('size'),
file_type=result.get('type'), name=result.get('last_file_name'))
table_cols = ['last_file_name', 'threat_score', 'other_file_name', 'sha256', 'verdict', 'url_analysis', 'size',
'type', 'type_short']
return CommandResults(
outputs_prefix='CrowdStrike.AnalysisOverview',
outputs_key_field='sha256',
outputs=result,
raw_response=result,
indicator=file,
readable_output=tableToMarkdown('Analysis Overview:', result, headers=table_cols,
headerTransform=underscore_to_space,
removeNull=True)
)
def crowdstrike_search_command(client: Client, args: Dict[str, Any]) -> List[CommandResults]:
query_args: Dict = get_search_term_args(args)
query_args = validated_search_terms(query_args)
response = client.search(query_args)
key_name_changes = {'job_id': 'JobID',
'sha256': 'SHA256',
'environment_id': 'environmentId',
'threat_score': 'threatscore',
'environment_description': 'environmentDescription',
'submit_name': 'submitname',
'analysis_start_time': 'start_time'}
# each indicator needs its own CR since indicators is deprecated.
def convert_to_file_res(res) -> CommandResults:
return CommandResults(
readable_output=tableToMarkdown('Search Results:', res, removeNull=True,
headerTransform=underscore_to_space,
headers=['submit_name', 'verdict', 'vx_family', 'threat_score', 'sha256',
'size',
'environment_id', 'type', 'type_short', 'analysis_start_time']),
indicator=BWCFile(res, key_name_changes, False, size=res.get('size'),
sha256=res.get('sha256'),
dbot_score=Common.DBotScore.NONE,
extension=res.get('type_short'),
name=res.get('submit_name'),
malware_family=res.get('vx_family')))
try:
return [CommandResults(
raw_response=response, outputs_prefix='CrowdStrike.Search',
outputs_key_field='sha256', outputs=response.get('result'),
readable_output=f"Search returned {response.get('count')} results. Limit set to {args['limit']}"),
*[convert_to_file_res(res) for res in response.get('result')[:max(int(args['limit']), 1)]]]
except ValueError:
raise ValueError("The limit argument must be numeric.")
@polling_function('cs-falcon-sandbox-scan')
def crowdstrike_scan_command(client: Client, args: Dict[str, Any]):
hashes = args['file'].split(',')
scan_response = client.scan(hashes)
def file_with_bwc_fields(res) -> CommandResults:
return CommandResults(
readable_output=create_scan_results_readable_output(res),
indicator=BWCFile(res, only_given_fields=False, size=res.get('size'),
file_type=res.get('type'), sha1=res.get('sha1'),
sha256=res.get('sha256'), md5=res.get('md5'), sha512=res.get('sha512'),
name=res.get('submit_name'),
ssdeep=res.get('ssdeep'), malware_family=res.get('vx_family'),
dbot_score=get_dbot_score(res.get('sha256'), res.get('threat_level')),
key_change_map={
'sha1': 'SHA1', 'sha256': 'SHA256', 'md5': 'MD5',
'job_id': 'JobID', 'environment_id': 'environmentId',
'threat_score': 'threatscore', 'environment_description': 'environmentDescription',
'submit_name': 'submitname', 'url_analysis': 'isurlanalysis',
                              'interesting': 'isinteresting', 'vx_family': 'family'},
)
)
command_result = [CommandResults(outputs_prefix='CrowdStrike.Report',
raw_response=scan_response, outputs=scan_response,
readable_output=f'Scan returned {len(scan_response)} results'),
*[file_with_bwc_fields(res) for res in scan_response]]
if len(scan_response) != 0:
return PollResult(command_result)
try:
if len(hashes) == 1:
key = get_api_id(args)
demisto.debug(f'key found for poll state: {key}')
return PollResult(continue_to_poll=lambda: not has_error_state(client, key), response=command_result)
except ValueError:
demisto.debug(f'Cannot get a key to check state for {hashes}')
return PollResult(continue_to_poll=True, response=command_result)
def crowdstrike_analysis_overview_summary_command(client: Client, args: Dict[str, Any]) -> CommandResults:
result = client.analysis_overview_summary(args['file'])
return CommandResults(
outputs_prefix='CrowdStrike.AnalysisOverview',
outputs_key_field='sha256',
outputs=result,
raw_response=result,
readable_output=tableToMarkdown('Analysis Overview Summary:', result,
headerTransform=lambda x: {'analysis_start_time': 'Analysis Start Time',
'last_multi_scan': 'Last Multi Scan',
'multiscan_result': 'Multiscan Result',
'threat_score': 'Threat Score',
'verdict': 'Verdict',
'sha256': 'Sha256'
}.get(x, x), removeNull=True)
)
def crowdstrike_analysis_overview_refresh_command(client: Client, args: Dict[str, Any]) -> CommandResults:
client.analysis_overview_refresh(args['file'])
return CommandResults(readable_output='The request to refresh the analysis overview was sent successfully.')
@polling_function('cs-falcon-sandbox-result')
def crowdstrike_result_command(client: Client, args: Dict[str, Any]):
key = get_api_id(args)
report_response = client.get_report(key, args['file-type'])
demisto.debug(f'get report response code: {report_response.status_code}')
successful_response = report_response.status_code == 200
if successful_response:
ret_list = [fileResult(get_default_file_name(args['file-type']), report_response.content,
file_type=EntryType.ENTRY_INFO_FILE)]
if args.get('file'):
ret_list.append(crowdstrike_scan_command(client, args))
return PollResult(ret_list)
else:
error_response = CommandResults(raw_response=report_response,
readable_output='Falcon Sandbox returned an error: status code '
+ f'{report_response.status_code}, response: '
+ f'{report_response.text}',
entry_type=entryTypes['error'])
return PollResult(continue_to_poll=lambda: not has_error_state(client, key), response=error_response)
def underscore_to_space(x: str) -> str:
return pascalToSpace(underscoreToCamelCase(x))
def crowdstrike_report_state_command(client: Client, args: Dict[str, Any]) -> CommandResults:
key = get_api_id(args)
state = client.get_state(key)
return CommandResults(outputs_prefix='CrowdStrike.State', raw_response=state, outputs=state,
readable_output=tableToMarkdown('State', state, headerTransform=underscore_to_space))
def crowdstrike_get_environments_command(client: Client, _) -> CommandResults:
environments = client.get_environments()
environments = [map_dict_keys(env, {'environment_id': 'ID', 'total_virtual_machines': 'VMs_total',
'analysis_mode': 'analysisMode', 'group_icon': 'groupicon',
'busy_virtual_machines': 'VMs_busy'}) for env in environments]
readable_output_column_conversion = {
'ID': '_ID', 'description': 'Description', 'architecture': 'Architecture',
'VMs_total': 'Total VMS', 'VMs_busy': 'Busy VMS', 'analysisMode': 'Analysis mode',
'groupicon': 'Group icon'
}
return CommandResults(
outputs_prefix='CrowdStrike.Environment',
outputs_key_field='id',
outputs=environments,
readable_output=tableToMarkdown('Execution Environments:',
environments, list(readable_output_column_conversion.keys()), removeNull=True,
headerTransform=lambda x: readable_output_column_conversion[x]),
raw_response=environments
)
def crowdstrike_get_screenshots_command(client: Client, args: Dict[str, Any]):
def to_image_result(image: Dict):
return fileResult(image['name'], base64.b64decode(image['image']), entryTypes['entryInfoFile'])
key = get_api_id(args)
ret = [to_image_result(image) for image in client.get_screenshots(key)]
return ret if len(ret) > 0 else CommandResults(readable_output='No screenshots returned')
def crowdstrike_sample_download_command(client: Client, args: Dict[str, Any]):
hash_value = args['file']
response = client.download_sample(hash_value)
file_name = response.headers.get('Vx-Filename', hash_value + '.gz')
return fileResult(file_name, data=response.content, file_type=EntryType.FILE)
def main() -> None:
"""main function, parses params and runs command_func functions
:return:
:rtype:
"""
params: Dict[str, Any] = demisto.params()
args: Dict[str, Any] = demisto.args()
verify_certificate = not params.get('insecure', False)
proxy = params.get('proxy', False)
demisto_command = demisto.command()
demisto.debug(f'Command being called is {demisto_command}')
try:
headers: Dict = {
'api-key': demisto.params().get('credentials', {}).get('password'),
'User-Agent': 'Falcon Sandbox'
}
client = Client(
base_url=SERVER_URL,
verify=verify_certificate,
headers=headers,
proxy=proxy)
command_func: Callable
if demisto_command in ['test-module']:
command_func = test_module
elif demisto_command in ['cs-falcon-sandbox-search', 'crowdstrike-search']:
command_func = crowdstrike_search_command
elif demisto_command in ['cs-falcon-sandbox-scan', 'crowdstrike-scan', 'file']:
command_func = crowdstrike_scan_command
elif demisto_command in ['crowdstrike-get-environments', 'cs-falcon-sandbox-get-environments']:
command_func = crowdstrike_get_environments_command
elif demisto_command in ['cs-falcon-sandbox-get-screenshots', 'crowdstrike-get-screenshots']:
command_func = crowdstrike_get_screenshots_command
elif demisto_command in ['cs-falcon-sandbox-result', 'crowdstrike-result']:
command_func = crowdstrike_result_command
elif demisto_command in ['cs-falcon-sandbox-analysis-overview']:
command_func = crowdstrike_analysis_overview_command
elif demisto_command in ['cs-falcon-sandbox-analysis-overview-summary']:
command_func = crowdstrike_analysis_overview_summary_command
elif demisto_command in ['cs-falcon-sandbox-analysis-overview-refresh']:
command_func = crowdstrike_analysis_overview_refresh_command
elif demisto_command in ['crowdstrike-submit-sample', 'cs-falcon-sandbox-submit-sample']:
command_func = crowdstrike_submit_sample_command
elif demisto_command in ['cs-falcon-sandbox-submit-url', 'crowdstrike-submit-url']:
command_func = crowdstrike_submit_url_command
elif demisto_command in ['cs-falcon-sandbox-sample-download']:
command_func = crowdstrike_sample_download_command
elif demisto_command in ['cs-falcon-sandbox-report-state']:
command_func = crowdstrike_report_state_command
else:
raise NotImplementedError(f'Command not implemented: {demisto_command}')
return_results(command_func(client, args))
except Exception as e:
demisto.error(traceback.format_exc()) # print the traceback
return_error(f'Failed to execute {demisto_command} command_func.\nError:\n{str(e)}')
''' ENTRY POINT '''
if __name__ in ('__main__', '__builtin__', 'builtins'):
main()
| 47.043893 | 118 | 0.626465 |
4a1bac05e34ecef21220e3dbcf0d3da83dd5cc40
| 2,149 |
py
|
Python
|
aiconf/model.py
|
mmoran0032/aiconf-allennlp-tutorial
|
f3e68ce6b7de0a2e470a2cc3e5ec8daf557fe3b7
|
[
"Apache-2.0"
] | null | null | null |
aiconf/model.py
|
mmoran0032/aiconf-allennlp-tutorial
|
f3e68ce6b7de0a2e470a2cc3e5ec8daf557fe3b7
|
[
"Apache-2.0"
] | null | null | null |
aiconf/model.py
|
mmoran0032/aiconf-allennlp-tutorial
|
f3e68ce6b7de0a2e470a2cc3e5ec8daf557fe3b7
|
[
"Apache-2.0"
] | null | null | null |
from typing import Dict, Optional
from allennlp.data.vocabulary import Vocabulary
from allennlp.models import Model
from allennlp.modules.text_field_embedders import TextFieldEmbedder
from allennlp.modules.seq2vec_encoders import Seq2VecEncoder
from allennlp.nn.util import get_text_field_mask
from allennlp.training.metrics import CategoricalAccuracy
import torch
@Model.register("bbc")
class BBCModel(Model):
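    # (Reader note, not in the original) A standard text classifier: embed the tokens, encode
    # the sequence into a single vector, then apply dropout and a linear layer to get
    # per-category logits.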
def __init__(
self,
vocab: Vocabulary,
text_field_embedder: TextFieldEmbedder,
encoder: Seq2VecEncoder,
dropout: Optional[float] = 0.0,
) -> None:
super().__init__(vocab)
self.text_field_embedder = text_field_embedder
self.encoder = encoder
hidden_dim = encoder.get_output_dim()
num_classes = vocab.get_vocab_size("labels")
self.linear = torch.nn.Linear(in_features=hidden_dim, out_features=num_classes)
self.dropout = torch.nn.Dropout(p=dropout)
self.loss = torch.nn.CrossEntropyLoss()
self.metrics = {
"acc1": CategoricalAccuracy(),
"acc3": CategoricalAccuracy(top_k=3),
}
def forward(
self, text: Dict[str, torch.Tensor], category: Optional[torch.Tensor] = None
) -> Dict[str, torch.Tensor]:
embedded = self.text_field_embedder(text)
mask = get_text_field_mask(text)
encoded = self.encoder(embedded, mask)
logits = self.linear(self.dropout(encoded))
num_categories = self.vocab.get_vocab_size("labels")
category_names = [
[
self.vocab.get_token_from_index(i, namespace="labels")
for i in range(num_categories)
]
]
output = {"logits": logits, "category_names": category_names}
if category is not None:
output["loss"] = self.loss(logits, category)
for metric in self.metrics.values():
metric(logits, category)
return output
def get_metrics(self, reset: bool = False) -> Dict[str, float]:
return {name: metric.get_metric(reset) for name, metric in self.metrics.items()}
| 33.578125 | 88 | 0.655654 |
4a1bac824dea14375f9cd2820282646c5dcf3365
| 8,262 |
py
|
Python
|
apps/psystem_2d/psystem.py
|
arashb/pyclaw
|
dbe0bc28ed801b16c501bdff58012c30f7a90b9f
|
[
"BSD-3-Clause"
] | 1 |
2018-03-26T11:51:17.000Z
|
2018-03-26T11:51:17.000Z
|
apps/psystem_2d/psystem.py
|
arashb/pyclaw
|
dbe0bc28ed801b16c501bdff58012c30f7a90b9f
|
[
"BSD-3-Clause"
] | null | null | null |
apps/psystem_2d/psystem.py
|
arashb/pyclaw
|
dbe0bc28ed801b16c501bdff58012c30f7a90b9f
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
# encoding: utf-8
import numpy as np
# material parameters
E1=1.; p1=1.
E2=4.; p2=4.
# interface parameters
alphax=0.5; deltax=1.
alphay=0.5; deltay=1.
# Linearity parameters
linearity_mat1=2; linearity_mat2=2
# heterogeneity type
het_type='checkerboard'
#het_type='sinusoidal'
#het_type='smooth_checkerboard'
sharpness=10
def qinit(state,A,x0,y0,varx,vary):
r""" Set initial conditions for q."""
x =state.grid.x.centers; y =state.grid.y.centers
# Create meshgrid
[yy,xx]=np.meshgrid(y,x)
s=A*np.exp(-(xx-x0)**2/(2*varx)-(yy-y0)**2/(2*vary)) #sigma(@t=0)
#parameters from aux
linearity_mat=state.aux[2,:]
E=state.aux[1,:]
#initial condition
state.q[0,:,:]=np.where(linearity_mat==1,1,0)*s/E+np.where(linearity_mat==2,1,0)*np.log(s+1)/E
state.q[1,:,:]=0; state.q[2,:,:]=0
def setaux(x,y):
r"""Creates a matrix representing every grid cell in the domain,
whose size is len(x),len(y)
Each entry of the matrix contains a vector of size 3 with:
The material density p
The young modulus E
A flag indicating which material the grid is made of.
The domain pattern is a checkerboard."""
aux = np.empty((4,len(x),len(y)), order='F')
if het_type == 'checkerboard':
# xfrac and yfrac are x and y relative to deltax and deltay resp.
xfrac=x-np.floor(x/deltax)*deltax
yfrac=y-np.floor(y/deltay)*deltay
# create a meshgrid out of xfrac and yfrac
[yyfrac,xxfrac]=np.meshgrid(yfrac,xfrac)
# density
aux[0,:,:]=p1*(xxfrac<=alphax*deltax)*(yyfrac<=alphay*deltay)\
+p1*(xxfrac >alphax*deltax)*(yyfrac >alphay*deltay)\
+p2*(xxfrac >alphax*deltax)*(yyfrac<=alphay*deltay)\
+p2*(xxfrac<=alphax*deltax)*(yyfrac >alphay*deltay)
#Young modulus
aux[1,:,:]=E1*(xxfrac<=alphax*deltax)*(yyfrac<=alphay*deltay)\
+E1*(xxfrac >alphax*deltax)*(yyfrac >alphay*deltay)\
+E2*(xxfrac >alphax*deltax)*(yyfrac<=alphay*deltay)\
+E2*(xxfrac<=alphax*deltax)*(yyfrac >alphay*deltay)
# linearity of material
aux[2,:,:]=linearity_mat1*(xxfrac<=alphax*deltax)*(yyfrac<=alphay*deltay)\
+linearity_mat1*(xxfrac >alphax*deltax)*(yyfrac >alphay*deltay)\
+linearity_mat2*(xxfrac >alphax*deltax)*(yyfrac<=alphay*deltay)\
+linearity_mat2*(xxfrac<=alphax*deltax)*(yyfrac >alphay*deltay)
elif het_type == 'sinusoidal' or het_type == 'smooth_checkerboard':
[yy,xx]=np.meshgrid(y,x)
Amp_p=np.abs(p1-p2)/2; offset_p=(p1+p2)/2
Amp_E=np.abs(E1-E2)/2; offset_E=(E1+E2)/2
if het_type == 'sinusoidal':
frec_x=2*np.pi/deltax; frec_y=2*np.pi/deltay
fun=np.sin(frec_x*xx)*np.sin(frec_y*yy)
else:
fun_x=xx*0; fun_y=yy*0
            for i in range(0,1+int(np.ceil((x[-1]-x[0])/(deltax*0.5)))):
fun_x=fun_x+(-1)**i*np.tanh(sharpness*(xx-deltax*i*0.5))
            for i in range(0,1+int(np.ceil((y[-1]-y[0])/(deltay*0.5)))):
fun_y=fun_y+(-1)**i*np.tanh(sharpness*(yy-deltay*i*0.5))
fun=fun_x*fun_y
aux[0,:,:]=Amp_p*fun+offset_p
aux[1,:,:]=Amp_E*fun+offset_E
aux[2,:,:]=linearity_mat1
return aux
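# --- Illustrative sketch (added for clarity; not part of the original script) ---
# A minimal, hypothetical check of the aux array produced by setaux(); the grid
# below is an assumption chosen only for this example, and the helper is never
# called by the solver setup.
def _example_setaux_layout():
    x = np.linspace(0.05, 1.95, 20)
    y = np.linspace(0.05, 1.95, 20)
    aux = setaux(x, y)
    # aux[0]: density p, aux[1]: Young modulus E, aux[2]: linearity flag,
    # aux[3]: reserved for the current strain (filled in by b4step below)
    return aux.shape  # -> (4, 20, 20)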
def b4step(solver,solution):
r"""put in aux[3,:,:] the value of q[0,:,:] (eps). This is required in rptpv.f"""
state = solution.states[0]
state.aux[3,:,:] = state.q[0,:,:]
    # Zero out the first half of the domain. Used in rectangular domains with periodic BCs in x
if state.problem_data['turnZero_half_2D']==1:
if state.t>=state.problem_data['t_turnZero'] and state.t<=state.problem_data['t_turnZero']+1:
Y,X = np.meshgrid(state.grid.y,state.grid.x)
state.q = state.q * (X<solution.domain.grid.upper[0]/2)
def compute_p(state):
state.p[0,:,:]=np.exp(state.q[0,:,:]*state.aux[1,:,:])-1
def compute_F(state):
rho = state.aux[0,:,:]; E = state.aux[1,:,:]
#Compute the entropy
u = state.q[1,:,:]/rho
v = state.q[2,:,:]/rho
nrg=rho * (u**2 + v**2)/2.
eps = state.q[0,:,:]
sigma = np.exp(E*eps) - 1.
sigint = (sigma-np.log(sigma+1.))/E
dx=state.grid.delta[0]; dy=state.grid.delta[1]
state.F[0,:,:] = (sigint+nrg)*dx*dy
state.F[1,:,:] = 10*state.F[0,:,:]
state.F[2,:,:] = 100*state.F[0,:,:]
def gauge_pfunction(q,aux):
p = np.exp(q[0]*aux[1])-1
return [p,10*p]
def psystem2D(use_petsc=False,solver_type='classic',iplot=False,htmlplot=False):
"""
Solve the p-system in 2D with variable coefficients
"""
if use_petsc:
import clawpack.petclaw as pyclaw
else:
from clawpack import pyclaw
####################################
######### MAIN PARAMETERS ##########
####################################
# Domain
x_lower=0.25; x_upper=20.25
y_lower=0.25; y_upper=20.25
# cells per layer
Ng=10
mx=(x_upper-x_lower)*Ng; my=(y_upper-y_lower)*Ng
# Initial condition parameters
A=10.
x0=0.25 # Center of initial perturbation
y0=0.25 # Center of initial perturbation
varx=0.5; vary=0.5 # Width of initial perturbation
# Boundary conditions
bc_x_lower=pyclaw.BC.wall; bc_x_upper=pyclaw.BC.extrap
bc_y_lower=pyclaw.BC.wall; bc_y_upper=pyclaw.BC.extrap
# Turning off 1st half of the domain. Useful in rect domains
turnZero_half_2D=0 #flag
t_turnZero=50
num_output_times=10
# restart options
restart_from_frame = None
if solver_type=='classic':
solver = pyclaw.ClawSolver2D()
elif solver_type=='sharpclaw':
solver = pyclaw.SharpClawSolver2D()
from clawpack import riemann
solver.rp = riemann.rp2_psystem
solver.num_waves = 2
solver.limiters = pyclaw.limiters.tvd.superbee
solver.bc_lower[0]=bc_x_lower
solver.bc_upper[0]=bc_x_upper
solver.bc_lower[1]=bc_y_lower
solver.bc_upper[1]=bc_y_upper
solver.aux_bc_lower[0]=bc_x_lower
solver.aux_bc_upper[0]=bc_x_upper
solver.aux_bc_lower[1]=bc_y_lower
solver.aux_bc_upper[1]=bc_y_upper
solver.fwave = True
solver.cfl_max = 0.9
solver.cfl_desired = 0.8
solver.before_step = b4step
solver.dimensional_split=False
#controller
claw = pyclaw.Controller()
claw.tfinal = 40.0
claw.solver = solver
if restart_from_frame is not None:
claw.solution = pyclaw.Solution(restart_from_frame, format='petsc',read_aux=False)
claw.solution.state.mp = 1
grid = claw.solution.domain.grid
claw.solution.state.aux = setaux(grid.x.centers,grid.y.centers)
claw.num_output_times = num_output_times - restart_from_frame
claw.start_frame = restart_from_frame
else:
####################################
####################################
####################################
#Creation of Domain
x = pyclaw.Dimension('x',x_lower,x_upper,mx)
y = pyclaw.Dimension('y',y_lower,y_upper,my)
domain = pyclaw.Domain([x,y])
num_eqn = 3
num_aux = 4
state = pyclaw.State(domain,num_eqn,num_aux)
state.mF = 3
#Set global parameters
state.problem_data = {}
state.problem_data['turnZero_half_2D'] = turnZero_half_2D
state.problem_data['t_turnZero'] = t_turnZero
state.mp = 1
grid = state.grid
state.aux = setaux(grid.x.centers,grid.y.centers)
#Initial condition
qinit(state,A,x0,y0,varx,vary)
claw.solution = pyclaw.Solution(state,domain)
claw.num_output_times = num_output_times
#claw.p_function = p_function
claw.compute_F = compute_F
grid.add_gauges([[0.25,0.25],[0.75,0.25],[0.25,0.75],[0.75,0.75]])
solver.compute_gauge_values = gauge_pfunction
claw.write_aux_init = False
#Solve
status = claw.run()
#strain=claw.frames[claw.num_output_times].state.gqVec.getArray().reshape([grid.num_cells[0],grid.num_cells[1],num_eqn])[:,:,0]
#return strain
if iplot: pyclaw.plot.interactive_plot()
if htmlplot: pyclaw.plot.html_plot()
if __name__=="__main__":
from clawpack.pyclaw.util import run_app_from_main
output = run_app_from_main(psystem2D)
| 34.860759 | 131 | 0.614984 |
4a1bac8e5e106e21b89b5e6cc96941427993f0bd
| 3,879 |
py
|
Python
|
tensorflow/python/distribute/one_device_strategy_test.py
|
satsumas/tensorflow
|
6a286bfcfeebcfa2b91b95334d83d9f0edf735b9
|
[
"Apache-2.0"
] | 2 |
2019-06-26T14:51:16.000Z
|
2021-08-07T08:40:01.000Z
|
tensorflow/python/distribute/one_device_strategy_test.py
|
jensfreudenau/tensorflow
|
3fe3f2b1984aab6f159b89aa3ab0069988925689
|
[
"Apache-2.0"
] | null | null | null |
tensorflow/python/distribute/one_device_strategy_test.py
|
jensfreudenau/tensorflow
|
3fe3f2b1984aab6f159b89aa3ab0069988925689
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for class OneDeviceStrategy."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.distribute import combinations
from tensorflow.python.distribute import strategy_combinations
from tensorflow.python.distribute import strategy_test_lib
from tensorflow.python.eager import context
from tensorflow.python.eager import test
@combinations.generate(
combinations.combine(
distribution=[
strategy_combinations.one_device_strategy,
strategy_combinations.one_device_strategy_gpu
],
mode=["eager", "graph"]))
class OneDeviceStrategyTest(
strategy_test_lib.DistributionTestBase,
strategy_test_lib.OneDeviceDistributionTestBase):
def testMinimizeLoss(self, distribution):
if context.executing_eagerly():
self._test_minimize_loss_eager(distribution)
else:
self._test_minimize_loss_graph(distribution)
def testReplicaId(self, distribution):
self._test_replica_id(distribution)
def testCallAndMergeExceptions(self, distribution):
self._test_call_and_merge_exceptions(distribution)
def testMakeInputFnIteratorWithDataset(self, distribution):
dataset_fn = lambda: dataset_ops.Dataset.range(10)
expected_values = [[i] for i in range(10)]
input_fn = self._input_fn_to_test_input_context(
dataset_fn,
expected_num_replicas_in_sync=1,
expected_num_input_pipelines=1,
expected_input_pipeline_id=0)
iterator = distribution.make_input_fn_iterator(input_fn)
self._test_input_fn_iterator(
iterator, distribution.extended.worker_devices, expected_values)
def testMakeInputFnIteratorWithCallable(self, distribution):
def fn():
dataset = dataset_ops.Dataset.range(10)
it = dataset.make_one_shot_iterator()
return it.get_next
expected_values = [[i] for i in range(10)]
input_fn = self._input_fn_to_test_input_context(
fn,
expected_num_replicas_in_sync=1,
expected_num_input_pipelines=1,
expected_input_pipeline_id=0)
iterator = distribution.make_input_fn_iterator(input_fn)
self._test_input_fn_iterator(
iterator, distribution.extended.worker_devices, expected_values,
test_reinitialize=False)
def testNumpyIterator(self, distribution):
self._test_numpy_iterator(distribution)
def testRun(self, distribution):
self._test_run(distribution)
def testAllReduceSum(self, distribution):
self._test_all_reduce_sum(distribution)
def testAllReduceSumGradients(self, distribution):
self._test_all_reduce_sum_gradients(distribution)
def testAllReduceSumGradientTape(self, distribution):
self._test_all_reduce_sum_gradient_tape(distribution)
def testAllReduceMean(self, distribution):
self._test_all_reduce_mean(distribution)
def testAllReduceMeanGradients(self, distribution):
self._test_all_reduce_mean_gradients(distribution)
def testAllReduceMeanGradientTape(self, distribution):
self._test_all_reduce_mean_gradient_tape(distribution)
if __name__ == "__main__":
test.main()
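# --- Illustrative sketch (added for clarity; not part of the original test file) ---
# The basic usage pattern these tests exercise, shown as a hypothetical,
# commented example; the device string and the build_model() helper are
# assumptions, not part of this test:
#
#   strategy = tf.distribute.OneDeviceStrategy("/device:GPU:0")
#   with strategy.scope():
#       model = build_model()  # variables are placed on the single device
#   # inputs and replica ids then behave as if there were exactly one replica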
| 36.59434 | 80 | 0.760505 |
4a1bacdbce80f66eeb70c948ce0467249a536ad3
| 14,161 |
py
|
Python
|
ec2api/tests/unit/test_ec2utils.py
|
JioCloudVPC/compute-ec2-api-vagrant
|
9e7cf7b566f86aee4a0c73316533e1c410d1a897
|
[
"Apache-2.0"
] | null | null | null |
ec2api/tests/unit/test_ec2utils.py
|
JioCloudVPC/compute-ec2-api-vagrant
|
9e7cf7b566f86aee4a0c73316533e1c410d1a897
|
[
"Apache-2.0"
] | null | null | null |
ec2api/tests/unit/test_ec2utils.py
|
JioCloudVPC/compute-ec2-api-vagrant
|
9e7cf7b566f86aee4a0c73316533e1c410d1a897
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2014
# The Cloudscaling Group, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import fixtures
from glanceclient.common import exceptions as glance_exception
import mock
from oslo_config import fixture as config_fixture
import testtools
from ec2api.api import ec2utils
from ec2api import exception
from ec2api.tests.unit import fakes
from ec2api.tests.unit import matchers
class EC2UtilsTestCase(testtools.TestCase):
@mock.patch('ec2api.db.api.IMPL')
def test_get_db_item(self, db_api):
item = {'fake_key': 'fake_value'}
db_api.get_item_by_id.return_value = item
def check_normal_flow(kind, ec2_id):
item['id'] = ec2_id
res = ec2utils.get_db_item('fake_context', ec2_id)
self.assertThat(res, matchers.DictMatches(item))
db_api.get_item_by_id.assert_called_once_with('fake_context',
ec2_id)
db_api.reset_mock()
check_normal_flow('vpc', 'vpc-001234af')
check_normal_flow('igw', 'igw-00000022')
def check_not_found(kind, ex_class):
ec2_id = fakes.random_ec2_id(kind)
self.assertRaises(ex_class,
ec2utils.get_db_item,
'fake_context', ec2_id)
db_api.get_item_by_id.assert_called_once_with('fake_context',
ec2_id)
db_api.reset_mock()
db_api.get_item_by_id.return_value = None
check_not_found('vpc', exception.InvalidVpcIDNotFound)
check_not_found('igw', exception.InvalidInternetGatewayIDNotFound)
check_not_found('subnet', exception.InvalidSubnetIDNotFound)
check_not_found('eni', exception.InvalidNetworkInterfaceIDNotFound)
check_not_found('dopt', exception.InvalidDhcpOptionsIDNotFound)
check_not_found('eipalloc', exception.InvalidAllocationIDNotFound)
check_not_found('sg', exception.InvalidGroupNotFound)
check_not_found('rtb', exception.InvalidRouteTableIDNotFound)
check_not_found('i', exception.InvalidInstanceIDNotFound)
check_not_found('vol', exception.InvalidVolumeNotFound)
check_not_found('snap', exception.InvalidSnapshotNotFound)
check_not_found('ami', exception.InvalidAMIIDNotFound)
check_not_found('ari', exception.InvalidAMIIDNotFound)
check_not_found('aki', exception.InvalidAMIIDNotFound)
@mock.patch('ec2api.db.api.IMPL')
def test_get_db_items(self, db_api):
items = [{'id': fakes.random_ec2_id('fake'),
'fake_key': 'fake_value'},
{'id': fakes.random_ec2_id('fake'),
'fake_key': 'fake_value'}]
db_api.get_items.return_value = items
db_api.get_items_by_ids.return_value = items
def check_with_no_filter(empty_filter):
res = ec2utils.get_db_items('fake_context', 'fake', empty_filter)
self.assertThat(res, matchers.ListMatches(items))
db_api.get_items.assert_called_once_with('fake_context', 'fake')
db_api.reset_mock()
check_with_no_filter(None)
check_with_no_filter([])
def check_with_filter(item_ids):
res = ec2utils.get_db_items('fake_context', 'fake', item_ids)
self.assertThat(res, matchers.ListMatches(items))
db_api.get_items_by_ids.assert_called_once_with(
'fake_context', set(item_ids))
db_api.reset_mock()
item_ids = [i['id'] for i in items]
check_with_filter(item_ids)
check_with_filter(item_ids * 2)
def check_not_found(kind, ex_class):
items = [{'id': fakes.random_ec2_id(kind),
'fake_key': 'fake_value'} for _ in range(2)]
item_ids = [i['id'] for i in items]
item_ids.append(fakes.random_ec2_id(kind))
db_api.get_items_by_ids.return_value = items
self.assertRaises(ex_class, ec2utils.get_db_items,
'fake_context', kind, item_ids)
db_api.reset_mock()
check_not_found('vpc', exception.InvalidVpcIDNotFound)
check_not_found('igw', exception.InvalidInternetGatewayIDNotFound)
check_not_found('subnet', exception.InvalidSubnetIDNotFound)
check_not_found('eni', exception.InvalidNetworkInterfaceIDNotFound)
check_not_found('dopt', exception.InvalidDhcpOptionsIDNotFound)
check_not_found('eipalloc', exception.InvalidAllocationIDNotFound)
check_not_found('sg', exception.InvalidGroupNotFound)
check_not_found('rtb', exception.InvalidRouteTableIDNotFound)
check_not_found('i', exception.InvalidInstanceIDNotFound)
check_not_found('vol', exception.InvalidVolumeNotFound)
check_not_found('snap', exception.InvalidSnapshotNotFound)
check_not_found('ami', exception.InvalidAMIIDNotFound)
check_not_found('aki', exception.InvalidAMIIDNotFound)
check_not_found('ari', exception.InvalidAMIIDNotFound)
"""Unit test api xml conversion."""
def test_number_conversion(self):
conv = ec2utils._try_convert
self.assertIsNone(conv('None'))
self.assertEqual(conv('True'), True)
self.assertEqual(conv('TRUE'), True)
self.assertEqual(conv('true'), True)
self.assertEqual(conv('False'), False)
self.assertEqual(conv('FALSE'), False)
self.assertEqual(conv('false'), False)
self.assertEqual(conv('0'), 0)
self.assertEqual(conv('42'), 42)
self.assertEqual(conv('3.14'), 3.14)
self.assertEqual(conv('-57.12'), -57.12)
self.assertEqual(conv('0x57'), 0x57)
self.assertEqual(conv('-0x57'), -0x57)
self.assertEqual(conv('-'), '-')
self.assertEqual(conv('-0'), 0)
self.assertEqual(conv('0.0'), 0.0)
self.assertEqual(conv('1e-8'), 0.0)
self.assertEqual(conv('-1e-8'), 0.0)
self.assertEqual(conv('0xDD8G'), '0xDD8G')
self.assertEqual(conv('0XDD8G'), '0XDD8G')
self.assertEqual(conv('-stringy'), '-stringy')
self.assertEqual(conv('stringy'), 'stringy')
self.assertEqual(conv('add'), 'add')
self.assertEqual(conv('remove'), 'remove')
self.assertEqual(conv(''), '')
@mock.patch('ec2api.db.api.IMPL')
def test_os_id_to_ec2_id(self, db_api):
fake_context = mock.Mock(service_catalog=[{'type': 'fake'}])
fake_id = fakes.random_ec2_id('fake')
fake_os_id = fakes.random_os_id()
# no cache, item is found
db_api.get_items_ids.return_value = [(fake_id, fake_os_id)]
item_id = ec2utils.os_id_to_ec2_id(fake_context, 'fake', fake_os_id)
self.assertEqual(fake_id, item_id)
db_api.get_items_ids.assert_called_once_with(
fake_context, 'fake', item_ids=None, item_os_ids=(fake_os_id,))
self.assertFalse(db_api.add_item_id.called)
# no cache, item isn't found
db_api.get_items_ids.return_value = []
db_api.add_item_id.return_value = fake_id
item_id = ec2utils.os_id_to_ec2_id(fake_context, 'fake', fake_os_id)
self.assertEqual(fake_id, item_id)
db_api.add_item_id.assert_called_once_with(
fake_context, 'fake', fake_os_id, project_id=None)
# no item in cache, item isn't found
db_api.reset_mock()
ids_cache = {fakes.random_os_id(): fakes.random_ec2_id('fake')}
item_id = ec2utils.os_id_to_ec2_id(fake_context, 'fake', fake_os_id,
ids_by_os_id=ids_cache)
self.assertEqual(fake_id, item_id)
self.assertIn(fake_os_id, ids_cache)
self.assertEqual(fake_id, ids_cache[fake_os_id])
db_api.add_item_id.assert_called_once_with(
fake_context, 'fake', fake_os_id, project_id=None)
# no item in cache, item is found
db_api.reset_mock()
db_api.get_items_ids.return_value = [(fake_id, fake_os_id)]
ids_cache = {}
item_id = ec2utils.os_id_to_ec2_id(fake_context, 'fake', fake_os_id,
ids_by_os_id=ids_cache)
self.assertEqual(fake_id, item_id)
self.assertEqual({fake_os_id: fake_id}, ids_cache)
self.assertFalse(db_api.add_item_id.called)
# item in cache
db_api.reset_mock()
ids_cache = {fake_os_id: fake_id}
item_id = ec2utils.os_id_to_ec2_id(fake_context, 'fake', fake_os_id,
ids_by_os_id=ids_cache)
self.assertEqual(fake_id, item_id)
self.assertEqual({fake_os_id: fake_id}, ids_cache)
self.assertFalse(db_api.get_items_ids.called)
self.assertFalse(db_api.add_item_id.called)
# item in items dict
items_dict = {fake_os_id: {'id': fake_id,
'os_id': fake_os_id}}
ids_cache = {}
item_id = ec2utils.os_id_to_ec2_id(fake_context, 'fake', fake_os_id,
items_by_os_id=items_dict,
ids_by_os_id=ids_cache)
self.assertEqual(fake_id, item_id)
self.assertFalse(db_api.get_items_ids.called)
self.assertFalse(db_api.add_item_id.called)
self.assertEqual({}, ids_cache)
# item not in items dict, item is found
items_dict = {fake_os_id: {'id': fake_id,
'os_id': fake_os_id}}
db_api.get_items_ids.return_value = [(fake_id, fake_os_id)]
item_id = ec2utils.os_id_to_ec2_id(fake_context, 'fake', fake_os_id,
items_by_os_id=items_dict)
self.assertEqual(fake_id, item_id)
self.assertFalse(db_api.add_item_id.called)
@mock.patch('glanceclient.client.Client')
@mock.patch('ec2api.db.api.IMPL')
def test_get_os_image(self, db_api, glance):
glance = glance.return_value
fake_context = mock.Mock(service_catalog=[{'type': 'fake'}])
os_image = fakes.OSImage(fakes.OS_IMAGE_1)
glance.images.get.return_value = os_image
# NOTE(ft): check normal flow for an user owned image
db_api.get_public_items.return_value = []
db_api.get_item_by_id.return_value = fakes.DB_IMAGE_1
self.assertEqual(
os_image,
ec2utils.get_os_image(fake_context, fakes.ID_EC2_IMAGE_1))
db_api.get_item_by_id.assert_called_with(
mock.ANY, fakes.ID_EC2_IMAGE_1)
glance.images.get.assert_called_with(fakes.ID_OS_IMAGE_1)
# NOTE(ft): check normal flow for a public image
db_api.get_public_items.return_value = [fakes.DB_IMAGE_1]
db_api.get_item_by_id.return_value = None
self.assertEqual(
os_image,
ec2utils.get_os_image(fake_context, fakes.ID_EC2_IMAGE_1))
db_api.get_public_items.assert_called_with(
mock.ANY, 'ami', (fakes.ID_EC2_IMAGE_1,))
glance.images.get.assert_called_with(fakes.ID_OS_IMAGE_1)
# NOTE(ft): check case of absence of an image in OS
glance.images.get.side_effect = glance_exception.HTTPNotFound()
self.assertRaises(
exception.InvalidAMIIDNotFound,
ec2utils.get_os_image,
fake_context, fakes.ID_EC2_IMAGE_1)
# NOTE(ft): check case of an unknown image id
db_api.get_public_items.return_value = []
db_api.get_item_by_id.return_value = None
self.assertRaises(
exception.InvalidAMIIDNotFound,
ec2utils.get_os_image,
fake_context, fakes.random_ec2_id('ami'))
@mock.patch('neutronclient.v2_0.client.Client')
def test_get_os_public_network(self, neutron):
neutron = neutron.return_value
context = mock.Mock(service_catalog=[{'type': 'fake'}])
conf = self.useFixture(config_fixture.Config())
conf.config(external_network='fake_public_network')
neutron.list_networks.return_value = {'networks': ['network_object']}
net = ec2utils.get_os_public_network(context)
self.assertEqual('network_object', net)
neutron.list_networks.assert_called_once_with(
**{'router:external': True, 'name': 'fake_public_network'})
neutron.list_networks.return_value = {'networks': []}
with fixtures.FakeLogger() as log:
self.assertRaises(exception.Unsupported,
ec2utils.get_os_public_network, context)
self.assertNotEqual(0, len(log.output))
self.assertIn('fake_public_network', log.output)
neutron.list_networks.return_value = {'networks': ['obj1', 'obj2']}
with fixtures.FakeLogger() as log:
self.assertRaises(exception.Unsupported,
ec2utils.get_os_public_network, context)
self.assertNotEqual(0, len(log.output))
self.assertIn('fake_public_network', log.output)
conf.config(external_network=None)
with fixtures.FakeLogger() as log:
self.assertRaises(exception.Unsupported,
ec2utils.get_os_public_network, context)
self.assertNotEqual(0, len(log.output))
self.assertNotIn('None', log.output)
neutron.list_networks.return_value = {'networks': []}
with fixtures.FakeLogger() as log:
self.assertRaises(exception.Unsupported,
ec2utils.get_os_public_network, context)
self.assertNotEqual(0, len(log.output))
self.assertNotIn('None', log.output)
| 45.533762 | 77 | 0.648401 |
4a1bad3442b25b8e45c6a7bbdcec2d1ee4973064
| 12,968 |
py
|
Python
|
Lib/test/test_pulldom.py
|
shawwn/cpython
|
0ff8a3b374286d2218fc18f47556a5ace202dad3
|
[
"0BSD"
] | 5,926 |
2015-01-01T07:45:08.000Z
|
2022-03-31T12:34:38.000Z
|
Lib/test/test_pulldom.py
|
shawwn/cpython
|
0ff8a3b374286d2218fc18f47556a5ace202dad3
|
[
"0BSD"
] | 1,728 |
2015-01-01T01:09:12.000Z
|
2022-03-30T23:25:22.000Z
|
Lib/test/test_pulldom.py
|
shawwn/cpython
|
0ff8a3b374286d2218fc18f47556a5ace202dad3
|
[
"0BSD"
] | 574 |
2015-01-02T01:36:10.000Z
|
2022-03-26T10:18:48.000Z
|
import io
import unittest
import xml.sax
from xml.sax.xmlreader import AttributesImpl
from xml.sax.handler import feature_external_ges
from xml.dom import pulldom
from test.support import findfile
tstfile = findfile("test.xml", subdir="xmltestdata")
# A handy XML snippet, containing attributes, a namespace prefix, and a
# self-closing tag:
SMALL_SAMPLE = """<?xml version="1.0"?>
<html xmlns="http://www.w3.org/1999/xhtml" xmlns:xdc="http://www.xml.com/books">
<!-- A comment -->
<title>Introduction to XSL</title>
<hr/>
<p><xdc:author xdc:attrib="prefixed attribute" attrib="other attrib">A. Namespace</xdc:author></p>
</html>"""
class PullDOMTestCase(unittest.TestCase):
def test_parse(self):
"""Minimal test of DOMEventStream.parse()"""
# This just tests that parsing from a stream works. Actual parser
# semantics are tested using parseString with a more focused XML
# fragment.
# Test with a filename:
handler = pulldom.parse(tstfile)
self.addCleanup(handler.stream.close)
list(handler)
# Test with a file object:
with open(tstfile, "rb") as fin:
list(pulldom.parse(fin))
def test_parse_semantics(self):
"""Test DOMEventStream parsing semantics."""
items = pulldom.parseString(SMALL_SAMPLE)
evt, node = next(items)
# Just check the node is a Document:
self.assertTrue(hasattr(node, "createElement"))
self.assertEqual(pulldom.START_DOCUMENT, evt)
evt, node = next(items)
self.assertEqual(pulldom.START_ELEMENT, evt)
self.assertEqual("html", node.tagName)
self.assertEqual(2, len(node.attributes))
self.assertEqual(node.attributes.getNamedItem("xmlns:xdc").value,
"http://www.xml.com/books")
evt, node = next(items)
self.assertEqual(pulldom.CHARACTERS, evt) # Line break
evt, node = next(items)
# XXX - A comment should be reported here!
# self.assertEqual(pulldom.COMMENT, evt)
# Line break after swallowed comment:
self.assertEqual(pulldom.CHARACTERS, evt)
evt, node = next(items)
self.assertEqual("title", node.tagName)
title_node = node
evt, node = next(items)
self.assertEqual(pulldom.CHARACTERS, evt)
self.assertEqual("Introduction to XSL", node.data)
evt, node = next(items)
self.assertEqual(pulldom.END_ELEMENT, evt)
self.assertEqual("title", node.tagName)
self.assertTrue(title_node is node)
evt, node = next(items)
self.assertEqual(pulldom.CHARACTERS, evt)
evt, node = next(items)
self.assertEqual(pulldom.START_ELEMENT, evt)
self.assertEqual("hr", node.tagName)
evt, node = next(items)
self.assertEqual(pulldom.END_ELEMENT, evt)
self.assertEqual("hr", node.tagName)
evt, node = next(items)
self.assertEqual(pulldom.CHARACTERS, evt)
evt, node = next(items)
self.assertEqual(pulldom.START_ELEMENT, evt)
self.assertEqual("p", node.tagName)
evt, node = next(items)
self.assertEqual(pulldom.START_ELEMENT, evt)
self.assertEqual("xdc:author", node.tagName)
evt, node = next(items)
self.assertEqual(pulldom.CHARACTERS, evt)
evt, node = next(items)
self.assertEqual(pulldom.END_ELEMENT, evt)
self.assertEqual("xdc:author", node.tagName)
evt, node = next(items)
self.assertEqual(pulldom.END_ELEMENT, evt)
evt, node = next(items)
self.assertEqual(pulldom.CHARACTERS, evt)
evt, node = next(items)
self.assertEqual(pulldom.END_ELEMENT, evt)
# XXX No END_DOCUMENT item is ever obtained:
#evt, node = next(items)
#self.assertEqual(pulldom.END_DOCUMENT, evt)
def test_expandItem(self):
"""Ensure expandItem works as expected."""
items = pulldom.parseString(SMALL_SAMPLE)
# Loop through the nodes until we get to a "title" start tag:
for evt, item in items:
if evt == pulldom.START_ELEMENT and item.tagName == "title":
items.expandNode(item)
self.assertEqual(1, len(item.childNodes))
break
else:
self.fail("No \"title\" element detected in SMALL_SAMPLE!")
# Loop until we get to the next start-element:
for evt, node in items:
if evt == pulldom.START_ELEMENT:
break
self.assertEqual("hr", node.tagName,
"expandNode did not leave DOMEventStream in the correct state.")
# Attempt to expand a standalone element:
items.expandNode(node)
self.assertEqual(next(items)[0], pulldom.CHARACTERS)
evt, node = next(items)
self.assertEqual(node.tagName, "p")
items.expandNode(node)
next(items) # Skip character data
evt, node = next(items)
self.assertEqual(node.tagName, "html")
with self.assertRaises(StopIteration):
next(items)
items.clear()
self.assertIsNone(items.parser)
self.assertIsNone(items.stream)
@unittest.expectedFailure
def test_comment(self):
"""PullDOM does not receive "comment" events."""
items = pulldom.parseString(SMALL_SAMPLE)
for evt, _ in items:
if evt == pulldom.COMMENT:
break
else:
self.fail("No comment was encountered")
@unittest.expectedFailure
def test_end_document(self):
"""PullDOM does not receive "end-document" events."""
items = pulldom.parseString(SMALL_SAMPLE)
# Read all of the nodes up to and including </html>:
for evt, node in items:
if evt == pulldom.END_ELEMENT and node.tagName == "html":
break
try:
# Assert that the next node is END_DOCUMENT:
evt, node = next(items)
self.assertEqual(pulldom.END_DOCUMENT, evt)
except StopIteration:
self.fail(
"Ran out of events, but should have received END_DOCUMENT")
def test_getitem_deprecation(self):
parser = pulldom.parseString(SMALL_SAMPLE)
with self.assertWarnsRegex(DeprecationWarning,
r'Use iterator protocol instead'):
# This should have returned 'END_ELEMENT'.
self.assertEqual(parser[-1][0], pulldom.START_DOCUMENT)
def test_external_ges_default(self):
parser = pulldom.parseString(SMALL_SAMPLE)
saxparser = parser.parser
ges = saxparser.getFeature(feature_external_ges)
self.assertEqual(ges, False)
class ThoroughTestCase(unittest.TestCase):
"""Test the hard-to-reach parts of pulldom."""
def test_thorough_parse(self):
"""Test some of the hard-to-reach parts of PullDOM."""
self._test_thorough(pulldom.parse(None, parser=SAXExerciser()))
@unittest.expectedFailure
def test_sax2dom_fail(self):
"""SAX2DOM can"t handle a PI before the root element."""
pd = SAX2DOMTestHelper(None, SAXExerciser(), 12)
self._test_thorough(pd)
def test_thorough_sax2dom(self):
"""Test some of the hard-to-reach parts of SAX2DOM."""
pd = SAX2DOMTestHelper(None, SAX2DOMExerciser(), 12)
self._test_thorough(pd, False)
def _test_thorough(self, pd, before_root=True):
"""Test some of the hard-to-reach parts of the parser, using a mock
parser."""
evt, node = next(pd)
self.assertEqual(pulldom.START_DOCUMENT, evt)
# Just check the node is a Document:
self.assertTrue(hasattr(node, "createElement"))
if before_root:
evt, node = next(pd)
self.assertEqual(pulldom.COMMENT, evt)
self.assertEqual("a comment", node.data)
evt, node = next(pd)
self.assertEqual(pulldom.PROCESSING_INSTRUCTION, evt)
self.assertEqual("target", node.target)
self.assertEqual("data", node.data)
evt, node = next(pd)
self.assertEqual(pulldom.START_ELEMENT, evt)
self.assertEqual("html", node.tagName)
evt, node = next(pd)
self.assertEqual(pulldom.COMMENT, evt)
self.assertEqual("a comment", node.data)
evt, node = next(pd)
self.assertEqual(pulldom.PROCESSING_INSTRUCTION, evt)
self.assertEqual("target", node.target)
self.assertEqual("data", node.data)
evt, node = next(pd)
self.assertEqual(pulldom.START_ELEMENT, evt)
self.assertEqual("p", node.tagName)
evt, node = next(pd)
self.assertEqual(pulldom.CHARACTERS, evt)
self.assertEqual("text", node.data)
evt, node = next(pd)
self.assertEqual(pulldom.END_ELEMENT, evt)
self.assertEqual("p", node.tagName)
evt, node = next(pd)
self.assertEqual(pulldom.END_ELEMENT, evt)
self.assertEqual("html", node.tagName)
evt, node = next(pd)
self.assertEqual(pulldom.END_DOCUMENT, evt)
class SAXExerciser(object):
"""A fake sax parser that calls some of the harder-to-reach sax methods to
ensure it emits the correct events"""
def setContentHandler(self, handler):
self._handler = handler
def parse(self, _):
h = self._handler
h.startDocument()
# The next two items ensure that items preceding the first
# start_element are properly stored and emitted:
h.comment("a comment")
h.processingInstruction("target", "data")
h.startElement("html", AttributesImpl({}))
h.comment("a comment")
h.processingInstruction("target", "data")
h.startElement("p", AttributesImpl({"class": "paraclass"}))
h.characters("text")
h.endElement("p")
h.endElement("html")
h.endDocument()
def stub(self, *args, **kwargs):
"""Stub method. Does nothing."""
pass
setProperty = stub
setFeature = stub
class SAX2DOMExerciser(SAXExerciser):
"""The same as SAXExerciser, but without the processing instruction and
    comment before the root element, because S2D can't handle it"""
def parse(self, _):
h = self._handler
h.startDocument()
h.startElement("html", AttributesImpl({}))
h.comment("a comment")
h.processingInstruction("target", "data")
h.startElement("p", AttributesImpl({"class": "paraclass"}))
h.characters("text")
h.endElement("p")
h.endElement("html")
h.endDocument()
class SAX2DOMTestHelper(pulldom.DOMEventStream):
"""Allows us to drive SAX2DOM from a DOMEventStream."""
def reset(self):
self.pulldom = pulldom.SAX2DOM()
# This content handler relies on namespace support
self.parser.setFeature(xml.sax.handler.feature_namespaces, 1)
self.parser.setContentHandler(self.pulldom)
class SAX2DOMTestCase(unittest.TestCase):
def confirm(self, test, testname="Test"):
self.assertTrue(test, testname)
def test_basic(self):
"""Ensure SAX2DOM can parse from a stream."""
with io.StringIO(SMALL_SAMPLE) as fin:
sd = SAX2DOMTestHelper(fin, xml.sax.make_parser(),
len(SMALL_SAMPLE))
for evt, node in sd:
if evt == pulldom.START_ELEMENT and node.tagName == "html":
break
# Because the buffer is the same length as the XML, all the
# nodes should have been parsed and added:
self.assertGreater(len(node.childNodes), 0)
def testSAX2DOM(self):
"""Ensure SAX2DOM expands nodes as expected."""
sax2dom = pulldom.SAX2DOM()
sax2dom.startDocument()
sax2dom.startElement("doc", {})
sax2dom.characters("text")
sax2dom.startElement("subelm", {})
sax2dom.characters("text")
sax2dom.endElement("subelm")
sax2dom.characters("text")
sax2dom.endElement("doc")
sax2dom.endDocument()
doc = sax2dom.document
root = doc.documentElement
(text1, elm1, text2) = root.childNodes
text3 = elm1.childNodes[0]
self.assertIsNone(text1.previousSibling)
self.assertIs(text1.nextSibling, elm1)
self.assertIs(elm1.previousSibling, text1)
self.assertIs(elm1.nextSibling, text2)
self.assertIs(text2.previousSibling, elm1)
self.assertIsNone(text2.nextSibling)
self.assertIsNone(text3.previousSibling)
self.assertIsNone(text3.nextSibling)
self.assertIs(root.parentNode, doc)
self.assertIs(text1.parentNode, root)
self.assertIs(elm1.parentNode, root)
self.assertIs(text2.parentNode, root)
self.assertIs(text3.parentNode, elm1)
doc.unlink()
if __name__ == "__main__":
unittest.main()
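# --- Illustrative sketch (added for clarity; not part of the original test file) ---
# The core pulldom pattern exercised above, shown as a hypothetical, commented
# example; the XML snippet is an assumption used only for illustration:
#
#   from xml.dom import pulldom
#   events = pulldom.parseString("<doc><p>text</p></doc>")
#   for event, node in events:
#       if event == pulldom.START_ELEMENT and node.tagName == "p":
#           events.expandNode(node)  # pull the full <p> subtree into the node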
| 36.32493 | 98 | 0.628933 |
4a1badc0b506761ac81209545641a14aedb0ab7f
| 284 |
py
|
Python
|
src/main.py
|
aaaaaaaalesha/stack_machine
|
ca54f59ffb315338d2b196a627feacc45842d50e
|
[
"MIT"
] | null | null | null |
src/main.py
|
aaaaaaaalesha/stack_machine
|
ca54f59ffb315338d2b196a627feacc45842d50e
|
[
"MIT"
] | null | null | null |
src/main.py
|
aaaaaaaalesha/stack_machine
|
ca54f59ffb315338d2b196a627feacc45842d50e
|
[
"MIT"
] | null | null | null |
# Copyright 2021 aaaaaaaalesha <sks2311211@mail.ru>
from src.stack_machine import StackMachine
if __name__ == '__main__':
"""Example from the task."""
with open('../binomial.txt', 'r') as f:
source_code = f.read()
sm = StackMachine(source_code)
sm.launch()
| 23.666667 | 51 | 0.661972 |
4a1bae765e76caf87fcdf1c2148949164d164b9a
| 11,324 |
py
|
Python
|
train.py
|
heavenbean/tacotron2
|
361abdcb3130601726c770bce929e443c9009d9e
|
[
"BSD-3-Clause"
] | null | null | null |
train.py
|
heavenbean/tacotron2
|
361abdcb3130601726c770bce929e443c9009d9e
|
[
"BSD-3-Clause"
] | null | null | null |
train.py
|
heavenbean/tacotron2
|
361abdcb3130601726c770bce929e443c9009d9e
|
[
"BSD-3-Clause"
] | null | null | null |
import os
import time
import argparse
import math
from numpy import finfo
import torch
from distributed import apply_gradient_allreduce
import torch.distributed as dist
from torch.utils.data.distributed import DistributedSampler
from torch.utils.data import DataLoader
from model import Tacotron2
from data_utils import TextMelLoader, TextMelCollate
from loss_function import Tacotron2Loss
from logger import Tacotron2Logger
from hparams import create_hparams
global_val_loss = 10000
def reduce_tensor(tensor, n_gpus):
rt = tensor.clone()
dist.all_reduce(rt, op=dist.reduce_op.SUM)
rt /= n_gpus
return rt
def init_distributed(hparams, n_gpus, rank, group_name):
assert torch.cuda.is_available(), "Distributed mode requires CUDA."
print("Initializing Distributed")
# Set cuda device so everything is done on the right GPU.
torch.cuda.set_device(rank % torch.cuda.device_count())
# Initialize distributed communication
dist.init_process_group(
backend=hparams.dist_backend, init_method=hparams.dist_url,
world_size=n_gpus, rank=rank, group_name=group_name)
print("Done initializing distributed")
def prepare_dataloaders(hparams):
# Get data, data loaders and collate function ready
trainset = TextMelLoader(hparams.training_files, hparams)
valset = TextMelLoader(hparams.validation_files, hparams)
collate_fn = TextMelCollate(hparams.n_frames_per_step)
if hparams.distributed_run:
train_sampler = DistributedSampler(trainset)
shuffle = False
else:
train_sampler = None
shuffle = True
train_loader = DataLoader(trainset, num_workers=1, shuffle=shuffle,
sampler=train_sampler,
batch_size=hparams.batch_size, pin_memory=False,
drop_last=True, collate_fn=collate_fn)
return train_loader, valset, collate_fn
def prepare_directories_and_logger(output_directory, log_directory, rank):
if rank == 0:
if not os.path.isdir(output_directory):
os.makedirs(output_directory)
os.chmod(output_directory, 0o775)
logger = Tacotron2Logger(os.path.join(output_directory, log_directory))
else:
logger = None
return logger
def load_model(hparams):
model = Tacotron2(hparams).cuda()
if hparams.fp16_run:
model.decoder.attention_layer.score_mask_value = finfo('float16').min
if hparams.distributed_run:
model = apply_gradient_allreduce(model)
return model
def warm_start_model(checkpoint_path, model, ignore_layers):
assert os.path.isfile(checkpoint_path)
print("Warm starting model from checkpoint '{}'".format(checkpoint_path))
checkpoint_dict = torch.load(checkpoint_path, map_location='cpu')
model_dict = checkpoint_dict['state_dict']
if len(ignore_layers) > 0:
model_dict = {k: v for k, v in model_dict.items()
if k not in ignore_layers}
dummy_dict = model.state_dict()
dummy_dict.update(model_dict)
model_dict = dummy_dict
model.load_state_dict(model_dict)
return model
def load_checkpoint(checkpoint_path, model, optimizer):
assert os.path.isfile(checkpoint_path)
print("Loading checkpoint '{}'".format(checkpoint_path))
checkpoint_dict = torch.load(checkpoint_path, map_location='cpu')
model.load_state_dict(checkpoint_dict['state_dict'])
optimizer.load_state_dict(checkpoint_dict['optimizer'])
learning_rate = checkpoint_dict['learning_rate']
iteration = checkpoint_dict['iteration']
print("Loaded checkpoint '{}' from iteration {}" .format(
checkpoint_path, iteration))
return model, optimizer, learning_rate, iteration
def save_checkpoint(model, optimizer, learning_rate, iteration, filepath):
print("Saving model and optimizer state at iteration {} to {}".format(
iteration, filepath))
torch.save({'iteration': iteration,
'state_dict': model.state_dict(),
'optimizer': optimizer.state_dict(),
'learning_rate': learning_rate}, filepath)
def validate(model, criterion, valset, iteration, batch_size, n_gpus,
collate_fn, logger, distributed_run, rank):
"""Handles all the validation scoring and printing"""
global global_val_loss
model.eval()
with torch.no_grad():
val_sampler = DistributedSampler(valset) if distributed_run else None
val_loader = DataLoader(valset, sampler=val_sampler, num_workers=1,
shuffle=False, batch_size=batch_size,
pin_memory=False, collate_fn=collate_fn)
val_loss = 0.0
for i, batch in enumerate(val_loader):
x, y = model.parse_batch(batch)
y_pred = model(x)
loss = criterion(y_pred, y)
if distributed_run:
reduced_val_loss = reduce_tensor(loss.data, n_gpus).item()
else:
reduced_val_loss = loss.item()
val_loss += reduced_val_loss
val_loss = val_loss / (i + 1)
model.train()
if rank == 0:
global_val_loss = val_loss
print("Validation loss {}: {:9f} ".format(iteration, val_loss))
logger.log_validation(val_loss, model, y, y_pred, iteration)
def train(output_directory, log_directory, checkpoint_path, warm_start, n_gpus,
rank, group_name, hparams):
"""Training and validation logging results to tensorboard and stdout
Params
------
output_directory (string): directory to save checkpoints
    log_directory (string): directory to save tensorboard logs
    checkpoint_path (string): checkpoint path
n_gpus (int): number of gpus
rank (int): rank of current gpu
hparams (object): comma separated list of "name=value" pairs.
"""
global global_val_loss
if hparams.distributed_run:
init_distributed(hparams, n_gpus, rank, group_name)
torch.manual_seed(hparams.seed)
torch.cuda.manual_seed(hparams.seed)
model = load_model(hparams)
learning_rate = hparams.learning_rate
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate,
weight_decay=hparams.weight_decay)
if hparams.fp16_run:
from apex import amp
model, optimizer = amp.initialize(
model, optimizer, opt_level='O2')
if hparams.distributed_run:
model = apply_gradient_allreduce(model)
criterion = Tacotron2Loss()
logger = prepare_directories_and_logger(
output_directory, log_directory, rank)
train_loader, valset, collate_fn = prepare_dataloaders(hparams)
# Load checkpoint if one exists
iteration = 0
epoch_offset = 0
if checkpoint_path is not None:
if warm_start:
model = warm_start_model(
checkpoint_path, model, hparams.ignore_layers)
else:
model, optimizer, _learning_rate, iteration = load_checkpoint(
checkpoint_path, model, optimizer)
if hparams.use_saved_learning_rate:
learning_rate = _learning_rate
iteration += 1 # next iteration is iteration + 1
epoch_offset = max(0, int(iteration / len(train_loader)))
model.train()
is_overflow = False
    # ================ MAIN TRAINING LOOP! ===================
for epoch in range(epoch_offset, hparams.epochs):
print("Epoch: {}".format(epoch))
for i, batch in enumerate(train_loader):
start = time.perf_counter()
for param_group in optimizer.param_groups:
param_group['lr'] = learning_rate
model.zero_grad()
x, y = model.parse_batch(batch)
y_pred = model(x)
loss = criterion(y_pred, y)
if hparams.distributed_run:
reduced_loss = reduce_tensor(loss.data, n_gpus).item()
else:
reduced_loss = loss.item()
if hparams.fp16_run:
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
else:
loss.backward()
if hparams.fp16_run:
grad_norm = torch.nn.utils.clip_grad_norm_(
amp.master_params(optimizer), hparams.grad_clip_thresh)
is_overflow = math.isnan(grad_norm)
else:
grad_norm = torch.nn.utils.clip_grad_norm_(
model.parameters(), hparams.grad_clip_thresh)
optimizer.step()
if not is_overflow and rank == 0:
duration = time.perf_counter() - start
print("Train loss {} {:.6f} Grad Norm {:.6f} {:.2f}s/it".format(
iteration, reduced_loss, grad_norm, duration))
logger.log_training(
reduced_loss, grad_norm, learning_rate, duration, iteration)
if not is_overflow and (iteration % hparams.iters_per_checkpoint == 0):
validate(model, criterion, valset, iteration,
hparams.batch_size, n_gpus, collate_fn, logger,
hparams.distributed_run, rank)
if rank == 0:
checkpoint_path = os.path.join(
output_directory, "checkpoint_%d_%.6f" % (iteration, global_val_loss))
save_checkpoint(model, optimizer, learning_rate, iteration,
checkpoint_path)
iteration += 1
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-o', '--output_directory', type=str,
help='directory to save checkpoints')
parser.add_argument('-l', '--log_directory', type=str,
help='directory to save tensorboard logs')
parser.add_argument('-c', '--checkpoint_path', type=str, default=None,
required=False, help='checkpoint path')
parser.add_argument('--warm_start', action='store_true',
help='load model weights only, ignore specified layers')
parser.add_argument('--n_gpus', type=int, default=1,
required=False, help='number of gpus')
parser.add_argument('--rank', type=int, default=0,
required=False, help='rank of current gpu')
parser.add_argument('--group_name', type=str, default='group_name',
required=False, help='Distributed group name')
parser.add_argument('--hparams', type=str,
required=False, help='comma separated name=value pairs')
args = parser.parse_args()
hparams = create_hparams(args.hparams)
torch.backends.cudnn.enabled = hparams.cudnn_enabled
torch.backends.cudnn.benchmark = hparams.cudnn_benchmark
print("FP16 Run:", hparams.fp16_run)
print("Dynamic Loss Scaling:", hparams.dynamic_loss_scaling)
print("Distributed Run:", hparams.distributed_run)
print("cuDNN Enabled:", hparams.cudnn_enabled)
print("cuDNN Benchmark:", hparams.cudnn_benchmark)
train(args.output_directory, args.log_directory, args.checkpoint_path,
args.warm_start, args.n_gpus, args.rank, args.group_name, hparams)
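# --- Illustrative usage sketch (added for clarity; not part of the original file) ---
# Hypothetical invocations of this script; the directory names, hparams string
# and checkpoint path are assumptions, not values taken from this repository:
#
#   python train.py -o outdir -l logdir --hparams "batch_size=32,fp16_run=False"
#
#   # resume from a saved checkpoint (filenames follow checkpoint_<iter>_<val_loss>):
#   python train.py -o outdir -l logdir -c outdir/checkpoint_10000_0.412345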
| 38.256757 | 94 | 0.648534 |
4a1bb0c6ee559c920e315a41c8daf3dd88aa4c53
| 3,351 |
py
|
Python
|
recipes/g2o/master/conanfile.py
|
SolarFramework/conan-solar
|
cdb6253556c056baacaf3e5f28b595869adddb88
|
[
"Apache-2.0"
] | null | null | null |
recipes/g2o/master/conanfile.py
|
SolarFramework/conan-solar
|
cdb6253556c056baacaf3e5f28b595869adddb88
|
[
"Apache-2.0"
] | 3 |
2021-02-15T08:48:16.000Z
|
2021-02-24T16:35:54.000Z
|
recipes/g2o/master/conanfile.py
|
SolarFramework/conan-solar
|
cdb6253556c056baacaf3e5f28b595869adddb88
|
[
"Apache-2.0"
] | null | null | null |
import os
from conans import ConanFile, CMake, tools
class Libg2oConan(ConanFile):
name = "g2o"
upstream_version = "master"
package_revision = ""
version = "{0}{1}".format(upstream_version, package_revision)
generators = "cmake"
settings = "os", "arch", "compiler", "build_type"
options = {"shared": [True, False],
"csparse": [True,False],
"opengl": [True,False],
"apps":[True,False],
"examples":[True,False]
}
default_options = {"shared": True,
"csparse": True,
"opengl": False,
"apps":False,
"examples":False}
exports = [
]
url = "https://github.com/Solar-Framework/conan-solar/recipes/g2o/20200410"
homepage = "https://github.com/RainerKuemmerle/g2o/"
license = "BSD license (lGPL3+ with csparse_extension, GPL3+ with viewer, incremental and slam2d_g2o extension)"
description = ("g2o is an open-source C++ framework for optimizing graph-based nonlinear error functions.")
source_subfolder = "source_subfolder"
short_paths = True
def requirements(self):
self.requires("eigen/3.3.7@conan-solar/stable")
self.requires("common/1.0.2@conan-solar/stable")
def source(self):
tools.get("https://github.com/RainerKuemmerle/g2o/archive/{0}.tar.gz".format(self.upstream_version))
os.rename("g2o-" + self.upstream_version, self.source_subfolder)
@property
def _android_arch(self):
arch = str(self.settings.arch)
return tools.to_android_abi(arch)
def build(self):
g2o_source_dir = os.path.join(self.source_folder, self.source_subfolder)
# Import common flags and defines
import common
# Generate Cmake wrapper
common.generate_cmake_wrapper(
cmakelists_path='CMakeLists.txt',
source_subfolder=self.source_subfolder,
build_type=self.settings.build_type
)
cmake = CMake(self)
cmake.definitions["BUILD_LGPL_SHARED_LIBS"] = False
cmake.definitions["BUILD_SHARED_LIBS"] = self.options.shared
cmake.definitions["G2O_USE_OPENGL"] = self.options.opengl
cmake.definitions["G2O_USE_CSPARSE"] = self.options.csparse
cmake.definitions["BUILD_CSPARSE"] = self.options.csparse
cmake.definitions["G2O_BUILD_APPS"] = self.options.apps
cmake.definitions["G2O_BUILD_EXAMPLES"] = self.options.examples
if not tools.os_info.is_windows:
cmake.definitions["CMAKE_POSITION_INDEPENDENT_CODE"] = True
cmake.configure()
cmake.build()
cmake.install()
def package_info(self):
self.cpp_info.libs = tools.collect_libs(self)
def package(self):
# Retrieve common helpers
import common
        # Fix all hard-coded paths to the conan package in all .cmake files
common.fix_conan_path(self, self.package_folder, '*.cmake')
if self.settings.os == 'Android':
if not self.options.shared:
self.cpp_info.includedirs.append(
os.path.join('sdk', 'native', 'jni', 'include'))
self.cpp_info.libdirs.append(
os.path.join('sdk', 'native', 'staticlibs', self._android_arch))
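# --- Illustrative usage sketch (added for clarity; not part of the original recipe) ---
# A hypothetical way to build and consume this recipe with Conan 1.x; the
# option values below are assumptions, the user/channel mirrors the requires above:
#
#   conan create . conan-solar/stable -o g2o:shared=True -o g2o:opengl=False
#
# and in a consumer's conanfile.txt:
#
#   [requires]
#   g2o/master@conan-solar/stable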
| 36.423913 | 116 | 0.623993 |
4a1bb1d8ad1b77be1b6b226fd465f8b1453aaffd
| 15,822 |
py
|
Python
|
alba/AA-Jacob-Heyblocq/reuse/scripts/bijdragersAAJH-WerkenInEuropeana.py
|
KBNLwikimedia/Alba-Amicorum
|
e41e10518830ec83a2a43d292b8c73b940f912a9
|
[
"CC0-1.0"
] | null | null | null |
alba/AA-Jacob-Heyblocq/reuse/scripts/bijdragersAAJH-WerkenInEuropeana.py
|
KBNLwikimedia/Alba-Amicorum
|
e41e10518830ec83a2a43d292b8c73b940f912a9
|
[
"CC0-1.0"
] | null | null | null |
alba/AA-Jacob-Heyblocq/reuse/scripts/bijdragersAAJH-WerkenInEuropeana.py
|
KBNLwikimedia/Alba-Amicorum
|
e41e10518830ec83a2a43d292b8c73b940f912a9
|
[
"CC0-1.0"
] | null | null | null |
# Aim: Find works in Europeana by/from/about/related to the contributors to the Album amicorum Jacob Heyblocq
# Output of this script: https://github.com/KBNLwikimedia/Alba-Amicorum/blob/main/alba/AA-Jacob-Heyblocq/reuse/excels/AAJH-contributors-works-Europeana.xlsx
# Script is not fully finished and/or 100% reliable
# Last update dd 16-06-2021 by Olaf Janssen
# https://www.wikidata.org/wiki/Wikidata:WikiProject_Alba_amicorum_National_Library_of_the_Netherlands/Jacob_Heyblocq/Contributors#Europeana
# https://kbnlwikimedia.github.io/KBCollectionHighlights/stories/Cool%20new%20things%20you%20can%20now%20do%20with%20the%20KB's%20collection%20highlights/Part%205%2C%20Reuse.html (item 48)
######################################################################
import json
import requests
from googletrans import Translator
import pandas as pd
import openpyxl
import os
translator = Translator() # Use Google Translate for translations from English into Dutch - https://pypi.org/project/googletrans/
eurapi_baseurl="https://www.europeana.eu/api"
eurdata_baseurl="http://data.europeana.eu"
eurapi_key="?wskey=apidemo"
eurapi2_key="?wskey=api2demo"
dcTypelist = []
dctermsIssuedlist = []
dcDatelist= []
dcCoveragelist=[]
dctermsTemporallist=[]
dctermsCreatedlist=[]
dctypes_allowed = ['tekening', 'grafiek','prent', 'etsen', 'handschrift','ornamentprent', 'graveren (drukprocedé)',
'prenttekening','pen','kostuumprent','boekillustratie','albumblad', 'doordruk', 'kleurendruk',
'schilderij', 'crayonmanier', 'roulette','droge naald','penseel','mezzotint','toonlithografie',
'lithografie','lithografie (techniek)','historieprent','aquatint','titelpagina', 'met de hand kleuren',
'kopergravure (prent)','Grafiek Drukwerk', 'Portretten','Portret','Aquarel', 'miniatuur',
'Monograph','print', 'etching','Painting','engraving','drawing','pen','Line etching','Miniatures',
'Miniature','text','counterproof', 'brush','drypoint', 'graphics', 'handwriting', 'ornament print',
'engraving (printing process)','print drawing', 'costume print','book illustration', 'album page',
'blow-through', 'color printing', 'crayon way', 'roulette', 'paintbrush', 'mezzotint', 'tone lithography',
'lithography', 'lithography (technique)', 'history print', 'aquatint', 'title page', 'coloring by hand',
'copper engraving (print)', 'Graphic Print', 'Portraits', 'Portrait', 'Watercolor']
# Setup empty Pandas Dataframe
contribs_works_df = pd.DataFrame(columns=[
'Contributor',
'ContributorLabel',
'EuropeanaAgentID',
'EuropeanaAgentJsonURL',
'EuropeanaAgentBioInfoEN',
'EuropeanaAgentBioInfoNL',
'EuropeanaAgentWorksJsonURL',
'WorkJsonURL',
'Title',
'Description',
'Creators',
'ThumbURL',
'ImageURL',
'Institution',
'NativeInterface',
'EuropeanaInterface',
'WorkSuitableForHackaLOD',
'Year',
'dctermsIssued',
'dcDate',
'dcCoverage',
'dctermsTemporal',
'dctermsCreated'
])
############ Wikidata: Retrieve AAJH contributors that have a Europeana ID P7704
wd_jsonurl = "https://query.wikidata.org/sparql?query=SELECT%20DISTINCT%20%3Fentity%20%3FentityLabel%20%3Fvalue%20%3FvalueLabel%20WHERE%20%7B%20%20%3Fentity%20wdt%3AP31%20wd%3AQ5%3Bwdt%3AP3919%20wd%3AQ72752496%20.%20%20%3Fentity%20wdt%3AP3919%20wd%3AQ72752496%20.%20%20%3Fentity%20p%3AP7704%20%3Fprop%20.%20OPTIONAL%20%7B%20%3Fprop%20ps%3AP7704%20%3Fvalue%20%7D%20%20SERVICE%20wikibase%3Alabel%20%7B%20bd%3AserviceParam%20wikibase%3Alanguage%20%22%5BAUTO_LANGUAGE%5D%2Cen%22.%20%7D%7D&format=json"
wdr = requests.get(wd_jsonurl)
wddata = json.loads(wdr.content) # utf-8, see https://stackoverflow.com/questions/44203397/python-requests-get-returns-improperly-decoded-text-instead-of-utf-8
wdcontribs = list(wddata['results']['bindings'])
#print(wdcontribs)
for c in wdcontribs: # [0:1]: #Iterate over AAJH contributors
if c.get('entityLabel', 'aa') != 'aa':
contributorLabel = c.get('entityLabel').get('value')
print(' '*40)
print(f"************* {contributorLabel.upper()} ************************")
print(f"-- Info about this contributor from Wikidata --")
print(f" * ContributorLabel: {contributorLabel}")
if c.get('entity', 'aa') != 'aa':
contributor = c.get('entity').get('value')
print(f" * Contributor: {contributor}")
if c.get('value', 'aa') != 'aa':
euragent = c.get('value').get('value')
print(f" * EuropeanaAgent: {euragent}")
########## Europeana: Retrieve data about this contributor from the Europeana API / json
euragent_jsonurl=f"{eurapi_baseurl}/entities/{euragent}.json{eurapi_key}"
euragentr = requests.get(euragent_jsonurl)
euragentdata = json.loads(euragentr.content)
if euragentdata.get('biographicalInformation', 'aa') != 'aa': #bio info is available from Europeana,
biolist = euragentdata.get('biographicalInformation')
biographicalInformationEN= [bio['@value'] for bio in biolist if bio['@language'] == 'en'][0] #Bio info in English is always present
print(f"-- Info about this contributor from Europeana --")
print(f" * euragent_jsonurl: {euragent_jsonurl}")
print(f" * BiographicalInformationEN: {biographicalInformationEN}")
#biographicalInformationNL = translator.translate(biographicalInformationEN, dest='nl')
biographicalInformationNL = 'ToEnableInScript'
#print(f"biographicalInformationNL (Google Translate): {biographicalInformationNL.text}")
########## Europeana: Retrieve works (books, poems, paintings..) from/by this contributor from the Europeana search API in json.
# Maximum = 100 works, but pagination to the next 100 results is possible, see https://pro.europeana.eu/page/search#pagination
# API docs: https://pro.europeana.eu/page/search
# Europeana portal: https://www.europeana.eu/nl/collections/person/148386 --> 147 results
# Europeana search API: https://www.europeana.eu/api/v2/search.json?wskey=api2demo&media=true&start=1&rows=100&profile=minimal&query=%22http://data.europeana.eu/agent/base/148386%22
# --> also 147 results
eurworks_jsonurl = f"{eurapi_baseurl}/v2/search.json{eurapi2_key}&media=true&start=1&rows=100&profile=minimal&query=%22{eurdata_baseurl}/{euragent}%22"
print(f"-- Info about works by this contributor from Europeana --")
print(f" * eurworks_jsonurl (max. 100 rows per page, see https://pro.europeana.eu/page/search#pagination): {eurworks_jsonurl}")
eurworksr = requests.get(eurworks_jsonurl)
eurworksdata = json.loads(eurworksr.content)
items = eurworksdata['items']
for item in items:
# For each work (item/object) we want to retrieve the following fields
# - Title of the work --> title
if item.get('title', 'aa') != 'aa':
                    # if wtitle is a list with 2 or more items, join the list items with " // " in between
# https://stackoverflow.com/questions/12453580/how-to-concatenate-items-in-a-list-to-a-single-string
wtitle = ' // '.join(item.get('title'))
print(f" ----WORK: {wtitle} ----")
print(f" * Title: {wtitle}")
else: wtitle =''
# - Description of the work --> dcDescription
if item.get('dcDescription', 'aa') != 'aa':
wdescription = ' // '.join(item.get('dcDescription'))
print(f" * Description: {wdescription}")
else: wdescription =''
# - Creator(s) --> dcCreator
if item.get('dcCreator', 'aa') != 'aa':
wcreators = item.get('dcCreator')
print(f" * Creators: {wcreators}")
                    # TODO - Creators: split these out further -- Creators: ['Constantijn Huygens', 'http://data.europeana.eu/agent/base/147162']
else: wcreators =''
# - Date of creation --> year
if item.get('year', 'aa') != 'aa':
wyear = item.get('year')[0]
print(f" * Year created: {wyear}")
else: wyear =''
# - Thumb/Preview --> edmPreview
if item.get('edmPreview', 'aa') != 'aa':
wthumb = item.get('edmPreview')[0]
print(f" * Preview: {wthumb}")
else: wthumb =''
# - Full image ('.jpg') --> edmIsShownBy
if item.get('edmIsShownBy', 'aa') != 'aa':
wimage = item.get('edmIsShownBy')[0]
print(f" * Full image: {wimage}")
else: wimage =''
# - Institution/ data provider --> dataProvider
if item.get('dataProvider', 'aa') != 'aa':
winstitution = item.get('dataProvider')[0]
print(f" * Institution: {winstitution}")
else: winstitution =''
# - URL of work in native interface of institution --> edmIsShownAt
if item.get('edmIsShownAt', 'aa') != 'aa':
wnatinterface = item.get('edmIsShownAt')[0]
print(f" * Work in interface of {winstitution}: {wnatinterface}")
else: wnatinterface =''
# - URL of work in Europeana interface --> guid
if item.get('guid', 'aa') != 'aa':
weurinterface = item.get('guid')
print(f" * Work in Europeana: {weurinterface}")
else: weurinterface =''
########## Europeana: Retrieve additional details from each work separately
# 1) type of work (etching, painting, book, poem...)
# 2) year of publication/creation/issuance/
# 3) lat-long
# - Json representation of the work --> link
if item.get('link', 'aa') != 'aa':
wjsonurl = item.get('link')
print(f" * Json representation URL of this work: {wjsonurl}")
wr = requests.get(wjsonurl)
workdata = json.loads(wr.content)
work = workdata['object']
print(f" * Work dictionary: {work}")
if work.get('proxies', 'aa') != 'aa':
proxies=work.get('proxies')[0]
print(f" * Proxies: {proxies}")
# 1) type of work (etching, painting, book, poem...) --> proxies/dctype
if proxies.get('dcType', 'aa') != 'aa':
wdctype = proxies.get('dcType')
print(f" * Type of work: {wdctype}")
# dcTypelist.append(wdctype)
# For HackaLOD purposes we want to filter the types of works/objects, based on (the value of) the dcType field
                        # To keep things simple we'll only use NL and EN as filter languages
# Allowed values of dcType for HackaLOD: see list 'dctypes_allowed' above
for allowtype in dctypes_allowed:
if str(allowtype).lower() in str(wdctype).lower():
suitableforHackaLOD = "True"
break
else:
suitableforHackaLOD = "False"
print(f" * Object is of suitable type for HackaLOD: {suitableforHackaLOD}")
else:
print(f" * No dcType specified, object excluded for HackaLOD")
suitableforHackaLOD = "False"
# 2) year of publication/issuance -->
# a) proxies/dctermsIssued or
# b) proxies/dcDate or
# c) proxies/dcCoverage
# d) dctermsTemporal
# e) dctermsCreated
if proxies.get('dctermsIssued', 'aa') != 'aa':
#dctermsIssued = proxies.get('dctermsIssued').get('def')[0]
# https://stackoverflow.com/questions/33709331/dictionary-get-value-without-knowing-the-key
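                        # i.e. take the first language-keyed value list (whatever its key: 'def', 'en', ...) and use its first entry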
dctermsIssued=list(proxies.get('dctermsIssued').values())[0][0]
if dctermsIssued.startswith('http://semium.org/time/'):
dctermsIssued = dctermsIssued.split('/time/')[1]
dctermsIssuedlist.append(dctermsIssued)
print(f" * dctermsIssued: {dctermsIssued}")
else: dctermsIssued = ''
if proxies.get('dcDate', 'aa') != 'aa':
dcDate = list(proxies.get('dcDate').values())[0][0]
# Do some dates cleaning
if dcDate.startswith('http://semium.org/time/'):
dcDate = dcDate.split('/time/')[1]
if dcDate.startswith('geboorte\xa0'):
dcDate = dcDate.split('geboorte\xa0')[1]
if dcDate.startswith('\xa0'):
dcDate = dcDate.split('\xa0')[1]
dcDatelist.append(dcDate)
print(f" * dcDate: {dcDate}")
else: dcDate = ''
if proxies.get('dcCoverage', 'aa') != 'aa':
dcCoverage = list(proxies.get('dcCoverage').values())[0][0]
dcCoveragelist.append(dcCoverage)
print(f" * dcCoverage: {dcCoverage}")
else: dcCoverage = ''
if proxies.get('dctermsTemporal', 'aa') != 'aa':
dctermsTemporal = list(proxies.get('dctermsTemporal').values())[0][0]
dctermsTemporallist.append(dctermsTemporal)
print(f" * dctermsTemporal: {dctermsTemporal}")
else: dctermsTemporal = ''
if proxies.get('dctermsCreated', 'aa') != 'aa':
dctermsCreated = list(proxies.get('dctermsCreated').values())[0][0]
dctermsCreatedlist.append(dctermsCreated)
print(f" * dctermsCreated: {dctermsCreated}")
else: dctermsCreated = ''
# 3) TODO Places and lat-long
else: wjsonurl =''
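            # Note: when 'link' is missing, suitableforHackaLOD and the dcterms*/dcDate fields are not reset here,
            # so they may carry over from the previous item (or be undefined for the very first one);
            # a stricter version would also reset them in this else-branch.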
# Create 1 row in dataframe
df_row = {
'Contributor' : contributor ,
'ContributorLabel' : contributorLabel ,
'EuropeanaAgentID' : euragent ,
'EuropeanaAgentJsonURL' : euragent_jsonurl,
'EuropeanaAgentBioInfoEN' : biographicalInformationEN,
'EuropeanaAgentBioInfoNL' : biographicalInformationNL,
'EuropeanaAgentWorksJsonURL': eurworks_jsonurl,
'WorkJsonURL': wjsonurl,
'Title' : wtitle,
'Description' : wdescription,
'Creators' : wcreators,
'ThumbURL' : wthumb,
'ImageURL' : wimage,
'Institution' : winstitution,
'NativeInterface' : wnatinterface,
'EuropeanaInterface' : weurinterface,
'WorkSuitableForHackaLOD' : suitableforHackaLOD,
'Year' : wyear,
'dctermsIssued' : dctermsIssued,
'dcDate' : dcDate,
'dcCoverage' : dcCoverage,
'dctermsTemporal' : dctermsTemporal,
'dctermsCreated' : dctermsCreated
}
print(f" * Dataframe row: {str(df_row)}")
# add row to df
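            # DataFrame.append was removed in pandas 2.0; on newer pandas use (assuming pandas is imported as pd):
            # contribs_works_df = pd.concat([contribs_works_df, pd.DataFrame([df_row])], ignore_index=True)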
contribs_works_df = contribs_works_df.append(df_row, ignore_index = True)
print(contribs_works_df)
    # Export df to Excel
file_name = '../excels/AAJH-contributors-works-Europeana.xlsx'
contribs_works_df.to_excel(file_name, index = True, header=True, sheet_name='AAJHContributors-Works')
print(' '*40)
print(f"* dcTypelist: {dcTypelist}")
print(' ' * 40)
print(f"* dctermsIssuedlist: {dctermsIssuedlist}")
print(' ' * 40)
print(f"* dcDatelist: {dcDatelist}")
print(' ' * 40)
print(f"* dcCoveragelist: {dcCoveragelist}")
print(' ' * 40)
print(f"* dctermsTemporallist: {dctermsTemporallist}")
print(' ' * 40)
print(f"* dctermsCreatedlist: {dctermsCreatedlist}")
| 51.203883 | 498 | 0.601315 |
4a1bb31a4f0b7f37f1f98f1fa6d5c3a0d3868217
| 1,284 |
py
|
Python
|
Lib/idlelib/MultiStatusBar.py
|
sireliah/polish-python
|
605df4944c2d3bc25f8bf6964b274c0a0d297cc3
|
[
"PSF-2.0"
] | 1 |
2018-06-21T18:21:24.000Z
|
2018-06-21T18:21:24.000Z
|
Lib/idlelib/MultiStatusBar.py
|
sireliah/polish-python
|
605df4944c2d3bc25f8bf6964b274c0a0d297cc3
|
[
"PSF-2.0"
] | null | null | null |
Lib/idlelib/MultiStatusBar.py
|
sireliah/polish-python
|
605df4944c2d3bc25f8bf6964b274c0a0d297cc3
|
[
"PSF-2.0"
] | null | null | null |
zaimportuj re
z tkinter zaimportuj *
klasa MultiStatusBar(Frame):
def __init__(self, master=Nic, **kw):
jeżeli master jest Nic:
master = Tk()
Frame.__init__(self, master, **kw)
self.labels = {}
def set_label(self, name, text='', side=LEFT):
jeżeli name nie w self.labels:
label = Label(self, bd=1, relief=SUNKEN, anchor=W)
label.pack(side=side)
self.labels[name] = label
inaczej:
label = self.labels[name]
label.config(text=text)
def _multistatus_bar(parent):
root = Tk()
width, height, x, y = list(map(int, re.split('[x+]', parent.geometry())))
root.geometry("+%d+%d" %(x, y + 150))
root.title("Test multistatus bar")
frame = Frame(root)
text = Text(frame)
text.pack()
msb = MultiStatusBar(frame)
msb.set_label("one", "hello")
msb.set_label("two", "world")
msb.pack(side=BOTTOM, fill=X)
def change():
msb.set_label("one", "foo")
msb.set_label("two", "bar")
button = Button(root, text="Update status", command=change)
button.pack(side=BOTTOM)
frame.pack()
frame.mainloop()
root.mainloop()
jeżeli __name__ == '__main__':
z idlelib.idle_test.htest zaimportuj run
run(_multistatus_bar)
| 27.913043 | 77 | 0.595016 |
4a1bb399f2d63bd74cd98642f8e4f22438213b7d
| 1,516 |
py
|
Python
|
summary.py
|
bubbliiiing/mobilenet-yolov4-pytorch
|
ae3dd3bf307b1f3061765722a23b0b3e1037eaa0
|
[
"MIT"
] | 55 |
2021-12-09T06:57:33.000Z
|
2022-03-29T05:35:44.000Z
|
summary.py
|
bubbliiiing/mobilenet-yolov4-pytorch
|
ae3dd3bf307b1f3061765722a23b0b3e1037eaa0
|
[
"MIT"
] | 8 |
2021-12-21T00:57:34.000Z
|
2022-03-22T15:49:07.000Z
|
summary.py
|
bubbliiiing/mobilenet-yolov4-pytorch
|
ae3dd3bf307b1f3061765722a23b0b3e1037eaa0
|
[
"MIT"
] | 28 |
2021-12-10T08:38:24.000Z
|
2022-03-29T06:47:26.000Z
|
#--------------------------------------------#
#   This part of the code is used to inspect the network structure
#--------------------------------------------#
import torch
from thop import clever_format, profile
from torchsummary import summary
from nets.yolo import YoloBody
if __name__ == "__main__":
input_shape = [416, 416]
anchors_mask = [[6, 7, 8], [3, 4, 5], [0, 1, 2]]
num_classes = 80
backbone = 'mobilenetv1'
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
m = YoloBody(anchors_mask, num_classes, backbone=backbone).to(device)
summary(m, (3, input_shape[0], input_shape[1]))
# mobilenetv1-yolov4 40,952,893
# mobilenetv2-yolov4 39,062,013
# mobilenetv3-yolov4 39,989,933
    # mobilenetv1-yolov4 with modified PANet 12,692,029
    # mobilenetv2-yolov4 with modified PANet 10,801,149
    # mobilenetv3-yolov4 with modified PANet 11,729,069
dummy_input = torch.randn(1, 3, input_shape[0], input_shape[1]).to(device)
flops, params = profile(m.to(device), (dummy_input, ), verbose=False)
#--------------------------------------------------------#
    #   flops * 2 is used because thop's profile does not count a convolution as two operations
    #   Some papers count a convolution as two operations (multiply and add); in that case multiply by 2
    #   Some papers only count multiplications and ignore additions; in that case do not multiply by 2
    #   This code multiplies by 2, following YOLOX
#--------------------------------------------------------#
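    #   e.g. if profile reports N multiply-accumulate operations, 2*N counts each MAC as one multiply plus one add
    #   (generic illustration, not a measured number for this model)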
flops = flops * 2
flops, params = clever_format([flops, params], "%.3f")
print('Total GFLOPS: %s' % (flops))
print('Total params: %s' % (params))
| 37.9 | 83 | 0.537599 |
4a1bb4ee00c382294ce10b617298df0b11dfdbf5
| 5,465 |
py
|
Python
|
docs/source/conf.py
|
leoleoasd/dgl-ke
|
3a6f7635421a10f6276936b63c56738ba0fcd044
|
[
"Apache-2.0"
] | 1 |
2022-03-27T07:19:35.000Z
|
2022-03-27T07:19:35.000Z
|
docs/source/conf.py
|
leoleoasd/dgl-ke
|
3a6f7635421a10f6276936b63c56738ba0fcd044
|
[
"Apache-2.0"
] | null | null | null |
docs/source/conf.py
|
leoleoasd/dgl-ke
|
3a6f7635421a10f6276936b63c56738ba0fcd044
|
[
"Apache-2.0"
] | 1 |
2022-03-27T15:26:36.000Z
|
2022-03-27T15:26:36.000Z
|
# -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- Project information -----------------------------------------------------
project = u'dglke'
copyright = u'2020, dgl-team'
author = u'dgl-team'
# The short X.Y version
version = u''
# The full version, including alpha/beta/rc tags
release = u'0.1.0'
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.intersphinx',
'sphinx.ext.mathjax'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['.templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = []
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = None
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['.static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'dglkedoc'
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'dglke.tex', u'dglke Documentation',
u'dgl-team', 'manual'),
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'dglke', u'dglke Documentation',
[author], 1)
]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'dglke', u'dglke Documentation',
author, 'dglke', 'One line description of project.',
'Miscellaneous'),
]
# -- Options for Epub output -------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#
# epub_identifier = ''
# A unique identification for the text.
#
# epub_uid = ''
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
# -- Extension configuration -------------------------------------------------
# -- Options for intersphinx extension ---------------------------------------
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'https://docs.python.org/': None}
| 29.540541 | 79 | 0.643916 |
4a1bb5371f6fbeff1afff233141bfd3fd2514d8d
| 1,013 |
py
|
Python
|
plaso/formatters/bsm.py
|
Defense-Cyber-Crime-Center/plaso
|
4f3a85fbea10637c1cdbf0cde9fc539fdcea9c47
|
[
"Apache-2.0"
] | 2 |
2016-02-18T12:46:29.000Z
|
2022-03-13T03:04:59.000Z
|
plaso/formatters/bsm.py
|
CNR-ITTIG/plasodfaxp
|
923797fc00664fa9e3277781b0334d6eed5664fd
|
[
"Apache-2.0"
] | null | null | null |
plaso/formatters/bsm.py
|
CNR-ITTIG/plasodfaxp
|
923797fc00664fa9e3277781b0334d6eed5664fd
|
[
"Apache-2.0"
] | 6 |
2016-12-18T08:05:36.000Z
|
2021-04-06T14:19:11.000Z
|
# -*- coding: utf-8 -*-
"""The Basic Security Module (BSM) binary files event formatter."""
from plaso.formatters import interface
from plaso.formatters import manager
class BSMFormatter(interface.ConditionalEventFormatter):
"""Formatter for an BSM log entry."""
DATA_TYPE = u'bsm:event'
FORMAT_STRING_PIECES = [
u'Type: {event_type}',
u'Information: {extra_tokens}']
FORMAT_STRING_SHORT_PIECES = [
u'Type: {event_type}']
SOURCE_LONG = u'BSM entry'
SOURCE_SHORT = u'LOG'
class MacBSMFormatter(interface.ConditionalEventFormatter):
"""Formatter for a Mac OS X BSM log entry."""
DATA_TYPE = u'mac:bsm:event'
FORMAT_STRING_PIECES = [
u'Type: {event_type}',
u'Return: {return_value}',
u'Information: {extra_tokens}']
FORMAT_STRING_SHORT_PIECES = [
u'Type: {event_type}',
u'Return: {return_value}']
SOURCE_LONG = u'BSM entry'
SOURCE_SHORT = u'LOG'
manager.FormattersManager.RegisterFormatters([
BSMFormatter, MacBSMFormatter])
| 23.022727 | 67 | 0.694965 |
4a1bb6648bf8a2fafa5125e79b74decf4086260e
| 4,031 |
py
|
Python
|
rubin_changelog/repository.py
|
womullan/lsst_git_changelog
|
63d27f6540146ec78071fac555a45494e7ed31f6
|
[
"BSD-2-Clause"
] | 2 |
2017-02-27T17:34:17.000Z
|
2020-06-01T06:07:44.000Z
|
rubin_changelog/repository.py
|
womullan/lsst_git_changelog
|
63d27f6540146ec78071fac555a45494e7ed31f6
|
[
"BSD-2-Clause"
] | 3 |
2020-07-15T03:38:48.000Z
|
2021-09-22T20:03:44.000Z
|
rubin_changelog/repository.py
|
womullan/lsst_git_changelog
|
63d27f6540146ec78071fac555a45494e7ed31f6
|
[
"BSD-2-Clause"
] | 4 |
2021-07-22T22:56:14.000Z
|
2022-01-26T01:10:02.000Z
|
import logging
import os
import re
import shutil
import subprocess
from datetime import datetime
from typing import List, Set, Optional
def call_git(*args: str, cwd: str, git_exec: str = "/usr/bin/git") -> str:
to_exec = [git_exec] + list(args)
logging.debug(to_exec)
logging.debug(cwd)
return subprocess.check_output(to_exec, cwd=cwd, stderr=subprocess.STDOUT).decode(
"utf-8"
)
class Repository(object):
def __init__(self, path: str, *, branch_name: str = "master"):
self.path = path
self.branch_name = branch_name
self._tags: Set[str] = set() # populated on demand.
# Make sure we're using the appropriate branch
self.__call_git("symbolic-ref", "HEAD", f"refs/heads/{branch_name}")
def __call_git(self, *args: str) -> str:
return call_git(*args, cwd=self.path)
def commits(
self, reachable_from: Optional[str] = None, merges_only: bool = False
) -> List[str]:
args = ["log", "--pretty=format:%H"]
if reachable_from:
args.append(reachable_from)
if merges_only:
args.append("--merges")
return self.__call_git(*args).split()
def merges_between(self, old, new):
args = ["log", "--pretty=format:%H", "--merges", f"{old}...{new}"]
try:
return self.__call_git(*args).split()
        except subprocess.CalledProcessError:
return []
def message(self, commit_hash: str) -> str:
return self.__call_git("show", commit_hash, "--pretty=format:%s")
def tag_date(self, tag_name: str) -> datetime:
return datetime.fromtimestamp(
int(self.__call_git("tag", "-l", tag_name, "--format=%(taggerdate:unix)"))
)
def sha_for_date(self, date: datetime):
return self.__call_git(
"rev-list", "-1", f'--before="{date}"', self.branch_name
).strip()
@property
def tags(self) -> Set[str]:
if not self._tags:
self._tags = set(tag for tag in self.__call_git("tag").split())
return self._tags
def add_tag(self, tag_name: str, target: str) -> None:
if tag_name in self.tags:
self.__call_git("tag", "-d", tag_name)
self.__call_git("tag", tag_name, target)
self._tags.add(tag_name)
def update(self) -> str:
return self.__call_git(
"fetch", "origin", f"{self.branch_name}:{self.branch_name}"
)
@staticmethod
def ticket(message: str) -> Optional[str]:
try:
result = re.search(r"(DM-\d+)", message, re.IGNORECASE).group(1) # type: ignore[union-attr]
except AttributeError:
logging.debug(message)
result = None
return result
@classmethod
def materialize(
cls, url: str, target_dir: str, *, branch_name: str = "master"
) -> "Repository":
# Try to re-use an on disk repository. However, if it's corrupted,
# blow it away and clone a fresh copy.
repo_dir_name = re.sub(r".git$", "", url.split("/")[-1])
clone_path = os.path.join(target_dir, repo_dir_name)
os.makedirs(target_dir, exist_ok=True)
def clone() -> None:
"""Clone repo at url into a subdirectory target_dir, clobbering
pre-existing content, returning the resulting path.
"""
call_git(
"clone",
"--bare",
"--branch",
branch_name,
url,
repo_dir_name,
cwd=target_dir,
)
if not os.path.exists(clone_path):
clone()
repo = cls(clone_path, branch_name=branch_name)
try:
repo.update()
except subprocess.CalledProcessError as e:
logging.warn(f"Unable to update {clone_path}: {e}; {e.output}; resetting")
shutil.rmtree(clone_path)
clone()
repo = cls(clone_path, branch_name=branch_name)
repo.update()
return repo
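# Example usage (sketch; the URL and target directory below are illustrative, not taken from this project):
#   repo = Repository.materialize("https://github.com/example/some-repo.git", "/tmp/changelog-repos", branch_name="main")
#   print(sorted(repo.tags))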
| 32.248 | 104 | 0.573059 |
4a1bb809b47bc6c5e01f998daf103fe39d51d9bf
| 734 |
py
|
Python
|
CountMillionCharacters-Variations/variation1.py
|
thiru15/Python-1
|
f276f34a77579e552ca2adb3b5a3a1b0f3ebddee
|
[
"MIT"
] | 12 |
2019-12-27T07:32:35.000Z
|
2022-02-20T20:15:08.000Z
|
CountMillionCharacters-Variations/variation1.py
|
DiasNikita/Python
|
f276f34a77579e552ca2adb3b5a3a1b0f3ebddee
|
[
"MIT"
] | 1 |
2020-10-01T14:14:31.000Z
|
2020-10-01T14:14:58.000Z
|
CountMillionCharacters-Variations/variation1.py
|
DiasNikita/Python
|
f276f34a77579e552ca2adb3b5a3a1b0f3ebddee
|
[
"MIT"
] | 22 |
2019-10-06T20:30:25.000Z
|
2022-01-11T16:31:14.000Z
|
try:
input = raw_input
except NameError:
pass
def count_chars(filename):
count = {}
with open(filename) as info: # inputFile Replaced with filename
readfile = info.read()
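    # the loop below is equivalent to collections.Counter(readfile.upper()); kept explicit for clarity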
for character in readfile.upper():
count[character] = count.get(character, 0) + 1
return count
def main():
    is_exist = True
    # Try to open the file if it exists; otherwise catch the exception and ask again
while(is_exist):
try:
inputFile = input("File Name / (0)exit : ").strip()
if inputFile == "0":
break
print(count_chars(inputFile))
except FileNotFoundError:
print("File not found...Try again!")
if __name__ == '__main__':
main()
| 22.9375 | 68 | 0.584469 |
4a1bb87547952d4c4b7386524b075668da1c21bb
| 5,703 |
py
|
Python
|
roblox/bases/baseuniverse.py
|
Boegie19/ro.py
|
68b8acb506ab3057670168434fbf90ea0d05943f
|
[
"MIT"
] | null | null | null |
roblox/bases/baseuniverse.py
|
Boegie19/ro.py
|
68b8acb506ab3057670168434fbf90ea0d05943f
|
[
"MIT"
] | null | null | null |
roblox/bases/baseuniverse.py
|
Boegie19/ro.py
|
68b8acb506ab3057670168434fbf90ea0d05943f
|
[
"MIT"
] | 1 |
2021-06-28T14:57:39.000Z
|
2021-06-28T14:57:39.000Z
|
"""
This file contains the BaseUniverse object, which represents a Roblox universe ID.
It also contains the UniverseLiveStats object, which represents a universe's live stats.
"""
from __future__ import annotations
from typing import TYPE_CHECKING, Dict, List
if TYPE_CHECKING:
from ..badges import Badge
from .baseitem import BaseItem
from ..utilities.shared import ClientSharedObject
from ..utilities.iterators import PageIterator, SortOrder
from ..gamepasses import GamePass
from ..sociallinks import UniverseSocialLink
class UniverseLiveStats:
"""
Represents a universe's live stats.
Attributes:
total_player_count: The amount of players present in this universe's subplaces.
game_count: The amount of active servers for this universe's subplaces.
player_counts_by_device_type: A dictionary where the keys are device types and the values are the amount of
this universe's subplace's active players which are on that device type.
"""
def __init__(self, data: dict):
self.total_player_count: int = data["totalPlayerCount"]
self.game_count: int = data["gameCount"]
self.player_counts_by_device_type: Dict[str, int] = data["playerCountsByDeviceType"]
def _universe_badges_handler(shared: ClientSharedObject, data: dict) -> Badge:
# inline imports are used here, sorry
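    # (presumably to avoid a circular import at module load time, since Badge is only imported for type checking above)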
from ..badges import Badge
return Badge(shared=shared, data=data)
class BaseUniverse(BaseItem):
"""
Represents a Roblox universe ID.
Attributes:
_shared: The ClientSharedObject.
id: The universe ID.
"""
def __init__(self, shared: ClientSharedObject, universe_id: int):
"""
Arguments:
shared: The ClientSharedObject.
universe_id: The universe ID.
"""
self._shared: ClientSharedObject = shared
self.id: int = universe_id
async def get_favorite_count(self) -> int:
"""
Grabs the universe's favorite count.
Returns:
The universe's favorite count.
"""
favorite_count_response = await self._shared.requests.get(
url=self._shared.url_generator.get_url("games", f"v1/games/{self.id}/favorites/count")
)
favorite_count_data = favorite_count_response.json()
return favorite_count_data["favoritesCount"]
async def is_favorited(self) -> bool:
"""
Grabs the authenticated user's favorite status for this game.
Returns:
Whether the authenticated user has favorited this game.
"""
is_favorited_response = await self._shared.requests.get(
url=self._shared.url_generator.get_url("games", f"v1/games/{self.id}/favorites")
)
is_favorited_data = is_favorited_response.json()
return is_favorited_data["isFavorited"]
def get_badges(self, page_size: int = 10, sort_order: SortOrder = SortOrder.Ascending,
max_items: int = None) -> PageIterator:
"""
Gets the universe's badges.
Arguments:
page_size: How many members should be returned for each page.
sort_order: Order in which data should be grabbed.
max_items: The maximum items to return when looping through this object.
Returns:
A PageIterator containing this universe's badges.
"""
return PageIterator(
shared=self._shared,
url=self._shared.url_generator.get_url("badges", f"v1/universes/{self.id}/badges"),
page_size=page_size,
sort_order=sort_order,
max_items=max_items,
handler=_universe_badges_handler,
)
async def get_live_stats(self) -> UniverseLiveStats:
"""
Gets the universe's live stats.
This data does not update live. These are just the stats that are shown on the website's live stats display.
Returns:
The universe's live stats.
"""
stats_response = await self._shared.requests.get(
url=self._shared.url_generator.get_url("develop", f"v1/universes/{self.id}/live-stats")
)
stats_data = stats_response.json()
return UniverseLiveStats(data=stats_data)
def get_gamepasses(self, page_size: int = 10, sort_order: SortOrder = SortOrder.Ascending,
max_items: int = None) -> PageIterator:
"""
Gets the universe's gamepasses.
Arguments:
page_size: How many members should be returned for each page.
sort_order: Order in which data should be grabbed.
max_items: The maximum items to return when looping through this object.
Returns:
A PageIterator containing the universe's gamepasses.
"""
return PageIterator(
shared=self._shared,
url=self._shared.url_generator.get_url("games", f"v1/games/{self.id}/game-passes"),
page_size=page_size,
sort_order=sort_order,
max_items=max_items,
handler=lambda shared, data: GamePass(shared, data),
)
async def get_social_links(self) -> List[UniverseSocialLink]:
"""
Gets the universe's social links.
Returns:
A list of the universe's social links.
"""
links_response = await self._shared.requests.get(
url=self._shared.url_generator.get_url("games", f"v1/games/{self.id}/social-links/list")
)
links_data = links_response.json()["data"]
return [UniverseSocialLink(shared=self._shared, data=link_data) for link_data in links_data]
| 34.98773 | 116 | 0.649833 |
4a1bb92180789809a2104b418ab394c7c5d123c3
| 110 |
py
|
Python
|
examples/compare.py
|
tebzzz/archon
|
5276e06ccc2e08e034473108a4dd8e36b6ba6078
|
[
"MIT"
] | 78 |
2018-09-20T12:50:42.000Z
|
2021-12-17T11:44:03.000Z
|
examples/compare.py
|
tebzzz/archon
|
5276e06ccc2e08e034473108a4dd8e36b6ba6078
|
[
"MIT"
] | 49 |
2018-11-18T16:58:27.000Z
|
2019-09-29T18:41:51.000Z
|
examples/compare.py
|
tebzzz/archon
|
5276e06ccc2e08e034473108a4dd8e36b6ba6078
|
[
"MIT"
] | 21 |
2018-09-20T11:31:50.000Z
|
2021-10-19T17:01:47.000Z
|
import archon.feeds.cryptocompare as cryptocompare
e = "Kucoin"
h = cryptocompare.get_hist("LTC","BTC",e)
| 18.333333 | 50 | 0.736364 |
4a1bb92ba321da397a256c3423be0478285ba832
| 15,216 |
py
|
Python
|
sympy/physics/vector/point.py
|
Corwinpro/sympy
|
a2efa19333fa0b3b18db872efabbb46248cde63b
|
[
"BSD-3-Clause"
] | 15 |
2020-06-29T08:33:39.000Z
|
2022-02-12T00:28:51.000Z
|
sympy/physics/vector/point.py
|
Corwinpro/sympy
|
a2efa19333fa0b3b18db872efabbb46248cde63b
|
[
"BSD-3-Clause"
] | 13 |
2020-03-24T17:53:51.000Z
|
2022-02-10T20:01:14.000Z
|
sympy/physics/vector/point.py
|
Corwinpro/sympy
|
a2efa19333fa0b3b18db872efabbb46248cde63b
|
[
"BSD-3-Clause"
] | 11 |
2020-06-29T08:40:24.000Z
|
2022-02-24T17:39:16.000Z
|
from __future__ import print_function, division
from sympy.core.compatibility import range, string_types
from .vector import Vector, _check_vector
from .frame import _check_frame
__all__ = ['Point']
class Point(object):
"""This object represents a point in a dynamic system.
It stores the: position, velocity, and acceleration of a point.
The position is a vector defined as the vector distance from a parent
point to this point.
"""
def __init__(self, name):
"""Initialization of a Point object. """
self.name = name
self._pos_dict = {}
self._vel_dict = {}
self._acc_dict = {}
self._pdlist = [self._pos_dict, self._vel_dict, self._acc_dict]
def __str__(self):
return self.name
__repr__ = __str__
def _check_point(self, other):
if not isinstance(other, Point):
raise TypeError('A Point must be supplied')
def _pdict_list(self, other, num):
"""Creates a list from self to other using _dcm_dict. """
outlist = [[self]]
oldlist = [[]]
while outlist != oldlist:
oldlist = outlist[:]
for i, v in enumerate(outlist):
templist = v[-1]._pdlist[num].keys()
for i2, v2 in enumerate(templist):
if not v.__contains__(v2):
littletemplist = v + [v2]
if not outlist.__contains__(littletemplist):
outlist.append(littletemplist)
for i, v in enumerate(oldlist):
if v[-1] != other:
outlist.remove(v)
outlist.sort(key=len)
if len(outlist) != 0:
return outlist[0]
raise ValueError('No Connecting Path found between ' + other.name +
' and ' + self.name)
def a1pt_theory(self, otherpoint, outframe, interframe):
"""Sets the acceleration of this point with the 1-point theory.
The 1-point theory for point acceleration looks like this:
^N a^P = ^B a^P + ^N a^O + ^N alpha^B x r^OP + ^N omega^B x (^N omega^B
x r^OP) + 2 ^N omega^B x ^B v^P
where O is a point fixed in B, P is a point moving in B, and B is
rotating in frame N.
Parameters
==========
otherpoint : Point
The first point of the 1-point theory (O)
outframe : ReferenceFrame
The frame we want this point's acceleration defined in (N)
        interframe : ReferenceFrame
The intermediate frame in this calculation (B)
Examples
========
>>> from sympy.physics.vector import Point, ReferenceFrame
>>> from sympy.physics.vector import Vector, dynamicsymbols
>>> q = dynamicsymbols('q')
>>> q2 = dynamicsymbols('q2')
>>> qd = dynamicsymbols('q', 1)
>>> q2d = dynamicsymbols('q2', 1)
>>> N = ReferenceFrame('N')
>>> B = ReferenceFrame('B')
>>> B.set_ang_vel(N, 5 * B.y)
>>> O = Point('O')
>>> P = O.locatenew('P', q * B.x)
>>> P.set_vel(B, qd * B.x + q2d * B.y)
>>> O.set_vel(N, 0)
>>> P.a1pt_theory(O, N, B)
(-25*q + q'')*B.x + q2''*B.y - 10*q'*B.z
"""
_check_frame(outframe)
_check_frame(interframe)
self._check_point(otherpoint)
dist = self.pos_from(otherpoint)
v = self.vel(interframe)
a1 = otherpoint.acc(outframe)
a2 = self.acc(interframe)
omega = interframe.ang_vel_in(outframe)
alpha = interframe.ang_acc_in(outframe)
self.set_acc(outframe, a2 + 2 * (omega ^ v) + a1 + (alpha ^ dist) +
(omega ^ (omega ^ dist)))
return self.acc(outframe)
def a2pt_theory(self, otherpoint, outframe, fixedframe):
"""Sets the acceleration of this point with the 2-point theory.
The 2-point theory for point acceleration looks like this:
^N a^P = ^N a^O + ^N alpha^B x r^OP + ^N omega^B x (^N omega^B x r^OP)
where O and P are both points fixed in frame B, which is rotating in
frame N.
Parameters
==========
otherpoint : Point
The first point of the 2-point theory (O)
outframe : ReferenceFrame
The frame we want this point's acceleration defined in (N)
fixedframe : ReferenceFrame
The frame in which both points are fixed (B)
Examples
========
>>> from sympy.physics.vector import Point, ReferenceFrame, dynamicsymbols
>>> q = dynamicsymbols('q')
>>> qd = dynamicsymbols('q', 1)
>>> N = ReferenceFrame('N')
>>> B = N.orientnew('B', 'Axis', [q, N.z])
>>> O = Point('O')
>>> P = O.locatenew('P', 10 * B.x)
>>> O.set_vel(N, 5 * N.x)
>>> P.a2pt_theory(O, N, B)
- 10*q'**2*B.x + 10*q''*B.y
"""
_check_frame(outframe)
_check_frame(fixedframe)
self._check_point(otherpoint)
dist = self.pos_from(otherpoint)
a = otherpoint.acc(outframe)
omega = fixedframe.ang_vel_in(outframe)
alpha = fixedframe.ang_acc_in(outframe)
self.set_acc(outframe, a + (alpha ^ dist) + (omega ^ (omega ^ dist)))
return self.acc(outframe)
def acc(self, frame):
"""The acceleration Vector of this Point in a ReferenceFrame.
Parameters
==========
frame : ReferenceFrame
The frame in which the returned acceleration vector will be defined in
Examples
========
>>> from sympy.physics.vector import Point, ReferenceFrame
>>> N = ReferenceFrame('N')
>>> p1 = Point('p1')
>>> p1.set_acc(N, 10 * N.x)
>>> p1.acc(N)
10*N.x
"""
_check_frame(frame)
if not (frame in self._acc_dict):
if self._vel_dict[frame] != 0:
return (self._vel_dict[frame]).dt(frame)
else:
return Vector(0)
return self._acc_dict[frame]
def locatenew(self, name, value):
"""Creates a new point with a position defined from this point.
Parameters
==========
name : str
The name for the new point
value : Vector
The position of the new point relative to this point
Examples
========
>>> from sympy.physics.vector import ReferenceFrame, Point
>>> N = ReferenceFrame('N')
>>> P1 = Point('P1')
>>> P2 = P1.locatenew('P2', 10 * N.x)
"""
if not isinstance(name, string_types):
raise TypeError('Must supply a valid name')
if value == 0:
value = Vector(0)
value = _check_vector(value)
p = Point(name)
p.set_pos(self, value)
self.set_pos(p, -value)
return p
def pos_from(self, otherpoint):
"""Returns a Vector distance between this Point and the other Point.
Parameters
==========
otherpoint : Point
The otherpoint we are locating this one relative to
Examples
========
>>> from sympy.physics.vector import Point, ReferenceFrame
>>> N = ReferenceFrame('N')
>>> p1 = Point('p1')
>>> p2 = Point('p2')
>>> p1.set_pos(p2, 10 * N.x)
>>> p1.pos_from(p2)
10*N.x
"""
outvec = Vector(0)
plist = self._pdict_list(otherpoint, 0)
for i in range(len(plist) - 1):
outvec += plist[i]._pos_dict[plist[i + 1]]
return outvec
def set_acc(self, frame, value):
"""Used to set the acceleration of this Point in a ReferenceFrame.
Parameters
==========
frame : ReferenceFrame
The frame in which this point's acceleration is defined
value : Vector
The vector value of this point's acceleration in the frame
Examples
========
>>> from sympy.physics.vector import Point, ReferenceFrame
>>> N = ReferenceFrame('N')
>>> p1 = Point('p1')
>>> p1.set_acc(N, 10 * N.x)
>>> p1.acc(N)
10*N.x
"""
if value == 0:
value = Vector(0)
value = _check_vector(value)
_check_frame(frame)
self._acc_dict.update({frame: value})
def set_pos(self, otherpoint, value):
"""Used to set the position of this point w.r.t. another point.
Parameters
==========
otherpoint : Point
The other point which this point's location is defined relative to
value : Vector
The vector which defines the location of this point
Examples
========
>>> from sympy.physics.vector import Point, ReferenceFrame
>>> N = ReferenceFrame('N')
>>> p1 = Point('p1')
>>> p2 = Point('p2')
>>> p1.set_pos(p2, 10 * N.x)
>>> p1.pos_from(p2)
10*N.x
"""
if value == 0:
value = Vector(0)
value = _check_vector(value)
self._check_point(otherpoint)
self._pos_dict.update({otherpoint: value})
otherpoint._pos_dict.update({self: -value})
def set_vel(self, frame, value):
"""Sets the velocity Vector of this Point in a ReferenceFrame.
Parameters
==========
frame : ReferenceFrame
The frame in which this point's velocity is defined
value : Vector
The vector value of this point's velocity in the frame
Examples
========
>>> from sympy.physics.vector import Point, ReferenceFrame
>>> N = ReferenceFrame('N')
>>> p1 = Point('p1')
>>> p1.set_vel(N, 10 * N.x)
>>> p1.vel(N)
10*N.x
"""
if value == 0:
value = Vector(0)
value = _check_vector(value)
_check_frame(frame)
self._vel_dict.update({frame: value})
def v1pt_theory(self, otherpoint, outframe, interframe):
"""Sets the velocity of this point with the 1-point theory.
The 1-point theory for point velocity looks like this:
^N v^P = ^B v^P + ^N v^O + ^N omega^B x r^OP
where O is a point fixed in B, P is a point moving in B, and B is
rotating in frame N.
Parameters
==========
otherpoint : Point
The first point of the 2-point theory (O)
outframe : ReferenceFrame
The frame we want this point's velocity defined in (N)
interframe : ReferenceFrame
The intermediate frame in this calculation (B)
Examples
========
>>> from sympy.physics.vector import Point, ReferenceFrame
>>> from sympy.physics.vector import Vector, dynamicsymbols
>>> q = dynamicsymbols('q')
>>> q2 = dynamicsymbols('q2')
>>> qd = dynamicsymbols('q', 1)
>>> q2d = dynamicsymbols('q2', 1)
>>> N = ReferenceFrame('N')
>>> B = ReferenceFrame('B')
>>> B.set_ang_vel(N, 5 * B.y)
>>> O = Point('O')
>>> P = O.locatenew('P', q * B.x)
>>> P.set_vel(B, qd * B.x + q2d * B.y)
>>> O.set_vel(N, 0)
>>> P.v1pt_theory(O, N, B)
q'*B.x + q2'*B.y - 5*q*B.z
"""
_check_frame(outframe)
_check_frame(interframe)
self._check_point(otherpoint)
dist = self.pos_from(otherpoint)
v1 = self.vel(interframe)
v2 = otherpoint.vel(outframe)
omega = interframe.ang_vel_in(outframe)
self.set_vel(outframe, v1 + v2 + (omega ^ dist))
return self.vel(outframe)
def v2pt_theory(self, otherpoint, outframe, fixedframe):
"""Sets the velocity of this point with the 2-point theory.
The 2-point theory for point velocity looks like this:
^N v^P = ^N v^O + ^N omega^B x r^OP
where O and P are both points fixed in frame B, which is rotating in
frame N.
Parameters
==========
otherpoint : Point
The first point of the 2-point theory (O)
outframe : ReferenceFrame
The frame we want this point's velocity defined in (N)
fixedframe : ReferenceFrame
The frame in which both points are fixed (B)
Examples
========
>>> from sympy.physics.vector import Point, ReferenceFrame, dynamicsymbols
>>> q = dynamicsymbols('q')
>>> qd = dynamicsymbols('q', 1)
>>> N = ReferenceFrame('N')
>>> B = N.orientnew('B', 'Axis', [q, N.z])
>>> O = Point('O')
>>> P = O.locatenew('P', 10 * B.x)
>>> O.set_vel(N, 5 * N.x)
>>> P.v2pt_theory(O, N, B)
5*N.x + 10*q'*B.y
"""
_check_frame(outframe)
_check_frame(fixedframe)
self._check_point(otherpoint)
dist = self.pos_from(otherpoint)
v = otherpoint.vel(outframe)
omega = fixedframe.ang_vel_in(outframe)
self.set_vel(outframe, v + (omega ^ dist))
return self.vel(outframe)
def vel(self, frame):
"""The velocity Vector of this Point in the ReferenceFrame.
Parameters
==========
frame : ReferenceFrame
The frame in which the returned velocity vector will be defined in
Examples
========
>>> from sympy.physics.vector import Point, ReferenceFrame
>>> N = ReferenceFrame('N')
>>> p1 = Point('p1')
>>> p1.set_vel(N, 10 * N.x)
>>> p1.vel(N)
10*N.x
"""
_check_frame(frame)
if not (frame in self._vel_dict):
raise ValueError('Velocity of point ' + self.name + ' has not been'
' defined in ReferenceFrame ' + frame.name)
return self._vel_dict[frame]
def partial_velocity(self, frame, *gen_speeds):
"""Returns the partial velocities of the linear velocity vector of this
point in the given frame with respect to one or more provided
generalized speeds.
Parameters
==========
frame : ReferenceFrame
The frame with which the velocity is defined in.
gen_speeds : functions of time
The generalized speeds.
Returns
=======
partial_velocities : tuple of Vector
The partial velocity vectors corresponding to the provided
generalized speeds.
Examples
========
>>> from sympy.physics.vector import ReferenceFrame, Point
>>> from sympy.physics.vector import dynamicsymbols
>>> N = ReferenceFrame('N')
>>> A = ReferenceFrame('A')
>>> p = Point('p')
>>> u1, u2 = dynamicsymbols('u1, u2')
>>> p.set_vel(N, u1 * N.x + u2 * A.y)
>>> p.partial_velocity(N, u1)
N.x
>>> p.partial_velocity(N, u1, u2)
(N.x, A.y)
"""
partials = [self.vel(frame).diff(speed, frame, var_in_dcm=False) for
speed in gen_speeds]
if len(partials) == 1:
return partials[0]
else:
return tuple(partials)
| 30.492986 | 82 | 0.541732 |
4a1bbb894ac6a047c11cb8ce7e34177f1bef1449
| 615 |
py
|
Python
|
setup.py
|
TravisJRCain/lambdata-zmurray
|
61a906c896ec629e2cd486b300b04921159840da
|
[
"MIT"
] | null | null | null |
setup.py
|
TravisJRCain/lambdata-zmurray
|
61a906c896ec629e2cd486b300b04921159840da
|
[
"MIT"
] | null | null | null |
setup.py
|
TravisJRCain/lambdata-zmurray
|
61a906c896ec629e2cd486b300b04921159840da
|
[
"MIT"
] | null | null | null |
# setup.py file
from setuptools import find_packages, setup
with open("README.md", "r") as fh:
long_description = fh.read()
setup(
name="lambdata-zmurray", # the name that you will install via pip
version="1.2",
author="Zack Murray",
author_email="zachery.murray@gmail.com",
description="A short description",
long_description=long_description,
long_description_content_type="text/markdown", # required if using a md file for long desc
license="MIT",
url="https://github.com/zack-murray/lambdata-zmurray",
#keywords="",
packages=find_packages() # ["my_lambdata"]
)
| 30.75 | 94 | 0.700813 |
4a1bbc0956f7996313736d93816db3f8f5b0f092
| 1,787 |
py
|
Python
|
tests/bugs/core_2067_test.py
|
FirebirdSQL/firebird-qa
|
96af2def7f905a06f178e2a80a2c8be4a4b44782
|
[
"MIT"
] | 1 |
2022-02-05T11:37:13.000Z
|
2022-02-05T11:37:13.000Z
|
tests/bugs/core_2067_test.py
|
FirebirdSQL/firebird-qa
|
96af2def7f905a06f178e2a80a2c8be4a4b44782
|
[
"MIT"
] | 1 |
2021-09-03T11:47:00.000Z
|
2021-09-03T12:42:10.000Z
|
tests/bugs/core_2067_test.py
|
FirebirdSQL/firebird-qa
|
96af2def7f905a06f178e2a80a2c8be4a4b44782
|
[
"MIT"
] | 1 |
2021-06-30T14:14:16.000Z
|
2021-06-30T14:14:16.000Z
|
#coding:utf-8
#
# id: bugs.core_2067
# title: GROUP by and RDB$DB_KEY problems
# decription:
# tracker_id: CORE-2067
# min_versions: []
# versions: 2.5.0
# qmid: None
import pytest
from firebird.qa import db_factory, isql_act, Action
# version: 2.5.0
# resources: None
substitutions_1 = []
init_script_1 = """create table t1 (n integer);
insert into t1 values (1);
insert into t1 values (2);
insert into t1 values (3);
commit;
"""
db_1 = db_factory(page_size=4096, sql_dialect=3, init=init_script_1)
test_script_1 = """-- First problem: it should be invalid to group by n and select rdb$db_key
select n, rdb$db_key from t1 group by n;
-- Second problem: error: column unknown DB_KEY is wrong raised
select n, rdb$db_key from t1 group by 1, 2;
-- Third problem: wrong values for rdb$db_key are returned
select n, t1.rdb$db_key from t1 group by 1, 2;
"""
act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1)
expected_stdout_1 = """
N DB_KEY
============ ================
1 8000000001000000
2 8000000002000000
3 8000000003000000
N DB_KEY
============ ================
1 8000000001000000
2 8000000002000000
3 8000000003000000
"""
expected_stderr_1 = """Statement failed, SQLSTATE = 42000
Dynamic SQL Error
-SQL error code = -104
-Invalid expression in the select list (not contained in either an aggregate function or the GROUP BY clause)
"""
@pytest.mark.version('>=2.5.0')
def test_1(act_1: Action):
act_1.expected_stdout = expected_stdout_1
act_1.expected_stderr = expected_stderr_1
act_1.execute()
assert act_1.clean_stderr == act_1.clean_expected_stderr
assert act_1.clean_stdout == act_1.clean_expected_stdout
| 25.898551 | 109 | 0.672636 |
4a1bbc1d002785f3c7bbae4630b128546892f67d
| 405 |
py
|
Python
|
blog/serializers.py
|
canokay/coblog-backend
|
51854ed2d69f8484877bc9dcc95c19e3aa7d4107
|
[
"MIT"
] | 1 |
2020-12-19T15:55:47.000Z
|
2020-12-19T15:55:47.000Z
|
blog/serializers.py
|
canokay/coblog-backend
|
51854ed2d69f8484877bc9dcc95c19e3aa7d4107
|
[
"MIT"
] | null | null | null |
blog/serializers.py
|
canokay/coblog-backend
|
51854ed2d69f8484877bc9dcc95c19e3aa7d4107
|
[
"MIT"
] | null | null | null |
from rest_framework import serializers
from rest_framework.serializers import ModelSerializer
from blog.models import Blog
class BlogListSerializer(ModelSerializer):
class Meta:
model = Blog
fields = ['id','title','slug','content','is_active']
class BlogDetailSerializer(ModelSerializer):
class Meta:
model = Blog
fields = ['title','slug','content','is_active']
| 27 | 60 | 0.708642 |
4a1bbc2fe91a78228e6b27d66123adad72d4eb6b
| 3,435 |
py
|
Python
|
sdk/communication/azure-mgmt-communication/azure/mgmt/communication/_configuration.py
|
vbarbaresi/azure-sdk-for-python
|
397ba46c51d001ff89c66b170f5576cf8f49c05f
|
[
"MIT"
] | 8 |
2021-01-13T23:44:08.000Z
|
2021-03-17T10:13:36.000Z
|
sdk/communication/azure-mgmt-communication/azure/mgmt/communication/_configuration.py
|
vbarbaresi/azure-sdk-for-python
|
397ba46c51d001ff89c66b170f5576cf8f49c05f
|
[
"MIT"
] | 2 |
2021-11-03T06:10:36.000Z
|
2021-12-01T06:29:39.000Z
|
sdk/communication/azure-mgmt-communication/azure/mgmt/communication/_configuration.py
|
vbarbaresi/azure-sdk-for-python
|
397ba46c51d001ff89c66b170f5576cf8f49c05f
|
[
"MIT"
] | 1 |
2021-05-19T02:55:10.000Z
|
2021-05-19T02:55:10.000Z
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
from azure.core.configuration import Configuration
from azure.core.pipeline import policies
from azure.mgmt.core.policies import ARMHttpLoggingPolicy
from ._version import VERSION
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any
from azure.core.credentials import TokenCredential
class CommunicationServiceManagementClientConfiguration(Configuration):
"""Configuration for CommunicationServiceManagementClient.
Note that all parameters used to create this instance are saved as instance
attributes.
:param credential: Credential needed for the client to connect to Azure.
:type credential: ~azure.core.credentials.TokenCredential
:param subscription_id: Gets subscription ID which uniquely identifies the Microsoft Azure subscription. The subscription ID forms part of the URI for every service call.
:type subscription_id: str
"""
def __init__(
self,
credential, # type: "TokenCredential"
subscription_id, # type: str
**kwargs # type: Any
):
# type: (...) -> None
if credential is None:
raise ValueError("Parameter 'credential' must not be None.")
if subscription_id is None:
raise ValueError("Parameter 'subscription_id' must not be None.")
super(CommunicationServiceManagementClientConfiguration, self).__init__(**kwargs)
self.credential = credential
self.subscription_id = subscription_id
self.api_version = "2020-08-20-preview"
self.credential_scopes = kwargs.pop('credential_scopes', ['https://management.azure.com/.default'])
kwargs.setdefault('sdk_moniker', 'mgmt-communication/{}'.format(VERSION))
self._configure(**kwargs)
def _configure(
self,
**kwargs # type: Any
):
# type: (...) -> None
self.user_agent_policy = kwargs.get('user_agent_policy') or policies.UserAgentPolicy(**kwargs)
self.headers_policy = kwargs.get('headers_policy') or policies.HeadersPolicy(**kwargs)
self.proxy_policy = kwargs.get('proxy_policy') or policies.ProxyPolicy(**kwargs)
self.logging_policy = kwargs.get('logging_policy') or policies.NetworkTraceLoggingPolicy(**kwargs)
self.http_logging_policy = kwargs.get('http_logging_policy') or ARMHttpLoggingPolicy(**kwargs)
self.retry_policy = kwargs.get('retry_policy') or policies.RetryPolicy(**kwargs)
self.custom_hook_policy = kwargs.get('custom_hook_policy') or policies.CustomHookPolicy(**kwargs)
self.redirect_policy = kwargs.get('redirect_policy') or policies.RedirectPolicy(**kwargs)
self.authentication_policy = kwargs.get('authentication_policy')
if self.credential and not self.authentication_policy:
self.authentication_policy = policies.BearerTokenCredentialPolicy(self.credential, *self.credential_scopes, **kwargs)
| 47.708333 | 174 | 0.695488 |
4a1bbcb45603daa705d1b49be50128d8ff576329
| 4,115 |
py
|
Python
|
benchmark/startCirq1544.py
|
UCLA-SEAL/QDiff
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
[
"BSD-3-Clause"
] | null | null | null |
benchmark/startCirq1544.py
|
UCLA-SEAL/QDiff
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
[
"BSD-3-Clause"
] | null | null | null |
benchmark/startCirq1544.py
|
UCLA-SEAL/QDiff
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 5/15/20 4:49 PM
# @File : grover.py
# qubit number=5
# total number=56
import cirq
import cirq.google as cg
from typing import Optional
import sys
from math import log2
import numpy as np
#thatsNoCode
from cirq.contrib.svg import SVGCircuit
# Symbols for the rotation angles in the QAOA circuit.
def make_circuit(n: int, input_qubit):
c = cirq.Circuit() # circuit begin
c.append(cirq.H.on(input_qubit[0])) # number=3
c.append(cirq.H.on(input_qubit[1])) # number=4
c.append(cirq.H.on(input_qubit[2])) # number=5
c.append(cirq.H.on(input_qubit[3])) # number=6
c.append(cirq.H.on(input_qubit[4])) # number=21
for i in range(2):
c.append(cirq.H.on(input_qubit[0])) # number=1
c.append(cirq.H.on(input_qubit[1])) # number=2
c.append(cirq.H.on(input_qubit[2])) # number=7
c.append(cirq.H.on(input_qubit[3])) # number=8
c.append(cirq.H.on(input_qubit[0])) # number=17
c.append(cirq.H.on(input_qubit[1])) # number=18
c.append(cirq.H.on(input_qubit[3])) # number=51
c.append(cirq.H.on(input_qubit[2])) # number=19
c.append(cirq.H.on(input_qubit[3])) # number=20
c.append(cirq.H.on(input_qubit[0])) # number=31
c.append(cirq.CZ.on(input_qubit[1],input_qubit[0])) # number=32
c.append(cirq.H.on(input_qubit[1])) # number=52
c.append(cirq.H.on(input_qubit[0])) # number=33
c.append(cirq.H.on(input_qubit[1])) # number=44
c.append(cirq.CZ.on(input_qubit[0],input_qubit[1])) # number=45
c.append(cirq.H.on(input_qubit[1])) # number=46
c.append(cirq.CNOT.on(input_qubit[0],input_qubit[1])) # number=53
c.append(cirq.X.on(input_qubit[1])) # number=54
c.append(cirq.CNOT.on(input_qubit[0],input_qubit[1])) # number=55
c.append(cirq.H.on(input_qubit[1])) # number=48
c.append(cirq.CZ.on(input_qubit[0],input_qubit[1])) # number=49
c.append(cirq.H.on(input_qubit[1])) # number=50
c.append(cirq.X.on(input_qubit[0])) # number=26
c.append(cirq.CNOT.on(input_qubit[1],input_qubit[0])) # number=27
c.append(cirq.H.on(input_qubit[1])) # number=37
c.append(cirq.CZ.on(input_qubit[0],input_qubit[1])) # number=38
c.append(cirq.H.on(input_qubit[1])) # number=39
c.append(cirq.X.on(input_qubit[1])) # number=35
c.append(cirq.CNOT.on(input_qubit[0],input_qubit[1])) # number=36
c.append(cirq.X.on(input_qubit[2])) # number=11
c.append(cirq.X.on(input_qubit[3])) # number=12
c.append(cirq.CNOT.on(input_qubit[3],input_qubit[2])) # number=43
c.append(cirq.CNOT.on(input_qubit[3],input_qubit[2])) # number=47
c.append(cirq.X.on(input_qubit[0])) # number=13
c.append(cirq.CNOT.on(input_qubit[0],input_qubit[1])) # number=22
c.append(cirq.X.on(input_qubit[1])) # number=23
c.append(cirq.CNOT.on(input_qubit[0],input_qubit[1])) # number=24
c.append(cirq.X.on(input_qubit[2])) # number=15
c.append(cirq.X.on(input_qubit[1])) # number=29
c.append(cirq.Y.on(input_qubit[4])) # number=28
c.append(cirq.X.on(input_qubit[3])) # number=16
# circuit end
c.append(cirq.measure(*input_qubit, key='result'))
return c
def bitstring(bits):
return ''.join(str(int(b)) for b in bits)
if __name__ == '__main__':
qubit_count = 5
input_qubits = [cirq.GridQubit(i, 0) for i in range(qubit_count)]
circuit = make_circuit(qubit_count,input_qubits)
circuit = cg.optimized_for_sycamore(circuit, optimizer_type='sqrt_iswap')
circuit_sample_count =2000
simulator = cirq.Simulator()
result = simulator.run(circuit, repetitions=circuit_sample_count)
frequencies = result.histogram(key='result', fold_func=bitstring)
writefile = open("../data/startCirq1544.csv","w+")
print(format(frequencies),file=writefile)
print("results end", file=writefile)
print(circuit.__len__(), file=writefile)
print(circuit,file=writefile)
writefile.close()
| 39.190476 | 77 | 0.64593 |
4a1bbed8bb2565d4ad0b36fa8cb795ab82feadcb
| 1,074 |
py
|
Python
|
mac/google-cloud-sdk/lib/third_party/requests_oauthlib/compliance_fixes/facebook.py
|
bopopescu/cndw
|
ee432efef88a4351b355f3d6d5350defc7f4246b
|
[
"Apache-2.0"
] | 2 |
2019-11-10T09:17:07.000Z
|
2019-12-18T13:44:08.000Z
|
mac/google-cloud-sdk/lib/third_party/requests_oauthlib/compliance_fixes/facebook.py
|
bopopescu/cndw
|
ee432efef88a4351b355f3d6d5350defc7f4246b
|
[
"Apache-2.0"
] | 11 |
2020-02-29T02:51:12.000Z
|
2022-03-30T23:20:08.000Z
|
mac/google-cloud-sdk/lib/third_party/requests_oauthlib/compliance_fixes/facebook.py
|
bopopescu/cndw
|
ee432efef88a4351b355f3d6d5350defc7f4246b
|
[
"Apache-2.0"
] | 1 |
2020-07-25T18:17:57.000Z
|
2020-07-25T18:17:57.000Z
|
from json import dumps
try:
from urlparse import parse_qsl
except ImportError:
from urllib.parse import parse_qsl
from oauthlib.common import to_unicode
def facebook_compliance_fix(session):
def _compliance_fix(r):
# if Facebook claims to be sending us json, let's trust them.
if 'application/json' in r.headers.get('content-type', {}):
return r
# Facebook returns a content-type of text/plain when sending their
# x-www-form-urlencoded responses, along with a 200. If not, let's
# assume we're getting JSON and bail on the fix.
if 'text/plain' in r.headers.get('content-type',
{}) and r.status_code == 200:
token = dict(parse_qsl(r.text, keep_blank_values=True))
else:
return r
expires = token.get('expires')
if expires is not None:
token['expires_in'] = expires
token['token_type'] = 'Bearer'
r._content = to_unicode(dumps(token)).encode('UTF-8')
return r
session.register_compliance_hook('access_token_response', _compliance_fix)
return session
| 30.685714 | 76 | 0.681564 |
4a1bc02ab810db3e56985f69ba800508c8f8e9c0
| 9,370 |
py
|
Python
|
dragonchain/lib/interfaces/storage_utest.py
|
cheeseandcereal/dragonchain
|
34d34e344b887c2a0eeb591ede2015cc2506a323
|
[
"Apache-2.0"
] | null | null | null |
dragonchain/lib/interfaces/storage_utest.py
|
cheeseandcereal/dragonchain
|
34d34e344b887c2a0eeb591ede2015cc2506a323
|
[
"Apache-2.0"
] | null | null | null |
dragonchain/lib/interfaces/storage_utest.py
|
cheeseandcereal/dragonchain
|
34d34e344b887c2a0eeb591ede2015cc2506a323
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2020 Dragonchain, Inc.
# Licensed under the Apache License, Version 2.0 (the "Apache License")
# with the following modification; you may not use this file except in
# compliance with the Apache License and the following modification to it:
# Section 6. Trademarks. is deleted and replaced with:
# 6. Trademarks. This License does not grant permission to use the trade
# names, trademarks, service marks, or product names of the Licensor
# and its affiliates, except as required to comply with Section 4(c) of
# the License and to reproduce the content of the NOTICE file.
# You may obtain a copy of the Apache License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the Apache License with the above modification is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the Apache License for the specific
# language governing permissions and limitations under the Apache License.
import os
import importlib
import unittest
from unittest.mock import MagicMock, patch
from dragonchain import test_env # noqa: F401
from dragonchain import exceptions
from dragonchain.lib.interfaces import storage
class TestStorageInterfaceImport(unittest.TestCase):
def tearDown(self):
# Make sure we fix the import after the test continuing
os.environ["STORAGE_TYPE"] = "s3"
importlib.reload(storage)
def test_storage_raises_not_implemented_with_bad_storage_type(self):
os.environ["STORAGE_TYPE"] = "testing"
self.assertRaises(NotImplementedError, importlib.reload, storage)
class TestStorageInterface(unittest.TestCase):
def setUp(self):
os.environ["STORAGE_TYPE"] = "s3"
storage.STORAGE_LOCATION = "test"
storage.storage = MagicMock()
storage.redis.cache_get = MagicMock(return_value=None)
storage.redis.cache_put = MagicMock(return_value=None)
storage.redis.cache_delete = MagicMock(return_value=None)
storage.redis.cache_condition = True
def tearDown(self):
importlib.reload(storage)
importlib.reload(storage.redis)
def test_get_calls_storage_get_with_params(self):
storage.get("thing")
storage.storage.get.assert_called_once_with("test", "thing")
def test_get_raises_storage_error(self):
storage.storage.get = MagicMock(side_effect=RuntimeError)
self.assertRaises(exceptions.StorageError, storage.get, "thing")
def test_get_calls_cache_with_correct_params(self):
storage.storage.get = MagicMock(return_value=b"val")
storage.redis.cache_get = MagicMock(return_value=None)
storage.redis.cache_put = MagicMock(return_value=None)
storage.get("thing")
storage.redis.cache_get.assert_called_once_with("thing")
storage.redis.cache_put.assert_called_once_with("thing", b"val", None)
def test_get_raises_not_found(self):
storage.storage.get = MagicMock(side_effect=exceptions.NotFound)
self.assertRaises(exceptions.NotFound, storage.get, "thing")
def test_put_calls_storage_put_with_params(self):
storage.put("thing", b"val")
storage.storage.put.assert_called_once_with("test", "thing", b"val")
def test_put_raises_storage_error(self):
storage.storage.put = MagicMock(side_effect=RuntimeError)
self.assertRaises(exceptions.StorageError, storage.put, "thing", b"val")
def test_put_calls_cache_with_correct_params(self):
storage.put("thing", b"val")
storage.redis.cache_put.assert_called_once_with("thing", b"val", None)
def test_delete_calls_storage_delete_with_params(self):
storage.delete("thing")
storage.storage.delete.assert_called_once_with("test", "thing")
def test_delete_raises_storage_error(self):
storage.storage.delete = MagicMock(side_effect=RuntimeError)
self.assertRaises(exceptions.StorageError, storage.delete, "thing")
def test_delete_calls_cache_with_correct_params(self):
storage.delete("thing")
storage.redis.cache_delete.assert_called_once_with("thing")
def test_list_objects_calls_storage_list_objects_with_params(self):
storage.storage.list_objects = MagicMock()
storage.list_objects("prefix")
storage.storage.list_objects.assert_called_once_with("test", "prefix")
def test_list_objects_throws_storage_error(self):
storage.storage.list_objects = MagicMock(side_effect=RuntimeError)
self.assertRaises(exceptions.StorageError, storage.list_objects, "thing")
def test_does_superkey_exist_calls_storage_does_superkey_exist_with_params(self):
storage.does_superkey_exist("prefix")
storage.storage.does_superkey_exist.assert_called_once_with("test", "prefix")
def test_does_superkey_exist_throws_storage_error(self):
storage.storage.does_superkey_exist = MagicMock(side_effect=RuntimeError)
self.assertRaises(exceptions.StorageError, storage.does_superkey_exist, "thing")
def test_does_object_exist_calls_storage_does_object_exist_with_params(self):
storage.does_object_exist("prefix")
storage.storage.does_object_exist.assert_called_once_with("test", "prefix")
def test_does_object_exist_throws_storage_error(self):
storage.storage.does_object_exist = MagicMock(side_effect=RuntimeError)
self.assertRaises(exceptions.StorageError, storage.does_object_exist, "thing")
def test_put_object_as_json_calls_put_with_correct_params(self):
storage.put = MagicMock()
storage.put_object_as_json("key", {})
storage.put.assert_called_once_with("key", b"{}", None, True)
def test_get_json_from_object_calls_get_with_correct_params(self):
storage.get = MagicMock(return_value=b"{}")
storage.get_json_from_object("key")
storage.get.assert_called_once_with("key", None, True)
def test_get_json_from_object_returns_correct_json(self):
storage.get = MagicMock(return_value=b"{}")
self.assertEqual(storage.get_json_from_object("key"), {})
storage.get_json_from_object("key")
def test_delete_directory_calls_list_objects_with_correct_params(self):
storage.list_objects = MagicMock(return_value=[])
storage.delete_directory("thing")
storage.list_objects.assert_called_once_with("thing")
def test_delete_directory_calls_delete_with_correct_params(self):
storage.list_objects = MagicMock(return_value=["obj"])
storage.delete = MagicMock()
storage.delete_directory("thing")
storage.delete.assert_called_once_with("obj")
def test_delete_directory_calls_delete_directory_with_correct_params(self):
storage.list_objects = MagicMock(return_value=[])
storage.delete_directory("thing")
storage.storage.delete_directory.assert_called_once_with("test", "thing")
def test_delete_directory_raises_storage_exception(self):
storage.list_objects = MagicMock(side_effect=RuntimeError)
self.assertRaises(exceptions.StorageError, storage.delete_directory, "thing")
def test_select_transaction_calls_storage_select_transaction_with_params(self):
storage.storage.select_transaction = MagicMock(return_value={})
storage.select_transaction("block", "txn")
storage.storage.select_transaction.assert_called_once_with("test", "block", "txn")
def test_select_transaction_calls_cache_get_with_params(self):
storage.storage.select_transaction = MagicMock(return_value={})
storage.redis.cache_get = MagicMock(return_value="{}")
storage.select_transaction("block", "txn")
storage.redis.cache_get.assert_called_once_with("block/txn")
def test_select_transaction_returns_correct_value_from_cache(self):
storage.storage.select_transaction = MagicMock(return_value={})
storage.redis.cache_get = MagicMock(return_value="{}")
self.assertEqual(storage.select_transaction("block", "txn"), {})
def test_select_transaction_returns_correct_value_from_storage(self):
storage.storage.select_transaction = MagicMock(return_value={})
self.assertEqual(storage.select_transaction("block", "txn"), {})
def test_select_transaction_calls_cache_put_with_params(self):
storage.storage.select_transaction = MagicMock(return_value={})
storage.select_transaction("block", "txn")
        storage.redis.cache_put.assert_called_once_with("block/txn", "{}", None)
def test_select_transaction_raises_not_found(self):
storage.storage.select_transaction = MagicMock(side_effect=exceptions.NotFound)
self.assertRaises(exceptions.NotFound, storage.select_transaction, "block", "transaction")
def test_select_transaction_raises_storage_error(self):
storage.storage.select_transaction = MagicMock(side_effect=RuntimeError)
self.assertRaises(exceptions.StorageError, storage.select_transaction, "block", "txn")
@patch("time.time", return_value=123)
def test_save_error_message(self, mock_time):
storage.put = MagicMock()
storage.save_error_message("some message")
storage.put.assert_called_once_with("error_testing_123.log", b"some message", should_cache=False)
| 48.051282 | 105 | 0.746638 |
4a1bc0a80e692edd597794f82d1a51f5584a99fa
| 2,869 |
py
|
Python
|
google/cloud/devtools/containeranalysis_v1/types/containeranalysis.py
|
LaudateCorpus1/python-containeranalysis
|
72c88b3f38cf4fde60d8fae7462869857719782b
|
[
"Apache-2.0"
] | null | null | null |
google/cloud/devtools/containeranalysis_v1/types/containeranalysis.py
|
LaudateCorpus1/python-containeranalysis
|
72c88b3f38cf4fde60d8fae7462869857719782b
|
[
"Apache-2.0"
] | null | null | null |
google/cloud/devtools/containeranalysis_v1/types/containeranalysis.py
|
LaudateCorpus1/python-containeranalysis
|
72c88b3f38cf4fde60d8fae7462869857719782b
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
from grafeas.grafeas_v1.types import severity # type: ignore
__protobuf__ = proto.module(
package="google.devtools.containeranalysis.v1",
manifest={
"GetVulnerabilityOccurrencesSummaryRequest",
"VulnerabilityOccurrencesSummary",
},
)
class GetVulnerabilityOccurrencesSummaryRequest(proto.Message):
r"""Request to get a vulnerability summary for some set of
occurrences.
Attributes:
parent (str):
Required. The name of the project to get a vulnerability
summary for in the form of ``projects/[PROJECT_ID]``.
filter (str):
The filter expression.
"""
parent = proto.Field(proto.STRING, number=1,)
filter = proto.Field(proto.STRING, number=2,)
class VulnerabilityOccurrencesSummary(proto.Message):
r"""A summary of how many vulnerability occurrences there are per
resource and severity type.
Attributes:
counts (Sequence[google.cloud.devtools.containeranalysis_v1.types.VulnerabilityOccurrencesSummary.FixableTotalByDigest]):
A listing by resource of the number of
fixable and total vulnerabilities.
"""
class FixableTotalByDigest(proto.Message):
r"""Per resource and severity counts of fixable and total
vulnerabilities.
Attributes:
resource_uri (str):
The affected resource.
severity (grafeas.v1.severity.Severity):
The severity for this count. SEVERITY_UNSPECIFIED indicates
total across all severities.
fixable_count (int):
The number of fixable vulnerabilities
associated with this resource.
total_count (int):
The total number of vulnerabilities
associated with this resource.
"""
resource_uri = proto.Field(proto.STRING, number=1,)
severity = proto.Field(proto.ENUM, number=2, enum=severity.Severity,)
fixable_count = proto.Field(proto.INT64, number=3,)
total_count = proto.Field(proto.INT64, number=4,)
counts = proto.RepeatedField(proto.MESSAGE, number=1, message=FixableTotalByDigest,)
__all__ = tuple(sorted(__protobuf__.manifest))
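# Illustrative sketch (added for clarity, not part of the original module): how a
# caller might construct the request type defined above. The project ID and the
# empty filter string are placeholder assumptions.
#
#   request = GetVulnerabilityOccurrencesSummaryRequest(
#       parent="projects/my-project",
#       filter="",
#   )
#
# The resulting message is typically passed to the ContainerAnalysis client's
# get_vulnerability_occurrences_summary method, which returns a
# VulnerabilityOccurrencesSummary containing per-resource FixableTotalByDigest counts.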
| 34.566265 | 129 | 0.682468 |
4a1bc0e9c2d4a0bf97e27e9bc50d7e615ad0613c
| 511 |
py
|
Python
|
aio-server.py
|
menecio/aiohttp-django-example
|
b7c4142a5bf6dddc7d49224cc160204159f002fc
|
[
"Apache-2.0"
] | 4 |
2020-04-05T10:33:01.000Z
|
2020-07-16T17:27:39.000Z
|
aio-server.py
|
menecio/aiohttp-django-example
|
b7c4142a5bf6dddc7d49224cc160204159f002fc
|
[
"Apache-2.0"
] | null | null | null |
aio-server.py
|
menecio/aiohttp-django-example
|
b7c4142a5bf6dddc7d49224cc160204159f002fc
|
[
"Apache-2.0"
] | 2 |
2020-06-14T20:36:27.000Z
|
2020-11-24T03:39:11.000Z
|
from aiohttp import web
from django import setup
from django.conf import settings
from mymoviedb import settings as my_settings # not the same as django.conf.settings
from movies.routes import movies_app
async def setup_django(app):
settings.configure(
INSTALLED_APPS=my_settings.INSTALLED_APPS,
DATABASES=my_settings.DATABASES)
setup()
app = web.Application()
app.on_startup.append(setup_django)
app.add_subapp('/api/', movies_app)
if __name__ == '__main__':
web.run_app(app)
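# Illustrative sketch (not part of the original file): movies/routes.py is not shown
# here, but an aiohttp sub-application is usually assembled along these lines. The
# handler name and route path below are placeholder assumptions.
#
#   from aiohttp import web
#
#   async def list_movies(request):
#       return web.json_response([])  # Django ORM queries would go here
#
#   movies_app = web.Application()
#   movies_app.router.add_get("/movies", list_movies)
#
# Because setup_django runs in on_startup, Django's ORM is configured before any
# handler in the sub-application is invoked.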
| 24.333333 | 85 | 0.761252 |
4a1bc2f49e44556ecf9eb7f68ba624eb41d85b4b
| 3,542 |
py
|
Python
|
openGaussBase/testcase/TOOLS/INTERNAL_TOOLS/gs_ctl/Opengauss_Function_Tools_gs_ctl_Case0094.py
|
opengauss-mirror/Yat
|
aef107a8304b94e5d99b4f1f36eb46755eb8919e
|
[
"MulanPSL-1.0"
] | null | null | null |
openGaussBase/testcase/TOOLS/INTERNAL_TOOLS/gs_ctl/Opengauss_Function_Tools_gs_ctl_Case0094.py
|
opengauss-mirror/Yat
|
aef107a8304b94e5d99b4f1f36eb46755eb8919e
|
[
"MulanPSL-1.0"
] | null | null | null |
openGaussBase/testcase/TOOLS/INTERNAL_TOOLS/gs_ctl/Opengauss_Function_Tools_gs_ctl_Case0094.py
|
opengauss-mirror/Yat
|
aef107a8304b94e5d99b4f1f36eb46755eb8919e
|
[
"MulanPSL-1.0"
] | null | null | null |
"""
Copyright (c) 2022 Huawei Technologies Co.,Ltd.
openGauss is licensed under Mulan PSL v2.
You can use this software according to the terms and conditions of the Mulan PSL v2.
You may obtain a copy of Mulan PSL v2 at:
http://license.coscl.org.cn/MulanPSL2
THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
See the Mulan PSL v2 for more details.
"""
"""
Case Type   : internal system tools
Case Name   : verify that gs_ctl notify succeeds on the primary node when -D specifies the correct database instance directory
Description :
    1. Start the primary node in pending mode
    2. Check the cluster status and confirm the primary node is in pending state
    3. Run notify on the primary node with the correct database instance directory
    4. Check the cluster status and confirm the primary node is primary
Expect      :
    1. The primary node starts in pending mode successfully
    2. The cluster status query succeeds and the primary node is in pending state
    3. Running notify with the correct database instance directory succeeds
    4. The cluster status shows the primary node as primary
History :
"""
import unittest
from testcase.utils.CommonSH import CommonSH
from testcase.utils.Constant import Constant
from testcase.utils.Logger import Logger
from yat.test import Node
from yat.test import macro
LOG = Logger()
class SystemInternalTools(unittest.TestCase):
def setUp(self):
LOG.info('----this is setup------')
        LOG.info('---Opengauss_Function_Tools_gs_ctl_Case0094 execution started-----')
self.constant = Constant()
self.PrimaryNode = Node('PrimaryDbUser')
self.sh_primary = CommonSH('PrimaryDbUser')
def test_system_internal_tools(self):
        LOG.info('-------If this is a single-node environment, skip the remaining steps and pass-------')
query_cmd = f'''source {macro.DB_ENV_PATH};
gs_om -t status --detail;
'''
LOG.info(query_cmd)
query_msg = self.PrimaryNode.sh(query_cmd).result()
LOG.info(query_msg)
if 'Standby' not in query_msg:
            return 'Single-node environment, remaining steps skipped, passing directly'
else:
self.StandbyNode = Node('Standby1DbUser')
self.sh_standby = CommonSH('Standby1DbUser')
            LOG.info('--------------Start the primary node in pending mode------------------')
query_cmd = f'''source {macro.DB_ENV_PATH};
gs_ctl restart -D {macro.DB_INSTANCE_PATH} -M pending ;
'''
LOG.info(query_cmd)
query_msg = self.PrimaryNode.sh(query_cmd).result()
LOG.info(query_msg)
self.assertIn(self.constant.RESTART_SUCCESS_MSG, query_msg)
            LOG.info('----------------Check the primary node status-------------------')
excute_cmd = f'''source {macro.DB_ENV_PATH};
gs_ctl query -D {macro.DB_INSTANCE_PATH};
'''
LOG.info(excute_cmd)
msg = self.PrimaryNode.sh(excute_cmd).result()
LOG.info(msg)
self.assertIn('Pending', msg)
            LOG.info('---------Run notify on the primary node with the correct database instance directory------')
query_cmd = f'''source {macro.DB_ENV_PATH};
gs_ctl notify -D {macro.DB_INSTANCE_PATH} -M primary ;
'''
LOG.info(query_cmd)
query_msg = self.StandbyNode.sh(query_cmd).result()
LOG.info(query_msg)
status = self.sh_primary.get_db_instance_status()
self.assertTrue(status)
def tearDown(self):
LOG.info('--------------this is tearDown--------------')
        LOG.info('----------------Restore the cluster state------------------')
query_cmd = f'''source {macro.DB_ENV_PATH};
gs_ctl restart -D {macro.DB_INSTANCE_PATH} -M primary ;
'''
LOG.info(query_cmd)
query_msg = self.PrimaryNode.sh(query_cmd).result()
LOG.info(query_msg)
        LOG.info('---Opengauss_Function_Tools_gs_ctl_Case0094 execution finished---')
| 34.72549 | 84 | 0.618577 |
4a1bc331bc96a2041b2715b487b1aa526e30a932
| 5,573 |
py
|
Python
|
section3.py
|
csivitu/TechEx-Flappy-Bird
|
6f8d45d4f098c94aac122944f2e0ba9314dbda8c
|
[
"MIT"
] | null | null | null |
section3.py
|
csivitu/TechEx-Flappy-Bird
|
6f8d45d4f098c94aac122944f2e0ba9314dbda8c
|
[
"MIT"
] | null | null | null |
section3.py
|
csivitu/TechEx-Flappy-Bird
|
6f8d45d4f098c94aac122944f2e0ba9314dbda8c
|
[
"MIT"
] | null | null | null |
import random # For generating random numbers
import sys # We will use sys.exit to exit the program
import pygame
from pygame.locals import * # Basic pygame imports
# Global Variables for the game
FPS = 40 # number of times the frame will be rendered in 1 sec
SCREENWIDTH = 289 # defining width
SCREENHEIGHT = 511 # defining height
SCREEN = pygame.display.set_mode((SCREENWIDTH, SCREENHEIGHT))  # create the display surface with the given width and height
GROUNDY = SCREENHEIGHT * 0.8
GAME_SPRITES = {}
PLAYER = "gallery/sprites/bird.png"
BACKGROUND = "gallery/sprites/background.png"
PIPE = "gallery/sprites/pipe.png"
def welcomeScreen():
"""
Shows welcome images on the screen
"""
playerx = int(SCREENWIDTH / 5)
playery = int((SCREENHEIGHT - GAME_SPRITES["player"].get_height()) / 2)
messagex = int((SCREENWIDTH - GAME_SPRITES["message"].get_width()) / 2)
messagey = int(SCREENHEIGHT * 0.13)
basex = 0
    # Welcome-screen loop: wait for the player to start or quit
while True:
for event in pygame.event.get():
# if user clicks on cross button, close the game
if event.type == QUIT or (event.type == KEYDOWN and event.key == K_ESCAPE):
pygame.quit()
sys.exit()
# If the user presses space or up key, start the game for them
elif event.type == KEYDOWN and (event.key == K_SPACE or event.key == K_UP):
return
else:
SCREEN.blit(GAME_SPRITES["background"], (0, 0))
SCREEN.blit(GAME_SPRITES["player"], (playerx, playery))
SCREEN.blit(GAME_SPRITES["message"], (messagex, messagey))
SCREEN.blit(GAME_SPRITES["base"], (basex, GROUNDY))
pygame.display.update()
FPSCLOCK.tick(FPS)
def mainGame():
playerx = int(SCREENWIDTH / 5)
playery = int(SCREENWIDTH / 2)
basex = 0
# Create 2 pipes for blitting on the screen
newPipe1 = getRandomPipe()
newPipe2 = getRandomPipe()
# my List of upper pipes
upperPipes = [
{"x": SCREENWIDTH + 200, "y": newPipe1[0]["y"]},
{"x": SCREENWIDTH + 200 + (SCREENWIDTH / 2), "y": newPipe2[0]["y"]},
]
# my List of lower pipes
lowerPipes = [
{"x": SCREENWIDTH + 200, "y": newPipe1[1]["y"]},
{"x": SCREENWIDTH + 200 + (SCREENWIDTH / 2), "y": newPipe2[1]["y"]},
]
pipeVelX = -4
playerVelY = -9
playerMaxVelY = 10
playerMinVelY = -8
playerAccY = 1
playerFlapAccv = -8 # velocity while flapping
while True:
for event in pygame.event.get():
if event.type == QUIT or (event.type == KEYDOWN and event.key == K_ESCAPE):
pygame.quit()
sys.exit()
if event.type == KEYDOWN and (event.key == K_SPACE or event.key == K_UP):
if playery > 0: # Player is in the screen
playerVelY = playerFlapAccv
if playerVelY < playerMaxVelY:
playerVelY += playerAccY
print(playerVelY)
playerHeight = GAME_SPRITES["player"].get_height()
playery = playery + min(playerVelY, GROUNDY - playery - playerHeight)
# move pipes to the left
for upperPipe, lowerPipe in zip(upperPipes, lowerPipes):
upperPipe["x"] += pipeVelX
lowerPipe["x"] += pipeVelX
# Add a new pipe when the first is about to cross the leftmost part of the screen
if 0 < upperPipes[0]["x"] < 5:
newpipe = getRandomPipe()
upperPipes.append(newpipe[0])
lowerPipes.append(newpipe[1])
# if the pipe is out of the screen, remove it
if upperPipes[0]["x"] < -GAME_SPRITES["pipe"][0].get_width():
upperPipes.pop(0)
lowerPipes.pop(0)
# Lets blit our sprites now
SCREEN.blit(GAME_SPRITES["background"], (0, 0))
for upperPipe, lowerPipe in zip(upperPipes, lowerPipes):
SCREEN.blit(GAME_SPRITES["pipe"][0], (upperPipe["x"], upperPipe["y"]))
SCREEN.blit(GAME_SPRITES["pipe"][1], (lowerPipe["x"], lowerPipe["y"]))
SCREEN.blit(GAME_SPRITES["base"], (basex, GROUNDY))
SCREEN.blit(GAME_SPRITES["player"], (playerx, playery))
pygame.display.update()
FPSCLOCK.tick(FPS)
def getRandomPipe():
pipeHeight = GAME_SPRITES["pipe"][0].get_height()
fixpoint = int(SCREENHEIGHT / 3)
y2 = random.randint(
int(fixpoint + 0.2 * fixpoint),
int(SCREENHEIGHT - GAME_SPRITES["base"].get_height() - 0.5 * fixpoint),
)
y1 = pipeHeight - y2 + 100
pipex = SCREENWIDTH + 10
    pipe = [{"x": pipex, "y": -y1}, {"x": pipex, "y": y2}]  # upper pipe, lower pipe
return pipe
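# Note on the geometry above (added for clarity): the upper pipe is blitted at
# y = -y1, so its bottom edge sits at pipeHeight - y1 = y2 - 100, i.e. always
# 100 pixels above the top of the lower pipe, whatever random y2 is chosen.
# For example, assuming a pipe sprite 320 px tall and y2 = 250:
# y1 = 320 - 250 + 100 = 170, and the upper pipe's bottom edge lands at
# -170 + 320 = 150, exactly 100 px above the lower pipe.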
pygame.init() # Initialize all pygame's modules
FPSCLOCK = pygame.time.Clock()
pygame.display.set_caption("Flappy Bird CSI")
GAME_SPRITES["message"] = pygame.image.load(
"gallery/sprites/message.png"
).convert_alpha()
GAME_SPRITES["base"] = pygame.image.load("gallery/sprites/base.png").convert_alpha()
GAME_SPRITES["pipe"] = (
pygame.transform.rotate(pygame.image.load(PIPE).convert_alpha(), 180),
pygame.image.load(PIPE).convert_alpha(),
)
GAME_SPRITES["background"] = pygame.image.load(BACKGROUND).convert()
GAME_SPRITES["player"] = pygame.image.load(PLAYER).convert_alpha()
print(GAME_SPRITES["pipe"])
while True:
welcomeScreen() # Shows welcome screen to the user until he presses a button
mainGame()
| 34.83125 | 90 | 0.594653 |
4a1bc3a36525c87113d2378e5927a9dbbbf9ee93
| 1,043 |
py
|
Python
|
manage.py
|
Echie/MealTracker
|
117fcfdfeee76af8829aca558a08675b22097c96
|
[
"MIT"
] | null | null | null |
manage.py
|
Echie/MealTracker
|
117fcfdfeee76af8829aca558a08675b22097c96
|
[
"MIT"
] | null | null | null |
manage.py
|
Echie/MealTracker
|
117fcfdfeee76af8829aca558a08675b22097c96
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
import os
import sys
from pathlib import Path
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "config.settings.local")
try:
from django.core.management import execute_from_command_line
except ImportError:
# The above import may fail for some other reason. Ensure that the
# issue is really that Django is missing to avoid masking other
# exceptions on Python 2.
try:
import django # noqa
except ImportError:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
)
raise
# This allows easy placement of apps within the interior
# mealtracker directory.
current_path = Path(__file__).parent.resolve()
sys.path.append(str(current_path / "mealtracker"))
execute_from_command_line(sys.argv)
| 32.59375 | 77 | 0.659636 |