hexsha (stringlengths 40..40) | size (int64 1..1.03M) | ext (stringclasses 10 values) | lang (stringclasses 1 value) | max_stars_repo_path (stringlengths 3..239) | max_stars_repo_name (stringlengths 5..130) | max_stars_repo_head_hexsha (stringlengths 40..78) | max_stars_repo_licenses (listlengths 1..10) | max_stars_count (int64 1..191k ⌀) | max_stars_repo_stars_event_min_datetime (stringlengths 24..24 ⌀) | max_stars_repo_stars_event_max_datetime (stringlengths 24..24 ⌀) | max_issues_repo_path (stringlengths 3..239) | max_issues_repo_name (stringlengths 5..130) | max_issues_repo_head_hexsha (stringlengths 40..78) | max_issues_repo_licenses (listlengths 1..10) | max_issues_count (int64 1..67k ⌀) | max_issues_repo_issues_event_min_datetime (stringlengths 24..24 ⌀) | max_issues_repo_issues_event_max_datetime (stringlengths 24..24 ⌀) | max_forks_repo_path (stringlengths 3..239) | max_forks_repo_name (stringlengths 5..130) | max_forks_repo_head_hexsha (stringlengths 40..78) | max_forks_repo_licenses (listlengths 1..10) | max_forks_count (int64 1..105k ⌀) | max_forks_repo_forks_event_min_datetime (stringlengths 24..24 ⌀) | max_forks_repo_forks_event_max_datetime (stringlengths 24..24 ⌀) | content (stringlengths 1..1.03M) | avg_line_length (float64 1..958k) | max_line_length (int64 1..1.03M) | alphanum_fraction (float64 0..1)
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
4a1e7379875462f4fe145fc08dd78a3679b1531f | 1,613 | py | Python | examples/concertbot/actions.py | n01deas/rasa | 79f0feeb02919142eb06b8c52da5632f1c25c251 | ["Apache-2.0"] | 5 | 2019-06-06T08:59:15.000Z | 2020-01-19T10:56:45.000Z | examples/concertbot/actions.py | RakibulAsheeque/rasa | 7d3804cd081c73d78ab5e973f95a55845eed1e89 | ["Apache-2.0"] | 21 | 2019-12-16T17:37:54.000Z | 2020-07-06T06:19:04.000Z | examples/concertbot/actions.py | RakibulAsheeque/rasa | 7d3804cd081c73d78ab5e973f95a55845eed1e89 | ["Apache-2.0"] | 4 | 2019-05-19T21:19:32.000Z | 2021-01-06T14:26:37.000Z |
from rasa_sdk import Action
from rasa_sdk.events import SlotSet
class ActionSearchConcerts(Action):
def name(self):
return "action_search_concerts"
def run(self, dispatcher, tracker, domain):
concerts = [
{"artist": "Foo Fighters", "reviews": 4.5},
{"artist": "Katy Perry", "reviews": 5.0},
]
description = ", ".join([c["artist"] for c in concerts])
dispatcher.utter_message("{}".format(description))
return [SlotSet("concerts", concerts)]
class ActionSearchVenues(Action):
def name(self):
return "action_search_venues"
def run(self, dispatcher, tracker, domain):
venues = [
{"name": "Big Arena", "reviews": 4.5},
{"name": "Rock Cellar", "reviews": 5.0},
]
dispatcher.utter_message("here are some venues I found")
description = ", ".join([c["name"] for c in venues])
dispatcher.utter_message("{}".format(description))
return [SlotSet("venues", venues)]
class ActionShowConcertReviews(Action):
def name(self):
return "action_show_concert_reviews"
def run(self, dispatcher, tracker, domain):
concerts = tracker.get_slot("concerts")
dispatcher.utter_message("concerts from slots: {}".format(concerts))
return []
class ActionShowVenueReviews(Action):
def name(self):
return "action_show_venue_reviews"
def run(self, dispatcher, tracker, domain):
venues = tracker.get_slot("venues")
dispatcher.utter_message("venues from slots: {}".format(venues))
return []
| 31.019231 | 76 | 0.624303 |
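The actions in the file above all follow the same `rasa_sdk` contract: `run()` reads slots from a tracker, sends text through a dispatcher, and returns a list of events. A minimal, dependency-free sketch of that contract with stub dispatcher and tracker objects (the stubs are illustrative and not part of the Rasa SDK):

```python
class StubDispatcher:
    """Stands in for rasa_sdk's CollectingDispatcher in this sketch."""
    def __init__(self):
        self.messages = []

    def utter_message(self, text):
        self.messages.append(text)


class StubTracker:
    """Stands in for the conversation tracker; only slot access is needed here."""
    def __init__(self, slots):
        self._slots = dict(slots)

    def get_slot(self, name):
        return self._slots.get(name)


# Exercise the same contract as ActionShowConcertReviews.run() above.
dispatcher = StubDispatcher()
tracker = StubTracker({"concerts": [{"artist": "Foo Fighters", "reviews": 4.5}]})
dispatcher.utter_message("concerts from slots: {}".format(tracker.get_slot("concerts")))
print(dispatcher.messages)
```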
4a1e73a18bf1a8348840ce2fc4a605d83c9d9b46 | 10,345 | py | Python | 5-pendulum/1-ddpg/ddpg_demo.py | buyizhiyou/reinforcement-learning | 71d0fad7866ba7394eed9f7a5df7c1f1b8376398 | ["MIT"] | 6 | 2019-02-22T13:06:25.000Z | 2022-02-20T14:01:37.000Z | 5-pendulum/1-ddpg/ddpg_demo.py | buyizhiyou/reinforcement-learning | 71d0fad7866ba7394eed9f7a5df7c1f1b8376398 | ["MIT"] | 10 | 2020-09-25T23:01:24.000Z | 2022-03-11T23:32:19.000Z | 5-pendulum/1-ddpg/ddpg_demo.py | buyizhiyou/reinforcement-learning | 71d0fad7866ba7394eed9f7a5df7c1f1b8376398 | ["MIT"] | 2 | 2019-02-22T13:06:27.000Z | 2020-10-30T06:38:27.000Z |
"""
Deep Deterministic Policy Gradient (DDPG), Reinforcement Learning.
DDPG is Actor Critic based algorithm.
Pendulum example.
View more on my tutorial page: https://morvanzhou.github.io/tutorials/
Using:
tensorflow 1.0
gym 0.8.0
"""
import tensorflow as tf
import numpy as np
import gym
import time
np.random.seed(1)
tf.set_random_seed(1)
##################### hyper parameters ####################
MAX_EPISODES = 200
MAX_EP_STEPS = 200
lr_a = 0.001 # learning rate for actor
lr_c = 0.001 # learning rate for critic
gamma = 0.9 # reward discount
REPLACEMENT = [
dict(name='soft', tau=0.01),
dict(name='hard', rep_iter_a=600, rep_iter_c=500)
][0] # you can try different target replacement strategies
MEMORY_CAPACITY = 10000
BATCH_SIZE = 32
RENDER = True
OUTPUT_GRAPH = True
ENV_NAME = 'Pendulum-v0'
############################### Actor ####################################
class Actor(object):
def __init__(self, sess, action_dim, action_bound, learning_rate, replacement):
self.sess = sess
self.a_dim = action_dim
self.action_bound = action_bound
self.lr = learning_rate
self.replacement = replacement
self.t_replace_counter = 0
with tf.variable_scope('Actor'):
# this net is updated at every training step (evaluation net)
# input s, output a
self.a = self._build_net(S, scope='eval_net', trainable=True)
## this net's parameters are updated slowly; it is used to predict the action (target net)
# input s_, output a, get a_ for critic
self.a_ = self._build_net(S_, scope='target_net', trainable=False)
self.e_params = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='Actor/eval_net')
self.t_params = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='Actor/target_net')
if self.replacement['name'] == 'hard':
self.t_replace_counter = 0
self.hard_replace = [tf.assign(t, e) for t, e in zip(self.t_params, self.e_params)]
else:
self.soft_replace = [tf.assign(t, (1 - self.replacement['tau']) * t + self.replacement['tau'] * e)
for t, e in zip(self.t_params, self.e_params)]
def _build_net(self, s, scope, trainable):  # network that maps a state to an action
with tf.variable_scope(scope):
init_w = tf.random_normal_initializer(0., 0.3)
init_b = tf.constant_initializer(0.1)
net = tf.layers.dense(s, 30, activation=tf.nn.relu,
kernel_initializer=init_w, bias_initializer=init_b, name='l1',
trainable=trainable)
with tf.variable_scope('a'):
actions = tf.layers.dense(net, self.a_dim, activation=tf.nn.tanh, kernel_initializer=init_w,
bias_initializer=init_b, name='a', trainable=trainable)
scaled_a = tf.multiply(actions, self.action_bound, name='scaled_a') # Scale output to -action_bound to action_bound
return scaled_a
def learn(self, s): # batch update
self.sess.run(self.train_op, feed_dict={S: s})
if self.replacement['name'] == 'soft':
self.sess.run(self.soft_replace)
else:
if self.t_replace_counter % self.replacement['rep_iter_a'] == 0:
self.sess.run(self.hard_replace)
self.t_replace_counter += 1
def choose_action(self, s):
s = s[np.newaxis, :] # single state
return self.sess.run(self.a, feed_dict={S: s})[0] # single action
def add_grad_to_graph(self, a_grads):
with tf.variable_scope('policy_grads'):
# ys = policy;
# xs = policy's parameters;
# a_grads = the gradients of the policy to get more Q
# tf.gradients will calculate dys/dxs with a initial gradients for ys, so this is dq/da * da/dparams
self.policy_grads = tf.gradients(ys=self.a, xs=self.e_params, grad_ys=a_grads)
with tf.variable_scope('A_train'):
opt = tf.train.AdamOptimizer(-self.lr) # (- learning rate) for ascent policy
self.train_op = opt.apply_gradients(zip(self.policy_grads, self.e_params))  # update the eval_net parameters
############################### Critic ####################################
class Critic(object):
def __init__(self, sess, state_dim, action_dim, learning_rate, gamma, replacement, a, a_):
self.sess = sess
self.s_dim = state_dim
self.a_dim = action_dim
self.lr = learning_rate
self.gamma = gamma
self.replacement = replacement
with tf.variable_scope('Critic'):
# Input (s, a), output q
self.a = tf.stop_gradient(a) # stop critic update flows to actor
# this net is updated at every training step (evaluation net)
self.q = self._build_net(S, self.a, 'eval_net', trainable=True)
# this net's parameters are updated slowly; it is used to evaluate the actor (target net)
# Input (s_, a_), output q_ for q_target
self.q_ = self._build_net(S_, a_, 'target_net', trainable=False) # target_q is based on a_ from Actor's target_net
self.e_params = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='Critic/eval_net')
self.t_params = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='Critic/target_net')
with tf.variable_scope('target_q'):
self.target_q = R + self.gamma * self.q_  # compute the TD target
with tf.variable_scope('TD_error'):
self.loss = tf.reduce_mean(tf.squared_difference(self.target_q, self.q))  # TD-error loss
with tf.variable_scope('C_train'):
self.train_op = tf.train.AdamOptimizer(self.lr).minimize(self.loss)  # training op
with tf.variable_scope('a_grad'):
self.a_grads = tf.gradients(self.q, a)[0] # tensor of gradients of each sample (None, a_dim)
if self.replacement['name'] == 'hard':
self.t_replace_counter = 0
self.hard_replacement = [tf.assign(t, e) for t, e in zip(self.t_params, self.e_params)]
else:
self.soft_replacement = [tf.assign(t, (1 - self.replacement['tau']) * t + self.replacement['tau'] * e)
for t, e in zip(self.t_params, self.e_params)]
def _build_net(self, s, a, scope, trainable):  # Q network, computes Q(s, a)
with tf.variable_scope(scope):
init_w = tf.random_normal_initializer(0., 0.1)
init_b = tf.constant_initializer(0.1)
with tf.variable_scope('l1'):
n_l1 = 30
w1_s = tf.get_variable('w1_s', [self.s_dim, n_l1], initializer=init_w, trainable=trainable)
w1_a = tf.get_variable('w1_a', [self.a_dim, n_l1], initializer=init_w, trainable=trainable)
b1 = tf.get_variable('b1', [1, n_l1], initializer=init_b, trainable=trainable)
net = tf.nn.relu(tf.matmul(s, w1_s) + tf.matmul(a, w1_a) + b1)
with tf.variable_scope('q'):
q = tf.layers.dense(net, 1, kernel_initializer=init_w, bias_initializer=init_b, trainable=trainable) # Q(s,a)
return q
def learn(self, s, a, r, s_):
self.sess.run(self.train_op, feed_dict={S: s, self.a: a, R: r, S_: s_})
if self.replacement['name'] == 'soft':
self.sess.run(self.soft_replacement)
else:
if self.t_replace_counter % self.replacement['rep_iter_c'] == 0:
self.sess.run(self.hard_replacement)
self.t_replace_counter += 1
##################### Memory ####################
class Memory(object):
def __init__(self, capacity, dims):
self.capacity = capacity
self.data = np.zeros((capacity, dims))
self.pointer = 0
def store_transition(self, s, a, r, s_):
transition = np.hstack((s, a, [r], s_))
index = self.pointer % self.capacity # replace the old memory with new memory
self.data[index, :] = transition
self.pointer += 1
def sample(self, n):
assert self.pointer >= self.capacity, 'Memory has not been filled yet'
indices = np.random.choice(self.capacity, size=n)
return self.data[indices, :]
# import pdb; pdb.set_trace()  # debugging breakpoint disabled so the script runs non-interactively
env = gym.make(ENV_NAME)
env = env.unwrapped
env.seed(1)
state_dim = env.observation_space.shape[0]  # 3
action_dim = env.action_space.shape[0]  # 1, a single continuous action
action_bound = env.action_space.high  # [2]
# all placeholder for tf
with tf.name_scope('S'):
S = tf.placeholder(tf.float32, shape=[None, state_dim], name='s')
with tf.name_scope('R'):
R = tf.placeholder(tf.float32, [None, 1], name='r')
with tf.name_scope('S_'):
S_ = tf.placeholder(tf.float32, shape=[None, state_dim], name='s_')
sess = tf.Session()
# Create actor and critic.
# They are actually connected to each other, details can be seen in tensorboard or in this picture:
actor = Actor(sess, action_dim, action_bound, lr_a, REPLACEMENT)
critic = Critic(sess, state_dim, action_dim, lr_c, gamma, REPLACEMENT, actor.a, actor.a_)
actor.add_grad_to_graph(critic.a_grads)  # feed the critic's dQ/da gradients into the Actor's graph
sess.run(tf.global_variables_initializer())
M = Memory(MEMORY_CAPACITY, dims=2 * state_dim + action_dim + 1)
if OUTPUT_GRAPH:
tf.summary.FileWriter("logs/", sess.graph)
var = 3 # control exploration
t1 = time.time()
for i in range(MAX_EPISODES):
s = env.reset()
ep_reward = 0
for j in range(MAX_EP_STEPS):
if RENDER:
env.render()
# Add exploration noise
a = actor.choose_action(s)
a = np.clip(np.random.normal(a, var), -2, 2) # add randomness to action selection for exploration
s_, r, done, info = env.step(a)
M.store_transition(s, a, r / 10, s_)
if M.pointer > MEMORY_CAPACITY:
var *= .9995 # decay the action randomness
b_M = M.sample(BATCH_SIZE)
b_s = b_M[:, :state_dim]
b_a = b_M[:, state_dim: state_dim + action_dim]
b_r = b_M[:, -state_dim - 1: -state_dim]
b_s_ = b_M[:, -state_dim:]
critic.learn(b_s, b_a, b_r, b_s_)
actor.learn(b_s)
s = s_
ep_reward += r
if j == MAX_EP_STEPS-1:
print('Episode:', i, ' Reward: %i' % int(ep_reward), 'Explore: %.2f' % var, )
if ep_reward > -300:
RENDER = True
break
print('Running time: ', time.time()-t1)
| 38.033088 | 132 | 0.604543 |
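The 'soft' replacement used by both the Actor and the Critic above is an exponential moving average of the evaluation weights with mixing factor tau. A minimal NumPy sketch of that update rule (tau and the weight arrays are illustrative values, not taken from the file):

```python
import numpy as np

tau = 0.01  # same role as REPLACEMENT['tau'] above

# illustrative stand-ins for one layer's weights
eval_w = np.array([0.5, -1.2, 3.0])
target_w = np.zeros_like(eval_w)

for _ in range(1000):
    # soft replacement: the target weights slowly track the evaluation weights
    target_w = (1 - tau) * target_w + tau * eval_w

print(target_w)  # approaches eval_w as updates accumulate
```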
4a1e74ff8af00d27c275d21625778857a1dc71da | 749 | py | Python | app/device.py | zmops/zeus-modbus | 3de989f2233e994876cf2a98ac46d9213e53ff3c | ["Apache-2.0"] | 3 | 2022-01-26T04:27:49.000Z | 2022-03-04T14:02:41.000Z | app/device.py | zmops/zeus-modbus | 3de989f2233e994876cf2a98ac46d9213e53ff3c | ["Apache-2.0"] | null | null | null | app/device.py | zmops/zeus-modbus | 3de989f2233e994876cf2a98ac46d9213e53ff3c | ["Apache-2.0"] | null | null | null |
# -*- coding: utf-8 -*-
# Device configuration settings
# Author: 三石
# Date: 2021-12-10
from pydantic import BaseSettings
class DeviceSettings(BaseSettings):
plc366: dict = {
"id": "plc366",
"model": "napro_300",
"ipaddress": "172.16.3.66",
"port": 502,
"slave": 1,
}
hf318: dict = {
"id": "hf318",
"model": "jiandarenke_temperature_and_humidity",
"ipaddress": "172.16.3.18",
"port": 512,
"slave": 1,
}
air367: dict = {
"id": "air367",
"model": "demurui_air_conditioning_gateway",
"ipaddress": "172.16.3.67",
"port": 502,
"slave": 1,
"sub": 8
}
devices: list = [
plc366,
hf318,
air367,
]
| 17.833333 | 56 | 0.473965 |
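A consumer of `DeviceSettings` above would typically walk the `devices` list and open one Modbus/TCP connection per entry using `ipaddress`, `port`, and `slave`. A dependency-free sketch of that loop (the `connect` helper and its return value are hypothetical; a real implementation would use a Modbus client library):

```python
devices = [
    {"id": "plc366", "ipaddress": "172.16.3.66", "port": 502, "slave": 1},
    {"id": "hf318", "ipaddress": "172.16.3.18", "port": 512, "slave": 1},
]


def connect(host: str, port: int, unit: int) -> str:
    # hypothetical placeholder for a real Modbus/TCP client call
    return "connected to {}:{} (unit {})".format(host, port, unit)


for dev in devices:
    print(dev["id"], "->", connect(dev["ipaddress"], dev["port"], dev["slave"]))
```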
4a1e766f0dfa21a0112c3ad33b0595f0fce9fa0f | 5,023 | py | Python | source/speechViewer.py | ishandutta2007/nvda | d473de81aa23cc8af4785e5aa64731d6f6e9936a | ["bzip2-1.0.6"] | 3 | 2020-08-15T12:55:00.000Z | 2021-02-02T22:35:54.000Z | source/speechViewer.py | ishandutta2007/nvda | d473de81aa23cc8af4785e5aa64731d6f6e9936a | ["bzip2-1.0.6"] | null | null | null | source/speechViewer.py | ishandutta2007/nvda | d473de81aa23cc8af4785e5aa64731d6f6e9936a | ["bzip2-1.0.6"] | 2 | 2019-07-13T23:09:46.000Z | 2019-10-16T20:56:26.000Z |
#speechViewer.py
#A part of NonVisual Desktop Access (NVDA)
#Copyright (C) 2006-2018 NV Access Limited
#This file is covered by the GNU General Public License.
#See the file COPYING for more details.
import wx
import gui
import config
from logHandler import log
from speech import SpeechSequence
class SpeechViewerFrame(wx.Dialog):
def __init__(self, onDestroyCallBack):
dialogSize=wx.Size(500, 500)
dialogPos=wx.DefaultPosition
if not config.conf["speechViewer"]["autoPositionWindow"] and self.doDisplaysMatchConfig():
log.debug("Setting speechViewer window position")
speechViewSection = config.conf["speechViewer"]
dialogSize = wx.Size(speechViewSection["width"], speechViewSection["height"])
dialogPos = wx.Point(x=speechViewSection["x"], y=speechViewSection["y"])
super(SpeechViewerFrame, self).__init__(gui.mainFrame, wx.ID_ANY, _("NVDA Speech Viewer"), size=dialogSize, pos=dialogPos, style=wx.CAPTION | wx.RESIZE_BORDER | wx.STAY_ON_TOP)
self.onDestroyCallBack = onDestroyCallBack
self.Bind(wx.EVT_CLOSE, self.onClose)
self.Bind(wx.EVT_WINDOW_DESTROY, self.onDestroy)
sizer = wx.BoxSizer(wx.VERTICAL)
self.textCtrl = wx.TextCtrl(self, -1,style=wx.TE_RICH2|wx.TE_READONLY|wx.TE_MULTILINE)
sizer.Add(self.textCtrl, proportion=1, flag=wx.EXPAND)
# Translators: The label for a setting in the speech viewer that controls whether the speech viewer is shown at startup or not.
self.shouldShowOnStartupCheckBox = wx.CheckBox(self,wx.ID_ANY,label=_("&Show Speech Viewer on Startup"))
self.shouldShowOnStartupCheckBox.SetValue(config.conf["speechViewer"]["showSpeechViewerAtStartup"])
self.shouldShowOnStartupCheckBox.Bind(wx.EVT_CHECKBOX, self.onShouldShowOnStartupChanged)
sizer.Add(self.shouldShowOnStartupCheckBox, border=5, flag=wx.ALL)
# set the check box as having focus, by default the textCtrl has focus which stops the speechviewer output (even if another window is in focus)
self.shouldShowOnStartupCheckBox.SetFocus()
self.SetSizer(sizer)
self.Show(True)
def onClose(self, evt):
deactivate()
return
if not evt.CanVeto():
self.Destroy()
return
evt.Veto()
def onShouldShowOnStartupChanged(self, evt):
config.conf["speechViewer"]["showSpeechViewerAtStartup"] = self.shouldShowOnStartupCheckBox.IsChecked()
def onDestroy(self, evt):
log.debug("SpeechViewer destroyed")
self.onDestroyCallBack()
evt.Skip()
def doDisplaysMatchConfig(self):
configSizes = config.conf["speechViewer"]["displays"]
attachedSizes = self.getAttachedDisplaySizesAsStringArray()
return len(configSizes) == len(attachedSizes) and all( configSizes[i] == attachedSizes[i] for i in range(len(configSizes)))
def getAttachedDisplaySizesAsStringArray(self):
displays = ( wx.Display(i).GetGeometry().GetSize() for i in range(wx.Display.GetCount()) )
return [repr( (i.width, i.height) ) for i in displays]
def savePositionInformation(self):
position = self.GetPosition()
config.conf["speechViewer"]["x"] = position.x
config.conf["speechViewer"]["y"] = position.y
size = self.GetSize()
config.conf["speechViewer"]["width"] = size.width
config.conf["speechViewer"]["height"] = size.height
config.conf["speechViewer"]["displays"] = self.getAttachedDisplaySizesAsStringArray()
config.conf["speechViewer"]["autoPositionWindow"] = False
_guiFrame=None
isActive=False
def activate():
"""
Function to call to trigger the speech viewer window to open.
"""
_setActive(True, SpeechViewerFrame(_cleanup))
def _setActive(isNowActive, speechViewerFrame=None):
global _guiFrame, isActive
isActive = isNowActive
_guiFrame = speechViewerFrame
if gui and gui.mainFrame:
gui.mainFrame.onSpeechViewerEnabled(isNowActive)
#: How to separate items in a speech sequence
SPEECH_ITEM_SEPARATOR = " "
#: How to separate speech sequences
SPEECH_SEQUENCE_SEPARATOR = "\n"
def appendSpeechSequence(sequence: SpeechSequence) -> None:
""" Appends a speech sequence to the speech viewer.
@param sequence: the speech sequence to append; items are joined with SPEECH_ITEM_SEPARATOR, concluding with SPEECH_SEQUENCE_SEPARATOR.
"""
if not isActive:
return
# If the speech viewer text control has the focus, we want to disable updates
# Otherwise it would be impossible to select text, or even just read it (as a blind person).
if _guiFrame.FindFocus() == _guiFrame.textCtrl:
return
# to make the speech easier to read, we must separate the items.
text = SPEECH_ITEM_SEPARATOR.join(
speech for speech in sequence if isinstance(speech, str)
)
_guiFrame.textCtrl.AppendText(text + SPEECH_SEQUENCE_SEPARATOR)
def _cleanup():
global isActive
if not isActive:
return
_setActive(False)
def deactivate():
global _guiFrame, isActive
if not isActive:
return
# #7077: If the window is destroyed, text control will be gone, so save speech viewer position before destroying the window.
_guiFrame.savePositionInformation()
_guiFrame.Destroy()
isActive = False
| 38.638462 | 179 | 0.747561 |
4a1e773aa467b80b706a9a42d30475b4131c7833 | 11,351 | py | Python | snippets/PV_Composite_Writer.py | banesullivan/PVGeophysics | 1ce6311c4e5b195a4a31e3e0b1eb968f44aa34d2 | ["BSD-3-Clause"] | 1 | 2017-08-17T17:38:46.000Z | 2017-08-17T17:38:46.000Z | snippets/PV_Composite_Writer.py | banesullivan/PVGeophysics | 1ce6311c4e5b195a4a31e3e0b1eb968f44aa34d2 | ["BSD-3-Clause"] | null | null | null | snippets/PV_Composite_Writer.py | banesullivan/PVGeophysics | 1ce6311c4e5b195a4a31e3e0b1eb968f44aa34d2 | ["BSD-3-Clause"] | 1 | 2018-06-06T05:56:17.000Z | 2018-06-06T05:56:17.000Z |
"""This file can be loaded as a plugin for ParaView >= 5.6
Author: Bane Sullivan <banesulli@gmail.com>
"""
import warnings  # used below when a block cannot be written
import numpy as np
# This is module to import. It provides VTKPythonAlgorithmBase, the base class
# for all python-based vtkAlgorithm subclasses in VTK and decorators used to
# 'register' the algorithm with ParaView along with information about UI.
from paraview.util.vtkAlgorithm import *
import vtk
from vtk.numpy_interface import dataset_adapter as dsa
from vtk.util.vtkAlgorithm import VTKPythonAlgorithmBase
class WriterBase(VTKPythonAlgorithmBase):
"""This is a writer base class to add convienace methods to the
``VTKPythonAlgorithmBase`` for writer algorithms and was originally
implemented in `PVGeo`_ by `Bane Sullivan`_.
.. _PVGeo: http://pvgeo.org
.. _Bane Sullivan: http://banesullivan.com
For more information on what functionality is available, check out the VTK
Docs for the `vtkAlgorithm`_ and then check out the following blog posts:
* `vtkPythonAlgorithm is great`_
* A VTK pipeline primer `(part 1)`_, `(part 2)`_, and `(part 3)`_
* `ParaView Python Docs`_
.. _vtkAlgorithm: https://www.vtk.org/doc/nightly/html/classvtkAlgorithm.html
.. _vtkPythonAlgorithm is great: https://blog.kitware.com/vtkpythonalgorithm-is-great/
.. _(part 1): https://blog.kitware.com/a-vtk-pipeline-primer-part-1/
.. _(part 2): https://blog.kitware.com/a-vtk-pipeline-primer-part-2/
.. _(part 3): https://blog.kitware.com/a-vtk-pipeline-primer-part-3/
.. _ParaView Python Docs: https://www.paraview.org/ParaView/Doc/Nightly/www/py-doc/paraview.util.vtkAlgorithm.html
"""
def __init__(self, nInputPorts=1, inputType='vtkPolyData', **kwargs):
VTKPythonAlgorithmBase.__init__(
self, nInputPorts=nInputPorts, inputType=inputType, nOutputPorts=0
)
self.__filename = kwargs.get('filename', None)
self.__fmt = '%.9e'
# For composite datasets: not always used
self.__blockfilenames = None
self.__composite = False
def FillInputPortInformation(self, port, info):
"""Allows us to save composite datasets as well.
NOTE: I only care about ``vtkMultiBlockDataSet``s but you could hack
this method and ``RequestData`` to handle ``vtkMultiBlockDataSet``s for
a general use case.
"""
info.Set(self.INPUT_REQUIRED_DATA_TYPE(), self.InputType)
info.Append(
self.INPUT_REQUIRED_DATA_TYPE(), 'vtkMultiBlockDataSet'
) # 'vtkCompositeDataSet'
return 1
def SetFileName(self, filename):
"""Specify the filename for the output.
This will be appended if saving composite datasets.
"""
if not isinstance(filename, str):
raise RuntimeError(
'File name must be string. Only single file is supported.'
)
if self.__filename != filename:
self.__filename = filename
self.Modified()
def get_file_name(self):
"""Get the set filename."""
return self.__filename
def Write(self, input_data_object=None):
"""A Python focused conveinance method to perform the write out."""
if input_data_object:
self.SetInputDataObject(input_data_object)
self.Modified()
self.Update()
def PerformWriteOut(self, input_data_object, filename, object_name):
"""This method must be implemented. This is automatically called by
``RequestData`` for single inputs or composite inputs."""
raise NotImplementedError('PerformWriteOut must be implemented!')
def apply(self, input_data_object):
"""A convienace method if using these algorithms in a Python environment."""
self.SetInputDataObject(input_data_object)
self.Modified()
self.Update()
def set_format(self, fmt):
"""Use to set the ASCII format for the writer default is ``'%.9e'``"""
if self.__fmt != fmt and isinstance(fmt, str):
self.__fmt = fmt
self.Modified()
def get_format(self):
return self.__fmt
#### Following methods are for composite datasets ####
def use_composite(self):
"""True if input dataset is a composite dataset"""
return self.__composite
def set_block_filenames(self, n):
"""Gets a list of filenames based on user input filename and creates a
numbered list of filenames for the reader to save out. Assumes the
filename has an extension set already.
"""
number = n
count = 0
while number > 0:
number = number // 10
count = count + 1
count = '%d' % count
identifier = '_%.' + count + 'd'
blocknum = [identifier % i for i in range(n)]
# Check the file extension:
ext = self.get_file_name().split('.')[-1]
basename = self.get_file_name().replace('.%s' % ext, '')
self.__blockfilenames = [
basename + '%s.%s' % (blocknum[i], ext) for i in range(n)
]
return self.__blockfilenames
def get_block_filename(self, idx):
"""Get the filename for a specific block if composite dataset."""
return self.__blockfilenames[idx]
def RequestData(self, request, inInfo, outInfo):
"""Subclasses must implement a ``PerformWriteOut`` method that takes an
input data object and a filename. This method will automatically handle
composite data sets.
"""
inp = self.GetInputData(inInfo, 0, 0)
if isinstance(inp, vtk.vtkMultiBlockDataSet):
self.__composite = True
# Handle composite datasets. NOTE: This only handles vtkMultiBlockDataSet
if self.__composite:
num = inp.GetNumberOfBlocks()
self.set_block_filenames(num)
for i in range(num):
data = inp.GetBlock(i)
name = inp.GetMetaData(i).Get(vtk.vtkCompositeDataSet.NAME())
if data.IsTypeOf(self.InputType):
self.PerformWriteOut(data, self.get_block_filename(i), name)
else:
warnings.warn(
'Input block %d of type(%s) not saveable by writer.'
% (i, type(data))
)
# Handle single input dataset
else:
self.PerformWriteOut(inp, self.get_file_name(), None)
return 1
###############################################################################
## Now lets use ``WriterBase`` to make a writer algorithm that ParaView can use
class WriteCellCenterData(WriterBase):
"""This writer will save a file of the XYZ points for an input dataset's
cell centers and its cell data. Use in tandom with ParaView's native CSV
writer which saves the PointData. This class was originally
implemented in `PVGeo`_ by `Bane Sullivan`_.
.. _PVGeo: http://pvgeo.org
.. _Bane Sullivan: http://banesullivan.com
"""
def __init__(self):
WriterBase.__init__(self, inputType='vtkDataSet')
self.__delimiter = ','
def PerformWriteOut(self, input_data_object, filename, object_name):
# Find cell centers
filt = vtk.vtkCellCenters()
filt.SetInputDataObject(input_data_object)
filt.Update()
centers = dsa.WrapDataObject(filt.GetOutput(0)).Points
# Get CellData
wpdi = dsa.WrapDataObject(input_data_object)
celldata = wpdi.CellData
keys = celldata.keys()
# Save out using numpy
arr = np.zeros((len(centers), 3 + len(keys)))
arr[:, 0:3] = centers
for i, name in enumerate(keys):
arr[:, i + 3] = celldata[name]
# Now write out the data
# Clean data titles to make sure they do not contain the delimiter
repl = '_' if self.__delimiter != '_' else '-'
for i, name in enumerate(keys):
keys[i] = name.replace(self.__delimiter, repl)
header = ('%s' % self.__delimiter).join(['X', 'Y', 'Z'] + keys)
np.savetxt(
filename,
arr,
header=header,
delimiter=self.__delimiter,
fmt=self.get_format(),
comments='',
)
# Success for pipeline
return 1
def set_delimiter(self, deli):
"""The string delimiter to use"""
if self.__delimiter != deli:
self.__delimiter = deli
self.Modified()
###############################################################################
## Now lets use ``WriterBase`` to make a writer algorithm for image data
@smproxy.writer(
extensions="imgfmt", file_description="Write Custom ImageData", support_reload=False
)
@smproperty.input(name="Input", port_index=0)
@smdomain.datatype(dataTypes=["vtkImageData"], composite_data_supported=True)
class WriteCustomImageData(WriterBase):
"""This is an example of how to make your own file writer!
.. _PVGeo: http://pvgeo.org
.. _Bane Sullivan: http://banesullivan.com
"""
def __init__(self):
WriterBase.__init__(self, inputType='vtkImageData')
self.__delimiter = ','
def PerformWriteOut(self, input_data_object, filename, object_name):
"""Perform the file write to the given FileName with the given data
object. The super class handles all the complicated stuff.
"""
filename = filename.split('.')
filename = '.'.join(filename[0:-1]) + '_%s.%s' % (object_name, filename[-1])
writer = vtk.vtkXMLImageDataWriter()
writer.SetFileName(filename)
writer.SetInputDataObject(input_data_object)
writer.Write()
# Success for pipeline
return 1
@smproperty.stringvector(name="FileName", panel_visibility="never")
@smdomain.filelist()
def SetFileName(self, filename):
"""Specify filename for the file to write."""
WriterBase.SetFileName(self, filename)
###############################################################################
## Now wrap the cell centers writer for use in ParaView!
@smproxy.writer(
extensions="dat",
file_description="Cell Centers and Cell Data",
support_reload=False,
)
@smproperty.input(name="Input", port_index=0)
@smdomain.datatype(dataTypes=["vtkDataSet"], composite_data_supported=True)
class PVWriteCellCenterData(WriteCellCenterData):
"""The ``WriteCellCenterData`` class wrapped for use as a plugin in ParaView.
Be sure that the ``composite_data_supported`` flag is set to ``True``.
"""
def __init__(self):
WriteCellCenterData.__init__(self)
@smproperty.stringvector(name="FileName", panel_visibility="never")
@smdomain.filelist()
def SetFileName(self, filename):
"""Specify filename for the file to write."""
WriteCellCenterData.SetFileName(self, filename)
@smproperty.stringvector(name="Format", default_values='%.9e')
def set_format(self, fmt):
"""Use to set the ASCII format for the writer default is ``'%.9e'``"""
WriteCellCenterData.set_format(self, fmt)
@smproperty.stringvector(name="Delimiter", default_values=',')
def set_delimiter(self, deli):
"""The string delimiter to use"""
WriteCellCenterData.set_delimiter(self, deli)
| 38.477966 | 118 | 0.629989 |
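`set_block_filenames` above turns one user-supplied filename into a zero-padded, numbered filename per block, padding to the number of digits in the block count. A standalone sketch of that numbering scheme (the filename and block count are made-up values):

```python
def block_filenames(filename: str, n: int) -> list:
    """Number a base filename for n blocks, padded to the digit count of n."""
    width = len(str(n)) if n > 0 else 1  # e.g. n=12 -> two digits of padding
    ext = filename.split('.')[-1]
    base = filename.replace('.%s' % ext, '')
    return ['%s_%0*d.%s' % (base, width, i, ext) for i in range(n)]


print(block_filenames('output.csv', 12))
# ['output_00.csv', 'output_01.csv', ..., 'output_11.csv']
```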
4a1e778bd6e8b8ebc3004e5554370788fb8641ea | 156 | py | Python | hail/python/cluster-tests/test-skat.py | 3vivekb/hail | 82c9e0f3ec2154335f91f2219b84c0fb5dbac526 | ["MIT"] | 789 | 2016-09-05T04:14:25.000Z | 2022-03-30T09:51:54.000Z | hail/python/cluster-tests/test-skat.py | 3vivekb/hail | 82c9e0f3ec2154335f91f2219b84c0fb5dbac526 | ["MIT"] | 5,724 | 2016-08-29T18:58:40.000Z | 2022-03-31T23:49:42.000Z | hail/python/cluster-tests/test-skat.py | 3vivekb/hail | 82c9e0f3ec2154335f91f2219b84c0fb5dbac526 | ["MIT"] | 233 | 2016-08-31T20:42:38.000Z | 2022-02-17T16:42:39.000Z |
import hail as hl
mt = hl.balding_nichols_model(3, 100, 100)
t = hl.skat(mt.locus, mt.ancestral_af, mt.pop, mt.GT.n_alt_alleles(), covariates=[1])
t.show()
| 31.2 | 85 | 0.717949 |
4a1e77a8a4ca4d5c29a127494b67146194405b09 | 1,212 | py | Python | app/core/config.py | schmiedeone/audit-app-backend | 3416bdefa4c794247545526aff4501bd358f618e | ["MIT"] | null | null | null | app/core/config.py | schmiedeone/audit-app-backend | 3416bdefa4c794247545526aff4501bd358f618e | ["MIT"] | null | null | null | app/core/config.py | schmiedeone/audit-app-backend | 3416bdefa4c794247545526aff4501bd358f618e | ["MIT"] | null | null | null |
import os
from dotenv import load_dotenv
from starlette.datastructures import CommaSeparatedStrings, Secret
from databases import DatabaseURL
API_V1_STR = "/api"
JWT_TOKEN_PREFIX = "Token"
ACCESS_TOKEN_EXPIRE_MINUTES = 60 * 24 * 7 # one week
load_dotenv(".env")
DATABASE_URL = os.getenv("DATABASE_URL", "") # deploying without docker-compose
if not DATABASE_URL:
POSTGRES_HOST = os.getenv("POSTGRES_HOST", "localhost")
POSTGRES_PORT = int(os.getenv("POSTGRES_PORT", 5432))
POSTGRES_USER = os.getenv("POSTGRES_USER", "postgres")
POSTGRES_PASS = os.getenv("POSTGRES_PASSWORD", "postgres")
POSTGRES_NAME = os.getenv("POSTGRES_DB", "postgres")
DATABASE_URL = DatabaseURL(
f"postgresql://{POSTGRES_USER}:{POSTGRES_PASS}@{POSTGRES_HOST}:{POSTGRES_PORT}/{POSTGRES_NAME}"
)
else:
DATABASE_URL = DatabaseURL(DATABASE_URL)
MAX_CONNECTIONS_COUNT = int(os.getenv("MAX_CONNECTIONS_COUNT", 10))
MIN_CONNECTIONS_COUNT = int(os.getenv("MIN_CONNECTIONS_COUNT", 10))
SECRET_KEY = Secret(os.getenv("SECRET_KEY", "secret key for project"))
PROJECT_NAME = os.getenv("PROJECT_NAME", "FastAPI example application")
ALLOWED_HOSTS = CommaSeparatedStrings(os.getenv("ALLOWED_HOSTS", ""))
| 35.647059 | 103 | 0.75495 |
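The configuration above prefers a single `DATABASE_URL` environment variable and only assembles a Postgres URL from the individual `POSTGRES_*` variables when it is missing. A minimal standard-library sketch of that fallback (the helper function itself is illustrative; the defaults mirror the ones above):

```python
import os


def database_url() -> str:
    """Return DATABASE_URL if set, otherwise build one from POSTGRES_* variables."""
    url = os.getenv("DATABASE_URL", "")
    if url:
        return url
    user = os.getenv("POSTGRES_USER", "postgres")
    password = os.getenv("POSTGRES_PASSWORD", "postgres")
    host = os.getenv("POSTGRES_HOST", "localhost")
    port = int(os.getenv("POSTGRES_PORT", 5432))
    name = os.getenv("POSTGRES_DB", "postgres")
    return f"postgresql://{user}:{password}@{host}:{port}/{name}"


print(database_url())
```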
4a1e784ba936094650adbbba214a5d0444162469 | 20,154 | py | Python | tests/test_schema_proxy.py | felixonmars/elementpath | 62584c7335a8188ebc7eecdcbf0cee52daebe301 | ["MIT"] | null | null | null | tests/test_schema_proxy.py | felixonmars/elementpath | 62584c7335a8188ebc7eecdcbf0cee52daebe301 | ["MIT"] | null | null | null | tests/test_schema_proxy.py | felixonmars/elementpath | 62584c7335a8188ebc7eecdcbf0cee52daebe301 | ["MIT"] | null | null | null |
#!/usr/bin/env python
#
# Copyright (c), 2018-2020, SISSA (International School for Advanced Studies).
# All rights reserved.
# This file is distributed under the terms of the MIT License.
# See the file 'LICENSE' in the root directory of the present
# distribution, or http://opensource.org/licenses/MIT.
#
# @author Davide Brunato <brunato@sissa.it>
#
import unittest
import xml.etree.ElementTree as ElementTree
import io
from textwrap import dedent
try:
import lxml.etree as lxml_etree
except ImportError:
lxml_etree = None
from elementpath import AttributeNode, XPathContext, XPath2Parser, MissingContextError
from elementpath.namespaces import XML_LANG, XSD_NAMESPACE, XSD_ANY_ATOMIC_TYPE, XSD_NOTATION
from elementpath.schema_proxy import AbstractXsdSchema
try:
# noinspection PyPackageRequirements
import xmlschema
from xmlschema.xpath import XMLSchemaProxy
except (ImportError, AttributeError):
xmlschema = None
try:
from tests import xpath_test_class
except ImportError:
import xpath_test_class
@unittest.skipIf(xmlschema is None, "xmlschema library required.")
class XMLSchemaProxyTest(xpath_test_class.XPathTestCase):
@classmethod
def setUpClass(cls):
cls.schema = xmlschema.XMLSchema('''
<!-- Dummy schema for testing proxy API -->
<xs:schema xmlns:xs="http://www.w3.org/2001/XMLSchema"
targetNamespace="http://xpath.test/ns">
<xs:element name="test_element" type="xs:string"/>
<xs:attribute name="test_attribute" type="xs:string"/>
<xs:element name="A">
<xs:complexType>
<xs:sequence>
<xs:element name="B1"/>
<xs:element name="B2"/>
<xs:element name="B3"/>
</xs:sequence>
</xs:complexType>
</xs:element>
</xs:schema>''')
def setUp(self):
self.schema_proxy = XMLSchemaProxy(self.schema)
self.parser = XPath2Parser(namespaces=self.namespaces, schema=self.schema_proxy)
def test_abstract_xsd_schema(self):
class XsdSchema(AbstractXsdSchema):
@property
def attrib(self):
return {}
def __iter__(self):
return iter(())
schema = XsdSchema()
self.assertEqual(schema.tag, '{http://www.w3.org/2001/XMLSchema}schema')
self.assertIsNone(schema.text)
def test_schema_proxy_init(self):
schema_src = """<xs:schema xmlns:xs="http://www.w3.org/2001/XMLSchema">
<xs:element name="test_element" type="xs:string"/>
</xs:schema>"""
schema_tree = ElementTree.parse(io.StringIO(schema_src))
self.assertIsInstance(XMLSchemaProxy(), XMLSchemaProxy)
self.assertIsInstance(XMLSchemaProxy(xmlschema.XMLSchema(schema_src)), XMLSchemaProxy)
with self.assertRaises(TypeError):
XMLSchemaProxy(schema=schema_tree)
with self.assertRaises(TypeError):
XMLSchemaProxy(schema=xmlschema.XMLSchema(schema_src),
base_element=schema_tree)
with self.assertRaises(TypeError):
XMLSchemaProxy(schema=xmlschema.XMLSchema(schema_src),
base_element=schema_tree.getroot())
schema = xmlschema.XMLSchema(schema_src)
with self.assertRaises(ValueError):
XMLSchemaProxy(base_element=schema.elements['test_element'])
def test_xmlschema_proxy(self):
context = XPathContext(
root=self.etree.XML('<xs:schema xmlns:xs="http://www.w3.org/2001/XMLSchema"/>')
)
self.wrong_syntax("schema-element(*)")
self.wrong_name("schema-element(nil)")
self.wrong_name("schema-element(xs:string)")
self.check_value("schema-element(xs:complexType)", MissingContextError)
self.check_value("self::schema-element(xs:complexType)", NameError, context)
self.check_value("self::schema-element(xs:schema)", [context.item], context)
self.check_tree("schema-element(xs:group)", '(schema-element (: (xs) (group)))')
context.item = AttributeNode(XML_LANG, 'en')
self.wrong_syntax("schema-attribute(*)")
self.wrong_name("schema-attribute(nil)")
self.wrong_name("schema-attribute(xs:string)")
self.check_value("schema-attribute(xml:lang)", MissingContextError)
self.check_value("schema-attribute(xml:lang)", NameError, context)
self.check_value("self::schema-attribute(xml:lang)", [context.item], context)
self.check_tree("schema-attribute(xsi:schemaLocation)",
'(schema-attribute (: (xsi) (schemaLocation)))')
token = self.parser.parse("self::schema-attribute(xml:lang)")
context.item = AttributeNode(XML_LANG, 'en')
context.axis = 'attribute'
self.assertEqual(list(token.select(context)), [context.item])
def test_bind_parser_method(self):
schema_src = dedent("""
<xs:schema xmlns:xs="http://www.w3.org/2001/XMLSchema">
<xs:simpleType name="stringType">
<xs:restriction base="xs:string"/>
</xs:simpleType>
</xs:schema>""")
schema = xmlschema.XMLSchema(schema_src)
schema_proxy = XMLSchemaProxy(schema=schema)
parser = XPath2Parser(namespaces=self.namespaces)
self.assertFalse(parser.is_schema_bound())
schema_proxy.bind_parser(parser)
self.assertTrue(parser.is_schema_bound())
self.assertIs(schema_proxy, parser.schema)
# To test AbstractSchemaProxy.bind_parser()
parser = XPath2Parser(namespaces=self.namespaces)
super(XMLSchemaProxy, schema_proxy).bind_parser(parser)
self.assertIs(schema_proxy, parser.schema)
super(XMLSchemaProxy, schema_proxy).bind_parser(parser)
self.assertIs(schema_proxy, parser.schema)
def test_schema_constructors(self):
schema_src = dedent("""
<xs:schema xmlns:xs="http://www.w3.org/2001/XMLSchema">
<xs:simpleType name="stringType">
<xs:restriction base="xs:string"/>
</xs:simpleType>
<xs:simpleType name="intType">
<xs:restriction base="xs:int"/>
</xs:simpleType>
</xs:schema>""")
schema = xmlschema.XMLSchema(schema_src)
schema_proxy = XMLSchemaProxy(schema=schema)
parser = XPath2Parser(namespaces=self.namespaces, schema=schema_proxy)
with self.assertRaises(NameError) as ctx:
parser.schema_constructor(XSD_ANY_ATOMIC_TYPE)
self.assertIn('XPST0080', str(ctx.exception))
with self.assertRaises(NameError) as ctx:
parser.schema_constructor(XSD_NOTATION)
self.assertIn('XPST0080', str(ctx.exception))
token = parser.parse('stringType("apple")')
self.assertEqual(token.symbol, 'stringType')
self.assertEqual(token.label, 'constructor function')
self.assertEqual(token.evaluate(), 'apple')
token = parser.parse('stringType(())')
self.assertEqual(token.symbol, 'stringType')
self.assertEqual(token.label, 'constructor function')
self.assertEqual(token.evaluate(), [])
token = parser.parse('stringType(10)')
self.assertEqual(token.symbol, 'stringType')
self.assertEqual(token.label, 'constructor function')
self.assertEqual(token.evaluate(), '10')
token = parser.parse('stringType(.)')
self.assertEqual(token.symbol, 'stringType')
self.assertEqual(token.label, 'constructor function')
token = parser.parse('intType(10)')
self.assertEqual(token.symbol, 'intType')
self.assertEqual(token.label, 'constructor function')
self.assertEqual(token.evaluate(), 10)
with self.assertRaises(ValueError) as ctx:
parser.parse('intType(true())')
self.assertIn('FORG0001', str(ctx.exception))
def test_get_context_method(self):
schema_proxy = XMLSchemaProxy()
self.assertIsInstance(schema_proxy.get_context(), XPathContext)
self.assertIsInstance(super(XMLSchemaProxy, schema_proxy).get_context(), XPathContext)
def test_get_type_api(self):
schema_proxy = XMLSchemaProxy()
self.assertIsNone(schema_proxy.get_type('unknown'))
self.assertEqual(schema_proxy.get_type('{%s}string' % XSD_NAMESPACE),
xmlschema.XMLSchema.builtin_types()['string'])
def test_get_primitive_type_api(self):
schema_proxy = XMLSchemaProxy()
short_type = schema_proxy.get_type('{%s}short' % XSD_NAMESPACE)
decimal_type = schema_proxy.get_type('{%s}decimal' % XSD_NAMESPACE)
self.assertEqual(schema_proxy.get_primitive_type(short_type), decimal_type)
ntokens_type = schema_proxy.get_type('{%s}NMTOKENS' % XSD_NAMESPACE)
string_type = schema_proxy.get_type('{%s}string' % XSD_NAMESPACE)
self.assertEqual(schema_proxy.get_primitive_type(ntokens_type), string_type)
facet_type = schema_proxy.get_type('{%s}facet' % XSD_NAMESPACE)
any_type = schema_proxy.get_type('{%s}anyType' % XSD_NAMESPACE)
self.assertEqual(schema_proxy.get_primitive_type(facet_type), any_type)
self.assertEqual(schema_proxy.get_primitive_type(any_type), any_type)
any_simple_type = schema_proxy.get_type('{%s}anySimpleType' % XSD_NAMESPACE)
self.assertEqual(schema_proxy.get_primitive_type(any_simple_type), any_simple_type)
@unittest.skipIf(xmlschema is None or xmlschema.__version__ < '1.2.3',
"Old find API does not work")
def test_find_api(self):
schema_src = """<xs:schema xmlns:xs="http://www.w3.org/2001/XMLSchema">
<xs:element name="test_element" type="xs:string"/>
</xs:schema>"""
schema = xmlschema.XMLSchema(schema_src)
schema_proxy = XMLSchemaProxy(schema=schema)
self.assertEqual(schema_proxy.find('/test_element'), schema.elements['test_element'])
def test_is_instance_api(self):
self.assertFalse(self.schema_proxy.is_instance(True, '{%s}integer' % XSD_NAMESPACE))
self.assertTrue(self.schema_proxy.is_instance(5, '{%s}integer' % XSD_NAMESPACE))
self.assertFalse(self.schema_proxy.is_instance('alpha', '{%s}integer' % XSD_NAMESPACE))
self.assertTrue(self.schema_proxy.is_instance('alpha', '{%s}string' % XSD_NAMESPACE))
self.assertTrue(self.schema_proxy.is_instance('alpha beta', '{%s}token' % XSD_NAMESPACE))
self.assertTrue(self.schema_proxy.is_instance('alpha', '{%s}Name' % XSD_NAMESPACE))
self.assertFalse(self.schema_proxy.is_instance('alpha beta', '{%s}Name' % XSD_NAMESPACE))
self.assertFalse(self.schema_proxy.is_instance('1alpha', '{%s}Name' % XSD_NAMESPACE))
self.assertTrue(self.schema_proxy.is_instance('alpha', '{%s}NCName' % XSD_NAMESPACE))
self.assertFalse(self.schema_proxy.is_instance('eg:alpha', '{%s}NCName' % XSD_NAMESPACE))
def test_cast_as_api(self):
schema_proxy = XMLSchemaProxy()
self.assertEqual(schema_proxy.cast_as('19', '{%s}short' % XSD_NAMESPACE), 19)
def test_attributes_type(self):
parser = XPath2Parser(namespaces=self.namespaces)
token = parser.parse("@min le @max")
context = XPathContext(self.etree.XML('<root min="10" max="20" />'))
self.assertTrue(token.evaluate(context))
context = XPathContext(self.etree.XML('<root min="10" max="2" />'))
self.assertTrue(token.evaluate(context))
schema = xmlschema.XMLSchema('''
<xs:schema xmlns="http://xpath.test/ns" xmlns:xs="http://www.w3.org/2001/XMLSchema"
targetNamespace="http://xpath.test/ns">
<xs:element name="range" type="intRange"/>
<xs:complexType name="intRange">
<xs:attribute name="min" type="xs:int"/>
<xs:attribute name="max" type="xs:int"/>
</xs:complexType>
</xs:schema>''')
parser = XPath2Parser(namespaces=self.namespaces,
schema=XMLSchemaProxy(schema, schema.elements['range']))
token = parser.parse("@min le @max")
context = XPathContext(self.etree.XML('<root min="10" max="20" />'))
self.assertTrue(token.evaluate(context))
context = XPathContext(self.etree.XML('<root min="10" max="2" />'))
self.assertFalse(token.evaluate(context))
schema = xmlschema.XMLSchema('''
<xs:schema xmlns="http://xpath.test/ns" xmlns:xs="http://www.w3.org/2001/XMLSchema"
targetNamespace="http://xpath.test/ns">
<xs:element name="range" type="intRange"/>
<xs:complexType name="intRange">
<xs:attribute name="min" type="xs:int"/>
<xs:attribute name="max" type="xs:string"/>
</xs:complexType>
</xs:schema>''')
parser = XPath2Parser(namespaces=self.namespaces,
schema=XMLSchemaProxy(schema, schema.elements['range']))
self.assertRaises(TypeError, parser.parse, '@min le @max')
def test_elements_type(self):
schema = xmlschema.XMLSchema('''
<xs:schema xmlns="http://xpath.test/ns" xmlns:xs="http://www.w3.org/2001/XMLSchema"
targetNamespace="http://xpath.test/ns">
<xs:element name="values">
<xs:complexType>
<xs:sequence>
<xs:element name="a" type="xs:string"/>
<xs:element name="b" type="xs:integer"/>
<xs:element name="c" type="xs:boolean"/>
<xs:element name="d" type="xs:float"/>
</xs:sequence>
</xs:complexType>
</xs:element>
</xs:schema>''')
parser = XPath2Parser(namespaces={'': "http://xpath.test/ns", 'xs': XSD_NAMESPACE},
schema=XMLSchemaProxy(schema))
token = parser.parse("//a")
self.assertEqual(token[0].xsd_types['a'], schema.maps.types['{%s}string' % XSD_NAMESPACE])
token = parser.parse("//b")
self.assertEqual(token[0].xsd_types['b'], schema.maps.types['{%s}integer' % XSD_NAMESPACE])
token = parser.parse("//values/c")
self.assertEqual(token[0][0].xsd_types["{http://xpath.test/ns}values"],
schema.elements['values'].type)
self.assertEqual(token[1].xsd_types['c'], schema.maps.types['{%s}boolean' % XSD_NAMESPACE])
token = parser.parse("values/c")
self.assertEqual(token[0].xsd_types['{http://xpath.test/ns}values'],
schema.elements['values'].type)
self.assertEqual(token[1].xsd_types['c'], schema.maps.types['{%s}boolean' % XSD_NAMESPACE])
token = parser.parse("values/*")
self.assertEqual(token[1].xsd_types, {
'a': schema.maps.types['{%s}string' % XSD_NAMESPACE],
'b': schema.maps.types['{%s}integer' % XSD_NAMESPACE],
'c': schema.maps.types['{%s}boolean' % XSD_NAMESPACE],
'd': schema.maps.types['{%s}float' % XSD_NAMESPACE],
})
def test_elements_and_attributes_type(self):
schema = xmlschema.XMLSchema('''
<xs:schema xmlns="http://xpath.test/ns" xmlns:xs="http://www.w3.org/2001/XMLSchema"
targetNamespace="http://xpath.test/ns">
<xs:element name="values">
<xs:complexType>
<xs:sequence>
<xs:element name="a" type="xs:string"/>
<xs:element name="b" type="rangeType"/>
<xs:element name="c" type="xs:boolean"/>
<xs:element name="d" type="xs:float"/>
</xs:sequence>
</xs:complexType>
</xs:element>
<xs:complexType name="rangeType">
<xs:simpleContent>
<xs:extension base="xs:integer">
<xs:attribute name="min" type="xs:integer"/>
<xs:attribute name="max" type="xs:integer"/>
</xs:extension>
</xs:simpleContent>
</xs:complexType>
</xs:schema>''')
parser = XPath2Parser(namespaces={'': "http://xpath.test/ns", 'xs': XSD_NAMESPACE},
schema=XMLSchemaProxy(schema))
token = parser.parse("//a")
self.assertEqual(token[0].xsd_types['a'], schema.maps.types['{%s}string' % XSD_NAMESPACE])
token = parser.parse("//b")
self.assertEqual(token[0].xsd_types['b'], schema.types['rangeType'])
token = parser.parse("values/c")
self.assertEqual(token[0].xsd_types['{http://xpath.test/ns}values'],
schema.elements['values'].type)
self.assertEqual(token[1].xsd_types['c'], schema.maps.types['{%s}boolean' % XSD_NAMESPACE])
token = parser.parse("//b/@min")
self.assertEqual(token[0][0].xsd_types['b'], schema.types['rangeType'])
self.assertEqual(token[1][0].xsd_types['min'],
schema.maps.types['{%s}integer' % XSD_NAMESPACE])
token = parser.parse("values/b/@min")
self.assertEqual(token[0][0].xsd_types['{http://xpath.test/ns}values'],
schema.elements['values'].type)
self.assertEqual(token[0][1].xsd_types['b'], schema.types['rangeType'])
self.assertEqual(token[1][0].xsd_types['min'],
schema.maps.types['{%s}integer' % XSD_NAMESPACE])
token = parser.parse("//b/@min lt //b/@max")
self.assertEqual(token[0][0][0].xsd_types['b'], schema.types['rangeType'])
self.assertEqual(token[0][1][0].xsd_types['min'],
schema.maps.types['{%s}integer' % XSD_NAMESPACE])
self.assertEqual(token[1][0][0].xsd_types['b'], schema.types['rangeType'])
self.assertEqual(token[1][1][0].xsd_types['max'],
schema.maps.types['{%s}integer' % XSD_NAMESPACE])
root = self.etree.XML('<values xmlns="http://xpath.test/ns"><b min="19"/></values>')
self.assertIsNone(token.evaluate(context=XPathContext(root)))
root = self.etree.XML('<values xmlns="http://xpath.test/ns"><b min="19">30</b></values>')
self.assertIsNone(token.evaluate(context=XPathContext(root)))
root = self.etree.XML(
'<values xmlns="http://xpath.test/ns"><b min="19" max="40">30</b></values>')
context = XPathContext(root)
self.assertTrue(token.evaluate(context))
root = self.etree.XML(
'<values xmlns="http://xpath.test/ns"><b min="19" max="10">30</b></values>')
context = XPathContext(root)
self.assertFalse(token.evaluate(context))
def test_issue_10(self):
schema = xmlschema.XMLSchema('''
<xs:schema xmlns="http://xpath.test/ns#" xmlns:xs="http://www.w3.org/2001/XMLSchema"
targetNamespace="http://xpath.test/ns#">
<xs:element name="root" type="rootType" />
<xs:simpleType name="rootType">
<xs:restriction base="xs:string"/>
</xs:simpleType>
</xs:schema>''')
# TODO: test fail with xmlschema-1.0.17+, added namespaces as temporary fix for test.
# A fix for xmlschema.xpath.ElementPathMixin._get_xpath_namespaces() is required.
root = schema.find('root', namespaces={'': 'http://xpath.test/ns#'})
self.assertEqual(getattr(root, 'tag', None), '{http://xpath.test/ns#}root')
@unittest.skipIf(xmlschema is None or lxml_etree is None, "both xmlschema and lxml required")
class LxmlXMLSchemaProxyTest(XMLSchemaProxyTest):
etree = lxml_etree
if __name__ == '__main__':
unittest.main()
| 46.869767 | 99 | 0.612484 |
4a1e789d93eb2851f54cacf2a5c0b13a9dd2568a | 471 | py | Python | app/core/models.py | ClickTravel-VincentCleaver/django-training | 8b07739487d8b5b6db33f9ec2b9d221b493de0be | ["MIT"] | null | null | null | app/core/models.py | ClickTravel-VincentCleaver/django-training | 8b07739487d8b5b6db33f9ec2b9d221b493de0be | ["MIT"] | 1 | 2021-09-09T11:33:32.000Z | 2021-09-09T11:33:32.000Z | app/core/models.py | ClickTravel-VincentCleaver/django-training | 8b07739487d8b5b6db33f9ec2b9d221b493de0be | ["MIT"] | null | null | null |
from django.db import models
class Recipe(models.Model):
"""Recipe"""
name = models.TextField(blank=False)
description = models.TextField()
def __str__(self):
return self.name
class Ingredient(models.Model):
"""Ingredient"""
name = models.TextField(blank=False)
recipe = models.ForeignKey(
Recipe,
on_delete=models.CASCADE,
related_name='ingredients'
)
def __str__(self):
return self.name
| 19.625 | 40 | 0.639066 |
4a1e7c1be9ab56a3b34688a6e05b8f5ade77d871 | 2,800 | py | Python | src/fonduer/parser/models/figure.py | annelhote/fonduer | bd5b1feebfb2860286ae8b5a520b24baa023b445 | ["MIT"] | 379 | 2018-03-29T10:06:02.000Z | 2022-03-30T22:51:43.000Z | src/fonduer/parser/models/figure.py | annelhote/fonduer | bd5b1feebfb2860286ae8b5a520b24baa023b445 | ["MIT"] | 328 | 2018-03-29T03:34:46.000Z | 2021-09-02T21:24:41.000Z | src/fonduer/parser/models/figure.py | annelhote/fonduer | bd5b1feebfb2860286ae8b5a520b24baa023b445 | ["MIT"] | 86 | 2018-03-30T12:56:33.000Z | 2022-01-12T09:10:48.000Z |
"""Fonduer figure context model."""
from sqlalchemy import Column, ForeignKey, Integer, String, UniqueConstraint
from sqlalchemy.orm import backref, relationship
from fonduer.parser.models.context import Context
class Figure(Context):
"""A figure Context in a Document.
Used to represent figures in a document.
.. note:: As of v0.6.2, ``<img>`` and ``<figure>`` tags turn into ``Figure``.
"""
__tablename__ = "figure"
#: The unique id of the ``Figure``.
id = Column(Integer, ForeignKey("context.id", ondelete="CASCADE"), primary_key=True)
#: The position of the ``Figure`` in the ``Document``.
position = Column(Integer, nullable=False)
#: The name of a ``Figure``.
name = Column(String, unique=False, nullable=True)
#: The id of the parent ``Document``.
document_id = Column(Integer, ForeignKey("document.id", ondelete="CASCADE"))
#: The parent ``Document``.
document = relationship(
"Document",
backref=backref("figures", order_by=position, cascade="all, delete-orphan"),
foreign_keys=document_id,
)
#: The id of the parent ``Section``.
section_id = Column(Integer, ForeignKey("section.id"))
#: The parent ``Section``.
section = relationship(
"Section",
backref=backref("figures", order_by=position, cascade="all, delete-orphan"),
foreign_keys=section_id,
)
#: The id of the parent ``Cell``, if any.
cell_id = Column(Integer, ForeignKey("cell.id"))
#: The the parent ``Cell``, if any.
cell = relationship(
"Cell",
backref=backref("figures", order_by=position, cascade="all, delete-orphan"),
foreign_keys=cell_id,
)
#: The ``Figure``'s URL.
url = Column(String)
__mapper_args__ = {"polymorphic_identity": "figure"}
__table_args__ = (UniqueConstraint(document_id, position),)
def __repr__(self) -> str:
"""Represent the context as a string."""
if self.cell:
return (
f"Figure("
f"Doc: {self.document.name}, "
f"Sec: {self.section.position}, "
f"Cell: {self.cell.position}, "
f"Pos: {self.position}, "
f"Url: {self.url}"
f")"
)
else:
return (
f"Figure("
f"Doc: {self.document.name}, "
f"Sec: {self.section.position}, "
f"Pos: {self.position}, "
f"Url: {self.url}"
f")"
)
def __gt__(self, other: "Figure") -> bool:
"""Check if the context is greater than another context."""
# Allow sorting by comparing the string representations of each
return self.__repr__() > other.__repr__()
| 32.183908 | 88 | 0.579643 |
4a1e7e48f92a02bfb553206c052e6e1368d5a3d0 | 1,164 | py | Python | tutorials/e2e-distributed-pytorch-image/src/pytorch_dl_train/model.py | jplummer01/azureml-examples | 6a073d157f21060312941f71cfbcf25d0c541183 | ["MIT"] | null | null | null | tutorials/e2e-distributed-pytorch-image/src/pytorch_dl_train/model.py | jplummer01/azureml-examples | 6a073d157f21060312941f71cfbcf25d0c541183 | ["MIT"] | null | null | null | tutorials/e2e-distributed-pytorch-image/src/pytorch_dl_train/model.py | jplummer01/azureml-examples | 6a073d157f21060312941f71cfbcf25d0c541183 | ["MIT"] | null | null | null |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
"""
This script provides code to load and setup a variety of models from torchvision.models.
"""
import logging
import torch
import torch.nn as nn
import torch.optim as optim
import torchvision.models as models
MODEL_ARCH_LIST = [
"resnet18",
]
def load_model(model_arch: str, output_dimension: int = 1, pretrained: bool = True):
"""Loads a model from a given arch and sets it up for training"""
logger = logging.getLogger(__name__)
logger.info(
f"Loading model from arch={model_arch} pretrained={pretrained} output_dimension={output_dimension}"
)
if model_arch in MODEL_ARCH_LIST:
model = getattr(models, model_arch)(pretrained=pretrained)
else:
raise NotImplementedError(
f"model_arch={model_arch} is not implemented in torchvision model zoo."
)
if model_arch == "resnet18":
model.fc = nn.Linear(model.fc.in_features, output_dimension)
else:
raise NotImplementedError(
f"loading model_arch={model_arch} is not implemented yet in our custom code."
)
return model
| 29.1 | 107 | 0.699313 |
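The loader above resolves an architecture name to a constructor via `getattr(models, model_arch)` and then replaces the final fully connected layer. A dependency-free sketch of the same name-to-constructor dispatch (the registry and the `ResNet18` stand-in are made up for illustration; the real code uses torchvision):

```python
class ResNet18:
    """Hypothetical stand-in for torchvision.models.resnet18."""
    def __init__(self, pretrained: bool = True):
        self.pretrained = pretrained


MODEL_REGISTRY = {"resnet18": ResNet18}


def load_model(model_arch: str, pretrained: bool = True):
    """Resolve an architecture name to its constructor, mirroring the loader above."""
    if model_arch not in MODEL_REGISTRY:
        raise NotImplementedError(f"model_arch={model_arch} is not implemented.")
    return MODEL_REGISTRY[model_arch](pretrained=pretrained)


model = load_model("resnet18", pretrained=False)
print(type(model).__name__, model.pretrained)
```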
4a1e7ef807f452bb2bfb994bdabcaf2c7f470d34 | 3,818 | py | Python | tests/lexer/test_lexer.py | Pennycook/code-base-investigator | 1673301553100f567ab4140ab27f7417d84e3f08 | ["BSD-3-Clause"] | 17 | 2019-07-24T21:17:52.000Z | 2022-03-22T01:15:46.000Z | tests/lexer/test_lexer.py | Pennycook/code-base-investigator | 1673301553100f567ab4140ab27f7417d84e3f08 | ["BSD-3-Clause"] | 1 | 2022-03-11T13:11:41.000Z | 2022-03-11T13:11:41.000Z | tests/lexer/test_lexer.py | Pennycook/code-base-investigator | 1673301553100f567ab4140ab27f7417d84e3f08 | ["BSD-3-Clause"] | 7 | 2019-07-24T21:27:42.000Z | 2022-03-21T22:47:34.000Z |
# Copyright (C) 2019 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause
import unittest
from codebasin import preprocessor
class TestLexer(unittest.TestCase):
"""
Test ability to tokenize strings correctly.
"""
def test_character(self):
"""characters"""
tokens = preprocessor.Lexer("'c'").tokenize()
self.assertTrue(len(tokens) == 1)
self.assertTrue(isinstance(tokens[0], preprocessor.CharacterConstant))
def test_numerical(self):
"""numbers"""
numbers = ["123", "123ul", "123.4", "123.4e+05", ".123", "0xFF", "0b10"]
for number in numbers:
tokens = preprocessor.Lexer(number).tokenize()
self.assertTrue(len(tokens) == 1)
self.assertTrue(isinstance(tokens[0], preprocessor.NumericalConstant))
def test_string(self):
"""strings"""
tokens = preprocessor.Lexer("\"this is a string constant\"").tokenize()
self.assertTrue(len(tokens) == 1)
self.assertTrue(isinstance(tokens[0], preprocessor.StringConstant))
def test_identifier(self):
"""identifiers"""
tokens = preprocessor.Lexer("this is a string of words").tokenize()
self.assertTrue(len(tokens) == 6)
self.assertTrue(all([isinstance(t, preprocessor.Identifier) for t in tokens]))
def test_operator(self):
"""operators"""
operators = ["||", "&&", ">>", "<<", "!=", ">=", "<=", "==", "##"] + \
["-", "+", "!", "*", "/", "|", "&", "^", "<", ">", "?", ":", "~", "#", "=", "%"]
for op in operators:
tokens = preprocessor.Lexer(op).tokenize()
self.assertTrue(len(tokens) == 1)
self.assertTrue(isinstance(tokens[0], preprocessor.Operator))
self.assertTrue(str(tokens[0].token) == op)
def test_punctuator(self):
"""punctuators"""
punctuators = ["(", ")", "{", "}", "[", "]", ",", ".", ";", "'", "\"", "\\"]
for punc in punctuators:
tokens = preprocessor.Lexer(punc).tokenize()
self.assertTrue(len(tokens) == 1)
self.assertTrue(isinstance(tokens[0], preprocessor.Punctuator))
self.assertTrue(str(tokens[0].token) == punc)
def test_expression(self):
"""expression"""
tokens = preprocessor.Lexer("foo(a,b) * 124 + 'c'").tokenize()
self.assertTrue(len(tokens) == 10)
self.assertTrue(isinstance(tokens[0], preprocessor.Identifier))
self.assertTrue(isinstance(tokens[1], preprocessor.Punctuator))
self.assertTrue(isinstance(tokens[2], preprocessor.Identifier))
self.assertTrue(isinstance(tokens[3], preprocessor.Punctuator))
self.assertTrue(isinstance(tokens[4], preprocessor.Identifier))
self.assertTrue(isinstance(tokens[5], preprocessor.Punctuator))
self.assertTrue(isinstance(tokens[6], preprocessor.Operator))
self.assertTrue(isinstance(tokens[7], preprocessor.NumericalConstant))
self.assertTrue(isinstance(tokens[8], preprocessor.Operator))
self.assertTrue(isinstance(tokens[9], preprocessor.CharacterConstant))
tokens = preprocessor.Lexer("a > b ? \"true_string\" : \"false_string\"").tokenize()
self.assertTrue(len(tokens) == 7)
self.assertTrue(isinstance(tokens[0], preprocessor.Identifier))
self.assertTrue(isinstance(tokens[1], preprocessor.Operator))
self.assertTrue(isinstance(tokens[2], preprocessor.Identifier))
self.assertTrue(isinstance(tokens[3], preprocessor.Operator))
self.assertTrue(isinstance(tokens[4], preprocessor.StringConstant))
self.assertTrue(isinstance(tokens[5], preprocessor.Operator))
self.assertTrue(isinstance(tokens[6], preprocessor.StringConstant))
if __name__ == '__main__':
unittest.main()
| 44.395349 | 92 | 0.62153 |
4a1e80ddd675f53fcfdbdbd8cc7af5a5fca6b252
| 11,339 |
py
|
Python
|
sdk/python/pulumi_aws_native/apigatewayv2/integration_response.py
|
AaronFriel/pulumi-aws-native
|
5621690373ac44accdbd20b11bae3be1baf022d1
|
[
"Apache-2.0"
] | 29 |
2021-09-30T19:32:07.000Z
|
2022-03-22T21:06:08.000Z
|
sdk/python/pulumi_aws_native/apigatewayv2/integration_response.py
|
AaronFriel/pulumi-aws-native
|
5621690373ac44accdbd20b11bae3be1baf022d1
|
[
"Apache-2.0"
] | 232 |
2021-09-30T19:26:26.000Z
|
2022-03-31T23:22:06.000Z
|
sdk/python/pulumi_aws_native/apigatewayv2/integration_response.py
|
AaronFriel/pulumi-aws-native
|
5621690373ac44accdbd20b11bae3be1baf022d1
|
[
"Apache-2.0"
] | 4 |
2021-11-10T19:42:01.000Z
|
2022-02-05T10:15:49.000Z
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = ['IntegrationResponseArgs', 'IntegrationResponse']
@pulumi.input_type
class IntegrationResponseArgs:
def __init__(__self__, *,
api_id: pulumi.Input[str],
integration_id: pulumi.Input[str],
integration_response_key: pulumi.Input[str],
content_handling_strategy: Optional[pulumi.Input[str]] = None,
response_parameters: Optional[Any] = None,
response_templates: Optional[Any] = None,
template_selection_expression: Optional[pulumi.Input[str]] = None):
"""
        The set of arguments for constructing an IntegrationResponse resource.
"""
pulumi.set(__self__, "api_id", api_id)
pulumi.set(__self__, "integration_id", integration_id)
pulumi.set(__self__, "integration_response_key", integration_response_key)
if content_handling_strategy is not None:
pulumi.set(__self__, "content_handling_strategy", content_handling_strategy)
if response_parameters is not None:
pulumi.set(__self__, "response_parameters", response_parameters)
if response_templates is not None:
pulumi.set(__self__, "response_templates", response_templates)
if template_selection_expression is not None:
pulumi.set(__self__, "template_selection_expression", template_selection_expression)
@property
@pulumi.getter(name="apiId")
def api_id(self) -> pulumi.Input[str]:
return pulumi.get(self, "api_id")
@api_id.setter
def api_id(self, value: pulumi.Input[str]):
pulumi.set(self, "api_id", value)
@property
@pulumi.getter(name="integrationId")
def integration_id(self) -> pulumi.Input[str]:
return pulumi.get(self, "integration_id")
@integration_id.setter
def integration_id(self, value: pulumi.Input[str]):
pulumi.set(self, "integration_id", value)
@property
@pulumi.getter(name="integrationResponseKey")
def integration_response_key(self) -> pulumi.Input[str]:
return pulumi.get(self, "integration_response_key")
@integration_response_key.setter
def integration_response_key(self, value: pulumi.Input[str]):
pulumi.set(self, "integration_response_key", value)
@property
@pulumi.getter(name="contentHandlingStrategy")
def content_handling_strategy(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "content_handling_strategy")
@content_handling_strategy.setter
def content_handling_strategy(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "content_handling_strategy", value)
@property
@pulumi.getter(name="responseParameters")
def response_parameters(self) -> Optional[Any]:
return pulumi.get(self, "response_parameters")
@response_parameters.setter
def response_parameters(self, value: Optional[Any]):
pulumi.set(self, "response_parameters", value)
@property
@pulumi.getter(name="responseTemplates")
def response_templates(self) -> Optional[Any]:
return pulumi.get(self, "response_templates")
@response_templates.setter
def response_templates(self, value: Optional[Any]):
pulumi.set(self, "response_templates", value)
@property
@pulumi.getter(name="templateSelectionExpression")
def template_selection_expression(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "template_selection_expression")
@template_selection_expression.setter
def template_selection_expression(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "template_selection_expression", value)
warnings.warn("""IntegrationResponse is not yet supported by AWS Native, so its creation will currently fail. Please use the classic AWS provider, if possible.""", DeprecationWarning)
class IntegrationResponse(pulumi.CustomResource):
warnings.warn("""IntegrationResponse is not yet supported by AWS Native, so its creation will currently fail. Please use the classic AWS provider, if possible.""", DeprecationWarning)
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
api_id: Optional[pulumi.Input[str]] = None,
content_handling_strategy: Optional[pulumi.Input[str]] = None,
integration_id: Optional[pulumi.Input[str]] = None,
integration_response_key: Optional[pulumi.Input[str]] = None,
response_parameters: Optional[Any] = None,
response_templates: Optional[Any] = None,
template_selection_expression: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
Resource Type definition for AWS::ApiGatewayV2::IntegrationResponse
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: IntegrationResponseArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Resource Type definition for AWS::ApiGatewayV2::IntegrationResponse
:param str resource_name: The name of the resource.
:param IntegrationResponseArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(IntegrationResponseArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
api_id: Optional[pulumi.Input[str]] = None,
content_handling_strategy: Optional[pulumi.Input[str]] = None,
integration_id: Optional[pulumi.Input[str]] = None,
integration_response_key: Optional[pulumi.Input[str]] = None,
response_parameters: Optional[Any] = None,
response_templates: Optional[Any] = None,
template_selection_expression: Optional[pulumi.Input[str]] = None,
__props__=None):
pulumi.log.warn("""IntegrationResponse is deprecated: IntegrationResponse is not yet supported by AWS Native, so its creation will currently fail. Please use the classic AWS provider, if possible.""")
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = IntegrationResponseArgs.__new__(IntegrationResponseArgs)
if api_id is None and not opts.urn:
raise TypeError("Missing required property 'api_id'")
__props__.__dict__["api_id"] = api_id
__props__.__dict__["content_handling_strategy"] = content_handling_strategy
if integration_id is None and not opts.urn:
raise TypeError("Missing required property 'integration_id'")
__props__.__dict__["integration_id"] = integration_id
if integration_response_key is None and not opts.urn:
raise TypeError("Missing required property 'integration_response_key'")
__props__.__dict__["integration_response_key"] = integration_response_key
__props__.__dict__["response_parameters"] = response_parameters
__props__.__dict__["response_templates"] = response_templates
__props__.__dict__["template_selection_expression"] = template_selection_expression
super(IntegrationResponse, __self__).__init__(
'aws-native:apigatewayv2:IntegrationResponse',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'IntegrationResponse':
"""
Get an existing IntegrationResponse resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = IntegrationResponseArgs.__new__(IntegrationResponseArgs)
__props__.__dict__["api_id"] = None
__props__.__dict__["content_handling_strategy"] = None
__props__.__dict__["integration_id"] = None
__props__.__dict__["integration_response_key"] = None
__props__.__dict__["response_parameters"] = None
__props__.__dict__["response_templates"] = None
__props__.__dict__["template_selection_expression"] = None
return IntegrationResponse(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="apiId")
def api_id(self) -> pulumi.Output[str]:
return pulumi.get(self, "api_id")
@property
@pulumi.getter(name="contentHandlingStrategy")
def content_handling_strategy(self) -> pulumi.Output[Optional[str]]:
return pulumi.get(self, "content_handling_strategy")
@property
@pulumi.getter(name="integrationId")
def integration_id(self) -> pulumi.Output[str]:
return pulumi.get(self, "integration_id")
@property
@pulumi.getter(name="integrationResponseKey")
def integration_response_key(self) -> pulumi.Output[str]:
return pulumi.get(self, "integration_response_key")
@property
@pulumi.getter(name="responseParameters")
def response_parameters(self) -> pulumi.Output[Optional[Any]]:
return pulumi.get(self, "response_parameters")
@property
@pulumi.getter(name="responseTemplates")
def response_templates(self) -> pulumi.Output[Optional[Any]]:
return pulumi.get(self, "response_templates")
@property
@pulumi.getter(name="templateSelectionExpression")
def template_selection_expression(self) -> pulumi.Output[Optional[str]]:
return pulumi.get(self, "template_selection_expression")
| 45.538153 | 208 | 0.681718 |
4a1e81109e81770c1fa318c0fb6c49da043fc4cc
| 2,703 |
py
|
Python
|
src/data_preparation/scripts/graph_generator/typeparsing/rewriterulevisitor.py
|
mir-am/typilus
|
d2c126f178c02cfcef9b0ce652c4b019c2462e09
|
[
"MIT"
] | null | null | null |
src/data_preparation/scripts/graph_generator/typeparsing/rewriterulevisitor.py
|
mir-am/typilus
|
d2c126f178c02cfcef9b0ce652c4b019c2462e09
|
[
"MIT"
] | null | null | null |
src/data_preparation/scripts/graph_generator/typeparsing/rewriterulevisitor.py
|
mir-am/typilus
|
d2c126f178c02cfcef9b0ce652c4b019c2462e09
|
[
"MIT"
] | null | null | null |
from typing import List
from .nodes import TypeAnnotationNode, SubscriptAnnotationNode, TupleAnnotationNode, ListAnnotationNode, \
AttributeAnnotationNode, IndexAnnotationNode, ElipsisAnnotationNode
from .rewriterules import RewriteRule
from .visitor import TypeAnnotationVisitor
class RewriteRuleVisitor(TypeAnnotationVisitor):
"""Replace Nodes based on a list of rules."""
def __init__(self, rules: List[RewriteRule]):
self.__rules = rules
def __apply_on_match(self, original_node: TypeAnnotationNode, parent: TypeAnnotationNode) -> TypeAnnotationNode:
for rule in self.__rules:
if rule.matches(original_node, parent):
return rule.apply(original_node)
return original_node
def visit_subscript_annotation(self, node: SubscriptAnnotationNode, parent: TypeAnnotationNode) -> SubscriptAnnotationNode:
node = SubscriptAnnotationNode(
value=node.value.accept_visitor(self, node),
slice=node.slice.accept_visitor(self, node) if node.slice is not None else None)
return self.__apply_on_match(node, parent)
def visit_tuple_annotation(self, node: TupleAnnotationNode, parent: TypeAnnotationNode) -> TupleAnnotationNode:
node = TupleAnnotationNode(
(e.accept_visitor(self, node) for e in node.elements)
)
return self.__apply_on_match(node, parent)
def visit_name_annotation(self, node, parent: TypeAnnotationNode):
return self.__apply_on_match(node, parent)
def visit_list_annotation(self, node: ListAnnotationNode, parent: TypeAnnotationNode):
node = ListAnnotationNode(
(e.accept_visitor(self, node) for e in node.elements)
)
return self.__apply_on_match(node, parent)
def visit_attribute_annotation(self, node: AttributeAnnotationNode, parent: TypeAnnotationNode):
node = AttributeAnnotationNode(node.value.accept_visitor(self, node), node.attribute)
return self.__apply_on_match(node, parent)
def visit_index_annotation(self, node: IndexAnnotationNode, parent: TypeAnnotationNode):
node = IndexAnnotationNode(node.value.accept_visitor(self, node))
return self.__apply_on_match(node, parent)
def visit_elipsis_annotation(self, node: ElipsisAnnotationNode, parent: TypeAnnotationNode):
return self.__apply_on_match(node, parent)
def visit_name_constant_annotation(self, node, parent: TypeAnnotationNode):
return self.__apply_on_match(node, parent)
def visit_unknown_annotation(self, node, parent: TypeAnnotationNode):
return self.__apply_on_match(node, parent)
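# Editor's illustrative sketch (not part of the original module): applying the visitor.
# It assumes a concrete RewriteRule with matches()/apply() (as used above) and that
# annotation nodes expose accept_visitor(); all names below are hypothetical.
#
#   rules = [SomeConcreteRewriteRule(...)]
#   visitor = RewriteRuleVisitor(rules)
#   rewritten = parsed_annotation.accept_visitor(visitor, None)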
| 47.421053 | 128 | 0.72771 |
4a1e8217211e6bb4f8386b520247b801f97dfe72
| 107,907 |
py
|
Python
|
trunk/mac/Build/cpplint.py
|
dyzmapl/BumpTop
|
1329ea41411c7368516b942d19add694af3d602f
|
[
"Apache-2.0"
] | 460 |
2016-01-13T12:49:34.000Z
|
2022-02-20T04:10:40.000Z
|
trunk/mac/Build/cpplint.py
|
dyzmapl/BumpTop
|
1329ea41411c7368516b942d19add694af3d602f
|
[
"Apache-2.0"
] | 24 |
2016-11-07T04:59:49.000Z
|
2022-03-14T06:34:12.000Z
|
trunk/mac/Build/cpplint.py
|
dyzmapl/BumpTop
|
1329ea41411c7368516b942d19add694af3d602f
|
[
"Apache-2.0"
] | 148 |
2016-01-17T03:16:43.000Z
|
2022-03-17T12:20:36.000Z
|
#!/usr/bin/python2.4
#
# cpplint.py is Copyright (C) 2009 Google Inc.
#
# It is free software; you can redistribute it and/or modify it under the
# terms of either:
#
# a) the GNU General Public License as published by the Free Software
# Foundation; either version 1, or (at your option) any later version, or
#
# b) the "Artistic License".
# Here are some issues that I've had people identify in my code during reviews,
# that I think are possible to flag automatically in a lint tool. If these were
# caught by lint, it would save time both for myself and that of my reviewers.
# Most likely, some of these are beyond the scope of the current lint framework,
# but I think it is valuable to retain these wish-list items even if they cannot
# be immediately implemented.
#
# Suggestions
# -----------
# - Check for no 'explicit' for multi-arg ctor
# - Check for boolean assign RHS in parens
# - Check for ctor initializer-list colon position and spacing
# - Check that if there's a ctor, there should be a dtor
# - Check accessors that return non-pointer member variables are
# declared const
# - Check accessors that return non-const pointer member vars are
# *not* declared const
# - Check for using public includes for testing
# - Check for spaces between brackets in one-line inline method
# - Check for no assert()
# - Check for spaces surrounding operators
# - Check for 0 in pointer context (should be NULL)
# - Check for 0 in char context (should be '\0')
# - Check for camel-case method name conventions for methods
# that are not simple inline getters and setters
# - Check that base classes have virtual destructors
# put " // namespace" after } that closes a namespace, with
# namespace's name after 'namespace' if it is named.
# - Do not indent namespace contents
# - Avoid inlining non-trivial constructors in header files
# include base/basictypes.h if DISALLOW_EVIL_CONSTRUCTORS is used
# - Check for old-school (void) cast for call-sites of functions
# ignored return value
# - Check gUnit usage of anonymous namespace
# - Check for class declaration order (typedefs, consts, enums,
# ctor(s?), dtor, friend declarations, methods, member vars)
#
"""Does google-lint on c++ files.
The goal of this script is to identify places in the code that *may*
be in non-compliance with google style. It does not attempt to fix
up these problems -- the point is to educate. It does also not
attempt to find all problems, or to ensure that everything it does
find is legitimately a problem.
In particular, we can get very confused by /* and // inside strings!
We do a small hack, which is to ignore //'s with "'s after them on the
same line, but it is far from perfect (in either direction).
"""
import codecs
import getopt
import math # for log
import os
import re
import sre_compile
import string
import sys
import unicodedata
_USAGE = """
Syntax: cpplint.py [--verbose=#] [--output=vs7] [--filter=-x,+y,...]
<file> [file] ...
The style guidelines this tries to follow are those in
http://google-styleguide.googlecode.com/svn/trunk/cppguide.xml
Every problem is given a confidence score from 1-5, with 5 meaning we are
certain of the problem, and 1 meaning it could be a legitimate construct.
This will miss some errors, and is not a substitute for a code review.
To prevent specific lines from being linted, add a '// NOLINT' comment to the
end of the line.
The files passed in will be linted; at least one file must be provided.
Linted extensions are .cc, .cpp, and .h. Other file types will be ignored.
Flags:
output=vs7
By default, the output is formatted to ease emacs parsing. Visual Studio
compatible output (vs7) may also be used. Other formats are unsupported.
verbose=#
Specify a number 0-5 to restrict errors to certain verbosity levels.
filter=-x,+y,...
Specify a comma-separated list of category-filters to apply: only
error messages whose category names pass the filters will be printed.
(Category names are printed with the message and look like
"[whitespace/indent]".) Filters are evaluated left to right.
"-FOO" and "FOO" means "do not print categories that start with FOO".
"+FOO" means "do print categories that start with FOO".
Examples: --filter=-whitespace,+whitespace/braces
--filter=whitespace,runtime/printf,+runtime/printf_format
--filter=-,+build/include_what_you_use
To see a list of all the categories used in cpplint, pass no arg:
--filter=
"""
# We categorize each error message we print. Here are the categories.
# We want an explicit list so we can list them all in cpplint --filter=.
# If you add a new error message with a new category, add it to the list
# here! cpplint_unittest.py should tell you if you forget to do this.
# \ used for clearer layout -- pylint: disable-msg=C6013
_ERROR_CATEGORIES = '''\
build/class
build/deprecated
build/endif_comment
build/forward_decl
build/header_guard
build/include
build/include_order
build/include_what_you_use
build/namespaces
build/printf_format
build/storage_class
legal/copyright
readability/braces
readability/casting
readability/check
readability/constructors
readability/fn_size
readability/function
readability/multiline_comment
readability/multiline_string
readability/streams
readability/todo
readability/utf8
runtime/arrays
runtime/casting
runtime/explicit
runtime/int
runtime/init
runtime/invalid_increment
runtime/memset
runtime/printf
runtime/printf_format
runtime/references
runtime/rtti
runtime/sizeof
runtime/string
runtime/threadsafe_fn
runtime/virtual
whitespace/blank_line
whitespace/braces
whitespace/comma
whitespace/comments
whitespace/end_of_line
whitespace/ending_newline
whitespace/indent
whitespace/labels
whitespace/line_length
whitespace/newline
whitespace/operators
whitespace/parens
whitespace/semicolon
whitespace/tab
whitespace/todo
'''
# We used to check for high-bit characters, but after much discussion we
# decided those were OK, as long as they were in UTF-8 and didn't represent
# hard-coded international strings, which belong in a separate i18n file.
# Headers that we consider STL headers.
_STL_HEADERS = frozenset([
'algobase.h', 'algorithm', 'alloc.h', 'bitset', 'deque', 'exception',
'function.h', 'functional', 'hash_map', 'hash_map.h', 'hash_set',
'hash_set.h', 'iterator', 'list', 'list.h', 'map', 'memory', 'pair.h',
'pthread_alloc', 'queue', 'set', 'set.h', 'sstream', 'stack',
'stl_alloc.h', 'stl_relops.h', 'type_traits.h',
'utility', 'vector', 'vector.h',
])
# Non-STL C++ system headers.
_CPP_HEADERS = frozenset([
'algo.h', 'builtinbuf.h', 'bvector.h', 'cassert', 'cctype',
'cerrno', 'cfloat', 'ciso646', 'climits', 'clocale', 'cmath',
'complex', 'complex.h', 'csetjmp', 'csignal', 'cstdarg', 'cstddef',
'cstdio', 'cstdlib', 'cstring', 'ctime', 'cwchar', 'cwctype',
'defalloc.h', 'deque.h', 'editbuf.h', 'exception', 'fstream',
'fstream.h', 'hashtable.h', 'heap.h', 'indstream.h', 'iomanip',
'iomanip.h', 'ios', 'iosfwd', 'iostream', 'iostream.h', 'istream.h',
'iterator.h', 'limits', 'map.h', 'multimap.h', 'multiset.h',
'numeric', 'ostream.h', 'parsestream.h', 'pfstream.h', 'PlotFile.h',
'procbuf.h', 'pthread_alloc.h', 'rope', 'rope.h', 'ropeimpl.h',
'SFile.h', 'slist', 'slist.h', 'stack.h', 'stdexcept',
'stdiostream.h', 'streambuf.h', 'stream.h', 'strfile.h', 'string',
'strstream', 'strstream.h', 'tempbuf.h', 'tree.h', 'typeinfo', 'valarray',
])
# Assertion macros. These are defined in base/logging.h and
# testing/base/gunit.h. Note that the _M versions need to come first
# for substring matching to work.
_CHECK_MACROS = [
'CHECK',
'EXPECT_TRUE_M', 'EXPECT_TRUE',
'ASSERT_TRUE_M', 'ASSERT_TRUE',
'EXPECT_FALSE_M', 'EXPECT_FALSE',
'ASSERT_FALSE_M', 'ASSERT_FALSE',
]
# Replacement macros for CHECK/EXPECT_TRUE/EXPECT_FALSE
_CHECK_REPLACEMENT = dict([(m, {}) for m in _CHECK_MACROS])
for op, replacement in [('==', 'EQ'), ('!=', 'NE'),
('>=', 'GE'), ('>', 'GT'),
('<=', 'LE'), ('<', 'LT')]:
_CHECK_REPLACEMENT['CHECK'][op] = 'CHECK_%s' % replacement
_CHECK_REPLACEMENT['EXPECT_TRUE'][op] = 'EXPECT_%s' % replacement
_CHECK_REPLACEMENT['ASSERT_TRUE'][op] = 'ASSERT_%s' % replacement
_CHECK_REPLACEMENT['EXPECT_TRUE_M'][op] = 'EXPECT_%s_M' % replacement
_CHECK_REPLACEMENT['ASSERT_TRUE_M'][op] = 'ASSERT_%s_M' % replacement
for op, inv_replacement in [('==', 'NE'), ('!=', 'EQ'),
('>=', 'LT'), ('>', 'LE'),
('<=', 'GT'), ('<', 'GE')]:
_CHECK_REPLACEMENT['EXPECT_FALSE'][op] = 'EXPECT_%s' % inv_replacement
_CHECK_REPLACEMENT['ASSERT_FALSE'][op] = 'ASSERT_%s' % inv_replacement
_CHECK_REPLACEMENT['EXPECT_FALSE_M'][op] = 'EXPECT_%s_M' % inv_replacement
_CHECK_REPLACEMENT['ASSERT_FALSE_M'][op] = 'ASSERT_%s_M' % inv_replacement
# These constants define types of headers for use with
# _IncludeState.CheckNextIncludeOrder().
_C_SYS_HEADER = 1
_CPP_SYS_HEADER = 2
_LIKELY_MY_HEADER = 3
_POSSIBLE_MY_HEADER = 4
_OTHER_HEADER = 5
_regexp_compile_cache = {}
def Match(pattern, s):
"""Matches the string with the pattern, caching the compiled regexp."""
# The regexp compilation caching is inlined in both Match and Search for
# performance reasons; factoring it out into a separate function turns out
# to be noticeably expensive.
if not pattern in _regexp_compile_cache:
_regexp_compile_cache[pattern] = sre_compile.compile(pattern)
return _regexp_compile_cache[pattern].match(s)
def Search(pattern, s):
"""Searches the string for the pattern, caching the compiled regexp."""
if not pattern in _regexp_compile_cache:
_regexp_compile_cache[pattern] = sre_compile.compile(pattern)
return _regexp_compile_cache[pattern].search(s)
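# Editor's illustrative sketch (not part of the original cpplint.py): Match anchors at
# the start of the string while Search scans anywhere, and both reuse the pattern
# compiled into _regexp_compile_cache.
def _match_search_demo():
  assert Match(r'\d+', 'abc123') is None        # 'abc123' does not start with digits
  assert Search(r'\d+', 'abc123') is not None   # but it does contain digits
  assert r'\d+' in _regexp_compile_cache        # compiled once, cached for reuse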
class _IncludeState(dict):
"""Tracks line numbers for includes, and the order in which includes appear.
As a dict, an _IncludeState object serves as a mapping between include
filename and line number on which that file was included.
Call CheckNextIncludeOrder() once for each header in the file, passing
in the type constants defined above. Calls in an illegal order will
raise an _IncludeError with an appropriate error message.
"""
# self._section will move monotonically through this set. If it ever
# needs to move backwards, CheckNextIncludeOrder will raise an error.
_INITIAL_SECTION = 0
_MY_H_SECTION = 1
_C_SECTION = 2
_CPP_SECTION = 3
_OTHER_H_SECTION = 4
_TYPE_NAMES = {
_C_SYS_HEADER: 'C system header',
_CPP_SYS_HEADER: 'C++ system header',
_LIKELY_MY_HEADER: 'header this file implements',
_POSSIBLE_MY_HEADER: 'header this file may implement',
_OTHER_HEADER: 'other header',
}
_SECTION_NAMES = {
_INITIAL_SECTION: "... nothing. (This can't be an error.)",
_MY_H_SECTION: 'a header this file implements',
_C_SECTION: 'C system header',
_CPP_SECTION: 'C++ system header',
_OTHER_H_SECTION: 'other header',
}
def __init__(self):
dict.__init__(self)
self._section = self._INITIAL_SECTION
def CheckNextIncludeOrder(self, header_type):
"""Returns a non-empty error message if the next header is out of order.
This function also updates the internal state to be ready to check
the next include.
Args:
header_type: One of the _XXX_HEADER constants defined above.
Returns:
The empty string if the header is in the right order, or an
error message describing what's wrong.
"""
error_message = ('Found %s after %s' %
(self._TYPE_NAMES[header_type],
self._SECTION_NAMES[self._section]))
if header_type == _C_SYS_HEADER:
if self._section <= self._C_SECTION:
self._section = self._C_SECTION
else:
return error_message
elif header_type == _CPP_SYS_HEADER:
if self._section <= self._CPP_SECTION:
self._section = self._CPP_SECTION
else:
return error_message
elif header_type == _LIKELY_MY_HEADER:
if self._section <= self._MY_H_SECTION:
self._section = self._MY_H_SECTION
else:
self._section = self._OTHER_H_SECTION
elif header_type == _POSSIBLE_MY_HEADER:
if self._section <= self._MY_H_SECTION:
self._section = self._MY_H_SECTION
else:
# This will always be the fallback because we're not sure
# enough that the header is associated with this file.
self._section = self._OTHER_H_SECTION
else:
assert header_type == _OTHER_HEADER
self._section = self._OTHER_H_SECTION
return ''
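# Editor's illustrative sketch (not part of the original cpplint.py): the include state
# is driven with one CheckNextIncludeOrder() call per #include found in a file.
def _include_state_demo():
  state = _IncludeState()
  assert state.CheckNextIncludeOrder(_C_SYS_HEADER) == ''    # e.g. <stdio.h>
  assert state.CheckNextIncludeOrder(_CPP_SYS_HEADER) == ''  # e.g. <vector>
  # Dropping back to a C system header after a C++ one is reported as out of order.
  assert state.CheckNextIncludeOrder(_C_SYS_HEADER) != ''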
class _CppLintState(object):
"""Maintains module-wide state.."""
def __init__(self):
self.verbose_level = 1 # global setting.
self.error_count = 0 # global count of reported errors
self.filters = [] # filters to apply when emitting error messages
# output format:
# "emacs" - format that emacs can parse (default)
# "vs7" - format that Microsoft Visual Studio 7 can parse
self.output_format = 'emacs'
def SetOutputFormat(self, output_format):
"""Sets the output format for errors."""
self.output_format = output_format
def SetVerboseLevel(self, level):
"""Sets the module's verbosity, and returns the previous setting."""
last_verbose_level = self.verbose_level
self.verbose_level = level
return last_verbose_level
def SetFilters(self, filters):
"""Sets the error-message filters.
These filters are applied when deciding whether to emit a given
error message.
Args:
filters: A string of comma-separated filters (eg "+whitespace/indent").
Each filter should start with + or -; else we die.
Raises:
ValueError: The comma-separated filters did not all start with '+' or '-'.
E.g. "-,+whitespace,-whitespace/indent,whitespace/badfilter"
"""
if not filters:
self.filters = []
else:
self.filters = filters.split(',')
for filt in self.filters:
if not (filt.startswith('+') or filt.startswith('-')):
raise ValueError('Every filter in --filters must start with + or -'
' (%s does not)' % filt)
def ResetErrorCount(self):
"""Sets the module's error statistic back to zero."""
self.error_count = 0
def IncrementErrorCount(self):
"""Bumps the module's error statistic."""
self.error_count += 1
_cpplint_state = _CppLintState()
def _OutputFormat():
"""Gets the module's output format."""
return _cpplint_state.output_format
def _SetOutputFormat(output_format):
"""Sets the module's output format."""
_cpplint_state.SetOutputFormat(output_format)
def _VerboseLevel():
"""Returns the module's verbosity setting."""
return _cpplint_state.verbose_level
def _SetVerboseLevel(level):
"""Sets the module's verbosity, and returns the previous setting."""
return _cpplint_state.SetVerboseLevel(level)
def _Filters():
"""Returns the module's list of output filters, as a list."""
return _cpplint_state.filters
def _SetFilters(filters):
"""Sets the module's error-message filters.
These filters are applied when deciding whether to emit a given
error message.
Args:
filters: A string of comma-separated filters (eg "whitespace/indent").
Each filter should start with + or -; else we die.
"""
_cpplint_state.SetFilters(filters)
class _FunctionState(object):
"""Tracks current function name and the number of lines in its body."""
_NORMAL_TRIGGER = 250 # for --v=0, 500 for --v=1, etc.
_TEST_TRIGGER = 400 # about 50% more than _NORMAL_TRIGGER.
def __init__(self):
self.in_a_function = False
self.lines_in_function = 0
self.current_function = ''
def Begin(self, function_name):
"""Start analyzing function body.
Args:
function_name: The name of the function being tracked.
"""
self.in_a_function = True
self.lines_in_function = 0
self.current_function = function_name
def Count(self):
"""Count line in current function body."""
if self.in_a_function:
self.lines_in_function += 1
def Check(self, error, filename, linenum):
"""Report if too many lines in function body.
Args:
error: The function to call with any errors found.
filename: The name of the current file.
linenum: The number of the line to check.
"""
if Match(r'T(EST|est)', self.current_function):
base_trigger = self._TEST_TRIGGER
else:
base_trigger = self._NORMAL_TRIGGER
trigger = base_trigger * 2**_VerboseLevel()
if self.lines_in_function > trigger:
error_level = int(math.log(self.lines_in_function / base_trigger, 2))
      # with the normal base trigger of 250: 250 => 0, 500 => 1, 1000 => 2, 2000 => 3, ...
if error_level > 5:
error_level = 5
error(filename, linenum, 'readability/fn_size', error_level,
'Small and focused functions are preferred:'
' %s has %d non-comment lines'
' (error triggered by exceeding %d lines).' % (
self.current_function, self.lines_in_function, trigger))
def End(self):
"""Stop analizing function body."""
self.in_a_function = False
class _IncludeError(Exception):
"""Indicates a problem with the include order in a file."""
pass
class FileInfo:
"""Provides utility functions for filenames.
FileInfo provides easy access to the components of a file's path
relative to the project root.
"""
def __init__(self, filename):
self._filename = filename
def FullName(self):
"""Make Windows paths like Unix."""
return os.path.abspath(self._filename).replace('\\', '/')
def RepositoryName(self):
"""FullName after removing the local path to the repository.
If we have a real absolute path name here we can try to do something smart:
detecting the root of the checkout and truncating /path/to/checkout from
the name so that we get header guards that don't include things like
"C:\Documents and Settings\..." or "/home/username/..." in them and thus
people on different computers who have checked the source out to different
locations won't see bogus errors.
"""
fullname = self.FullName()
if os.path.exists(fullname):
project_dir = os.path.dirname(fullname)
if os.path.exists(os.path.join(project_dir, ".svn")):
# If there's a .svn file in the current directory, we recursively look
# up the directory tree for the top of the SVN checkout
root_dir = project_dir
one_up_dir = os.path.dirname(root_dir)
while os.path.exists(os.path.join(one_up_dir, ".svn")):
root_dir = os.path.dirname(root_dir)
one_up_dir = os.path.dirname(one_up_dir)
prefix = os.path.commonprefix([root_dir, project_dir])
return fullname[len(prefix) + 1:]
# Not SVN? Try to find a git top level directory by searching up from the
# current path.
root_dir = os.path.dirname(fullname)
while (root_dir != os.path.dirname(root_dir) and
not os.path.exists(os.path.join(root_dir, ".git"))):
root_dir = os.path.dirname(root_dir)
if os.path.exists(os.path.join(root_dir, ".git")):
prefix = os.path.commonprefix([root_dir, project_dir])
return fullname[len(prefix) + 1:]
# Don't know what to do; header guard warnings may be wrong...
return fullname
def Split(self):
"""Splits the file into the directory, basename, and extension.
For 'chrome/browser/browser.cc', Split() would
return ('chrome/browser', 'browser', '.cc')
Returns:
A tuple of (directory, basename, extension).
"""
googlename = self.RepositoryName()
project, rest = os.path.split(googlename)
return (project,) + os.path.splitext(rest)
def BaseName(self):
"""File base name - text after the final slash, before the final period."""
return self.Split()[1]
def Extension(self):
"""File extension - text following the final period."""
return self.Split()[2]
def NoExtension(self):
"""File has no source file extension."""
return '/'.join(self.Split()[0:2])
def IsSource(self):
"""File has a source file extension."""
return self.Extension()[1:] in ('c', 'cc', 'cpp', 'cxx')
def _ShouldPrintError(category, confidence):
"""Returns true iff confidence >= verbose, and category passes filter."""
# There are two ways we might decide not to print an error message:
# the verbosity level isn't high enough, or the filters filter it out.
if confidence < _cpplint_state.verbose_level:
return False
is_filtered = False
for one_filter in _Filters():
if one_filter.startswith('-'):
if category.startswith(one_filter[1:]):
is_filtered = True
elif one_filter.startswith('+'):
if category.startswith(one_filter[1:]):
is_filtered = False
else:
assert False # should have been checked for in SetFilter.
if is_filtered:
return False
return True
def Error(filename, linenum, category, confidence, message):
"""Logs the fact we've found a lint error.
We log where the error was found, and also our confidence in the error,
that is, how certain we are this is a legitimate style regression, and
not a misidentification or a use that's sometimes justified.
Args:
filename: The name of the file containing the error.
linenum: The number of the line containing the error.
category: A string used to describe the "category" this bug
falls under: "whitespace", say, or "runtime". Categories
may have a hierarchy separated by slashes: "whitespace/indent".
confidence: A number from 1-5 representing a confidence score for
the error, with 5 meaning that we are certain of the problem,
and 1 meaning that it could be a legitimate construct.
message: The error message.
"""
# There are two ways we might decide not to print an error message:
# the verbosity level isn't high enough, or the filters filter it out.
if _ShouldPrintError(category, confidence):
_cpplint_state.IncrementErrorCount()
if _cpplint_state.output_format == 'vs7':
sys.stderr.write('%s(%s): %s [%s] [%d]\n' % (
filename, linenum, message, category, confidence))
else:
sys.stderr.write('%s:%s: %s [%s] [%d]\n' % (
filename, linenum, message, category, confidence))
# Matches standard C++ escape sequences per 2.13.2.3 of the C++ standard.
_RE_PATTERN_CLEANSE_LINE_ESCAPES = re.compile(
r'\\([abfnrtv?"\\\']|\d+|x[0-9a-fA-F]+)')
# Matches strings. Escape codes should already be removed by ESCAPES.
_RE_PATTERN_CLEANSE_LINE_DOUBLE_QUOTES = re.compile(r'"[^"]*"')
# Matches characters. Escape codes should already be removed by ESCAPES.
_RE_PATTERN_CLEANSE_LINE_SINGLE_QUOTES = re.compile(r"'.'")
# Matches multi-line C++ comments.
# This RE is a little bit more complicated than one might expect, because we
# have to take care of space removals tools so we can handle comments inside
# statements better.
# The current rule is: We only clear spaces from both sides when we're at the
# end of the line. Otherwise, we try to remove spaces from the right side,
# if this doesn't work we try on left side but only if there's a non-character
# on the right.
_RE_PATTERN_CLEANSE_LINE_C_COMMENTS = re.compile(
r"""(\s*/\*.*\*/\s*$|
/\*.*\*/\s+|
\s+/\*.*\*/(?=\W)|
/\*.*\*/)""", re.VERBOSE)
def IsCppString(line):
"""Does line terminate so, that the next symbol is in string constant.
This function does not consider single-line nor multi-line comments.
Args:
line: is a partial line of code starting from the 0..n.
Returns:
True, if next character appended to 'line' is inside a
string constant.
"""
line = line.replace(r'\\', 'XX') # after this, \\" does not match to \"
return ((line.count('"') - line.count(r'\"') - line.count("'\"'")) & 1) == 1
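# Editor's illustrative sketch (not part of the original cpplint.py).
def _is_cpp_string_demo():
  assert IsCppString('x = "abc')       # unterminated string: the next char is inside it
  assert not IsCppString('x = "abc"')  # string closed: the next char is outside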
def FindNextMultiLineCommentStart(lines, lineix):
"""Find the beginning marker for a multiline comment."""
while lineix < len(lines):
if lines[lineix].strip().startswith('/*'):
# Only return this marker if the comment goes beyond this line
if lines[lineix].strip().find('*/', 2) < 0:
return lineix
lineix += 1
return len(lines)
def FindNextMultiLineCommentEnd(lines, lineix):
"""We are inside a comment, find the end marker."""
while lineix < len(lines):
if lines[lineix].strip().endswith('*/'):
return lineix
lineix += 1
return len(lines)
def RemoveMultiLineCommentsFromRange(lines, begin, end):
"""Clears a range of lines for multi-line comments."""
# Having // dummy comments makes the lines non-empty, so we will not get
# unnecessary blank line warnings later in the code.
for i in range(begin, end):
lines[i] = '// dummy'
def RemoveMultiLineComments(filename, lines, error):
"""Removes multiline (c-style) comments from lines."""
lineix = 0
while lineix < len(lines):
lineix_begin = FindNextMultiLineCommentStart(lines, lineix)
if lineix_begin >= len(lines):
return
lineix_end = FindNextMultiLineCommentEnd(lines, lineix_begin)
if lineix_end >= len(lines):
error(filename, lineix_begin + 1, 'readability/multiline_comment', 5,
'Could not find end of multi-line comment')
return
RemoveMultiLineCommentsFromRange(lines, lineix_begin, lineix_end + 1)
lineix = lineix_end + 1
def CleanseComments(line):
"""Removes //-comments and single-line C-style /* */ comments.
Args:
line: A line of C++ source.
Returns:
The line with single-line comments removed.
"""
commentpos = line.find('//')
if commentpos != -1 and not IsCppString(line[:commentpos]):
line = line[:commentpos]
# get rid of /* ... */
return _RE_PATTERN_CLEANSE_LINE_C_COMMENTS.sub('', line)
class CleansedLines(object):
"""Holds 3 copies of all lines with different preprocessing applied to them.
1) elided member contains lines without strings and comments,
2) lines member contains lines without comments, and
3) raw member contains all the lines without processing.
All these three members are of <type 'list'>, and of the same length.
"""
def __init__(self, lines):
self.elided = []
self.lines = []
self.raw_lines = lines
self.num_lines = len(lines)
for linenum in range(len(lines)):
self.lines.append(CleanseComments(lines[linenum]))
elided = self._CollapseStrings(lines[linenum])
self.elided.append(CleanseComments(elided))
def NumLines(self):
"""Returns the number of lines represented."""
return self.num_lines
@staticmethod
def _CollapseStrings(elided):
"""Collapses strings and chars on a line to simple "" or '' blocks.
We nix strings first so we're not fooled by text like '"http://"'
Args:
elided: The line being processed.
Returns:
The line with collapsed strings.
"""
if not _RE_PATTERN_INCLUDE.match(elided):
# Remove escaped characters first to make quote/single quote collapsing
# basic. Things that look like escaped characters shouldn't occur
# outside of strings and chars.
elided = _RE_PATTERN_CLEANSE_LINE_ESCAPES.sub('', elided)
elided = _RE_PATTERN_CLEANSE_LINE_SINGLE_QUOTES.sub("''", elided)
elided = _RE_PATTERN_CLEANSE_LINE_DOUBLE_QUOTES.sub('""', elided)
return elided
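# Editor's illustrative sketch (not part of the original cpplint.py): the three views
# kept by CleansedLines for a single source line.
def _cleansed_lines_demo():
  cl = CleansedLines(["f(\"ab\") + g('c'); // trailing comment"])
  assert cl.lines[0].strip() == "f(\"ab\") + g('c');"   # comment removed, strings kept
  assert cl.elided[0].strip() == "f(\"\") + g('');"     # strings and chars collapsed too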
def CloseExpression(clean_lines, linenum, pos):
"""If input points to ( or { or [, finds the position that closes it.
  If lines[linenum][pos] points to a '(' or '{' or '[', finds the
linenum/pos that correspond to the closing of the expression.
Args:
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
pos: A position on the line.
Returns:
A tuple (line, linenum, pos) pointer *past* the closing brace, or
(line, len(lines), -1) if we never find a close. Note we ignore
strings and comments when matching; and the line we return is the
'cleansed' line at linenum.
"""
line = clean_lines.elided[linenum]
startchar = line[pos]
if startchar not in '({[':
return (line, clean_lines.NumLines(), -1)
if startchar == '(': endchar = ')'
if startchar == '[': endchar = ']'
if startchar == '{': endchar = '}'
num_open = line.count(startchar) - line.count(endchar)
while linenum < clean_lines.NumLines() and num_open > 0:
linenum += 1
line = clean_lines.elided[linenum]
num_open += line.count(startchar) - line.count(endchar)
# OK, now find the endchar that actually got us back to even
endpos = len(line)
while num_open >= 0:
    endpos = line.rfind(endchar, 0, endpos)  # search for the matching close char, not always ')'
num_open -= 1 # chopped off another )
return (line, linenum, endpos + 1)
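# Editor's illustrative sketch (not part of the original cpplint.py): find the position
# just past the token that closes the one at the given position.
def _close_expression_demo():
  cl = CleansedLines(['foo(bar(1, 2), baz);'])
  _line, lnum, pos = CloseExpression(cl, 0, 3)  # index 3 is the '(' following foo
  assert (lnum, pos) == (0, 19)                 # index 19 is just past the matching ')'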
def CheckForCopyright(filename, lines, error):
"""Logs an error if no Copyright message appears at the top of the file."""
# We'll say it should occur by line 10. Don't forget there's a
# dummy line at the front.
for line in xrange(1, min(len(lines), 11)):
if re.search(r'Copyright', lines[line], re.I): break
else: # means no copyright line was found
error(filename, 0, 'legal/copyright', 5,
'No copyright message found. '
'You should have a line: "Copyright [year] <Copyright Owner>"')
def GetHeaderGuardCPPVariable(filename):
"""Returns the CPP variable that should be used as a header guard.
Args:
filename: The name of a C++ header file.
Returns:
The CPP variable that should be used as a header guard in the
named file.
"""
fileinfo = FileInfo(filename)
return re.sub(r'[-./\s]', '_', fileinfo.RepositoryName()).upper() + '_'
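# Editor's illustrative sketch (not part of the original cpplint.py): the same transform
# applied to a repository-relative header path.
def _header_guard_demo():
  guard = re.sub(r'[-./\s]', '_', 'chrome/browser/browser.h').upper() + '_'
  assert guard == 'CHROME_BROWSER_BROWSER_H_'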
def CheckForHeaderGuard(filename, lines, error):
"""Checks that the file contains a header guard.
Logs an error if no #ifndef header guard is present. For other
headers, checks that the full pathname is used.
Args:
filename: The name of the C++ header file.
lines: An array of strings, each representing a line of the file.
error: The function to call with any errors found.
"""
cppvar = GetHeaderGuardCPPVariable(filename)
ifndef = None
ifndef_linenum = 0
define = None
endif = None
endif_linenum = 0
for linenum, line in enumerate(lines):
linesplit = line.split()
if len(linesplit) >= 2:
# find the first occurrence of #ifndef and #define, save arg
if not ifndef and linesplit[0] == '#ifndef':
# set ifndef to the header guard presented on the #ifndef line.
ifndef = linesplit[1]
ifndef_linenum = linenum
if not define and linesplit[0] == '#define':
define = linesplit[1]
# find the last occurrence of #endif, save entire line
if line.startswith('#endif'):
endif = line
endif_linenum = linenum
if not ifndef or not define or ifndef != define:
error(filename, 0, 'build/header_guard', 5,
'No #ifndef header guard found, suggested CPP variable is: %s' %
cppvar)
return
# The guard should be PATH_FILE_H_, but we also allow PATH_FILE_H__
# for backward compatibility.
if ifndef != cppvar:
error_level = 0
if ifndef != cppvar + '_':
error_level = 5
error(filename, ifndef_linenum, 'build/header_guard', error_level,
'#ifndef header guard has wrong style, please use: %s' % cppvar)
if endif != ('#endif // %s' % cppvar):
error_level = 0
if endif != ('#endif // %s' % (cppvar + '_')):
error_level = 5
error(filename, endif_linenum, 'build/header_guard', error_level,
'#endif line should be "#endif // %s"' % cppvar)
def CheckForUnicodeReplacementCharacters(filename, lines, error):
"""Logs an error for each line containing Unicode replacement characters.
These indicate that either the file contained invalid UTF-8 (likely)
or Unicode replacement characters (which it shouldn't). Note that
it's possible for this to throw off line numbering if the invalid
UTF-8 occurred adjacent to a newline.
Args:
filename: The name of the current file.
lines: An array of strings, each representing a line of the file.
error: The function to call with any errors found.
"""
for linenum, line in enumerate(lines):
if u'\ufffd' in line:
error(filename, linenum, 'readability/utf8', 5,
'Line contains invalid UTF-8 (or Unicode replacement character).')
def CheckForNewlineAtEOF(filename, lines, error):
"""Logs an error if there is no newline char at the end of the file.
Args:
filename: The name of the current file.
lines: An array of strings, each representing a line of the file.
error: The function to call with any errors found.
"""
# The array lines() was created by adding two newlines to the
# original file (go figure), then splitting on \n.
# To verify that the file ends in \n, we just have to make sure the
# last-but-two element of lines() exists and is empty.
if len(lines) < 3 or lines[-2]:
error(filename, len(lines) - 2, 'whitespace/ending_newline', 5,
'Could not find a newline character at the end of the file.')
def CheckForMultilineCommentsAndStrings(filename, clean_lines, linenum, error):
"""Logs an error if we see /* ... */ or "..." that extend past one line.
/* ... */ comments are legit inside macros, for one line.
Otherwise, we prefer // comments, so it's ok to warn about the
other. Likewise, it's ok for strings to extend across multiple
lines, as long as a line continuation character (backslash)
terminates each line. Although not currently prohibited by the C++
style guide, it's ugly and unnecessary. We don't do well with either
in this lint program, so we warn about both.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum]
# Remove all \\ (escaped backslashes) from the line. They are OK, and the
# second (escaped) slash may trigger later \" detection erroneously.
line = line.replace('\\\\', '')
if line.count('/*') > line.count('*/'):
error(filename, linenum, 'readability/multiline_comment', 5,
'Complex multi-line /*...*/-style comment found. '
'Lint may give bogus warnings. '
'Consider replacing these with //-style comments, '
'with #if 0...#endif, '
'or with more clearly structured multi-line comments.')
if (line.count('"') - line.count('\\"')) % 2:
error(filename, linenum, 'readability/multiline_string', 5,
'Multi-line string ("...") found. This lint script doesn\'t '
'do well with such strings, and may give bogus warnings. They\'re '
'ugly and unnecessary, and you should use concatenation instead".')
threading_list = (
('asctime(', 'asctime_r('),
('ctime(', 'ctime_r('),
('getgrgid(', 'getgrgid_r('),
('getgrnam(', 'getgrnam_r('),
('getlogin(', 'getlogin_r('),
('getpwnam(', 'getpwnam_r('),
('getpwuid(', 'getpwuid_r('),
('gmtime(', 'gmtime_r('),
('localtime(', 'localtime_r('),
('rand(', 'rand_r('),
('readdir(', 'readdir_r('),
('strtok(', 'strtok_r('),
('ttyname(', 'ttyname_r('),
)
def CheckPosixThreading(filename, clean_lines, linenum, error):
"""Checks for calls to thread-unsafe functions.
Much code has been originally written without consideration of
multi-threading. Also, engineers are relying on their old experience;
they have learned posix before threading extensions were added. These
tests guide the engineers to use thread-safe functions (when using
posix directly).
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum]
for single_thread_function, multithread_safe_function in threading_list:
ix = line.find(single_thread_function)
# Comparisons made explicit for clarity -- pylint: disable-msg=C6403
if ix >= 0 and (ix == 0 or (not line[ix - 1].isalnum() and
line[ix - 1] not in ('_', '.', '>'))):
error(filename, linenum, 'runtime/threadsafe_fn', 2,
'Consider using ' + multithread_safe_function +
'...) instead of ' + single_thread_function +
'...) for improved thread safety.')
# Matches invalid increment: *count++, which moves pointer instead of
# incrementing a value.
_RE_PATTERN_IVALID_INCREMENT = re.compile(
r'^\s*\*\w+(\+\+|--);')
def CheckInvalidIncrement(filename, clean_lines, linenum, error):
"""Checks for invalud increment *count++.
For example following function:
void increment_counter(int* count) {
*count++;
}
  is invalid, because it effectively does count++, moving the pointer, and should
be replaced with ++*count, (*count)++ or *count += 1.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum]
if _RE_PATTERN_IVALID_INCREMENT.match(line):
error(filename, linenum, 'runtime/invalid_increment', 5,
'Changing pointer instead of value (or unused value of operator*).')
class _ClassInfo(object):
"""Stores information about a class."""
def __init__(self, name, linenum):
self.name = name
self.linenum = linenum
self.seen_open_brace = False
self.is_derived = False
self.virtual_method_linenumber = None
self.has_virtual_destructor = False
self.brace_depth = 0
class _ClassState(object):
"""Holds the current state of the parse relating to class declarations.
It maintains a stack of _ClassInfos representing the parser's guess
as to the current nesting of class declarations. The innermost class
is at the top (back) of the stack. Typically, the stack will either
be empty or have exactly one entry.
"""
def __init__(self):
self.classinfo_stack = []
def CheckFinished(self, filename, error):
"""Checks that all classes have been completely parsed.
Call this when all lines in a file have been processed.
Args:
filename: The name of the current file.
error: The function to call with any errors found.
"""
if self.classinfo_stack:
# Note: This test can result in false positives if #ifdef constructs
# get in the way of brace matching. See the testBuildClass test in
# cpplint_unittest.py for an example of this.
error(filename, self.classinfo_stack[0].linenum, 'build/class', 5,
'Failed to find complete declaration of class %s' %
self.classinfo_stack[0].name)
def CheckForNonStandardConstructs(filename, clean_lines, linenum,
class_state, error):
"""Logs an error if we see certain non-ANSI constructs ignored by gcc-2.
Complain about several constructs which gcc-2 accepts, but which are
not standard C++. Warning about these in lint is one way to ease the
transition to new compilers.
- put storage class first (e.g. "static const" instead of "const static").
- "%lld" instead of %qd" in printf-type functions.
- "%1$d" is non-standard in printf-type functions.
- "\%" is an undefined character escape sequence.
- text after #endif is not allowed.
- invalid inner-style forward declaration.
- >? and <? operators, and their >?= and <?= cousins.
- classes with virtual methods need virtual destructors (compiler warning
available, but not turned on yet.)
Additionally, check for constructor/destructor style violations as it
is very convenient to do so while checking for gcc-2 compliance.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
class_state: A _ClassState instance which maintains information about
the current stack of nested class declarations being parsed.
error: A callable to which errors are reported, which takes 4 arguments:
filename, line number, error level, and message
"""
# Remove comments from the line, but leave in strings for now.
line = clean_lines.lines[linenum]
if Search(r'printf\s*\(.*".*%[-+ ]?\d*q', line):
error(filename, linenum, 'runtime/printf_format', 3,
'%q in format strings is deprecated. Use %ll instead.')
if Search(r'printf\s*\(.*".*%\d+\$', line):
error(filename, linenum, 'runtime/printf_format', 2,
'%N$ formats are unconventional. Try rewriting to avoid them.')
# Remove escaped backslashes before looking for undefined escapes.
line = line.replace('\\\\', '')
if Search(r'("|\').*\\(%|\[|\(|{)', line):
error(filename, linenum, 'build/printf_format', 3,
'%, [, (, and { are undefined character escapes. Unescape them.')
# For the rest, work with both comments and strings removed.
line = clean_lines.elided[linenum]
if Search(r'\b(const|volatile|void|char|short|int|long'
r'|float|double|signed|unsigned'
r'|schar|u?int8|u?int16|u?int32|u?int64)'
r'\s+(auto|register|static|extern|typedef)\b',
line):
error(filename, linenum, 'build/storage_class', 5,
'Storage class (static, extern, typedef, etc) should be first.')
if Match(r'\s*#\s*endif\s*[^/\s]+', line):
error(filename, linenum, 'build/endif_comment', 5,
'Uncommented text after #endif is non-standard. Use a comment.')
if Match(r'\s*class\s+(\w+\s*::\s*)+\w+\s*;', line):
error(filename, linenum, 'build/forward_decl', 5,
'Inner-style forward declarations are invalid. Remove this line.')
if Search(r'(\w+|[+-]?\d+(\.\d*)?)\s*(<|>)\?=?\s*(\w+|[+-]?\d+)(\.\d*)?',
line):
error(filename, linenum, 'build/deprecated', 3,
'>? and <? (max and min) operators are non-standard and deprecated.')
# Track class entry and exit, and attempt to find cases within the
# class declaration that don't meet the C++ style
# guidelines. Tracking is very dependent on the code matching Google
# style guidelines, but it seems to perform well enough in testing
# to be a worthwhile addition to the checks.
classinfo_stack = class_state.classinfo_stack
# Look for a class declaration
class_decl_match = Match(
r'\s*(template\s*<[\w\s<>,:]*>\s*)?(class|struct)\s+(\w+(::\w+)*)', line)
if class_decl_match:
classinfo_stack.append(_ClassInfo(class_decl_match.group(3), linenum))
# Everything else in this function uses the top of the stack if it's
# not empty.
if not classinfo_stack:
return
classinfo = classinfo_stack[-1]
# If the opening brace hasn't been seen look for it and also
# parent class declarations.
if not classinfo.seen_open_brace:
# If the line has a ';' in it, assume it's a forward declaration or
# a single-line class declaration, which we won't process.
if line.find(';') != -1:
classinfo_stack.pop()
return
classinfo.seen_open_brace = (line.find('{') != -1)
# Look for a bare ':'
if Search('(^|[^:]):($|[^:])', line):
classinfo.is_derived = True
if not classinfo.seen_open_brace:
return # Everything else in this function is for after open brace
# The class may have been declared with namespace or classname qualifiers.
# The constructor and destructor will not have those qualifiers.
base_classname = classinfo.name.split('::')[-1]
# Look for single-argument constructors that aren't marked explicit.
# Technically a valid construct, but against style.
args = Match(r'(?<!explicit)\s+%s\s*\(([^,()]+)\)'
% re.escape(base_classname),
line)
if (args and
args.group(1) != 'void' and
not Match(r'(const\s+)?%s\s*&' % re.escape(base_classname),
args.group(1).strip())):
error(filename, linenum, 'runtime/explicit', 5,
'Single-argument constructors should be marked explicit.')
# Look for methods declared virtual.
if Search(r'\bvirtual\b', line):
classinfo.virtual_method_linenumber = linenum
# Only look for a destructor declaration on the same line. It would
# be extremely unlikely for the destructor declaration to occupy
# more than one line.
if Search(r'~%s\s*\(' % base_classname, line):
classinfo.has_virtual_destructor = True
# Look for class end.
brace_depth = classinfo.brace_depth
brace_depth = brace_depth + line.count('{') - line.count('}')
if brace_depth <= 0:
classinfo = classinfo_stack.pop()
# Try to detect missing virtual destructor declarations.
# For now, only warn if a non-derived class with virtual methods lacks
# a virtual destructor. This is to make it less likely that people will
# declare derived virtual destructors without declaring the base
# destructor virtual.
if ((classinfo.virtual_method_linenumber is not None) and
(not classinfo.has_virtual_destructor) and
(not classinfo.is_derived)): # Only warn for base classes
error(filename, classinfo.linenum, 'runtime/virtual', 4,
'The class %s probably needs a virtual destructor due to '
'having virtual method(s), one declared at line %d.'
% (classinfo.name, classinfo.virtual_method_linenumber))
else:
classinfo.brace_depth = brace_depth
def CheckSpacingForFunctionCall(filename, line, linenum, error):
"""Checks for the correctness of various spacing around function calls.
Args:
filename: The name of the current file.
line: The text of the line to check.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
# Since function calls often occur inside if/for/while/switch
# expressions - which have their own, more liberal conventions - we
# first see if we should be looking inside such an expression for a
# function call, to which we can apply more strict standards.
fncall = line # if there's no control flow construct, look at whole line
for pattern in (r'\bif\s*\((.*)\)\s*{',
r'\bfor\s*\((.*)\)\s*{',
r'\bwhile\s*\((.*)\)\s*[{;]',
r'\bswitch\s*\((.*)\)\s*{'):
match = Search(pattern, line)
if match:
fncall = match.group(1) # look inside the parens for function calls
break
# Except in if/for/while/switch, there should never be space
# immediately inside parens (eg "f( 3, 4 )"). We make an exception
# for nested parens ( (a+b) + c ). Likewise, there should never be
# a space before a ( when it's a function argument. I assume it's a
# function argument when the char before the whitespace is legal in
# a function name (alnum + _) and we're not starting a macro. Also ignore
# pointers and references to arrays and functions coz they're too tricky:
# we use a very simple way to recognize these:
# " (something)(maybe-something)" or
# " (something)(maybe-something," or
# " (something)[something]"
# Note that we assume the contents of [] to be short enough that
# they'll never need to wrap.
if ( # Ignore control structures.
not Search(r'\b(if|for|while|switch|return|delete)\b', fncall) and
# Ignore pointers/references to functions.
not Search(r' \([^)]+\)\([^)]*(\)|,$)', fncall) and
# Ignore pointers/references to arrays.
not Search(r' \([^)]+\)\[[^\]]+\]', fncall)):
if Search(r'\w\s*\(\s(?!\s*\\$)', fncall): # a ( used for a fn call
error(filename, linenum, 'whitespace/parens', 4,
'Extra space after ( in function call')
elif Search(r'\(\s+(?!(\s*\\)|\()', fncall):
error(filename, linenum, 'whitespace/parens', 2,
'Extra space after (')
if (Search(r'\w\s+\(', fncall) and
not Search(r'#\s*define|typedef', fncall)):
error(filename, linenum, 'whitespace/parens', 4,
'Extra space before ( in function call')
# If the ) is followed only by a newline or a { + newline, assume it's
# part of a control statement (if/while/etc), and don't complain
if Search(r'[^)]\s+\)\s*[^{\s]', fncall):
error(filename, linenum, 'whitespace/parens', 2,
'Extra space before )')
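# Illustrative examples (added note): given the rules above, 'Foo( x )' is
# reported for the extra space after '(', 'Foo (x)' for the extra space before
# '(' in a function call, while 'if (x) {' is skipped because control-flow
# keywords are excluded. 'Foo' and 'x' are hypothetical identifiers.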
def IsBlankLine(line):
"""Returns true if the given line is blank.
We consider a line to be blank if the line is empty or consists of
only white spaces.
Args:
    line: A line of text.
Returns:
True, if the given line is blank.
"""
return not line or line.isspace()
def CheckForFunctionLengths(filename, clean_lines, linenum,
function_state, error):
"""Reports for long function bodies.
For an overview why this is done, see:
http://google-styleguide.googlecode.com/svn/trunk/cppguide.xml#Write_Short_Functions
Uses a simplistic algorithm assuming other style guidelines
(especially spacing) are followed.
Only checks unindented functions, so class members are unchecked.
Trivial bodies are unchecked, so constructors with huge initializer lists
may be missed.
Blank/comment lines are not counted so as to avoid encouraging the removal
  of vertical space and comments just to get through a lint check.
NOLINT *on the last line of a function* disables this check.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
function_state: Current function name and lines in body so far.
error: The function to call with any errors found.
"""
lines = clean_lines.lines
line = lines[linenum]
raw = clean_lines.raw_lines
raw_line = raw[linenum]
joined_line = ''
starting_func = False
regexp = r'(\w(\w|::|\*|\&|\s)*)\(' # decls * & space::name( ...
match_result = Match(regexp, line)
if match_result:
# If the name is all caps and underscores, figure it's a macro and
# ignore it, unless it's TEST or TEST_F.
function_name = match_result.group(1).split()[-1]
if function_name == 'TEST' or function_name == 'TEST_F' or (
not Match(r'[A-Z_]+$', function_name)):
starting_func = True
if starting_func:
body_found = False
for start_linenum in xrange(linenum, clean_lines.NumLines()):
start_line = lines[start_linenum]
joined_line += ' ' + start_line.lstrip()
if Search(r'(;|})', start_line): # Declarations and trivial functions
body_found = True
break # ... ignore
elif Search(r'{', start_line):
body_found = True
function = Search(r'((\w|:)*)\(', line).group(1)
if Match(r'TEST', function): # Handle TEST... macros
parameter_regexp = Search(r'(\(.*\))', joined_line)
if parameter_regexp: # Ignore bad syntax
function += parameter_regexp.group(1)
else:
function += '()'
function_state.Begin(function)
break
if not body_found:
# No body for the function (or evidence of a non-function) was found.
error(filename, linenum, 'readability/fn_size', 5,
'Lint failed to find start of function body.')
elif Match(r'^\}\s*$', line): # function end
if not Search(r'\bNOLINT\b', raw_line):
function_state.Check(error, filename, linenum)
function_state.End()
elif not Match(r'^\s*$', line):
function_state.Count() # Count non-blank/non-comment lines.
_RE_PATTERN_TODO = re.compile(r'^//(\s*)TODO(\(.+?\))?:?(\s|$)?')
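# Illustrative examples (added note): _RE_PATTERN_TODO accepts
# '// TODO(username): fix this'; CheckComment below flags '// TODO fix this'
# (missing username) and '//   TODO(x): ...' (too many spaces before TODO).
# The usernames shown are hypothetical.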
def CheckComment(comment, filename, linenum, error):
"""Checks for common mistakes in TODO comments.
Args:
comment: The text of the comment from the line in question.
filename: The name of the current file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
match = _RE_PATTERN_TODO.match(comment)
if match:
# One whitespace is correct; zero whitespace is handled elsewhere.
leading_whitespace = match.group(1)
if len(leading_whitespace) > 1:
error(filename, linenum, 'whitespace/todo', 2,
'Too many spaces before TODO')
username = match.group(2)
if not username:
error(filename, linenum, 'readability/todo', 2,
'Missing username in TODO; it should look like '
'"// TODO(my_username): Stuff."')
middle_whitespace = match.group(3)
# Comparisons made explicit for correctness -- pylint: disable-msg=C6403
if middle_whitespace != ' ' and middle_whitespace != '':
error(filename, linenum, 'whitespace/todo', 2,
'TODO(my_username) should be followed by a space')
def CheckSpacing(filename, clean_lines, linenum, error):
"""Checks for the correctness of various spacing issues in the code.
Things we check for: spaces around operators, spaces after
if/for/while/switch, no spaces around parens in function calls, two
spaces between code and comment, don't start a block with a blank
line, don't end a function with a blank line, don't have too many
blank lines in a row.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
raw = clean_lines.raw_lines
line = raw[linenum]
# Before nixing comments, check if the line is blank for no good
# reason. This includes the first line after a block is opened, and
# blank lines at the end of a function (ie, right before a line like '}'
if IsBlankLine(line):
elided = clean_lines.elided
prev_line = elided[linenum - 1]
prevbrace = prev_line.rfind('{')
# TODO(unknown): Don't complain if line before blank line, and line after,
# both start with alnums and are indented the same amount.
# This ignores whitespace at the start of a namespace block
# because those are not usually indented.
if (prevbrace != -1 and prev_line[prevbrace:].find('}') == -1
and prev_line[:prevbrace].find('namespace') == -1):
# OK, we have a blank line at the start of a code block. Before we
# complain, we check if it is an exception to the rule: The previous
      # non-empty line has the parameters of a function header that are indented
# 4 spaces (because they did not fit in a 80 column line when placed on
# the same line as the function name). We also check for the case where
# the previous line is indented 6 spaces, which may happen when the
# initializers of a constructor do not fit into a 80 column line.
exception = False
if Match(r' {6}\w', prev_line): # Initializer list?
# We are looking for the opening column of initializer list, which
# should be indented 4 spaces to cause 6 space indentation afterwards.
search_position = linenum-2
while (search_position >= 0
and Match(r' {6}\w', elided[search_position])):
search_position -= 1
exception = (search_position >= 0
and elided[search_position][:5] == ' :')
else:
# Search for the function arguments or an initializer list. We use a
# simple heuristic here: If the line is indented 4 spaces; and we have a
# closing paren, without the opening paren, followed by an opening brace
# or colon (for initializer lists) we assume that it is the last line of
# a function header. If we have a colon indented 4 spaces, it is an
# initializer list.
exception = (Match(r' {4}\w[^\(]*\)\s*(const\s*)?(\{\s*$|:)',
prev_line)
or Match(r' {4}:', prev_line))
if not exception:
error(filename, linenum, 'whitespace/blank_line', 2,
'Blank line at the start of a code block. Is this needed?')
# This doesn't ignore whitespace at the end of a namespace block
# because that is too hard without pairing open/close braces;
# however, a special exception is made for namespace closing
# brackets which have a comment containing "namespace".
#
# Also, ignore blank lines at the end of a block in a long if-else
# chain, like this:
# if (condition1) {
# // Something followed by a blank line
#
# } else if (condition2) {
# // Something else
# }
if linenum + 1 < clean_lines.NumLines():
next_line = raw[linenum + 1]
if (next_line
and Match(r'\s*}', next_line)
and next_line.find('namespace') == -1
and next_line.find('} else ') == -1):
error(filename, linenum, 'whitespace/blank_line', 3,
'Blank line at the end of a code block. Is this needed?')
# Next, we complain if there's a comment too near the text
commentpos = line.find('//')
if commentpos != -1:
# Check if the // may be in quotes. If so, ignore it
# Comparisons made explicit for clarity -- pylint: disable-msg=C6403
if (line.count('"', 0, commentpos) -
line.count('\\"', 0, commentpos)) % 2 == 0: # not in quotes
# Allow one space for new scopes, two spaces otherwise:
if (not Match(r'^\s*{ //', line) and
((commentpos >= 1 and
line[commentpos-1] not in string.whitespace) or
(commentpos >= 2 and
line[commentpos-2] not in string.whitespace))):
error(filename, linenum, 'whitespace/comments', 2,
'At least two spaces is best between code and comments')
# There should always be a space between the // and the comment
commentend = commentpos + 2
if commentend < len(line) and not line[commentend] == ' ':
# but some lines are exceptions -- e.g. if they're big
# comment delimiters like:
# //----------------------------------------------------------
match = Search(r'[=/-]{4,}\s*$', line[commentend:])
if not match:
error(filename, linenum, 'whitespace/comments', 4,
'Should have a space between // and comment')
CheckComment(line[commentpos:], filename, linenum, error)
line = clean_lines.elided[linenum] # get rid of comments and strings
# Don't try to do spacing checks for operator methods
line = re.sub(r'operator(==|!=|<|<<|<=|>=|>>|>)\(', 'operator\(', line)
# We allow no-spaces around = within an if: "if ( (a=Foo()) == 0 )".
# Otherwise not. Note we only check for non-spaces on *both* sides;
# sometimes people put non-spaces on one side when aligning ='s among
# many lines (not that this is behavior that I approve of...)
if Search(r'[\w.]=[\w.]', line) and not Search(r'\b(if|while) ', line):
error(filename, linenum, 'whitespace/operators', 4,
'Missing spaces around =')
# It's ok not to have spaces around binary operators like + - * /, but if
# there's too little whitespace, we get concerned. It's hard to tell,
# though, so we punt on this one for now. TODO.
# You should always have whitespace around binary operators.
# Alas, we can't test < or > because they're legitimately used sans spaces
# (a->b, vector<int> a). The only time we can tell is a < with no >, and
# only if it's not template params list spilling into the next line.
match = Search(r'[^<>=!\s](==|!=|<=|>=)[^<>=!\s]', line)
if not match:
# Note that while it seems that the '<[^<]*' term in the following
# regexp could be simplified to '<.*', which would indeed match
# the same class of strings, the [^<] means that searching for the
# regexp takes linear rather than quadratic time.
if not Search(r'<[^<]*,\s*$', line): # template params spill
match = Search(r'[^<>=!\s](<)[^<>=!\s]([^>]|->)*$', line)
if match:
error(filename, linenum, 'whitespace/operators', 3,
'Missing spaces around %s' % match.group(1))
# We allow no-spaces around << and >> when used like this: 10<<20, but
# not otherwise (particularly, not when used as streams)
match = Search(r'[^0-9\s](<<|>>)[^0-9\s]', line)
if match:
error(filename, linenum, 'whitespace/operators', 3,
'Missing spaces around %s' % match.group(1))
# There shouldn't be space around unary operators
match = Search(r'(!\s|~\s|[\s]--[\s;]|[\s]\+\+[\s;])', line)
if match:
error(filename, linenum, 'whitespace/operators', 4,
'Extra space for operator %s' % match.group(1))
# A pet peeve of mine: no spaces after an if, while, switch, or for
match = Search(r' (if\(|for\(|while\(|switch\()', line)
if match:
error(filename, linenum, 'whitespace/parens', 5,
'Missing space before ( in %s' % match.group(1))
# For if/for/while/switch, the left and right parens should be
# consistent about how many spaces are inside the parens, and
# there should either be zero or one spaces inside the parens.
# We don't want: "if ( foo)" or "if ( foo )".
# Exception: "for ( ; foo; bar)" is allowed.
match = Search(r'\b(if|for|while|switch)\s*'
r'\(([ ]*)(.).*[^ ]+([ ]*)\)\s*{\s*$',
line)
if match:
if len(match.group(2)) != len(match.group(4)):
if not (match.group(3) == ';' and
len(match.group(2)) == 1 + len(match.group(4))):
error(filename, linenum, 'whitespace/parens', 5,
'Mismatching spaces inside () in %s' % match.group(1))
if not len(match.group(2)) in [0, 1]:
error(filename, linenum, 'whitespace/parens', 5,
'Should have zero or one spaces inside ( and ) in %s' %
match.group(1))
# You should always have a space after a comma (either as fn arg or operator)
if Search(r',[^\s]', line):
error(filename, linenum, 'whitespace/comma', 3,
'Missing space after ,')
# Next we will look for issues with function calls.
CheckSpacingForFunctionCall(filename, line, linenum, error)
# Except after an opening paren, you should have spaces before your braces.
# And since you should never have braces at the beginning of a line, this is
# an easy test.
if Search(r'[^ (]{', line):
error(filename, linenum, 'whitespace/braces', 5,
'Missing space before {')
# Make sure '} else {' has spaces.
if Search(r'}else', line):
error(filename, linenum, 'whitespace/braces', 5,
'Missing space before else')
# You shouldn't have spaces before your brackets, except maybe after
# 'delete []' or 'new char * []'.
if Search(r'\w\s+\[', line) and not Search(r'delete\s+\[', line):
error(filename, linenum, 'whitespace/braces', 5,
'Extra space before [')
# You shouldn't have a space before a semicolon at the end of the line.
# There's a special case for "for" since the style guide allows space before
# the semicolon there.
if Search(r':\s*;\s*$', line):
error(filename, linenum, 'whitespace/semicolon', 5,
'Semicolon defining empty statement. Use { } instead.')
elif Search(r'^\s*;\s*$', line):
error(filename, linenum, 'whitespace/semicolon', 5,
'Line contains only semicolon. If this should be an empty statement, '
'use { } instead.')
elif (Search(r'\s+;\s*$', line) and
not Search(r'\bfor\b', line)):
error(filename, linenum, 'whitespace/semicolon', 5,
'Extra space before last semicolon. If this should be an empty '
'statement, use { } instead.')
def GetPreviousNonBlankLine(clean_lines, linenum):
"""Return the most recent non-blank line and its line number.
Args:
clean_lines: A CleansedLines instance containing the file contents.
linenum: The number of the line to check.
Returns:
A tuple with two elements. The first element is the contents of the last
non-blank line before the current line, or the empty string if this is the
first non-blank line. The second is the line number of that line, or -1
if this is the first non-blank line.
"""
prevlinenum = linenum - 1
while prevlinenum >= 0:
prevline = clean_lines.elided[prevlinenum]
if not IsBlankLine(prevline): # if not a blank line...
return (prevline, prevlinenum)
prevlinenum -= 1
return ('', -1)
def CheckBraces(filename, clean_lines, linenum, error):
"""Looks for misplaced braces (e.g. at the end of line).
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum] # get rid of comments and strings
if Match(r'\s*{\s*$', line):
# We allow an open brace to start a line in the case where someone
# is using braces in a block to explicitly create a new scope,
# which is commonly used to control the lifetime of
# stack-allocated variables. We don't detect this perfectly: we
# just don't complain if the last non-whitespace character on the
# previous non-blank line is ';', ':', '{', or '}'.
prevline = GetPreviousNonBlankLine(clean_lines, linenum)[0]
if not Search(r'[;:}{]\s*$', prevline):
error(filename, linenum, 'whitespace/braces', 4,
'{ should almost always be at the end of the previous line')
# An else clause should be on the same line as the preceding closing brace.
if Match(r'\s*else\s*', line):
prevline = GetPreviousNonBlankLine(clean_lines, linenum)[0]
if Match(r'\s*}\s*$', prevline):
error(filename, linenum, 'whitespace/newline', 4,
'An else should appear on the same line as the preceding }')
# If braces come on one side of an else, they should be on both.
# However, we have to worry about "else if" that spans multiple lines!
if Search(r'}\s*else[^{]*$', line) or Match(r'[^}]*else\s*{', line):
if Search(r'}\s*else if([^{]*)$', line): # could be multi-line if
# find the ( after the if
pos = line.find('else if')
pos = line.find('(', pos)
if pos > 0:
(endline, _, endpos) = CloseExpression(clean_lines, linenum, pos)
if endline[endpos:].find('{') == -1: # must be brace after if
error(filename, linenum, 'readability/braces', 5,
'If an else has a brace on one side, it should have it on both')
else: # common case: else not followed by a multi-line if
error(filename, linenum, 'readability/braces', 5,
'If an else has a brace on one side, it should have it on both')
# Likewise, an else should never have the else clause on the same line
if Search(r'\belse [^\s{]', line) and not Search(r'\belse if\b', line):
error(filename, linenum, 'whitespace/newline', 4,
'Else clause should never be on same line as else (use 2 lines)')
# In the same way, a do/while should never be on one line
if Match(r'\s*do [^\s{]', line):
error(filename, linenum, 'whitespace/newline', 4,
'do/while clauses should not be on a single line')
# Braces shouldn't be followed by a ; unless they're defining a struct
# or initializing an array.
# We can't tell in general, but we can for some common cases.
prevlinenum = linenum
while True:
(prevline, prevlinenum) = GetPreviousNonBlankLine(clean_lines, prevlinenum)
if Match(r'\s+{.*}\s*;', line) and not prevline.count(';'):
line = prevline + line
else:
break
if (Search(r'{.*}\s*;', line) and
line.count('{') == line.count('}') and
not Search(r'struct|class|enum|\s*=\s*{', line)):
error(filename, linenum, 'readability/braces', 4,
"You don't need a ; after a }")
def ReplaceableCheck(operator, macro, line):
"""Determine whether a basic CHECK can be replaced with a more specific one.
For example suggest using CHECK_EQ instead of CHECK(a == b) and
similarly for CHECK_GE, CHECK_GT, CHECK_LE, CHECK_LT, CHECK_NE.
Args:
operator: The C++ operator used in the CHECK.
macro: The CHECK or EXPECT macro being called.
line: The current source line.
Returns:
True if the CHECK can be replaced with a more specific one.
"""
# This matches decimal and hex integers, strings, and chars (in that order).
match_constant = r'([-+]?(\d+|0[xX][0-9a-fA-F]+)[lLuU]{0,3}|".*"|\'.*\')'
# Expression to match two sides of the operator with something that
# looks like a literal, since CHECK(x == iterator) won't compile.
# This means we can't catch all the cases where a more specific
# CHECK is possible, but it's less annoying than dealing with
# extraneous warnings.
match_this = (r'\s*' + macro + r'\((\s*' +
match_constant + r'\s*' + operator + r'[^<>].*|'
r'.*[^<>]' + operator + r'\s*' + match_constant +
r'\s*\))')
# Don't complain about CHECK(x == NULL) or similar because
# CHECK_EQ(x, NULL) won't compile (requires a cast).
# Also, don't complain about more complex boolean expressions
# involving && or || such as CHECK(a == b || c == d).
return Match(match_this, line) and not Search(r'NULL|&&|\|\|', line)
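# Illustrative example (added note): ReplaceableCheck('==', 'CHECK',
# 'CHECK(x == 42);') returns True, so CheckCheck below would suggest CHECK_EQ;
# CHECK(x == NULL) is deliberately left alone because CHECK_EQ(x, NULL) would
# not compile without a cast. 'x' is a hypothetical variable.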
def CheckCheck(filename, clean_lines, linenum, error):
"""Checks the use of CHECK and EXPECT macros.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
# Decide the set of replacement macros that should be suggested
raw_lines = clean_lines.raw_lines
current_macro = ''
for macro in _CHECK_MACROS:
if raw_lines[linenum].find(macro) >= 0:
current_macro = macro
break
if not current_macro:
# Don't waste time here if line doesn't contain 'CHECK' or 'EXPECT'
return
line = clean_lines.elided[linenum] # get rid of comments and strings
# Encourage replacing plain CHECKs with CHECK_EQ/CHECK_NE/etc.
for operator in ['==', '!=', '>=', '>', '<=', '<']:
if ReplaceableCheck(operator, current_macro, line):
error(filename, linenum, 'readability/check', 2,
'Consider using %s instead of %s(a %s b)' % (
_CHECK_REPLACEMENT[current_macro][operator],
current_macro, operator))
break
def GetLineWidth(line):
"""Determines the width of the line in column positions.
Args:
line: A string, which may be a Unicode string.
Returns:
The width of the line in column positions, accounting for Unicode
combining characters and wide characters.
"""
if isinstance(line, unicode):
width = 0
for c in unicodedata.normalize('NFC', line):
if unicodedata.east_asian_width(c) in ('W', 'F'):
width += 2
elif not unicodedata.combining(c):
width += 1
return width
else:
return len(line)
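# Illustrative note (added): GetLineWidth(u'\u4e00' * 2) returns 4 because CJK
# characters are counted as two columns each, while GetLineWidth('abcd')
# returns 4 for four ordinary ASCII characters.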
def CheckStyle(filename, clean_lines, linenum, file_extension, error):
"""Checks rules from the 'C++ style rules' section of cppguide.html.
Most of these rules are hard to test (naming, comment style), but we
do what we can. In particular we check for 2-space indents, line lengths,
tab usage, spaces inside code, etc.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
file_extension: The extension (without the dot) of the filename.
error: The function to call with any errors found.
"""
raw_lines = clean_lines.raw_lines
line = raw_lines[linenum]
if line.find('\t') != -1:
error(filename, linenum, 'whitespace/tab', 1,
'Tab found; better to use spaces')
# One or three blank spaces at the beginning of the line is weird; it's
# hard to reconcile that with 2-space indents.
# NOTE: here are the conditions rob pike used for his tests. Mine aren't
# as sophisticated, but it may be worth becoming so: RLENGTH==initial_spaces
# if(RLENGTH > 20) complain = 0;
# if(match($0, " +(error|private|public|protected):")) complain = 0;
# if(match(prev, "&& *$")) complain = 0;
# if(match(prev, "\\|\\| *$")) complain = 0;
# if(match(prev, "[\",=><] *$")) complain = 0;
# if(match($0, " <<")) complain = 0;
# if(match(prev, " +for \\(")) complain = 0;
# if(prevodd && match(prevprev, " +for \\(")) complain = 0;
initial_spaces = 0
cleansed_line = clean_lines.elided[linenum]
while initial_spaces < len(line) and line[initial_spaces] == ' ':
initial_spaces += 1
if line and line[-1].isspace():
error(filename, linenum, 'whitespace/end_of_line', 4,
'Line ends in whitespace. Consider deleting these extra spaces.')
# There are certain situations we allow one space, notably for labels
elif ((initial_spaces == 1 or initial_spaces == 3) and
not Match(r'\s*\w+\s*:\s*$', cleansed_line)):
error(filename, linenum, 'whitespace/indent', 3,
'Weird number of spaces at line-start. '
'Are you using a 2-space indent?')
# Labels should always be indented at least one space.
elif not initial_spaces and line[:2] != '//' and Search(r'[^:]:\s*$',
line):
error(filename, linenum, 'whitespace/labels', 4,
'Labels should always be indented at least one space. '
'If this is a member-initializer list in a constructor, '
'the colon should be on the line after the definition header.')
# Check if the line is a header guard.
is_header_guard = False
if file_extension == 'h':
cppvar = GetHeaderGuardCPPVariable(filename)
if (line.startswith('#ifndef %s' % cppvar) or
line.startswith('#define %s' % cppvar) or
line.startswith('#endif // %s' % cppvar)):
is_header_guard = True
# #include lines and header guards can be long, since there's no clean way to
# split them.
#
# URLs can be long too. It's possible to split these, but it makes them
# harder to cut&paste.
if (not line.startswith('#include') and not is_header_guard and
not Match(r'^\s*//.*http(s?)://\S*$', line)):
line_width = GetLineWidth(line)
if line_width > 120:
error(filename, linenum, 'whitespace/line_length', 4,
'Lines should very rarely be longer than 120 characters')
if (cleansed_line.count(';') > 1 and
# for loops are allowed two ;'s (and may run over two lines).
cleansed_line.find('for') == -1 and
(GetPreviousNonBlankLine(clean_lines, linenum)[0].find('for') == -1 or
GetPreviousNonBlankLine(clean_lines, linenum)[0].find(';') != -1) and
# It's ok to have many commands in a switch case that fits in 1 line
not ((cleansed_line.find('case ') != -1 or
cleansed_line.find('default:') != -1) and
cleansed_line.find('break;') != -1)):
error(filename, linenum, 'whitespace/newline', 4,
'More than one command on the same line')
# Some more style checks
CheckBraces(filename, clean_lines, linenum, error)
CheckSpacing(filename, clean_lines, linenum, error)
CheckCheck(filename, clean_lines, linenum, error)
_RE_PATTERN_INCLUDE_NEW_STYLE = re.compile(r'#include +"[^/]+\.h"')
_RE_PATTERN_INCLUDE = re.compile(r'^\s*#\s*include\s*([<"])([^>"]*)[>"].*$')
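# Illustrative examples (added note): _RE_PATTERN_INCLUDE_NEW_STYLE matches
# '#include "bar.h"' but not '#include "foo/bar.h"' (which already names its
# directory), and _RE_PATTERN_INCLUDE captures the bracket style and path,
# e.g. '#include <vector>' yields groups ('<', 'vector').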
# Matches the first component of a filename delimited by -s and _s. That is:
# _RE_FIRST_COMPONENT.match('foo').group(0) == 'foo'
# _RE_FIRST_COMPONENT.match('foo.cc').group(0) == 'foo'
# _RE_FIRST_COMPONENT.match('foo-bar_baz.cc').group(0) == 'foo'
# _RE_FIRST_COMPONENT.match('foo_bar-baz.cc').group(0) == 'foo'
_RE_FIRST_COMPONENT = re.compile(r'^[^-_.]+')
def _DropCommonSuffixes(filename):
"""Drops common suffixes like _test.cc or -inl.h from filename.
For example:
>>> _DropCommonSuffixes('foo/foo-inl.h')
'foo/foo'
>>> _DropCommonSuffixes('foo/bar/foo.cc')
'foo/bar/foo'
>>> _DropCommonSuffixes('foo/foo_internal.h')
'foo/foo'
>>> _DropCommonSuffixes('foo/foo_unusualinternal.h')
'foo/foo_unusualinternal'
Args:
filename: The input filename.
Returns:
The filename with the common suffix removed.
"""
for suffix in ('test.cc', 'regtest.cc', 'unittest.cc',
'inl.h', 'impl.h', 'internal.h'):
if (filename.endswith(suffix) and len(filename) > len(suffix) and
filename[-len(suffix) - 1] in ('-', '_')):
return filename[:-len(suffix) - 1]
return os.path.splitext(filename)[0]
def _IsTestFilename(filename):
"""Determines if the given filename has a suffix that identifies it as a test.
Args:
filename: The input filename.
Returns:
True if 'filename' looks like a test, False otherwise.
"""
if (filename.endswith('_test.cc') or
filename.endswith('_unittest.cc') or
filename.endswith('_regtest.cc')):
return True
else:
return False
def _ClassifyInclude(fileinfo, include, is_system):
"""Figures out what kind of header 'include' is.
Args:
fileinfo: The current file cpplint is running over. A FileInfo instance.
include: The path to a #included file.
is_system: True if the #include used <> rather than "".
Returns:
One of the _XXX_HEADER constants.
For example:
>>> _ClassifyInclude(FileInfo('foo/foo.cc'), 'stdio.h', True)
_C_SYS_HEADER
>>> _ClassifyInclude(FileInfo('foo/foo.cc'), 'string', True)
_CPP_SYS_HEADER
>>> _ClassifyInclude(FileInfo('foo/foo.cc'), 'foo/foo.h', False)
_LIKELY_MY_HEADER
>>> _ClassifyInclude(FileInfo('foo/foo_unknown_extension.cc'),
... 'bar/foo_other_ext.h', False)
_POSSIBLE_MY_HEADER
>>> _ClassifyInclude(FileInfo('foo/foo.cc'), 'foo/bar.h', False)
_OTHER_HEADER
"""
# This is a list of all standard c++ header files, except
# those already checked for above.
is_stl_h = include in _STL_HEADERS
is_cpp_h = is_stl_h or include in _CPP_HEADERS
if is_system:
if is_cpp_h:
return _CPP_SYS_HEADER
else:
return _C_SYS_HEADER
# If the target file and the include we're checking share a
# basename when we drop common extensions, and the include
# lives in . , then it's likely to be owned by the target file.
target_dir, target_base = (
os.path.split(_DropCommonSuffixes(fileinfo.RepositoryName())))
include_dir, include_base = os.path.split(_DropCommonSuffixes(include))
if target_base == include_base and (
include_dir == target_dir or
include_dir == os.path.normpath(target_dir + '/../public')):
return _LIKELY_MY_HEADER
# If the target and include share some initial basename
# component, it's possible the target is implementing the
# include, so it's allowed to be first, but we'll never
# complain if it's not there.
target_first_component = _RE_FIRST_COMPONENT.match(target_base)
include_first_component = _RE_FIRST_COMPONENT.match(include_base)
if (target_first_component and include_first_component and
target_first_component.group(0) ==
include_first_component.group(0)):
return _POSSIBLE_MY_HEADER
return _OTHER_HEADER
def CheckLanguage(filename, clean_lines, linenum, file_extension, include_state,
error):
"""Checks rules from the 'C++ language rules' section of cppguide.html.
Some of these rules are hard to test (function overloading, using
uint32 inappropriately), but we do the best we can.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
file_extension: The extension (without the dot) of the filename.
include_state: An _IncludeState instance in which the headers are inserted.
error: The function to call with any errors found.
"""
fileinfo = FileInfo(filename)
# get rid of comments
comment_elided_line = clean_lines.lines[linenum]
# "include" should use the new style "foo/bar.h" instead of just "bar.h"
if _RE_PATTERN_INCLUDE_NEW_STYLE.search(comment_elided_line):
error(filename, linenum, 'build/include', 4,
'Include the directory when naming .h files')
# we shouldn't include a file more than once. actually, there are a
# handful of instances where doing so is okay, but in general it's
# not.
match = _RE_PATTERN_INCLUDE.search(comment_elided_line)
if match:
include = match.group(2)
is_system = (match.group(1) == '<')
if include in include_state:
error(filename, linenum, 'build/include', 4,
'"%s" already included at %s:%s' %
(include, filename, include_state[include]))
else:
include_state[include] = linenum
# We want to ensure that headers appear in the right order:
# 1) for foo.cc, foo.h (preferred location)
# 2) c system files
# 3) cpp system files
# 4) for foo.cc, foo.h (deprecated location)
# 5) other google headers
#
# We classify each include statement as one of those 5 types
# using a number of techniques. The include_state object keeps
# track of the highest type seen, and complains if we see a
# lower type after that.
error_message = include_state.CheckNextIncludeOrder(
_ClassifyInclude(fileinfo, include, is_system))
if error_message:
error(filename, linenum, 'build/include_order', 4,
'%s. Should be: %s.h, c system, c++ system, other.' %
(error_message, fileinfo.BaseName()))
# If the line is empty or consists of entirely a comment, no need to
# check it.
line = clean_lines.elided[linenum]
if not line:
return
# Create an extended_line, which is the concatenation of the current and
# next lines, for more effective checking of code that may span more than one
# line.
if linenum + 1 < clean_lines.NumLines():
extended_line = line + clean_lines.elided[linenum + 1]
else:
extended_line = line
# Make Windows paths like Unix.
fullname = os.path.abspath(filename).replace('\\', '/')
# TODO(unknown): figure out if they're using default arguments in fn proto.
# Look for any of the stream classes that are part of standard C++.
match = _RE_PATTERN_INCLUDE.match(line)
if match:
include = match.group(2)
if Match(r'(f|ind|io|i|o|parse|pf|stdio|str|)?stream$', include):
# Many unit tests use cout, so we exempt them.
if not _IsTestFilename(filename):
error(filename, linenum, 'readability/streams', 3,
'Streams are highly discouraged.')
# Check for non-const references in functions. This is tricky because &
# is also used to take the address of something. We allow <> for templates,
# (ignoring whatever is between the braces) and : for classes.
# These are complicated re's. They try to capture the following:
# paren (for fn-prototype start), typename, &, varname. For the const
# version, we're willing for const to be before typename or after
  # Don't check the implementation on the same line.
fnline = line.split('{', 1)[0]
if (len(re.findall(r'\([^()]*\b(?:[\w:]|<[^()]*>)+(\s?&|&\s?)\w+', fnline)) >
len(re.findall(r'\([^()]*\bconst\s+(?:typename\s+)?(?:struct\s+)?'
r'(?:[\w:]|<[^()]*>)+(\s?&|&\s?)\w+', fnline)) +
len(re.findall(r'\([^()]*\b(?:[\w:]|<[^()]*>)+\s+const(\s?&|&\s?)[\w]+',
fnline))):
# We allow non-const references in a few standard places, like functions
# called "swap()" or iostream operators like "<<" or ">>".
if not Search(
r'(swap|Swap|operator[<>][<>])\s*\(\s*(?:[\w:]|<.*>)+\s*&',
fnline):
error(filename, linenum, 'runtime/references', 2,
'Is this a non-const reference? '
'If so, make const or use a pointer.')
  # Check to see if they're using a conversion function cast.
# I just try to capture the most common basic types, though there are more.
# Parameterless conversion functions, such as bool(), are allowed as they are
# probably a member operator declaration or default constructor.
match = Search(
r'\b(int|float|double|bool|char|int32|uint32|int64|uint64)\([^)]', line)
if match:
# gMock methods are defined using some variant of MOCK_METHODx(name, type)
# where type may be float(), int(string), etc. Without context they are
# virtually indistinguishable from int(x) casts.
if not Match(r'^\s*MOCK_(CONST_)?METHOD\d+(_T)?\(', line):
error(filename, linenum, 'readability/casting', 4,
'Using deprecated casting style. '
'Use static_cast<%s>(...) instead' %
match.group(1))
CheckCStyleCast(filename, linenum, line, clean_lines.raw_lines[linenum],
'static_cast',
r'\((int|float|double|bool|char|u?int(16|32|64))\)',
error)
# This doesn't catch all cases. Consider (const char * const)"hello".
CheckCStyleCast(filename, linenum, line, clean_lines.raw_lines[linenum],
'reinterpret_cast', r'\((\w+\s?\*+\s?)\)', error)
# In addition, we look for people taking the address of a cast. This
# is dangerous -- casts can assign to temporaries, so the pointer doesn't
# point where you think.
if Search(
r'(&\([^)]+\)[\w(])|(&(static|dynamic|reinterpret)_cast\b)', line):
error(filename, linenum, 'runtime/casting', 4,
('Are you taking an address of a cast? '
'This is dangerous: could be a temp var. '
'Take the address before doing the cast, rather than after'))
# Check for people declaring static/global STL strings at the top level.
# This is dangerous because the C++ language does not guarantee that
# globals with constructors are initialized before the first access.
match = Match(
r'((?:|static +)(?:|const +))string +([a-zA-Z0-9_:]+)\b(.*)',
line)
# Make sure it's not a function.
# Function template specialization looks like: "string foo<Type>(...".
# Class template definitions look like: "string Foo<Type>::Method(...".
if match and not Match(r'\s*(<.*>)?(::[a-zA-Z0-9_]+)?\s*\(([^"]|$)',
match.group(3)):
error(filename, linenum, 'runtime/string', 4,
'For a static/global string constant, use a C style string instead: '
'"%schar %s[]".' %
(match.group(1), match.group(2)))
# Check that we're not using RTTI outside of testing code.
if Search(r'\bdynamic_cast<', line) and not _IsTestFilename(filename):
error(filename, linenum, 'runtime/rtti', 5,
'Do not use dynamic_cast<>. If you need to cast within a class '
"hierarchy, use static_cast<> to upcast. Google doesn't support "
'RTTI.')
if Search(r'\b([A-Za-z0-9_]*_)\(\1\)', line):
error(filename, linenum, 'runtime/init', 4,
'You seem to be initializing a member variable with itself.')
if file_extension == 'h':
# TODO(unknown): check that 1-arg constructors are explicit.
# How to tell it's a constructor?
# (handled in CheckForNonStandardConstructs for now)
# TODO(unknown): check that classes have DISALLOW_EVIL_CONSTRUCTORS
# (level 1 error)
pass
# Check if people are using the verboten C basic types. The only exception
# we regularly allow is "unsigned short port" for port.
if Search(r'\bshort port\b', line):
if not Search(r'\bunsigned short port\b', line):
error(filename, linenum, 'runtime/int', 4,
'Use "unsigned short" for ports, not "short"')
else:
match = Search(r'\b(short|long(?! +double)|long long)\b', line)
if match:
error(filename, linenum, 'runtime/int', 4,
'Use int16/int64/etc, rather than the C type %s' % match.group(1))
# When snprintf is used, the second argument shouldn't be a literal.
match = Search(r'snprintf\s*\(([^,]*),\s*([0-9]*)\s*,', line)
if match:
error(filename, linenum, 'runtime/printf', 3,
'If you can, use sizeof(%s) instead of %s as the 2nd arg '
'to snprintf.' % (match.group(1), match.group(2)))
# Check if some verboten C functions are being used.
if Search(r'\bsprintf\b', line):
error(filename, linenum, 'runtime/printf', 5,
'Never use sprintf. Use snprintf instead.')
match = Search(r'\b(strcpy|strcat)\b', line)
if match:
error(filename, linenum, 'runtime/printf', 4,
'Almost always, snprintf is better than %s' % match.group(1))
if Search(r'\bsscanf\b', line):
error(filename, linenum, 'runtime/printf', 1,
'sscanf can be ok, but is slow and can overflow buffers.')
# Check for suspicious usage of "if" like
# } if (a == b) {
if Search(r'\}\s*if\s*\(', line):
error(filename, linenum, 'readability/braces', 4,
'Did you mean "else if"? If not, start a new line for "if".')
# Check for potential format string bugs like printf(foo).
# We constrain the pattern not to pick things like DocidForPrintf(foo).
# Not perfect but it can catch printf(foo.c_str()) and printf(foo->c_str())
match = re.search(r'\b((?:string)?printf)\s*\(([\w.\->()]+)\)', line, re.I)
if match:
error(filename, linenum, 'runtime/printf', 4,
'Potential format string bug. Do %s("%%s", %s) instead.'
% (match.group(1), match.group(2)))
# Check for potential memset bugs like memset(buf, sizeof(buf), 0).
match = Search(r'memset\s*\(([^,]*),\s*([^,]*),\s*0\s*\)', line)
if match and not Match(r"^''|-?[0-9]+|0x[0-9A-Fa-f]$", match.group(2)):
error(filename, linenum, 'runtime/memset', 4,
'Did you mean "memset(%s, 0, %s)"?'
% (match.group(1), match.group(2)))
if Search(r'\busing namespace\b', line):
error(filename, linenum, 'build/namespaces', 5,
'Do not use namespace using-directives. '
'Use using-declarations instead.')
# Detect variable-length arrays.
match = Match(r'\s*(.+::)?(\w+) [a-z]\w*\[(.+)];', line)
if (match and match.group(2) != 'return' and match.group(2) != 'delete' and
match.group(3).find(']') == -1):
# Split the size using space and arithmetic operators as delimiters.
# If any of the resulting tokens are not compile time constants then
# report the error.
tokens = re.split(r'\s|\+|\-|\*|\/|<<|>>]', match.group(3))
is_const = True
skip_next = False
for tok in tokens:
if skip_next:
skip_next = False
continue
if Search(r'sizeof\(.+\)', tok): continue
if Search(r'arraysize\(\w+\)', tok): continue
tok = tok.lstrip('(')
tok = tok.rstrip(')')
if not tok: continue
if Match(r'\d+', tok): continue
if Match(r'0[xX][0-9a-fA-F]+', tok): continue
if Match(r'k[A-Z0-9]\w*', tok): continue
if Match(r'(.+::)?k[A-Z0-9]\w*', tok): continue
if Match(r'(.+::)?[A-Z][A-Z0-9_]*', tok): continue
# A catch all for tricky sizeof cases, including 'sizeof expression',
# 'sizeof(*type)', 'sizeof(const type)', 'sizeof(struct StructName)'
      # requires skipping the next token because we split on ' ' and '*'.
if tok.startswith('sizeof'):
skip_next = True
continue
is_const = False
break
if not is_const:
error(filename, linenum, 'runtime/arrays', 1,
'Do not use variable-length arrays. Use an appropriately named '
"('k' followed by CamelCase) compile-time constant for the size.")
# If DISALLOW_EVIL_CONSTRUCTORS, DISALLOW_COPY_AND_ASSIGN, or
# DISALLOW_IMPLICIT_CONSTRUCTORS is present, then it should be the last thing
# in the class declaration.
match = Match(
(r'\s*'
r'(DISALLOW_(EVIL_CONSTRUCTORS|COPY_AND_ASSIGN|IMPLICIT_CONSTRUCTORS))'
r'\(.*\);$'),
line)
if match and linenum + 1 < clean_lines.NumLines():
next_line = clean_lines.elided[linenum + 1]
if not Search(r'^\s*};', next_line):
error(filename, linenum, 'readability/constructors', 3,
match.group(1) + ' should be the last thing in the class')
# Check for use of unnamed namespaces in header files. Registration
# macros are typically OK, so we allow use of "namespace {" on lines
# that end with backslashes.
if (file_extension == 'h'
and Search(r'\bnamespace\s*{', line)
and line[-1] != '\\'):
error(filename, linenum, 'build/namespaces', 4,
'Do not use unnamed namespaces in header files. See '
'http://google-styleguide.googlecode.com/svn/trunk/cppguide.xml#Namespaces'
' for more information.')
def CheckCStyleCast(filename, linenum, line, raw_line, cast_type, pattern,
error):
"""Checks for a C-style cast by looking for the pattern.
This also handles sizeof(type) warnings, due to similarity of content.
Args:
filename: The name of the current file.
linenum: The number of the line to check.
line: The line of code to check.
raw_line: The raw line of code to check, with comments.
cast_type: The string for the C++ cast to recommend. This is either
reinterpret_cast or static_cast, depending.
pattern: The regular expression used to find C-style casts.
error: The function to call with any errors found.
"""
match = Search(pattern, line)
if not match:
return
# e.g., sizeof(int)
sizeof_match = Match(r'.*sizeof\s*$', line[0:match.start(1) - 1])
if sizeof_match:
error(filename, linenum, 'runtime/sizeof', 1,
'Using sizeof(type). Use sizeof(varname) instead if possible')
return
remainder = line[match.end(0):]
# The close paren is for function pointers as arguments to a function.
# eg, void foo(void (*bar)(int));
# The semicolon check is a more basic function check; also possibly a
# function pointer typedef.
# eg, void foo(int); or void foo(int) const;
# The equals check is for function pointer assignment.
# eg, void *(*foo)(int) = ...
#
# Right now, this will only catch cases where there's a single argument, and
# it's unnamed. It should probably be expanded to check for multiple
# arguments with some unnamed.
function_match = Match(r'\s*(\)|=|(const)?\s*(;|\{|throw\(\)))', remainder)
if function_match:
if (not function_match.group(3) or
function_match.group(3) == ';' or
raw_line.find('/*') < 0):
error(filename, linenum, 'readability/function', 3,
'All parameters should be named in a function')
return
# At this point, all that should be left is actual casts.
error(filename, linenum, 'readability/casting', 4,
'Using C-style cast. Use %s<%s>(...) instead' %
(cast_type, match.group(1)))
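# Illustrative example (added note): for a hypothetical line 'int x = (int)y;'
# the static_cast pattern passed in from CheckLanguage matches '(int)', no
# sizeof or function-declaration exception applies, so a readability/casting
# error suggesting static_cast<int>(...) is reported.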
_HEADERS_CONTAINING_TEMPLATES = (
('<deque>', ('deque',)),
('<functional>', ('unary_function', 'binary_function',
'plus', 'minus', 'multiplies', 'divides', 'modulus',
'negate',
'equal_to', 'not_equal_to', 'greater', 'less',
'greater_equal', 'less_equal',
'logical_and', 'logical_or', 'logical_not',
'unary_negate', 'not1', 'binary_negate', 'not2',
'bind1st', 'bind2nd',
'pointer_to_unary_function',
'pointer_to_binary_function',
'ptr_fun',
'mem_fun_t', 'mem_fun', 'mem_fun1_t', 'mem_fun1_ref_t',
'mem_fun_ref_t',
'const_mem_fun_t', 'const_mem_fun1_t',
'const_mem_fun_ref_t', 'const_mem_fun1_ref_t',
'mem_fun_ref',
)),
('<limits>', ('numeric_limits',)),
('<list>', ('list',)),
('<map>', ('map', 'multimap',)),
('<memory>', ('allocator',)),
('<queue>', ('queue', 'priority_queue',)),
('<set>', ('set', 'multiset',)),
('<stack>', ('stack',)),
('<string>', ('char_traits', 'basic_string',)),
('<utility>', ('pair',)),
('<vector>', ('vector',)),
# gcc extensions.
# Note: std::hash is their hash, ::hash is our hash
('<hash_map>', ('hash_map', 'hash_multimap',)),
('<hash_set>', ('hash_set', 'hash_multiset',)),
('<slist>', ('slist',)),
)
_HEADERS_ACCEPTED_BUT_NOT_PROMOTED = {
# We can trust with reasonable confidence that map gives us pair<>, too.
'pair<>': ('map', 'multimap', 'hash_map', 'hash_multimap')
}
_RE_PATTERN_STRING = re.compile(r'\bstring\b')
_re_pattern_algorithm_header = []
for _template in ('copy', 'min_element', 'sort', 'swap',
'transform'):
# Match max<type>(..., ...), max(..., ...), but not foo->max, foo.max or
# type::max().
_re_pattern_algorithm_header.append(
(re.compile(r'[^>.]\b' + _template + r'(<.*?>)?\([^\)]'),
_template,
'<algorithm>'))
_re_pattern_templates = []
for _header, _templates in _HEADERS_CONTAINING_TEMPLATES:
for _template in _templates:
_re_pattern_templates.append(
(re.compile(r'(\<|\b)' + _template + r'\s*\<'),
_template + '<>',
_header))
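# Illustrative note (added): after these tables are built, a line such as
# 'map<int, string> m;' matches the pattern for '<map>', so
# CheckForIncludeWhatYouUse below can require '#include <map>' for it.
# The declaration shown is hypothetical.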
def CheckForIncludeWhatYouUse(filename, clean_lines, include_state, error):
"""Reports for missing stl includes.
This function will output warnings to make sure you are including the headers
necessary for the stl containers and functions that you use. We only give one
reason to include a header. For example, if you use both equal_to<> and
less<> in a .h file, only one (the latter in the file) of these will be
reported as a reason to include the <functional>.
We only check headers. We do not check inside cc-files. .cc files should be
able to depend on their respective header files for includes. However, there
is no simple way of producing this logic here.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
include_state: An _IncludeState instance.
error: The function to call with any errors found.
"""
if filename.endswith('.cc'):
return
required = {} # A map of header name to linenumber and the template entity.
# Example of required: { '<functional>': (1219, 'less<>') }
for linenum in xrange(clean_lines.NumLines()):
line = clean_lines.elided[linenum]
if not line or line[0] == '#':
continue
# String is special -- it is a non-templatized type in STL.
if _RE_PATTERN_STRING.search(line):
required['<string>'] = (linenum, 'string')
for pattern, template, header in _re_pattern_algorithm_header:
if pattern.search(line):
required[header] = (linenum, template)
# The following function is just a speed up, no semantics are changed.
if not '<' in line: # Reduces the cpu time usage by skipping lines.
continue
for pattern, template, header in _re_pattern_templates:
if pattern.search(line):
required[header] = (linenum, template)
# All the lines have been processed, report the errors found.
for required_header_unstripped in required:
template = required[required_header_unstripped][1]
if template in _HEADERS_ACCEPTED_BUT_NOT_PROMOTED:
headers = _HEADERS_ACCEPTED_BUT_NOT_PROMOTED[template]
if [True for header in headers if header in include_state]:
continue
if required_header_unstripped.strip('<>"') not in include_state:
error(filename, required[required_header_unstripped][0],
'build/include_what_you_use', 4,
'Add #include ' + required_header_unstripped + ' for ' + template)
def ProcessLine(filename, file_extension,
clean_lines, line, include_state, function_state,
class_state, error):
"""Processes a single line in the file.
Args:
filename: Filename of the file that is being processed.
file_extension: The extension (dot not included) of the file.
clean_lines: An array of strings, each representing a line of the file,
with comments stripped.
line: Number of line being processed.
include_state: An _IncludeState instance in which the headers are inserted.
function_state: A _FunctionState instance which counts function lines, etc.
class_state: A _ClassState instance which maintains information about
the current stack of nested class declarations being parsed.
error: A callable to which errors are reported, which takes 4 arguments:
filename, line number, error level, and message
"""
raw_lines = clean_lines.raw_lines
CheckForFunctionLengths(filename, clean_lines, line, function_state, error)
if Search(r'\bNOLINT\b', raw_lines[line]): # ignore nolint lines
return
CheckForMultilineCommentsAndStrings(filename, clean_lines, line, error)
CheckStyle(filename, clean_lines, line, file_extension, error)
CheckLanguage(filename, clean_lines, line, file_extension, include_state,
error)
CheckForNonStandardConstructs(filename, clean_lines, line,
class_state, error)
CheckPosixThreading(filename, clean_lines, line, error)
CheckInvalidIncrement(filename, clean_lines, line, error)
def ProcessFileData(filename, file_extension, lines, error):
"""Performs lint checks and reports any errors to the given error function.
Args:
filename: Filename of the file that is being processed.
file_extension: The extension (dot not included) of the file.
lines: An array of strings, each representing a line of the file, with the
           last element being empty if the file is terminated with a newline.
error: A callable to which errors are reported, which takes 4 arguments:
"""
lines = (['// marker so line numbers and indices both start at 1'] + lines +
['// marker so line numbers end in a known way'])
include_state = _IncludeState()
function_state = _FunctionState()
class_state = _ClassState()
CheckForCopyright(filename, lines, error)
if file_extension == 'h':
CheckForHeaderGuard(filename, lines, error)
RemoveMultiLineComments(filename, lines, error)
clean_lines = CleansedLines(lines)
for line in xrange(clean_lines.NumLines()):
ProcessLine(filename, file_extension, clean_lines, line,
include_state, function_state, class_state, error)
class_state.CheckFinished(filename, error)
CheckForIncludeWhatYouUse(filename, clean_lines, include_state, error)
# We check here rather than inside ProcessLine so that we see raw
# lines rather than "cleaned" lines.
CheckForUnicodeReplacementCharacters(filename, lines, error)
CheckForNewlineAtEOF(filename, lines, error)
def ProcessFile(filename, vlevel):
"""Does google-lint on a single file.
Args:
filename: The name of the file to parse.
vlevel: The level of errors to report. Every error of confidence
>= verbose_level will be reported. 0 is a good default.
"""
_SetVerboseLevel(vlevel)
try:
# Support the UNIX convention of using "-" for stdin. Note that
# we are not opening the file with universal newline support
# (which codecs doesn't support anyway), so the resulting lines do
# contain trailing '\r' characters if we are reading a file that
# has CRLF endings.
# If after the split a trailing '\r' is present, it is removed
# below. If it is not expected to be present (i.e. os.linesep !=
# '\r\n' as in Windows), a warning is issued below if this file
# is processed.
if filename == '-':
lines = codecs.StreamReaderWriter(sys.stdin,
codecs.getreader('utf8'),
codecs.getwriter('utf8'),
'replace').read().split('\n')
else:
lines = codecs.open(filename, 'r', 'utf8', 'replace').read().split('\n')
carriage_return_found = False
# Remove trailing '\r'.
for linenum in range(len(lines)):
if lines[linenum].endswith('\r'):
lines[linenum] = lines[linenum].rstrip('\r')
carriage_return_found = True
except IOError:
sys.stderr.write(
"Skipping input '%s': Can't open for reading\n" % filename)
return
# Note, if no dot is found, this will give the entire filename as the ext.
file_extension = filename[filename.rfind('.') + 1:]
# When reading from stdin, the extension is unknown, so no cpplint tests
# should rely on the extension.
if (filename != '-' and file_extension != 'cc' and file_extension != 'h'
and file_extension != 'cpp'):
sys.stderr.write('Ignoring %s; not a .cc or .h file\n' % filename)
else:
ProcessFileData(filename, file_extension, lines, Error)
if carriage_return_found and os.linesep != '\r\n':
      # Use 0 for linenum since outputting only one error for potentially
# several lines.
Error(filename, 0, 'whitespace/newline', 1,
            'One or more unexpected \\r (^M) found; '
'better to use only a \\n')
sys.stderr.write('Done processing %s\n' % filename)
def PrintUsage(message):
"""Prints a brief usage string and exits, optionally with an error message.
Args:
message: The optional error message.
"""
sys.stderr.write(_USAGE)
if message:
sys.exit('\nFATAL ERROR: ' + message)
else:
sys.exit(1)
def PrintCategories():
"""Prints a list of all the error-categories used by error messages.
These are the categories used to filter messages via --filter.
"""
sys.stderr.write(_ERROR_CATEGORIES)
sys.exit(0)
def ParseArguments(args):
"""Parses the command line arguments.
This may set the output format and verbosity level as side-effects.
Args:
args: The command line arguments:
Returns:
The list of filenames to lint.
"""
try:
(opts, filenames) = getopt.getopt(args, '', ['help', 'output=', 'verbose=',
'filter='])
except getopt.GetoptError:
PrintUsage('Invalid arguments.')
verbosity = _VerboseLevel()
output_format = _OutputFormat()
filters = ''
for (opt, val) in opts:
if opt == '--help':
PrintUsage(None)
elif opt == '--output':
if not val in ('emacs', 'vs7'):
PrintUsage('The only allowed output formats are emacs and vs7.')
output_format = val
elif opt == '--verbose':
verbosity = int(val)
elif opt == '--filter':
filters = val
if not filters:
PrintCategories()
if not filenames:
PrintUsage('No files were specified.')
_SetOutputFormat(output_format)
_SetVerboseLevel(verbosity)
_SetFilters(filters)
return filenames
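# Illustrative usage (added note; the filenames and flag values are hypothetical):
#   cpplint.py --output=vs7 --verbose=3 foo.cc bar.h
# would select the Visual Studio message format, report errors of confidence
# >= 3, and return ['foo.cc', 'bar.h'] from ParseArguments.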
def main():
filenames = ParseArguments(sys.argv[1:])
# Change stderr to write with replacement characters so we don't die
# if we try to print something containing non-ASCII characters.
sys.stderr = codecs.StreamReaderWriter(sys.stderr,
codecs.getreader('utf8'),
codecs.getwriter('utf8'),
'replace')
_cpplint_state.ResetErrorCount()
for filename in filenames:
ProcessFile(filename, _cpplint_state.verbose_level)
sys.stderr.write('Total errors found: %d\n' % _cpplint_state.error_count)
sys.exit(_cpplint_state.error_count > 0)
if __name__ == '__main__':
main()
| 39.068429 | 86 | 0.657548 |
4a1e828ab5230fd2480dd7bec589d584ff840868 | 318 | py | Python
F_Machine_learning/1_Unsupervised-Learning/solutions/ex3_2.py | oercompbiomed/CBM101 | 20010dcb99fbf218c4789eb5918dcff8ceb94898 | ["MIT"] | 7 | 2019-07-03T07:41:55.000Z | 2022-02-06T20:25:37.000Z
F_Machine_learning/1_Unsupervised-Learning/solutions/ex3_2.py | oercompbiomed/CBM101 | 20010dcb99fbf218c4789eb5918dcff8ceb94898 | ["MIT"] | 9 | 2019-03-14T15:15:09.000Z | 2019-08-01T14:18:21.000Z
F_Machine_learning/1_Unsupervised-Learning/solutions/ex3_2.py | oercompbiomed/CBM101 | 20010dcb99fbf218c4789eb5918dcff8ceb94898 | ["MIT"] | 11 | 2019-03-12T10:43:11.000Z | 2021-10-05T12:15:00.000Z
# NOTE: this solution snippet assumes numpy is available and that `S` (the
# matrix of source signals from the exercise) is defined earlier in the notebook.
import numpy as np

# mixing matrix
np.random.seed(42)
A = np.random.randn(4,3)
print(A.T)
# apply transformation
X = S @ A.T
# EXPLANATION:
# each column in A.T defines one of the linear combinations (4 in total)
# the rows define how much to add from each source.
# E.g. the first combination will be (0.5*im1 - 0.14*im2 + 0.65*im3)
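# Added note (assumption: S holds the source signals with shape (n_samples, 3),
# one column per source): A.T has shape (3, 4), so X = S @ A.T has shape
# (n_samples, 4), i.e. four mixed observations built from the three sources.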
| 24.461538 | 72 | 0.694969 |
4a1e82b15efbd107c90105878666644b04fe5e67 | 3844 | py | Python
twisted/internet/test/_win32ifaces.py | ioggstream/twisted | 34f9b1e3f097685839000c656332c66ee85be5d8 | ["Unlicense", "MIT"] | 3 | 2016-02-01T02:29:51.000Z | 2020-09-04T17:19:24.000Z
twisted/internet/test/_win32ifaces.py | ioggstream/twisted | 34f9b1e3f097685839000c656332c66ee85be5d8 | ["Unlicense", "MIT"] | 4 | 2017-02-19T23:58:13.000Z | 2019-11-01T15:31:22.000Z
twisted/internet/test/_win32ifaces.py | ioggstream/twisted | 34f9b1e3f097685839000c656332c66ee85be5d8 | ["Unlicense", "MIT"] | 6 | 2017-02-13T09:11:02.000Z | 2021-06-29T11:22:18.000Z
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Windows implementation of local network interface enumeration.
"""
from socket import socket, AF_INET6, SOCK_STREAM
from ctypes import (
WinDLL, byref, create_string_buffer, c_int, c_void_p,
POINTER, Structure, cast, string_at)
WS2_32 = WinDLL('ws2_32')
SOCKET = c_int
DWORD = c_int
LPVOID = c_void_p
LPSOCKADDR = c_void_p
LPWSAPROTOCOL_INFO = c_void_p
LPTSTR = c_void_p
LPDWORD = c_void_p
LPWSAOVERLAPPED = c_void_p
LPWSAOVERLAPPED_COMPLETION_ROUTINE = c_void_p
# http://msdn.microsoft.com/en-us/library/ms741621(v=VS.85).aspx
# int WSAIoctl(
# __in SOCKET s,
# __in DWORD dwIoControlCode,
# __in LPVOID lpvInBuffer,
# __in DWORD cbInBuffer,
# __out LPVOID lpvOutBuffer,
# __in DWORD cbOutBuffer,
# __out LPDWORD lpcbBytesReturned,
# __in LPWSAOVERLAPPED lpOverlapped,
# __in LPWSAOVERLAPPED_COMPLETION_ROUTINE lpCompletionRoutine
# );
WSAIoctl = WS2_32.WSAIoctl
WSAIoctl.argtypes = [
SOCKET, DWORD, LPVOID, DWORD, LPVOID, DWORD, LPDWORD,
LPWSAOVERLAPPED, LPWSAOVERLAPPED_COMPLETION_ROUTINE]
WSAIoctl.restype = c_int
# http://msdn.microsoft.com/en-us/library/ms741516(VS.85).aspx
# INT WSAAPI WSAAddressToString(
# __in LPSOCKADDR lpsaAddress,
# __in DWORD dwAddressLength,
# __in_opt LPWSAPROTOCOL_INFO lpProtocolInfo,
# __inout LPTSTR lpszAddressString,
# __inout LPDWORD lpdwAddressStringLength
# );
WSAAddressToString = WS2_32.WSAAddressToStringA
WSAAddressToString.argtypes = [
LPSOCKADDR, DWORD, LPWSAPROTOCOL_INFO, LPTSTR, LPDWORD]
WSAAddressToString.restype = c_int
SIO_ADDRESS_LIST_QUERY = 0x48000016
WSAEFAULT = 10014
class SOCKET_ADDRESS(Structure):
_fields_ = [('lpSockaddr', c_void_p),
('iSockaddrLength', c_int)]
def make_SAL(ln):
class SOCKET_ADDRESS_LIST(Structure):
_fields_ = [('iAddressCount', c_int),
('Address', SOCKET_ADDRESS * ln)]
return SOCKET_ADDRESS_LIST
def win32GetLinkLocalIPv6Addresses():
"""
Return a list of strings in colon-hex format representing all the link local
IPv6 addresses available on the system, as reported by
I{WSAIoctl}/C{SIO_ADDRESS_LIST_QUERY}.
"""
s = socket(AF_INET6, SOCK_STREAM)
size = 4096
retBytes = c_int()
for i in range(2):
buf = create_string_buffer(size)
ret = WSAIoctl(
s.fileno(),
SIO_ADDRESS_LIST_QUERY, 0, 0, buf, size, byref(retBytes), 0, 0)
# WSAIoctl might fail with WSAEFAULT, which means there was not enough
# space in the buffer we gave it. There's no way to check the errno
# until Python 2.6, so we don't even try. :/ Maybe if retBytes is still
# 0 another error happened, though.
if ret and retBytes.value:
size = retBytes.value
else:
break
# If it failed, then we'll just have to give up. Still no way to see why.
if ret:
raise RuntimeError("WSAIoctl failure")
addrList = cast(buf, POINTER(make_SAL(0)))
addrCount = addrList[0].iAddressCount
addrList = cast(buf, POINTER(make_SAL(addrCount)))
addressStringBufLength = 1024
addressStringBuf = create_string_buffer(addressStringBufLength)
retList = []
for i in range(addrList[0].iAddressCount):
retBytes.value = addressStringBufLength
addr = addrList[0].Address[i]
ret = WSAAddressToString(
addr.lpSockaddr, addr.iSockaddrLength, 0, addressStringBuf,
byref(retBytes))
if ret:
raise RuntimeError("WSAAddressToString failure")
retList.append(string_at(addressStringBuf))
return [addr for addr in retList if '%' in addr]
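# --- Added usage sketch (not part of the original module): a minimal, hedged
# example of exercising the helper on a Windows host. It simply prints
# whatever link-local addresses WSAIoctl reports for this machine.
if __name__ == '__main__':
    for addr in win32GetLinkLocalIPv6Addresses():
        print(addr)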
| 32.033333 | 80 | 0.680021 |
4a1e835fee585f5f1158323e9a9ab99c70017a29
| 826 |
py
|
Python
|
src/bricks.py
|
PandelisZ/PyBreakout
|
49b1e296b58689453dae12b4ecc66c1682d525c0
|
[
"MIT"
] | null | null | null |
src/bricks.py
|
PandelisZ/PyBreakout
|
49b1e296b58689453dae12b4ecc66c1682d525c0
|
[
"MIT"
] | null | null | null |
src/bricks.py
|
PandelisZ/PyBreakout
|
49b1e296b58689453dae12b4ecc66c1682d525c0
|
[
"MIT"
] | null | null | null |
import pygame
import constants
class Brick:
def __init__(self, screen):
self.positionX = 0
self.positionY = 0
self.screen = screen
self.sizeX = 66
self.sizeY = 18
        self.colourArr = [constants.colourRed, constants.colourPink,
                          constants.colourOrange, constants.colourGreen,
                          constants.colourYellow, constants.colourSpecial]
#Positioning
def setPosX(self, x):
self.positionX = x
def setPosY(self, y):
self.positionY = y
def getPosX(self):
return self.positionX
def getPosY(self):
return self.positionY
def getPosition(self):
return (self.positionX, self.positionY)
#/End Positioning
    def draw(self, c):
        pygame.draw.rect(self.screen, self.colourArr[c],
                         [self.positionX, self.positionY, self.sizeX, self.sizeY])
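# --- Added usage sketch (not part of the original file). A hedged example of
# laying bricks out in a grid; the row/column counts and the 70x20 pixel
# spacing are illustrative assumptions, not values taken from the game.
def make_brick_grid(screen, rows=6, cols=10, x_gap=70, y_gap=20):
    bricks = []
    for row in range(rows):
        for col in range(cols):
            brick = Brick(screen)
            brick.setPosX(col * x_gap)
            brick.setPosY(row * y_gap)
            bricks.append(brick)
    return bricks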
| 26.645161 | 159 | 0.66707 |
4a1e836729714357c225a44fa6b0ef4b493bcf70
| 524 |
py
|
Python
|
py/07.py
|
fenglyu/projecteuler
|
f12746e20fd562e03d3a0e5ff6f4e944cb435aa1
|
[
"MIT"
] | null | null | null |
py/07.py
|
fenglyu/projecteuler
|
f12746e20fd562e03d3a0e5ff6f4e944cb435aa1
|
[
"MIT"
] | null | null | null |
py/07.py
|
fenglyu/projecteuler
|
f12746e20fd562e03d3a0e5ff6f4e944cb435aa1
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
def prime(n):
    """Trial division: return True if n has no divisor in [2, sqrt(n)]."""
    i = 2
    while i * i <= n:
        if n % i == 0:
            return False
        i += 1
    return True
def index(m):
    """Find the m-th prime and print it as the tuple (prime, count)."""
    i = 2
    count = 0
    while True:
        if prime(i):
            count += 1
            # print(i, count)
            if count >= m:
                break
        i += 1
    print((i, count))
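# --- Added sketch (not part of the original solution): the same answer can be
# found with a sieve of Eratosthenes. The 200000 upper bound is an assumption
# that comfortably contains the 10001st prime (104743).
def nth_prime_sieve(m, bound=200000):
    is_prime = [True] * bound
    is_prime[0] = is_prime[1] = False
    for i in range(2, int(bound ** 0.5) + 1):
        if is_prime[i]:
            for j in range(i * i, bound, i):
                is_prime[j] = False
    primes = [i for i, p in enumerate(is_prime) if p]
    return primes[m - 1]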
if __name__ == "__main__":
# print(prime(113))
# print(prime(122))
index(10001)
| 16.375 | 28 | 0.402672 |
4a1e83d51b1345997baa3a1056c62bd3b3998093
| 1,330 |
py
|
Python
|
novice/sql/sqlitemagic.py
|
marianekka/DEPRECATED-bc
|
55f5cec89376faaf4b12ab572c930f0a05ab8dd5
|
[
"CC-BY-3.0"
] | 34 |
2016-08-05T07:36:28.000Z
|
2022-01-30T20:08:55.000Z
|
novice/sql/sqlitemagic.py
|
UW-Madison-ACI/2014-08-25-wisc
|
35a4ab08b73634d7d11267ec2bb4685d381efce6
|
[
"CC-BY-3.0"
] | 2 |
2016-08-04T10:54:52.000Z
|
2016-08-04T10:55:12.000Z
|
novice/sql/sqlitemagic.py
|
UW-Madison-ACI/2014-08-25-wisc
|
35a4ab08b73634d7d11267ec2bb4685d381efce6
|
[
"CC-BY-3.0"
] | 22 |
2016-08-01T16:48:48.000Z
|
2022-02-24T22:42:36.000Z
|
"""sqlitemagic provices a simple magic for interacting with SQLite
databases stored on disk.
Usage:
%%sqlite filename.db
select personal, family from person;
produces:
Alan|Turing
Grace|Hopper
"""
# This file is copyright 2013 by Greg Wilson: see
# https://github.com/gvwilson/sqlitemagic/blob/master/LICENSE
# for the license.
# Inspired by https://github.com/tkf/ipython-sqlitemagic.
import sqlite3
from IPython.core.magic import Magics, magics_class, cell_magic
from IPython.display import display, HTML
@magics_class
class SqliteMagic(Magics):
'''Provide the 'sqlite' calling point.'''
@cell_magic
def sqlite(self, filename, query):
connection = sqlite3.connect(filename)
cursor = connection.cursor()
try:
cursor.execute(query)
results = cursor.fetchall()
display(HTML(self.tablify(results)))
        except Exception as e:
            import sys
            print("exception", e, file=sys.stderr)
cursor.close()
connection.close()
def tablify(self, rows):
return '<table>\n' + '\n'.join(self.rowify(r) for r in rows) + '\n</table>'
def rowify(self, row):
return '<tr>' + ''.join('<td>' + str(r) + '</td>' for r in row) + '</tr>'
def load_ipython_extension(ipython):
ipython.register_magics(SqliteMagic)
| 26.6 | 83 | 0.658647 |
4a1e8403d382b3f3d0206118db6e786f11911678
| 988 |
py
|
Python
|
supriya/ugens/PV_JensenAndersen.py
|
deeuu/supriya
|
14fcb5316eccb4dafbe498932ceff56e1abb9d27
|
[
"MIT"
] | null | null | null |
supriya/ugens/PV_JensenAndersen.py
|
deeuu/supriya
|
14fcb5316eccb4dafbe498932ceff56e1abb9d27
|
[
"MIT"
] | null | null | null |
supriya/ugens/PV_JensenAndersen.py
|
deeuu/supriya
|
14fcb5316eccb4dafbe498932ceff56e1abb9d27
|
[
"MIT"
] | null | null | null |
import collections
from supriya.ugens.PV_ChainUGen import PV_ChainUGen
class PV_JensenAndersen(PV_ChainUGen):
"""
A FFT feature detector for onset detection.
::
>>> pv_chain = supriya.ugens.FFT(
... source=supriya.ugens.WhiteNoise.ar(),
... )
>>> pv_jensen_andersen = supriya.ugens.PV_JensenAndersen.new(
... pv_chain=pv_chain,
... prophfc=0.25,
... prophfe=0.25,
... propsc=0.25,
... propsf=0.25,
... threshold=1,
... waittime=0.04,
... )
>>> pv_jensen_andersen
PV_JensenAndersen.kr()
"""
### CLASS VARIABLES ###
_ordered_input_names = collections.OrderedDict(
[
("pv_chain", None),
("propsc", 0.25),
("prophfe", 0.25),
("prophfc", 0.25),
("propsf", 0.25),
("threshold", 1),
("waittime", 0.04),
]
)
| 23.52381 | 69 | 0.480769 |
4a1e86d8de024e6faf3f8b41905a7f8e04401500
| 63,119 |
py
|
Python
|
PythonNetwork/venv/Lib/site-packages/scipy/stats/tests/test_morestats.py
|
Moldovandreii/RepetitionCount
|
b5715b0948b609fde0ce05d45023b7d4cfd635e7
|
[
"FTL"
] | 6,989 |
2017-07-18T06:23:18.000Z
|
2022-03-31T15:58:36.000Z
|
PythonNetwork/venv/Lib/site-packages/scipy/stats/tests/test_morestats.py
|
Moldovandreii/RepetitionCount
|
b5715b0948b609fde0ce05d45023b7d4cfd635e7
|
[
"FTL"
] | 1,978 |
2017-07-18T09:17:58.000Z
|
2022-03-31T14:28:43.000Z
|
PythonNetwork/venv/Lib/site-packages/scipy/stats/tests/test_morestats.py
|
Moldovandreii/RepetitionCount
|
b5715b0948b609fde0ce05d45023b7d4cfd635e7
|
[
"FTL"
] | 1,228 |
2017-07-18T09:03:13.000Z
|
2022-03-29T05:57:40.000Z
|
# Author: Travis Oliphant, 2002
#
# Further enhancements and tests added by numerous SciPy developers.
#
from __future__ import division, print_function, absolute_import
import warnings
import numpy as np
from numpy.random import RandomState
from numpy.testing import (assert_array_equal,
assert_almost_equal, assert_array_less, assert_array_almost_equal,
assert_, assert_allclose, assert_equal, assert_warns)
import pytest
from pytest import raises as assert_raises
from scipy._lib._numpy_compat import suppress_warnings
from scipy import stats
from .common_tests import check_named_results
# Matplotlib is not a scipy dependency but is optionally used in probplot, so
# check if it's available
try:
import matplotlib.pyplot as plt
have_matplotlib = True
except Exception:
have_matplotlib = False
# test data gear.dat from NIST for Levene and Bartlett test
# https://www.itl.nist.gov/div898/handbook/eda/section3/eda3581.htm
g1 = [1.006, 0.996, 0.998, 1.000, 0.992, 0.993, 1.002, 0.999, 0.994, 1.000]
g2 = [0.998, 1.006, 1.000, 1.002, 0.997, 0.998, 0.996, 1.000, 1.006, 0.988]
g3 = [0.991, 0.987, 0.997, 0.999, 0.995, 0.994, 1.000, 0.999, 0.996, 0.996]
g4 = [1.005, 1.002, 0.994, 1.000, 0.995, 0.994, 0.998, 0.996, 1.002, 0.996]
g5 = [0.998, 0.998, 0.982, 0.990, 1.002, 0.984, 0.996, 0.993, 0.980, 0.996]
g6 = [1.009, 1.013, 1.009, 0.997, 0.988, 1.002, 0.995, 0.998, 0.981, 0.996]
g7 = [0.990, 1.004, 0.996, 1.001, 0.998, 1.000, 1.018, 1.010, 0.996, 1.002]
g8 = [0.998, 1.000, 1.006, 1.000, 1.002, 0.996, 0.998, 0.996, 1.002, 1.006]
g9 = [1.002, 0.998, 0.996, 0.995, 0.996, 1.004, 1.004, 0.998, 0.999, 0.991]
g10 = [0.991, 0.995, 0.984, 0.994, 0.997, 0.997, 0.991, 0.998, 1.004, 0.997]
class TestBayes_mvs(object):
def test_basic(self):
# Expected values in this test simply taken from the function. For
# some checks regarding correctness of implementation, see review in
# gh-674
data = [6, 9, 12, 7, 8, 8, 13]
mean, var, std = stats.bayes_mvs(data)
assert_almost_equal(mean.statistic, 9.0)
assert_allclose(mean.minmax, (7.1036502226125329, 10.896349777387467),
rtol=1e-14)
assert_almost_equal(var.statistic, 10.0)
assert_allclose(var.minmax, (3.1767242068607087, 24.45910381334018),
rtol=1e-09)
assert_almost_equal(std.statistic, 2.9724954732045084, decimal=14)
assert_allclose(std.minmax, (1.7823367265645145, 4.9456146050146312),
rtol=1e-14)
def test_empty_input(self):
assert_raises(ValueError, stats.bayes_mvs, [])
def test_result_attributes(self):
x = np.arange(15)
attributes = ('statistic', 'minmax')
res = stats.bayes_mvs(x)
for i in res:
check_named_results(i, attributes)
class TestMvsdist(object):
def test_basic(self):
data = [6, 9, 12, 7, 8, 8, 13]
mean, var, std = stats.mvsdist(data)
assert_almost_equal(mean.mean(), 9.0)
assert_allclose(mean.interval(0.9), (7.1036502226125329,
10.896349777387467), rtol=1e-14)
assert_almost_equal(var.mean(), 10.0)
assert_allclose(var.interval(0.9), (3.1767242068607087,
24.45910381334018), rtol=1e-09)
assert_almost_equal(std.mean(), 2.9724954732045084, decimal=14)
assert_allclose(std.interval(0.9), (1.7823367265645145,
4.9456146050146312), rtol=1e-14)
def test_empty_input(self):
assert_raises(ValueError, stats.mvsdist, [])
def test_bad_arg(self):
# Raise ValueError if fewer than two data points are given.
data = [1]
assert_raises(ValueError, stats.mvsdist, data)
def test_warns(self):
# regression test for gh-5270
# make sure there are no spurious divide-by-zero warnings
with warnings.catch_warnings():
warnings.simplefilter('error', RuntimeWarning)
[x.mean() for x in stats.mvsdist([1, 2, 3])]
[x.mean() for x in stats.mvsdist([1, 2, 3, 4, 5])]
class TestShapiro(object):
def test_basic(self):
x1 = [0.11, 7.87, 4.61, 10.14, 7.95, 3.14, 0.46,
4.43, 0.21, 4.75, 0.71, 1.52, 3.24,
0.93, 0.42, 4.97, 9.53, 4.55, 0.47, 6.66]
w, pw = stats.shapiro(x1)
assert_almost_equal(w, 0.90047299861907959, 6)
assert_almost_equal(pw, 0.042089745402336121, 6)
x2 = [1.36, 1.14, 2.92, 2.55, 1.46, 1.06, 5.27, -1.11,
3.48, 1.10, 0.88, -0.51, 1.46, 0.52, 6.20, 1.69,
0.08, 3.67, 2.81, 3.49]
w, pw = stats.shapiro(x2)
assert_almost_equal(w, 0.9590270, 6)
assert_almost_equal(pw, 0.52460, 3)
# Verified against R
np.random.seed(12345678)
x3 = stats.norm.rvs(loc=5, scale=3, size=100)
w, pw = stats.shapiro(x3)
assert_almost_equal(w, 0.9772805571556091, decimal=6)
assert_almost_equal(pw, 0.08144091814756393, decimal=3)
# Extracted from original paper
x4 = [0.139, 0.157, 0.175, 0.256, 0.344, 0.413, 0.503, 0.577, 0.614,
0.655, 0.954, 1.392, 1.557, 1.648, 1.690, 1.994, 2.174, 2.206,
3.245, 3.510, 3.571, 4.354, 4.980, 6.084, 8.351]
W_expected = 0.83467
p_expected = 0.000914
w, pw = stats.shapiro(x4)
assert_almost_equal(w, W_expected, decimal=4)
assert_almost_equal(pw, p_expected, decimal=5)
def test_2d(self):
x1 = [[0.11, 7.87, 4.61, 10.14, 7.95, 3.14, 0.46,
4.43, 0.21, 4.75], [0.71, 1.52, 3.24,
0.93, 0.42, 4.97, 9.53, 4.55, 0.47, 6.66]]
w, pw = stats.shapiro(x1)
assert_almost_equal(w, 0.90047299861907959, 6)
assert_almost_equal(pw, 0.042089745402336121, 6)
x2 = [[1.36, 1.14, 2.92, 2.55, 1.46, 1.06, 5.27, -1.11,
3.48, 1.10], [0.88, -0.51, 1.46, 0.52, 6.20, 1.69,
0.08, 3.67, 2.81, 3.49]]
w, pw = stats.shapiro(x2)
assert_almost_equal(w, 0.9590270, 6)
assert_almost_equal(pw, 0.52460, 3)
def test_empty_input(self):
assert_raises(ValueError, stats.shapiro, [])
assert_raises(ValueError, stats.shapiro, [[], [], []])
def test_not_enough_values(self):
assert_raises(ValueError, stats.shapiro, [1, 2])
assert_raises(ValueError, stats.shapiro, [[], [2]])
def test_bad_arg(self):
# Length of x is less than 3.
x = [1]
assert_raises(ValueError, stats.shapiro, x)
def test_nan_input(self):
x = np.arange(10.)
x[9] = np.nan
w, pw = stats.shapiro(x)
assert_equal(w, np.nan)
assert_almost_equal(pw, 1.0)
class TestAnderson(object):
def test_normal(self):
rs = RandomState(1234567890)
x1 = rs.standard_exponential(size=50)
x2 = rs.standard_normal(size=50)
A, crit, sig = stats.anderson(x1)
assert_array_less(crit[:-1], A)
A, crit, sig = stats.anderson(x2)
assert_array_less(A, crit[-2:])
v = np.ones(10)
v[0] = 0
A, crit, sig = stats.anderson(v)
# The expected statistic 3.208057 was computed independently of scipy.
# For example, in R:
# > library(nortest)
# > v <- rep(1, 10)
# > v[1] <- 0
# > result <- ad.test(v)
# > result$statistic
# A
# 3.208057
assert_allclose(A, 3.208057)
def test_expon(self):
rs = RandomState(1234567890)
x1 = rs.standard_exponential(size=50)
x2 = rs.standard_normal(size=50)
A, crit, sig = stats.anderson(x1, 'expon')
assert_array_less(A, crit[-2:])
olderr = np.seterr(all='ignore')
try:
A, crit, sig = stats.anderson(x2, 'expon')
finally:
np.seterr(**olderr)
assert_(A > crit[-1])
def test_gumbel(self):
# Regression test for gh-6306. Before that issue was fixed,
# this case would return a2=inf.
v = np.ones(100)
v[0] = 0.0
a2, crit, sig = stats.anderson(v, 'gumbel')
# A brief reimplementation of the calculation of the statistic.
n = len(v)
xbar, s = stats.gumbel_l.fit(v)
logcdf = stats.gumbel_l.logcdf(v, xbar, s)
logsf = stats.gumbel_l.logsf(v, xbar, s)
i = np.arange(1, n+1)
expected_a2 = -n - np.mean((2*i - 1) * (logcdf + logsf[::-1]))
assert_allclose(a2, expected_a2)
def test_bad_arg(self):
assert_raises(ValueError, stats.anderson, [1], dist='plate_of_shrimp')
def test_result_attributes(self):
rs = RandomState(1234567890)
x = rs.standard_exponential(size=50)
res = stats.anderson(x)
attributes = ('statistic', 'critical_values', 'significance_level')
check_named_results(res, attributes)
def test_gumbel_l(self):
# gh-2592, gh-6337
# Adds support to 'gumbel_r' and 'gumbel_l' as valid inputs for dist.
rs = RandomState(1234567890)
x = rs.gumbel(size=100)
A1, crit1, sig1 = stats.anderson(x, 'gumbel')
A2, crit2, sig2 = stats.anderson(x, 'gumbel_l')
assert_allclose(A2, A1)
def test_gumbel_r(self):
# gh-2592, gh-6337
# Adds support to 'gumbel_r' and 'gumbel_l' as valid inputs for dist.
rs = RandomState(1234567890)
x1 = rs.gumbel(size=100)
x2 = np.ones(100)
A1, crit1, sig1 = stats.anderson(x1, 'gumbel_r')
A2, crit2, sig2 = stats.anderson(x2, 'gumbel_r')
assert_array_less(A1, crit1[-2:])
assert_(A2 > crit2[-1])
class TestAndersonKSamp(object):
def test_example1a(self):
# Example data from Scholz & Stephens (1987), originally
# published in Lehmann (1995, Nonparametrics, Statistical
# Methods Based on Ranks, p. 309)
# Pass a mixture of lists and arrays
t1 = [38.7, 41.5, 43.8, 44.5, 45.5, 46.0, 47.7, 58.0]
t2 = np.array([39.2, 39.3, 39.7, 41.4, 41.8, 42.9, 43.3, 45.8])
t3 = np.array([34.0, 35.0, 39.0, 40.0, 43.0, 43.0, 44.0, 45.0])
t4 = np.array([34.0, 34.8, 34.8, 35.4, 37.2, 37.8, 41.2, 42.8])
Tk, tm, p = stats.anderson_ksamp((t1, t2, t3, t4), midrank=False)
assert_almost_equal(Tk, 4.449, 3)
assert_array_almost_equal([0.4985, 1.3237, 1.9158, 2.4930, 3.2459],
tm[0:5], 4)
assert_allclose(p, 0.0021, atol=0.00025)
def test_example1b(self):
# Example data from Scholz & Stephens (1987), originally
# published in Lehmann (1995, Nonparametrics, Statistical
# Methods Based on Ranks, p. 309)
# Pass arrays
t1 = np.array([38.7, 41.5, 43.8, 44.5, 45.5, 46.0, 47.7, 58.0])
t2 = np.array([39.2, 39.3, 39.7, 41.4, 41.8, 42.9, 43.3, 45.8])
t3 = np.array([34.0, 35.0, 39.0, 40.0, 43.0, 43.0, 44.0, 45.0])
t4 = np.array([34.0, 34.8, 34.8, 35.4, 37.2, 37.8, 41.2, 42.8])
Tk, tm, p = stats.anderson_ksamp((t1, t2, t3, t4), midrank=True)
assert_almost_equal(Tk, 4.480, 3)
assert_array_almost_equal([0.4985, 1.3237, 1.9158, 2.4930, 3.2459],
tm[0:5], 4)
assert_allclose(p, 0.0020, atol=0.00025)
def test_example2a(self):
# Example data taken from an earlier technical report of
# Scholz and Stephens
# Pass lists instead of arrays
t1 = [194, 15, 41, 29, 33, 181]
t2 = [413, 14, 58, 37, 100, 65, 9, 169, 447, 184, 36, 201, 118]
t3 = [34, 31, 18, 18, 67, 57, 62, 7, 22, 34]
t4 = [90, 10, 60, 186, 61, 49, 14, 24, 56, 20, 79, 84, 44, 59, 29,
118, 25, 156, 310, 76, 26, 44, 23, 62]
t5 = [130, 208, 70, 101, 208]
t6 = [74, 57, 48, 29, 502, 12, 70, 21, 29, 386, 59, 27]
t7 = [55, 320, 56, 104, 220, 239, 47, 246, 176, 182, 33]
t8 = [23, 261, 87, 7, 120, 14, 62, 47, 225, 71, 246, 21, 42, 20, 5,
12, 120, 11, 3, 14, 71, 11, 14, 11, 16, 90, 1, 16, 52, 95]
t9 = [97, 51, 11, 4, 141, 18, 142, 68, 77, 80, 1, 16, 106, 206, 82,
54, 31, 216, 46, 111, 39, 63, 18, 191, 18, 163, 24]
t10 = [50, 44, 102, 72, 22, 39, 3, 15, 197, 188, 79, 88, 46, 5, 5, 36,
22, 139, 210, 97, 30, 23, 13, 14]
t11 = [359, 9, 12, 270, 603, 3, 104, 2, 438]
t12 = [50, 254, 5, 283, 35, 12]
t13 = [487, 18, 100, 7, 98, 5, 85, 91, 43, 230, 3, 130]
t14 = [102, 209, 14, 57, 54, 32, 67, 59, 134, 152, 27, 14, 230, 66,
61, 34]
Tk, tm, p = stats.anderson_ksamp((t1, t2, t3, t4, t5, t6, t7, t8,
t9, t10, t11, t12, t13, t14),
midrank=False)
assert_almost_equal(Tk, 3.288, 3)
assert_array_almost_equal([0.5990, 1.3269, 1.8052, 2.2486, 2.8009],
tm[0:5], 4)
assert_allclose(p, 0.0041, atol=0.00025)
def test_example2b(self):
# Example data taken from an earlier technical report of
# Scholz and Stephens
t1 = [194, 15, 41, 29, 33, 181]
t2 = [413, 14, 58, 37, 100, 65, 9, 169, 447, 184, 36, 201, 118]
t3 = [34, 31, 18, 18, 67, 57, 62, 7, 22, 34]
t4 = [90, 10, 60, 186, 61, 49, 14, 24, 56, 20, 79, 84, 44, 59, 29,
118, 25, 156, 310, 76, 26, 44, 23, 62]
t5 = [130, 208, 70, 101, 208]
t6 = [74, 57, 48, 29, 502, 12, 70, 21, 29, 386, 59, 27]
t7 = [55, 320, 56, 104, 220, 239, 47, 246, 176, 182, 33]
t8 = [23, 261, 87, 7, 120, 14, 62, 47, 225, 71, 246, 21, 42, 20, 5,
12, 120, 11, 3, 14, 71, 11, 14, 11, 16, 90, 1, 16, 52, 95]
t9 = [97, 51, 11, 4, 141, 18, 142, 68, 77, 80, 1, 16, 106, 206, 82,
54, 31, 216, 46, 111, 39, 63, 18, 191, 18, 163, 24]
t10 = [50, 44, 102, 72, 22, 39, 3, 15, 197, 188, 79, 88, 46, 5, 5, 36,
22, 139, 210, 97, 30, 23, 13, 14]
t11 = [359, 9, 12, 270, 603, 3, 104, 2, 438]
t12 = [50, 254, 5, 283, 35, 12]
t13 = [487, 18, 100, 7, 98, 5, 85, 91, 43, 230, 3, 130]
t14 = [102, 209, 14, 57, 54, 32, 67, 59, 134, 152, 27, 14, 230, 66,
61, 34]
Tk, tm, p = stats.anderson_ksamp((t1, t2, t3, t4, t5, t6, t7, t8,
t9, t10, t11, t12, t13, t14),
midrank=True)
assert_almost_equal(Tk, 3.294, 3)
assert_array_almost_equal([0.5990, 1.3269, 1.8052, 2.2486, 2.8009],
tm[0:5], 4)
assert_allclose(p, 0.0041, atol=0.00025)
def test_R_kSamples(self):
# test values generates with R package kSamples
# package version 1.2-6 (2017-06-14)
# r1 = 1:100
# continuous case (no ties) --> version 1
# res <- kSamples::ad.test(r1, r1 + 40.5)
# res$ad[1, "T.AD"] # 41.105
# res$ad[1, " asympt. P-value"] # 5.8399e-18
#
# discrete case (ties allowed) --> version 2 (here: midrank=True)
# res$ad[2, "T.AD"] # 41.235
#
# res <- kSamples::ad.test(r1, r1 + .5)
# res$ad[1, "T.AD"] # -1.2824
# res$ad[1, " asympt. P-value"] # 1
# res$ad[2, "T.AD"] # -1.2944
#
# res <- kSamples::ad.test(r1, r1 + 7.5)
# res$ad[1, "T.AD"] # 1.4923
# res$ad[1, " asympt. P-value"] # 0.077501
#
# res <- kSamples::ad.test(r1, r1 + 6)
# res$ad[2, "T.AD"] # 0.63892
# res$ad[2, " asympt. P-value"] # 0.17981
#
# res <- kSamples::ad.test(r1, r1 + 11.5)
# res$ad[1, "T.AD"] # 4.5042
# res$ad[1, " asympt. P-value"] # 0.00545
#
# res <- kSamples::ad.test(r1, r1 + 13.5)
# res$ad[1, "T.AD"] # 6.2982
# res$ad[1, " asympt. P-value"] # 0.00118
x1 = np.linspace(1, 100, 100)
        # test case: different distributions; p-value floored at 0.001
# test case for issue #5493 / #8536
with suppress_warnings() as sup:
sup.filter(UserWarning, message='p-value floored')
s, _, p = stats.anderson_ksamp([x1, x1 + 40.5], midrank=False)
assert_almost_equal(s, 41.105, 3)
assert_equal(p, 0.001)
with suppress_warnings() as sup:
sup.filter(UserWarning, message='p-value floored')
s, _, p = stats.anderson_ksamp([x1, x1 + 40.5])
assert_almost_equal(s, 41.235, 3)
assert_equal(p, 0.001)
# test case: similar distributions --> p-value capped at 0.25
with suppress_warnings() as sup:
sup.filter(UserWarning, message='p-value capped')
s, _, p = stats.anderson_ksamp([x1, x1 + .5], midrank=False)
assert_almost_equal(s, -1.2824, 4)
assert_equal(p, 0.25)
with suppress_warnings() as sup:
sup.filter(UserWarning, message='p-value capped')
s, _, p = stats.anderson_ksamp([x1, x1 + .5])
assert_almost_equal(s, -1.2944, 4)
assert_equal(p, 0.25)
# test case: check interpolated p-value in [0.01, 0.25] (no ties)
s, _, p = stats.anderson_ksamp([x1, x1 + 7.5], midrank=False)
assert_almost_equal(s, 1.4923, 4)
assert_allclose(p, 0.0775, atol=0.005, rtol=0)
# test case: check interpolated p-value in [0.01, 0.25] (w/ ties)
s, _, p = stats.anderson_ksamp([x1, x1 + 6])
assert_almost_equal(s, 0.6389, 4)
assert_allclose(p, 0.1798, atol=0.005, rtol=0)
# test extended critical values for p=0.001 and p=0.005
s, _, p = stats.anderson_ksamp([x1, x1 + 11.5], midrank=False)
assert_almost_equal(s, 4.5042, 4)
assert_allclose(p, 0.00545, atol=0.0005, rtol=0)
s, _, p = stats.anderson_ksamp([x1, x1 + 13.5], midrank=False)
assert_almost_equal(s, 6.2982, 4)
assert_allclose(p, 0.00118, atol=0.0001, rtol=0)
def test_not_enough_samples(self):
assert_raises(ValueError, stats.anderson_ksamp, np.ones(5))
def test_no_distinct_observations(self):
assert_raises(ValueError, stats.anderson_ksamp,
(np.ones(5), np.ones(5)))
def test_empty_sample(self):
assert_raises(ValueError, stats.anderson_ksamp, (np.ones(5), []))
def test_result_attributes(self):
# Pass a mixture of lists and arrays
t1 = [38.7, 41.5, 43.8, 44.5, 45.5, 46.0, 47.7, 58.0]
t2 = np.array([39.2, 39.3, 39.7, 41.4, 41.8, 42.9, 43.3, 45.8])
res = stats.anderson_ksamp((t1, t2), midrank=False)
attributes = ('statistic', 'critical_values', 'significance_level')
check_named_results(res, attributes)
class TestAnsari(object):
def test_small(self):
x = [1, 2, 3, 3, 4]
y = [3, 2, 6, 1, 6, 1, 4, 1]
with suppress_warnings() as sup:
sup.filter(UserWarning, "Ties preclude use of exact statistic.")
W, pval = stats.ansari(x, y)
assert_almost_equal(W, 23.5, 11)
assert_almost_equal(pval, 0.13499256881897437, 11)
def test_approx(self):
ramsay = np.array((111, 107, 100, 99, 102, 106, 109, 108, 104, 99,
101, 96, 97, 102, 107, 113, 116, 113, 110, 98))
parekh = np.array((107, 108, 106, 98, 105, 103, 110, 105, 104,
100, 96, 108, 103, 104, 114, 114, 113, 108,
106, 99))
with suppress_warnings() as sup:
sup.filter(UserWarning, "Ties preclude use of exact statistic.")
W, pval = stats.ansari(ramsay, parekh)
assert_almost_equal(W, 185.5, 11)
assert_almost_equal(pval, 0.18145819972867083, 11)
def test_exact(self):
W, pval = stats.ansari([1, 2, 3, 4], [15, 5, 20, 8, 10, 12])
assert_almost_equal(W, 10.0, 11)
assert_almost_equal(pval, 0.533333333333333333, 7)
def test_bad_arg(self):
assert_raises(ValueError, stats.ansari, [], [1])
assert_raises(ValueError, stats.ansari, [1], [])
def test_result_attributes(self):
x = [1, 2, 3, 3, 4]
y = [3, 2, 6, 1, 6, 1, 4, 1]
with suppress_warnings() as sup:
sup.filter(UserWarning, "Ties preclude use of exact statistic.")
res = stats.ansari(x, y)
attributes = ('statistic', 'pvalue')
check_named_results(res, attributes)
class TestBartlett(object):
def test_data(self):
# https://www.itl.nist.gov/div898/handbook/eda/section3/eda357.htm
args = [g1, g2, g3, g4, g5, g6, g7, g8, g9, g10]
T, pval = stats.bartlett(*args)
assert_almost_equal(T, 20.78587342806484, 7)
assert_almost_equal(pval, 0.0136358632781, 7)
def test_bad_arg(self):
# Too few args raises ValueError.
assert_raises(ValueError, stats.bartlett, [1])
def test_result_attributes(self):
args = [g1, g2, g3, g4, g5, g6, g7, g8, g9, g10]
res = stats.bartlett(*args)
attributes = ('statistic', 'pvalue')
check_named_results(res, attributes)
def test_empty_arg(self):
args = (g1, g2, g3, g4, g5, g6, g7, g8, g9, g10, [])
assert_equal((np.nan, np.nan), stats.bartlett(*args))
# temporary fix for issue #9252: only accept 1d input
def test_1d_input(self):
x = np.array([[1, 2], [3, 4]])
assert_raises(ValueError, stats.bartlett, g1, x)
class TestLevene(object):
def test_data(self):
# https://www.itl.nist.gov/div898/handbook/eda/section3/eda35a.htm
args = [g1, g2, g3, g4, g5, g6, g7, g8, g9, g10]
W, pval = stats.levene(*args)
assert_almost_equal(W, 1.7059176930008939, 7)
assert_almost_equal(pval, 0.0990829755522, 7)
def test_trimmed1(self):
# Test that center='trimmed' gives the same result as center='mean'
# when proportiontocut=0.
W1, pval1 = stats.levene(g1, g2, g3, center='mean')
W2, pval2 = stats.levene(g1, g2, g3, center='trimmed',
proportiontocut=0.0)
assert_almost_equal(W1, W2)
assert_almost_equal(pval1, pval2)
def test_trimmed2(self):
x = [1.2, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 100.0]
y = [0.0, 3.0, 3.5, 4.0, 4.5, 5.0, 5.5, 200.0]
np.random.seed(1234)
x2 = np.random.permutation(x)
# Use center='trimmed'
W0, pval0 = stats.levene(x, y, center='trimmed',
proportiontocut=0.125)
W1, pval1 = stats.levene(x2, y, center='trimmed',
proportiontocut=0.125)
# Trim the data here, and use center='mean'
W2, pval2 = stats.levene(x[1:-1], y[1:-1], center='mean')
# Result should be the same.
assert_almost_equal(W0, W2)
assert_almost_equal(W1, W2)
assert_almost_equal(pval1, pval2)
def test_equal_mean_median(self):
x = np.linspace(-1, 1, 21)
np.random.seed(1234)
x2 = np.random.permutation(x)
y = x**3
W1, pval1 = stats.levene(x, y, center='mean')
W2, pval2 = stats.levene(x2, y, center='median')
assert_almost_equal(W1, W2)
assert_almost_equal(pval1, pval2)
def test_bad_keyword(self):
x = np.linspace(-1, 1, 21)
assert_raises(TypeError, stats.levene, x, x, portiontocut=0.1)
def test_bad_center_value(self):
x = np.linspace(-1, 1, 21)
assert_raises(ValueError, stats.levene, x, x, center='trim')
def test_too_few_args(self):
assert_raises(ValueError, stats.levene, [1])
def test_result_attributes(self):
args = [g1, g2, g3, g4, g5, g6, g7, g8, g9, g10]
res = stats.levene(*args)
attributes = ('statistic', 'pvalue')
check_named_results(res, attributes)
# temporary fix for issue #9252: only accept 1d input
def test_1d_input(self):
x = np.array([[1, 2], [3, 4]])
assert_raises(ValueError, stats.levene, g1, x)
class TestBinomP(object):
def test_data(self):
pval = stats.binom_test(100, 250)
assert_almost_equal(pval, 0.0018833009350757682, 11)
pval = stats.binom_test(201, 405)
assert_almost_equal(pval, 0.92085205962670713, 11)
pval = stats.binom_test([682, 243], p=3.0/4)
assert_almost_equal(pval, 0.38249155957481695, 11)
def test_bad_len_x(self):
# Length of x must be 1 or 2.
assert_raises(ValueError, stats.binom_test, [1, 2, 3])
def test_bad_n(self):
# len(x) is 1, but n is invalid.
# Missing n
assert_raises(ValueError, stats.binom_test, [100])
# n less than x[0]
assert_raises(ValueError, stats.binom_test, [100], n=50)
def test_bad_p(self):
assert_raises(ValueError, stats.binom_test, [50, 50], p=2.0)
def test_alternatives(self):
res = stats.binom_test(51, 235, p=1./6, alternative='less')
assert_almost_equal(res, 0.982022657605858)
res = stats.binom_test(51, 235, p=1./6, alternative='greater')
assert_almost_equal(res, 0.02654424571169085)
res = stats.binom_test(51, 235, p=1./6, alternative='two-sided')
assert_almost_equal(res, 0.0437479701823997)
class TestFligner(object):
def test_data(self):
# numbers from R: fligner.test in package stats
x1 = np.arange(5)
assert_array_almost_equal(stats.fligner(x1, x1**2),
(3.2282229927203536, 0.072379187848207877),
11)
def test_trimmed1(self):
# Perturb input to break ties in the transformed data
# See https://github.com/scipy/scipy/pull/8042 for more details
rs = np.random.RandomState(123)
_perturb = lambda g: (np.asarray(g) + 1e-10*rs.randn(len(g))).tolist()
g1_ = _perturb(g1)
g2_ = _perturb(g2)
g3_ = _perturb(g3)
# Test that center='trimmed' gives the same result as center='mean'
# when proportiontocut=0.
Xsq1, pval1 = stats.fligner(g1_, g2_, g3_, center='mean')
Xsq2, pval2 = stats.fligner(g1_, g2_, g3_, center='trimmed',
proportiontocut=0.0)
assert_almost_equal(Xsq1, Xsq2)
assert_almost_equal(pval1, pval2)
def test_trimmed2(self):
x = [1.2, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 100.0]
y = [0.0, 3.0, 3.5, 4.0, 4.5, 5.0, 5.5, 200.0]
# Use center='trimmed'
Xsq1, pval1 = stats.fligner(x, y, center='trimmed',
proportiontocut=0.125)
# Trim the data here, and use center='mean'
Xsq2, pval2 = stats.fligner(x[1:-1], y[1:-1], center='mean')
# Result should be the same.
assert_almost_equal(Xsq1, Xsq2)
assert_almost_equal(pval1, pval2)
# The following test looks reasonable at first, but fligner() uses the
# function stats.rankdata(), and in one of the cases in this test,
# there are ties, while in the other (because of normal rounding
# errors) there are not. This difference leads to differences in the
# third significant digit of W.
#
#def test_equal_mean_median(self):
# x = np.linspace(-1,1,21)
# y = x**3
# W1, pval1 = stats.fligner(x, y, center='mean')
# W2, pval2 = stats.fligner(x, y, center='median')
# assert_almost_equal(W1, W2)
# assert_almost_equal(pval1, pval2)
def test_bad_keyword(self):
x = np.linspace(-1, 1, 21)
assert_raises(TypeError, stats.fligner, x, x, portiontocut=0.1)
def test_bad_center_value(self):
x = np.linspace(-1, 1, 21)
assert_raises(ValueError, stats.fligner, x, x, center='trim')
def test_bad_num_args(self):
# Too few args raises ValueError.
assert_raises(ValueError, stats.fligner, [1])
def test_empty_arg(self):
x = np.arange(5)
assert_equal((np.nan, np.nan), stats.fligner(x, x**2, []))
class TestMood(object):
def test_mood(self):
# numbers from R: mood.test in package stats
x1 = np.arange(5)
assert_array_almost_equal(stats.mood(x1, x1**2),
(-1.3830857299399906, 0.16663858066771478),
11)
def test_mood_order_of_args(self):
# z should change sign when the order of arguments changes, pvalue
# should not change
np.random.seed(1234)
x1 = np.random.randn(10, 1)
x2 = np.random.randn(15, 1)
z1, p1 = stats.mood(x1, x2)
z2, p2 = stats.mood(x2, x1)
assert_array_almost_equal([z1, p1], [-z2, p2])
def test_mood_with_axis_none(self):
# Test with axis = None, compare with results from R
x1 = [-0.626453810742332, 0.183643324222082, -0.835628612410047,
1.59528080213779, 0.329507771815361, -0.820468384118015,
0.487429052428485, 0.738324705129217, 0.575781351653492,
-0.305388387156356, 1.51178116845085, 0.389843236411431,
-0.621240580541804, -2.2146998871775, 1.12493091814311,
-0.0449336090152309, -0.0161902630989461, 0.943836210685299,
0.821221195098089, 0.593901321217509]
x2 = [-0.896914546624981, 0.184849184646742, 1.58784533120882,
-1.13037567424629, -0.0802517565509893, 0.132420284381094,
0.707954729271733, -0.23969802417184, 1.98447393665293,
-0.138787012119665, 0.417650750792556, 0.981752777463662,
-0.392695355503813, -1.03966897694891, 1.78222896030858,
-2.31106908460517, 0.878604580921265, 0.035806718015226,
1.01282869212708, 0.432265154539617, 2.09081920524915,
-1.19992581964387, 1.58963820029007, 1.95465164222325,
0.00493777682814261, -2.45170638784613, 0.477237302613617,
-0.596558168631403, 0.792203270299649, 0.289636710177348]
x1 = np.array(x1)
x2 = np.array(x2)
x1.shape = (10, 2)
x2.shape = (15, 2)
assert_array_almost_equal(stats.mood(x1, x2, axis=None),
[-1.31716607555, 0.18778296257])
def test_mood_2d(self):
# Test if the results of mood test in 2-D case are consistent with the
# R result for the same inputs. Numbers from R mood.test().
ny = 5
np.random.seed(1234)
x1 = np.random.randn(10, ny)
x2 = np.random.randn(15, ny)
z_vectest, pval_vectest = stats.mood(x1, x2)
for j in range(ny):
assert_array_almost_equal([z_vectest[j], pval_vectest[j]],
stats.mood(x1[:, j], x2[:, j]))
# inverse order of dimensions
x1 = x1.transpose()
x2 = x2.transpose()
z_vectest, pval_vectest = stats.mood(x1, x2, axis=1)
for i in range(ny):
# check axis handling is self consistent
assert_array_almost_equal([z_vectest[i], pval_vectest[i]],
stats.mood(x1[i, :], x2[i, :]))
def test_mood_3d(self):
shape = (10, 5, 6)
np.random.seed(1234)
x1 = np.random.randn(*shape)
x2 = np.random.randn(*shape)
for axis in range(3):
z_vectest, pval_vectest = stats.mood(x1, x2, axis=axis)
# Tests that result for 3-D arrays is equal to that for the
# same calculation on a set of 1-D arrays taken from the
# 3-D array
axes_idx = ([1, 2], [0, 2], [0, 1]) # the two axes != axis
for i in range(shape[axes_idx[axis][0]]):
for j in range(shape[axes_idx[axis][1]]):
if axis == 0:
slice1 = x1[:, i, j]
slice2 = x2[:, i, j]
elif axis == 1:
slice1 = x1[i, :, j]
slice2 = x2[i, :, j]
else:
slice1 = x1[i, j, :]
slice2 = x2[i, j, :]
assert_array_almost_equal([z_vectest[i, j],
pval_vectest[i, j]],
stats.mood(slice1, slice2))
def test_mood_bad_arg(self):
# Raise ValueError when the sum of the lengths of the args is
# less than 3
assert_raises(ValueError, stats.mood, [1], [])
class TestProbplot(object):
def test_basic(self):
np.random.seed(12345)
x = stats.norm.rvs(size=20)
osm, osr = stats.probplot(x, fit=False)
osm_expected = [-1.8241636, -1.38768012, -1.11829229, -0.91222575,
-0.73908135, -0.5857176, -0.44506467, -0.31273668,
-0.18568928, -0.06158146, 0.06158146, 0.18568928,
0.31273668, 0.44506467, 0.5857176, 0.73908135,
0.91222575, 1.11829229, 1.38768012, 1.8241636]
assert_allclose(osr, np.sort(x))
assert_allclose(osm, osm_expected)
res, res_fit = stats.probplot(x, fit=True)
res_fit_expected = [1.05361841, 0.31297795, 0.98741609]
assert_allclose(res_fit, res_fit_expected)
def test_sparams_keyword(self):
np.random.seed(123456)
x = stats.norm.rvs(size=100)
# Check that None, () and 0 (loc=0, for normal distribution) all work
# and give the same results
osm1, osr1 = stats.probplot(x, sparams=None, fit=False)
osm2, osr2 = stats.probplot(x, sparams=0, fit=False)
osm3, osr3 = stats.probplot(x, sparams=(), fit=False)
assert_allclose(osm1, osm2)
assert_allclose(osm1, osm3)
assert_allclose(osr1, osr2)
assert_allclose(osr1, osr3)
# Check giving (loc, scale) params for normal distribution
osm, osr = stats.probplot(x, sparams=(), fit=False)
def test_dist_keyword(self):
np.random.seed(12345)
x = stats.norm.rvs(size=20)
osm1, osr1 = stats.probplot(x, fit=False, dist='t', sparams=(3,))
osm2, osr2 = stats.probplot(x, fit=False, dist=stats.t, sparams=(3,))
assert_allclose(osm1, osm2)
assert_allclose(osr1, osr2)
assert_raises(ValueError, stats.probplot, x, dist='wrong-dist-name')
assert_raises(AttributeError, stats.probplot, x, dist=[])
class custom_dist(object):
"""Some class that looks just enough like a distribution."""
def ppf(self, q):
return stats.norm.ppf(q, loc=2)
osm1, osr1 = stats.probplot(x, sparams=(2,), fit=False)
osm2, osr2 = stats.probplot(x, dist=custom_dist(), fit=False)
assert_allclose(osm1, osm2)
assert_allclose(osr1, osr2)
@pytest.mark.skipif(not have_matplotlib, reason="no matplotlib")
def test_plot_kwarg(self):
np.random.seed(7654321)
fig = plt.figure()
fig.add_subplot(111)
x = stats.t.rvs(3, size=100)
res1, fitres1 = stats.probplot(x, plot=plt)
plt.close()
res2, fitres2 = stats.probplot(x, plot=None)
res3 = stats.probplot(x, fit=False, plot=plt)
plt.close()
res4 = stats.probplot(x, fit=False, plot=None)
# Check that results are consistent between combinations of `fit` and
# `plot` keywords.
assert_(len(res1) == len(res2) == len(res3) == len(res4) == 2)
assert_allclose(res1, res2)
assert_allclose(res1, res3)
assert_allclose(res1, res4)
assert_allclose(fitres1, fitres2)
# Check that a Matplotlib Axes object is accepted
fig = plt.figure()
ax = fig.add_subplot(111)
stats.probplot(x, fit=False, plot=ax)
plt.close()
def test_probplot_bad_args(self):
# Raise ValueError when given an invalid distribution.
assert_raises(ValueError, stats.probplot, [1], dist="plate_of_shrimp")
def test_empty(self):
assert_equal(stats.probplot([], fit=False),
(np.array([]), np.array([])))
assert_equal(stats.probplot([], fit=True),
((np.array([]), np.array([])),
(np.nan, np.nan, 0.0)))
def test_array_of_size_one(self):
with np.errstate(invalid='ignore'):
assert_equal(stats.probplot([1], fit=True),
((np.array([0.]), np.array([1])),
(np.nan, np.nan, 0.0)))
def test_wilcoxon_bad_arg():
# Raise ValueError when two args of different lengths are given or
# zero_method is unknown.
assert_raises(ValueError, stats.wilcoxon, [1], [1, 2])
assert_raises(ValueError, stats.wilcoxon, [1, 2], [1, 2], "dummy")
def test_wilcoxon_arg_type():
# Should be able to accept list as arguments.
# Address issue 6070.
arr = [1, 2, 3, 0, -1, 3, 1, 2, 1, 1, 2]
_ = stats.wilcoxon(arr, zero_method="pratt")
_ = stats.wilcoxon(arr, zero_method="zsplit")
_ = stats.wilcoxon(arr, zero_method="wilcox")
class TestKstat(object):
def test_moments_normal_distribution(self):
np.random.seed(32149)
data = np.random.randn(12345)
moments = []
for n in [1, 2, 3, 4]:
moments.append(stats.kstat(data, n))
expected = [0.011315, 1.017931, 0.05811052, 0.0754134]
assert_allclose(moments, expected, rtol=1e-4)
# test equivalence with `stats.moment`
m1 = stats.moment(data, moment=1)
m2 = stats.moment(data, moment=2)
m3 = stats.moment(data, moment=3)
assert_allclose((m1, m2, m3), expected[:-1], atol=0.02, rtol=1e-2)
def test_empty_input(self):
assert_raises(ValueError, stats.kstat, [])
def test_nan_input(self):
data = np.arange(10.)
data[6] = np.nan
assert_equal(stats.kstat(data), np.nan)
def test_kstat_bad_arg(self):
# Raise ValueError if n > 4 or n < 1.
data = np.arange(10)
for n in [0, 4.001]:
assert_raises(ValueError, stats.kstat, data, n=n)
class TestKstatVar(object):
def test_empty_input(self):
assert_raises(ValueError, stats.kstatvar, [])
def test_nan_input(self):
data = np.arange(10.)
data[6] = np.nan
assert_equal(stats.kstat(data), np.nan)
def test_bad_arg(self):
# Raise ValueError is n is not 1 or 2.
data = [1]
n = 10
assert_raises(ValueError, stats.kstatvar, data, n=n)
class TestPpccPlot(object):
def setup_method(self):
np.random.seed(7654321)
self.x = stats.loggamma.rvs(5, size=500) + 5
def test_basic(self):
N = 5
svals, ppcc = stats.ppcc_plot(self.x, -10, 10, N=N)
ppcc_expected = [0.21139644, 0.21384059, 0.98766719, 0.97980182,
0.93519298]
assert_allclose(svals, np.linspace(-10, 10, num=N))
assert_allclose(ppcc, ppcc_expected)
def test_dist(self):
# Test that we can specify distributions both by name and as objects.
svals1, ppcc1 = stats.ppcc_plot(self.x, -10, 10, dist='tukeylambda')
svals2, ppcc2 = stats.ppcc_plot(self.x, -10, 10,
dist=stats.tukeylambda)
assert_allclose(svals1, svals2, rtol=1e-20)
assert_allclose(ppcc1, ppcc2, rtol=1e-20)
# Test that 'tukeylambda' is the default dist
svals3, ppcc3 = stats.ppcc_plot(self.x, -10, 10)
assert_allclose(svals1, svals3, rtol=1e-20)
assert_allclose(ppcc1, ppcc3, rtol=1e-20)
@pytest.mark.skipif(not have_matplotlib, reason="no matplotlib")
def test_plot_kwarg(self):
# Check with the matplotlib.pyplot module
fig = plt.figure()
ax = fig.add_subplot(111)
stats.ppcc_plot(self.x, -20, 20, plot=plt)
fig.delaxes(ax)
# Check that a Matplotlib Axes object is accepted
ax = fig.add_subplot(111)
stats.ppcc_plot(self.x, -20, 20, plot=ax)
plt.close()
def test_invalid_inputs(self):
# `b` has to be larger than `a`
assert_raises(ValueError, stats.ppcc_plot, self.x, 1, 0)
# Raise ValueError when given an invalid distribution.
assert_raises(ValueError, stats.ppcc_plot, [1, 2, 3], 0, 1,
dist="plate_of_shrimp")
def test_empty(self):
# For consistency with probplot return for one empty array,
# ppcc contains all zeros and svals is the same as for normal array
# input.
svals, ppcc = stats.ppcc_plot([], 0, 1)
assert_allclose(svals, np.linspace(0, 1, num=80))
assert_allclose(ppcc, np.zeros(80, dtype=float))
class TestPpccMax(object):
def test_ppcc_max_bad_arg(self):
# Raise ValueError when given an invalid distribution.
data = [1]
assert_raises(ValueError, stats.ppcc_max, data, dist="plate_of_shrimp")
def test_ppcc_max_basic(self):
np.random.seed(1234567)
x = stats.tukeylambda.rvs(-0.7, loc=2, scale=0.5, size=10000) + 1e4
# On Python 2.6 the result is accurate to 5 decimals. On Python >= 2.7
# it is accurate up to 16 decimals
assert_almost_equal(stats.ppcc_max(x), -0.71215366521264145, decimal=5)
def test_dist(self):
np.random.seed(1234567)
x = stats.tukeylambda.rvs(-0.7, loc=2, scale=0.5, size=10000) + 1e4
# Test that we can specify distributions both by name and as objects.
max1 = stats.ppcc_max(x, dist='tukeylambda')
max2 = stats.ppcc_max(x, dist=stats.tukeylambda)
assert_almost_equal(max1, -0.71215366521264145, decimal=5)
assert_almost_equal(max2, -0.71215366521264145, decimal=5)
# Test that 'tukeylambda' is the default dist
max3 = stats.ppcc_max(x)
assert_almost_equal(max3, -0.71215366521264145, decimal=5)
def test_brack(self):
np.random.seed(1234567)
x = stats.tukeylambda.rvs(-0.7, loc=2, scale=0.5, size=10000) + 1e4
assert_raises(ValueError, stats.ppcc_max, x, brack=(0.0, 1.0, 0.5))
# On Python 2.6 the result is accurate to 5 decimals. On Python >= 2.7
# it is accurate up to 16 decimals
assert_almost_equal(stats.ppcc_max(x, brack=(0, 1)),
-0.71215366521264145, decimal=5)
# On Python 2.6 the result is accurate to 5 decimals. On Python >= 2.7
# it is accurate up to 16 decimals
assert_almost_equal(stats.ppcc_max(x, brack=(-2, 2)),
-0.71215366521264145, decimal=5)
class TestBoxcox_llf(object):
def test_basic(self):
np.random.seed(54321)
x = stats.norm.rvs(size=10000, loc=10)
lmbda = 1
llf = stats.boxcox_llf(lmbda, x)
llf_expected = -x.size / 2. * np.log(np.sum(x.std()**2))
assert_allclose(llf, llf_expected)
def test_array_like(self):
np.random.seed(54321)
x = stats.norm.rvs(size=100, loc=10)
lmbda = 1
llf = stats.boxcox_llf(lmbda, x)
llf2 = stats.boxcox_llf(lmbda, list(x))
assert_allclose(llf, llf2, rtol=1e-12)
def test_2d_input(self):
# Note: boxcox_llf() was already working with 2-D input (sort of), so
# keep it like that. boxcox() doesn't work with 2-D input though, due
# to brent() returning a scalar.
np.random.seed(54321)
x = stats.norm.rvs(size=100, loc=10)
lmbda = 1
llf = stats.boxcox_llf(lmbda, x)
llf2 = stats.boxcox_llf(lmbda, np.vstack([x, x]).T)
assert_allclose([llf, llf], llf2, rtol=1e-12)
def test_empty(self):
assert_(np.isnan(stats.boxcox_llf(1, [])))
class TestBoxcox(object):
def test_fixed_lmbda(self):
np.random.seed(12345)
x = stats.loggamma.rvs(5, size=50) + 5
xt = stats.boxcox(x, lmbda=1)
assert_allclose(xt, x - 1)
xt = stats.boxcox(x, lmbda=-1)
assert_allclose(xt, 1 - 1/x)
xt = stats.boxcox(x, lmbda=0)
assert_allclose(xt, np.log(x))
# Also test that array_like input works
xt = stats.boxcox(list(x), lmbda=0)
assert_allclose(xt, np.log(x))
def test_lmbda_None(self):
np.random.seed(1234567)
# Start from normal rv's, do inverse transform to check that
# optimization function gets close to the right answer.
np.random.seed(1245)
lmbda = 2.5
x = stats.norm.rvs(loc=10, size=50000)
x_inv = (x * lmbda + 1)**(-lmbda)
xt, maxlog = stats.boxcox(x_inv)
assert_almost_equal(maxlog, -1 / lmbda, decimal=2)
def test_alpha(self):
np.random.seed(1234)
x = stats.loggamma.rvs(5, size=50) + 5
# Some regular values for alpha, on a small sample size
_, _, interval = stats.boxcox(x, alpha=0.75)
assert_allclose(interval, [4.004485780226041, 5.138756355035744])
_, _, interval = stats.boxcox(x, alpha=0.05)
assert_allclose(interval, [1.2138178554857557, 8.209033272375663])
# Try some extreme values, see we don't hit the N=500 limit
x = stats.loggamma.rvs(7, size=500) + 15
_, _, interval = stats.boxcox(x, alpha=0.001)
assert_allclose(interval, [0.3988867, 11.40553131])
_, _, interval = stats.boxcox(x, alpha=0.999)
assert_allclose(interval, [5.83316246, 5.83735292])
def test_boxcox_bad_arg(self):
# Raise ValueError if any data value is negative.
x = np.array([-1])
assert_raises(ValueError, stats.boxcox, x)
def test_empty(self):
assert_(stats.boxcox([]).shape == (0,))
class TestBoxcoxNormmax(object):
def setup_method(self):
np.random.seed(12345)
self.x = stats.loggamma.rvs(5, size=50) + 5
def test_pearsonr(self):
maxlog = stats.boxcox_normmax(self.x)
assert_allclose(maxlog, 1.804465, rtol=1e-6)
def test_mle(self):
maxlog = stats.boxcox_normmax(self.x, method='mle')
assert_allclose(maxlog, 1.758101, rtol=1e-6)
# Check that boxcox() uses 'mle'
_, maxlog_boxcox = stats.boxcox(self.x)
assert_allclose(maxlog_boxcox, maxlog)
def test_all(self):
maxlog_all = stats.boxcox_normmax(self.x, method='all')
assert_allclose(maxlog_all, [1.804465, 1.758101], rtol=1e-6)
class TestBoxcoxNormplot(object):
def setup_method(self):
np.random.seed(7654321)
self.x = stats.loggamma.rvs(5, size=500) + 5
def test_basic(self):
N = 5
lmbdas, ppcc = stats.boxcox_normplot(self.x, -10, 10, N=N)
ppcc_expected = [0.57783375, 0.83610988, 0.97524311, 0.99756057,
0.95843297]
assert_allclose(lmbdas, np.linspace(-10, 10, num=N))
assert_allclose(ppcc, ppcc_expected)
@pytest.mark.skipif(not have_matplotlib, reason="no matplotlib")
def test_plot_kwarg(self):
# Check with the matplotlib.pyplot module
fig = plt.figure()
ax = fig.add_subplot(111)
stats.boxcox_normplot(self.x, -20, 20, plot=plt)
fig.delaxes(ax)
# Check that a Matplotlib Axes object is accepted
ax = fig.add_subplot(111)
stats.boxcox_normplot(self.x, -20, 20, plot=ax)
plt.close()
def test_invalid_inputs(self):
# `lb` has to be larger than `la`
assert_raises(ValueError, stats.boxcox_normplot, self.x, 1, 0)
# `x` can not contain negative values
assert_raises(ValueError, stats.boxcox_normplot, [-1, 1], 0, 1)
def test_empty(self):
assert_(stats.boxcox_normplot([], 0, 1).size == 0)
class TestYeojohnson_llf(object):
def test_array_like(self):
np.random.seed(54321)
x = stats.norm.rvs(size=100, loc=0)
lmbda = 1
llf = stats.yeojohnson_llf(lmbda, x)
llf2 = stats.yeojohnson_llf(lmbda, list(x))
assert_allclose(llf, llf2, rtol=1e-12)
def test_2d_input(self):
np.random.seed(54321)
x = stats.norm.rvs(size=100, loc=10)
lmbda = 1
llf = stats.yeojohnson_llf(lmbda, x)
llf2 = stats.yeojohnson_llf(lmbda, np.vstack([x, x]).T)
assert_allclose([llf, llf], llf2, rtol=1e-12)
def test_empty(self):
assert_(np.isnan(stats.yeojohnson_llf(1, [])))
class TestYeojohnson(object):
def test_fixed_lmbda(self):
np.random.seed(12345)
# Test positive input
x = stats.loggamma.rvs(5, size=50) + 5
assert np.all(x > 0)
xt = stats.yeojohnson(x, lmbda=1)
assert_allclose(xt, x)
xt = stats.yeojohnson(x, lmbda=-1)
assert_allclose(xt, 1 - 1 / (x + 1))
xt = stats.yeojohnson(x, lmbda=0)
assert_allclose(xt, np.log(x + 1))
xt = stats.yeojohnson(x, lmbda=1)
assert_allclose(xt, x)
# Test negative input
x = stats.loggamma.rvs(5, size=50) - 5
assert np.all(x < 0)
xt = stats.yeojohnson(x, lmbda=2)
assert_allclose(xt, -np.log(-x + 1))
xt = stats.yeojohnson(x, lmbda=1)
assert_allclose(xt, x)
xt = stats.yeojohnson(x, lmbda=3)
assert_allclose(xt, 1 / (-x + 1) - 1)
# test both positive and negative input
x = stats.loggamma.rvs(5, size=50) - 2
assert not np.all(x < 0)
assert not np.all(x >= 0)
pos = x >= 0
xt = stats.yeojohnson(x, lmbda=1)
assert_allclose(xt[pos], x[pos])
xt = stats.yeojohnson(x, lmbda=-1)
assert_allclose(xt[pos], 1 - 1 / (x[pos] + 1))
xt = stats.yeojohnson(x, lmbda=0)
assert_allclose(xt[pos], np.log(x[pos] + 1))
xt = stats.yeojohnson(x, lmbda=1)
assert_allclose(xt[pos], x[pos])
neg = ~pos
xt = stats.yeojohnson(x, lmbda=2)
assert_allclose(xt[neg], -np.log(-x[neg] + 1))
xt = stats.yeojohnson(x, lmbda=1)
assert_allclose(xt[neg], x[neg])
xt = stats.yeojohnson(x, lmbda=3)
assert_allclose(xt[neg], 1 / (-x[neg] + 1) - 1)
@pytest.mark.parametrize('lmbda', [0, .1, .5, 2])
def test_lmbda_None(self, lmbda):
# Start from normal rv's, do inverse transform to check that
# optimization function gets close to the right answer.
def _inverse_transform(x, lmbda):
x_inv = np.zeros(x.shape, dtype=x.dtype)
pos = x >= 0
# when x >= 0
if abs(lmbda) < np.spacing(1.):
x_inv[pos] = np.exp(x[pos]) - 1
else: # lmbda != 0
x_inv[pos] = np.power(x[pos] * lmbda + 1, 1 / lmbda) - 1
# when x < 0
if abs(lmbda - 2) > np.spacing(1.):
x_inv[~pos] = 1 - np.power(-(2 - lmbda) * x[~pos] + 1,
1 / (2 - lmbda))
else: # lmbda == 2
x_inv[~pos] = 1 - np.exp(-x[~pos])
return x_inv
np.random.seed(1234567)
n_samples = 20000
x = np.random.normal(loc=0, scale=1, size=(n_samples))
x_inv = _inverse_transform(x, lmbda)
xt, maxlog = stats.yeojohnson(x_inv)
assert_allclose(maxlog, lmbda, atol=1e-2)
assert_almost_equal(0, np.linalg.norm(x - xt) / n_samples, decimal=2)
assert_almost_equal(0, xt.mean(), decimal=1)
assert_almost_equal(1, xt.std(), decimal=1)
def test_empty(self):
assert_(stats.yeojohnson([]).shape == (0,))
def test_array_like(self):
np.random.seed(54321)
x = stats.norm.rvs(size=100, loc=0)
lmbda = 1.5
xt1, _ = stats.yeojohnson(x)
xt2, _ = stats.yeojohnson(list(x))
assert_allclose(xt1, xt2, rtol=1e-12)
class TestYeojohnsonNormmax(object):
def setup_method(self):
np.random.seed(12345)
self.x = stats.loggamma.rvs(5, size=50) + 5
def test_mle(self):
maxlog = stats.yeojohnson_normmax(self.x)
assert_allclose(maxlog, 1.876393, rtol=1e-6)
def test_darwin_example(self):
# test from original paper "A new family of power transformations to
# improve normality or symmetry" by Yeo and Johnson.
x = [6.1, -8.4, 1.0, 2.0, 0.7, 2.9, 3.5, 5.1, 1.8, 3.6, 7.0, 3.0, 9.3,
7.5, -6.0]
lmbda = stats.yeojohnson_normmax(x)
assert np.allclose(lmbda, 1.305, atol=1e-3)
class TestCircFuncs(object):
def test_circfuncs(self):
x = np.array([355, 5, 2, 359, 10, 350])
M = stats.circmean(x, high=360)
Mval = 0.167690146
assert_allclose(M, Mval, rtol=1e-7)
V = stats.circvar(x, high=360)
Vval = 42.51955609
assert_allclose(V, Vval, rtol=1e-7)
S = stats.circstd(x, high=360)
Sval = 6.520702116
assert_allclose(S, Sval, rtol=1e-7)
def test_circfuncs_small(self):
x = np.array([20, 21, 22, 18, 19, 20.5, 19.2])
M1 = x.mean()
M2 = stats.circmean(x, high=360)
assert_allclose(M2, M1, rtol=1e-5)
V1 = x.var()
V2 = stats.circvar(x, high=360)
assert_allclose(V2, V1, rtol=1e-4)
S1 = x.std()
S2 = stats.circstd(x, high=360)
assert_allclose(S2, S1, rtol=1e-4)
def test_circmean_axis(self):
x = np.array([[355, 5, 2, 359, 10, 350],
[351, 7, 4, 352, 9, 349],
[357, 9, 8, 358, 4, 356]])
M1 = stats.circmean(x, high=360)
M2 = stats.circmean(x.ravel(), high=360)
assert_allclose(M1, M2, rtol=1e-14)
M1 = stats.circmean(x, high=360, axis=1)
M2 = [stats.circmean(x[i], high=360) for i in range(x.shape[0])]
assert_allclose(M1, M2, rtol=1e-14)
M1 = stats.circmean(x, high=360, axis=0)
M2 = [stats.circmean(x[:, i], high=360) for i in range(x.shape[1])]
assert_allclose(M1, M2, rtol=1e-14)
def test_circvar_axis(self):
x = np.array([[355, 5, 2, 359, 10, 350],
[351, 7, 4, 352, 9, 349],
[357, 9, 8, 358, 4, 356]])
V1 = stats.circvar(x, high=360)
V2 = stats.circvar(x.ravel(), high=360)
assert_allclose(V1, V2, rtol=1e-11)
V1 = stats.circvar(x, high=360, axis=1)
V2 = [stats.circvar(x[i], high=360) for i in range(x.shape[0])]
assert_allclose(V1, V2, rtol=1e-11)
V1 = stats.circvar(x, high=360, axis=0)
V2 = [stats.circvar(x[:, i], high=360) for i in range(x.shape[1])]
assert_allclose(V1, V2, rtol=1e-11)
def test_circstd_axis(self):
x = np.array([[355, 5, 2, 359, 10, 350],
[351, 7, 4, 352, 9, 349],
[357, 9, 8, 358, 4, 356]])
S1 = stats.circstd(x, high=360)
S2 = stats.circstd(x.ravel(), high=360)
assert_allclose(S1, S2, rtol=1e-11)
S1 = stats.circstd(x, high=360, axis=1)
S2 = [stats.circstd(x[i], high=360) for i in range(x.shape[0])]
assert_allclose(S1, S2, rtol=1e-11)
S1 = stats.circstd(x, high=360, axis=0)
S2 = [stats.circstd(x[:, i], high=360) for i in range(x.shape[1])]
assert_allclose(S1, S2, rtol=1e-11)
def test_circfuncs_array_like(self):
x = [355, 5, 2, 359, 10, 350]
assert_allclose(stats.circmean(x, high=360), 0.167690146, rtol=1e-7)
assert_allclose(stats.circvar(x, high=360), 42.51955609, rtol=1e-7)
assert_allclose(stats.circstd(x, high=360), 6.520702116, rtol=1e-7)
def test_empty(self):
assert_(np.isnan(stats.circmean([])))
assert_(np.isnan(stats.circstd([])))
assert_(np.isnan(stats.circvar([])))
def test_circmean_scalar(self):
x = 1.
M1 = x
M2 = stats.circmean(x)
assert_allclose(M2, M1, rtol=1e-5)
def test_circmean_range(self):
# regression test for gh-6420: circmean(..., high, low) must be
# between `high` and `low`
m = stats.circmean(np.arange(0, 2, 0.1), np.pi, -np.pi)
assert_(m < np.pi)
assert_(m > -np.pi)
    def test_circfuncs_uint8(self):
# regression test for gh-7255: overflow when working with
# numpy uint8 data type
x = np.array([150, 10], dtype='uint8')
assert_equal(stats.circmean(x, high=180), 170.0)
assert_allclose(stats.circvar(x, high=180), 437.45871686, rtol=1e-7)
assert_allclose(stats.circstd(x, high=180), 20.91551378, rtol=1e-7)
def test_accuracy_wilcoxon():
freq = [1, 4, 16, 15, 8, 4, 5, 1, 2]
nums = range(-4, 5)
x = np.concatenate([[u] * v for u, v in zip(nums, freq)])
y = np.zeros(x.size)
T, p = stats.wilcoxon(x, y, "pratt")
assert_allclose(T, 423)
assert_allclose(p, 0.00197547303533107)
T, p = stats.wilcoxon(x, y, "zsplit")
assert_allclose(T, 441)
assert_allclose(p, 0.0032145343172473055)
T, p = stats.wilcoxon(x, y, "wilcox")
assert_allclose(T, 327)
assert_allclose(p, 0.00641346115861)
# Test the 'correction' option, using values computed in R with:
# > wilcox.test(x, y, paired=TRUE, exact=FALSE, correct={FALSE,TRUE})
x = np.array([120, 114, 181, 188, 180, 146, 121, 191, 132, 113, 127, 112])
y = np.array([133, 143, 119, 189, 112, 199, 198, 113, 115, 121, 142, 187])
T, p = stats.wilcoxon(x, y, correction=False)
assert_equal(T, 34)
assert_allclose(p, 0.6948866, rtol=1e-6)
T, p = stats.wilcoxon(x, y, correction=True)
assert_equal(T, 34)
assert_allclose(p, 0.7240817, rtol=1e-6)
def test_wilcoxon_result_attributes():
x = np.array([120, 114, 181, 188, 180, 146, 121, 191, 132, 113, 127, 112])
y = np.array([133, 143, 119, 189, 112, 199, 198, 113, 115, 121, 142, 187])
res = stats.wilcoxon(x, y, correction=False)
attributes = ('statistic', 'pvalue')
check_named_results(res, attributes)
def test_wilcoxon_tie():
# Regression test for gh-2391.
# Corresponding R code is:
# > result = wilcox.test(rep(0.1, 10), exact=FALSE, correct=FALSE)
# > result$p.value
# [1] 0.001565402
# > result = wilcox.test(rep(0.1, 10), exact=FALSE, correct=TRUE)
# > result$p.value
# [1] 0.001904195
stat, p = stats.wilcoxon([0.1] * 10)
expected_p = 0.001565402
assert_equal(stat, 0)
assert_allclose(p, expected_p, rtol=1e-6)
stat, p = stats.wilcoxon([0.1] * 10, correction=True)
expected_p = 0.001904195
assert_equal(stat, 0)
assert_allclose(p, expected_p, rtol=1e-6)
class TestMedianTest(object):
def test_bad_n_samples(self):
# median_test requires at least two samples.
assert_raises(ValueError, stats.median_test, [1, 2, 3])
def test_empty_sample(self):
# Each sample must contain at least one value.
assert_raises(ValueError, stats.median_test, [], [1, 2, 3])
def test_empty_when_ties_ignored(self):
# The grand median is 1, and all values in the first argument are
# equal to the grand median. With ties="ignore", those values are
# ignored, which results in the first sample being (in effect) empty.
# This should raise a ValueError.
assert_raises(ValueError, stats.median_test,
[1, 1, 1, 1], [2, 0, 1], [2, 0], ties="ignore")
def test_empty_contingency_row(self):
# The grand median is 1, and with the default ties="below", all the
# values in the samples are counted as being below the grand median.
        # This would result in a row of zeros in the contingency table, which is
# an error.
assert_raises(ValueError, stats.median_test, [1, 1, 1], [1, 1, 1])
# With ties="above", all the values are counted as above the
# grand median.
assert_raises(ValueError, stats.median_test, [1, 1, 1], [1, 1, 1],
ties="above")
def test_bad_ties(self):
assert_raises(ValueError, stats.median_test, [1, 2, 3], [4, 5],
ties="foo")
def test_bad_nan_policy(self):
assert_raises(ValueError, stats.median_test, [1, 2, 3], [4, 5], nan_policy='foobar')
def test_bad_keyword(self):
assert_raises(TypeError, stats.median_test, [1, 2, 3], [4, 5],
foo="foo")
def test_simple(self):
x = [1, 2, 3]
y = [1, 2, 3]
stat, p, med, tbl = stats.median_test(x, y)
# The median is floating point, but this equality test should be safe.
assert_equal(med, 2.0)
assert_array_equal(tbl, [[1, 1], [2, 2]])
# The expected values of the contingency table equal the contingency
# table, so the statistic should be 0 and the p-value should be 1.
assert_equal(stat, 0)
assert_equal(p, 1)
def test_ties_options(self):
# Test the contingency table calculation.
x = [1, 2, 3, 4]
y = [5, 6]
z = [7, 8, 9]
# grand median is 5.
# Default 'ties' option is "below".
stat, p, m, tbl = stats.median_test(x, y, z)
assert_equal(m, 5)
assert_equal(tbl, [[0, 1, 3], [4, 1, 0]])
stat, p, m, tbl = stats.median_test(x, y, z, ties="ignore")
assert_equal(m, 5)
assert_equal(tbl, [[0, 1, 3], [4, 0, 0]])
stat, p, m, tbl = stats.median_test(x, y, z, ties="above")
assert_equal(m, 5)
assert_equal(tbl, [[0, 2, 3], [4, 0, 0]])
def test_nan_policy_options(self):
x = [1, 2, np.nan]
y = [4, 5, 6]
mt1 = stats.median_test(x, y, nan_policy='propagate')
s, p, m, t = stats.median_test(x, y, nan_policy='omit')
assert_equal(mt1, (np.nan, np.nan, np.nan, None))
assert_allclose(s, 0.31250000000000006)
assert_allclose(p, 0.57615012203057869)
assert_equal(m, 4.0)
assert_equal(t, np.array([[0, 2],[2, 1]]))
assert_raises(ValueError, stats.median_test, x, y, nan_policy='raise')
def test_basic(self):
# median_test calls chi2_contingency to compute the test statistic
# and p-value. Make sure it hasn't screwed up the call...
x = [1, 2, 3, 4, 5]
y = [2, 4, 6, 8]
stat, p, m, tbl = stats.median_test(x, y)
assert_equal(m, 4)
assert_equal(tbl, [[1, 2], [4, 2]])
exp_stat, exp_p, dof, e = stats.chi2_contingency(tbl)
assert_allclose(stat, exp_stat)
assert_allclose(p, exp_p)
stat, p, m, tbl = stats.median_test(x, y, lambda_=0)
assert_equal(m, 4)
assert_equal(tbl, [[1, 2], [4, 2]])
exp_stat, exp_p, dof, e = stats.chi2_contingency(tbl, lambda_=0)
assert_allclose(stat, exp_stat)
assert_allclose(p, exp_p)
stat, p, m, tbl = stats.median_test(x, y, correction=False)
assert_equal(m, 4)
assert_equal(tbl, [[1, 2], [4, 2]])
exp_stat, exp_p, dof, e = stats.chi2_contingency(tbl, correction=False)
assert_allclose(stat, exp_stat)
assert_allclose(p, exp_p)
| 38.41692 | 92 | 0.576609 |
4a1e88187e8e1ef9f01e4e0865b562d9eeb4b0f6
| 10,597 |
py
|
Python
|
src/data/online_features.py
|
Twitter-Fake/twitter-fake
|
0388630211470baef4f7dfc50614147e458c8df5
|
[
"Apache-2.0"
] | 1 |
2019-11-15T16:58:38.000Z
|
2019-11-15T16:58:38.000Z
|
src/data/online_features.py
|
Twitter-Fake/twitter-fake
|
0388630211470baef4f7dfc50614147e458c8df5
|
[
"Apache-2.0"
] | null | null | null |
src/data/online_features.py
|
Twitter-Fake/twitter-fake
|
0388630211470baef4f7dfc50614147e458c8df5
|
[
"Apache-2.0"
] | 1 |
2019-11-12T15:27:06.000Z
|
2019-11-12T15:27:06.000Z
|
"""
This module builds all the features used for online processing.
"""
import re
import nltk
nltk.download('stopwords')
from nltk.corpus import stopwords
import pandas as pd
from gensim.corpora.dictionary import Dictionary
from gensim.models.ldamodel import LdaModel
"""
Common text processing functionalities.
"""
def general_text_processing(data):
regex_list = [('\\S*@\\S*\\s?', ''),
('\\s+', ' '),
("\\'", ""),
("\\d+", "")
]
for regex_text in regex_list:
data = re.sub(regex_text[0], regex_text[1], data)
return data
"""
Helpers to parallelize DataFrame processing across CPU cores.
"""
import multiprocessing as mp # cpu_count, Parallel, Pool
import numpy as np
cores = mp.cpu_count() # Number of CPU cores on your system
partitions = cores # Define as many partitions as you want
def get_split(data, n):
size = data.shape[0]
ret = []
k = int((size + n) / n)
    for i in range(1, n + 1):  # one slice per partition
ret.append(data[(i - 1) * k: min(size, i * k)])
return ret
def parallelize(data, func):
data_split = get_split(data, cores)
pool = mp.Pool(cores)
data = pd.concat(pool.map(func, data_split))
pool.close()
pool.join()
return data
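# Illustrative usage sketch (not part of the original module): `parallelize` expects a
# function that maps a DataFrame chunk to a DataFrame chunk, e.g. a hypothetical
# helper that cleans the `tweet` column of each chunk:
#
#   def clean_chunk(chunk):
#       chunk['tweet'] = chunk['tweet'].map(clean_tweet)
#       return chunk
#
#   # df = parallelize(df, clean_chunk)  # split into `cores` chunks, process, then concat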
stop_words = set(stopwords.words('english'))
"""
Tweet cleaning function.
Standard recipe, adapted from a publicly available reference.
"""
def clean_tweet(text):
# Create a string form of our list of text
if pd.isnull(text) or pd.isna(text):
return ""
global stop_words
raw_string = text
#raw_string = ''.join(text)
no_links = re.sub(r'http\S+', '', raw_string)
no_unicode = re.sub(r"\\[a-z][a-z]?[0-9]+", '', no_links)
no_special_characters = re.sub('[^A-Za-z ]+', '', no_unicode)
words = no_special_characters.split(" ")
words = [w for w in words if len(w) > 2]
words = [w.lower() for w in words]
words = [w for w in words if w not in stop_words]
# ret = ' '.join(words)
return words
"""
Remove stopwords
"""
def remove_stop_words(text):
    valid_words = [x for x in re.split('[^a-zA-Z]', text) if x not in stop_words]  # split on non-letter characters
valid_words = [x for x in valid_words if len(x) != 0]
### Empty
if (len(valid_words) == 0):
return ""
return " ".join(valid_words)
"""
Fill the dataframe with LDA topic-distribution columns.
"""
def fill_lda_result(df, lda_model, dictionary, topic_count):
values = df['tweet'].values.tolist()
doc2_corupus = [dictionary.doc2bow(text.split()) for
text in values]
predicted_values = [lda_model[vec] for vec in doc2_corupus]
"""
append to column
"""
for i in range(len(predicted_values)):
temp = [0 for x in range(topic_count)]
for ele in predicted_values[i]:
temp[ele[0]] = ele[1]
predicted_values[i] = temp
for index in range(topic_count):
col_name = "topic_" + str(index)
df[col_name] = [x[index] for x in predicted_values]
return df
def fill_lda_result_2(df, lda_model, dictionary, topic_count):
values = df['tweet'].values.tolist()
doc2_corupus = [dictionary.doc2bow(text) for
text in values]
predicted_values = [lda_model[vec] for vec in doc2_corupus]
"""
append to column
"""
for i in range(len(predicted_values)):
temp = [0 for x in range(topic_count)]
for ele in predicted_values[i]:
temp[ele[0]] = ele[1]
predicted_values[i] = temp
for index in range(topic_count):
col_name = "topic_" + str(index)
df[col_name] = [x[index] for x in predicted_values]
return df
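# Worked example of the dense conversion performed above (illustration only): gensim
# returns a sparse list such as [(0, 0.7), (3, 0.3)] per document; with topic_count=5
# the loop turns it into the dense row [0.7, 0, 0, 0.3, 0], which is then written out
# as the columns topic_0 .. topic_4.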
import os
"""
Topic modeling features.
Pass cached=False in case you don't want to use an earlier data split.
"""
def topic_model(df_train, df_test, topic_count=10, cached=True):
lda_train_save_file = '../data/lsa_train.csv'
lda_test_save_file = '../data/lsa_test.csv'
if (os.path.exists(lda_train_save_file) and cached):
        return pd.read_csv(lda_train_save_file), pd.read_csv(lda_test_save_file)
### cleanup
#parallel_proces(test_src,'../data/training_user_tweet_processed.csv')
## general remove text
#df_train['tweet'] = df_train['tweet'].fillna("")
#df_test['tweet'] = df_test['tweet'].fillna("")
# df_train['tweet'] = df_train['tweet'].map(general_text_processing)
# df_test['tweet'] = df_test['tweet'].map(general_text_processing)
"""
Parallel tweet.
"""
# df_test['tweet'] = parallelize(df_test, clean_tweet)
# df_train['tweet'] = parallelize(df_train, clean_tweet)
#df_train['tweet'] = df_train['tweet'].map(clean_tweet)
#df_test['tweet'] = df_test['tweet'].map(clean_tweet)
## remove stop words
# df_train['tweet'] = df_train['tweet'].map(remove_stop_words)
# df_test['tweet'] = df_test['tweet'].map(remove_stop_words)
## gensim lda
# dictionary = Dictionary()
# for t in df_train.tweet.values.tolist():
# #print(t)
# dictionary.add_documents([t.split()])
dictionary = Dictionary()
for t in df_train.tweet.values.tolist():
# print(t)
dictionary.add_documents([t])
# for t in df_test['tweet'].values.tolist() :
# print(t)
# print(t[0].split())
# print(dictionary.doc2bow(t.split()))
train_doc2_corupus = [dictionary.doc2bow(text) for text in df_train['tweet'].values.tolist()]
# train_doc2_corupus = [dictionary.doc2bow(text.split()) for
# text in df_train['tweet'].values.tolist()]
# print(train_doc2_corupus)
print("Started LDA")
lda_model = LdaModel(train_doc2_corupus, num_topics=topic_count, iterations=30)
print("Completed LDA")
"""
fill topics
"""
df_test = fill_lda_result_2(df_test, lda_model, dictionary,
topic_count)
df_train = fill_lda_result_2(df_train, lda_model, dictionary,
topic_count)
"""
Save the file
"""
df_train.to_csv(lda_train_save_file, index=False)
df_test.to_csv(lda_test_save_file, index=False)
"""
return
"""
print('LDA Completed')
return df_train, df_test
"""
Load the GloVe word vectors into a dict.
"""
def load_glov_vec(glove_file):
mappings = {}
with open(glove_file) as file:
for line in file:
splits = line.split()
mappings[splits[0]] = splits[1:]
return mappings
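# Assumed input layout (standard GloVe text format): each line is
#   "<word> <v1> <v2> ... <vN>"  e.g.  "the 0.418 0.24968 -0.41242 ..."
# so load_glov_vec(...) maps "the" -> a list of N string values, which glove_encode
# converts to floats when averaging.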
"""
Average GloVe word encoding.
@input: dataframe with column tweet
@output: dataframe with averaged word-vector columns (glove_0 .. glove_{dims-1})
"""
def glove_encode(df, glove_file, dims=27):
glove_model = load_glov_vec(glove_file)
## create representation
tweets = df['tweet'].values.tolist()
mappings = []
"""
Get the tweet
"""
for t in tweets:
cur = [0 for x in range(dims)]
size = 0
for word in t.split():
word = word.lower()
if word in glove_model:
temp_vec = glove_model[word]
# print(temp_vec)
for i in range(dims):
cur[i] += float(temp_vec[i])
size += 1
if size != 0:
for i in range(dims):
cur[i] /= size
mappings.append(cur)
"""
append dataframe
"""
for i in range(dims):
col_name = 'glove_' + str(i)
df[col_name] = [x[i] for x in mappings]
return df
def text_process_split(input):
input_file, start, end, out_folder = input
out_file = os.path.join(out_folder, 'part-{}.csv'.format(start))
df = pd.read_csv(input_file)
df = df[start:end]
df['tweet'] = df.tweet.map(clean_tweet)
df.to_csv(out_file)
return True
def parallel_proces(input_file, out_folder):
df = pd.read_csv(input_file)
size = df.shape[0]
splits = []
cores = mp.cpu_count()//2
bucket = int(size / cores)
for i in range(1, cores + 1):
splits.append((input_file, (i - 1) * bucket, min(i * bucket, size), out_folder))
print(splits)
pool = mp.Pool(processes=cores)
result = None
"""
multi process and concat
"""
# for res in pool.imap_unordered(text_process_split, splits):
# # if result == None:
# # result = res
# # else:
# # result = pd.concat([result, res])
# pass
pool.map(text_process_split, splits)
#result.to_csv(out_file)
def process_df(df, temp_folder):
os.mkdir(temp_folder)
temp_df_file = os.path.join(temp_folder, 'temp.csv')
df.to_csv(temp_df_file)
## parrallel
parallel_proces(temp_df_file, temp_folder)
## read all files
result_df = pd.DataFrame()
for file in os.listdir(temp_folder):
if 'part-' in file:
file = os.path.join(temp_folder, file)
if(result_df.shape[0] == 0):
result_df = pd.read_csv(file)
else:
result_df = pd.concat([result_df, pd.read_csv(file)])
return result_df
"""
LDA parallel
"""
import shutil
def lda_parallel(df_train, df_test, topic_count):
temp_folder = '../data/temp'
if os.path.exists(temp_folder):
shutil.rmtree(temp_folder)
## mkdir
os.mkdir(temp_folder)
## make all dir
test_folder = os.path.join(temp_folder, 'test')
train_folder = os.path.join(temp_folder, 'train')
df_test = process_df(df_test, test_folder)
df_train = process_df(df_train, train_folder)
df_train, df_test = topic_model(df_train, df_test, topic_count=20)
return df_train, df_test
if __name__ == "__main__":
"""
Test lda
"""
test_src = '../data/training_user_tweet.csv'
#parallel_proces(test_src,'../data/training_user_tweet_processed.csv')
df = pd.read_csv(test_src)
# df.tweet.fillna("", inplace=True)
n_limt = int(df.shape[0] * 0.8)
df_train = df[0:n_limt]
df_test = df[n_limt:]
df_train, df_test = lda_parallel(df_train, df_test, topic_count=20)
#
print("######## test LDA #####")
print(list(df_train))
print(list(df_test))
# print("################ test glove ###### ")
# glove_file = '/media/shibin/disk/glove/glove.twitter.27B/glove.twitter.27B.25d.txt'
# glove_df = glove_encode(df_test, glove_file, 25 )
# print(list(glove_df))
| 26.625628 | 180 | 0.588846 |
4a1e881e75f55edb8ed6333cb57b69548d3654ec
| 13,115 |
py
|
Python
|
DFGN/tokenizer/tokenize_tool.py
|
mottled233/DFGN-pytorch
|
7d9f6a75404cfa429f1e2b57ec5055df382ed0a4
|
[
"MIT"
] | null | null | null |
DFGN/tokenizer/tokenize_tool.py
|
mottled233/DFGN-pytorch
|
7d9f6a75404cfa429f1e2b57ec5055df382ed0a4
|
[
"MIT"
] | null | null | null |
DFGN/tokenizer/tokenize_tool.py
|
mottled233/DFGN-pytorch
|
7d9f6a75404cfa429f1e2b57ec5055df382ed0a4
|
[
"MIT"
] | null | null | null |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import unicodedata
import six
def convert_to_unicode(text):
"""
    Convert text to a unicode (UTF-8) form that can be written to the console or a log.
    Checks the Python version and the text type and handles each case accordingly.
:param text: str
:return: str
"""
if six.PY3:
if isinstance(text, str):
return text
elif isinstance(text, bytes):
return text.decode("utf-8", "ignore")
else:
raise ValueError("Unsupported string type: %s" % (type(text)))
"""
elif six.PY2:
if isinstance(text, str):
return text.decode("utf-8", "ignore")
elif isinstance(text, unicode):
return text
else:
raise ValueError("Unsupported string type: %s" % (type(text)))
else:
raise ValueError("Not running on Python2 or Python 3?")
"""
def load_vocab(vocab_path):
"""
    Load a vocabulary from a file and return it as an ordered dict of the form vocab[token] = num.
:param vocab_path: str
:return: collections.OrderedDict
"""
vocab = collections.OrderedDict()
with open(vocab_path, encoding="utf-8") as fin:
for num, line in enumerate(fin):
items = convert_to_unicode(line.strip()).split("\t")
if len(items) > 2:
break
token = items[0].strip()
index = items[1] if len(items) == 2 else num
vocab[token] = int(index)
return vocab
def convert_tokens_to_ids(vocab, tokens):
"""
    Convert tokens to ids using the vocabulary `vocab`.
:param vocab: dict
:param tokens: list of str
:return: list of int
"""
ids = []
for token in tokens:
ids.append(vocab[token])
return ids
def convert_ids_to_tokens(inv_vocab, ids):
"""
    Convert ids back to tokens using the inverse vocabulary `inv_vocab`.
:param inv_vocab: dict
:param ids: list of int
:return: list of str
"""
tokens = []
for ID in ids:
tokens.append(inv_vocab[ID])
return tokens
def whitespace_tokenize(text):
"""
    Strip surrounding whitespace from a piece of text and split it on whitespace.
:param text: str
:return: list of str
"""
text = text.strip()
if not text:
return []
tokens = text.split()
return tokens
def _is_whitespace(char):
"""
    Check whether a character is whitespace, including \t, \n, \r, spaces and the Unicode space separators.
:param char: char
:return: bool
"""
if char == " " or char == "\t" or char == "\n" or char == "\r":
return True
cate = unicodedata.category(char) # Zs
if cate == "Zs":
return True
return False
def _is_control(char):
"""
    Check whether a character is a control character (all control characters except \t, \n and \r).
:param char: char
:return: bool
"""
if char == "\t" or char == "\n" or char == "\r": # Cc
return False
cate = unicodedata.category(char) # Cf, Cn, Co, Cs without Cc
if cate.startswith("C"):
return True
return False
def _is_punctuation(char):
"""
    Check whether a character is punctuation; here this also covers symbols other than letters and digits.
:param char: char
:return: bool
"""
cp = ord(char)
if ((33 <= cp <= 47) or (58 <= cp <= 64) or
(91 <= cp <= 96) or (123 <= cp <= 126)):
return True
cate = unicodedata.category(char) # Pc, Pd, Pe, Pf, Pi, Po, Ps
if cate.startswith("P"):
return True
return False
def _is_chinese_char(char):
"""
    Check whether a character is a CJK (Chinese/Japanese/Korean) character.
:param char: char
:return: bool
"""
cp = ord(char)
if ((0x4E00 <= cp <= 0x9FFF) or
(0x3400 <= cp <= 0x4DBF) or
(0x20000 <= cp <= 0x2A6DF) or
(0x2A700 <= cp <= 0x2B73F) or
(0x2B740 <= cp <= 0x2B81F) or
(0x2B820 <= cp <= 0x2CEAF) or
(0xF900 <= cp <= 0xFAFF) or
(0x2F800 <= cp <= 0x2FA1F)):
return True
return False
def clean_text(text):
"""
    Remove invalid characters and control characters from the text and normalize whitespace.
:param text: str
:return: str
"""
output = []
for char in text:
cp = ord(char)
if cp == 0 or cp == 0xfffd or _is_control(char):
continue
if _is_whitespace(char):
output.append(" ")
else:
output.append(char)
return "".join(output)
def tokenize_chinese_chars(text):
"""
    Add a space before and after every CJK character so that they are split apart.
:param text: str
:return: str
"""
output = []
for char in text:
if _is_chinese_char(char):
output.append(" ")
output.append(char)
output.append(" ")
else:
output.append(char)
return "".join(output)
def run_strip_accents(text):
"""
    Normalize the letter representations in a string and drop special characters (combining marks) that cannot be handled.
:param text: str
:return: str
"""
    text = unicodedata.normalize("NFD", text)  # normalize the text so different representations of the same character are unified
output = []
for char in text:
cate = unicodedata.category(char) # Mark, Nonspacing
if cate == "Mn":
continue
output.append(char)
return "".join(output)
def run_split_on_punc(text):
"""
    Split punctuation characters (as defined above) out into separate tokens.
:param text: str
:return: list of str
"""
chars = list(text)
i = 0
start_new_word = True
output = []
while i < len(chars):
char = chars[i]
if _is_punctuation(char):
output.append([char])
start_new_word = True
else:
if start_new_word:
output.append([])
start_new_word = False
output[-1].append(char)
i += 1
return ["".join(x) for x in output]
class WordpieceTokenizer(object):
"""
    Runs WordPiece tokenization on a contiguous phrase made up of several characters.
"""
def __init__(self, vocab, unk_token="[UNK]", max_input_chars_per_word=100):
self.vocab = vocab
self.unk_token = unk_token
self.max_input_chars_per_word = max_input_chars_per_word
        # unk_token: returned when a phrase is too long or contains characters that are not in the vocabulary
        # max_input_chars_per_word: maximum allowed phrase length
def tokenize(self, text):
"""
        Process a phrase using this tokenizer's vocabulary and return the list of resulting tokens.
        The phrase is split greedily: starting from the first character, the longest matching piece is taken each time.
        :param text: str, a piece of text; it may contain several phrases, but in practice only one is passed in
        :return: list of str, the token list; if there are several phrases their tokens are appended in order
"""
output_tokens = []
text = convert_to_unicode(text)
for token in whitespace_tokenize(text):
            chars = list(token)  # split the phrase into a list of single characters
if len(chars) > self.max_input_chars_per_word:
output_tokens.append(self.unk_token)
continue
is_bad = False
start = 0
sub_tokens = []
while start < len(chars):
end = len(chars)
cur_substr = None
while start < end:
substr = "".join(chars[start:end])
if start > 0:
substr = "##" + substr # 若不位于单词首部,则加上##前缀表示
if substr in self.vocab:
cur_substr = substr
break
end -= 1
if cur_substr is None:
is_bad = True
break
sub_tokens.append(cur_substr)
start = end
if is_bad:
'''
str_ = "".join(chars)
str_ = str_.replace("\\", "\\\\")
str_ = str_.replace("\"", "\\\"")
str_ = str_.replace("\'", "\\\'")
print(str_, end=" ")
'''
output_tokens.append(self.unk_token)
else:
output_tokens.extend(sub_tokens)
return output_tokens
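# Illustration of the greedy longest-match-first rule above, using a hypothetical
# vocabulary: assuming vocab contains "un", "##aff" and "##able" (but not "unaffable"),
# WordpieceTokenizer(vocab).tokenize("unaffable") returns ["un", "##aff", "##able"];
# a word containing a piece that cannot be matched at all comes back as ["[UNK]"].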
class BasicTokenizer(object):
"""Runs basic tokenization (punctuation splitting, lower casing, etc.)."""
def __init__(self, do_lower_case=True, never_split=None, tokenize_chinese_chars=True):
""" Constructs a BasicTokenizer.
Args:
**do_lower_case**: Whether to lower case the input.
**never_split**: (`optional`) list of str
Kept for backward compatibility purposes.
Now implemented directly at the base class level (see :func:`PreTrainedTokenizer.tokenize`)
List of token not to split.
**tokenize_chinese_chars**: (`optional`) boolean (default True)
Whether to tokenize Chinese characters.
This should likely be deactivated for Japanese:
see: https://github.com/huggingface/pytorch-pretrained-BERT/issues/328
"""
if never_split is None:
never_split = []
self.do_lower_case = do_lower_case
self.never_split = never_split
self.tokenize_chinese_chars = tokenize_chinese_chars
def tokenize(self, text, never_split=None):
""" Basic Tokenization of a piece of text.
Split on "white spaces" only, for sub-word tokenization, see WordPieceTokenizer.
Args:
**never_split**: (`optional`) list of str
Kept for backward compatibility purposes.
Now implemented directly at the base class level (see :func:`PreTrainedTokenizer.tokenize`)
List of token not to split.
"""
never_split = self.never_split + (never_split if never_split is not None else [])
text = self._clean_text(text)
# This was added on November 1st, 2018 for the multilingual and Chinese
# models. This is also applied to the English models now, but it doesn't
# matter since the English models were not trained on any Chinese data
# and generally don't have any Chinese data in them (there are Chinese
# characters in the vocabulary because Wikipedia does have some Chinese
# words in the English Wikipedia.).
if self.tokenize_chinese_chars:
text = self._tokenize_chinese_chars(text)
orig_tokens = whitespace_tokenize(text)
split_tokens = []
for token in orig_tokens:
if self.do_lower_case and token not in never_split:
token = token.lower()
token = self._run_strip_accents(token)
split_tokens.extend(self._run_split_on_punc(token))
output_tokens = whitespace_tokenize(" ".join(split_tokens))
return output_tokens
def _run_strip_accents(self, text):
"""Strips accents from a piece of text."""
text = unicodedata.normalize("NFD", text)
output = []
for char in text:
cat = unicodedata.category(char)
if cat == "Mn":
continue
output.append(char)
return "".join(output)
def _run_split_on_punc(self, text, never_split=None):
"""Splits punctuation on a piece of text."""
if never_split is not None and text in never_split:
return [text]
chars = list(text)
i = 0
start_new_word = True
output = []
while i < len(chars):
char = chars[i]
if _is_punctuation(char):
output.append([char])
start_new_word = True
else:
if start_new_word:
output.append([])
start_new_word = False
output[-1].append(char)
i += 1
return ["".join(x) for x in output]
def _tokenize_chinese_chars(self, text):
"""Adds whitespace around any CJK character."""
output = []
for char in text:
cp = ord(char)
if self._is_chinese_char(cp):
output.append(" ")
output.append(char)
output.append(" ")
else:
output.append(char)
return "".join(output)
def _is_chinese_char(self, cp):
"""Checks whether CP is the codepoint of a CJK character."""
# This defines a "chinese character" as anything in the CJK Unicode block:
# https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
#
# Note that the CJK Unicode block is NOT all Japanese and Korean characters,
# despite its name. The modern Korean Hangul alphabet is a different block,
# as is Japanese Hiragana and Katakana. Those alphabets are used to write
# space-separated words, so they are not treated specially and handled
# like the all of the other languages.
if (
(cp >= 0x4E00 and cp <= 0x9FFF)
or (cp >= 0x3400 and cp <= 0x4DBF) #
or (cp >= 0x20000 and cp <= 0x2A6DF) #
or (cp >= 0x2A700 and cp <= 0x2B73F) #
or (cp >= 0x2B740 and cp <= 0x2B81F) #
or (cp >= 0x2B820 and cp <= 0x2CEAF) #
or (cp >= 0xF900 and cp <= 0xFAFF)
or (cp >= 0x2F800 and cp <= 0x2FA1F) #
): #
return True
return False
def _clean_text(self, text):
"""Performs invalid character removal and whitespace cleanup on text."""
output = []
for char in text:
cp = ord(char)
if cp == 0 or cp == 0xFFFD or _is_control(char):
continue
if _is_whitespace(char):
output.append(" ")
else:
output.append(char)
return "".join(output)
| 29.40583 | 107 | 0.55448 |
4a1e8a5554d990bfa9877c871d5ee2c74aedab7b
| 350 |
py
|
Python
|
command3.py
|
BijendraSahu/anantara
|
720b0500c1a343a4ff9c965e450aa3d6017f5ddf
|
[
"MIT"
] | null | null | null |
command3.py
|
BijendraSahu/anantara
|
720b0500c1a343a4ff9c965e450aa3d6017f5ddf
|
[
"MIT"
] | null | null | null |
command3.py
|
BijendraSahu/anantara
|
720b0500c1a343a4ff9c965e450aa3d6017f5ddf
|
[
"MIT"
] | null | null | null |
import subprocess
import os
import webbrowser
#proc = subprocess.call('start',shell=True)
#proc=subprocess.Popen('cd xampp\htdocs\anantara_new_design')
proc=subprocess.Popen('php artisan serve --host 192.168.2.20')
chromedir= "C:/Program Files (x86)/Google/Chrome/Application/chrome.exe %s"
webbrowser.get(chromedir).open("http://192.168.2.20:8000")
| 38.888889 | 75 | 0.774286 |
4a1e8a899af13ccc5b575fa05310be59351911e9
| 1,657 |
py
|
Python
|
src/alignedseq.py
|
sanket-desai/pyblastn
|
15626c644bcfc4ad69ab132b0860dd1edb6c65d3
|
[
"MIT"
] | null | null | null |
src/alignedseq.py
|
sanket-desai/pyblastn
|
15626c644bcfc4ad69ab132b0860dd1edb6c65d3
|
[
"MIT"
] | null | null | null |
src/alignedseq.py
|
sanket-desai/pyblastn
|
15626c644bcfc4ad69ab132b0860dd1edb6c65d3
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
import sys
import re
from seq import Sequence
from seq import ResidueError
class AlignedSequence(Sequence):
def __init__(self, fn1=None, ssq1=None):
super().__init__(fn1, ssq1)
if not self.is_dna():
self.is_protein()
def __getitem__(self,i):
return self.seq_[i]
def is_gap(self,pos):
return self.seq_[pos]=="-"
def is_dna(self):
        return not re.search(r"[^ATGC-]", self.seq_)
def is_protein(self):
if self.is_dna():
return False
else:
for i in self.seq_:
if i not in ['G','A','V','L','I','P','F','Y','W','S','T','C','M','N','Q','K','R','H','D','E','-','X','Z','B']: #'X' a feature where the identity of the amino acid is unknown (an X is shown at this position in the sequence) and the only information concerning the modification is that the N-terminus is blocked: P80979 (Blocked amino end (Xaa))
#Note: Pyro-Glu is often indicated in papers as ‘pGlu’ and sometimes, in one-letter code as “U”, although this is now used for selenocysteine. In figures of publications, it may be cited as Z, pQ or E
raise ResidueError("Residue '%s' cannot be identified as either a nucleotide or amino acid for sequence %s."%(i, self.name_))
return True
#Given aligned position returns the actual sequece position
def aligned_to_sequence_position(self, apos):
spos=0
i=0
while i < apos:
if self.seq_[i] != "-":
spos=spos+1
i=i+1
return spos
#Given actual sequence position returns the aligned position
def sequence_to_aligned_position(self,spos):
apos=0
i=0
while spos > 0:
if not self.is_gap(i):
apos = apos+1
spos=spos-1
i=i+1
else:
apos= apos+1
i=i+1
return apos
| 33.816327 | 347 | 0.678938 |
4a1e8bf158d14193ab2d335a10082f2dfde95dc9
| 3,989 |
py
|
Python
|
questions_three/reporters/junit_reporter/junit_reporter.py
|
filfreire/questions-three
|
1d1d621d5647407bf2d1b271e0b9c7c9f1afc5c8
|
[
"MIT"
] | 5 |
2019-07-22T06:04:07.000Z
|
2021-07-23T06:01:51.000Z
|
questions_three/reporters/junit_reporter/junit_reporter.py
|
filfreire/questions-three
|
1d1d621d5647407bf2d1b271e0b9c7c9f1afc5c8
|
[
"MIT"
] | 15 |
2020-07-28T17:33:40.000Z
|
2021-08-23T17:30:05.000Z
|
questions_three/reporters/junit_reporter/junit_reporter.py
|
filfreire/questions-three
|
1d1d621d5647407bf2d1b271e0b9c7c9f1afc5c8
|
[
"MIT"
] | 4 |
2019-08-25T22:41:59.000Z
|
2020-10-21T14:28:15.000Z
|
from datetime import datetime
from junit_xml import TestCase, TestSuite
import os
from twin_sister import dependency
from xml.etree import ElementTree
from questions_three.constants import TestEvent, TestStatus
from questions_three.event_broker import EventBroker, subscribe_event_handlers
from questions_three.module_cfg import config_for_module
from questions_three.vanilla import format_exception, path_to_entry_script
def convert_status(status):
if TestStatus.erred == status:
return "error"
if TestStatus.failed == status:
return "failure"
if status in (None, TestStatus.passed):
return None
return status.name
def current_time():
return dependency(datetime).now()
def exception_str(e):
if e:
return "(%s) %s" % (type(e), e)
return ""
def extract_timestamp(test_result):
if test_result.start_time:
return test_result.start_time.isoformat()
return None
def convert_tests(test_results):
tests = []
for result in test_results:
if result.start_time and result.end_time:
duration = (result.end_time - result.start_time).total_seconds()
else:
duration = None
tc = TestCase(
name=result.name,
elapsed_sec=duration,
status=convert_status(result.status),
timestamp=extract_timestamp(result),
)
if result.exception:
if TestStatus.failed == result.status:
tc.add_failure_info(message=exception_str(result.exception), output=format_exception(result.exception))
elif TestStatus.erred == result.status:
tc.add_error_info(message=exception_str(result.exception), output=format_exception(result.exception))
elif TestStatus.skipped == result.status:
tc.add_skipped_info(message=exception_str(result.exception))
tests.append(tc)
return tests
def ci_workspace_path():
vars = dependency(os).environ
config = config_for_module(__name__)
key = config.ci_workspace_env_var
if key in vars.keys():
return vars[key]
return None
def infer_package_name():
"""
Use the path to the test script to infer a "package" name
for the Junit report.
"""
script = dependency(path_to_entry_script)()
if not script:
return ""
script_path, _ = os.path.split(script)
workspace_mask = ci_workspace_path()
if workspace_mask:
script_path = script_path.replace(workspace_mask, "")
else:
cwd_mask = dependency(os.getcwd)()
script_path = script_path.replace(cwd_mask, "")
name = script_path.replace("/", ".") + "."
if name.startswith("."):
name = name[1:]
return name
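# Hypothetical walk-through of infer_package_name (illustration only): if the entry
# script is /workspace/tests/smoke/test_login.py and the CI workspace variable points
# at /workspace, script_path becomes "/tests/smoke", slashes become dots, and the
# returned prefix is "tests.smoke." (prepended to the suite name below).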
class JunitReporter:
REPORTS_DIRECTORY = "reports"
def __init__(self):
self._dummy_test_case = None
def activate(self):
subscribe_event_handlers(self)
def on_suite_erred(self, suite_name, exception=None, **kwargs):
self._dummy_test_case = TestCase(name=suite_name, status="error")
if exception:
self._dummy_test_case.add_error_info(message=exception_str(exception), output=format_exception(exception))
def on_suite_results_compiled(self, suite_results, **kwargs):
suite_name = suite_results.suite_name or "NamelessSuite"
test_cases = convert_tests(suite_results.tests)
if self._dummy_test_case:
test_cases.append(self._dummy_test_case)
suite = dependency(TestSuite)(
name=infer_package_name() + suite_name, timestamp=current_time().isoformat(), test_cases=test_cases
)
xml_report = ElementTree.tostring(suite.build_xml_doc(), encoding="utf-8").decode(encoding="utf-8")
EventBroker.publish(
event=TestEvent.report_created,
suite=suite,
cases=test_cases,
report_filename=suite_name + ".xml",
report_content=xml_report,
)
| 32.169355 | 119 | 0.675107 |
4a1e8c72036fae8291bea74265aba2db3f82e0b1
| 1,076 |
py
|
Python
|
Converter/Examples/Inception.py
|
EmilPi/PuzzleLib
|
31aa0fab3b5e9472b9b9871ca52e4d94ea683fa9
|
[
"Apache-2.0"
] | 52 |
2020-02-28T20:40:15.000Z
|
2021-08-25T05:35:17.000Z
|
Converter/Examples/Inception.py
|
EmilPi/PuzzleLib
|
31aa0fab3b5e9472b9b9871ca52e4d94ea683fa9
|
[
"Apache-2.0"
] | 2 |
2021-02-14T15:57:03.000Z
|
2021-10-05T12:21:34.000Z
|
Converter/Examples/Inception.py
|
EmilPi/PuzzleLib
|
31aa0fab3b5e9472b9b9871ca52e4d94ea683fa9
|
[
"Apache-2.0"
] | 8 |
2020-02-28T20:40:11.000Z
|
2020-07-09T13:27:23.000Z
|
from PuzzleLib import Config
Config.globalEvalMode = True
from PuzzleLib.Backend import gpuarray
from PuzzleLib.Models.Nets.Inception import loadInceptionBN, loadInceptionV3
from PuzzleLib.Converter.Examples.Common import loadVGGSample, loadLabels, loadV3Labels, showLabelResults
def main():
inceptionBNTest()
inceptionV3Test()
def inceptionBNTest():
net = loadInceptionBN(modelpath="../TestData/Inception-BN-0126.hdf")
sample = loadVGGSample("../TestData/tarantula.jpg")
labels = loadLabels(synpath="../TestData/synsets.txt", wordpath="../TestData/synset_words.txt")
res = net(gpuarray.to_gpu(sample)).get().reshape(-1)
showLabelResults(res, labels, header=net.name)
def inceptionV3Test():
net = loadInceptionV3(modelpath="../TestData/Inception-7-0001.hdf")
sample = loadVGGSample("../TestData/tarantula.jpg", shape=(299, 299), normalize=True)
labels = loadV3Labels(filename="../TestData/synset_inception_v3.txt")
res = net(gpuarray.to_gpu(sample)).get().reshape(-1)
showLabelResults(res, labels, header=net.name)
if __name__ == "__main__":
main()
| 29.081081 | 105 | 0.765799 |
4a1e8d38ab58cf65a05068f77c4c7e4f7846d451
| 2,065 |
py
|
Python
|
setup.py
|
hotmess47/dwave-inspector
|
b958ab0c2c943c8d55d610b5903cffd4b88a2dff
|
[
"Apache-2.0"
] | null | null | null |
setup.py
|
hotmess47/dwave-inspector
|
b958ab0c2c943c8d55d610b5903cffd4b88a2dff
|
[
"Apache-2.0"
] | null | null | null |
setup.py
|
hotmess47/dwave-inspector
|
b958ab0c2c943c8d55d610b5903cffd4b88a2dff
|
[
"Apache-2.0"
] | null | null | null |
import os
from setuptools import setup
# Load package info, without importing the package
basedir = os.path.dirname(os.path.abspath(__file__))
package_info_path = os.path.join(basedir, "dwave", "inspector", "package_info.py")
package_info = {}
with open(package_info_path, encoding='utf-8') as f:
exec(f.read(), package_info)
# Package requirements, minimal pinning
install_requires = [
'dimod>=0.8.17',
'dwave-system>=1.0.0',
'dwave-cloud-client>=0.8.3',
'Flask>=1.1.1',
# dwave-inspectorapp==0.2.2
]
# Package extras requirements
extras_require = {
'test': ['coverage', 'vcrpy'],
# backports
':python_version < "3.9"': ['importlib-resources>=3.2.0']
}
classifiers = [
'License :: OSI Approved :: Apache Software License',
'Operating System :: OS Independent',
'Development Status :: 3 - Alpha',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
]
packages = ['dwave', 'dwave.inspector']
python_requires = '>=3.5'
setup(
name=package_info['__package_name__'],
version=package_info['__version__'],
author=package_info['__author__'],
author_email=package_info['__author_email__'],
description=package_info['__description__'],
long_description=open('README.rst', encoding='utf-8').read(),
url=package_info['__url__'],
license=package_info['__license__'],
packages=packages,
entry_points={
'inspectorapp_viewers': [
'browser_tab = dwave.inspector.viewers:webbrowser_tab',
'browser_window = dwave.inspector.viewers:webbrowser_window',
],
'dwave_contrib': [
'dwave-inspector = dwave.inspector.package_info:contrib'
]
},
python_requires=python_requires,
install_requires=install_requires,
extras_require=extras_require,
classifiers=classifiers,
zip_safe=False,
)
| 29.084507 | 82 | 0.667312 |
4a1e8ea806b8468cb6e0fc18eaaf5e30c41cb9d3
| 2,157 |
py
|
Python
|
src/data/test_tfrecords.py
|
Brechard/simple-yolov3
|
21c0a2962659cbff2469b905ac2efebafdc78796
|
[
"MIT"
] | 1 |
2020-02-05T07:45:08.000Z
|
2020-02-05T07:45:08.000Z
|
src/data/test_tfrecords.py
|
Brechard/simple-yolov3
|
21c0a2962659cbff2469b905ac2efebafdc78796
|
[
"MIT"
] | null | null | null |
src/data/test_tfrecords.py
|
Brechard/simple-yolov3
|
21c0a2962659cbff2469b905ac2efebafdc78796
|
[
"MIT"
] | 1 |
2020-02-27T13:54:11.000Z
|
2020-02-27T13:54:11.000Z
|
import os
import unittest
import cv2
import numpy as np
import tensorflow as tf
import make_dataset
from .dataset import Dataset
from ..constants import GTSD
class TFRecordTest(unittest.TestCase):
def test_create_tfrecords(self):
dataset_name = GTSD
dataset = Dataset(dataset_name)
images_path = '/home/brechard/datasets/' + dataset_name + '/train/'
annotations_dict = make_dataset.create_annotations_dict(dataset_name, 'train')
no_annotations, total_analyzed = 0, 0
for i, filename in enumerate(os.listdir(images_path)):
if np.random.random() >= 0.01:
continue
total_analyzed += 1
image_path = images_path + filename
image_string = open(images_path + '/' + filename, 'rb')
annotations = []
if filename in annotations_dict:
height, width, _ = cv2.imread(image_path).shape
annotations_list = annotations_dict[filename]
for annotation in annotations_list:
annotations.append([annotation[0] / width, annotation[1] / height,
annotation[2] / width, annotation[3] / height,
annotation[4]])
else:
annotations = annotations_list = [[0.0, 0.0, 0.0, 0.0, 0.0]]
no_annotations += 1
annotations = np.array(annotations, dtype=np.float32)
tf_example = make_dataset.image_example(image_string.read(), annotations_list, filename)
image_string.close()
file_path = 'data.tfrecords'
with tf.io.TFRecordWriter(file_path) as writer:
writer.write(tf_example.SerializeToString())
tf_records = tf.data.TFRecordDataset([file_path])
for record in tf_records:
image, labels = dataset.parse_tfrecord(record, False)
labels = labels.numpy()
self.assertEqual(np.sum(annotations != labels), 0)
self.assertNotEqual(total_analyzed, no_annotations)
if __name__ == '__main__':
unittest.main()
| 39.218182 | 100 | 0.600834 |
4a1e8ef44828ea12c9901a2f713522ced9c60c7c
| 2,787 |
py
|
Python
|
somework/mainapp/models.py
|
XtremeGood/somework
|
52fa01d48538e32a8afb85f4ddbaadac3e3cd154
|
[
"Apache-2.0"
] | null | null | null |
somework/mainapp/models.py
|
XtremeGood/somework
|
52fa01d48538e32a8afb85f4ddbaadac3e3cd154
|
[
"Apache-2.0"
] | null | null | null |
somework/mainapp/models.py
|
XtremeGood/somework
|
52fa01d48538e32a8afb85f4ddbaadac3e3cd154
|
[
"Apache-2.0"
] | null | null | null |
from decouple import config
from django.db import models
class Dematad(models.Model):
DPID = models.TextField()
CLID = models.TextField()
TYPE = models.TextField()
SUBTYP = models.TextField()
ACCAT = models.TextField()
OCCUP = models.TextField()
NAME = models.TextField()
FNAME = models.TextField()
AD1 = models.TextField()
AD2 = models.TextField()
AD3 = models.TextField()
AD4 = models.TextField()
PIN = models.TextField()
PHONE = models.TextField()
FAX = models.TextField()
JT1 = models.TextField()
FJT1 = models.TextField()
JT2 = models.TextField()
FJT2 = models.TextField()
FILLER1 = models.TextField()
FILLER2 = models.TextField()
PAN1 = models.TextField()
PAN2 = models.TextField()
PAN3 = models.TextField()
NOM = models.TextField()
NOMNAME = models.TextField()
NAD1 = models.TextField()
NAD2 = models.TextField()
NAD3 = models.TextField()
NAD4 = models.TextField()
NPIN = models.TextField()
DBMINOR = models.TextField()
MIND = models.TextField()
ACNO = models.TextField()
BANKNAME = models.TextField()
BANKAD1 = models.TextField()
BANKAD2 = models.TextField()
BANKAD3 = models.TextField()
BANKAD4 = models.TextField()
BANKPIN = models.TextField()
RBIREF = models.TextField()
RBIDATE = models.TextField()
SEBIREGNO = models.TextField()
BTAX = models.TextField()
STATUS = models.TextField()
MICRCD = models.TextField()
IFSC = models.TextField()
ACTYPE = models.TextField()
Filler3 = models.TextField()
NAMEMAPIN = models.TextField()
JT1MAPIN = models.TextField()
JT2MAPIN = models.TextField()
EMAIL1 = models.TextField()
EMAIL2 = models.TextField()
EMAIL3 = models.TextField()
RGSFLG = models.TextField()
ANREPFLG = models.TextField()
UIDISTHOL = models.TextField()
UID2NDHOL = models.TextField()
UID3RDHOL = models.TextField()
PANGAR = models.TextField()
UIDGAR = models.TextField()
class Meta:
unique_together = (('DPID', 'CLID'))
class Demathol(models.Model):
DPID = models.TextField()
CLID = models.TextField()
FREEHOL = models.TextField()
HOLLCK = models.TextField()
HOLBLOCK = models.TextField()
HOLPLD = models.TextField()
HOLPLDLCK = models.TextField()
HOLPLDUNC = models.TextField()
HOLPLDLCKU = models.TextField()
HOLREM = models.TextField()
HOLREMLCK = models.TextField()
HOLCMIDD = models.TextField()
HOLCMPOOL = models.TextField()
HOLSET = models.TextField()
ISEN = models.TextField()
DATE = models.TextField()
class Meta:
unique_together = (('DPID', 'CLID'))
def __str__(self):
return self.DPID
| 28.438776 | 44 | 0.64765 |
4a1e8efbe61f87200dd1afdf29ce16b969c241e8
| 1,474 |
py
|
Python
|
test/test_systeminfo.py
|
fledge-iot/fledge-south-systeminfo
|
377dce0c1288b0b2b33b9466f0ec580f578d8b30
|
[
"Apache-2.0"
] | null | null | null |
test/test_systeminfo.py
|
fledge-iot/fledge-south-systeminfo
|
377dce0c1288b0b2b33b9466f0ec580f578d8b30
|
[
"Apache-2.0"
] | null | null | null |
test/test_systeminfo.py
|
fledge-iot/fledge-south-systeminfo
|
377dce0c1288b0b2b33b9466f0ec580f578d8b30
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# FLEDGE_BEGIN
# See: http://fledge-iot.readthedocs.io/
# FLEDGE_END
from unittest.mock import patch
import pytest
from python.fledge.plugins.south.systeminfo import systeminfo
__author__ = "Amarendra K Sinha"
__copyright__ = "Copyright (c) 2018 Dianomic Systems"
__license__ = "Apache 2.0"
__version__ = "${VERSION}"
config = systeminfo._DEFAULT_CONFIG
def test_plugin_contract():
# Evaluates if the plugin has all the required methods
assert callable(getattr(systeminfo, 'plugin_info'))
assert callable(getattr(systeminfo, 'plugin_init'))
assert callable(getattr(systeminfo, 'plugin_poll'))
assert callable(getattr(systeminfo, 'plugin_shutdown'))
assert callable(getattr(systeminfo, 'plugin_reconfigure'))
def test_plugin_info():
assert systeminfo.plugin_info() == {
'name': 'System Info plugin',
'version': '1.9.2',
'mode': 'poll',
'type': 'south',
'interface': '1.0',
'config': config
}
def test_plugin_init():
assert systeminfo.plugin_init(config) == config
@pytest.mark.skip(reason="To be implemented")
def test_plugin_poll():
pass
@pytest.mark.skip(reason="To be implemented")
def test_plugin_reconfigure():
pass
def test_plugin_shutdown():
with patch.object(systeminfo._LOGGER, 'info') as patch_logger_info:
systeminfo.plugin_shutdown(config)
patch_logger_info.assert_called_once_with('system info plugin shut down.')
| 25.859649 | 78 | 0.710312 |
4a1e9079a760bd8b0150672e525299f9ecedde13
| 1,733 |
py
|
Python
|
utils/misc.py
|
ta9ryuWalrus/FixMatch-pytorch
|
3d63376ce25886767f6afec07caa640bd44ada29
|
[
"MIT"
] | null | null | null |
utils/misc.py
|
ta9ryuWalrus/FixMatch-pytorch
|
3d63376ce25886767f6afec07caa640bd44ada29
|
[
"MIT"
] | null | null | null |
utils/misc.py
|
ta9ryuWalrus/FixMatch-pytorch
|
3d63376ce25886767f6afec07caa640bd44ada29
|
[
"MIT"
] | null | null | null |
'''Some helper functions for PyTorch, including:
- get_mean_and_std: calculate the mean and std value of dataset.
'''
import logging
import torch
logger = logging.getLogger(__name__)
__all__ = ['get_mean_and_std', 'accuracy', 'AverageMeter']
def get_mean_and_std(dataset):
'''Compute the mean and std value of dataset.'''
dataloader = torch.utils.data.DataLoader(
dataset, batch_size=1, shuffle=False, num_workers=4)
mean = torch.zeros(3)
std = torch.zeros(3)
logger.info('==> Computing mean and std..')
for inputs, targets in dataloader:
for i in range(3):
mean[i] += inputs[:, i, :, :].mean()
std[i] += inputs[:, i, :, :].std()
mean.div_(len(dataset))
std.div_(len(dataset))
return mean, std
def accuracy(output, target, topk=(1,)):
"""Computes the precision@k for the specified values of k"""
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].contiguous().view(-1).float().sum(0)
res.append(correct_k.mul_(100.0 / batch_size))
return res
class AverageMeter(object):
"""Computes and stores the average and current value
Imported from https://github.com/pytorch/examples/blob/master/imagenet/main.py#L247-L262
"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
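# Usage sketch (illustration only): track a running mean over mini-batches.
#
#   losses = AverageMeter()
#   losses.update(0.9, n=32)  # batch of 32 with mean loss 0.9
#   losses.update(0.7, n=32)
#   losses.avg                # == 0.8, the sample-weighted running average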
| 26.661538 | 95 | 0.610502 |
4a1e90d74116dda8e8414ad8a07daa9a72684f60
| 13,685 |
py
|
Python
|
py_expression_eval/tests.py
|
axiacore/py-expression-eval
|
e7cfbedb3cdb1c428ae3dfbc967fe43deffa5e64
|
[
"MIT"
] | 26 |
2020-09-19T17:58:56.000Z
|
2022-03-26T20:28:10.000Z
|
py_expression_eval/tests.py
|
axiacore/py-expression-eval
|
e7cfbedb3cdb1c428ae3dfbc967fe43deffa5e64
|
[
"MIT"
] | 11 |
2020-11-09T00:58:51.000Z
|
2022-03-18T15:19:24.000Z
|
py_expression_eval/tests.py
|
axiacore/py-expression-eval
|
e7cfbedb3cdb1c428ae3dfbc967fe43deffa5e64
|
[
"MIT"
] | 13 |
2020-10-23T14:45:34.000Z
|
2022-03-17T06:21:33.000Z
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# Author: AxiaCore S.A.S. http://axiacore.com
#
# Based on js-expression-eval, by Matthew Crumley (email@matthewcrumley.com, http://silentmatt.com/)
# https://github.com/silentmatt/js-expression-eval
#
# Ported to Python and modified by Vera Mazhuga (ctrl-alt-delete@live.com, http://vero4ka.info/)
#
# You are free to use and modify this code in anyway you find useful. Please leave this comment in the code
# to acknowledge its original source. If you feel like it, I enjoy hearing about projects that use my code,
# but don't feel like you have to let me know or ask permission.
import unittest
from py_expression_eval import Parser
class ParserTestCase(unittest.TestCase):
def setUp(self):
self.parser = Parser()
def assertExactEqual(self, a, b):
self.assertEqual(type(a), type(b))
self.assertEqual(a, b)
def test_parser(self):
parser = Parser()
# parser and variables
self.assertEqual(parser.parse('pow(x,y)').variables(), ['x', 'y'])
self.assertEqual(parser.parse('pow(x,y)').symbols(), ['pow', 'x', 'y'])
# but '"a b"' can *not* be used as a variable
self.assertEqual(parser.parse('"a b"*2').evaluate({'"a b"': 2}), "a ba b")
# unless parse configured to allow double quoted variables (i.e. allow multi-word vars)
parser2 = Parser(string_literal_quotes=("'")) # only single, not double!
self.assertEqual(parser2.parse('"a b"*2').evaluate({'"a b"':2}),4)
# evaluate
self.assertExactEqual(parser.parse('1').evaluate({}), 1)
self.assertExactEqual(parser.parse('a').evaluate({'a': 2}), 2)
self.assertExactEqual(parser.parse('2 * 3').evaluate({}), 6)
self.assertExactEqual(parser.parse(u'2 \u2219 3').evaluate({}), 6)
self.assertExactEqual(parser.parse(u'2 \u2022 3').evaluate({}), 6)
self.assertExactEqual(parser.parse('2 ^ x').evaluate({'x': 3}), 8)
self.assertExactEqual(parser.parse('2 ** x').evaluate({'x': 3}), 8)
self.assertExactEqual(parser.parse('-1.E2 ** x + 2.0E2').evaluate({'x': 1}), 100.0)
self.assertEqual(parser.parse('x < 3').evaluate({'x': 3}), False)
self.assertEqual(parser.parse('x < 3').evaluate({'x': 2}), True)
self.assertEqual(parser.parse('x <= 3').evaluate({'x': 3}), True)
self.assertEqual(parser.parse('x <= 3').evaluate({'x': 4}), False)
self.assertEqual(parser.parse('x > 3').evaluate({'x': 4}), True)
self.assertEqual(parser.parse('x >= 3').evaluate({'x': 3}), True)
self.assertExactEqual(parser.parse('2 * x + 1').evaluate({'x': 3}), 7)
self.assertExactEqual(parser.parse('2 + 3 * x').evaluate({'x': 4}), 14)
self.assertExactEqual(parser.parse('(2 + 3) * x').evaluate({'x': 4}), 20)
self.assertExactEqual(parser.parse('2-3.0^x').evaluate({'x': 4}), -79.0)
self.assertExactEqual(parser.parse('-2-3.0^x').evaluate({'x': 4}), -83.0)
self.assertExactEqual(parser.parse('-3^x').evaluate({'x': 4}), -81)
self.assertExactEqual(parser.parse('(-3)^x').evaluate({'x': 4}), 81)
self.assertExactEqual(parser.parse('2-3**x').evaluate({'x': 4}), -79)
self.assertExactEqual(parser.parse('-2-3**x').evaluate({'x': 4}), -83)
self.assertExactEqual(parser.parse('-3.0**x').evaluate({'x': 4}), -81.0)
self.assertExactEqual(parser.parse('(-3.0)**x').evaluate({'x': 4}), 81.0)
self.assertExactEqual(parser.parse('2*x + y').evaluate({'x': 4, 'y': 1}), 9)
self.assertEqual(parser.parse("x||y").evaluate({'x': 'hello ', 'y': 'world'}), 'hello world')
self.assertEqual(parser.parse("'x'||'y'").evaluate({}), 'xy')
self.assertEqual(parser.parse("'x'=='x'").evaluate({}), True)
self.assertEqual(parser.parse("(a+b)==c").evaluate({'a': 1, 'b': 2, 'c': 3}), True)
self.assertEqual(parser.parse("(a+b)!=c").evaluate({'a': 1, 'b': 2, 'c': 3}), False)
self.assertEqual(parser.parse("(a^2-b^2)==((a+b)*(a-b))").evaluate({'a': 4859, 'b': 13150}), True)
self.assertEqual(parser.parse("(a^2-b^2+1)==((a+b)*(a-b))").evaluate({'a': 4859, 'b': 13150}), False)
self.assertEqual(parser.parse("(a**2-b**2)==((a+b)*(a-b))").evaluate({'a': 4859, 'b': 13150}), True)
self.assertEqual(parser.parse("(a**2-b**2+1)==((a+b)*(a-b))").evaluate({'a': 4859, 'b': 13150}), False)
self.assertExactEqual(parser.parse("x/((x+y))").simplify({}).evaluate({'x': 1, 'y': 1}), 0.5)
self.assertExactEqual(parser.parse('origin+2.0').evaluate({'origin': 1.0}), 3.0)
# logical expressions
self.assertExactEqual(parser.parse('a and b').evaluate({'a': True, 'b': False}), False)
self.assertExactEqual(parser.parse('a and not b').evaluate({'a': True, 'b': False}), True)
self.assertExactEqual(parser.parse('a or b').evaluate({'a': True, 'b': False}), True)
self.assertExactEqual(parser.parse('a xor b').evaluate({'a': True, 'b': True}), False)
# check precedents: AND should evaluate before OR
self.assertExactEqual(parser.parse('a or b and not a').evaluate({'a': True, 'b': False}), True)
# in operations
self.assertExactEqual(parser.parse('"ab" in ("ab", "cd")').evaluate({}), True)
self.assertExactEqual(parser.parse('"ee" in ("ab", "cd")').evaluate({}), False)
self.assertExactEqual(parser.parse('1 in (1, 2, 3)').evaluate({}), True)
self.assertExactEqual(parser.parse('"ab" in ("ab", "cd") and 1 in (1,2,3)').evaluate({}), True)
self.assertExactEqual(parser.parse('"word" in "word in sentence"').evaluate({}), True)
# functions
self.assertExactEqual(parser.parse('pyt(2 , 0)').evaluate({}), 2.0)
self.assertEqual(parser.parse("concat('Hello',' ','world')").evaluate({}), 'Hello world')
self.assertExactEqual(parser.parse('if(a>b,5,6)').evaluate({'a': 8, 'b': 3}), 5)
self.assertExactEqual(parser.parse('if(a,b,c)').evaluate({'a': None, 'b': 1, 'c': 3}), 3)
self.assertExactEqual(parser.parse('if(random(1)>1,1,0)').evaluate({}), 0)
# log with base or natural log
self.assertExactEqual(parser.parse('log(16,2)').evaluate({}), 4.0)
self.assertExactEqual(parser.parse('log(E^100)').evaluate({}), 100.0)
self.assertExactEqual(parser.parse('log(E**100)').evaluate({}), 100.0)
# test substitute
expr = parser.parse('2 * x + 1')
expr2 = expr.substitute('x', '4 * x') # ((2*(4*x))+1)
self.assertExactEqual(expr2.evaluate({'x': 3}), 25)
# test simplify
expr = parser.parse('x * (y * atan(1))').simplify({'y': 4})
self.assertIn('x*3.141592', expr.toString())
self.assertExactEqual(expr.evaluate({'x': 2}), 6.283185307179586)
# test toString with string constant
expr = parser.parse("'a'=='b'")
self.assertIn("'a'=='b'", expr.toString())
self.assertIn("'a'=='b'", "%s" % expr)
expr = parser.parse("concat('a\n','\n','\rb')=='a\n\n\rb'")
self.assertEqual(expr.evaluate({}), True)
expr = parser.parse("a==''")
self.assertEqual(expr.evaluate({'a': ''}), True)
# test toString with an external function
expr = parser.parse("myExtFn(a,b,c,1.51,'ok')")
self.assertEqual(expr.substitute("a", 'first').toString(), "myExtFn(first,b,c,1.51,'ok')")
# test variables
expr = parser.parse('x * (y * atan(1))')
self.assertEqual(expr.variables(), ['x', 'y'])
self.assertEqual(expr.simplify({'y': 4}).variables(), ['x'])
# list operations
self.assertEqual(parser.parse('a, 3').evaluate({'a': [1, 2]}), [1, 2, 3])
def test_consts(self):
# self.assertEqual(self.parser.parse("PI ").variables(), [""])
self.assertEqual(self.parser.parse("PI").variables(), [])
self.assertEqual(self.parser.parse("PI ").variables(), [])
self.assertEqual(self.parser.parse("E ").variables(), [])
self.assertEqual(self.parser.parse(" E").variables(), [])
self.assertEqual(self.parser.parse("E").variables(), [])
self.assertEqual(self.parser.parse("E+1").variables(), [])
self.assertEqual(self.parser.parse("E / 1").variables(), [])
self.assertEqual(self.parser.parse("sin(PI)+E").variables(), [])
def test_parsing_e_and_pi(self):
self.assertEqual(self.parser.parse('Pie').variables(), ["Pie"])
self.assertEqual(self.parser.parse('PIe').variables(), ["PIe"])
self.assertEqual(self.parser.parse('Eval').variables(), ["Eval"])
self.assertEqual(self.parser.parse('Eval1').variables(), ["Eval1"])
self.assertEqual(self.parser.parse('EPI').variables(), ["EPI"])
self.assertEqual(self.parser.parse('PIE').variables(), ["PIE"])
self.assertEqual(self.parser.parse('Engage').variables(), ["Engage"])
self.assertEqual(self.parser.parse('Engage * PIE').variables(), ["Engage", "PIE"])
self.assertEqual(self.parser.parse('Engage_').variables(), ["Engage_"])
self.assertEqual(self.parser.parse('Engage1').variables(), ["Engage1"])
self.assertEqual(self.parser.parse('E1').variables(), ["E1"])
self.assertEqual(self.parser.parse('PI2').variables(), ["PI2"])
self.assertEqual(self.parser.parse('(E1 + PI)').variables(), ["E1"])
self.assertEqual(self.parser.parse('E1_').variables(), ["E1_"])
self.assertEqual(self.parser.parse('E_').variables(), ["E_"])
def test_evaluating_consts(self):
self.assertExactEqual(self.parser.evaluate("Engage1", variables={"Engage1": 2}), 2)
self.assertExactEqual(self.parser.evaluate("Engage1 + 1", variables={"Engage1": 1}), 2)
def test_custom_functions(self):
parser = Parser()
def testFunction0():
return 13
def testFunction1(a):
return 2 * a + 9
def testFunction2(a, b):
return 2 * a + 3 * b
# zero argument functions don't currently work
# self.assertEqual(parser
# .parse('testFunction()')
# .evaluate({"testFunction":testFunction0}),13)
self.assertExactEqual(parser
.parse('testFunction(x)')
.evaluate({"x": 2, "testFunction": testFunction1}), 13)
self.assertExactEqual(parser
.parse('testFunction(x , y)')
.evaluate({"x": 2, "y": 3, "testFunction": testFunction2}), 13)
# Add some "built-in" functions
def mean(*xs):
return sum(xs) / len(xs)
parser.functions['mean'] = mean
def counter(initial):
class nonlocals:
x = initial
def count(increment):
nonlocals.x += increment
return nonlocals.x
return count
parser.functions['count'] = counter(0)
self.assertEqual(parser.parse("mean(xs)").variables(), ["xs"])
self.assertEqual(parser.parse("mean(xs)").symbols(), ["mean", "xs"])
self.assertEqual(parser.evaluate("mean(xs)", variables={"xs": [1, 2, 3]}), 2)
self.assertExactEqual(parser.evaluate("count(num)", variables={"num": 5}), 5)
self.assertExactEqual(parser.evaluate("count(num)", variables={"num": 5}), 10)
def test_custom_functions_with_inline_strings(self):
parser = Parser()
expr = parser.parse("func(1, \"func(2, 4)\")")
self.assertEqual(expr.variables(), ['func'])
expr = parser.parse("func(1, 'func(2, 4)')")
self.assertEqual(expr.variables(), ['func'])
parser2 = Parser(string_literal_quotes=("'"))
expr = parser2.parse("func(1, \"func(2, 4)\")")
self.assertEqual(expr.variables(), ['func', "\"func(2, 4)\""])
expr = parser2.parse("func(1, 'func(2, 4)')")
self.assertEqual(expr.variables(), ['func'])
def test_custom_functions_substitute_strings(self):
def func(var, str):
if str == "custom text":
return 1
if str == "foo":
return 2
return 0
parser = Parser()
expr = parser.parse("func(1, \"custom text\")")
self.assertEqual(expr.evaluate({"func": func}), 1)
parser = Parser(string_literal_quotes=("'"))
expr = parser.parse("func(1, \"custom text\")")
self.assertEqual(expr.evaluate({"func": func, "\"custom text\"": "foo" }), 2)
def test_decimals(self):
parser = Parser()
self.assertExactEqual(parser.parse(".1").evaluate({}), parser.parse("0.1").evaluate({}))
self.assertExactEqual(parser.parse(".1*.2").evaluate({}), parser.parse("0.1*0.2").evaluate({}))
self.assertExactEqual(parser.parse(".5^3").evaluate({}), float(0.125))
self.assertExactEqual(parser.parse("16^.5").evaluate({}), 4.0)
self.assertExactEqual(parser.parse(".5**3").evaluate({}), float(0.125))
self.assertExactEqual(parser.parse("16**.5").evaluate({}), 4.0)
self.assertExactEqual(parser.parse("8300*.8").evaluate({}), 6640.0)
self.assertExactEqual(parser.parse("1E3*2.0").evaluate({}), 2000.0)
self.assertExactEqual(parser.parse("-1e3*2.0").evaluate({}), -2000.0)
self.assertExactEqual(parser.parse("-1E3*2.E2").evaluate({}), -200000.0)
with self.assertRaises(ValueError):
parser.parse("..5").evaluate({})
def test_to_string(self):
parser = Parser()
self.assertEqual(parser.parse("-12 * a + -2").toString(), '(((-12)*a)+(-2))')
if __name__ == '__main__':
unittest.main()
| 50.3125 | 111 | 0.586262 |
4a1e91ff81cde8e18545da2f5175e573e3c159d7
| 52,974 |
py
|
Python
|
Macros/Python/platform.py
|
rec/dmxis
|
540baa59df6f4ae39990e5888f90b95caa362279
|
[
"Artistic-2.0"
] | 2 |
2019-05-26T15:11:18.000Z
|
2021-12-27T21:05:32.000Z
|
Macros/Python/platform.py
|
rec/DMXIS
|
540baa59df6f4ae39990e5888f90b95caa362279
|
[
"Artistic-2.0"
] | null | null | null |
Macros/Python/platform.py
|
rec/DMXIS
|
540baa59df6f4ae39990e5888f90b95caa362279
|
[
"Artistic-2.0"
] | null | null | null |
#!/usr/bin/env python
""" This module tries to retrieve as much platform-identifying data as
possible. It makes this information available via function APIs.
If called from the command line, it prints the platform
information concatenated as single string to stdout. The output
format is useable as part of a filename.
"""
# This module is maintained by Marc-Andre Lemburg <mal@egenix.com>.
# If you find problems, please submit bug reports/patches via the
# Python SourceForge Project Page and assign them to "lemburg".
#
# Note: Please keep this module compatible to Python 1.5.2.
#
# Still needed:
# * more support for WinCE
# * support for MS-DOS (PythonDX ?)
# * support for Amiga and other still unsupported platforms running Python
# * support for additional Linux distributions
#
# Many thanks to all those who helped adding platform-specific
# checks (in no particular order):
#
# Charles G Waldman, David Arnold, Gordon McMillan, Ben Darnell,
# Jeff Bauer, Cliff Crawford, Ivan Van Laningham, Josef
# Betancourt, Randall Hopper, Karl Putland, John Farrell, Greg
# Andruk, Just van Rossum, Thomas Heller, Mark R. Levinson, Mark
# Hammond, Bill Tutt, Hans Nowak, Uwe Zessin (OpenVMS support),
# Colin Kong, Trent Mick, Guido van Rossum, Anthony Baxter
#
# History:
#
# <see CVS and SVN checkin messages for history>
#
# 1.0.6 - added linux_distribution()
# 1.0.5 - fixed Java support to allow running the module on Jython
# 1.0.4 - added IronPython support
# 1.0.3 - added normalization of Windows system name
# 1.0.2 - added more Windows support
# 1.0.1 - reformatted to make doc.py happy
# 1.0.0 - reformatted a bit and checked into Python CVS
# 0.8.0 - added sys.version parser and various new access
# APIs (python_version(), python_compiler(), etc.)
# 0.7.2 - fixed architecture() to use sizeof(pointer) where available
# 0.7.1 - added support for Caldera OpenLinux
# 0.7.0 - some fixes for WinCE; untabified the source file
# 0.6.2 - support for OpenVMS - requires version 1.5.2-V006 or higher and
# vms_lib.getsyi() configured
# 0.6.1 - added code to prevent 'uname -p' on platforms which are
# known not to support it
# 0.6.0 - fixed win32_ver() to hopefully work on Win95,98,NT and Win2k;
# did some cleanup of the interfaces - some APIs have changed
# 0.5.5 - fixed another type in the MacOS code... should have
# used more coffee today ;-)
# 0.5.4 - fixed a few typos in the MacOS code
# 0.5.3 - added experimental MacOS support; added better popen()
# workarounds in _syscmd_ver() -- still not 100% elegant
# though
# 0.5.2 - fixed uname() to return '' instead of 'unknown' in all
# return values (the system uname command tends to return
#              'unknown' instead of just leaving the field empty)
# 0.5.1 - included code for slackware dist; added exception handlers
# to cover up situations where platforms don't have os.popen
# (e.g. Mac) or fail on socket.gethostname(); fixed libc
# detection RE
# 0.5.0 - changed the API names referring to system commands to *syscmd*;
# added java_ver(); made syscmd_ver() a private
# API (was system_ver() in previous versions) -- use uname()
# instead; extended the win32_ver() to also return processor
# type information
# 0.4.0 - added win32_ver() and modified the platform() output for WinXX
# 0.3.4 - fixed a bug in _follow_symlinks()
# 0.3.3 - fixed popen() and "file" command invocation bugs
# 0.3.2 - added architecture() API and support for it in platform()
# 0.3.1 - fixed syscmd_ver() RE to support Windows NT
# 0.3.0 - added system alias support
# 0.2.3 - removed 'wince' again... oh well.
# 0.2.2 - added 'wince' to syscmd_ver() supported platforms
# 0.2.1 - added cache logic and changed the platform string format
# 0.2.0 - changed the API to use functions instead of module globals
# since some action take too long to be run on module import
# 0.1.0 - first release
#
# You can always get the latest version of this module at:
#
# http://www.egenix.com/files/python/platform.py
#
# If that URL should fail, try contacting the author.
__copyright__ = """
Copyright (c) 1999-2000, Marc-Andre Lemburg; mailto:mal@lemburg.com
Copyright (c) 2000-2008, eGenix.com Software GmbH; mailto:info@egenix.com
Permission to use, copy, modify, and distribute this software and its
documentation for any purpose and without fee or royalty is hereby granted,
provided that the above copyright notice appear in all copies and that
both that copyright notice and this permission notice appear in
supporting documentation or portions thereof, including modifications,
that you make.
EGENIX.COM SOFTWARE GMBH DISCLAIMS ALL WARRANTIES WITH REGARD TO
THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND
FITNESS, IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL,
INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
WITH THE USE OR PERFORMANCE OF THIS SOFTWARE !
"""
__version__ = '1.0.6'
import sys,string,os,re
### Platform specific APIs
_libc_search = re.compile(r'(__libc_init)'
'|'
'(GLIBC_([0-9.]+))'
'|'
'(libc(_\w+)?\.so(?:\.(\d[0-9.]*))?)')
def libc_ver(executable=sys.executable,lib='',version='',
chunksize=2048):
""" Tries to determine the libc version that the file executable
(which defaults to the Python interpreter) is linked against.
Returns a tuple of strings (lib,version) which default to the
given parameters in case the lookup fails.
Note that the function has intimate knowledge of how different
libc versions add symbols to the executable and thus is probably
only useable for executables compiled using gcc.
The file is read and scanned in chunks of chunksize bytes.
"""
if hasattr(os.path, 'realpath'):
# Python 2.2 introduced os.path.realpath(); it is used
# here to work around problems with Cygwin not being
# able to open symlinks for reading
executable = os.path.realpath(executable)
f = open(executable,'rb')
binary = f.read(chunksize)
pos = 0
while 1:
m = _libc_search.search(binary,pos)
if not m:
binary = f.read(chunksize)
if not binary:
break
pos = 0
continue
libcinit,glibc,glibcversion,so,threads,soversion = m.groups()
if libcinit and not lib:
lib = 'libc'
elif glibc:
if lib != 'glibc':
lib = 'glibc'
version = glibcversion
elif glibcversion > version:
version = glibcversion
elif so:
if lib != 'glibc':
lib = 'libc'
if soversion > version:
version = soversion
if threads and version[-len(threads):] != threads:
version = version + threads
pos = m.end()
f.close()
return lib,version
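# Illustrative usage (a minimal sketch; the values depend on the host system
# and are hypothetical here). On a glibc-based Linux system, scanning the
# interpreter binary might give:
#
#   >>> libc_ver()
#   ('glibc', '2.7')
#
# On systems where no libc symbols are recognised, the given defaults
# ('', '') are returned unchanged.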
def _dist_try_harder(distname,version,id):
""" Tries some special tricks to get the distribution
information in case the default method fails.
Currently supports older SuSE Linux, Caldera OpenLinux and
Slackware Linux distributions.
"""
if os.path.exists('/var/adm/inst-log/info'):
# SuSE Linux stores distribution information in that file
info = open('/var/adm/inst-log/info').readlines()
distname = 'SuSE'
for line in info:
tv = string.split(line)
if len(tv) == 2:
tag,value = tv
else:
continue
if tag == 'MIN_DIST_VERSION':
version = string.strip(value)
elif tag == 'DIST_IDENT':
values = string.split(value,'-')
id = values[2]
return distname,version,id
if os.path.exists('/etc/.installed'):
# Caldera OpenLinux has some infos in that file (thanks to Colin Kong)
info = open('/etc/.installed').readlines()
for line in info:
pkg = string.split(line,'-')
if len(pkg) >= 2 and pkg[0] == 'OpenLinux':
# XXX does Caldera support non Intel platforms ? If yes,
# where can we find the needed id ?
return 'OpenLinux',pkg[1],id
if os.path.isdir('/usr/lib/setup'):
        # Check for slackware version tag file (thanks to Greg Andruk)
verfiles = os.listdir('/usr/lib/setup')
for n in range(len(verfiles)-1, -1, -1):
if verfiles[n][:14] != 'slack-version-':
del verfiles[n]
if verfiles:
verfiles.sort()
distname = 'slackware'
version = verfiles[-1][14:]
return distname,version,id
return distname,version,id
_release_filename = re.compile(r'(\w+)[-_](release|version)')
_lsb_release_version = re.compile(r'(.+)'
' release '
'([\d.]+)'
'[^(]*(?:\((.+)\))?')
_release_version = re.compile(r'([^0-9]+)'
'(?: release )?'
'([\d.]+)'
'[^(]*(?:\((.+)\))?')
# See also http://www.novell.com/coolsolutions/feature/11251.html
# and http://linuxmafia.com/faq/Admin/release-files.html
# and http://data.linux-ntfs.org/rpm/whichrpm
# and http://www.die.net/doc/linux/man/man1/lsb_release.1.html
_supported_dists = (
'SuSE', 'debian', 'fedora', 'redhat', 'centos',
'mandrake', 'mandriva', 'rocks', 'slackware', 'yellowdog', 'gentoo',
'UnitedLinux', 'turbolinux')
def _parse_release_file(firstline):
# Parse the first line
m = _lsb_release_version.match(firstline)
if m is not None:
# LSB format: "distro release x.x (codename)"
return tuple(m.groups())
# Pre-LSB format: "distro x.x (codename)"
m = _release_version.match(firstline)
if m is not None:
return tuple(m.groups())
    # Unknown format... take the first two words
l = string.split(string.strip(firstline))
if l:
version = l[0]
if len(l) > 1:
id = l[1]
else:
id = ''
return '', version, id
def _test_parse_release_file():
for input, output in (
# Examples of release file contents:
        ('SuSE Linux 9.3 (x86-64)', ('SuSE Linux ', '9.3', 'x86-64')),
        ('SUSE LINUX 10.1 (X86-64)', ('SUSE LINUX ', '10.1', 'X86-64')),
        ('SUSE LINUX 10.1 (i586)', ('SUSE LINUX ', '10.1', 'i586')),
        ('Fedora Core release 5 (Bordeaux)', ('Fedora Core', '5', 'Bordeaux')),
        ('Red Hat Linux release 8.0 (Psyche)', ('Red Hat Linux', '8.0', 'Psyche')),
        ('Red Hat Linux release 9 (Shrike)', ('Red Hat Linux', '9', 'Shrike')),
        ('Red Hat Enterprise Linux release 4 (Nahant)', ('Red Hat Enterprise Linux', '4', 'Nahant')),
        ('CentOS release 4', ('CentOS', '4', None)),
        ('Rocks release 4.2.1 (Cydonia)', ('Rocks', '4.2.1', 'Cydonia')),
):
parsed = _parse_release_file(input)
if parsed != output:
print (input, parsed)
def linux_distribution(distname='', version='', id='',
supported_dists=_supported_dists,
full_distribution_name=1):
""" Tries to determine the name of the Linux OS distribution name.
The function first looks for a distribution release file in
/etc and then reverts to _dist_try_harder() in case no
suitable files are found.
supported_dists may be given to define the set of Linux
distributions to look for. It defaults to a list of currently
supported Linux distributions identified by their release file
name.
If full_distribution_name is true (default), the full
distribution read from the OS is returned. Otherwise the short
name taken from supported_dists is used.
Returns a tuple (distname,version,id) which default to the
args given as parameters.
"""
try:
etc = os.listdir('/etc')
except os.error:
# Probably not a Unix system
return distname,version,id
etc.sort()
for file in etc:
m = _release_filename.match(file)
if m is not None:
_distname,dummy = m.groups()
if _distname in supported_dists:
distname = _distname
break
else:
return _dist_try_harder(distname,version,id)
# Read the first line
f = open('/etc/'+file, 'r')
firstline = f.readline()
f.close()
_distname, _version, _id = _parse_release_file(firstline)
if _distname and full_distribution_name:
distname = _distname
if _version:
version = _version
if _id:
id = _id
return distname, version, id
# To maintain backwards compatibility:
def dist(distname='',version='',id='',
supported_dists=_supported_dists):
""" Tries to determine the name of the Linux OS distribution name.
The function first looks for a distribution release file in
/etc and then reverts to _dist_try_harder() in case no
suitable files are found.
Returns a tuple (distname,version,id) which default to the
args given as parameters.
"""
return linux_distribution(distname, version, id,
supported_dists=supported_dists,
full_distribution_name=0)
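# Illustrative usage (distribution names and versions are hypothetical):
#
#   >>> linux_distribution()
#   ('Fedora Core', '5', 'Bordeaux')
#   >>> dist()
#   ('fedora', '5', 'Bordeaux')
#
# linux_distribution() keeps the full name read from the release file, while
# dist() reports the short identifier from _supported_dists.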
class _popen:
""" Fairly portable (alternative) popen implementation.
This is mostly needed in case os.popen() is not available, or
doesn't work as advertised, e.g. in Win9X GUI programs like
PythonWin or IDLE.
Writing to the pipe is currently not supported.
"""
tmpfile = ''
pipe = None
bufsize = None
mode = 'r'
def __init__(self,cmd,mode='r',bufsize=None):
if mode != 'r':
raise ValueError,'popen()-emulation only supports read mode'
import tempfile
self.tmpfile = tmpfile = tempfile.mktemp()
os.system(cmd + ' > %s' % tmpfile)
self.pipe = open(tmpfile,'rb')
self.bufsize = bufsize
self.mode = mode
def read(self):
return self.pipe.read()
def readlines(self):
if self.bufsize is not None:
return self.pipe.readlines()
def close(self,
remove=os.unlink,error=os.error):
if self.pipe:
rc = self.pipe.close()
else:
rc = 255
if self.tmpfile:
try:
remove(self.tmpfile)
except error:
pass
return rc
# Alias
__del__ = close
def popen(cmd, mode='r', bufsize=None):
""" Portable popen() interface.
"""
# Find a working popen implementation preferring win32pipe.popen
# over os.popen over _popen
popen = None
if os.environ.get('OS','') == 'Windows_NT':
# On NT win32pipe should work; on Win9x it hangs due to bugs
# in the MS C lib (see MS KnowledgeBase article Q150956)
try:
import win32pipe
except ImportError:
pass
else:
popen = win32pipe.popen
if popen is None:
if hasattr(os,'popen'):
popen = os.popen
# Check whether it works... it doesn't in GUI programs
# on Windows platforms
if sys.platform == 'win32': # XXX Others too ?
try:
popen('')
except os.error:
popen = _popen
else:
popen = _popen
if bufsize is None:
return popen(cmd,mode)
else:
return popen(cmd,mode,bufsize)
def _norm_version(version, build=''):
""" Normalize the version and build strings and return a single
version string using the format major.minor.build (or patchlevel).
"""
l = string.split(version,'.')
if build:
l.append(build)
try:
ints = map(int,l)
except ValueError:
strings = l
else:
strings = map(str,ints)
version = string.join(strings[:3],'.')
return version
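# Illustrative behaviour: at most three numeric components are kept and an
# optional build number is appended before truncation, e.g.
#
#   >>> _norm_version('6.0.6000.16386')
#   '6.0.6000'
#   >>> _norm_version('5.1', '2600')
#   '5.1.2600'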
_ver_output = re.compile(r'(?:([\w ]+) ([\w.]+) '
'.*'
'Version ([\d.]+))')
def _syscmd_ver(system='', release='', version='',
supported_platforms=('win32','win16','dos','os2')):
""" Tries to figure out the OS version used and returns
a tuple (system,release,version).
It uses the "ver" shell command for this which is known
        to exist on Windows, DOS and OS/2. XXX Others too ?
In case this fails, the given parameters are used as
defaults.
"""
if sys.platform not in supported_platforms:
return system,release,version
# Try some common cmd strings
for cmd in ('ver','command /c ver','cmd /c ver'):
try:
pipe = popen(cmd)
info = pipe.read()
if pipe.close():
raise os.error,'command failed'
            # XXX How can I suppress shell errors from being written
# to stderr ?
except os.error,why:
#print 'Command %s failed: %s' % (cmd,why)
continue
except IOError,why:
#print 'Command %s failed: %s' % (cmd,why)
continue
else:
break
else:
return system,release,version
# Parse the output
info = string.strip(info)
m = _ver_output.match(info)
if m is not None:
system,release,version = m.groups()
# Strip trailing dots from version and release
if release[-1] == '.':
release = release[:-1]
if version[-1] == '.':
version = version[:-1]
# Normalize the version and build strings (eliminating additional
# zeros)
version = _norm_version(version)
return system,release,version
def _win32_getvalue(key,name,default=''):
""" Read a value for name from the registry key.
In case this fails, default is returned.
"""
try:
# Use win32api if available
from win32api import RegQueryValueEx
except ImportError:
# On Python 2.0 and later, emulate using _winreg
import _winreg
RegQueryValueEx = _winreg.QueryValueEx
try:
return RegQueryValueEx(key,name)
except:
return default
def win32_ver(release='',version='',csd='',ptype=''):
""" Get additional version information from the Windows Registry
and return a tuple (version,csd,ptype) referring to version
number, CSD level and OS type (multi/single
processor).
As a hint: ptype returns 'Uniprocessor Free' on single
processor NT machines and 'Multiprocessor Free' on multi
processor machines. The 'Free' refers to the OS version being
free of debugging code. It could also state 'Checked' which
means the OS version uses debugging code, i.e. code that
checks arguments, ranges, etc. (Thomas Heller).
Note: this function works best with Mark Hammond's win32
package installed, but also on Python 2.3 and later. It
obviously only runs on Win32 compatible platforms.
"""
# XXX Is there any way to find out the processor type on WinXX ?
# XXX Is win32 available on Windows CE ?
#
# Adapted from code posted by Karl Putland to comp.lang.python.
#
# The mappings between reg. values and release names can be found
# here: http://msdn.microsoft.com/library/en-us/sysinfo/base/osversioninfo_str.asp
# Import the needed APIs
try:
import win32api
from win32api import RegQueryValueEx, RegOpenKeyEx, \
RegCloseKey, GetVersionEx
from win32con import HKEY_LOCAL_MACHINE, VER_PLATFORM_WIN32_NT, \
VER_PLATFORM_WIN32_WINDOWS, VER_NT_WORKSTATION
except ImportError:
# Emulate the win32api module using Python APIs
try:
sys.getwindowsversion
except AttributeError:
# No emulation possible, so return the defaults...
return release,version,csd,ptype
else:
# Emulation using _winreg (added in Python 2.0) and
# sys.getwindowsversion() (added in Python 2.3)
import _winreg
GetVersionEx = sys.getwindowsversion
RegQueryValueEx = _winreg.QueryValueEx
RegOpenKeyEx = _winreg.OpenKeyEx
RegCloseKey = _winreg.CloseKey
HKEY_LOCAL_MACHINE = _winreg.HKEY_LOCAL_MACHINE
VER_PLATFORM_WIN32_WINDOWS = 1
VER_PLATFORM_WIN32_NT = 2
VER_NT_WORKSTATION = 1
# Find out the registry key and some general version infos
maj,min,buildno,plat,csd = GetVersionEx()
version = '%i.%i.%i' % (maj,min,buildno & 0xFFFF)
if csd[:13] == 'Service Pack ':
csd = 'SP' + csd[13:]
if plat == VER_PLATFORM_WIN32_WINDOWS:
regkey = 'SOFTWARE\\Microsoft\\Windows\\CurrentVersion'
# Try to guess the release name
if maj == 4:
if min == 0:
release = '95'
elif min == 10:
release = '98'
elif min == 90:
release = 'Me'
else:
release = 'postMe'
elif maj == 5:
release = '2000'
elif plat == VER_PLATFORM_WIN32_NT:
regkey = 'SOFTWARE\\Microsoft\\Windows NT\\CurrentVersion'
if maj <= 4:
release = 'NT'
elif maj == 5:
if min == 0:
release = '2000'
elif min == 1:
release = 'XP'
elif min == 2:
release = '2003Server'
else:
release = 'post2003'
elif maj == 6:
if min == 0:
# Per http://msdn2.microsoft.com/en-us/library/ms724429.aspx
try:
productType = GetVersionEx(1)[8]
except TypeError:
# sys.getwindowsversion() doesn't take any arguments, so
# we cannot detect 2008 Server that way.
# XXX Add some other means of detecting 2008 Server ?!
release = 'Vista'
else:
if productType == VER_NT_WORKSTATION:
release = 'Vista'
else:
release = '2008Server'
else:
release = 'post2008Server'
else:
if not release:
# E.g. Win3.1 with win32s
release = '%i.%i' % (maj,min)
return release,version,csd,ptype
# Open the registry key
try:
keyCurVer = RegOpenKeyEx(HKEY_LOCAL_MACHINE, regkey)
# Get a value to make sure the key exists...
RegQueryValueEx(keyCurVer, 'SystemRoot')
except:
return release,version,csd,ptype
# Parse values
#subversion = _win32_getvalue(keyCurVer,
# 'SubVersionNumber',
# ('',1))[0]
#if subversion:
# release = release + subversion # 95a, 95b, etc.
build = _win32_getvalue(keyCurVer,
'CurrentBuildNumber',
('',1))[0]
ptype = _win32_getvalue(keyCurVer,
'CurrentType',
(ptype,1))[0]
# Normalize version
version = _norm_version(version,build)
# Close key
RegCloseKey(keyCurVer)
return release,version,csd,ptype
def _mac_ver_lookup(selectors,default=None):
from gestalt import gestalt
import MacOS
l = []
append = l.append
for selector in selectors:
try:
append(gestalt(selector))
except (RuntimeError, MacOS.Error):
append(default)
return l
def _bcd2str(bcd):
return hex(bcd)[2:]
def mac_ver(release='',versioninfo=('','',''),machine=''):
""" Get MacOS version information and return it as tuple (release,
versioninfo, machine) with versioninfo being a tuple (version,
dev_stage, non_release_version).
        Entries which cannot be determined are set to the parameter values
which default to ''. All tuple entries are strings.
Thanks to Mark R. Levinson for mailing documentation links and
code examples for this function. Documentation for the
gestalt() API is available online at:
http://www.rgaros.nl/gestalt/
"""
# Check whether the version info module is available
try:
import gestalt
import MacOS
except ImportError:
return release,versioninfo,machine
# Get the infos
sysv,sysu,sysa = _mac_ver_lookup(('sysv','sysu','sysa'))
# Decode the infos
if sysv:
major = (sysv & 0xFF00) >> 8
minor = (sysv & 0x00F0) >> 4
patch = (sysv & 0x000F)
if (major, minor) >= (10, 4):
            # the 'sysv' gestalt cannot return patchlevels
# higher than 9. Apple introduced 3 new
# gestalt codes in 10.4 to deal with this
# issue (needed because patch levels can
# run higher than 9, such as 10.4.11)
major,minor,patch = _mac_ver_lookup(('sys1','sys2','sys3'))
release = '%i.%i.%i' %(major, minor, patch)
else:
release = '%s.%i.%i' % (_bcd2str(major),minor,patch)
if sysu:
# NOTE: this block is left as documentation of the
# intention of this function, the 'sysu' gestalt is no
# longer available and there are no alternatives.
major = int((sysu & 0xFF000000L) >> 24)
minor = (sysu & 0x00F00000) >> 20
bugfix = (sysu & 0x000F0000) >> 16
stage = (sysu & 0x0000FF00) >> 8
nonrel = (sysu & 0x000000FF)
version = '%s.%i.%i' % (_bcd2str(major),minor,bugfix)
nonrel = _bcd2str(nonrel)
stage = {0x20:'development',
0x40:'alpha',
0x60:'beta',
0x80:'final'}.get(stage,'')
versioninfo = (version,stage,nonrel)
if sysa:
machine = {0x1: '68k',
0x2: 'PowerPC',
0xa: 'i386'}.get(sysa,'')
return release,versioninfo,machine
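# Illustrative usage (values are hypothetical and require a Python build that
# still ships the gestalt module):
#
#   >>> mac_ver()
#   ('10.4.11', ('', '', ''), 'PowerPC')
#
# Without gestalt/MacOS available, the passed-in defaults are returned as-is.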
def _java_getprop(name,default):
from java.lang import System
try:
value = System.getProperty(name)
if value is None:
return default
return value
except AttributeError:
return default
def java_ver(release='',vendor='',vminfo=('','',''),osinfo=('','','')):
""" Version interface for Jython.
Returns a tuple (release,vendor,vminfo,osinfo) with vminfo being
a tuple (vm_name,vm_release,vm_vendor) and osinfo being a
tuple (os_name,os_version,os_arch).
Values which cannot be determined are set to the defaults
given as parameters (which all default to '').
"""
# Import the needed APIs
try:
import java.lang
except ImportError:
return release,vendor,vminfo,osinfo
vendor = _java_getprop('java.vendor', vendor)
release = _java_getprop('java.version', release)
vm_name, vm_release, vm_vendor = vminfo
vm_name = _java_getprop('java.vm.name', vm_name)
vm_vendor = _java_getprop('java.vm.vendor', vm_vendor)
vm_release = _java_getprop('java.vm.version', vm_release)
vminfo = vm_name, vm_release, vm_vendor
os_name, os_version, os_arch = osinfo
os_arch = _java_getprop('java.os.arch', os_arch)
os_name = _java_getprop('java.os.name', os_name)
os_version = _java_getprop('java.os.version', os_version)
osinfo = os_name, os_version, os_arch
return release, vendor, vminfo, osinfo
### System name aliasing
def system_alias(system,release,version):
""" Returns (system,release,version) aliased to common
marketing names used for some systems.
It also does some reordering of the information in some cases
where it would otherwise cause confusion.
"""
if system == 'Rhapsody':
# Apple's BSD derivative
# XXX How can we determine the marketing release number ?
return 'MacOS X Server',system+release,version
elif system == 'SunOS':
# Sun's OS
if release < '5':
# These releases use the old name SunOS
return system,release,version
# Modify release (marketing release = SunOS release - 3)
l = string.split(release,'.')
if l:
try:
major = int(l[0])
except ValueError:
pass
else:
major = major - 3
l[0] = str(major)
release = string.join(l,'.')
if release < '6':
system = 'Solaris'
else:
# XXX Whatever the new SunOS marketing name is...
system = 'Solaris'
elif system == 'IRIX64':
# IRIX reports IRIX64 on platforms with 64-bit support; yet it
# is really a version and not a different platform, since 32-bit
# apps are also supported..
system = 'IRIX'
if version:
version = version + ' (64bit)'
else:
version = '64bit'
elif system in ('win32','win16'):
# In case one of the other tricks
system = 'Windows'
return system,release,version
### Various internal helpers
def _platform(*args):
""" Helper to format the platform string in a filename
compatible format e.g. "system-version-machine".
"""
# Format the platform string
platform = string.join(
map(string.strip,
filter(len, args)),
'-')
# Cleanup some possible filename obstacles...
replace = string.replace
platform = replace(platform,' ','_')
platform = replace(platform,'/','-')
platform = replace(platform,'\\','-')
platform = replace(platform,':','-')
platform = replace(platform,';','-')
platform = replace(platform,'"','-')
platform = replace(platform,'(','-')
platform = replace(platform,')','-')
# No need to report 'unknown' information...
platform = replace(platform,'unknown','')
# Fold '--'s and remove trailing '-'
while 1:
cleaned = replace(platform,'--','-')
if cleaned == platform:
break
platform = cleaned
while platform[-1] == '-':
platform = platform[:-1]
return platform
def _node(default=''):
""" Helper to determine the node name of this machine.
"""
try:
import socket
except ImportError:
# No sockets...
return default
try:
return socket.gethostname()
except socket.error:
# Still not working...
return default
# os.path.abspath is new in Python 1.5.2:
if not hasattr(os.path,'abspath'):
def _abspath(path,
isabs=os.path.isabs,join=os.path.join,getcwd=os.getcwd,
normpath=os.path.normpath):
if not isabs(path):
path = join(getcwd(), path)
return normpath(path)
else:
_abspath = os.path.abspath
def _follow_symlinks(filepath):
""" In case filepath is a symlink, follow it until a
real file is reached.
"""
filepath = _abspath(filepath)
while os.path.islink(filepath):
filepath = os.path.normpath(
os.path.join(os.path.dirname(filepath),os.readlink(filepath)))
return filepath
def _syscmd_uname(option,default=''):
""" Interface to the system's uname command.
"""
if sys.platform in ('dos','win32','win16','os2'):
# XXX Others too ?
return default
try:
f = os.popen('uname %s 2> /dev/null' % option)
except (AttributeError,os.error):
return default
output = string.strip(f.read())
rc = f.close()
if not output or rc:
return default
else:
return output
def _syscmd_file(target,default=''):
""" Interface to the system's file command.
The function uses the -b option of the file command to have it
        omit the filename in its output and if possible the -L option
to have the command follow symlinks. It returns default in
case the command should fail.
"""
if sys.platform in ('dos','win32','win16','os2'):
# XXX Others too ?
return default
target = _follow_symlinks(target)
try:
f = os.popen('file "%s" 2> /dev/null' % target)
except (AttributeError,os.error):
return default
output = string.strip(f.read())
rc = f.close()
if not output or rc:
return default
else:
return output
### Information about the used architecture
# Default values for architecture; non-empty strings override the
# defaults given as parameters
_default_architecture = {
'win32': ('','WindowsPE'),
'win16': ('','Windows'),
'dos': ('','MSDOS'),
}
_architecture_split = re.compile(r'[\s,]').split
def architecture(executable=sys.executable,bits='',linkage=''):
""" Queries the given executable (defaults to the Python interpreter
binary) for various architecture information.
Returns a tuple (bits,linkage) which contains information about
the bit architecture and the linkage format used for the
executable. Both values are returned as strings.
Values that cannot be determined are returned as given by the
parameter presets. If bits is given as '', the sizeof(pointer)
(or sizeof(long) on Python version < 1.5.2) is used as
indicator for the supported pointer size.
The function relies on the system's "file" command to do the
actual work. This is available on most if not all Unix
platforms. On some non-Unix platforms where the "file" command
does not exist and the executable is set to the Python interpreter
binary defaults from _default_architecture are used.
"""
# Use the sizeof(pointer) as default number of bits if nothing
# else is given as default.
if not bits:
import struct
try:
size = struct.calcsize('P')
except struct.error:
# Older installations can only query longs
size = struct.calcsize('l')
bits = str(size*8) + 'bit'
# Get data from the 'file' system command
if executable:
output = _syscmd_file(executable, '')
else:
output = ''
if not output and \
executable == sys.executable:
# "file" command did not return anything; we'll try to provide
# some sensible defaults then...
if _default_architecture.has_key(sys.platform):
b,l = _default_architecture[sys.platform]
if b:
bits = b
if l:
linkage = l
return bits,linkage
# Split the output into a list of strings omitting the filename
fileout = _architecture_split(output)[1:]
if 'executable' not in fileout:
# Format not supported
return bits,linkage
# Bits
if '32-bit' in fileout:
bits = '32bit'
elif 'N32' in fileout:
# On Irix only
bits = 'n32bit'
elif '64-bit' in fileout:
bits = '64bit'
# Linkage
if 'ELF' in fileout:
linkage = 'ELF'
elif 'PE' in fileout:
# E.g. Windows uses this format
if 'Windows' in fileout:
linkage = 'WindowsPE'
else:
linkage = 'PE'
elif 'COFF' in fileout:
linkage = 'COFF'
elif 'MS-DOS' in fileout:
linkage = 'MSDOS'
else:
# XXX the A.OUT format also falls under this class...
pass
return bits,linkage
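# Illustrative usage (the result depends on the interpreter binary and on the
# availability of the system "file" command; values shown are hypothetical):
#
#   >>> architecture()
#   ('64bit', 'ELF')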
### Portable uname() interface
_uname_cache = None
def uname():
""" Fairly portable uname interface. Returns a tuple
of strings (system,node,release,version,machine,processor)
identifying the underlying platform.
Note that unlike the os.uname function this also returns
possible processor information as an additional tuple entry.
Entries which cannot be determined are set to ''.
"""
global _uname_cache
no_os_uname = 0
if _uname_cache is not None:
return _uname_cache
processor = ''
# Get some infos from the builtin os.uname API...
try:
system,node,release,version,machine = os.uname()
except AttributeError:
no_os_uname = 1
if no_os_uname or not filter(None, (system, node, release, version, machine)):
        # Hmm, so there is either no uname or uname has returned
        # 'unknowns'... we'll have to poke around the system then.
if no_os_uname:
system = sys.platform
release = ''
version = ''
node = _node()
machine = ''
use_syscmd_ver = 01
# Try win32_ver() on win32 platforms
if system == 'win32':
release,version,csd,ptype = win32_ver()
if release and version:
use_syscmd_ver = 0
# Try to use the PROCESSOR_* environment variables
# available on Win XP and later; see
# http://support.microsoft.com/kb/888731 and
# http://www.geocities.com/rick_lively/MANUALS/ENV/MSWIN/PROCESSI.HTM
if not machine:
machine = os.environ.get('PROCESSOR_ARCHITECTURE', '')
if not processor:
processor = os.environ.get('PROCESSOR_IDENTIFIER', machine)
# Try the 'ver' system command available on some
# platforms
if use_syscmd_ver:
system,release,version = _syscmd_ver(system)
# Normalize system to what win32_ver() normally returns
# (_syscmd_ver() tends to return the vendor name as well)
if system == 'Microsoft Windows':
system = 'Windows'
elif system == 'Microsoft' and release == 'Windows':
# Under Windows Vista and Windows Server 2008,
# Microsoft changed the output of the ver command. The
# release is no longer printed. This causes the
# system and release to be misidentified.
system = 'Windows'
if '6.0' == version[:3]:
release = 'Vista'
else:
release = ''
# In case we still don't know anything useful, we'll try to
# help ourselves
if system in ('win32','win16'):
if not version:
if system == 'win32':
version = '32bit'
else:
version = '16bit'
system = 'Windows'
elif system[:4] == 'java':
release,vendor,vminfo,osinfo = java_ver()
system = 'Java'
version = string.join(vminfo,', ')
if not version:
version = vendor
elif os.name == 'mac':
release,(version,stage,nonrel),machine = mac_ver()
system = 'MacOS'
# System specific extensions
if system == 'OpenVMS':
# OpenVMS seems to have release and version mixed up
if not release or release == '0':
release = version
version = ''
# Get processor information
try:
import vms_lib
except ImportError:
pass
else:
csid, cpu_number = vms_lib.getsyi('SYI$_CPU',0)
if (cpu_number >= 128):
processor = 'Alpha'
else:
processor = 'VAX'
if not processor:
# Get processor information from the uname system command
processor = _syscmd_uname('-p','')
#If any unknowns still exist, replace them with ''s, which are more portable
if system == 'unknown':
system = ''
if node == 'unknown':
node = ''
if release == 'unknown':
release = ''
if version == 'unknown':
version = ''
if machine == 'unknown':
machine = ''
if processor == 'unknown':
processor = ''
# normalize name
if system == 'Microsoft' and release == 'Windows':
system = 'Windows'
release = 'Vista'
_uname_cache = system,node,release,version,machine,processor
return _uname_cache
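# Illustrative result (all field values are hypothetical):
#
#   >>> uname()
#   ('Linux', 'myhost', '2.6.24-27-generic', '#1 SMP ...', 'x86_64', 'x86_64')
#
# The result is cached in _uname_cache, so repeated calls are cheap.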
### Direct interfaces to some of the uname() return values
def system():
""" Returns the system/OS name, e.g. 'Linux', 'Windows' or 'Java'.
An empty string is returned if the value cannot be determined.
"""
return uname()[0]
def node():
""" Returns the computer's network name (which may not be fully
qualified)
An empty string is returned if the value cannot be determined.
"""
return uname()[1]
def release():
""" Returns the system's release, e.g. '2.2.0' or 'NT'
An empty string is returned if the value cannot be determined.
"""
return uname()[2]
def version():
""" Returns the system's release version, e.g. '#3 on degas'
An empty string is returned if the value cannot be determined.
"""
return uname()[3]
def machine():
""" Returns the machine type, e.g. 'i386'
An empty string is returned if the value cannot be determined.
"""
return uname()[4]
def processor():
""" Returns the (true) processor name, e.g. 'amdk6'
An empty string is returned if the value cannot be
determined. Note that many platforms do not provide this
information or simply return the same value as for machine(),
e.g. NetBSD does this.
"""
return uname()[5]
### Various APIs for extracting information from sys.version
_sys_version_parser = re.compile(
r'([\w.+]+)\s*'
'\(#?([^,]+),\s*([\w ]+),\s*([\w :]+)\)\s*'
'\[([^\]]+)\]?')
_jython_sys_version_parser = re.compile(
r'([\d\.]+)')
_ironpython_sys_version_parser = re.compile(
r'IronPython\s*'
'([\d\.]+)'
'(?: \(([\d\.]+)\))?'
' on (.NET [\d\.]+)')
_sys_version_cache = {}
def _sys_version(sys_version=None):
""" Returns a parsed version of Python's sys.version as tuple
(name, version, branch, revision, buildno, builddate, compiler)
referring to the Python implementation name, version, branch,
revision, build number, build date/time as string and the compiler
identification string.
Note that unlike the Python sys.version, the returned value
for the Python version will always include the patchlevel (it
defaults to '.0').
The function returns empty strings for tuple entries that
cannot be determined.
sys_version may be given to parse an alternative version
string, e.g. if the version was read from a different Python
interpreter.
"""
# Get the Python version
if sys_version is None:
sys_version = sys.version
# Try the cache first
result = _sys_version_cache.get(sys_version, None)
if result is not None:
return result
# Parse it
if sys_version[:10] == 'IronPython':
# IronPython
name = 'IronPython'
match = _ironpython_sys_version_parser.match(sys_version)
if match is None:
raise ValueError(
'failed to parse IronPython sys.version: %s' %
repr(sys_version))
version, alt_version, compiler = match.groups()
branch = ''
revision = ''
buildno = ''
builddate = ''
elif sys.platform[:4] == 'java':
# Jython
name = 'Jython'
match = _jython_sys_version_parser.match(sys_version)
if match is None:
raise ValueError(
'failed to parse Jython sys.version: %s' %
repr(sys_version))
version, = match.groups()
branch = ''
revision = ''
compiler = sys.platform
buildno = ''
builddate = ''
else:
# CPython
match = _sys_version_parser.match(sys_version)
if match is None:
raise ValueError(
'failed to parse CPython sys.version: %s' %
repr(sys_version))
version, buildno, builddate, buildtime, compiler = \
match.groups()
if hasattr(sys, 'subversion'):
# sys.subversion was added in Python 2.5
name, branch, revision = sys.subversion
else:
name = 'CPython'
branch = ''
revision = ''
builddate = builddate + ' ' + buildtime
# Add the patchlevel version if missing
l = string.split(version, '.')
if len(l) == 2:
l.append('0')
version = string.join(l, '.')
# Build and cache the result
result = (name, version, branch, revision, buildno, builddate, compiler)
_sys_version_cache[sys_version] = result
return result
def _test_sys_version():
_sys_version_cache.clear()
for input, output in (
('2.4.3 (#1, Jun 21 2006, 13:54:21) \n[GCC 3.3.4 (pre 3.3.5 20040809)]',
('CPython', '2.4.3', '', '', '1', 'Jun 21 2006 13:54:21', 'GCC 3.3.4 (pre 3.3.5 20040809)')),
('IronPython 1.0.60816 on .NET 2.0.50727.42',
('IronPython', '1.0.60816', '', '', '', '', '.NET 2.0.50727.42')),
('IronPython 1.0 (1.0.61005.1977) on .NET 2.0.50727.42',
('IronPython', '1.0.0', '', '', '', '', '.NET 2.0.50727.42')),
):
parsed = _sys_version(input)
if parsed != output:
print (input, parsed)
def python_implementation():
""" Returns a string identifying the Python implementation.
Currently, the following implementations are identified:
'CPython' (C implementation of Python),
'IronPython' (.NET implementation of Python),
'Jython' (Java implementation of Python).
"""
return _sys_version()[0]
def python_version():
""" Returns the Python version as string 'major.minor.patchlevel'
Note that unlike the Python sys.version, the returned value
will always include the patchlevel (it defaults to 0).
"""
return _sys_version()[1]
def python_version_tuple():
""" Returns the Python version as tuple (major, minor, patchlevel)
of strings.
Note that unlike the Python sys.version, the returned value
will always include the patchlevel (it defaults to 0).
"""
return tuple(string.split(_sys_version()[1], '.'))
def python_branch():
""" Returns a string identifying the Python implementation
branch.
For CPython this is the Subversion branch from which the
Python binary was built.
If not available, an empty string is returned.
"""
return _sys_version()[2]
def python_revision():
""" Returns a string identifying the Python implementation
revision.
For CPython this is the Subversion revision from which the
Python binary was built.
If not available, an empty string is returned.
"""
return _sys_version()[3]
def python_build():
""" Returns a tuple (buildno, builddate) stating the Python
build number and date as strings.
"""
return _sys_version()[4:6]
def python_compiler():
""" Returns a string identifying the compiler used for compiling
Python.
"""
return _sys_version()[6]
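# Illustrative values for the sys.version helpers, reusing the CPython 2.4.3
# sample string from _test_sys_version() above:
#
#   >>> python_version()
#   '2.4.3'
#   >>> python_version_tuple()
#   ('2', '4', '3')
#   >>> python_build()
#   ('1', 'Jun 21 2006 13:54:21')
#   >>> python_compiler()
#   'GCC 3.3.4 (pre 3.3.5 20040809)'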
### The Opus Magnum of platform strings :-)
_platform_cache = {}
def platform(aliased=0, terse=0):
""" Returns a single string identifying the underlying platform
with as much useful information as possible (but no more :).
The output is intended to be human readable rather than
machine parseable. It may look different on different
platforms and this is intended.
If "aliased" is true, the function will use aliases for
various platforms that report system names which differ from
their common names, e.g. SunOS will be reported as
Solaris. The system_alias() function is used to implement
this.
Setting terse to true causes the function to return only the
absolute minimum information needed to identify the platform.
"""
result = _platform_cache.get((aliased, terse), None)
if result is not None:
return result
# Get uname information and then apply platform specific cosmetics
# to it...
system,node,release,version,machine,processor = uname()
if machine == processor:
processor = ''
if aliased:
system,release,version = system_alias(system,release,version)
if system == 'Windows':
# MS platforms
rel,vers,csd,ptype = win32_ver(version)
if terse:
platform = _platform(system,release)
else:
platform = _platform(system,release,version,csd)
elif system in ('Linux',):
# Linux based systems
distname,distversion,distid = dist('')
if distname and not terse:
platform = _platform(system,release,machine,processor,
'with',
distname,distversion,distid)
else:
# If the distribution name is unknown check for libc vs. glibc
libcname,libcversion = libc_ver(sys.executable)
platform = _platform(system,release,machine,processor,
'with',
libcname+libcversion)
elif system == 'Java':
# Java platforms
r,v,vminfo,(os_name,os_version,os_arch) = java_ver()
if terse or not os_name:
platform = _platform(system,release,version)
else:
platform = _platform(system,release,version,
'on',
os_name,os_version,os_arch)
elif system == 'MacOS':
# MacOS platforms
if terse:
platform = _platform(system,release)
else:
platform = _platform(system,release,machine)
else:
# Generic handler
if terse:
platform = _platform(system,release)
else:
bits,linkage = architecture(sys.executable)
platform = _platform(system,release,machine,processor,bits,linkage)
_platform_cache[(aliased, terse)] = platform
return platform
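# Illustrative output (all strings are hypothetical and differ per system):
#
#   >>> platform()
#   'Linux-2.6.24-27-generic-x86_64-with-debian-4.0'
#   >>> platform(aliased=1, terse=1)
#   'Windows-XP'                      # e.g. on a Windows XP host
#
# aliased=1 routes the uname() data through system_alias() (e.g. SunOS ->
# Solaris); terse=1 keeps only the minimum needed to identify the platform.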
### Command line interface
if __name__ == '__main__':
# Default is to print the aliased verbose platform string
terse = ('terse' in sys.argv or '--terse' in sys.argv)
aliased = (not 'nonaliased' in sys.argv and not '--nonaliased' in sys.argv)
print platform(aliased,terse)
sys.exit(0)
| 33.805999 | 103 | 0.572847 |
4a1e9356930dd40b0f6c96abbcb24f58af39f393
| 1,990 |
py
|
Python
|
py2neo/meta.py
|
theY4Kman/py2neo
|
431cf8e10d7f7fc684864608ade854ce1feece65
|
[
"Apache-2.0"
] | null | null | null |
py2neo/meta.py
|
theY4Kman/py2neo
|
431cf8e10d7f7fc684864608ade854ce1feece65
|
[
"Apache-2.0"
] | null | null | null |
py2neo/meta.py
|
theY4Kman/py2neo
|
431cf8e10d7f7fc684864608ade854ce1feece65
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
# Copyright 2011-2020, Nigel Small
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__all__ = ["__author__", "__copyright__", "__email__", "__license__", "__package__", "__version__",
"NEO4J_URI", "NEO4J_AUTH", "NEO4J_USER_AGENT", "NEO4J_SECURE", "NEO4J_VERIFIED",
"bolt_user_agent", "http_user_agent"]
__author__ = "Nigel Small <technige@nige.tech>"
__copyright__ = "2011-2020, Nigel Small"
__email__ = "py2neo@nige.tech"
__license__ = "Apache License, Version 2.0"
__package__ = "py2neo"
__version__ = "4.4.0"
from os import getenv
NEO4J_URI = getenv("NEO4J_URI")
NEO4J_AUTH = getenv("NEO4J_AUTH")
NEO4J_USER_AGENT = getenv("NEO4J_USER_AGENT")
NEO4J_SECURE = True if getenv("NEO4J_SECURE") == "1" else False if getenv("NEO4J_SECURE") == "0" else None
NEO4J_VERIFIED = True if getenv("NEO4J_VERIFIED") == "1" else False if getenv("NEO4J_VERIFIED") == "0" else None
def bolt_user_agent():
from sys import platform, version_info
from neobolt.meta import version as neobolt_version
fields = (__package__, __version__, neobolt_version,) + tuple(version_info) + (platform,)
return "{}/{} neobolt/{} Python/{}.{}.{}-{}-{} ({})".format(*fields)
def http_user_agent():
from sys import platform, version_info
import urllib3
fields = (__package__, __version__, urllib3.__version__,) + tuple(version_info) + (platform,)
return "{}/{} urllib3/{} Python/{}.{}.{}-{}-{} ({})".format(*fields)
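# Illustrative results (a sketch; the neobolt/urllib3 versions, Python build
# and platform shown are hypothetical):
#
#   >>> bolt_user_agent()
#   'py2neo/4.4.0 neobolt/1.7.9 Python/3.7.3-final-0 (linux)'
#   >>> http_user_agent()
#   'py2neo/4.4.0 urllib3/1.25.3 Python/3.7.3-final-0 (linux)'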
| 37.54717 | 112 | 0.708543 |
4a1e9393ba113fbbb55bdd2ca26545ca90ba9d03
| 323 |
py
|
Python
|
Ch04-Divide-and-Conquer/02_quicksort.py
|
JackieXie168/grokking_algorithms
|
77f1719c87ef93249fc240a4668c2e5989ed49b7
|
[
"MIT"
] | 1 |
2021-07-05T12:15:46.000Z
|
2021-07-05T12:15:46.000Z
|
Ch04-Divide-and-Conquer/02_quicksort.py
|
JackieXie168/grokking_algorithms
|
77f1719c87ef93249fc240a4668c2e5989ed49b7
|
[
"MIT"
] | 1 |
2021-06-27T05:00:27.000Z
|
2021-06-27T05:00:27.000Z
|
Ch04-Divide-and-Conquer/02_quicksort.py
|
yihjie/Grokking_Algorithms_Study
|
77f1719c87ef93249fc240a4668c2e5989ed49b7
|
[
"MIT"
] | null | null | null |
def quicksort(array):
if len(array) < 2:
return array
else:
pivot = array[0]
less = [i for i in array[1:] if i <= pivot]
greater = [i for i in array[1:] if i > pivot]
return quicksort(less) + [pivot] + quicksort(greater)
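# Worked example of one recursion step (illustrative): for [10, 5, 2, 3] the
# pivot is 10, so less = [5, 2, 3] and greater = [], giving
# quicksort([5, 2, 3]) + [10] + [], which resolves to [2, 3, 5, 10].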
print([10, 5, 2, 3])
print(quicksort([10, 5, 2, 3]))
| 26.916667 | 61 | 0.544892 |
4a1e94610a79a3fed798e1d573d1a818029eae37
| 431 |
py
|
Python
|
src/logging_utils.py
|
olehb/CyberReaper
|
3dc5ed2928a6083f303aeb5f7ec36972d7642b68
|
[
"MIT"
] | null | null | null |
src/logging_utils.py
|
olehb/CyberReaper
|
3dc5ed2928a6083f303aeb5f7ec36972d7642b68
|
[
"MIT"
] | null | null | null |
src/logging_utils.py
|
olehb/CyberReaper
|
3dc5ed2928a6083f303aeb5f7ec36972d7642b68
|
[
"MIT"
] | null | null | null |
import logging
import sys
from logging import Logger
def configure_logger(name: str, level: int = logging.INFO) -> Logger:
logger = logging.getLogger(name)
logger_handler = logging.StreamHandler(sys.stdout)
logger_handler.setFormatter(
logging.Formatter("[%(asctime)s - %(levelname)s] %(message)s", datefmt="%H:%M:%S")
)
logger.addHandler(logger_handler)
logger.setLevel(level)
return logger
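# Minimal usage sketch (the logger name and message below are made up):
if __name__ == "__main__":
    log = configure_logger("example", logging.DEBUG)
    log.debug("stdout handler with [HH:MM:SS - LEVEL] formatting is active")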
| 26.9375 | 90 | 0.705336 |
4a1e952c9377bd318b19b7354ee228da3ecd4928
| 2,125 |
py
|
Python
|
indico/modules/categories/fields.py
|
bpedersen2/indico
|
8410ee5f8f8530a8692f3dd2d4015c3074b0aa30
|
[
"MIT"
] | null | null | null |
indico/modules/categories/fields.py
|
bpedersen2/indico
|
8410ee5f8f8530a8692f3dd2d4015c3074b0aa30
|
[
"MIT"
] | null | null | null |
indico/modules/categories/fields.py
|
bpedersen2/indico
|
8410ee5f8f8530a8692f3dd2d4015c3074b0aa30
|
[
"MIT"
] | null | null | null |
# This file is part of Indico.
# Copyright (C) 2002 - 2021 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
import json
from wtforms.fields.simple import HiddenField
from indico.modules.categories.models.event_move_request import EventMoveRequest, MoveRequestState
from indico.util.marshmallow import ModelList
from indico.web.forms.widgets import JinjaWidget
class CategoryField(HiddenField):
"""WTForms field that lets you select a category.
:param require_event_creation_rights: Whether to allow selecting
only categories where the
user can create events.
"""
widget = JinjaWidget('forms/category_picker_widget.html')
def __init__(self, *args, **kwargs):
self.navigator_category_id = 0
self.require_event_creation_rights = kwargs.pop('require_event_creation_rights', False)
super().__init__(*args, **kwargs)
def process_data(self, value):
if not value:
self.data = None
return
self.data = value
self.navigator_category_id = value.id
def process_formdata(self, valuelist):
from indico.modules.categories import Category
if valuelist:
try:
category_id = int(json.loads(valuelist[0])['id'])
except KeyError:
self.data = None
else:
self.data = Category.get(category_id, is_deleted=False)
def _value(self):
return {'id': self.data.id, 'title': self.data.title} if self.data else {}
def _get_data(self):
return self.data
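# Illustrative round trip (ids and titles are hypothetical): the picker widget
# posts a JSON object, process_formdata() resolves it to a Category, and
# _value() serialises it back for rendering:
#
#   field.process_formdata(['{"id": 42}'])   # -> field.data = Category.get(42, is_deleted=False)
#   field._value()                            # -> {'id': 42, 'title': 'Physics'}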
class EventRequestList(ModelList):
def __init__(self, category, **kwargs):
def _get_query(m):
return m.query.filter(
EventMoveRequest.category == category,
EventMoveRequest.state == MoveRequestState.pending
)
super().__init__(model=EventMoveRequest, get_query=_get_query, collection_class=set, **kwargs)
| 33.203125 | 102 | 0.645647 |
4a1e953feff0ff3cd007645303d6443c16231d88
| 7,681 |
py
|
Python
|
tests/test_multicall_for_all_chains.py
|
007vasy/multicall.py
|
b0cdf8b31c2000e8363926a86cffeedb89263767
|
[
"MIT"
] | null | null | null |
tests/test_multicall_for_all_chains.py
|
007vasy/multicall.py
|
b0cdf8b31c2000e8363926a86cffeedb89263767
|
[
"MIT"
] | null | null | null |
tests/test_multicall_for_all_chains.py
|
007vasy/multicall.py
|
b0cdf8b31c2000e8363926a86cffeedb89263767
|
[
"MIT"
] | null | null | null |
import sys
import unittest
from decimal import Decimal
from web3 import Web3
from web3.middleware import geth_poa_middleware
import pytest
import json
from pathlib import Path
from eth_utils import to_hex, is_hexstr, remove_0x_prefix
from multicall.constants import (
w3,
CHAIN_ARBITRUM,
CHAIN_AVAX,
CHAIN_BSC,
CHAIN_ETHEREUM,
CHAIN_FANTOM,
CHAIN_HARMONY,
CHAIN_HECO,
CHAIN_POLYGON,
CHAIN_MOONRIVER,
CHAIN_OPTIMISM,
CHAIN_XDAI,
CHAIN_MOONBEAM,
PUBLIC_RPC_ENDPOINT_MAP,
)
from multicall.multicall import get_multicall_map
from multicall import Call, Multicall
def from_decimals(value):
return Decimal(value)
def from_v4(value):
return Decimal(value) / 10**18
class AbstractBase:
class BaseMultiCall(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.rpc_endpoint_map = PUBLIC_RPC_ENDPOINT_MAP
# WILL be updated
cls.CHAIN = CHAIN_ETHEREUM
cls.CONTRACT = "0x00260Db07a22a6a5182213d8de1AbA0705A6Cd78"
cls.RQST_PARAM = "0x1589d072aC911a55c2010D97839a1f61b1e3323A"
cls.contract_interface = "owedPayment(address)(uint256)"
def get_w3(self):
w3_url = self.rpc_endpoint_map.get(self.CHAIN,None)
if w3_url is None:
_w3 = w3
else:
_w3 = Web3(Web3.HTTPProvider(w3_url))
return _w3
def get_no_params_call(self):
_w3 = self.get_w3()
return Call(self.CONTRACT, ['decimals()(uint8)'], [[self.CONTRACT,from_decimals]], _w3 = _w3, block_id="latest")
def get_with_params_call(self):
_w3 = self.get_w3()
return Call(self.CONTRACT, [self.contract_interface,self.RQST_PARAM], [[self.RQST_PARAM, from_v4]], _w3 = _w3, block_id="latest")
def test_multicall_no_params(self):
if self.CONTRACT is not None:
_w3 = self.get_w3()
multi = Multicall(
[
self.get_no_params_call()
]
, _w3 = _w3, block_id="latest")
decimals = multi()
assert len(decimals) == 1
assert decimals[self.CONTRACT] >= 8 and decimals[self.CONTRACT] <= 18
assert multi.get_last_calls_block_id() is not None
assert isinstance(multi.get_last_calls_block_id(),int)
else:
print(">> CONTRACT is not set")
def test_multicall_with_params(self):
if self.CONTRACT is not None:
_w3 = self.get_w3()
multi = Multicall(
[
self.get_with_params_call()
]
, _w3 = _w3, block_id="latest")
resp = multi()
assert resp[self.RQST_PARAM] is not None
assert resp[self.RQST_PARAM] >= 0
assert multi.get_last_calls_block_id() is not None
assert isinstance(multi.get_last_calls_block_id(),int)
else:
print(">> CONTRACT is not set")
class Test_ARBITRUM_MultiCall(AbstractBase.BaseMultiCall):
@classmethod
def setUpClass(cls):
super(Test_ARBITRUM_MultiCall, cls).setUpClass()
cls.CHAIN = CHAIN_ARBITRUM
cls.CONTRACT = "0x09410414Ca067b8763ce62DBEcA8160be9cfD548"
cls.RQST_PARAM = "0x01f4e56D5ee46e84Edf8595ca7A7B62a3306De76"
cls.contract_interface = 'owedPayment(address)(uint256)'
class Test_AVAX_MultiCall(AbstractBase.BaseMultiCall):
@classmethod
def setUpClass(cls):
super(Test_AVAX_MultiCall, cls).setUpClass()
cls.CHAIN = CHAIN_AVAX
cls.CONTRACT = "0x0aCcDFd55026873CB12F75f66513b42fB4974245"
cls.RQST_PARAM = "0x0499BEA33347cb62D79A9C0b1EDA01d8d329894c"
cls.contract_interface = 'owedPayment(address)(uint256)'
class Test_BSC_MultiCall(AbstractBase.BaseMultiCall):
@classmethod
def setUpClass(cls):
super(Test_BSC_MultiCall, cls).setUpClass()
cls.CHAIN = CHAIN_BSC
cls.CONTRACT = "0x000f46946d47647c04A5f10269e9084FB8c8637A"
cls.contract_interface = 'owedPayment(address)(uint256)'
class Test_ETHEREUM_MultiCall(AbstractBase.BaseMultiCall):
@classmethod
def setUpClass(cls):
super(Test_ETHEREUM_MultiCall, cls).setUpClass()
cls.CHAIN = CHAIN_ETHEREUM
cls.CONTRACT = "0x00260Db07a22a6a5182213d8de1AbA0705A6Cd78"
cls.RQST_PARAM = "0x1589d072aC911a55c2010D97839a1f61b1e3323A"
cls.contract_interface = 'owedPayment(address)(uint256)'
class Test_FANTOM_MultiCall(AbstractBase.BaseMultiCall):
@classmethod
def setUpClass(cls):
super(Test_FANTOM_MultiCall, cls).setUpClass()
cls.CHAIN = CHAIN_FANTOM
cls.CONTRACT = "0x0Aaf3EAcc3088691be6921fd33Bad8075590aE85"
cls.RQST_PARAM = "0x05Ee5882122A86C8D15D8D5ECB42830503A7d0d8"
cls.contract_interface = 'owedPayment(address)(uint256)'
class Test_HECO_MultiCall(AbstractBase.BaseMultiCall):
@classmethod
def setUpClass(cls):
super(Test_HECO_MultiCall, cls).setUpClass()
cls.CHAIN = CHAIN_HECO
cls.CONTRACT = "0x0AF7cEb1D2f3F5ceC626aEe32fF89EB15D40C586"
cls.RQST_PARAM = "0x071FE390b362b866257c739C402f1e33FACC6181"
class Test_MOONBEAM_MultiCall(AbstractBase.BaseMultiCall):
@classmethod
def setUpClass(cls):
super(Test_MOONBEAM_MultiCall, cls).setUpClass()
cls.CHAIN = CHAIN_MOONBEAM
cls.CONTRACT = ""
cls.RQST_PARAM = ""
cls.contract_interface = 'owedPayment(address)(uint256)'
class Test_MOONRIVER_MultiCall(AbstractBase.BaseMultiCall):
@classmethod
def setUpClass(cls):
super(Test_MOONRIVER_MultiCall, cls).setUpClass()
cls.CHAIN = CHAIN_MOONRIVER
cls.CONTRACT = "0x0192a7CA918CC005253008BE85dce18b164de437"
cls.RQST_PARAM = "0x03D4d742351dD27E2FCA3736F6285A75df48D15d"
cls.contract_interface = 'owedPayment(address)(uint256)'
class Test_OPTIMISM_MultiCall(AbstractBase.BaseMultiCall):
@classmethod
def setUpClass(cls):
super(Test_OPTIMISM_MultiCall, cls).setUpClass()
cls.CHAIN = CHAIN_OPTIMISM
cls.CONTRACT = "0x02f5E9e9dcc66ba6392f6904D5Fcf8625d9B19C9"
cls.RQST_PARAM = "0x2878c587eba4C4251f97784cE124f7387305Ab32"
cls.contract_interface = 'owedPayment(address)(uint256)'
class Test_POLYGON_MultiCall(AbstractBase.BaseMultiCall):
@classmethod
def setUpClass(cls):
super(Test_POLYGON_MultiCall, cls).setUpClass()
cls.CHAIN = CHAIN_POLYGON
cls.CONTRACT = "0x00a27E2f64dE7B05E9ddF7aD6bA916d78458c8c7"
cls.RQST_PARAM = "0x21148F81D302442c34D39cB65B82f5e7138F9bE6"
cls.contract_interface = 'owedPayment(address)(uint256)'
class Test_XDAI_MultiCall(AbstractBase.BaseMultiCall):
@classmethod
def setUpClass(cls):
super(Test_XDAI_MultiCall, cls).setUpClass()
cls.CHAIN = CHAIN_XDAI
cls.CONTRACT = "0x016a45F646bbd35B61fE7A496a75D9Ea69bD243E"
cls.RQST_PARAM = "0x11eB6a69A56DF3a89b99c4b1484691Af4AB0c508"
cls.contract_interface = 'owedPayment(address)(uint256)'
class Test_HARMONY_MultiCall(AbstractBase.BaseMultiCall):
@classmethod
def setUpClass(cls):
super(Test_HARMONY_MultiCall, cls).setUpClass()
cls.CHAIN = CHAIN_HARMONY
cls.CONTRACT = "0x21d100a1792ea39b1b172371d4076f7e3a63d159"
cls.RQST_PARAM = "0xbf8f337b1863dee5e251358834dbecdeafb64ce2"
cls.contract_interface = 'owedPayment(address)(uint256)'
| 37.10628 | 141 | 0.675173 |
4a1e95fc6dc5ae52d98fad3c2f2c19bf09abb062
| 1,913 |
py
|
Python
|
contrib/devtools/check-doc.py
|
ventual-core/vntl
|
9408f2d7ea53e7191d0d72a13ad80c6472f4f1cf
|
[
"MIT"
] | null | null | null |
contrib/devtools/check-doc.py
|
ventual-core/vntl
|
9408f2d7ea53e7191d0d72a13ad80c6472f4f1cf
|
[
"MIT"
] | null | null | null |
contrib/devtools/check-doc.py
|
ventual-core/vntl
|
9408f2d7ea53e7191d0d72a13ad80c6472f4f1cf
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# Copyright (c) 2015-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
This checks if all command line args are documented.
Return value is 0 to indicate no error.
Author: @MarcoFalke
'''
from subprocess import check_output
import re
FOLDER_GREP = 'src'
FOLDER_TEST = 'src/test/'
CMD_ROOT_DIR = '`git rev-parse --show-toplevel`/%s' % FOLDER_GREP
CMD_GREP_ARGS = r"egrep -r -I '(map(Multi)?Args(\.count\(|\[)|Get(Bool)?Arg\()\"\-[^\"]+?\"' %s | grep -v '%s'" % (CMD_ROOT_DIR, FOLDER_TEST)
CMD_GREP_DOCS = r"egrep -r -I 'HelpMessageOpt\(\"\-[^\"=]+?(=|\")' %s" % (CMD_ROOT_DIR)
REGEX_ARG = re.compile(r'(?:map(?:Multi)?Args(?:\.count\(|\[)|Get(?:Bool)?Arg\()\"(\-[^\"]+?)\"')
REGEX_DOC = re.compile(r'HelpMessageOpt\(\"(\-[^\"=]+?)(?:=|\")')
# list unsupported, deprecated and duplicate args as they need no documentation
SET_DOC_OPTIONAL = set(['-rpcssl', '-benchmark', '-h', '-help', '-socks', '-tor', '-debugnet', '-whitelistalwaysrelay', '-prematurewitness', '-walletprematurewitness', '-promiscuousmempoolflags', '-blockminsize', '-sendfreetransactions', '-checklevel', '-liquidityprovider', '-anonymizeventualamount'])
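# Illustrative matches (the source lines are hypothetical): REGEX_ARG pulls
# "-datadir" out of code such as GetArg("-datadir", "") or
# mapArgs.count("-datadir"), while REGEX_DOC pulls "-datadir" out of
# HelpMessageOpt("-datadir=<dir>", ...).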
def main():
used = check_output(CMD_GREP_ARGS, shell=True)
docd = check_output(CMD_GREP_DOCS, shell=True)
args_used = set(re.findall(REGEX_ARG,used))
args_docd = set(re.findall(REGEX_DOC,docd)).union(SET_DOC_OPTIONAL)
args_need_doc = args_used.difference(args_docd)
args_unknown = args_docd.difference(args_used)
print "Args used : %s" % len(args_used)
print "Args documented : %s" % len(args_docd)
print "Args undocumented: %s" % len(args_need_doc)
print args_need_doc
print "Args unknown : %s" % len(args_unknown)
print args_unknown
exit(len(args_need_doc))
if __name__ == "__main__":
main()
| 42.511111 | 302 | 0.690538 |
4a1e9737e74e7445dd8352b1e868e4d87680c9ec
| 655 |
py
|
Python
|
kattis/k_everywhere.py
|
ivanlyon/exercises
|
0792976ae2acb85187b26a52812f9ebdd119b5e8
|
[
"MIT"
] | null | null | null |
kattis/k_everywhere.py
|
ivanlyon/exercises
|
0792976ae2acb85187b26a52812f9ebdd119b5e8
|
[
"MIT"
] | null | null | null |
kattis/k_everywhere.py
|
ivanlyon/exercises
|
0792976ae2acb85187b26a52812f9ebdd119b5e8
|
[
"MIT"
] | null | null | null |
'''
Compute # of unique labels.
Status: Accepted
'''
###############################################################################
def uniques(inputs):
'''Compute # of unique labels in a list'''
return len(set(inputs))
###############################################################################
if __name__ == '__main__':
testcases = int(input())
for t in range(testcases):
worktrips = int(input())
destinations = []
for w in range(worktrips):
destinations.append(input())
print(uniques(destinations))
###############################################################################
| 25.192308 | 79 | 0.378626 |
4a1e9921b22a91d2399746c929697d656d73f305
| 3,020 |
py
|
Python
|
examples/demoBebopVisionGUI.py
|
leocencetti/pyparrot
|
c1a3df4ce7a62a646ab85e93b89966256bfa6cfa
|
[
"MIT"
] | 1 |
2020-04-24T15:22:13.000Z
|
2020-04-24T15:22:13.000Z
|
examples/demoBebopVisionGUI.py
|
leocencetti/pyparrot
|
c1a3df4ce7a62a646ab85e93b89966256bfa6cfa
|
[
"MIT"
] | null | null | null |
examples/demoBebopVisionGUI.py
|
leocencetti/pyparrot
|
c1a3df4ce7a62a646ab85e93b89966256bfa6cfa
|
[
"MIT"
] | null | null | null |
"""
Demo of the Bebop vision using DroneVisionGUI (relies on libVLC). It is a different
multi-threaded approach than DroneVision
Author: Amy McGovern
"""
from pyparrot.Bebop import Bebop
from pyparrot.DroneVisionGUI import DroneVisionGUI
import threading
import cv2
import time
from PyQt5.QtGui import QImage
isAlive = False
class UserVision:
def __init__(self, vision):
self.index = 0
self.vision = vision
def save_pictures(self, args):
#print("saving picture")
img = self.vision.get_latest_valid_picture()
# limiting the pictures to the first 10 just to limit the demo from writing out a ton of files
if (img is not None and self.index <= 10):
filename = "test_image_%06d.png" % self.index
cv2.imwrite(filename, img)
self.index +=1
def draw_current_photo():
"""
Quick demo of returning an image to show in the user window. Clearly one would want to make this a dynamic image
"""
image = cv2.imread('test_image_000001.png')
if (image is not None):
if len(image.shape) < 3 or image.shape[2] == 1:
image = cv2.cvtColor(image, cv2.COLOR_GRAY2RGB)
else:
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
height, width, byteValue = image.shape
byteValue = byteValue * width
qimage = QImage(image, width, height, byteValue, QImage.Format_RGB888)
return qimage
else:
return None
def demo_user_code_after_vision_opened(bebopVision, args):
bebop = args[0]
print("Vision successfully started!")
#removed the user call to this function (it now happens in open_video())
#bebopVision.start_video_buffering()
# takeoff
# bebop.safe_takeoff(5)
# skipping actually flying for safety purposes indoors - if you want
# different pictures, move the bebop around by hand
print("Fly me around by hand!")
bebop.smart_sleep(15)
if (bebopVision.vision_running):
print("Moving the camera using velocity")
bebop.pan_tilt_camera_velocity(pan_velocity=0, tilt_velocity=-2, duration=4)
bebop.smart_sleep(5)
# land
bebop.safe_land(5)
print("Finishing demo and stopping vision")
bebopVision.close_video()
# disconnect nicely so we don't need a reboot
print("disconnecting")
bebop.disconnect()
if __name__ == "__main__":
# make my bebop object
bebop = Bebop()
# connect to the bebop
success = bebop.connect(5)
if (success):
# start up the video
bebopVision = DroneVisionGUI(bebop, is_bebop=True, user_code_to_run=demo_user_code_after_vision_opened,
user_args=(bebop, ), user_draw_window_fn=draw_current_photo)
userVision = UserVision(bebopVision)
bebopVision.set_user_callback_function(userVision.save_pictures, user_callback_args=None)
bebopVision.open_video()
else:
print("Error connecting to bebop. Retry")
| 29.607843 | 117 | 0.67053 |
4a1e994c03fcda72c389f0256f6922975ac2a6bf
| 3,959 |
py
|
Python
|
hubspot/crm/quotes/models/next_page.py
|
cclauss/hubspot-api-python
|
7c60c0f572b98c73e1f1816bf5981396a42735f6
|
[
"Apache-2.0"
] | null | null | null |
hubspot/crm/quotes/models/next_page.py
|
cclauss/hubspot-api-python
|
7c60c0f572b98c73e1f1816bf5981396a42735f6
|
[
"Apache-2.0"
] | null | null | null |
hubspot/crm/quotes/models/next_page.py
|
cclauss/hubspot-api-python
|
7c60c0f572b98c73e1f1816bf5981396a42735f6
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
"""
Quotes
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: v3
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from hubspot.crm.quotes.configuration import Configuration
class NextPage(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'after': 'str',
'link': 'str'
}
attribute_map = {
'after': 'after',
'link': 'link'
}
def __init__(self, after=None, link=None, local_vars_configuration=None): # noqa: E501
"""NextPage - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._after = None
self._link = None
self.discriminator = None
self.after = after
if link is not None:
self.link = link
@property
def after(self):
"""Gets the after of this NextPage. # noqa: E501
:return: The after of this NextPage. # noqa: E501
:rtype: str
"""
return self._after
@after.setter
def after(self, after):
"""Sets the after of this NextPage.
:param after: The after of this NextPage. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and after is None: # noqa: E501
raise ValueError("Invalid value for `after`, must not be `None`") # noqa: E501
self._after = after
@property
def link(self):
"""Gets the link of this NextPage. # noqa: E501
:return: The link of this NextPage. # noqa: E501
:rtype: str
"""
return self._link
@link.setter
def link(self, link):
"""Sets the link of this NextPage.
:param link: The link of this NextPage. # noqa: E501
:type: str
"""
self._link = link
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, NextPage):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, NextPage):
return True
return self.to_dict() != other.to_dict()
| 26.75 | 124 | 0.562768 |
4a1e9a4a070e7aff2a6f729ea6f1087ebee0cb7c
| 2,652 |
py
|
Python
|
DatasetStats.py
|
JosephDevaney/FYP
|
46c597074b5b17da5ba5f25a7ffc2a8e2bfdbd40
|
[
"MIT"
] | null | null | null |
DatasetStats.py
|
JosephDevaney/FYP
|
46c597074b5b17da5ba5f25a7ffc2a8e2bfdbd40
|
[
"MIT"
] | null | null | null |
DatasetStats.py
|
JosephDevaney/FYP
|
46c597074b5b17da5ba5f25a7ffc2a8e2bfdbd40
|
[
"MIT"
] | null | null | null |
import os
import scipy.io.wavfile as wav
import pickle as pkl
from VideoFeatures import VideoFeatures
# Builds a report of a features file detailing the number of instances per category and the average length
def analyse_features():
path = input("Enter the filepath here: \n")
videos = {}
try:
with open(path + "features30sec.ftr", "rb") as inp:
unpickle = pkl.Unpickler(inp)
while True:
try:
vid = unpickle.load()
cat = vid.get_category_from_name()
vid_len = vid.get_length_from_name()
if cat in videos:
videos[cat][0] += 1
videos[cat][1] += int(vid_len)
else:
videos[cat] = [1, int(vid_len)]
except EOFError:
print("EOF")
break
except TypeError:
print("Unable to load object")
except pkl.UnpicklingError:
print("Unable to load object2")
except Exception as e:
print(e)
except Exception as e:
print(type(e))
print(e.args)
return videos
# Checks a dataset text file and counts the number of instances per class.
def analyse_videos_file():
vid_stats = {}
while True:
datafile = input("Please enter the location of the datafile: \n")
# videodata = [[val for val in line.split('\t')] for line in open(datafile) if line]
with open(datafile) as f:
for line in f:
cols = line.split('\t')
                if len(cols) >= 5:  # cols[4] (length) is read below, so at least 5 columns are needed
vid_link = cols[0]
vid_cat = cols[3]
vid_len = int(cols[4])
if vid_cat in vid_stats:
vid_stats[vid_cat][0] += 1
vid_stats[vid_cat][1] += vid_len
else:
vid_stats[vid_cat] = [1, vid_len]
more_files = input("Press Y(y) to enter another file to be analysed: \n")
if more_files != 'Y' and more_files != 'y':
break
return vid_stats
def print_stats(stats):
for cat, val in stats.items():
print(cat + "\t|\t" + str(val[0]) + "\t|\t" + str(val[1] / val[0]))
def main():
print_stats(analyse_features())
# print_stats(analyse_videos_file())
if __name__ == "__main__":
main()
# D:\Documents\DT228_4\FYP\Datasets\080327\0_Audio\Autos & Vehicles_7n3jD-kxb1U_310.wav
# D:\Documents\DT228_4\FYP\Datasets\080327\1.txt
| 31.2 | 106 | 0.521493 |
4a1e9b15030e841bd8b399464a6119c37d8336b5
| 1,281 |
py
|
Python
|
tictactoe/tests/tictactoe_test.py
|
akhilennu/cs50ai_tictactoe
|
6e35ea10c50d358f2c8b1dfb22c720a14bd7bbd1
|
[
"MIT"
] | null | null | null |
tictactoe/tests/tictactoe_test.py
|
akhilennu/cs50ai_tictactoe
|
6e35ea10c50d358f2c8b1dfb22c720a14bd7bbd1
|
[
"MIT"
] | null | null | null |
tictactoe/tests/tictactoe_test.py
|
akhilennu/cs50ai_tictactoe
|
6e35ea10c50d358f2c8b1dfb22c720a14bd7bbd1
|
[
"MIT"
] | null | null | null |
import tictactoe as t
import test_helper as helper
import unittest
class TestTicTacToe(unittest.TestCase):
def test_player(self):
for test_case in helper.test_cases:
self.assertEqual(t.player(test_case["board"]),test_case["player"])
def test_terminal(self):
self.assertTrue(t.terminal([['O', 'X', 'O'], ['X', 'X', 'O'], ['X', 'O', 'X']]))
def test_result(self):
board = [['O', 'O', 'X'], ['X', 'O', 'X'], [t.EMPTY, 'X', t.EMPTY]]
tmp = t.result(board,(2,2))
self.assertEqual(tmp,[['O', 'O', 'X'], ['X', 'O', 'X'], [t.EMPTY, 'X', 'O']])
self.assertEqual(board,[['O', 'O', 'X'], ['X', 'O', 'X'], [t.EMPTY, 'X', t.EMPTY]])
def test_minimax(self):
self.assertEqual(t.minimax([['O', 'X', 'O'], ['X', 'X', 'O'], ['X', 'O', t.EMPTY]]),(2,2))
self.assertEqual(t.minimax([['O', 'X', 'O'], ['X', 'X', 'O'], ['X', t.EMPTY, t.EMPTY]]),(2,2))
self.assertEqual(t.minimax([['O', 'O', 'X'], ['X', 'O', 'X'], [t.EMPTY, 'X', t.EMPTY]]),(2,2))
self.assertEqual(t.minimax([
[t.X, t.EMPTY, t.EMPTY],
[t.EMPTY, t.O, t.EMPTY],
[t.EMPTY, t.EMPTY, t.EMPTY],
]),(0,1))
if __name__ == '__main__':
unittest.main()
| 38.818182 | 102 | 0.505855 |
4a1e9c8aee74b89469f219136d63471f333874b0
| 4,015 |
py
|
Python
|
env/Lib/site-packages/plotly/validators/layout/_mapbox.py
|
andresgreen-byte/Laboratorio-1--Inversion-de-Capital
|
8a4707301d19c3826c31026c4077930bcd6a8182
|
[
"MIT"
] | 11,750 |
2015-10-12T07:03:39.000Z
|
2022-03-31T20:43:15.000Z
|
packages/python/plotly/plotly/validators/layout/_mapbox.py
|
jiangrongbo/plotly.py
|
df19fc702b309586cc24e25373b87e8bdbb3ff60
|
[
"MIT"
] | 2,951 |
2015-10-12T00:41:25.000Z
|
2022-03-31T22:19:26.000Z
|
packages/python/plotly/plotly/validators/layout/_mapbox.py
|
jiangrongbo/plotly.py
|
df19fc702b309586cc24e25373b87e8bdbb3ff60
|
[
"MIT"
] | 2,623 |
2015-10-15T14:40:27.000Z
|
2022-03-28T16:05:50.000Z
|
import _plotly_utils.basevalidators
class MapboxValidator(_plotly_utils.basevalidators.CompoundValidator):
def __init__(self, plotly_name="mapbox", parent_name="layout", **kwargs):
super(MapboxValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
data_class_str=kwargs.pop("data_class_str", "Mapbox"),
data_docs=kwargs.pop(
"data_docs",
"""
accesstoken
Sets the mapbox access token to be used for
this mapbox map. Alternatively, the mapbox
access token can be set in the configuration
options under `mapboxAccessToken`. Note that
accessToken are only required when `style` (e.g
with values : basic, streets, outdoors, light,
dark, satellite, satellite-streets ) and/or a
layout layer references the Mapbox server.
bearing
Sets the bearing angle of the map in degrees
counter-clockwise from North (mapbox.bearing).
center
:class:`plotly.graph_objects.layout.mapbox.Cent
er` instance or dict with compatible properties
domain
:class:`plotly.graph_objects.layout.mapbox.Doma
in` instance or dict with compatible properties
layers
A tuple of :class:`plotly.graph_objects.layout.
mapbox.Layer` instances or dicts with
compatible properties
layerdefaults
When used in a template (as
layout.template.layout.mapbox.layerdefaults),
sets the default property values to use for
elements of layout.mapbox.layers
pitch
Sets the pitch angle of the map (in degrees,
where 0 means perpendicular to the surface of
the map) (mapbox.pitch).
style
Defines the map layers that are rendered by
default below the trace layers defined in
`data`, which are themselves by default
rendered below the layers defined in
`layout.mapbox.layers`. These layers can be
defined either explicitly as a Mapbox Style
object which can contain multiple layer
definitions that load data from any public or
private Tile Map Service (TMS or XYZ) or Web
Map Service (WMS) or implicitly by using one of
the built-in style objects which use WMSes
which do not require any access tokens, or by
using a default Mapbox style or custom Mapbox
style URL, both of which require a Mapbox
access token Note that Mapbox access token can
be set in the `accesstoken` attribute or in the
`mapboxAccessToken` config option. Mapbox
Style objects are of the form described in the
Mapbox GL JS documentation available at
https://docs.mapbox.com/mapbox-gl-js/style-spec
The built-in plotly.js styles objects are:
carto-darkmatter, carto-positron, open-street-
map, stamen-terrain, stamen-toner, stamen-
watercolor, white-bg The built-in Mapbox
styles are: basic, streets, outdoors, light,
dark, satellite, satellite-streets Mapbox
style URLs are of the form:
mapbox://mapbox.mapbox-<name>-<version>
uirevision
Controls persistence of user-driven changes in
the view: `center`, `zoom`, `bearing`, `pitch`.
Defaults to `layout.uirevision`.
zoom
Sets the zoom level of the map (mapbox.zoom).
""",
),
**kwargs
)
| 48.373494 | 77 | 0.571606 |
4a1e9d4e66882a2fd07abc70f8b1b4b097ac1265
| 7,243 |
py
|
Python
|
code/gene_distance_feature.py
|
Asleda/IR
|
c58870a0219211415c0b2f47af0c8ab5c8ba3001
|
[
"MIT"
] | null | null | null |
code/gene_distance_feature.py
|
Asleda/IR
|
c58870a0219211415c0b2f47af0c8ab5c8ba3001
|
[
"MIT"
] | null | null | null |
code/gene_distance_feature.py
|
Asleda/IR
|
c58870a0219211415c0b2f47af0c8ab5c8ba3001
|
[
"MIT"
] | null | null | null |
from load_model import bing_bg,bing_ug,bing_tg,twitter_ug,twitter_bg,twitter_tg,inter_ug,inter_bg,inter_tg
from update import getBigram,getTrigram
import numpy as np
###########################################################################################
def try_divide(x, y, val=0.0):
if y != 0.0:
val = float(x) / y
return val
def JaccardCoef(A, B):
A, B = set(A), set(B)
intersect = len(A.intersection(B))
union = len(A.union(B))
j_d = try_divide(intersect, union)
return j_d
def DiceDist(A, B):
A, B = set(A), set(B)
intersect = len(A.intersection(B))
union = len(A) + len(B)
d_d = try_divide(2*intersect, union)
return d_d
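# Worked example (illustrative only, not part of the original module):
#   A = "abc", B = "abd" -> intersection {'a', 'b'}, union {'a', 'b', 'c', 'd'}
#   JaccardCoef(A, B) = 2 / 4 = 0.5
#   DiceDist(A, B)    = 2 * 2 / (3 + 3) ~= 0.667
# try_divide() keeps both metrics at 0.0 when the denominator is zero.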
def compute_dist(A, B, dist="jaccard_coef"):
if dist == "jaccard_coef":
d = JaccardCoef(A, B)
elif dist == "dice_dist":
d = DiceDist(A, B)
return d
def pairwise_jaccard_coef(A, B):
coef = np.zeros((A.shape[0], B.shape[0]), dtype=float)
for i in range(A.shape[0]):
for j in range(B.shape[0]):
coef[i,j] = JaccardCoef(A[i], B[j])
return coef
def pairwise_dice_dist(A, B):
d = np.zeros((A.shape[0], B.shape[0]), dtype=float)
for i in range(A.shape[0]):
for j in range(B.shape[0]):
d[i,j] = DiceDist(A[i], B[j])
return d
def pairwise_dist(A, B, dist="jaccard_coef"):
if dist == "jaccard_coef":
d = pairwise_jaccard_coef(A, B)
elif dist == "dice_dist":
d = pairwise_dice_dist(A, B)
return d
###########################################################################################
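# Added description (inferred from the code below): for a query represented by its
# unigrams (term_u) and bigrams (term_b), distance_feat yields a single 18-element
# feature list holding Jaccard and Dice distances (plain and pairwise-averaged for
# the unigrams) against the Bing, Twitter and intersection n-gram models of the
# given topic.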
def distance_feat(term_u,term_b,topic):
query_u_bing_jd=compute_dist(term_u,bing_ug[topic],dist="jaccard_coef")
query_u_twitter_jd=compute_dist(term_u,twitter_ug[topic],dist="jaccard_coef")
query_u_inter_jd=compute_dist(term_u,inter_ug[topic],dist="jaccard_coef")
query_u_bing_dd=compute_dist(term_u,bing_ug[topic],dist="dice_dist")
query_u_twitter_dd=compute_dist(term_u,twitter_ug[topic],dist="dice_dist")
query_u_inter_dd=compute_dist(term_u,inter_ug[topic],dist="dice_dist")
query_u_bing_pjd=try_divide(sum([ii for i in pairwise_dist(np.array(term_u),np.array(bing_ug[topic]),dist="jaccard_coef") for ii in i]),np.array(term_u).shape[0]*np.array(bing_ug[topic]).shape[0])
query_u_twitter_pjd=try_divide(sum([ii for i in pairwise_dist(np.array(term_u),np.array(twitter_ug[topic]),dist="jaccard_coef") for ii in i]),np.array(term_u).shape[0]*np.array(twitter_ug[topic]).shape[0])
query_u_inter_pjd=try_divide(sum([ii for i in pairwise_dist(np.array(term_u),np.array(inter_ug[topic]),dist="jaccard_coef") for ii in i]),np.array(term_u).shape[0]*np.array(inter_ug[topic]).shape[0])
query_u_bing_pdd=try_divide(sum([ii for i in pairwise_dist(np.array(term_u),np.array(bing_ug[topic]),dist="dice_dist") for ii in i]),np.array(term_u).shape[0]*np.array(bing_ug[topic]).shape[0])
query_u_twitter_pdd=try_divide(sum([ii for i in pairwise_dist(np.array(term_u),np.array(twitter_ug[topic]),dist="dice_dist") for ii in i]),np.array(term_u).shape[0]*np.array(twitter_ug[topic]).shape[0])
query_u_inter_pdd=try_divide(sum([ii for i in pairwise_dist(np.array(term_u),np.array(inter_ug[topic]),dist="dice_dist") for ii in i]),np.array(term_u).shape[0]*np.array(inter_ug[topic]).shape[0])
###################################################################
query_b_bing_jd=compute_dist(term_b,bing_bg[topic],dist="jaccard_coef")
query_b_twitter_jd=compute_dist(term_b,twitter_bg[topic],dist="jaccard_coef")
query_b_inter_jd=compute_dist(term_b,inter_bg[topic],dist="jaccard_coef")
query_b_bing_dd=compute_dist(term_b,bing_bg[topic],dist="dice_dist")
query_b_twitter_dd=compute_dist(term_b,twitter_bg[topic],dist="dice_dist")
query_b_inter_dd=compute_dist(term_b,inter_bg[topic],dist="dice_dist")
# query_b_bing_pjd=try_divide(sum([ii for i in pairwise_dist(np.array(term_b),np.array(bing_bg[topic]),dist="jaccard_coef") for ii in i]),np.array(term_b).shape[0]*np.array(bing_bg[topic]).shape[0])
# query_b_twitter_pjd=try_divide(sum([ii for i in pairwise_dist(np.array(term_b),np.array(twitter_bg[topic]),dist="jaccard_coef") for ii in i]),np.array(term_b).shape[0]*np.array(twitter_bg[topic]).shape[0])
# query_b_inter_pjd=try_divide(sum([ii for i in pairwise_dist(np.array(term_b),np.array(inter_bg[topic]),dist="jaccard_coef") for ii in i]),np.array(term_b).shape[0]*np.array(inter_bg[topic]).shape[0])
# query_b_bing_pdd=try_divide(sum([ii for i in pairwise_dist(np.array(term_b),np.array(bing_bg[topic]),dist="dice_dist") for ii in i]),np.array(term_b).shape[0]*np.array(bing_bg[topic]).shape[0])
# query_b_twitter_pdd=try_divide(sum([ii for i in pairwise_dist(np.array(term_b),np.array(twitter_bg[topic]),dist="dice_dist") for ii in i]),np.array(term_b).shape[0]*np.array(twitter_bg[topic]).shape[0])
# query_b_inter_pdd=try_divide(sum([ii for i in pairwise_dist(np.array(term_b),np.array(inter_bg[topic]),dist="dice_dist") for ii in i]),np.array(term_b).shape[0]*np.array(inter_bg[topic]).shape[0])
###################################################################
# query_t_bing_jd=compute_dist(term_t,bing_tg[topic],dist="jaccard_coef")
# query_t_twitter_jd=compute_dist(term_t,twitter_tg[topic],dist="jaccard_coef")
# query_t_inter_jd=compute_dist(term_t,inter_tg[topic],dist="jaccard_coef")
# query_t_bing_dd=compute_dist(term_t,bing_tg[topic],dist="dice_dist")
# query_t_twitter_dd=compute_dist(term_t,twitter_tg[topic],dist="dice_dist")
# query_t_inter_dd=compute_dist(term_t,inter_tg[topic],dist="dice_dist")
# query_t_bing_pjd=try_divide(sum([ii for i in pairwise_dist(np.array(term_t),np.array(bing_tg[topic]),dist="jaccard_coef") for ii in i]),np.array(term_t).shape[0]*np.array(bing_tg[topic]).shape[0])
# query_t_twitter_pjd=try_divide(sum([ii for i in pairwise_dist(np.array(term_t),np.array(twitter_tg[topic]),dist="jaccard_coef") for ii in i]),np.array(term_t).shape[0]*np.array(twitter_tg[topic]).shape[0])
# query_t_inter_pjd=try_divide(sum([ii for i in pairwise_dist(np.array(term_t),np.array(inter_tg[topic]),dist="jaccard_coef") for ii in i]),np.array(term_t).shape[0]*np.array(inter_tg[topic]).shape[0])
# query_t_bing_pdd=try_divide(sum([ii for i in pairwise_dist(np.array(term_t),np.array(bing_tg[topic]),dist="dice_dist") for ii in i]),np.array(term_t).shape[0]*np.array(bing_tg[topic]).shape[0])
# query_t_twitter_pdd=try_divide(sum([ii for i in pairwise_dist(np.array(term_t),np.array(twitter_tg[topic]),dist="dice_dist") for ii in i]),np.array(term_t).shape[0]*np.array(twitter_tg[topic]).shape[0])
# query_t_inter_pdd=try_divide(sum([ii for i in pairwise_dist(np.array(term_t),np.array(inter_tg[topic]),dist="dice_dist") for ii in i]),np.array(term_t).shape[0]*np.array(inter_tg[topic]).shape[0])
yield [query_u_bing_jd,query_u_twitter_jd,query_u_inter_jd,query_u_bing_dd,query_u_twitter_dd,query_u_inter_dd,\
query_u_bing_pjd,query_u_twitter_pjd,query_u_inter_pjd,query_u_bing_pdd,query_u_twitter_pdd,query_u_inter_pdd,\
query_b_bing_jd,query_b_twitter_jd,query_b_inter_jd,query_b_bing_dd,query_b_twitter_dd,query_b_inter_dd]
| 55.715385 | 210 | 0.697915 |
4a1e9dd11ed1261d5899d9e062ff16981885ebee
| 22,648 |
py
|
Python
|
src/pymwm/slit/samples/__init__.py
|
mnishida/PyMWM
|
0fffa2717c37ea258655ab9bf5196208e2be8fd1
|
[
"MIT"
] | 3 |
2020-04-16T14:55:34.000Z
|
2021-08-04T07:03:31.000Z
|
src/pymwm/slit/samples/__init__.py
|
mnishida/PyMWM
|
0fffa2717c37ea258655ab9bf5196208e2be8fd1
|
[
"MIT"
] | 1 |
2021-08-13T04:45:50.000Z
|
2021-08-18T03:33:08.000Z
|
src/pymwm/slit/samples/__init__.py
|
mnishida/PyMWM
|
0fffa2717c37ea258655ab9bf5196208e2be8fd1
|
[
"MIT"
] | 2 |
2021-04-05T07:10:26.000Z
|
2021-08-04T03:15:43.000Z
|
from __future__ import annotations
import cmath
from logging import getLogger
import numpy as np
import ray
import riip
from pymwm.utils import slit_utils
from pymwm.waveguide import Sampling
logger = getLogger(__package__)
class Samples(Sampling):
"""A class defining samples of phase constants of slit waveguide modes.
Attributes:
fill: An instance of Material class for the core
clad: An instance of Material class for the clad
size: A float indicating the size of core [um].
size2: A float indicating the optional size of core [um].
params: A dict whose keys and values are as follows:
'wl_max': A float indicating the maximum wavelength [um]
'wl_min': A float indicating the minimum wavelength [um]
'wl_imag': A float indicating the minimum value of
abs(c / f_imag) [um] where f_imag is the imaginary part of
the frequency.
'dw': A float indicating frequency interval
[rad * c / 1um]=[2.99792458e14 rad / s].
'num_n': An integer indicating the number of orders of modes.
'num_m': An integer indicating the number of modes in each
order and polarization.
ws: A 1D array indicating the real part of the angular frequencies
to be calculated [rad (c / 1um)]=[2.99792458e14 rad / s].
wis: A 1D array indicating the imaginary part of the angular
frequencies to be calculated [rad * (c / 1um)].
r: A float indicating the width of slit [um].
"""
def __init__(self, size: float, fill: dict, clad: dict, params: dict):
"""Init Samples class.
Args:
size (float): The width of slit [um].
fill (dict): Parameters for riip.Material class for the core.
clad (dict): Parameters for riip.Material class for the clad.
params (dict): Keys and values are as follows:
'wl_max' (float): The maximum wavelength [um].
Defaults to 5.0.
'wl_min' (float): The minimum wavelength [um].
Defaults to 0.4.
'wl_imag' (float): The minimum value of
abs(c / f_imag) [um] where f_imag is the imaginary part of
the frequency. Defaults to 5.0.
'dw' (float): The frequency interval [rad c / 1um]=[2.99792458e14 rad / s].
Defaults to 1 / 64.
'num_n (int)': The number of orders of modes.
'num_m' (int): The number of modes in each order and polarization
(= 1 in the slit case).
"""
num_m = params.setdefault("num_m", 1)
if num_m != 1:
logger.warning(
"num_m must be 1 if shape is slit." + "The set value is ignored."
)
params["num_m"] = 1
super().__init__(size, fill, clad, params)
self.r = size
@property
def shape(self):
return "slit"
@property
def num_all(self):
return 2 * self.params["num_n"]
def beta2_pec(self, w: complex, parity: str, num_n: int) -> np.ndarray:
"""Return squares of phase constants for a PEC waveguide
Args:
w (complex): Angular frequency
parity ("even" or "odd"): "even" ("odd") if even (odd) numbers in the list of n are evaluated.
num_n (int): Number of the modes.
Returns:
h2s (np.ndarray): The squares of phase constants, whose first
element is for TM mode and the rest is for both TE and TM modes.
"""
w_comp = w.real + 1j * w.imag
ns_all = list(range(num_n))
if parity == "even":
ns = np.array(ns_all[::2])
else:
ns = np.array(ns_all[1::2])
h2: np.ndarray = self.fill(w_comp) * w_comp ** 2 - (ns * np.pi / self.r) ** 2
return h2
def u(
self,
h2: complex | np.ndarray,
w: complex | np.ndarray,
e1: complex | np.ndarray,
) -> complex | np.ndarray:
# return cmath.sqrt(e1 * w ** 2 - h2) * self.r / 2
val: complex | np.ndarray = (
(1 + 1j) * np.sqrt(-0.5j * (e1 * w ** 2 - h2)) * self.r / 2
)
return val
def v(
self,
h2: complex | np.ndarray,
w: complex | np.ndarray,
e2: complex | np.ndarray,
) -> complex | np.ndarray:
# This definition is very important!!
# Other definitions can not give good results in some cases
val: complex | np.ndarray = (
(1 - 1j) * np.sqrt(0.5j * (-e2 * w ** 2 + h2)) * self.r / 2
)
return val
def eig_eq(
self, h2: complex, w: complex, pol: str, n: int, e1: complex, e2: complex
):
"""Return the value of the characteristic equation
Args:
h2: The square of the phase constant.
w: The angular frequency
pol: The polarization
n: The order of the modes
e1: The permittivity of the core
e2: The permittivity of the clad.
Returns:
val: A complex indicating the left-hand value of the characteristic
equation.
"""
u = self.u(h2, w, e1)
v = self.v(h2, w, e2)
if pol == "E":
if n % 2 == 0:
return u / v + cmath.tan(u)
else:
return u / v - 1 / cmath.tan(u)
else:
if n % 2 == 0:
return u * cmath.tan(u) - (e1 * v) / e2
else:
return u / cmath.tan(u) + (e1 * v) / e2
def beta2(
self,
w: complex,
pol: str,
parity: str,
num_n: int,
e1: complex,
e2: complex,
xis: np.ndarray,
) -> tuple[np.ndarray, np.ndarray]:
"""Return roots and convergences of the characteristic equation
Args:
w (complex): Angular frequency.
pol (str): 'E' or 'M' indicating the polarization.
parity ("even" or "odd"): "even" ("odd") if even (odd) numbers in the list of n are evaluated.
num_n (int): The number of modes.
            e1 (complex): Permittivity of the core.
            e2 (complex): Permittivity of the clad.
            xis (np.ndarray[complex]): Initial approximations for the roots,
                one element per evaluated order.
        Returns:
            xs: The roots, one element per evaluated order.
success: The convergence information for xs.
"""
if self.clad.label == "PEC":
xs = self.beta2_pec(w, parity, num_n)
s = np.ones_like(xs, dtype=bool)
if parity == "even" and pol == "E":
s[0] = False
return xs, s
from scipy.optimize import root
roots: list[complex] = []
vals: list[complex] = []
success: list[bool] = []
ns_all = list(range(num_n))
if parity == "even":
ns = ns_all[::2]
else:
ns = ns_all[1::2]
for i_n, n in enumerate(ns):
xi = xis[i_n]
if pol == "E" and n == 0:
vals.append(xi)
success.append(False)
continue
result = root(
slit_utils.func_cython,
np.array([xi.real, xi.imag]),
args=(w, pol, n, e1, e2, self.r, np.array(roots, dtype=complex)),
method="hybr",
options={"xtol": 1.0e-9},
)
x = result.x[0] + result.x[1] * 1j
if result.success:
roots.append(x)
# v = self.v(x, w, e2)
# if v.real > 0.0:
success.append(result.success)
# else:
# success.append(False)
vals.append(x)
return np.array(vals), np.array(success)
@staticmethod
def beta_from_beta2(x: np.ndarray):
return (1 + 1j) * np.sqrt(-0.5j * x)
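    # Added note (interpretation derived from the formula, not from the original
    # docs): writing x = r*exp(i*t), (1 + 1j)*sqrt(-0.5j*x) equals sqrt(r)*exp(i*t/2),
    # i.e. an ordinary complex square root whose branch cut is moved from the
    # negative real axis to the negative imaginary axis of x. A similar
    # construction is used in u() and v() above.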
def beta2_w_min(
self, pol: str, parity: str, num_n: int
) -> tuple[np.ndarray, np.ndarray]:
"""Return roots and convergences of the characteristic equation at
the lowest angular frequency, ws[0].
Args:
pol (str): 'E' or 'M' indicating the polarization.
parity ("even" or "odd"): "even" ("odd") if even (odd) numbers in the list of n are evaluated.
num_n (int): An integer indicating the number of modes.
Returns:
            xs (np.ndarray): A 1D array indicating the roots, one element per evaluated order.
success (np.ndarray): A 1D array indicating the convergence information for xs.
"""
if self.clad.label == "PEC":
xs = self.beta2_pec(self.ws[0], parity, num_n)
success = np.ones_like(xs, dtype=bool)
if parity == "even" and pol == "E":
success[0] = False
return xs, success
w_0 = 2 * np.pi / 10
e1 = self.fill(w_0)
e2_0 = self.clad(w_0) * 1000
de2 = (self.clad(w_0) - e2_0) / 5000
xis = xs = self.beta2_pec(w_0, parity, num_n)
success = np.ones_like(xs, dtype=bool)
for i in range(5001):
e2 = e2_0 + de2 * i
xs, success = self.beta2(w_0, pol, parity, num_n, e1, e2, xis)
for _, ok in enumerate(success):
if not ok:
xs[_] = xis[_]
xis = xs
dw = (self.ws[0] - w_0) / 100
for i in range(101):
w = w_0 + dw * i
e1 = self.fill(w)
e2 = self.clad(w)
xs, success = self.beta2(w, pol, parity, num_n, e1, e2, xis)
for _, ok in enumerate(success):
if not ok:
xs[_] = xis[_]
xis = xs
return xs, success
def betas_convs(self, xs_success_list: list) -> tuple[dict, dict]:
betas = {}
convs = {}
for i_pol, pol in enumerate(["M", "E"]):
xs_array, success_array = xs_success_list[i_pol]
num_n = xs_array.shape[2]
for n in range(num_n):
betas[(pol, n, 1)] = np.zeros(
(len(self.ws), len(self.wis)), dtype=complex
)
convs[(pol, n, 1)] = np.zeros((len(self.ws), len(self.wis)), dtype=bool)
for iwi in range(len(self.wis)):
for iwr in range(len(self.ws)):
for n in range(num_n):
x = xs_array[iwr, iwi][n]
betas[(pol, n, 1)][iwr, iwi] = self.beta_from_beta2(x)
convs[(pol, n, 1)][iwr, iwi] = success_array[iwr, iwi][n]
return betas, convs
def __call__(self, arg: tuple[str, str, int]) -> tuple[np.ndarray, np.ndarray]:
"""Return a dict of the roots of the characteristic equation
Args:
arg: (pol, parity, num_n)
pol ("E" or "M"): Polarization.
parity ("even" or "odd"): "even" ("odd") if even (odd) numbers in the list of n are evaluated.
num_n (int): The number of modes.
Returns:
betas: A dict containing arrays of roots, whose key is as follows:
(pol, n, m):
pol: 'E' or 'M' indicating the polarization.
n: An integer indicating the order of the mode.
m: An integer indicating the ordinal of the mode in the
same order.
convs: A dict containing the convergence information for betas,
whose key is the same as above.
"""
pol, parity, num_n = arg
num_ws = len(self.ws)
ns = list(range(num_n))
if parity == "even":
num = len(ns[::2])
else:
num = len(ns[1::2])
xs_array = np.zeros((num_ws, len(self.wis), num), dtype=complex)
success_array = np.zeros((num_ws, len(self.wis), num), dtype=bool)
iwr = iwi = 0
wi = self.wis[iwi]
xis, success = self.beta2_w_min(pol, parity, num_n)
xs_array[iwr, iwi] = xis
success_array[iwr, iwi] = success
for iwr in range(1, len(self.ws)):
wr = self.ws[iwr]
w = wr + 1j * wi
e1 = self.fill(w)
e2 = self.clad(w)
xs, success = self.beta2(w, pol, parity, num_n, e1, e2, xis)
xs = np.where(success, xs, xis)
xs_array[iwr, iwi] = xs
success_array[iwr, iwi] = success
xis = xs
for iwi in range(1, len(self.wis)):
wi = self.wis[iwi]
for iwr in range(len(self.ws)):
wr = self.ws[iwr]
w = wr + 1j * wi
e1 = self.fill(w)
e2 = self.clad(w)
if iwr == 0:
xis = xs_array[iwr, iwi - 1]
else:
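                    # Added note (inferred from the code): seed the root search with a
                    # linear extrapolation from the three previously solved grid points.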
xis = (
xs_array[iwr, iwi - 1]
+ xs_array[iwr - 1, iwi]
- xs_array[iwr - 1, iwi - 1]
)
xs, success = self.beta2(w, pol, parity, num_n, e1, e2, xis)
xs = np.where(success, xs, xis)
xs_array[iwr, iwi] = xs
success_array[iwr, iwi] = success
return xs_array, success_array
def wr_sampling(self, arg: tuple[str, str, int]) -> tuple[np.ndarray, np.ndarray]:
pol, parity, num_n = arg
num_ws = len(self.ws)
ns = list(range(num_n))
if parity == "even":
num = len(ns[::2])
else:
num = len(ns[1::2])
xs_array = np.zeros((num_ws, num), dtype=complex)
success_array = np.zeros((num_ws, num), dtype=bool)
iwr = iwi = 0
wi = self.wis[iwi]
xis, success = self.beta2_w_min(pol, parity, num_n)
xs_array[iwr] = xis
success_array[iwr] = success
xs0 = xs1 = xis
for iwr in range(1, len(self.ws)):
wr = self.ws[iwr]
w = wr + 1j * wi
e1 = self.fill(w)
e2 = self.clad(w)
xis = 2 * xs1 - xs0
xs, success = self.beta2(w, pol, parity, num_n, e1, e2, xis)
xs = np.where(success, xs, xis)
xs_array[iwr] = xs
success_array[iwr] = success
xs0 = xs1
xs1 = xs
return xs_array, success_array
def wi_sampling(
self, arg: tuple[str, str, int, int, np.ndarray]
) -> tuple[np.ndarray, np.ndarray]:
pol, parity, num_n, iwr, xis0 = arg
num = len(xis0)
xs_array = np.zeros((len(self.wis), num), dtype=complex)
success_array = np.zeros((len(self.wis), num), dtype=bool)
wr = self.ws[iwr]
xs0 = xs1 = xis0
for iwi in range(len(self.wis)):
wi = self.wis[iwi]
w = wr + 1j * wi
e1 = self.fill(w)
e2 = self.clad(w)
xis = 2 * xs1 - xs0
xs, success = self.beta2(w, pol, parity, num_n, e1, e2, xis)
xs = np.where(success, xs, xis)
xs0 = xs1
xs1 = xs
xs_array[iwi] = xs
success_array[iwi] = success
return xs_array, success_array
class SamplesLowLoss(Samples):
"""A class defining samples of phase constants of slit waveguide
modes in a virtual low-loss clad waveguide by subclassing the Samples
class.
Attributes:
fill: An instance of Material class for the core
clad: An instance of Material class for the clad
size: A float indicating the size of core [um].
size2: A float indicating the optional size of core [um].
params: A dict whose keys and values are as follows:
'wl_max': A float indicating the maximum wavelength [um]
'wl_min': A float indicating the minimum wavelength [um]
'wl_imag': A float indicating the minimum value of
abs(c / f_imag) [um] where f_imag is the imaginary part of
the frequency.
'dw': A float indicating frequency interval
[rad * c / 1um]=[2.99792458e14 rad / s].
'num_n': An integer indicating the number of orders of modes.
'num_m': An integer indicating the number of modes in each
order and polarization.
ws: A 1D array indicating the real part of the angular frequencies
to be calculated [rad (c / 1um)]=[2.99792458e14 rad / s].
wis: A 1D array indicating the imaginary part of the angular
frequencies to be calculated [rad * (c / 1um)].
r: A float indicating the width of slit [um].
"""
def __init__(self, size: float, fill: dict, clad: dict, params: dict):
"""Init SamplesLowLoss class.
Args:
size: A float indicating the width of the slit [um].
fill: An instance of Material class for the core
clad: An instance of Material class for the clad
params: A dict whose keys and values are as follows:
'wl_max': A float indicating the maximum wavelength [um]
(default: 5.0)
'wl_min': A float indicating the minimum wavelength [um]
(default: 0.4)
'wl_imag': A float indicating the minimum value of
abs(c / f_imag) [um] where f_imag is the imaginary part of
the frequency. (default: 5.0)
'dw': A float indicating frequency interval
[rad c / 1um]=[2.99792458e14 rad / s] (default: 1 / 64).
'num_n': An integer indicating the number of orders of modes.
'num_m': An integer indicating the number of modes in each
order and polarization.
"""
super().__init__(size, fill, clad, params)
def betas_convs(self, xs_success_list):
num_iwr = len(self.ws)
num_iwi = len(self.wis)
num_n = self.params["num_n"]
ns = list(range(num_n))
ns_e = ns[::2]
ns_o = ns[1::2]
betas = {}
convs = {}
for n in ns:
betas[("M", n, 1)] = np.zeros((len(self.ws), len(self.wis)), dtype=complex)
convs[("M", n, 1)] = np.zeros((len(self.ws), len(self.wis)), dtype=bool)
betas[("E", n, 1)] = np.zeros((len(self.ws), len(self.wis)), dtype=complex)
convs[("E", n, 1)] = np.zeros((len(self.ws), len(self.wis)), dtype=bool)
for iwr in range(num_iwr):
for iwi in range(num_iwi):
j = iwr * num_iwi + iwi
w = self.ws[iwr] + 1j * self.wis[iwi]
e2 = self.clad(w)
for i_n, n in enumerate(ns_e):
x = xs_success_list[j][0][0][i_n]
v = self.v(x, w, e2)
betas[("M", n, 1)][iwr, iwi] = self.beta_from_beta2(x)
convs[("M", n, 1)][iwr, iwi] = (
xs_success_list[j][1][0][i_n] if v.real > abs(v.imag) else False
)
x = xs_success_list[j][0][2][i_n]
v = self.v(x, w, e2)
betas[("E", n, 1)][iwr, iwi] = self.beta_from_beta2(x)
convs[("E", n, 1)][iwr, iwi] = (
xs_success_list[j][1][2][i_n] if v.real > abs(v.imag) else False
)
for i_n, n in enumerate(ns_o):
x = xs_success_list[j][0][1][i_n]
v = self.v(x, w, e2)
betas[("M", n, 1)][iwr, iwi] = self.beta_from_beta2(x)
convs[("M", n, 1)][iwr, iwi] = (
xs_success_list[j][1][1][i_n] if v.real > abs(v.imag) else False
)
x = xs_success_list[j][0][3][i_n]
v = self.v(x, w, e2)
betas[("E", n, 1)][iwr, iwi] = self.beta_from_beta2(x)
convs[("E", n, 1)][iwr, iwi] = (
xs_success_list[j][1][3][i_n] if v.real > abs(v.imag) else False
)
return betas, convs
@ray.remote
class SamplesForRay(Samples):
"""A derived class in order to create ray actor."""
def __init__(self, size: float, fill: dict, clad: dict, params: dict):
super().__init__(size, fill, clad, params)
@ray.remote
class SamplesLowLossForRay(SamplesLowLoss):
"""A derived class in order to create ray actor."""
def __init__(self, size: float, fill: dict, clad: dict, params: dict):
super().__init__(size, fill, clad, params)
def task(self, arg: tuple[int, int, list[np.ndarray]]):
"""Return a dict of the roots of the characteristic equation
Args:
arg: (iwr, iwi, xis_list)
iwr: The ordinal of the Re(w).
iwi: The ordinal of the Im(w).
                xis_list: The initial guesses of the roots, one array per
                    (polarization, parity) combination
        Returns:
            xs_list: A list of 1D arrays indicating the roots, one array per
                (polarization, parity) combination
            success_list: A list of 1D arrays indicating the convergence
                information for xs_list, in the same order
"""
num_n = self.params["num_n"]
iwr, iwi, xis_list = arg
im_factor = self.clad.im_factor
self.clad.im_factor = 1.0
wr = self.ws[iwr]
wi = self.wis[iwi]
w = wr + 1j * wi
e1 = self.fill(w)
xs_list = []
success_list = []
for i_pp, x0s in enumerate(xis_list):
if i_pp == 0:
pol = "M"
parity = "even"
elif i_pp == 1:
pol = "M"
parity = "odd"
elif i_pp == 2:
pol = "E"
parity = "even"
else:
pol = "E"
parity = "odd"
xis = xs = x0s
success = np.ones_like(xs, dtype=bool)
for i in range(1, 8):
self.clad.im_factor = 0.5 ** i
if i == 7 or self.clad.im_factor < im_factor:
self.clad.im_factor = im_factor
e2 = self.clad(w)
xs, success = self.beta2(w, pol, parity, num_n, e1, e2, xis)
for _, ok in enumerate(success):
if not ok:
xs[_] = xis[_]
xis = xs
xs_list.append(xs)
success_list.append(success)
return xs_list, success_list
| 39.594406 | 110 | 0.507639 |
4a1e9e01be83a92a629b788ab06c22ade479d950
| 468 |
py
|
Python
|
Projetos Python/pythonexercicios/venv/Scripts/easy_install-script.py
|
Moyses-Nunes/Projetos-Python
|
71ae170fb0d7be6afea18608bca630b57b9f0dff
|
[
"MIT"
] | null | null | null |
Projetos Python/pythonexercicios/venv/Scripts/easy_install-script.py
|
Moyses-Nunes/Projetos-Python
|
71ae170fb0d7be6afea18608bca630b57b9f0dff
|
[
"MIT"
] | null | null | null |
Projetos Python/pythonexercicios/venv/Scripts/easy_install-script.py
|
Moyses-Nunes/Projetos-Python
|
71ae170fb0d7be6afea18608bca630b57b9f0dff
|
[
"MIT"
] | null | null | null |
#!"C:\Users\MOYSES NUNES\PycharmProjects\pythonexercicios\venv\Scripts\python.exe"
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==39.1.0','console_scripts','easy_install'
__requires__ = 'setuptools==39.1.0'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('setuptools==39.1.0', 'console_scripts', 'easy_install')()
)
| 36 | 83 | 0.702991 |
4a1e9fec9695cf49d31366d863d6b659efe86404
| 8,700 |
py
|
Python
|
fastai/utils/collect_env.py
|
Qumeric/fastai
|
749b18fec30fe18b6059e5c08dd40cd19e08b2b1
|
[
"Apache-2.0"
] | 1 |
2018-12-15T11:14:26.000Z
|
2018-12-15T11:14:26.000Z
|
fastai/utils/collect_env.py
|
Qumeric/fastai
|
749b18fec30fe18b6059e5c08dd40cd19e08b2b1
|
[
"Apache-2.0"
] | 1 |
2022-02-26T12:32:11.000Z
|
2022-02-26T12:32:11.000Z
|
fastai/utils/collect_env.py
|
Qumeric/fastai
|
749b18fec30fe18b6059e5c08dd40cd19e08b2b1
|
[
"Apache-2.0"
] | null | null | null |
"Utility functions to help deal with user environment"
from ..imports.torch import *
from ..core import *
import fastprogress
import subprocess
__all__ = ['show_install', 'check_perf']
def get_env(name):
"Return env var value if it's defined and not an empty string, or return Unknown"
if name in os.environ and len(os.environ[name]):
return os.environ[name]
else:
return "Unknown"
def show_install(show_nvidia_smi:bool=False):
"Print user's setup information: python -c 'import fastai; fastai.show_install()'"
import platform, fastai.version
rep = []
opt_mods = []
rep.append(["=== Software ===", None])
rep.append(["python", platform.python_version()])
rep.append(["fastai", fastai.__version__])
rep.append(["fastprogress", fastprogress.__version__])
rep.append(["torch", torch.__version__])
# nvidia-smi
cmd = "nvidia-smi"
have_nvidia_smi = False
try:
result = subprocess.run(cmd.split(), shell=False, check=False, stdout=subprocess.PIPE)
except:
pass
else:
if result.returncode == 0 and result.stdout:
have_nvidia_smi = True
# XXX: if nvidia-smi is not available, another check could be:
# /proc/driver/nvidia/version on most systems, since it's the
# currently active version
if have_nvidia_smi:
smi = result.stdout.decode('utf-8')
# matching: "Driver Version: 396.44"
match = re.findall(r'Driver Version: +(\d+\.\d+)', smi)
if match: rep.append(["nvidia driver", match[0]])
available = "available" if torch.cuda.is_available() else "**Not available** "
rep.append(["torch cuda", f"{torch.version.cuda} / is {available}"])
# no point reporting on cudnn if cuda is not available, as it
# seems to be enabled at times even on cpu-only setups
if torch.cuda.is_available():
enabled = "enabled" if torch.backends.cudnn.enabled else "**Not enabled** "
rep.append(["torch cudnn", f"{torch.backends.cudnn.version()} / is {enabled}"])
rep.append(["\n=== Hardware ===", None])
# it's possible that torch might not see what nvidia-smi sees?
gpu_total_mem = []
nvidia_gpu_cnt = 0
if have_nvidia_smi:
try:
cmd = "nvidia-smi --query-gpu=memory.total --format=csv,nounits,noheader"
result = subprocess.run(cmd.split(), shell=False, check=False, stdout=subprocess.PIPE)
except:
print("have nvidia-smi, but failed to query it")
else:
if result.returncode == 0 and result.stdout:
output = result.stdout.decode('utf-8')
gpu_total_mem = [int(x) for x in output.strip().split('\n')]
nvidia_gpu_cnt = len(gpu_total_mem)
if nvidia_gpu_cnt: rep.append(["nvidia gpus", nvidia_gpu_cnt])
torch_gpu_cnt = torch.cuda.device_count()
if torch_gpu_cnt:
rep.append(["torch devices", torch_gpu_cnt])
# information for each gpu
for i in range(torch_gpu_cnt):
rep.append([f" - gpu{i}", (f"{gpu_total_mem[i]}MB | " if gpu_total_mem else "") + torch.cuda.get_device_name(i)])
else:
if nvidia_gpu_cnt:
rep.append([f"Have {nvidia_gpu_cnt} GPU(s), but torch can't use them (check nvidia driver)", None])
else:
rep.append([f"No GPUs available", None])
rep.append(["\n=== Environment ===", None])
rep.append(["platform", platform.platform()])
if platform.system() == 'Linux':
try:
import distro
except ImportError:
            opt_mods.append('distro')
# partial distro info
rep.append(["distro", platform.uname().version])
else:
# full distro info
rep.append(["distro", ' '.join(distro.linux_distribution())])
rep.append(["conda env", get_env('CONDA_DEFAULT_ENV')])
rep.append(["python", sys.executable])
rep.append(["sys.path", "\n".join(sys.path)])
print("\n\n```text")
keylen = max([len(e[0]) for e in rep if e[1] is not None])
for e in rep:
print(f"{e[0]:{keylen}}", (f": {e[1]}" if e[1] is not None else ""))
if have_nvidia_smi:
if show_nvidia_smi == True: print(f"\n{smi}")
else:
if torch_gpu_cnt:
# have gpu, but no nvidia-smi
print("no nvidia-smi is found")
else:
print("no supported gpus found on this system")
print("```\n")
print("Please make sure to include opening/closing ``` when you paste into forums/github to make the reports appear formatted as code sections.\n")
if opt_mods:
print("Optional package(s) to enhance the diagnostics can be installed with:")
print(f"pip install {' '.join(opt_mods)}")
print("Once installed, re-run this utility to get the additional information")
def pypi_module_version_is_available(module, version):
"Check whether module==version is available on pypi"
# returns True/False (or None if failed to execute the check)
# using a hack that when passing "module==" w/ no version number to pip
# it "fails" and returns all the available versions in stderr
try:
cmd = f"pip install {module}=="
result = subprocess.run(cmd.split(), shell=False, check=False,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
except Exception as e:
print(f"Error: {e}")
return None
else:
if result.returncode == 1 and result.stderr:
output = result.stderr.decode('utf-8')
return True if version in output else False
else:
print(f"Some error in {cmd}")
return None
def check_perf():
"Suggest how to improve the setup to speed things up"
from PIL import features, Image
from packaging import version
import pynvml
print("Running performance checks.")
# libjpeg_turbo check
print("\n*** libjpeg-turbo status")
if version.parse(Image.PILLOW_VERSION) >= version.parse("5.4.0"):
if features.check_feature('libjpeg_turbo'):
print("✔ libjpeg-turbo is on")
else:
print("✘ libjpeg-turbo is not on. It's recommended you install libjpeg-turbo to speed up JPEG decoding. See https://docs.fast.ai/performance.html#libjpeg-turbo")
else:
print(f"❓ libjpeg-turbo's status can't be derived - need Pillow(-SIMD)? >= 5.4.0 to tell, current version {Image.PILLOW_VERSION}")
# XXX: remove this check/note once Pillow and Pillow-SIMD 5.4.0 is available
pillow_ver_5_4_is_avail = pypi_module_version_is_available("Pillow", "5.4.0")
if pillow_ver_5_4_is_avail == False:
print("5.4.0 is not yet available, other than the dev version on github, which can be installed via pip from git+https://github.com/python-pillow/Pillow. See https://docs.fast.ai/performance.html#libjpeg-turbo")
# Pillow-SIMD check
print("\n*** Pillow-SIMD status")
if re.search(r'\.post\d+', Image.PILLOW_VERSION):
print(f"✔ Running Pillow-SIMD {Image.PILLOW_VERSION}")
else:
print(f"✘ Running Pillow {Image.PILLOW_VERSION}; It's recommended you install Pillow-SIMD to speed up image resizing and other operations. See https://docs.fast.ai/performance.html#pillow-simd")
# CUDA version check
    # compatibility table: key = minimum nvidia driver version required for value = cuda version
# note: windows nvidia driver version is slightly higher, see:
# https://docs.nvidia.com/cuda/cuda-toolkit-release-notes/index.html
# note: add new entries if pytorch starts supporting new cudaXX
nvidia2cuda = {
"410.00": "10.0",
"384.81": "9.0",
"367.48": "8.0",
}
print("\n*** CUDA status")
if torch.cuda.is_available():
pynvml.nvmlInit()
nvidia_ver = pynvml.nvmlSystemGetDriverVersion().decode('utf-8')
cuda_ver = torch.version.cuda
max_cuda = "8.0"
for k in sorted(nvidia2cuda.keys()):
if version.parse(nvidia_ver) > version.parse(k): max_cuda = nvidia2cuda[k]
if version.parse(str(max_cuda)) <= version.parse(cuda_ver):
print(f"✔ Running the latest CUDA {cuda_ver} with NVIDIA driver {nvidia_ver}")
else:
print(f"✘ You are running pytorch built against cuda {cuda_ver}, your NVIDIA driver {nvidia_ver} supports cuda10. See https://pytorch.org/get-started/locally/ to install pytorch built against the faster CUDA version.")
else:
print(f"❓ Running cpu-only torch version, CUDA check is not relevant")
print("\nRefer to https://docs.fast.ai/performance.html to make sense out of these checks and suggestions.")
| 40.654206 | 230 | 0.638046 |
4a1ea01156105108ab9f157656d16882b0811bff
| 6,900 |
py
|
Python
|
webserver/python2.7/site-packages/sympy/assumptions/tests/test_refine.py
|
maxr1876/Radix
|
bf9a5470908ea0823c8398565086b1e6b960c73b
|
[
"BSD-2-Clause"
] | 1 |
2015-07-04T12:40:41.000Z
|
2015-07-04T12:40:41.000Z
|
webserver/python2.7/site-packages/sympy/assumptions/tests/test_refine.py
|
maxr1876/Radix
|
bf9a5470908ea0823c8398565086b1e6b960c73b
|
[
"BSD-2-Clause"
] | 1 |
2015-11-01T17:20:32.000Z
|
2015-11-01T17:20:32.000Z
|
webserver/python2.7/site-packages/sympy/assumptions/tests/test_refine.py
|
maxr1876/Radix
|
bf9a5470908ea0823c8398565086b1e6b960c73b
|
[
"BSD-2-Clause"
] | 1 |
2018-10-22T09:17:11.000Z
|
2018-10-22T09:17:11.000Z
|
from sympy import (Abs, exp, Expr, I, pi, Q, Rational, refine, S, sqrt,
atan, atan2, nan, Symbol)
from sympy.abc import x, y, z
from sympy.core.relational import Eq, Ne
from sympy.functions.elementary.piecewise import Piecewise
from sympy.utilities.pytest import slow
def test_Abs():
assert refine(Abs(x), Q.positive(x)) == x
assert refine(1 + Abs(x), Q.positive(x)) == 1 + x
assert refine(Abs(x), Q.negative(x)) == -x
assert refine(1 + Abs(x), Q.negative(x)) == 1 - x
assert refine(Abs(x**2)) != x**2
assert refine(Abs(x**2), Q.real(x)) == x**2
@slow
def test_pow1():
assert refine((-1)**x, Q.even(x)) == 1
assert refine((-1)**x, Q.odd(x)) == -1
assert refine((-2)**x, Q.even(x)) == 2**x
# nested powers
assert refine(sqrt(x**2)) != Abs(x)
assert refine(sqrt(x**2), Q.complex(x)) != Abs(x)
assert refine(sqrt(x**2), Q.real(x)) == Abs(x)
assert refine(sqrt(x**2), Q.positive(x)) == x
assert refine((x**3)**(S(1)/3)) != x
assert refine((x**3)**(S(1)/3), Q.real(x)) != x
assert refine((x**3)**(S(1)/3), Q.positive(x)) == x
assert refine(sqrt(1/x), Q.real(x)) != 1/sqrt(x)
assert refine(sqrt(1/x), Q.positive(x)) == 1/sqrt(x)
@slow
def test_pow2():
# powers of (-1)
assert refine((-1)**(x + y), Q.even(x)) == (-1)**y
assert refine((-1)**(x + y + z), Q.odd(x) & Q.odd(z)) == (-1)**y
assert refine((-1)**(x + y + 1), Q.odd(x)) == (-1)**y
assert refine((-1)**(x + y + 2), Q.odd(x)) == (-1)**(y + 1)
assert refine((-1)**(x + 3)) == (-1)**(x + 1)
@slow
def test_pow3():
# continuation
assert refine((-1)**((-1)**x/2 - S.Half), Q.integer(x)) == (-1)**x
assert refine((-1)**((-1)**x/2 + S.Half), Q.integer(x)) == (-1)**(x + 1)
assert refine((-1)**((-1)**x/2 + 5*S.Half), Q.integer(x)) == (-1)**(x + 1)
@slow
def test_pow4():
assert refine((-1)**((-1)**x/2 - 7*S.Half), Q.integer(x)) == (-1)**(x + 1)
assert refine((-1)**((-1)**x/2 - 9*S.Half), Q.integer(x)) == (-1)**x
# powers of Abs
assert refine(Abs(x)**2, Q.real(x)) == x**2
assert refine(Abs(x)**3, Q.real(x)) == Abs(x)**3
assert refine(Abs(x)**2) == Abs(x)**2
def test_exp():
x = Symbol('x', integer=True)
assert refine(exp(pi*I*2*x)) == 1
assert refine(exp(pi*I*2*(x + Rational(1, 2)))) == -1
assert refine(exp(pi*I*2*(x + Rational(1, 4)))) == I
assert refine(exp(pi*I*2*(x + Rational(3, 4)))) == -I
def test_Relational():
assert not refine(x < 0, ~Q.is_true(x < 0))
assert refine(x < 0, Q.is_true(x < 0))
assert refine(x < 0, Q.is_true(0 > x)) == True
assert refine(x < 0, Q.is_true(y < 0)) == (x < 0)
assert not refine(x <= 0, ~Q.is_true(x <= 0))
assert refine(x <= 0, Q.is_true(x <= 0))
assert refine(x <= 0, Q.is_true(0 >= x)) == True
assert refine(x <= 0, Q.is_true(y <= 0)) == (x <= 0)
assert not refine(x > 0, ~Q.is_true(x > 0))
assert refine(x > 0, Q.is_true(x > 0))
assert refine(x > 0, Q.is_true(0 < x)) == True
assert refine(x > 0, Q.is_true(y > 0)) == (x > 0)
assert not refine(x >= 0, ~Q.is_true(x >= 0))
assert refine(x >= 0, Q.is_true(x >= 0))
assert refine(x >= 0, Q.is_true(0 <= x)) == True
assert refine(x >= 0, Q.is_true(y >= 0)) == (x >= 0)
assert not refine(Eq(x, 0), ~Q.is_true(Eq(x, 0)))
assert refine(Eq(x, 0), Q.is_true(Eq(x, 0)))
assert refine(Eq(x, 0), Q.is_true(Eq(0, x))) == True
assert refine(Eq(x, 0), Q.is_true(Eq(y, 0))) == Eq(x, 0)
assert not refine(Ne(x, 0), ~Q.is_true(Ne(x, 0)))
assert refine(Ne(x, 0), Q.is_true(Ne(0, x))) == True
assert refine(Ne(x, 0), Q.is_true(Ne(x, 0)))
assert refine(Ne(x, 0), Q.is_true(Ne(y, 0))) == (Ne(x, 0))
def test_Piecewise():
assert refine(Piecewise((1, x < 0), (3, True)), Q.is_true(x < 0)) == 1
assert refine(Piecewise((1, x < 0), (3, True)), ~Q.is_true(x < 0)) == 3
assert refine(Piecewise((1, x < 0), (3, True)), Q.is_true(y < 0)) == \
Piecewise((1, x < 0), (3, True))
assert refine(Piecewise((1, x > 0), (3, True)), Q.is_true(x > 0)) == 1
assert refine(Piecewise((1, x > 0), (3, True)), ~Q.is_true(x > 0)) == 3
assert refine(Piecewise((1, x > 0), (3, True)), Q.is_true(y > 0)) == \
Piecewise((1, x > 0), (3, True))
assert refine(Piecewise((1, x <= 0), (3, True)), Q.is_true(x <= 0)) == 1
assert refine(Piecewise((1, x <= 0), (3, True)), ~Q.is_true(x <= 0)) == 3
assert refine(Piecewise((1, x <= 0), (3, True)), Q.is_true(y <= 0)) == \
Piecewise((1, x <= 0), (3, True))
assert refine(Piecewise((1, x >= 0), (3, True)), Q.is_true(x >= 0)) == 1
assert refine(Piecewise((1, x >= 0), (3, True)), ~Q.is_true(x >= 0)) == 3
assert refine(Piecewise((1, x >= 0), (3, True)), Q.is_true(y >= 0)) == \
Piecewise((1, x >= 0), (3, True))
assert refine(Piecewise((1, Eq(x, 0)), (3, True)), Q.is_true(Eq(x, 0)))\
== 1
assert refine(Piecewise((1, Eq(x, 0)), (3, True)), Q.is_true(Eq(0, x)))\
== 1
assert refine(Piecewise((1, Eq(x, 0)), (3, True)), ~Q.is_true(Eq(x, 0)))\
== 3
assert refine(Piecewise((1, Eq(x, 0)), (3, True)), ~Q.is_true(Eq(0, x)))\
== 3
assert refine(Piecewise((1, Eq(x, 0)), (3, True)), Q.is_true(Eq(y, 0)))\
== Piecewise((1, Eq(x, 0)), (3, True))
assert refine(Piecewise((1, Ne(x, 0)), (3, True)), Q.is_true(Ne(x, 0)))\
== 1
assert refine(Piecewise((1, Ne(x, 0)), (3, True)), ~Q.is_true(Ne(x, 0)))\
== 3
assert refine(Piecewise((1, Ne(x, 0)), (3, True)), Q.is_true(Ne(y, 0)))\
== Piecewise((1, Ne(x, 0)), (3, True))
def test_atan2():
assert refine(atan2(y, x), Q.real(y) & Q.positive(x)) == atan(y/x)
assert refine(atan2(y, x), Q.negative(y) & Q.positive(x)) == atan(y/x)
assert refine(atan2(y, x), Q.negative(y) & Q.negative(x)) == atan(y/x) - pi
assert refine(atan2(y, x), Q.positive(y) & Q.negative(x)) == atan(y/x) + pi
assert refine(atan2(y, x), Q.zero(y) & Q.negative(x)) == pi
assert refine(atan2(y, x), Q.positive(y) & Q.zero(x)) == pi/2
assert refine(atan2(y, x), Q.negative(y) & Q.zero(x)) == -pi/2
assert refine(atan2(y, x), Q.zero(y) & Q.zero(x)) == nan
def test_func_args():
class MyClass(Expr):
# A class with nontrivial .func
def __init__(self, *args):
self.my_member = ""
@property
def func(self):
def my_func(*args):
obj = MyClass(*args)
obj.my_member = self.my_member
return obj
return my_func
x = MyClass()
x.my_member = "A very important value"
assert x.my_member == refine(x).my_member
def test_eval_refine():
from sympy.core.expr import Expr
class MockExpr(Expr):
def _eval_refine(self, assumptions):
return True
mock_obj = MockExpr()
assert refine(mock_obj)
| 38.983051 | 79 | 0.52942 |
4a1ea01a8247d9096bb816b63e26002061fa93a6
| 10,713 |
py
|
Python
|
clearml/utilities/locks/utils.py
|
mmiller-max/clearml
|
fd2d6c6f5d46cad3e406e88eeb4d805455b5b3d8
|
[
"Apache-2.0"
] | 2,097 |
2019-06-11T14:36:25.000Z
|
2020-12-21T03:52:59.000Z
|
clearml/utilities/locks/utils.py
|
mmiller-max/clearml
|
fd2d6c6f5d46cad3e406e88eeb4d805455b5b3d8
|
[
"Apache-2.0"
] | 347 |
2020-12-23T22:38:48.000Z
|
2022-03-31T20:01:06.000Z
|
clearml/utilities/locks/utils.py
|
mmiller-max/clearml
|
fd2d6c6f5d46cad3e406e88eeb4d805455b5b3d8
|
[
"Apache-2.0"
] | 256 |
2019-06-11T14:36:28.000Z
|
2020-12-18T08:32:47.000Z
|
import os
import time
import atexit
import tempfile
import contextlib
from multiprocessing import RLock as ProcessRLock
from . import exceptions
from . import constants
from . import portalocker
current_time = getattr(time, "monotonic", time.time)
DEFAULT_TIMEOUT = 10 ** 8
DEFAULT_CHECK_INTERVAL = 0.25
LOCK_METHOD = constants.LOCK_EX | constants.LOCK_NB
__all__ = [
'Lock',
'RLock',
'open_atomic',
]
@contextlib.contextmanager
def open_atomic(filename, binary=True):
'''Open a file for atomic writing. Instead of locking this method allows
you to write the entire file and move it to the actual location. Note that
this makes the assumption that a rename is atomic on your platform which
is generally the case but not a guarantee.
http://docs.python.org/library/os.html#os.rename
>>> filename = 'test_file.txt'
>>> if os.path.exists(filename):
... os.remove(filename)
>>> with open_atomic(filename) as fh:
... written = fh.write(b'test')
>>> assert os.path.exists(filename)
>>> os.remove(filename)
'''
assert not os.path.exists(filename), '%r exists' % filename
path, name = os.path.split(filename)
# Create the parent directory if it doesn't exist
if path and not os.path.isdir(path): # pragma: no cover
os.makedirs(path)
temp_fh = tempfile.NamedTemporaryFile(
mode=binary and 'wb' or 'w',
dir=path,
delete=False,
)
yield temp_fh
temp_fh.flush()
os.fsync(temp_fh.fileno())
temp_fh.close()
try:
os.rename(temp_fh.name, filename)
finally:
try:
os.remove(temp_fh.name)
except Exception:
pass
class Lock(object):
def __init__(
self, filename, mode='a', timeout=DEFAULT_TIMEOUT,
check_interval=DEFAULT_CHECK_INTERVAL, fail_when_locked=False,
flags=LOCK_METHOD, **file_open_kwargs):
'''Lock manager with build-in timeout
filename -- filename
mode -- the open mode, 'a' or 'ab' should be used for writing
truncate -- use truncate to emulate 'w' mode, None is disabled, 0 is
truncate to 0 bytes
timeout -- timeout when trying to acquire a lock
check_interval -- check interval while waiting
fail_when_locked -- after the initial lock failed, return an error
or lock the file
**file_open_kwargs -- The kwargs for the `open(...)` call
        fail_when_locked is useful when multiple threads/processes can race
        when creating a file. If set to True, an AlreadyLocked exception is
        raised after the first failed attempt instead of retrying until the
        timeout expires.
Note that the file is opened first and locked later. So using 'w' as
mode will result in truncate _BEFORE_ the lock is checked.
'''
if 'w' in mode:
truncate = True
mode = mode.replace('w', 'a')
else:
truncate = False
self.fh = None
self.filename = filename
self.mode = mode
self.truncate = truncate
self.timeout = timeout
self.check_interval = check_interval
self.fail_when_locked = fail_when_locked
self.flags = flags
self.file_open_kwargs = file_open_kwargs
def acquire(
self, timeout=None, check_interval=None, fail_when_locked=None):
'''Acquire the locked filehandle'''
if timeout is None:
timeout = self.timeout
if timeout is None:
timeout = 0
if check_interval is None:
check_interval = self.check_interval
if fail_when_locked is None:
fail_when_locked = self.fail_when_locked
# If we already have a filehandle, return it
fh = self.fh
if fh:
return fh
# Get a new filehandler
fh = self._get_fh()
try:
# Try to lock
fh = self._get_lock(fh)
except exceptions.LockException as exception:
# Try till the timeout has passed
timeoutend = current_time() + timeout
while timeoutend > current_time():
# Wait a bit
time.sleep(check_interval)
# Try again
try:
# We already tried to the get the lock
# If fail_when_locked is true, then stop trying
if fail_when_locked:
raise exceptions.AlreadyLocked(exception)
else: # pragma: no cover
# We've got the lock
fh = self._get_lock(fh)
break
except exceptions.LockException:
pass
else:
# We got a timeout... reraising
raise exceptions.LockException(exception)
# Prepare the filehandle (truncate if needed)
fh = self._prepare_fh(fh)
self.fh = fh
return fh
def release(self):
'''Releases the currently locked file handle'''
if self.fh:
# noinspection PyBroadException
try:
portalocker.unlock(self.fh)
except Exception:
pass
# noinspection PyBroadException
try:
self.fh.close()
except Exception:
pass
self.fh = None
def delete_lock_file(self):
# type: () -> bool
"""
Remove the local file used for locking (fail if file is locked)
:return: True if successful
"""
if self.fh:
return False
# noinspection PyBroadException
try:
os.unlink(path=self.filename)
except BaseException:
return False
return True
def _get_fh(self):
'''Get a new filehandle'''
# Create the parent directory if it doesn't exist
path, name = os.path.split(self.filename)
if path and not os.path.isdir(path): # pragma: no cover
os.makedirs(path, exist_ok=True)
return open(self.filename, self.mode, **self.file_open_kwargs)
def _get_lock(self, fh):
'''
Try to lock the given filehandle
returns LockException if it fails'''
portalocker.lock(fh, self.flags)
return fh
def _prepare_fh(self, fh):
'''
Prepare the filehandle for usage
If truncate is set, the file is truncated to 0 bytes (emulating 'w' mode)
'''
if self.truncate:
fh.seek(0)
fh.truncate(0)
return fh
def __enter__(self):
return self.acquire()
def __exit__(self, type_, value, tb):
self.release()
def __delete__(self, instance): # pragma: no cover
instance.release()
class RLock(Lock):
"""
A reentrant lock, functions in a similar way to threading.RLock in that it
can be acquired multiple times. When the corresponding number of release()
calls have been made, the lock will finally release the underlying file lock.
"""
def __init__(
self, filename, mode='a', timeout=DEFAULT_TIMEOUT,
check_interval=DEFAULT_CHECK_INTERVAL, fail_when_locked=False,
flags=LOCK_METHOD):
super(RLock, self).__init__(filename, mode, timeout, check_interval,
fail_when_locked, flags)
self._acquire_count = 0
self._lock = ProcessRLock()
self._pid = os.getpid()
def acquire(self, timeout=None, check_interval=None, fail_when_locked=None):
if self._lock:
# cleanup bad python behaviour when forking while lock is acquired
# see Issue https://github.com/allegroai/clearml-agent/issues/73
# and https://bugs.python.org/issue6721
if self._pid != os.getpid():
# noinspection PyBroadException
try:
if self._lock._semlock._count(): # noqa
# this should never happen unless python forgot to call _after_fork
self._lock._semlock._after_fork() # noqa
except BaseException:
pass
if not self._lock.acquire(block=timeout != 0, timeout=timeout):
# We got a timeout... reraising
raise exceptions.LockException()
# check if we need to recreate the file lock on another subprocess
if self._pid != os.getpid():
self._pid = os.getpid()
self._acquire_count = 0
if self.fh:
# noinspection PyBroadException
try:
portalocker.unlock(self.fh)
self.fh.close()
except Exception:
pass
self.fh = None
if self._acquire_count >= 1:
fh = self.fh
else:
fh = super(RLock, self).acquire(timeout, check_interval,
fail_when_locked)
self._acquire_count += 1
return fh
def release(self):
if self._acquire_count == 0:
raise exceptions.LockException(
"Cannot release more times than acquired")
if self._acquire_count == 1:
super(RLock, self).release()
self._acquire_count -= 1
if self._lock:
self._lock.release()
def __del__(self):
self._lock = None
# try to remove the file when we are done
if not os.path.isfile(self.filename):
return
try:
self.acquire(timeout=0)
try:
os.unlink(self.filename)
removed = True
except Exception:
removed = False
self.release()
if not removed:
try:
os.unlink(self.filename)
except Exception:
pass
except Exception:
pass
class TemporaryFileLock(Lock):
def __init__(self, filename='.lock', timeout=DEFAULT_TIMEOUT,
check_interval=DEFAULT_CHECK_INTERVAL, fail_when_locked=True,
flags=LOCK_METHOD):
Lock.__init__(self, filename=filename, mode='w', timeout=timeout,
check_interval=check_interval,
fail_when_locked=fail_when_locked, flags=flags)
atexit.register(self.release)
def release(self):
Lock.release(self)
if os.path.isfile(self.filename): # pragma: no branch
os.unlink(self.filename)
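# --- Illustrative usage (editor's sketch, not part of the original module) ---
# A minimal example of how the Lock and RLock classes above are typically
# driven. The lock file name 'example.lock' is an arbitrary placeholder.
def _example_lock_usage():  # pragma: no cover
    # Exclusive lock: released automatically when the context manager exits.
    with Lock('example.lock', timeout=5, fail_when_locked=True) as fh:
        fh.write('owned by this process\n')
    # Reentrant lock: acquire() may be nested; the underlying file lock is
    # only released after the matching number of release() calls.
    rlock = RLock('example.lock')
    rlock.acquire()
    rlock.acquire()
    rlock.release()
    rlock.release()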
| 31.142442 | 91 | 0.573135 |
4a1ea160b71fd22084cb9fbbc314f649c4ba2c9a
| 1,696 |
py
|
Python
|
examples/plot_cluster_dl85.py
|
aglingael/dl85_dist_source
|
42a0bb66c923f2b691cd0e6bf8b29c150b90f76c
|
[
"MIT"
] | 9 |
2020-01-25T07:47:27.000Z
|
2020-03-14T09:12:28.000Z
|
examples/plot_cluster_dl85.py
|
aglingael/dl85_dist_source
|
42a0bb66c923f2b691cd0e6bf8b29c150b90f76c
|
[
"MIT"
] | null | null | null |
examples/plot_cluster_dl85.py
|
aglingael/dl85_dist_source
|
42a0bb66c923f2b691cd0e6bf8b29c150b90f76c
|
[
"MIT"
] | 1 |
2020-03-31T16:59:07.000Z
|
2020-03-31T16:59:07.000Z
|
"""
===================================
DL8.5 default predictive clustering
===================================
This example illustrates how to use the DL85Cluster class for predictive clustering.
A second implementation of predictive clustering is provided in the plot_cluster_user.py
example.
"""
import numpy as np
from sklearn.model_selection import train_test_split
import time
from pydl85 import DL85Cluster
dataset = np.genfromtxt("../datasets/anneal.txt", delimiter=' ')
X = dataset[:, 1:]
X_train, X_test = train_test_split(X, test_size=0.2, random_state=0)
print("####################################################################\n"
"# DL8.5 default clustering #\n"
"####################################################################")
clf = DL85Cluster(max_depth=1, time_limit=600)
start = time.perf_counter()
print("Model building...")
clf.fit(X_train)
duration = time.perf_counter() - start
print("Model built. Duration of building =", round(duration, 4), "\n\n\n")
predicted = clf.predict(X_test)
print("####################################################################\n"
"# DL8.5 default predictive clustering #\n"
"####################################################################")
X_train1 = X_train[:X_test.shape[0], :]
clf = DL85Cluster(max_depth=1, time_limit=600)
start = time.perf_counter()
print("Model building...")
clf.fit(X_train1, X_test)
duration = time.perf_counter() - start
print("Model built. Duration of building =", round(duration, 4))
print("Xtrain1 is used to describe data while X_test is used to compute errors")
predicted = clf.predict(X_test)
| 38.545455 | 88 | 0.555425 |
4a1ea220458472dcfaeffc9ab270c09436b71f0f
| 12,554 |
py
|
Python
|
libcloudforensics/providers/gcp/internal/storage.py
|
Fryyyyy/cloud-forensics-utils
|
d7fb845bd7df1498d67230520e2c7169c9a929df
|
[
"Apache-2.0"
] | 1 |
2021-06-15T05:50:47.000Z
|
2021-06-15T05:50:47.000Z
|
libcloudforensics/providers/gcp/internal/storage.py
|
Fryyyyy/cloud-forensics-utils
|
d7fb845bd7df1498d67230520e2c7169c9a929df
|
[
"Apache-2.0"
] | null | null | null |
libcloudforensics/providers/gcp/internal/storage.py
|
Fryyyyy/cloud-forensics-utils
|
d7fb845bd7df1498d67230520e2c7169c9a929df
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright 2020 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Google Cloud Storage functionalities."""
import collections
import datetime
import os
import shutil
import tempfile
from typing import TYPE_CHECKING, List, Dict, Any, Optional
import googleapiclient.http
from googleapiclient.errors import HttpError
from libcloudforensics import errors
from libcloudforensics import logging_utils
from libcloudforensics.providers.gcp.internal import common
# pylint: disable=line-too-long
from libcloudforensics.providers.gcp.internal import monitoring as gcp_monitoring
# pylint: enable=line-too-long
from libcloudforensics.providers.utils.storage_utils import SplitStoragePath
logging_utils.SetUpLogger(__name__)
logger = logging_utils.GetLogger(__name__)
if TYPE_CHECKING:
import googleapiclient # pylint: disable=ungrouped-imports
class GoogleCloudStorage:
"""Class to call Google Cloud Storage APIs.
Attributes:
project_id: Google Cloud project ID.
"""
CLOUD_STORAGE_API_VERSION = 'v1'
def __init__(self, project_id: Optional[str] = None) -> None:
"""Initialize the GoogleCloudStorage object.
Args:
project_id (str): Optional. Google Cloud project ID.
"""
self.project_id = project_id
def GcsApi(self) -> 'googleapiclient.discovery.Resource':
"""Get a Google Cloud Storage service object.
Returns:
googleapiclient.discovery.Resource: A Google Cloud Storage service object.
"""
return common.CreateService(
'storage', self.CLOUD_STORAGE_API_VERSION)
def GetObjectMetadata(self,
gcs_path: str,
user_project: Optional[str] = None) -> Dict[str, Any]:
"""Get API operation object metadata for Google Cloud Storage object.
Args:
gcs_path (str): File path to a resource in GCS.
Ex: gs://bucket/folder/obj
user_project (str): The project ID to be billed for this request.
Required for Requester Pays buckets.
Returns:
Dict: An API operation object for a Google Cloud Storage object.
https://cloud.google.com/storage/docs/json_api/v1/objects#resource
"""
if not gcs_path.startswith('gs://'):
gcs_path = 'gs://' + gcs_path
bucket, object_path = SplitStoragePath(gcs_path)
gcs_objects = self.GcsApi().objects() # pylint: disable=no-member
request = gcs_objects.get(
bucket=bucket, object=object_path, userProject=user_project)
response = request.execute() # type: Dict[str, Any]
return response
def GetBucketACLs(self,
bucket: str,
user_project: Optional[str] = None) -> Dict[str, List[str]]:
"""Get ACLs for a Google Cloud Storage bucket.
This includes both ACL entries and IAM policies.
Args:
bucket (str): Name of a bucket in GCS.
Ex: logs_bucket_1
user_project (str): The project ID to be billed for this request.
Required for Requester Pays buckets.
Returns:
Dict: A mapping of role to members of that role.
"""
ret = collections.defaultdict(list)
if bucket.startswith('gs://'):
# Can change to removeprefix() in 3.9
bucket = bucket[5:]
gcs_bac = self.GcsApi().bucketAccessControls() # pylint: disable=no-member
request = gcs_bac.list(bucket=bucket, userProject=user_project)
# https://cloud.google.com/storage/docs/json_api/v1/bucketAccessControls#resource
ac_response = request.execute()
for item in ac_response.get('items', []):
if item.get('kind') == 'storage#bucketAccessControl': # Sanity check
ret[item['role']].append(item['entity'])
gcs_buckets = self.GcsApi().buckets() # pylint: disable=no-member
request = gcs_buckets.getIamPolicy(bucket=bucket)
# https://cloud.google.com/storage/docs/json_api/v1/buckets/getIamPolicy
iam_response = request.execute()
for item in iam_response.get('bindings', []):
for member in item.get('members', []):
ret[item['role']].append(member)
return ret
def ListBuckets(self) -> List[Dict[str, Any]]:
"""List buckets in a Google Cloud project.
Returns:
List[Dict[str, Any]]: List of object dicts.
(https://cloud.google.com/storage/docs/json_api/v1/buckets#resource)
"""
gcs_buckets = self.GcsApi().buckets() # pylint: disable=no-member
request = gcs_buckets.list(project=self.project_id)
objects = request.execute() # type: Dict[str, Any]
return objects.get('items', [])
def ListBucketObjects(self, bucket: str) -> List[Dict[str, Any]]:
"""List objects (with metadata) in a Google Cloud Storage bucket.
Args:
bucket (str): Name of a bucket in GCS.
Returns:
List of Object Dicts (see GetObjectMetadata)
"""
if bucket.startswith('gs://'):
# Can change to removeprefix() in 3.9
bucket = bucket[5:]
gcs_objects = self.GcsApi().objects() # pylint: disable=no-member
request = gcs_objects.list(bucket=bucket)
objects = request.execute() # type: Dict[str, Any]
return objects.get('items', [])
def DeleteObject(self, gcs_path: str) -> None:
"""Deletes an object in a Google Cloud Storage bucket.
Args:
gcs_path (str): Full path to the object (ie: gs://bucket/dir1/dir2/obj)
"""
if not gcs_path.startswith('gs://'):
gcs_path = 'gs://' + gcs_path
bucket, object_path = SplitStoragePath(gcs_path)
gcs_objects = self.GcsApi().objects() # pylint: disable=no-member
request = gcs_objects.delete(bucket=bucket, object=object_path)
request.execute() # type: Dict[str, Any]
def GetBucketSize(self,
bucket: str,
timeframe: int = 1) -> Dict[str, int]:
"""List the size of a Google Storage Bucket in a project (default: last 1
day).
Note: This will list the _maximum size_
(in bytes) the bucket had in the timeframe.
Ref: https://cloud.google.com/monitoring/api/metrics_gcp#gcp-storage
Args:
bucket (str): Name of a bucket in GCS.
timeframe (int): Optional. The number (in days) for
which to measure activity.
Default: 1 day.
Returns:
Dict[str, int]: Dictionary mapping bucket name to its size (in bytes).
"""
start_time = common.FormatRFC3339(
datetime.datetime.utcnow() - datetime.timedelta(days=timeframe))
end_time = common.FormatRFC3339(datetime.datetime.utcnow())
period = timeframe * 24 * 60 * 60
assert self.project_id # Necessary for mypy check
gcm = gcp_monitoring.GoogleCloudMonitoring(self.project_id)
gcm_api = gcm.GcmApi()
gcm_timeseries_client = gcm_api.projects().timeSeries() # pylint: disable=no-member
qfilter = ('metric.type="storage.googleapis.com/storage/total_bytes" '
'resource.type="gcs_bucket"')
qfilter += ' resource.label.bucket_name="{0:s}"'.format(bucket)
responses = common.ExecuteRequest(
gcm_timeseries_client,
'list',
{
'name': 'projects/{0:s}'.format(self.project_id),
'filter': qfilter,
'interval_startTime': start_time,
'interval_endTime': end_time,
'aggregation_groupByFields': 'resource.label.bucket_name',
'aggregation_perSeriesAligner': 'ALIGN_MAX',
'aggregation_alignmentPeriod': '{0:d}s'.format(period),
'aggregation_crossSeriesReducer': 'REDUCE_NONE'
})
ret = {}
for response in responses:
for ts in response.get('timeSeries', []):
bucket = ts.get('resource', {}).get('labels', {}).get('bucket_name', '')
if bucket:
points = ts.get('points', [])
for point in points:
val = point.get('value', {}).get('doubleValue', 0)
if bucket not in ret:
ret[bucket] = val
elif val > ret[bucket]:
ret[bucket] = val
return ret
def CreateBucket(
self,
bucket: str,
labels: Optional[Dict[str, str]] = None,
predefined_acl: str = 'private',
predefined_default_object_acl: str = 'private') -> Dict[str, Any]:
"""Creates a Google Cloud Storage bucket in the current project.
Args:
bucket (str): Name of the desired bucket.
labels (Dict[str, str]): Mapping of key/value strings to be applied as a label
to the bucket.
Rules for acceptable label values are located at
https://cloud.google.com/storage/docs/key-terms#bucket-labels
predefined_acl (str): A predefined set of Access Controls
to apply to the bucket.
predefined_default_object_acl (str): A predefined set of Access Controls
to apply to the objects in the bucket.
Values listed in https://cloud.google.com/storage/docs/json_api/v1/buckets/insert#parameters # pylint: disable=line-too-long
Returns:
Dict[str, Any]: An API operation object for a Google Cloud Storage bucket.
https://cloud.google.com/storage/docs/json_api/v1/buckets#resource
"""
if bucket.startswith('gs://'):
bucket = bucket[5:]
gcs_buckets = self.GcsApi().buckets() # pylint: disable=no-member
body = {'name': bucket, 'labels': labels}
request = gcs_buckets.insert(
project=self.project_id,
predefinedAcl=predefined_acl,
predefinedDefaultObjectAcl=predefined_default_object_acl,
body=body)
try:
response = request.execute() # type: Dict[str, Any]
except HttpError as exception:
if exception.resp.status == 409:
raise errors.ResourceCreationError(
'Bucket {0:s} already exists: {1!s}'.format(bucket, exception),
__name__) from exception
raise errors.ResourceCreationError(
'Unknown error occurred when creating bucket:'
' {0!s}'.format(exception), __name__) from exception
return response
def GetObject(self,
gcs_path: str,
out_file: Optional[str] = None) -> str:
"""Gets the contents of an object in a Google Cloud Storage bucket.
Args:
gcs_path (str): Full path to the object (ie: gs://bucket/dir1/dir2/obj)
out_file (str): Path to the local file that will be written.
If not provided, will create a temporary file.
Returns:
str: The filename of the written object.
Raises:
ResourceCreationError: If the file couldn't be downloaded.
"""
if not gcs_path.startswith('gs://'):
gcs_path = 'gs://' + gcs_path
gcs_objects = self.GcsApi().objects() # pylint: disable=no-member
(bucket, filename) = SplitStoragePath(gcs_path)
request = gcs_objects.get_media(bucket=bucket, object=filename)
if not out_file:
outputdir = tempfile.mkdtemp()
logger.info('Created temporary directory {0:s}'.format(outputdir))
out_file = os.path.join(outputdir, os.path.basename(filename))
stat = shutil.disk_usage(os.path.dirname(outputdir))
om = self.GetObjectMetadata(gcs_path)
if 'size' not in om:
logger.warning('Unable to retrieve object metadata before fetching')
else:
if int(om['size']) > stat.free:
raise errors.ResourceCreationError(
'Target drive does not have enough space ({0!s} free vs {1!s} needed)' # pylint: disable=line-too-long
.format(stat.free, om['size']),
__name__)
with open(out_file, 'wb') as outputfile:
downloader = googleapiclient.http.MediaIoBaseDownload(outputfile, request)
done = False
while not done:
status, done = downloader.next_chunk()
if status.total_size > stat.free:
raise errors.ResourceCreationError(
'Target drive does not have enough space ({0!s} free vs {1!s} needed)' # pylint: disable=line-too-long
.format(stat.free, status.total_size),
__name__)
logger.info('Download {}%.'.format(int(status.progress() * 100)))
logger.info('File successfully written to {0:s}'.format(out_file))
return out_file
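# --- Illustrative usage (editor's sketch, not part of the original module) ---
# Shows how the GoogleCloudStorage wrapper above is typically called. The
# project ID, bucket and object names below are placeholders.
def _example_gcs_usage():  # pragma: no cover
    gcs = GoogleCloudStorage(project_id='my-forensics-project')
    # Enumerate buckets and their ACL / IAM bindings.
    for bucket in gcs.ListBuckets():
        print(bucket['name'], gcs.GetBucketACLs(bucket['name']))
    # Download an object to a temporary local file and return its path.
    return gcs.GetObject('gs://evidence-bucket/disk.image')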
| 37.813253 | 131 | 0.662259 |
4a1ea228cd0065631d06749d3c566dd1ac84f5ce
| 22,617 |
py
|
Python
|
test/functional/test_runner.py
|
silbatech/silba-src
|
8fa0435d469e9a704a3ebc8ff902b2dbbca19520
|
[
"MIT"
] | null | null | null |
test/functional/test_runner.py
|
silbatech/silba-src
|
8fa0435d469e9a704a3ebc8ff902b2dbbca19520
|
[
"MIT"
] | null | null | null |
test/functional/test_runner.py
|
silbatech/silba-src
|
8fa0435d469e9a704a3ebc8ff902b2dbbca19520
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) 2014-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Run regression test suite.
This module calls down into individual test cases via subprocess. It will
forward all unrecognized arguments onto the individual test scripts.
Functional tests are disabled on Windows by default. Use --force to run them anyway.
For a description of arguments recognized by test scripts, see
`test/functional/test_framework/test_framework.py:BitcoinTestFramework.main`.
"""
import argparse
from collections import deque
import configparser
import datetime
import os
import time
import shutil
import signal
import sys
import subprocess
import tempfile
import re
import logging
# Formatting. Default colors to empty strings.
BOLD, BLUE, RED, GREY = ("", ""), ("", ""), ("", ""), ("", "")
try:
# Make sure python thinks it can write unicode to its stdout
"\u2713".encode("utf_8").decode(sys.stdout.encoding)
TICK = "✓ "
CROSS = "✖ "
CIRCLE = "○ "
except UnicodeDecodeError:
TICK = "P "
CROSS = "x "
CIRCLE = "o "
if os.name == 'posix':
# primitive formatting on supported
# terminals via ANSI escape sequences:
BOLD = ('\033[0m', '\033[1m')
BLUE = ('\033[0m', '\033[0;34m')
RED = ('\033[0m', '\033[0;31m')
GREY = ('\033[0m', '\033[1;30m')
TEST_EXIT_PASSED = 0
TEST_EXIT_SKIPPED = 77
BASE_SCRIPTS= [
# Scripts that are run by the travis build process.
# Longest test should go first, to favor running tests in parallel
'wallet_backup.py',
'p2p_pos_fakestake.py',
'p2p_pos_fakestake_accepted.py',
#'p2p_zpos_fakestake.py',
#'p2p_zpos_fakestake_accepted.py',
#'zerocoin_wrapped_serials.py',
# vv Tests less than 5m vv
#'feature_block.py',
#'rpc_fundrawtransaction.py',
# vv Tests less than 2m vv
'p2p_pos_doublespend.py',
'wallet_basic.py',
'wallet_accounts.py',
'wallet_dump.py',
'rpc_listtransactions.py',
# vv Tests less than 60s vv
'wallet_zapwallettxes.py',
#'wallet_importmulti.py',
#'mempool_limit.py', # We currently don't limit our mempool
'wallet_listreceivedby.py',
#'wallet_abandonconflict.py',
'rpc_rawtransaction.py',
'feature_reindex.py',
'rpc_bip38.py',
# vv Tests less than 30s vv
'wallet_keypool_topup.py',
#'interface_zmq.py',
'interface_bitcoin_cli.py',
'mempool_resurrect.py',
'wallet_txn_doublespend.py --mineblock',
'wallet_txn_clone.py --mineblock',
#'rpc_getchaintips.py',
'interface_rest.py',
'mempool_spend_coinbase.py',
'mempool_reorg.py',
#'mempool_persist.py', # Not yet implemented
'interface_http.py',
#'rpc_users.py',
'feature_proxy.py',
'rpc_signrawtransaction.py',
'p2p_disconnect_ban.py',
'rpc_decodescript.py',
'rpc_blockchain.py',
#'rpc_deprecated.py',
'wallet_disable.py',
'rpc_net.py',
'wallet_keypool.py',
#'p2p_mempool.py',
#'mining_prioritisetransaction.py',
#'p2p_invalid_block.py',
#'p2p_invalid_tx.py',
'rpc_signmessage.py',
#'wallet_import_rescan.py',
#'mining_basic.py',
#'wallet_bumpfee.py',
#'wallet_listsinceblock.py',
#'p2p_leak.py',
'wallet_encryption.py',
#'feature_cltv.py',
#'wallet_resendwallettransactions.py',
#'feature_minchainwork.py',
#'p2p_fingerprint.py',
'feature_uacomment.py',
#'p2p_unrequested_blocks.py',
#'feature_config_args.py',
'feature_help.py',
# Don't append tests at the end to avoid merge conflicts
# Put them in a random line within the section that fits their approximate run-time
]
EXTENDED_SCRIPTS = [
# These tests are not run by the travis build process.
# Longest test should go first, to favor running tests in parallel
# vv Tests less than 20m vv
#'feature_fee_estimation.py',
# vv Tests less than 5m vv
# vv Tests less than 2m vv
#'p2p_timeouts.py',
# vv Tests less than 60s vv
#'p2p_feefilter.py',
'rpc_bind.py',
# vv Tests less than 30s vv
#'example_test.py',
'feature_notifications.py',
'rpc_invalidateblock.py',
]
# Place EXTENDED_SCRIPTS first since it has the 3 longest running tests
ALL_SCRIPTS = EXTENDED_SCRIPTS + BASE_SCRIPTS
NON_SCRIPTS = [
# These are python files that live in the functional tests directory, but are not test scripts.
"combine_logs.py",
"create_cache.py",
"test_runner.py",
]
def main():
# Parse arguments and pass through unrecognised args
parser = argparse.ArgumentParser(add_help=False,
usage='%(prog)s [test_runner.py options] [script options] [scripts]',
description=__doc__,
epilog='''
Help text and arguments for individual test script:''',
formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument('--combinedlogslen', '-c', type=int, default=0, help='print a combined log (of length n lines) from all test nodes and test framework to the console on failure.')
parser.add_argument('--coverage', action='store_true', help='generate a basic coverage report for the RPC interface')
parser.add_argument('--exclude', '-x', help='specify a comma-separated-list of scripts to exclude.')
parser.add_argument('--extended', action='store_true', help='run the extended test suite in addition to the basic tests')
parser.add_argument('--force', '-f', action='store_true', help='run tests even on platforms where they are disabled by default (e.g. windows).')
parser.add_argument('--help', '-h', '-?', action='store_true', help='print help text and exit')
parser.add_argument('--jobs', '-j', type=int, default=4, help='how many test scripts to run in parallel. Default=4.')
parser.add_argument('--keepcache', '-k', action='store_true', help='the default behavior is to flush the cache directory on startup. --keepcache retains the cache from the previous testrun.')
parser.add_argument('--quiet', '-q', action='store_true', help='only print results summary and failure logs')
parser.add_argument('--tmpdirprefix', '-t', default=tempfile.gettempdir(), help="Root directory for datadirs")
args, unknown_args = parser.parse_known_args()
# args to be passed on always start with two dashes; tests are the remaining unknown args
tests = [arg for arg in unknown_args if arg[:2] != "--"]
passon_args = [arg for arg in unknown_args if arg[:2] == "--"]
# Read config generated by configure.
config = configparser.ConfigParser()
configfile = os.path.abspath(os.path.dirname(__file__)) + "/../config.ini"
config.read_file(open(configfile))
passon_args.append("--configfile=%s" % configfile)
# Set up logging
logging_level = logging.INFO if args.quiet else logging.DEBUG
logging.basicConfig(format='%(message)s', level=logging_level)
# Create base test directory
tmpdir = "%s/silba_test_runner_%s" % (args.tmpdirprefix, datetime.datetime.now().strftime("%Y%m%d_%H%M%S"))
os.makedirs(tmpdir)
logging.debug("Temporary test directory at %s" % tmpdir)
enable_wallet = config["components"].getboolean("ENABLE_WALLET")
enable_utils = config["components"].getboolean("ENABLE_UTILS")
enable_bitcoind = config["components"].getboolean("ENABLE_BITCOIND")
if config["environment"]["EXEEXT"] == ".exe" and not args.force:
# https://github.com/bitcoin/bitcoin/commit/d52802551752140cf41f0d9a225a43e84404d3e9
# https://github.com/bitcoin/bitcoin/pull/5677#issuecomment-136646964
print("Tests currently disabled on Windows by default. Use --force option to enable")
sys.exit(0)
if not (enable_wallet and enable_utils and enable_bitcoind):
print("No functional tests to run. Wallet, utils, and silbad must all be enabled")
print("Rerun `configure` with -enable-wallet, -with-utils and -with-daemon and rerun make")
sys.exit(0)
# Build list of tests
if tests:
# Individual tests have been specified. Run specified tests that exist
# in the ALL_SCRIPTS list. Accept the name with or without .py extension.
tests = [re.sub(r"\.py$", "", t) + ".py" for t in tests]
test_list = []
for t in tests:
if t in ALL_SCRIPTS:
test_list.append(t)
else:
print("{}WARNING!{} Test '{}' not found in full test list.".format(BOLD[1], BOLD[0], t))
else:
# No individual tests have been specified.
# Run all base tests, and optionally run extended tests.
test_list = BASE_SCRIPTS
if args.extended:
# place the EXTENDED_SCRIPTS first since the three longest ones
# are there and the list is shorter
test_list = EXTENDED_SCRIPTS + test_list
# Remove the test cases that the user has explicitly asked to exclude.
if args.exclude:
tests_excl = [re.sub(r"\.py$", "", t) + ".py" for t in args.exclude.split(',')]
for exclude_test in tests_excl:
if exclude_test in test_list:
test_list.remove(exclude_test)
else:
print("{}WARNING!{} Test '{}' not found in current test list.".format(BOLD[1], BOLD[0], exclude_test))
if not test_list:
print("No valid test scripts specified. Check that your test is in one "
"of the test lists in test_runner.py, or run test_runner.py with no arguments to run all tests")
sys.exit(0)
if args.help:
# Print help for test_runner.py, then print help of the first script (with args removed) and exit.
parser.print_help()
subprocess.check_call([(config["environment"]["SRCDIR"] + '/test/functional/' + test_list[0].split()[0])] + ['-h'])
sys.exit(0)
check_script_list(config["environment"]["SRCDIR"])
check_script_prefixes()
if not args.keepcache:
shutil.rmtree("%s/test/cache" % config["environment"]["BUILDDIR"], ignore_errors=True)
run_tests(test_list, config["environment"]["SRCDIR"], config["environment"]["BUILDDIR"], config["environment"]["EXEEXT"], tmpdir, args.jobs, args.coverage, passon_args, args.combinedlogslen)
def run_tests(test_list, src_dir, build_dir, exeext, tmpdir, jobs=1, enable_coverage=False, args=[], combined_logs_len=0):
# Warn if bitcoind is already running (unix only)
try:
if subprocess.check_output(["pidof", "silbad"]) is not None:
print("%sWARNING!%s There is already a silbad process running on this system. Tests may fail unexpectedly due to resource contention!" % (BOLD[1], BOLD[0]))
except (OSError, subprocess.SubprocessError):
pass
# Warn if there is a cache directory
cache_dir = "%s/test/cache" % build_dir
if os.path.isdir(cache_dir):
print("%sWARNING!%s There is a cache directory here: %s. If tests fail unexpectedly, try deleting the cache directory." % (BOLD[1], BOLD[0], cache_dir))
#Set env vars
if "BITCOIND" not in os.environ:
os.environ["BITCOIND"] = build_dir + '/src/silbad' + exeext
os.environ["BITCOINCLI"] = build_dir + '/src/silba-cli' + exeext
tests_dir = src_dir + '/test/functional/'
flags = ["--srcdir={}/src".format(build_dir)] + args
flags.append("--cachedir=%s" % cache_dir)
if enable_coverage:
coverage = RPCCoverage()
flags.append(coverage.flag)
logging.debug("Initializing coverage directory at %s" % coverage.dir)
else:
coverage = None
if len(test_list) > 1 and jobs > 1:
# Populate cache
try:
subprocess.check_output([tests_dir + 'create_cache.py'] + flags + ["--tmpdir=%s/cache" % tmpdir])
except subprocess.CalledProcessError as e:
sys.stdout.buffer.write(e.output)
raise
#Run Tests
job_queue = TestHandler(jobs, tests_dir, tmpdir, test_list, flags)
time0 = time.time()
test_results = []
max_len_name = len(max(test_list, key=len))
for _ in range(len(test_list)):
test_result, testdir, stdout, stderr = job_queue.get_next()
test_results.append(test_result)
if test_result.status == "Passed":
logging.debug("\n%s%s%s passed, Duration: %s s" % (BOLD[1], test_result.name, BOLD[0], test_result.time))
elif test_result.status == "Skipped":
logging.debug("\n%s%s%s skipped" % (BOLD[1], test_result.name, BOLD[0]))
else:
print("\n%s%s%s failed, Duration: %s s\n" % (BOLD[1], test_result.name, BOLD[0], test_result.time))
print(BOLD[1] + 'stdout:\n' + BOLD[0] + stdout + '\n')
print(BOLD[1] + 'stderr:\n' + BOLD[0] + stderr + '\n')
if combined_logs_len and os.path.isdir(testdir):
# Print the final `combinedlogslen` lines of the combined logs
print('{}Combine the logs and print the last {} lines ...{}'.format(BOLD[1], combined_logs_len, BOLD[0]))
print('\n============')
print('{}Combined log for {}:{}'.format(BOLD[1], testdir, BOLD[0]))
print('============\n')
combined_logs, _ = subprocess.Popen([os.path.join(tests_dir, 'combine_logs.py'), '-c', testdir], universal_newlines=True, stdout=subprocess.PIPE).communicate()
print("\n".join(deque(combined_logs.splitlines(), combined_logs_len)))
print_results(test_results, max_len_name, (int(time.time() - time0)))
if coverage:
coverage.report_rpc_coverage()
logging.debug("Cleaning up coverage data")
coverage.cleanup()
# Clear up the temp directory if all subdirectories are gone
if not os.listdir(tmpdir):
os.rmdir(tmpdir)
all_passed = all(map(lambda test_result: test_result.was_successful, test_results))
sys.exit(not all_passed)
def print_results(test_results, max_len_name, runtime):
results = "\n" + BOLD[1] + "%s | %s | %s\n\n" % ("TEST".ljust(max_len_name), "STATUS ", "DURATION") + BOLD[0]
test_results.sort(key=lambda result: result.name.lower())
all_passed = True
time_sum = 0
for test_result in test_results:
all_passed = all_passed and test_result.was_successful
time_sum += test_result.time
test_result.padding = max_len_name
results += str(test_result)
status = TICK + "Passed" if all_passed else CROSS + "Failed"
results += BOLD[1] + "\n%s | %s | %s s (accumulated) \n" % ("ALL".ljust(max_len_name), status.ljust(9), time_sum) + BOLD[0]
results += "Runtime: %s s\n" % (runtime)
print(results)
class TestHandler:
"""
Trigger the test scripts passed in via the list.
"""
def __init__(self, num_tests_parallel, tests_dir, tmpdir, test_list=None, flags=None):
assert(num_tests_parallel >= 1)
self.num_jobs = num_tests_parallel
self.tests_dir = tests_dir
self.tmpdir = tmpdir
self.test_list = test_list
self.flags = flags
self.num_running = 0
# In case there is a graveyard of zombie silbads, we can apply a
# pseudorandom offset to hopefully jump over them.
# (625 is PORT_RANGE/MAX_NODES)
self.portseed_offset = int(time.time() * 1000) % 625
self.jobs = []
def get_next(self):
while self.num_running < self.num_jobs and self.test_list:
# Add tests
self.num_running += 1
t = self.test_list.pop(0)
portseed = len(self.test_list) + self.portseed_offset
portseed_arg = ["--portseed={}".format(portseed)]
log_stdout = tempfile.SpooledTemporaryFile(max_size=2**16)
log_stderr = tempfile.SpooledTemporaryFile(max_size=2**16)
test_argv = t.split()
testdir = "{}/{}_{}".format(self.tmpdir, re.sub(".py$", "", test_argv[0]), portseed)
tmpdir_arg = ["--tmpdir={}".format(testdir)]
self.jobs.append((t,
time.time(),
subprocess.Popen([self.tests_dir + test_argv[0]] + test_argv[1:] + self.flags + portseed_arg + tmpdir_arg,
universal_newlines=True,
stdout=log_stdout,
stderr=log_stderr),
testdir,
log_stdout,
log_stderr))
if not self.jobs:
raise IndexError('pop from empty list')
while True:
# Return first proc that finishes
time.sleep(.5)
for j in self.jobs:
(name, time0, proc, testdir, log_out, log_err) = j
if os.getenv('TRAVIS') == 'true' and int(time.time() - time0) > 20 * 60:
# In travis, timeout individual tests after 20 minutes (to stop tests hanging and not
# providing useful output).
proc.send_signal(signal.SIGINT)
if proc.poll() is not None:
log_out.seek(0), log_err.seek(0)
[stdout, stderr] = [l.read().decode('utf-8') for l in (log_out, log_err)]
log_out.close(), log_err.close()
if proc.returncode == TEST_EXIT_PASSED and stderr == "":
status = "Passed"
elif proc.returncode == TEST_EXIT_SKIPPED:
status = "Skipped"
else:
status = "Failed"
self.num_running -= 1
self.jobs.remove(j)
return TestResult(name, status, int(time.time() - time0)), testdir, stdout, stderr
print('.', end='', flush=True)
class TestResult():
def __init__(self, name, status, time):
self.name = name
self.status = status
self.time = time
self.padding = 0
def __repr__(self):
if self.status == "Passed":
color = BLUE
glyph = TICK
elif self.status == "Failed":
color = RED
glyph = CROSS
elif self.status == "Skipped":
color = GREY
glyph = CIRCLE
return color[1] + "%s | %s%s | %s s\n" % (self.name.ljust(self.padding), glyph, self.status.ljust(7), self.time) + color[0]
@property
def was_successful(self):
return self.status != "Failed"
def check_script_prefixes():
"""Check that at most a handful of the
test scripts don't start with one of the allowed name prefixes."""
# LEEWAY is provided as a transition measure, so that pull-requests
# that introduce new tests that don't conform with the naming
# convention don't immediately cause the tests to fail.
LEEWAY = 10
good_prefixes_re = re.compile("(example|feature|interface|mempool|mining|p2p|rpc|wallet|zerocoin)_")
bad_script_names = [script for script in ALL_SCRIPTS if good_prefixes_re.match(script) is None]
if len(bad_script_names) > 0:
print("INFO: %d tests not meeting naming conventions:" % (len(bad_script_names)))
print(" %s" % ("\n ".join(sorted(bad_script_names))))
assert len(bad_script_names) <= LEEWAY, "Too many tests not following naming convention! (%d found, maximum: %d)" % (len(bad_script_names), LEEWAY)
def check_script_list(src_dir):
"""Check scripts directory.
Check that there are no scripts in the functional tests directory which are
not being run by pull-tester.py."""
script_dir = src_dir + '/test/functional/'
python_files = set([t for t in os.listdir(script_dir) if t[-3:] == ".py"])
missed_tests = list(python_files - set(map(lambda x: x.split()[0], ALL_SCRIPTS + NON_SCRIPTS)))
if len(missed_tests) != 0:
print("%sWARNING!%s The following scripts are not being run: %s. Check the test lists in test_runner.py." % (BOLD[1], BOLD[0], str(missed_tests)))
if os.getenv('TRAVIS') == 'true':
# On travis this warning is an error to prevent merging incomplete commits into master
sys.exit(1)
class RPCCoverage():
"""
Coverage reporting utilities for test_runner.
Coverage calculation works by having each test script subprocess write
coverage files into a particular directory. These files contain the RPC
commands invoked during testing, as well as a complete listing of RPC
commands per `silba-cli help` (`rpc_interface.txt`).
After all tests complete, the commands run are combined and diff'd against
the complete list to calculate uncovered RPC commands.
See also: test/functional/test_framework/coverage.py
"""
def __init__(self):
self.dir = tempfile.mkdtemp(prefix="coverage")
self.flag = '--coveragedir=%s' % self.dir
def report_rpc_coverage(self):
"""
Print out RPC commands that were unexercised by tests.
"""
uncovered = self._get_uncovered_rpc_commands()
if uncovered:
print("Uncovered RPC commands:")
print("".join((" - %s\n" % i) for i in sorted(uncovered)))
else:
print("All RPC commands covered.")
def cleanup(self):
return shutil.rmtree(self.dir)
def _get_uncovered_rpc_commands(self):
"""
Return a set of currently untested RPC commands.
"""
# This is shared from `test/functional/test-framework/coverage.py`
reference_filename = 'rpc_interface.txt'
coverage_file_prefix = 'coverage.'
coverage_ref_filename = os.path.join(self.dir, reference_filename)
coverage_filenames = set()
all_cmds = set()
covered_cmds = set()
if not os.path.isfile(coverage_ref_filename):
raise RuntimeError("No coverage reference found")
with open(coverage_ref_filename, 'r') as f:
all_cmds.update([i.strip() for i in f.readlines()])
for root, dirs, files in os.walk(self.dir):
for filename in files:
if filename.startswith(coverage_file_prefix):
coverage_filenames.add(os.path.join(root, filename))
for filename in coverage_filenames:
with open(filename, 'r') as f:
covered_cmds.update([i.strip() for i in f.readlines()])
return all_cmds - covered_cmds
if __name__ == '__main__':
main()
| 40.972826 | 195 | 0.632179 |
4a1ea2fab00db4a3aedc14916c6d9f7e7673a964
| 4,180 |
py
|
Python
|
sahara/plugins/cdh/deploy.py
|
ksshanam/sahara
|
0d259f7a71447cd0cefe4f11184cc2ee335f4e33
|
[
"Apache-2.0"
] | 2 |
2019-01-28T22:10:54.000Z
|
2019-02-20T08:35:58.000Z
|
sahara_plugin_cdh/plugins/cdh/deploy.py
|
openstack/sahara-plugin-cdh
|
063c3ee8de7e56831ef6b3bc8807706bd5fd7cea
|
[
"Apache-2.0"
] | 1 |
2020-10-06T07:50:12.000Z
|
2020-10-06T07:50:12.000Z
|
sahara_plugin_cdh/plugins/cdh/deploy.py
|
openstack/sahara-plugin-cdh
|
063c3ee8de7e56831ef6b3bc8807706bd5fd7cea
|
[
"Apache-2.0"
] | null | null | null |
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from sahara.plugins import kerberos
PACKAGES = [
'cloudera-manager-agent',
'cloudera-manager-daemons',
'cloudera-manager-server',
'cloudera-manager-server-db-2',
'flume-ng',
'hadoop-hdfs-datanode',
'hadoop-hdfs-namenode',
'hadoop-hdfs-secondarynamenode',
'hadoop-kms',
'hadoop-mapreduce',
'hadoop-mapreduce-historyserver',
'hadoop-yarn-nodemanager',
'hadoop-yarn-resourcemanager',
'hbase',
'hbase-solr',
'hive-hcatalog',
'hive-metastore',
'hive-server2',
'hive-webhcat-server',
'hue',
'impala',
'impala-server',
'impala-state-store',
'impala-catalog',
'impala-shell',
'kafka',
'kafka-server',
'keytrustee-keyprovider',
'oozie',
'oracle-j2sdk1.7',
'sentry',
'solr-server',
'solr-doc',
'search',
'spark-history-server',
'sqoop2',
'unzip',
'zookeeper'
]
def setup_kerberos_for_cluster(cluster, cloudera_utils):
if kerberos.is_kerberos_security_enabled(cluster):
manager = cloudera_utils.pu.get_manager(cluster)
kerberos.deploy_infrastructure(cluster, manager)
cloudera_utils.full_cluster_stop(cluster)
kerberos.prepare_policy_files(cluster)
cloudera_utils.push_kerberos_configs(cluster)
cloudera_utils.full_cluster_start(cluster)
kerberos.create_keytabs_for_map(
cluster,
{'hdfs': cloudera_utils.pu.get_hdfs_nodes(cluster),
'spark': [cloudera_utils.pu.get_spark_historyserver(cluster)]})
def prepare_scaling_kerberized_cluster(cluster, cloudera_utils, instances):
if kerberos.is_kerberos_security_enabled(cluster):
server = None
if not kerberos.using_existing_kdc(cluster):
server = cloudera_utils.pu.get_manager(cluster)
kerberos.setup_clients(cluster, server)
kerberos.prepare_policy_files(cluster)
# manager can correctly handle updating configs
cloudera_utils.push_kerberos_configs(cluster)
kerberos.create_keytabs_for_map(
cluster,
{'hdfs': cloudera_utils.pu.get_hdfs_nodes(cluster, instances)})
def get_open_ports(node_group):
ports = [9000] # for CM agent
ports_map = {
'CLOUDERA_MANAGER': [7180, 7182, 7183, 7432, 7184, 8084, 8086, 10101,
9997, 9996, 8087, 9998, 9999, 8085, 9995, 9994],
'HDFS_NAMENODE': [8020, 8022, 50070, 50470],
'HDFS_SECONDARYNAMENODE': [50090, 50495],
'HDFS_DATANODE': [50010, 1004, 50075, 1006, 50020],
'YARN_RESOURCEMANAGER': [8030, 8031, 8032, 8033, 8088],
'YARN_STANDBYRM': [8030, 8031, 8032, 8033, 8088],
'YARN_NODEMANAGER': [8040, 8041, 8042],
'YARN_JOBHISTORY': [10020, 19888],
'HIVE_METASTORE': [9083],
'HIVE_SERVER2': [10000],
'HUE_SERVER': [8888],
'OOZIE_SERVER': [11000, 11001],
'SPARK_YARN_HISTORY_SERVER': [18088],
'ZOOKEEPER_SERVER': [2181, 3181, 4181, 9010],
'HBASE_MASTER': [60000],
'HBASE_REGIONSERVER': [60020],
'FLUME_AGENT': [41414],
'SENTRY_SERVER': [8038],
'SOLR_SERVER': [8983, 8984],
'SQOOP_SERVER': [8005, 12000],
'KEY_VALUE_STORE_INDEXER': [],
'IMPALA_CATALOGSERVER': [25020, 26000],
'IMPALA_STATESTORE': [25010, 24000],
'IMPALAD': [21050, 21000, 23000, 25000, 28000, 22000],
'KMS': [16000, 16001],
'JOURNALNODE': [8480, 8481, 8485]
}
for process in node_group.node_processes:
if process in ports_map:
ports.extend(ports_map[process])
return ports
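# --- Illustrative usage (editor's sketch, not part of the original module) ---
# get_open_ports() only reads node_group.node_processes, so a simple stand-in
# object is enough to demonstrate the lookup; the process names below are keys
# of ports_map above.
def _example_get_open_ports():
    class _FakeNodeGroup(object):
        node_processes = ['HDFS_NAMENODE', 'YARN_RESOURCEMANAGER']
    # Returns [9000] (CM agent) plus the namenode and resourcemanager ports.
    return get_open_ports(_FakeNodeGroup())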
| 33.44 | 77 | 0.649522 |
4a1ea479876af6378a13f283fa51e845a7920667
| 3,775 |
py
|
Python
|
schedule.py
|
fakegit/bilibili-live-tools
|
99dfb53d1dc3275e37f0be34e4199e672aecb1bf
|
[
"MIT"
] | 1,018 |
2017-12-24T01:53:18.000Z
|
2022-03-25T09:25:02.000Z
|
schedule.py
|
fakegit/bilibili-live-tools
|
99dfb53d1dc3275e37f0be34e4199e672aecb1bf
|
[
"MIT"
] | 274 |
2017-12-29T13:58:19.000Z
|
2021-12-09T14:31:55.000Z
|
schedule.py
|
fakegit/bilibili-live-tools
|
99dfb53d1dc3275e37f0be34e4199e672aecb1bf
|
[
"MIT"
] | 240 |
2018-01-22T05:01:28.000Z
|
2022-01-21T06:30:57.000Z
|
import asyncio
import time
from printer import Printer
sec_calc = lambda h, m, s: 3600 * int(h) + 60 * int(m) + float(s)
time_minus = lambda t2, t1: (t2 - t1) % 86400
time_str_calc = lambda sec: f'{sec//3600:02.0f}:{sec%3600//60:02.0f}:{sec%60:02.0f}'
def sec_now():
time_tuple = time.localtime()
return sec_calc(time_tuple.tm_hour, time_tuple.tm_min, time_tuple.tm_sec)
class Schedule:
instance = None
def __new__(cls, *args, **kw):
if not cls.instance:
cls.instance = super(Schedule, cls).__new__(cls)
cls.instance.scheduled_sleep = False
return cls.instance
async def run(self, schedule_str):
if schedule_str == '':
Printer().printer("请填入定时休眠时间段", "Warning", "red")
self.scheduled_sleep = False
return
second_array = sorted([[sec_calc(*time_str.split(':')) for time_str in
time_str_pair.split('-')] for time_str_pair in schedule_str.split(';')])
second_array = [[start, end] for (start, end) in second_array if start != end]
if not len(second_array):
Printer().printer("请填入有效时间段", "Warning", "red")
self.scheduled_sleep = False
return
# Merge overlapping time ranges in sorted order
second_rearrng = [second_array[0]]
pos = 1
while pos < len(second_array):
if time_minus(second_array[pos][0], second_rearrng[-1][0]) <= \
time_minus(second_rearrng[-1][1], second_rearrng[-1][0]):
if time_minus(second_rearrng[-1][1], second_rearrng[-1][0]) < \
time_minus(second_array[pos][1], second_rearrng[-1][0]):
second_rearrng[-1][1] = second_array[pos][1]
else:
second_rearrng.append(second_array[pos])
pos += 1
# Handle the case where the last range, which crosses midnight, covers the endpoints of the first ranges
if second_rearrng[-1][1] < second_rearrng[-1][0]:
while len(second_rearrng) > 1:
if second_rearrng[-1][1] > second_rearrng[0][0]:
if second_rearrng[-1][1] < second_rearrng[0][1]:
second_rearrng[-1][1] = second_rearrng[0][1]
del second_rearrng[0]
else:
break
sec_sequence = __import__('functools').reduce(lambda x, y: x+y, second_rearrng)
sec_init = sec_now()
for i in range(len(sec_sequence)):
if sec_sequence[i] > sec_init:
stage = i
break
else:
stage = len(sec_sequence)-1 if sec_sequence[-1] < sec_sequence[-2] else 0
# Current time is after midnight and inside the last range, which spans midnight
if stage == 0 and sec_init < sec_sequence[-1] < sec_sequence[-2]:
stage = len(sec_sequence)-1
if stage % 2 == 1:
self.scheduled_sleep = True
Printer().printer(f"当前处于定时休眠时间段内,下一次取消休眠时间为 {time_str_calc(sec_sequence[stage])}", "Info", "green")
else:
self.scheduled_sleep = False
Printer().printer(f"当前处于定时休眠时间段外,下一次开始休眠时间为 {time_str_calc(sec_sequence[stage])}", "Info", "green")
while True:
sleep_time = (sec_sequence[stage] - sec_now()) % 86400
# Avoid overshooting the next time point due to timing error
sleep_time = 0 if sleep_time > 86395 else sleep_time
await asyncio.sleep(sleep_time)
stage += 1
stage = stage % len(sec_sequence)
if stage % 2 == 0:
Printer().printer(f"结束定时休眠,下一次开始休眠时间为 {time_str_calc(sec_sequence[stage])}", "Info", "green")
self.scheduled_sleep = False
else:
Printer().printer(f"开始定时休眠,本次结束休眠时间为 {time_str_calc(sec_sequence[stage])}", "Info", "green")
self.scheduled_sleep = True
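# --- Illustrative usage (editor's sketch, not part of the original module) ---
# The schedule string is a ';'-separated list of 'HH:MM:SS-HH:MM:SS' ranges,
# and a range may cross midnight (e.g. '23:30:00-06:00:00'). Schedule is a
# singleton; other coroutines poll its scheduled_sleep flag before doing work.
def _example_schedule():
    import asyncio
    loop = asyncio.get_event_loop()
    schedule = Schedule()
    loop.create_task(schedule.run('01:00:00-06:00:00;12:30:00-13:00:00'))
    # ... elsewhere: if not schedule.scheduled_sleep: do_work()
    # then: loop.run_forever()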
| 41.483516 | 111 | 0.567417 |
4a1ea48fb9f91d4bd3ee339e9f8e385b28db575c
| 3,936 |
py
|
Python
|
examples/dqn_m0.py
|
NICALab/Inducing-Functions-through-RL
|
e2171ff5e14bb272353e7df5156104ad2a85a3ae
|
[
"MIT"
] | 1 |
2022-01-07T10:00:48.000Z
|
2022-01-07T10:00:48.000Z
|
examples/dqn_m0.py
|
NICALab/Inducing-Functions-through-RL
|
e2171ff5e14bb272353e7df5156104ad2a85a3ae
|
[
"MIT"
] | null | null | null |
examples/dqn_m0.py
|
NICALab/Inducing-Functions-through-RL
|
e2171ff5e14bb272353e7df5156104ad2a85a3ae
|
[
"MIT"
] | null | null | null |
import argparse
import gym
from torch import nn as nn
import numpy as np
from rlkit.exploration_strategies.base import \
PolicyWrappedWithExplorationStrategy
from rlkit.exploration_strategies.epsilon_greedy import EpsilonGreedy
from rlkit.policies.argmax import ArgmaxDiscretePolicyM0
from rlkit.torch.dqn.dqn import DQNTrainerM0
from rlkit.torch.networks.custom import BaselineM0
import rlkit.torch.pytorch_util as ptu
from rlkit.data_management.env_replay_buffer import EnvReplayBufferM0
from rlkit.launchers.launcher_util import setup_logger
from rlkit.samplers.data_collector import MdpPathCollectorM0
from rlkit.torch.torch_rl_algorithm import TorchBatchRLAlgorithm
from env_survive.env_survive import EnvSurvive, vision_map, action_map
def experiment(variant):
expl_env = EnvSurvive(path='./env_survive/mnist', seed=0, raw_vision=False, memory_task=True)
eval_env = EnvSurvive(path='./env_survive/mnist', seed=0, raw_vision=False, memory_task=True)
qf = BaselineM0()
target_qf = BaselineM0()
'''
# M0 with known mlp
# load Baseline-V0
import torch
f_path = './data/dqn-survive/dqn-survive_2021_01_12_17_23_14_0000--s-0/itr_1800.pkl'
data = torch.load(f_path)
BaselineV0 = data['evaluation/policy'].qf.mlp
source = BaselineV0
for target_param, param in zip(qf.mlp.parameters(), source.parameters()):
target_param.data.copy_(param)
for target_param, param in zip(target_qf.mlp.parameters(), source.parameters()):
target_param.data.copy_(param)
'''
qf_criterion = nn.MSELoss()
eval_policy = ArgmaxDiscretePolicyM0(qf)
expl_policy = PolicyWrappedWithExplorationStrategy(
EpsilonGreedy(expl_env.action_space),
eval_policy,
)
eval_path_collector = MdpPathCollectorM0(
eval_env,
eval_policy,
)
expl_path_collector = MdpPathCollectorM0(
expl_env,
expl_policy,
)
trainer = DQNTrainerM0(
qf=qf,
target_qf=target_qf,
qf_criterion=qf_criterion,
**variant['trainer_kwargs']
)
replay_buffer = EnvReplayBufferM0(
variant['replay_buffer_size'],
expl_env,
)
algorithm = TorchBatchRLAlgorithm(
trainer=trainer,
exploration_env=expl_env,
evaluation_env=eval_env,
exploration_data_collector=expl_path_collector,
evaluation_data_collector=eval_path_collector,
replay_buffer=replay_buffer,
**variant['algorithm_kwargs']
)
algorithm.to(ptu.device)
algorithm.train()
if __name__ == "__main__":
variant = dict(
algorithm="DQN",
version="normal",
replay_buffer_size=int(1E5),
algorithm_kwargs=dict(
num_epochs=2000,
num_eval_steps_per_epoch=1000,
num_trains_per_train_loop=500,
num_expl_steps_per_train_loop=1000,
min_num_steps_before_training=2000,
max_path_length=500,
batch_size=256,
),
trainer_kwargs=dict(
discount=0.99,
learning_rate=3E-4,
),
)
setup_logger(exp_prefix='dqn-survive',
variant=variant,
text_log_file="debug.log",
variant_log_file="variant.json",
tabular_log_file="progress.csv",
snapshot_mode="gap_and_last",
snapshot_gap=200,
log_tabular_only=False,
log_dir=None,
git_infos=None,
script_name=None,
# **create_log_dir_kwargs
base_log_dir='./data',
exp_id=1,
seed=0) # baseline m0
ptu.set_gpu_mode(True)
experiment(variant)
'''
for np1, np2 in zip(target.named_parameters(), source.named_parameters()):
n1 = np1[0] # parameter name
p1 = np1[1] # paramter value
n2 = np2[0]
print(n1, n2)
'''
| 32.528926 | 97 | 0.662093 |
4a1ea5f75edf2a8f91a61b622be4ca7e65701280
| 5,156 |
py
|
Python
|
exam_terminal/__main__.py
|
ismet55555/exam-terminal
|
2d17cc3ccc71c17c8eb37e3adc23a52f8bc56e57
|
[
"Apache-2.0"
] | 14 |
2020-11-25T22:16:35.000Z
|
2022-01-08T20:41:44.000Z
|
exam_terminal/__main__.py
|
ismet55555/exam-terminal
|
2d17cc3ccc71c17c8eb37e3adc23a52f8bc56e57
|
[
"Apache-2.0"
] | 23 |
2021-02-10T01:37:38.000Z
|
2022-03-31T10:22:16.000Z
|
exam_terminal/__main__.py
|
ismet55555/exam-terminal
|
2d17cc3ccc71c17c8eb37e3adc23a52f8bc56e57
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
import logging
import os
import sys
import sysconfig
from urllib.parse import urlparse
import click
from exam_terminal import exam_terminal, utility
# Creating a message logger, all dependent scripts will inhearent this logger
logging.basicConfig(format='[%(asctime)s][%(levelname)-8s] [%(filename)-30s:%(lineno)4s] %(message)s', datefmt='%m/%d-%H:%M:%S')
logger = logging.getLogger()
logger.setLevel(logging.INFO) # <--- Manually change debug level here (DEBUG, INFO, ERROR, etc)
if logger.level == logging.DEBUG:
logger.addHandler(logging.FileHandler("exam-terminal.log"))
@click.command(context_settings={"ignore_unknown_options": True})
@click.option('-s', '--sample', is_flag=True, default=False, type=bool, help='Set this flag to run a sample exam, just to check things out')
@click.option('-e', '--examfile', required=False, default='', type=str, help='Local path or remote URL to the exam YAML file to be loaded')
def main(sample, examfile) -> None:
"""
\b
_ _ _
| | (_) | |
_____ ____ _ _ __ ___ ______| |_ ___ _ __ _ __ ___ _ _ __ __ _| |
/ _ \ \/ / _` | '_ ` _ \______| __/ _ \ '__| '_ ` _ \| | '_ \ / _` | |
| __/> < (_| | | | | | | | || __/ | | | | | | | | | | | (_| | |
\___/_/\_\__,_|_| |_| |_| \__\___|_| |_| |_| |_|_|_| |_|\__,_|_|
Use this little terminal program to perform a exam/quiz/test using a
predefined YML (or YAML) file containing exam information.
\b
Example Usages:
exam-terminal --sample
exam-terminal -e MyExam.yml
exam-terminal -examfile ~/Documents/Exams/SomeExam.yaml
exam-terminal -e "/home/you/review.yml"
exam-terminal -e https://raw.githubusercontent.com/ismet55555/exam-terminal/master/exam_terminal/exams/sample_exam.yml
For even more help visit:
https://github.com/ismet55555/exam-terminal
"""
logger.debug(f'--sample = {sample}')
logger.debug(f'--examfile = {examfile}')
# Check if any options have been passed
if not sample and not examfile:
ctx = click.get_current_context()
click.echo(click.style("Uh-Oh! Something's wrong here ...", fg='bright_red', bold=True))
ctx.fail(click.style("User Input Error: No exam-terminal options were specified. Please specify any option.", fg='bright_red', bold=True))
# Sample examfile
exam_file_location = ''
exam_file_contents = {}
if sample and not examfile:
# If local does not exist, try site-package
exam_file_location = os.path.abspath(os.path.join("exam_terminal", "exams", "sample_exam.yml"))
if not os.path.exists(exam_file_location):
logger.debug(f'Failed to find {exam_file_location}, trying python site-package directory ...')
site_package_dir = sysconfig.get_paths()["purelib"]
exam_file_location = os.path.abspath(os.path.join(site_package_dir, "exam_terminal", "exams", "sample_exam.yml"))
logger.debug(f'Using sample exam file: {exam_file_location}')
# Load the file
exam_file_contents = utility.load_examfile_contents_from_local_file(exam_file_location)
# Specified exam file location
if examfile:
# Check if examfile is passed as local path or remote URL to be downloaded
if bool(urlparse(examfile).scheme):
# Loading file from remote URL
exam_file_contents = utility.load_examfile_contents_from_url(examfile)
else:
# Loading local file
logger.debug(f'Passed local exam file: {click.format_filename(examfile)}')
exam_file_location = os.path.abspath(click.format_filename(examfile))
logger.debug(f'Interpreted local exam file path: {exam_file_location}')
# Check if examfile exists locally
if not os.path.exists(exam_file_location):
ctx = click.get_current_context()
click.echo(click.style("Uh-Oh! Something's wrong here ...", fg='bright_red', bold=True))
ctx.fail(click.style(f"User Input Error: The exam file which you specified does not exist: {exam_file_location}", fg='bright_red', bold=True))
# Load the file
exam_file_contents = utility.load_examfile_contents_from_local_file(exam_file_location)
# Run exam-terminal
exitcode = 0
if exam_file_contents:
exitcode = exam_terminal.exam_terminal(exam_file_contents)
else:
ctx = click.get_current_context()
ctx.fail(click.style(f"Failed to load the specified file '{examfile}'. Check file location or format.", fg='bright_red', bold=True))
if not exitcode:
click.echo(click.style("Done", fg='bright_green', bold=True))
sys.exit(exitcode)
if __name__ == "__main__":
"""
Main entry point to the entire program.
This file and this function will be called when running the program.
Parameters: None
Returns: None
"""
main()
| 44.068376 | 158 | 0.635764 |
4a1ea6e5d39fc5735a08fc5742d954224e5df539
| 920 |
py
|
Python
|
python/func_with_test.py
|
hermantai/samples
|
8b76e17b57c39876c65cb3aac65dc4f9079858a7
|
[
"Apache-2.0"
] | 1 |
2016-08-17T03:19:55.000Z
|
2016-08-17T03:19:55.000Z
|
python/func_with_test.py
|
hermantai/samples
|
8b76e17b57c39876c65cb3aac65dc4f9079858a7
|
[
"Apache-2.0"
] | null | null | null |
python/func_with_test.py
|
hermantai/samples
|
8b76e17b57c39876c65cb3aac65dc4f9079858a7
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
"""A template for creating functions or classes with unit tests.
It's mainly used for rapidly prototyping several functions or classes.
Run this template with:
python func_with_test.py
or
python func_with_test.py -v
"""
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import unittest
class ClassA(object):
pass
def func(a, b):
return a + b
class ClassATestCase(unittest.TestCase):
pass
# To test functions in the module, I prefer having a single TestCase using the
# module name.
class FuncWithTestTestCase(unittest.TestCase):
def test_func_1And2_3(self):
self.assertEqual(3, func(1, 2))
self.assertEqual(
3,
func(
1,
2,
),
)
if __name__ == '__main__':
unittest.main()
| 20 | 78 | 0.676087 |
4a1ea6fbcb312db5572bab0cde9425ae23ef77ee | 460 | py | Python | app_collaborative_sci_workflow/pipeline_modules/Source_Get_Fragments/Source_Get_Fragments_main.py | pseudoPixels/SourceFlow | e1738c8b838c71b18598ceca29d7c487c76f876b | ["MIT"] | null | null | null | app_collaborative_sci_workflow/pipeline_modules/Source_Get_Fragments/Source_Get_Fragments_main.py | pseudoPixels/SourceFlow | e1738c8b838c71b18598ceca29d7c487c76f876b | ["MIT"] | null | null | null | app_collaborative_sci_workflow/pipeline_modules/Source_Get_Fragments/Source_Get_Fragments_main.py | pseudoPixels/SourceFlow | e1738c8b838c71b18598ceca29d7c487c76f876b | ["MIT"] | null | null | null |
import subprocess
# NOTE: source_directory, granularity, language, select_pattern, ignore_pattern
# and source_fragments are expected to be bound by the enclosing workflow
# before this module runs.
lines = ''
with open(source_directory) as module_1_inp:
    lines = module_1_inp.readlines()

# Only read the first line (in case the file has multiple), stripping the
# trailing newline so it can be passed cleanly as a subprocess argument.
source_directory = lines[0].strip()
pipe = subprocess.Popen(
["/bin/bash", "/home/ubuntu/Webpage/app_collaborative_sci_workflow/External_Libraries/NiCad-4.0/scripts/Extract",
granularity, language,
source_directory, select_pattern, ignore_pattern,
source_fragments]).communicate()
| 24.210526 | 117 | 0.758696 |
4a1ea7cd106baac6e04eaa537e1f403644c79900 | 2,909 | py | Python | pypgqueue/consumer.py | MarekSuchanek/pypgqueue | e11d161a804f28de3c0557403763293f594871de | ["MIT"] | null | null | null | pypgqueue/consumer.py | MarekSuchanek/pypgqueue | e11d161a804f28de3c0557403763293f594871de | ["MIT"] | null | null | null | pypgqueue/consumer.py | MarekSuchanek/pypgqueue | e11d161a804f28de3c0557403763293f594871de | ["MIT"] | null | null | null |
import select
import psycopg2
import psycopg2.extensions
import time
from pypgqueue.consts import LISTEN_TIMEOUT
from pypgqueue.database import DatabaseConfig, Database
from pypgqueue.logging import logger
class Consumer:
def __init__(self, db_config: DatabaseConfig, name: str):
self.db_config = db_config
self.name = name
self.conn_queue = psycopg2.connect(db_config.connection_string)
self.conn_query = psycopg2.connect(db_config.connection_string)
self.numbers = list()
self.conn_queue.set_isolation_level(psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT)
def _work(self):
logger.info(f'Consumer {self.name}: working')
cursor = self.conn_query.cursor()
cursor.execute(Database.SELECT_JOB)
result = cursor.fetchall()
if len(result) != 1:
logger.info(f'Consumer {self.name}: fetched {len(result)} jobs')
return False
job = Database.get_as_job(result[0])
logger.info(f'Consumer {self.name}: fetched job {job.id}')
logger.info(f'Consumer {self.name}: message - {job.message}')
logger.info(f'Consumer {self.name}: computing result')
self.numbers.append(job.number)
result = job.number ** 2
time.sleep(1)
logger.info(f'Consumer {self.name}: result computed')
logger.info(f'Consumer {self.name}: storing result')
message = f'Result for number {job.number} (computed by {self.name})'
cursor.execute(
query=Database.INSERT_RESULT,
vars=Database.result_query_args(message, result),
)
logger.info(f'Consumer {self.name}: deleting job')
cursor.execute(
query=Database.DELETE_JOB,
vars=(job.id,)
)
logger.info(f'Consumer {self.name}: committing')
self.conn_query.commit()
cursor.close()
return True
def run(self):
logger.info(f'Consumer {self.name}: starting')
cursor = self.conn_queue.cursor()
cursor.execute(Database.LISTEN)
while True:
logger.info(f'Consumer {self.name}: trying to do some work')
should_work = True
while should_work:
should_work = self._work()
logger.info(f'Consumer {self.name} processed: {self.numbers}')
logger.info(f'Consumer {self.name}: waiting for notifications')
if select.select([self.conn_queue], [], [], LISTEN_TIMEOUT) == ([], [], []):
logger.info(f'Consumer {self.name}: nothing received in this cycle...')
else:
self.conn_queue.poll()
notifications = []
while self.conn_queue.notifies:
notifications.append(self.conn_queue.notifies.pop())
logger.info(f'Consumer {self.name}: {len(notifications)} notifications received')
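
# Minimal usage sketch (illustrative addition, not part of the original module):
# the DatabaseConfig constructor arguments depend on pypgqueue.database, so the
# connection details below are left as placeholders. run() loops forever,
# draining jobs and then blocking on LISTEN/NOTIFY with a timeout.
#
#   from pypgqueue.database import DatabaseConfig
#   Consumer(DatabaseConfig(...), name="worker-1").run()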
| 39.310811 | 97 | 0.623238 |
4a1ea95e589233ebc104cd1cf32b7777b781ba89 | 591 | py | Python | env/lib/python3.8/site-packages/plotly/validators/layout/polar/angularaxis/_showexponent.py | acrucetta/Chicago_COVI_WebApp | a37c9f492a20dcd625f8647067394617988de913 | ["MIT", "Unlicense"] | 76 | 2020-07-06T14:44:05.000Z | 2022-02-14T15:30:21.000Z | env/lib/python3.8/site-packages/plotly/validators/layout/polar/angularaxis/_showexponent.py | acrucetta/Chicago_COVI_WebApp | a37c9f492a20dcd625f8647067394617988de913 | ["MIT", "Unlicense"] | 11 | 2020-08-09T02:30:14.000Z | 2022-03-12T00:50:14.000Z | env/lib/python3.8/site-packages/plotly/validators/layout/polar/angularaxis/_showexponent.py | acrucetta/Chicago_COVI_WebApp | a37c9f492a20dcd625f8647067394617988de913 | ["MIT", "Unlicense"] | 11 | 2020-07-12T16:18:07.000Z | 2022-02-05T16:48:35.000Z |
import _plotly_utils.basevalidators
class ShowexponentValidator(_plotly_utils.basevalidators.EnumeratedValidator):
def __init__(
self,
plotly_name="showexponent",
parent_name="layout.polar.angularaxis",
**kwargs
):
super(ShowexponentValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "plot"),
role=kwargs.pop("role", "style"),
values=kwargs.pop("values", ["all", "first", "last", "none"]),
**kwargs
)
| 31.105263 | 78 | 0.609137 |
4a1ea98284c78ed66bf0482025e940ca834d1502 | 681 | py | Python | lvjichuan/aipTest.py | qsyPython/Python_play_now | 278b6d5d30082f8f93b26902c854737c4919405a | ["MIT"] | 2 | 2018-03-29T08:26:17.000Z | 2019-06-17T10:56:19.000Z | lvjichuan/aipTest.py | qsyPython/Python_play_now | 278b6d5d30082f8f93b26902c854737c4919405a | ["MIT"] | 1 | 2022-03-22T20:26:08.000Z | 2022-03-22T20:26:08.000Z | lvjichuan/aipTest.py | qsyPython/Python_play_now | 278b6d5d30082f8f93b26902c854737c4919405a | ["MIT"] | 1 | 2019-02-18T10:44:20.000Z | 2019-02-18T10:44:20.000Z |
from aip import AipSpeech
import pygame
import time
""" 你的 APPID AK SK 个人开发key,请勿乱用"""
APP_ID = '6504962'
API_KEY = 'ogCRv6RoBvgMYak5Fxa53njg'
SECRET_KEY = 'Aw1DWipvMGZgIGSkUxG2DEC8BFGQBgLl'
client = AipSpeech(APP_ID, API_KEY, SECRET_KEY)
def playMusic():
pygame.mixer.init()
print("开始朗读")
pygame.time.delay(1000)
track = pygame.mixer.music.load('auido.mp3')
pygame.mixer.music.play()
time.sleep(10)
pygame.mixer.music.stop()
# Synthesize speech for the Chinese lyric below (language 'zh', volume 5)
result = client.synthesis('长亭外,古道边,芳草碧连天。', 'zh', 1, {
'vol': 5,
})
# On success the API returns the raw audio bytes; on failure it returns a dict
# describing the error code, so only write and play the file when synthesis worked.
if not isinstance(result, dict):
    with open('auido.mp3', 'wb') as f:
        f.write(result)
    playMusic()
else:
    print(result)
| 22.7 | 55 | 0.687225 |
4a1eaa4596baa24065ab8a9866844aa082a86f0e | 6,292 | py | Python | scripts/train_baselines_mocap.py | weirayao/leap | 8d10b8413d02d3be49d5c02a13a0aa60a741d8da | ["MIT"] | 7 | 2022-01-06T18:37:57.000Z | 2022-03-20T17:11:30.000Z | scripts/train_baselines_mocap.py | weirayao/leap | 8d10b8413d02d3be49d5c02a13a0aa60a741d8da | ["MIT"] | null | null | null | scripts/train_baselines_mocap.py | weirayao/leap | 8d10b8413d02d3be49d5c02a13a0aa60a741d8da | ["MIT"] | null | null | null |
"running the baseline file: main.py"
import warnings
warnings.filterwarnings('ignore')
import argparse
import os, pwd, yaml
import pytorch_lightning as pl
from torch.utils.data import DataLoader, random_split
"utils file (SAME)"
from leap.tools.utils import load_yaml
# Stationary:
from leap.datasets.sim_dataset import SimulationDatasetTSTwoSample
# Nonstationary:
from leap.datasets.mocap_dataset import MocapTwoSample, MocapTwoSampleNS
"baseline list"
from leap.baselines.TCL.model import TCL
# from leap.baselines.PCL.model import PCL # deprecated
from leap.baselines.iVAE.model import iVAE
from leap.baselines.BetaVAE.model import BetaVAE
from leap.baselines.SlowVAE.model import SlowVAE
from leap.baselines.FactorVAE.model import FactorVAE
from pytorch_lightning.callbacks.early_stopping import EarlyStopping
from pytorch_lightning.callbacks import ModelCheckpoint
def main(args):
assert args.exp is not None, "FATAL: "+__file__+": You must specify an exp config file (e.g., *.yaml)"
current_user = pwd.getpwuid(os.getuid()).pw_name
script_dir = os.path.dirname(__file__)
rel_path = os.path.join('../leap/configs', '%s.yaml'%args.exp)
abs_file_path = os.path.join(script_dir, rel_path)
cfg = load_yaml(abs_file_path)
print("######### Configuration #########")
print(yaml.dump(cfg, default_flow_style=False))
print("#################################")
pl.seed_everything(args.seed)
if cfg['NS']:
data = MocapTwoSampleNS(directory=cfg['ROOT'], dataset=cfg['DATASET'])
else:
data = MocapTwoSample(directory=cfg['ROOT'], dataset=cfg['DATASET'])
num_validation_samples = cfg['VAE']['N_VAL_SAMPLES']
train_data, val_data = random_split(data, [len(data)-num_validation_samples, num_validation_samples])
train_loader = DataLoader(train_data,
batch_size=cfg['VAE']['TRAIN_BS'],
pin_memory=cfg['VAE']['PIN'],
num_workers=cfg['VAE']['CPU'],
drop_last=False,
shuffle=True)
val_loader = DataLoader(val_data,
batch_size=cfg['VAE']['VAL_BS'],
pin_memory=cfg['VAE']['PIN'],
num_workers=cfg['VAE']['CPU'],
shuffle=False)
if cfg['MODEL'] == "TCL":
model = TCL(input_dim=cfg['VAE']['INPUT_DIM'],
z_dim=cfg['VAE']['LATENT_DIM'],
nclass=cfg['TCL']['NCLASS'],
hidden_dim=cfg['VAE']['ENC']['HIDDEN_DIM'],
lr=cfg['TCL']['LR'],
correlation=cfg['MCC']['CORR'])
monitor = 'val_loss'
elif cfg['MODEL'] == "iVAE":
model = iVAE(input_dim=cfg['VAE']['INPUT_DIM'],
z_dim=cfg['VAE']['LATENT_DIM'],
hidden_dim=cfg['VAE']['ENC']['HIDDEN_DIM'],
lr=cfg['iVAE']['LR'],
correlation=cfg['MCC']['CORR'])
monitor = 'val_vae_loss'
elif cfg['MODEL'] == "BetaVAE":
model = BetaVAE(input_dim=cfg['VAE']['INPUT_DIM'],
z_dim=cfg['VAE']['LATENT_DIM'],
hidden_dim=cfg['VAE']['ENC']['HIDDEN_DIM'],
beta=cfg['BetaVAE']['BETA'],
beta1=cfg['BetaVAE']['beta1_VAE'],
beta2=cfg['BetaVAE']['beta2_VAE'],
lr=cfg['BetaVAE']['LR'],
correlation=cfg['MCC']['CORR'])
monitor = 'val_vae_loss'
elif cfg['MODEL'] == "SlowVAE":
model = SlowVAE(input_dim=cfg['VAE']['INPUT_DIM'],
z_dim=cfg['VAE']['LATENT_DIM'],
hidden_dim=cfg['VAE']['ENC']['HIDDEN_DIM'],
beta=cfg['SlowVAE']['BETA'],
gamma=cfg['SlowVAE']['GAMMA'],
beta1=cfg['SlowVAE']['beta1_VAE'],
beta2=cfg['SlowVAE']['beta2_VAE'],
lr=cfg['SlowVAE']['LR'],
rate_prior=cfg['SlowVAE']['RATE_PRIOR'],
correlation=cfg['MCC']['CORR'])
monitor = 'val_vae_loss'
elif cfg['MODEL'] == "FactorVAE":
model = FactorVAE(input_dim=cfg['VAE']['INPUT_DIM'],
z_dim=cfg['VAE']['LATENT_DIM'],
hidden_dim=cfg['VAE']['ENC']['HIDDEN_DIM'],
gamma=cfg['FactorVAE']['GAMMA'],
lr_VAE=cfg['FactorVAE']['LR_VAE'],
beta1_VAE=cfg['FactorVAE']['beta1_VAE'],
beta2_VAE=cfg['FactorVAE']['beta2_VAE'],
lr_D=cfg['FactorVAE']['LR_D'],
beta1_D=cfg['FactorVAE']['beta1_D'],
beta2_D=cfg['FactorVAE']['beta2_D'],
correlation=cfg['MCC']['CORR'])
monitor = 'val_vae_loss'
log_dir = os.path.join(cfg["LOG"], current_user, args.exp)
checkpoint_callback = ModelCheckpoint(monitor=monitor,
save_top_k=1,
mode='min')
early_stop_callback = EarlyStopping(monitor=monitor,
min_delta=0.00,
patience=10,
verbose=False,
mode="min")
trainer = pl.Trainer(default_root_dir=log_dir,
gpus=cfg['VAE']['GPU'],
val_check_interval = cfg['MCC']['FREQ'],
max_epochs=cfg['VAE']['EPOCHS'],
deterministic=True,
callbacks=[checkpoint_callback, early_stop_callback])
# Train the model
trainer.fit(model, train_loader, val_loader)
if __name__ == "__main__":
argparser = argparse.ArgumentParser(description=__doc__)
argparser.add_argument(
'-e',
'--exp',
type=str
)
argparser.add_argument(
'-s',
'--seed',
type=int,
default=770
)
args = argparser.parse_args()
main(args)
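
# Example invocation (illustrative; "mocap_tcl" stands in for any YAML config
# under leap/configs -- the actual config names in the repo may differ):
#
#   python train_baselines_mocap.py -e mocap_tcl -s 770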
| 40.593548 | 106 | 0.518595 |
4a1eaaa5afe90cff2ce23b90ed083d8704f2316b | 3,750 | py | Python | python/coursera_python/WESLEYAN/week4/phone.py | SayanGhoshBDA/code-backup | 8b6135facc0e598e9686b2e8eb2d69dd68198b80 | ["MIT"] | 16 | 2018-11-26T08:39:42.000Z | 2019-05-08T10:09:52.000Z | python/coursera_python/WESLEYAN/week4/phone.py | SayanGhoshBDA/code-backup | 8b6135facc0e598e9686b2e8eb2d69dd68198b80 | ["MIT"] | 8 | 2020-05-04T06:29:26.000Z | 2022-02-12T05:33:16.000Z | python/coursera_python/WESLEYAN/week4/phone.py | SayanGhoshBDA/code-backup | 8b6135facc0e598e9686b2e8eb2d69dd68198b80 | ["MIT"] | 5 | 2020-02-11T16:02:21.000Z | 2021-02-05T07:48:30.000Z |
# phones.py -*- coding: utf-8 -*-
"""
This program maintains a database of names and phone numbers in a csv
file called myphones.csv. It is run from the command line and is menu
driven. To start it, save it in a directory and from the terminal run
>python phones.py
Version FINAL:
"""
import os
import csv
phones = []
name_pos = 0
phone_pos = 1
phone_header = [ 'Name', 'Phone Number']
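
# The CSV produced by save_phone_list() has one row per entry and no header
# line; an illustrative myphones.csv (the names and numbers are made up):
#   Alice,555-0100
#   Bob,555-0199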
def proper_menu_choice(which):
if not which.isdigit():
print ("'" + which + "' needs to be the number of a phone!")
return False
which = int(which)
if which < 1 or which > len(phones):
print ("'" + str(which) + "' needs to be the number of a phone!")
return False
return True
def delete_phone(which):
if not proper_menu_choice(which):
return
which = int(which)
del phones[which-1]
print( "Deleted phone #", which)
def edit_phone(which):
if not proper_menu_choice(which):
return
which = int(which)
phone = phones[which-1]
print("Enter the data for a new phone. Press <enter> to leave unchanged.")
print(phone[name_pos])
newname = input("Enter phone name to change or press return: ")
if newname == "":
newname = phone[name_pos]
print(phone[phone_pos])
newphone_num = input("Enter new phone number to change or press return: ")
if newphone_num == "":
newphone_num = phone[phone_pos]
phone = [newname, newphone_num]
phones[which-1] = phone
def save_phone_list():
    with open("myphones.csv", 'w', newline='') as f:
        writer = csv.writer(f)
        for item in phones:
            writer.writerow(item)
def load_phone_list():
if os.access("myphones.csv",os.F_OK):
f = open("myphones.csv")
for row in csv.reader(f):
phones.append(row)
f.close()
def show_phones():
show_phone(phone_header, "")
index = 1
for phone in phones:
show_phone(phone, index)
index = index + 1
print()
def show_phone(phone, index):
outputstr = "{0:>3} {1:<20} {2:>16}"
print(outputstr.format(index, phone[name_pos], phone[phone_pos]))
def create_phone():
print("Enter the data for a new phone:")
newname = input("Enter name: ")
newphone_num = input("Enter phone number: ")
phone = [newname,newphone_num]
phones.append(phone)
def menu_choice():
""" Find out what the user wants to do next. """
print("Choose one of the following options?")
print(" s) Show")
print(" n) New")
print(" d) Delete")
print(" e) Edit")
print(" q) Quit")
choice = input("Choice: ")
if choice.lower() in ['n','d', 's','e', 'q']:
return choice.lower()
else:
print(choice +"?")
print("Invalid option")
return None
def main_loop():
load_phone_list()
while True:
choice = menu_choice()
if choice == None:
continue
if choice == 'q':
print( "Exiting...")
break # jump out of while loop
elif choice == 'n':
create_phone()
elif choice == 'd':
which = input("Which item do you want to delete? ")
print("which is ", which)
delete_phone(which)
elif choice == 's':
show_phones()
elif choice == 'e':
which = input("Which item do you want to edit? ")
print("which is ", which)
edit_phone(which)
else:
print("Invalid choice.")
save_phone_list()
# The following makes this program start running at main_loop()
# when executed as a stand-alone program.
if __name__ == '__main__':
main_loop()
| 26.408451 | 78 | 0.5768 |
4a1eab0e1301d7487afa0c61a6bdbe11a708b316 | 962 | py | Python | modules/extract_features.py | rlorigro/mungus | 2ff8d94f58b9f280399157912a999d5f31a459fd | ["MIT"] | null | null | null | modules/extract_features.py | rlorigro/mungus | 2ff8d94f58b9f280399157912a999d5f31a459fd | ["MIT"] | null | null | null | modules/extract_features.py | rlorigro/mungus | 2ff8d94f58b9f280399157912a999d5f31a459fd | ["MIT"] | null | null | null |
from scipy import signal
import numpy
import cv2
import os
SCHARR_KERNEL = numpy.array([[ -3-3j, 0-10j, +3 -3j],
[-10+0j, 0+ 0j, +10 +0j],
[ -3+3j, 0+10j, +3 +3j]]) # Gx + j*Gy
def extract_features(grayscale_image, kernel):
convolved = signal.convolve2d(grayscale_image, kernel, mode="valid")
cv2.imshow("B", numpy.absolute(convolved))
cv2.waitKey()
if __name__ == "__main__":
project_directory = os.path.dirname(__file__)
data_directory = os.path.join(project_directory, "data")
test_data_paths = os.listdir(data_directory)
for i in range(0,len(test_data_paths)):
path_a = test_data_paths[i]
absolute_path_a = os.path.join(data_directory, path_a)
image_a = cv2.imread(absolute_path_a)
grayscale_a = cv2.cvtColor(image_a, cv2.COLOR_BGR2GRAY)
extract_features(grayscale_a, SCHARR_KERNEL)
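
# Note (added for clarity, not in the original module): because SCHARR_KERNEL is
# complex (Gx + j*Gy), a single convolution yields both gradient components, so
# edge strength and direction can be read off the result directly:
#   magnitude = numpy.absolute(convolved)
#   orientation = numpy.angle(convolved)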
| 27.485714 | 73 | 0.621622 |
4a1eab6beb1b56acd7dfd93d708acc51876b6d4c | 22,392 | py | Python | pybind/nos/v7_1_0/mac_address_table/__init__.py | shivharis/pybind | 4e1c6d54b9fd722ccec25546ba2413d79ce337e6 | ["Apache-2.0"] | null | null | null | pybind/nos/v7_1_0/mac_address_table/__init__.py | shivharis/pybind | 4e1c6d54b9fd722ccec25546ba2413d79ce337e6 | ["Apache-2.0"] | null | null | null | pybind/nos/v7_1_0/mac_address_table/__init__.py | shivharis/pybind | 4e1c6d54b9fd722ccec25546ba2413d79ce337e6 | ["Apache-2.0"] | 1 | 2021-11-05T22:15:42.000Z | 2021-11-05T22:15:42.000Z |
from operator import attrgetter
import pyangbind.lib.xpathhelper as xpathhelper
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType, RestrictedClassType, TypedListType
from pyangbind.lib.yangtypes import YANGBool, YANGListType, YANGDynClass, ReferenceType
from pyangbind.lib.base import PybindBase
from decimal import Decimal
from bitarray import bitarray
import __builtin__
import static
import aging_time
import mac_move
import consistency_check
class mac_address_table(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module brocade-mac-address-table - based on the path /mac-address-table. Each member element of
the container is represented as a class variable - with a specific
YANG type.
"""
__slots__ = ('_pybind_generated_by', '_path_helper', '_yang_name', '_rest_name', '_extmethods', '__static','__learning_mode','__aging_time','__mac_move','__consistency_check',)
_yang_name = 'mac-address-table'
_rest_name = 'mac-address-table'
_pybind_generated_by = 'container'
def __init__(self, *args, **kwargs):
path_helper_ = kwargs.pop("path_helper", None)
if path_helper_ is False:
self._path_helper = False
elif path_helper_ is not None and isinstance(path_helper_, xpathhelper.YANGPathHelper):
self._path_helper = path_helper_
elif hasattr(self, "_parent"):
path_helper_ = getattr(self._parent, "_path_helper", False)
self._path_helper = path_helper_
else:
self._path_helper = False
extmethods = kwargs.pop("extmethods", None)
if extmethods is False:
self._extmethods = False
elif extmethods is not None and isinstance(extmethods, dict):
self._extmethods = extmethods
elif hasattr(self, "_parent"):
extmethods = getattr(self._parent, "_extmethods", None)
self._extmethods = extmethods
else:
self._extmethods = False
self.__consistency_check = YANGDynClass(base=consistency_check.consistency_check, is_container='container', presence=False, yang_name="consistency-check", rest_name="consistency-check", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'MAC Consistency check', u'callpoint': u'mac-move-callpoint', u'cli-incomplete-no': None}}, namespace='urn:brocade.com:mgmt:brocade-mac-address-table', defining_module='brocade-mac-address-table', yang_type='container', is_config=True)
self.__aging_time = YANGDynClass(base=aging_time.aging_time, is_container='container', presence=False, yang_name="aging-time", rest_name="aging-time", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Aging Time', u'callpoint': u'mac-aging-callpoint'}}, namespace='urn:brocade.com:mgmt:brocade-mac-address-table', defining_module='brocade-mac-address-table', yang_type='container', is_config=True)
self.__mac_move = YANGDynClass(base=mac_move.mac_move, is_container='container', presence=False, yang_name="mac-move", rest_name="mac-move", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'MAC move', u'callpoint': u'mac-move-callpoint', u'cli-incomplete-no': None}}, namespace='urn:brocade.com:mgmt:brocade-mac-address-table', defining_module='brocade-mac-address-table', yang_type='container', is_config=True)
self.__static = YANGDynClass(base=YANGListType("mac_address forward interface_type interface_name vlan vlanid",static.static, yang_name="static", rest_name="static", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='mac-address forward interface-type interface-name vlan vlanid', extensions={u'tailf-common': {u'info': u'Static address', u'cli-no-key-completion': None, u'cli-suppress-mode': None, u'cli-suppress-list-no': None, u'cli-sequence-commands': None, u'hidden': u'wyser-write-hook', u'callpoint': u'static-mac-callpoint'}}), is_container='list', yang_name="static", rest_name="static", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Static address', u'cli-no-key-completion': None, u'cli-suppress-mode': None, u'cli-suppress-list-no': None, u'cli-sequence-commands': None, u'hidden': u'wyser-write-hook', u'callpoint': u'static-mac-callpoint'}}, namespace='urn:brocade.com:mgmt:brocade-mac-address-table', defining_module='brocade-mac-address-table', yang_type='list', is_config=True)
self.__learning_mode = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'conversational': {'value': 1}},), is_leaf=True, yang_name="learning-mode", rest_name="learning-mode", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Conversational Learning Mode', u'cli-full-command': None, u'callpoint': u'learning-mode-callpoint', u'display-when': u'/vcsmode/vcs-mode = "true"'}}, namespace='urn:brocade.com:mgmt:brocade-mac-address-table', defining_module='brocade-mac-address-table', yang_type='enumeration', is_config=True)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path()+[self._yang_name]
else:
return [u'mac-address-table']
def _rest_path(self):
if hasattr(self, "_parent"):
if self._rest_name:
return self._parent._rest_path()+[self._rest_name]
else:
return self._parent._rest_path()
else:
return [u'mac-address-table']
def _get_static(self):
"""
Getter method for static, mapped from YANG variable /mac_address_table/static (list)
"""
return self.__static
def _set_static(self, v, load=False):
"""
Setter method for static, mapped from YANG variable /mac_address_table/static (list)
If this variable is read-only (config: false) in the
source YANG file, then _set_static is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_static() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=YANGListType("mac_address forward interface_type interface_name vlan vlanid",static.static, yang_name="static", rest_name="static", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='mac-address forward interface-type interface-name vlan vlanid', extensions={u'tailf-common': {u'info': u'Static address', u'cli-no-key-completion': None, u'cli-suppress-mode': None, u'cli-suppress-list-no': None, u'cli-sequence-commands': None, u'hidden': u'wyser-write-hook', u'callpoint': u'static-mac-callpoint'}}), is_container='list', yang_name="static", rest_name="static", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Static address', u'cli-no-key-completion': None, u'cli-suppress-mode': None, u'cli-suppress-list-no': None, u'cli-sequence-commands': None, u'hidden': u'wyser-write-hook', u'callpoint': u'static-mac-callpoint'}}, namespace='urn:brocade.com:mgmt:brocade-mac-address-table', defining_module='brocade-mac-address-table', yang_type='list', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """static must be of a type compatible with list""",
'defined-type': "list",
'generated-type': """YANGDynClass(base=YANGListType("mac_address forward interface_type interface_name vlan vlanid",static.static, yang_name="static", rest_name="static", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='mac-address forward interface-type interface-name vlan vlanid', extensions={u'tailf-common': {u'info': u'Static address', u'cli-no-key-completion': None, u'cli-suppress-mode': None, u'cli-suppress-list-no': None, u'cli-sequence-commands': None, u'hidden': u'wyser-write-hook', u'callpoint': u'static-mac-callpoint'}}), is_container='list', yang_name="static", rest_name="static", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Static address', u'cli-no-key-completion': None, u'cli-suppress-mode': None, u'cli-suppress-list-no': None, u'cli-sequence-commands': None, u'hidden': u'wyser-write-hook', u'callpoint': u'static-mac-callpoint'}}, namespace='urn:brocade.com:mgmt:brocade-mac-address-table', defining_module='brocade-mac-address-table', yang_type='list', is_config=True)""",
})
self.__static = t
if hasattr(self, '_set'):
self._set()
def _unset_static(self):
self.__static = YANGDynClass(base=YANGListType("mac_address forward interface_type interface_name vlan vlanid",static.static, yang_name="static", rest_name="static", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='mac-address forward interface-type interface-name vlan vlanid', extensions={u'tailf-common': {u'info': u'Static address', u'cli-no-key-completion': None, u'cli-suppress-mode': None, u'cli-suppress-list-no': None, u'cli-sequence-commands': None, u'hidden': u'wyser-write-hook', u'callpoint': u'static-mac-callpoint'}}), is_container='list', yang_name="static", rest_name="static", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Static address', u'cli-no-key-completion': None, u'cli-suppress-mode': None, u'cli-suppress-list-no': None, u'cli-sequence-commands': None, u'hidden': u'wyser-write-hook', u'callpoint': u'static-mac-callpoint'}}, namespace='urn:brocade.com:mgmt:brocade-mac-address-table', defining_module='brocade-mac-address-table', yang_type='list', is_config=True)
def _get_learning_mode(self):
"""
Getter method for learning_mode, mapped from YANG variable /mac_address_table/learning_mode (enumeration)
YANG Description: Conversational Learning Mode
"""
return self.__learning_mode
def _set_learning_mode(self, v, load=False):
"""
Setter method for learning_mode, mapped from YANG variable /mac_address_table/learning_mode (enumeration)
If this variable is read-only (config: false) in the
source YANG file, then _set_learning_mode is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_learning_mode() directly.
YANG Description: Conversational Learning Mode
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'conversational': {'value': 1}},), is_leaf=True, yang_name="learning-mode", rest_name="learning-mode", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Conversational Learning Mode', u'cli-full-command': None, u'callpoint': u'learning-mode-callpoint', u'display-when': u'/vcsmode/vcs-mode = "true"'}}, namespace='urn:brocade.com:mgmt:brocade-mac-address-table', defining_module='brocade-mac-address-table', yang_type='enumeration', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """learning_mode must be of a type compatible with enumeration""",
'defined-type': "brocade-mac-address-table:enumeration",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'conversational': {'value': 1}},), is_leaf=True, yang_name="learning-mode", rest_name="learning-mode", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Conversational Learning Mode', u'cli-full-command': None, u'callpoint': u'learning-mode-callpoint', u'display-when': u'/vcsmode/vcs-mode = "true"'}}, namespace='urn:brocade.com:mgmt:brocade-mac-address-table', defining_module='brocade-mac-address-table', yang_type='enumeration', is_config=True)""",
})
self.__learning_mode = t
if hasattr(self, '_set'):
self._set()
def _unset_learning_mode(self):
self.__learning_mode = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'conversational': {'value': 1}},), is_leaf=True, yang_name="learning-mode", rest_name="learning-mode", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Conversational Learning Mode', u'cli-full-command': None, u'callpoint': u'learning-mode-callpoint', u'display-when': u'/vcsmode/vcs-mode = "true"'}}, namespace='urn:brocade.com:mgmt:brocade-mac-address-table', defining_module='brocade-mac-address-table', yang_type='enumeration', is_config=True)
def _get_aging_time(self):
"""
Getter method for aging_time, mapped from YANG variable /mac_address_table/aging_time (container)
YANG Description: Aging Time
"""
return self.__aging_time
def _set_aging_time(self, v, load=False):
"""
Setter method for aging_time, mapped from YANG variable /mac_address_table/aging_time (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_aging_time is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_aging_time() directly.
YANG Description: Aging Time
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=aging_time.aging_time, is_container='container', presence=False, yang_name="aging-time", rest_name="aging-time", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Aging Time', u'callpoint': u'mac-aging-callpoint'}}, namespace='urn:brocade.com:mgmt:brocade-mac-address-table', defining_module='brocade-mac-address-table', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """aging_time must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=aging_time.aging_time, is_container='container', presence=False, yang_name="aging-time", rest_name="aging-time", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Aging Time', u'callpoint': u'mac-aging-callpoint'}}, namespace='urn:brocade.com:mgmt:brocade-mac-address-table', defining_module='brocade-mac-address-table', yang_type='container', is_config=True)""",
})
self.__aging_time = t
if hasattr(self, '_set'):
self._set()
def _unset_aging_time(self):
self.__aging_time = YANGDynClass(base=aging_time.aging_time, is_container='container', presence=False, yang_name="aging-time", rest_name="aging-time", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Aging Time', u'callpoint': u'mac-aging-callpoint'}}, namespace='urn:brocade.com:mgmt:brocade-mac-address-table', defining_module='brocade-mac-address-table', yang_type='container', is_config=True)
def _get_mac_move(self):
"""
Getter method for mac_move, mapped from YANG variable /mac_address_table/mac_move (container)
YANG Description: MAC move
"""
return self.__mac_move
def _set_mac_move(self, v, load=False):
"""
Setter method for mac_move, mapped from YANG variable /mac_address_table/mac_move (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_mac_move is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_mac_move() directly.
YANG Description: MAC move
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=mac_move.mac_move, is_container='container', presence=False, yang_name="mac-move", rest_name="mac-move", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'MAC move', u'callpoint': u'mac-move-callpoint', u'cli-incomplete-no': None}}, namespace='urn:brocade.com:mgmt:brocade-mac-address-table', defining_module='brocade-mac-address-table', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """mac_move must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=mac_move.mac_move, is_container='container', presence=False, yang_name="mac-move", rest_name="mac-move", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'MAC move', u'callpoint': u'mac-move-callpoint', u'cli-incomplete-no': None}}, namespace='urn:brocade.com:mgmt:brocade-mac-address-table', defining_module='brocade-mac-address-table', yang_type='container', is_config=True)""",
})
self.__mac_move = t
if hasattr(self, '_set'):
self._set()
def _unset_mac_move(self):
self.__mac_move = YANGDynClass(base=mac_move.mac_move, is_container='container', presence=False, yang_name="mac-move", rest_name="mac-move", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'MAC move', u'callpoint': u'mac-move-callpoint', u'cli-incomplete-no': None}}, namespace='urn:brocade.com:mgmt:brocade-mac-address-table', defining_module='brocade-mac-address-table', yang_type='container', is_config=True)
def _get_consistency_check(self):
"""
Getter method for consistency_check, mapped from YANG variable /mac_address_table/consistency_check (container)
YANG Description: MAC Consistency check
"""
return self.__consistency_check
def _set_consistency_check(self, v, load=False):
"""
Setter method for consistency_check, mapped from YANG variable /mac_address_table/consistency_check (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_consistency_check is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_consistency_check() directly.
YANG Description: MAC Consistency check
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=consistency_check.consistency_check, is_container='container', presence=False, yang_name="consistency-check", rest_name="consistency-check", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'MAC Consistency check', u'callpoint': u'mac-move-callpoint', u'cli-incomplete-no': None}}, namespace='urn:brocade.com:mgmt:brocade-mac-address-table', defining_module='brocade-mac-address-table', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """consistency_check must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=consistency_check.consistency_check, is_container='container', presence=False, yang_name="consistency-check", rest_name="consistency-check", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'MAC Consistency check', u'callpoint': u'mac-move-callpoint', u'cli-incomplete-no': None}}, namespace='urn:brocade.com:mgmt:brocade-mac-address-table', defining_module='brocade-mac-address-table', yang_type='container', is_config=True)""",
})
self.__consistency_check = t
if hasattr(self, '_set'):
self._set()
def _unset_consistency_check(self):
self.__consistency_check = YANGDynClass(base=consistency_check.consistency_check, is_container='container', presence=False, yang_name="consistency-check", rest_name="consistency-check", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'MAC Consistency check', u'callpoint': u'mac-move-callpoint', u'cli-incomplete-no': None}}, namespace='urn:brocade.com:mgmt:brocade-mac-address-table', defining_module='brocade-mac-address-table', yang_type='container', is_config=True)
static = __builtin__.property(_get_static, _set_static)
learning_mode = __builtin__.property(_get_learning_mode, _set_learning_mode)
aging_time = __builtin__.property(_get_aging_time, _set_aging_time)
mac_move = __builtin__.property(_get_mac_move, _set_mac_move)
consistency_check = __builtin__.property(_get_consistency_check, _set_consistency_check)
_pyangbind_elements = {'static': static, 'learning_mode': learning_mode, 'aging_time': aging_time, 'mac_move': mac_move, 'consistency_check': consistency_check, }
| 79.123675 | 1,147 | 0.730975 |
4a1eab95f885b1e9213400a27911a3b7671a6ba9 | 2,143 | py | Python | test/python/testprocess.py | personx000/paperetl | 99cfcf305e60df3eef6c3cab9eb1401057dfa862 | ["Apache-2.0"] | null | null | null | test/python/testprocess.py | personx000/paperetl | 99cfcf305e60df3eef6c3cab9eb1401057dfa862 | ["Apache-2.0"] | null | null | null | test/python/testprocess.py | personx000/paperetl | 99cfcf305e60df3eef6c3cab9eb1401057dfa862 | ["Apache-2.0"] | null | null | null |
"""
Generic ETL process test module
"""
import unittest
from paperetl.schema.article import Article
# pylint: disable = C0411
from utils import Utils
class TestProcess(unittest.TestCase):
"""
Generic ETL process test
"""
# Database connection
db, cur = None, None
def articleCount(self, count):
"""
Test number of articles.
Args:
count: expected count
"""
self.cur.execute("SELECT COUNT(id) FROM articles")
self.assertEqual(self.cur.fetchone()[0], count)
def articles(self, hashes):
"""
Test article metadata.
Args:
hashes: expected hashes
"""
# Get all article columns except published and entry date
columns = list(Article.ARTICLE[:-1])
del columns[2]
columns = ",".join(columns)
self.cur.execute(f"SELECT {columns} FROM articles ORDER BY id")
for row in self.cur.fetchall():
# Calculate row hash
md5 = Utils.hashtext(" ".join([str(x) for x in row]))
# Check hash equals expected value
self.assertEqual(hashes[row[0]], md5)
def sectionCount(self, count):
"""
Test number of sections.
Args:
count: expected count
"""
self.cur.execute("SELECT COUNT(id) FROM sections")
self.assertEqual(self.cur.fetchone()[0], count)
def sections(self, hashes):
"""
Test section content.
Args:
hashes: expected hashes
"""
# Section columns
columns = ", ".join(Article.SECTION)
self.cur.execute("SELECT id FROM articles ORDER BY id")
for row in self.cur.fetchall():
# Get list of sections
self.cur.execute(
f"SELECT {columns} FROM sections WHERE article = ? ORDER BY id",
[row[0]],
)
text = [str(y) for x in self.cur.fetchall() for y in x]
md5 = Utils.hashtext(" ".join(text))
# Check hash equals expected value
self.assertEqual(hashes[row[0]], md5)
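
# Illustrative concrete subclass (an assumption for documentation purposes: the
# class name and database path below are hypothetical; the schema is the
# articles/sections layout queried above, and the "?" placeholders suggest an
# sqlite3-style connection):
#
#   import sqlite3
#   class TestSQLiteProcess(TestProcess):
#       @classmethod
#       def setUpClass(cls):
#           cls.db = sqlite3.connect("/path/to/articles.sqlite")
#           cls.cur = cls.db.cursor()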
| 24.352273 | 80 | 0.55623 |
4a1eaba659e942972b997def72a10a6f1bb43ca9 | 1,702 | py | Python | config/settings/test.py | RedGranatum/Carmesi | bde1d4dd104401ba08e7ba2f3de5b9d5f537dd94 | ["MIT"] | null | null | null | config/settings/test.py | RedGranatum/Carmesi | bde1d4dd104401ba08e7ba2f3de5b9d5f537dd94 | ["MIT"] | null | null | null | config/settings/test.py | RedGranatum/Carmesi | bde1d4dd104401ba08e7ba2f3de5b9d5f537dd94 | ["MIT"] | null | null | null |
"""
With these settings, tests run faster.
"""
from .base import * # noqa
from .base import env
# GENERAL
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#secret-key
SECRET_KEY = env(
"DJANGO_SECRET_KEY",
default="fqBNybUWxyNWALr8toY79zTzFKXRgtmQnJkorW3Eu0DHxZCJtKQY8mkSvt090QE1",
)
# https://docs.djangoproject.com/en/dev/ref/settings/#test-runner
TEST_RUNNER = "django.test.runner.DiscoverRunner"
# CACHES
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#caches
CACHES = {
"default": {
"BACKEND": "django.core.cache.backends.locmem.LocMemCache",
"LOCATION": "",
}
}
# PASSWORDS
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#password-hashers
PASSWORD_HASHERS = ["django.contrib.auth.hashers.MD5PasswordHasher"]
# TEMPLATES
# ------------------------------------------------------------------------------
TEMPLATES[-1]["OPTIONS"]["loaders"] = [ # type: ignore[index] # noqa F405
(
"django.template.loaders.cached.Loader",
[
"django.template.loaders.filesystem.Loader",
"django.template.loaders.app_directories.Loader",
],
)
]
# EMAIL
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#email-backend
EMAIL_BACKEND = "django.core.mail.backends.locmem.EmailBackend"
# Your stuff...
# ------------------------------------------------------------------------------
| 32.730769 | 80 | 0.49765 |
4a1eac54b11b6c7465d13fb40e7c872df0a3e4ae | 2,369 | py | Python | test/scons-time/mem/format-gnuplot.py | EmanueleCannizzaro/scons | 6baa4e65cdf4df6951473545b69435711864e509 | ["MIT"] | 1 | 2019-09-18T06:37:02.000Z | 2019-09-18T06:37:02.000Z | test/scons-time/mem/format-gnuplot.py | EmanueleCannizzaro/scons | 6baa4e65cdf4df6951473545b69435711864e509 | ["MIT"] | null | null | null | test/scons-time/mem/format-gnuplot.py | EmanueleCannizzaro/scons | 6baa4e65cdf4df6951473545b69435711864e509 | ["MIT"] | null | null | null |
#!/usr/bin/env python
#
# Copyright (c) 2001 - 2016 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "test/scons-time/mem/format-gnuplot.py rel_2.5.1:3735:9dc6cee5c168 2016/11/03 14:02:02 bdbaddog"
"""
Verify the mem --format=gnuplot option.
"""
import TestSCons_time
test = TestSCons_time.TestSCons_time()
test.fake_logfile('foo-000-0.log', 0)
test.fake_logfile('foo-000-1.log', 0)
test.fake_logfile('foo-000-2.log', 0)
test.fake_logfile('foo-001-0.log', 1)
test.fake_logfile('foo-001-1.log', 1)
test.fake_logfile('foo-001-2.log', 1)
expect_notitle = """\
set key bottom left
plot '-' title "Startup" with lines lt 1, \\
'-' title "Full build" with lines lt 2, \\
'-' title "Up-to-date build" with lines lt 3
# Startup
0 4000.000
1 4001.000
e
# Full build
0 4000.000
1 4001.000
e
# Up-to-date build
0 4000.000
1 4001.000
e
"""
expect_title = 'set title "TITLE"\n' + expect_notitle
test.run(arguments = 'mem --fmt gnuplot', stdout=expect_notitle)
test.run(arguments = 'mem --fmt=gnuplot --title TITLE', stdout=expect_title)
test.run(arguments = 'mem --format gnuplot --title TITLE', stdout=expect_title)
test.run(arguments = 'mem --format=gnuplot', stdout=expect_notitle)
test.pass_test()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| 29.987342 | 111 | 0.739553 |
4a1ead0243c4651d8831f0a5c6da2c40e23c48a8 | 8,873 | py | Python | upvote/gae/datastore/models/utils.py | iwikmai/upvote | 77bb200d0e35a28cc5aed98ceee8e234998814b6 | ["Apache-2.0"] | 453 | 2017-10-24T15:29:44.000Z | 2021-09-27T23:21:20.000Z | upvote/gae/datastore/models/utils.py | iwikmai/upvote | 77bb200d0e35a28cc5aed98ceee8e234998814b6 | ["Apache-2.0"] | 58 | 2018-03-23T21:19:16.000Z | 2021-05-23T20:06:05.000Z | upvote/gae/datastore/models/utils.py | iwikmai/upvote | 77bb200d0e35a28cc5aed98ceee8e234998814b6 | ["Apache-2.0"] | 36 | 2018-03-23T21:25:54.000Z | 2021-09-27T23:21:24.000Z |
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Datastore Model-related utility functions."""
from google.appengine.ext import ndb
from upvote.gae import settings
from upvote.gae.datastore.models import binary as binary_models
from upvote.gae.datastore.models import cert as cert_models
from upvote.gae.datastore.models import event as event_models
from upvote.gae.datastore.models import exemption as exemption_models
from upvote.gae.datastore.models import host as host_models
from upvote.gae.datastore.models import package as package_models
from upvote.gae.datastore.models import rule as rule_models
from upvote.gae.datastore.models import user as user_models
from upvote.gae.utils import user_utils
from upvote.shared import constants
class Error(Exception):
"""Base Exception class."""
class UnsupportedPlatformError(Error):
"""Raised when an unsupported platform is encountered."""
class UnsupportedRuleTypeError(Error):
"""Raised when an unsupported rule type is encountered."""
def GetBit9HostKeysForUser(user):
"""Returns the Keys of all Bit9Hosts associated with the given user.
Args:
user: The User in question.
Returns:
A list of Bit9Host Keys.
"""
query = host_models.Bit9Host.query(
host_models.Bit9Host.users == user.nickname)
return query.fetch(keys_only=True)
def GetBit9HostIdsForUser(user):
return [key.id() for key in GetBit9HostKeysForUser(user)]
def GetSantaHostKeysForUser(user):
"""Returns the Keys of all SantaHosts associated with the given user.
Args:
user: The User in question.
Returns:
A list of SantaHost Keys.
"""
hosts_query = host_models.SantaHost.query(
host_models.SantaHost.primary_user == user.nickname)
hosts_future = hosts_query.fetch_async(keys_only=True)
# If a user has been logged in to a Host when an Event was registered, they
# are associated with that Host.
events_query = event_models.SantaEvent.query(
ancestor=user.key,
projection=[event_models.SantaEvent.host_id],
distinct=True)
events_future = events_query.fetch_async()
all_keys = set(hosts_future.get_result())
for event in events_future.get_result():
all_keys.add(ndb.Key(host_models.SantaHost, event.host_id))
return list(all_keys)
def GetSantaHostIdsForUser(user):
return [key.id() for key in GetSantaHostKeysForUser(user)]
def GetHostKeysForUser(user):
return GetBit9HostKeysForUser(user) + GetSantaHostKeysForUser(user)
def GetHostIdsForUser(user):
return GetBit9HostIdsForUser(user) + GetSantaHostIdsForUser(user)
def GetExemptionsForUser(email_addr, state=None):
user = user_models.User.GetById(email_addr)
exm_keys = [
exemption_models.Exemption.CreateKey(host_id)
for host_id in GetHostIdsForUser(user)]
exms = [exm for exm in ndb.get_multi(exm_keys) if exm]
if state:
exms = [exm for exm in exms if exm.state == state]
return exms
def GetExemptionsForHosts(host_keys):
"""Retrieves all Exemptions corresponding to the specified Hosts.
Args:
host_keys: A list of NDB Host Keys.
Returns:
A dictionary mapping the given Host Keys to their corresponding Exemptions,
or None if one doesn't exist.
"""
# Compose the expected Exemption Keys for all Hosts.
exm_keys = [ndb.Key(flat=k.flat() + ('Exemption', '1')) for k in host_keys]
# Grab everything from Datastore.
exms = ndb.get_multi(exm_keys)
# Map Host Keys to the entities we got back.
exm_dict = {exm.key.parent(): exm for exm in exms if exm}
# Return a mapping of all Host Keys to their Exemptions, or None for those
# without Exemptions.
return {host_key: exm_dict.get(host_key) for host_key in host_keys}
def GetEventKeysToInsert(event, logged_in_users, host_owners):
"""Returns the list of keys with which this Event should be inserted."""
if settings.EVENT_CREATION == constants.EVENT_CREATION.EXECUTING_USER:
if event.run_by_local_admin:
usernames = logged_in_users
else:
usernames = [event.executing_user] if event.executing_user else []
else: # HOST_OWNERS
usernames = host_owners
emails = [user_utils.UsernameToEmail(username) for username in usernames]
keys = []
for email in emails:
key_pairs = [
(user_models.User, email.lower()),
(host_models.Host, event.host_id)]
key_pairs += event.blockable_key.pairs()
key_pairs += [(event_models.Event, '1')]
keys.append(ndb.Key(pairs=key_pairs))
return keys
def IsBit9HostAssociatedWithUser(host, user):
return user.nickname in host.users
def IsSantaHostAssociatedWithUser(host, user):
"""Returns whether the given user is associated with this host."""
if user.nickname == host.primary_user:
return True
# If a user has been logged in to this Host when an Event was registered,
# they are associated with this Host.
parent_key = ndb.Key(host_models.SantaHost, host.key.id(), parent=user.key)
query = event_models.SantaEvent.query(ancestor=parent_key)
return query.get(keys_only=True) is not None
def IsHostAssociatedWithUser(host, user):
"""Returns whether the given host is associated with a given user.
  NOTE: What constitutes "associated with" is platform-dependent.
Args:
host: The Host entity to test.
user: The User entity to test.
Returns:
bool, Whether the host is associated with the user.
"""
if isinstance(host, host_models.Bit9Host):
return IsBit9HostAssociatedWithUser(host, user)
elif isinstance(host, host_models.SantaHost):
return IsSantaHostAssociatedWithUser(host, user)
else:
raise ValueError('Unsupported Host class: %s' % host.__class__.__name__)
def GetUsersAssociatedWithSantaHost(host_id):
event_query = event_models.Event.query(
event_models.Event.host_id == host_id,
projection=[event_models.Event.executing_user],
distinct=True)
return [
e.executing_user for e in event_query.fetch()
if e.executing_user != constants.LOCAL_ADMIN.MACOS]
def GetBundleBinaryIdsForRule(rule):
if rule.rule_type == constants.RULE_TYPE.PACKAGE:
keys = package_models.SantaBundle.GetBundleBinaryKeys(rule.key.parent())
return [key.id() for key in keys]
return []
_BLOCKABLE_CLASSES = {
constants.PLATFORM.MACOS: {
constants.RULE_TYPE.BINARY: binary_models.SantaBlockable,
constants.RULE_TYPE.CERTIFICATE: cert_models.SantaCertificate,
},
}
def EnsureCriticalRule(critical_rule):
"""Pre-populates Datastore with a critical Rule entity.
Args:
critical_rule: A settings.CriticalRule namedtuple.
Raises:
UnsupportedPlatformError: if an unsupported platform is encountered.
UnsupportedRuleTypeError: if an unsupported rule type is encountered.
"""
# Start off by determining the necessary Blockable and Rule subclasses we'll
# need before we continue.
if critical_rule.platform == constants.PLATFORM.MACOS:
rule_cls = rule_models.SantaRule
if critical_rule.rule_type == constants.RULE_TYPE.BINARY:
blockable_cls = binary_models.SantaBlockable
elif critical_rule.rule_type == constants.RULE_TYPE.CERTIFICATE:
blockable_cls = cert_models.SantaCertificate
else:
raise UnsupportedRuleTypeError(critical_rule.rule_type)
else:
raise UnsupportedPlatformError(critical_rule.platform)
# If the Blockable entity doesn't yet exist in Datastore, create it now.
blockable_key = ndb.Key(blockable_cls, critical_rule.sha256)
blockable = blockable_key.get()
if not blockable:
blockable = blockable_cls(
id=critical_rule.sha256, id_type=constants.ID_TYPE.SHA256,
blockable_hash=critical_rule.sha256)
blockable.put()
blockable.InsertBigQueryRow(constants.BLOCK_ACTION.FIRST_SEEN)
# Check for at least one matching Rule entity.
rule = rule_cls.query(
ancestor=blockable_key).get(keys_only=True)
# Doesn't exist? Add it!
if rule is None:
rule = rule_cls(
parent=blockable_key,
rule_type=critical_rule.rule_type,
policy=critical_rule.rule_policy)
rule.put()
rule.InsertBigQueryRow()
def EnsureCriticalRules(critical_rules):
"""Pre-populates Datastore with critical Rule entities.
Args:
critical_rules: A list of settings.CriticalRule namedtuples.
"""
for critical_rule in critical_rules:
EnsureCriticalRule(critical_rule)
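
# Illustrative call (an assumption: the field names follow the attributes read
# in EnsureCriticalRule above, and the sha256 value and policy constant are
# placeholders, not values from the real settings module):
#
#   EnsureCriticalRules([
#       settings.CriticalRule(
#           sha256='aabb...', platform=constants.PLATFORM.MACOS,
#           rule_type=constants.RULE_TYPE.BINARY,
#           rule_policy=constants.RULE_POLICY.WHITELIST),
#   ])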
| 31.917266 | 79 | 0.749239 |
4a1eadefde80427b4703f1cf4cfc5eebe257b592 | 245 | py | Python | Python/01 - Introduction/Write a function.py | sohammanjrekar/HackerRank | 1f5010133a1ac1e765e855a086053c97d9e958be | ["MIT"] | null | null | null | Python/01 - Introduction/Write a function.py | sohammanjrekar/HackerRank | 1f5010133a1ac1e765e855a086053c97d9e958be | ["MIT"] | null | null | null | Python/01 - Introduction/Write a function.py | sohammanjrekar/HackerRank | 1f5010133a1ac1e765e855a086053c97d9e958be | ["MIT"] | null | null | null |
def is_leap(year):
leap = False
# Write your logic here
if year % 400 == 0:
leap = True
elif year % 4 == 0 and year % 100 != 0:
leap = True
return leap
year = int(input())
print(is_leap(year))
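
# Worked examples (added for clarity):
#   is_leap(2000) -> True   (divisible by 400)
#   is_leap(1900) -> False  (divisible by 100 but not by 400)
#   is_leap(2016) -> True   (divisible by 4 and not by 100)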
| 17.5 | 44 | 0.514286 |
4a1eae3c76790d6e6a5fa3e941d0803194d70f02 | 270 | py | Python | tests/artificial/transf_BoxCox/trend_MovingAverage/cycle_7/ar_/test_artificial_128_BoxCox_MovingAverage_7__20.py | jmabry/pyaf | afbc15a851a2445a7824bf255af612dc429265af | ["BSD-3-Clause"] | null | null | null | tests/artificial/transf_BoxCox/trend_MovingAverage/cycle_7/ar_/test_artificial_128_BoxCox_MovingAverage_7__20.py | jmabry/pyaf | afbc15a851a2445a7824bf255af612dc429265af | ["BSD-3-Clause"] | 1 | 2019-11-30T23:39:38.000Z | 2019-12-01T04:34:35.000Z | tests/artificial/transf_BoxCox/trend_MovingAverage/cycle_7/ar_/test_artificial_128_BoxCox_MovingAverage_7__20.py | jmabry/pyaf | afbc15a851a2445a7824bf255af612dc429265af | ["BSD-3-Clause"] | null | null | null |
import pyaf.Bench.TS_datasets as tsds
import pyaf.tests.artificial.process_artificial_dataset as art
art.process_dataset(N = 128 , FREQ = 'D', seed = 0, trendtype = "MovingAverage", cycle_length = 7, transform = "BoxCox", sigma = 0.0, exog_count = 20, ar_order = 0);
| 38.571429 | 165 | 0.733333 |
4a1eaea946487a023b0d11c811ce8d60332fd2e9 | 51,848 | py | Python | appengine/swarming/swarming_bot/bot_code/bot_main.py | amymariaparker2401/luci-py | c5902547adc12390df6b09c825a38370f1034e8b | ["Apache-2.0"] | null | null | null | appengine/swarming/swarming_bot/bot_code/bot_main.py | amymariaparker2401/luci-py | c5902547adc12390df6b09c825a38370f1034e8b | ["Apache-2.0"] | 1 | 2022-03-02T09:56:27.000Z | 2022-03-02T09:56:27.000Z | appengine/swarming/swarming_bot/bot_code/bot_main.py | Lees78/luci-py | 7b854c55f63e648005ae8aa38e2e41cd8f99feda | ["Apache-2.0"] | null | null | null |
# Copyright 2013 The LUCI Authors. All rights reserved.
# Use of this source code is governed under the Apache License, Version 2.0
# that can be found in the LICENSE file.
"""Swarming bot main process.
This is the program that communicates with the Swarming server, ensures the code
is always up to date and executes a child process to run tasks and upload
results back.
It manages self-update and rebooting the host in case of problems.
Sections are:
- Globals
- Monitoring
- bot_config handler
- Public functions used by __main__.py
- Sub process management
- Bot lifetime management
"""
from __future__ import print_function
import argparse
import collections
import contextlib
import fnmatch
import json
import logging
import os
import shutil
import sys
import tempfile
import threading
import time
import traceback
import types
import zipfile
import six
# Import _strptime before threaded code. datetime.datetime.strptime is
# threadsafe except for the initial import of the _strptime module.
# See https://bugs.python.org/issue7980.
import _strptime # pylint: disable=unused-import
from api import bot
from api import os_utilities
from api import platforms
from bot_code import bot_auth
from bot_code import common
from bot_code import file_refresher
from bot_code import remote_client
from bot_code import remote_client_errors
from bot_code import singleton
from infra_libs import ts_mon
from utils import file_path
from utils import fs
from utils import net
from utils import on_error
from utils import subprocess42
from utils import tools
from utils import zip_package
### Globals
# Used to opportunistically set the error handler to notify the server when the
# process exits due to an exception.
_ERROR_HANDLER_WAS_REGISTERED = False
# Set to the zip's name containing this file. This is set to the absolute path
# to swarming_bot.zip when run as part of swarming_bot.zip. This value is
# overridden in unit tests.
#
# Note: this more or less requires the bot to be in a path without non-ASCII
# characters.
THIS_FILE = six.ensure_text(os.path.abspath(zip_package.get_main_script_path()))
THIS_DIR = os.path.dirname(THIS_FILE)
# The singleton, initially unset.
SINGLETON = singleton.Singleton(THIS_DIR)
# Whitelist of files that can be present in the bot's directory. Anything else
# will be forcibly deleted on startup! Note that 'w' (work) is not in this list,
# as we want it to be deleted on startup.
# See
# https://chromium.googlesource.com/infra/luci/luci-py.git/+/master/appengine/swarming/doc/Bot.md
# for more details.
PASSLIST = (
'*-cacert.pem',
'README',
'README.md',
'c',
'cas_cache',
'cipd_cache',
'isolated_cache',
'logs',
'swarming.lck',
'swarming_bot.1.zip',
'swarming_bot.2.zip',
'swarming_bot.zip',
)
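# For illustration: a file such as '1234-cacert.pem' matches '*-cacert.pem' and
# survives startup cleanup, while a leftover 'w' work directory matches no
# pattern and is removed by _cleanup_bot_directory() below.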
# These settings are documented in ../config/bot_config.py.
# Keep in sync with ../config/bot_config.py. This is enforced by a unit test.
DEFAULT_SETTINGS = {
'free_partition': {
'root': {
'size': 1 * 1024 * 1024 * 1024,
'max_percent': 10.,
'min_percent': 6.,
},
'bot': {
'size': 4 * 1024 * 1024 * 1024,
'max_percent': 15.,
'min_percent': 7.,
'wiggle': 250 * 1024 * 1024,
},
},
'caches': {
'isolated': {
'size': 50 * 1024 * 1024 * 1024,
'items': 50 * 1024,
},
},
}
# Keep in sync with ../../ts_mon_metrics.py
_IGNORED_DIMENSIONS = (
'android_devices', 'caches', 'id', 'server_version', 'temp_band')
# Flag to decide if bot is running in test mode. This is mostly used by smoke
# and integration tests.
# TODO(1099655): Remove once we have fully enabled CIPD in both prod and tests.
_IN_TEST_MODE = False
### Monitoring
_bucketer = ts_mon.GeometricBucketer(growth_factor=10**0.07,
num_finite_buckets=100)
_hooks_durations = ts_mon.CumulativeDistributionMetric(
'swarming/bots/hooks/durations',
'Duration of bot hook calls in ms', [
ts_mon.StringField('hookname'),
ts_mon.StringField('pool'),
],
bucketer=_bucketer,
units=ts_mon.MetricsDataUnits.MILLISECONDS)
def _pool_from_dimensions(dimensions):
"""Return a canonical string of flattened dimensions."""
# Keep in sync with ../../ts_mon_metrics.py
pairs = []
for key, values in dimensions.items():
if key in _IGNORED_DIMENSIONS:
continue
# Strip all the prefixes of other values. values is already sorted.
for i, value in enumerate(values):
if not any(v.startswith(value) for v in values[i+1:]):
pairs.append(u'%s:%s' % (key, value))
return u'|'.join(sorted(pairs))
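# For illustration (hypothetical dimensions):
#   _pool_from_dimensions({u'id': [u'bot1'],
#                          u'os': [u'Ubuntu', u'Ubuntu-16.04'],
#                          u'pool': [u'default']})
#   == u'os:Ubuntu-16.04|pool:default'
# 'id' is ignored and 'Ubuntu' is dropped because it is a prefix of the more
# specific 'Ubuntu-16.04'.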
def _monitor_call(func):
"""Decorates a functions and reports the runtime to ts_mon."""
def hook(chained, botobj, name, *args, **kwargs):
start = time.time()
try:
return func(chained, botobj, name, *args, **kwargs)
finally:
duration = max(0, (time.time() - start) * 1000)
if botobj and botobj.dimensions:
flat_dims = _pool_from_dimensions(botobj.dimensions)
if flat_dims:
logging.info('ts_mon hook_name=%r pool=%r', name, flat_dims)
_hooks_durations.add(
duration, fields={u'hookname': name, u'pool': flat_dims})
logging.info('%s(): %gs', name, round(duration/1000., 3))
return hook
def _init_ts_mon():
"""Initializes ts_mon."""
parser = argparse.ArgumentParser(description=sys.modules[__name__].__doc__)
ts_mon.add_argparse_options(parser)
parser.set_defaults(
ts_mon_target_type='task',
ts_mon_task_service_name='swarming-bot',
ts_mon_task_job_name='default',
ts_mon_flush='auto',
ts_mon_ca_certs=tools.get_cacerts_bundle(),
)
args = parser.parse_args([])
ts_mon.process_argparse_options(args)
### bot_config handler
# Reference to the config/bot_config.py module inside the swarming_bot.zip file.
# This variable is initialized inside _get_bot_config().
_BOT_CONFIG = None
# Reference to the second bot_config.py module injected by the server. This
# variable is initialized inside _do_handshake().
_EXTRA_BOT_CONFIG = None
# Super Sticky quarantine string. This variable is initialized inside
# _set_quarantined() and be set at various places when a hook throws an
# exception. Restarting the bot will clear the quarantine, which includes
# updated the bot due to new bot_config or new bot code.
_QUARANTINED = None
def _set_quarantined(reason):
"""Sets the Super Sticky Quarantine string."""
logging.error('_set_quarantined(%s)', reason)
global _QUARANTINED
_QUARANTINED = _QUARANTINED or reason
def _get_bot_config():
"""Returns the bot_config.py module. Imports it only once.
  This function is called implicitly by _call_hook() and _call_hook_safe().
"""
global _BOT_CONFIG
if not _BOT_CONFIG:
from config import bot_config as _BOT_CONFIG
return _BOT_CONFIG
def _register_extra_bot_config(content, rev, script):
"""Registers the server injected extra injected.py bot_config.
This file is called implicitly by _call_hook() and _call_hook_safe().
"""
global _EXTRA_BOT_CONFIG
if isinstance(content, six.text_type):
# compile will throw if there's a '# coding: utf-8' line and the string is
# in unicode. <3 python.
content = content.encode('utf-8')
try:
compiled = compile(content, 'injected.py', 'exec')
_EXTRA_BOT_CONFIG = types.ModuleType('injected')
exec(compiled, _EXTRA_BOT_CONFIG.__dict__)
logging.debug('extra bot_config %s at rev %s was injected.', script, rev)
except (SyntaxError, TypeError) as e:
_set_quarantined(
'handshake returned invalid injected bot_config.py: %s' % e)
@_monitor_call
def _call_hook(chained, botobj, name, *args, **kwargs):
"""Calls a hook function named `name` in bot_config.py.
If `chained` is True, calls the general bot_config.py then the injected
version.
  If `chained` is False, the injected bot_config version is called first, and
  the general bot_config version is called only if the injected one is not
  present.
"""
try:
if not chained:
# Injected version has higher priority.
hook = getattr(_EXTRA_BOT_CONFIG, name, None)
if hook:
try:
return hook(botobj, *args, **kwargs)
except OSError:
_log_process_info()
hook = getattr(_get_bot_config(), name, None)
if hook:
return hook(botobj, *args, **kwargs)
# The hook is not defined.
return None
# In the case of chained=True, call both hooks. Call the generic one first,
# then the specialized.
ret = None
hook = getattr(_get_bot_config(), name, None)
if hook:
ret = hook(botobj, *args, **kwargs)
hook = getattr(_EXTRA_BOT_CONFIG, name, None)
if hook:
# Ignores the previous return value.
ret = hook(botobj, *args, **kwargs)
return ret
finally:
# TODO(maruel): Handle host_reboot() request the same way.
if botobj:
msg = botobj.bot_restart_msg()
if msg:
# The hook requested a bot restart. Do it right after the hook call.
_bot_restart(botobj, msg)
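# For illustration with hypothetical hooks: if both config/bot_config.py and
# the server-injected injected.py define on_before_task(), then
#   _call_hook(True, botobj, 'on_before_task', ...)  runs the generic hook
#   first, then the injected one, and the injected return value wins;
#   _call_hook(False, botobj, 'get_dimensions')      runs only the injected
#   hook, falling back to bot_config.py when injected.py does not define it.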
def _log_process_info():
try:
import psutil
process_count = collections.Counter(
proc.name() for proc in psutil.process_iter()
)
logging.info('Processes running: %s', process_count)
except ImportError:
logging.info('Fail to get process info. Missing psutil')
def _call_hook_safe(chained, botobj, name, *args):
"""Calls a hook function in bot_config.py.
Like _call_hook() but traps most exceptions.
"""
try:
return _call_hook(chained, botobj, name, *args)
except Exception as e:
traceback.print_exc()
logging.exception('%s() threw', name)
msg = '%s\n%s' % (e, traceback.format_exc()[-2048:])
if botobj:
botobj.post_error('Failed to call hook %s(): %s' % (name, msg))
# TODO(maruel): Disabled because of https://crbug.com/694327
#_set_quarantined(msg)
def _get_dimensions(botobj):
"""Returns bot_config.py's get_dimensions() dict."""
# Importing this administrator provided script could have side-effects on
# startup. That is why it is imported late.
out = _call_hook_safe(False, botobj, 'get_dimensions')
if isinstance(out, dict):
out = out.copy()
out[u'server_version'] = [_get_server_version_safe()]
return out
try:
_set_quarantined('get_dimensions(): expected a dict, got %r' % out)
out = os_utilities.get_dimensions()
out[u'quarantined'] = [u'1']
out[u'server_version'] = [_get_server_version_safe()]
return out
except Exception as e:
logging.exception('os.utilities.get_dimensions() failed')
return {
u'error': [u'%s\n%s' % (e, traceback.format_exc()[-2048:])],
u'id': [_get_botid_safe()],
u'quarantined': [u'1'],
u'server_version': [_get_server_version_safe()],
}
def _get_server_version_safe():
return get_config().get(u'server_version', u'N/A')
@tools.cached
def _get_botid_safe():
"""Paranoid version of get_hostname_short()."""
try:
return os_utilities.get_hostname_short()
except Exception as e:
logging.exception('os.utilities.get_hostname_short() failed')
return 'error_%s' % str(e)
def _get_settings(botobj):
"""Returns settings for this bot.
The way used to make it work safely is to take the default settings, then
merge the custom settings. This way, a user can only specify a subset of the
desired settings.
The function won't alert on unknown settings. This is so bot_config.py can be
updated in advance before pushing new bot_main.py. The main drawback is that
it will make typos silently fail. CHECK FOR TYPOS in get_settings() in your
bot_config.py.
"""
settings = _call_hook_safe(False, botobj, 'get_settings')
try:
if isinstance(settings, dict):
return _dict_deep_merge(DEFAULT_SETTINGS, settings)
except (KeyError, TypeError, ValueError):
logging.exception('get_settings() failed')
return DEFAULT_SETTINGS
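# For illustration, a hypothetical bot_config.py get_settings() hook returning
#   {'free_partition': {'bot': {'size': 8 * 1024 * 1024 * 1024}}}
# is merged over DEFAULT_SETTINGS by _dict_deep_merge() below, overriding only
# the bot partition size while keeping every other default (max_percent,
# min_percent, wiggle, caches, ...) untouched.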
def _get_state(botobj, sleep_streak):
"""Returns dict with a state of the bot reported to the server with each poll.
"""
state = _call_hook_safe(False, botobj, 'get_state')
if not isinstance(state, dict):
_set_quarantined('get_state(): expected a dict, got %r' % state)
state = {u'broken': state}
if not state.get(u'quarantined'):
if not _is_base_dir_ok(botobj):
# Use super hammer in case of dangerous environment.
_set_quarantined('Can\'t run from blacklisted directory')
if _QUARANTINED:
state[u'quarantined'] = _QUARANTINED
state[u'sleep_streak'] = sleep_streak
if not state.get(u'quarantined') and botobj:
# Reuse the data from 'state/disks'
disks = state.get(u'disks', {})
err = _get_disks_quarantine(botobj, disks)
if err:
state[u'quarantined'] = err
return state
def _get_disks_quarantine(botobj, disks):
"""Returns a quarantine error message when there's not enough free space.
It looks at both root partition and the current partition the bot is running
in.
"""
settings = _get_settings(botobj)['free_partition']
# On Windows, drive letters are always lower case.
root = 'c:\\' if sys.platform == 'win32' else '/'
errors = []
def _check_for_quarantine(r, i, key):
min_free = _min_free_disk(i, settings[key])
if int(i[u'free_mb']*1024*1024) < min_free:
errors.append(
u'Not enough free disk space on %s. %.1fmib < %.1fmib' %
(r, i[u'free_mb'], round(min_free / 1024. / 1024., 1)))
# root may be missing in the case of netbooted devices.
if root in disks:
_check_for_quarantine(root, disks[root], 'root')
# Try again with the bot's base directory. It is frequent to run the bot
# from a secondary partition, to reduce the risk of OS failure due to full
# root partition.
# This code is similar to os_utilities.get_disk_size().
path = botobj.base_dir
case_insensitive = sys.platform in ('darwin', 'win32')
if case_insensitive:
path = path.lower()
for mount, infos in sorted(disks.items(), key=lambda x: -len(x[0])):
if path.startswith(mount.lower() if case_insensitive else mount):
# Apply 'bot' check if bot is on its own partition, or it's on
# root partition and there are no errors reported yet.
if mount != root or not errors:
_check_for_quarantine(mount, infos, 'bot')
break
if errors:
return '\n'.join(errors)
def _get_authentication_headers(botobj):
"""Calls bot_config.get_authentication_headers() if it is defined.
See remote_client.RemoteClient doc for the expected format of the return
value.
Doesn't catch exceptions. RemoteClient knows how to deal with them.
"""
return _call_hook(False, botobj, 'get_authentication_headers') or (None, None)
def _on_shutdown_hook(b):
"""Called when the bot is restarting."""
_call_hook_safe(True, b, 'on_bot_shutdown')
# Aggressively set itself up so we ensure the auto-reboot configuration is
# fine before restarting the host. This is important as some tasks delete the
# autorestart script (!)
setup_bot(True)
def _min_free_disk(infos, settings):
"""Returns the calculated minimum free disk space for this partition.
See _get_settings() in ../config/bot_config.py for an explanation.
"""
size = int(infos[u'size_mb']*1024*1024)
x1 = settings['size'] or 0
x2 = int(round(size * float(settings['max_percent'] or 0) * 0.01))
# Select the lowest non-zero value.
x = min(x1, x2) if (x1 and x2) else (x1 or x2)
# Select the maximum value.
return max(x, int(round(size * float(settings['min_percent'] or 0) * 0.01)))
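# Worked example (illustrative), using the default 'root' settings and a
# 100 GiB partition, i.e. infos = {'size_mb': 102400}:
#   x1 = 1 GiB (absolute 'size'), x2 = 10 GiB (10% 'max_percent')
#   min(x1, x2) = 1 GiB, then max(1 GiB, 6 GiB from 'min_percent') = 6 GiB
# so roughly 6 GiB must remain free on that partition.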
def _dict_deep_merge(x, y):
"""Returns the union of x and y.
  y takes precedence.
"""
if x is None:
return y
if y is None:
return x
if isinstance(x, dict):
if isinstance(y, dict):
return {k: _dict_deep_merge(x.get(k), y.get(k)) for k in set(x).union(y)}
assert y is None, repr(y)
return x
if isinstance(y, dict):
assert x is None, repr(x)
return y
# y is overriding x.
return y
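# For illustration:
#   _dict_deep_merge({'a': {'b': 1, 'c': 2}}, {'a': {'c': 3}})
#   == {'a': {'b': 1, 'c': 3}}
# Nested keys from y override x; keys missing from y keep their x values.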
def _is_base_dir_ok(botobj):
"""Returns False if the bot must be quarantined at all cost."""
if not botobj:
# This can happen very early in the process lifetime.
return THIS_DIR != os.path.expanduser('~')
return botobj.base_dir != os.path.expanduser('~')
### Public functions used by __main__.py
def setup_bot(skip_reboot):
"""Calls bot_config.setup_bot() to have the bot self-configure itself.
Reboots the host if bot_config.setup_bot() returns False, unless skip_reboot
is also true.
Does nothing if SWARMING_EXTERNAL_BOT_SETUP env var is set to 1. It is set in
case bot's autostart configuration is managed elsewhere, and we don't want
the bot itself to interfere.
"""
if os.environ.get('SWARMING_EXTERNAL_BOT_SETUP') == '1':
logging.info('Skipping setup_bot, SWARMING_EXTERNAL_BOT_SETUP is set')
return
botobj = get_bot(get_config())
try:
from config import bot_config
except Exception as e:
msg = '%s\n%s' % (e, traceback.format_exc()[-2048:])
botobj.post_error('bot_config.py is bad: %s' % msg)
return
# TODO(maruel): Convert the should_continue return value to the hook calling
# botobj.host_reboot() by itself.
try:
should_continue = bot_config.setup_bot(botobj)
except Exception as e:
msg = '%s\n%s' % (e, traceback.format_exc()[-2048:])
botobj.post_error('bot_config.setup_bot() threw: %s' % msg)
return
if not should_continue and not skip_reboot:
botobj.host_reboot('Starting new swarming bot: %s' % THIS_FILE)
@tools.cached
def generate_version():
"""Returns the bot's code version."""
try:
return zip_package.generate_version()
except Exception as e:
return 'Error: %s' % e
def get_attributes(botobj):
"""Returns the attributes sent to the server in /handshake.
Each called function catches all exceptions so the bot doesn't die on startup,
  which is annoying to recover from. In that case, we set a special property to catch
these and help the admin fix the swarming_bot code more quickly.
Arguments:
- botobj: bot.Bot instance or None
"""
return {
u'dimensions': _get_dimensions(botobj),
u'state': _get_state(botobj, 0),
u'version': generate_version(),
}
def get_bot(config):
"""Returns a valid Bot instance.
Should only be called once in the process lifetime.
It can be called by ../__main__.py, something to keep in mind.
"""
# This variable is used to bootstrap the initial bot.Bot object, which then is
# used to get the dimensions and state.
attributes = {
'dimensions': {u'id': ['none']},
'state': {},
'version': generate_version(),
}
hostname = _get_botid_safe()
base_dir = THIS_DIR
# Use temporary Bot object to call get_attributes. Attributes are needed to
# construct the "real" bot.Bot.
attributes = get_attributes(
bot.Bot(
remote_client.createRemoteClient(config['server'], None, hostname,
base_dir), attributes,
config['server'], config['server_version'], base_dir,
_on_shutdown_hook))
# Make remote client callback use the returned bot object. We assume here
# RemoteClient doesn't call its callback in the constructor (since 'botobj' is
# undefined during the construction).
botobj = bot.Bot(
remote_client.createRemoteClient(
config['server'], lambda: _get_authentication_headers(botobj),
hostname, base_dir), attributes, config['server'],
config['server_version'], base_dir, _on_shutdown_hook)
return botobj
@tools.cached
def get_config():
"""Returns the data from config.json."""
global _ERROR_HANDLER_WAS_REGISTERED
try:
with contextlib.closing(zipfile.ZipFile(THIS_FILE, 'r')) as f:
config = json.load(f.open('config/config.json', 'r'))
if config['server'].endswith('/'):
raise ValueError('Invalid server entry %r' % config['server'])
except (zipfile.BadZipfile, IOError, OSError, TypeError, ValueError):
logging.exception('Invalid config.json!')
config = {
u'server': u'',
u'server_version': u'N/A',
}
if not _ERROR_HANDLER_WAS_REGISTERED and config['server']:
on_error.report_on_exception_exit(config['server'])
_ERROR_HANDLER_WAS_REGISTERED = True
return config
### Sub process management
def _cleanup_bot_directory(botobj):
"""Delete anything not expected in the swarming bot directory.
This helps with stale work directory or any unexpected junk that could cause
this bot to self-quarantine. Do only this when running from the zip.
"""
if not _is_base_dir_ok(botobj):
# That's an important one-off check as cleaning the $HOME directory has
# really bad effects on normal host.
logging.error('Not cleaning root directory because of bad base directory')
return
for i in fs.listdir(botobj.base_dir):
if any(fnmatch.fnmatch(i, w) for w in PASSLIST):
continue
try:
p = six.ensure_text(os.path.join(botobj.base_dir, i))
if fs.isdir(p):
file_path.rmtree(p)
else:
file_path.remove(p)
except (IOError, OSError) as e:
botobj.post_error(
'Failed to remove %s from bot\'s directory: %s' % (i, e))
def _run_isolated_flags(botobj):
"""Returns flags to pass to run_isolated.
These are not meant to be processed by task_runner.py.
"""
settings = _get_settings(botobj)
partition = settings['free_partition']['bot']
size = os_utilities.get_disk_size(THIS_FILE)
min_free = (
_min_free_disk({'size_mb': size}, partition) +
partition['wiggle'])
logging.info('size %d, partition %s, min_free %s', size, partition, min_free)
args = [
# Shared option.
'--min-free-space',
str(min_free),
'--max-cache-size',
str(settings['caches']['isolated']['size']),
# Isolated cache options.
'--cache',
os.path.join(botobj.base_dir, 'isolated_cache'),
'--max-items',
str(settings['caches']['isolated']['items']),
# CAS cache option.
'--cas-cache',
os.path.join(botobj.base_dir, 'cas_cache'),
# Named cache option.
'--named-cache-root',
os.path.join(botobj.base_dir, 'c'),
]
if _IN_TEST_MODE:
args += ['--cipd-enabled', 'false']
return args
def _Popen(botobj, cmd, **kwargs):
"""Wraps subprocess42.Popen.
Creates a 'detached' process as per subprocess42 description.
On Windows, also create a separate console.
"""
kwargs.setdefault('stdout', subprocess42.PIPE)
if sys.platform == 'win32':
prev = kwargs.get('creationflags', 0)
kwargs['creationflags'] = prev | subprocess42.CREATE_NEW_CONSOLE
else:
kwargs['close_fds'] = True
return subprocess42.Popen(
cmd,
stdin=subprocess42.PIPE,
stderr=subprocess42.STDOUT,
cwd=botobj.base_dir,
detached=True,
**kwargs)
def _clean_cache(botobj):
"""Asks run_isolated to clean its cache.
This may take a while but it ensures that in the case of a run_isolated run
failed and it temporarily used more space than _min_free_disk, it can cleans
up the mess properly.
It will remove unexpected files, remove corrupted files, trim the cache size
based on the policies and update state.json.
"""
cmd = [
sys.executable, THIS_FILE, 'run_isolated',
'--clean',
'--log-file', os.path.join(botobj.base_dir, 'logs', 'run_isolated.log'),
]
cmd.extend(_run_isolated_flags(botobj))
logging.info('Running: %s', cmd)
try:
# Intentionally do not use a timeout, it can take a while to hash 50gb but
# better be safe than sorry.
proc = _Popen(botobj, cmd)
output, _ = proc.communicate(None)
logging.info('Result:\n%s', output)
if proc.returncode:
botobj.post_error(
'swarming_bot.zip failure during run_isolated --clean:\n%s' % output)
except OSError:
botobj.post_error(
'swarming_bot.zip internal failure during run_isolated --clean')
def _post_error_task(botobj, error, task_id):
"""Posts given error as failure cause for the task.
This is used in case of internal code error, and this causes the task to
become BOT_DIED.
Arguments:
botobj: A bot.Bot instance.
error: String representing the problem.
task_id: Task that had an internal error. When the Swarming server sends
commands to a bot, even though they could be completely wrong, the
server assumes the job as running. Thus this function acts as the
exception handler for incoming commands from the Swarming server. If for
any reason the local test runner script can not be run successfully,
this function is invoked.
"""
logging.error('Error: %s', error)
return botobj.remote.post_task_error(task_id, error)
def _run_manifest(botobj, manifest, start):
"""Defers to task_runner.py.
Return True if the task succeeded.
"""
# Ensure the manifest is valid. This can throw a json decoding error. Also
# raise if it is empty.
if not manifest:
raise ValueError('Empty manifest')
# Necessary to signal an internal_failure. This occurs when task_runner fails
# to execute the command. It is important to note that this data is extracted
  # before any I/O is done, like writing the manifest to disk.
task_id = manifest['task_id']
last_ditch_timeout = manifest['hard_timeout'] or None
# The grace period is the time between SIGTERM and SIGKILL.
grace_period = max(manifest['grace_period'] or 0, 30)
if last_ditch_timeout:
# One for the child process, one for run_isolated, one for task_runner.
last_ditch_timeout += 3 * grace_period
# CIPD, isolated download time, plus named cache cleanup is not counted for
# hard timeout so add more time; hard_timeout is handled by run_isolated.
last_ditch_timeout += max(manifest['io_timeout'] or 0, 1200)
# Get the server info to pass to the task runner so it can provide updates.
url = botobj.remote.server
if 'host' in manifest:
# The URL in the manifest includes the version - eg not https://chromium-
    # swarm-dev.appspot.com, but https://<some-version>-dot-chromium-swarm-
# dev.appspot.com. That way, if a new server version becomes the default,
# old bots will continue to work with a server version that can manipulate
# the old data (the new server will only ever have to read it, which is
# much simpler) while new bots won't accidentally contact an old server
# which the GAE engine hasn't gotten around to updating yet.
#
# With a gRPC proxy, we could theoretically run into the same problem
# if we change the meaning of some data without changing the protos.
# However, if we *do* change the protos, we already need to make the
# change in a few steps:
# 1. Modify the Swarming server to accept the new data
# 2. Modify the protos and the proxy to accept the new data
# in gRPC calls and translate it to "native" Swarming calls.
# 3. Update the bots to transmit the new protos.
# Throughout all this, the proto format itself irons out minor differences
# and additions. But because we deploy in three steps, the odds of a
# newer bot contacting an older server is very low.
#
# None of this applies if we don't actually update the protos but just
# change the semantics. If this becomes a significant problem, we could
# start transmitting the expected server version using gRPC metadata.
# - aludwin, Nov 2016
url = manifest['host']
task_dimensions = manifest['dimensions']
task_result = {}
failure = False
internal_failure = False
msg = None
auth_params_dumper = None
must_reboot_reason = None
# Use 'w' instead of 'work' because path length is precious on Windows.
work_dir = os.path.join(botobj.base_dir, u'w')
try:
try:
if fs.isdir(work_dir):
file_path.rmtree(work_dir)
except OSError:
      # If a previous task created an undeletable file/directory inside 'w',
      # make sure that following tasks are not affected. This is done by working
      # around the undeletable directory by creating a temporary directory
# instead. This is not normal behavior. The bot will report a failure on
# start.
work_dir = tempfile.mkdtemp(dir=botobj.base_dir, prefix=u'w')
else:
try:
fs.makedirs(work_dir)
except OSError:
# Sometimes it's a race condition, so do a last ditch attempt.
work_dir = tempfile.mkdtemp(dir=botobj.base_dir, prefix=u'w')
env = os.environ.copy()
# Windows in particular does not tolerate unicode strings in environment
# variables.
env['SWARMING_TASK_ID'] = task_id.encode('ascii')
env['SWARMING_SERVER'] = botobj.server.encode('ascii')
task_in_file = os.path.join(work_dir, 'task_runner_in.json')
with fs.open(task_in_file, 'w') as f:
f.write(json.dumps(manifest))
handle, bot_file = tempfile.mkstemp(
prefix='bot_file', suffix='.json', dir=work_dir)
os.close(handle)
task_result_file = os.path.join(work_dir, 'task_runner_out.json')
if fs.exists(task_result_file):
fs.remove(task_result_file)
# Start a thread that periodically puts authentication headers and other
# authentication related information to a file on disk. task_runner reads it
# from there before making authenticated HTTP calls.
#
    # TODO(vadimsh): Switch to pipes or local sockets if the latency of token
    # propagation here becomes an issue.
auth_params_file = os.path.join(work_dir, 'bot_auth_params.json')
auth_params_dumper = file_refresher.FileRefresherThread(
auth_params_file,
lambda: bot_auth.prepare_auth_params_json(botobj, manifest))
auth_params_dumper.start()
command = [
sys.executable,
THIS_FILE,
'task_runner',
'--swarming-server',
url,
'--in-file',
task_in_file,
'--out-file',
task_result_file,
'--cost-usd-hour',
str(botobj.state.get('cost_usd_hour') or 0.),
# Include the time taken to poll the task in the cost.
'--start',
str(start),
'--bot-file',
bot_file,
'--auth-params-file',
auth_params_file,
]
# Flags for run_isolated.py are passed through by task_runner.py as-is
# without interpretation.
command.append('--')
command.extend(_run_isolated_flags(botobj))
_call_hook_safe(True, botobj, 'on_before_task', bot_file, command, env)
logging.debug('Running command: %s', command)
base_log = os.path.join(botobj.base_dir, u'logs')
if not fs.isdir(base_log):
# It was observed that this directory may be unexpectedly deleted.
# Recreate as needed, otherwise it may throw at the open() call below.
fs.mkdir(base_log)
log_path = os.path.join(base_log, 'task_runner_stdout.log')
os_utilities.roll_log(log_path)
os_utilities.trim_rolled_log(log_path)
with fs.open(log_path, 'a+b') as f:
proc = _Popen(botobj, command, stdout=f, env=env)
try:
proc.wait(last_ditch_timeout)
except subprocess42.TimeoutExpired:
# That's the last ditch effort; as task_runner should have completed a
# while ago and had enforced the io_timeout or run_isolated for
# hard_timeout.
logging.error('Sending SIGTERM to task_runner')
proc.terminate()
internal_failure = True
msg = 'task_runner hung'
try:
proc.wait(2*grace_period)
except subprocess42.TimeoutExpired:
logging.error('Sending SIGKILL to task_runner')
proc.kill()
proc.wait()
return False
logging.info('task_runner exit: %d', proc.returncode)
if fs.exists(task_result_file):
with fs.open(task_result_file, 'rb') as fd:
task_result = json.load(fd)
if proc.returncode:
# STATUS_DLL_INIT_FAILED generally means that something bad happened, and
# a reboot magically clears things out. :(
if sys.platform == 'win32' and proc.returncode == -1073741502:
must_reboot_reason = ('Working around STATUS_DLL_INIT_FAILED by '
'task_runner')
msg = 'Execution failed: internal error (%d).' % proc.returncode
internal_failure = True
elif not task_result:
logging.warning('task_runner failed to write metadata')
msg = 'Execution failed: internal error (no metadata).'
internal_failure = True
elif task_result[u'must_signal_internal_failure']:
msg = (
'Execution failed: %s' % task_result[u'must_signal_internal_failure'])
internal_failure = True
failure = bool(task_result.get('exit_code')) if task_result else False
return not internal_failure and not failure
except Exception as e:
# Failures include IOError when writing if the disk is full, OSError if
# swarming_bot.zip doesn't exist anymore, etc.
logging.exception('_run_manifest failed')
    msg = 'Internal exception occurred: %s\n%s' % (
e, traceback.format_exc()[-2048:])
internal_failure = True
finally:
if auth_params_dumper:
auth_params_dumper.stop()
if internal_failure:
_post_error_task(botobj, msg, task_id)
logging.info('calling on_after_task: failure=%s, internal_failure=%s, '
'task_dimensions=%s, task_result=%s',
failure, internal_failure, task_dimensions, task_result)
_call_hook_safe(
True, botobj, 'on_after_task', failure, internal_failure,
task_dimensions, task_result)
if fs.isdir(work_dir):
try:
file_path.rmtree(work_dir)
except Exception:
botobj.post_error('Failed to delete work directory %s: %s' %
(work_dir, traceback.format_exc()[-2048:]))
# Failure to delete could be due to a proc with open file handles. Just
# reboot the machine in that case.
must_reboot_reason = 'Failure to remove %s' % work_dir
if must_reboot_reason:
botobj.host_reboot(must_reboot_reason)
### Bot lifetime management
def _run_bot(arg_error):
"""Runs _run_bot_inner() with a signal handler."""
# The quit_bit is to signal that the bot process must shutdown. It is
# different from a request to restart the bot process or reboot the host.
quit_bit = threading.Event()
def handler(sig, _):
# A signal terminates the bot process, it doesn't cause it to restart.
logging.info('Got signal %s', sig)
quit_bit.set()
# TODO(maruel): Set quit_bit when stdin is closed on Windows.
with subprocess42.set_signal_handler(subprocess42.STOP_SIGNALS, handler):
return _run_bot_inner(arg_error, quit_bit)
def _run_bot_inner(arg_error, quit_bit):
"""Runs the bot until an event occurs.
  One of the three following events can occur:
- host reboots
- bot process restarts (this includes self-update)
- bot process shuts down (this includes a signal is received)
"""
config = get_config()
if config.get('enable_ts_monitoring'):
_init_ts_mon()
try:
# First thing is to get an arbitrary url. This also ensures the network is
# up and running, which is necessary before trying to get the FQDN below.
# There's no need to do error handling here - the "ping" is just to "wake
# up" the network; if there's something seriously wrong, the handshake will
# fail and we'll handle it there.
hostname = _get_botid_safe()
base_dir = os.path.dirname(THIS_FILE)
remote = remote_client.createRemoteClient(config['server'], None, hostname,
base_dir)
remote.ping()
except Exception:
# url_read() already traps pretty much every exceptions. This except
# clause is kept there "just in case".
logging.exception('server_ping threw')
# If we are on GCE, we want to make sure GCE metadata server responds, since
# we use the metadata to derive bot ID, dimensions and state.
if platforms.is_gce():
logging.info('Running on GCE, waiting for the metadata server')
platforms.gce.wait_for_metadata(quit_bit)
if quit_bit.is_set():
logging.info('Early quit 1')
return 0
# Next we make sure the bot can make authenticated calls by grabbing the auth
# headers, retrying on errors a bunch of times. We don't give up if it fails
# though (maybe the bot will "fix itself" later).
botobj = get_bot(config)
try:
botobj.remote.initialize(quit_bit)
except remote_client.InitializationError as exc:
botobj.post_error('failed to grab auth headers: %s' % exc.last_error)
logging.error('Can\'t grab auth headers, continuing anyway...')
if arg_error:
botobj.post_error('Bootstrapping error: %s' % arg_error)
if quit_bit.is_set():
logging.info('Early quit 2')
return 0
_call_hook_safe(True, botobj, 'on_bot_startup')
# Initial attributes passed to bot.Bot in get_bot above were constructed for
# 'fake' bot ID ('none'). Refresh them to match the real bot ID, now that we
  # have a fully initialized bot.Bot object. Note that 'get_dimensions' and
# 'get_state' may depend on actions done by 'on_bot_startup' hook, that's why
# we do it here and not in 'get_bot'.
dims = _get_dimensions(botobj)
states = _get_state(botobj, 0)
with botobj._lock:
botobj._update_dimensions(dims)
botobj._update_state(states)
if quit_bit.is_set():
logging.info('Early quit 3')
return 0
_do_handshake(botobj, quit_bit)
if quit_bit.is_set():
logging.info('Early quit 4')
return 0
  # Let the bot finish the initialization, now that it knows its
  # server-defined dimensions.
_call_hook_safe(True, botobj, 'on_handshake')
_cleanup_bot_directory(botobj)
_clean_cache(botobj)
if quit_bit.is_set():
logging.info('Early quit 5')
return 0
# This environment variable is accessible to the tasks executed by this bot.
os.environ['SWARMING_BOT_ID'] = six.ensure_str(botobj.id)
# bot_id is used in 'X-Luci-Swarming-Bot-ID' header.
botobj.remote.bot_id = botobj.id
consecutive_sleeps = 0
last_action = time.time()
while not quit_bit.is_set():
try:
_call_hook_safe(False, botobj, 'on_before_poll')
dims = _get_dimensions(botobj)
states = _get_state(botobj, consecutive_sleeps)
with botobj._lock:
botobj._update_dimensions(dims)
botobj._update_state(states)
did_something = _poll_server(botobj, quit_bit, last_action)
if did_something:
last_action = time.time()
consecutive_sleeps = 0
else:
consecutive_sleeps += 1
except Exception as e:
logging.exception('_poll_server failed in a completely unexpected way')
msg = '%s\n%s' % (e, traceback.format_exc()[-2048:])
botobj.post_error(msg)
consecutive_sleeps = 0
# Sleep a bit as a precaution to avoid hammering the server.
quit_bit.wait(10)
# Tell the server we are going away.
botobj.post_event('bot_shutdown', 'Signal was received')
return 0
def _should_have_exited_but_didnt(reason):
"""Something super sad happened, set the sticky quarantine bit before polling
again and sleep a bit to prevent busy-loop/DDoS.
"""
time.sleep(2)
_set_quarantined(reason)
def _do_handshake(botobj, quit_bit):
"""Connects to /handshake and reads the bot_config if specified."""
# This is the first authenticated request to the server. If the bot is
# misconfigured, the request may fail with HTTP 401 or HTTP 403. Instead of
# dying right away, spin in a loop, hoping the bot will "fix itself"
# eventually. Authentication errors in /handshake are logged on the server and
# generate error reports, so bots stuck in this state are discoverable.
sleep_time = 5
while not quit_bit.is_set():
resp = botobj.remote.do_handshake(botobj._attributes)
if resp:
logging.info('Connected to %s', resp.get('server_version'))
if resp.get('bot_version') != botobj._attributes['version']:
logging.warning(
'Found out we\'ll need to update: server said %s; we\'re %s',
resp.get('bot_version'), botobj._attributes['version'])
# Remember the server-provided per-bot configuration. '/handshake' is
# the only place where the server returns it. The bot will be sending
# the 'bot_group_cfg_version' back in each /poll (as part of 'state'),
# so that the server can instruct the bot to restart itself when
# config changes.
cfg_version = resp.get('bot_group_cfg_version')
if cfg_version:
botobj._update_bot_group_cfg(cfg_version, resp.get('bot_group_cfg'))
content = resp.get('bot_config')
rev = resp.get('bot_config_rev')
script = resp.get('bot_config_name')
if content:
_register_extra_bot_config(content, rev, script)
break
logging.error(
'Failed to contact for handshake, retrying in %d sec...', sleep_time)
quit_bit.wait(sleep_time)
sleep_time = min(300, sleep_time * 2)
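# For illustration: with the backoff above, a failing handshake is retried
# after 5, 10, 20, 40, 80, 160, 300, 300, ... seconds until the server answers
# or quit_bit is set.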
def _poll_server(botobj, quit_bit, last_action):
"""Polls the server to run one loop.
Returns True if executed some action, False if server asked the bot to sleep.
"""
start = time.time()
cmd = None
try:
cmd, value = botobj.remote.poll(botobj._attributes)
except remote_client_errors.PollError as e:
# Back off on failure.
delay = max(1, min(60, botobj.state.get(u'sleep_streak', 10) * 2))
logging.warning('Poll failed (%s), sleeping %.1f sec', e, delay)
quit_bit.wait(delay)
return False
finally:
_call_hook_safe(False, botobj, 'on_after_poll', cmd)
logging.debug('Server response:\n%s: %s', cmd, value)
if cmd == 'sleep':
# Value is duration
_call_hook_safe(
True, botobj, 'on_bot_idle', max(0, time.time() - last_action))
_maybe_update_lkgbc(botobj)
try:
# Sometimes throw with "[Errno 4] Interrupted function call", especially
# on Windows upon system shutdown.
quit_bit.wait(value)
except IOError:
# Act as it if were set as this likely mean a system shutdown.
quit_bit.set()
return False
if cmd == 'terminate':
# The value is the task ID to serve as the special termination command.
quit_bit.set()
try:
# Duration must be set or server IEs. For that matter, we've never cared
# if there's an error here before, so let's preserve that behaviour
# (though anything that's not a remote_client.InternalError will make
# it through, again preserving prior behaviour).
botobj.remote.post_task_update(value, {'duration': 0}, None, 0)
except remote_client_errors.InternalError:
pass
return False
if cmd == 'run':
# Value is the manifest
success = _run_manifest(botobj, value, start)
# Unconditionally clean up cache after each task. This is done *after* the
# task is terminated, so that:
# - there's no task overhead
# - if there's an exception while cleaning, it's not logged in the task
_clean_cache(botobj)
if success:
# Completed a task successfully so update swarming_bot.zip if necessary.
_update_lkgbc(botobj)
# TODO(maruel): Handle the case where quit_bit.is_set() happens here. This
# is concerning as this means a signal (often SIGTERM) was received while
# running the task. Make sure the host is properly restarting.
elif cmd == 'update':
# Value is the version
_update_bot(botobj, value)
_should_have_exited_but_didnt('Failed to self-update the bot')
elif cmd in ('host_reboot', 'restart'):
# Value is the message to display while rebooting the host
botobj.host_reboot(value)
_should_have_exited_but_didnt('Failed to reboot the host')
elif cmd == 'bot_restart':
# Value is the message to display while restarting
_bot_restart(botobj, value)
_should_have_exited_but_didnt('Failed to restart the bot process')
else:
raise ValueError('Unexpected command: %s\n%s' % (cmd, value))
return True
def _update_bot(botobj, version):
"""Downloads the new version of the bot code and then runs it.
Use alternating files; first load swarming_bot.1.zip, then swarming_bot.2.zip,
never touching swarming_bot.zip which was the originally bootstrapped file.
LKGBC is handled by _update_lkgbc() and _maybe_update_lkgbc().
Returns only in case of failure to get the new bot code.
"""
# Alternate between .1.zip and .2.zip.
new_zip = 'swarming_bot.1.zip'
if os.path.basename(THIS_FILE) == new_zip:
new_zip = 'swarming_bot.2.zip'
new_zip = os.path.join(botobj.base_dir, new_zip)
# Download as a new file.
try:
botobj.remote.get_bot_code(new_zip, version)
except remote_client.BotCodeError as e:
botobj.post_error(str(e))
else:
_bot_restart(botobj, 'Updating to %s' % version, filepath=new_zip)
def _bot_restart(botobj, message, filepath=None):
"""Restarts the bot process, optionally in a new file.
The function will return if the new bot code is not valid.
"""
filepath = filepath or THIS_FILE
s = fs.stat(filepath)
logging.info('Restarting to %s; %d bytes.', filepath, s.st_size)
sys.stdout.flush()
sys.stderr.flush()
proc = _Popen(botobj, [sys.executable, filepath, 'is_fine'])
output, _ = proc.communicate()
if proc.returncode:
botobj.post_error(
'New bot code is bad: proc exit = %s. stdout:\n%s' %
(proc.returncode, output))
if sys.platform == 'win32' and proc.returncode == -1073741502:
# STATUS_DLL_INIT_FAILED generally means that something bad happened, and
# a reboot magically clears things out. :(
botobj.host_reboot(
'Working around STATUS_DLL_INIT_FAILED when restarting the bot')
return
botobj.post_event('bot_shutdown', 'About to restart: %s' % message)
# Sleep a bit to make sure new bot process connects to a GAE instance with
# the fresh bot group config cache (it gets refreshed each second). This makes
# sure the bot doesn't accidentally pick up the old config after restarting
# and connecting to an instance with a stale cache.
time.sleep(2)
# Don't forget to release the singleton before restarting itself.
SINGLETON.release()
# Do not call on_bot_shutdown.
# On OSX, launchd will be unhappy if we quit so the old code bot process has
# to outlive the new code child process. Launchd really wants the main process
# to survive, and it'll restart it if it disappears. os.exec*() replaces the
# process so this is fine.
cmd = [filepath, 'start_slave', '--survive']
if _IN_TEST_MODE:
cmd.append('--test-mode')
logging.debug('Restarting bot, cmd: %s', cmd)
ret = common.exec_python(cmd)
if ret in (1073807364, -1073741510):
# 1073807364 is returned when the process is killed due to shutdown. No need
# to alert anyone in that case.
# -1073741510 is returned when rebooting too. This can happen when the
# parent code was running the old version and gets confused and decided to
# poll again.
# In any case, zap out the error code.
ret = 0
elif ret:
botobj.post_error('Bot failed to respawn after update: %s' % ret)
sys.exit(ret)
def _update_lkgbc(botobj):
"""Updates the Last Known Good Bot Code if necessary.
Returns True if LKGBC was updated.
"""
try:
if not fs.isfile(THIS_FILE):
# TODO(maruel): Try to download the code again from the server.
botobj.post_error('Missing file %s for LKGBC' % THIS_FILE)
return False
golden = os.path.join(botobj.base_dir, 'swarming_bot.zip')
if fs.isfile(golden):
org = fs.stat(golden)
cur = fs.stat(THIS_FILE)
      # No need to update if the golden copy has the same size and is newer.
      if org.st_size == cur.st_size and org.st_mtime >= cur.st_mtime:
return False
# Copy the current file back to LKGBC.
shutil.copy(THIS_FILE, golden)
return True
except Exception as e:
botobj.post_error('Failed to update LKGBC: %s' % e)
return False
def _maybe_update_lkgbc(botobj):
"""Updates the Last Known Good Bot Code (LKGBC) when it is older than 1 one
week.
This either means:
- The bot code is particularly hosed, for an extended period of time.
  - All tasks are failing, which may legitimately happen sometimes when the devs
don't care about a particular configuration.
- The bot is completely idle, so it never gets the chance to update LKGBC.
We decide that the first situation is rare enough that it's preferable to
  explicitly handle the latter two. We've seen bots being idle for extended
periods of time (well over a year), which introduces all sorts of
'interesting' problems.
Returns True if LKGBC was updated.
"""
try:
if not fs.isfile(THIS_FILE):
# TODO(maruel): Try to download the code again from the server.
return False
golden = os.path.join(botobj.base_dir, u'swarming_bot.zip')
if fs.isfile(golden):
org = fs.stat(golden)
cur = fs.stat(THIS_FILE)
      # No need to update if the golden copy has the same size and is newer.
      if org.st_size == cur.st_size and org.st_mtime >= cur.st_mtime:
return False
if org.st_mtime >= time.time() - 7*24*60*60:
return False
# Copy the current file back to LKGBC.
shutil.copy(THIS_FILE, golden)
return True
except Exception as e:
botobj.post_error('Failed to update LKGBC while idle: %s' % e)
return False
def main(argv):
subprocess42.inhibit_os_error_reporting()
# Disable magical auto-detection of OAuth config. bot_main.py prepares auth
# headers on its own explicitly (via get_authentication_headers hook) when
# using 'net' library through RemoteClientNative class and auto-configured
# auth in net.py may interfere with this. We also disable auto-detection in
# task_runner.py (since it also uses special mechanism for getting auth
# headers from bot_main.py). We do _not_ disable auto-detection in
# run_isolated.py, since at this layer we have an auth context (setup by
# task_runner.py) and it is correctly getting recognized by the auto-detection
# in net.py.
net.disable_oauth_config()
# Add SWARMING_HEADLESS into environ so subcommands know that they are running
# in a headless (non-interactive) mode.
os.environ['SWARMING_HEADLESS'] = '1'
# The only reason this is kept is to enable the unit test to use --help to
# quit the process.
parser = argparse.ArgumentParser(description=sys.modules[__name__].__doc__)
parser.add_argument('unsupported', nargs='*', help=argparse.SUPPRESS)
parser.add_argument('--test-mode', action='store_true')
args = parser.parse_args(argv)
global _IN_TEST_MODE
_IN_TEST_MODE = args.test_mode
if _IN_TEST_MODE:
logging.debug('bot_main running in TEST mode')
if sys.platform == 'win32':
if not file_path.enable_privilege('SeShutdownPrivilege'):
logging.error('Failed to enable SeShutdownPrivilege')
# Enforces that only one process with a bot in this directory can be run on
# this host at once.
if not SINGLETON.acquire():
if sys.platform == 'darwin':
msg = (
'Found a previous bot, %d rebooting as a workaround for '
'https://crbug.com/569610.') % os.getpid()
else:
msg = ('Found a previous bot, %d rebooting as a workaround for '
'https://crbug.com/1061531' % os.getpid())
print(msg, file=sys.stderr)
os_utilities.host_reboot(msg)
return 1
base_dir = os.path.dirname(THIS_FILE)
for t in ('out', 'err'):
log_path = os.path.join(base_dir, 'logs', 'bot_std%s.log' % t)
os_utilities.roll_log(log_path)
os_utilities.trim_rolled_log(log_path)
error = None
if len(args.unsupported) != 0:
error = 'Unexpected arguments: %s' % args
try:
return _run_bot(error)
finally:
_call_hook_safe(
True, bot.Bot(None, None, None, None, base_dir, None),
'on_bot_shutdown')
logging.info('main() returning')
| 35.103588 | 97 | 0.693064 |
4a1eaf4450edc58e208b3276c46323f6ed5de127
| 4,868 |
py
|
Python
|
test/unit/managers/base.py
|
tsungjui/fusionline
|
26d5d41e82ac83822ba41df1cd14c54afa112655
|
[
"CC-BY-3.0"
] | 1 |
2019-11-03T11:45:43.000Z
|
2019-11-03T11:45:43.000Z
|
test/unit/managers/base.py
|
tsungjui/fusionline
|
26d5d41e82ac83822ba41df1cd14c54afa112655
|
[
"CC-BY-3.0"
] | 4 |
2017-05-24T19:36:34.000Z
|
2019-08-23T02:49:18.000Z
|
test/unit/managers/base.py
|
abretaud/galaxy
|
1ad89511540e6800cd2d0da5d878c1c77d8ccfe9
|
[
"CC-BY-3.0"
] | null | null | null |
"""
"""
from __future__ import print_function
import json
import os
import sys
import unittest
import sqlalchemy
from six import string_types
from galaxy.managers.users import UserManager
unit_root = os.path.abspath( os.path.join( os.path.dirname( __file__ ), os.pardir ) )
sys.path.insert( 1, unit_root )
from unittest_utils import galaxy_mock
# =============================================================================
admin_email = 'admin@admin.admin'
admin_users = admin_email
default_password = '123456'
# =============================================================================
class BaseTestCase( unittest.TestCase ):
@classmethod
def setUpClass( cls ):
print( '\n', '-' * 20, 'begin class', cls )
@classmethod
def tearDownClass( cls ):
print( '\n', '-' * 20, 'end class', cls )
def __init__( self, *args ):
unittest.TestCase.__init__( self, *args )
def setUp( self ):
self.log( '.' * 20, 'begin test', self )
self.set_up_mocks()
self.set_up_managers()
self.set_up_trans()
def set_up_mocks( self ):
self.trans = galaxy_mock.MockTrans( admin_users=admin_users )
self.app = self.trans.app
def set_up_managers( self ):
self.user_manager = UserManager( self.app )
def set_up_trans( self ):
self.admin_user = self.user_manager.create( email=admin_email, username='admin', password=default_password )
self.trans.set_user( self.admin_user )
self.trans.set_history( None )
def tearDown( self ):
self.log( '.' * 20, 'end test', self, '\n' )
def log( self, *args, **kwargs ):
print( *args, **kwargs )
# ---- additional test types
TYPES_NEEDING_NO_SERIALIZERS = ( string_types, bool, type( None ), int, float )
def assertKeys( self, obj, key_list ):
self.assertEqual( sorted( obj.keys() ), sorted( key_list ) )
def assertHasKeys( self, obj, key_list ):
for key in key_list:
if key not in obj:
self.fail( 'Missing key: ' + key )
else:
self.assertTrue( True, 'keys found in object' )
def assertNullableBasestring( self, item ):
if not isinstance( item, ( string_types, type( None ) ) ):
self.fail( 'Non-nullable basestring: ' + str( type( item ) ) )
# TODO: len mod 8 and hex re
self.assertTrue( True, 'is nullable basestring: ' + str( item ) )
def assertEncodedId( self, item ):
if not isinstance( item, string_types ):
self.fail( 'Non-string: ' + str( type( item ) ) )
# TODO: len mod 8 and hex re
self.assertTrue( True, 'is id: ' + item )
def assertNullableEncodedId( self, item ):
if item is None:
self.assertTrue( True, 'nullable id is None' )
else:
self.assertEncodedId( item )
def assertDate( self, item ):
if not isinstance( item, string_types ):
self.fail( 'Non-string: ' + str( type( item ) ) )
# TODO: no great way to parse this fully (w/o python-dateutil)
# TODO: re?
self.assertTrue( True, 'is date: ' + item )
def assertUUID( self, item ):
if not isinstance( item, string_types ):
self.fail( 'Non-string: ' + str( type( item ) ) )
# TODO: re for d4d76d69-80d4-4ed7-80c7-211ebcc1a358
self.assertTrue( True, 'is uuid: ' + item )
def assertORMFilter( self, item, msg=None ):
if not isinstance( item, sqlalchemy.sql.elements.BinaryExpression ):
self.fail( 'Not an orm filter: ' + str( type( item ) ) )
self.assertTrue( True, msg or ( 'is an orm filter: ' + str( item ) ) )
def assertFnFilter( self, item, msg=None ):
if not item or not callable( item ):
self.fail( 'Not a fn filter: ' + str( type( item ) ) )
self.assertTrue( True, msg or ( 'is a fn filter: ' + str( item ) ) )
def assertIsJsonifyable( self, item ):
# TODO: use galaxy's override
self.assertIsInstance( json.dumps( item ), string_types )
class CreatesCollectionsMixin( object ):
def build_element_identifiers( self, elements ):
identifier_list = []
for element in elements:
src = 'hda'
# if isinstance( element, model.DatasetCollection ):
# src = 'collection'#?
# elif isinstance( element, model.LibraryDatasetDatasetAssociation ):
# src = 'ldda'#?
encoded_id = self.trans.security.encode_id( element.id )
identifier_list.append( dict( src=src, name=element.name, id=encoded_id ) )
return identifier_list
# =============================================================================
if __name__ == '__main__':
# or more generally, nosetests test_resourcemanagers.py -s -v
unittest.main()
| 34.524823 | 116 | 0.57765 |
4a1eb1c2a51cf12bc23b7023a2a95d4e9c0bdc3d
| 6,155 |
py
|
Python
|
OGBN_arxiv/unify/ogb/ogbn_arxiv/args.py
|
x-zho14/Unified-LTH-GNN
|
edbb2f9aaa7cb363424dcfcb2ce198cfb66f3d55
|
[
"MIT"
] | 29 |
2021-02-17T02:46:54.000Z
|
2022-03-18T02:09:03.000Z
|
OGBN_arxiv/unify/ogb/ogbn_arxiv/args.py
|
x-zho14/Unified-LTH-GNN
|
edbb2f9aaa7cb363424dcfcb2ce198cfb66f3d55
|
[
"MIT"
] | 1 |
2021-09-03T13:30:50.000Z
|
2021-09-03T13:30:50.000Z
|
OGBN_arxiv/unify/ogb/ogbn_arxiv/args.py
|
x-zho14/Unified-LTH-GNN
|
edbb2f9aaa7cb363424dcfcb2ce198cfb66f3d55
|
[
"MIT"
] | 10 |
2021-04-01T16:27:03.000Z
|
2022-03-07T09:20:38.000Z
|
import __init__
import argparse
import uuid
import logging
import time
import os
import sys
from utils.logger import create_exp_dir
import glob
class ArgsInit(object):
def __init__(self):
parser = argparse.ArgumentParser(description='DeeperGCN')
### pruning settings
parser.add_argument('--s1', type=float, default=0.0001,help='scale sparse rate (default: 0.0001)')
parser.add_argument('--s2', type=float, default=0.0001,help='scale sparse rate (default: 0.0001)')
parser.add_argument('--pruning_percent_wei', type=float, default=0.1)
parser.add_argument('--pruning_percent_adj', type=float, default=0.1)
parser.add_argument('--resume_dir', type=str, default='')
parser.add_argument('--seed', type=int, default=10, help='which seed to use if any (default: 0)')
parser.add_argument('--mask_epochs', type=int, default=200,
help='number of epochs to train (default: 500)')
parser.add_argument('--fix_epochs', type=int, default=500,
help='number of epochs to train (default: 500)')
parser.add_argument('--fixed', default='', type=str, help='{all_fixed, only_adj, only_wei, no_fixed}')
# parser.add_argument('--baseline', action='store_true')
# dataset
parser.add_argument('--dataset', type=str, default='ogbn-arxiv',
help='dataset name (default: ogbn-arxiv)')
parser.add_argument('--self_loop', action='store_true')
# training & eval settings
parser.add_argument('--use_gpu', action='store_true')
parser.add_argument('--device', type=int, default=0,
help='which gpu to use if any (default: 0)')
parser.add_argument('--epochs', type=int, default=500,
help='number of epochs to train (default: 500)')
parser.add_argument('--lr', type=float, default=0.01,
help='learning rate set for optimizer.')
parser.add_argument('--dropout', type=float, default=0.5)
# model
parser.add_argument('--num_layers', type=int, default=3,
help='the number of layers of the networks')
parser.add_argument('--mlp_layers', type=int, default=1,
help='the number of layers of mlp in conv')
parser.add_argument('--in_channels', type=int, default=128,
help='the dimension of initial embeddings of nodes')
parser.add_argument('--hidden_channels', type=int, default=128,
help='the dimension of embeddings of nodes')
parser.add_argument('--block', default='res+', type=str,
help='graph backbone block type {res+, res, dense, plain}')
parser.add_argument('--conv', type=str, default='gen',
help='the type of GCNs')
parser.add_argument('--gcn_aggr', type=str, default='max',
help='the aggregator of GENConv [mean, max, add, softmax, softmax_sg, softmax_sum, power, power_sum]')
parser.add_argument('--norm', type=str, default='batch',
help='the type of normalization layer')
parser.add_argument('--num_tasks', type=int, default=1,
help='the number of prediction tasks')
# learnable parameters
parser.add_argument('--t', type=float, default=1.0,
help='the temperature of SoftMax')
parser.add_argument('--p', type=float, default=1.0,
help='the power of PowerMean')
parser.add_argument('--y', type=float, default=0.0,
help='the power of degrees')
parser.add_argument('--learn_t', action='store_true')
parser.add_argument('--learn_p', action='store_true')
parser.add_argument('--learn_y', action='store_true')
# message norm
parser.add_argument('--msg_norm', action='store_true')
parser.add_argument('--learn_msg_scale', action='store_true')
# save model
parser.add_argument('--model_save_path', type=str, default='model_ckpt',
help='the directory used to save models')
parser.add_argument('--save', type=str, default='CKPTs', help='experiment name')
# load pre-trained model
parser.add_argument('--model_load_path', type=str, default='ogbn_arxiv_pretrained_model.pth',
help='the path of pre-trained model')
self.args = parser.parse_args()
def save_exp(self):
# self.args.save = '{}-B_{}-C_{}-L_{}-F_{}-DP_{}' \
# '-GA_{}-T_{}-LT_{}-P_{}-LP_{}-Y_{}-LY_{}' \
# '-MN_{}-LS_{}'.format(self.args.save, self.args.block, self.args.conv,
# self.args.num_layers, self.args.hidden_channels,
# self.args.dropout, self.args.gcn_aggr,
# self.args.t, self.args.learn_t,
# self.args.p, self.args.learn_p,
# self.args.y, self.args.learn_y,
# self.args.msg_norm, self.args.learn_msg_scale)
# self.args.save = 'log/{}-{}-{}'.format(self.args.save, time.strftime("%Y%m%d-%H%M%S"), str(uuid.uuid4()))
self.args.model_save_path = os.path.join(self.args.save, self.args.model_save_path)
create_exp_dir(self.args.save, scripts_to_save=glob.glob('*.py'))
log_format = '%(asctime)s %(message)s'
logging.basicConfig(stream=sys.stdout,
level=logging.INFO,
format=log_format,
datefmt='%m/%d %I:%M:%S %p')
# fh = logging.FileHandler(os.path.join(self.args.model_save_path, 'log.txt'))
# fh.setFormatter(logging.Formatter(log_format))
# logging.getLogger().addHandler(fh)
return self.args
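# Illustrative usage from a hypothetical training entry point:
#   args = ArgsInit().save_exp()  # parse the flags and create the save dir
#   # args.dataset, args.num_layers, args.lr, ... are then available to the
#   # training loop; the attribute names come from the flags defined above.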
| 56.46789 | 130 | 0.566856 |
4a1eb21f4bae9f5dda5ed1cb379f814f29060236
| 6,061 |
py
|
Python
|
HW2/Video.py
|
etc1290/Computer-Vision
|
d4fe765533e0e687e424e17397ff03e8be0fa3b2
|
[
"MIT"
] | null | null | null |
HW2/Video.py
|
etc1290/Computer-Vision
|
d4fe765533e0e687e424e17397ff03e8be0fa3b2
|
[
"MIT"
] | null | null | null |
HW2/Video.py
|
etc1290/Computer-Vision
|
d4fe765533e0e687e424e17397ff03e8be0fa3b2
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Wed Oct 7 09:15:07 2020
@author: ST16
"""
import os
import cv2
import cv2 as cv
import numpy as np
from datetime import datetime
from PIL import ImageFont, ImageDraw, Image
#import matplotlib.pyplot as plt
#import matplotlib.image as mpimg
#Make a new subfolder for saved images
if not os.path.isdir("./Saved Image"):
#print("New Dir")
os.makedirs("./Saved Image")
#Initialize
group = 0
cap = cv2.VideoCapture(0)
cap.set(3, 640)
cap.set(4, 480)
kernel = np.ones((7,7),np.uint8)
b,g,r,a = 255,255,255,0
font = ImageFont.truetype("TaipeiSansTCBeta-Bold.ttf", 470)
mask0 = np.zeros((480,640,3),np.uint8)
img_pil = Image.fromarray(mask0)
draw = ImageDraw.Draw(img_pil)
draw.text((100, 0), "聖", font = font, fill = (b, g, r, a))
mask0 = np.array(img_pil)
mask1 = np.zeros((480,640,3),np.uint8)
img_pil = Image.fromarray(mask1)
draw = ImageDraw.Draw(img_pil)
draw.text((100, 0), "誕", font = font, fill = (b, g, r, a))
mask1 = np.array(img_pil)
mask2 = np.zeros((480,640,3),np.uint8)
img_pil = Image.fromarray(mask2)
draw = ImageDraw.Draw(img_pil)
draw.text((100, 0), "快", font = font, fill = (b, g, r, a))
mask2 = np.array(img_pil)
mask3 = np.zeros((480,640,3),np.uint8)
img_pil = Image.fromarray(mask3)
draw = ImageDraw.Draw(img_pil)
draw.text((100, 0), "樂", font = font, fill = (b, g, r, a))
mask3 = np.array(img_pil)
bar = cv2.imread("Bar.png")
while(1):
# Take each frame
_, frame = cap.read()
if group == 0:
img1 = frame
img2 = frame
img3 = frame
if group == 1:
(img1,img2,img3) = cv2.split(frame)
img1 = cv2.cvtColor(img1, cv2.COLOR_GRAY2BGR)
img2 = cv2.cvtColor(img2, cv2.COLOR_GRAY2BGR)
img3 = cv2.cvtColor(img3, cv2.COLOR_GRAY2BGR)
        frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
frame = cv2.cvtColor(frame, cv2.COLOR_GRAY2BGR)
if group == 2:
img1 = frame.copy()
img2 = frame.copy()
img3 = frame.copy()
img1[:,:,0] = 0
img2[:,:,1] = 0
img3[:,:,2] = 0
if group == 3:
img1 = cv2.flip(frame, 1)
img2 = cv2.flip(frame, 0)
img3 = cv2.flip(frame, -1)
if group == 4:
img1 = cv2.bitwise_and(frame, mask1)
img2 = cv2.bitwise_and(frame, mask2)
img3 = cv2.bitwise_and(frame, mask3)
frame = cv2.bitwise_and(frame, mask0)
if group == 5:
img1 = cv2.GaussianBlur(frame,(15,15),0)
img2 = cv2.GaussianBlur(frame,(15,25),0)
img3 = cv2.GaussianBlur(frame,(35,35),0)
if group == 6:
img1 = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
img2 = cv2.cvtColor(frame, cv2.COLOR_BGR2LAB)
img3 = cv2.cvtColor(frame, cv2.COLOR_BGR2YCrCb)
if group == 7:
(b,g,r) = cv2.split(frame)
tempb = cv2.equalizeHist(b)
tempg = cv2.equalizeHist(g)
tempr = cv2.equalizeHist(r)
img1 = np.dstack((tempb, tempg, tempr))
temp = cv2.Canny(cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY),100,200)
img2 = np.dstack((temp, temp, temp))
img3 = cv2.cvtColor(frame, cv2.COLOR_BGR2YCrCb)
h1 = np.hstack((frame,img1))
h2 = np.hstack((img2,img3))
quard = np.vstack((h1,h2))
quard = np.vstack((quard,bar))
if group == 0 or group == 4 or group == 5:
Text1 = " "
Text2 = " "
Text3 = " "
Text4 = " "
if group == 1:
Text1 = "Gray Scale"
Text2 = "Blue"
Text3 = "Green"
Text4 = "Red"
if group == 2:
Text1 = " "
Text2 = "No Blue"
Text3 = "No Green"
Text4 = "No Red"
if group == 3:
Text1 = " "
Text2 = "Horizontally"
Text3 = "Vertically"
Text4 = "H+V"
if group == 6:
Text1 = " "
Text2 = "HSV"
Text3 = "Lab"
Text4 = "YCrCb"
if group == 7:
Text1 = " "
Text2 = "Histogram"
Text3 = "Canny"
Text4 = "YCrCb"
text_size = 2
text_Hoffset = 70
text_Voffset = 15
cv2.putText(quard, Text1, (text_Voffset, text_Hoffset), cv2.FONT_HERSHEY_DUPLEX,text_size, (0, 0, 0), 3, cv2.LINE_AA)
cv2.putText(quard, Text2, (640+text_Voffset, text_Hoffset), cv2.FONT_HERSHEY_DUPLEX,text_size, (0, 0, 0), 3, cv2.LINE_AA)
cv2.putText(quard, Text3, (text_Voffset, 480+text_Hoffset), cv2.FONT_HERSHEY_DUPLEX,text_size, (0, 0, 0), 3, cv2.LINE_AA)
cv2.putText(quard, Text4, (640+text_Voffset, 480+text_Hoffset), cv2.FONT_HERSHEY_DUPLEX,text_size, (0, 0, 0), 3, cv2.LINE_AA)
cv2.putText(quard, Text1, (text_Voffset, text_Hoffset), cv2.FONT_HERSHEY_DUPLEX,text_size, (255, 255, 255), 2, cv2.LINE_AA)
cv2.putText(quard, Text2, (640+text_Voffset, text_Hoffset), cv2.FONT_HERSHEY_DUPLEX,text_size, (255, 255, 255), 2, cv2.LINE_AA)
cv2.putText(quard, Text3, (text_Voffset, 480+text_Hoffset), cv2.FONT_HERSHEY_DUPLEX,text_size, (255, 255, 255), 2, cv2.LINE_AA)
cv2.putText(quard, Text4, (640+text_Voffset, 480+text_Hoffset), cv2.FONT_HERSHEY_DUPLEX,text_size, (255, 255, 255), 2, cv2.LINE_AA)
#cv2.circle(frame,(200,200 + group*10),50,(0, 0, 0), -1)
cv2.namedWindow('Orignal', cv2.WINDOW_NORMAL)
cv2.resizeWindow('Orignal', 960, 720)
cv2.imshow('Orignal',quard)
key = cv2.waitKeyEx(5)
#print(key)
"""
    Please uncomment "print(key)" to check the arrow key values
    if the arrow keys are not working correctly:
    2555904 (Right Arrow)
    2424832 (Left Arrow)
"""
if key == 2555904:
if group >= 7:
group = 7
else:
group += 1
if key == 2424832:
if group <= 0:
            group = 0
else:
group -= 1
if key == 32:
now = datetime.now()
current_time = now.strftime("%Y_%m%d_%H%M%S")
cv.imwrite('Saved Image/1'+current_time+'.jpg',frame)
cv.imwrite('Saved Image/2'+current_time+'quard.jpg',quard)
#print(current_time)
if key == 27:
break
cap.release()
cv2.destroyAllWindows()
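# Controls implemented above: the right/left arrow keys step through the display
# modes (original, channel split, channel removal, flips, text masks, blurs,
# color spaces, histogram/Canny), the space bar saves the current frame and the
# quad view into "Saved Image/", and ESC exits.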
| 30.611111 | 135 | 0.589672 |
4a1eb2f2fc675bc221980f5c9c89140798d1dc3d
| 1,464 |
py
|
Python
|
epayco_django/migrations/0004_add_length_to_transaction_fields.py
|
gustav0/epayco_django
|
ea71a3a48271ba87a501135ba2025c56279bf8fc
|
[
"MIT"
] | 1 |
2020-05-09T22:35:10.000Z
|
2020-05-09T22:35:10.000Z
|
epayco_django/migrations/0004_add_length_to_transaction_fields.py
|
gustav0/epayco_django
|
ea71a3a48271ba87a501135ba2025c56279bf8fc
|
[
"MIT"
] | 2 |
2020-09-24T10:49:58.000Z
|
2021-06-25T15:40:34.000Z
|
epayco_django/migrations/0004_add_length_to_transaction_fields.py
|
gustav0/epayco_django
|
ea71a3a48271ba87a501135ba2025c56279bf8fc
|
[
"MIT"
] | 2 |
2020-07-08T04:46:07.000Z
|
2020-08-12T13:16:34.000Z
|
# Generated by Django 2.2.3 on 2021-02-21 02:36
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('epayco_django', '0003_extend_response_reason_text_length'),
]
operations = [
migrations.AlterField(
model_name='paymentconfirmation',
name='approval_code',
field=models.CharField(max_length=32),
),
migrations.AlterField(
model_name='paymentconfirmation',
name='cod_response',
field=models.CharField(max_length=32),
),
migrations.AlterField(
model_name='paymentconfirmation',
name='cod_transaction_state',
field=models.CharField(max_length=32),
),
migrations.AlterField(
model_name='paymentconfirmation',
name='errorcode',
field=models.CharField(max_length=32),
),
migrations.AlterField(
model_name='paymentconfirmation',
name='franchise',
field=models.CharField(max_length=32),
),
migrations.AlterField(
model_name='paymentconfirmation',
name='response',
field=models.CharField(max_length=32),
),
migrations.AlterField(
model_name='paymentconfirmation',
name='transaction_state',
field=models.CharField(max_length=32),
),
]
| 29.877551 | 69 | 0.586749 |
4a1eb32fcc6d3f5bcc327af1a3c7b423f2412f81
| 572 |
py
|
Python
|
imagesite/src/config.py
|
gatarelib/PyTorchCV
|
5191d0ddc5c42a4cc8dc5451aa14c263c2f3e77f
|
[
"Apache-2.0"
] | 308 |
2019-08-11T02:12:37.000Z
|
2022-03-30T07:20:41.000Z
|
imagesite/src/config.py
|
shanhedian2017/torchcv
|
6414f5acb41c2f35f8e79e477a57eaba65591c66
|
[
"Apache-2.0"
] | 19 |
2019-08-22T04:57:33.000Z
|
2022-03-27T10:59:23.000Z
|
imagesite/src/config.py
|
shanhedian2017/torchcv
|
6414f5acb41c2f35f8e79e477a57eaba65591c66
|
[
"Apache-2.0"
] | 64 |
2019-08-17T07:09:50.000Z
|
2022-03-27T11:23:39.000Z
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
# Author: Donny You(youansheng@gmail.com)
def getNetworkIp():
import socket
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
s.connect(('<broadcast>', 0))
return s.getsockname()[0]
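# Note on getNetworkIp(): "connecting" a UDP socket does not send any packets;
# it only makes the OS pick an outgoing interface, so getsockname() returns the
# host's LAN address instead of 127.0.0.1.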
IP = getNetworkIp()
STATIC_SERVICE = "http://%s/imagesite" % IP
DATASET_ROOT = '/home/donny/DataSet'
PROJECT_ROOT = '/home/donny/Projects/PyTorchCV'
IMG_EXTENSIONS = [
'.jpg', '.JPG', '.jpeg', '.JPEG',
'.png', '.PNG', '.ppm', '.PPM', '.bmp', '.BMP',
]
| 23.833333 | 59 | 0.638112 |
4a1eb4008320f876c9a9cc0a96f600ff69f33581
| 9,516 |
py
|
Python
|
bert_sbd.py
|
ajitrajasekharan/simple_sbd
|
f0b4abb511f409ca02b61ce930126f147e72b7e0
|
[
"MIT"
] | 2 |
2021-03-16T03:23:34.000Z
|
2022-01-19T03:05:11.000Z
|
bert_sbd.py
|
ajitrajasekharan/simple_sbd
|
f0b4abb511f409ca02b61ce930126f147e72b7e0
|
[
"MIT"
] | null | null | null |
bert_sbd.py
|
ajitrajasekharan/simple_sbd
|
f0b4abb511f409ca02b61ce930126f147e72b7e0
|
[
"MIT"
] | 1 |
2022-01-19T03:05:11.000Z
|
2022-01-19T03:05:11.000Z
|
import os
import sys
import string
import pdb
import argparse
#this is a primitive sentence boundary detector
#it splits sentences on period boundaries, skips lines without sentence delimiters that exceed max_sequence_length in chars, and skips lines with too few words (min_words or fewer)
#this code needs to be changed to include additional domain-specific filters
DEFAULT_MIN_WORDS = 3
DEFAULT_MAX_LENGTH=1000
DEFAULT_EXCLUDE_FILE="exclude.txt"
DEFAULT_ABBR_FILE="abbr.txt"
DEFAULT_MAX_ABBR_LENGTH=3
#Note: this script does not join lines. Line joining has to be done prior to this
sbds = [';','.','?','!','\n']
def read_lines(in_file):
filter_list = []
with open(in_file) as fp:
for line in fp:
filter_list.append(line.rstrip('\n'))
return filter_list
def in_filter_list(filter_list,curr):
for i in filter_list:
if (curr.lstrip().startswith(i)):
return True
return False
def emit(line,params,partial_line):
max_sequence_length = params["args"].max
min_words = params["args"].min_words
if (len(line) < max_sequence_length and (partial_line or len(line.split()) > min_words)):
print(line.lstrip(),end='')
#abbreviation terms like e.g. or etc.
def is_abbr(curr,i,j,params):
max_abbr_length = params["args"].abbr_length
if (j > i and (curr[i:j+1] in params["abbrs"])):
return True
if (i > j or (j - i > max_abbr_length)):
return False
if (i == j and curr[j].isalpha()):
return True
orig_i = i
while (i < j):
if (not curr[i].isalpha() and not (curr[i] in string.punctuation)):
return False
i+= 1
i = orig_i
while (i < j):
if (curr[i] == '.'):
return True
i+= 1
return False
def is_any_punct(curr,i,j):
if (i > j):
return False
if (i == j and (curr[j] in string.punctuation)):
return True
while (i < j):
if (curr[i] in string.punctuation):
return True
i += 1
return False
#This checks if a term is a number like 23.56 (we don't want to break on that period) or an abbreviation like "Mr." which we don't want to break on either
def previous_word_abbr_or_number(curr,i,params):
i -= 1
if (i > 0):
j = i
while (j >= 0):
if (curr[j] == ' '):
j += 1
break
if (j == 0):
break
else:
j -= 1
        if (is_abbr(curr,j,i,params) or ((curr[j].isupper() and i != j and (j - i <= params["args"].abbr_length) and curr[i].islower()) or (i == j and (curr[i].islower() or curr[i].isupper())))): #e. coli or Mr. Xanadu
return True,False,j
elif ((i == j or curr[j+1].isdigit()) and curr[i].isdigit()): #digits with currency
return False,True,j
else:
return False,False,j
else:
return True,False,0
#Periods inside quotes are not treated specially; the parser will still break on them
def process_sbd(buffered,curr,params):
length = len(curr)
start = 0
for i in range(len(curr)):
char = curr[i]
if (char in sbds):
#pdb.set_trace()
if (i + 1 == length):
emit(buffered + ' ' + curr[start:],params,False)
return ""
else:
words = curr[i+1:].strip().split()
if (len(words) > 0):
                    #This is a primitive sentence boundary detector: if a period is followed immediately by a space it is treated as a sentence break, unless the character after the space is lower case (e. coli). So a word like "EX." in "EX. Coli" will be broken up, but "E. Coli" will not be, since "E" is a single letter.
                    #Abbreviations like "Mr." are part of the exceptions list and will not be broken on
assert(i+1 < length)
is_prev_abbr,is_prev_number,prev_token_start = previous_word_abbr_or_number(curr,i,params)
if (char != '.'):
emit(buffered + ' ' + curr[start:i+1] + '\n',params,True)
start = i+1
buffered = ''
elif (curr[i+1] == ' ' and is_prev_number):
if (not curr[i+2].isdigit()):
any_punct = is_any_punct(curr,prev_token_start,i)
if (any_punct):
end = i+1
else:
                                end = prev_token_start #numbers in a line are assumed to be a bullet point. A downside of this is that a sentence ending with a number like 2070 will make it a new line ending with "like", and 2070 will become a new line.
emit(buffered + ' ' + curr[start:end] + '\n',params,True)
start = end
buffered = ''
elif (curr[i+1] == ' ' and not is_prev_abbr and not curr[i+2].islower()):
if (start == 0 and len(curr[start:i - start+1].split()) < params["args"].min_words):
end_char = ' '
else:
end_char = '\n'
emit(buffered + ' ' + curr[start:i+1] + end_char,params,True)
start = i+1
buffered = ''
else:
emit(buffered + ' ' + curr[start:],params,True)
return ""
return (buffered + ' ' + curr[start:]).lstrip().rstrip('\n')
def process_line(curr,buffered,nl_emitted,params):
min_words = params["args"].min_words
filter_list = params["exclude"]
if (len(curr) == 1 and curr[0] == '\n'):
if (len(buffered) > 0):
emit(buffered + '\n',params,False)
else:
if (not nl_emitted):
print()
nl_emitted = True
return "",nl_emitted
if (len(curr.split()) <= min_words and len(buffered) == 0):
return "",nl_emitted
if (not in_filter_list(filter_list,curr)):
nl_emitted = False
buffered = process_sbd(buffered,curr,params)
return buffered,nl_emitted
def fold_on_wb(buffer_str, params):
    #fold an over-long buffer on word boundaries so each emitted chunk stays under the maximum sequence length
    max_sequence_length = params["args"].max
    arr = buffer_str.split()
    if (len(arr) == 1):
        pass #a single token longer than the maximum length is skipped
    else:
        curr_size = 0
        curr_str = ""
        for i in range(len(arr)):
            curr_len = len(arr[i])
            if (curr_size + curr_len + 1 >= max_sequence_length):
                emit(curr_str + '\n',params,False)
                #start the next chunk with the current word instead of dropping it
                curr_str = arr[i]
                curr_size = curr_len
            else:
                if (curr_size == 0):
                    curr_str = arr[i]
                    curr_size += curr_len
                else:
                    curr_str += ' ' + arr[i]
                    curr_size += curr_len + 1
        if (curr_size > 0):
            emit(curr_str + '\n',params,False)
def collapse_spaces(line):
ret_line = []
is_space = False
for ch in line:
if (ch == ' ' or ch == '\t'):
if (is_space):
continue
else:
ret_line.append(ch)
is_space = True
else:
is_space = False
ret_line.append(ch)
return ''.join(ret_line)
def main(params):
file_name = params["args"].input
max_sequence_length = params["args"].max
single_line_doc = params["args"].single
with open(file_name,"r") as fp:
buffer_str = ""
nl_emitted = False
for line in fp:
line = collapse_spaces(line)
buffer_str,nl_emitted = process_line(line,buffer_str,nl_emitted,params)
assert(len(buffer_str) == 0)
if (len(buffer_str) > max_sequence_length):
fold_on_wb(buffer_str,params)
buffer_str = ''
if (single_line_doc):
print()
emit(buffer_str + '\n',params,False)
return 0
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Simple sentence boundary detector - for BERT like model input ',formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('-input', action="store", dest="input",required=True, help='Input to file containing sentences.')
parser.add_argument('-max', action="store", dest="max", default=DEFAULT_MAX_LENGTH,type=int,help='Maximum sentence length')
parser.add_argument('-single', action="store", dest="single", default=False,type=bool,help='Consider each line as a document and add newline')
parser.add_argument('-min_words', action="store", dest="min_words", default=DEFAULT_MIN_WORDS,type=int,help='Ignore lines less than this threshold of words')
parser.add_argument('-exclude', action="store", dest="exclude", default=DEFAULT_EXCLUDE_FILE,help='Ignore these lines')
parser.add_argument('-abbr_length', action="store", dest="abbr_length", default=DEFAULT_MAX_ABBR_LENGTH,type=int,help='Max length of abbreviations')
parser.add_argument('-abbrs', action="store", dest="abbrs",default=DEFAULT_ABBR_FILE, help='Exception abbrs to be included regardless of abbr length')
params = {}
args = parser.parse_args()
filter_list = read_lines(args.exclude)
abbrs_list = read_lines(args.abbrs)
params["args"] = args
params["exclude"] = filter_list
params["abbrs"] = abbrs_list
main(params)
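    # Example invocation sketch (file names are illustrative):
    #   python bert_sbd.py -input raw_text.txt -max 1000 -min_words 3 \
    #       -exclude exclude.txt -abbrs abbr.txt > sentences.txt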
| 38.526316 | 311 | 0.560635 |
4a1eb4ff8d4d5ceb1c33751565cdf8218f3c81fb
| 1,425 |
py
|
Python
|
qiskit/aqua/utils/decimal_to_binary.py
|
Nick-Singstock/qiskit-aqua
|
8c2bc57b78dec447faec3adbc966471a3206c2ef
|
[
"Apache-2.0"
] | 1 |
2020-11-06T01:09:28.000Z
|
2020-11-06T01:09:28.000Z
|
qiskit/aqua/utils/decimal_to_binary.py
|
Nick-Singstock/qiskit-aqua
|
8c2bc57b78dec447faec3adbc966471a3206c2ef
|
[
"Apache-2.0"
] | null | null | null |
qiskit/aqua/utils/decimal_to_binary.py
|
Nick-Singstock/qiskit-aqua
|
8c2bc57b78dec447faec3adbc966471a3206c2ef
|
[
"Apache-2.0"
] | 1 |
2020-11-06T01:09:43.000Z
|
2020-11-06T01:09:43.000Z
|
# -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM Corp. 2017 and later.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
from numpy import binary_repr
def decimal_to_binary(decimal_val, max_num_digits=20, fractional_part_only=False):
decimal_val_fractional_part = abs(decimal_val - int(decimal_val))
current_binary_position_val = 1 / 2
binary_fractional_part_digits = []
while decimal_val_fractional_part > 0 and len(binary_fractional_part_digits) < max_num_digits:
if decimal_val_fractional_part >= current_binary_position_val:
binary_fractional_part_digits.append('1')
decimal_val_fractional_part -= current_binary_position_val
else:
binary_fractional_part_digits.append('0')
current_binary_position_val /= 2
binary_repr_fractional_part = ''.join(binary_fractional_part_digits)
if fractional_part_only:
return binary_repr_fractional_part
else:
return binary_repr(int(decimal_val)) + '.' + binary_repr_fractional_part
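if __name__ == "__main__":
    # Minimal usage sketch: 5.25 is 101.01 in binary, and the fractional part
    # of 0.625 expands to 101.
    print(decimal_to_binary(5.25))                              # -> 101.01
    print(decimal_to_binary(0.625, fractional_part_only=True))  # -> 101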
| 39.583333 | 98 | 0.749474 |
4a1eb6bcb80788670c16b373f52b7241e2cf1059
| 9,314 |
py
|
Python
|
sdk/python/pulumi_azure_native/documentdb/v20151106/get_database_account_sql_container.py
|
pulumi-bot/pulumi-azure-native
|
f7b9490b5211544318e455e5cceafe47b628e12c
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure_native/documentdb/v20151106/get_database_account_sql_container.py
|
pulumi-bot/pulumi-azure-native
|
f7b9490b5211544318e455e5cceafe47b628e12c
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure_native/documentdb/v20151106/get_database_account_sql_container.py
|
pulumi-bot/pulumi-azure-native
|
f7b9490b5211544318e455e5cceafe47b628e12c
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
__all__ = [
'GetDatabaseAccountSqlContainerResult',
'AwaitableGetDatabaseAccountSqlContainerResult',
'get_database_account_sql_container',
]
@pulumi.output_type
class GetDatabaseAccountSqlContainerResult:
"""
An Azure Cosmos DB container.
"""
def __init__(__self__, conflict_resolution_policy=None, default_ttl=None, etag=None, id=None, indexing_policy=None, location=None, name=None, partition_key=None, rid=None, tags=None, ts=None, type=None, unique_key_policy=None):
if conflict_resolution_policy and not isinstance(conflict_resolution_policy, dict):
raise TypeError("Expected argument 'conflict_resolution_policy' to be a dict")
pulumi.set(__self__, "conflict_resolution_policy", conflict_resolution_policy)
if default_ttl and not isinstance(default_ttl, int):
raise TypeError("Expected argument 'default_ttl' to be a int")
pulumi.set(__self__, "default_ttl", default_ttl)
if etag and not isinstance(etag, str):
raise TypeError("Expected argument 'etag' to be a str")
pulumi.set(__self__, "etag", etag)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if indexing_policy and not isinstance(indexing_policy, dict):
raise TypeError("Expected argument 'indexing_policy' to be a dict")
pulumi.set(__self__, "indexing_policy", indexing_policy)
if location and not isinstance(location, str):
raise TypeError("Expected argument 'location' to be a str")
pulumi.set(__self__, "location", location)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if partition_key and not isinstance(partition_key, dict):
raise TypeError("Expected argument 'partition_key' to be a dict")
pulumi.set(__self__, "partition_key", partition_key)
if rid and not isinstance(rid, str):
raise TypeError("Expected argument 'rid' to be a str")
pulumi.set(__self__, "rid", rid)
if tags and not isinstance(tags, dict):
raise TypeError("Expected argument 'tags' to be a dict")
pulumi.set(__self__, "tags", tags)
if ts and not isinstance(ts, dict):
raise TypeError("Expected argument 'ts' to be a dict")
pulumi.set(__self__, "ts", ts)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
if unique_key_policy and not isinstance(unique_key_policy, dict):
raise TypeError("Expected argument 'unique_key_policy' to be a dict")
pulumi.set(__self__, "unique_key_policy", unique_key_policy)
@property
@pulumi.getter(name="conflictResolutionPolicy")
def conflict_resolution_policy(self) -> Optional['outputs.ConflictResolutionPolicyResponse']:
"""
The conflict resolution policy for the container.
"""
return pulumi.get(self, "conflict_resolution_policy")
@property
@pulumi.getter(name="defaultTtl")
def default_ttl(self) -> Optional[int]:
"""
Default time to live
"""
return pulumi.get(self, "default_ttl")
@property
@pulumi.getter
def etag(self) -> Optional[str]:
"""
A system generated property representing the resource etag required for optimistic concurrency control.
"""
return pulumi.get(self, "etag")
@property
@pulumi.getter
def id(self) -> str:
"""
The unique resource identifier of the database account.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter(name="indexingPolicy")
def indexing_policy(self) -> Optional['outputs.IndexingPolicyResponse']:
"""
The configuration of the indexing policy. By default, the indexing is automatic for all document paths within the container
"""
return pulumi.get(self, "indexing_policy")
@property
@pulumi.getter
def location(self) -> Optional[str]:
"""
The location of the resource group to which the resource belongs.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> str:
"""
The name of the database account.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="partitionKey")
def partition_key(self) -> Optional['outputs.ContainerPartitionKeyResponse']:
"""
The configuration of the partition key to be used for partitioning data into multiple partitions
"""
return pulumi.get(self, "partition_key")
@property
@pulumi.getter
def rid(self) -> Optional[str]:
"""
A system generated property. A unique identifier.
"""
return pulumi.get(self, "rid")
@property
@pulumi.getter
def tags(self) -> Optional[Mapping[str, str]]:
"""
Tags are a list of key-value pairs that describe the resource. These tags can be used in viewing and grouping this resource (across resource groups). A maximum of 15 tags can be provided for a resource. Each tag must have a key no greater than 128 characters and value no greater than 256 characters. For example, the default experience for a template type is set with "defaultExperience": "Cassandra". Current "defaultExperience" values also include "Table", "Graph", "DocumentDB", and "MongoDB".
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def ts(self) -> Optional[Any]:
"""
A system generated property that denotes the last updated timestamp of the resource.
"""
return pulumi.get(self, "ts")
@property
@pulumi.getter
def type(self) -> str:
"""
The type of Azure resource.
"""
return pulumi.get(self, "type")
@property
@pulumi.getter(name="uniqueKeyPolicy")
def unique_key_policy(self) -> Optional['outputs.UniqueKeyPolicyResponse']:
"""
The unique key policy configuration for specifying uniqueness constraints on documents in the collection in the Azure Cosmos DB service.
"""
return pulumi.get(self, "unique_key_policy")
class AwaitableGetDatabaseAccountSqlContainerResult(GetDatabaseAccountSqlContainerResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetDatabaseAccountSqlContainerResult(
conflict_resolution_policy=self.conflict_resolution_policy,
default_ttl=self.default_ttl,
etag=self.etag,
id=self.id,
indexing_policy=self.indexing_policy,
location=self.location,
name=self.name,
partition_key=self.partition_key,
rid=self.rid,
tags=self.tags,
ts=self.ts,
type=self.type,
unique_key_policy=self.unique_key_policy)
def get_database_account_sql_container(account_name: Optional[str] = None,
container_name: Optional[str] = None,
database_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetDatabaseAccountSqlContainerResult:
"""
An Azure Cosmos DB container.
:param str account_name: Cosmos DB database account name.
:param str container_name: Cosmos DB container name.
:param str database_name: Cosmos DB database name.
:param str resource_group_name: Name of an Azure resource group.
"""
__args__ = dict()
__args__['accountName'] = account_name
__args__['containerName'] = container_name
__args__['databaseName'] = database_name
__args__['resourceGroupName'] = resource_group_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-native:documentdb/v20151106:getDatabaseAccountSqlContainer', __args__, opts=opts, typ=GetDatabaseAccountSqlContainerResult).value
return AwaitableGetDatabaseAccountSqlContainerResult(
conflict_resolution_policy=__ret__.conflict_resolution_policy,
default_ttl=__ret__.default_ttl,
etag=__ret__.etag,
id=__ret__.id,
indexing_policy=__ret__.indexing_policy,
location=__ret__.location,
name=__ret__.name,
partition_key=__ret__.partition_key,
rid=__ret__.rid,
tags=__ret__.tags,
ts=__ret__.ts,
type=__ret__.type,
unique_key_policy=__ret__.unique_key_policy)
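# Minimal usage sketch (resource names are illustrative and an existing Cosmos
# DB account is assumed):
#   container = get_database_account_sql_container(
#       account_name="my-cosmos-account",
#       container_name="my-container",
#       database_name="my-database",
#       resource_group_name="my-resource-group")
#   pulumi.export("containerId", container.id)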
| 40.672489 | 505 | 0.659652 |
4a1eb7177e6eb422e8c1f9a204377485ecfaaf07
| 1,864 |
py
|
Python
|
tests/photos/test_photo_categories.py
|
Torniojaws/vortech-backend
|
f775a97eeae089fa720088d86fe92d40bc5d65bc
|
[
"MIT"
] | null | null | null |
tests/photos/test_photo_categories.py
|
Torniojaws/vortech-backend
|
f775a97eeae089fa720088d86fe92d40bc5d65bc
|
[
"MIT"
] | 93 |
2017-09-01T22:24:10.000Z
|
2021-12-22T14:07:06.000Z
|
tests/photos/test_photo_categories.py
|
Torniojaws/vortech-backend
|
f775a97eeae089fa720088d86fe92d40bc5d65bc
|
[
"MIT"
] | null | null | null |
import json
import unittest
from flask_caching import Cache
from app import app, db
from apps.photos.models import PhotoCategories
class TestPhotoCategories(unittest.TestCase):
def setUp(self):
# Clear redis cache completely
cache = Cache()
cache.init_app(app, config={"CACHE_TYPE": "RedisCache"})
with app.app_context():
cache.clear()
self.app = app.test_client()
# Add some categories
cat1 = PhotoCategories(
Category="testCategory1"
)
cat2 = PhotoCategories(
Category="testCategory2"
)
cat3 = PhotoCategories(
Category="testCategory3"
)
db.session.add(cat1)
db.session.add(cat2)
db.session.add(cat3)
db.session.commit()
self.valid_cats = [cat1.PhotoCategoryID, cat2.PhotoCategoryID, cat3.PhotoCategoryID]
def tearDown(self):
try:
for cat in PhotoCategories.query.all():
db.session.delete(cat)
db.session.commit()
except TypeError as e:
print("Teardown failed:\n{}".format(e))
def test_get_photo_categories(self):
"""Should return all valid photo categories and the IDs."""
response = self.app.get("/api/1.0/photos/categories/")
categories = json.loads(
response.get_data().decode()
)
self.assertEqual(200, response.status_code)
self.assertEqual(3, len(categories["photoCategories"]))
self.assertEqual(self.valid_cats[0], categories["photoCategories"][0]["id"])
self.assertEqual("testCategory1", categories["photoCategories"][0]["category"])
self.assertEqual(self.valid_cats[2], categories["photoCategories"][2]["id"])
self.assertEqual("testCategory3", categories["photoCategories"][2]["category"])
| 33.890909 | 92 | 0.624464 |
4a1eb7a87688ef157d87d4bc54a714acc3182dde
| 831 |
py
|
Python
|
Leetcode/1570-dot_product_two_vectors.py
|
EdwaRen/Competitve-Programming
|
e8bffeb457936d28c75ecfefb5a1f316c15a9b6c
|
[
"MIT"
] | 1 |
2021-05-03T21:48:25.000Z
|
2021-05-03T21:48:25.000Z
|
Leetcode/1570-dot_product_two_vectors.py
|
EdwaRen/Competitve_Programming
|
e8bffeb457936d28c75ecfefb5a1f316c15a9b6c
|
[
"MIT"
] | null | null | null |
Leetcode/1570-dot_product_two_vectors.py
|
EdwaRen/Competitve_Programming
|
e8bffeb457936d28c75ecfefb5a1f316c15a9b6c
|
[
"MIT"
] | null | null | null |
class SparseVector:
def __init__(self, nums):
"""
:type nums: List[int]
mapping for index => value
"""
self.nums = {idx: val for idx, val in enumerate(nums) if val}
# Return the dotProduct of two sparse vectors
def dotProduct(self, vec):
"""
:type vec: 'SparseVector'
:rtype: int
"""
total = 0
for key, value in self.nums.items():
if key in vec.nums:
total += value * vec.nums[key]
return total
# Your SparseVector object will be instantiated and called as such:
nums1 = [1, 0, 0, 2, 3]
nums2 = [0, 3, 0, 4, 0]
nums1 = [0,1,0,0,0]
nums2 = [0,0,0,0,2]
nums1 = [0,1,0,0,2,0,0]
nums2 = [1,0,0,0,3,0,4]
v1 = SparseVector(nums1)
v2 = SparseVector(nums2)
ans = v1.dotProduct(v2)
print(ans)
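# For the last pair above, index 4 is the only position where both vectors are
# non-zero, so the printed dot product is 2 * 3 = 6.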
| 25.181818 | 69 | 0.54994 |
4a1eb7cbf1a5674f47c445afe12ab0b04b263d9d
| 9,586 |
py
|
Python
|
matcher/parser.py
|
INK-USC/expl-refinement
|
815a7892a8d4c42fb429856746212a44f67d2547
|
[
"MIT"
] | 7 |
2021-09-29T08:14:30.000Z
|
2022-03-27T13:12:37.000Z
|
matcher/parser.py
|
INK-USC/expl-refinement
|
815a7892a8d4c42fb429856746212a44f67d2547
|
[
"MIT"
] | null | null | null |
matcher/parser.py
|
INK-USC/expl-refinement
|
815a7892a8d4c42fb429856746212a44f67d2547
|
[
"MIT"
] | null | null | null |
import torch
import torch.nn as nn
import torch.nn.functional as F
import copy
import logging
from dictionary import STRING2PREDICATE, WORD2NUMBER, OPS_FEATURE, RAW_LEXICON
from utils import SPECIAL_CHARS, REVERSE_SPECIAL_CHARS
from rule import Rule
from nltk.ccg import chart, lexicon
from read_instance import InstancePreprocess
from utils import preprocess_sent
MAX_PHRASE_LEN = 4
BEAM_WIDTH = 100
logger = logging.getLogger(__name__)
def string_to_predicate(s):
"""input: one string (can contain multiple tokens with ;
output: a list of predicates."""
if s != ',' and s not in REVERSE_SPECIAL_CHARS:
s = s.lower().strip(',')
if s.startswith("$"):
return [s]
elif s.startswith("\"") and s.endswith("\""):
return ["'" + s[1:-1] + "'"]
elif s in STRING2PREDICATE:
return STRING2PREDICATE[s]
elif s.isdigit():
return ["'" + s + "'"]
elif s in WORD2NUMBER:
return ["'" + WORD2NUMBER[s] + "'"]
else:
return []
def tokenize(sentence):
"""input: a list of tokens;
output: a list of possible tokenization of the sentence;
each token can be mapped to multiple predicates"""
# log[j] is a list containing temporary results using 0..(j-1) tokens
log = {i: [] for i in range(len(sentence) + 1)}
log[0] = [[]]
for i, token in enumerate(sentence):
for _range in range(1, MAX_PHRASE_LEN + 1):
if i + _range > len(sentence):
break
phrase = ' '.join(sentence[i:i + _range])
predicates = string_to_predicate(phrase)
for temp_result in log[i]:
for predicate in predicates:
log[i + _range].append(temp_result + [predicate])
if token.startswith("\""): # avoid --"A" and "B"-- treated as one predicate
break
return log[len(sentence)]
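# Sketch of the dynamic program above: log[j] holds every predicate sequence
# covering the first j tokens; each position tries phrases of length
# 1..MAX_PHRASE_LEN, so one sentence can yield several tokenizations whenever a
# phrase maps to more than one predicate.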
def get_word_name(layer, st, idx):
return "$Layer{}_St{}_{}".format(str(layer), str(st), str(idx))
def get_entry(word_name, category, semantics):
return "\n\t\t{0} => {1} {{{2}}}".format(word_name, str(category), str(semantics))
def quote_word_lexicon(sentence):
def is_quote_word(token):
return (token.startswith("\'") and token.endswith("\'")) \
or (token.startswith("\"") and token.endswith("\""))
ret = ""
for token in sentence:
if is_quote_word(token):
ret += get_entry(token, 'NP', token)
ret += get_entry(token, 'N', token)
ret += get_entry(token, 'NP', "'@In'({},'all')".format(token))
if token[1:-1].isdigit():
ret += get_entry(token, 'NP/NP', "\\x.'@Num'({},x)".format(token))
ret += get_entry(token, 'N/N', "\\x.'@Num'({},x)".format(token))
ret += get_entry(token, 'PP/PP/NP/NP', "\\x y F.'@WordCount'('@Num'({},x),y,F)".format(token))
ret += get_entry(token, 'PP/PP/N/N', "\\x y F.'@WordCount'('@Num'({},x),y,F)".format(token))
return ret
class Parser(nn.Module):
def __init__(self):
super(Parser, self).__init__()
self.raw_lexicon = RAW_LEXICON
self.beam_width = BEAM_WIDTH
self.feature_size = len(OPS_FEATURE)
self.theta = nn.Parameter(torch.randn(self.feature_size, dtype=torch.float64))
# self.theta.data.uniform_(0.0, 0.2)
# torch.nn.init.xavier_uniform(self.theta)
print('Parser of dimension {} is initialized.'.format(self.feature_size))
# print('Initial weights:', self.theta)
def forward(self, instances):
"""
:param instances: [[]], for each instance, there is a list of successful parses
:return: [[]], the score for each parse in each sentence.
"""
ret = []
for instance in instances:
inputs = [item[2] for item in instance]
t = torch.tensor(inputs, dtype=torch.float64)
logits = torch.matmul(self.theta, t.t())
probs = F.softmax(logits, dim=0)
ret.append(probs)
return ret
def loss(self, preds, xys):
ret = 0.0
for pred, xy in zip(preds, xys):
labels = [item[1] for item in xy]
t = torch.tensor(labels, dtype=torch.float64)
ret -= torch.matmul(torch.log(pred), t.t())
return ret
def forward_single(self, str_rep):
rule = Rule(str_rep)
t = torch.tensor(rule.features, dtype=torch.float64)
ret = torch.matmul(self.theta, t.t())
return ret.item()
def parse(self, sentence, beam=True):
"""
:param sentence: a list of tokens in one sentence.
e.g. ['"may_be"', '$Is', '$Between', '$ArgX', '$And', '$ArgY']
:return: a list of successful parses.
"""
beam_lexicon = copy.deepcopy(self.raw_lexicon) + quote_word_lexicon(sentence)
# the first index of forms is layer
# the second index of forms is starting index
all_forms = [[[token] for token in sentence]]
# parsed results to be returned
ret = []
# Width of tokens to be parsed. Start with width 1 and stack to len(sentence)
for layer in range(1, len(sentence)):
layer_form = []
# update the lexicon from previous layers
lex = lexicon.fromstring(beam_lexicon, True)
parser = chart.CCGChartParser(lex, chart.DefaultRuleSet)
# parse the span (st, st+layer)
for st in range(0, len(sentence) - layer):
form = []
memory = [] # keep a memory and remove redundant parses
word_index = 0
ed = st + layer
# try to combine (st, split), (split+1, ed) into (st, ed)
for split in range(st, ed):
# get candidates for (st, split) and (split+1, ed)
words_L = all_forms[split-st][st]
words_R = all_forms[ed-split-1][split+1]
for word_L in words_L:
for word_R in words_R:
# try to combine word_L and word_R
try:
for parse in parser.parse([word_L, word_R]):
token, _ = parse.label()
category, semantics = token.categ(), token.semantics()
memory_key = str(category) + '_' + str(semantics)
if memory_key not in memory:
memory.append(memory_key)
word_index += 1
form.append((parse, category, semantics, word_index))
except (AssertionError, SyntaxError) as e:
logger.info('Error when parsing {} and {}'.format(word_L, word_R))
logger.info('Error information: {}'.format(e.args))
# beam here. todo: implement feature selection and beam; use if beam
to_add = []
for item in form:
parse, category, semantics, word_index = item
word_name = get_word_name(layer, st, word_index)
to_add.append(word_name)
beam_lexicon += get_entry(word_name, category, semantics)
# if this is the last layer (covering the whole sentence)
# add this to output
if layer == len(sentence) - 1:
ret.append(str(semantics))
layer_form.append(to_add)
all_forms.append(layer_form)
# filter incomplete parses
ret = list(filter(lambda x: x.startswith("'@"), ret))
ret = sorted(ret, key=lambda x: self.forward_single(x), reverse=True)
return list(ret)
if __name__ == "__main__":
sent = 'X is country. Y is negative. X is less than 3 dependencies from Y.'
sentences = preprocess_sent(sent)
# print(sentences)
tokenized_sentences = [tokenize(sentence) for sentence in sentences]
# print(tokenized_sentences)
parser = Parser()
print('=' * 20 + ' start parsing ' + '=' * 20 + '\n')
rule_list_sentence = []
for i, sentence in enumerate(tokenized_sentences):
print('=== sentence {}: {}'.format(i, sentences[i]))
for potential_sentence in sentence:
print('sentence predicates: {}'.format(potential_sentence))
all_possible_parses = parser.parse(potential_sentence)
if len(all_possible_parses) > 0:
rule_list_sentence += all_possible_parses
print('parses: {}\n'.format(all_possible_parses))
rule = Rule(rule_list_sentence[0])
sentence = " Well, In the end. Sweden has proven to be a failure. Especially anything to do with feminsium."
label = 1
instance_reader = InstancePreprocess(pre=True)
instance = instance_reader.read_one(sentence, label)
inputs = {'Label': label,
'X': "Sweden",
'Y': "failure",
'Z': "",
'instance': instance,
'pretrained_modules': None,
'soft': 0
}
result = rule.execute(inputs)
print("result:",result)
# funcs = [Rule(item) for item in ret]
# test_sent = "bag may be hello in ArgY".split(' ')
# rule1 = Rule(ret[3])
# print(rule1.tree)
# ret = rule1.func({'s': test_sent, 'ArgX': 'hello'})
# print(ret)
| 38.191235 | 112 | 0.554767 |
4a1eb7eee316f9ffe10562fbc5b6532105f32877
| 11,619 |
py
|
Python
|
venv/Lib/site-packages/decorators.py
|
BigtoC/NextCloudAPI
|
fd93d1ee658ae5799e01bbaffda19b1c03c81f13
|
[
"MIT"
] | 1 |
2020-03-18T15:19:22.000Z
|
2020-03-18T15:19:22.000Z
|
venv/Lib/site-packages/decorators.py
|
BigtoC/NextCloudAPI
|
fd93d1ee658ae5799e01bbaffda19b1c03c81f13
|
[
"MIT"
] | null | null | null |
venv/Lib/site-packages/decorators.py
|
BigtoC/NextCloudAPI
|
fd93d1ee658ae5799e01bbaffda19b1c03c81f13
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals, division, print_function, absolute_import
import functools
import inspect
import re
__version__ = "0.1.1"
class Decorator(object):
"""
A decorator class that you can be extended that allows you to do normal decorators
with no arguments, or a decorator with arguments
May be invoked as a simple, argument-less decorator (eg, `@decorator`) or
with arguments customizing its behavior (eg,`@decorator(*args, **kwargs)`).
To create your own decorators, just extend this class and override the decorate_func()
method to decorate functions/methods and/or the decorate_class() method to decorate
classes.
based off of the task decorator in Fabric
https://github.com/fabric/fabric/blob/master/fabric/decorators.py#L15
with modifications inspired by --
https://wiki.python.org/moin/PythonDecoratorLibrary#Class_method_decorator_using_instance
https://wiki.python.org/moin/PythonDecoratorLibrary#Creating_decorator_with_optional_arguments
other links --
http://pythonconquerstheuniverse.wordpress.com/2012/04/29/python-decorators/
http://stackoverflow.com/questions/739654/
http://stackoverflow.com/questions/666216/decorator-classes-in-python
"""
    wrapped_func = False
    """True when this decorator is wrapping a function or method, auto-discovered"""
wrapped_class = False
"""this decorator is wrapping a class, auto-discovered"""
required_args = False
"""set this to True in child if the decorator requires arguments (eg, @dec(...))"""
def __new__(cls, *args, **kwargs):
instance = super(Decorator, cls).__new__(cls)
instance.create_args = args
instance.create_kwargs = kwargs
if instance.is_wrapped_arg(*args, **kwargs):
instance.set_wrapped(args[0])
args = ()
else:
instance.wrapped = None
instance.args = args
instance.kwargs = kwargs
# here we do some magic stuff to return the class back in case this is a
# class decorator, we do this so we don't wrap the class, thus causing
# things like isinstance() checks to fail, and also not making class
# variables available
if instance.wrapped_class:
try:
instance = instance.decorate_class(
instance.wrapped,
*instance.args,
**instance.kwargs
)
except TypeError as e:
# recover from is_wrapped_arg misclassifying the call
# NOTE -- this is super hacky
e_msg = str(e)
if "arguments" in e_msg and "takes" in e_msg \
or ("argument" in e_msg and "missing" in e_msg):
instance.wrapped = None
instance.args = instance.create_args
instance.kwargs = instance.create_kwargs
else:
raise
return instance
def is_wrapped_arg(self, *args, **kwargs):
"""decide if this decorator was called with arguments (eg, @dec(...)) or
without (eg, @dec) so we can take the correct path when the decorator is
invoked
for the most part this works except for the case where you have one callback
or class as the only passed in method to the decorator (eg, @dec(lambda x: x)),
you can get around it by using named arguments (eg, @dec(callback=lambda x: x))
or by setting required_args class property to True in your child decorator,
otherwise this will try and autodiscover and have to recover when the
decorator is actually invoked. I wracked my brain trying to figure out a
better way to do this and I couldn't come up with anything and after the
hours I've spent on it I'm just not sure there is a way to really know
:param *args: any positional values passed into __new__ or __call__
:param **kwargs: any named values passed into __new__ or __call__
:returns: boolean
"""
ret = False
if len(args) == 1 and len(kwargs) == 0:
#pout.v(args, self, isinstance(args[0], type), isinstance(args[0], FuncDecorator))
ret = inspect.isfunction(args[0]) \
or isinstance(args[0], type) \
or isinstance(args[0], Decorator)
if ret:
ret = not self.required_args
# if ret:
# frames = inspect.getouterframes(inspect.currentframe())
# if len(frames) >= 3:
# dec_frame = frames[2]
# lines = "".join(dec_frame[4]).strip()
# # we have 3 possibilities here:
# # 1) @dec
# # 2) @dec(...)
# # 3) something else
# # this will return True for 1 and 3
# if re.match(r"^@", lines):
# ret = "(" not in lines
return ret
def set_wrapped(self, wrapped):
"""This will decide what wrapped is and set .wrapped_func or .wrapped_class
accordingly
:param wrapped: either a function or class
"""
self.wrapped = wrapped
functools.update_wrapper(self, self.wrapped, updated=())
self.wrapped_func = False
self.wrapped_class = False
if inspect.isroutine(wrapped):
self.wrapped_func = True
elif isinstance(wrapped, type):
self.wrapped_class = True
def __get__(self, instance, klass):
"""
having this method here turns the class into a descriptor used when there
is no (...) on the decorator, this is only called when the decorator is on
a method, it won't be called when the decorator is on a non class method
(ie, just a normal function)
"""
def wrapper(*args, **kwargs):
ret = self.decorate_func(self.wrapped, *self.args, **self.kwargs)(instance, *args, **kwargs)
return ret
return wrapper
def __call__(self, *args, **kwargs):
"""call is used when there are (...) on the decorator or when there are no (...)
and the actual wrapped thing (function/method/class) is called"""
call_args = args
call_kwargs = kwargs
ret = None
invoke = True
if not self.wrapped:
self.set_wrapped(args[0])
args = ()
invoke = False
try:
if self.wrapped_func:
ret = self.decorate_func(self.wrapped, *self.args, **self.kwargs)
elif self.wrapped_class:
ret = self.decorate_class(self.wrapped, *self.args, **self.kwargs)
else:
raise ValueError("wrapped is not a class or a function")
except TypeError as e:
# recover from is_wrapped_arg misclassifying the call
# NOTE -- this is super hacky
e_msg = str(e)
if "arguments" in e_msg and "takes" in e_msg \
or ("argument" in e_msg and "missing" in e_msg):
self.args = self.create_args
self.kwargs = self.create_kwargs
self.wrapped = None
ret = self.__call__(*call_args, **call_kwargs)
else:
raise
else:
if invoke:
ret = ret(*args, **kwargs)
return ret
def decorate_func(self, func, *decorator_args, **decorator_kwargs):
"""override this in a child class with your own logic, it must return a
function that calls self.func
:param func: callback -- the function being decorated
:param decorator_args: tuple -- the arguments passed into the decorator (eg, @dec(1, 2))
:param decorator_kwargs: dict -- the named args passed into the decorator (eg, @dec(foo=1))
:returns: the wrapped func with our decorator func
"""
raise RuntimeError("decorator {} does not support function decoration".format(self.__class__.__name__))
def wrapper(*args, **kwargs):
return func(*args, **kwargs)
return wrapper
def decorate_class(self, klass, *decorator_args, **decorator_kwargs):
"""override this in a child class with your own logic, it must return a
function that returns klass or the like
:param klass: the class object that is being decorated
:param decorator_args: tuple -- the arguments passed into the decorator (eg, @dec(1, 2))
:param decorator_kwargs: dict -- the named args passed into the decorator (eg, @dec(foo=1))
:returns: the wrapped class
"""
raise RuntimeError("decorator {} does not support class decoration".format(self.__class__.__name__))
return klass
class InstanceDecorator(Decorator):
"""only decorate instances of a class"""
def decorate(self, instance, *decorator_args, **decorator_kwargs):
"""
override this in a child class with your own logic, it must return an
instance of a class
:param instance: class() -- the class instance being decorated
:param decorator_args: tuple -- the arguments passed into the decorator (eg, @dec(1, 2))
:param decorator_kwargs: dict -- the named args passed into the decorator (eg, @dec(foo=1))
"""
return instance
def decorate_class(self, klass, *decorator_args, **decorator_kwargs):
"""where the magic happens, this wraps a class to call our decorate method
in the init of the class
"""
class ChildClass(klass):
def __init__(slf, *args, **kwargs):
super(ChildClass, slf).__init__(*args, **kwargs)
self.decorate(
slf, *decorator_args, **decorator_kwargs
)
decorate_klass = ChildClass
decorate_klass.__name__ = klass.__name__
decorate_klass.__module__ = klass.__module__
# for some reason you can't update a __doc__ on a class
# http://bugs.python.org/issue12773
return decorate_klass
class ClassDecorator(Decorator):
"""only decorate a class"""
def decorate(self, klass, *decorator_args, **decorator_kwargs):
"""
override this in a child class with your own logic, it must return a
class object
:param klass: class -- the class being decorated
:param decorator_args: tuple -- the arguments passed into the decorator (eg, @dec(1, 2))
:param decorator_kwargs: dict -- the named args passed into the decorator (eg, @dec(foo=1))
"""
return klass
def decorate_class(self, *args, **kwargs):
return self.decorate(*args, **kwargs)
class FuncDecorator(Decorator):
"""only decorate functions/methods"""
def decorate(self, func, *decorator_args, **decorator_kwargs):
"""
override this in a child class with your own logic, it must return a
function that calls self.func
:param func: callback -- the function being decorated
:param decorator_args: tuple -- the arguments passed into the decorator (eg, @dec(1, 2))
:param decorator_kwargs: dict -- the named args passed into the decorator (eg, @dec(foo=1))
"""
def wrapper(*args, **kwargs):
return func(*args, **kwargs)
return wrapper
def decorate_func(self, *args, **kwargs):
return self.decorate(*args, **kwargs)
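if __name__ == "__main__":
    # Minimal usage sketch (the "suffixed" decorator below is illustrative and
    # not part of this module): FuncDecorator subclasses work both with and
    # without decorator arguments.
    class suffixed(FuncDecorator):
        def decorate(self, func, suffix="!"):
            def wrapper(*args, **kwargs):
                return str(func(*args, **kwargs)) + suffix
            return wrapper
    @suffixed
    def hello():
        return "hello"
    @suffixed(suffix="?!")
    def really():
        return "really"
    print(hello())   # -> hello!
    print(really())  # -> really?!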
| 39.65529 | 111 | 0.608744 |
4a1eb875e9fe4d9d29ca0cebf76c51bd884c5ad3
| 13,311 |
py
|
Python
|
qupulse/_program/transformation.py
|
eendebakpt/qupulse
|
5b5b48de10084d413e10cfd8f6e9f7536c69dd70
|
[
"MIT"
] | 30 |
2018-09-13T02:59:55.000Z
|
2022-03-21T04:25:22.000Z
|
qupulse/_program/transformation.py
|
eendebakpt/qupulse
|
5b5b48de10084d413e10cfd8f6e9f7536c69dd70
|
[
"MIT"
] | 220 |
2018-09-06T14:43:15.000Z
|
2022-03-25T12:26:25.000Z
|
qupulse/_program/transformation.py
|
eendebakpt/qupulse
|
5b5b48de10084d413e10cfd8f6e9f7536c69dd70
|
[
"MIT"
] | 14 |
2019-01-08T14:42:36.000Z
|
2021-05-21T08:53:06.000Z
|
from typing import Mapping, Set, Tuple, Sequence, AbstractSet, Union
from abc import abstractmethod
from numbers import Real
import numpy as np
from qupulse import ChannelID
from qupulse.comparable import Comparable
from qupulse.utils.types import SingletonABCMeta
class Transformation(Comparable):
    """Transforms numeric time-voltage values for multiple channels to other time-voltage values. The number and names
    of input and output channels might differ."""
    _identity_singleton = None
@abstractmethod
def __call__(self, time: Union[np.ndarray, float],
data: Mapping[ChannelID, Union[np.ndarray, float]]) -> Mapping[ChannelID, Union[np.ndarray, float]]:
"""Apply transformation to data
Args:
time:
data:
Returns:
transformed: A DataFrame that has been transformed with index == output_channels
"""
@abstractmethod
def get_output_channels(self, input_channels: AbstractSet[ChannelID]) -> AbstractSet[ChannelID]:
"""Return the channel identifiers"""
@abstractmethod
def get_input_channels(self, output_channels: AbstractSet[ChannelID]) -> AbstractSet[ChannelID]:
"""Channels that are required for getting data for the requested output channel"""
def chain(self, next_transformation: 'Transformation') -> 'Transformation':
if next_transformation is IdentityTransformation():
return self
else:
return chain_transformations(self, next_transformation)
def is_constant_invariant(self):
"""Signals if the transformation always maps constants to constants."""
return False
class IdentityTransformation(Transformation, metaclass=SingletonABCMeta):
def __call__(self, time: Union[np.ndarray, float],
data: Mapping[ChannelID, Union[np.ndarray, float]]) -> Mapping[ChannelID, Union[np.ndarray, float]]:
return data
def get_output_channels(self, input_channels: AbstractSet[ChannelID]) -> AbstractSet[ChannelID]:
return input_channels
@property
def compare_key(self) -> None:
return None
def get_input_channels(self, output_channels: AbstractSet[ChannelID]) -> AbstractSet[ChannelID]:
return output_channels
def chain(self, next_transformation: Transformation) -> Transformation:
return next_transformation
def __repr__(self):
return 'IdentityTransformation()'
def is_constant_invariant(self):
"""Signals if the transformation always maps constants to constants."""
return True
class ChainedTransformation(Transformation):
def __init__(self, *transformations: Transformation):
self._transformations = transformations
@property
def transformations(self) -> Tuple[Transformation, ...]:
return self._transformations
def get_output_channels(self, input_channels: AbstractSet[ChannelID]) -> AbstractSet[ChannelID]:
for transformation in self._transformations:
input_channels = transformation.get_output_channels(input_channels)
return input_channels
def get_input_channels(self, output_channels: AbstractSet[ChannelID]) -> AbstractSet[ChannelID]:
for transformation in reversed(self._transformations):
output_channels = transformation.get_input_channels(output_channels)
return output_channels
def __call__(self, time: Union[np.ndarray, float],
data: Mapping[ChannelID, Union[np.ndarray, float]]) -> Mapping[ChannelID, Union[np.ndarray, float]]:
for transformation in self._transformations:
data = transformation(time, data)
return data
@property
def compare_key(self) -> Tuple[Transformation, ...]:
return self._transformations
def chain(self, next_transformation) -> Transformation:
return chain_transformations(*self.transformations, next_transformation)
def __repr__(self):
return 'ChainedTransformation%r' % (self._transformations,)
def is_constant_invariant(self):
"""Signals if the transformation always maps constants to constants."""
return all(trafo.is_constant_invariant() for trafo in self._transformations)
class LinearTransformation(Transformation):
def __init__(self,
transformation_matrix: np.ndarray,
input_channels: Sequence[ChannelID],
output_channels: Sequence[ChannelID]):
"""
Args:
transformation_matrix: Matrix describing the transformation with shape (output_channels, input_channels)
input_channels: Channel ids of the columns
output_channels: Channel ids of the rows
"""
transformation_matrix = np.asarray(transformation_matrix)
if transformation_matrix.shape != (len(output_channels), len(input_channels)):
raise ValueError('Shape of transformation matrix does not match to the given channels')
output_sorter = np.argsort(output_channels)
transformation_matrix = transformation_matrix[output_sorter, :]
input_sorter = np.argsort(input_channels)
transformation_matrix = transformation_matrix[:, input_sorter]
self._matrix = transformation_matrix
self._input_channels = tuple(sorted(input_channels))
self._output_channels = tuple(sorted(output_channels))
self._input_channels_set = frozenset(self._input_channels)
self._output_channels_set = frozenset(self._output_channels)
def __call__(self, time: Union[np.ndarray, float],
data: Mapping[ChannelID, Union[np.ndarray, float]]) -> Mapping[ChannelID, Union[np.ndarray, float]]:
data_out = {forwarded_channel: data[forwarded_channel]
for forwarded_channel in set(data.keys()).difference(self._input_channels)}
if len(data_out) == len(data):
# only forwarded data
return data_out
try:
            data_in = np.stack([data[in_channel] for in_channel in self._input_channels])
except KeyError as error:
raise KeyError('Invalid input channels', set(data.keys()), set(self._input_channels)) from error
transformed_data = self._matrix @ data_in
for idx, out_channel in enumerate(self._output_channels):
data_out[out_channel] = transformed_data[idx, ...]
return data_out
def get_output_channels(self, input_channels: AbstractSet[ChannelID]) -> AbstractSet[ChannelID]:
if not input_channels >= self._input_channels_set:
# input_channels is not a superset of the required input channels
raise KeyError('Invalid input channels', input_channels, self._input_channels_set)
return (input_channels - self._input_channels_set) | self._output_channels_set
def get_input_channels(self, output_channels: AbstractSet[ChannelID]) -> AbstractSet[ChannelID]:
forwarded = output_channels - self._output_channels_set
if not forwarded.isdisjoint(self._input_channels):
raise KeyError('Is input channel', forwarded & self._input_channels_set)
elif output_channels.isdisjoint(self._output_channels):
return output_channels
else:
return forwarded | self._input_channels_set
@property
def compare_key(self) -> Tuple[Tuple[ChannelID], Tuple[ChannelID], bytes]:
return self._input_channels, self._output_channels, self._matrix.tobytes()
def __repr__(self):
return ('LinearTransformation('
'transformation_matrix={transformation_matrix},'
'input_channels={input_channels},'
'output_channels={output_channels})').format(transformation_matrix=self._matrix.tolist(),
input_channels=self._input_channels,
output_channels=self._output_channels)
def is_constant_invariant(self):
"""Signals if the transformation always maps constants to constants."""
return True
class OffsetTransformation(Transformation):
def __init__(self, offsets: Mapping[ChannelID, Real]):
"""Adds an offset to each channel specified in offsets.
        Channels not in offsets are forwarded unchanged.
Args:
offsets: Channel -> offset mapping
"""
self._offsets = dict(offsets.items())
def __call__(self, time: Union[np.ndarray, float],
data: Mapping[ChannelID, Union[np.ndarray, float]]) -> Mapping[ChannelID, Union[np.ndarray, float]]:
return {channel: channel_values + self._offsets[channel] if channel in self._offsets else channel_values
for channel, channel_values in data.items()}
def get_input_channels(self, output_channels: AbstractSet[ChannelID]) -> AbstractSet[ChannelID]:
return output_channels
def get_output_channels(self, input_channels: AbstractSet[ChannelID]) -> AbstractSet[ChannelID]:
return input_channels
@property
def compare_key(self) -> frozenset:
return frozenset(self._offsets.items())
def __repr__(self):
return 'OffsetTransformation(%r)' % self._offsets
def is_constant_invariant(self):
"""Signals if the transformation always maps constants to constants."""
return True
class ScalingTransformation(Transformation):
def __init__(self, factors: Mapping[ChannelID, Real]):
self._factors = dict(factors.items())
def __call__(self, time: Union[np.ndarray, float],
data: Mapping[ChannelID, Union[np.ndarray, float]]) -> Mapping[ChannelID, Union[np.ndarray, float]]:
return {channel: channel_values * self._factors[channel] if channel in self._factors else channel_values
for channel, channel_values in data.items()}
def get_input_channels(self, output_channels: AbstractSet[ChannelID]) -> AbstractSet[ChannelID]:
return output_channels
def get_output_channels(self, input_channels: AbstractSet[ChannelID]) -> AbstractSet[ChannelID]:
return input_channels
@property
def compare_key(self) -> frozenset:
return frozenset(self._factors.items())
def __repr__(self):
return 'ScalingTransformation(%r)' % self._factors
def is_constant_invariant(self):
"""Signals if the transformation always maps constants to constants."""
return True
try:
import pandas
def linear_transformation_from_pandas(transformation: pandas.DataFrame) -> LinearTransformation:
""" Creates a LinearTransformation object out of a pandas data frame.
Args:
transformation (pandas.DataFrame): The pandas.DataFrame object out of which a LinearTransformation will be formed.
Returns:
the created LinearTransformation instance
"""
return LinearTransformation(transformation.values, transformation.columns, transformation.index)
LinearTransformation.from_pandas = linear_transformation_from_pandas
except ImportError:
pass
class ParallelConstantChannelTransformation(Transformation):
def __init__(self, channels: Mapping[ChannelID, Real]):
"""Set channel values to given values regardless their former existence
Args:
channels: Channels present in this map are set to the given value.
"""
self._channels = {channel: float(value)
for channel, value in channels.items()}
def __call__(self, time: Union[np.ndarray, float],
data: Mapping[ChannelID, Union[np.ndarray, float]]) -> Mapping[ChannelID, Union[np.ndarray, float]]:
overwritten = {channel: np.full_like(time, fill_value=value, dtype=float)
for channel, value in self._channels.items()}
return {**data, **overwritten}
@property
def compare_key(self) -> Tuple[Tuple[ChannelID, float], ...]:
return tuple(sorted(self._channels.items()))
def get_input_channels(self, output_channels: AbstractSet[ChannelID]) -> AbstractSet[ChannelID]:
return output_channels - self._channels.keys()
def get_output_channels(self, input_channels: AbstractSet[ChannelID]) -> AbstractSet[ChannelID]:
return input_channels | self._channels.keys()
def __repr__(self):
return 'ParallelConstantChannelTransformation(%r)' % self._channels
def is_constant_invariant(self):
"""Signals if the transformation always maps constants to constants."""
return True
def chain_transformations(*transformations: Transformation) -> Transformation:
parsed_transformations = []
for transformation in transformations:
if transformation is IdentityTransformation() or transformation is None:
pass
elif isinstance(transformation, ChainedTransformation):
parsed_transformations.extend(transformation.transformations)
else:
parsed_transformations.append(transformation)
if len(parsed_transformations) == 0:
return IdentityTransformation()
elif len(parsed_transformations) == 1:
return parsed_transformations[0]
else:
return ChainedTransformation(*parsed_transformations)
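# --- Editor's illustrative sketch (not part of the original module) ---
# A minimal example of composing the transformations defined above with
# chain_transformations. The channel names 'A'/'B' and the numeric values are made up;
# the sketch assumes the chained transformation applies each member to the data dict.
def _example_chained_transformation():
    tr = chain_transformations(
        ScalingTransformation({'A': 2.0}),   # scales channel 'A' by 2
        OffsetTransformation({'B': 1.0}),    # shifts channel 'B' by 1; channels are disjoint, so order does not matter
    )
    t = np.linspace(0.0, 1.0, 5)             # np is the module-level numpy import
    data = {'A': np.ones(5), 'B': np.zeros(5)}
    return tr(t, data)                       # expected: {'A': array of 2.0, 'B': array of 1.0}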
| 41.083333 | 126 | 0.692209 |
4a1eb8d5a1ef6e37d731606af1020fcb83e266d9
| 1,008 |
py
|
Python
|
problems/47/solution_47.py
|
r1cc4rdo/daily_coding_problem
|
6ac85309fad2f64231ac7ab94aa4158e18bdec40
|
[
"Unlicense"
] | 158 |
2018-01-25T06:33:30.000Z
|
2022-03-14T23:18:05.000Z
|
problems/47/solution_47.py
|
r1cc4rdo/daily_coding_problem
|
6ac85309fad2f64231ac7ab94aa4158e18bdec40
|
[
"Unlicense"
] | 9 |
2018-07-04T00:31:57.000Z
|
2020-05-16T21:02:30.000Z
|
problems/47/solution_47.py
|
r1cc4rdo/daily_coding_problem
|
6ac85309fad2f64231ac7ab94aa4158e18bdec40
|
[
"Unlicense"
] | 50 |
2018-06-22T16:48:44.000Z
|
2022-01-11T16:45:48.000Z
|
def coding_problem_47(prices):
"""
    Given an array of numbers representing the stock prices of a company in chronological order, write a function that
calculates the maximum profit you could have made from buying and selling that stock once. You must buy before you
can sell it. For example, given [9, 11, 8, 5, 7, 10], you should return 5, since you could buy the stock at 5
dollars and sell it at 10 dollars.
>>> coding_problem_47([9, 11, 8, 5, 7, 10])
5
Here's the inefficient one-liner:
>>> prices = [9, 11, 8, 5, 7, 10]
>>> max([max(prices[today + 1:]) - prices[today] for today in range(len(prices) - 1)])
5
"""
max_future_price, max_profit = prices[-1], 0
for index in range(len(prices) - 1, 0, -1):
max_future_price = max(max_future_price, prices[index])
max_profit = max(max_profit, max_future_price - prices[index - 1])
return max_profit
if __name__ == '__main__':
import doctest
doctest.testmod(verbose=True)
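# --- Editor's worked trace (illustrative, not part of the original solution) ---
# Scanning [9, 11, 8, 5, 7, 10] from the right while tracking the best future sell price:
#   index 5: max_future_price = 10, candidate profit vs prices[4] = 7  -> 3
#   index 4: max_future_price = 10, candidate profit vs prices[3] = 5  -> 5  (best)
#   index 3: max_future_price = 10, candidate profit vs prices[2] = 8  -> 2
#   index 2: max_future_price = 10, candidate profit vs prices[1] = 11 -> -1 (ignored)
#   index 1: max_future_price = 11, candidate profit vs prices[0] = 9  -> 2
# Final answer: 5, computed in O(n) time with O(1) extra space.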
| 36 | 118 | 0.662698 |
4a1eb8f52ef2f0347b385d104d0706f75c3bfff9
| 1,338 |
py
|
Python
|
xlsx2html/format/locale.py
|
gorj-tessella/xlsx2html
|
bbd384341be2920c8782dbe60467d531b851da86
|
[
"MIT"
] | null | null | null |
xlsx2html/format/locale.py
|
gorj-tessella/xlsx2html
|
bbd384341be2920c8782dbe60467d531b851da86
|
[
"MIT"
] | null | null | null |
xlsx2html/format/locale.py
|
gorj-tessella/xlsx2html
|
bbd384341be2920c8782dbe60467d531b851da86
|
[
"MIT"
] | null | null | null |
import re
from babel import Locale, UnknownLocaleError
from xlsx2html.constants import LCID_HEX_MAP
LOCALE_FORMAT_RE = re.compile(r'''
\[
\$
(?:.+|)
-(?P<lcid>[0-9A-Fa-f]{3,4})
\]
'''
, re.VERBOSE)
def parse_locale_code(code):
'''
>>> parse_locale_code('-404')
'zh_Hant_TW'
>>> parse_locale_code('404')
'zh_Hant_TW'
>>> parse_locale_code('0404')
'zh_Hant_TW'
>>> parse_locale_code('58050')
'''
try:
lcid = abs(int(code, 16))
locale_code = LCID_HEX_MAP.get(lcid, 'UNKNOWN')
return str(Locale.parse(locale_code))
except UnknownLocaleError:
return None
def extract_locale_from_format(fmt):
"""
>>> extract_locale_from_format('[$-404]e/m/d')
('zh_Hant_TW', 'e/m/d')
>>> extract_locale_from_format('[$USD-404]e/m/d')
('zh_Hant_TW', 'e/m/d')
>>> extract_locale_from_format('[$$-404]#.00')
('zh_Hant_TW', '#.00')
>>> extract_locale_from_format('[RED]e/m/d')
(None, '[RED]e/m/d')
"""
locale = None
m = LOCALE_FORMAT_RE.match(fmt)
if not m:
return locale, fmt
win_locale = m.group()
# todo keep currency
new_locale = parse_locale_code(m.group(1))
if new_locale:
locale = new_locale
fmt = fmt.replace(win_locale, '')
return locale, fmt
| 23.068966 | 55 | 0.591181 |
4a1eb8f749bfb3d62b31d05ec3e7d7c2c876bde7
| 3,733 |
py
|
Python
|
scripts/ft_glue.py
|
a1600012888/fairseq
|
dbd2cd08fc396f919d2e737513095fcb966896c0
|
[
"MIT"
] | null | null | null |
scripts/ft_glue.py
|
a1600012888/fairseq
|
dbd2cd08fc396f919d2e737513095fcb966896c0
|
[
"MIT"
] | null | null | null |
scripts/ft_glue.py
|
a1600012888/fairseq
|
dbd2cd08fc396f919d2e737513095fcb966896c0
|
[
"MIT"
] | 1 |
2020-04-01T03:31:00.000Z
|
2020-04-01T03:31:00.000Z
|
import argparse
import os
import numpy as np
task_name = ['MNLI', 'QNLI', 'QQP', 'RTE', 'SST-2', 'MRPC', 'CoLA', 'STS-B']
dataset_dir_name = ['MNLI-bin', 'QNLI-bin', 'QQP-bin',
'RTE-bin', 'SST-2-bin', 'MRPC-bin',
'CoLA-bin', 'STS-B-bin']
num_classes = [3,2,2,2,2,2,2,1]
lrs = [1e-5, 1e-5, 1e-5, 2e-5, 1e-5, 1e-5, 1e-5, 2e-5]
max_sentences = [32,32,32,16,32,16,16,16]
total_num_updates = [123873, 33112, 113272, 2036, 20935, 2296, 5336, 3598]
warm_updates = [7432, 1986, 28318, 122, 1256, 137, 320, 214]
if __name__ == '__main__':
parser = argparse.ArgumentParser()
    parser.add_argument('--arch', type=str, default='roberta_base',
choices=['roberta_base', 'roberta_leaner',
'roberta_base_deepsup', 'roberta_base_norm_fl',
'roberta_base-se'])
parser.add_argument('--task', type=str, default='SST-2',
choices=task_name)
parser.add_argument('-d', type=int, default=0)
parser.add_argument('-p', type=str)
args = parser.parse_args()
index = task_name.index(args.task)
rand_int = np.random.randint(0, 1000000000)
print('Random Seed: {}'.format(rand_int))
if args.task == 'MNLI':
cmds =["CUDA_VISIBLE_DEVICES={}".format(args.d),
" python3 train.py ~/data/glue-32768-fast/{}/ ".format(dataset_dir_name[index]),
"--restore-file {} ".format(args.p),
"--max-positions 512 --max-sentences {} ".format(max_sentences[index]),
"--max-tokens 4400 --task sentence_prediction --reset-optimizer --valid-subset valid,valid1 ",
"--reset-dataloader --reset-meters --required-batch-size-multiple 1 \
--init-token 0 --separator-token 2 --truncate-sequence ",
"--arch {} ".format(args.arch),
"--criterion sentence_prediction ",
"--num-classes {} ".format(num_classes[index]),
"--dropout 0.1 --attention-dropout 0.1 --weight-decay 0.1 \
--optimizer adam --adam-betas '(0.9, 0.98)' --adam-eps 1e-06 \
--clip-norm 0.0 --lr-scheduler polynomial_decay ",
"--lr {} --total-num-update {} ".format(lrs[index], total_num_updates[index]),
"--warmup-updates {} --seed {} ".format(warm_updates[index], rand_int),
"--max-epoch 10 \
--find-unused-parameters \
--best-checkpoint-metric accuracy --maximize-best-checkpoint-metric;"
]
else:
cmds =["CUDA_VISIBLE_DEVICES={}".format(args.d),
" python3 train.py ~/data/glue-32768-fast/{}/ ".format(dataset_dir_name[index]),
"--restore-file {} ".format(args.p),
"--max-positions 512 --max-sentences {} ".format(max_sentences[index]),
"--max-tokens 4400 --task sentence_prediction --reset-optimizer ",
"--reset-dataloader --reset-meters --required-batch-size-multiple 1 \
--init-token 0 --separator-token 2 --truncate-sequence ",
"--arch {} ".format(args.arch),
"--criterion sentence_prediction ",
"--num-classes {} ".format(num_classes[index]),
"--dropout 0.1 --attention-dropout 0.1 --weight-decay 0.1 \
--optimizer adam --adam-betas '(0.9, 0.98)' --adam-eps 1e-06 \
--clip-norm 0.0 --lr-scheduler polynomial_decay ",
"--lr {} --total-num-update {} ".format(lrs[index], total_num_updates[index]),
"--warmup-updates {} --seed {} ".format(warm_updates[index], rand_int),
"--max-epoch 10 \
--find-unused-parameters \
--best-checkpoint-metric accuracy --maximize-best-checkpoint-metric;"
]
cmd = ' '
for c in cmds:
cmd += c
print(cmd)
os.system(cmd)
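# --- Editor's illustrative invocation (the checkpoint path below is hypothetical) ---
# Assuming the preprocessed GLUE data layout expected above (~/data/glue-32768-fast/<TASK>-bin/)
# and a pretrained checkpoint, fine-tuning SST-2 on GPU 0 could be launched roughly as:
#   python3 scripts/ft_glue.py --task SST-2 --arch roberta_base -d 0 -p checkpoints/pretrained.pt
# The script then assembles and runs the corresponding `python3 train.py ...` command via os.system.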
| 36.242718 | 102 | 0.582641 |
4a1eb95486eee84b31e748dc62063be42a31c012
| 22,623 |
py
|
Python
|
models/det_densefusion_plus_2.py
|
simon3dv/frustum-convnet
|
73cffa8e53af8a4f59255591cf2ba4af6916602c
|
[
"MIT"
] | null | null | null |
models/det_densefusion_plus_2.py
|
simon3dv/frustum-convnet
|
73cffa8e53af8a4f59255591cf2ba4af6916602c
|
[
"MIT"
] | null | null | null |
models/det_densefusion_plus_2.py
|
simon3dv/frustum-convnet
|
73cffa8e53af8a4f59255591cf2ba4af6916602c
|
[
"MIT"
] | null | null | null |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import os
import math
import time
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
ROOT_DIR = os.path.dirname(BASE_DIR)
sys.path.append(ROOT_DIR)
from configs.config import cfg
from datasets.dataset_info import KITTICategory
from models.model_util import get_box3d_corners_helper
from models.model_util import huber_loss
from models.common import Conv1d, Conv2d, DeConv1d, init_params
from models.common import softmax_focal_loss_ignore, get_accuracy
from ops.query_depth_point.query_depth_point import QueryDepthPoint
from ops.pybind11.box_ops_cc import rbbox_iou_3d_pair
from models.box_transform import size_decode, size_encode, center_decode, center_encode, angle_decode, angle_encode
from models.pspnet import PSPNet
NUM_SIZE_CLUSTER = len(KITTICategory.CLASSES)
MEAN_SIZE_ARRAY = KITTICategory.MEAN_SIZE_ARRAY
# single scale PointNet module
class PointNetModule(nn.Module):
def __init__(self, Infea, mlp, dist, nsample, use_xyz=True, use_feature=True):
super(PointNetModule, self).__init__()
self.dist = dist
self.nsample = nsample
self.use_xyz = use_xyz
self.mlp = mlp
if Infea > 0:
use_feature = True
else:
use_feature = False
self.use_feature = use_feature
self.query_depth_point = QueryDepthPoint(dist, nsample)
if self.use_xyz:
self.conv1 = Conv2d(Infea + 3, mlp[0], 1)
else:
self.conv1 = Conv2d(Infea, mlp[0], 1)
self.conv2 = Conv2d(mlp[0], mlp[1], 1)
self.conv3 = Conv2d(mlp[1], mlp[2], 1)
self.econv1 = Conv2d(32,mlp[0],1)
self.econv2 = Conv2d(mlp[0],mlp[1],1)
self.joint_conv1 = Conv2d(mlp[1]*2+mlp[0]*2,mlp[2],1)
init_params([self.conv1[0], self.conv2[0], self.conv3[0], self.econv1[0], self.econv2[0], self.joint_conv1[0]], 'kaiming_normal')
init_params([self.conv1[1], self.conv2[1], self.conv3[1], self.econv2[1], self.econv2[1], self.joint_conv1[1]], 1)
def forward(self, pc, feat, new_pc=None,
img=None, P=None, query_v1=None):
batch_size = pc.size(0)
npoint = new_pc.shape[2]
k = self.nsample
indices, num = self.query_depth_point(pc, new_pc) # b*npoint*nsample
assert indices.data.max() < pc.shape[2] and indices.data.min() >= 0
indices_rgb = torch.gather(query_v1, 1, indices.view(batch_size, npoint * k)) \
.view(batch_size, npoint, k)
assert indices_rgb.data.max() < img.shape[2]*img.shape[3]
assert indices_rgb.data.min() >= 0
grouped_pc = None
grouped_feature = None
if self.use_xyz:
grouped_pc = torch.gather(
pc, 2,
indices.view(batch_size, 1, npoint * k).expand(-1, 3, -1)
).view(batch_size, 3, npoint, k)
grouped_pc = grouped_pc - new_pc.unsqueeze(3)
if self.use_feature:
grouped_feature = torch.gather(
feat, 2,
indices.view(batch_size, 1, npoint * k).expand(-1, feat.size(1), -1)
).view(batch_size, feat.size(1), npoint, k)
# grouped_feature = torch.cat([new_feat.unsqueeze(3), grouped_feature], -1)
img = img.view(batch_size,32,-1)
grouped_rgb = torch.gather(
img, 2,
indices_rgb.view(batch_size, 1, npoint * k).expand(-1, 32, -1)
).view(batch_size, 32, npoint, k)
if self.use_feature and self.use_xyz:
grouped_feature = torch.cat([grouped_pc, grouped_feature], 1)
elif self.use_xyz:
grouped_feature = grouped_pc.contiguous()
grouped_feature = self.conv1(grouped_feature)
# mlp[0]+mlp[0]:
grouped_rgb = self.econv1(grouped_rgb)
fusion_feature_1 = torch.cat([grouped_feature, grouped_rgb], 1)
grouped_feature = self.conv2(grouped_feature)
# mlp[1]+mlp[1]:
grouped_rgb = self.econv2(grouped_rgb)
fusion_feature_2 = torch.cat([grouped_feature, grouped_rgb], 1)
grouped_feature = self.conv3(grouped_feature)
fusion_feature = torch.cat([fusion_feature_1, fusion_feature_2], 1)
fusion_feature = self.joint_conv1(fusion_feature)
# output, _ = torch.max(grouped_feature, -1)
valid = (num > 0).view(batch_size, 1, -1, 1)
grouped_feature = torch.cat([grouped_feature, fusion_feature], 1)
grouped_feature = grouped_feature * valid.float()
return grouped_feature
# multi-scale PointNet module
class PointNetFeat(nn.Module):
def __init__(self, input_channel=3, num_vec=0):
super(PointNetFeat, self).__init__()
self.num_vec = num_vec
u = cfg.DATA.HEIGHT_HALF
assert len(u) == 4
self.pointnet1 = PointNetModule(
input_channel - 3, [64, 64, 128], u[0], 32, use_xyz=True, use_feature=True)
self.pointnet2 = PointNetModule(
input_channel - 3, [64, 64, 128], u[1], 64, use_xyz=True, use_feature=True)
self.pointnet3 = PointNetModule(
input_channel - 3, [128, 128, 256], u[2], 64, use_xyz=True, use_feature=True)
self.pointnet4 = PointNetModule(
input_channel - 3, [256, 256, 512], u[3], 128, use_xyz=True, use_feature=True)
def forward(self, point_cloud, sample_pc, feat=None, one_hot_vec=None,
img=None, P=None, query_v1=None):
pc = point_cloud
pc1 = sample_pc[0]
pc2 = sample_pc[1]
pc3 = sample_pc[2]
pc4 = sample_pc[3]
feat1 = self.pointnet1(pc, feat, pc1, img, P, query_v1,)
feat1, _ = torch.max(feat1, -1)
feat2 = self.pointnet2(pc, feat, pc2, img, P, query_v1,)
feat2, _ = torch.max(feat2, -1)
feat3 = self.pointnet3(pc, feat, pc3, img, P, query_v1,)
feat3, _ = torch.max(feat3, -1)
feat4 = self.pointnet4(pc, feat, pc4, img, P, query_v1,)
feat4, _ = torch.max(feat4, -1)
if one_hot_vec is not None:
one_hot = one_hot_vec.unsqueeze(-1).expand(-1, -1, feat1.shape[-1])
feat1 = torch.cat([feat1, one_hot], 1)
one_hot = one_hot_vec.unsqueeze(-1).expand(-1, -1, feat2.shape[-1])
feat2 = torch.cat([feat2, one_hot], 1)
one_hot = one_hot_vec.unsqueeze(-1).expand(-1, -1, feat3.shape[-1])
feat3 = torch.cat([feat3, one_hot], 1)
one_hot = one_hot_vec.unsqueeze(-1).expand(-1, -1, feat4.shape[-1])
feat4 = torch.cat([feat4, one_hot], 1)
return feat1, feat2, feat3, feat4
# FCN
class ConvFeatNet(nn.Module):
def __init__(self, i_c=256, num_vec=3):
super(ConvFeatNet, self).__init__()
self.block1_conv1 = Conv1d(i_c + num_vec, 256, 3, 1, 1)
self.block2_conv1 = Conv1d(256, 256, 3, 2, 1)
self.block2_conv2 = Conv1d(256, 256, 3, 1, 1)
self.block2_merge = Conv1d(256 + 256 + num_vec, 256, 1, 1)
self.block3_conv1 = Conv1d(256, 640, 3, 2, 1)
self.block3_conv2 = Conv1d(640, 640, 3, 1, 1)
self.block3_merge = Conv1d(640 + 512 + num_vec, 640, 1, 1)
self.block4_conv1 = Conv1d(640, 1280, 3, 2, 1)
self.block4_conv2 = Conv1d(1280, 1280, 3, 1, 1)
self.block4_merge = Conv1d(1280 + 1024 + num_vec, 1280, 1, 1)
self.block2_deconv = DeConv1d(256, 640, 1, 1, 0)
self.block3_deconv = DeConv1d(640, 640, 2, 2, 0)
self.block4_deconv = DeConv1d(1280, 640, 4, 4, 0)
for m in self.modules():
if isinstance(m, (nn.Conv1d, nn.ConvTranspose1d)):
# nn.init.xavier_uniform_(m.weight.data)
nn.init.kaiming_normal_(m.weight.data, mode='fan_in')
if m.bias is not None:
m.bias.data.zero_()
if isinstance(m, nn.BatchNorm1d):
m.weight.data.fill_(1)
m.bias.data.zero_()
def forward(self, x1, x2, x3, x4):
x = self.block1_conv1(x1)
x = self.block2_conv1(x)
x = self.block2_conv2(x)
x = torch.cat([x, x2], 1)
x = self.block2_merge(x)
xx1 = x
x = self.block3_conv1(x)
x = self.block3_conv2(x)
x = torch.cat([x, x3], 1)
x = self.block3_merge(x)
xx2 = x
x = self.block4_conv1(x)
x = self.block4_conv2(x)
x = torch.cat([x, x4], 1)
x = self.block4_merge(x)
xx3 = x
xx1 = self.block2_deconv(xx1)
xx2 = self.block3_deconv(xx2)
xx3 = self.block4_deconv(xx3)
x = torch.cat([xx1, xx2[:, :, :xx1.shape[-1]], xx3[:, :, :xx1.shape[-1]]], 1)
return x
# the whole pipeline
class PointNetDet(nn.Module):
def __init__(self, input_channel=3, num_vec=0, num_classes=2):
super(PointNetDet, self).__init__()
self.feat_net = PointNetFeat(input_channel, 0)
self.conv_net = ConvFeatNet()
self.num_classes = num_classes
num_bins = cfg.DATA.NUM_HEADING_BIN
self.num_bins = num_bins
output_size = 3 + num_bins * 2 + NUM_SIZE_CLUSTER * 4
self.reg_out = nn.Conv1d(1920, output_size, 1)
self.cls_out = nn.Conv1d(1920, 2, 1)
self.relu = nn.ReLU(True)
nn.init.kaiming_uniform_(self.cls_out.weight, mode='fan_in')
nn.init.kaiming_uniform_(self.reg_out.weight, mode='fan_in')
self.cls_out.bias.data.zero_()
self.reg_out.bias.data.zero_()
self.cnn = ModifiedResnet()
def _slice_output(self, output):
batch_size = output.shape[0]
num_bins = self.num_bins
center = output[:, 0:3].contiguous()
heading_scores = output[:, 3:3 + num_bins].contiguous()
heading_res_norm = output[:, 3 + num_bins:3 + num_bins * 2].contiguous()
size_scores = output[:, 3 + num_bins * 2:3 + num_bins * 2 + NUM_SIZE_CLUSTER].contiguous()
size_res_norm = output[:, 3 + num_bins * 2 + NUM_SIZE_CLUSTER:].contiguous()
size_res_norm = size_res_norm.view(batch_size, NUM_SIZE_CLUSTER, 3)
return center, heading_scores, heading_res_norm, size_scores, size_res_norm
def get_center_loss(self, pred_offsets, gt_offsets):
center_dist = torch.norm(gt_offsets - pred_offsets, 2, dim=-1)
center_loss = huber_loss(center_dist, delta=3.0)
return center_loss
def get_heading_loss(self, heading_scores, heading_res_norm, heading_class_label, heading_res_norm_label):
heading_class_loss = F.cross_entropy(heading_scores, heading_class_label)
# b, NUM_HEADING_BIN -> b, 1
heading_res_norm_select = torch.gather(heading_res_norm, 1, heading_class_label.view(-1, 1))
heading_res_norm_loss = huber_loss(
heading_res_norm_select.squeeze(1) - heading_res_norm_label, delta=1.0)
return heading_class_loss, heading_res_norm_loss
def get_size_loss(self, size_scores, size_res_norm, size_class_label, size_res_label_norm):
batch_size = size_scores.shape[0]
size_class_loss = F.cross_entropy(size_scores, size_class_label)
# b, NUM_SIZE_CLUSTER, 3 -> b, 1, 3
size_res_norm_select = torch.gather(size_res_norm, 1,
size_class_label.view(batch_size, 1, 1).expand(
batch_size, 1, 3))
size_norm_dist = torch.norm(
size_res_label_norm - size_res_norm_select.squeeze(1), 2, dim=-1)
size_res_norm_loss = huber_loss(size_norm_dist, delta=1.0)
return size_class_loss, size_res_norm_loss
def get_corner_loss(self, preds, gts):
center_label, heading_label, size_label = gts
center_preds, heading_preds, size_preds = preds
corners_3d_gt = get_box3d_corners_helper(center_label, heading_label, size_label)
corners_3d_gt_flip = get_box3d_corners_helper(center_label, heading_label + np.pi, size_label)
corners_3d_pred = get_box3d_corners_helper(center_preds, heading_preds, size_preds)
# N, 8, 3
corners_dist = torch.min(
torch.norm(corners_3d_pred - corners_3d_gt, 2, dim=-1).mean(-1),
torch.norm(corners_3d_pred - corners_3d_gt_flip, 2, dim=-1).mean(-1))
# corners_dist = torch.norm(corners_3d_pred - corners_3d_gt, 2, dim=-1)
corners_loss = huber_loss(corners_dist, delta=1.0)
return corners_loss, corners_3d_gt
def forward(self,
data_dicts):
image = data_dicts.get('image')
out_image = self.cnn(image)
P = data_dicts.get('P')
query_v1 = data_dicts.get('query_v1')
point_cloud = data_dicts.get('point_cloud')
one_hot_vec = data_dicts.get('one_hot')
cls_label = data_dicts.get('label')
size_class_label = data_dicts.get('size_class')
center_label = data_dicts.get('box3d_center')
heading_label = data_dicts.get('box3d_heading')
size_label = data_dicts.get('box3d_size')
center_ref1 = data_dicts.get('center_ref1')
center_ref2 = data_dicts.get('center_ref2')
center_ref3 = data_dicts.get('center_ref3')
center_ref4 = data_dicts.get('center_ref4')
batch_size = point_cloud.shape[0]
object_point_cloud_xyz = point_cloud[:, :3, :].contiguous()
if point_cloud.shape[1] > 3:
object_point_cloud_i = point_cloud[:, [3], :].contiguous()
else:
object_point_cloud_i = None
mean_size_array = torch.from_numpy(MEAN_SIZE_ARRAY).type_as(point_cloud)
feat1, feat2, feat3, feat4 = self.feat_net(
object_point_cloud_xyz,
[center_ref1, center_ref2, center_ref3, center_ref4],
object_point_cloud_i,
one_hot_vec,
out_image,
P,
query_v1
)
x = self.conv_net(feat1, feat2, feat3, feat4)
cls_scores = self.cls_out(x)
outputs = self.reg_out(x)
num_out = outputs.shape[2]
output_size = outputs.shape[1]
# b, c, n -> b, n, c
cls_scores = cls_scores.permute(0, 2, 1).contiguous().view(-1, 2)
outputs = outputs.permute(0, 2, 1).contiguous().view(-1, output_size)
center_ref2 = center_ref2.permute(0, 2, 1).contiguous().view(-1, 3)
cls_probs = F.softmax(cls_scores, -1)
if center_label is None:
assert not self.training, 'Please provide labels for training.'
det_outputs = self._slice_output(outputs)
center_boxnet, heading_scores, heading_res_norm, size_scores, size_res_norm = det_outputs
# decode
heading_probs = F.softmax(heading_scores, -1)
size_probs = F.softmax(size_scores, -1)
heading_pred_label = torch.argmax(heading_probs, -1)
size_pred_label = torch.argmax(size_probs, -1)
center_preds = center_boxnet + center_ref2
heading_preds = angle_decode(heading_res_norm, heading_pred_label)
size_preds = size_decode(size_res_norm, mean_size_array, size_pred_label)
# corner_preds = get_box3d_corners_helper(center_preds, heading_preds, size_preds)
cls_probs = cls_probs.view(batch_size, -1, 2)
center_preds = center_preds.view(batch_size, -1, 3)
size_preds = size_preds.view(batch_size, -1, 3)
heading_preds = heading_preds.view(batch_size, -1)
outputs = (cls_probs, center_preds, heading_preds, size_preds)
return outputs
fg_idx = (cls_label.view(-1) == 1).nonzero().view(-1)
assert fg_idx.numel() != 0
outputs = outputs[fg_idx, :]
center_ref2 = center_ref2[fg_idx]
det_outputs = self._slice_output(outputs)
center_boxnet, heading_scores, heading_res_norm, size_scores, size_res_norm = det_outputs
heading_probs = F.softmax(heading_scores, -1)
size_probs = F.softmax(size_scores, -1)
# cls_loss = F.cross_entropy(cls_scores, mask_label, ignore_index=-1)
cls_loss = softmax_focal_loss_ignore(cls_probs, cls_label.view(-1), ignore_idx=-1)
# prepare label
center_label = center_label.unsqueeze(1).expand(-1, num_out, -1).contiguous().view(-1, 3)[fg_idx]
heading_label = heading_label.expand(-1, num_out).contiguous().view(-1)[fg_idx]
size_label = size_label.unsqueeze(1).expand(-1, num_out, -1).contiguous().view(-1, 3)[fg_idx]
size_class_label = size_class_label.expand(-1, num_out).contiguous().view(-1)[fg_idx]
# encode regression targets
center_gt_offsets = center_encode(center_label, center_ref2)
heading_class_label, heading_res_norm_label = angle_encode(heading_label)
size_res_label_norm = size_encode(size_label, mean_size_array, size_class_label)
# loss calculation
# center_loss
center_loss = self.get_center_loss(center_boxnet, center_gt_offsets)
# heading loss
heading_class_loss, heading_res_norm_loss = self.get_heading_loss(
heading_scores, heading_res_norm, heading_class_label, heading_res_norm_label)
# size loss
size_class_loss, size_res_norm_loss = self.get_size_loss(
size_scores, size_res_norm, size_class_label, size_res_label_norm)
# corner loss regulation
center_preds = center_decode(center_ref2, center_boxnet)
heading = angle_decode(heading_res_norm, heading_class_label)
size = size_decode(size_res_norm, mean_size_array, size_class_label)
corners_loss, corner_gts = self.get_corner_loss(
(center_preds, heading, size),
(center_label, heading_label, size_label)
)
BOX_LOSS_WEIGHT = cfg.LOSS.BOX_LOSS_WEIGHT
CORNER_LOSS_WEIGHT = cfg.LOSS.CORNER_LOSS_WEIGHT
HEAD_REG_WEIGHT = cfg.LOSS.HEAD_REG_WEIGHT
SIZE_REG_WEIGHT = cfg.LOSS.SIZE_REG_WEIGHT
# Weighted sum of all losses
loss = cls_loss + \
BOX_LOSS_WEIGHT * (center_loss +
heading_class_loss + size_class_loss +
HEAD_REG_WEIGHT * heading_res_norm_loss +
SIZE_REG_WEIGHT * size_res_norm_loss +
CORNER_LOSS_WEIGHT * corners_loss)
# some metrics to monitor training status
with torch.no_grad():
# accuracy
cls_prec = get_accuracy(cls_probs, cls_label.view(-1))
heading_prec = get_accuracy(heading_probs, heading_class_label.view(-1))
size_prec = get_accuracy(size_probs, size_class_label.view(-1))
# iou metrics
heading_pred_label = torch.argmax(heading_probs, -1)
size_pred_label = torch.argmax(size_probs, -1)
heading_preds = angle_decode(heading_res_norm, heading_pred_label)
size_preds = size_decode(size_res_norm, mean_size_array, size_pred_label)
corner_preds = get_box3d_corners_helper(center_preds, heading_preds, size_preds)
overlap = rbbox_iou_3d_pair(corner_preds.detach().cpu().numpy(), corner_gts.detach().cpu().numpy())
iou2ds, iou3ds = overlap[:, 0], overlap[:, 1]
iou2d_mean = iou2ds.mean()
iou3d_mean = iou3ds.mean()
iou3d_gt_mean = (iou3ds >= cfg.IOU_THRESH).mean()
iou2d_mean = torch.tensor(iou2d_mean).type_as(cls_prec)
iou3d_mean = torch.tensor(iou3d_mean).type_as(cls_prec)
iou3d_gt_mean = torch.tensor(iou3d_gt_mean).type_as(cls_prec)
losses = {
'total_loss': loss,
'cls_loss': cls_loss,
'center_loss': center_loss,
'head_cls_loss': heading_class_loss,
'head_res_loss': heading_res_norm_loss,
'size_cls_loss': size_class_loss,
'size_res_loss': size_res_norm_loss,
'corners_loss': corners_loss
}
metrics = {
'cls_acc': cls_prec,
'head_acc': heading_prec,
'size_acc': size_prec,
'IoU_2D': iou2d_mean,
'IoU_3D': iou3d_mean,
'IoU_' + str(cfg.IOU_THRESH): iou3d_gt_mean
}
return losses, metrics
psp_models = {
'resnet18': lambda: PSPNet(sizes=(1, 2, 3, 6), psp_size=512, deep_features_size=256, backend='resnet18'),
'resnet34': lambda: PSPNet(sizes=(1, 2, 3, 6), psp_size=512, deep_features_size=256, backend='resnet34'),
'resnet50': lambda: PSPNet(sizes=(1, 2, 3, 6), psp_size=2048, deep_features_size=1024, backend='resnet50'),
'resnet101': lambda: PSPNet(sizes=(1, 2, 3, 6), psp_size=2048, deep_features_size=1024, backend='resnet101'),
'resnet152': lambda: PSPNet(sizes=(1, 2, 3, 6), psp_size=2048, deep_features_size=1024, backend='resnet152')
}
class ModifiedResnet(nn.Module):
def __init__(self, usegpu=True):
super(ModifiedResnet, self).__init__()
self.model = psp_models['resnet18'.lower()]()
self.model = nn.DataParallel(self.model)
def forward(self, x):
x = self.model(x)
return x
if __name__ == '__main__':
from datasets.provider_fusion import ProviderDataset
dataset = ProviderDataset(npoints=1024, split='val',
random_flip=True, random_shift=True, one_hot=True,
overwritten_data_path='kitti/data/pickle_data/frustum_caronly_wimage_val.pickle',
gen_image=True)
dataloader = torch.utils.data.DataLoader(dataset, batch_size=1, shuffle=False,
num_workers=4, pin_memory=True)
model = PointNetDet(3, num_vec=0, num_classes=2).cuda()
t = 0
for batch, data_dicts in enumerate(dataloader):
data_dicts_var = {key: value.cuda() for key, value in data_dicts.items()}
# dict_keys(['point_cloud', 'rot_angle', 'box3d_center', 'one_hot',
# 'ref_label', 'center_ref1', 'center_ref2', 'center_ref3', 'center_ref4',
# 'size_class', 'box3d_size', 'box3d_heading', 'image', 'P', 'query_v1'])
tic = time.perf_counter()
losses, metrics= model(data_dicts_var)
tic2 = time.perf_counter()
t += (tic2-tic)
print("Time:%.2fms"%(t))
print()
for key,value in losses.items():
print(key,value)
print()
for key,value in metrics.items():
print(key,value)
print("Avr Time:%.2fms"%(t/len(dataset)))
| 36.905383 | 137 | 0.631614 |
4a1eb96323fac49fe02d05ba434489c36d39aaf9
| 3,260 |
py
|
Python
|
Final/InteractionPage.py
|
maapte/VRGAutomation
|
094f233097d0e88c51eadd6ad86fcd6b03c2307a
|
[
"Apache-2.0"
] | null | null | null |
Final/InteractionPage.py
|
maapte/VRGAutomation
|
094f233097d0e88c51eadd6ad86fcd6b03c2307a
|
[
"Apache-2.0"
] | 2 |
2019-05-14T06:19:21.000Z
|
2019-05-15T21:46:02.000Z
|
Final/InteractionPage.py
|
maapte/VRGAutomation
|
094f233097d0e88c51eadd6ad86fcd6b03c2307a
|
[
"Apache-2.0"
] | null | null | null |
"""
__author__ : Manish Apte
__maintainer__ : Manish Apte
__organization__ : Deloitte
"""
class InteractionPage:
"""" Interaction Page class handle interaction attributes"""
def __init__(self):
self.page_name = "{dynamic}"
self.page_referrer = "{dynamic}"
self.page_language = "{dynamic}"
self.page_view = "false"
self.page_accessibility = "{dynamic}"
self.page_path = "{dynamic}"
self.page_url = "{dynamic}"
self.page_login = "{dynamic}"
self.page_hierarchy = "{dynamic}"
self.user_id = ""
self.user_type = ""
self.user_auth_state = ""
self.site_interaction_event = "true"
self.interaction_name = ""
@property
def get_page_name(self):
return self.page_name
def set_page_name(self, page_name):
self.page_name = page_name
@property
def get_page_referrer(self):
return self.page_referrer
def set_page_referrer(self, page_referrer):
self.page_referrer = page_referrer
@property
def get_page_language(self):
return self.page_language
def set_page_language(self, page_language):
self.page_language = page_language
@property
def get_page_view(self):
return self.page_view
def set_page_view(self, page_view):
self.page_view = page_view
@property
def get_page_accessibility(self):
return self.page_accessibility
def set_page_accessibility(self, page_accessibility):
self.page_accessibility = page_accessibility
@property
def get_page_path(self):
return self.page_path
def set_page_path(self, page_path):
self.page_path = page_path
@property
def get_page_url(self):
return self.page_url
def set_page_url(self, page_url):
self.page_url = page_url
@property
def get_page_login(self):
return self.page_login
def set_page_login(self, page_login):
self.page_login = page_login
@property
def get_page_hierarchy(self):
return self.page_hierarchy
def set_page_hierarchy(self, page_hierarchy):
self.page_hierarchy = page_hierarchy
@property
def get_login_event(self):
return self.login_event
def set_login_event(self, login_event):
self.login_event = login_event
@property
def get_user_id(self):
return self.user_id
def set_user_id(self, user_id):
self.user_id = user_id
@property
def get_user_auth_state(self):
return self.user_auth_state
def set_user_auth_state(self, user_auth_state):
self.user_auth_state = user_auth_state
@property
def get_user_type(self):
return self.user_type
def set_user_type(self, user_type):
self.user_type = user_type
@property
def get_site_interaction_event(self):
return self.site_interaction_event
def set_site_interaction_event(self,site_interaction_event):
self.site_interaction_event = site_interaction_event
@property
def get_interaction_name(self):
        return self.interaction_name
def set_interaction_name(self,interaction_name):
self.interaction_name = interaction_name
| 25.076923 | 64 | 0.675153 |
4a1eb993c41189e335d86893a4631717f4750d63
| 17,094 |
py
|
Python
|
ex_openmic.py
|
ishine/PaSST
|
d7049e78e84ba38173ffd779479d1c9ec7d1c116
|
[
"Apache-2.0"
] | 73 |
2021-10-30T15:05:27.000Z
|
2022-03-24T17:47:01.000Z
|
ex_openmic.py
|
ishine/PaSST
|
d7049e78e84ba38173ffd779479d1c9ec7d1c116
|
[
"Apache-2.0"
] | 11 |
2021-12-06T07:02:49.000Z
|
2022-03-29T11:31:06.000Z
|
ex_openmic.py
|
ishine/PaSST
|
d7049e78e84ba38173ffd779479d1c9ec7d1c116
|
[
"Apache-2.0"
] | 10 |
2021-11-01T05:48:59.000Z
|
2022-03-19T13:26:06.000Z
|
import os
import sys
import torch
from pytorch_lightning.callbacks import ModelCheckpoint
from sacred.config_helpers import DynamicIngredient, CMD
from torch.nn import functional as F
import numpy as np
from ba3l.experiment import Experiment
from ba3l.module import Ba3lModule
from torch.utils.data import DataLoader
from config_updates import add_configs
from helpers.mixup import my_mixup
from helpers.models_size import count_non_zero_params
from helpers.ramp import exp_warmup_linear_down, cosine_cycle
from helpers.workersinit import worker_init_fn
from sklearn import metrics
ex = Experiment("openmic2008")
# Example call with all the default config:
# python ex_openmic.py with trainer.precision=16 -p -m mongodb_server:27000:audioset21_balanced -c "OpenMIC PaSST base"
# with 2 gpus:
# DDP=2 python ex_openmic.py with trainer.precision=16 -p -m mongodb_server:27000:audioset21_balanced -c "OpenMIC PaSST base"
# define datasets and loaders
ex.datasets.training.iter(DataLoader, static_args=dict(worker_init_fn=worker_init_fn), train=True, batch_size=6,
num_workers=16, shuffle=None, dataset=CMD("/basedataset.get_training_set"),
)
get_validate_loader = ex.datasets.test.iter(DataLoader, static_args=dict(worker_init_fn=worker_init_fn),
validate=True, batch_size=20, num_workers=16,
dataset=CMD("/basedataset.get_test_set"))
@ex.config
def default_conf():
cmd = " ".join(sys.argv)
saque_cmd = os.environ.get("SAQUE_CMD", "").strip()
saque_id = os.environ.get("SAQUE_ID", "").strip()
slurm_job_id = os.environ.get("SLURM_JOB_ID", "").strip()
if os.environ.get("SLURM_ARRAY_JOB_ID", False):
slurm_job_id = os.environ.get("SLURM_ARRAY_JOB_ID", "").strip() + "_" + os.environ.get("SLURM_ARRAY_TASK_ID",
"").strip()
process_id = os.getpid()
models = {
"net": DynamicIngredient("models.passt.model_ing", n_classes=20, s_patchout_t=40, s_patchout_f=4),
"mel": DynamicIngredient("models.preprocess.model_ing",
instance_cmd="AugmentMelSTFT",
n_mels=128, sr=32000, win_length=800, hopsize=320, n_fft=1024, freqm=48,
timem=192,
htk=False, fmin=0.0, fmax=None, norm=1, fmin_aug_range=10,
fmax_aug_range=2000)
}
basedataset = DynamicIngredient("openmic2008.dataset.dataset", wavmix=1)
trainer = dict(max_epochs=10, gpus=1, weights_summary='full', benchmark=True, num_sanity_val_steps=0,
reload_dataloaders_every_epoch=True)
lr = 0.00001
use_mixup = True
mixup_alpha = 0.3
# register extra possible configs
add_configs(ex)
@ex.command
def get_scheduler_lambda(warm_up_len=5, ramp_down_start=50, ramp_down_len=50, last_lr_value=0.01,
schedule_mode="exp_lin"):
if schedule_mode == "exp_lin":
return exp_warmup_linear_down(warm_up_len, ramp_down_len, ramp_down_start, last_lr_value)
if schedule_mode == "cos_cyc":
return cosine_cycle(warm_up_len, ramp_down_start, last_lr_value)
raise RuntimeError(f"schedule_mode={schedule_mode} Unknown for a lambda funtion.")
@ex.command
def get_lr_scheduler(optimizer, schedule_mode):
if schedule_mode in {"exp_lin", "cos_cyc"}:
return torch.optim.lr_scheduler.LambdaLR(optimizer, get_scheduler_lambda())
raise RuntimeError(f"schedule_mode={schedule_mode} Unknown.")
@ex.command
def get_optimizer(params, lr, adamw=True, weight_decay=0.0001):
if adamw:
print(f"\nUsing adamw weight_decay={weight_decay}!\n")
return torch.optim.AdamW(params, lr=lr, weight_decay=weight_decay)
return torch.optim.Adam(params, lr=lr)
class M(Ba3lModule):
def __init__(self, experiment):
self.mel = None
self.da_net = None
super(M, self).__init__(experiment)
self.use_mixup = self.config.use_mixup or False
self.mixup_alpha = self.config.mixup_alpha
desc, sum_params, sum_non_zero = count_non_zero_params(self.net)
self.experiment.info["start_sum_params"] = sum_params
self.experiment.info["start_sum_params_non_zero"] = sum_non_zero
        # in case we need embeddings for the DA
self.net.return_embed = True
self.dyn_norm = self.config.dyn_norm
self.do_swa = False
self.distributed_mode = self.config.trainer.num_nodes > 1
def forward(self, x):
return self.net(x)
def mel_forward(self, x):
old_shape = x.size()
x = x.reshape(-1, old_shape[2])
x = self.mel(x)
x = x.reshape(old_shape[0], old_shape[1], x.shape[1], x.shape[2])
if self.dyn_norm:
if not hasattr(self, "tr_m") or not hasattr(self, "tr_std"):
tr_m, tr_std = get_dynamic_norm(self)
self.register_buffer('tr_m', tr_m)
self.register_buffer('tr_std', tr_std)
x = (x - self.tr_m) / self.tr_std
return x
def training_step(self, batch, batch_idx):
# REQUIRED
x, f, y = batch
if self.mel:
x = self.mel_forward(x)
y_mask = y[:, 20:]
y = y[:, :20] > 0.5
y = y.float()
orig_x = x
batch_size = len(y)
rn_indices, lam = None, None
if self.use_mixup:
rn_indices, lam = my_mixup(batch_size, self.mixup_alpha)
lam = lam.to(x.device)
x = x * lam.reshape(batch_size, 1, 1, 1) + x[rn_indices] * (1. - lam.reshape(batch_size, 1, 1, 1))
y_hat, embed = self.forward(x)
if self.use_mixup:
y_mix = y * lam.reshape(batch_size, 1) + y[rn_indices] * (1. - lam.reshape(batch_size, 1))
samples_loss = F.binary_cross_entropy_with_logits(
y_hat, y_mix, reduction="none")
y_mix_mask = ((y_mask > 0.5) | (y_mask[rn_indices] > 0.5)).float()
samples_loss = y_mask.float() * samples_loss
loss = samples_loss.mean()
samples_loss = samples_loss.detach()
else:
samples_loss = F.binary_cross_entropy_with_logits(y_hat, y, reduction="none")
samples_loss = y_mask.float() * samples_loss
loss = samples_loss.mean()
samples_loss = samples_loss.detach()
results = {"loss": loss, }
return results
def training_epoch_end(self, outputs):
avg_loss = torch.stack([x['loss'] for x in outputs]).mean()
logs = {'train.loss': avg_loss, 'step': self.current_epoch}
self.log_dict(logs, sync_dist=True)
def predict(self, batch, batch_idx: int, dataloader_idx: int = None):
x, f, y = batch
if self.mel:
x = self.mel_forward(x)
y_hat, _ = self.forward(x)
return f, y_hat
def validation_step(self, batch, batch_idx):
x, f, y = batch
if self.mel:
x = self.mel_forward(x)
y_mask = y[:, 20:]
y = y[:, :20] > 0.5
y = y.float()
results = {}
model_name = [("", self.net)]
if self.do_swa:
model_name = model_name + [("swa_", self.net_swa)]
for net_name, net in model_name:
y_hat, _ = net(x)
samples_loss = F.binary_cross_entropy_with_logits(y_hat, y)
samples_loss = y_mask.float() * samples_loss
loss = samples_loss.mean()
out = torch.sigmoid(y_hat.detach())
# self.log("validation.loss", loss, prog_bar=True, on_epoch=True, on_step=False)
results = {**results, net_name + "val_loss": loss, net_name + "out": out, net_name + "target": y.detach(),
net_name + "mask": y_mask.detach()}
results = {k: v.cpu() for k, v in results.items()}
return results
def validation_epoch_end(self, outputs):
model_name = [("", self.net)]
if self.do_swa:
model_name = model_name + [("swa_", self.net_swa)]
for net_name, net in model_name:
avg_loss = torch.stack([x[net_name + 'val_loss'] for x in outputs]).mean()
out = torch.cat([x[net_name + 'out'] for x in outputs], dim=0)
target = torch.cat([x[net_name + 'target'] for x in outputs], dim=0)
mask = torch.cat([x[net_name + 'mask'] for x in outputs], dim=0)
try:
y_true = target.float().numpy()
y_pred = out.float().numpy()
y_mask = mask.float().numpy()
average_precision = np.array([metrics.average_precision_score(
y_true[:, i], y_pred[:, i], sample_weight=y_mask[:, i]) for i in range(y_true.shape[1])])
except ValueError:
average_precision = np.array([np.nan] * y_true.shape[1])
#torch.save(average_precision, f"ap_openmic_perclass_{average_precision.mean()}.pt")
try:
roc = np.array([metrics.roc_auc_score(
y_true[:, i], y_pred[:, i], sample_weight=y_mask[:, i]) for i in range(y_true.shape[1])])
except ValueError:
roc = np.array([np.nan] * y_true.shape[1])
logs = {net_name + 'val.loss': torch.as_tensor(avg_loss).cuda(),
net_name + 'ap': torch.as_tensor(average_precision.mean()).cuda(),
net_name + 'roc': torch.as_tensor(roc.mean()).cuda(),
'step': torch.as_tensor(self.current_epoch).cuda()}
self.log_dict(logs, sync_dist=True)
if self.distributed_mode:
allout = self.all_gather(out)
alltarget = self.all_gather(target)
all_mask = self.all_gather(mask)
y_true = alltarget.reshape(-1, alltarget.shape[-1]).cpu().float().numpy()
y_pred = allout.reshape(-1, alltarget.shape[-1]).cpu().float().numpy()
y_mask = all_mask.reshape(-1, alltarget.shape[-1]).cpu().float().numpy()
average_precision = np.array([metrics.average_precision_score(
y_true[:, i], y_pred[:, i], sample_weight=y_mask[:, i]) for i in range(y_true.shape[1])])
if self.trainer.is_global_zero:
logs = {net_name + "allap": torch.as_tensor(average_precision.mean()).cuda(),
'step': torch.as_tensor(self.current_epoch).cuda()}
self.log_dict(logs, sync_dist=False)
else:
self.log_dict({net_name + "allap": logs[net_name + 'ap'], 'step': logs['step']}, sync_dist=True)
def configure_optimizers(self):
# REQUIRED
# can return multiple optimizers and learning_rate schedulers
# (LBFGS it is automatically supported, no need for closure function)
optimizer = get_optimizer(self.parameters())
# torch.optim.Adam(self.parameters(), lr=self.config.lr)
return {
'optimizer': optimizer,
'lr_scheduler': get_lr_scheduler(optimizer)
}
def configure_callbacks(self):
return get_extra_checkpoint_callback() + get_extra_swa_callback()
@ex.command
def get_dynamic_norm(model, dyn_norm=False):
if not dyn_norm:
return None, None
raise RuntimeError('no dynamic norm supported yet.')
@ex.command
def get_extra_checkpoint_callback(save_last_n=None):
if save_last_n is None:
return []
return [ModelCheckpoint(monitor="step", verbose=True, save_top_k=save_last_n, mode='max')]
@ex.command
def get_extra_swa_callback(swa=True, swa_epoch_start=2,
swa_freq=1):
if not swa:
return []
print("\n Using swa!\n")
from helpers.swa_callback import StochasticWeightAveraging
return [StochasticWeightAveraging(swa_epoch_start=swa_epoch_start, swa_freq=swa_freq)]
@ex.command
def main(_run, _config, _log, _rnd, _seed):
trainer = ex.get_trainer()
train_loader = ex.get_train_dataloaders()
val_loader = ex.get_val_dataloaders()
modul = M(ex)
trainer.fit(
modul,
train_dataloader=train_loader,
val_dataloaders=val_loader,
)
return {"done": True}
@ex.command
def model_speed_test(_run, _config, _log, _rnd, _seed, speed_test_batch_size=100):
'''
Test training speed of a model
@param _run:
@param _config:
@param _log:
@param _rnd:
@param _seed:
@param speed_test_batch_size: the batch size during the test
@return:
'''
modul = M(ex)
modul = modul.cuda()
batch_size = speed_test_batch_size
print(f"\nBATCH SIZE : {batch_size}\n")
test_length = 100
print(f"\ntest_length : {test_length}\n")
x = torch.ones([batch_size, 1, 128, 998]).cuda()
    target = torch.ones([batch_size, 20]).cuda()  # 20 OpenMIC classes, matching n_classes above
# one passe
net = modul.net
# net(x)
scaler = torch.cuda.amp.GradScaler()
torch.backends.cudnn.benchmark = True
# net = torch.jit.trace(net,(x,))
optimizer = torch.optim.SGD(net.parameters(), lr=0.001)
print("warmup")
import time
torch.cuda.synchronize()
t1 = time.time()
for i in range(10):
with torch.cuda.amp.autocast():
y_hat, embed = net(x)
loss = F.binary_cross_entropy_with_logits(y_hat, target, reduction="none").mean()
scaler.scale(loss).backward()
scaler.step(optimizer)
scaler.update()
torch.cuda.synchronize()
t2 = time.time()
print('warmup done:', (t2 - t1))
torch.cuda.synchronize()
t1 = time.time()
print("testing speed")
for i in range(test_length):
with torch.cuda.amp.autocast():
y_hat, embed = net(x)
loss = F.binary_cross_entropy_with_logits(y_hat, target, reduction="none").mean()
scaler.scale(loss).backward()
scaler.step(optimizer)
scaler.update()
torch.cuda.synchronize()
t2 = time.time()
print('test done:', (t2 - t1))
print("average speed: ", (test_length * batch_size) / (t2 - t1), " specs/second")
@ex.command
def evaluate_only(_run, _config, _log, _rnd, _seed):
# force overriding the config, not logged = not recommended
trainer = ex.get_trainer()
train_loader = ex.get_train_dataloaders()
val_loader = ex.get_val_dataloaders()
modul = M(ex)
modul.val_dataloader = None
trainer.val_dataloaders = None
print(f"\n\nValidation len={len(val_loader)}\n")
res = trainer.validate(modul, val_dataloaders=val_loader)
print("\n\n Validtaion:")
print(res)
@ex.command
def test_loaders():
'''
    get one sample from each loader for debugging
@return:
'''
for i, b in enumerate(ex.datasets.training.get_iter()):
print(b)
break
for i, b in enumerate(ex.datasets.test.get_iter()):
print(b)
break
def set_default_json_pickle(obj):
if isinstance(obj, set):
return list(obj)
raise TypeError
@ex.command
def preload_mp3(all_y=CMD("/basedataset.preload_mp3")):
'''
read the dataset sequentially, useful if you have a network cache
@param all_y: the dataset preload command
@return:
'''
print(all_y.shape)
def multiprocessing_run(rank, word_size):
print("rank ", rank, os.getpid())
print("word_size ", word_size)
os.environ['NODE_RANK'] = str(rank)
os.environ['CUDA_VISIBLE_DEVICES'] = os.environ['CUDA_VISIBLE_DEVICES'].split(",")[rank]
argv = sys.argv
if rank != 0:
print(f"Unobserved {os.getpid()} with rank {rank}")
argv = argv + ["-u"] # only rank 0 is observed
if "with" not in argv:
argv = argv + ["with"]
argv = argv + [f"trainer.num_nodes={word_size}", f"trainer.accelerator=ddp"]
print(argv)
@ex.main
def default_command():
return main()
ex.run_commandline(argv)
if __name__ == '__main__':
# set DDP=2 forks two processes to run on two GPUs
# the environment variable "DDP" define the number of processes to fork
    # With 2x 2080 Ti GPUs you can train the full model to .47 in around 24 hours
# you may need to set NCCL_P2P_DISABLE=1
word_size = os.environ.get("DDP", None)
if word_size:
import random
word_size = int(word_size)
print(f"\n\nDDP TRAINING WITH WORD_SIZE={word_size}\n\n")
os.environ['MASTER_ADDR'] = '127.0.0.1'
os.environ['MASTER_PORT'] = f"{9999 + random.randint(0, 9999)}" # plz no collisions
os.environ['PL_IN_DDP_SUBPROCESS'] = '1'
for rank in range(word_size):
pid = os.fork()
if pid == 0:
print("Child Forked ")
multiprocessing_run(rank, word_size)
exit(0)
pid, exit_code = os.wait()
print(pid, exit_code)
exit(0)
print("__main__ is running pid", os.getpid(), "in module main: ", __name__)
@ex.automain
def default_command():
return main()
| 36.447761 | 127 | 0.615011 |
4a1ebb35f85a3fb580100e3ad9af5b85050e799c
| 6,777 |
py
|
Python
|
Udacity_Workspace/person_detect.py
|
jonathanyeh0723/OpenVINO_Smart_Queuing_System
|
e08e8f5ca3ad2f58aba91db56bc3a25e336edcd3
|
[
"Apache-2.0"
] | null | null | null |
Udacity_Workspace/person_detect.py
|
jonathanyeh0723/OpenVINO_Smart_Queuing_System
|
e08e8f5ca3ad2f58aba91db56bc3a25e336edcd3
|
[
"Apache-2.0"
] | null | null | null |
Udacity_Workspace/person_detect.py
|
jonathanyeh0723/OpenVINO_Smart_Queuing_System
|
e08e8f5ca3ad2f58aba91db56bc3a25e336edcd3
|
[
"Apache-2.0"
] | null | null | null |
import argparse
import cv2
import os
import sys
import numpy as np
import time
from openvino.inference_engine import IECore
class Queue:
'''
Class for dealing with queues
'''
def __init__(self):
self.queues=[]
def add_queue(self, points):
self.queues.append(points)
def get_queues(self, image):
for q in self.queues:
x_min, y_min, x_max, y_max=q
frame=image[y_min:y_max, x_min:x_max]
yield frame
def check_coords(self, coords):
d={k+1:0 for k in range(len(self.queues))}
for coord in coords:
for i, q in enumerate(self.queues):
if coord[0]>q[0] and coord[2]<q[2]:
d[i+1]+=1
return d
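# --- Editor's illustrative sketch (coordinates below are made up) ---
# check_coords counts a detection for queue i when the detection's x-range
# [xmin, xmax] falls strictly inside that queue's [x_min, x_max]. For example:
def _example_queue_counts():
    q = Queue()
    q.add_queue([0, 0, 300, 400])        # queue 1 region: x_min, y_min, x_max, y_max
    q.add_queue([400, 0, 700, 400])      # queue 2 region
    detections = [[50, 10, 200, 350],    # x-range inside queue 1
                  [450, 20, 650, 380]]   # x-range inside queue 2
    return q.check_coords(detections)    # -> {1: 1, 2: 1}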
class PersonDetect:
'''
Class for the Person Detection Model.
'''
def __init__(self, model_name, device, threshold=0.60):
self.model_weights = model_name + ".bin"
self.model_structure = model_name + ".xml"
self.device = device
self.threshold = threshold
try:
self.core = IECore()
self.model = self.core.read_network(model = self.model_structure, weights = self.model_weights)
except Exception as e:
raise ValueError("Could not Initialize the network. Have you entered the correct model path?")
self.input_name = next(iter(self.model.inputs))
self.input_shape = self.model.inputs[self.input_name].shape
self.output_name = next(iter(self.model.outputs))
self.output_shape = self.model.outputs[self.output_name].shape
def load_model(self):
'''
TODO: This method needs to be completed by you
'''
# Define the exec_network
self.exec_network = self.core.load_network(network = self.model, device_name = self.device, num_requests = 1)
def predict(self, image):
'''
TODO: This method needs to be completed by you
'''
# Grab the input
input_image = self.preprocess_input(image)
input_dict = {self.input_name: input_image}
# Perform inference
output = self.exec_network.infer(input_dict)
# Extract the output
coordinates = self.preprocess_outputs(output[self.output_name])
coordinates, image = self.draw_outputs(coordinates, image)
return coordinates, image
def draw_outputs(self, coords, image):
'''
TODO: This method needs to be completed by you
'''
coordinates = []
for obj in coords:
xmin = int(obj[3] * initial_w)
ymin = int(obj[4] * initial_h)
xmax = int(obj[5] * initial_w)
ymax = int(obj[6] * initial_h)
cv2.rectangle(image, (xmin, ymin), (xmax, ymax), (0, 255, 0), 3)
coordinates.append([xmin, ymin, xmax, ymax])
return coordinates, image
def preprocess_outputs(self, outputs):
'''
TODO: This method needs to be completed by you
'''
coordinates = []
for obj in outputs[0][0]:
conf = obj[2]
label = obj[1]
if conf >= self.threshold and label == 1:
coordinates.append(obj)
return coordinates
def preprocess_input(self, image):
'''
TODO: This method needs to be completed by you
'''
p_frame = cv2.resize(image, (self.input_shape[3], self.input_shape[2]), interpolation=cv2.INTER_AREA)
p_frame = p_frame.transpose((2, 0, 1))
p_frame = p_frame.reshape(1, *p_frame.shape)
return p_frame
def main(args):
model = args.model
device = args.device
video_file = args.video
max_people = args.max_people
threshold = args.threshold
output_path = args.output_path
start_model_load_time = time.time()
pd = PersonDetect(model, device, threshold)
pd.load_model()
total_model_load_time = time.time() - start_model_load_time
queue=Queue()
try:
queue_param=np.load(args.queue_param)
for q in queue_param:
queue.add_queue(q)
except:
print("error loading queue param file")
try:
cap=cv2.VideoCapture(video_file)
except FileNotFoundError:
print("Cannot locate video file: "+ video_file)
except Exception as e:
print("Something else went wrong with the video file: ", e)
global initial_w, initial_h
initial_w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
initial_h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
video_len = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
fps = int(cap.get(cv2.CAP_PROP_FPS))
out_video = cv2.VideoWriter(os.path.join(output_path, 'output_video.mp4'), cv2.VideoWriter_fourcc(*'avc1'), fps, (initial_w, initial_h), True)
counter=0
start_inference_time=time.time()
try:
while cap.isOpened():
ret, frame=cap.read()
if not ret:
break
counter+=1
coords, image= pd.predict(frame)
num_people= queue.check_coords(coords)
print(f"Total People in frame = {len(coords)}")
print(f"Number of people in queue = {num_people}")
out_text=""
y_pixel=25
for k, v in num_people.items():
out_text += f"No. of People in Queue {k} is {v} "
if v >= int(max_people):
out_text += f" Queue full; Please move to next Queue "
cv2.putText(image, out_text, (15, y_pixel), cv2.FONT_HERSHEY_COMPLEX, 1, (0, 255, 0), 2)
out_text=""
y_pixel+=40
out_video.write(image)
total_time=time.time()-start_inference_time
total_inference_time=round(total_time, 1)
fps=counter/total_inference_time
with open(os.path.join(output_path, 'stats.txt'), 'w') as f:
f.write(str(total_inference_time)+'\n')
f.write(str(fps)+'\n')
f.write(str(total_model_load_time)+'\n')
cap.release()
cv2.destroyAllWindows()
except Exception as e:
print("Could not run Inference: ", e)
if __name__=='__main__':
parser=argparse.ArgumentParser()
parser.add_argument('--model', required=True)
parser.add_argument('--device', default='CPU')
parser.add_argument('--video', default=None)
parser.add_argument('--queue_param', default=None)
parser.add_argument('--output_path', default='/results')
parser.add_argument('--max_people', default=2)
parser.add_argument('--threshold', default=0.60)
args=parser.parse_args()
main(args)
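# --- Editor's illustrative invocation (model, video, and queue-param paths are hypothetical) ---
# With an OpenVINO person-detection IR (passed without the .xml/.bin extension), a queue-region
# .npy file, and an input video, the script could be run roughly as:
#   python person_detect.py --model models/person-detection-retail-0013 --device CPU \
#       --video resources/retail.mp4 --queue_param queue_param/retail.npy \
#       --output_path results/ --max_people 2 --threshold 0.6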
| 32.118483 | 146 | 0.593183 |
4a1ebc696fb9584d1ed1e01a3143cf2a2083c2bb
| 3,773 |
py
|
Python
|
src/env.py
|
sandysa/Environment_Shaping_NSE
|
29db4ae6302cc8c6f546fae22ff7eea2588db94f
|
[
"MIT"
] | null | null | null |
src/env.py
|
sandysa/Environment_Shaping_NSE
|
29db4ae6302cc8c6f546fae22ff7eea2588db94f
|
[
"MIT"
] | null | null | null |
src/env.py
|
sandysa/Environment_Shaping_NSE
|
29db4ae6302cc8c6f546fae22ff7eea2588db94f
|
[
"MIT"
] | null | null | null |
#####################################################################
# Author: Sandhya Saisubramanian
# Description: Implements value iteration
#####################################################################
import numpy as np
import sys
import timeit
class Env():
def __init__(self, states, actions,P,R, start_state_id,goal_state_id):
self.gamma = 1.0
self.states = states
self.actions = actions
self.P = P
self.R = R
self.goal_id = goal_state_id
self.start_state_id = start_state_id
self.V = np.zeros((len(self.states))).astype('float32').reshape(-1,1)
self.Q = np.zeros((len(self.states), len(self.actions))).astype('float32')
self.policy = {}
def solve(self, solver,max_trials=700):
if solver == "VI":
start_timer = timeit.default_timer()
self.policy = self.VI(max_trials)
end_timer = timeit.default_timer()
# print('Time taken to solve (seconds): ', end_timer - start_timer)
return self.policy, float(self.V[self.start_state_id])
else:
raise ValueError('Unknown solver')
def VI(self,trials):
epsilon = 0.001
max_trials = trials
dead_end_cost = 500
dead_end_states = []
curr_iter = 0
bestAction = np.full((len(self.states)), -1)
while curr_iter < max_trials:
max_residual = 0
curr_iter += 1
for ns, s in enumerate(self.states):
if ns == self.goal_id:
bestAction[ns] = 0
continue
bestQ = dead_end_cost
hasAction = False
for na, a in enumerate(self.actions):
if self.P[ns][na] is None:
continue
if s in dead_end_states:
break
hasAction = True
qaction = min(dead_end_cost, self.qvalue(ns, na)) # Cost-minimization
self.Q[ns][na] = qaction
if qaction < bestQ:
bestQ = qaction
bestAction[ns] = na
if bestQ >= dead_end_cost or hasAction == False:
if s not in dead_end_states:
dead_end_states.append(s)
self.V[ns] = dead_end_cost
residual = abs(bestQ - self.V[ns])
self.V[ns] = bestQ
max_residual = max(max_residual, residual)
if max_residual < epsilon:
break
return bestAction
def qvalue(self,state_id, action_id):
qaction = 0
succ_list = self.P[state_id][action_id]
if succ_list is not None:
for succ in succ_list:
succ_state_id = self.states.index(succ[0])
prob = succ[1]
qaction += prob * float(self.V[succ_state_id])
return ((self.gamma * qaction) + self.R[state_id][action_id])
else:
return 500
def getStates(self):
return self.states
def getActions(self):
return self.actions
    def get_Trans_matrix(self):
        # The transition model is stored in self.P; no separate T_matrix attribute exists.
        return self.P
def get_R(self):
return self.R
def get_V(self):
return self.V
def get_Q(self):
return self.Q
def get_policy(self):
return self.policy
def get_gamma(self):
return self.gamma
def get_goal(self):
return self.goal_id
def get_P(self):
return self.P
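# --- Editor's illustrative sketch: a tiny two-state MDP solved with VI ---
# States are arbitrary labels; P[s][a] is a list of (successor_state, probability) pairs or
# None when action a is unavailable in state s; R[s][a] is the non-negative action cost.
# The labels, costs, and probabilities below are made up for illustration only.
def _example_tiny_mdp():
    states = ['s0', 'goal']
    actions = ['move']
    P = [
        [[('goal', 1.0)]],   # from s0, 'move' reaches the goal deterministically
        [[('goal', 1.0)]],   # the goal state's value is pinned via goal_state_id
    ]
    R = [
        [1.0],               # cost of moving out of s0
        [0.0],
    ]
    env = Env(states, actions, P, R, start_state_id=0, goal_state_id=1)
    policy, expected_cost = env.solve("VI")
    return policy, expected_cost   # expected_cost evaluates to 1.0 for this MDP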
| 29.708661 | 90 | 0.48688 |
4a1ebd29c00b6b3931ff6d9b974c86a2656082e9
| 832 |
py
|
Python
|
Day01-15/13_exercise_8.py
|
MaoningGuan/-Python-100-Days
|
d36e49d67a134278455438348efc41ffb28b778a
|
[
"MIT"
] | null | null | null |
Day01-15/13_exercise_8.py
|
MaoningGuan/-Python-100-Days
|
d36e49d67a134278455438348efc41ffb28b778a
|
[
"MIT"
] | null | null | null |
Day01-15/13_exercise_8.py
|
MaoningGuan/-Python-100-Days
|
d36e49d67a134278455438348efc41ffb28b778a
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
class A(object):
def __init__(self, value):
self.value = value
if __name__ == '__main__':
    print('----assignment between class objects------')
a = A(100)
b = a
print(a.value)
print(b.value)
a.value = 200
print(a.value)
print(b.value)
b.value = 300
print(a.value)
print(b.value)
    print('----assignment between lists------')
list1 = [1]
list2 = list1
print(list1)
print(list2)
list1.append(2)
print(list1)
print(list2)
list2.append(3)
print(list1)
print(list2)
    print('----assignment between numbers------')
a = 1
b = a
print(a)
print(b)
a = 2
print(a)
print(b)
b = 3
print(a)
print(b)
"""
----assignment between class objects------
100
100
200
200
300
300
----assignment between lists------
[1]
[1]
[1, 2]
[1, 2]
[1, 2, 3]
[1, 2, 3]
----assignment between numbers------
1
1
2
1
2
3
"""
| 12.606061 | 31 | 0.483173 |
4a1ebdd54adad0afe59326bb5b4d97e1f367fa88
| 6,752 |
py
|
Python
|
tools/gnc_visualizer/scripts/communications/com_manager.py
|
limenutt/astrobee
|
9241e67e6692810d6e275abb3165b6d02f4ca5ef
|
[
"Apache-2.0"
] | null | null | null |
tools/gnc_visualizer/scripts/communications/com_manager.py
|
limenutt/astrobee
|
9241e67e6692810d6e275abb3165b6d02f4ca5ef
|
[
"Apache-2.0"
] | null | null | null |
tools/gnc_visualizer/scripts/communications/com_manager.py
|
limenutt/astrobee
|
9241e67e6692810d6e275abb3165b6d02f4ca5ef
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
#
# Copyright (c) 2017, United States Government, as represented by the
# Administrator of the National Aeronautics and Space Administration.
#
# All rights reserved.
#
# The Astrobee platform is licensed under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with the
# License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sys
import configuration_support as configuration
DDS_COM = "dds"
ROS_COM = "ros"
class ComManager:
default_com_method = ROS_COM
current_com_method = None
viz = None
subs_manager = None
started = False
com = None
executor = None
config = configuration.Preferences()
reset_ekf = lambda self: self.executor.reset_ekf()
initialize_bias = lambda self: self.executor.initialize_bias()
toggle_pmc = lambda self, current_value: self.executor.toggle_pmc(current_value)
def __init__(self, com_method=None):
if com_method != None:
            self.set_com_method(com_method)
def start_communications(self, viz):
if com == None:
return
if self.started:
return
self.viz = viz
if self.current_com_method == DDS_COM:
self.__start_dds_com()
else:
self.__start_ros_com()
self.started = True
def stop_communications(self):
if self.subs_manager != None:
self.subs_manager.stop_all()
self.started = False
def was_shutdown(self):
if self.subs_manager == None:
return False
return com.is_shutdown()
def set_com_method(self, com_method, args=None):
if com_method == DDS_COM:
self.current_com_method = com_method
partition_name = None
given_peer = None
domain = None
public_ip = None
if args != None and isinstance(args, dict):
if "partition_name" in args:
partition_name = args["partition_name"]
if "given_peer" in args:
given_peer = args["given_peer"]
if "domain" in args:
domain = args["domain"]
if "public_ip" in args:
public_ip = args["public_ip"]
if self.config.set_preferences(
partition_name, given_peer, domain, public_ip
):
# Print result
print >> sys.stderr, (
self.config.get_all_warnings() + self.config.get_all_info()
)
else:
# Print result and exit
print >> sys.stderr, (
self.config.get_all_errors()
+ self.config.get_all_warnings()
+ self.config.get_all_info()
)
return False
elif com_method == ROS_COM:
self.current_com_method = com_method
else:
self.current_com_method = self.default_com_method
global com
com = __import__(self.current_com_method + "_support", globals(), locals())
return True
def __start_ros_com(self):
self.subs_manager = com.RosSubscriberManager(
"gnc_visualizer", self.viz.print_to_log
)
self.subs_manager.add_subscriber(
"rosout", "/rosout", self.viz.log_callback, com.Log
)
self.subs_manager.add_subscriber(
"truth", "/loc/truth", self.viz.ground_truth_callback, com.PoseStamped
)
self.subs_manager.add_subscriber(
"ml_pose",
"/sparse_mapping/pose",
self.viz.ml_pose_callback,
com.PoseStamped,
)
self.subs_manager.add_subscriber(
"ekf", "/gnc/ekf", self.viz.ekf_callback, com.EkfState
)
self.subs_manager.add_subscriber(
"ctl_cmd", "/gnc/ctl/command", self.viz.command_callback, com.FamCommand
)
self.subs_manager.add_subscriber(
"ctl_traj", "/gnc/ctl/traj", self.viz.traj_callback, com.ControlState
)
self.subs_manager.add_subscriber(
"ctl_shaper", "/gnc/ctl/shaper", self.viz.shaper_callback, com.ControlState
)
self.subs_manager.add_subscriber(
"pmc_cmd", "/hw/pmc/command", self.viz.pmc_callback, com.PmcCommand
)
self.executor = com.RosCommandExecutor()
def __start_dds_com(self):
self.subs_manager = com.DdsSubscriberManager()
self.subs_manager.add_subscriber(
"sparse_mapping_pose_sub",
com.DdsSubscriber(
"SparseMappingPoseStampedSubscriber::SparseMappingPoseStampedReader",
self.viz.ml_pose_callback,
com.PoseStamped,
),
True,
)
self.subs_manager.add_subscriber(
"ekf_sub",
com.DdsSubscriber(
"EkfSubscriber::EkfReader", self.viz.ekf_callback, com.EkfState
),
True,
)
self.subs_manager.add_subscriber(
"fam_sub",
com.DdsSubscriber(
"FamCmdSubscriber::FamCmdReader",
self.viz.command_callback,
com.FamCommand,
),
True,
)
self.subs_manager.add_subscriber(
"shaper_sub",
com.DdsSubscriber(
"GncControlStateSubscriber::ShaperReader",
self.viz.shaper_callback,
com.ControlState,
),
True,
)
self.subs_manager.add_subscriber(
"traj_sub",
com.DdsSubscriber(
"GncControlStateSubscriber::TrajReader",
self.viz.traj_callback,
com.ControlState,
),
True,
)
self.subs_manager.add_subscriber(
"pmc_sub",
com.DdsSubscriber(
"PmcCmdSubscriber::PmcReader", self.viz.pmc_callback, com.PmcCommand
),
True,
)
self.subs_manager.add_subscriber(
"log_sub",
com.DdsSubscriber(
"LogSubscriber::LogReader", self.viz.log_callback, com.Log
),
True,
)
self.executor = com.DdsCommandExecutor()
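# ---------------------------------------------------------------------------
# Editor's usage sketch (not part of the original file). `visualizer` stands
# in for an object exposing the callbacks wired up above (print_to_log,
# ekf_callback, pmc_callback, ...); the ros_support / dds_support modules are
# imported lazily by set_com_method(), so this only runs inside the real
# visualizer environment.
#
#     manager = ComManager()
#     if manager.set_com_method(ROS_COM):
#         manager.start_communications(visualizer)
#         ...
#         manager.stop_communications()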
| 31.849057 | 87 | 0.576126 |
4a1ebdfd7f49307af7db9e0933d8ce20941817ed
| 413 |
py
|
Python
|
diventi/ebooks/migrations/0076_section_bookmark.py
|
flavoi/diven
|
3173ca3ca3fbedc191b8eab3639a6bceb3c442c4
|
[
"Apache-2.0"
] | 2 |
2019-06-27T16:00:17.000Z
|
2020-08-14T07:46:05.000Z
|
diventi/ebooks/migrations/0076_section_bookmark.py
|
flavoi/diven
|
3173ca3ca3fbedc191b8eab3639a6bceb3c442c4
|
[
"Apache-2.0"
] | 26 |
2020-02-15T22:39:35.000Z
|
2022-02-19T21:09:01.000Z
|
diventi/ebooks/migrations/0076_section_bookmark.py
|
flavoi/diven
|
3173ca3ca3fbedc191b8eab3639a6bceb3c442c4
|
[
"Apache-2.0"
] | 1 |
2021-11-12T22:30:15.000Z
|
2021-11-12T22:30:15.000Z
|
# Generated by Django 2.2.4 on 2019-08-11 17:14
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('ebooks', '0075_auto_20190811_1913'),
]
operations = [
migrations.AddField(
model_name='section',
name='bookmark',
field=models.BooleanField(default=True, verbose_name='bookmark'),
),
]
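# Editor's note (assumption, not part of the original migration): this
# AddField corresponds to a model declaration along the lines of
#
#     class Section(models.Model):
#         bookmark = models.BooleanField(default=True, verbose_name='bookmark')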
| 21.736842 | 77 | 0.612591 |