hexsha (string, len 40) | size (int64, 1-1.03M) | ext (string, 10 classes) | lang (string, 1 class) | max_stars_repo_path (string, len 3-239) | max_stars_repo_name (string, len 5-130) | max_stars_repo_head_hexsha (string, len 40-78) | max_stars_repo_licenses (list, len 1-10) | max_stars_count (int64, 1-191k, nullable) | max_stars_repo_stars_event_min_datetime (string, len 24, nullable) | max_stars_repo_stars_event_max_datetime (string, len 24, nullable) | max_issues_repo_path (string, len 3-239) | max_issues_repo_name (string, len 5-130) | max_issues_repo_head_hexsha (string, len 40-78) | max_issues_repo_licenses (list, len 1-10) | max_issues_count (int64, 1-67k, nullable) | max_issues_repo_issues_event_min_datetime (string, len 24, nullable) | max_issues_repo_issues_event_max_datetime (string, len 24, nullable) | max_forks_repo_path (string, len 3-239) | max_forks_repo_name (string, len 5-130) | max_forks_repo_head_hexsha (string, len 40-78) | max_forks_repo_licenses (list, len 1-10) | max_forks_count (int64, 1-105k, nullable) | max_forks_repo_forks_event_min_datetime (string, len 24, nullable) | max_forks_repo_forks_event_max_datetime (string, len 24, nullable) | content (string, len 1-1.03M) | avg_line_length (float64, 1-958k) | max_line_length (int64, 1-1.03M) | alphanum_fraction (float64, 0-1)
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
4a1aa33e57a59fca116b2af179ff784a6692d4ca | 304 | py | Python | utils/procon_tools/languages/python_utils.py | fumiphys/programming_contest | b9466e646045e1c64571af2a1e64813908e70841 | ["MIT"] | 7 | 2019-04-30T14:25:40.000Z | 2020-12-19T17:38:11.000Z | utils/procon_tools/languages/python_utils.py | fumiphys/programming_contest | b9466e646045e1c64571af2a1e64813908e70841 | ["MIT"] | 46 | 2018-09-19T16:42:09.000Z | 2020-05-07T09:05:08.000Z | utils/procon_tools/languages/python_utils.py | fumiphys/programming_contest | b9466e646045e1c64571af2a1e64813908e70841 | ["MIT"] | null | null | null |
'''Python utils
'''
from config import exec_time_base
from pc_utils import exec_command


def exec_py3_input(source, inp):
    '''execute python script
    '''
    stdout_data, stderr_data, returncode = exec_command(
        exec_time_base + ["python3", source], inp)
    return stdout_data, stderr_data
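

# Illustrative sketch, not part of the original file: exec_command is imported
# from pc_utils, which is not included in this dataset row. A minimal stand-in
# with the same call shape (command list plus stdin text, returning stdout,
# stderr and the return code) might look like this; the real helper may differ.
import subprocess


def exec_command_sketch(cmd, inp):
    """Run cmd, feed inp on stdin, and return (stdout, stderr, returncode)."""
    proc = subprocess.Popen(
        cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    stdout_data, stderr_data = proc.communicate(input=inp.encode())
    return stdout_data.decode(), stderr_data.decode(), proc.returncode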
| 23.384615 | 56 | 0.717105 |
4a1aa410f34e866edf9f29b77e9b762147db342b | 7,136 | py | Python | cloudroast/glance/regression/image_operations/deactivate_image_test.py | melissa-kam/cloudroast | 59a3c8b058991d7144fa46e6d97be9bb5bb9deae | ["Apache-2.0"] | null | null | null | cloudroast/glance/regression/image_operations/deactivate_image_test.py | melissa-kam/cloudroast | 59a3c8b058991d7144fa46e6d97be9bb5bb9deae | ["Apache-2.0"] | null | null | null | cloudroast/glance/regression/image_operations/deactivate_image_test.py | melissa-kam/cloudroast | 59a3c8b058991d7144fa46e6d97be9bb5bb9deae | ["Apache-2.0"] | null | null | null |
"""
Copyright 2015 Rackspace
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from cafe.drivers.unittest.decorators import (
data_driven_test, DataDrivenFixture)
from cloudcafe.common.tools.datagen import rand_name
from cloudcafe.glance.common.constants import Messages
from cloudcafe.glance.common.types import ImageStatus
from cloudroast.glance.fixtures import ImagesIntegrationFixture
from cloudroast.glance.generators import ImagesDatasetListGenerator
@DataDrivenFixture
class DeactivateImage(ImagesIntegrationFixture):
@classmethod
def setUpClass(cls):
super(DeactivateImage, cls).setUpClass()
# Count set to number of images required for this module
created_images = cls.images.behaviors.create_images_via_task(
image_properties={'name': rand_name('deactivate_image')}, count=3)
cls.deleted_image = created_images.pop()
cls.images.client.delete_image(cls.deleted_image.id_)
cls.protected_image = created_images.pop()
cls.images.client.update_image(
cls.protected_image.id_, replace={'protected': True})
cls.created_image = created_images.pop()
created_server = cls.compute.servers.behaviors.create_active_server(
image_ref=cls.images.config.primary_image).entity
cls.resources.add(
created_server.id, cls.compute.servers.client.delete_server)
cls.created_snapshot = (
cls.compute.images.behaviors.create_active_image(
created_server.id).entity)
cls.resources.add(
cls.created_snapshot.id, cls.images.client.delete_image)
@classmethod
def tearDownClass(cls):
cls.images.client.update_image(
cls.protected_image.id_, replace={'protected': False})
cls.images.behaviors.resources.release()
super(DeactivateImage, cls).tearDownClass()
@data_driven_test(
ImagesDatasetListGenerator.DeactivateImageTypes())
def ddtest_deactivate_image(self, image):
"""
@summary: Deactivate an image
1) Deactivate an image via wrapper test method
2) Verify that the image's status is deactivated
"""
get_image = self._deactivate_image(image.id_)
self.assertEqual(
get_image.status, ImageStatus.DEACTIVATED,
msg=('Unexpected status for image {0}. '
'Expected: {1} Received: '
'{2}').format(image.id_,
ImageStatus.DEACTIVATED, get_image.status))
def test_deactivate_protected_image(self):
"""
@summary: Deactivate a protected image
1) Deactivate a protected image via wrapper test method
2) Verify that the image's status is deactivated
"""
get_image = self._deactivate_image(self.protected_image.id_)
self.assertEqual(
get_image.status, ImageStatus.DEACTIVATED,
msg=('Unexpected status for image {0}. '
'Expected: {1} Received: '
'{2}').format(self.protected_image.id_,
ImageStatus.DEACTIVATED, get_image.status))
def test_deactivate_snapshot_image(self):
"""
@summary: Deactivate a snapshot image
1) Deactivate a snapshot image via wrapper test method
2) Verify that the image's status is deactivated
"""
get_image = self._deactivate_image(self.created_snapshot.id)
self.assertEqual(
get_image.status, ImageStatus.DEACTIVATED,
msg=('Unexpected status for image {0}. '
'Expected: {1} Received: '
'{2}').format(self.created_snapshot.id,
ImageStatus.DEACTIVATED, get_image.status))
def test_deactivate_deleted_image(self):
"""
@summary: Deactivate a deleted image
1) Deactivate a deleted image
2) Verify that the response code is 404
"""
resp = self.images_admin.client.deactivate_image(
self.deleted_image.id_)
self.assertEqual(
resp.status_code, 404,
Messages.STATUS_CODE_MSG.format(404, resp.status_code))
def test_deactivate_image_using_non_admin_forbidden(self):
"""
@summary: Deactivate an image as non-admin
1) Deactivate an image as non-admin via wrapper test method
2) Verify that the image's status is still active
"""
get_image = self._deactivate_image(
self.created_image.id_, self.images.client, 403)
self.assertEqual(
get_image.status, ImageStatus.ACTIVE,
msg=('Unexpected status for image {0}. '
'Expected: {1} Received: '
'{2}').format(self.created_image.id_,
ImageStatus.ACTIVE, get_image.status))
def test_deactivate_image_using_invalid_image_id(self):
"""
@summary: Deactivate an image using an invalid image id
1) Deactivate an image using an invalid image id
2) Verify that the response code is 404
"""
resp = self.images_admin.client.deactivate_image(image_id='invalid')
self.assertEqual(
resp.status_code, 404,
Messages.STATUS_CODE_MSG.format(404, resp.status_code))
def _deactivate_image(self, image_id, images_client=None,
response_code=None):
"""
@summary: Deactivate image and return the get image details response
@param image_id: Image id to deactivate
@type image_id: Uuid
        @param images_client: Images client to use
@type images_client: Object
@param response_code: Response status code
@type response_code: Integer
@return: Get image details response
@rtype: Object
1) Deactivate an image as specified user
2) Verify that the response code is as expected
3) Get image details passing in the image id
4) Verify that the response is ok
5) Return the get image details response
"""
if images_client is None:
images_client = self.images_admin.client
if response_code is None:
response_code = 204
resp = images_client.deactivate_image(image_id)
self.assertEqual(
resp.status_code, response_code,
Messages.STATUS_CODE_MSG.format(response_code, resp.status_code))
resp = self.images.client.get_image_details(image_id)
self.assertTrue(resp.ok, Messages.OK_RESP_MSG.format(resp.status_code))
return resp.entity
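

# Illustrative sketch, not part of the cloudroast suite above: stripped of the
# cafe/cloudcafe fixtures, _deactivate_image boils down to an "act, then fetch
# and verify" helper. The names below are hypothetical stand-ins, apart from the
# two client calls already used in the class above.
def deactivate_and_fetch(images_client, image_id, expected_code=204):
    """Deactivate an image, check the status code, and return its details."""
    resp = images_client.deactivate_image(image_id)
    assert resp.status_code == expected_code
    return images_client.get_image_details(image_id).entity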
| 36.040404 | 79 | 0.655409 |
4a1aa4ae21559a1b0f4137218649e7cff3bdb073 | 3,736 | py | Python | tests/integration/offer/multibuy_benefit_tests.py | endgame/django-oscar | e5d78436e20b55902537a6cc82edf4e22568f9d6 | ["BSD-3-Clause"] | null | null | null | tests/integration/offer/multibuy_benefit_tests.py | endgame/django-oscar | e5d78436e20b55902537a6cc82edf4e22568f9d6 | ["BSD-3-Clause"] | null | null | null | tests/integration/offer/multibuy_benefit_tests.py | endgame/django-oscar | e5d78436e20b55902537a6cc82edf4e22568f9d6 | ["BSD-3-Clause"] | 1 | 2019-07-10T06:32:14.000Z | 2019-07-10T06:32:14.000Z |
from decimal import Decimal as D
from django.test import TestCase
from django_dynamic_fixture import G
from oscar.apps.offer import models
from oscar.apps.basket.models import Basket
from oscar_testsupport.factories import create_product
class TestAMultibuyDiscountAppliedWithCountCondition(TestCase):
def setUp(self):
range = models.Range.objects.create(
name="All products", includes_all_products=True)
self.condition = models.CountCondition.objects.create(
range=range,
type=models.Condition.COUNT,
value=3)
self.benefit = models.MultibuyDiscountBenefit.objects.create(
range=range,
type=models.Benefit.MULTIBUY,
value=1)
self.basket = G(Basket)
def test_applies_correctly_to_empty_basket(self):
discount = self.benefit.apply(self.basket, self.condition)
self.assertEqual(D('0.00'), discount)
self.assertEqual(0, self.basket.num_items_with_discount)
self.assertEqual(0, self.basket.num_items_without_discount)
def test_applies_correctly_to_basket_which_matches_condition(self):
for product in [create_product(price=D('12.00'))]:
self.basket.add_product(product, 3)
discount = self.benefit.apply(self.basket, self.condition)
self.assertEqual(D('12.00'), discount)
self.assertEqual(3, self.basket.num_items_with_discount)
self.assertEqual(0, self.basket.num_items_without_discount)
def test_applies_correctly_to_basket_which_exceeds_condition(self):
for product in [create_product(price=D('4.00')),
create_product(price=D('2.00'))]:
self.basket.add_product(product, 4)
discount = self.benefit.apply(self.basket, self.condition)
self.assertEqual(D('2.00'), discount)
self.assertEqual(3, self.basket.num_items_with_discount)
self.assertEqual(5, self.basket.num_items_without_discount)
class TestAMultibuyDiscountAppliedWithAValueCondition(TestCase):
def setUp(self):
range = models.Range.objects.create(
name="All products", includes_all_products=True)
self.condition = models.ValueCondition.objects.create(
range=range,
type=models.Condition.VALUE,
value=D('10.00'))
self.benefit = models.MultibuyDiscountBenefit.objects.create(
range=range,
type=models.Benefit.MULTIBUY,
value=1)
self.basket = G(Basket)
def test_applies_correctly_to_empty_basket(self):
discount = self.benefit.apply(self.basket, self.condition)
self.assertEqual(D('0.00'), discount)
self.assertEqual(0, self.basket.num_items_with_discount)
self.assertEqual(0, self.basket.num_items_without_discount)
def test_applies_correctly_to_basket_which_matches_condition(self):
for product in [create_product(price=D('5.00'))]:
self.basket.add_product(product, 2)
discount = self.benefit.apply(self.basket, self.condition)
self.assertEqual(D('5.00'), discount)
self.assertEqual(2, self.basket.num_items_with_discount)
self.assertEqual(0, self.basket.num_items_without_discount)
def test_applies_correctly_to_basket_which_exceeds_condition(self):
for product in [create_product(price=D('4.00')),
create_product(price=D('2.00'))]:
self.basket.add_product(product, 2)
discount = self.benefit.apply(self.basket, self.condition)
self.assertEqual(D('2.00'), discount)
self.assertEqual(3, self.basket.num_items_with_discount)
self.assertEqual(1, self.basket.num_items_without_discount)
| 42.942529 | 71 | 0.690578 |
4a1aa5bd88e9f097500d62cc0eb8109069b21091 | 19,319 | py | Python | main.py | runzhouge/MAC | a35be044797100051934254b71d892eda3853e6a | ["MIT"] | 36 | 2018-11-12T18:56:17.000Z | 2022-03-30T02:24:05.000Z | main.py | runzhouge/MAC | a35be044797100051934254b71d892eda3853e6a | ["MIT"] | 5 | 2018-11-28T03:32:36.000Z | 2019-11-29T02:48:19.000Z | main.py | runzhouge/MAC | a35be044797100051934254b71d892eda3853e6a | ["MIT"] | 10 | 2019-01-04T23:01:47.000Z | 2022-01-03T06:17:19.000Z |
from __future__ import division
import tensorflow as tf
import numpy as np
import acl_model
from six.moves import xrange
import time
from sklearn.metrics import average_precision_score
import pickle
import mpu
import operator
import os
import argparse
import math
from scipy.special import expit
#=====================================================================
GPU_MEM_FRACTION = 0.42
TEST_SAVE_EVERY = 2000
MAX_TRAIN_STEP = 20005
BATCH_SIZE_TRAIN = 28
SEED_ = 1234
#=====================================================================
def parse_args():
"""Parse input arguments."""
parser = argparse.ArgumentParser(description='TALL next')
    parser.add_argument('--is_only_test', dest='is_only_test', help='If it is only for test without training, use True',
default=False)
parser.add_argument('--checkpoint_path', dest='checkpoint_path', help='The path to read the checkpoint when test',
default='./trained_save/')
parser.add_argument('--test_name', dest='test_name', help='The test name which will be displayed in the test txt file',
default='one_time_test')
parser.add_argument('--save_checkpoint_parent_dir', dest='save_checkpoint_parent_dir', help='the parent folder to save the trained model in training',
default='./trained_save/')
parser.add_argument('--is_continue_training', dest='is_continue_training', help='If this is for continuing training the model, use True',
default=False)
parser.add_argument('--checkpoint_path_continue_training', dest='checkpoint_path_continue_training', help='The path to read the checkpoint when continue training ',
default='./trained_save/')
args = parser.parse_args()
return args
def compute_recall_top_n(top_n,class_score_matrix,labels):
correct_num=0.0
for k in range(class_score_matrix.shape[0]):
class_score=class_score_matrix[k,:]
predictions=class_score.argsort()[::-1][0:top_n]
if labels[k] in predictions: correct_num+=1
return correct_num, correct_num/len(labels)
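# Illustrative check, not part of the original file: with one sample whose top
# score is at index 1 and whose ground-truth label is 1, recall@1 is 1.0.
assert compute_recall_top_n(1, np.array([[0.1, 0.9, 0.3]]), [1]) == (1.0, 1.0)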
def compute_precision_top_n(top_n,sentence_image_mat,sclips,iclips):
correct_num=0.0
for k in range(sentence_image_mat.shape[0]):
gt=sclips[k]
sim_v=sentence_image_mat[k]
sim_argsort=np.argsort(sim_v)[::-1][0:top_n]
for index in sim_argsort:
if gt == iclips[index]:
correct_num+=1
return correct_num
def calculate_IoU(i0,i1):
union=(min(i0[0],i1[0]) , max(i0[1],i1[1]))
inter=(max(i0[0],i1[0]) , min(i0[1],i1[1]))
iou=1.0*(inter[1]-inter[0])/(union[1]-union[0])
return iou
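# Illustrative check, not part of the original file: windows (2, 6) and (4, 10)
# intersect on (4, 6) and jointly span (2, 10), so IoU = (6 - 4) / (10 - 2) = 0.25.
assert abs(calculate_IoU((2.0, 6.0), (4.0, 10.0)) - 0.25) < 1e-9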
def nms_temporal(x1,x2,s, overlap):
pick = []
assert len(x1)==len(s)
assert len(x2)==len(s)
if len(x1)==0:
return pick
union = map(operator.sub, x2, x1) # union = x2-x1
I = [i[0] for i in sorted(enumerate(s), key=lambda x:x[1])] # sort and get index
while len(I)>0:
i = I[-1]
pick.append(i)
xx1 = [max(x1[i],x1[j]) for j in I[:-1]]
xx2 = [min(x2[i],x2[j]) for j in I[:-1]]
inter = [max(0.0, k2-k1) for k1, k2 in zip(xx1, xx2)]
o = [inter[u]/(union[i] + union[I[u]] - inter[u]) for u in range(len(I)-1)]
I_new = []
for j in range(len(o)):
if o[j] <=overlap:
I_new.append(I[j])
I = I_new
return pick
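# Illustrative check, not part of the original file: the first two windows below
# overlap heavily, so NMS keeps the higher-scoring one (index 0) and the
# non-overlapping third window (index 2), picked in descending score order.
assert nms_temporal([0.0, 1.0, 10.0], [5.0, 6.0, 15.0], [0.9, 0.8, 0.7], 0.5) == [0, 2]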
def compute_IoU_recall_top_n(top_n,iou_thresh,sentence_image_mat,sclips,iclips):
correct_num=0.0
for k in range(sentence_image_mat.shape[0]):
gt=sclips[k]
gt_start=float(gt.split(" ")[1])
gt_end=float(gt.split(" ")[2])
#print gt +" "+str(gt_start)+" "+str(gt_end)
sim_v=[v for v in sentence_image_mat[k]]
starts=[float(iclip.split("_")[1]) for iclip in iclips]
ends=[float(iclip.split("_")[2]) for iclip in iclips]
picks=nms_temporal(starts,ends,sim_v,iou_thresh-0.05)
#sim_argsort=np.argsort(sim_v)[::-1][0:top_n]
if top_n<len(picks): picks=picks[0:top_n]
for index in picks:
pred_start=float(iclips[index].split("_")[1])
pred_end=float(iclips[index].split("_")[2])
iou=calculate_IoU((gt_start,gt_end),(pred_start,pred_end))
if iou>=iou_thresh:
correct_num+=1
break
return correct_num
def compute_IoU_recall_top_n_forreg(top_n,iou_thresh,sentence_image_mat,sentence_image_reg_mat,sclips,iclips):
correct_num=0.0
for k in range(sentence_image_mat.shape[0]):
gt=sclips[k]
gt_start=float(gt.split("_")[1])
gt_end=float(gt.split("_")[2])
#print gt +" "+str(gt_start)+" "+str(gt_end)
sim_v=[v for v in sentence_image_mat[k]]
starts=[s for s in sentence_image_reg_mat[k,:,0]]
ends=[e for e in sentence_image_reg_mat[k,:,1]]
picks=nms_temporal(starts,ends,sim_v,iou_thresh-0.05)
#sim_argsort=np.argsort(sim_v)[::-1][0:top_n]
if top_n<len(picks): picks=picks[0:top_n]
for index in picks:
pred_start=sentence_image_reg_mat[k,index,0]
pred_end=sentence_image_reg_mat[k,index,1]
iou=calculate_IoU((gt_start,gt_end),(pred_start,pred_end))
if iou>=iou_thresh:
correct_num+=1
break
return correct_num
def do_eval_slidingclips(sess,vs_eval_op,model,movie_length_info,iter_step, test_result_output):
IoU_thresh=[0.5, 0.7]
all_correct_num_10=[0.0]*5
all_correct_num_5=[0.0]*5
all_correct_num_1=[0.0]*5
all_retrievd=0.0
for movie_name in model.test_set.movie_names:
movie_length=movie_length_info[movie_name]
#print "Test movie: "+movie_name+"....loading movie data"
movie_clip_featmaps, movie_clip_sentences=model.test_set.load_movie_slidingclip(movie_name,16)
#print "sentences: "+ str(len(movie_clip_sentences))
#print "clips: "+ str(len(movie_clip_featmaps))
sentence_image_mat=np.zeros([len(movie_clip_sentences),len(movie_clip_featmaps)])
sentence_image_reg_mat=np.zeros([len(movie_clip_sentences),len(movie_clip_featmaps),2])
for k in range(len(movie_clip_sentences)):
sent_vec=movie_clip_sentences[k][1]
VP_spacy_vec = movie_clip_sentences[k][2]
subj_spacy_vec = movie_clip_sentences[k][3]
obj_spacy_vec = movie_clip_sentences[k][4]
sent_vec=np.reshape(sent_vec,[1,sent_vec.shape[0]])
VP_spacy_vec=np.reshape(VP_spacy_vec,[1,VP_spacy_vec.shape[0]])
subj_spacy_vec=np.reshape(subj_spacy_vec,[1,subj_spacy_vec.shape[0]])
obj_spacy_vec=np.reshape(obj_spacy_vec,[1,obj_spacy_vec.shape[0]])
for t in range(len(movie_clip_featmaps)):
featmap = movie_clip_featmaps[t][1]
softmax_ = movie_clip_featmaps[t][2]
visual_clip_name = movie_clip_featmaps[t][0]
# the contents of visual_clip_name
# 0: name; 1: swin_start; 2:swin_end; 3: round_reg_start;
# 4: round_reg_end; 5: reg_start; 6:reg_end; 7: score; others
# swin_start and swin_end
start=float(visual_clip_name.split("_")[1])
end=float(visual_clip_name.split("_")[2])
conf_score = float(visual_clip_name.split("_")[7])
featmap=np.reshape(featmap,[1,featmap.shape[0]])
softmax_ = np.reshape(softmax_, [1, softmax_.shape[0]])
feed_dict = {
model.visual_featmap_ph_test: featmap,
model.sentence_ph_test:sent_vec,
model.VP_spacy_ph_test:VP_spacy_vec,
model.softmax_ph_test: softmax_
}
outputs=sess.run(vs_eval_op,feed_dict=feed_dict)
sentence_image_mat[k,t] = expit(outputs[0]) * conf_score
reg_end=end+outputs[2]
reg_start=start+outputs[1]
sentence_image_reg_mat[k,t,0]=reg_start
sentence_image_reg_mat[k,t,1]=reg_end
iclips=[b[0] for b in movie_clip_featmaps]
sclips=[b[0] for b in movie_clip_sentences]
for k in range(len(IoU_thresh)):
IoU=IoU_thresh[k]
correct_num_10=compute_IoU_recall_top_n_forreg(10,IoU,sentence_image_mat,sentence_image_reg_mat,sclips,iclips)
correct_num_5=compute_IoU_recall_top_n_forreg(5,IoU,sentence_image_mat,sentence_image_reg_mat,sclips,iclips)
correct_num_1=compute_IoU_recall_top_n_forreg(1,IoU,sentence_image_mat,sentence_image_reg_mat,sclips,iclips)
all_correct_num_10[k]+=correct_num_10
all_correct_num_5[k]+=correct_num_5
all_correct_num_1[k]+=correct_num_1
all_retrievd+=len(sclips)
for k in range(len(IoU_thresh)):
print " IoU="+str(IoU_thresh[k])+", R@10: "+str(all_correct_num_10[k]/all_retrievd)+"; IoU="+str(IoU_thresh[k])+", R@5: "+str(all_correct_num_5[k]/all_retrievd)+"; IoU="+str(IoU_thresh[k])+", R@1: "+str(all_correct_num_1[k]/all_retrievd)
test_result_output.write("Step "+str(iter_step)+": IoU="+str(IoU_thresh[k])+", R@10: "+str(all_correct_num_10[k]/all_retrievd)+"; IoU="+str(IoU_thresh[k])+", R@5: "+str(all_correct_num_5[k]/all_retrievd)+"; IoU="+str(IoU_thresh[k])+", R@1: "+str(all_correct_num_1[k]/all_retrievd)+"\n")
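# Illustrative helper, not part of the original file: do_eval_slidingclips above
# recovers the sliding-window bounds (fields 1 and 2) and the proposal confidence
# score (field 7) by splitting the underscore-joined clip name. The same parsing,
# written out explicitly:
def parse_visual_clip_name(visual_clip_name):
    fields = visual_clip_name.split("_")
    swin_start = float(fields[1])
    swin_end = float(fields[2])
    conf_score = float(fields[7])
    return swin_start, swin_end, conf_score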
def run_training():
max_steps_stage1=0
max_steps=MAX_TRAIN_STEP
batch_size=BATCH_SIZE_TRAIN
home_dir = os.environ['HOME']
    # visual feature for each video clip
    # the fc6 layer outputs of C3D pretrained on Sports-1M, 4096-dimension
    train_feature_dir = "/change/directory/to/all_fc6_unit16_overlap0.5/"
    test_feature_dir = "/change/directory/to/all_fc6_unit16_overlap0.5/"
    # visual activity concepts for each video clip
    # the softmax layer output of R(2+1)D pretrained on Kinetics, 400-dimension
train_softmax_dir = '/change/directory/to/train_softmax/'
test_softmax_dir = '/change/directory/to/test_softmax/'
# clip ground truth and corresponding sentence embedding
# this is originally from https://github.com/jiyanggao/TALL
    # we didn't use the sentence embedding provided in these files
    # we use our own sentence embedding which will appear later
    train_csv_path = "./ref_info/charades_sta_train_clip-sentvec_o0.5_l10_activity_nodup.pkl"
    test_csv_path = "./ref_info/charades_sta_test_clip-sentvec_o0.5_l10_activity_nodup.pkl"
    # sentence embedding, verb-object vector
    # the content of each item is organized as dict --> dict --> lst --> dict
    # i.e. The first level dict is the dict for different videos
    # the second level dict is the dict for different video clips in one video
    # the third level list is the different sliding windows and different sentences for one video clip
    # the fourth level dict contains the components we use, e.g. skip-thought sentence embedding, glove word vector and so on.
    # please refer to the paper and corresponding pkl file for more details.
    # sentence embedding is extracted by skip-thoughts (4800-dimension)
# verb-object vector is extracted by stanford glove (300-dimension)
train_clip_sentence_pairs_iou_path = "./ref_info/charades_sta_train_semantic_sentence_VP_sub_obj.pkl"
test_clip_sentence_pairs_path = './ref_info/charades_sta_test_semantic_sentence_VP_sub_obj.pkl'
    # the proposal score used in test
# trained on TURN TAP, https://github.com/jiyanggao/TURN-TAP
# the contents of each line are:
# 0: name; 1: swin_start; 2:swin_end; 3: round_reg_start;
# 4: round_reg_end; 5: reg_start; 6:reg_end; 7: proposal_confident_score;
# 8: others; 9: others
test_swin_txt_path = "./ref_info/charades_sta_test_swin_props_num_36364.txt"
# arguments
args = parse_args()
is_only_test = args.is_only_test
checkpoint_path = args.checkpoint_path
save_checkpoint_parent_dir = args.save_checkpoint_parent_dir
is_continue_training = args.is_continue_training
checkpoint_path_continue_training = args.checkpoint_path_continue_training
test_name = args.test_name
model=acl_model.acl_model(batch_size,train_csv_path, test_csv_path, test_feature_dir, train_feature_dir, train_clip_sentence_pairs_iou_path, test_clip_sentence_pairs_path, test_swin_txt_path, train_softmax_dir, test_softmax_dir)
# if it is test only
if is_only_test:
# txt file to save the test results
localtime = time.asctime(time.localtime(time.time()))
_localtime = localtime.replace(" ", "_").replace(":", "_")
txt_dir = './results_history/'
txt_dir = txt_dir + 'ctrl_test_results_' + _localtime+ '_only_one_test.txt'
test_result_output=open(txt_dir, "w")
with tf.Graph().as_default():
tf.set_random_seed(SEED_)
loss_align_reg,vs_train_op,vs_eval_op,offset_pred,loss_reg=model.construct_model()
# Create a session for running Ops on the Graph.
gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction = GPU_MEM_FRACTION)
sess = tf.Session(config = tf.ConfigProto(gpu_options = gpu_options))
saver = tf.train.Saver(max_to_keep=1000)
merged=tf.summary.merge_all()
writer=tf.summary.FileWriter("./tf_summary/",sess.graph_def)
saver.restore(sess, checkpoint_path)
print "Model restored from " + checkpoint_path, "----------------------------------\n"
movie_length_dict={}
with open("./ref_info/charades_movie_length_info.txt") as f:
for l in f:
movie_length_dict[l.rstrip().split(" ")[0]]=float(l.rstrip().split(" ")[2])
print "Start to test:-----------------\n"
do_eval_slidingclips(sess,vs_eval_op,model,movie_length_dict,test_name, test_result_output)
else:
# txt file to save the test results
localtime = time.asctime(time.localtime(time.time()))
_localtime = localtime.replace(" ", "_").replace(":", "_")
txt_dir = './results_history/'
txt_dir = txt_dir + 'ctrl_test_results_' + _localtime+ '.txt'
test_result_output=open(txt_dir, "w")
# folder to save the trained model
if not os.path.exists(save_checkpoint_parent_dir+_localtime):
os.makedirs(save_checkpoint_parent_dir+_localtime)
# if it is continuing training
if is_continue_training:
            # if there is an error in this line, please assign the current_step yourself.
current_step = int(checkpoint_path_continue_training.split("-")[-1])
with tf.Graph().as_default():
tf.set_random_seed(SEED_)
loss_align_reg,vs_train_op,vs_eval_op,offset_pred,loss_reg=model.construct_model()
# Create a session for running Ops on the Graph.
gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction = GPU_MEM_FRACTION)
sess = tf.Session(config = tf.ConfigProto(gpu_options = gpu_options))
saver = tf.train.Saver(max_to_keep=1000)
merged=tf.summary.merge_all()
writer=tf.summary.FileWriter("./tf_summary/",sess.graph_def)
saver.restore(sess, checkpoint_path_continue_training)
print "Model restored from " + checkpoint_path, "----------------------------------\n"
for step in xrange(current_step, max_steps):
start_time = time.time()
feed_dict = model.fill_feed_dict_train_reg()
_,loss_value,offset_pred_v,loss_reg_v = sess.run([vs_train_op,loss_align_reg,offset_pred,loss_reg], feed_dict=feed_dict)
duration = time.time() - start_time
if step % 5 == 0:
# Print status to stdout.
print('Step %d: loss = %.2f (%.3f sec)' % (step, loss_value, duration))
#print loss_reg_v
#writer.add_summary(sum_str,step)
if (step+1) % TEST_SAVE_EVERY == 0:
save_path = saver.save(sess, save_checkpoint_parent_dir+_localtime+"/trained_model.ckpt", global_step=step+1)
print "Model saved to " + save_path, "----------------------------------\n"
print "Start to test:-----------------\n"
movie_length_dict={}
with open("./ref_info/charades_movie_length_info.txt") as f:
for l in f:
movie_length_dict[l.rstrip().split(" ")[0]]=float(l.rstrip().split(" ")[2])
do_eval_slidingclips(sess,vs_eval_op,model,movie_length_dict,step+1, test_result_output)
else:
current_step = 0
with tf.Graph().as_default():
tf.set_random_seed(SEED_)
loss_align_reg,vs_train_op,vs_eval_op,offset_pred,loss_reg=model.construct_model()
# Create a session for running Ops on the Graph.
gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction = GPU_MEM_FRACTION)
sess = tf.Session(config = tf.ConfigProto(gpu_options = gpu_options))
saver = tf.train.Saver(max_to_keep=1000)
merged=tf.summary.merge_all()
writer=tf.summary.FileWriter("./tf_summary/",sess.graph_def)
# Run the Op to initialize the variables.
init = tf.global_variables_initializer()
sess.run(init)
for step in xrange(current_step, max_steps):
start_time = time.time()
feed_dict = model.fill_feed_dict_train_reg()
_,loss_value,offset_pred_v,loss_reg_v = sess.run([vs_train_op,loss_align_reg,offset_pred,loss_reg], feed_dict=feed_dict)
duration = time.time() - start_time
if step % 5 == 0:
# Print status to stdout.
print('Step %d: loss = %.2f (%.3f sec)' % (step, loss_value, duration))
#print loss_reg_v
#writer.add_summary(sum_str,step)
if (step+1) % TEST_SAVE_EVERY == 0:
save_path = saver.save(sess, save_checkpoint_parent_dir+_localtime+"/trained_model.ckpt", global_step=step+1)
print "Model saved to " + save_path, "----------------------------------\n"
print "Start to test:-----------------\n"
movie_length_dict={}
with open("./ref_info/charades_movie_length_info.txt") as f:
for l in f:
movie_length_dict[l.rstrip().split(" ")[0]]=float(l.rstrip().split(" ")[2])
do_eval_slidingclips(sess,vs_eval_op,model,movie_length_dict,step+1, test_result_output)
def main(_):
run_training()
if __name__ == '__main__':
tf.app.run()
| 44.309633 | 294 | 0.624463 |
4a1aa856cd6943138123864f633a7a7f86ca304e | 20,928 | py | Python | built-in/TensorFlow/Benchmark/nlp/Nezha-large_for_TensorFlow/utils/create_squad_data.py | Ascend/modelzoo | f018cfed33dbb1cc2110b9ea2e233333f71cc509 | ["Apache-2.0"] | 12 | 2020-12-13T08:34:24.000Z | 2022-03-20T15:17:17.000Z | built-in/TensorFlow/Benchmark/nlp/Nezha-large_for_TensorFlow/utils/create_squad_data.py | Ascend/modelzoo | f018cfed33dbb1cc2110b9ea2e233333f71cc509 | ["Apache-2.0"] | 1 | 2022-01-20T03:11:05.000Z | 2022-01-20T06:53:39.000Z | built-in/TensorFlow/Benchmark/nlp/Nezha-large_for_TensorFlow/utils/create_squad_data.py | Ascend/modelzoo | f018cfed33dbb1cc2110b9ea2e233333f71cc509 | ["Apache-2.0"] | 2 | 2021-07-10T12:40:46.000Z | 2021-12-17T07:55:15.000Z |
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import json
import math
import os
import random
import modeling
import optimization
import tokenization
import six
import tensorflow as tf
import horovod.tensorflow as hvd
import time
flags = tf.flags
FLAGS = None
def extract_flags():
flags.DEFINE_integer(
"max_seq_length", 384,
"The maximum total input sequence length after WordPiece tokenization. "
"Sequences longer than this will be truncated, and sequences shorter "
"than this will be padded.")
flags.DEFINE_integer(
"doc_stride", 128,
"When splitting up a long document into chunks, how much stride to "
"take between chunks.")
flags.DEFINE_integer(
"max_query_length", 64,
"The maximum number of tokens for the question. Questions longer than "
"this will be truncated to this length.")
flags.DEFINE_bool(
"version_2_with_negative", False,
"If true, the SQuAD examples contain some that do not have an answer.")
flags.DEFINE_string("train_file", None,
"SQuAD json for training. E.g., train-v1.1.json")
flags.DEFINE_string(
"predict_file", None,
"SQuAD json for predictions. E.g., dev-v1.1.json or test-v1.1.json")
flags.DEFINE_string(
"squad_dir", None,
"The output directory where the model checkpoints will be written.")
flags.DEFINE_string("vocab_file", None,
"The vocabulary file that the BERT model was trained on.")
flags.DEFINE_bool(
"do_lower_case", True,
"Whether to lower case the input text. Should be True for uncased "
"models and False for cased models.")
flags.DEFINE_bool(
"verbose_logging", False,
"If true, all of the warnings related to data processing will be printed. "
"A number of warnings are expected for a normal SQuAD evaluation.")
flags.mark_flag_as_required("train_file")
flags.mark_flag_as_required("predict_file")
flags.mark_flag_as_required("squad_dir")
flags.mark_flag_as_required("vocab_file")
return flags.FLAGS
class SquadExample(object):
"""A single training/test example for simple sequence classification.
For examples without an answer, the start and end position are -1.
"""
def __init__(self,
qas_id,
question_text,
doc_tokens,
orig_answer_text=None,
start_position=None,
end_position=None,
is_impossible=False):
self.qas_id = qas_id
self.question_text = question_text
self.doc_tokens = doc_tokens
self.orig_answer_text = orig_answer_text
self.start_position = start_position
self.end_position = end_position
self.is_impossible = is_impossible
def __str__(self):
return self.__repr__()
def __repr__(self):
s = ""
s += "qas_id: %s" % (tokenization.printable_text(self.qas_id))
s += ", question_text: %s" % (
tokenization.printable_text(self.question_text))
s += ", doc_tokens: [%s]" % (" ".join(self.doc_tokens))
if self.start_position:
s += ", start_position: %d" % (self.start_position)
if self.start_position:
s += ", end_position: %d" % (self.end_position)
if self.start_position:
s += ", is_impossible: %r" % (self.is_impossible)
return s
class InputFeatures(object):
"""A single set of features of data."""
def __init__(self,
unique_id,
example_index,
doc_span_index,
tokens,
token_to_orig_map,
token_is_max_context,
input_ids,
input_mask,
segment_ids,
start_position=None,
end_position=None,
is_impossible=None):
self.unique_id = unique_id
self.example_index = example_index
self.doc_span_index = doc_span_index
self.tokens = tokens
self.token_to_orig_map = token_to_orig_map
self.token_is_max_context = token_is_max_context
self.input_ids = input_ids
self.input_mask = input_mask
self.segment_ids = segment_ids
self.start_position = start_position
self.end_position = end_position
self.is_impossible = is_impossible
def read_squad_examples(input_file, is_training, version_2_with_negative=False):
"""Read a SQuAD json file into a list of SquadExample."""
with tf.gfile.Open(input_file, "r") as reader:
input_data = json.load(reader)["data"]
def is_whitespace(c):
if c == " " or c == "\t" or c == "\r" or c == "\n" or ord(c) == 0x202F:
return True
return False
examples = []
for entry in input_data:
for paragraph in entry["paragraphs"]:
paragraph_text = paragraph["context"]
doc_tokens = []
char_to_word_offset = []
prev_is_whitespace = True
for c in paragraph_text:
if is_whitespace(c):
prev_is_whitespace = True
else:
if prev_is_whitespace:
doc_tokens.append(c)
else:
doc_tokens[-1] += c
prev_is_whitespace = False
char_to_word_offset.append(len(doc_tokens) - 1)
for qa in paragraph["qas"]:
qas_id = qa["id"]
question_text = qa["question"]
start_position = None
end_position = None
orig_answer_text = None
is_impossible = False
if is_training:
if version_2_with_negative:
is_impossible = qa["is_impossible"]
if (len(qa["answers"]) != 1) and (not is_impossible):
raise ValueError(
"For training, each question should have exactly 1 answer.")
if not is_impossible:
answer = qa["answers"][0]
orig_answer_text = answer["text"]
answer_offset = answer["answer_start"]
answer_length = len(orig_answer_text)
start_position = char_to_word_offset[answer_offset]
end_position = char_to_word_offset[answer_offset + answer_length -
1]
# Only add answers where the text can be exactly recovered from the
# document. If this CAN'T happen it's likely due to weird Unicode
# stuff so we will just skip the example.
#
# Note that this means for training mode, every example is NOT
# guaranteed to be preserved.
actual_text = " ".join(
doc_tokens[start_position:(end_position + 1)])
cleaned_answer_text = " ".join(
tokenization.whitespace_tokenize(orig_answer_text))
if actual_text.find(cleaned_answer_text) == -1:
tf.logging.warning("Could not find answer: '%s' vs. '%s'",
actual_text, cleaned_answer_text)
continue
else:
start_position = -1
end_position = -1
orig_answer_text = ""
example = SquadExample(
qas_id=qas_id,
question_text=question_text,
doc_tokens=doc_tokens,
orig_answer_text=orig_answer_text,
start_position=start_position,
end_position=end_position,
is_impossible=is_impossible)
examples.append(example)
return examples
def _check_is_max_context(doc_spans, cur_span_index, position):
"""Check if this is the 'max context' doc span for the token."""
# Because of the sliding window approach taken to scoring documents, a single
# token can appear in multiple documents. E.g.
# Doc: the man went to the store and bought a gallon of milk
# Span A: the man went to the
# Span B: to the store and bought
# Span C: and bought a gallon of
# ...
#
# Now the word 'bought' will have two scores from spans B and C. We only
# want to consider the score with "maximum context", which we define as
# the *minimum* of its left and right context (the *sum* of left and
# right context will always be the same, of course).
#
# In the example the maximum context for 'bought' would be span C since
# it has 1 left context and 3 right context, while span B has 4 left context
# and 0 right context.
best_score = None
best_span_index = None
for (span_index, doc_span) in enumerate(doc_spans):
end = doc_span.start + doc_span.length - 1
if position < doc_span.start:
continue
if position > end:
continue
num_left_context = position - doc_span.start
num_right_context = end - position
score = min(num_left_context, num_right_context) + 0.01 * doc_span.length
if best_score is None or score > best_score:
best_score = score
best_span_index = span_index
return cur_span_index == best_span_index
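# Illustrative check, not part of the original file: token position 4 appears in
# both spans below. In the first span it has 4 tokens of left context and 0 of
# right context (score 0.05); in the second it has 1 and 3 (score 1.05), so only
# the second span counts as its "max context" span.
_ExampleDocSpan = collections.namedtuple("ExampleDocSpan", ["start", "length"])
_example_doc_spans = [_ExampleDocSpan(start=0, length=5),
                      _ExampleDocSpan(start=3, length=5)]
assert not _check_is_max_context(_example_doc_spans, 0, 4)
assert _check_is_max_context(_example_doc_spans, 1, 4)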
def _improve_answer_span(doc_tokens, input_start, input_end, tokenizer,
orig_answer_text):
"""Returns tokenized answer spans that better match the annotated answer."""
# The SQuAD annotations are character based. We first project them to
# whitespace-tokenized words. But then after WordPiece tokenization, we can
# often find a "better match". For example:
#
# Question: What year was John Smith born?
# Context: The leader was John Smith (1895-1943).
# Answer: 1895
#
# The original whitespace-tokenized answer will be "(1895-1943).". However
# after tokenization, our tokens will be "( 1895 - 1943 ) .". So we can match
# the exact answer, 1895.
#
# However, this is not always possible. Consider the following:
#
  # Question: What country is the top exporter of electronics?
  # Context: The Japanese electronics industry is the largest in the world.
# Answer: Japan
#
# In this case, the annotator chose "Japan" as a character sub-span of
# the word "Japanese". Since our WordPiece tokenizer does not split
# "Japanese", we just use "Japanese" as the annotation. This is fairly rare
# in SQuAD, but does happen.
tok_answer_text = " ".join(tokenizer.tokenize(orig_answer_text))
for new_start in range(input_start, input_end + 1):
for new_end in range(input_end, new_start - 1, -1):
text_span = " ".join(doc_tokens[new_start:(new_end + 1)])
if text_span == tok_answer_text:
return (new_start, new_end)
return (input_start, input_end)
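# Illustrative check, not part of the original file: a trivial whitespace
# "tokenizer" stands in for the WordPiece FullTokenizer that real callers pass.
# The answer "1895" is narrowed from the full span (0, 5) to the single matching
# token at index 1, mirroring the docstring example above.
class _WhitespaceTokenizerStandIn(object):

  def tokenize(self, text):
    return text.split()


assert _improve_answer_span(
    ["(", "1895", "-", "1943", ")", "."], 0, 5,
    _WhitespaceTokenizerStandIn(), "1895") == (1, 1)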
def convert_examples_to_features(examples, tokenizer, max_seq_length,
doc_stride, max_query_length, is_training,
output_fn, verbose_logging=False):
"""Loads a data file into a list of `InputBatch`s."""
unique_id = 1000000000
for (example_index, example) in enumerate(examples):
query_tokens = tokenizer.tokenize(example.question_text)
if len(query_tokens) > max_query_length:
query_tokens = query_tokens[0:max_query_length]
tok_to_orig_index = []
orig_to_tok_index = []
all_doc_tokens = []
for (i, token) in enumerate(example.doc_tokens):
orig_to_tok_index.append(len(all_doc_tokens))
sub_tokens = tokenizer.tokenize(token)
for sub_token in sub_tokens:
tok_to_orig_index.append(i)
all_doc_tokens.append(sub_token)
tok_start_position = None
tok_end_position = None
if is_training and example.is_impossible:
tok_start_position = -1
tok_end_position = -1
if is_training and not example.is_impossible:
tok_start_position = orig_to_tok_index[example.start_position]
if example.end_position < len(example.doc_tokens) - 1:
tok_end_position = orig_to_tok_index[example.end_position + 1] - 1
else:
tok_end_position = len(all_doc_tokens) - 1
(tok_start_position, tok_end_position) = _improve_answer_span(
all_doc_tokens, tok_start_position, tok_end_position, tokenizer,
example.orig_answer_text)
# The -3 accounts for [CLS], [SEP] and [SEP]
max_tokens_for_doc = max_seq_length - len(query_tokens) - 3
# We can have documents that are longer than the maximum sequence length.
# To deal with this we do a sliding window approach, where we take chunks
    # of up to our max length with a stride of `doc_stride`.
_DocSpan = collections.namedtuple( # pylint: disable=invalid-name
"DocSpan", ["start", "length"])
doc_spans = []
start_offset = 0
while start_offset < len(all_doc_tokens):
length = len(all_doc_tokens) - start_offset
if length > max_tokens_for_doc:
length = max_tokens_for_doc
doc_spans.append(_DocSpan(start=start_offset, length=length))
if start_offset + length == len(all_doc_tokens):
break
start_offset += min(length, doc_stride)
for (doc_span_index, doc_span) in enumerate(doc_spans):
tokens = []
token_to_orig_map = {}
token_is_max_context = {}
segment_ids = []
tokens.append("[CLS]")
segment_ids.append(0)
for token in query_tokens:
tokens.append(token)
segment_ids.append(0)
tokens.append("[SEP]")
segment_ids.append(0)
for i in range(doc_span.length):
split_token_index = doc_span.start + i
token_to_orig_map[len(tokens)] = tok_to_orig_index[split_token_index]
is_max_context = _check_is_max_context(doc_spans, doc_span_index,
split_token_index)
token_is_max_context[len(tokens)] = is_max_context
tokens.append(all_doc_tokens[split_token_index])
segment_ids.append(1)
tokens.append("[SEP]")
segment_ids.append(1)
input_ids = tokenizer.convert_tokens_to_ids(tokens)
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
input_mask = [1] * len(input_ids)
# Zero-pad up to the sequence length.
while len(input_ids) < max_seq_length:
input_ids.append(0)
input_mask.append(0)
segment_ids.append(0)
assert len(input_ids) == max_seq_length
assert len(input_mask) == max_seq_length
assert len(segment_ids) == max_seq_length
start_position = None
end_position = None
if is_training and not example.is_impossible:
# For training, if our document chunk does not contain an annotation
# we throw it out, since there is nothing to predict.
doc_start = doc_span.start
doc_end = doc_span.start + doc_span.length - 1
out_of_span = False
if not (tok_start_position >= doc_start and
tok_end_position <= doc_end):
out_of_span = True
if out_of_span:
start_position = 0
end_position = 0
else:
doc_offset = len(query_tokens) + 2
start_position = tok_start_position - doc_start + doc_offset
end_position = tok_end_position - doc_start + doc_offset
if is_training and example.is_impossible:
start_position = 0
end_position = 0
if verbose_logging and example_index < 20:
tf.logging.info("*** Example ***")
tf.logging.info("unique_id: %s" % (unique_id))
tf.logging.info("example_index: %s" % (example_index))
tf.logging.info("doc_span_index: %s" % (doc_span_index))
tf.logging.info("tokens: %s" % " ".join(
[tokenization.printable_text(x) for x in tokens]))
tf.logging.info("token_to_orig_map: %s" % " ".join(
["%d:%d" % (x, y) for (x, y) in six.iteritems(token_to_orig_map)]))
tf.logging.info("token_is_max_context: %s" % " ".join([
"%d:%s" % (x, y) for (x, y) in six.iteritems(token_is_max_context)
]))
tf.logging.info("input_ids: %s" % " ".join([str(x) for x in input_ids]))
tf.logging.info(
"input_mask: %s" % " ".join([str(x) for x in input_mask]))
tf.logging.info(
"segment_ids: %s" % " ".join([str(x) for x in segment_ids]))
if is_training and example.is_impossible:
tf.logging.info("impossible example")
if is_training and not example.is_impossible:
answer_text = " ".join(tokens[start_position:(end_position + 1)])
tf.logging.info("start_position: %d" % (start_position))
tf.logging.info("end_position: %d" % (end_position))
tf.logging.info(
"answer: %s" % (tokenization.printable_text(answer_text)))
feature = InputFeatures(
unique_id=unique_id,
example_index=example_index,
doc_span_index=doc_span_index,
tokens=tokens,
token_to_orig_map=token_to_orig_map,
token_is_max_context=token_is_max_context,
input_ids=input_ids,
input_mask=input_mask,
segment_ids=segment_ids,
start_position=start_position,
end_position=end_position,
is_impossible=example.is_impossible)
# Run callback
output_fn(feature)
unique_id += 1
class FeatureWriter(object):
"""Writes InputFeature to TF example file."""
def __init__(self, filename, is_training):
self.filename = filename
self.is_training = is_training
self.num_features = 0
self._writer = tf.python_io.TFRecordWriter(filename)
def process_feature(self, feature):
"""Write a InputFeature to the TFRecordWriter as a tf.train.Example."""
self.num_features += 1
def create_int_feature(values):
feature = tf.train.Feature(
int64_list=tf.train.Int64List(value=list(values)))
return feature
features = collections.OrderedDict()
features["unique_ids"] = create_int_feature([feature.unique_id])
features["input_ids"] = create_int_feature(feature.input_ids)
features["input_mask"] = create_int_feature(feature.input_mask)
features["segment_ids"] = create_int_feature(feature.segment_ids)
if self.is_training:
features["start_positions"] = create_int_feature([feature.start_position])
features["end_positions"] = create_int_feature([feature.end_position])
impossible = 0
if feature.is_impossible:
impossible = 1
features["is_impossible"] = create_int_feature([impossible])
tf_example = tf.train.Example(features=tf.train.Features(feature=features))
self._writer.write(tf_example.SerializeToString())
def close(self):
self._writer.close()
def main():
FLAGS = extract_flags()
tokenizer = tokenization.FullTokenizer(
vocab_file=FLAGS.vocab_file, do_lower_case=FLAGS.do_lower_case)
tf.gfile.MakeDirs(FLAGS.squad_dir + "/final_tfrecords_sharded")
# We write to a temporary file to avoid storing very large constant tensors
# in memory.
train_examples = read_squad_examples(
input_file=FLAGS.train_file, is_training=True,
version_2_with_negative=FLAGS.version_2_with_negative)
train_writer = FeatureWriter(
filename=os.path.join(FLAGS.squad_dir, "final_tfrecords_sharded/train.tf_record"),
is_training=True)
convert_examples_to_features(
examples=train_examples,
tokenizer=tokenizer,
max_seq_length=FLAGS.max_seq_length,
doc_stride=FLAGS.doc_stride,
max_query_length=FLAGS.max_query_length,
is_training=True,
output_fn=train_writer.process_feature,
verbose_logging=FLAGS.verbose_logging)
train_writer.close()
eval_examples = read_squad_examples(
input_file=FLAGS.predict_file, is_training=False,
version_2_with_negative=FLAGS.version_2_with_negative)
eval_writer = FeatureWriter(
filename=os.path.join(FLAGS.squad_dir, "final_tfrecords_sharded/eval.tf_record"),
is_training=False)
eval_features = []
def append_feature(feature):
eval_features.append(feature)
eval_writer.process_feature(feature)
convert_examples_to_features(
examples=eval_examples,
tokenizer=tokenizer,
max_seq_length=FLAGS.max_seq_length,
doc_stride=FLAGS.doc_stride,
max_query_length=FLAGS.max_query_length,
is_training=False,
output_fn=append_feature,
verbose_logging=FLAGS.verbose_logging)
eval_writer.close()
if __name__ == "__main__":
main()
| 37.172291 | 90 | 0.661841 |
4a1aa8c2d60f22a8d64ea6280b0fa21dfefc93c2 | 18,803 | py | Python | emodelrunner/GUI_utils/simulator.py | BlueBrain/EModelRunner | 3d46e9ce20e76666288e84a300c329b46f0fa2c4 | ["ECL-2.0", "Apache-2.0"] | 3 | 2021-12-03T15:28:26.000Z | 2022-02-01T11:44:29.000Z | emodelrunner/GUI_utils/simulator.py | BlueBrain/EModelRunner | 3d46e9ce20e76666288e84a300c329b46f0fa2c4 | ["ECL-2.0", "Apache-2.0"] | 27 | 2021-12-03T09:16:29.000Z | 2022-03-03T10:29:21.000Z | emodelrunner/GUI_utils/simulator.py | BlueBrain/EModelRunner | 3d46e9ce20e76666288e84a300c329b46f0fa2c4 | ["ECL-2.0", "Apache-2.0"] | 4 | 2021-12-07T08:16:29.000Z | 2022-02-22T17:55:58.000Z |
"""Class containing simulation for the GUI."""
# Copyright 2020-2022 Blue Brain Project / EPFL
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
import numpy as np
from bluepyopt import ephys
from emodelrunner.recordings import RecordingCustom
from emodelrunner.cell import CellModelCustom
from emodelrunner.synapses.stimuli import NrnNetStimStimulusCustom
from emodelrunner.load import (
load_config,
load_syn_mechs,
load_unoptimized_parameters,
load_mechanisms,
get_morph_args,
get_release_params,
)
from emodelrunner.morphology import create_morphology
from emodelrunner.synapses.create_locations import get_syn_locs
def section_coordinate_3d(sec, seg_pos):
"""Returns the 3d coordinates of a point in a section.
Args:
sec: neuron section
seg_pos (float): postion of the segment os the section
(should be between 0 and 1)
Returns:
list: 3d coordinates of a point in a section, or None if not found
"""
n3d = sec.n3d()
arc3d = [sec.arc3d(i) for i in range(n3d)]
x3d = [sec.x3d(i) for i in range(n3d)]
y3d = [sec.y3d(i) for i in range(n3d)]
z3d = [sec.z3d(i) for i in range(n3d)]
if not arc3d or seg_pos < 0 or seg_pos > 1:
return None
seg_pos = seg_pos * arc3d[-1]
if seg_pos in arc3d:
idx = arc3d.index(seg_pos)
local_x = x3d[idx]
local_y = y3d[idx]
local_z = z3d[idx]
return [local_x, local_y, local_z]
else:
        # start at index 1 so arc3d[i - 1] is the preceding 3d point
        for i, arc in enumerate(arc3d[1:], 1):
if arc > seg_pos > arc3d[i - 1] and arc - arc3d[i - 1] != 0:
proportion = (seg_pos - arc3d[i - 1]) / (arc - arc3d[i - 1])
local_x = x3d[i - 1] + proportion * (x3d[i] - x3d[i - 1])
local_y = y3d[i - 1] + proportion * (y3d[i] - y3d[i - 1])
local_z = z3d[i - 1] + proportion * (z3d[i] - z3d[i - 1])
return [local_x, local_y, local_z]
return None
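# Illustrative example, not part of the original module: a minimal stand-in for
# the NEURON section interface used above (n3d/arc3d/x3d/y3d/z3d). With two 3d
# points at arc lengths 0 and 10, seg_pos=1.0 maps exactly onto the second point,
# so section_coordinate_3d returns its coordinates, [2.0, 4.0, 6.0].
class _FakeSectionExample:
    """Two-point section used only by the illustrative function below."""

    def n3d(self):
        return 2

    def arc3d(self, i):
        return [0.0, 10.0][i]

    def x3d(self, i):
        return [0.0, 2.0][i]

    def y3d(self, i):
        return [0.0, 4.0][i]

    def z3d(self, i):
        return [0.0, 6.0][i]


def _example_section_coordinate_3d():
    """Return True if seg_pos=1.0 maps to the last 3d point, [2.0, 4.0, 6.0]."""
    return section_coordinate_3d(_FakeSectionExample(), 1.0) == [2.0, 4.0, 6.0]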
def get_pos_and_color(sec, seg_pos, syn_type):
"""Returns the position and the synaptic type (0 for inhib. or 1 for excit.).
Args:
sec: neuron section
        seg_pos (float): position of the segment in the section
(should be between 0 and 1)
syn_type (int): synaptic type. excitatory if >100,
inhibitory if <100
Returns:
list: first 3 numbers are the position, fourth is the synaptic type, None if pos not found
"""
pos = section_coordinate_3d(sec, seg_pos)
if pos is None:
return None
if syn_type < 100:
syn_type_ = 0
else:
syn_type_ = 1
pos.append(syn_type_)
return pos
def get_step_data(steps, step, default_step):
"""Extract step data from StepProtocol json dict and add amplitude to a step list.
Args:
steps (list): list of step amplitudes (nA) to be updated
step (dict or list of dicts): step from which to extract step data
default_step (float): default value for the custom step entry (nA)
Returns:
a tuple containing
- float: total duration of the step (ms)
- float: delay of the step (ms)
- float: duration of the step (ms)
"""
# can be dict or list of dicts
if isinstance(step, list):
for step_ in step:
# If a default step amplitude is registered,
# two buttons will have the same value:
# the registered one and the custom one
if step_["amp"] != default_step:
steps.append(step_["amp"])
# replace default values
# may be replaced several times
total_duration = step_["totduration"]
step_delay = step_["delay"]
step_duration = step_["duration"]
else:
if step["amp"] != default_step:
steps.append(step["amp"])
total_duration = step["totduration"]
step_delay = step["delay"]
step_duration = step["duration"]
return total_duration, step_delay, step_duration
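# Illustrative usage, not part of the original module: a single step dict whose
# amplitude differs from the default is appended to the step list, and the timing
# fields are returned unchanged.
def _example_get_step_data():
    """Return the example step list and timing tuple described above."""
    steps = []
    step = {"amp": 0.2, "totduration": 3000, "delay": 700, "duration": 2000}
    timings = get_step_data(steps, step, default_step=0)
    # steps is now [0.2]; timings is (3000, 700, 2000)
    return steps, timings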
def get_holding_data(holdings, stimulus_data, total_duration, default_holding):
"""Extract holding data from StepProtocol json dict and add amplitude to a holding list.
Args:
holdings (list): list of holding amplitudes (nA) to be updated
stimulus_data (dict): stimulus dict from protocol json file containing holding data
total_duration (float): total duration of the step (ms)
default_holding (float): default value for the custom holding entry
Returns:
a tuple containing
- float: delay of the holding stimulus (ms)
- float: duration of the holding stimulus (ms)
"""
if "holding" in stimulus_data:
holding = stimulus_data["holding"]
# amp can be None in e.g. Rin recipe protocol
if holding["amp"] is not None and holding["amp"] != default_holding:
holdings.append(holding["amp"])
hold_step_delay = holding["delay"]
hold_step_duration = holding["duration"]
else:
hold_step_delay = 0.0
hold_step_duration = total_duration
return hold_step_delay, hold_step_duration
class NeuronSimulation:
"""Class containing BPO cell, simulation & protocol.
Attributes:
config (dict): dictionary containing configuration data
cell_path (str): path to cell repo. should be "."
total_duration (float): duration of cell simulation (ms)
steps (list of floats): default step stimuli (nA)
hypamps (list of floats): default holding stimuli (nA)
step_stim (float): selected step stimulus (nA)
hypamp (float): selected holding stimulus (nA)
step_delay (float): delay before applying step stimulus (ms)
step_duration (float): duration of step stimulus (ms)
hold_step_delay (float): delay of holding stimulus (ms)
hold_step_duration (float): duration of holding stimulus (ms)
available_pre_mtypes (dict): all synapses pre_mtypes
{mtypeidx: mtype_name, ...}
pre_mtypes (list of int): selected pre_mtypes to run
[mtypeidx, ...]
netstim_params (dict): netstim parameters for synapses of each mtype
{mtypeidx:[start, interval, number, noise]}
syn_start (int): default time (ms) at which the synapse starts firing
syn_interval (int): default interval (ms) between two synapse firing
syn_nmb_of_spikes (int): default number of synapse firing
syn_noise (int): default synapse noise
protocol (ephys.protocols.SweepProtocol): BluePyOpt-based Protocol
cell (CellModelCustom): BluePyOpt-based cell
release_params (dict): optimised cell parameters to fill in
the cell's free parameters
sim (ephys.simulators.NrnSimulator): BluePyOpt simulator
can access neuron data from it
syn_display_data (dict): synapse data (position and type) for display
syn_display_data[pre_mtype] = [x,y,z,type],
type=0 if inhib, type=1 if excit
"""
def __init__(self, config_path="config/config_allsteps.ini"):
"""Constructor. Load default params from config file.
Args:
config_path (str):path to the config file
"""
# load config file
self.config = load_config(config_path=config_path)
self.cell_path = self.config.get("Paths", "memodel_dir")
# get default params
self.load_protocol_params()
self.load_synapse_params()
# uninstantiated params
self.protocol = None
self.cell = None
self.release_params = None
self.sim = None
self.syn_display_data = None
def load_protocol_params(
self,
total_duration=3000,
step_delay=700,
step_duration=2000,
hold_step_delay=0,
hold_step_duration=3000,
default_step=0,
default_holding=0,
):
"""Load default protocol params.
Args:
total_duration (float): default value for duration of cell simulation (ms)
if no StepProtocol is found
step_delay (float): default value for delay before applying step stimulus (ms)
if no StepProtocol is found
step_duration (float): default value for duration of step stimulus (ms)
if no StepProtocol is found
hold_step_delay (float): default value for delay of holding stimulus (ms)
if no StepProtocol is found
hold_step_duration (float): default value for duration of holding stimulus (ms)
if no StepProtocol is found
default_step (float): default value for custom step amplitude (nA)
default_holding (float): default value for custom holding amplitude (nA)
"""
prot_path = self.config.get("Paths", "prot_path")
with open(prot_path, "r", encoding="utf-8") as protocol_file:
protocol_data = json.load(protocol_file)
if "__comment" in protocol_data:
del protocol_data["__comment"]
# list of all steps and hold amps found in all stepprotocols in prot file
steps = []
holdings = []
for prot_data in protocol_data.values():
# update default delays / durations and update steps and holdings lists
if prot_data["type"] == "StepProtocol":
total_duration, step_delay, step_duration = get_step_data(
steps=steps,
step=prot_data["stimuli"]["step"],
default_step=default_step,
)
hold_step_delay, hold_step_duration = get_holding_data(
holdings, prot_data["stimuli"], total_duration, default_holding
)
self.total_duration = total_duration
# filter duplicates (dict preserves order for py37+)
self.steps = list(dict.fromkeys(steps))
self.hypamps = list(dict.fromkeys(holdings))
# set default values for custom entry
self.step_stim = default_step
self.hypamp = default_holding
self.step_delay = step_delay
self.step_duration = step_duration
self.hold_step_delay = hold_step_delay
self.hold_step_duration = hold_step_duration
def load_synapse_params(
self, syn_start=0, syn_interval=0, syn_nmb_of_spikes=0, syn_noise=0
):
"""Load default synapse params.
Args:
syn_start (int): default time (ms) at which the synapse starts firing
syn_interval (int): default interval (ms) between two synapse firing
syn_nmb_of_spikes (int): default number of synapse firing
syn_noise (int): default synapse noise
"""
# mtypes to be chosen from {mtypeidx: mtype_name, ...}
self.available_pre_mtypes = self.load_available_pre_mtypes()
# mtypes to be loaded [mtypeidx, ...]
self.pre_mtypes = []
# synapse netstim param depending on mtype {mtypeidx:[start, interval, number, noise]}
self.netstim_params = {}
# default synapse params
self.syn_start = syn_start
self.syn_interval = syn_interval
self.syn_nmb_of_spikes = syn_nmb_of_spikes
self.syn_noise = syn_noise
def load_available_pre_mtypes(self):
"""Load the list of pre mtype cells to which are connected the synapses.
Returns:
dict: mtypes of cells connected to the synapses
"""
mtype_path = os.path.join(
self.config.get("Paths", "syn_dir"),
self.config.get("Paths", "syn_mtype_map"),
)
with open(mtype_path, "r", encoding="utf-8") as mtype_file:
raw_mtypes = mtype_file.readlines()
# mtypes[id] = m-type name
mtypes = {}
for line in raw_mtypes:
line = line.rstrip().split()
if line:
mtypes[int(line[0])] = line[1]
return mtypes
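    # Illustrative sketch (not part of the original file): the mtype map file
    # parsed above is assumed to contain whitespace-separated "index name"
    # pairs, one per line, e.g. (hypothetical mtype names):
    #
    #     0 L1_DAC
    #     1 L23_PC
    #
    # which load_available_pre_mtypes() returns as {0: "L1_DAC", 1: "L23_PC"}.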
def get_syn_stim(self):
"""Create syanpse stimuli.
Returns:
emodelrunner.synapses.stimuli.NrnNetStimStimulusCustom: synapse stimuli
"""
if self.pre_mtypes:
syn_locs = get_syn_locs(self.cell)
syn_total_duration = self.total_duration
return NrnNetStimStimulusCustom(syn_locs, syn_total_duration)
return None
def load_protocol(self, protocol_name="protocol"):
"""Load BPO protocol.
Args:
protocol_name (str): protocol name to use in BluePyOpt classes.
Does not have an effect on the simulation
"""
syn_stim = self.get_syn_stim()
soma_loc = ephys.locations.NrnSeclistCompLocation(
name="soma", seclist_name="somatic", sec_index=0, comp_x=0.5
)
rec = RecordingCustom(name=protocol_name, location=soma_loc, variable="v")
# create step stimulus
stim = ephys.stimuli.NrnSquarePulse(
step_amplitude=self.step_stim,
step_delay=self.step_delay,
step_duration=self.step_duration,
location=soma_loc,
total_duration=self.total_duration,
)
# create holding stimulus
hold_stim = ephys.stimuli.NrnSquarePulse(
step_amplitude=self.hypamp,
step_delay=self.hold_step_delay,
step_duration=self.hold_step_duration,
location=soma_loc,
total_duration=self.total_duration,
)
# create protocol
stims = [stim, hold_stim]
if syn_stim is not None:
stims.append(syn_stim)
self.protocol = ephys.protocols.SweepProtocol(
protocol_name, stims, [rec], False
)
def create_cell_custom(self):
"""Create a cell.
Returns:
emodelrunner.cell.CellModelCustom: cell model
"""
# pylint: disable=too-many-locals
emodel = self.config.get("Cell", "emodel")
gid = self.config.getint("Cell", "gid")
# load mechanisms
unopt_params_path = self.config.get("Paths", "unoptimized_params_path")
mechs = load_mechanisms(unopt_params_path)
# add synapses mechs
seed = self.config.getint("Synapses", "seed")
rng_settings_mode = self.config.get("Synapses", "rng_settings_mode")
syn_data_path = os.path.join(
self.config.get("Paths", "syn_dir"),
self.config.get("Paths", "syn_data_file"),
)
syn_conf_path = os.path.join(
self.config.get("Paths", "syn_dir"),
self.config.get("Paths", "syn_conf_file"),
)
# always load synapse data for synapse display.
# -> do not need to reload syn data each time user toggles synapse checkbox
mechs += [
load_syn_mechs(
seed,
rng_settings_mode,
syn_data_path,
syn_conf_path,
self.pre_mtypes,
self.netstim_params,
)
]
# load parameters
params = load_unoptimized_parameters(
unopt_params_path,
v_init=self.config.getfloat("Cell", "v_init"),
celsius=self.config.getfloat("Cell", "celsius"),
)
# load morphology
morph_config = get_morph_args(self.config)
morph = create_morphology(morph_config, self.config.package_type)
# create cell
cell = CellModelCustom(
name=emodel,
morph=morph,
mechs=mechs,
params=params,
gid=gid,
)
return cell
def load_cell_sim(self):
"""Load BPO cell & simulation."""
self.cell = self.create_cell_custom()
self.release_params = get_release_params(self.config)
self.sim = ephys.simulators.NrnSimulator(
dt=self.config.getfloat("Sim", "dt"), cvode_active=False
)
def load_synapse_display_data(self):
"""Load dict containing x,y,z of each synapse & inhib/excit."""
# self.syn_display_data[pre_mtype] = [x,y,z,type], type=0 if inhib, type=1 if excit
self.syn_display_data = {}
for key in self.available_pre_mtypes:
self.syn_display_data[key] = []
for mech in self.cell.mechanisms:
if hasattr(mech, "pprocesses"):
for syn in mech.synapses_data:
pre_mtype = syn["pre_mtype"]
seg_pos = syn["seg_x"]
                    # check if a synapse of the same mtype already has the same position
                    # and add the synapse only if a new position has to be displayed
syn_section = mech.get_cell_section_for_synapse(
syn, self.cell.icell
)
syn_display_data = get_pos_and_color(
syn_section, seg_pos, syn["synapse_type"]
)
if (
syn_display_data is not None
and syn_display_data not in self.syn_display_data[pre_mtype]
):
self.syn_display_data[pre_mtype].append(syn_display_data)
def instantiate(self):
"""Instantiate cell, simulation & protocol."""
self.cell.freeze(self.release_params)
self.cell.instantiate(sim=self.sim)
self.protocol.instantiate(sim=self.sim, icell=self.cell.icell)
self.sim.neuron.h.tstop = self.protocol.total_duration
self.sim.neuron.h.stdinit()
def destroy(self):
"""Destroy cell & protocol."""
self.protocol.destroy(sim=self.sim)
self.cell.destroy(sim=self.sim)
self.cell.unfreeze(self.release_params.keys())
def get_voltage(self):
"""Returns voltage response.
Returns:
a tuple containing
- ndarray: the time response
- ndarray: the voltage response
"""
responses = {
recording.name: recording.response for recording in self.protocol.recordings
}
key = list(responses.keys())[0]
resp = responses[key]
return np.array(resp["time"]), np.array(resp["voltage"])
| 36.090211 | 98 | 0.615008 |
4a1aa9563c7f3cd2433d12e4fa01e3158f23ead2
| 7,979 |
py
|
Python
|
serious_django_services/__init__.py
|
serioeseGmbH/serious-django-services
|
001d73bc9e85e0ae1c3acdbcb30ca911dcb1a093
|
[
"MIT"
] | 4 |
2019-02-20T09:32:45.000Z
|
2021-07-25T19:05:40.000Z
|
serious_django_services/__init__.py
|
serioeseGmbH/serious-django-services
|
001d73bc9e85e0ae1c3acdbcb30ca911dcb1a093
|
[
"MIT"
] | 5 |
2018-12-22T10:51:15.000Z
|
2019-08-21T13:25:49.000Z
|
serious_django_services/__init__.py
|
serioeseGmbH/serious-django-services
|
001d73bc9e85e0ae1c3acdbcb30ca911dcb1a093
|
[
"MIT"
] | null | null | null |
from abc import ABC, ABCMeta
import collections.abc
from django.contrib.auth import get_user_model
from django.core.exceptions import ImproperlyConfigured, PermissionDenied, \
ValidationError
from django.forms.models import model_to_dict
from django.utils.translation import gettext as _
class ServiceMetaclass(ABCMeta):
def __new__(mcls, name, *args, **kwargs):
cls = super(ServiceMetaclass, mcls).__new__(mcls, name, *args, **kwargs)
if cls.__base__ == ABC:
return cls
if not name.endswith('Service'):
raise ImproperlyConfigured(
"A Service subclass's name must end with 'Service'."
)
svc_excs = getattr(cls, 'service_exceptions')
if svc_excs is None or\
not isinstance(svc_excs, tuple) or\
not all(type(c) is type and issubclass(c, Exception) for c in svc_excs):
raise ImproperlyConfigured(
"Defined a service without an `service_exceptions` property. "
"Define a `service_exceptions` property which should be a tuple "
"of subclasses of Exception, and enumerates all service-specific "
"exceptions that a service can throw."
)
cls.exceptions = tuple(cls.service_exceptions or []) +\
tuple(cls.base_exceptions or [])
return cls
class Service(ABC, metaclass=ServiceMetaclass):
base_exceptions = (PermissionDenied,)
@classmethod
def require_permissions(cls, user, permissions, obj=None):
"""
Checks if:
1. the given user is signed in (i.e. not None and not anonymous)
2. the user has a certain set of permissions
and raises a PermissionDenied exception otherwise.
:param user: The user to check
:param permissions: One permission or a list of permissions that the
user is expected to have. Should all have the Permission type
defined in this file.
        :param obj: If obj is passed in, this method won't check permissions on the model,
            but on the specific object. (Object-level checks only work with an external
            library like django-guardian.)
"""
cls.require_signed_in(user)
        if not isinstance(permissions, collections.abc.Iterable):
permissions = [permissions]
for permission in permissions:
if not user.has_perm(permission, obj=obj):
raise PermissionDenied(
_("You do not have permission '{perm}'.").format(
perm=str(permission)
)
)
@classmethod
def require_signed_in(cls, user):
"""
Checks if the given user is signed in, and raises a PermissionDenied
exception otherwise.
:param user: The user to check
"""
if not isinstance(user, get_user_model()) or user.is_anonymous:
raise PermissionDenied(_("You are not logged in."))
class CRUDMixinMetaclass(ServiceMetaclass):
    def __new__(mcls, name, *args, **kwargs):
        cls = super().__new__(mcls, name, *args, **kwargs)
if cls.__base__ == ABC:
return cls
mcls.check_required_config_params(cls)
return cls
@staticmethod
def check_required_config_params(cls):
"""
Checks if the config parameters for the mixin have been set.
:raise ImproperlyConfigured: If at least one of the required_params is None
:return: None
"""
required_params = ['model',
'create_form',
'update_form']
for config_param in required_params:
config_param_value = getattr(cls, config_param, None)
if config_param_value is None:
raise ImproperlyConfigured(f'{config_param} has to be set in'
' the class using the Mixin!')
class CRUDMixin(ABC, metaclass=CRUDMixinMetaclass):
"""
A Mixin to provide CRUD operations.
You must define the config parameters on the class using the Mixin:
model: The model you want to perform the operations on
create_form: A ModelForm to create a new model instance
update_form: A ModelForm to update a model instance.
Usage: Implement the operation in your service class and call the corresponding
_operation from this Mixin.
Eg.:
```
class UserProfileService(Service, CRUDMixin):
model = UserProfile
create_form = CreateUserProfileForm
update_form = UpdateUserProfileForm
@classmethod
def create(cls, name: str, profile_picture: ImageFile):
data = {'name': name}
file_data = {'profile_picture': profile_picture}
return cls._create(data, file_data)
```
"""
@classmethod
def _create(cls, data: dict, file_data: dict = None):
"""
Create an instance of cls.model and save it to the db.
:param data: Data of the instance to be created
:param file_data: File data of the instance to be created
:raise ValidationError: If the form validations fails
:return: The newly created instance of cls.model
"""
data = {k: v for k, v in data.items() if v != NotPassed}
file_data = file_data or {}
file_data = {k: v for k, v in file_data.items() if v != NotPassed}
bound_form = cls.create_form(data, file_data)
if bound_form.is_valid():
return bound_form.save()
else:
raise ValidationError(bound_form.errors)
@classmethod
def _retrieve(cls, id: int):
"""
Retrieve one single instance of cls.model from the db.
:param id: ID of the instance to be retrieved
:raise ObjectDoesNotExist: If an instance corresponding with id does not exist
:raise ValueError: If id is not int
:return: The instance of cls.model
"""
if not isinstance(id, int):
raise ValueError("id must be int")
return cls.model.objects.get(id=id)
@classmethod
def _update(cls, id: int, data: dict, file_data: dict = None):
"""
Update an instance of cls.model and save it to the db.
:param id: ID of the instance to be updated
:param data: Data to update on the instance
:param file_data: File data to update on the instance
:raise ValidationError: If the form validations fails
:raise ObjectDoesNotExist: If an instance corresponding with id does not exist
:raise TypeError: If no id is passed
:return: The updated instance of cls.model
"""
data = {k: v for k, v in data.items() if v != NotPassed}
file_data = file_data or {}
file_data = {k: v for k, v in file_data.items() if v != NotPassed}
model_instance_to_be_updated = cls.model.objects.get(id=id)
updated_model_data = {**model_to_dict(model_instance_to_be_updated), **data}
updated_model_file_data = {**model_to_dict(model_instance_to_be_updated), **(file_data if file_data else {})}
bound_form = cls.update_form(updated_model_data, updated_model_file_data, instance=model_instance_to_be_updated)
if bound_form.is_valid():
return bound_form.save()
else:
raise ValidationError(bound_form.errors)
@classmethod
def _delete(cls, id: int):
"""
Delete an instance of cls.model from the db.
:param id: ID of the instance to be deleted
:raise ObjectDoesNotExist: If an instance corresponding with id does not exist
:return True: If the instance was successfully deleted
"""
model_instance_to_be_deleted = cls.model.objects.get(id=id)
model_instance_to_be_deleted.delete()
return True
class NotPassed:
"""
A default value for named args not being passed to the function.
"""
pass
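# Hedged sketch (not part of the original file): how NotPassed combines with
# CRUDMixin._update so that only explicitly passed fields are touched. The
# UserProfileService/field names mirror the hypothetical example already shown
# in the CRUDMixin docstring.
#
#     @classmethod
#     def update(cls, id: int, name=NotPassed, profile_picture=NotPassed):
#         # Arguments left as NotPassed are filtered out by _update(), so the
#         # existing values on the instance are kept unchanged.
#         return cls._update(id, {"name": name},
#                            {"profile_picture": profile_picture})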
| 35.30531 | 120 | 0.633287 |
4a1aa992ef879cfca1896a9b185e5c805eb1a074
| 20,185 |
py
|
Python
|
vCenterShell/commands/command_orchestrator.py
|
doppleware/vCenterShell_test
|
c91870169c5780e5c561b2ae682991af20257c4f
|
[
"Apache-2.0"
] | null | null | null |
vCenterShell/commands/command_orchestrator.py
|
doppleware/vCenterShell_test
|
c91870169c5780e5c561b2ae682991af20257c4f
|
[
"Apache-2.0"
] | null | null | null |
vCenterShell/commands/command_orchestrator.py
|
doppleware/vCenterShell_test
|
c91870169c5780e5c561b2ae682991af20257c4f
|
[
"Apache-2.0"
] | null | null | null |
from logging import getLogger
import jsonpickle
import time
from pyVim.connect import SmartConnect, Disconnect
from common.cloud_shell.driver_helper import CloudshellDriverHelper
from common.cloud_shell.resource_remover import CloudshellResourceRemover
from common.model_factory import ResourceModelParser
from common.utilites.command_result import set_command_result
from common.utilites.common_name import generate_unique_name
from common.vcenter.ovf_service import OvfImageDeployerService
from common.vcenter.task_waiter import SynchronousTaskWaiter
from common.vcenter.vmomi_service import pyVmomiService
from common.wrappers.command_wrapper import CommandWrapper
from models.DeployDataHolder import DeployDataHolder
from models.DriverResponse import DriverResponse, DriverResponseRoot
from models.GenericDeployedAppResourceModel import GenericDeployedAppResourceModel
from models.VMwarevCenterResourceModel import VMwarevCenterResourceModel
from vCenterShell.commands.connect_dvswitch import VirtualSwitchConnectCommand
from vCenterShell.commands.connect_orchestrator import ConnectionCommandOrchestrator
from vCenterShell.commands.deploy_vm import DeployCommand
from vCenterShell.commands.destroy_vm import DestroyVirtualMachineCommand
from vCenterShell.commands.disconnect_dvswitch import VirtualSwitchToMachineDisconnectCommand
from vCenterShell.commands.power_manager_vm import VirtualMachinePowerManagementCommand
from vCenterShell.commands.refresh_ip import RefreshIpCommand
from vCenterShell.network.dvswitch.creator import DvPortGroupCreator
from vCenterShell.network.dvswitch.name_generator import DvPortGroupNameGenerator
from vCenterShell.network.vlan.factory import VlanSpecFactory
from vCenterShell.network.vlan.range_parser import VLanIdRangeParser
from vCenterShell.network.vnic.vnic_service import VNicService
from vCenterShell.vm.deploy import VirtualMachineDeployer
from vCenterShell.vm.dvswitch_connector import VirtualSwitchToMachineConnector
from vCenterShell.vm.portgroup_configurer import VirtualMachinePortGroupConfigurer
from vCenterShell.vm.vnic_to_network_mapper import VnicToNetworkMapper
class CommandOrchestrator(object):
def __init__(self, context):
"""
        Initialize the driver session. This function is called every time a new instance of the driver is created;
        the driver is bootstrapped here.
:param context: models.QualiDriverModels.InitCommandContext
"""
self.cs_helper = CloudshellDriverHelper()
pv_service = pyVmomiService(SmartConnect, Disconnect)
synchronous_task_waiter = SynchronousTaskWaiter()
self.resource_model_parser = ResourceModelParser()
port_group_name_generator = DvPortGroupNameGenerator()
self.vc_data_model = self.resource_model_parser.convert_to_resource_model(context.resource,
VMwarevCenterResourceModel)
vnic_to_network_mapper = VnicToNetworkMapper(quali_name_generator=port_group_name_generator)
resource_remover = CloudshellResourceRemover()
ovf_service = OvfImageDeployerService(self.vc_data_model.ovf_tool_path, getLogger('OvfImageDeployerService'))
vm_deployer = VirtualMachineDeployer(pv_service=pv_service,
name_generator=generate_unique_name,
ovf_service=ovf_service)
dv_port_group_creator = DvPortGroupCreator(pyvmomi_service=pv_service,
synchronous_task_waiter=synchronous_task_waiter)
virtual_machine_port_group_configurer = \
VirtualMachinePortGroupConfigurer(pyvmomi_service=pv_service,
synchronous_task_waiter=synchronous_task_waiter,
vnic_to_network_mapper=vnic_to_network_mapper,
vnic_service=VNicService())
virtual_switch_to_machine_connector = VirtualSwitchToMachineConnector(dv_port_group_creator,
virtual_machine_port_group_configurer)
# Command Wrapper
self.command_wrapper = CommandWrapper(logger=getLogger, pv_service=pv_service)
# Deploy Command
self.deploy_command = DeployCommand(deployer=vm_deployer)
# Virtual Switch Revoke
self.virtual_switch_disconnect_command = \
VirtualSwitchToMachineDisconnectCommand(
pyvmomi_service=pv_service,
port_group_configurer=virtual_machine_port_group_configurer,
default_network=self.vc_data_model.holding_network)
# Virtual Switch Connect
virtual_switch_connect_command = \
VirtualSwitchConnectCommand(
pv_service=pv_service,
virtual_switch_to_machine_connector=virtual_switch_to_machine_connector,
dv_port_group_name_generator=DvPortGroupNameGenerator(),
vlan_spec_factory=VlanSpecFactory(),
vlan_id_range_parser=VLanIdRangeParser(),
logger=getLogger('VirtualSwitchConnectCommand'))
self.connection_orchestrator = ConnectionCommandOrchestrator(self.vc_data_model,
virtual_switch_connect_command,
self.virtual_switch_disconnect_command)
# Destroy VM Command
self.destroy_virtual_machine_command = \
DestroyVirtualMachineCommand(pv_service=pv_service,
resource_remover=resource_remover,
disconnector=self.virtual_switch_disconnect_command)
# Power Command
self.vm_power_management_command = \
VirtualMachinePowerManagementCommand(pyvmomi_service=pv_service,
synchronous_task_waiter=synchronous_task_waiter)
# Refresh IP command
self.refresh_ip_command = RefreshIpCommand(pyvmomi_service=pv_service,
resource_model_parser=ResourceModelParser())
def connect_bulk(self, context, request):
session = self.cs_helper.get_session(context.connectivity.server_address, context.connectivity.admin_auth_token,
context.reservation.domain)
connection_details = self.cs_helper.get_connection_details(session, self.vc_data_model, context.resource)
results = self.command_wrapper.execute_command_with_connection(connection_details,
self.connection_orchestrator.connect_bulk,
request)
driver_response = DriverResponse()
driver_response.actionResults = results
driver_response_root = DriverResponseRoot()
driver_response_root.driverResponse = driver_response
return set_command_result(result=driver_response_root, unpicklable=False)
def deploy_from_template(self, context, deploy_data):
"""
        Deploy From Template Command: deploys a VM from a vCenter template.
:param models.QualiDriverModels.ResourceCommandContext context: the context of the command
:param str deploy_data: represent a json of the parameters, example: {
"template_model": {
"vCenter_resource_name": "QualiSB",
"vm_folder": "QualiSB/Raz",
"template_name": "2"
},
"vm_cluster_model": {
"cluster_name": "QualiSB Cluster",
"resource_pool": "IT"
},
"datastore_name": "eric ds cluster",
"power_on": False
}
:return str deploy results
"""
# get connection details
session = self.cs_helper.get_session(context.connectivity.server_address, context.connectivity.admin_auth_token,
context.reservation.domain)
connection_details = self.cs_helper.get_connection_details(session, self.vc_data_model,
context.resource)
# get command parameters from the environment
data = jsonpickle.decode(deploy_data)
data_holder = DeployDataHolder(data)
# execute command
result = self.command_wrapper.execute_command_with_connection(
connection_details,
self.deploy_command.execute_deploy_from_template,
data_holder)
return set_command_result(result=result, unpicklable=False)
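    # Hedged usage sketch (not part of the original file): building the
    # `deploy_data` argument for deploy_from_template() from the docstring
    # example above; `context` is assumed to be a valid ResourceCommandContext
    # and `orchestrator` an instance of this class.
    #
    #     deploy_data = jsonpickle.encode({
    #         "template_model": {"vCenter_resource_name": "QualiSB",
    #                            "vm_folder": "QualiSB/Raz",
    #                            "template_name": "2"},
    #         "vm_cluster_model": {"cluster_name": "QualiSB Cluster",
    #                              "resource_pool": "IT"},
    #         "datastore_name": "eric ds cluster",
    #         "power_on": False,
    #     })
    #     result_json = orchestrator.deploy_from_template(context, deploy_data)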
def deploy_from_image(self, context, deploy_data):
"""
        Deploy From Image Command: deploys a VM from an OVF image.
:param models.QualiDriverModels.ResourceCommandContext context: the context of the command
:param str deploy_data: represent a json of the parameters, example: {
"image_url": "c:\image.ovf" or
"\\nas\shared\image.ovf" or
"http://192.168.65.88/ovf/Debian%2064%20-%20Yoav.ovf",
"cluster_name": "QualiSB Cluster",
"resource_pool": "LiverPool",
"datastore_name": "eric ds cluster",
"datacenter_name": "QualiSB"
"power_on": False
"app_name": "appName"
"user_arguments": ["--compress=9", " --schemaValidate", "--etc"]
}
:return str deploy results
"""
session = self.cs_helper.get_session(context.connectivity.server_address,
context.connectivity.admin_auth_token,
context.reservation.domain)
connection_details = self.cs_helper.get_connection_details(session, self.vc_data_model, context.resource)
# get command parameters from the environment
data = jsonpickle.decode(deploy_data)
data_holder = DeployDataHolder(data)
# execute command
result = self.command_wrapper.execute_command_with_connection(
connection_details,
self.deploy_command.execute_deploy_from_image,
data_holder,
connection_details)
return set_command_result(result=result, unpicklable=False)
# remote command
def disconnect_all(self, context, ports):
"""
        Disconnect All Command: reassigns all the vNICs on the VM to the default network,
        which marks them as disconnected.
:param models.QualiDriverModels.ResourceRemoteCommandContext context: the context the command runs on
:param list[string] ports: the ports of the connection between the remote resource and the local resource, NOT IN USE!!!
"""
# get connection details
session = self.cs_helper.get_session(context.connectivity.server_address,
context.connectivity.admin_auth_token,
context.remote_reservation.domain)
connection_details = self.cs_helper.get_connection_details(session, self.vc_data_model, context.resource)
resource_details = self._parse_remote_model(context)
# execute command
res = self.command_wrapper.execute_command_with_connection(
connection_details,
self.virtual_switch_disconnect_command.disconnect_all,
resource_details.vm_uuid)
return set_command_result(result=res, unpicklable=False)
# remote command
def disconnect(self, context, ports, network_name):
"""
        Disconnect Command: disconnects a specific network that is assigned to the VM;
        the command assigns the default network to all the vNICs that are attached to the given network.
:param models.QualiDriverModels.ResourceRemoteCommandContext context: the context the command runs on
:param str network_name: the name of the network to disconnect from
:param list[string] ports: the ports of the connection between the remote resource and the local resource, NOT IN USE!!!
"""
# get connection details
session = self.cs_helper.get_session(context.connectivity.server_address,
context.connectivity.admin_auth_token,
context.remote_reservation.domain)
connection_details = self.cs_helper.get_connection_details(session, self.vc_data_model, context.resource)
resource_details = self._parse_remote_model(context)
# execute command
res = self.command_wrapper.execute_command_with_connection(
connection_details,
self.virtual_switch_disconnect_command.disconnect,
resource_details.vm_uuid,
network_name)
return set_command_result(result=res, unpicklable=False)
# remote command
def destroy_vm(self, context, ports):
"""
        Destroy VM Command: destroys the VM and removes the resource.
:param models.QualiDriverModels.ResourceRemoteCommandContext context: the context the command runs on
:param list[string] ports: the ports of the connection between the remote resource and the local resource, NOT IN USE!!!
"""
# get connection details
session = self.cs_helper.get_session(context.connectivity.server_address, context.connectivity.admin_auth_token,
context.remote_reservation.domain)
connection_details = self.cs_helper.get_connection_details(session, self.vc_data_model, context.resource)
resource_details = self._parse_remote_model(context)
# execute command
res = self.command_wrapper.execute_command_with_connection(
connection_details,
self.destroy_virtual_machine_command.destroy,
session,
resource_details.vm_uuid,
resource_details.fullname)
return set_command_result(result=res, unpicklable=False)
# remote command
def refresh_ip(self, context, ports):
"""
        Refresh IP Command: refreshes the IP of the VM and updates it on the resource.
:param models.QualiDriverModels.ResourceRemoteCommandContext context: the context the command runs on
:param list[string] ports: the ports of the connection between the remote resource and the local resource, NOT IN USE!!!
"""
# get connection details
session = self.cs_helper.get_session(context.connectivity.server_address,
context.connectivity.admin_auth_token,
context.remote_reservation.domain)
connection_details = self.cs_helper.get_connection_details(session, self.vc_data_model, context.resource)
resource_details = self._parse_remote_model(context)
# execute command
res = self.command_wrapper.execute_command_with_connection(connection_details,
self.refresh_ip_command.refresh_ip,
session,
resource_details.vm_uuid,
resource_details.fullname,
self.vc_data_model.holding_network)
return set_command_result(result=res, unpicklable=False)
# remote command
def power_off(self, context, ports):
"""
Powers off the remote vm
:param models.QualiDriverModels.ResourceRemoteCommandContext context: the context the command runs on
:param list[string] ports: the ports of the connection between the remote resource and the local resource, NOT IN USE!!!
"""
return self._power_command(context, ports, self.vm_power_management_command.power_off)
# remote command
def power_on(self, context, ports):
"""
Powers on the remote vm
:param models.QualiDriverModels.ResourceRemoteCommandContext context: the context the command runs on
:param list[string] ports: the ports of the connection between the remote resource and the local resource, NOT IN USE!!!
"""
return self._power_command(context, ports, self.vm_power_management_command.power_on)
# remote command
def power_cycle(self, context, ports, delay):
"""
        Performs a restart of the VM.
:param models.QualiDriverModels.ResourceRemoteCommandContext context: the context the command runs on
:param list[string] ports: the ports of the connection between the remote resource and the local resource, NOT IN USE!!!
        :param number delay: the time (in seconds) to wait between powering off and powering back on
"""
self.power_off(context, ports)
time.sleep(float(delay))
return self.power_on(context, ports)
def _power_command(self, context, ports, command):
# get connection details
session = self.cs_helper.get_session(context.connectivity.server_address,
context.connectivity.admin_auth_token,
context.remote_reservation.domain)
connection_details = self.cs_helper.get_connection_details(session, self.vc_data_model, context.resource)
resource_details = self._parse_remote_model(context)
# execute command
res = self.command_wrapper.execute_command_with_connection(connection_details,
command,
session,
resource_details.vm_uuid,
resource_details.fullname)
return set_command_result(result=res, unpicklable=False)
def _parse_remote_model(self, context):
"""
parse the remote resource model and adds its full name
:type context: models.QualiDriverModels.ResourceRemoteCommandContext
"""
if not context.remote_endpoints:
            raise Exception('no remote resources found in context: {0}'.format(
                jsonpickle.encode(context, unpicklable=False)))
resource = context.remote_endpoints[0]
dictionary = jsonpickle.decode(resource.app_context.deployed_app_json)
holder = DeployDataHolder(dictionary)
app_resource_detail = GenericDeployedAppResourceModel()
app_resource_detail.vm_uuid = holder.vmdetails.uid
app_resource_detail.cloud_provider = context.resource.fullname
app_resource_detail.fullname = resource.fullname
return app_resource_detail
def power_on_not_roemote(self, context, vm_uuid, resource_fullname):
# get connection details
session = self.cs_helper.get_session(context.connectivity.server_address, context.connectivity.admin_auth_token,
context.reservation.domain)
connection_details = self.cs_helper.get_connection_details(session, self.vc_data_model, context.resource)
res = self.command_wrapper.execute_command_with_connection(connection_details,
self.vm_power_management_command.power_on,
session,
vm_uuid,
resource_fullname)
return set_command_result(result=res, unpicklable=False)
| 53.399471 | 128 | 0.649938 |
4a1aa99ac4c55185926bce188b8f33c2499056e8
| 7,437 |
py
|
Python
|
msmarco/rnet/config_msm.py
|
burglarhobbit/R-net
|
fd7c2037441ed005b1eb6b0dcc812f2b74deef38
|
[
"MIT"
] | null | null | null |
msmarco/rnet/config_msm.py
|
burglarhobbit/R-net
|
fd7c2037441ed005b1eb6b0dcc812f2b74deef38
|
[
"MIT"
] | null | null | null |
msmarco/rnet/config_msm.py
|
burglarhobbit/R-net
|
fd7c2037441ed005b1eb6b0dcc812f2b74deef38
|
[
"MIT"
] | null | null | null |
import os
import tensorflow as tf
from base64 import b64decode as bd
from prepro_msm import prepro
from analyze_dataset import prepro_
from main import train, test
flags = tf.flags
home = os.path.expanduser("~")
hdd2 = "/media/hdd2"
if os.path.isdir(hdd2):
path = hdd2
else:
path = home
train_file = os.path.join(path, "snetP_data", "data", "msmarco", "train_v1.1.json")
dev_file = os.path.join(path, "snetP_data", "data", "msmarco", "dev_v1.1.json")
test_file = os.path.join(path, "snetP_data", "data", "msmarco", "dev_v1.1.json")
glove_file = os.path.join(path, "snetP_data", "data", "glove", "glove.840B.300d.txt")
#train_file = os.path.join(hdd2, "snetP_data", "data", "msmarco", "train_v1.1.json")
#dev_file = os.path.join(hdd2, "snetP_data", "data", "msmarco", "dev_v1.1.json")
#test_file = os.path.join(hdd2, "snetP_data", "data", "msmarco", "test_public_v1.1.json")
#glove_file = os.path.join(hdd2, "snetP_data", "data", "glove", "glove.840B.300d.txt")
#target_dir = os.path.join(hdd2, "snetP_data", "snet_data")
#target_dir = "data"
target_dir = os.path.join(path, "snetP_data", "rnet", "msmarco")
log_dir = os.path.join(path, "snetP_data", "rnet", "msmarco", "log", "event")
save_dir = os.path.join(path, "snetP_data", "rnet", "msmarco", "log", "model")
answer_dir = os.path.join(path, "snetP_data", "rnet", "msmarco","log", "answer")
train_record_file = os.path.join(target_dir, "train.tfrecords")
dev_record_file = os.path.join(target_dir, "dev.tfrecords")
test_record_file = os.path.join(target_dir, "test.tfrecords")
word_emb_file = os.path.join(target_dir, "word_emb.json")
char_emb_file = os.path.join(target_dir, "char_emb.json")
train_eval = os.path.join(target_dir, "train_eval.json")
dev_eval = os.path.join(target_dir, "dev_eval.json")
test_eval = os.path.join(target_dir, "test_eval.json")
dev_meta = os.path.join(target_dir, "dev_meta.json")
test_meta = os.path.join(target_dir, "test_meta.json")
answer_file = os.path.join(answer_dir, "answer.json")
if not os.path.exists(target_dir):
os.makedirs(target_dir)
if not os.path.exists(log_dir):
os.makedirs(log_dir)
if not os.path.exists(save_dir):
os.makedirs(save_dir)
if not os.path.exists(answer_dir):
os.makedirs(answer_dir)
flags.DEFINE_string("mode", "train", "Running mode train/debug/test")
flags.DEFINE_string("target_dir", target_dir, "Target directory for out data")
flags.DEFINE_string("log_dir", log_dir, "Directory for tf event")
flags.DEFINE_string("save_dir", save_dir, "Directory for saving model")
flags.DEFINE_string("train_file", train_file, "Train source file")
flags.DEFINE_string("dev_file", dev_file, "Dev source file")
flags.DEFINE_string("test_file", test_file, "Test source file")
flags.DEFINE_string("glove_file", glove_file, "Glove source file")
flags.DEFINE_string("train_record_file", train_record_file,
"Out file for train data")
flags.DEFINE_string("dev_record_file", dev_record_file,
"Out file for dev data")
flags.DEFINE_string("test_record_file", test_record_file,
"Out file for test data")
flags.DEFINE_string("word_emb_file", word_emb_file,
"Out file for word embedding")
flags.DEFINE_string("char_emb_file", char_emb_file,
"Out file for char embedding")
flags.DEFINE_string("train_eval_file", train_eval, "Out file for train eval")
flags.DEFINE_string("dev_eval_file", dev_eval, "Out file for dev eval")
flags.DEFINE_string("test_eval_file", test_eval, "Out file for test eval")
flags.DEFINE_string("dev_meta", dev_meta, "Out file for dev meta")
flags.DEFINE_string("test_meta", test_meta, "Out file for test meta")
flags.DEFINE_string("answer_file", answer_file, "Out file for answer")
flags.DEFINE_integer("glove_size", int(2.2e6), "Corpus size for Glove")
flags.DEFINE_integer("glove_dim", 300, "Embedding dimension for Glove")
flags.DEFINE_integer("char_dim", 8, "Embedding dimension for char")
flags.DEFINE_integer("para_limit", 400, "Limit length for paragraph")
flags.DEFINE_integer("ques_limit", 50, "Limit length for question")
flags.DEFINE_integer("test_para_limit", 1000,
"Limit length for paragraph in test file")
flags.DEFINE_integer("test_ques_limit", 100,
"Limit length for question in test file")
flags.DEFINE_integer("char_limit", 16, "Limit length for character")
flags.DEFINE_integer("word_count_limit", -1, "Min count for word")
flags.DEFINE_integer("char_count_limit", -1, "Min count for char")
flags.DEFINE_integer("capacity", 15000, "Batch size of dataset shuffle")
flags.DEFINE_integer("num_threads", 4, "Number of threads in input pipeline")
flags.DEFINE_boolean("use_cudnn", True, "Whether to use cudnn rnn (should be False for CPU)")
flags.DEFINE_boolean("is_bucket", True, "build bucket batch iterator or not")
flags.DEFINE_integer("bucket_range", [40, 401, 40], "the range of bucket")
flags.DEFINE_integer("batch_size", 64, "Batch size")
flags.DEFINE_integer("num_steps", 50000, "Number of steps")
flags.DEFINE_integer("checkpoint", 1000,
"checkpoint to save and evaluate the model")
flags.DEFINE_integer("period", 100, "period to save batch loss")
flags.DEFINE_integer("val_num_batches", 150,
"Number of batches to evaluate the model")
flags.DEFINE_float("init_lr", 0.5, "Initial learning rate for Adadelta")
flags.DEFINE_float("keep_prob", 0.7, "Dropout keep prob in rnn") #0.7
flags.DEFINE_float("ptr_keep_prob", 0.7, "Dropout keep prob for pointer network") #0.7
flags.DEFINE_float("grad_clip", 5.0, "Global Norm gradient clipping rate")
flags.DEFINE_integer("hidden", 75, "Hidden size") #75
flags.DEFINE_integer("char_hidden", 100, "GRU dimention for char")
flags.DEFINE_integer("patience", 3, "Patience for learning rate decay")
flags.DEFINE_string("bd","bd","bd")
def main(_):
config = flags.FLAGS
if config.mode == "train":
train(config)
elif config.mode == "prepro":
prepro(config)
elif config.mode == "analyze":
prepro_(config)
elif config.mode == "debug":
config.num_steps = 2
config.val_num_batches = 1
config.checkpoint = 1
config.period = 1
train(config)
elif config.mode == "test":
if config.use_cudnn:
print("Warning: Due to a known bug in Tensorflow, the parameters of CudnnGRU may not be properly restored.")
test(config)
else:
print("Unknown mode")
exit(0)
def send():
import smtplib
from email.mime.image import MIMEImage
from email.mime.multipart import MIMEMultipart
config = flags.FLAGS
if config.bd == "bd":
return
user = "bhavyapatwa007@gmail.com"
subject = "Train/Dev results on MS-MARCO"
body = "Please find the scores attached"
recipient = [user]
gmail_user = user
gmail_pwd = bd(bd(config.bd))
FROM = user
TO = recipient if type(recipient) is list else [recipient]
SUBJECT = subject
TEXT = body
message = """From: %s\nTo: %s\nSubject: %s\n\n%s
""" % (FROM, ", ".join(TO), SUBJECT, TEXT)
COMMASPACE = ', '
msg = MIMEMultipart()
msg['Subject'] = subject
msg['From'] = user
msg['To'] = COMMASPACE.join(recipient)
msg.preamble = 'asasas'
files = ['dev.png','train.png']
pngfiles = []
for i in files:
pngfiles.append(os.path.join(log_dir,i))
for file in pngfiles:
with open(file, 'rb') as fp:
img = MIMEImage(fp.read())
msg.attach(img)
try:
server = smtplib.SMTP("smtp.gmail.com:587")
server.ehlo()
server.starttls()
server.login(gmail_user, gmail_pwd)
        server.send_message(msg)
server.close()
print('successfully sent the mail')
except:
print("failed to send mail")
if __name__ == "__main__":
tf.app.run()
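# Hedged usage note (not part of the original file): the "mode" flag defined
# above selects the entry point, so the script would typically be invoked as,
# for example (script name taken from this record's path, adjust as needed):
#
#   python config_msm.py --mode prepro   # build tfrecords / embedding files
#   python config_msm.py --mode train
#   python config_msm.py --mode test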
| 38.937173 | 111 | 0.731881 |
4a1aaaf4ba3aa7513d9113157b6702d13a779d61
| 38 |
py
|
Python
|
name.py
|
hakerona/Tutorial
|
7f37299576d0db17c3fbfc0239123633499dfe11
|
[
"MIT"
] | null | null | null |
name.py
|
hakerona/Tutorial
|
7f37299576d0db17c3fbfc0239123633499dfe11
|
[
"MIT"
] | null | null | null |
name.py
|
hakerona/Tutorial
|
7f37299576d0db17c3fbfc0239123633499dfe11
|
[
"MIT"
] | null | null | null |
def name_print(name):
print(name)
| 12.666667 | 21 | 0.684211 |
4a1aab317fb848451c82ed6f179975810bd574aa
| 2,513 |
py
|
Python
|
plugin/formatting.py
|
rictic/LSP
|
7ecade4273e46a9f73523cfcf9584fd32bedc060
|
[
"MIT"
] | null | null | null |
plugin/formatting.py
|
rictic/LSP
|
7ecade4273e46a9f73523cfcf9584fd32bedc060
|
[
"MIT"
] | 1 |
2022-02-26T20:54:56.000Z
|
2022-02-26T20:54:56.000Z
|
plugin/formatting.py
|
isabella232/LSP
|
7ecade4273e46a9f73523cfcf9584fd32bedc060
|
[
"MIT"
] | 1 |
2022-02-26T14:26:58.000Z
|
2022-02-26T14:26:58.000Z
|
import sublime_plugin
from .core.protocol import Request, Range
from .core.url import filename_to_uri
from .core.clients import client_for_view
from .core.configurations import is_supported_view
class LspFormatDocumentCommand(sublime_plugin.TextCommand):
def is_enabled(self):
if is_supported_view(self.view):
client = client_for_view(self.view)
if client and client.has_capability('documentFormattingProvider'):
return True
return False
def run(self, edit):
client = client_for_view(self.view)
if client:
pos = self.view.sel()[0].begin()
params = {
"textDocument": {
"uri": filename_to_uri(self.view.file_name())
},
"options": {
"tabSize": self.view.settings().get("tab_size", 4),
"insertSpaces": True
}
}
request = Request.formatting(params)
client.send_request(
request, lambda response: self.handle_response(response, pos))
def handle_response(self, response, pos):
self.view.run_command('lsp_apply_document_edit',
{'changes': response})
class LspFormatDocumentRangeCommand(sublime_plugin.TextCommand):
def is_enabled(self):
if is_supported_view(self.view):
client = client_for_view(self.view)
if client and client.has_capability('documentRangeFormattingProvider'):
if len(self.view.sel()) == 1:
region = self.view.sel()[0]
if region.begin() != region.end():
return True
return False
def run(self, _):
client = client_for_view(self.view)
if client:
region = self.view.sel()[0]
params = {
"textDocument": {
"uri": filename_to_uri(self.view.file_name())
},
"range": Range.from_region(self.view, region).to_lsp(),
"options": {
"tabSize": self.view.settings().get("tab_size", 4),
"insertSpaces": True
}
}
client.send_request(Request.rangeFormatting(params),
lambda response: self.view.run_command('lsp_apply_document_edit',
{'changes': response}))
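# Hedged note (not part of the original file): per the LSP specification, a
# textDocument/formatting or rangeFormatting response is a list of TextEdit
# objects, shaped roughly like:
#
#   [{"range": {"start": {"line": 0, "character": 0},
#               "end":   {"line": 0, "character": 4}},
#     "newText": "    "}]
#
# That list is what both commands above forward to 'lsp_apply_document_edit'
# as the 'changes' argument.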
| 37.507463 | 97 | 0.537207 |
4a1aac32f0fdeace36028db4d0a162557510b3a7
| 254 |
py
|
Python
|
manage.py
|
barseghyanartur/wagtaildemo
|
7ee38cf379abca98627203ae7dce094535a60bff
|
[
"BSD-3-Clause"
] | 97 |
2016-12-13T05:49:10.000Z
|
2022-03-23T08:08:36.000Z
|
manage.py
|
barseghyanartur/wagtaildemo
|
7ee38cf379abca98627203ae7dce094535a60bff
|
[
"BSD-3-Clause"
] | 14 |
2017-01-07T22:29:32.000Z
|
2019-02-03T16:12:46.000Z
|
manage.py
|
barseghyanartur/wagtaildemo
|
7ee38cf379abca98627203ae7dce094535a60bff
|
[
"BSD-3-Clause"
] | 60 |
2016-12-19T16:49:27.000Z
|
2020-12-16T01:56:28.000Z
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "wagtaildemo.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
| 23.090909 | 75 | 0.775591 |
4a1ab0af437a280edd8ccf24e2acd06c07cc24b3
| 553 |
py
|
Python
|
src/repositories/__init__.py
|
WebisD/chat-irc-protocol
|
6720d1789a366bfd7943b81c7c84cb0941c66e80
|
[
"MIT"
] | null | null | null |
src/repositories/__init__.py
|
WebisD/chat-irc-protocol
|
6720d1789a366bfd7943b81c7c84cb0941c66e80
|
[
"MIT"
] | null | null | null |
src/repositories/__init__.py
|
WebisD/chat-irc-protocol
|
6720d1789a366bfd7943b81c7c84cb0941c66e80
|
[
"MIT"
] | 3 |
2021-06-03T12:27:27.000Z
|
2021-06-14T22:48:36.000Z
|
from .repository_interface import *
from .user_repository import *
from .room_repository import *
from .participants_repository import *
from .message_repository import *
from .room_messages_repository import *
from .file_repository import *
from .words_repository import *
__all__ = (
repository_interface.__all__ +
user_repository.__all__ +
room_repository.__all__ +
participants_repository.__all__ +
message_repository.__all__ +
room_messages_repository.__all__ +
file_repository.__all__ +
words_repository.__all__
)
| 26.333333 | 39 | 0.78481 |
4a1ab2e05b9423646cff773a58bd4e8a34348bb1
| 127,009 |
py
|
Python
|
mrcnn/model.py
|
IgorVetoshev/cardif_project
|
2934c53914962c21392e2ea35d6a6809197eb710
|
[
"MIT"
] | null | null | null |
mrcnn/model.py
|
IgorVetoshev/cardif_project
|
2934c53914962c21392e2ea35d6a6809197eb710
|
[
"MIT"
] | null | null | null |
mrcnn/model.py
|
IgorVetoshev/cardif_project
|
2934c53914962c21392e2ea35d6a6809197eb710
|
[
"MIT"
] | null | null | null |
"""
Mask R-CNN
The main Mask R-CNN model implementation.
Copyright (c) 2017 Matterport, Inc.
Licensed under the MIT License (see LICENSE for details)
"""
import os
import random
import datetime
import re
import math
import logging
from collections import OrderedDict
import multiprocessing
import numpy as np
import tensorflow as tf
import keras
import keras.backend as K
import keras.layers as KL
import keras.engine as KE
import keras.models as KM
from mrcnn import utils
# Requires TensorFlow 1.3+ and Keras 2.0.8+.
from distutils.version import LooseVersion
assert LooseVersion(tf.__version__) >= LooseVersion("1.3")
assert LooseVersion(keras.__version__) >= LooseVersion('2.0.8')
############################################################
# Utility Functions
############################################################
def log(text, array=None):
"""Prints a text message. And, optionally, if a Numpy array is provided it
prints it's shape, min, and max values.
"""
if array is not None:
text = text.ljust(25)
text += ("shape: {:20} min: {:10.5f} max: {:10.5f} {}".format(
str(array.shape),
array.min() if array.size else "",
array.max() if array.size else "",
array.dtype))
print(text)
class BatchNorm(KL.BatchNormalization):
"""Extends the Keras BatchNormalization class to allow a central place
to make changes if needed.
Batch normalization has a negative effect on training if batches are small
so this layer is often frozen (via setting in Config class) and functions
    as a linear layer.
"""
def call(self, inputs, training=None):
"""
Note about training values:
None: Train BN layers. This is the normal mode
False: Freeze BN layers. Good when batch size is small
True: (don't use). Set layer in training mode even when making inferences
"""
return super(self.__class__, self).call(inputs, training=training)
def compute_backbone_shapes(config, image_shape):
"""Computes the width and height of each stage of the backbone network.
Returns:
[N, (height, width)]. Where N is the number of stages
"""
if callable(config.BACKBONE):
return config.COMPUTE_BACKBONE_SHAPE(image_shape)
# Currently supports ResNet only
assert config.BACKBONE in ["resnet50", "resnet101"]
return np.array(
[[int(math.ceil(image_shape[0] / stride)),
int(math.ceil(image_shape[1] / stride))]
for stride in config.BACKBONE_STRIDES])
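# Hedged worked example (not part of the original file): assuming the commonly
# used BACKBONE_STRIDES of [4, 8, 16, 32, 64] and a 1024x1024 input image,
# compute_backbone_shapes() returns
#   [[256, 256], [128, 128], [64, 64], [32, 32], [16, 16]]
# i.e. ceil(1024 / stride) for each stage.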
############################################################
# Resnet Graph
############################################################
# Code adopted from:
# https://github.com/fchollet/deep-learning-models/blob/master/resnet50.py
def identity_block(input_tensor, kernel_size, filters, stage, block,
use_bias=True, train_bn=True):
"""The identity_block is the block that has no conv layer at shortcut
# Arguments
input_tensor: input tensor
kernel_size: default 3, the kernel size of middle conv layer at main path
filters: list of integers, the nb_filters of 3 conv layer at main path
stage: integer, current stage label, used for generating layer names
block: 'a','b'..., current block label, used for generating layer names
use_bias: Boolean. To use or not use a bias in conv layers.
train_bn: Boolean. Train or freeze Batch Norm layers
"""
nb_filter1, nb_filter2, nb_filter3 = filters
conv_name_base = 'res' + str(stage) + block + '_branch'
bn_name_base = 'bn' + str(stage) + block + '_branch'
x = KL.Conv2D(nb_filter1, (1, 1), name=conv_name_base + '2a',
use_bias=use_bias)(input_tensor)
x = BatchNorm(name=bn_name_base + '2a')(x, training=train_bn)
x = KL.Activation('relu')(x)
x = KL.Conv2D(nb_filter2, (kernel_size, kernel_size), padding='same',
name=conv_name_base + '2b', use_bias=use_bias)(x)
x = BatchNorm(name=bn_name_base + '2b')(x, training=train_bn)
x = KL.Activation('relu')(x)
x = KL.Conv2D(nb_filter3, (1, 1), name=conv_name_base + '2c',
use_bias=use_bias)(x)
x = BatchNorm(name=bn_name_base + '2c')(x, training=train_bn)
x = KL.Add()([x, input_tensor])
x = KL.Activation('relu', name='res' + str(stage) + block + '_out')(x)
return x
def conv_block(input_tensor, kernel_size, filters, stage, block,
strides=(2, 2), use_bias=True, train_bn=True):
"""conv_block is the block that has a conv layer at shortcut
# Arguments
input_tensor: input tensor
kernel_size: default 3, the kernel size of middle conv layer at main path
filters: list of integers, the nb_filters of 3 conv layer at main path
stage: integer, current stage label, used for generating layer names
block: 'a','b'..., current block label, used for generating layer names
use_bias: Boolean. To use or not use a bias in conv layers.
train_bn: Boolean. Train or freeze Batch Norm layers
    Note that from stage 3, the first conv layer in the main path uses strides=(2, 2),
    and the shortcut does as well.
"""
nb_filter1, nb_filter2, nb_filter3 = filters
conv_name_base = 'res' + str(stage) + block + '_branch'
bn_name_base = 'bn' + str(stage) + block + '_branch'
x = KL.Conv2D(nb_filter1, (1, 1), strides=strides,
name=conv_name_base + '2a', use_bias=use_bias)(input_tensor)
x = BatchNorm(name=bn_name_base + '2a')(x, training=train_bn)
x = KL.Activation('relu')(x)
x = KL.Conv2D(nb_filter2, (kernel_size, kernel_size), padding='same',
name=conv_name_base + '2b', use_bias=use_bias)(x)
x = BatchNorm(name=bn_name_base + '2b')(x, training=train_bn)
x = KL.Activation('relu')(x)
x = KL.Conv2D(nb_filter3, (1, 1), name=conv_name_base +
'2c', use_bias=use_bias)(x)
x = BatchNorm(name=bn_name_base + '2c')(x, training=train_bn)
shortcut = KL.Conv2D(nb_filter3, (1, 1), strides=strides,
name=conv_name_base + '1', use_bias=use_bias)(input_tensor)
shortcut = BatchNorm(name=bn_name_base + '1')(shortcut, training=train_bn)
x = KL.Add()([x, shortcut])
x = KL.Activation('relu', name='res' + str(stage) + block + '_out')(x)
return x
def resnet_graph(input_image, architecture, stage5=False, train_bn=True):
"""Build a ResNet graph.
architecture: Can be resnet50 or resnet101
stage5: Boolean. If False, stage5 of the network is not created
train_bn: Boolean. Train or freeze Batch Norm layers
"""
assert architecture in ["resnet50", "resnet101"]
# Stage 1
x = KL.ZeroPadding2D((3, 3))(input_image)
x = KL.Conv2D(64, (7, 7), strides=(2, 2), name='conv1', use_bias=True)(x)
x = BatchNorm(name='bn_conv1')(x, training=train_bn)
x = KL.Activation('relu')(x)
C1 = x = KL.MaxPooling2D((3, 3), strides=(2, 2), padding="same")(x)
# Stage 2
x = conv_block(x, 3, [64, 64, 256], stage=2, block='a', strides=(1, 1), train_bn=train_bn)
x = identity_block(x, 3, [64, 64, 256], stage=2, block='b', train_bn=train_bn)
C2 = x = identity_block(x, 3, [64, 64, 256], stage=2, block='c', train_bn=train_bn)
# Stage 3
x = conv_block(x, 3, [128, 128, 512], stage=3, block='a', train_bn=train_bn)
x = identity_block(x, 3, [128, 128, 512], stage=3, block='b', train_bn=train_bn)
x = identity_block(x, 3, [128, 128, 512], stage=3, block='c', train_bn=train_bn)
C3 = x = identity_block(x, 3, [128, 128, 512], stage=3, block='d', train_bn=train_bn)
# Stage 4
x = conv_block(x, 3, [256, 256, 1024], stage=4, block='a', train_bn=train_bn)
block_count = {"resnet50": 5, "resnet101": 22}[architecture]
for i in range(block_count):
x = identity_block(x, 3, [256, 256, 1024], stage=4, block=chr(98 + i), train_bn=train_bn)
C4 = x
# Stage 5
if stage5:
x = conv_block(x, 3, [512, 512, 2048], stage=5, block='a', train_bn=train_bn)
x = identity_block(x, 3, [512, 512, 2048], stage=5, block='b', train_bn=train_bn)
C5 = x = identity_block(x, 3, [512, 512, 2048], stage=5, block='c', train_bn=train_bn)
else:
C5 = None
return [C1, C2, C3, C4, C5]
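# Hedged note (not part of the original file): with the strides used above
# (conv1 + maxpool give stride 4, and stages 3-5 each halve the resolution),
# a 1024x1024 input yields feature maps of roughly 256, 256, 128, 64 and 32
# pixels per side for C1..C5 respectively.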
############################################################
# Proposal Layer
############################################################
def apply_box_deltas_graph(boxes, deltas):
"""Applies the given deltas to the given boxes.
boxes: [N, (y1, x1, y2, x2)] boxes to update
deltas: [N, (dy, dx, log(dh), log(dw))] refinements to apply
"""
# Convert to y, x, h, w
height = boxes[:, 2] - boxes[:, 0]
width = boxes[:, 3] - boxes[:, 1]
center_y = boxes[:, 0] + 0.5 * height
center_x = boxes[:, 1] + 0.5 * width
# Apply deltas
center_y += deltas[:, 0] * height
center_x += deltas[:, 1] * width
height *= tf.exp(deltas[:, 2])
width *= tf.exp(deltas[:, 3])
# Convert back to y1, x1, y2, x2
y1 = center_y - 0.5 * height
x1 = center_x - 0.5 * width
y2 = y1 + height
x2 = x1 + width
result = tf.stack([y1, x1, y2, x2], axis=1, name="apply_box_deltas_out")
return result
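# Hedged worked example (not part of the original file): for a single box
# [0, 0, 2, 2] (h = w = 2, center (1, 1)) and deltas [0.1, 0.1, 0.0, 0.0],
# the center shifts by 0.1 * 2 = 0.2 on each axis while exp(0) = 1 leaves the
# size unchanged, so the refined box is [0.2, 0.2, 2.2, 2.2].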
def clip_boxes_graph(boxes, window):
"""
boxes: [N, (y1, x1, y2, x2)]
window: [4] in the form y1, x1, y2, x2
"""
# Split
wy1, wx1, wy2, wx2 = tf.split(window, 4)
y1, x1, y2, x2 = tf.split(boxes, 4, axis=1)
# Clip
y1 = tf.maximum(tf.minimum(y1, wy2), wy1)
x1 = tf.maximum(tf.minimum(x1, wx2), wx1)
y2 = tf.maximum(tf.minimum(y2, wy2), wy1)
x2 = tf.maximum(tf.minimum(x2, wx2), wx1)
clipped = tf.concat([y1, x1, y2, x2], axis=1, name="clipped_boxes")
clipped.set_shape((clipped.shape[0], 4))
return clipped
class ProposalLayer(KE.Layer):
"""Receives anchor scores and selects a subset to pass as proposals
to the second stage. Filtering is done based on anchor scores and
non-max suppression to remove overlaps. It also applies bounding
box refinement deltas to anchors.
Inputs:
rpn_probs: [batch, num_anchors, (bg prob, fg prob)]
rpn_bbox: [batch, num_anchors, (dy, dx, log(dh), log(dw))]
anchors: [batch, num_anchors, (y1, x1, y2, x2)] anchors in normalized coordinates
Returns:
Proposals in normalized coordinates [batch, rois, (y1, x1, y2, x2)]
"""
def __init__(self, proposal_count, nms_threshold, config=None, **kwargs):
super(ProposalLayer, self).__init__(**kwargs)
self.config = config
self.proposal_count = proposal_count
self.nms_threshold = nms_threshold
def call(self, inputs):
# Box Scores. Use the foreground class confidence. [Batch, num_rois, 1]
scores = inputs[0][:, :, 1]
# Box deltas [batch, num_rois, 4]
deltas = inputs[1]
deltas = deltas * np.reshape(self.config.RPN_BBOX_STD_DEV, [1, 1, 4])
# Anchors
anchors = inputs[2]
# Improve performance by trimming to top anchors by score
# and doing the rest on the smaller subset.
pre_nms_limit = tf.minimum(self.config.PRE_NMS_LIMIT, tf.shape(anchors)[1])
ix = tf.nn.top_k(scores, pre_nms_limit, sorted=True,
name="top_anchors").indices
scores = utils.batch_slice([scores, ix], lambda x, y: tf.gather(x, y),
self.config.IMAGES_PER_GPU)
deltas = utils.batch_slice([deltas, ix], lambda x, y: tf.gather(x, y),
self.config.IMAGES_PER_GPU)
pre_nms_anchors = utils.batch_slice([anchors, ix], lambda a, x: tf.gather(a, x),
self.config.IMAGES_PER_GPU,
names=["pre_nms_anchors"])
# Apply deltas to anchors to get refined anchors.
# [batch, N, (y1, x1, y2, x2)]
boxes = utils.batch_slice([pre_nms_anchors, deltas],
lambda x, y: apply_box_deltas_graph(x, y),
self.config.IMAGES_PER_GPU,
names=["refined_anchors"])
# Clip to image boundaries. Since we're in normalized coordinates,
# clip to 0..1 range. [batch, N, (y1, x1, y2, x2)]
window = np.array([0, 0, 1, 1], dtype=np.float32)
boxes = utils.batch_slice(boxes,
lambda x: clip_boxes_graph(x, window),
self.config.IMAGES_PER_GPU,
names=["refined_anchors_clipped"])
# Filter out small boxes
# According to Xinlei Chen's paper, this reduces detection accuracy
# for small objects, so we're skipping it.
# Non-max suppression
def nms(boxes, scores):
indices = tf.image.non_max_suppression(
boxes, scores, self.proposal_count,
self.nms_threshold, name="rpn_non_max_suppression")
proposals = tf.gather(boxes, indices)
# Pad if needed
padding = tf.maximum(self.proposal_count - tf.shape(proposals)[0], 0)
proposals = tf.pad(proposals, [(0, padding), (0, 0)])
return proposals
proposals = utils.batch_slice([boxes, scores], nms,
self.config.IMAGES_PER_GPU)
return proposals
def compute_output_shape(self, input_shape):
return (None, self.proposal_count, 4)
############################################################
# ROIAlign Layer
############################################################
def log2_graph(x):
"""Implementation of Log2. TF doesn't have a native implementation."""
return tf.log(x) / tf.log(2.0)
class PyramidROIAlign(KE.Layer):
"""Implements ROI Pooling on multiple levels of the feature pyramid.
Params:
- pool_shape: [pool_height, pool_width] of the output pooled regions. Usually [7, 7]
Inputs:
- boxes: [batch, num_boxes, (y1, x1, y2, x2)] in normalized
coordinates. Possibly padded with zeros if not enough
boxes to fill the array.
- image_meta: [batch, (meta data)] Image details. See compose_image_meta()
- feature_maps: List of feature maps from different levels of the pyramid.
Each is [batch, height, width, channels]
Output:
Pooled regions in the shape: [batch, num_boxes, pool_height, pool_width, channels].
    The width and height are those specified in the pool_shape in the layer
constructor.
"""
def __init__(self, pool_shape, **kwargs):
super(PyramidROIAlign, self).__init__(**kwargs)
self.pool_shape = tuple(pool_shape)
def call(self, inputs):
# Crop boxes [batch, num_boxes, (y1, x1, y2, x2)] in normalized coords
boxes = inputs[0]
# Image meta
# Holds details about the image. See compose_image_meta()
image_meta = inputs[1]
# Feature Maps. List of feature maps from different level of the
# feature pyramid. Each is [batch, height, width, channels]
feature_maps = inputs[2:]
# Assign each ROI to a level in the pyramid based on the ROI area.
y1, x1, y2, x2 = tf.split(boxes, 4, axis=2)
h = y2 - y1
w = x2 - x1
# Use shape of first image. Images in a batch must have the same size.
image_shape = parse_image_meta_graph(image_meta)['image_shape'][0]
# Equation 1 in the Feature Pyramid Networks paper. Account for
# the fact that our coordinates are normalized here.
# e.g. a 224x224 ROI (in pixels) maps to P4
image_area = tf.cast(image_shape[0] * image_shape[1], tf.float32)
roi_level = log2_graph(tf.sqrt(h * w) / (224.0 / tf.sqrt(image_area)))
roi_level = tf.minimum(5, tf.maximum(
2, 4 + tf.cast(tf.round(roi_level), tf.int32)))
roi_level = tf.squeeze(roi_level, 2)
# Loop through levels and apply ROI pooling to each. P2 to P5.
pooled = []
box_to_level = []
for i, level in enumerate(range(2, 6)):
ix = tf.where(tf.equal(roi_level, level))
level_boxes = tf.gather_nd(boxes, ix)
# Box indices for crop_and_resize.
box_indices = tf.cast(ix[:, 0], tf.int32)
# Keep track of which box is mapped to which level
box_to_level.append(ix)
            # Stop gradient propagation to ROI proposals
level_boxes = tf.stop_gradient(level_boxes)
box_indices = tf.stop_gradient(box_indices)
# Crop and Resize
# From Mask R-CNN paper: "We sample four regular locations, so
# that we can evaluate either max or average pooling. In fact,
# interpolating only a single value at each bin center (without
# pooling) is nearly as effective."
#
# Here we use the simplified approach of a single value per bin,
# which is how it's done in tf.crop_and_resize()
# Result: [batch * num_boxes, pool_height, pool_width, channels]
pooled.append(tf.image.crop_and_resize(
feature_maps[i], level_boxes, box_indices, self.pool_shape,
method="bilinear"))
# Pack pooled features into one tensor
pooled = tf.concat(pooled, axis=0)
# Pack box_to_level mapping into one array and add another
# column representing the order of pooled boxes
box_to_level = tf.concat(box_to_level, axis=0)
box_range = tf.expand_dims(tf.range(tf.shape(box_to_level)[0]), 1)
box_to_level = tf.concat([tf.cast(box_to_level, tf.int32), box_range],
axis=1)
# Rearrange pooled features to match the order of the original boxes
# Sort box_to_level by batch then box index
# TF doesn't have a way to sort by two columns, so merge them and sort.
sorting_tensor = box_to_level[:, 0] * 100000 + box_to_level[:, 1]
ix = tf.nn.top_k(sorting_tensor, k=tf.shape(
box_to_level)[0]).indices[::-1]
ix = tf.gather(box_to_level[:, 2], ix)
pooled = tf.gather(pooled, ix)
# Re-add the batch dimension
shape = tf.concat([tf.shape(boxes)[:2], tf.shape(pooled)[1:]], axis=0)
pooled = tf.reshape(pooled, shape)
return pooled
def compute_output_shape(self, input_shape):
return input_shape[0][:2] + self.pool_shape + (input_shape[2][-1], )
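# Worked example of the level assignment above (editor's sketch, assuming a
# 1024x1024 input image): a 224x224 ROI has sqrt(h * w) = 224/1024 in normalized
# coordinates and 224.0 / sqrt(image_area) = 224/1024 as well, so the ratio is
# 1.0, log2(1.0) = 0, and roi_level = 4 + 0 = 4, i.e. the ROI is pooled from P4.
# A 448x448 ROI doubles the ratio, log2(2.0) = 1, and maps to P5.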
############################################################
# Detection Target Layer
############################################################
def overlaps_graph(boxes1, boxes2):
"""Computes IoU overlaps between two sets of boxes.
boxes1, boxes2: [N, (y1, x1, y2, x2)].
"""
# 1. Tile boxes2 and repeat boxes1. This allows us to compare
# every boxes1 against every boxes2 without loops.
# TF doesn't have an equivalent to np.repeat() so simulate it
# using tf.tile() and tf.reshape.
b1 = tf.reshape(tf.tile(tf.expand_dims(boxes1, 1),
[1, 1, tf.shape(boxes2)[0]]), [-1, 4])
b2 = tf.tile(boxes2, [tf.shape(boxes1)[0], 1])
# 2. Compute intersections
b1_y1, b1_x1, b1_y2, b1_x2 = tf.split(b1, 4, axis=1)
b2_y1, b2_x1, b2_y2, b2_x2 = tf.split(b2, 4, axis=1)
y1 = tf.maximum(b1_y1, b2_y1)
x1 = tf.maximum(b1_x1, b2_x1)
y2 = tf.minimum(b1_y2, b2_y2)
x2 = tf.minimum(b1_x2, b2_x2)
intersection = tf.maximum(x2 - x1, 0) * tf.maximum(y2 - y1, 0)
# 3. Compute unions
b1_area = (b1_y2 - b1_y1) * (b1_x2 - b1_x1)
b2_area = (b2_y2 - b2_y1) * (b2_x2 - b2_x1)
union = b1_area + b2_area - intersection
# 4. Compute IoU and reshape to [boxes1, boxes2]
iou = intersection / union
overlaps = tf.reshape(iou, [tf.shape(boxes1)[0], tf.shape(boxes2)[0]])
return overlaps
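# Editor's sketch (hypothetical helper, not used by the model): a NumPy version
# of the same IoU computation, handy for unit-testing overlaps_graph outside a
# TF session. It relies on the module-level numpy import (np) used elsewhere in
# this file, and the function name _overlaps_np_example is made up here.
def _overlaps_np_example(boxes1, boxes2):
    """Compute IoU overlaps between two sets of boxes with plain NumPy.
    boxes1: [N, (y1, x1, y2, x2)], boxes2: [M, (y1, x1, y2, x2)].
    Returns an [N, M] IoU matrix.
    """
    # Broadcast to compare every box in boxes1 against every box in boxes2
    y1 = np.maximum(boxes1[:, None, 0], boxes2[None, :, 0])
    x1 = np.maximum(boxes1[:, None, 1], boxes2[None, :, 1])
    y2 = np.minimum(boxes1[:, None, 2], boxes2[None, :, 2])
    x2 = np.minimum(boxes1[:, None, 3], boxes2[None, :, 3])
    intersection = np.maximum(y2 - y1, 0) * np.maximum(x2 - x1, 0)
    area1 = (boxes1[:, 2] - boxes1[:, 0]) * (boxes1[:, 3] - boxes1[:, 1])
    area2 = (boxes2[:, 2] - boxes2[:, 0]) * (boxes2[:, 3] - boxes2[:, 1])
    union = area1[:, None] + area2[None, :] - intersection
    return intersection / union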
def detection_targets_graph(proposals, gt_class_ids, gt_boxes, gt_masks, config):
"""Generates detection targets for one image. Subsamples proposals and
generates target class IDs, bounding box deltas, and masks for each.
Inputs:
proposals: [POST_NMS_ROIS_TRAINING, (y1, x1, y2, x2)] in normalized coordinates. Might
be zero padded if there are not enough proposals.
gt_class_ids: [MAX_GT_INSTANCES] int class IDs
gt_boxes: [MAX_GT_INSTANCES, (y1, x1, y2, x2)] in normalized coordinates.
gt_masks: [height, width, MAX_GT_INSTANCES] of boolean type.
Returns: Target ROIs and corresponding class IDs, bounding box shifts,
and masks.
rois: [TRAIN_ROIS_PER_IMAGE, (y1, x1, y2, x2)] in normalized coordinates
class_ids: [TRAIN_ROIS_PER_IMAGE]. Integer class IDs. Zero padded.
deltas: [TRAIN_ROIS_PER_IMAGE, (dy, dx, log(dh), log(dw))]
masks: [TRAIN_ROIS_PER_IMAGE, height, width]. Masks cropped to bbox
boundaries and resized to neural network output size.
Note: Returned arrays might be zero padded if not enough target ROIs.
"""
# Assertions
asserts = [
tf.Assert(tf.greater(tf.shape(proposals)[0], 0), [proposals],
name="roi_assertion"),
]
with tf.control_dependencies(asserts):
proposals = tf.identity(proposals)
# Remove zero padding
proposals, _ = trim_zeros_graph(proposals, name="trim_proposals")
gt_boxes, non_zeros = trim_zeros_graph(gt_boxes, name="trim_gt_boxes")
gt_class_ids = tf.boolean_mask(gt_class_ids, non_zeros,
name="trim_gt_class_ids")
gt_masks = tf.gather(gt_masks, tf.where(non_zeros)[:, 0], axis=2,
name="trim_gt_masks")
# Handle COCO crowds
# A crowd box in COCO is a bounding box around several instances. Exclude
# them from training. A crowd box is given a negative class ID.
crowd_ix = tf.where(gt_class_ids < 0)[:, 0]
non_crowd_ix = tf.where(gt_class_ids > 0)[:, 0]
crowd_boxes = tf.gather(gt_boxes, crowd_ix)
crowd_masks = tf.gather(gt_masks, crowd_ix, axis=2)
gt_class_ids = tf.gather(gt_class_ids, non_crowd_ix)
gt_boxes = tf.gather(gt_boxes, non_crowd_ix)
gt_masks = tf.gather(gt_masks, non_crowd_ix, axis=2)
# Compute overlaps matrix [proposals, gt_boxes]
overlaps = overlaps_graph(proposals, gt_boxes)
# Compute overlaps with crowd boxes [proposals, crowd_boxes]
crowd_overlaps = overlaps_graph(proposals, crowd_boxes)
crowd_iou_max = tf.reduce_max(crowd_overlaps, axis=1)
no_crowd_bool = (crowd_iou_max < 0.001)
# Determine positive and negative ROIs
roi_iou_max = tf.reduce_max(overlaps, axis=1)
# 1. Positive ROIs are those with >= 0.5 IoU with a GT box
positive_roi_bool = (roi_iou_max >= 0.5)
positive_indices = tf.where(positive_roi_bool)[:, 0]
# 2. Negative ROIs are those with < 0.5 with every GT box. Skip crowds.
negative_indices = tf.where(tf.logical_and(roi_iou_max < 0.5, no_crowd_bool))[:, 0]
# Subsample ROIs. Aim for 33% positive
# Positive ROIs
positive_count = int(config.TRAIN_ROIS_PER_IMAGE *
config.ROI_POSITIVE_RATIO)
positive_indices = tf.random_shuffle(positive_indices)[:positive_count]
positive_count = tf.shape(positive_indices)[0]
# Negative ROIs. Add enough to maintain positive:negative ratio.
r = 1.0 / config.ROI_POSITIVE_RATIO
negative_count = tf.cast(r * tf.cast(positive_count, tf.float32), tf.int32) - positive_count
negative_indices = tf.random_shuffle(negative_indices)[:negative_count]
# Gather selected ROIs
positive_rois = tf.gather(proposals, positive_indices)
negative_rois = tf.gather(proposals, negative_indices)
# Assign positive ROIs to GT boxes.
positive_overlaps = tf.gather(overlaps, positive_indices)
roi_gt_box_assignment = tf.cond(
tf.greater(tf.shape(positive_overlaps)[1], 0),
true_fn = lambda: tf.argmax(positive_overlaps, axis=1),
false_fn = lambda: tf.cast(tf.constant([]),tf.int64)
)
roi_gt_boxes = tf.gather(gt_boxes, roi_gt_box_assignment)
roi_gt_class_ids = tf.gather(gt_class_ids, roi_gt_box_assignment)
# Compute bbox refinement for positive ROIs
deltas = utils.box_refinement_graph(positive_rois, roi_gt_boxes)
deltas /= config.BBOX_STD_DEV
# Assign positive ROIs to GT masks
# Permute masks to [N, height, width, 1]
transposed_masks = tf.expand_dims(tf.transpose(gt_masks, [2, 0, 1]), -1)
# Pick the right mask for each ROI
roi_masks = tf.gather(transposed_masks, roi_gt_box_assignment)
# Compute mask targets
boxes = positive_rois
if config.USE_MINI_MASK:
# Transform ROI coordinates from normalized image space
# to normalized mini-mask space.
y1, x1, y2, x2 = tf.split(positive_rois, 4, axis=1)
gt_y1, gt_x1, gt_y2, gt_x2 = tf.split(roi_gt_boxes, 4, axis=1)
gt_h = gt_y2 - gt_y1
gt_w = gt_x2 - gt_x1
y1 = (y1 - gt_y1) / gt_h
x1 = (x1 - gt_x1) / gt_w
y2 = (y2 - gt_y1) / gt_h
x2 = (x2 - gt_x1) / gt_w
boxes = tf.concat([y1, x1, y2, x2], 1)
box_ids = tf.range(0, tf.shape(roi_masks)[0])
masks = tf.image.crop_and_resize(tf.cast(roi_masks, tf.float32), boxes,
box_ids,
config.MASK_SHAPE)
# Remove the extra dimension from masks.
masks = tf.squeeze(masks, axis=3)
# Threshold mask pixels at 0.5 to have GT masks be 0 or 1 to use with
# binary cross entropy loss.
masks = tf.round(masks)
# Append negative ROIs and pad bbox deltas and masks that
# are not used for negative ROIs with zeros.
rois = tf.concat([positive_rois, negative_rois], axis=0)
N = tf.shape(negative_rois)[0]
P = tf.maximum(config.TRAIN_ROIS_PER_IMAGE - tf.shape(rois)[0], 0)
rois = tf.pad(rois, [(0, P), (0, 0)])
roi_gt_boxes = tf.pad(roi_gt_boxes, [(0, N + P), (0, 0)])
roi_gt_class_ids = tf.pad(roi_gt_class_ids, [(0, N + P)])
deltas = tf.pad(deltas, [(0, N + P), (0, 0)])
masks = tf.pad(masks, [[0, N + P], (0, 0), (0, 0)])
return rois, roi_gt_class_ids, deltas, masks
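# Worked example of the subsampling arithmetic above (editor's note, assuming
# the default-style values TRAIN_ROIS_PER_IMAGE=200 and ROI_POSITIVE_RATIO=0.33):
# positive_count is capped at int(200 * 0.33) = 66, r = 1 / 0.33 ~= 3.03, and if
# all 66 positives are available, negative_count = int(3.03 * 66) - 66 = 134,
# so the sampled ROIs end up roughly one third positive, two thirds negative.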
class DetectionTargetLayer(KE.Layer):
"""Subsamples proposals and generates target box refinement, class_ids,
and masks for each.
Inputs:
proposals: [batch, N, (y1, x1, y2, x2)] in normalized coordinates. Might
be zero padded if there are not enough proposals.
gt_class_ids: [batch, MAX_GT_INSTANCES] Integer class IDs.
gt_boxes: [batch, MAX_GT_INSTANCES, (y1, x1, y2, x2)] in normalized
coordinates.
gt_masks: [batch, height, width, MAX_GT_INSTANCES] of boolean type
Returns: Target ROIs and corresponding class IDs, bounding box shifts,
and masks.
rois: [batch, TRAIN_ROIS_PER_IMAGE, (y1, x1, y2, x2)] in normalized
coordinates
target_class_ids: [batch, TRAIN_ROIS_PER_IMAGE]. Integer class IDs.
target_deltas: [batch, TRAIN_ROIS_PER_IMAGE, (dy, dx, log(dh), log(dw))]
target_mask: [batch, TRAIN_ROIS_PER_IMAGE, height, width]
Masks cropped to bbox boundaries and resized to neural
network output size.
Note: Returned arrays might be zero padded if not enough target ROIs.
"""
def __init__(self, config, **kwargs):
super(DetectionTargetLayer, self).__init__(**kwargs)
self.config = config
def call(self, inputs):
proposals = inputs[0]
gt_class_ids = inputs[1]
gt_boxes = inputs[2]
gt_masks = inputs[3]
# Slice the batch and run a graph for each slice
# TODO: Rename target_bbox to target_deltas for clarity
names = ["rois", "target_class_ids", "target_bbox", "target_mask"]
outputs = utils.batch_slice(
[proposals, gt_class_ids, gt_boxes, gt_masks],
lambda w, x, y, z: detection_targets_graph(
w, x, y, z, self.config),
self.config.IMAGES_PER_GPU, names=names)
return outputs
def compute_output_shape(self, input_shape):
return [
(None, self.config.TRAIN_ROIS_PER_IMAGE, 4), # rois
(None, self.config.TRAIN_ROIS_PER_IMAGE), # class_ids
(None, self.config.TRAIN_ROIS_PER_IMAGE, 4), # deltas
(None, self.config.TRAIN_ROIS_PER_IMAGE, self.config.MASK_SHAPE[0],
self.config.MASK_SHAPE[1]) # masks
]
def compute_mask(self, inputs, mask=None):
return [None, None, None, None]
############################################################
# Detection Layer
############################################################
def refine_detections_graph(rois, probs, deltas, window, config):
"""Refine classified proposals and filter overlaps and return final
detections.
Inputs:
rois: [N, (y1, x1, y2, x2)] in normalized coordinates
probs: [N, num_classes]. Class probabilities.
deltas: [N, num_classes, (dy, dx, log(dh), log(dw))]. Class-specific
bounding box deltas.
window: (y1, x1, y2, x2) in normalized coordinates. The part of the image
that contains the actual image content, excluding the padding.
Returns detections shaped: [num_detections, (y1, x1, y2, x2, class_id, score)] where
coordinates are normalized.
"""
# Class IDs per ROI
class_ids = tf.argmax(probs, axis=1, output_type=tf.int32)
# Class probability of the top class of each ROI
indices = tf.stack([tf.range(probs.shape[0]), class_ids], axis=1)
class_scores = tf.gather_nd(probs, indices)
# Class-specific bounding box deltas
deltas_specific = tf.gather_nd(deltas, indices)
# Apply bounding box deltas
# Shape: [boxes, (y1, x1, y2, x2)] in normalized coordinates
refined_rois = apply_box_deltas_graph(
rois, deltas_specific * config.BBOX_STD_DEV)
# Clip boxes to image window
refined_rois = clip_boxes_graph(refined_rois, window)
# TODO: Filter out boxes with zero area
# Filter out background boxes
keep = tf.where(class_ids > 0)[:, 0]
# Filter out low confidence boxes
if config.DETECTION_MIN_CONFIDENCE:
conf_keep = tf.where(class_scores >= config.DETECTION_MIN_CONFIDENCE)[:, 0]
keep = tf.sets.set_intersection(tf.expand_dims(keep, 0),
tf.expand_dims(conf_keep, 0))
keep = tf.sparse_tensor_to_dense(keep)[0]
# Apply per-class NMS
# 1. Prepare variables
pre_nms_class_ids = tf.gather(class_ids, keep)
pre_nms_scores = tf.gather(class_scores, keep)
pre_nms_rois = tf.gather(refined_rois, keep)
unique_pre_nms_class_ids = tf.unique(pre_nms_class_ids)[0]
def nms_keep_map(class_id):
"""Apply Non-Maximum Suppression on ROIs of the given class."""
# Indices of ROIs of the given class
ixs = tf.where(tf.equal(pre_nms_class_ids, class_id))[:, 0]
# Apply NMS
class_keep = tf.image.non_max_suppression(
tf.gather(pre_nms_rois, ixs),
tf.gather(pre_nms_scores, ixs),
max_output_size=config.DETECTION_MAX_INSTANCES,
iou_threshold=config.DETECTION_NMS_THRESHOLD)
# Map indices
class_keep = tf.gather(keep, tf.gather(ixs, class_keep))
# Pad with -1 so returned tensors have the same shape
gap = config.DETECTION_MAX_INSTANCES - tf.shape(class_keep)[0]
class_keep = tf.pad(class_keep, [(0, gap)],
mode='CONSTANT', constant_values=-1)
# Set shape so map_fn() can infer result shape
class_keep.set_shape([config.DETECTION_MAX_INSTANCES])
return class_keep
# 2. Map over class IDs
nms_keep = tf.map_fn(nms_keep_map, unique_pre_nms_class_ids,
dtype=tf.int64)
# 3. Merge results into one list, and remove -1 padding
nms_keep = tf.reshape(nms_keep, [-1])
nms_keep = tf.gather(nms_keep, tf.where(nms_keep > -1)[:, 0])
# 4. Compute intersection between keep and nms_keep
keep = tf.sets.set_intersection(tf.expand_dims(keep, 0),
tf.expand_dims(nms_keep, 0))
keep = tf.sparse_tensor_to_dense(keep)[0]
# Keep top detections
roi_count = config.DETECTION_MAX_INSTANCES
class_scores_keep = tf.gather(class_scores, keep)
num_keep = tf.minimum(tf.shape(class_scores_keep)[0], roi_count)
top_ids = tf.nn.top_k(class_scores_keep, k=num_keep, sorted=True)[1]
keep = tf.gather(keep, top_ids)
# Arrange output as [N, (y1, x1, y2, x2, class_id, score)]
# Coordinates are normalized.
detections = tf.concat([
tf.gather(refined_rois, keep),
tf.to_float(tf.gather(class_ids, keep))[..., tf.newaxis],
tf.gather(class_scores, keep)[..., tf.newaxis]
], axis=1)
# Pad with zeros if detections < DETECTION_MAX_INSTANCES
gap = config.DETECTION_MAX_INSTANCES - tf.shape(detections)[0]
detections = tf.pad(detections, [(0, gap), (0, 0)], "CONSTANT")
return detections
class DetectionLayer(KE.Layer):
"""Takes classified proposal boxes and their bounding box deltas and
returns the final detection boxes.
Returns:
[batch, num_detections, (y1, x1, y2, x2, class_id, class_score)] where
coordinates are normalized.
"""
def __init__(self, config=None, **kwargs):
super(DetectionLayer, self).__init__(**kwargs)
self.config = config
def call(self, inputs):
rois = inputs[0]
mrcnn_class = inputs[1]
mrcnn_bbox = inputs[2]
image_meta = inputs[3]
# Get windows of images in normalized coordinates. Windows are the area
# in the image that excludes the padding.
# Use the shape of the first image in the batch to normalize the window
# because we know that all images get resized to the same size.
m = parse_image_meta_graph(image_meta)
image_shape = m['image_shape'][0]
window = norm_boxes_graph(m['window'], image_shape[:2])
# Run detection refinement graph on each item in the batch
detections_batch = utils.batch_slice(
[rois, mrcnn_class, mrcnn_bbox, window],
lambda x, y, w, z: refine_detections_graph(x, y, w, z, self.config),
self.config.IMAGES_PER_GPU)
# Reshape output
# [batch, num_detections, (y1, x1, y2, x2, class_id, class_score)] in
# normalized coordinates
return tf.reshape(
detections_batch,
[self.config.BATCH_SIZE, self.config.DETECTION_MAX_INSTANCES, 6])
def compute_output_shape(self, input_shape):
return (None, self.config.DETECTION_MAX_INSTANCES, 6)
############################################################
# Region Proposal Network (RPN)
############################################################
def rpn_graph(feature_map, anchors_per_location, anchor_stride):
"""Builds the computation graph of Region Proposal Network.
feature_map: backbone features [batch, height, width, depth]
anchors_per_location: number of anchors per pixel in the feature map
anchor_stride: Controls the density of anchors. Typically 1 (anchors for
every pixel in the feature map), or 2 (every other pixel).
Returns:
rpn_class_logits: [batch, H * W * anchors_per_location, 2] Anchor classifier logits (before softmax)
rpn_probs: [batch, H * W * anchors_per_location, 2] Anchor classifier probabilities.
rpn_bbox: [batch, H * W * anchors_per_location, (dy, dx, log(dh), log(dw))] Deltas to be
applied to anchors.
"""
# TODO: check if stride of 2 causes alignment issues if the feature map
# is not even.
# Shared convolutional base of the RPN
shared = KL.Conv2D(512, (3, 3), padding='same', activation='relu',
strides=anchor_stride,
name='rpn_conv_shared')(feature_map)
# Anchor Score. [batch, height, width, anchors per location * 2].
x = KL.Conv2D(2 * anchors_per_location, (1, 1), padding='valid',
activation='linear', name='rpn_class_raw')(shared)
# Reshape to [batch, anchors, 2]
rpn_class_logits = KL.Lambda(
lambda t: tf.reshape(t, [tf.shape(t)[0], -1, 2]))(x)
# Softmax on last dimension of BG/FG.
rpn_probs = KL.Activation(
"softmax", name="rpn_class_xxx")(rpn_class_logits)
# Bounding box refinement. [batch, H, W, anchors per location * depth]
# where depth is [dy, dx, log(dh), log(dw)]
x = KL.Conv2D(anchors_per_location * 4, (1, 1), padding="valid",
activation='linear', name='rpn_bbox_pred')(shared)
# Reshape to [batch, anchors, 4]
rpn_bbox = KL.Lambda(lambda t: tf.reshape(t, [tf.shape(t)[0], -1, 4]))(x)
return [rpn_class_logits, rpn_probs, rpn_bbox]
def build_rpn_model(anchor_stride, anchors_per_location, depth):
"""Builds a Keras model of the Region Proposal Network.
It wraps the RPN graph so it can be used multiple times with shared
weights.
anchors_per_location: number of anchors per pixel in the feature map
anchor_stride: Controls the density of anchors. Typically 1 (anchors for
every pixel in the feature map), or 2 (every other pixel).
depth: Depth of the backbone feature map.
Returns a Keras Model object. The model outputs, when called, are:
rpn_class_logits: [batch, H * W * anchors_per_location, 2] Anchor classifier logits (before softmax)
rpn_probs: [batch, H * W * anchors_per_location, 2] Anchor classifier probabilities.
rpn_bbox: [batch, H * W * anchors_per_location, (dy, dx, log(dh), log(dw))] Deltas to be
applied to anchors.
"""
input_feature_map = KL.Input(shape=[None, None, depth],
name="input_rpn_feature_map")
outputs = rpn_graph(input_feature_map, anchors_per_location, anchor_stride)
return KM.Model([input_feature_map], outputs, name="rpn_model")
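# Usage sketch (editor's note; the values are illustrative and mirror the
# defaults referenced elsewhere in this file): with an anchor stride of 1,
# three anchor ratios, and 256-deep FPN feature maps, the RPN would be built as
#   rpn = build_rpn_model(anchor_stride=1, anchors_per_location=3, depth=256)
# and then called once per pyramid level, e.g. rpn([P2]), so that the same
# weights are shared across all levels.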
############################################################
# Feature Pyramid Network Heads
############################################################
def fpn_classifier_graph(rois, feature_maps, image_meta,
pool_size, num_classes, train_bn=True,
fc_layers_size=1024):
"""Builds the computation graph of the feature pyramid network classifier
and regressor heads.
rois: [batch, num_rois, (y1, x1, y2, x2)] Proposal boxes in normalized
coordinates.
feature_maps: List of feature maps from different layers of the pyramid,
[P2, P3, P4, P5]. Each has a different resolution.
image_meta: [batch, (meta data)] Image details. See compose_image_meta()
pool_size: The width of the square feature map generated from ROI Pooling.
num_classes: number of classes, which determines the depth of the results
train_bn: Boolean. Train or freeze Batch Norm layers
fc_layers_size: Size of the 2 FC layers
Returns:
logits: [batch, num_rois, NUM_CLASSES] classifier logits (before softmax)
probs: [batch, num_rois, NUM_CLASSES] classifier probabilities
bbox_deltas: [batch, num_rois, NUM_CLASSES, (dy, dx, log(dh), log(dw))] Deltas to apply to
proposal boxes
"""
# ROI Pooling
# Shape: [batch, num_rois, POOL_SIZE, POOL_SIZE, channels]
x = PyramidROIAlign([pool_size, pool_size],
name="roi_align_classifier")([rois, image_meta] + feature_maps)
# Two FC layers of size fc_layers_size (implemented with Conv2D for consistency)
x = KL.TimeDistributed(KL.Conv2D(fc_layers_size, (pool_size, pool_size), padding="valid"),
name="mrcnn_class_conv1")(x)
x = KL.TimeDistributed(BatchNorm(), name='mrcnn_class_bn1')(x, training=train_bn)
x = KL.Activation('relu')(x)
x = KL.TimeDistributed(KL.Conv2D(fc_layers_size, (1, 1)),
name="mrcnn_class_conv2")(x)
x = KL.TimeDistributed(BatchNorm(), name='mrcnn_class_bn2')(x, training=train_bn)
x = KL.Activation('relu')(x)
shared = KL.Lambda(lambda x: K.squeeze(K.squeeze(x, 3), 2),
name="pool_squeeze")(x)
# Classifier head
mrcnn_class_logits = KL.TimeDistributed(KL.Dense(num_classes),
name='mrcnn_class_logits')(shared)
mrcnn_probs = KL.TimeDistributed(KL.Activation("softmax"),
name="mrcnn_class")(mrcnn_class_logits)
# BBox head
# [batch, num_rois, NUM_CLASSES * (dy, dx, log(dh), log(dw))]
x = KL.TimeDistributed(KL.Dense(num_classes * 4, activation='linear'),
name='mrcnn_bbox_fc')(shared)
# Reshape to [batch, num_rois, NUM_CLASSES, (dy, dx, log(dh), log(dw))]
s = K.int_shape(x)
mrcnn_bbox = KL.Reshape((s[1], num_classes, 4), name="mrcnn_bbox")(x)
return mrcnn_class_logits, mrcnn_probs, mrcnn_bbox
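# Shape walk-through for the classifier head above (editor's note, assuming
# pool_size=7, fc_layers_size=1024, and 256-channel FPN maps): PyramidROIAlign
# yields [batch, num_rois, 7, 7, 256]; the first TimeDistributed Conv2D uses a
# 7x7 kernel with "valid" padding, collapsing the spatial dims to
# [batch, num_rois, 1, 1, 1024] and acting as a fully connected layer; the
# pool_squeeze Lambda drops the two singleton axes, leaving
# [batch, num_rois, 1024] for the class and bbox heads.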
def build_fpn_mask_graph(rois, feature_maps, image_meta,
pool_size, num_classes, train_bn=True):
"""Builds the computation graph of the mask head of Feature Pyramid Network.
rois: [batch, num_rois, (y1, x1, y2, x2)] Proposal boxes in normalized
coordinates.
feature_maps: List of feature maps from different layers of the pyramid,
[P2, P3, P4, P5]. Each has a different resolution.
image_meta: [batch, (meta data)] Image details. See compose_image_meta()
pool_size: The width of the square feature map generated from ROI Pooling.
num_classes: number of classes, which determines the depth of the results
train_bn: Boolean. Train or freeze Batch Norm layers
Returns: Masks [batch, num_rois, MASK_POOL_SIZE, MASK_POOL_SIZE, NUM_CLASSES]
"""
# ROI Pooling
# Shape: [batch, num_rois, MASK_POOL_SIZE, MASK_POOL_SIZE, channels]
x = PyramidROIAlign([pool_size, pool_size],
name="roi_align_mask")([rois, image_meta] + feature_maps)
# Conv layers
x = KL.TimeDistributed(KL.Conv2D(256, (3, 3), padding="same"),
name="mrcnn_mask_conv1")(x)
x = KL.TimeDistributed(BatchNorm(),
name='mrcnn_mask_bn1')(x, training=train_bn)
x = KL.Activation('relu')(x)
x = KL.TimeDistributed(KL.Conv2D(256, (3, 3), padding="same"),
name="mrcnn_mask_conv2")(x)
x = KL.TimeDistributed(BatchNorm(),
name='mrcnn_mask_bn2')(x, training=train_bn)
x = KL.Activation('relu')(x)
x = KL.TimeDistributed(KL.Conv2D(256, (3, 3), padding="same"),
name="mrcnn_mask_conv3")(x)
x = KL.TimeDistributed(BatchNorm(),
name='mrcnn_mask_bn3')(x, training=train_bn)
x = KL.Activation('relu')(x)
x = KL.TimeDistributed(KL.Conv2D(256, (3, 3), padding="same"),
name="mrcnn_mask_conv4")(x)
x = KL.TimeDistributed(BatchNorm(),
name='mrcnn_mask_bn4')(x, training=train_bn)
x = KL.Activation('relu')(x)
x = KL.TimeDistributed(KL.Conv2DTranspose(256, (2, 2), strides=2, activation="relu"),
name="mrcnn_mask_deconv")(x)
x = KL.TimeDistributed(KL.Conv2D(num_classes, (1, 1), strides=1, activation="sigmoid"),
name="mrcnn_mask")(x)
return x
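# Editor's note on the mask resolution (assuming pool_size=14 as in the typical
# MASK_POOL_SIZE): the four 3x3 "same" convs keep the ROI-aligned features at
# 14x14, the stride-2 Conv2DTranspose doubles them to 28x28, and the final 1x1
# sigmoid conv emits one 28x28 mask per class, which is what the mask loss
# compares against MASK_SHAPE-sized targets.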
############################################################
# Loss Functions
############################################################
def smooth_l1_loss(y_true, y_pred):
"""Implements Smooth-L1 loss.
y_true and y_pred are typically: [N, 4], but could be any shape.
"""
diff = K.abs(y_true - y_pred)
less_than_one = K.cast(K.less(diff, 1.0), "float32")
loss = (less_than_one * 0.5 * diff**2) + (1 - less_than_one) * (diff - 0.5)
return loss
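# Worked example (editor's note): for an absolute error of 0.5 the quadratic
# branch applies, giving 0.5 * 0.5**2 = 0.125; for an error of 2.0 the linear
# branch applies, giving 2.0 - 0.5 = 1.5. The two branches meet at diff = 1.0,
# where both evaluate to 0.5.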
def rpn_class_loss_graph(rpn_match, rpn_class_logits):
"""RPN anchor classifier loss.
rpn_match: [batch, anchors, 1]. Anchor match type. 1=positive,
-1=negative, 0=neutral anchor.
rpn_class_logits: [batch, anchors, 2]. RPN classifier logits for FG/BG.
"""
# Squeeze last dim to simplify
rpn_match = tf.squeeze(rpn_match, -1)
# Get anchor classes. Convert the -1/+1 match to 0/1 values.
anchor_class = K.cast(K.equal(rpn_match, 1), tf.int32)
# Positive and Negative anchors contribute to the loss,
# but neutral anchors (match value = 0) don't.
indices = tf.where(K.not_equal(rpn_match, 0))
# Pick rows that contribute to the loss and filter out the rest.
rpn_class_logits = tf.gather_nd(rpn_class_logits, indices)
anchor_class = tf.gather_nd(anchor_class, indices)
# Cross entropy loss
loss = K.sparse_categorical_crossentropy(target=anchor_class,
output=rpn_class_logits,
from_logits=True)
loss = K.switch(tf.size(loss) > 0, K.mean(loss), tf.constant(0.0))
return loss
def rpn_bbox_loss_graph(config, target_bbox, rpn_match, rpn_bbox):
"""Return the RPN bounding box loss graph.
config: the model config object.
target_bbox: [batch, max positive anchors, (dy, dx, log(dh), log(dw))].
Uses 0 padding to fill in unused bbox deltas.
rpn_match: [batch, anchors, 1]. Anchor match type. 1=positive,
-1=negative, 0=neutral anchor.
rpn_bbox: [batch, anchors, (dy, dx, log(dh), log(dw))]
"""
# Positive anchors contribute to the loss, but negative and
# neutral anchors (match value of 0 or -1) don't.
rpn_match = K.squeeze(rpn_match, -1)
indices = tf.where(K.equal(rpn_match, 1))
# Pick bbox deltas that contribute to the loss
rpn_bbox = tf.gather_nd(rpn_bbox, indices)
# Trim target bounding box deltas to the same length as rpn_bbox.
batch_counts = K.sum(K.cast(K.equal(rpn_match, 1), tf.int32), axis=1)
target_bbox = batch_pack_graph(target_bbox, batch_counts,
config.IMAGES_PER_GPU)
loss = smooth_l1_loss(target_bbox, rpn_bbox)
loss = K.switch(tf.size(loss) > 0, K.mean(loss), tf.constant(0.0))
return loss
def mrcnn_class_loss_graph(target_class_ids, pred_class_logits,
active_class_ids):
"""Loss for the classifier head of Mask RCNN.
target_class_ids: [batch, num_rois]. Integer class IDs. Uses zero
padding to fill in the array.
pred_class_logits: [batch, num_rois, num_classes]
active_class_ids: [batch, num_classes]. Has a value of 1 for
classes that are in the dataset of the image, and 0
for classes that are not in the dataset.
"""
# During model building, Keras calls this function with
# target_class_ids of type float32. Unclear why. Cast it
# to int to get around it.
target_class_ids = tf.cast(target_class_ids, 'int64')
# Find predictions of classes that are not in the dataset.
pred_class_ids = tf.argmax(pred_class_logits, axis=2)
# TODO: Update this line to work with batch > 1. Right now it assumes all
# images in a batch have the same active_class_ids
pred_active = tf.gather(active_class_ids[0], pred_class_ids)
# Loss
loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
labels=target_class_ids, logits=pred_class_logits)
# Erase losses of predictions of classes that are not in the active
# classes of the image.
loss = loss * pred_active
# Compute loss mean. Use only predictions that contribute
# to the loss to get a correct mean.
loss = tf.reduce_sum(loss) / tf.reduce_sum(pred_active)
return loss
def mrcnn_bbox_loss_graph(target_bbox, target_class_ids, pred_bbox):
"""Loss for Mask R-CNN bounding box refinement.
target_bbox: [batch, num_rois, (dy, dx, log(dh), log(dw))]
target_class_ids: [batch, num_rois]. Integer class IDs.
pred_bbox: [batch, num_rois, num_classes, (dy, dx, log(dh), log(dw))]
"""
# Reshape to merge batch and roi dimensions for simplicity.
target_class_ids = K.reshape(target_class_ids, (-1,))
target_bbox = K.reshape(target_bbox, (-1, 4))
pred_bbox = K.reshape(pred_bbox, (-1, K.int_shape(pred_bbox)[2], 4))
# Only positive ROIs contribute to the loss. And only
# the right class_id of each ROI. Get their indices.
positive_roi_ix = tf.where(target_class_ids > 0)[:, 0]
positive_roi_class_ids = tf.cast(
tf.gather(target_class_ids, positive_roi_ix), tf.int64)
indices = tf.stack([positive_roi_ix, positive_roi_class_ids], axis=1)
# Gather the deltas (predicted and true) that contribute to loss
target_bbox = tf.gather(target_bbox, positive_roi_ix)
pred_bbox = tf.gather_nd(pred_bbox, indices)
# Smooth-L1 Loss
loss = K.switch(tf.size(target_bbox) > 0,
smooth_l1_loss(y_true=target_bbox, y_pred=pred_bbox),
tf.constant(0.0))
loss = K.mean(loss)
return loss
def mrcnn_mask_loss_graph(target_masks, target_class_ids, pred_masks):
"""Mask binary cross-entropy loss for the masks head.
target_masks: [batch, num_rois, height, width].
A float32 tensor of values 0 or 1. Uses zero padding to fill array.
target_class_ids: [batch, num_rois]. Integer class IDs. Zero padded.
pred_masks: [batch, proposals, height, width, num_classes] float32 tensor
with values from 0 to 1.
"""
# Reshape for simplicity. Merge first two dimensions into one.
target_class_ids = K.reshape(target_class_ids, (-1,))
mask_shape = tf.shape(target_masks)
target_masks = K.reshape(target_masks, (-1, mask_shape[2], mask_shape[3]))
pred_shape = tf.shape(pred_masks)
pred_masks = K.reshape(pred_masks,
(-1, pred_shape[2], pred_shape[3], pred_shape[4]))
# Permute predicted masks to [N, num_classes, height, width]
pred_masks = tf.transpose(pred_masks, [0, 3, 1, 2])
# Only positive ROIs contribute to the loss. And only
# the class specific mask of each ROI.
positive_ix = tf.where(target_class_ids > 0)[:, 0]
positive_class_ids = tf.cast(
tf.gather(target_class_ids, positive_ix), tf.int64)
indices = tf.stack([positive_ix, positive_class_ids], axis=1)
# Gather the masks (predicted and true) that contribute to loss
y_true = tf.gather(target_masks, positive_ix)
y_pred = tf.gather_nd(pred_masks, indices)
# Compute binary cross entropy. If no positive ROIs, then return 0.
# shape: [batch, roi, num_classes]
loss = K.switch(tf.size(y_true) > 0,
K.binary_crossentropy(target=y_true, output=y_pred),
tf.constant(0.0))
loss = K.mean(loss)
return loss
############################################################
# Data Generator
############################################################
def load_image_gt(dataset, config, image_id, augment=False, augmentation=None,
use_mini_mask=False):
"""Load and return ground truth data for an image (image, mask, bounding boxes).
augment: (deprecated. Use augmentation instead). If true, apply random
image augmentation. Currently, only horizontal flipping is offered.
augmentation: Optional. An imgaug (https://github.com/aleju/imgaug) augmentation.
For example, passing imgaug.augmenters.Fliplr(0.5) flips images
right/left 50% of the time.
use_mini_mask: If False, returns full-size masks that are the same height
and width as the original image. These can be big, for example
1024x1024x100 (for 100 instances). Mini masks are smaller (MINI_MASK_SHAPE,
e.g. 56x56) and are generated by extracting the bounding box of the
object and resizing it to MINI_MASK_SHAPE.
Returns:
image: [height, width, 3]
shape: the original shape of the image before resizing and cropping.
class_ids: [instance_count] Integer class IDs
bbox: [instance_count, (y1, x1, y2, x2)]
mask: [height, width, instance_count]. The height and width are those
of the image unless use_mini_mask is True, in which case they are
defined in MINI_MASK_SHAPE.
"""
# Load image and mask
image = dataset.load_image(image_id)
mask, class_ids = dataset.load_mask(image_id)
original_shape = image.shape
image, window, scale, padding, crop = utils.resize_image(
image,
min_dim=config.IMAGE_MIN_DIM,
min_scale=config.IMAGE_MIN_SCALE,
max_dim=config.IMAGE_MAX_DIM,
mode=config.IMAGE_RESIZE_MODE)
mask = utils.resize_mask(mask, scale, padding, crop)
# Random horizontal flips.
# TODO: will be removed in a future update in favor of augmentation
if augment:
logging.warning("'augment' is deprecated. Use 'augmentation' instead.")
if random.randint(0, 1):
image = np.fliplr(image)
mask = np.fliplr(mask)
# Augmentation
# This requires the imgaug lib (https://github.com/aleju/imgaug)
if augmentation:
import imgaug
# Augmenters that are safe to apply to masks
# Some, such as Affine, have settings that make them unsafe, so always
# test your augmentation on masks
MASK_AUGMENTERS = ["Sequential", "SomeOf", "OneOf", "Sometimes",
"Fliplr", "Flipud", "CropAndPad",
"Affine", "PiecewiseAffine"]
def hook(images, augmenter, parents, default):
"""Determines which augmenters to apply to masks."""
return augmenter.__class__.__name__ in MASK_AUGMENTERS
# Store shapes before augmentation to compare
image_shape = image.shape
mask_shape = mask.shape
# Make augmenters deterministic to apply similarly to images and masks
det = augmentation.to_deterministic()
image = det.augment_image(image)
# Change mask to np.uint8 because imgaug doesn't support np.bool
mask = det.augment_image(mask.astype(np.uint8),
hooks=imgaug.HooksImages(activator=hook))
# Verify that shapes didn't change
assert image.shape == image_shape, "Augmentation shouldn't change image size"
assert mask.shape == mask_shape, "Augmentation shouldn't change mask size"
# Change mask back to bool
mask = mask.astype(np.bool)
# Note that some boxes might be all zeros if the corresponding mask got cropped out,
# so filter them out here.
_idx = np.sum(mask, axis=(0, 1)) > 0
mask = mask[:, :, _idx]
class_ids = class_ids[_idx]
# Bounding boxes. Note that some boxes might be all zeros
# if the corresponding mask got cropped out.
# bbox: [num_instances, (y1, x1, y2, x2)]
bbox = utils.extract_bboxes(mask)
# Active classes
# Different datasets have different classes, so track the
# classes supported in the dataset of this image.
active_class_ids = np.zeros([dataset.num_classes], dtype=np.int32)
source_class_ids = dataset.source_class_ids[dataset.image_info[image_id]["source"]]
active_class_ids[source_class_ids] = 1
# Resize masks to smaller size to reduce memory usage
if use_mini_mask:
mask = utils.minimize_mask(bbox, mask, config.MINI_MASK_SHAPE)
# Image meta data
image_meta = compose_image_meta(image_id, original_shape, image.shape,
window, scale, active_class_ids)
return image, image_meta, class_ids, bbox, mask
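# Editor's note on why mini masks help (rough arithmetic, assuming boolean masks
# stored one byte per value): 100 full-size 1024x1024 instance masks take about
# 100 MB, while 100 mini masks at a MINI_MASK_SHAPE of, say, 56x56 take about
# 0.3 MB, which is why training configs typically enable use_mini_mask.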
def build_detection_targets(rpn_rois, gt_class_ids, gt_boxes, gt_masks, config):
"""Generate targets for training Stage 2 classifier and mask heads.
This is not used in normal training. It's useful for debugging or to train
the Mask RCNN heads without using the RPN head.
Inputs:
rpn_rois: [N, (y1, x1, y2, x2)] proposal boxes.
gt_class_ids: [instance count] Integer class IDs
gt_boxes: [instance count, (y1, x1, y2, x2)]
gt_masks: [height, width, instance count] Ground truth masks. Can be full
size or mini-masks.
Returns:
rois: [TRAIN_ROIS_PER_IMAGE, (y1, x1, y2, x2)]
class_ids: [TRAIN_ROIS_PER_IMAGE]. Integer class IDs.
bboxes: [TRAIN_ROIS_PER_IMAGE, NUM_CLASSES, (dy, dx, log(dh), log(dw))]. Class-specific
bbox refinements.
masks: [TRAIN_ROIS_PER_IMAGE, height, width, NUM_CLASSES]. Class-specific masks cropped
to bbox boundaries and resized to neural network output size.
"""
assert rpn_rois.shape[0] > 0
assert gt_class_ids.dtype == np.int32, "Expected int but got {}".format(
gt_class_ids.dtype)
assert gt_boxes.dtype == np.int32, "Expected int but got {}".format(
gt_boxes.dtype)
assert gt_masks.dtype == np.bool_, "Expected bool but got {}".format(
gt_masks.dtype)
# It's common to add GT Boxes to ROIs but we don't do that here because
# according to XinLei Chen's paper, it doesn't help.
# Trim empty padding in gt_boxes and gt_masks parts
instance_ids = np.where(gt_class_ids > 0)[0]
assert instance_ids.shape[0] > 0, "Image must contain instances."
gt_class_ids = gt_class_ids[instance_ids]
gt_boxes = gt_boxes[instance_ids]
gt_masks = gt_masks[:, :, instance_ids]
# Compute areas of ROIs and ground truth boxes.
rpn_roi_area = (rpn_rois[:, 2] - rpn_rois[:, 0]) * \
(rpn_rois[:, 3] - rpn_rois[:, 1])
gt_box_area = (gt_boxes[:, 2] - gt_boxes[:, 0]) * \
(gt_boxes[:, 3] - gt_boxes[:, 1])
# Compute overlaps [rpn_rois, gt_boxes]
overlaps = np.zeros((rpn_rois.shape[0], gt_boxes.shape[0]))
for i in range(overlaps.shape[1]):
gt = gt_boxes[i]
overlaps[:, i] = utils.compute_iou(
gt, rpn_rois, gt_box_area[i], rpn_roi_area)
# Assign ROIs to GT boxes
rpn_roi_iou_argmax = np.argmax(overlaps, axis=1)
rpn_roi_iou_max = overlaps[np.arange(
overlaps.shape[0]), rpn_roi_iou_argmax]
# GT box assigned to each ROI
rpn_roi_gt_boxes = gt_boxes[rpn_roi_iou_argmax]
rpn_roi_gt_class_ids = gt_class_ids[rpn_roi_iou_argmax]
# Positive ROIs are those with >= 0.5 IoU with a GT box.
fg_ids = np.where(rpn_roi_iou_max > 0.5)[0]
# Negative ROIs are those with max IoU 0.1-0.5 (hard example mining)
# TODO: To hard example mine or not to hard example mine, that's the question
# bg_ids = np.where((rpn_roi_iou_max >= 0.1) & (rpn_roi_iou_max < 0.5))[0]
bg_ids = np.where(rpn_roi_iou_max < 0.5)[0]
# Subsample ROIs. Aim for 33% foreground.
# FG
fg_roi_count = int(config.TRAIN_ROIS_PER_IMAGE * config.ROI_POSITIVE_RATIO)
if fg_ids.shape[0] > fg_roi_count:
keep_fg_ids = np.random.choice(fg_ids, fg_roi_count, replace=False)
else:
keep_fg_ids = fg_ids
# BG
remaining = config.TRAIN_ROIS_PER_IMAGE - keep_fg_ids.shape[0]
if bg_ids.shape[0] > remaining:
keep_bg_ids = np.random.choice(bg_ids, remaining, replace=False)
else:
keep_bg_ids = bg_ids
# Combine indices of ROIs to keep
keep = np.concatenate([keep_fg_ids, keep_bg_ids])
# Need more?
remaining = config.TRAIN_ROIS_PER_IMAGE - keep.shape[0]
if remaining > 0:
# Looks like we don't have enough samples to maintain the desired
# balance. Reduce requirements and fill in the rest. This is
# likely different from the Mask RCNN paper.
# There is a small chance we have neither fg nor bg samples.
if keep.shape[0] == 0:
# Pick bg regions with easier IoU threshold
bg_ids = np.where(rpn_roi_iou_max < 0.5)[0]
assert bg_ids.shape[0] >= remaining
keep_bg_ids = np.random.choice(bg_ids, remaining, replace=False)
assert keep_bg_ids.shape[0] == remaining
keep = np.concatenate([keep, keep_bg_ids])
else:
# Fill the rest with repeated bg rois.
keep_extra_ids = np.random.choice(
keep_bg_ids, remaining, replace=True)
keep = np.concatenate([keep, keep_extra_ids])
assert keep.shape[0] == config.TRAIN_ROIS_PER_IMAGE, \
"keep doesn't match ROI batch size {}, {}".format(
keep.shape[0], config.TRAIN_ROIS_PER_IMAGE)
# Reset the gt boxes assigned to BG ROIs.
rpn_roi_gt_boxes[keep_bg_ids, :] = 0
rpn_roi_gt_class_ids[keep_bg_ids] = 0
# For each kept ROI, assign a class_id, and for FG ROIs also add bbox refinement.
rois = rpn_rois[keep]
roi_gt_boxes = rpn_roi_gt_boxes[keep]
roi_gt_class_ids = rpn_roi_gt_class_ids[keep]
roi_gt_assignment = rpn_roi_iou_argmax[keep]
# Class-aware bbox deltas. [dy, dx, log(dh), log(dw)]
bboxes = np.zeros((config.TRAIN_ROIS_PER_IMAGE,
config.NUM_CLASSES, 4), dtype=np.float32)
pos_ids = np.where(roi_gt_class_ids > 0)[0]
bboxes[pos_ids, roi_gt_class_ids[pos_ids]] = utils.box_refinement(
rois[pos_ids], roi_gt_boxes[pos_ids, :4])
# Normalize bbox refinements
bboxes /= config.BBOX_STD_DEV
# Generate class-specific target masks
masks = np.zeros((config.TRAIN_ROIS_PER_IMAGE, config.MASK_SHAPE[0], config.MASK_SHAPE[1], config.NUM_CLASSES),
dtype=np.float32)
for i in pos_ids:
class_id = roi_gt_class_ids[i]
assert class_id > 0, "class id must be greater than 0"
gt_id = roi_gt_assignment[i]
class_mask = gt_masks[:, :, gt_id]
if config.USE_MINI_MASK:
# Create a mask placeholder, the size of the image
placeholder = np.zeros(config.IMAGE_SHAPE[:2], dtype=bool)
# GT box
gt_y1, gt_x1, gt_y2, gt_x2 = gt_boxes[gt_id]
gt_w = gt_x2 - gt_x1
gt_h = gt_y2 - gt_y1
# Resize mini mask to size of GT box
placeholder[gt_y1:gt_y2, gt_x1:gt_x2] = \
np.round(utils.resize(class_mask, (gt_h, gt_w))).astype(bool)
# Place the mini mask in the placeholder
class_mask = placeholder
# Pick part of the mask and resize it
y1, x1, y2, x2 = rois[i].astype(np.int32)
m = class_mask[y1:y2, x1:x2]
mask = utils.resize(m, config.MASK_SHAPE)
masks[i, :, :, class_id] = mask
return rois, roi_gt_class_ids, bboxes, masks
def build_rpn_targets(image_shape, anchors, gt_class_ids, gt_boxes, config):
"""Given the anchors and GT boxes, compute overlaps and identify positive
anchors and deltas to refine them to match their corresponding GT boxes.
anchors: [num_anchors, (y1, x1, y2, x2)]
gt_class_ids: [num_gt_boxes] Integer class IDs.
gt_boxes: [num_gt_boxes, (y1, x1, y2, x2)]
Returns:
rpn_match: [N] (int32) matches between anchors and GT boxes.
1 = positive anchor, -1 = negative anchor, 0 = neutral
rpn_bbox: [N, (dy, dx, log(dh), log(dw))] Anchor bbox deltas.
"""
# RPN Match: 1 = positive anchor, -1 = negative anchor, 0 = neutral
rpn_match = np.zeros([anchors.shape[0]], dtype=np.int32)
# RPN bounding boxes: [max anchors per image, (dy, dx, log(dh), log(dw))]
rpn_bbox = np.zeros((config.RPN_TRAIN_ANCHORS_PER_IMAGE, 4))
# Handle COCO crowds
# A crowd box in COCO is a bounding box around several instances. Exclude
# them from training. A crowd box is given a negative class ID.
crowd_ix = np.where(gt_class_ids < 0)[0]
if crowd_ix.shape[0] > 0:
# Filter out crowds from ground truth class IDs and boxes
non_crowd_ix = np.where(gt_class_ids > 0)[0]
crowd_boxes = gt_boxes[crowd_ix]
gt_class_ids = gt_class_ids[non_crowd_ix]
gt_boxes = gt_boxes[non_crowd_ix]
# Compute overlaps with crowd boxes [anchors, crowds]
crowd_overlaps = utils.compute_overlaps(anchors, crowd_boxes)
crowd_iou_max = np.amax(crowd_overlaps, axis=1)
no_crowd_bool = (crowd_iou_max < 0.001)
else:
# No anchor intersects a crowd box
no_crowd_bool = np.ones([anchors.shape[0]], dtype=bool)
# Compute overlaps [num_anchors, num_gt_boxes]
overlaps = utils.compute_overlaps(anchors, gt_boxes)
# Match anchors to GT Boxes
# If an anchor overlaps a GT box with IoU >= 0.7 then it's positive.
# If an anchor overlaps a GT box with IoU < 0.3 then it's negative.
# Neutral anchors are those that don't match the conditions above,
# and they don't influence the loss function.
# However, don't keep any GT box unmatched (rare, but happens). Instead,
# match it to the closest anchor (even if its max IoU is < 0.3).
#
# 1. Set negative anchors first. They get overwritten below if a GT box is
# matched to them. Skip boxes in crowd areas.
anchor_iou_argmax = np.argmax(overlaps, axis=1)
anchor_iou_max = overlaps[np.arange(overlaps.shape[0]), anchor_iou_argmax]
rpn_match[(anchor_iou_max < 0.3) & (no_crowd_bool)] = -1
# 2. Set an anchor for each GT box (regardless of IoU value).
# TODO: If multiple anchors have the same IoU match all of them
gt_iou_argmax = np.argmax(overlaps, axis=0)
rpn_match[gt_iou_argmax] = 1
# 3. Set anchors with high overlap as positive.
rpn_match[anchor_iou_max >= 0.7] = 1
# Subsample to balance positive and negative anchors
# Don't let positives be more than half the anchors
ids = np.where(rpn_match == 1)[0]
extra = len(ids) - (config.RPN_TRAIN_ANCHORS_PER_IMAGE // 2)
if extra > 0:
# Reset the extra ones to neutral
ids = np.random.choice(ids, extra, replace=False)
rpn_match[ids] = 0
# Same for negative proposals
ids = np.where(rpn_match == -1)[0]
extra = len(ids) - (config.RPN_TRAIN_ANCHORS_PER_IMAGE -
np.sum(rpn_match == 1))
if extra > 0:
# Reset the extra ones to neutral
ids = np.random.choice(ids, extra, replace=False)
rpn_match[ids] = 0
# For positive anchors, compute shift and scale needed to transform them
# to match the corresponding GT boxes.
ids = np.where(rpn_match == 1)[0]
ix = 0 # index into rpn_bbox
# TODO: use box_refinement() rather than duplicating the code here
for i, a in zip(ids, anchors[ids]):
# Closest gt box (it might have IoU < 0.7)
gt = gt_boxes[anchor_iou_argmax[i]]
# Convert coordinates to center plus width/height.
# GT Box
gt_h = gt[2] - gt[0]
gt_w = gt[3] - gt[1]
gt_center_y = gt[0] + 0.5 * gt_h
gt_center_x = gt[1] + 0.5 * gt_w
# Anchor
a_h = a[2] - a[0]
a_w = a[3] - a[1]
a_center_y = a[0] + 0.5 * a_h
a_center_x = a[1] + 0.5 * a_w
# Compute the bbox refinement that the RPN should predict.
rpn_bbox[ix] = [
(gt_center_y - a_center_y) / a_h,
(gt_center_x - a_center_x) / a_w,
np.log(gt_h / a_h),
np.log(gt_w / a_w),
]
# Normalize
rpn_bbox[ix] /= config.RPN_BBOX_STD_DEV
ix += 1
return rpn_match, rpn_bbox
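# Worked example of the refinement computed above (editor's sketch): an anchor
# (0, 0, 10, 10) matched to a GT box (0, 0, 20, 20) has centers (5, 5) and
# (10, 10) and sizes 10 and 20, so the targets are dy = (10 - 5) / 10 = 0.5,
# dx = 0.5, and log(dh) = log(dw) = log(2) ~= 0.693, before dividing by
# RPN_BBOX_STD_DEV.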
def generate_random_rois(image_shape, count, gt_class_ids, gt_boxes):
"""Generates ROI proposals similar to what a region proposal network
would generate.
image_shape: [Height, Width, Depth]
count: Number of ROIs to generate
gt_class_ids: [N] Integer ground truth class IDs
gt_boxes: [N, (y1, x1, y2, x2)] Ground truth boxes in pixels.
Returns: [count, (y1, x1, y2, x2)] ROI boxes in pixels.
"""
# placeholder
rois = np.zeros((count, 4), dtype=np.int32)
# Generate random ROIs around GT boxes (90% of count)
rois_per_box = int(0.9 * count / gt_boxes.shape[0])
for i in range(gt_boxes.shape[0]):
gt_y1, gt_x1, gt_y2, gt_x2 = gt_boxes[i]
h = gt_y2 - gt_y1
w = gt_x2 - gt_x1
# random boundaries
r_y1 = max(gt_y1 - h, 0)
r_y2 = min(gt_y2 + h, image_shape[0])
r_x1 = max(gt_x1 - w, 0)
r_x2 = min(gt_x2 + w, image_shape[1])
# To avoid generating boxes with zero area, we generate double what
# we need and filter out the extra. If we get fewer valid boxes
# than we need, we loop and try again.
while True:
y1y2 = np.random.randint(r_y1, r_y2, (rois_per_box * 2, 2))
x1x2 = np.random.randint(r_x1, r_x2, (rois_per_box * 2, 2))
# Filter out zero area boxes
threshold = 1
y1y2 = y1y2[np.abs(y1y2[:, 0] - y1y2[:, 1]) >=
threshold][:rois_per_box]
x1x2 = x1x2[np.abs(x1x2[:, 0] - x1x2[:, 1]) >=
threshold][:rois_per_box]
if y1y2.shape[0] == rois_per_box and x1x2.shape[0] == rois_per_box:
break
# Sort on axis 1 to ensure x1 <= x2 and y1 <= y2 and then reshape
# into x1, y1, x2, y2 order
x1, x2 = np.split(np.sort(x1x2, axis=1), 2, axis=1)
y1, y2 = np.split(np.sort(y1y2, axis=1), 2, axis=1)
box_rois = np.hstack([y1, x1, y2, x2])
rois[rois_per_box * i:rois_per_box * (i + 1)] = box_rois
# Generate random ROIs anywhere in the image (10% of count)
remaining_count = count - (rois_per_box * gt_boxes.shape[0])
# To avoid generating boxes with zero area, we generate double what
# we need and filter out the extra. If we get fewer valid boxes
# than we need, we loop and try again.
while True:
y1y2 = np.random.randint(0, image_shape[0], (remaining_count * 2, 2))
x1x2 = np.random.randint(0, image_shape[1], (remaining_count * 2, 2))
# Filter out zero area boxes
threshold = 1
y1y2 = y1y2[np.abs(y1y2[:, 0] - y1y2[:, 1]) >=
threshold][:remaining_count]
x1x2 = x1x2[np.abs(x1x2[:, 0] - x1x2[:, 1]) >=
threshold][:remaining_count]
if y1y2.shape[0] == remaining_count and x1x2.shape[0] == remaining_count:
break
# Sort on axis 1 to ensure x1 <= x2 and y1 <= y2 and then reshape
# into x1, y1, x2, y2 order
x1, x2 = np.split(np.sort(x1x2, axis=1), 2, axis=1)
y1, y2 = np.split(np.sort(y1y2, axis=1), 2, axis=1)
global_rois = np.hstack([y1, x1, y2, x2])
rois[-remaining_count:] = global_rois
return rois
def data_generator(dataset, config, shuffle=True, augment=False, augmentation=None,
random_rois=0, batch_size=1, detection_targets=False,
no_augmentation_sources=None):
"""A generator that returns images and corresponding target class ids,
bounding box deltas, and masks.
dataset: The Dataset object to pick data from
config: The model config object
shuffle: If True, shuffles the samples before every epoch
augment: (deprecated. Use augmentation instead). If true, apply random
image augmentation. Currently, only horizontal flipping is offered.
augmentation: Optional. An imgaug (https://github.com/aleju/imgaug) augmentation.
For example, passing imgaug.augmenters.Fliplr(0.5) flips images
right/left 50% of the time.
random_rois: If > 0 then generate proposals to be used to train the
network classifier and mask heads. Useful if training
the Mask RCNN part without the RPN.
batch_size: How many images to return in each call
detection_targets: If True, generate detection targets (class IDs, bbox
deltas, and masks). Typically for debugging or visualizations because
in training, detection targets are generated by DetectionTargetLayer.
no_augmentation_sources: Optional. List of sources to exclude for
augmentation. A source is a string that identifies a dataset and is
defined in the Dataset class.
Returns a Python generator. Upon calling next() on it, the
generator returns two lists, inputs and outputs. The contents
of the lists differ depending on the received arguments:
inputs list:
- images: [batch, H, W, C]
- image_meta: [batch, (meta data)] Image details. See compose_image_meta()
- rpn_match: [batch, N] Integer (1=positive anchor, -1=negative, 0=neutral)
- rpn_bbox: [batch, N, (dy, dx, log(dh), log(dw))] Anchor bbox deltas.
- gt_class_ids: [batch, MAX_GT_INSTANCES] Integer class IDs
- gt_boxes: [batch, MAX_GT_INSTANCES, (y1, x1, y2, x2)]
- gt_masks: [batch, height, width, MAX_GT_INSTANCES]. The height and width
are those of the image unless use_mini_mask is True, in which
case they are defined in MINI_MASK_SHAPE.
outputs list: Usually empty in regular training. But if detection_targets
is True then the outputs list contains target class_ids, bbox deltas,
and masks.
"""
b = 0 # batch item index
image_index = -1
image_ids = np.copy(dataset.image_ids)
error_count = 0
no_augmentation_sources = no_augmentation_sources or []
# Anchors
# [anchor_count, (y1, x1, y2, x2)]
backbone_shapes = compute_backbone_shapes(config, config.IMAGE_SHAPE)
anchors = utils.generate_pyramid_anchors(config.RPN_ANCHOR_SCALES,
config.RPN_ANCHOR_RATIOS,
backbone_shapes,
config.BACKBONE_STRIDES,
config.RPN_ANCHOR_STRIDE)
# Keras requires a generator to run indefinitely.
while True:
try:
# Increment index to pick next image. Shuffle if at the start of an epoch.
image_index = (image_index + 1) % len(image_ids)
if shuffle and image_index == 0:
np.random.shuffle(image_ids)
# Get GT bounding boxes and masks for image.
image_id = image_ids[image_index]
# If the image source is not to be augmented pass None as augmentation
if dataset.image_info[image_id]['source'] in no_augmentation_sources:
image, image_meta, gt_class_ids, gt_boxes, gt_masks = \
load_image_gt(dataset, config, image_id, augment=augment,
augmentation=None,
use_mini_mask=config.USE_MINI_MASK)
else:
image, image_meta, gt_class_ids, gt_boxes, gt_masks = \
load_image_gt(dataset, config, image_id, augment=augment,
augmentation=augmentation,
use_mini_mask=config.USE_MINI_MASK)
# Skip images that have no instances. This can happen in cases
# where we train on a subset of classes and the image doesn't
# have any of the classes we care about.
if not np.any(gt_class_ids > 0):
continue
# RPN Targets
rpn_match, rpn_bbox = build_rpn_targets(image.shape, anchors,
gt_class_ids, gt_boxes, config)
# Mask R-CNN Targets
if random_rois:
rpn_rois = generate_random_rois(
image.shape, random_rois, gt_class_ids, gt_boxes)
if detection_targets:
rois, mrcnn_class_ids, mrcnn_bbox, mrcnn_mask =\
build_detection_targets(
rpn_rois, gt_class_ids, gt_boxes, gt_masks, config)
# Init batch arrays
if b == 0:
batch_image_meta = np.zeros(
(batch_size,) + image_meta.shape, dtype=image_meta.dtype)
batch_rpn_match = np.zeros(
[batch_size, anchors.shape[0], 1], dtype=rpn_match.dtype)
batch_rpn_bbox = np.zeros(
[batch_size, config.RPN_TRAIN_ANCHORS_PER_IMAGE, 4], dtype=rpn_bbox.dtype)
batch_images = np.zeros(
(batch_size,) + image.shape, dtype=np.float32)
batch_gt_class_ids = np.zeros(
(batch_size, config.MAX_GT_INSTANCES), dtype=np.int32)
batch_gt_boxes = np.zeros(
(batch_size, config.MAX_GT_INSTANCES, 4), dtype=np.int32)
batch_gt_masks = np.zeros(
(batch_size, gt_masks.shape[0], gt_masks.shape[1],
config.MAX_GT_INSTANCES), dtype=gt_masks.dtype)
if random_rois:
batch_rpn_rois = np.zeros(
(batch_size, rpn_rois.shape[0], 4), dtype=rpn_rois.dtype)
if detection_targets:
batch_rois = np.zeros(
(batch_size,) + rois.shape, dtype=rois.dtype)
batch_mrcnn_class_ids = np.zeros(
(batch_size,) + mrcnn_class_ids.shape, dtype=mrcnn_class_ids.dtype)
batch_mrcnn_bbox = np.zeros(
(batch_size,) + mrcnn_bbox.shape, dtype=mrcnn_bbox.dtype)
batch_mrcnn_mask = np.zeros(
(batch_size,) + mrcnn_mask.shape, dtype=mrcnn_mask.dtype)
# If more instances than fits in the array, sub-sample from them.
if gt_boxes.shape[0] > config.MAX_GT_INSTANCES:
ids = np.random.choice(
np.arange(gt_boxes.shape[0]), config.MAX_GT_INSTANCES, replace=False)
gt_class_ids = gt_class_ids[ids]
gt_boxes = gt_boxes[ids]
gt_masks = gt_masks[:, :, ids]
# Add to batch
batch_image_meta[b] = image_meta
batch_rpn_match[b] = rpn_match[:, np.newaxis]
batch_rpn_bbox[b] = rpn_bbox
batch_images[b] = mold_image(image.astype(np.float32), config)
batch_gt_class_ids[b, :gt_class_ids.shape[0]] = gt_class_ids
batch_gt_boxes[b, :gt_boxes.shape[0]] = gt_boxes
batch_gt_masks[b, :, :, :gt_masks.shape[-1]] = gt_masks
if random_rois:
batch_rpn_rois[b] = rpn_rois
if detection_targets:
batch_rois[b] = rois
batch_mrcnn_class_ids[b] = mrcnn_class_ids
batch_mrcnn_bbox[b] = mrcnn_bbox
batch_mrcnn_mask[b] = mrcnn_mask
b += 1
# Batch full?
if b >= batch_size:
inputs = [batch_images, batch_image_meta, batch_rpn_match, batch_rpn_bbox,
batch_gt_class_ids, batch_gt_boxes, batch_gt_masks]
outputs = []
if random_rois:
inputs.extend([batch_rpn_rois])
if detection_targets:
inputs.extend([batch_rois])
# Keras requires that output and targets have the same number of dimensions
batch_mrcnn_class_ids = np.expand_dims(
batch_mrcnn_class_ids, -1)
outputs.extend(
[batch_mrcnn_class_ids, batch_mrcnn_bbox, batch_mrcnn_mask])
yield inputs, outputs
# start a new batch
b = 0
except (GeneratorExit, KeyboardInterrupt):
raise
except:
# Log it and skip the image
logging.exception("Error processing image {}".format(
dataset.image_info[image_id]))
error_count += 1
if error_count > 5:
raise
############################################################
# MaskRCNN Class
############################################################
class MaskRCNN():
"""Encapsulates the Mask RCNN model functionality.
The actual Keras model is in the keras_model property.
"""
def __init__(self, mode, config, model_dir):
"""
mode: Either "training" or "inference"
config: A Sub-class of the Config class
model_dir: Directory to save training logs and trained weights
"""
assert mode in ['training', 'inference']
self.mode = mode
self.config = config
self.model_dir = model_dir
self.set_log_dir()
self.keras_model = self.build(mode=mode, config=config)
def build(self, mode, config):
"""Build Mask R-CNN architecture.
input_shape: The shape of the input image.
mode: Either "training" or "inference". The inputs and
outputs of the model differ accordingly.
"""
assert mode in ['training', 'inference']
# Image size must be divisible by 2 multiple times
h, w = config.IMAGE_SHAPE[:2]
if h / 2**6 != int(h / 2**6) or w / 2**6 != int(w / 2**6):
raise Exception("Image size must be dividable by 2 at least 6 times "
"to avoid fractions when downscaling and upscaling."
"For example, use 256, 320, 384, 448, 512, ... etc. ")
# Inputs
input_image = KL.Input(
shape=[None, None, config.IMAGE_SHAPE[2]], name="input_image")
input_image_meta = KL.Input(shape=[config.IMAGE_META_SIZE],
name="input_image_meta")
if mode == "training":
# RPN GT
input_rpn_match = KL.Input(
shape=[None, 1], name="input_rpn_match", dtype=tf.int32)
input_rpn_bbox = KL.Input(
shape=[None, 4], name="input_rpn_bbox", dtype=tf.float32)
# Detection GT (class IDs, bounding boxes, and masks)
# 1. GT Class IDs (zero padded)
input_gt_class_ids = KL.Input(
shape=[None], name="input_gt_class_ids", dtype=tf.int32)
# 2. GT Boxes in pixels (zero padded)
# [batch, MAX_GT_INSTANCES, (y1, x1, y2, x2)] in image coordinates
input_gt_boxes = KL.Input(
shape=[None, 4], name="input_gt_boxes", dtype=tf.float32)
# Normalize coordinates
gt_boxes = KL.Lambda(lambda x: norm_boxes_graph(
x, K.shape(input_image)[1:3]))(input_gt_boxes)
# 3. GT Masks (zero padded)
# [batch, height, width, MAX_GT_INSTANCES]
if config.USE_MINI_MASK:
input_gt_masks = KL.Input(
shape=[config.MINI_MASK_SHAPE[0],
config.MINI_MASK_SHAPE[1], None],
name="input_gt_masks", dtype=bool)
else:
input_gt_masks = KL.Input(
shape=[config.IMAGE_SHAPE[0], config.IMAGE_SHAPE[1], None],
name="input_gt_masks", dtype=bool)
elif mode == "inference":
# Anchors in normalized coordinates
input_anchors = KL.Input(shape=[None, 4], name="input_anchors")
# Build the shared convolutional layers.
# Bottom-up Layers
# Returns a list of the last layers of each stage, 5 in total.
# Stage 5 is included here (stage5=True) because the FPN top-down pathway starts at C5.
if callable(config.BACKBONE):
_, C2, C3, C4, C5 = config.BACKBONE(input_image, stage5=True,
train_bn=config.TRAIN_BN)
else:
_, C2, C3, C4, C5 = resnet_graph(input_image, config.BACKBONE,
stage5=True, train_bn=config.TRAIN_BN)
# Top-down Layers
# TODO: add assert to verify feature map sizes match what's in config
P5 = KL.Conv2D(config.TOP_DOWN_PYRAMID_SIZE, (1, 1), name='fpn_c5p5')(C5)
P4 = KL.Add(name="fpn_p4add")([
KL.UpSampling2D(size=(2, 2), name="fpn_p5upsampled")(P5),
KL.Conv2D(config.TOP_DOWN_PYRAMID_SIZE, (1, 1), name='fpn_c4p4')(C4)])
P3 = KL.Add(name="fpn_p3add")([
KL.UpSampling2D(size=(2, 2), name="fpn_p4upsampled")(P4),
KL.Conv2D(config.TOP_DOWN_PYRAMID_SIZE, (1, 1), name='fpn_c3p3')(C3)])
P2 = KL.Add(name="fpn_p2add")([
KL.UpSampling2D(size=(2, 2), name="fpn_p3upsampled")(P3),
KL.Conv2D(config.TOP_DOWN_PYRAMID_SIZE, (1, 1), name='fpn_c2p2')(C2)])
# Attach 3x3 conv to all P layers to get the final feature maps.
P2 = KL.Conv2D(config.TOP_DOWN_PYRAMID_SIZE, (3, 3), padding="SAME", name="fpn_p2")(P2)
P3 = KL.Conv2D(config.TOP_DOWN_PYRAMID_SIZE, (3, 3), padding="SAME", name="fpn_p3")(P3)
P4 = KL.Conv2D(config.TOP_DOWN_PYRAMID_SIZE, (3, 3), padding="SAME", name="fpn_p4")(P4)
P5 = KL.Conv2D(config.TOP_DOWN_PYRAMID_SIZE, (3, 3), padding="SAME", name="fpn_p5")(P5)
# P6 is used for the 5th anchor scale in RPN. Generated by
# subsampling from P5 with stride of 2.
P6 = KL.MaxPooling2D(pool_size=(1, 1), strides=2, name="fpn_p6")(P5)
# Note that P6 is used in RPN, but not in the classifier heads.
rpn_feature_maps = [P2, P3, P4, P5, P6]
mrcnn_feature_maps = [P2, P3, P4, P5]
# Anchors
if mode == "training":
anchors = self.get_anchors(config.IMAGE_SHAPE)
# Duplicate across the batch dimension because Keras requires it
# TODO: can this be optimized to avoid duplicating the anchors?
anchors = np.broadcast_to(anchors, (config.BATCH_SIZE,) + anchors.shape)
# A hack to get around Keras's bad support for constants
anchors = KL.Lambda(lambda x: tf.Variable(anchors), name="anchors")(input_image)
else:
anchors = input_anchors
# RPN Model
rpn = build_rpn_model(config.RPN_ANCHOR_STRIDE,
len(config.RPN_ANCHOR_RATIOS), config.TOP_DOWN_PYRAMID_SIZE)
# Loop through pyramid layers
layer_outputs = [] # list of lists
for p in rpn_feature_maps:
layer_outputs.append(rpn([p]))
# Concatenate layer outputs
# Convert from list of lists of level outputs to list of lists
# of outputs across levels.
# e.g. [[a1, b1, c1], [a2, b2, c2]] => [[a1, a2], [b1, b2], [c1, c2]]
output_names = ["rpn_class_logits", "rpn_class", "rpn_bbox"]
outputs = list(zip(*layer_outputs))
outputs = [KL.Concatenate(axis=1, name=n)(list(o))
for o, n in zip(outputs, output_names)]
rpn_class_logits, rpn_class, rpn_bbox = outputs
# Generate proposals
# Proposals are [batch, N, (y1, x1, y2, x2)] in normalized coordinates
# and zero padded.
proposal_count = config.POST_NMS_ROIS_TRAINING if mode == "training"\
else config.POST_NMS_ROIS_INFERENCE
rpn_rois = ProposalLayer(
proposal_count=proposal_count,
nms_threshold=config.RPN_NMS_THRESHOLD,
name="ROI",
config=config)([rpn_class, rpn_bbox, anchors])
if mode == "training":
# Class ID mask to mark class IDs supported by the dataset the image
# came from.
active_class_ids = KL.Lambda(
lambda x: parse_image_meta_graph(x)["active_class_ids"]
)(input_image_meta)
if not config.USE_RPN_ROIS:
# Ignore predicted ROIs and use ROIs provided as an input.
input_rois = KL.Input(shape=[config.POST_NMS_ROIS_TRAINING, 4],
name="input_roi", dtype=np.int32)
# Normalize coordinates
target_rois = KL.Lambda(lambda x: norm_boxes_graph(
x, K.shape(input_image)[1:3]))(input_rois)
else:
target_rois = rpn_rois
# Generate detection targets
# Subsamples proposals and generates target outputs for training
# Note that proposal class IDs, gt_boxes, and gt_masks are zero
# padded. Equally, returned rois and targets are zero padded.
rois, target_class_ids, target_bbox, target_mask =\
DetectionTargetLayer(config, name="proposal_targets")([
target_rois, input_gt_class_ids, gt_boxes, input_gt_masks])
# Network Heads
# TODO: verify that this handles zero padded ROIs
mrcnn_class_logits, mrcnn_class, mrcnn_bbox =\
fpn_classifier_graph(rois, mrcnn_feature_maps, input_image_meta,
config.POOL_SIZE, config.NUM_CLASSES,
train_bn=config.TRAIN_BN,
fc_layers_size=config.FPN_CLASSIF_FC_LAYERS_SIZE)
mrcnn_mask = build_fpn_mask_graph(rois, mrcnn_feature_maps,
input_image_meta,
config.MASK_POOL_SIZE,
config.NUM_CLASSES,
train_bn=config.TRAIN_BN)
            # TODO: clean up (use tf.identity if necessary)
output_rois = KL.Lambda(lambda x: x * 1, name="output_rois")(rois)
# Losses
rpn_class_loss = KL.Lambda(lambda x: rpn_class_loss_graph(*x), name="rpn_class_loss")(
[input_rpn_match, rpn_class_logits])
rpn_bbox_loss = KL.Lambda(lambda x: rpn_bbox_loss_graph(config, *x), name="rpn_bbox_loss")(
[input_rpn_bbox, input_rpn_match, rpn_bbox])
class_loss = KL.Lambda(lambda x: mrcnn_class_loss_graph(*x), name="mrcnn_class_loss")(
[target_class_ids, mrcnn_class_logits, active_class_ids])
bbox_loss = KL.Lambda(lambda x: mrcnn_bbox_loss_graph(*x), name="mrcnn_bbox_loss")(
[target_bbox, target_class_ids, mrcnn_bbox])
mask_loss = KL.Lambda(lambda x: mrcnn_mask_loss_graph(*x), name="mrcnn_mask_loss")(
[target_mask, target_class_ids, mrcnn_mask])
# Model
inputs = [input_image, input_image_meta,
input_rpn_match, input_rpn_bbox, input_gt_class_ids, input_gt_boxes, input_gt_masks]
if not config.USE_RPN_ROIS:
inputs.append(input_rois)
outputs = [rpn_class_logits, rpn_class, rpn_bbox,
mrcnn_class_logits, mrcnn_class, mrcnn_bbox, mrcnn_mask,
rpn_rois, output_rois,
rpn_class_loss, rpn_bbox_loss, class_loss, bbox_loss, mask_loss]
model = KM.Model(inputs, outputs, name='mask_rcnn')
else:
# Network Heads
# Proposal classifier and BBox regressor heads
mrcnn_class_logits, mrcnn_class, mrcnn_bbox =\
fpn_classifier_graph(rpn_rois, mrcnn_feature_maps, input_image_meta,
config.POOL_SIZE, config.NUM_CLASSES,
train_bn=config.TRAIN_BN,
fc_layers_size=config.FPN_CLASSIF_FC_LAYERS_SIZE)
# Detections
# output is [batch, num_detections, (y1, x1, y2, x2, class_id, score)] in
# normalized coordinates
detections = DetectionLayer(config, name="mrcnn_detection")(
[rpn_rois, mrcnn_class, mrcnn_bbox, input_image_meta])
# Create masks for detections
detection_boxes = KL.Lambda(lambda x: x[..., :4])(detections)
mrcnn_mask = build_fpn_mask_graph(detection_boxes, mrcnn_feature_maps,
input_image_meta,
config.MASK_POOL_SIZE,
config.NUM_CLASSES,
train_bn=config.TRAIN_BN)
model = KM.Model([input_image, input_image_meta, input_anchors],
[detections, mrcnn_class, mrcnn_bbox,
mrcnn_mask, rpn_rois, rpn_class, rpn_bbox],
name='mask_rcnn')
# Add multi-GPU support.
if config.GPU_COUNT > 1:
from mrcnn.parallel_model import ParallelModel
model = ParallelModel(model, config.GPU_COUNT)
return model
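    # Illustrative usage of build() -- a sketch, not additional API. The graph is
    # normally constructed through the enclosing class constructor, which calls
    # build() internally; the `MaskRCNN`/`InferenceConfig` names and the log
    # directory below are assumptions used only for illustration:
    #
    #     model = MaskRCNN(mode="inference", config=InferenceConfig(),
    #                      model_dir="/tmp/mask_rcnn_logs")
    #     keras_model = model.keras_model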
def find_last(self):
"""Finds the last checkpoint file of the last trained model in the
model directory.
Returns:
The path of the last checkpoint file
"""
# Get directory names. Each directory corresponds to a model
dir_names = next(os.walk(self.model_dir))[1]
key = self.config.NAME.lower()
dir_names = filter(lambda f: f.startswith(key), dir_names)
dir_names = sorted(dir_names)
if not dir_names:
import errno
raise FileNotFoundError(
errno.ENOENT,
"Could not find model directory under {}".format(self.model_dir))
# Pick last directory
dir_name = os.path.join(self.model_dir, dir_names[-1])
# Find the last checkpoint
checkpoints = next(os.walk(dir_name))[2]
checkpoints = filter(lambda f: f.startswith("mask_rcnn"), checkpoints)
checkpoints = sorted(checkpoints)
if not checkpoints:
import errno
raise FileNotFoundError(
errno.ENOENT, "Could not find weight files in {}".format(dir_name))
checkpoint = os.path.join(dir_name, checkpoints[-1])
return checkpoint
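    # Example (hedged sketch): resuming from the most recent checkpoint is
    # typically a one-liner, assuming at least one earlier training run has
    # already written a mask_rcnn_*.h5 file under the model directory:
    #
    #     model.load_weights(model.find_last(), by_name=True)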
def load_weights(self, filepath, by_name=False, exclude=None):
"""Modified version of the corresponding Keras function with
the addition of multi-GPU support and the ability to exclude
some layers from loading.
exclude: list of layer names to exclude
"""
import h5py
# Conditional import to support versions of Keras before 2.2
# TODO: remove in about 6 months (end of 2018)
try:
from keras.engine import saving
except ImportError:
# Keras before 2.2 used the 'topology' namespace.
from keras.engine import topology as saving
if exclude:
by_name = True
if h5py is None:
raise ImportError('`load_weights` requires h5py.')
f = h5py.File(filepath, mode='r')
if 'layer_names' not in f.attrs and 'model_weights' in f:
f = f['model_weights']
# In multi-GPU training, we wrap the model. Get layers
# of the inner model because they have the weights.
keras_model = self.keras_model
layers = keras_model.inner_model.layers if hasattr(keras_model, "inner_model")\
else keras_model.layers
# Exclude some layers
if exclude:
layers = filter(lambda l: l.name not in exclude, layers)
if by_name:
saving.load_weights_from_hdf5_group_by_name(f, layers)
else:
saving.load_weights_from_hdf5_group(f, layers)
if hasattr(f, 'close'):
f.close()
# Update the log directory
self.set_log_dir(filepath)
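    # Example (assumptions: a COCO-pretrained weights file is available locally
    # and the target dataset has a different number of classes). The class-
    # specific output layers are commonly excluded so their shapes may differ:
    #
    #     model.load_weights("mask_rcnn_coco.h5", by_name=True,
    #                        exclude=["mrcnn_class_logits", "mrcnn_bbox_fc",
    #                                 "mrcnn_bbox", "mrcnn_mask"])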
def get_imagenet_weights(self):
"""Downloads ImageNet trained weights from Keras.
Returns path to weights file.
"""
from keras.utils.data_utils import get_file
TF_WEIGHTS_PATH_NO_TOP = 'https://github.com/fchollet/deep-learning-models/'\
'releases/download/v0.2/'\
'resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5'
weights_path = get_file('resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5',
TF_WEIGHTS_PATH_NO_TOP,
cache_subdir='models',
md5_hash='a268eb855778b3df3c7506639542a6af')
return weights_path
def compile(self, learning_rate, momentum):
"""Gets the model ready for training. Adds losses, regularization, and
metrics. Then calls the Keras compile() function.
"""
# Optimizer object
optimizer = keras.optimizers.SGD(
lr=learning_rate, momentum=momentum,
clipnorm=self.config.GRADIENT_CLIP_NORM)
# Add Losses
# First, clear previously set losses to avoid duplication
self.keras_model._losses = []
self.keras_model._per_input_losses = {}
loss_names = [
"rpn_class_loss", "rpn_bbox_loss",
"mrcnn_class_loss", "mrcnn_bbox_loss", "mrcnn_mask_loss"]
for name in loss_names:
layer = self.keras_model.get_layer(name)
if layer.output in self.keras_model.losses:
continue
loss = (
tf.reduce_mean(layer.output, keepdims=True)
* self.config.LOSS_WEIGHTS.get(name, 1.))
self.keras_model.add_loss(loss)
# Add L2 Regularization
# Skip gamma and beta weights of batch normalization layers.
reg_losses = [
keras.regularizers.l2(self.config.WEIGHT_DECAY)(w) / tf.cast(tf.size(w), tf.float32)
for w in self.keras_model.trainable_weights
if 'gamma' not in w.name and 'beta' not in w.name]
self.keras_model.add_loss(tf.add_n(reg_losses))
# Compile
self.keras_model.compile(
optimizer=optimizer,
loss=[None] * len(self.keras_model.outputs))
# Add metrics for losses
for name in loss_names:
if name in self.keras_model.metrics_names:
continue
layer = self.keras_model.get_layer(name)
self.keras_model.metrics_names.append(name)
loss = (
tf.reduce_mean(layer.output, keepdims=True)
* self.config.LOSS_WEIGHTS.get(name, 1.))
self.keras_model.metrics_tensors.append(loss)
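    # Note on loss weighting: each of the five losses above is scaled by
    # config.LOSS_WEIGHTS.get(name, 1.), so the relative weights can be tuned
    # from the config without touching this method, e.g. (illustrative values)
    #     LOSS_WEIGHTS = {"rpn_class_loss": 1., "mrcnn_mask_loss": 2.}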
def set_trainable(self, layer_regex, keras_model=None, indent=0, verbose=1):
"""Sets model layers as trainable if their names match
the given regular expression.
"""
# Print message on the first call (but not on recursive calls)
if verbose > 0 and keras_model is None:
log("Selecting layers to train")
keras_model = keras_model or self.keras_model
# In multi-GPU training, we wrap the model. Get layers
# of the inner model because they have the weights.
layers = keras_model.inner_model.layers if hasattr(keras_model, "inner_model")\
else keras_model.layers
for layer in layers:
# Is the layer a model?
if layer.__class__.__name__ == 'Model':
print("In model: ", layer.name)
self.set_trainable(
layer_regex, keras_model=layer, indent=indent + 4)
continue
if not layer.weights:
continue
# Is it trainable?
trainable = bool(re.fullmatch(layer_regex, layer.name))
# Update layer. If layer is a container, update inner layer.
if layer.__class__.__name__ == 'TimeDistributed':
layer.layer.trainable = trainable
else:
layer.trainable = trainable
# Print trainable layer names
if trainable and verbose > 0:
log("{}{:20} ({})".format(" " * indent, layer.name,
layer.__class__.__name__))
def set_log_dir(self, model_path=None):
"""Sets the model log directory and epoch counter.
model_path: If None, or a format different from what this code uses
then set a new log directory and start epochs from 0. Otherwise,
extract the log directory and the epoch counter from the file
name.
"""
# Set date and epoch counter as if starting a new model
self.epoch = 0
now = datetime.datetime.now()
# If we have a model path with date and epochs use them
if model_path:
# Continue from we left of. Get epoch and date from the file name
# A sample model path might look like:
# \path\to\logs\coco20171029T2315\mask_rcnn_coco_0001.h5 (Windows)
# /path/to/logs/coco20171029T2315/mask_rcnn_coco_0001.h5 (Linux)
regex = r".*[/\\][\w-]+(\d{4})(\d{2})(\d{2})T(\d{2})(\d{2})[/\\]mask\_rcnn\_[\w-]+(\d{4})\.h5"
m = re.match(regex, model_path)
if m:
now = datetime.datetime(int(m.group(1)), int(m.group(2)), int(m.group(3)),
int(m.group(4)), int(m.group(5)))
# Epoch number in file is 1-based, and in Keras code it's 0-based.
# So, adjust for that then increment by one to start from the next epoch
self.epoch = int(m.group(6)) - 1 + 1
print('Re-starting from epoch %d' % self.epoch)
# Directory for training logs
self.log_dir = os.path.join(self.model_dir, "{}{:%Y%m%dT%H%M}".format(
self.config.NAME.lower(), now))
# Path to save after each epoch. Include placeholders that get filled by Keras.
self.checkpoint_path = os.path.join(self.log_dir, "mask_rcnn_{}_*epoch*.h5".format(
self.config.NAME.lower()))
self.checkpoint_path = self.checkpoint_path.replace(
"*epoch*", "{epoch:04d}")
def train(self, train_dataset, val_dataset, learning_rate, epochs, layers,
augmentation=None, custom_callbacks=None, no_augmentation_sources=None):
"""Train the model.
train_dataset, val_dataset: Training and validation Dataset objects.
learning_rate: The learning rate to train with
epochs: Number of training epochs. Note that previous training epochs
                are considered to be done already, so this actually determines
                the epochs to train in total rather than in this particular
                call.
        layers: Allows selecting which layers to train. It can be:
- A regular expression to match layer names to train
- One of these predefined values:
heads: The RPN, classifier and mask heads of the network
all: All the layers
3+: Train Resnet stage 3 and up
4+: Train Resnet stage 4 and up
5+: Train Resnet stage 5 and up
augmentation: Optional. An imgaug (https://github.com/aleju/imgaug)
augmentation. For example, passing imgaug.augmenters.Fliplr(0.5)
flips images right/left 50% of the time. You can pass complex
augmentations as well. This augmentation applies 50% of the
time, and when it does it flips images right/left half the time
and adds a Gaussian blur with a random sigma in range 0 to 5.
augmentation = imgaug.augmenters.Sometimes(0.5, [
imgaug.augmenters.Fliplr(0.5),
imgaug.augmenters.GaussianBlur(sigma=(0.0, 5.0))
])
custom_callbacks: Optional. Add custom callbacks to be called
with the keras fit_generator method. Must be list of type keras.callbacks.
no_augmentation_sources: Optional. List of sources to exclude for
augmentation. A source is string that identifies a dataset and is
defined in the Dataset class.
"""
assert self.mode == "training", "Create model in training mode."
# Pre-defined layer regular expressions
layer_regex = {
# all layers but the backbone
"heads": r"(mrcnn\_.*)|(rpn\_.*)|(fpn\_.*)",
# From a specific Resnet stage and up
"3+": r"(res3.*)|(bn3.*)|(res4.*)|(bn4.*)|(res5.*)|(bn5.*)|(mrcnn\_.*)|(rpn\_.*)|(fpn\_.*)",
"4+": r"(res4.*)|(bn4.*)|(res5.*)|(bn5.*)|(mrcnn\_.*)|(rpn\_.*)|(fpn\_.*)",
"5+": r"(res5.*)|(bn5.*)|(mrcnn\_.*)|(rpn\_.*)|(fpn\_.*)",
# All layers
"all": ".*",
}
if layers in layer_regex.keys():
layers = layer_regex[layers]
# Data generators
train_generator = data_generator(train_dataset, self.config, shuffle=True,
augmentation=augmentation,
batch_size=self.config.BATCH_SIZE,
no_augmentation_sources=no_augmentation_sources)
val_generator = data_generator(val_dataset, self.config, shuffle=True,
batch_size=self.config.BATCH_SIZE)
# Create log_dir if it does not exist
if not os.path.exists(self.log_dir):
os.makedirs(self.log_dir)
# Callbacks
callbacks = [
keras.callbacks.TensorBoard(log_dir=self.log_dir,
histogram_freq=0, write_graph=True, write_images=False),
keras.callbacks.ModelCheckpoint(self.checkpoint_path,
verbose=0, save_weights_only=True),
]
# Add custom callbacks to the list
if custom_callbacks:
callbacks += custom_callbacks
# Train
log("\nStarting at epoch {}. LR={}\n".format(self.epoch, learning_rate))
log("Checkpoint Path: {}".format(self.checkpoint_path))
self.set_trainable(layers)
self.compile(learning_rate, self.config.LEARNING_MOMENTUM)
# Work-around for Windows: Keras fails on Windows when using
# multiprocessing workers. See discussion here:
# https://github.com/matterport/Mask_RCNN/issues/13#issuecomment-353124009
        if os.name == 'nt':
workers = 0
else:
            # workers = multiprocessing.cpu_count()  # disabled: a single worker is used instead
workers = 1
self.keras_model.fit_generator(
train_generator,
initial_epoch=self.epoch,
epochs=epochs,
steps_per_epoch=self.config.STEPS_PER_EPOCH,
callbacks=callbacks,
validation_data=val_generator,
validation_steps=self.config.VALIDATION_STEPS,
            max_queue_size=1,  # upstream default is 100; reduced here
            workers=workers,
            use_multiprocessing=False,  # upstream default is True; disabled here
)
self.epoch = max(self.epoch, epochs)
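    # Illustrative call (a sketch; the dataset objects, the config attribute and
    # the imgaug import are assumptions of the example, not requirements added
    # by this method):
    #
    #     model.train(train_dataset, val_dataset,
    #                 learning_rate=config.LEARNING_RATE,
    #                 epochs=30, layers="heads",
    #                 augmentation=imgaug.augmenters.Fliplr(0.5))
    #
    # trains only the RPN/classifier/mask heads until epoch 30 in total.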
def mold_inputs(self, images):
"""Takes a list of images and modifies them to the format expected
as an input to the neural network.
images: List of image matrices [height,width,depth]. Images can have
different sizes.
Returns 3 Numpy matrices:
molded_images: [N, h, w, 3]. Images resized and normalized.
image_metas: [N, length of meta data]. Details about each image.
windows: [N, (y1, x1, y2, x2)]. The portion of the image that has the
original image (padding excluded).
"""
molded_images = []
image_metas = []
windows = []
for image in images:
# Resize image
# TODO: move resizing to mold_image()
molded_image, window, scale, padding, crop = utils.resize_image(
image,
min_dim=self.config.IMAGE_MIN_DIM,
min_scale=self.config.IMAGE_MIN_SCALE,
max_dim=self.config.IMAGE_MAX_DIM,
mode=self.config.IMAGE_RESIZE_MODE)
molded_image = mold_image(molded_image, self.config)
# Build image_meta
image_meta = compose_image_meta(
0, image.shape, molded_image.shape, window, scale,
np.zeros([self.config.NUM_CLASSES], dtype=np.int32))
# Append
molded_images.append(molded_image)
windows.append(window)
image_metas.append(image_meta)
# Pack into arrays
molded_images = np.stack(molded_images)
image_metas = np.stack(image_metas)
windows = np.stack(windows)
return molded_images, image_metas, windows
def unmold_detections(self, detections, mrcnn_mask, original_image_shape,
image_shape, window):
"""Reformats the detections of one image from the format of the neural
network output to a format suitable for use in the rest of the
application.
detections: [N, (y1, x1, y2, x2, class_id, score)] in normalized coordinates
mrcnn_mask: [N, height, width, num_classes]
original_image_shape: [H, W, C] Original image shape before resizing
image_shape: [H, W, C] Shape of the image after resizing and padding
window: [y1, x1, y2, x2] Pixel coordinates of box in the image where the real
image is excluding the padding.
Returns:
boxes: [N, (y1, x1, y2, x2)] Bounding boxes in pixels
class_ids: [N] Integer class IDs for each bounding box
scores: [N] Float probability scores of the class_id
masks: [height, width, num_instances] Instance masks
"""
# How many detections do we have?
# Detections array is padded with zeros. Find the first class_id == 0.
zero_ix = np.where(detections[:, 4] == 0)[0]
N = zero_ix[0] if zero_ix.shape[0] > 0 else detections.shape[0]
# Extract boxes, class_ids, scores, and class-specific masks
boxes = detections[:N, :4]
class_ids = detections[:N, 4].astype(np.int32)
scores = detections[:N, 5]
masks = mrcnn_mask[np.arange(N), :, :, class_ids]
# Translate normalized coordinates in the resized image to pixel
# coordinates in the original image before resizing
window = utils.norm_boxes(window, image_shape[:2])
wy1, wx1, wy2, wx2 = window
shift = np.array([wy1, wx1, wy1, wx1])
wh = wy2 - wy1 # window height
ww = wx2 - wx1 # window width
scale = np.array([wh, ww, wh, ww])
# Convert boxes to normalized coordinates on the window
boxes = np.divide(boxes - shift, scale)
# Convert boxes to pixel coordinates on the original image
boxes = utils.denorm_boxes(boxes, original_image_shape[:2])
# Filter out detections with zero area. Happens in early training when
# network weights are still random
exclude_ix = np.where(
(boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1]) <= 0)[0]
if exclude_ix.shape[0] > 0:
boxes = np.delete(boxes, exclude_ix, axis=0)
class_ids = np.delete(class_ids, exclude_ix, axis=0)
scores = np.delete(scores, exclude_ix, axis=0)
masks = np.delete(masks, exclude_ix, axis=0)
N = class_ids.shape[0]
# Resize masks to original image size and set boundary threshold.
full_masks = []
for i in range(N):
# Convert neural network mask to full size mask
full_mask = utils.unmold_mask(masks[i], boxes[i], original_image_shape)
full_masks.append(full_mask)
full_masks = np.stack(full_masks, axis=-1)\
if full_masks else np.empty(original_image_shape[:2] + (0,))
return boxes, class_ids, scores, full_masks
def detect(self, images, verbose=0):
"""Runs the detection pipeline.
images: List of images, potentially of different sizes.
Returns a list of dicts, one dict per image. The dict contains:
rois: [N, (y1, x1, y2, x2)] detection bounding boxes
class_ids: [N] int class IDs
scores: [N] float probability scores for the class IDs
masks: [H, W, N] instance binary masks
"""
assert self.mode == "inference", "Create model in inference mode."
assert len(
images) == self.config.BATCH_SIZE, "len(images) must be equal to BATCH_SIZE"
if verbose:
log("Processing {} images".format(len(images)))
for image in images:
log("image", image)
# Mold inputs to format expected by the neural network
molded_images, image_metas, windows = self.mold_inputs(images)
# Validate image sizes
# All images in a batch MUST be of the same size
image_shape = molded_images[0].shape
for g in molded_images[1:]:
assert g.shape == image_shape,\
"After resizing, all images must have the same size. Check IMAGE_RESIZE_MODE and image sizes."
# Anchors
anchors = self.get_anchors(image_shape)
# Duplicate across the batch dimension because Keras requires it
# TODO: can this be optimized to avoid duplicating the anchors?
anchors = np.broadcast_to(anchors, (self.config.BATCH_SIZE,) + anchors.shape)
if verbose:
log("molded_images", molded_images)
log("image_metas", image_metas)
log("anchors", anchors)
# Run object detection
detections, _, _, mrcnn_mask, _, _, _ =\
self.keras_model.predict([molded_images, image_metas, anchors], verbose=0)
# Process detections
results = []
for i, image in enumerate(images):
final_rois, final_class_ids, final_scores, final_masks =\
self.unmold_detections(detections[i], mrcnn_mask[i],
image.shape, molded_images[i].shape,
windows[i])
results.append({
"rois": final_rois,
"class_ids": final_class_ids,
"scores": final_scores,
"masks": final_masks,
})
return results
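    # Illustrative usage (sketch, assuming BATCH_SIZE == 1 and `image` is an
    # RGB [H, W, 3] array already loaded by the caller):
    #
    #     r = model.detect([image], verbose=0)[0]
    #     boxes, ids = r["rois"], r["class_ids"]
    #     scores, masks = r["scores"], r["masks"]
    #
    # Boxes come back in pixel coordinates of the original (unmolded) image.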
def detect_molded(self, molded_images, image_metas, verbose=0):
"""Runs the detection pipeline, but expect inputs that are
molded already. Used mostly for debugging and inspecting
the model.
molded_images: List of images loaded using load_image_gt()
image_metas: image meta data, also returned by load_image_gt()
Returns a list of dicts, one dict per image. The dict contains:
rois: [N, (y1, x1, y2, x2)] detection bounding boxes
class_ids: [N] int class IDs
scores: [N] float probability scores for the class IDs
masks: [H, W, N] instance binary masks
"""
assert self.mode == "inference", "Create model in inference mode."
assert len(molded_images) == self.config.BATCH_SIZE,\
"Number of images must be equal to BATCH_SIZE"
if verbose:
log("Processing {} images".format(len(molded_images)))
for image in molded_images:
log("image", image)
# Validate image sizes
# All images in a batch MUST be of the same size
image_shape = molded_images[0].shape
for g in molded_images[1:]:
assert g.shape == image_shape, "Images must have the same size"
# Anchors
anchors = self.get_anchors(image_shape)
# Duplicate across the batch dimension because Keras requires it
# TODO: can this be optimized to avoid duplicating the anchors?
anchors = np.broadcast_to(anchors, (self.config.BATCH_SIZE,) + anchors.shape)
if verbose:
log("molded_images", molded_images)
log("image_metas", image_metas)
log("anchors", anchors)
# Run object detection
detections, _, _, mrcnn_mask, _, _, _ =\
self.keras_model.predict([molded_images, image_metas, anchors], verbose=0)
# Process detections
results = []
for i, image in enumerate(molded_images):
window = [0, 0, image.shape[0], image.shape[1]]
final_rois, final_class_ids, final_scores, final_masks =\
self.unmold_detections(detections[i], mrcnn_mask[i],
image.shape, molded_images[i].shape,
window)
results.append({
"rois": final_rois,
"class_ids": final_class_ids,
"scores": final_scores,
"masks": final_masks,
})
return results
def get_anchors(self, image_shape):
"""Returns anchor pyramid for the given image size."""
backbone_shapes = compute_backbone_shapes(self.config, image_shape)
# Cache anchors and reuse if image shape is the same
if not hasattr(self, "_anchor_cache"):
self._anchor_cache = {}
if not tuple(image_shape) in self._anchor_cache:
# Generate Anchors
a = utils.generate_pyramid_anchors(
self.config.RPN_ANCHOR_SCALES,
self.config.RPN_ANCHOR_RATIOS,
backbone_shapes,
self.config.BACKBONE_STRIDES,
self.config.RPN_ANCHOR_STRIDE)
# Keep a copy of the latest anchors in pixel coordinates because
# it's used in inspect_model notebooks.
# TODO: Remove this after the notebook are refactored to not use it
self.anchors = a
# Normalize coordinates
self._anchor_cache[tuple(image_shape)] = utils.norm_boxes(a, image_shape[:2])
return self._anchor_cache[tuple(image_shape)]
def ancestor(self, tensor, name, checked=None):
"""Finds the ancestor of a TF tensor in the computation graph.
tensor: TensorFlow symbolic tensor.
name: Name of ancestor tensor to find
checked: For internal use. A list of tensors that were already
searched to avoid loops in traversing the graph.
"""
checked = checked if checked is not None else []
# Put a limit on how deep we go to avoid very long loops
if len(checked) > 500:
return None
# Convert name to a regex and allow matching a number prefix
# because Keras adds them automatically
if isinstance(name, str):
name = re.compile(name.replace("/", r"(\_\d+)*/"))
parents = tensor.op.inputs
for p in parents:
if p in checked:
continue
if bool(re.fullmatch(name, p.name)):
return p
checked.append(p)
a = self.ancestor(p, name, checked)
if a is not None:
return a
return None
def find_trainable_layer(self, layer):
"""If a layer is encapsulated by another layer, this function
digs through the encapsulation and returns the layer that holds
the weights.
"""
if layer.__class__.__name__ == 'TimeDistributed':
return self.find_trainable_layer(layer.layer)
return layer
def get_trainable_layers(self):
"""Returns a list of layers that have weights."""
layers = []
# Loop through all layers
for l in self.keras_model.layers:
# If layer is a wrapper, find inner trainable layer
l = self.find_trainable_layer(l)
# Include layer if it has weights
if l.get_weights():
layers.append(l)
return layers
def run_graph(self, images, outputs, image_metas=None):
"""Runs a sub-set of the computation graph that computes the given
outputs.
image_metas: If provided, the images are assumed to be already
molded (i.e. resized, padded, and normalized)
outputs: List of tuples (name, tensor) to compute. The tensors are
symbolic TensorFlow tensors and the names are for easy tracking.
Returns an ordered dict of results. Keys are the names received in the
input and values are Numpy arrays.
"""
model = self.keras_model
# Organize desired outputs into an ordered dict
outputs = OrderedDict(outputs)
for o in outputs.values():
assert o is not None
# Build a Keras function to run parts of the computation graph
inputs = model.inputs
if model.uses_learning_phase and not isinstance(K.learning_phase(), int):
inputs += [K.learning_phase()]
kf = K.function(model.inputs, list(outputs.values()))
# Prepare inputs
if image_metas is None:
molded_images, image_metas, _ = self.mold_inputs(images)
else:
molded_images = images
image_shape = molded_images[0].shape
# Anchors
anchors = self.get_anchors(image_shape)
# Duplicate across the batch dimension because Keras requires it
# TODO: can this be optimized to avoid duplicating the anchors?
anchors = np.broadcast_to(anchors, (self.config.BATCH_SIZE,) + anchors.shape)
model_in = [molded_images, image_metas, anchors]
# Run inference
if model.uses_learning_phase and not isinstance(K.learning_phase(), int):
model_in.append(0.)
outputs_np = kf(model_in)
        # Pack the generated Numpy arrays into a dict and log the results.
outputs_np = OrderedDict([(k, v)
for k, v in zip(outputs.keys(), outputs_np)])
for k, v in outputs_np.items():
log(k, v)
return outputs_np
############################################################
# Data Formatting
############################################################
def compose_image_meta(image_id, original_image_shape, image_shape,
window, scale, active_class_ids):
"""Takes attributes of an image and puts them in one 1D array.
image_id: An int ID of the image. Useful for debugging.
original_image_shape: [H, W, C] before resizing or padding.
image_shape: [H, W, C] after resizing and padding
window: (y1, x1, y2, x2) in pixels. The area of the image where the real
image is (excluding the padding)
scale: The scaling factor applied to the original image (float32)
active_class_ids: List of class_ids available in the dataset from which
the image came. Useful if training on images from multiple datasets
where not all classes are present in all datasets.
"""
meta = np.array(
[image_id] + # size=1
list(original_image_shape) + # size=3
list(image_shape) + # size=3
list(window) + # size=4 (y1, x1, y2, x2) in image cooredinates
[scale] + # size=1
list(active_class_ids) # size=num_classes
)
return meta
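# Worked example of the layout (illustrative numbers): with NUM_CLASSES == 81
# the meta vector has length 1 + 3 + 3 + 4 + 1 + 81 == 93 and is laid out as
#   [image_id,
#    orig_H, orig_W, orig_C,
#    H, W, C,
#    y1, x1, y2, x2,        # window, in image coordinates
#    scale,
#    active_class_ids...]   # one flag per class
# parse_image_meta() and parse_image_meta_graph() below slice it apart again
# using exactly these offsets.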
def parse_image_meta(meta):
"""Parses an array that contains image attributes to its components.
See compose_image_meta() for more details.
meta: [batch, meta length] where meta length depends on NUM_CLASSES
Returns a dict of the parsed values.
"""
image_id = meta[:, 0]
original_image_shape = meta[:, 1:4]
image_shape = meta[:, 4:7]
    window = meta[:, 7:11]  # (y1, x1, y2, x2) window of image in pixels
scale = meta[:, 11]
active_class_ids = meta[:, 12:]
return {
"image_id": image_id.astype(np.int32),
"original_image_shape": original_image_shape.astype(np.int32),
"image_shape": image_shape.astype(np.int32),
"window": window.astype(np.int32),
"scale": scale.astype(np.float32),
"active_class_ids": active_class_ids.astype(np.int32),
}
def parse_image_meta_graph(meta):
"""Parses a tensor that contains image attributes to its components.
See compose_image_meta() for more details.
meta: [batch, meta length] where meta length depends on NUM_CLASSES
Returns a dict of the parsed tensors.
"""
image_id = meta[:, 0]
original_image_shape = meta[:, 1:4]
image_shape = meta[:, 4:7]
    window = meta[:, 7:11]  # (y1, x1, y2, x2) window of image in pixels
scale = meta[:, 11]
active_class_ids = meta[:, 12:]
return {
"image_id": image_id,
"original_image_shape": original_image_shape,
"image_shape": image_shape,
"window": window,
"scale": scale,
"active_class_ids": active_class_ids,
}
def mold_image(images, config):
"""Expects an RGB image (or array of images) and subtracts
the mean pixel and converts it to float. Expects image
colors in RGB order.
"""
return images.astype(np.float32) - config.MEAN_PIXEL
def unmold_image(normalized_images, config):
    """Takes an image normalized with mold_image() and returns the original."""
return (normalized_images + config.MEAN_PIXEL).astype(np.uint8)
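# Note: unmold_image() inverts mold_image(); for a uint8 input the round trip
# recovers the original pixels up to float32 rounding in the cast back to uint8.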
############################################################
# Miscellenous Graph Functions
############################################################
def trim_zeros_graph(boxes, name=None):
"""Often boxes are represented with matrices of shape [N, 4] and
are padded with zeros. This removes zero boxes.
boxes: [N, 4] matrix of boxes.
non_zeros: [N] a 1D boolean mask identifying the rows to keep
"""
non_zeros = tf.cast(tf.reduce_sum(tf.abs(boxes), axis=1), tf.bool)
boxes = tf.boolean_mask(boxes, non_zeros, name=name)
return boxes, non_zeros
def batch_pack_graph(x, counts, num_rows):
"""Picks different number of values from each row
in x depending on the values in counts.
"""
outputs = []
for i in range(num_rows):
outputs.append(x[i, :counts[i]])
return tf.concat(outputs, axis=0)
def norm_boxes_graph(boxes, shape):
"""Converts boxes from pixel coordinates to normalized coordinates.
boxes: [..., (y1, x1, y2, x2)] in pixel coordinates
shape: [..., (height, width)] in pixels
Note: In pixel coordinates (y2, x2) is outside the box. But in normalized
coordinates it's inside the box.
Returns:
[..., (y1, x1, y2, x2)] in normalized coordinates
"""
h, w = tf.split(tf.cast(shape, tf.float32), 2)
scale = tf.concat([h, w, h, w], axis=-1) - tf.constant(1.0)
shift = tf.constant([0., 0., 1., 1.])
return tf.divide(boxes - shift, scale)
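# Worked example (illustrative): for a 1024x1024 image, scale == [1023, 1023,
# 1023, 1023] and shift == [0, 0, 1, 1], so the full-image box
# (0, 0, 1024, 1024) maps to (0, 0, 1, 1) -- the (y2, x2) corner that lies
# outside the box in pixel space becomes the inclusive corner 1.0 in
# normalized space, matching the note above.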
def denorm_boxes_graph(boxes, shape):
"""Converts boxes from normalized coordinates to pixel coordinates.
boxes: [..., (y1, x1, y2, x2)] in normalized coordinates
shape: [..., (height, width)] in pixels
Note: In pixel coordinates (y2, x2) is outside the box. But in normalized
coordinates it's inside the box.
Returns:
[..., (y1, x1, y2, x2)] in pixel coordinates
"""
h, w = tf.split(tf.cast(shape, tf.float32), 2)
scale = tf.concat([h, w, h, w], axis=-1) - tf.constant(1.0)
shift = tf.constant([0., 0., 1., 1.])
return tf.cast(tf.round(tf.multiply(boxes, scale) + shift), tf.int32)
| 44.238593 | 115 | 0.612122 |
4a1ab30e9df07dbd63fc01963db93ed77c4e02de
| 321 |
py
|
Python
|
portal/urls.py
|
mezidia/django_labs
|
8e9a4d57b1feb3b54e666c6583d14b978f230129
|
[
"MIT"
] | null | null | null |
portal/urls.py
|
mezidia/django_labs
|
8e9a4d57b1feb3b54e666c6583d14b978f230129
|
[
"MIT"
] | 26 |
2021-09-21T09:29:25.000Z
|
2022-03-02T15:27:16.000Z
|
portal/urls.py
|
mezidia/django_labs
|
8e9a4d57b1feb3b54e666c6583d14b978f230129
|
[
"MIT"
] | 1 |
2021-11-14T16:40:52.000Z
|
2021-11-14T16:40:52.000Z
|
from django.urls import path
from . import views
urlpatterns = [
path('', views.index, name='index'),
path('flight/<int:id>/', views.flight, name='flight'),
path('flights', views.flights, name='flights'),
path('about', views.about, name='about'),
path('contacts', views.contacts, name='contacts'),
]
| 29.181818 | 58 | 0.647975 |
4a1ab3751be2a7aed6b26f6f3fbbb7df3561b002
| 750 |
py
|
Python
|
dialogs/specialecho.py
|
uezo/linebot-project-template
|
294d40f5c50a3bbee346314107b60e98f4e07bf0
|
[
"MIT"
] | 14 |
2019-08-05T22:54:59.000Z
|
2021-12-21T00:29:22.000Z
|
dialogs/specialecho.py
|
whitecat-22/linebot-project-template
|
294d40f5c50a3bbee346314107b60e98f4e07bf0
|
[
"MIT"
] | 1 |
2021-06-17T09:30:33.000Z
|
2021-06-18T07:16:37.000Z
|
dialogs/specialecho.py
|
whitecat-22/linebot-project-template
|
294d40f5c50a3bbee346314107b60e98f4e07bf0
|
[
"MIT"
] | 5 |
2019-09-03T06:51:44.000Z
|
2021-06-17T09:40:42.000Z
|
from minette import DialogService
# Echo-back dialog that uses request and session (context) information
class SpecialEchoDialogService(DialogService):
def compose_response(self, request, context, connection):
        # Get the previous utterance from the session data
previous_text = context.data.get("previous_text", "")
        # Store the current utterance in the session data
context.data["previous_text"] = request.text
        # Extract nouns from the utterance ("名詞" is the noun part-of-speech tag)
noun = [w.surface for w in request.words if w.part == "名詞"]
        # Keep this dialog topic alive so the session data is preserved
context.topic.keep_on = True
        # Assemble the response message
ret = "こんにちは、{}さん。今回は'{}'って言ったね。前回は'{}'って言ってたよ".format(
request.user.name, request.text, previous_text
)
ret += "\n含まれる名詞: {}".format("、".join(noun)) if noun else ""
return ret
| 30 | 68 | 0.633333 |
4a1ab44e51a3ccbc5e4488081072effda4adeca1
| 8,018 |
py
|
Python
|
deephyper/search/nas/model/space/node.py
|
madhukar-m-rao/deephyper
|
d280701d9e4cae3e639be054bf1c5ef918d9a1a7
|
[
"BSD-3-Clause"
] | 2 |
2020-08-26T09:15:27.000Z
|
2020-08-26T09:19:13.000Z
|
deephyper/search/nas/model/space/node.py
|
madhukar-m-rao/deephyper
|
d280701d9e4cae3e639be054bf1c5ef918d9a1a7
|
[
"BSD-3-Clause"
] | null | null | null |
deephyper/search/nas/model/space/node.py
|
madhukar-m-rao/deephyper
|
d280701d9e4cae3e639be054bf1c5ef918d9a1a7
|
[
"BSD-3-Clause"
] | null | null | null |
from tensorflow import keras
from .....core.exceptions import DeephyperRuntimeError
from .op.basic import Operation
class Node:
"""This class represents a node of a graph
Args:
name (str): node name.
"""
# Number of 'Node' instances created
num = 0
def __init__(self, name="", *args, **kwargs):
Node.num += 1
self._num = Node.num
self._tensor = None
self.name = name
def __str__(self):
return f"{self.name}[id={self._num}]"
@property
def id(self):
return self._num
@property
def op(self):
raise NotImplementedError
def create_tensor(self, *args, **kwargs):
raise NotImplementedError
@staticmethod
def verify_operation(op):
if isinstance(op, Operation):
return op
elif isinstance(op, keras.layers.Layer):
return Operation(op)
else:
            raise RuntimeError(
                f"Can't add the operation '{op}'. An operation should be an instance of Operation or keras.layers.Layer, but it is of type: {type(op)}"
            )
class OperationNode(Node):
def __init__(self, name="", *args, **kwargs):
super().__init__(name=name, *args, **kwargs)
def create_tensor(self, inputs=None, train=True, seed=None, **kwargs):
if self._tensor is None:
if inputs == None:
try:
self._tensor = self.op(train=train, seed=None)
except TypeError:
raise RuntimeError(
f'Verify if node: "{self}" has incoming connexions!'
)
else:
self._tensor = self.op(inputs, train=train)
return self._tensor
class VariableNode(OperationNode):
"""This class represents a node of a graph where you have a set of possible operations. It means the agent will have to act to choose one of these operations.
>>> import tensorflow as tf
>>> from deephyper.search.nas.model.space.node import VariableNode
>>> vnode = VariableNode("VNode1")
>>> from deephyper.search.nas.model.space.op.op1d import Dense
>>> vnode.add_op(Dense(
... units=10,
... activation=tf.nn.relu))
>>> vnode.num_ops
1
>>> vnode.add_op(Dense(
... units=1000,
... activation=tf.nn.tanh))
>>> vnode.num_ops
2
>>> vnode.set_op(0)
>>> vnode.op.units
10
Args:
name (str): node name.
"""
def __init__(self, name=""):
super().__init__(name=name)
self._ops = list()
self._index = None
def __str__(self):
if self._index != None:
return f"{super().__str__()}(Variable[{str(self.op)}])"
else:
return f"{super().__str__()}(Variable[?])"
def add_op(self, op):
self._ops.append(self.verify_operation(op))
@property
def num_ops(self):
return len(self._ops)
def set_op(self, index):
self.get_op(index).init(self)
def get_op(self, index):
assert (
"float" in str(type(index)) or type(index) is int
), f"found type is : {type(index)}"
if "float" in str(type(index)):
self._index = self.denormalize(index)
else:
assert 0 <= index and index < len(
self._ops
), f"Number of possible operations is: {len(self._ops)}, but index given is: {index} (index starts from 0)!"
self._index = index
return self.op
    def denormalize(self, index):
        """Denormalize a normalized index to get an absolute index. Useful when you want to compare search spaces that have different numbers of operations.
Args:
            index (float|int): a normalized index.
Returns:
            int: the absolute index of the operation chosen by the relative index `index`.
"""
if type(index) is int:
return index
else:
assert 0.0 <= index and index <= 1.0
res = int(index * len(self._ops))
if index == 1.0:
res -= 1
return res
@property
def op(self):
if len(self._ops) == 0:
raise RuntimeError("This VariableNode doesn't have any operation yet.")
elif self._index is None:
raise RuntimeError(
'This VariableNode doesn\'t have any set operation, please use "set_op(index)" if you want to set one'
)
else:
return self._ops[self._index]
@property
def ops(self):
return self._ops
class ConstantNode(OperationNode):
"""A ConstantNode represents a node with a fixed operation. It means the agent will not make any new decision for this node. The common use case for this node is to add a tensor in the graph.
>>> import tensorflow as tf
>>> from deephyper.search.nas.model.space.node import ConstantNode
>>> from deephyper.search.nas.model.space.op.op1d import Dense
>>> cnode = ConstantNode(op=Dense(units=100, activation=tf.nn.relu), name='CNode1')
>>> cnode.op
Dense_100_relu
Args:
        op (Operation, optional): the fixed operation held by this node. Defaults to None.
        name (str, optional): node name. Defaults to ''.
"""
def __init__(self, op=None, name="", *args, **kwargs):
super().__init__(name=name)
if not op is None:
op = self.verify_operation(op)
op.init(self) # set operation
self._op = op
def set_op(self, op):
op = self.verify_operation(op)
op.init(self)
self._op = op
def __str__(self):
return f"{super().__str__()}(Constant[{str(self.op)}])"
@property
def op(self):
return self._op
class MirrorNode(OperationNode):
    """A MirrorNode is a node which reuses another node, enabling the reuse of Keras layers. This node does not add any operation to choose from.
Args:
node (Node): The targeted node to mirror.
>>> from deephyper.search.nas.model.space.node import VariableNode, MirrorNode
>>> from deephyper.search.nas.model.space.op.op1d import Dense
>>> vnode = VariableNode()
>>> vnode.add_op(Dense(10))
>>> vnode.add_op(Dense(20))
>>> mnode = MirrorNode(vnode)
>>> vnode.set_op(0)
>>> vnode.op
Dense_10
>>> mnode.op
Dense_10
"""
def __init__(self, node):
super().__init__(name=f"Mirror[{str(node)}]")
self._node = node
@property
def op(self):
return self._node.op
class MimeNode(OperationNode):
    """A MimeNode is a node which reuses the choice made for a VariableNode, enabling the definition of a cell-based search space. This node has its own operations but mimics the operation choice (index) made for the mimed VariableNode.
Args:
node (VariableNode): the VariableNode to mime.
>>> from deephyper.search.nas.model.space.node import VariableNode, MimeNode
>>> from deephyper.search.nas.model.space.op.op1d import Dense
>>> vnode = VariableNode()
>>> vnode.add_op(Dense(10))
>>> vnode.add_op(Dense(20))
>>> mnode = MimeNode(vnode)
>>> mnode.add_op(Dense(30))
>>> mnode.add_op(Dense(40))
>>> vnode.set_op(0)
>>> vnode.op
Dense_10
>>> mnode.op
Dense_30
"""
def __init__(self, node):
super().__init__(name=f"Mime[{str(node)}]")
self.node = node
self._ops = list()
def add_op(self, op):
self._ops.append(self.verify_operation(op))
@property
def num_ops(self):
return len(self._ops)
@property
def op(self):
if self.num_ops != self.node.num_ops:
            raise DeephyperRuntimeError(
                f"{str(self)} and {str(self.node)} should have the same number of operations, but {str(self)} has {self.num_ops} and {str(self.node)} has {self.node.num_ops}!"
            )
else:
return self._ops[self.node._index]
@property
def ops(self):
return self._ops
| 29.806691 | 218 | 0.593789 |
4a1ab4a7fbea592d1d3a5b745b176ff36581de3f
| 28 |
py
|
Python
|
config/__init__.py
|
UditGupta10/distTransE
|
b4ef028d11f9af934324935826255750c0dd2df3
|
[
"MIT"
] | 57 |
2019-09-14T13:24:19.000Z
|
2022-03-27T13:17:02.000Z
|
config/__init__.py
|
UditGupta10/distTransE
|
b4ef028d11f9af934324935826255750c0dd2df3
|
[
"MIT"
] | 2 |
2019-10-12T15:07:35.000Z
|
2020-05-11T11:53:40.000Z
|
config/__init__.py
|
UditGupta10/distTransE
|
b4ef028d11f9af934324935826255750c0dd2df3
|
[
"MIT"
] | 11 |
2019-09-24T04:42:42.000Z
|
2021-11-23T08:21:01.000Z
|
from .Config import Config
| 14 | 27 | 0.785714 |
4a1ab4e36236d076ef4f8c8bcbd7ceddf56fd653
| 279 |
py
|
Python
|
ITcoach/DataAnalysis-master/day06/code/douban/config.py
|
ww35133634/chenxusheng
|
666e0eb3aedde46342faf0d4030f5c72b10c9732
|
[
"AFL-3.0"
] | null | null | null |
ITcoach/DataAnalysis-master/day06/code/douban/config.py
|
ww35133634/chenxusheng
|
666e0eb3aedde46342faf0d4030f5c72b10c9732
|
[
"AFL-3.0"
] | null | null | null |
ITcoach/DataAnalysis-master/day06/code/douban/config.py
|
ww35133634/chenxusheng
|
666e0eb3aedde46342faf0d4030f5c72b10c9732
|
[
"AFL-3.0"
] | null | null | null |
# coding=utf-8
SPIDER_DEFAULT_HEADERS = {"User-Agent":"Mozilla/5.0 (iPhone; CPU iPhone OS 9_1 like Mac OS X) AppleWebKit/601.1.46 (KHTML, like Gecko) Version/9.0 Mobile/13B143 Safari/601.1"}
MONGO_HOST = "127.0.0.1"
MONGO_PORT = 27017
MONGO_DB = "douban"
MONGO_COLLECTION = "tv1"
| 46.5 | 175 | 0.738351 |
4a1ab609befb22a6122e172bd91d6df66084d250
| 4,300 |
py
|
Python
|
phylogeny/reconstruction/allquartets.py
|
mroctavious/Phylogeny
|
014461b5ec6323f42cf1c913f6a9bdba42e3bc05
|
[
"MIT"
] | 2 |
2020-01-17T17:19:15.000Z
|
2021-04-18T22:27:59.000Z
|
phylogeny/reconstruction/allquartets.py
|
mroctavious/Phylogeny
|
014461b5ec6323f42cf1c913f6a9bdba42e3bc05
|
[
"MIT"
] | null | null | null |
phylogeny/reconstruction/allquartets.py
|
mroctavious/Phylogeny
|
014461b5ec6323f42cf1c913f6a9bdba42e3bc05
|
[
"MIT"
] | 2 |
2018-08-30T20:57:37.000Z
|
2020-09-09T06:29:02.000Z
|
"""
All quartets method.
Given an n×n additive matrix M with n ≥ 5 associated to a
binary tree T with positive branch lengths, we can construct
T using a two-step technique that we now describe.
In Step 1, we compute a quartet tree on every four leaves by
applying the Four Point Method to each 4×4 submatrix of M.
In Step 2, we assemble the quartet trees into a tree on the
full set of leaves. Step 1 is straightforward. The technique
we use in Step 2 is called the “All Quartets Method”.
"""
import itertools as itr
from ..core import Tree
from ..core.fpc import fpc_sums
def induced_quartet(dist_matrix, idx_quartet=None):
"""Get the induced quartet ordering of 4 items."""
if idx_quartet is None:
idx_quartet = range(4) # The first 4 elements
q = tuple(idx_quartet)
# Calculate the relevant pairwise sums
sums = fpc_sums(dist_matrix, q)
# Get the quartet with smallest sum
quartet = min(sums, key=lambda x:sums[x])
return quartet
# ---
def map_names_to_quartet(quartet, names=None):
"Map the names to the quartet's indices."
if names:
((a,b),(c,d)) = quartet
return ((names[a],names[b]), (names[c],names[d]))
else:
return quartet
# ---
def four_point_method(additive, names=None):
"""Method for inferring a tree from a 4x4 additive matrix.
If we are given a 4×4 additive matrix 'D' that
corresponds to a tree 'T' with positive branch weights,
then we can easily compute 'T' from 'D': We calculate
the three pairwise sums from the four point condition,
we determine which of the three pairwise sums is the
smallest, and use that one to define the split for the
four leaves into two sets of two leaves each.
"""
if names is None:
try:
names = additive.names
except AttributeError:
pass
# Calculate the quartet inferred by the distances
quartet = induced_quartet(additive)
# Map the names to the quartet
quartet = map_names_to_quartet(quartet, names)
# Assemble the quartet into a tree structure
tree = Tree.from_quartet(quartet)
return tree
# ---
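# Worked example of the Four Point Method (illustrative distances): for leaves
# a, b, c, d with d(a,b) = d(c,d) = 2 and all four cross distances equal to 4,
# the three pairwise sums are
#   d(a,b) + d(c,d) = 4   <- smallest, so the split is ((a,b), (c,d))
#   d(a,c) + d(b,d) = 8
#   d(a,d) + d(b,c) = 8
# which is the quartet the Four Point Method selects for that matrix.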
def all_quartets(dist_matrix, names=None):
"Get all inferred quartet subtrees."
if names is None:
try:
names = dist_matrix.names
except AttributeError:
pass
n = len(dist_matrix)
quartets = itr.combinations(range(n), 4)
return [map_names_to_quartet(induced_quartet(dist_matrix,q),
names)
for q in quartets]
# ---
def infer_siblings(quartets):
"""From the tree quartets, infer pairs of sibling leafs.
We search for a pair x,y of leaves that is always
together in any quartet that contains both x and y.
(In other words, for all a,b, any quartet on {x,y,a,b}
is ((x,y),(a,b))). Any pair of leaves that are siblings
in the quartets tree T will satisfy this property.
"""
together = set()
separated = set()
for q in quartets:
quartet = { frozenset(pair) for pair in q }
together |= quartet
((a,b), (c,d)) = q
separated |= { frozenset(i)
for i in [(a,c), (a,d),
(b,c), (b,d)] }
return {frozenset(pair) for pair in together - separated}
# ---
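# Example (illustrative): given only the quartet ((x,y),(a,b)), `together`
# holds {x,y} and {a,b} while `separated` holds the four mixed pairs, so both
# {x,y} and {a,b} are reported as candidate sibling pairs.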
def tree_from_quartets(quartets):
"From the given quartets, assemble the tree."
if len(quartets) == 1:
q = quartets[0]
return Tree.from_quartet(q)
else:
# Fetch a pair of sibling leafs
a,b = list(infer_siblings(quartets).pop())
        # Recurse on quartets \ {a}
new_quartets = [q for q in quartets if (a not in q[0]) and (a not in q[-1])]
tree = tree_from_quartets(new_quartets)
# Add a as sibling of b
tree.add_as_sibling(a,b)
return tree
# ---
def all_quartets_method(dist_matrix, names=None):
"Reconstruct the tree from the dist. matrix using the all quartets method."
if names is None:
try:
names = dist_matrix.names
except AttributeError:
pass
quartets = all_quartets(dist_matrix, names)
return tree_from_quartets(quartets)
# ---
| 30.28169 | 84 | 0.627209 |
4a1ab63b86640412ca0e18389a70713c782a49af
| 13,150 |
py
|
Python
|
textacy/datasets/capitol_words.py
|
tbsexton/textacy
|
964614213c7261f91f09c106334269388d45f790
|
[
"Apache-2.0"
] | null | null | null |
textacy/datasets/capitol_words.py
|
tbsexton/textacy
|
964614213c7261f91f09c106334269388d45f790
|
[
"Apache-2.0"
] | null | null | null |
textacy/datasets/capitol_words.py
|
tbsexton/textacy
|
964614213c7261f91f09c106334269388d45f790
|
[
"Apache-2.0"
] | null | null | null |
"""
Capitol Words Congressional speeches
------------------------------------
A collection of ~11k (almost all) speeches given by the main protagonists of the
2016 U.S. Presidential election that had previously served in the U.S. Congress --
including Hillary Clinton, Bernie Sanders, Barack Obama, Ted Cruz, and John Kasich --
from January 1996 through June 2016.
Records include the following data:
- ``text``: Full text of the Congressperson's remarks.
- ``title``: Title of the speech, in all caps.
- ``date``: Date on which the speech was given, as an ISO-standard string.
- ``speaker_name``: First and last name of the speaker.
- ``speaker_party``: Political party of the speaker: "R" for Republican,
"D" for Democrat, "I" for Independent.
- ``congress``: Number of the Congress in which the speech was given: ranges
continuously between 104 and 114.
- ``chamber``: Chamber of Congress in which the speech was given: almost all
are either "House" or "Senate", with a small number of "Extensions".
This dataset was derived from data provided by the (now defunct) Sunlight
Foundation's `Capitol Words API <http://sunlightlabs.github.io/Capitol-Words/>`_.
"""
import itertools
import logging
import pathlib
import urllib.parse
from typing import Iterable, Optional, Set, Tuple, Union
from .. import constants, utils
from .. import io as tio
from .base import Dataset
LOGGER = logging.getLogger(__name__)
NAME = "capitol_words"
META = {
"site_url": "http://sunlightlabs.github.io/Capitol-Words/",
"description": (
"Collection of ~11k speeches in the Congressional Record given by "
"notable U.S. politicians between Jan 1996 and Jun 2016."
),
}
DOWNLOAD_ROOT = "https://github.com/bdewilde/textacy-data/releases/download/"
class CapitolWords(Dataset):
"""
Stream a collection of Congressional speeches from a compressed json file on disk,
either as texts or text + metadata pairs.
Download the data (one time only!) from the textacy-data repo
(https://github.com/bdewilde/textacy-data), and save its contents to disk::
>>> import textacy.datasets
>>> ds = textacy.datasets.CapitolWords()
>>> ds.download()
>>> ds.info
{'name': 'capitol_words',
'site_url': 'http://sunlightlabs.github.io/Capitol-Words/',
'description': 'Collection of ~11k speeches in the Congressional Record given by notable U.S. politicians between Jan 1996 and Jun 2016.'}
Iterate over speeches as texts or records with both text and metadata::
>>> for text in ds.texts(limit=3):
... print(text, end="\\n\\n")
>>> for text, meta in ds.records(limit=3):
... print("\\n{} ({})\\n{}".format(meta["title"], meta["speaker_name"], text))
Filter speeches by a variety of metadata fields and text length::
>>> for text, meta in ds.records(speaker_name="Bernie Sanders", limit=3):
... print("\\n{}, {}\\n{}".format(meta["title"], meta["date"], text))
>>> for text, meta in ds.records(speaker_party="D", congress={110, 111, 112},
... chamber="Senate", limit=3):
... print(meta["title"], meta["speaker_name"], meta["date"])
>>> for text, meta in ds.records(speaker_name={"Barack Obama", "Hillary Clinton"},
... date_range=("2005-01-01", "2005-12-31")):
... print(meta["title"], meta["speaker_name"], meta["date"])
>>> for text in ds.texts(min_len=50000):
... print(len(text))
Stream speeches into a :class:`textacy.Corpus <textacy.corpus.Corpus>`::
        >>> textacy.Corpus("en", data=ds.records(limit=100))
Corpus(100 docs; 70496 tokens)
Args:
data_dir: Path to directory on disk under which dataset is stored,
i.e. ``/path/to/data_dir/capitol_words``.
Attributes:
full_date_range: First and last dates for which speeches are available,
each as an ISO-formatted string (YYYY-MM-DD).
speaker_names: Full names of all speakers included in corpus, e.g. "Bernie Sanders".
speaker_parties: All distinct political parties of speakers, e.g. "R".
chambers: All distinct chambers in which speeches were given, e.g. "House".
congresses: All distinct numbers of the congresses in which speeches were given, e.g. 114.
"""
full_date_range: Tuple[str, str] = ("1996-01-01", "2016-06-30")
speaker_names: Set[str] = {
"Barack Obama",
"Bernie Sanders",
"Hillary Clinton",
"Jim Webb",
"Joe Biden",
"John Kasich",
"Joseph Biden",
"Lincoln Chafee",
"Lindsey Graham",
"Marco Rubio",
"Mike Pence",
"Rand Paul",
"Rick Santorum",
"Ted Cruz",
}
speaker_parties: Set[str] = {"D", "I", "R"}
chambers: Set[str] = {"Extensions", "House", "Senate"}
congresses: Set[int] = {104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114}
def __init__(
self,
data_dir: Union[str, pathlib.Path] = constants.DEFAULT_DATA_DIR.joinpath(NAME),
):
super().__init__(NAME, meta=META)
self.data_dir = utils.to_path(data_dir).resolve()
self._filename = "capitol-words-py3.json.gz"
self._filepath = self.data_dir.joinpath(self._filename)
@property
def filepath(self) -> Optional[str]:
"""
Full path on disk for CapitolWords data as compressed json file.
``None`` if file is not found, e.g. has not yet been downloaded.
"""
if self._filepath.is_file():
return str(self._filepath)
else:
return None
def download(self, *, force: bool = False) -> None:
"""
Download the data as a Python version-specific compressed json file and
save it to disk under the ``data_dir`` directory.
Args:
force: If True, download the dataset, even if it already exists
on disk under ``data_dir``.
"""
release_tag = "capitol_words_py3_v{data_version}".format(data_version=1.0)
url = urllib.parse.urljoin(DOWNLOAD_ROOT, release_tag + "/" + self._filename)
tio.download_file(
url,
filename=self._filename,
dirpath=self.data_dir,
force=force,
)
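    # For reference, the URL resolved from the constants above (not an extra
    # endpoint) is:
    # https://github.com/bdewilde/textacy-data/releases/download/capitol_words_py3_v1.0/capitol-words-py3.json.gz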
def __iter__(self):
if not self._filepath.is_file():
raise OSError(
"dataset file {} not found;\n"
"has the dataset been downloaded yet?".format(self._filepath)
)
for record in tio.read_json(self._filepath, mode="rt", lines=True):
yield record
def _get_filters(
self,
speaker_name: Optional[Union[str, Set[str]]] = None,
speaker_party: Optional[Union[str, Set[str]]] = None,
chamber: Optional[Union[str, Set[str]]] = None,
congress: Optional[Union[int, Set[int]]] = None,
date_range: Optional[Tuple[Optional[str], Optional[str]]] = None,
min_len: Optional[int] = None,
):
filters = []
if min_len is not None:
if min_len < 1:
raise ValueError("`min_len` must be at least 1")
filters.append(
lambda record: len(record.get("text", "")) >= min_len
)
if date_range is not None:
date_range = utils.validate_and_clip_range(
date_range, self.full_date_range, val_type=(str, bytes))
filters.append(
lambda record: (
record.get("date")
and date_range[0] <= record["date"] < date_range[1]
)
)
if speaker_name is not None:
speaker_name = utils.validate_set_members(
speaker_name, (str, bytes), valid_vals=self.speaker_names)
filters.append(lambda record: record.get("speaker_name") in speaker_name)
if speaker_party is not None:
speaker_party = utils.validate_set_members(
speaker_party, (str, bytes), valid_vals=self.speaker_parties)
filters.append(lambda record: record.get("speaker_party") in speaker_party)
if chamber is not None:
chamber = utils.validate_set_members(
chamber, (str, bytes), valid_vals=self.chambers)
filters.append(lambda record: record.get("chamber") in chamber)
if congress is not None:
congress = utils.validate_set_members(
congress, int, valid_vals=self.congresses)
filters.append(lambda record: record.get("congress") in congress)
return filters
def _filtered_iter(self, filters):
if filters:
for record in self:
if all(filter_(record) for filter_ in filters):
yield record
else:
for record in self:
yield record
def texts(
self,
*,
speaker_name: Optional[Union[str, Set[str]]] = None,
speaker_party: Optional[Union[str, Set[str]]] = None,
chamber: Optional[Union[str, Set[str]]] = None,
congress: Optional[Union[int, Set[int]]] = None,
date_range: Optional[Tuple[Optional[str], Optional[str]]] = None,
min_len: Optional[int] = None,
limit: Optional[int] = None,
) -> Iterable[str]:
"""
Iterate over speeches in this dataset, optionally filtering by a variety
of metadata and/or text length, and yield texts only,
in chronological order.
Args:
speaker_name: Filter speeches by the speakers' name;
see :attr:`CapitolWords.speaker_names`.
speaker_party: Filter speeches by the speakers' party;
see :attr:`CapitolWords.speaker_parties`.
chamber: Filter speeches by the chamber in which they were given;
see :attr:`CapitolWords.chambers`.
congress: Filter speeches by the congress in which they were given;
see :attr:`CapitolWords.congresses`.
date_range: Filter speeches by the date on which they were given.
Both start and end date must be specified, but a null value for either
will be replaced by the min/max date available for the dataset.
min_len: Filter texts by the length (# characters) of their text content.
limit: Yield no more than ``limit`` texts that match all specified filters.
Yields:
Full text of next (by chronological order) speech in dataset
passing all filter params.
Raises:
ValueError: If any filtering options are invalid.
"""
filters = self._get_filters(
speaker_name, speaker_party, chamber, congress, date_range, min_len)
for record in itertools.islice(self._filtered_iter(filters), limit):
yield record["text"]
def records(
self,
*,
speaker_name: Optional[Union[str, Set[str]]] = None,
speaker_party: Optional[Union[str, Set[str]]] = None,
chamber: Optional[Union[str, Set[str]]] = None,
congress: Optional[Union[int, Set[int]]] = None,
date_range: Optional[Tuple[Optional[str], Optional[str]]] = None,
min_len: Optional[int] = None,
limit: Optional[int] = None,
) -> Iterable[Tuple[str, dict]]:
"""
Iterate over speeches in this dataset, optionally filtering by a variety
of metadata and/or text length, and yield text + metadata pairs,
in chronological order.
Args:
speaker_name: Filter speeches by the speakers' name;
see :attr:`CapitolWords.speaker_names`.
speaker_party: Filter speeches by the speakers' party;
see :attr:`CapitolWords.speaker_parties`.
chamber: Filter speeches by the chamber in which they were given;
see :attr:`CapitolWords.chambers`.
congress: Filter speeches by the congress in which they were given;
see :attr:`CapitolWords.congresses`.
date_range: Filter speeches by the date on which they were given.
Both start and end date must be specified, but a null value for either
will be replaced by the min/max date available for the dataset.
min_len: Filter speeches by the length (# characters) of their text content.
limit: Yield no more than ``limit`` speeches that match all specified filters.
Yields:
Full text of the next (by chronological order) speech in dataset
passing all filters, and its corresponding metadata.
Raises:
ValueError: If any filtering options are invalid.
"""
filters = self._get_filters(
speaker_name, speaker_party, chamber, congress, date_range, min_len)
for record in itertools.islice(self._filtered_iter(filters), limit):
yield record.pop("text"), record
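# --- Illustrative usage sketch (not part of the original module) ---
# A hedged example of how the filter parameters documented above are meant to
# be combined, assuming this class is textacy's CapitolWords dataset and that
# the data has already been downloaded locally; the values are illustrative.
#
#   import textacy.datasets
#
#   cw = textacy.datasets.CapitolWords()
#   for text in cw.texts(speaker_party="D", chamber="Senate",
#                        date_range=("1999-01-01", "2000-01-01"),
#                        min_len=200, limit=3):
#       print(text[:100])
#
#   for text, meta in cw.records(congress=106, limit=1):
#       print(meta["speaker_name"], meta["date"])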
| 42.694805 | 147 | 0.611103 |
4a1ab739417e20ae5e0551db037db4f2499401e0
| 3,232 |
py
|
Python
|
ext/v8/upstream/scons/engine/SCons/Tool/g++.py
|
bsingr/therubyracer
|
2397cae80aa8f458c028e28bdf2bd8a93e6161a6
|
[
"MIT",
"Unlicense"
] | 1 |
2015-11-05T01:29:05.000Z
|
2015-11-05T01:29:05.000Z
|
ext/v8/upstream/scons/engine/SCons/Tool/g++.py
|
bsingr/therubyracer
|
2397cae80aa8f458c028e28bdf2bd8a93e6161a6
|
[
"MIT",
"Unlicense"
] | null | null | null |
ext/v8/upstream/scons/engine/SCons/Tool/g++.py
|
bsingr/therubyracer
|
2397cae80aa8f458c028e28bdf2bd8a93e6161a6
|
[
"MIT",
"Unlicense"
] | null | null | null |
"""SCons.Tool.g++
Tool-specific initialization for g++.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/g++.py 4629 2010/01/17 22:23:21 scons"
import os.path
import re
import subprocess
import SCons.Tool
import SCons.Util
cplusplus = __import__('c++', globals(), locals(), [])
compilers = ['g++']
def generate(env):
"""Add Builders and construction variables for g++ to an Environment."""
static_obj, shared_obj = SCons.Tool.createObjBuilders(env)
cplusplus.generate(env)
env['CXX'] = env.Detect(compilers)
# platform specific settings
if env['PLATFORM'] == 'aix':
env['SHCXXFLAGS'] = SCons.Util.CLVar('$CXXFLAGS -mminimal-toc')
env['STATIC_AND_SHARED_OBJECTS_ARE_THE_SAME'] = 1
env['SHOBJSUFFIX'] = '$OBJSUFFIX'
elif env['PLATFORM'] == 'hpux':
env['SHOBJSUFFIX'] = '.pic.o'
elif env['PLATFORM'] == 'sunos':
env['SHOBJSUFFIX'] = '.pic.o'
# determine compiler version
if env['CXX']:
#pipe = SCons.Action._subproc(env, [env['CXX'], '-dumpversion'],
pipe = SCons.Action._subproc(env, [env['CXX'], '--version'],
stdin = 'devnull',
stderr = 'devnull',
stdout = subprocess.PIPE)
if pipe.wait() != 0: return
# -dumpversion was added in GCC 3.0. As long as we're supporting
# GCC versions older than that, we should use --version and a
# regular expression.
#line = pipe.stdout.read().strip()
#if line:
# env['CXXVERSION'] = line
line = pipe.stdout.readline()
match = re.search(r'[0-9]+(\.[0-9]+)+', line)
if match:
env['CXXVERSION'] = match.group(0)
def exists(env):
return env.Detect(compilers)
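# --- Illustrative usage sketch (not part of this tool module) ---
# As the module docstring says, this file is normally pulled in through the
# generic SCons.Tool.Tool() selection rather than imported directly. In an
# SConstruct that typically looks like the following (a sketch, assuming g++
# is installed on the build machine):
#
#   env = Environment(tools=['default', 'g++'])
#   print(env.get('CXX'), env.get('CXXVERSION'))
#   env.Program('hello', ['hello.cpp'])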
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| 35.516484 | 95 | 0.659344 |
4a1ab81dfed8fbace06f48106bd00399c4e0c8b2
| 2,119 |
py
|
Python
|
onnxruntime/test/python/onnx_backend_test_series.py
|
stevenlix/onnxruntime
|
8c561c629653e6f4344b5297be3f3faeadb3fe1b
|
[
"MIT"
] | 1 |
2019-03-29T22:18:04.000Z
|
2019-03-29T22:18:04.000Z
|
onnxruntime/test/python/onnx_backend_test_series.py
|
stevenlix/onnxruntime
|
8c561c629653e6f4344b5297be3f3faeadb3fe1b
|
[
"MIT"
] | null | null | null |
onnxruntime/test/python/onnx_backend_test_series.py
|
stevenlix/onnxruntime
|
8c561c629653e6f4344b5297be3f3faeadb3fe1b
|
[
"MIT"
] | null | null | null |
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
import os
import unittest
import onnx.backend.test
import onnxruntime.backend as c2
pytest_plugins = 'onnx.backend.test.report',
backend_test = onnx.backend.test.BackendTest(c2, __name__)
# Type not supported
backend_test.exclude(r'(FLOAT16)')
backend_test.exclude(r'^test_gru_seq_length_cpu.*')
backend_test.exclude(r'('
'^test_cast_DOUBLE_to_FLOAT_cpu.*'
'|^test_cast_FLOAT_to_DOUBLE_cpu.*'
'|^test_cast_FLOAT_to_STRING_cpu.*'
'|^test_cast_STRING_to_FLOAT_cpu.*'
'|^test_convtranspose_1d_cpu.*'
'|^test_convtranspose_3d_cpu.*'
'|^test_scatter_with_axis_cpu.*'
'|^test_scatter_without_axis_cpu.*'
'|^test_shrink_hard_cpu.*'
'|^test_shrink_soft_cpu.*'
'|^test_AvgPool1d_cpu.*'
'|^test_AvgPool1d_stride_cpu.*'
'|^test_AvgPool2d_cpu.*'
'|^test_AvgPool2d_stride_cpu.*'
'|^test_AvgPool3d_cpu.*'
'|^test_AvgPool3d_stride1_pad0_gpu_input_cpu.*'
'|^test_AvgPool3d_stride_cpu.*'
'|^test_BatchNorm1d_3d_input_eval_cpu.*'
'|^test_BatchNorm2d_eval_cpu.*'
'|^test_BatchNorm2d_momentum_eval_cpu.*'
'|^test_BatchNorm3d_eval_cpu.*'
'|^test_BatchNorm3d_momentum_eval_cpu.*'
'|^test_GLU_cpu.*'
'|^test_GLU_dim_cpu.*'
'|^test_Linear_cpu.*'
'|^test_PReLU_1d_cpu.*'
'|^test_PReLU_1d_multiparam_cpu.*'
'|^test_PReLU_2d_cpu.*'
'|^test_PReLU_2d_multiparam_cpu.*'
'|^test_PReLU_3d_cpu.*'
'|^test_PReLU_3d_multiparam_cpu.*'
'|^test_PoissonNLLLLoss_no_reduce_cpu.*'
'|^test_Softsign_cpu.*'
'|^test_operator_add_broadcast_cpu.*'
'|^test_operator_add_size1_broadcast_cpu.*'
'|^test_operator_add_size1_right_broadcast_cpu.*'
'|^test_operator_add_size1_singleton_broadcast_cpu.*'
'|^test_operator_addconstant_cpu.*'
'|^test_operator_addmm_cpu.*'
'|^test_operator_basic_cpu.*'
'|^test_operator_mm_cpu.*'
'|^test_operator_non_float_params_cpu.*'
'|^test_operator_params_cpu.*'
'|^test_operator_pow_cpu.*'
'|^test_shrink_cpu.*'
'|^test_sign_model_cpu.*'
')')
# import all test cases at global scope to make
# them visible to python.unittest.
globals().update(backend_test.enable_report().test_cases)
if __name__ == '__main__':
unittest.main()
| 27.881579 | 59 | 0.780085 |
4a1ab91ae3e16750685b782fbbad64d268c6e735
| 302 |
py
|
Python
|
src/hpx_dashboard/server/plots/__init__.py
|
jokteur/hpx-dashboard
|
91ca3876dec389e514f89f34acdb6ec9cac9d1b4
|
[
"BSD-3-Clause"
] | 6 |
2020-07-31T08:12:09.000Z
|
2022-01-16T03:35:06.000Z
|
src/hpx_dashboard/server/plots/__init__.py
|
jokteur/hpx-dashboard
|
91ca3876dec389e514f89f34acdb6ec9cac9d1b4
|
[
"BSD-3-Clause"
] | 23 |
2020-08-12T08:51:12.000Z
|
2020-09-29T16:45:54.000Z
|
src/hpx_dashboard/server/plots/__init__.py
|
jokteur/hpx-dashboard
|
91ca3876dec389e514f89f34acdb6ec9cac9d1b4
|
[
"BSD-3-Clause"
] | 2 |
2020-10-08T13:55:45.000Z
|
2022-01-16T03:37:13.000Z
|
# -*- coding: utf-8 -*-
#
# HPX - dashboard
#
# Copyright (c) 2020 - ETH Zurich
# All rights reserved
#
# SPDX-License-Identifier: BSD-3-Clause
"""
"""
from .base import BaseElement
from .generator import TimeSeries
from .tasks import TasksPlot
__all__ = ["TimeSeries", "TasksPlot", "BaseElement"]
| 15.894737 | 52 | 0.688742 |
4a1ab962419170264d8b32e3a6e51171a1929e89
| 12,592 |
py
|
Python
|
tensorflow_model_analysis/evaluators/eval_saved_model_util.py
|
paulinenicolas1/model-analysis
|
e6e597db29b31c29f41ccae6921ae7896914fd3c
|
[
"Apache-2.0"
] | null | null | null |
tensorflow_model_analysis/evaluators/eval_saved_model_util.py
|
paulinenicolas1/model-analysis
|
e6e597db29b31c29f41ccae6921ae7896914fd3c
|
[
"Apache-2.0"
] | 1 |
2021-02-24T00:48:35.000Z
|
2021-02-24T00:48:35.000Z
|
tensorflow_model_analysis/evaluators/eval_saved_model_util.py
|
paulinenicolas1/model-analysis
|
e6e597db29b31c29f41ccae6921ae7896914fd3c
|
[
"Apache-2.0"
] | null | null | null |
# Lint as: python3
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utils for evaluations using the EvalMetricsGraph."""
from __future__ import absolute_import
from __future__ import division
# Standard __future__ imports
from __future__ import print_function
from typing import Any, Dict, Iterable, List, Optional, Text
import apache_beam as beam
from tensorflow_model_analysis import constants
from tensorflow_model_analysis import model_util
from tensorflow_model_analysis import size_estimator
from tensorflow_model_analysis import types
from tensorflow_model_analysis.eval_metrics_graph import eval_metrics_graph
from tensorflow_model_analysis.metrics import metric_types
def metric_computations_using_eval_saved_model(
model_name: Text,
model_loader: types.ModelLoader,
batch_size: Optional[int] = None) -> metric_types.MetricComputations:
"""Returns computations for computing metrics natively using EvalMetricsGraph.
Note that unlike other computations, there is no direct key associated with
this computation. Instead the final computation returns the actual internal
metric keys used by the model such as 'auc', etc).
Args:
model_name: Name of model.
model_loader: Loader for shared model containing eval saved model to use for
metric computations.
batch_size: Batch size to use during evaluation (testing only).
"""
return [
# _EvalSavedModelPreprocessor loads the EvalSavedModel into memory under a
# shared handle that can be used by subsequent steps. Combiner lifting and
# producer-consumer fusion should ensure that the processor and combiner
# run in the same process and memory space.
#
# TODO(b/69566045): Remove model loading from _EvalSavedModelPreprocessor
# and move model loading to _EvalSavedModelCombiner.setup after it is
# available in Beam.
metric_types.MetricComputation(
keys=[],
preprocessor=_EvalSavedModelPreprocessor(model_name, model_loader),
combiner=_EvalSavedModelCombiner(model_name, model_loader,
batch_size))
]
class _EvalSavedModelPreprocessor(model_util.DoFnWithModels):
"""A DoFn that loads the EvalSavedModel and returns the input."""
def __init__(self, model_name: Text, model_loader: types.ModelLoader):
super(_EvalSavedModelPreprocessor,
self).__init__({model_name: model_loader})
def process(self, extracts: types.Extracts) -> Iterable[bytes]:
yield extracts[constants.INPUT_KEY]
def _add_metric_variables( # pylint: disable=invalid-name
left: types.MetricVariablesType,
right: types.MetricVariablesType) -> types.MetricVariablesType:
"""Returns left and right metric variables combined."""
if left is not None and right is not None:
if len(left) != len(right):
raise ValueError('metric variables lengths should match, but got '
'%d and %d' % (len(left), len(right)))
return [x + y for x, y in zip(left, right)]
elif left is not None:
return left
else:
return right
def _metrics_by_output_name(
metrics: Dict[Text, Any]) -> Dict[Text, Dict[Text, Any]]:
"""Returns metrics grouped by output name."""
# If an output (head) name is used in an estimator, the metric names are of
# the form "<metric_name>/<head>". This code checks for the existence of a '/'
# where the trailing suffix is shared by at least three metrics. This
  # seemingly random choice of three was chosen to avoid standard cases such as
# 'label/mean' and 'prediction/mean' that are used by estimators but are not
# indicative of a multi-headed model.
result = {}
for name, value in metrics.items():
index = name.rfind('/')
if index == -1:
return {'': metrics}
output_name = name[index + 1:]
if output_name not in result:
result[output_name] = {}
result[output_name][name[:index]] = value
for output_name, values in result.items():
if len(values) <= 2:
return {'': metrics}
return result
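# --- Illustrative example (not part of the original module) ---
# A rough sketch of the grouping heuristic described above: a suffix only
# counts as an output (head) name when at least three metrics share it.
#
#   _metrics_by_output_name(
#       {'auc/head1': 0.8, 'loss/head1': 0.1, 'accuracy/head1': 0.9})
#   # -> {'head1': {'auc': 0.8, 'loss': 0.1, 'accuracy': 0.9}}
#
#   _metrics_by_output_name({'label/mean': 0.5, 'prediction/mean': 0.4})
#   # -> {'': {'label/mean': 0.5, 'prediction/mean': 0.4}}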
# TODO(b/171992041): Clean up by removing this and share logic with
# legacy_aggregate.py
class _AggState(object):
"""Combine state for AggregateCombineFn.
There are two parts to the state: the metric variables (the actual state),
and a list of FeaturesPredictionsLabels or other inputs. See
_AggregateCombineFn for why we need this.
"""
# We really want the batch size to be adaptive like it is in
# beam.BatchElements(), but there isn't an easy way to make it so. For now
# we will limit stored inputs to a max overall byte size.
# TODO(b/73789023): Figure out how to make this batch size dynamic.
_TOTAL_INPUT_BYTE_SIZE_THRESHOLD = 16 << 20 # 16MiB
_DEFAULT_DESIRED_BATCH_SIZE = 1000
__slots__ = [
'metric_variables', 'inputs', 'size_estimator', '_desired_batch_size'
]
# TODO(b/173811366): Consider removing the desired_batch_size knob and
# only use input size.
def __init__(self, desired_batch_size: Optional[int] = None):
self.metric_variables = None # type: Optional[types.MetricVariablesType]
self.inputs = [] # type: List[bytes]
self.size_estimator = size_estimator.SizeEstimator(
size_threshold=self._TOTAL_INPUT_BYTE_SIZE_THRESHOLD, size_fn=len)
if desired_batch_size and desired_batch_size > 0:
self._desired_batch_size = desired_batch_size
else:
self._desired_batch_size = self._DEFAULT_DESIRED_BATCH_SIZE
def __iadd__(self, other: '_AggState') -> '_AggState':
self.metric_variables = _add_metric_variables(self.metric_variables,
other.metric_variables)
self.inputs.extend(other.inputs)
self.size_estimator += other.size_estimator
return self
def add_input(self, new_input: bytes):
self.inputs.append(new_input)
self.size_estimator.update(new_input)
def clear_inputs(self):
del self.inputs[:]
self.size_estimator.clear()
def add_metrics_variables(self, metric_variables: types.MetricVariablesType):
self.metric_variables = _add_metric_variables(self.metric_variables,
metric_variables)
def should_flush(self) -> bool:
return (len(self.inputs) >= self._desired_batch_size or
self.size_estimator.should_flush())
@beam.typehints.with_input_types(bytes)
@beam.typehints.with_output_types(Dict[metric_types.MetricKey, Any])
class _EvalSavedModelCombiner(model_util.CombineFnWithModels):
"""Aggregate combine function.
This function really does three things:
1. Batching of FeaturesPredictionsLabels.
3. "Partial reduction" of these batches by sending this through the
"intro metrics" step.
3. The "normal" combining of MetricVariables.
What we really want to do is conceptually the following:
Predictions | GroupByKey() | KeyAwareBatchElements()
| ParDo(IntroMetrics()) | CombineValues(CombineMetricVariables()).
but there's no way to KeyAwareBatchElements in Beam, and no way to do partial
reductions either. Hence, this CombineFn has to do the work of batching,
partial reduction (intro metrics), and actual combining, all in one.
We do this by accumulating FeaturesPredictionsLabels in the combine state
until we accumulate a large enough batch, at which point we send them
through the "intro metrics" step. When merging, we merge the metric variables
and accumulate FeaturesPredictionsLabels accordingly. We do one final
"intro metrics" and merge step before producing the final output value.
See also:
BEAM-3737: Key-aware batching function
(https://issues.apache.org/jira/browse/BEAM-3737).
"""
def __init__(self,
model_name: Text,
model_loader: types.ModelLoader,
desired_batch_size: Optional[int] = None):
super(_EvalSavedModelCombiner, self).__init__({model_name: model_loader})
self._model_name = model_name
self._desired_batch_size = desired_batch_size
self._eval_metrics_graph = None # type: eval_metrics_graph.EvalMetricsGraph
self._batch_size_beam_metric = beam.metrics.Metrics.distribution(
constants.METRICS_NAMESPACE, 'eval_saved_model_combine_batch_size')
self._total_input_byte_size_beam_metric = beam.metrics.Metrics.distribution(
constants.METRICS_NAMESPACE,
'eval_saved_model_combine_batch_bytes_size')
self._num_compacts = beam.metrics.Metrics.counter(
constants.METRICS_NAMESPACE, 'num_compacts')
def _maybe_do_batch(self,
accumulator: _AggState,
force: bool = False) -> None:
"""Maybe intro metrics and update accumulator in place.
Checks if accumulator has enough FPLs for a batch, and if so, does the
intro metrics for the batch and updates accumulator in place.
Args:
accumulator: Accumulator. Will be updated in place.
force: Force intro metrics even if accumulator has less FPLs than the
batch size or max byte size.
"""
if self._eval_metrics_graph is None:
self._setup_if_needed()
self._eval_metrics_graph = self._loaded_models[self._model_name]
if force or accumulator.should_flush():
if accumulator.inputs:
self._batch_size_beam_metric.update(len(accumulator.inputs))
self._total_input_byte_size_beam_metric.update(
accumulator.size_estimator.get_estimate())
inputs_for_metrics = accumulator.inputs
if inputs_for_metrics:
accumulator.add_metrics_variables(
self._eval_metrics_graph.metrics_reset_update_get_list(
inputs_for_metrics))
else:
# Call to metrics_reset_update_get_list does a reset prior to the
# metrics update, but does not handle empty updates. Explicitly
# calling just reset here, to make the flow clear.
self._eval_metrics_graph.reset_metric_variables()
accumulator.clear_inputs()
def create_accumulator(self) -> _AggState:
return _AggState(desired_batch_size=self._desired_batch_size)
def add_input(self, accumulator: _AggState, elem: bytes) -> _AggState:
accumulator.add_input(elem)
self._maybe_do_batch(accumulator)
return accumulator
def merge_accumulators(self, accumulators: Iterable[_AggState]) -> _AggState:
result = self.create_accumulator()
for acc in accumulators:
result += acc
# Compact within the loop to avoid accumulating too much data.
#
# During the "map" side of combining merging happens with memory limits
# but on the "reduce" side it's across all bundles (for a given key).
#
# So we could potentially accumulate get num_bundles * batch_size
# elements if we don't process the batches within the loop, which
# could cause OOM errors (b/77293756).
self._maybe_do_batch(result)
return result
def compact(self, accumulator: _AggState) -> _AggState:
self._maybe_do_batch(accumulator, force=True) # Guaranteed compaction.
self._num_compacts.inc(1)
return accumulator
def extract_output(
self, accumulator: _AggState) -> Dict[metric_types.MetricKey, Any]:
# It's possible that the accumulator has not been fully flushed, if it was
# not produced by a call to compact (which is not guaranteed across all Beam
# Runners), so we defensively flush it here again, before we extract data
# from it, to ensure correctness.
self._maybe_do_batch(accumulator, force=True)
result = {}
if accumulator.metric_variables:
eval_saved_model = self._loaded_models[self._model_name]
grouped_metrics = _metrics_by_output_name(
eval_saved_model.metrics_set_variables_and_get_values(
accumulator.metric_variables))
for output_name, metrics in grouped_metrics.items():
for name, value in metrics.items():
key = metric_types.MetricKey(
name=name, model_name=self._model_name, output_name=output_name)
result[key] = value
return result
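# --- Illustrative sketch (not part of the original module) ---
# The conceptual flow the combiner docstring describes, written out as a
# hypothetical Beam snippet (the names and wiring are assumptions, not the
# actual TFMA pipeline API):
#
#   metrics = (
#       serialized_examples
#       | 'CombineMetrics' >> beam.CombineGlobally(
#             _EvalSavedModelCombiner(model_name, model_loader))
#   )
#   # Batching, the partial "intro metrics" reduction and the final merge of
#   # metric variables all happen inside the CombineFn, as explained above.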
| 41.973333 | 80 | 0.724031 |
4a1abb1a49beda2c307fd2400ae6009eafe1c6c6
| 2,505 |
py
|
Python
|
docgen.py
|
bureau14/qdb-api-python
|
2a010df3252d39bc4d529f545547c5cefb9fe86e
|
[
"BSD-3-Clause"
] | 9 |
2015-09-02T20:13:13.000Z
|
2020-07-16T14:17:36.000Z
|
docgen.py
|
bureau14/qdb-api-python
|
2a010df3252d39bc4d529f545547c5cefb9fe86e
|
[
"BSD-3-Clause"
] | 5 |
2018-02-20T10:47:02.000Z
|
2020-05-20T10:05:49.000Z
|
docgen.py
|
bureau14/qdb-api-python
|
2a010df3252d39bc4d529f545547c5cefb9fe86e
|
[
"BSD-3-Clause"
] | 1 |
2018-04-01T11:12:56.000Z
|
2018-04-01T11:12:56.000Z
|
import os
import pdoc
import quasardb
import quasardb.pool
import quasardb.stats
import quasardb.pandas as qdbpd
context = pdoc.Context()
# This is a hack: pdoc has a lot of issues with importing submodules properly. It's
# related to pybind11 generating invalid docstrings, and we would get import errors
# and whatnot. E.g. the quasardb.Blob() has a `expiry=datetime.datetime()` mention
# in the type, _but_ the actual modules (as python sees it) it generates don't even
# import datetime.
#
# This then causes pdoc to throw an error.
#
# As such, we're making our own module subclass here, which will allow us to manually
# tell pdoc which submodules to load, rather than telling it to traverse everything
# automatically.
class Module(pdoc.Module):
def __init__(self, *args, submodules=[], **kwargs):
super().__init__(*args, **kwargs)
self._submodules = submodules
def submodules(self):
return self._submodules
module_qdb = Module(quasardb.quasardb, context=context,
submodules=[pdoc.Module(quasardb.pool, context=context),
pdoc.Module(quasardb.stats, context=context),
pdoc.Module(quasardb.pandas, context=context)])
modules = [module_qdb]
pdoc.link_inheritance(context)
def recursive_htmls(mod):
yield mod.name, mod.html()
for submod in mod.submodules():
yield from recursive_htmls(submod)
def _strip_prefix(s, p):
if s.startswith(p):
return s[len(p):]
else:
return s
def write_module(filename, html):
with open(filename, 'w') as f:
f.write(html)
for mod in modules:
for module_name, html in recursive_htmls(mod):
# This hack is related to the fact that _sometimes_, a module is called
# `quasardb.quasardb`, and other times it's just `quasardb`. Apparently, when
# a module's name is `quasardb.pool`, pydoc thinks the file should be called
# `pool` :/
#
# So, just to make sure we have "everything", we write each module twice:
# once with the quasardb. prefix, and once without.
write_module("doc/" + module_name + ".html", html)
write_module("doc/" + _strip_prefix(module_name, 'quasardb.') + ".html", html)
try:
os.mkdir("doc/" + _strip_prefix(module_name, 'quasardb.'))
except FileExistsError:
pass
write_module("doc/" + _strip_prefix(module_name, 'quasardb.') + "/index.html", html)
| 34.315068 | 92 | 0.66507 |
4a1abb74416fa577201894b4b19679214ec4fb1d
| 2,906 |
py
|
Python
|
nicos_virt_mlz/treff/setups/det.py
|
jkrueger1/nicos
|
5f4ce66c312dedd78995f9d91e8a6e3c891b262b
|
[
"CC-BY-3.0",
"Apache-2.0",
"CC-BY-4.0"
] | null | null | null |
nicos_virt_mlz/treff/setups/det.py
|
jkrueger1/nicos
|
5f4ce66c312dedd78995f9d91e8a6e3c891b262b
|
[
"CC-BY-3.0",
"Apache-2.0",
"CC-BY-4.0"
] | null | null | null |
nicos_virt_mlz/treff/setups/det.py
|
jkrueger1/nicos
|
5f4ce66c312dedd78995f9d91e8a6e3c891b262b
|
[
"CC-BY-3.0",
"Apache-2.0",
"CC-BY-4.0"
] | null | null | null |
# -*- coding: utf-8 -*-
description = 'detector setup'
group = 'basic'
includes = [
'analyzer',
'beamstop',
'treff',
]
sysconfig = dict(
datasinks = ['NPGZFileSink', 'YAMLSaver', 'LiveViewSink'],
)
basename = '%(proposal)s_%(session.experiment.sample.filename)s_'
scanbasename = basename + '%(scancounter)08d_%(pointnumber)08d'
countbasename = basename + '%(pointpropcounter)010d'
devices = dict(
NPGZFileSink = device('nicos.devices.datasinks.text.NPGZFileSink',
description = 'Saves image data in numpy text format',
filenametemplate = [
scanbasename + '.gz',
countbasename + '.gz',
],
),
YAMLSaver = device('nicos_mlz.maria.devices.yamlformat.YAMLFileSink',
filenametemplate = [
scanbasename + '.yaml',
countbasename + '.yaml',
],
),
LiveViewSink = device('nicos.devices.datasinks.LiveViewSink',
description = 'Sends image data to LiveViewWidget',
),
mcstas = device('nicos_virt_mlz.treff.devices.McStasSimulation',
description = 'McStas simulation',
neutronspersec = 1.6e6,
beamstop = 'beamstop',
sample_y = 'sample_y',
sample_x = 'sample_x',
sample_z = 'sample_z',
omega = 'omega',
chi = 'chi',
phi = 'phi',
detarm = 'detarm',
s1 = 's1',
s2 = 's2',
sample = 'Sample',
),
detimg = device('nicos.devices.mcstas.McStasImage',
description = 'Detector image',
mcstas = 'mcstas',
size = (256, 256),
mcstasfile = 'PSD_TREFF_total.psd',
fmtstr = '%d',
unit = 'cts',
visibility = (),
),
full = device('nicos.devices.generic.RateChannel',
description = 'Full detector cts and rate',
),
roi1 = device('nicos.devices.generic.RectROIChannel',
description = 'ROI 1',
roi = (122, 50, 12, 140),
),
roi2 = device('nicos.devices.generic.RectROIChannel',
description = 'ROI 2',
roi = (122, 119, 12, 18),
),
roi_pol = device('nicos.devices.generic.RectROIChannel',
description = 'ROI 1',
roi = (122, 76, 12, 114),
),
timer = device('nicos.devices.mcstas.McStasTimer',
description = 'McStas virtual neutron counts timer channel',
mcstas = 'mcstas',
),
det = device('nicos_mlz.maria.devices.detector.MariaDetector',
description = 'Jumiom detector',
shutter = 'expshutter',
timers = ['timer'],
monitors = ['mon0', 'mon1'],
images = ['detimg'],
counters = ['roi1', 'roi2', 'roi_pol', 'full'],
postprocess = [
('roi1', 'detimg'),
('roi2', 'detimg'),
('roi_pol', 'detimg'),
('full', 'detimg', 'timer')
],
liveinterval = .5,
),
)
startupcode = """
SetDetectors(det)
"""
| 29.06 | 73 | 0.558844 |
4a1abb8f1e1ebdc26fc9428bbcf727de60963fa8
| 518 |
py
|
Python
|
pandas_ta/overlap/hlc3.py
|
allahyarzadeh/pandas-ta
|
1866016b7b73caef0fc15d55bfb73dcd58b3d21f
|
[
"MIT"
] | 1 |
2019-07-09T11:07:39.000Z
|
2019-07-09T11:07:39.000Z
|
pandas_ta/overlap/hlc3.py
|
marchanero/pandas-ta
|
a1dac5922f544af795fd6311df02406fc2728ebe
|
[
"MIT"
] | null | null | null |
pandas_ta/overlap/hlc3.py
|
marchanero/pandas-ta
|
a1dac5922f544af795fd6311df02406fc2728ebe
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from ..utils import get_offset, verify_series
def hlc3(high, low, close, offset=None, **kwargs):
"""Indicator: HLC3"""
# Validate Arguments
high = verify_series(high)
low = verify_series(low)
close = verify_series(close)
offset = get_offset(offset)
# Calculate Result
hlc3 = (high + low + close) / 3
# Offset
if offset != 0:
hlc3 = hlc3.shift(offset)
# Name & Category
hlc3.name = "HLC3"
hlc3.category = 'overlap'
return hlc3
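# --- Illustrative usage sketch (not part of the original module) ---
# HLC3 is just the bar-wise average of high, low and close. A minimal,
# hypothetical example with pandas Series (the values are made up):
#
#   import pandas as pd
#   high = pd.Series([2.0, 3.0])
#   low = pd.Series([1.0, 2.0])
#   close = pd.Series([1.5, 2.5])
#   hlc3(high, low, close)
#   # -> Series([1.5, 2.5], name="HLC3")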
| 22.521739 | 50 | 0.611969 |
4a1abca9eb77d523bf1b8656ffc0805322fe40b8
| 5,001 |
py
|
Python
|
scripts/hdt2csv.py
|
KRRVU/kgbench
|
d70ff8a6f48228f38a4ad3fee8df033050213556
|
[
"MIT"
] | 19 |
2020-11-12T18:34:18.000Z
|
2022-01-25T19:33:22.000Z
|
scripts/hdt2csv.py
|
KRRVU/kgbench
|
d70ff8a6f48228f38a4ad3fee8df033050213556
|
[
"MIT"
] | 1 |
2022-01-25T19:21:38.000Z
|
2022-01-26T09:38:07.000Z
|
scripts/hdt2csv.py
|
KRRVU/kgbench
|
d70ff8a6f48228f38a4ad3fee8df033050213556
|
[
"MIT"
] | 3 |
2021-01-17T21:37:58.000Z
|
2022-01-26T09:49:36.000Z
|
#!/usr/bin/env python
import csv
import gzip
import sys
# https://github.com/Callidon/pyHDT
import hdt
from tqdm import tqdm
import pandas as pd
from rdflib import Graph
import kgbench as kg
def generate_csv_context(doc):
entities = set()
relations = set()
datatypes = set()
triples, c = doc.search_triples('', '', '')
for s, p, o in tqdm(triples, total=c):
datatypes.add(kg.entity_hdt(s)[1])
datatypes.add(kg.entity_hdt(o)[1])
i2d = list(datatypes)
i2d.sort()
triples, c = doc.search_triples('', '', '')
for s, p, o in tqdm(triples, total=c):
se, sd = kg.entity_hdt(s)
oe, od = kg.entity_hdt(o)
entities.add((se, sd))
entities.add((oe, od))
relations.add(p)
i2e = list(entities)
i2r = list(relations)
i2e.sort(); i2r.sort()
# -- this is required for the script to be deterministic
df = pd.DataFrame(enumerate(i2d), columns=['index', 'annotation'])
df.to_csv('nodetypes.int.csv', index=False, header=True)
ent_data = [(i, dt, ent) for i, (ent, dt) in enumerate(i2e)]
df = pd.DataFrame(ent_data, columns=['index', 'annotation', 'label'])
df.to_csv('nodes.int.csv', index=False, header=True, quoting=csv.QUOTE_NONNUMERIC)
df = pd.DataFrame(enumerate(i2r), columns=['index', 'label'])
df.to_csv('relations.int.csv', index=False, header=True, quoting=csv.QUOTE_NONNUMERIC)
e2i = {e:i for i, e in enumerate(i2e)}
r2i = {r:i for i, r in enumerate(i2r)}
# Write triples to CSV
print('Writing integer triples.')
triples, c = doc.search_triples('', '', '')
with gzip.open('triples.int.csv.gz', 'wt') as file:
for s, p, o in tqdm(triples, total=c):
sp = kg.entity_hdt(s)
op = kg.entity_hdt(o)
file.write(f'{e2i[sp]}, {r2i[p]}, {e2i[op]}\n')
return (e2i, r2i)
def generate_csv_splits(splits, e2i, r2i):
if len(splits) <= 0:
return
train_path, test_path, valid_path = splits[:3]
# Load test/train/valid/meta
g_train = Graph()
with gzip.open(train_path, 'rb') as gzf:
g_train.parse(gzf, format='nt')
g_test = Graph()
with gzip.open(test_path, 'rb') as gzf:
g_test.parse(gzf, format='nt')
g_valid = Graph()
with gzip.open(valid_path, 'rb') as gzf:
g_valid.parse(gzf, format='nt')
g_meta = Graph()
if len(splits) == 4:
meta_path = splits[3]
with gzip.open(meta_path, 'rb') as gzf:
g_meta.parse(gzf, format='nt')
c2i = dict()
i = 0
for g in [g_train, g_test, g_valid, g_meta]:
classes = set(g.objects())
for c in classes:
if c not in c2i.keys():
c2i[c] = i
i += 1
with open('all.int.csv', 'w') as allfile:
allwriter = csv.writer(allfile, delimiter=',')
allwriter.writerow(['index', 'class'])
with open('training.int.csv', 'w') as csvfile:
writer = csv.writer(csvfile, delimiter=',')
writer.writerow(['index', 'class'])
for s, p, o in g_train.triples((None, None, None)):
s_idx = e2i[(str(s), 'iri')]
o_idx = c2i[o]
writer.writerow([s_idx, o_idx])
allwriter.writerow([s_idx, o_idx])
with open('testing.int.csv', 'w') as csvfile:
writer = csv.writer(csvfile, delimiter=',')
writer.writerow(['index', 'class'])
for s, p, o in g_test.triples((None, None, None)):
s_idx = e2i[(str(s), 'iri')]
o_idx = c2i[o]
writer.writerow([s_idx, o_idx])
allwriter.writerow([s_idx, o_idx])
with open('validation.int.csv', 'w') as csvfile:
writer = csv.writer(csvfile, delimiter=',')
writer.writerow(['index', 'class'])
for s, p, o in g_valid.triples((None, None, None)):
s_idx = e2i[(str(s), 'iri')]
o_idx = c2i[o]
writer.writerow([s_idx, o_idx])
allwriter.writerow([s_idx, o_idx])
if len(g_meta) > 0:
with open('meta-testing.int.csv', 'w') as csvfile:
writer = csv.writer(csvfile, delimiter=',')
writer.writerow(['index', 'class'])
for s, p, o in g_meta.triples((None, None, None)):
s_idx = e2i[(str(s), 'iri')]
o_idx = c2i[o]
writer.writerow([s_idx, o_idx])
allwriter.writerow([s_idx, o_idx])
def generate_csv(doc, splits):
e2i, r2i = generate_csv_context(doc)
generate_csv_splits(splits, e2i, r2i)
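# --- Illustrative usage (not part of the original script) ---
# A hypothetical invocation, mirroring the __main__ block below:
#
#   python hdt2csv.py graph_stripped.hdt train.nt.gz test.nt.gz valid.nt.gz
#
# This writes nodetypes.int.csv, nodes.int.csv, relations.int.csv and
# triples.int.csv.gz, plus the all/training/testing/validation .int.csv splits,
# to the current working directory.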
if __name__ == "__main__":
args = sys.argv[1:]
if len(args) < 1 or len(args) > 5:
print("USAGE: ./hdt2csv.py <graph_stripped.hdt> [<train_set.nt.gz> <test_set.nt.gz> <valid_set.nt.gz> [<meta_set.nt.gz>]]")
hdtfile = args[0]
doc = hdt.HDTDocument(hdtfile)
generate_csv(doc, args[1:])
| 30.493902 | 131 | 0.557888 |
4a1abd5a778691beefeed9ecd90ce0983ae35132
| 2,276 |
py
|
Python
|
netforce_hr/netforce_hr/models/hr_payslip_line.py
|
nfco/netforce
|
35252eecd0a6633ab9d82162e9e3ff57d4da029a
|
[
"MIT"
] | 27 |
2015-09-30T23:53:30.000Z
|
2021-06-07T04:56:25.000Z
|
netforce_hr/netforce_hr/models/hr_payslip_line.py
|
nfco/netforce
|
35252eecd0a6633ab9d82162e9e3ff57d4da029a
|
[
"MIT"
] | 191 |
2015-10-08T11:46:30.000Z
|
2019-11-14T02:24:36.000Z
|
netforce_hr/netforce_hr/models/hr_payslip_line.py
|
nfco/netforce
|
35252eecd0a6633ab9d82162e9e3ff57d4da029a
|
[
"MIT"
] | 32 |
2015-10-01T03:59:43.000Z
|
2022-01-13T07:31:05.000Z
|
# Copyright (c) 2012-2015 Netforce Co. Ltd.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE
# OR OTHER DEALINGS IN THE SOFTWARE.
from netforce.model import Model, fields, get_model
from datetime import *
from dateutil.relativedelta import *
class PaySlipLine(Model):
_name = "hr.payslip.line"
_string = "Pay Slip Line"
_fields = {
"slip_id": fields.Many2One("hr.payslip", "Pay Slip", required=True, on_delete="cascade"),
"sequence": fields.Integer("Sequence"),
"payitem_id": fields.Many2One("hr.payitem", "Pay Item", required=True),
"qty": fields.Decimal("Qty", required=True),
"rate": fields.Decimal("Rate", required=True),
"amount": fields.Decimal("Amount", function="get_amount"),
"comments": fields.One2Many("message", "related_id", "Comments"),
}
_defaults = {
"state": "draft",
"date_from": lambda *a: date.today().strftime("%Y-%m-01"),
"date_to": lambda *a: (date.today() + relativedelta(day=31)).strftime("%Y-%m-%d"),
}
_order = "sequence"
def get_amount(self, ids, context={}):
# TODO need some logic for compute pay item
vals = {}
for obj in self.browse(ids):
vals[obj.id] = obj.qty * obj.rate
return vals
PaySlipLine.register()
| 42.943396 | 97 | 0.690685 |
4a1abf397847a3aeeeeef30de37d22628d62ec89
| 1,109 |
py
|
Python
|
symposion_project/proposals/models.py
|
ImaginaryLandscape/symposion
|
e3d74d8d274e6aaf716b38a4dd3f830c76e9d295
|
[
"BSD-3-Clause"
] | 3 |
2018-07-24T12:10:28.000Z
|
2021-11-16T13:52:56.000Z
|
symposion_project/proposals/models.py
|
ImaginaryLandscape/symposion
|
e3d74d8d274e6aaf716b38a4dd3f830c76e9d295
|
[
"BSD-3-Clause"
] | 1 |
2022-03-21T07:17:57.000Z
|
2022-03-21T07:17:57.000Z
|
symposion_project/proposals/models.py
|
ImaginaryLandscape/symposion
|
e3d74d8d274e6aaf716b38a4dd3f830c76e9d295
|
[
"BSD-3-Clause"
] | 1 |
2018-07-24T12:10:29.000Z
|
2018-07-24T12:10:29.000Z
|
from django.db import models
from symposion.proposals.models import ProposalBase
class Proposal(ProposalBase):
AUDIENCE_LEVEL_NOVICE = 1
AUDIENCE_LEVEL_EXPERIENCED = 2
AUDIENCE_LEVEL_INTERMEDIATE = 3
AUDIENCE_LEVELS = [
(AUDIENCE_LEVEL_NOVICE, "Novice"),
(AUDIENCE_LEVEL_INTERMEDIATE, "Intermediate"),
(AUDIENCE_LEVEL_EXPERIENCED, "Experienced"),
]
audience_level = models.IntegerField(choices=AUDIENCE_LEVELS)
recording_release = models.BooleanField(
default=True,
help_text="By submitting your talk proposal, you agree to give permission to the conference organizers to record, edit, and release audio and/or video of your presentation. If you do not agree to this, please uncheck this box."
)
class Meta:
abstract = True
class TalkProposal(Proposal):
class Meta:
verbose_name = "talk proposal"
class TutorialProposal(Proposal):
class Meta:
verbose_name = "tutorial proposal"
class PosterProposal(Proposal):
class Meta:
verbose_name = "poster proposal"
| 26.404762 | 235 | 0.702435 |
4a1ac1313a085010610779327b887553062f39ff
| 679 |
py
|
Python
|
IPProxyPool/settings.py
|
jiangyx3915/IPProxyPool
|
75ed064bf376fd530774efa353d18890346291e1
|
[
"MIT"
] | null | null | null |
IPProxyPool/settings.py
|
jiangyx3915/IPProxyPool
|
75ed064bf376fd530774efa353d18890346291e1
|
[
"MIT"
] | null | null | null |
IPProxyPool/settings.py
|
jiangyx3915/IPProxyPool
|
75ed064bf376fd530774efa353d18890346291e1
|
[
"MIT"
] | null | null | null |
"""配置文件"""
# Redis 配置
REDIS_HOST = '127.0.0.1' # Redis数据库地址
REDIS_PORT = 6379 # Redis端口
REDIS_PASSWORD = None # Redis密码,如无填None
REDIS_KEY = 'proxies' # 代理池redis键值
# 代理分数
INITIAL_SCORE = 10 # 初始化分数
MIN_SCORE = 0 # 最低分
MAX_SCORE = 100 # 最高分
POOL_MAX_THRESHOLD = 5000 # 代理池容量限制
BATCH_TEST_SIZE = 10 # 批量测试数量
# 检查周期配置
TESTER_CYCLE = 20 # 测试器运行周期
GETTER_CYCLE = 300 # 获取器运行周期
# 模块加载配置
ENABLE_TESTER = True # 是否开启测试器
ENABLE_GETTER = True # 是否开启获取器
ENABLE_API = True # 是否开启API的支持
# api配置
API_HOST = '0.0.0.0' # API 服务器地址
API_PORT = 5000 # API 服务器端口
| 23.413793 | 45 | 0.57732 |
4a1ac147c44f480aabb3af6bd3d6d8d1a7f90677
| 1,796 |
py
|
Python
|
part3/sprites.py
|
codingfever-anishmishra/Warrior-Game-developement-in-python
|
ff9bb627251f6b82a08f3bc7d84fb72068fea3db
|
[
"Apache-2.0"
] | null | null | null |
part3/sprites.py
|
codingfever-anishmishra/Warrior-Game-developement-in-python
|
ff9bb627251f6b82a08f3bc7d84fb72068fea3db
|
[
"Apache-2.0"
] | null | null | null |
part3/sprites.py
|
codingfever-anishmishra/Warrior-Game-developement-in-python
|
ff9bb627251f6b82a08f3bc7d84fb72068fea3db
|
[
"Apache-2.0"
] | null | null | null |
from random import randrange
from settings import *
import pygame
vec = pygame.math.Vector2
class Platform(pygame.sprite.Sprite):
def __init__(self,game):
self.groups = game.all_sprites,game.platform
pygame.sprite.Sprite.__init__(self, self.groups)
self.game = game
self.image = self.game.platform_img
self.rect = self.image.get_rect()
x = randrange(int(WIDTH*0.4), int(WIDTH-self.rect.width),50)
y = randrange(int(10+self.rect.height), int(HEIGHT-self.rect.height-50),50)
self.pos = vec(x,y)
self.rect.topleft = self.pos
self.mask = pygame.mask.from_surface(self.image)
class Enemy(pygame.sprite.Sprite):
def __init__(self,game,platform):
self.groups = game.all_sprites,game.enemy_group
pygame.sprite.Sprite.__init__(self, self.groups)
self.game = game
self.image = self.game.enemy_img[0]
self.rect = self.image.get_rect()
#self.hit_rect_body = pygame.Rect(0,0,int(self.rect.width*0.3),self.rect.height*0.6)
#self.hit_rect_head = pygame.Rect(0,0,self.rect.width*0.2,self.rect.height*0.3)
self.pos = vec(platform.rect.right-20,platform.rect.top+10)
self.i = 0 # to change image of enemy
def update(self):
self.image = self.game.enemy_img[self.i]
self.rect = self.image.get_rect()
self.rect.right = self.pos.x
self.rect.bottom = self.pos.y
#self.hit_rect_body.right = self.rect.right-30
#self.hit_rect_body.bottom = self.rect.bottom
#self.hit_rect_head.right = self.rect.right-50
#self.hit_rect_head.y = self.rect.top+10
self.i +=1
if self.i>= len(self.game.enemy_img)-1:
self.i = 0
self.mask = pygame.mask.from_surface(self.image)
| 41.767442 | 92 | 0.649777 |
4a1ac16d769bf76943e33514212d418bb4594290
| 305 |
py
|
Python
|
2021/05/test_code.py
|
Akumatic/Advent-of-Code
|
bf2efe4d5a2c95ceb5f52ddbbc15ef0f2ac48618
|
[
"MIT"
] | 22 |
2019-12-13T20:41:52.000Z
|
2022-01-05T00:19:21.000Z
|
2021/05/test_code.py
|
Akumatic/Advent-of-Code
|
bf2efe4d5a2c95ceb5f52ddbbc15ef0f2ac48618
|
[
"MIT"
] | null | null | null |
2021/05/test_code.py
|
Akumatic/Advent-of-Code
|
bf2efe4d5a2c95ceb5f52ddbbc15ef0f2ac48618
|
[
"MIT"
] | 13 |
2019-12-21T02:35:19.000Z
|
2022-02-14T09:37:01.000Z
|
# SPDX-License-Identifier: MIT
# Copyright (c) 2021 Akumatic
from code import part1, part2, read_file
def test():
vals = read_file("test_input.txt")
assert part1(vals) == 5
print("Passed Part 1")
assert part2(vals) == 12
print("Passed Part 2")
if __name__ == "__main__":
test()
| 20.333333 | 40 | 0.652459 |
4a1ac1768594b565edee8817e2b1e09c374198e2
| 7,836 |
py
|
Python
|
verifier/accessions.py
|
jwestgard/aws-verifier
|
436258d5e5cf008bc39a9fbac334e6acbdba6693
|
[
"MIT"
] | null | null | null |
verifier/accessions.py
|
jwestgard/aws-verifier
|
436258d5e5cf008bc39a9fbac334e6acbdba6693
|
[
"MIT"
] | null | null | null |
verifier/accessions.py
|
jwestgard/aws-verifier
|
436258d5e5cf008bc39a9fbac334e6acbdba6693
|
[
"MIT"
] | null | null | null |
import csv
from datetime import datetime
import os
import re
import sys
from .utils import calculate_md5
from .utils import human_readable
class Asset():
"""
Class representing a single asset under preservation.
"""
def __init__(self, filename, sourcefile, sourceline,
bytes=None, timestamp=None, md5=None):
self.filename = filename
self.bytes = bytes
self.timestamp = timestamp
self.md5 = md5
self.restored = []
self.duplicates = []
self.extra_copies = []
self.sourcefile = sourcefile
self.sourceline = sourceline
self.status = 'Not checked'
@property
def signature(self):
return (self.filename, self.md5, self.bytes)
class Batch():
"""
Class representing a set of assets having been accessioned.
"""
def __init__(self, identifier, *dirlists):
self.identifier = identifier
self.dirlists = [d for d in dirlists]
self.assets = []
self.status = None
for dirlist in self.dirlists:
self.load_assets(dirlist)
@property
def bytes(self):
return sum(
[asset.bytes for asset in self.assets if asset.bytes is not None]
)
@property
def has_hashes(self):
return all(
[asset.md5 is not None for asset in self.assets]
)
def load_assets(self, dirlist):
self.assets.extend([asset for asset in dirlist.assets])
def summary_dict(self):
return {'identifier': self.identifier,
'dirlists': {d.md5: d.filename for d in self.dirlists},
'num_assets': len(self.assets),
'bytes': self.bytes,
'human_readable': human_readable(self.bytes),
'status': self.status
}
@property
def asset_root(self):
return os.path.commonpath([a.restored.path for a in self.assets])
def has_duplicates(self):
        return len(self.assets) > len(set([a.signature for a in self.assets]))
class DirList():
"""
Class representing an accession inventory list
making up all or part of a batch.
"""
def __init__(self, path):
self.filename = os.path.basename(path)
self.path = path
self.bytes = int(os.path.getsize(path))
self.md5 = calculate_md5(path)
self.dirlines = 0
self.extralines = 0
self.lines = self.read()
def read(self):
for encoding in ['utf8', 'iso-8859-1', 'macroman']:
try:
with open(self.path, encoding=encoding) as handle:
return [line.strip() for line in handle.readlines()]
except ValueError:
continue
print(f'Could not read directory listing file {self.path}')
sys.exit(1)
@property
def assets(self):
results = []
# Examine the dirlist layout and set up iteration
# Handle space-delimited dirlists
ptrn = r'^(\d{2}/\d{2}/\d{4}\s+\d{2}:\d{2}\s[AP]M)\s+([0-9,]+)\s(.+?)$'
if self.lines[0].startswith('Volume in drive'):
for n, line in enumerate(self.lines):
# check if the line describes an asset
match = re.match(ptrn, line)
if not match:
continue
else:
timestamp = datetime.strptime(match.group(1),
'%m/%d/%Y %I:%M %p'
)
bytes = int(''.join(
[c for c in match.group(2) if c.isdigit()])
)
filename = match.group(3)
results.append(
Asset(filename=filename, bytes=bytes,
timestamp=timestamp, sourcefile=self.filename,
sourceline=n)
)
return results
# Handle semi-colon separted dirlists
elif ';' in self.lines[0]:
for n, line in enumerate(self.lines):
cols = line.split(';')
if cols[2] == 'Directory':
continue
else:
filename = os.path.basename(cols[0].rsplit('\\')[-1])
timestamp = datetime.strptime(cols[1],
'%m/%d/%Y %I:%M:%S %p'
)
bytes = round(float(cols[2].replace(',', '')) * 1024)
results.append(
Asset(filename=filename, bytes=bytes,
timestamp=timestamp, sourcefile=self.filename,
sourceline=n)
)
return results
# Handle CSV and TSV files
else:
delimiter = '\t' if '\t' in self.lines[0] else ','
possible_keys = {
'filename': ['Filename', 'File Name', 'FILENAME', 'Key',
'"Filename"', '"Key"'],
'bytes': ['Size', 'SIZE', 'File Size', 'Bytes', 'BYTES',
'"Size"'],
'timestamp': ['Mod Date', 'Moddate', 'MODDATE', '"Mod Date"'],
'md5': ['MD5', 'Other', 'Data', '"Other"', '"Data"', 'md5']
}
columns = self.lines[0].split(delimiter)
operative_keys = {}
for attribute, keys in possible_keys.items():
for key in keys:
if key in columns:
operative_keys[attribute] = key.replace('"','')
break
reader = csv.DictReader(self.lines,
quotechar='"',
delimiter=delimiter
)
for n, row in enumerate(reader):
# Skip extra rows in Prange-style "CSV" files
if 'File Name' in row and any([
(row.get('Type') == 'Directory'),
(row.get('File Name').startswith('Extension')),
(row.get('File Name').startswith('Total file size')),
(row.get('File Name') == '')
]):
continue
else:
filename_key = operative_keys.get('filename')
if filename_key is not None:
filename = row[filename_key]
else:
filename = None
bytes_key = operative_keys.get('bytes')
if bytes_key is not None:
raw = row[bytes_key]
digits = ''.join([c for c in raw if c.isdigit()])
                        if digits != '':
bytes = int(digits)
else:
bytes = None
else:
bytes = None
timestamp_key = operative_keys.get('timestamp')
if timestamp_key is not None:
timestamp = row[timestamp_key]
else:
timestamp = None
md5_key = operative_keys.get('md5')
if md5_key is not None:
md5 = row[md5_key]
else:
md5 = None
results.append(
Asset(filename=filename, bytes=bytes,
timestamp=timestamp, md5=md5,
sourcefile=self.filename, sourceline=n)
)
return results
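# --- Illustrative example (not part of the original module) ---
# How the space-delimited branch of DirList.assets parses a typical
# "Volume in drive"-style listing line (the filename is made up):
#
#   01/02/2019  10:30 AM         1,234,567 example_file.tif
#
#   group(1) -> '01/02/2019  10:30 AM'  (parsed with '%m/%d/%Y %I:%M %p')
#   group(2) -> '1,234,567'             (digits kept -> bytes=1234567)
#   group(3) -> 'example_file.tif'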
| 36.446512 | 79 | 0.460822 |
4a1ac24371a3692e9d56dad74d5f91e43cadad05
| 11,005 |
py
|
Python
|
client_sdk_python/providers/eth_tester/defaults.py
|
awake006/client-sdk-python
|
34393e417f74a65f04b643b26d8f9d2ccb0c886e
|
[
"MIT"
] | null | null | null |
client_sdk_python/providers/eth_tester/defaults.py
|
awake006/client-sdk-python
|
34393e417f74a65f04b643b26d8f9d2ccb0c886e
|
[
"MIT"
] | null | null | null |
client_sdk_python/providers/eth_tester/defaults.py
|
awake006/client-sdk-python
|
34393e417f74a65f04b643b26d8f9d2ccb0c886e
|
[
"MIT"
] | null | null | null |
import operator
import random
import sys
from eth_tester.exceptions import (
BlockNotFound,
FilterNotFound,
TransactionNotFound,
ValidationError,
)
from eth_utils import (
decode_hex,
encode_hex,
is_null,
keccak,
)
from client_sdk_python.utils.formatters import (
apply_formatter_if,
)
from client_sdk_python.utils.toolz import (
compose,
curry,
excepts,
)
def not_implemented(*args, **kwargs):
raise NotImplementedError("RPC method not implemented")
@curry
def call_eth_tester(fn_name, eth_tester, fn_args, fn_kwargs=None):
if fn_kwargs is None:
fn_kwargs = {}
return getattr(eth_tester, fn_name)(*fn_args, **fn_kwargs)
def without_eth_tester(fn):
# workaround for: https://github.com/pytoolz/cytoolz/issues/103
# @functools.wraps(fn)
def inner(eth_tester, params):
return fn(params)
return inner
def without_params(fn):
# workaround for: https://github.com/pytoolz/cytoolz/issues/103
# @functools.wraps(fn)
def inner(eth_tester, params):
return fn(eth_tester)
return inner
@curry
def preprocess_params(eth_tester, params, preprocessor_fn):
return eth_tester, preprocessor_fn(params)
def static_return(value):
def inner(*args, **kwargs):
return value
return inner
def client_version(eth_tester, params):
# TODO: account for the backend that is in use.
from eth_tester import __version__
return "EthereumTester/{version}/{platform}/python{v.major}.{v.minor}.{v.micro}".format(
version=__version__,
v=sys.version_info,
platform=sys.platform,
)
@curry
def null_if_excepts(exc_type, fn):
return excepts(
exc_type,
fn,
static_return(None),
)
null_if_block_not_found = null_if_excepts(BlockNotFound)
null_if_transaction_not_found = null_if_excepts(TransactionNotFound)
null_if_filter_not_found = null_if_excepts(FilterNotFound)
null_if_indexerror = null_if_excepts(IndexError)
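# --- Illustrative note (not part of the original module) ---
# Each wrapper converts the named exception into a null result instead of
# letting it propagate, mirroring JSON-RPC semantics. A sketch (the block hash
# is a placeholder):
#
#   get_block = null_if_block_not_found(call_eth_tester('get_block_by_hash'))
#   get_block(eth_tester, ['0x00...'])  # -> None if BlockNotFound is raised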
@null_if_indexerror
@null_if_block_not_found
def get_transaction_by_block_hash_and_index(eth_tester, params):
block_hash, transaction_index = params
block = eth_tester.get_block_by_hash(block_hash, full_transactions=True)
transaction = block['transactions'][transaction_index]
return transaction
@null_if_indexerror
@null_if_block_not_found
def get_transaction_by_block_number_and_index(eth_tester, params):
block_number, transaction_index = params
block = eth_tester.get_block_by_number(block_number, full_transactions=True)
transaction = block['transactions'][transaction_index]
return transaction
def create_log_filter(eth_tester, params):
filter_params = params[0]
filter_id = eth_tester.create_log_filter(**filter_params)
return filter_id
def get_logs(eth_tester, params):
filter_params = params[0]
logs = eth_tester.get_logs(**filter_params)
return logs
def _generate_random_private_key():
"""
WARNING: This is not a secure way to generate private keys and should only
be used for testing purposes.
"""
return encode_hex(bytes(bytearray((
random.randint(0, 255)
for _ in range(32)
))))
@without_params
def create_new_account(eth_tester):
return eth_tester.add_account(_generate_random_private_key())
def personal_send_transaction(eth_tester, params):
transaction, password = params
try:
eth_tester.unlock_account(transaction['from'], password)
transaction_hash = eth_tester.send_transaction(transaction)
finally:
eth_tester.lock_account(transaction['from'])
return transaction_hash
API_ENDPOINTS = {
'web3': {
'clientVersion': client_version,
'sha3': compose(
encode_hex,
keccak,
decode_hex,
without_eth_tester(operator.itemgetter(0)),
),
},
'net': {
'version': not_implemented,
'peerCount': not_implemented,
'listening': not_implemented,
},
'eth': {
'protocolVersion': not_implemented,
'syncing': not_implemented,
'coinbase': compose(
operator.itemgetter(0),
call_eth_tester('get_accounts'),
),
'mining': not_implemented,
'hashrate': not_implemented,
'gasPrice': not_implemented,
'accounts': call_eth_tester('get_accounts'),
'blockNumber': compose(
operator.itemgetter('number'),
call_eth_tester('get_block_by_number', fn_kwargs={'block_number': 'latest'}),
),
'getBalance': call_eth_tester('get_balance'),
'getStorageAt': not_implemented,
'getTransactionCount': call_eth_tester('get_nonce'),
'getBlockTransactionCountByHash': null_if_block_not_found(compose(
len,
operator.itemgetter('transactions'),
call_eth_tester('get_block_by_hash'),
)),
'getBlockTransactionCountByNumber': null_if_block_not_found(compose(
len,
operator.itemgetter('transactions'),
call_eth_tester('get_block_by_number'),
)),
'getUncleCountByBlockHash': null_if_block_not_found(compose(
len,
operator.itemgetter('uncles'),
call_eth_tester('get_block_by_hash'),
)),
'getUncleCountByBlockNumber': null_if_block_not_found(compose(
len,
operator.itemgetter('uncles'),
call_eth_tester('get_block_by_number'),
)),
'getCode': call_eth_tester('get_code'),
'sign': not_implemented,
'sendTransaction': call_eth_tester('send_transaction'),
'sendRawTransaction': call_eth_tester('send_raw_transaction'),
'call': call_eth_tester('call'), # TODO: untested
'estimateGas': call_eth_tester('estimate_gas'), # TODO: untested
'getBlockByHash': null_if_block_not_found(call_eth_tester('get_block_by_hash')),
'getBlockByNumber': null_if_block_not_found(call_eth_tester('get_block_by_number')),
'getTransactionByHash': null_if_transaction_not_found(
call_eth_tester('get_transaction_by_hash')
),
'getTransactionByBlockHashAndIndex': get_transaction_by_block_hash_and_index,
'getTransactionByBlockNumberAndIndex': get_transaction_by_block_number_and_index,
'getTransactionReceipt': null_if_transaction_not_found(compose(
apply_formatter_if(
compose(is_null, operator.itemgetter('block_number')),
static_return(None),
),
call_eth_tester('get_transaction_receipt'),
)),
'getUncleByBlockHashAndIndex': not_implemented,
'getUncleByBlockNumberAndIndex': not_implemented,
'getCompilers': not_implemented,
'compileLLL': not_implemented,
'compileSolidity': not_implemented,
'compileSerpent': not_implemented,
'newFilter': create_log_filter,
'newBlockFilter': call_eth_tester('create_block_filter'),
'newPendingTransactionFilter': call_eth_tester('create_pending_transaction_filter'),
'uninstallFilter': excepts(
FilterNotFound,
compose(
is_null,
call_eth_tester('delete_filter'),
),
static_return(False),
),
'getFilterChanges': null_if_filter_not_found(call_eth_tester('get_only_filter_changes')),
'getFilterLogs': null_if_filter_not_found(call_eth_tester('get_all_filter_logs')),
'getLogs': get_logs,
'getWork': not_implemented,
'submitWork': not_implemented,
'submitHashrate': not_implemented,
},
'db': {
'putString': not_implemented,
'getString': not_implemented,
'putHex': not_implemented,
'getHex': not_implemented,
},
'shh': {
'post': not_implemented,
'version': not_implemented,
'newIdentity': not_implemented,
'hasIdentity': not_implemented,
'newGroup': not_implemented,
'addToGroup': not_implemented,
'newFilter': not_implemented,
'uninstallFilter': not_implemented,
'getFilterChanges': not_implemented,
'getMessages': not_implemented,
},
'admin': {
'addPeer': not_implemented,
'datadir': not_implemented,
'nodeInfo': not_implemented,
'peers': not_implemented,
'setSolc': not_implemented,
'startRPC': not_implemented,
'startWS': not_implemented,
'stopRPC': not_implemented,
'stopWS': not_implemented,
},
'debug': {
'backtraceAt': not_implemented,
'blockProfile': not_implemented,
'cpuProfile': not_implemented,
'dumpBlock': not_implemented,
'gtStats': not_implemented,
'getBlockRLP': not_implemented,
'goTrace': not_implemented,
'memStats': not_implemented,
'seedHashSign': not_implemented,
'setBlockProfileRate': not_implemented,
'setHead': not_implemented,
'stacks': not_implemented,
'startCPUProfile': not_implemented,
'startGoTrace': not_implemented,
'stopCPUProfile': not_implemented,
'stopGoTrace': not_implemented,
'traceBlock': not_implemented,
'traceBlockByNumber': not_implemented,
'traceBlockByHash': not_implemented,
'traceBlockFromFile': not_implemented,
'traceTransaction': not_implemented,
'verbosity': not_implemented,
'vmodule': not_implemented,
'writeBlockProfile': not_implemented,
'writeMemProfile': not_implemented,
},
'miner': {
'makeDAG': not_implemented,
'setExtra': not_implemented,
'setGasPrice': not_implemented,
'start': not_implemented,
'startAutoDAG': not_implemented,
'stop': not_implemented,
'stopAutoDAG': not_implemented,
},
'personal': {
'ecRecover': not_implemented,
'importRawKey': call_eth_tester('add_account'),
'listAccounts': call_eth_tester('get_accounts'),
'lockAccount': excepts(
ValidationError,
compose(static_return(True), call_eth_tester('lock_account')),
static_return(False),
),
'newAccount': create_new_account,
'unlockAccount': excepts(
ValidationError,
compose(static_return(True), call_eth_tester('unlock_account')),
static_return(False),
),
'sendTransaction': personal_send_transaction,
'sign': not_implemented,
},
'testing': {
'timeTravel': call_eth_tester('time_travel'),
},
'txpool': {
'content': not_implemented,
'inspect': not_implemented,
'status': not_implemented,
},
'evm': {
'mine': call_eth_tester('mine_blocks'),
'revert': call_eth_tester('revert_to_snapshot'),
'snapshot': call_eth_tester('take_snapshot'),
},
}
| 32.084548 | 97 | 0.660245 |
4a1ac465ef210307d339bbfc1b3821e6f610fb92
| 21,290 |
py
|
Python
|
pdm/pep517/_vendor/cerberus/errors.py
|
danieleades/pdm-pep517
|
129697f841c0f635465caf83332c75f5e30b0c6f
|
[
"MIT"
] | 4 |
2021-04-14T16:18:08.000Z
|
2022-01-13T13:03:47.000Z
|
pdm/pep517/_vendor/cerberus/errors.py
|
danieleades/pdm-pep517
|
129697f841c0f635465caf83332c75f5e30b0c6f
|
[
"MIT"
] | 29 |
2021-03-23T15:40:56.000Z
|
2022-03-10T11:55:38.000Z
|
pdm/pep517/_vendor/cerberus/errors.py
|
frostming/pdm-pep517
|
99b6aab5f3cb2dac657f3a750d8eb4ad001dd095
|
[
"MIT"
] | 6 |
2021-03-21T17:42:25.000Z
|
2022-01-25T21:28:35.000Z
|
# -*- coding: utf-8 -*-
""" This module contains the error-related constants and classes. """
from __future__ import absolute_import
from collections import defaultdict, namedtuple
from copy import copy, deepcopy
from functools import wraps
from pprint import pformat
from pdm.pep517._vendor.cerberus.platform import PYTHON_VERSION, MutableMapping
from pdm.pep517._vendor.cerberus.utils import compare_paths_lt, quote_string
ErrorDefinition = namedtuple('ErrorDefinition', 'code, rule')
"""
This class is used to define possible errors. Each distinguishable error is
defined by a *unique* error ``code`` as integer and the ``rule`` that can
cause it as string.
The instances' names do not contain a common prefix as they are supposed to be
referenced within the module namespace, e.g. ``errors.CUSTOM``.
"""
# custom
CUSTOM = ErrorDefinition(0x00, None)
# existence
DOCUMENT_MISSING = ErrorDefinition(0x01, None) # issues/141
DOCUMENT_MISSING = "document is missing"
REQUIRED_FIELD = ErrorDefinition(0x02, 'required')
UNKNOWN_FIELD = ErrorDefinition(0x03, None)
DEPENDENCIES_FIELD = ErrorDefinition(0x04, 'dependencies')
DEPENDENCIES_FIELD_VALUE = ErrorDefinition(0x05, 'dependencies')
EXCLUDES_FIELD = ErrorDefinition(0x06, 'excludes')
# shape
DOCUMENT_FORMAT = ErrorDefinition(0x21, None) # issues/141
DOCUMENT_FORMAT = "'{0}' is not a document, must be a dict"
EMPTY_NOT_ALLOWED = ErrorDefinition(0x22, 'empty')
NOT_NULLABLE = ErrorDefinition(0x23, 'nullable')
BAD_TYPE = ErrorDefinition(0x24, 'type')
BAD_TYPE_FOR_SCHEMA = ErrorDefinition(0x25, 'schema')
ITEMS_LENGTH = ErrorDefinition(0x26, 'items')
MIN_LENGTH = ErrorDefinition(0x27, 'minlength')
MAX_LENGTH = ErrorDefinition(0x28, 'maxlength')
# color
REGEX_MISMATCH = ErrorDefinition(0x41, 'regex')
MIN_VALUE = ErrorDefinition(0x42, 'min')
MAX_VALUE = ErrorDefinition(0x43, 'max')
UNALLOWED_VALUE = ErrorDefinition(0x44, 'allowed')
UNALLOWED_VALUES = ErrorDefinition(0x45, 'allowed')
FORBIDDEN_VALUE = ErrorDefinition(0x46, 'forbidden')
FORBIDDEN_VALUES = ErrorDefinition(0x47, 'forbidden')
MISSING_MEMBERS = ErrorDefinition(0x48, 'contains')
# other
NORMALIZATION = ErrorDefinition(0x60, None)
COERCION_FAILED = ErrorDefinition(0x61, 'coerce')
RENAMING_FAILED = ErrorDefinition(0x62, 'rename_handler')
READONLY_FIELD = ErrorDefinition(0x63, 'readonly')
SETTING_DEFAULT_FAILED = ErrorDefinition(0x64, 'default_setter')
# groups
ERROR_GROUP = ErrorDefinition(0x80, None)
MAPPING_SCHEMA = ErrorDefinition(0x81, 'schema')
SEQUENCE_SCHEMA = ErrorDefinition(0x82, 'schema')
# TODO remove KEYSCHEMA AND VALUESCHEMA with next major release
KEYSRULES = KEYSCHEMA = ErrorDefinition(0x83, 'keysrules')
VALUESRULES = VALUESCHEMA = ErrorDefinition(0x84, 'valuesrules')
BAD_ITEMS = ErrorDefinition(0x8F, 'items')
LOGICAL = ErrorDefinition(0x90, None)
NONEOF = ErrorDefinition(0x91, 'noneof')
ONEOF = ErrorDefinition(0x92, 'oneof')
ANYOF = ErrorDefinition(0x93, 'anyof')
ALLOF = ErrorDefinition(0x94, 'allof')
""" SchemaError messages """
SCHEMA_ERROR_DEFINITION_TYPE = "schema definition for field '{0}' must be a dict"
SCHEMA_ERROR_MISSING = "validation schema missing"
""" Error representations """
class ValidationError(object):
"""A simple class to store and query basic error information."""
def __init__(self, document_path, schema_path, code, rule, constraint, value, info):
self.document_path = document_path
""" The path to the field within the document that caused the error.
Type: :class:`tuple` """
self.schema_path = schema_path
""" The path to the rule within the schema that caused the error.
Type: :class:`tuple` """
self.code = code
""" The error's identifier code. Type: :class:`int` """
self.rule = rule
""" The rule that failed. Type: `string` """
self.constraint = constraint
""" The constraint that failed. """
self.value = value
""" The value that failed. """
self.info = info
""" May hold additional information about the error.
Type: :class:`tuple` """
def __eq__(self, other):
"""Assumes the errors relate to the same document and schema."""
return hash(self) == hash(other)
def __hash__(self):
"""Expects that all other properties are transitively determined."""
return hash(self.document_path) ^ hash(self.schema_path) ^ hash(self.code)
def __lt__(self, other):
if self.document_path != other.document_path:
return compare_paths_lt(self.document_path, other.document_path)
else:
return compare_paths_lt(self.schema_path, other.schema_path)
def __repr__(self):
return (
"{class_name} @ {memptr} ( "
"document_path={document_path},"
"schema_path={schema_path},"
"code={code},"
"constraint={constraint},"
"value={value},"
"info={info} )".format(
class_name=self.__class__.__name__,
memptr=hex(id(self)), # noqa: E501
document_path=self.document_path,
schema_path=self.schema_path,
code=hex(self.code),
constraint=quote_string(self.constraint),
value=quote_string(self.value),
info=self.info,
)
)
@property
def child_errors(self):
"""
A list that contains the individual errors of a bulk validation error.
"""
return self.info[0] if self.is_group_error else None
@property
def definitions_errors(self):
"""
Dictionary with errors of an \*of-rule mapped to the index of the definition it
occurred in. Returns :obj:`None` if not applicable.
"""
if not self.is_logic_error:
return None
result = defaultdict(list)
for error in self.child_errors:
i = error.schema_path[len(self.schema_path)]
result[i].append(error)
return result
@property
def field(self):
"""Field of the contextual mapping, possibly :obj:`None`."""
if self.document_path:
return self.document_path[-1]
else:
return None
@property
def is_group_error(self):
"""``True`` for errors of bulk validations."""
return bool(self.code & ERROR_GROUP.code)
@property
def is_logic_error(self):
"""
``True`` for validation errors against different schemas with \*of-rules.
"""
return bool(self.code & LOGICAL.code - ERROR_GROUP.code)
@property
def is_normalization_error(self):
"""``True`` for normalization errors."""
return bool(self.code & NORMALIZATION.code)
class ErrorList(list):
"""
A list for :class:`~cerberus.errors.ValidationError` instances that can be queried
with the ``in`` keyword for a particular :class:`~cerberus.errors.ErrorDefinition`.
"""
def __contains__(self, error_definition):
if not isinstance(error_definition, ErrorDefinition):
raise TypeError
wanted_code = error_definition.code
return any(x.code == wanted_code for x in self)
class ErrorTreeNode(MutableMapping):
__slots__ = ('descendants', 'errors', 'parent_node', 'path', 'tree_root')
def __init__(self, path, parent_node):
self.parent_node = parent_node
self.tree_root = self.parent_node.tree_root
self.path = path[: self.parent_node.depth + 1]
self.errors = ErrorList()
self.descendants = {}
def __contains__(self, item):
if isinstance(item, ErrorDefinition):
return item in self.errors
else:
return item in self.descendants
def __delitem__(self, key):
del self.descendants[key]
def __iter__(self):
return iter(self.errors)
def __getitem__(self, item):
if isinstance(item, ErrorDefinition):
for error in self.errors:
if item.code == error.code:
return error
return None
else:
return self.descendants.get(item)
def __len__(self):
return len(self.errors)
def __repr__(self):
return self.__str__()
def __setitem__(self, key, value):
self.descendants[key] = value
def __str__(self):
return str(self.errors) + ',' + str(self.descendants)
@property
def depth(self):
return len(self.path)
@property
def tree_type(self):
return self.tree_root.tree_type
def add(self, error):
error_path = self._path_of_(error)
key = error_path[self.depth]
if key not in self.descendants:
self[key] = ErrorTreeNode(error_path, self)
node = self[key]
if len(error_path) == self.depth + 1:
node.errors.append(error)
node.errors.sort()
if error.is_group_error:
for child_error in error.child_errors:
self.tree_root.add(child_error)
else:
node.add(error)
def _path_of_(self, error):
return getattr(error, self.tree_type + '_path')
class ErrorTree(ErrorTreeNode):
"""
Base class for :class:`~cerberus.errors.DocumentErrorTree` and
:class:`~cerberus.errors.SchemaErrorTree`.
"""
def __init__(self, errors=()):
self.parent_node = None
self.tree_root = self
self.path = ()
self.errors = ErrorList()
self.descendants = {}
for error in errors:
self.add(error)
def add(self, error):
"""
Add an error to the tree.
:param error: :class:`~cerberus.errors.ValidationError`
"""
if not self._path_of_(error):
self.errors.append(error)
self.errors.sort()
else:
super(ErrorTree, self).add(error)
def fetch_errors_from(self, path):
"""
Returns all errors for a particular path.
:param path: :class:`tuple` of :term:`hashable` s.
:rtype: :class:`~cerberus.errors.ErrorList`
"""
node = self.fetch_node_from(path)
if node is not None:
return node.errors
else:
return ErrorList()
def fetch_node_from(self, path):
"""
Returns a node for a path.
:param path: Tuple of :term:`hashable` s.
:rtype: :class:`~cerberus.errors.ErrorTreeNode` or :obj:`None`
"""
context = self
for key in path:
context = context[key]
if context is None:
break
return context
class DocumentErrorTree(ErrorTree):
"""
Implements a dict-like class to query errors by indexes following the structure of a
validated document.
"""
tree_type = 'document'
class SchemaErrorTree(ErrorTree):
"""
Implements a dict-like class to query errors by indexes following the structure of
the used schema.
"""
tree_type = 'schema'
class BaseErrorHandler(object):
"""Base class for all error handlers.
Subclasses are identified as error-handlers with an instance-test."""
def __init__(self, *args, **kwargs):
"""Optionally initialize a new instance."""
pass
def __call__(self, errors):
"""
Returns errors in a handler-specific format.
:param errors: An object containing the errors.
:type errors: :term:`iterable` of
:class:`~cerberus.errors.ValidationError` instances or a
:class:`~cerberus.Validator` instance
"""
raise NotImplementedError
def __iter__(self):
"""Be a superhero and implement an iterator over errors."""
raise NotImplementedError
def add(self, error):
"""
Add an error to the errors' container object of a handler.
:param error: The error to add.
:type error: :class:`~cerberus.errors.ValidationError`
"""
raise NotImplementedError
def emit(self, error):
"""
Optionally emits an error in the handler's format to a stream. Or light a LED,
or even shut down a power plant.
:param error: The error to emit.
:type error: :class:`~cerberus.errors.ValidationError`
"""
pass
def end(self, validator):
"""
Gets called when a validation ends.
:param validator: The calling validator.
:type validator: :class:`~cerberus.Validator`
"""
pass
def extend(self, errors):
"""
Adds all errors to the handler's container object.
:param errors: The errors to add.
:type errors: :term:`iterable` of
:class:`~cerberus.errors.ValidationError` instances
"""
for error in errors:
self.add(error)
def start(self, validator):
"""
Gets called when a validation starts.
:param validator: The calling validator.
:type validator: :class:`~cerberus.Validator`
"""
pass
class ToyErrorHandler(BaseErrorHandler):
def __call__(self, *args, **kwargs):
raise RuntimeError('This is not supposed to happen.')
def clear(self):
pass
def encode_unicode(f):
"""Cerberus error messages expect regular binary strings.
    If unicode is used in a ValidationError, the message can't be printed.
This decorator ensures that if legacy Python is used unicode
strings are encoded before passing to a function.
"""
@wraps(f)
def wrapped(obj, error):
def _encode(value):
"""Helper encoding unicode strings into binary utf-8"""
if isinstance(value, unicode): # noqa: F821
return value.encode('utf-8')
return value
error = copy(error)
error.document_path = _encode(error.document_path)
error.schema_path = _encode(error.schema_path)
error.constraint = _encode(error.constraint)
error.value = _encode(error.value)
error.info = _encode(error.info)
return f(obj, error)
return wrapped if PYTHON_VERSION < 3 else f
class BasicErrorHandler(BaseErrorHandler):
"""
Models cerberus' legacy. Returns a :class:`dict`. When mangled through :class:`str`
a pretty-formatted representation of that tree is returned.
"""
messages = {
0x00: "{0}",
0x01: "document is missing",
0x02: "required field",
0x03: "unknown field",
0x04: "field '{0}' is required",
0x05: "depends on these values: {constraint}",
0x06: "{0} must not be present with '{field}'",
0x21: "'{0}' is not a document, must be a dict",
0x22: "empty values not allowed",
0x23: "null value not allowed",
0x24: "must be of {constraint} type",
0x25: "must be of dict type",
0x26: "length of list should be {0}, it is {1}",
0x27: "min length is {constraint}",
0x28: "max length is {constraint}",
0x41: "value does not match regex '{constraint}'",
0x42: "min value is {constraint}",
0x43: "max value is {constraint}",
0x44: "unallowed value {value}",
0x45: "unallowed values {0}",
0x46: "unallowed value {value}",
0x47: "unallowed values {0}",
0x48: "missing members {0}",
0x61: "field '{field}' cannot be coerced: {0}",
0x62: "field '{field}' cannot be renamed: {0}",
0x63: "field is read-only",
0x64: "default value for '{field}' cannot be set: {0}",
0x81: "mapping doesn't validate subschema: {0}",
0x82: "one or more sequence-items don't validate: {0}",
0x83: "one or more keys of a mapping don't validate: {0}",
0x84: "one or more values in a mapping don't validate: {0}",
0x85: "one or more sequence-items don't validate: {0}",
0x91: "one or more definitions validate",
0x92: "none or more than one rule validate",
0x93: "no definitions validate",
0x94: "one or more definitions don't validate",
}
def __init__(self, tree=None):
self.tree = {} if tree is None else tree
def __call__(self, errors):
self.clear()
self.extend(errors)
return self.pretty_tree
def __str__(self):
return pformat(self.pretty_tree)
@property
def pretty_tree(self):
pretty = deepcopy(self.tree)
for field in pretty:
self._purge_empty_dicts(pretty[field])
return pretty
@encode_unicode
def add(self, error):
# Make sure the original error is not altered with
# error paths specific to the handler.
error = deepcopy(error)
self._rewrite_error_path(error)
if error.is_logic_error:
self._insert_logic_error(error)
elif error.is_group_error:
self._insert_group_error(error)
elif error.code in self.messages:
self._insert_error(
error.document_path, self._format_message(error.field, error)
)
def clear(self):
self.tree = {}
def start(self, validator):
self.clear()
def _format_message(self, field, error):
return self.messages[error.code].format(
*error.info, constraint=error.constraint, field=field, value=error.value
)
def _insert_error(self, path, node):
"""
Adds an error or sub-tree to :attr:tree.
:param path: Path to the error.
:type path: Tuple of strings and integers.
:param node: An error message or a sub-tree.
:type node: String or dictionary.
"""
field = path[0]
if len(path) == 1:
if field in self.tree:
subtree = self.tree[field].pop()
self.tree[field] += [node, subtree]
else:
self.tree[field] = [node, {}]
elif len(path) >= 1:
if field not in self.tree:
self.tree[field] = [{}]
subtree = self.tree[field][-1]
if subtree:
new = self.__class__(tree=copy(subtree))
else:
new = self.__class__()
new._insert_error(path[1:], node)
subtree.update(new.tree)
def _insert_group_error(self, error):
for child_error in error.child_errors:
if child_error.is_logic_error:
self._insert_logic_error(child_error)
elif child_error.is_group_error:
self._insert_group_error(child_error)
else:
self._insert_error(
child_error.document_path,
self._format_message(child_error.field, child_error),
)
def _insert_logic_error(self, error):
field = error.field
self._insert_error(error.document_path, self._format_message(field, error))
for definition_errors in error.definitions_errors.values():
for child_error in definition_errors:
if child_error.is_logic_error:
self._insert_logic_error(child_error)
elif child_error.is_group_error:
self._insert_group_error(child_error)
else:
self._insert_error(
child_error.document_path,
self._format_message(field, child_error),
)
def _purge_empty_dicts(self, error_list):
subtree = error_list[-1]
if not error_list[-1]:
error_list.pop()
else:
for key in subtree:
self._purge_empty_dicts(subtree[key])
def _rewrite_error_path(self, error, offset=0):
"""
Recursively rewrites the error path to correctly represent logic errors
"""
if error.is_logic_error:
self._rewrite_logic_error_path(error, offset)
elif error.is_group_error:
self._rewrite_group_error_path(error, offset)
def _rewrite_group_error_path(self, error, offset=0):
child_start = len(error.document_path) - offset
for child_error in error.child_errors:
relative_path = child_error.document_path[child_start:]
child_error.document_path = error.document_path + relative_path
self._rewrite_error_path(child_error, offset)
def _rewrite_logic_error_path(self, error, offset=0):
child_start = len(error.document_path) - offset
for i, definition_errors in error.definitions_errors.items():
if not definition_errors:
continue
nodename = '%s definition %s' % (error.rule, i)
path = error.document_path + (nodename,)
for child_error in definition_errors:
rel_path = child_error.document_path[child_start:]
child_error.document_path = path + rel_path
self._rewrite_error_path(child_error, offset + 1)
class SchemaErrorHandler(BasicErrorHandler):
messages = BasicErrorHandler.messages.copy()
messages[0x03] = "unknown rule"
| 32.503817 | 88 | 0.621043 |
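The error classes and handlers above are normally reached through a Validator; a short usage sketch, assuming the public cerberus package exposes the same Validator/errors API as this vendored copy:

# Sketch: how ValidationError objects and BasicErrorHandler output reach callers.
from cerberus import Validator

schema = {"name": {"type": "string", "required": True},
          "age": {"type": "integer", "min": 0}}

v = Validator(schema)
v.validate({"age": -3})

# BasicErrorHandler output, keyed by field:
print(v.errors)   # {'age': ['min value is 0'], 'name': ['required field']}

# The underlying ValidationError objects, via the DocumentErrorTree:
for err in v.document_error_tree["age"].errors:
    print(err.document_path, hex(err.code), err.constraint)   # ('age',) 0x42 0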
4a1ac507494bd8e55049ea325528eb7babf490de
| 9,020 |
py
|
Python
|
EInk_Bonnet_Event_Calendar/code.py
|
claycooper/Adafruit_Learning_System_Guides
|
890431bd4b9df929bc601e5886c2a735d89814f9
|
[
"MIT"
] | null | null | null |
EInk_Bonnet_Event_Calendar/code.py
|
claycooper/Adafruit_Learning_System_Guides
|
890431bd4b9df929bc601e5886c2a735d89814f9
|
[
"MIT"
] | null | null | null |
EInk_Bonnet_Event_Calendar/code.py
|
claycooper/Adafruit_Learning_System_Guides
|
890431bd4b9df929bc601e5886c2a735d89814f9
|
[
"MIT"
] | null | null | null |
# SPDX-FileCopyrightText: 2020 Melissa LeBlanc-Williams for Adafruit Industries
#
# SPDX-License-Identifier: MIT
from __future__ import print_function
from datetime import datetime
import time
import pickle
import os.path
from googleapiclient.discovery import build
from google_auth_oauthlib.flow import InstalledAppFlow
from google.auth.transport.requests import Request
import textwrap
import digitalio
import busio
import board
from PIL import Image, ImageDraw, ImageFont
from adafruit_epd.epd import Adafruit_EPD
from adafruit_epd.ssd1675 import Adafruit_SSD1675
from adafruit_epd.ssd1680 import Adafruit_SSD1680
spi = busio.SPI(board.SCK, MOSI=board.MOSI, MISO=board.MISO)
ecs = digitalio.DigitalInOut(board.CE0)
dc = digitalio.DigitalInOut(board.D22)
rst = digitalio.DigitalInOut(board.D27)
busy = digitalio.DigitalInOut(board.D17)
up_button = digitalio.DigitalInOut(board.D5)
up_button.switch_to_input()
down_button = digitalio.DigitalInOut(board.D6)
down_button.switch_to_input()
# If modifying these scopes, delete the file token.pickle.
SCOPES = ["https://www.googleapis.com/auth/calendar.readonly"]
# Check for new/deleted events every 10 seconds
QUERY_DELAY = 10 # Time in seconds to delay between querying the Google Calendar API
MAX_EVENTS_PER_CAL = 5
MAX_LINES = 2
DEBOUNCE_DELAY = 0.3
# Initialize the Display
display = Adafruit_SSD1680( # Newer eInk Bonnet
# display = Adafruit_SSD1675( # Older eInk Bonnet
122, 250, spi, cs_pin=ecs, dc_pin=dc, sramcs_pin=None, rst_pin=rst, busy_pin=busy,
)
display.rotation = 1
# RGB Colors
WHITE = (255, 255, 255)
BLACK = (0, 0, 0)
creds = None
# The file token.pickle stores the user's access and refresh tokens, and is
# created automatically when the authorization flow completes for the first
# time.
if os.path.exists("token.pickle"):
with open("token.pickle", "rb") as token:
creds = pickle.load(token)
# If there are no (valid) credentials available, let the user log in.
if not creds or not creds.valid:
if creds and creds.expired and creds.refresh_token:
creds.refresh(Request())
else:
flow = InstalledAppFlow.from_client_secrets_file("credentials.json", SCOPES)
creds = flow.run_console()
# Save the credentials for the next run
with open("token.pickle", "wb") as token:
pickle.dump(creds, token)
service = build("calendar", "v3", credentials=creds)
current_event_id = None
last_check = None
events = []
def display_event(event_id):
event_index = search_id(event_id)
if event_index is None:
if len(events) > 0:
# Event was probably deleted while we were updating
event_index = 0
event = events[0]
else:
event = None
else:
event = events[event_index]
current_time = get_current_time()
display.fill(Adafruit_EPD.WHITE)
image = Image.new("RGB", (display.width, display.height), color=WHITE)
draw = ImageDraw.Draw(image)
event_font = ImageFont.truetype(
"/usr/share/fonts/truetype/dejavu/DejaVuSans.ttf", 24
)
time_font = ImageFont.truetype(
"/usr/share/fonts/truetype/dejavu/DejaVuSans-Bold.ttf", 18
)
next_event_font = ImageFont.truetype(
"/usr/share/fonts/truetype/dejavu/DejaVuSans-Bold.ttf", 16
)
# Draw Time
current_time = get_current_time()
(font_width, font_height) = time_font.getsize(current_time)
draw.text(
(display.width - font_width - 2, 2), current_time, font=time_font, fill=BLACK,
)
if event is None:
text = "No events found"
(font_width, font_height) = event_font.getsize(text)
draw.text(
(
display.width // 2 - font_width // 2,
display.height // 2 - font_height // 2,
),
text,
font=event_font,
fill=BLACK,
)
else:
how_long = format_interval(
event["start"].get("dateTime", event["start"].get("date"))
)
draw.text(
(2, 2), how_long, font=time_font, fill=BLACK,
)
(font_width, font_height) = event_font.getsize(event["summary"])
lines = textwrap.wrap(event["summary"], width=20)
for line_index, line in enumerate(lines):
if line_index < MAX_LINES:
draw.text(
(2, line_index * font_height + 22),
line,
font=event_font,
fill=BLACK,
)
# Draw Next Event if there is one
if event_index < len(events) - 1:
next_event = events[event_index + 1]
next_time = format_event_date(
next_event["start"].get("dateTime", next_event["start"].get("date"))
)
next_item = "Then " + next_time + ": "
(font_width, font_height) = next_event_font.getsize(next_item)
draw.text(
(2, display.height - font_height * 2 - 8),
next_item,
font=next_event_font,
fill=BLACK,
)
draw.text(
(2, display.height - font_height - 2),
next_event["summary"],
font=next_event_font,
fill=BLACK,
)
display.image(image)
display.display()
def format_event_date(datestr):
event_date = datetime.fromisoformat(datestr)
# If the same day, just return time
if event_date.date() == datetime.now().date():
return event_date.strftime("%I:%M %p")
# If a future date, return date and time
return event_date.strftime("%m/%d/%y %I:%M %p")
def format_interval(datestr):
event_date = datetime.fromisoformat(datestr).replace(tzinfo=None)
delta = event_date - datetime.now()
# if < 60 minutes, return minutes
if delta.days < 0:
return "Now:"
if not delta.days and delta.seconds < 3600:
value = round(delta.seconds / 60)
return "In {} minute{}:".format(value, "s" if value > 1 else "")
# if < 24 hours return hours
if not delta.days:
value = round(delta.seconds / 3600)
return "In {} hour{}:".format(value, "s" if value > 1 else "")
return "In {} day{}:".format(delta.days, "s" if delta.days > 1 else "")
def search_id(event_id):
if event_id is not None:
for index, event in enumerate(events):
if event["id"] == event_id:
return index
return None
def get_current_time():
now = datetime.now()
return now.strftime("%I:%M %p")
current_time = get_current_time()
def get_events(calendar_id):
print("Fetching Events for {}".format(calendar_id))
page_token = None
events = (
service.events()
.list(
calendarId=calendar_id,
timeMin=now,
maxResults=MAX_EVENTS_PER_CAL,
singleEvents=True,
orderBy="startTime",
)
.execute()
)
return events.get("items", [])
def get_all_calendar_ids():
page_token = None
calendar_ids = []
while True:
print("Fetching Calendar IDs")
calendar_list = service.calendarList().list(pageToken=page_token).execute()
for calendar_list_entry in calendar_list["items"]:
calendar_ids.append(calendar_list_entry["id"])
page_token = calendar_list.get("nextPageToken")
if not page_token:
break
return calendar_ids
while True:
last_event_id = current_event_id
last_time = current_time
if last_check is None or time.monotonic() >= last_check + QUERY_DELAY:
# Call the Calendar API
now = datetime.utcnow().isoformat() + "Z"
calendar_ids = get_all_calendar_ids()
events = []
for calendar_id in calendar_ids:
events += get_events(calendar_id)
# Sort Events by Start Time
events = sorted(
events, key=lambda k: k["start"].get("dateTime", k["start"].get("date"))
)
last_check = time.monotonic()
# Update the current time
current_time = get_current_time()
if not events:
current_event_id = None
current_index = None
else:
if current_event_id is None:
current_index = 0
else:
current_index = search_id(current_event_id)
if current_index is not None:
# Check for Button Presses
if up_button.value != down_button.value:
if not up_button.value and current_index < len(events) - 1:
current_index += 1
time.sleep(DEBOUNCE_DELAY)
if not down_button.value and current_index > 0:
current_index -= 1
time.sleep(DEBOUNCE_DELAY)
current_event_id = events[current_index]["id"]
else:
current_event_id = None
if current_event_id != last_event_id or current_time != last_time:
display_event(current_event_id)
| 31.760563 | 86 | 0.625499 |
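The calendar events above are merged from several calendars and then ordered by a plain string sort on the start timestamp; a small self-contained check of that sort key (the sample events are made up):

# Standalone check of the sort key used above: ISO-8601 strings sort
# chronologically, so dateTime/date values can be compared as text.
# Note this is a plain string sort, so it is only chronological when offsets match.
sample_events = [
    {"id": "b", "summary": "Lunch", "start": {"dateTime": "2024-05-01T12:00:00-05:00"}},
    {"id": "a", "summary": "All-day", "start": {"date": "2024-05-01"}},
    {"id": "c", "summary": "Standup", "start": {"dateTime": "2024-05-01T09:30:00-05:00"}},
]

ordered = sorted(
    sample_events,
    key=lambda k: k["start"].get("dateTime", k["start"].get("date")),
)
print([e["id"] for e in ordered])  # ['a', 'c', 'b']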
4a1ac52e232df17e8fdb26da68c0a77bae92aa5d
| 4,090 |
py
|
Python
|
src/ramstk/models/dbtables/programdb_stakeholder_table.py
|
weibullguy/ramstk
|
3ec41d7e2933045a7a8028aed6c6b04365495095
|
[
"BSD-3-Clause"
] | 4 |
2018-08-26T09:11:36.000Z
|
2019-05-24T12:01:02.000Z
|
src/ramstk/models/dbtables/programdb_stakeholder_table.py
|
weibullguy/ramstk
|
3ec41d7e2933045a7a8028aed6c6b04365495095
|
[
"BSD-3-Clause"
] | 52 |
2018-08-24T12:51:22.000Z
|
2020-12-28T04:59:42.000Z
|
src/ramstk/models/dbtables/programdb_stakeholder_table.py
|
weibullguy/ramstk
|
3ec41d7e2933045a7a8028aed6c6b04365495095
|
[
"BSD-3-Clause"
] | 1 |
2018-10-11T07:57:55.000Z
|
2018-10-11T07:57:55.000Z
|
# -*- coding: utf-8 -*-
#
# ramstk.models.dbtables.programdb_stakeholder_table.py is part of The RAMSTK
# Project
#
# All rights reserved.
# Copyright since 2007 Doyle "weibullguy" Rowland doyle.rowland <AT> reliaqual <DOT> com
"""RAMSTKStakeholder Table Model."""
# Standard Library Imports
from datetime import date
from typing import Dict, Type, Union
# Third Party Imports
from pubsub import pub
# RAMSTK Package Imports
from ramstk.analyses import improvementfactor
# RAMSTK Local Imports
from ..dbrecords import RAMSTKStakeholderRecord
from .basetable import RAMSTKBaseTable
class RAMSTKStakeholderTable(RAMSTKBaseTable):
"""Contain the attributes and methods of the Stakeholder table model."""
# Define private dictionary class attributes.
# Define private list class attributes.
# Define private scalar class attributes.
_db_id_colname = "fld_stakeholder_id"
_db_tablename = "ramstk_stakeholder"
_select_msg = "selected_revision"
_tag = "stakeholder"
# Define public dictionary class attributes.
# Define public list class attributes.
# Define public scalar class attributes.
def __init__(self, **kwargs: Dict[str, Union[float, int, str]]) -> None:
"""Initialize a RAMSTKStakeholder table model instance."""
super().__init__(**kwargs)
# Initialize private dictionary attributes.
# Initialize private list attributes.
self._lst_id_columns = [
"revision_id",
"stakeholder_id",
]
# Initialize private scalar attributes.
self._record: Type[RAMSTKStakeholderRecord] = RAMSTKStakeholderRecord
# Initialize public dictionary attributes.
# Initialize public list attributes.
# Initialize public scalar attributes.
self.pkey = "stakeholder_id"
# Subscribe to PyPubSub messages.
pub.subscribe(self.do_calculate_stakeholder, "request_calculate_stakeholder")
# pylint: disable=method-hidden
def do_get_new_record(
self, attributes: Dict[str, Union[date, float, int, str]]
) -> RAMSTKStakeholderRecord:
"""Get a new record instance with attributes set.
:param attributes: the dict of attribute values to assign to the new record.
:return: None
:rtype: None
"""
_new_record = self._record()
_new_record.revision_id = attributes["revision_id"]
_new_record.stakeholder_id = self.last_id + 1
_new_record.description = "New Stakeholder Input"
return _new_record
def do_calculate_stakeholder(self, node_id: int) -> None:
"""Calculate improvement factor and weight for currently selected item.
:param node_id: the ID of the record to calculate.
:return: None
:rtype: None
"""
self._do_calculate_improvement(node_id)
pub.sendMessage(
"succeed_calculate_stakeholder",
tree=self.tree,
)
def _do_calculate_improvement(self, node_id: int) -> None:
"""Calculate improvement factor and weight for currently selected item.
:param node_id: the ID of the record to calculate.
:return: None
:rtype: None
"""
_record = self.tree.get_node(node_id).data[self._tag]
_attributes = _record.get_attributes()
(_improvement, _overall_weight,) = improvementfactor.calculate_improvement(
_attributes["planned_rank"],
_attributes["customer_rank"],
_attributes["priority"],
user_float_1=_attributes["user_float_1"],
user_float_2=_attributes["user_float_2"],
user_float_3=_attributes["user_float_3"],
user_float_4=_attributes["user_float_4"],
user_float_5=_attributes["user_float_5"],
)
self.do_set_attributes(
node_id=node_id,
package={"improvement": _improvement},
)
self.do_set_attributes(
node_id=node_id,
package={"overall_weight": _overall_weight},
)
| 31.705426 | 88 | 0.66577 |
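The table model above is driven over PyPubSub topics rather than direct calls; a hedged sketch of the round trip, using the topic names from the subscriptions in the class (the listener below is hypothetical):

# Sketch of the PyPubSub round trip used by the table model above.
# Topic names come from the subscriptions in the class; the listener is ours.
from pubsub import pub

def on_calculated(tree):
    # 'succeed_calculate_stakeholder' is published with the whole record tree.
    print("stakeholder tree updated:", tree)

pub.subscribe(on_calculated, "succeed_calculate_stakeholder")

# Elsewhere, a view would trigger the calculation for record 1:
# pub.sendMessage("request_calculate_stakeholder", node_id=1)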
4a1ac557069ae9376b067f578803c4e989e61d74
| 1,857 |
py
|
Python
|
EAST/crnn.py
|
Abhishek-Aditya-bs/Scene-Text-Detection-and-Recognition
|
8fa079e4146bc5b254235e4ab88262ea897f4d9f
|
[
"MIT"
] | null | null | null |
EAST/crnn.py
|
Abhishek-Aditya-bs/Scene-Text-Detection-and-Recognition
|
8fa079e4146bc5b254235e4ab88262ea897f4d9f
|
[
"MIT"
] | null | null | null |
EAST/crnn.py
|
Abhishek-Aditya-bs/Scene-Text-Detection-and-Recognition
|
8fa079e4146bc5b254235e4ab88262ea897f4d9f
|
[
"MIT"
] | null | null | null |
import sys
sys.path.insert(1, '../crnn')
import torch
from torch.autograd import Variable
import torch.nn as nn
import torch.utils.data
import util
import dataset
from PIL import Image
import os
import models.crnn as crnn
from warpctc_pytorch import CTCLoss
import numpy as np
import editdistance
use_gpu = True
device_id = 0
if use_gpu:
torch.cuda.set_device(device_id)
model_path = '/home/zhanfangneng/projects/crnn/samples/crnn.pth'
im_dir = '/home/zhanfangneng/datasets/svt/textline/test/'
#lex_dir = '/export/home/frankzhan/datasets/IIIT5K/lower/test_lexicon_1k/'
sv_path = '/home/zhanfangneng/projects/crnn/submit.txt'
alphabet = '0123456789abcdefghijklmnopqrstuvwxyz'
model = crnn.CRNN(32, 1, 37, 256,1)
if use_gpu:
model = model.cuda()
print('loading pretrained model from %s' % model_path)
model.load_state_dict(torch.load(model_path, map_location=lambda storage, loc:storage))
print ('end loading')
converter = util.strLabelConverter(alphabet)
#transformer = dataset.keep_ratio_normalize(True)
transformer = dataset.resizeNormalize((100, 32))
criterion = CTCLoss()
File = open(sv_path,'w')
im_ls = os.listdir(im_dir)
#im_ls = im_ls[:100]
for nm in im_ls:
if nm.endswith('.png'):
im_path = im_dir + nm
image = Image.open(im_path).convert('L')
image = transformer(image)
if use_gpu:
image = image.cuda()
image = image.view(1, *image.size())
image = Variable(image)
model.eval()
preds = model(image)
_, preds = preds.max(2)
preds = preds.transpose(1, 0).contiguous().view(-1)
preds_size = Variable(torch.IntTensor([preds.size(0)]))
raw_pred = converter.decode(preds.data, preds_size.data, raw=True)
sim_pred = converter.decode(preds.data, preds_size.data, raw=False)
print (nm)
| 29.951613 | 87 | 0.712439 |
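converter.decode(..., raw=False) performs a greedy CTC collapse (drop repeats, then drop blanks); the technique in isolation, not the util.strLabelConverter implementation:

# Greedy CTC collapse, shown standalone: drop repeats, then drop blanks (index 0).
# Index 0 is the CTC blank; indices 1..36 map into '0123456789abcdefghijklmnopqrstuvwxyz'.
ALPHABET = '0123456789abcdefghijklmnopqrstuvwxyz'

def ctc_greedy_decode(indices):
    out = []
    prev = None
    for idx in indices:
        if idx != prev and idx != 0:          # skip repeats and blanks
            out.append(ALPHABET[idx - 1])     # shift by one for the blank slot
        prev = idx
    return ''.join(out)

# 'h' 'e' 'l' (blank) 'l' 'o' with repeats and blanks interleaved:
raw = [18, 18, 0, 15, 22, 22, 0, 22, 25]
print(ctc_greedy_decode(raw))  # hello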
4a1ac5da326011b4e0fc3c8f9342792408d238d0
| 2,005 |
py
|
Python
|
day7/sensor/app.py
|
smalljiny/raspi-class-example
|
7f92f34d366b94f9ae3c7da5ebeacdeb628be446
|
[
"Apache-2.0"
] | null | null | null |
day7/sensor/app.py
|
smalljiny/raspi-class-example
|
7f92f34d366b94f9ae3c7da5ebeacdeb628be446
|
[
"Apache-2.0"
] | null | null | null |
day7/sensor/app.py
|
smalljiny/raspi-class-example
|
7f92f34d366b94f9ae3c7da5ebeacdeb628be446
|
[
"Apache-2.0"
] | null | null | null |
import time
import cherrypy
from paste.translogger import TransLogger
from controllers import app
import RPi.GPIO as GPIO
class RaspiTransLogger(TransLogger):
def write_log(self, environ, method, req_uri, start, status, bytes):
if bytes is None:
bytes = '-'
remote_addr = '-'
if environ.get('HTTP_X_FORWARDED_FOR'):
remote_addr = environ['HTTP_X_FORWARDED_FOR']
elif environ.get('REMOTE_ADDR'):
remote_addr = environ['REMOTE_ADDR']
d = {
'REMOTE_ADDR': remote_addr,
'REMOTE_USER': environ.get('REMOTE_USER') or '-',
'REQUEST_METHOD': method,
'REQUEST_URI': req_uri,
'HTTP_VERSION': environ.get('SERVER_PROTOCOL'),
'time': time.strftime('%d/%b/%Y:%H:%M:%S', start),
'status': status.split(None, 1)[0],
'bytes': bytes,
'HTTP_REFERER': environ.get('HTTP_REFERER', '-'),
'HTTP_USER_AGENT': environ.get('HTTP_USER_AGENT', '-'),
}
message = self.format % d
self.logger.log(self.logging_level, message)
def run_server():
GPIO.cleanup()
GPIO.setmode(GPIO.BCM)
GPIO.setup(4, GPIO.OUT, initial=GPIO.LOW)
GPIO.setup(17, GPIO.OUT, initial=GPIO.LOW)
# Enable custom Paste access logging
log_format = (
        '[%(time)s] REQUEST %(REQUEST_METHOD)s %(status)s %(REQUEST_URI)s '
'(%(REMOTE_ADDR)s) %(bytes)s'
)
app_logged = RaspiTransLogger(app, format=log_format)
# Mount the WSGI callable object (app) on the root directory
cherrypy.tree.graft(app_logged, '/')
# Set the configuration of the web server
cherrypy.config.update({
'engine.autoreload_on': True,
'log.screen': True,
'server.socket_port': 5000,
'server.socket_host': '0.0.0.0'
})
# Start the CherryPy WSGI web server
cherrypy.engine.start()
cherrypy.engine.block()
if __name__ == '__main__':
run_server()
| 31.825397 | 74 | 0.610973 |
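The custom access-log line is produced by plain %-formatting of the dict assembled in write_log; a standalone check of the format string used above (the values are made up):

# Standalone check of the access-log format used above.
log_format = (
    '[%(time)s] REQUEST %(REQUEST_METHOD)s %(status)s %(REQUEST_URI)s '
    '(%(REMOTE_ADDR)s) %(bytes)s'
)
d = {
    'time': '01/Jan/2024:12:00:00',
    'REQUEST_METHOD': 'GET',
    'status': '200',
    'REQUEST_URI': '/api/led/4',
    'REMOTE_ADDR': '192.168.0.10',
    'bytes': 17,
}
print(log_format % d)
# [01/Jan/2024:12:00:00] REQUEST GET 200 /api/led/4 (192.168.0.10) 17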
4a1ac5edec3b0fa551d2272832260765a92ff05c
| 6,940 |
py
|
Python
|
bbob_pproc/config.py
|
RubenProject/GlowwormSwarmOptimizationBench
|
a00f95eddb8a029f2d029ea8a33188c1c077f1d9
|
[
"BSD-2-Clause-FreeBSD"
] | null | null | null |
bbob_pproc/config.py
|
RubenProject/GlowwormSwarmOptimizationBench
|
a00f95eddb8a029f2d029ea8a33188c1c077f1d9
|
[
"BSD-2-Clause-FreeBSD"
] | null | null | null |
bbob_pproc/config.py
|
RubenProject/GlowwormSwarmOptimizationBench
|
a00f95eddb8a029f2d029ea8a33188c1c077f1d9
|
[
"BSD-2-Clause-FreeBSD"
] | null | null | null |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
"""This module is an attempt for a global configuration file for various parameters.
The import of this module, :py:mod:`config`, changes default settings (attributes)
of other modules. This works, because each module has only one instance.
Before this module is imported somewhere, modules use their default settings.
This file could be dynamically modified and reloaded.
See also genericsettings.py which is a central place to define settings
used by other modules, but does not modify settings of other modules.
"""
import numpy as np
import ppfig, ppfigdim, pptable
from . import genericsettings, pproc, pprldistr
from .comp2 import ppfig2, ppscatter, pptable2
from .compall import ppfigs, pprldmany, pptables
def target_values(is_expensive, dict_max_fun_evals={}, runlength_limit=1e3):
"""manage target values setting in "expensive" optimization scenario,
when ``is_expensive not in (True, False), the setting is based on
the comparison of entries in ``dict_max_fun_evals`` with
``runlength_limit``.
"""
# if len(dict_max_fun_evals):
# genericsettings.dict_max_fun_evals = dict_max_fun_evals
is_runlength_based = True if is_expensive else None
if is_expensive:
genericsettings.maxevals_fix_display = genericsettings.xlimit_expensive
if is_runlength_based:
genericsettings.runlength_based_targets = True
elif is_runlength_based is False:
genericsettings.runlength_based_targets = False
else: # if genericsettings.runlength_based_targets == 'auto': # automatic choice of evaluation setup, looks still like a hack
if len(dict_max_fun_evals) and np.max([ val / dim for dim, val in dict_max_fun_evals.iteritems()]) < runlength_limit:
genericsettings.runlength_based_targets = True
genericsettings.maxevals_fix_display = genericsettings.xlimit_expensive
else:
genericsettings.runlength_based_targets = False
def config():
"""called from a high level, e.g. rungeneric, to configure the lower level
modules via modifying parameter settings.
"""
# pprldist.plotRLDistr2 needs to be revised regarding run_length based targets
if genericsettings.runlength_based_targets in (True, 1):
print 'Using bestGECCO2009 based target values: now for each function the target ' + \
'values differ, but the "level of difficulty" is "the same". '
# pprldmany:
if 1 < 3: # not yet functional, captions need to be adjusted and the bug reported by Ilya sorted out
pprldmany.target_values = pproc.RunlengthBasedTargetValues(np.logspace(np.log10(0.5), np.log10(50), 31),
smallest_target=1e-8 * 10**0.000,
force_different_targets_factor=1,
unique_target_values=True)
# pprldmany.caption = ... captions are still hard coded in LaTeX
pprldmany.x_limit = genericsettings.maxevals_fix_display # always fixed
# genericsettings (to be used in rungeneric2 while calling pprldistr.comp(...)):
genericsettings.rldValsOfInterest = pproc.RunlengthBasedTargetValues(genericsettings.target_runlengths_in_single_rldistr,
force_different_targets_factor=10**-0.2)
# pprldistr:
pprldistr.single_target_values = pproc.RunlengthBasedTargetValues(genericsettings.target_runlengths_in_single_rldistr,
force_different_targets_factor=10**-0.2)
pprldistr.runlen_xlimits_max = genericsettings.maxevals_fix_display / 2 if genericsettings.maxevals_fix_display else None # can be None
pprldistr.runlen_xlimits_min = 10**-0.3 # can be None
# ppfigdim:
ppfigdim.values_of_interest = pproc.RunlengthBasedTargetValues(genericsettings.target_runlengths_in_scaling_figs,
# [10**i for i in [2.0, 1.5, 1.0, 0.5, 0.1, -0.3]],
# [10**i for i in [1.7, 1, 0.3, -0.3]]
force_different_targets_factor=10**-0.2)
ppfigdim.xlim_max = genericsettings.maxevals_fix_display
if ppfigdim.xlim_max:
ppfigdim.styles = [ # sort of rainbow style, most difficult (red) first
{'color': 'y', 'marker': '^', 'markeredgecolor': 'k', 'markeredgewidth': 2, 'linewidth': 4},
{'color': 'g', 'marker': '.', 'linewidth': 4},
{'color': 'r', 'marker': 'o', 'markeredgecolor': 'k', 'markeredgewidth': 2, 'linewidth': 4},
{'color': 'm', 'marker': '.', 'linewidth': 4},
{'color': 'c', 'marker': 'v', 'markeredgecolor': 'k', 'markeredgewidth': 2, 'linewidth': 4},
{'color': 'b', 'marker': '.', 'linewidth': 4},
{'color': 'k', 'marker': 'o', 'markeredgecolor': 'k', 'markeredgewidth': 2, 'linewidth': 4},
]
# pptable:
pptable.table_caption=pptable.table_caption_rlbased
pptable.targetsOfInterest = pproc.RunlengthBasedTargetValues(genericsettings.target_runlengths_in_table,
force_different_targets_factor=10**-0.2)
# pptable2:
pptable2.targetsOfInterest = pproc.RunlengthBasedTargetValues(genericsettings.target_runlengths_in_table,
force_different_targets_factor=10**-0.2)
# pptables (for rungenericmany):
pptables.targetsOfInterest = pproc.RunlengthBasedTargetValues(genericsettings.target_runlengths_in_table,
force_different_targets_factor=10**-0.2)
ppscatter.markersize = 16
else:
pass # here the default values of the modules apply
# pprlmany.x_limit = ...should depend on noisy/noiseless
if 11 < 3: # for testing purpose
# TODO: this case needs to be tested yet: the current problem is that no noisy data are in this folder
pprldmany.target_values = pproc.RunlengthBasedTargetValues(10**np.arange(1, 4, 0.2), 'RANDOMSEARCH')
pprldmany.fontsize = 20.0 # should depend on the number of data lines down to 10.0 ?
ppscatter.markersize = 14
ppfig2.linewidth = 4
ppfig2.styles = ppfig2.styles
ppfigs.styles = ppfigs.styles
def main():
config()
| 55.967742 | 143 | 0.609078 |
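When is_expensive is neither True nor False, target_values() decides by comparing the largest per-dimension budget against runlength_limit; the decisive expression shown standalone (Python 3 spelling, sample budgets made up):

# Standalone version of the budget check in target_values() above.
import numpy as np

dict_max_fun_evals = {2: 2000, 5: 5000, 20: 40000}
runlength_limit = 1e3

budget_per_dim = np.max([val / dim for dim, val in dict_max_fun_evals.items()])
runlength_based = budget_per_dim < runlength_limit
print(budget_per_dim, runlength_based)  # 2000.0 False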
4a1ac68bfc1971367a235a2a095c36c3cc02d11f
| 2,273 |
py
|
Python
|
insure_model/training/test_train.py
|
WipadaChan/MLOpsPython
|
f4752b6fdd5f1b04f0720c4990144c94b72f2b99
|
[
"MIT"
] | null | null | null |
insure_model/training/test_train.py
|
WipadaChan/MLOpsPython
|
f4752b6fdd5f1b04f0720c4990144c94b72f2b99
|
[
"MIT"
] | null | null | null |
insure_model/training/test_train.py
|
WipadaChan/MLOpsPython
|
f4752b6fdd5f1b04f0720c4990144c94b72f2b99
|
[
"MIT"
] | null | null | null |
import numpy as np
import pandas as pd
import lightgbm
# functions to test are imported from train.py
from insure_model.training.train import split_data
from insure_model.training.train import train_model
from insure_model.training.train import get_model_metrics
"""A set of simple unit tests for protecting against regressions in train.py"""
def test_split_data():
test_data = {
'id': [0, 1, 2, 3, 4],
'target': [0, 0, 1, 0, 1],
'col1': [1, 2, 3, 4, 5],
'col2': [2, 1, 1, 2, 1]
}
data_df = pd.DataFrame(data=test_data)
data = split_data(data_df)
# verify that columns were removed correctly
assert "target" not in data[0].data.columns
assert "id" not in data[0].data.columns
assert "col1" in data[0].data.columns
# verify that data was split as desired
assert data[0].data.shape == (4, 2)
assert data[1].data.shape == (1, 2)
# the valid_data set's raw data is used for metric calculation, so
# free_raw_data should be False
assert not data[1].free_raw_data
def test_train_model():
data = __get_test_datasets()
params = {
"learning_rate": 0.05,
"metric": "auc",
"min_data": 1
}
model = train_model(data, params)
# verify that parameters are passed in to the model correctly
for param_name in params.keys():
assert param_name in model.params
assert params[param_name] == model.params[param_name]
def test_get_model_metrics():
class MockModel:
@staticmethod
def predict(data):
return np.array([0, 0])
data = __get_test_datasets()
metrics = get_model_metrics(MockModel(), data)
# verify that metrics is a dictionary containing the auc value.
assert "auc" in metrics
auc = metrics["auc"]
np.testing.assert_almost_equal(auc, 0.5)
def __get_test_datasets():
"""This is a helper function to set up some test data"""
X_train = np.array([1, 2, 3, 4, 5, 6]).reshape(-1, 1)
y_train = np.array([1, 1, 0, 1, 0, 1])
X_test = np.array([7, 8]).reshape(-1, 1)
y_test = np.array([0, 1])
train_data = lightgbm.Dataset(X_train, y_train)
valid_data = lightgbm.Dataset(X_test, y_test)
data = (train_data, valid_data)
return data
| 27.385542 | 79 | 0.650242 |
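The mocked model in test_get_model_metrics predicts a constant 0 for both samples, which is why the expected AUC is 0.5 — assuming get_model_metrics computes ROC AUC on the validation set, as the test implies:

# A constant prediction ranks the positive and negative example equally,
# so ROC AUC degenerates to 0.5 (hypothetical standalone check).
from sklearn.metrics import roc_auc_score
print(roc_auc_score([0, 1], [0, 0]))  # 0.5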
4a1ac6d93eb2094bfcdd569ffe9e5ba3a9bdfce6
| 4,275 |
py
|
Python
|
cryptowatch/stream/proto/public/markets/asset_pb2.py
|
hikarubw/cw-sdk-python
|
1b72a85b3b2f5fc8003677a68a26e1b349bea2f1
|
[
"BSD-2-Clause"
] | 134 |
2019-12-03T21:21:31.000Z
|
2022-03-27T16:15:23.000Z
|
cryptowatch/stream/proto/public/markets/asset_pb2.py
|
hikarubw/cw-sdk-python
|
1b72a85b3b2f5fc8003677a68a26e1b349bea2f1
|
[
"BSD-2-Clause"
] | 28 |
2019-12-11T10:58:28.000Z
|
2021-12-20T21:16:46.000Z
|
cryptowatch/stream/proto/public/markets/asset_pb2.py
|
hikarubw/cw-sdk-python
|
1b72a85b3b2f5fc8003677a68a26e1b349bea2f1
|
[
"BSD-2-Clause"
] | 34 |
2019-12-10T21:54:47.000Z
|
2022-01-11T06:02:22.000Z
|
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: public/markets/asset.proto
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='public/markets/asset.proto',
package='ProtobufMarkets',
syntax='proto3',
serialized_options=None,
serialized_pb=b'\n\x1apublic/markets/asset.proto\x12\x0fProtobufMarkets\"o\n\x12\x41ssetUpdateMessage\x12\r\n\x05\x61sset\x18\x01 \x01(\x05\x12@\n\x0fusdVolumeUpdate\x18\x02 \x01(\x0b\x32%.ProtobufMarkets.AssetUSDVolumeUpdateH\x00\x42\x08\n\x06Update\"&\n\x14\x41ssetUSDVolumeUpdate\x12\x0e\n\x06volume\x18\x01 \x01(\tb\x06proto3'
)
_ASSETUPDATEMESSAGE = _descriptor.Descriptor(
name='AssetUpdateMessage',
full_name='ProtobufMarkets.AssetUpdateMessage',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='asset', full_name='ProtobufMarkets.AssetUpdateMessage.asset', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='usdVolumeUpdate', full_name='ProtobufMarkets.AssetUpdateMessage.usdVolumeUpdate', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name='Update', full_name='ProtobufMarkets.AssetUpdateMessage.Update',
index=0, containing_type=None, fields=[]),
],
serialized_start=47,
serialized_end=158,
)
_ASSETUSDVOLUMEUPDATE = _descriptor.Descriptor(
name='AssetUSDVolumeUpdate',
full_name='ProtobufMarkets.AssetUSDVolumeUpdate',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='volume', full_name='ProtobufMarkets.AssetUSDVolumeUpdate.volume', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=160,
serialized_end=198,
)
_ASSETUPDATEMESSAGE.fields_by_name['usdVolumeUpdate'].message_type = _ASSETUSDVOLUMEUPDATE
_ASSETUPDATEMESSAGE.oneofs_by_name['Update'].fields.append(
_ASSETUPDATEMESSAGE.fields_by_name['usdVolumeUpdate'])
_ASSETUPDATEMESSAGE.fields_by_name['usdVolumeUpdate'].containing_oneof = _ASSETUPDATEMESSAGE.oneofs_by_name['Update']
DESCRIPTOR.message_types_by_name['AssetUpdateMessage'] = _ASSETUPDATEMESSAGE
DESCRIPTOR.message_types_by_name['AssetUSDVolumeUpdate'] = _ASSETUSDVOLUMEUPDATE
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
AssetUpdateMessage = _reflection.GeneratedProtocolMessageType('AssetUpdateMessage', (_message.Message,), {
'DESCRIPTOR' : _ASSETUPDATEMESSAGE,
'__module__' : 'public.markets.asset_pb2'
# @@protoc_insertion_point(class_scope:ProtobufMarkets.AssetUpdateMessage)
})
_sym_db.RegisterMessage(AssetUpdateMessage)
AssetUSDVolumeUpdate = _reflection.GeneratedProtocolMessageType('AssetUSDVolumeUpdate', (_message.Message,), {
'DESCRIPTOR' : _ASSETUSDVOLUMEUPDATE,
'__module__' : 'public.markets.asset_pb2'
# @@protoc_insertion_point(class_scope:ProtobufMarkets.AssetUSDVolumeUpdate)
})
_sym_db.RegisterMessage(AssetUSDVolumeUpdate)
# @@protoc_insertion_point(module_scope)
| 35.040984 | 332 | 0.773567 |
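The generated classes behave like any protobuf message; a sketch of building and re-parsing an AssetUpdateMessage, using only field names visible in the descriptors above (the import path follows this file's location in the repo, the values are made up):

# Sketch: round-tripping the generated message types.
from cryptowatch.stream.proto.public.markets.asset_pb2 import AssetUpdateMessage

msg = AssetUpdateMessage()
msg.asset = 60                        # asset id
msg.usdVolumeUpdate.volume = "1234567.89"

wire = msg.SerializeToString()        # bytes as they would travel on the stream

decoded = AssetUpdateMessage()
decoded.ParseFromString(wire)
print(decoded.WhichOneof("Update"))   # usdVolumeUpdate
print(decoded.usdVolumeUpdate.volume) # 1234567.89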
4a1ac6ea0838e0f524a553c2b0f218921ca31d7e
| 1,095 |
py
|
Python
|
src/fractal.py
|
Gaoadt/python-fractal-renderer
|
968a7cbb5c864a760992913bc5fb350503eda8ac
|
[
"MIT"
] | null | null | null |
src/fractal.py
|
Gaoadt/python-fractal-renderer
|
968a7cbb5c864a760992913bc5fb350503eda8ac
|
[
"MIT"
] | null | null | null |
src/fractal.py
|
Gaoadt/python-fractal-renderer
|
968a7cbb5c864a760992913bc5fb350503eda8ac
|
[
"MIT"
] | null | null | null |
from expression_types import NamedVarExpr, ExpressionLink
class Fractal:
def __init__(self, expression, radius, iterations):
self.postOrder = []
self.expression = ExpressionLink()
self.identifiers = None
self.operIndex = 0
self.expression.link = expression
self.radius = radius
self.iterations = iterations
self.buildIdentifierDictionary()
def putIdentifier(self, expression):
self.identifiers[expression.identifierName] = len(self.identifiers)
def dfsIdentifierFinder(self, nodeLink):
expression = nodeLink.link
expression.operIndex = self.operIndex
self.operIndex += 1
if isinstance(expression, NamedVarExpr):
if expression.identifierName not in self.identifiers.keys():
self.putIdentifier(expression)
for x in expression.args:
self.dfsIdentifierFinder(x)
self.postOrder.append(nodeLink)
def buildIdentifierDictionary(self):
self.identifiers = dict()
self.dfsIdentifierFinder(self.expression)
| 31.285714 | 75 | 0.671233 |
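dfsIdentifierFinder is a pre-order numbering plus post-order collection walk that also interns variable names; the same pattern in isolation (the Node class below is a stand-in, not the expression_types API):

# The post-order / identifier-interning pattern in isolation.
import itertools

class Node:
    def __init__(self, name=None, args=()):
        self.name = name          # set for named variables only
        self.args = list(args)

def walk(node, identifiers, post_order, counter):
    node.oper_index = next(counter)   # numbering on entry, as in the class above
    if node.name is not None and node.name not in identifiers:
        identifiers[node.name] = len(identifiers)
    for child in node.args:
        walk(child, identifiers, post_order, counter)
    post_order.append(node)           # appended after the children: post-order

expr = Node(args=[Node("z"), Node(args=[Node("c"), Node("z")])])
ids, order = {}, []
walk(expr, ids, order, itertools.count())
print(ids)           # {'z': 0, 'c': 1}
print(len(order))    # 5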
4a1ac768f243c27f72e2c964fc8111099b85fb79
| 1,113 |
py
|
Python
|
setup.py
|
djjh/reinforcement-learning-labs
|
22706dab9e7f16e364ee4ed79c0bd67a343e5b08
|
[
"MIT"
] | 1 |
2019-10-06T11:45:52.000Z
|
2019-10-06T11:45:52.000Z
|
setup.py
|
djjh/reinforcement-learning-labs
|
22706dab9e7f16e364ee4ed79c0bd67a343e5b08
|
[
"MIT"
] | null | null | null |
setup.py
|
djjh/reinforcement-learning-labs
|
22706dab9e7f16e364ee4ed79c0bd67a343e5b08
|
[
"MIT"
] | null | null | null |
from os.path import join, dirname, realpath
from setuptools import setup
import sys
assert sys.version_info.major == 3 and sys.version_info.minor >= 6, \
"The Reinforcement Learning Labs repo is designed to work with Python 3.6" \
+ " and greater. Please install it before proceeding."
__version__ = '0.0.0'
setup(
name='reinforcement-learning-labs',
package_dir={'': 'src'},
# py_modules=['reinforcement-learning-labs'],
packages = ['experiments', 'rl'],
version=__version__,#'0.1',
install_requires=[
'coloredlogs',
'cloudpickle',
'gym[atari,box2d,classic_control]>=0.10.8',
'ipython',
'joblib',
'matplotlib',
'mpi4py',
'numpy==1.16.1', # 1.16.1 is required to load the imbd dataset (for now)
'pandas',
'pytest',
'pytest-cov',
'psutil',
'scipy',
'seaborn==0.8.1',
'tensorflow==1.13.2',
'tensorflow-probability>=0.6.0',
'tqdm',
'nevergrad'
],
description="Exercises in reinforcement learning.",
author="Dylan Holmes",
)
| 27.825 | 81 | 0.592992 |
4a1ac7aa721b279fa8ba6b52a9f82821e6bd0e02
| 597 |
py
|
Python
|
httpmocker/mixins.py
|
ketan86/py_mock_http
|
f554c0c1a03c0fec13c8a5969443dfe97e016c24
|
[
"MIT"
] | null | null | null |
httpmocker/mixins.py
|
ketan86/py_mock_http
|
f554c0c1a03c0fec13c8a5969443dfe97e016c24
|
[
"MIT"
] | null | null | null |
httpmocker/mixins.py
|
ketan86/py_mock_http
|
f554c0c1a03c0fec13c8a5969443dfe97e016c24
|
[
"MIT"
] | null | null | null |
from collections import namedtuple
import os
from httpmocker.config import get_config
class SSLMixin(object):
def save_cert(self, cert, key):
# get storage location
storage_root = self.get_cert_storage_root()
# create storage root folder if does not exist
os.makedirs(storage_root, exist_ok=True)
self.ssl_cert = f'{storage_root}/app.crt'
self.ssl_key = f'{storage_root}/app.key'
# save cert
with open(self.ssl_cert, 'w') as f:
f.write(cert)
with open(self.ssl_key, 'w') as f:
f.write(key)
| 24.875 | 54 | 0.633166 |
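SSLMixin.save_cert relies on the host class to supply get_cert_storage_root(); a hedged sketch of a concrete host (the directory and PEM strings are placeholders):

# Sketch of a host class for SSLMixin; get_cert_storage_root() is the only
# hook the mixin needs. Paths and PEM contents below are placeholders.
import tempfile
from httpmocker.mixins import SSLMixin

class DummyApp(SSLMixin):
    def get_cert_storage_root(self):
        return tempfile.mkdtemp(prefix="httpmocker-certs-")

app = DummyApp()
app.save_cert("-----BEGIN CERTIFICATE-----\n...\n-----END CERTIFICATE-----",
              "-----BEGIN PRIVATE KEY-----\n...\n-----END PRIVATE KEY-----")
print(app.ssl_cert, app.ssl_key)  # .../app.crt .../app.key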
4a1ac7e2ca4b6322fe825c54fda726d5d6911946
| 4,205 |
py
|
Python
|
gruppegenerator/settings.py
|
Snailed/group-generator
|
d19286872fa548eab36d9d427ed8a860370ce96c
|
[
"MIT"
] | null | null | null |
gruppegenerator/settings.py
|
Snailed/group-generator
|
d19286872fa548eab36d9d427ed8a860370ce96c
|
[
"MIT"
] | 3 |
2020-02-11T23:02:46.000Z
|
2021-06-10T18:08:29.000Z
|
gruppegenerator/settings.py
|
Snailed/group-generator
|
d19286872fa548eab36d9d427ed8a860370ce96c
|
[
"MIT"
] | null | null | null |
"""
Django settings for gruppegenerator project.
Generated by 'django-admin startproject' using Django 1.11.1.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '#u5w%tqkvcc-x*jc4lb9b1cqx5jliv4s$ghud_u@_1kzj&f83i'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'gruppeapp.apps.GruppeappConfig',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'gruppegenerator.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
'libraries':{
'gruppeapp_extras': 'gruppeapp.templatetags.gruppeapp_extras',
}
},
},
]
WSGI_APPLICATION = 'gruppegenerator.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
#if not DEBUG:
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2', # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
'NAME': 'gruppegeneratordb', # Or path to database file if using sqlite3.
'USER': 'gruppeadmin',
'PASSWORD': 'password',
'HOST': 'localhost', # Empty for localhost through domain sockets or '127.0.0.1' for localhost through TCP.
'PORT': '', # Set to empty string for default.
}
}
#else:
# DATABASES = {
# 'default': {
# 'ENGINE': 'django.db.backends.sqlite3',
# 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
# }
# }
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
'OPTIONS': {
'min_length': 8,
}
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
AUTHENTICATION_BACKENDS = ("gruppeapp.backends.EmailOrUsernameBackend",)
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static/')
LOGOUT_REDIRECT_URL = '/group/'
if not DEBUG:
import dj_database_url
DATABASES['default'] = dj_database_url.config()
| 27.847682 | 146 | 0.664685 |
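In the non-DEBUG branch the default database is replaced by dj_database_url.config(), which parses the DATABASE_URL environment variable; a standalone illustration (the URL below is an example value, not a real credential):

# Standalone illustration of the dj_database_url fallback used above.
import os
import dj_database_url

os.environ["DATABASE_URL"] = "postgres://gruppeadmin:password@localhost:5432/gruppegeneratordb"
cfg = dj_database_url.config()
print(cfg["NAME"], cfg["USER"], cfg["HOST"], cfg["PORT"])
# gruppegeneratordb gruppeadmin localhost 5432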
4a1ac93ecbdafca5f1a19f9acf5ffa8f6e6f4bef
| 1,455 |
py
|
Python
|
venv/lib/python3.8/site-packages/tensorflow/_api/v2/compat/v1/autograph/__init__.py
|
JIANG-CX/data_labeling
|
8d2470bbb537dfc09ed2f7027ed8ee7de6447248
|
[
"MIT"
] | 1 |
2021-05-24T10:08:51.000Z
|
2021-05-24T10:08:51.000Z
|
venv/lib/python3.8/site-packages/tensorflow/_api/v2/compat/v1/autograph/__init__.py
|
JIANG-CX/data_labeling
|
8d2470bbb537dfc09ed2f7027ed8ee7de6447248
|
[
"MIT"
] | null | null | null |
venv/lib/python3.8/site-packages/tensorflow/_api/v2/compat/v1/autograph/__init__.py
|
JIANG-CX/data_labeling
|
8d2470bbb537dfc09ed2f7027ed8ee7de6447248
|
[
"MIT"
] | null | null | null |
# This file is MACHINE GENERATED! Do not edit.
# Generated by: tensorflow/python/tools/api/generator/create_python_api.py script.
"""Conversion of eager-style Python into TensorFlow graph code.
NOTE: In TensorFlow 2.0, AutoGraph is automatically applied when using
`tf.function`. This module contains lower-level APIs for advanced use.
AutoGraph transforms a subset of Python which operates on TensorFlow objects
into equivalent TensorFlow graph code. When executing the graph, it has the same
effect as if you ran the original code in eager mode.
Python code which doesn't operate on TensorFlow objects remains functionally
unchanged, but keep in mind that `tf.function` only executes such code at trace
time, and generally will not be consistent with eager execution.
For more information, see the
[AutoGraph reference documentation](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/autograph/g3doc/reference/index.md),
and the [tf.function guide](https://www.tensorflow.org/guide/function#autograph_transformations).
"""
from __future__ import print_function as _print_function
import sys as _sys
from . import experimental
from tensorflow.python.autograph.impl.api import to_code_v1 as to_code
from tensorflow.python.autograph.impl.api import to_graph_v1 as to_graph
from tensorflow.python.autograph.utils.ag_logging import set_verbosity
from tensorflow.python.autograph.utils.ag_logging import trace
del _print_function
| 45.46875 | 143 | 0.823368 |
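The to_code/to_graph helpers re-exported here convert plain Python control flow into graph-compatible code; a minimal sketch using the public tf.autograph aliases (this compat.v1 module re-exports the v1 spellings, so signatures may differ slightly):

# Minimal sketch of the AutoGraph entry points via the public tf.autograph aliases.
import tensorflow as tf

def count_even(n):
    total = 0
    for i in range(n):
        if i % 2 == 0:
            total += 1
    return total

print(tf.autograph.to_code(count_even))   # generated graph-mode source as a string
graph_fn = tf.autograph.to_graph(count_even)
print(graph_fn(10))                        # 5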
4a1ac9503438a8f455d783b160b0cf16154276e9
| 19,567 |
py
|
Python
|
TimeWrapper_JE/venv/Lib/site-packages/wheel/bdist_wheel.py
|
JE-Chen/je_old_repo
|
a8b2f1ac2eec25758bd15b71c64b59b27e0bcda5
|
[
"MIT"
] | 2 |
2020-09-22T14:38:24.000Z
|
2020-10-30T03:11:36.000Z
|
TimeWrapper_JE/venv/Lib/site-packages/wheel/bdist_wheel.py
|
JE-Chen/je_old_repo
|
a8b2f1ac2eec25758bd15b71c64b59b27e0bcda5
|
[
"MIT"
] | null | null | null |
TimeWrapper_JE/venv/Lib/site-packages/wheel/bdist_wheel.py
|
JE-Chen/je_old_repo
|
a8b2f1ac2eec25758bd15b71c64b59b27e0bcda5
|
[
"MIT"
] | null | null | null |
"""
Create a wheel (.whl) distribution.
A wheel is a built archive format.
"""
import distutils
import os
import shutil
import stat
import sys
import re
import warnings
from collections import OrderedDict
from distutils.core import Command
from distutils import log as logger
from io import BytesIO
from glob import iglob
from shutil import rmtree
from sysconfig import get_config_var
from zipfile import ZIP_DEFLATED, ZIP_STORED
import pkg_resources
from .pkginfo import write_pkg_info
from .macosx_libfile import calculate_macosx_platform_tag
from .metadata import pkginfo_to_metadata
from .vendored.packaging import tags
from .wheelfile import WheelFile
from . import __version__ as wheel_version
if sys.version_info < (3,):
from email.generator import Generator as BytesGenerator
else:
from email.generator import BytesGenerator
safe_name = pkg_resources.safe_name
safe_version = pkg_resources.safe_version
PY_LIMITED_API_PATTERN = r'cp3\d'
def python_tag():
return 'py{}'.format(sys.version_info[0])
def get_platform(archive_root):
"""Return our platform name 'win32', 'linux_x86_64'"""
# XXX remove distutils dependency
result = distutils.util.get_platform()
if result.startswith("macosx") and archive_root is not None:
result = calculate_macosx_platform_tag(archive_root, result)
if result == "linux_x86_64" and sys.maxsize == 2147483647:
# pip pull request #3497
result = "linux_i686"
return result
def get_flag(var, fallback, expected=True, warn=True):
"""Use a fallback value for determining SOABI flags if the needed config
var is unset or unavailable."""
val = get_config_var(var)
if val is None:
if warn:
warnings.warn("Config variable '{0}' is unset, Python ABI tag may "
"be incorrect".format(var), RuntimeWarning, 2)
return fallback
return val == expected
def get_abi_tag():
"""Return the ABI tag based on SOABI (if available) or emulate SOABI
(CPython 2, PyPy)."""
soabi = get_config_var('SOABI')
impl = tags.interpreter_name()
if not soabi and impl in ('cp', 'pp') and hasattr(sys, 'maxunicode'):
d = ''
m = ''
u = ''
if get_flag('Py_DEBUG',
hasattr(sys, 'gettotalrefcount'),
warn=(impl == 'cp')):
d = 'd'
if get_flag('WITH_PYMALLOC',
impl == 'cp',
warn=(impl == 'cp' and
sys.version_info < (3, 8))) \
and sys.version_info < (3, 8):
m = 'm'
if get_flag('Py_UNICODE_SIZE',
sys.maxunicode == 0x10ffff,
expected=4,
warn=(impl == 'cp' and
sys.version_info < (3, 3))) \
and sys.version_info < (3, 3):
u = 'u'
abi = '%s%s%s%s%s' % (impl, tags.interpreter_version(), d, m, u)
elif soabi and soabi.startswith('cpython-'):
abi = 'cp' + soabi.split('-')[1]
elif soabi and soabi.startswith('pypy-'):
# we want something like pypy36-pp73
abi = '-'.join(soabi.split('-')[:2])
abi = abi.replace('.', '_').replace('-', '_')
elif soabi:
abi = soabi.replace('.', '_').replace('-', '_')
else:
abi = None
return abi
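# For example (sketch, value illustrative): with SOABI reported as
# 'cpython-38-x86_64-linux-gnu', the branch above yields the ABI tag 'cp38'.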
def safer_name(name):
return safe_name(name).replace('-', '_')
def safer_version(version):
return safe_version(version).replace('-', '_')
def remove_readonly(func, path, excinfo):
print(str(excinfo[1]))
os.chmod(path, stat.S_IWRITE)
func(path)
class bdist_wheel(Command):
description = 'create a wheel distribution'
supported_compressions = OrderedDict([
('stored', ZIP_STORED),
('deflated', ZIP_DEFLATED)
])
user_options = [('bdist-dir=', 'b',
"temporary directory for creating the distribution"),
('plat-name=', 'p',
"platform name to embed in generated filenames "
"(default: %s)" % get_platform(None)),
('keep-temp', 'k',
"keep the pseudo-installation tree around after " +
"creating the distribution archive"),
('dist-dir=', 'd',
"directory to put final built distributions in"),
('skip-build', None,
"skip rebuilding everything (for testing/debugging)"),
('relative', None,
"build the archive using relative paths "
"(default: false)"),
('owner=', 'u',
"Owner name used when creating a tar file"
" [default: current user]"),
('group=', 'g',
"Group name used when creating a tar file"
" [default: current group]"),
('universal', None,
"make a universal wheel"
" (default: false)"),
('compression=', None,
"zipfile compression (one of: {})"
" (default: 'deflated')"
.format(', '.join(supported_compressions))),
('python-tag=', None,
"Python implementation compatibility tag"
" (default: '%s')" % (python_tag())),
('build-number=', None,
"Build number for this particular version. "
"As specified in PEP-0427, this must start with a digit. "
"[default: None]"),
('py-limited-api=', None,
"Python tag (cp32|cp33|cpNN) for abi3 wheel tag"
" (default: false)"),
]
boolean_options = ['keep-temp', 'skip-build', 'relative', 'universal']
def initialize_options(self):
self.bdist_dir = None
self.data_dir = None
self.plat_name = None
self.plat_tag = None
self.format = 'zip'
self.keep_temp = False
self.dist_dir = None
self.egginfo_dir = None
self.root_is_pure = None
self.skip_build = None
self.relative = False
self.owner = None
self.group = None
self.universal = False
self.compression = 'deflated'
self.python_tag = python_tag()
self.build_number = None
self.py_limited_api = False
self.plat_name_supplied = False
def finalize_options(self):
if self.bdist_dir is None:
bdist_base = self.get_finalized_command('bdist').bdist_base
self.bdist_dir = os.path.join(bdist_base, 'wheel')
self.data_dir = self.wheel_dist_name + '.data'
self.plat_name_supplied = self.plat_name is not None
try:
self.compression = self.supported_compressions[self.compression]
except KeyError:
raise ValueError('Unsupported compression: {}'.format(self.compression))
need_options = ('dist_dir', 'plat_name', 'skip_build')
self.set_undefined_options('bdist',
*zip(need_options, need_options))
self.root_is_pure = not (self.distribution.has_ext_modules()
or self.distribution.has_c_libraries())
if self.py_limited_api and not re.match(PY_LIMITED_API_PATTERN, self.py_limited_api):
raise ValueError("py-limited-api must match '%s'" % PY_LIMITED_API_PATTERN)
# Support legacy [wheel] section for setting universal
wheel = self.distribution.get_option_dict('wheel')
if 'universal' in wheel:
# please don't define this in your global configs
logger.warn('The [wheel] section is deprecated. Use [bdist_wheel] instead.')
val = wheel['universal'][1].strip()
if val.lower() in ('1', 'true', 'yes'):
self.universal = True
if self.build_number is not None and not self.build_number[:1].isdigit():
raise ValueError("Build tag (build-number) must start with a digit.")
@property
def wheel_dist_name(self):
"""Return distribution full name with - replaced with _"""
components = (safer_name(self.distribution.get_name()),
safer_version(self.distribution.get_version()))
if self.build_number:
components += (self.build_number,)
return '-'.join(components)
def get_tag(self):
# bdist sets self.plat_name if unset, we should only use it for purepy
# wheels if the user supplied it.
if self.plat_name_supplied:
plat_name = self.plat_name
elif self.root_is_pure:
plat_name = 'any'
else:
# macosx contains system version in platform name so need special handle
if self.plat_name and not self.plat_name.startswith("macosx"):
plat_name = self.plat_name
else:
# on macosx always limit the platform name to comply with any
# c-extension modules in bdist_dir, since the user can specify
# a higher MACOSX_DEPLOYMENT_TARGET via tools like CMake
# on other platforms, and on macosx if there are no c-extension
# modules, use the default platform name.
plat_name = get_platform(self.bdist_dir)
if plat_name in ('linux-x86_64', 'linux_x86_64') and sys.maxsize == 2147483647:
plat_name = 'linux_i686'
plat_name = plat_name.lower().replace('-', '_').replace('.', '_')
if self.root_is_pure:
if self.universal:
impl = 'py2.py3'
else:
impl = self.python_tag
tag = (impl, 'none', plat_name)
else:
impl_name = tags.interpreter_name()
impl_ver = tags.interpreter_version()
impl = impl_name + impl_ver
# We don't work on CPython 3.1, 3.0.
if self.py_limited_api and (impl_name + impl_ver).startswith('cp3'):
impl = self.py_limited_api
abi_tag = 'abi3'
else:
abi_tag = str(get_abi_tag()).lower()
tag = (impl, abi_tag, plat_name)
# issue gh-374: allow overriding plat_name
supported_tags = [(t.interpreter, t.abi, plat_name)
for t in tags.sys_tags()]
assert tag in supported_tags, "would build wheel with unsupported tag {}".format(tag)
return tag
def run(self):
build_scripts = self.reinitialize_command('build_scripts')
build_scripts.executable = 'python'
build_scripts.force = True
build_ext = self.reinitialize_command('build_ext')
build_ext.inplace = False
if not self.skip_build:
self.run_command('build')
install = self.reinitialize_command('install',
reinit_subcommands=True)
install.root = self.bdist_dir
install.compile = False
install.skip_build = self.skip_build
install.warn_dir = False
# A wheel without setuptools scripts is more cross-platform.
# Use the (undocumented) `no_ep` option to setuptools'
# install_scripts command to avoid creating entry point scripts.
install_scripts = self.reinitialize_command('install_scripts')
install_scripts.no_ep = True
# Use a custom scheme for the archive, because we have to decide
# at installation time which scheme to use.
for key in ('headers', 'scripts', 'data', 'purelib', 'platlib'):
setattr(install,
'install_' + key,
os.path.join(self.data_dir, key))
basedir_observed = ''
if os.name == 'nt':
# win32 barfs if any of these are ''; could be '.'?
# (distutils.command.install:change_roots bug)
basedir_observed = os.path.normpath(os.path.join(self.data_dir, '..'))
self.install_libbase = self.install_lib = basedir_observed
setattr(install,
'install_purelib' if self.root_is_pure else 'install_platlib',
basedir_observed)
logger.info("installing to %s", self.bdist_dir)
self.run_command('install')
impl_tag, abi_tag, plat_tag = self.get_tag()
archive_basename = "{}-{}-{}-{}".format(self.wheel_dist_name, impl_tag, abi_tag, plat_tag)
if not self.relative:
archive_root = self.bdist_dir
else:
archive_root = os.path.join(
self.bdist_dir,
self._ensure_relative(install.install_base))
self.set_undefined_options('install_egg_info', ('target', 'egginfo_dir'))
distinfo_dirname = '{}-{}.dist-info'.format(
safer_name(self.distribution.get_name()),
safer_version(self.distribution.get_version()))
distinfo_dir = os.path.join(self.bdist_dir, distinfo_dirname)
self.egg2dist(self.egginfo_dir, distinfo_dir)
self.write_wheelfile(distinfo_dir)
# Make the archive
if not os.path.exists(self.dist_dir):
os.makedirs(self.dist_dir)
wheel_path = os.path.join(self.dist_dir, archive_basename + '.whl')
with WheelFile(wheel_path, 'w', self.compression) as wf:
wf.write_files(archive_root)
# Add to 'Distribution.dist_files' so that the "upload" command works
getattr(self.distribution, 'dist_files', []).append(
('bdist_wheel',
'{}.{}'.format(*sys.version_info[:2]), # like 3.7
wheel_path))
if not self.keep_temp:
logger.info('removing %s', self.bdist_dir)
if not self.dry_run:
rmtree(self.bdist_dir, onerror=remove_readonly)
def write_wheelfile(self, wheelfile_base, generator='bdist_wheel (' + wheel_version + ')'):
from email.message import Message
# Workaround for Python 2.7 for when "generator" is unicode
if sys.version_info < (3,) and not isinstance(generator, str):
generator = generator.encode('utf-8')
msg = Message()
msg['Wheel-Version'] = '1.0' # of the spec
msg['Generator'] = generator
msg['Root-Is-Purelib'] = str(self.root_is_pure).lower()
if self.build_number is not None:
msg['Build'] = self.build_number
# Doesn't work for bdist_wininst
impl_tag, abi_tag, plat_tag = self.get_tag()
for impl in impl_tag.split('.'):
for abi in abi_tag.split('.'):
for plat in plat_tag.split('.'):
msg['Tag'] = '-'.join((impl, abi, plat))
wheelfile_path = os.path.join(wheelfile_base, 'WHEEL')
logger.info('creating %s', wheelfile_path)
buffer = BytesIO()
BytesGenerator(buffer, maxheaderlen=0).flatten(msg)
with open(wheelfile_path, 'wb') as f:
f.write(buffer.getvalue().replace(b'\r\n', b'\r'))
def _ensure_relative(self, path):
# copied from dir_util, deleted
drive, path = os.path.splitdrive(path)
if path[0:1] == os.sep:
path = drive + path[1:]
return path
@property
def license_paths(self):
metadata = self.distribution.get_option_dict('metadata')
files = set()
patterns = sorted({
option for option in metadata.get('license_files', ('', ''))[1].split()
})
if 'license_file' in metadata:
warnings.warn('The "license_file" option is deprecated. Use '
'"license_files" instead.', DeprecationWarning)
files.add(metadata['license_file'][1])
if 'license_file' not in metadata and 'license_files' not in metadata:
patterns = ('LICEN[CS]E*', 'COPYING*', 'NOTICE*', 'AUTHORS*')
for pattern in patterns:
for path in iglob(pattern):
if path.endswith('~'):
logger.debug('ignoring license file "%s" as it looks like a backup', path)
continue
if path not in files and os.path.isfile(path):
logger.info('adding license file "%s" (matched pattern "%s")', path, pattern)
files.add(path)
return files
def egg2dist(self, egginfo_path, distinfo_path):
"""Convert an .egg-info directory into a .dist-info directory"""
def adios(p):
"""Appropriately delete directory, file or link."""
if os.path.exists(p) and not os.path.islink(p) and os.path.isdir(p):
shutil.rmtree(p)
elif os.path.exists(p):
os.unlink(p)
adios(distinfo_path)
if not os.path.exists(egginfo_path):
# There is no egg-info. This is probably because the egg-info
# file/directory is not named matching the distribution name used
# to name the archive file. Check for this case and report
# accordingly.
import glob
pat = os.path.join(os.path.dirname(egginfo_path), '*.egg-info')
possible = glob.glob(pat)
err = "Egg metadata expected at %s but not found" % (egginfo_path,)
if possible:
alt = os.path.basename(possible[0])
err += " (%s found - possible misnamed archive file?)" % (alt,)
raise ValueError(err)
if os.path.isfile(egginfo_path):
# .egg-info is a single file
pkginfo_path = egginfo_path
pkg_info = pkginfo_to_metadata(egginfo_path, egginfo_path)
os.mkdir(distinfo_path)
else:
# .egg-info is a directory
pkginfo_path = os.path.join(egginfo_path, 'PKG-INFO')
pkg_info = pkginfo_to_metadata(egginfo_path, pkginfo_path)
# ignore common egg metadata that is useless to wheel
shutil.copytree(egginfo_path, distinfo_path,
ignore=lambda x, y: {'PKG-INFO', 'requires.txt', 'SOURCES.txt',
'not-zip-safe'}
)
# delete dependency_links if it is only whitespace
dependency_links_path = os.path.join(distinfo_path, 'dependency_links.txt')
with open(dependency_links_path, 'r') as dependency_links_file:
dependency_links = dependency_links_file.read().strip()
if not dependency_links:
adios(dependency_links_path)
write_pkg_info(os.path.join(distinfo_path, 'METADATA'), pkg_info)
for license_path in self.license_paths:
filename = os.path.basename(license_path)
shutil.copy(license_path, os.path.join(distinfo_path, filename))
adios(egginfo_path)
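# Typical invocation (illustrative, not part of this vendored module): the command
# runs through setuptools, e.g.
#   python setup.py bdist_wheel --plat-name manylinux1_x86_64
# and writes dist/<name>-<version>-<pytag>-<abi>-<platform>.whl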
| 39.689655 | 99 | 0.564011 |
4a1ac958ed6b3a63a1d93248821468473d0f7f59
| 860 |
py
|
Python
|
Desafios/des039.py
|
joseangelooliveira-br/Python3
|
c0ba39768706f84f26b0616b75dd8c7971145b0e
|
[
"MIT"
] | null | null | null |
Desafios/des039.py
|
joseangelooliveira-br/Python3
|
c0ba39768706f84f26b0616b75dd8c7971145b0e
|
[
"MIT"
] | null | null | null |
Desafios/des039.py
|
joseangelooliveira-br/Python3
|
c0ba39768706f84f26b0616b75dd8c7971145b0e
|
[
"MIT"
] | null | null | null |
from datetime import date
sexo = int(input("""Informe o seu sexo:
[ 1 ] Masculino
[ 2 ] Feminino
Opção: """))
if sexo == 1:
atual = date.today().year
nasc = int(input('Digite o ano de seu nascimento: '))
idade = atual - nasc
print('Quem nasceu em {}, tem {} anos, em {}.'.format(nasc, idade, atual))
if idade == 18:
print('Você deve se alistar este ano.')
elif idade < 18:
saldo = 18 - idade
print('Ainda faltam {} anos para seu alistamento.'.format(saldo))
ano = atual + saldo
print('Seu alistamento será em {}'.format(ano))
elif idade > 18:
saldo = idade - 18
print('Você já deveria ter ser alistado ha {} anos.'.format(saldo))
ano = atual - saldo
print('Seu alistamento seria em {}'.format(ano))
elif sexo ==2:
print('Voce não precisa se alistar.')
| 30.714286 | 78 | 0.59186 |
4a1ac981747ddf7aff4f66d7d462d7bbf87745ef
| 394 |
py
|
Python
|
sdk/applicationinsights/azure-mgmt-applicationinsights/azure/mgmt/applicationinsights/models.py
|
rsdoherty/azure-sdk-for-python
|
6bba5326677468e6660845a703686327178bb7b1
|
[
"MIT"
] | 3 |
2020-06-23T02:25:27.000Z
|
2021-09-07T18:48:11.000Z
|
sdk/applicationinsights/azure-mgmt-applicationinsights/azure/mgmt/applicationinsights/models.py
|
rsdoherty/azure-sdk-for-python
|
6bba5326677468e6660845a703686327178bb7b1
|
[
"MIT"
] | 510 |
2019-07-17T16:11:19.000Z
|
2021-08-02T08:38:32.000Z
|
sdk/applicationinsights/azure-mgmt-applicationinsights/azure/mgmt/applicationinsights/models.py
|
rsdoherty/azure-sdk-for-python
|
6bba5326677468e6660845a703686327178bb7b1
|
[
"MIT"
] | 5 |
2019-09-04T12:51:37.000Z
|
2020-09-16T07:28:40.000Z
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
from .v2015_05_01.models import *
from .v2017_10_01.models import *
| 43.777778 | 76 | 0.474619 |
4a1ac9e2a709b2c26bcd10f165dd3a65a43931ed
| 5,659 |
py
|
Python
|
data_process/filter_test.py
|
GCaptainNemo/materials-ML
|
0cbc2097a31968bdd167e25d4bdb0750e57ee9fb
|
[
"MIT"
] | null | null | null |
data_process/filter_test.py
|
GCaptainNemo/materials-ML
|
0cbc2097a31968bdd167e25d4bdb0750e57ee9fb
|
[
"MIT"
] | null | null | null |
data_process/filter_test.py
|
GCaptainNemo/materials-ML
|
0cbc2097a31968bdd167e25d4bdb0750e57ee9fb
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# author: 11360
# datetime: 2021/5/29 21:49
from ase.io import read
from data_process.crys_data import ElementData
import numpy as np
import collections
import sys
def find_uncertain_atoms(atomic_number_lst, yinyang_lst):
"""
    input: list of atomic numbers and the corresponding anion/cation signs
    output: valence-electron total of the determinate atoms, their total charge,
            and the atomic numbers whose valence assignment is still uncertain
"""
# print("1. ", atomic_number_lst)
uncertify_atoms_lst = []
valence_electron_num = 0
charge_num = 0
_i = 0
for index, atomic_num in enumerate(atomic_number_lst):
if ElementData.valence_electron[atomic_num] is not np.nan:
valence_electron_num += ElementData.valence_electron[atomic_num]
# ion charge num
if ElementData.valence_electron[atomic_num] > 4:
charge_num = charge_num - 8 + ElementData.valence_electron[atomic_num]
elif ElementData.valence_electron[atomic_num] < 4:
charge_num = charge_num + ElementData.valence_electron[atomic_num]
else:
charge_num += yinyang_lst[_i] * 4
_i += 1
else:
uncertify_atoms_lst.append(atomic_num)
# print("2. valence_electron_num = ", valence_electron_num)
# print("3. charge_num = ", charge_num)
# print("4. uncertify_atoms_lst = ", uncertify_atoms_lst)
return valence_electron_num, charge_num, uncertify_atoms_lst
def hcf(lst):  # greatest common divisor of a list of integers
smaller = min(lst)
for i in reversed(range(1, smaller+1)):
if list(filter(lambda j: j%i != 0, lst)) == []:
return i
def reduced_atomic_numbers(atomic_num_dict):
values = list(atomic_num_dict.values())
max_gongyue = hcf(values)
new_lst = []
for key in atomic_num_dict.keys():
new_lst += [key] * (atomic_num_dict[key] // max_gongyue)
print(new_lst)
return new_lst
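# Example (sketch, hypothetical composition): with atomic_num_dict = {56: 4, 32: 8, 33: 8}
# the greatest common divisor hcf([4, 8, 8]) is 4, so reduced_atomic_numbers
# returns [56, 32, 32, 33, 33].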
def uncertain_charge_permutation(uncertain_atoms_charge_lst):
    stack = [[]]  # enumerate every charge combination of the uncertain elements
# print(uncertain_atoms_charge_lst)
while uncertain_atoms_charge_lst:
charge_lst = uncertain_atoms_charge_lst.pop(0)
lst = []
for zuhe in stack:
for charge in charge_lst:
lst.append(zuhe + [charge])
stack = lst
return stack
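# Example (sketch): uncertain_charge_permutation([[1, 2], [-3]]) enumerates the
# Cartesian product of the candidate charges and returns [[1, -3], [2, -3]].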
def write_supplement_file(atomic_number_lst, valence_electron_lst, charge_lst, file):
text = "原子序数: " + str(atomic_number_lst) + "\n" + \
"价电子数: " + str(valence_electron_lst) + "\n" + \
"价数:" + str(charge_lst) + "\n"
with open(file, "w") as f:
f.write(text)
def judge_yin_yang_num(atomic_number_lst):
"""
    Judge whether the anion/cation counts can balance out; returns
    (is_balanced, signs assigned to the uncertain atoms).
"""
uncertain_num = 0
yinyang_num = 0
for index, atomic_number in enumerate(atomic_number_lst):
if ElementData.cation_anion_lst[atomic_number] is np.nan:
uncertain_num += 1
else:
yinyang_num += ElementData.cation_anion_lst[atomic_number]
margin = abs(int(yinyang_num)) - uncertain_num
if margin > 1e-3:
return False, None
if uncertain_num:
if yinyang_num > 0: # yang > yin
uncertain_yang_num = -margin // 2
uncertain_yin_num = uncertain_num - uncertain_yang_num
else:
uncertain_yin_num = -margin // 2
uncertain_yang_num = uncertain_num - uncertain_yin_num
yinyang_lst = [1 for i in range(uncertain_yang_num)] + [-1 for i in range(uncertain_yin_num)]
return True, yinyang_lst
else:
return True, []
if __name__ == "__main__":
# file_name = "Ba4 Ge8 As8-mp-27810.cif"
file_name = "Na1 Lu1 Pd6 O8-mp-6533.cif"
# file_name = "Ba4 Ge8 P8-mp-27809.cif"
# file_name = "As4 Pd12 Pb8-mp-20257.cif"
src_address = "../data/conventional_cell/" + file_name
atoms = read(src_address)
print("\n---------------------")
print(file_name)
atomic_number_lst = atoms.get_atomic_numbers().tolist()
print("origin atomic_number_lst = ", atomic_number_lst)
atomic_num_dict = collections.Counter(atomic_number_lst)
# atomic_number_lst = reduced_atomic_numbers(atomic_num_dict)
# atomic_number_lst *= 2
print("after atomic_number_lst = ", atomic_number_lst)
yinyang_is_equal, yinyang_lst = judge_yin_yang_num(atomic_number_lst)
print("yinyang_is_equal = ", yinyang_is_equal)
print("yinyang_lst = ", yinyang_lst)
if not yinyang_is_equal:
sys.exit()
atoms_num = len(atomic_number_lst)
valence_electron_num, charge_num, uncertify_atoms_lst = find_uncertain_atoms(atomic_number_lst, yinyang_lst)
print("valence_electron_num = ", valence_electron_num)
print("charge_num = ", charge_num)
print(uncertify_atoms_lst)
#
if not uncertify_atoms_lst:
average_valence_electron = valence_electron_num / atoms_num
if np.abs(average_valence_electron - 4) < 1e-3:
print(file_name, "123 abbey sommerfeid rule!!!!!!!!!!!!!!")
else:
uncertain_atoms_charge_lst = [ElementData.charge_num[_] for _ in uncertify_atoms_lst]
if len(uncertain_atoms_charge_lst) < 9:
charge_stack = uncertain_charge_permutation(uncertain_atoms_charge_lst)
for charge_zuhe in charge_stack:
total_charge = int(sum(charge_zuhe) + charge_num)
if total_charge == 0:
valence_electron_num += sum(charge_zuhe)
average_valence_electron = valence_electron_num / atoms_num
if np.abs(average_valence_electron - 4) < 1e-3:
print(file_name, " abbey sommerfeid rule!!!!!!!!!!!!!!")
break
del atoms
| 36.044586 | 112 | 0.646051 |
4a1acb2410acef8f00ee3c543c70a62b535bbbfa
| 3,811 |
py
|
Python
|
level_19/level_19.py
|
hermes-jr/adventofcode-in-python
|
6699583f449aa2e928427f2c56962de7a84f9dcf
|
[
"MIT"
] | null | null | null |
level_19/level_19.py
|
hermes-jr/adventofcode-in-python
|
6699583f449aa2e928427f2c56962de7a84f9dcf
|
[
"MIT"
] | null | null | null |
level_19/level_19.py
|
hermes-jr/adventofcode-in-python
|
6699583f449aa2e928427f2c56962de7a84f9dcf
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
import re
molecule = ''
reps = []
result1 = set()
result2 = 0
with open('in.txt', 'r') as f:
for line in f:
line = line.strip()
if not line: continue
if __debug__: print("{}".format(line))
if(re.match(r'^\w+$', line) == None):
rep = re.findall(r'^(\w+) => (\w+)$', line)[0]
if __debug__: print ("Rep: {}".format(rep))
reps.append(rep)
else:
molecule = line # could be done cleaner with list[-1]
print ("Molecule: {}".format(molecule))
for sh, exp in reps:
for idx in range(len(molecule)):
if molecule[idx:idx + len(sh)] == sh:
if __debug__: print("Replaceable atom found at {}".format(idx))
y = molecule[:idx] + exp + molecule[idx + len(sh):]
result1.add(y)
print("Unique combinations found: {}".format(len(result1)))
while molecule != 'e':
#tmp = molecule
if __debug__: print("Processing molecule: {}".format(molecule))
for k, v in reps:
if(v not in molecule): continue
molecule = molecule.replace(v, k, 1)
result2 += 1
print("Got 'e' in {} cycles".format(result2))
r"""
--- Day 19: Medicine for Rudolph ---
Rudolph the Red-Nosed Reindeer is sick! His nose isn't shining very brightly, and he needs medicine.
Red-Nosed Reindeer biology isn't similar to regular reindeer biology; Rudolph is going to need custom-made medicine. Unfortunately, Red-Nosed Reindeer chemistry isn't similar to regular reindeer chemistry, either.
The North Pole is equipped with a Red-Nosed Reindeer nuclear fusion/fission plant, capable of constructing any Red-Nosed Reindeer molecule you need. It works by starting with some input molecule and then doing a series of replacements, one per step, until it has the right molecule.
However, the machine has to be calibrated before it can be used. Calibration involves determining the number of molecules that can be generated in one step from a given starting point.
For example, imagine a simpler machine that supports only the following replacements:
H => HO
H => OH
O => HH
Given the replacements above and starting with HOH, the following molecules could be generated:
HOOH (via H => HO on the first H).
HOHO (via H => HO on the second H).
OHOH (via H => OH on the first H).
HOOH (via H => OH on the second H).
HHHH (via O => HH).
So, in the example above, there are 4 distinct molecules (not five, because HOOH appears twice) after one replacement from HOH. Santa's favorite molecule, HOHOHO, can become 7 distinct molecules (over nine replacements: six from H, and three from O).
The machine replaces without regard for the surrounding characters. For example, given the string H2O, the transition H => OO would result in OO2O.
Your puzzle input describes all of the possible replacements and, at the bottom, the medicine molecule for which you need to calibrate the machine. How many distinct molecules can be created after all the different ways you can do one replacement on the medicine molecule?
--- Part Two ---
Now that the machine is calibrated, you're ready to begin molecule fabrication.
Molecule fabrication always begins with just a single electron, e, and applying replacements one at a time, just like the ones during calibration.
For example, suppose you have the following replacements:
e => H
e => O
H => HO
H => OH
O => HH
If you'd like to make HOH, you start with e, and then make the following replacements:
e => O to get O
O => HH to get HH
H => OH (on the second H) to get HOH
So, you could make HOH after 3 steps. Santa's favorite molecule, HOHOHO, can be made in 6 steps.
How long will it take to make the medicine? Given the available replacements and the medicine molecule in your puzzle input, what is the fewest number of steps to go from e to the medicine molecule?
"""
""" Couldn't solve this one completely by myself :( """
| 37.732673 | 282 | 0.718447 |
4a1acb2f68ce5dfa111e2a0356216f1911e60b98
| 12,001 |
py
|
Python
|
lib/python3.8/site-packages/ansible_collections/theforeman/foreman/plugins/modules/compute_resource.py
|
cjsteel/python3-venv-ansible-2.10.5
|
c95395c4cae844dc66fddde9b4343966f4b2ecd5
|
[
"Apache-1.1"
] | null | null | null |
lib/python3.8/site-packages/ansible_collections/theforeman/foreman/plugins/modules/compute_resource.py
|
cjsteel/python3-venv-ansible-2.10.5
|
c95395c4cae844dc66fddde9b4343966f4b2ecd5
|
[
"Apache-1.1"
] | null | null | null |
lib/python3.8/site-packages/ansible_collections/theforeman/foreman/plugins/modules/compute_resource.py
|
cjsteel/python3-venv-ansible-2.10.5
|
c95395c4cae844dc66fddde9b4343966f4b2ecd5
|
[
"Apache-1.1"
] | 2 |
2021-03-30T14:26:02.000Z
|
2021-04-01T18:17:29.000Z
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) Philipp Joos 2017
# (c) Baptiste Agasse 2019
# (c) Mark Hlawatschek 2020
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = '''
---
module: compute_resource
version_added: 1.0.0
short_description: Manage Compute Resources
description:
- Create, update, and delete Compute Resources
author:
- "Philipp Joos (@philippj)"
- "Baptiste Agasse (@bagasse)"
- "Manisha Singhal (@Manisha15) ATIX AG"
- "Mark Hlawatschek (@hlawatschek) ATIX AG"
options:
name:
description: compute resource name
required: true
type: str
updated_name:
description: new compute resource name
required: false
type: str
description:
description: compute resource description
required: false
type: str
provider:
description: Compute resource provider. Required if I(state=present_with_defaults).
required: false
choices: ["vmware", "libvirt", "ovirt", "proxmox", "EC2", "AzureRm", "GCE"]
type: str
provider_params:
description: Parameter specific to compute resource provider. Required if I(state=present_with_defaults).
required: false
type: dict
suboptions:
url:
description:
- URL of the compute resource
type: str
user:
description:
- Username for the compute resource connection, not valid for I(provider=libvirt)
type: str
password:
description:
- Password for the compute resource connection, not valid for I(provider=libvirt)
type: str
region:
description:
- AWS region, AZURE region
type: str
tenant:
description:
- AzureRM tenant
type: str
app_ident:
description:
- AzureRM client id
type: str
datacenter:
description:
- Datacenter the compute resource is in, not valid for I(provider=libvirt)
type: str
display_type:
description:
- Display type to use for the remote console, only valid for I(provider=libvirt)
type: str
use_v4:
description:
- Use oVirt API v4, only valid for I(provider=ovirt)
type: bool
ovirt_quota:
description:
- oVirt quota ID, only valid for I(provider=ovirt)
type: str
project:
description:
- Project id for I(provider=GCE)
type: str
email:
description:
- Email for I(provider=GCE)
type: str
key_path:
description:
- Certificate path for I(provider=GCE)
type: str
zone:
description:
- zone for I(provider=GCE)
type: str
ssl_verify_peer:
description:
- verify ssl from provider I(provider=proxmox)
type: bool
caching_enabled:
description:
- enable caching for I(provider=vmware)
type: bool
extends_documentation_fragment:
- theforeman.foreman.foreman
- theforeman.foreman.foreman.entity_state_with_defaults
- theforeman.foreman.foreman.taxonomy
'''
EXAMPLES = '''
- name: Create livirt compute resource
theforeman.foreman.compute_resource:
name: example_compute_resource
locations:
- Munich
organizations:
- ACME
provider: libvirt
provider_params:
url: libvirt.example.com
display_type: vnc
server_url: "https://foreman.example.com"
username: "admin"
password: "changeme"
state: present
- name: Update libvirt compute resource
theforeman.foreman.compute_resource:
name: example_compute_resource
description: updated compute resource
locations:
- Munich
organizations:
- ACME
provider: libvirt
provider_params:
url: libvirt.example.com
display_type: vnc
server_url: "https://foreman.example.com"
username: "admin"
password: "changeme"
state: present
- name: Delete libvirt compute resource
theforeman.foreman.compute_resource:
name: example_compute_resource
server_url: "https://foreman.example.com"
username: "admin"
password: "changeme"
state: absent
- name: Create vmware compute resource
theforeman.foreman.compute_resource:
name: example_compute_resource
locations:
- Munich
organizations:
- ACME
provider: vmware
provider_params:
caching_enabled: false
url: vsphere.example.com
user: admin
password: secret
datacenter: ax01
server_url: "https://foreman.example.com"
username: "admin"
password: "changeme"
state: present
- name: Create ovirt compute resource
theforeman.foreman.compute_resource:
name: ovirt_compute_resource
locations:
- France/Toulouse
organizations:
- Example Org
provider: ovirt
provider_params:
url: ovirt.example.com
user: ovirt-admin@example.com
password: ovirtsecret
datacenter: aa92fb54-0736-4066-8fa8-b8b9e3bd75ac
ovirt_quota: 24868ab9-c2a1-47c3-87e7-706f17d215ac
use_v4: true
server_url: "https://foreman.example.com"
username: "admin"
password: "changeme"
state: present
- name: Create proxmox compute resource
theforeman.foreman.compute_resource:
name: proxmox_compute_resource
locations:
- Munich
organizations:
- ACME
provider: proxmox
provider_params:
url: https://proxmox.example.com:8006/api2/json
user: root@pam
password: secretpassword
ssl_verify_peer: true
server_url: "https://foreman.example.com"
username: "admin"
password: "changeme"
state: present
- name: create EC2 compute resource
theforeman.foreman.compute_resource:
name: EC2_compute_resource
description: EC2
locations:
- AWS
organizations:
- ACME
provider: EC2
provider_params:
user: AWS_ACCESS_KEY
password: AWS_SECRET_KEY
region: eu-west-1
server_url: "https://foreman.example.com"
username: "admin"
password: "changeme"
state: present
- name: create Azure compute resource
theforeman.foreman.compute_resource:
name: AzureRm_compute_resource
description: AzureRm
locations:
- Azure
organizations:
- ACME
provider: AzureRm
provider_params:
user: SUBSCRIPTION_ID
tenant: TENANT_ID
app_ident: CLIENT_ID
password: CLIENT_SECRET
region: westeurope
server_url: "https://foreman.example.com"
username: "admin"
password: "changeme"
state: present
- name: create GCE compute resource
theforeman.foreman.compute_resource:
name: GCE compute resource
description: Google Cloud Engine
locations:
- GCE
organizations:
- ACME
provider: GCE
provider_params:
project: orcharhino
email: myname@atix.de
key_path: "/usr/share/foreman/gce_orcharhino_key.json"
zone: europe-west3-b
server_url: "https://foreman.example.com"
username: "admin"
password: "changeme"
state: present
'''
RETURN = '''
entity:
description: Final state of the affected entities grouped by their type.
returned: success
type: dict
contains:
compute_resources:
description: List of compute resources.
type: list
elements: dict
'''
from ansible_collections.theforeman.foreman.plugins.module_utils.foreman_helper import ForemanTaxonomicEntityAnsibleModule
def get_provider_info(provider):
provider_name = provider.lower()
if provider_name == 'libvirt':
return 'Libvirt', ['url', 'display_type']
elif provider_name == 'ovirt':
return 'Ovirt', ['url', 'user', 'password', 'datacenter', 'use_v4', 'ovirt_quota']
elif provider_name == 'proxmox':
return 'Proxmox', ['url', 'user', 'password', 'ssl_verify_peer']
elif provider_name == 'vmware':
return 'Vmware', ['url', 'user', 'password', 'datacenter', 'caching_enabled']
elif provider_name == 'ec2':
return 'EC2', ['user', 'password', 'region']
elif provider_name == 'azurerm':
return 'AzureRm', ['user', 'password', 'tenant', 'region', 'app_ident']
elif provider_name == 'gce':
return 'GCE', ['project', 'email', 'key_path', 'zone']
else:
return '', []
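# For example (derived from the mapping above), get_provider_info('ovirt') returns
# ('Ovirt', ['url', 'user', 'password', 'datacenter', 'use_v4', 'ovirt_quota']).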
class ForemanComputeResourceModule(ForemanTaxonomicEntityAnsibleModule):
pass
def main():
module = ForemanComputeResourceModule(
foreman_spec=dict(
name=dict(required=True),
updated_name=dict(),
description=dict(),
provider=dict(choices=['vmware', 'libvirt', 'ovirt', 'proxmox', 'EC2', 'AzureRm', 'GCE']),
display_type=dict(invisible=True),
datacenter=dict(invisible=True),
url=dict(invisible=True),
caching_enabled=dict(invisible=True),
user=dict(invisible=True),
password=dict(invisible=True),
region=dict(invisible=True),
tenant=dict(invisible=True),
app_ident=dict(invisible=True),
use_v4=dict(invisible=True),
ovirt_quota=dict(invisible=True),
project=dict(invisible=True),
email=dict(invisible=True),
key_path=dict(invisible=True),
zone=dict(invisible=True),
ssl_verify_peer=dict(invisible=True),
),
argument_spec=dict(
provider_params=dict(type='dict', options=dict(
url=dict(),
display_type=dict(),
user=dict(),
password=dict(no_log=True),
region=dict(),
tenant=dict(),
app_ident=dict(),
datacenter=dict(),
caching_enabled=dict(type='bool'),
use_v4=dict(type='bool'),
ovirt_quota=dict(),
project=dict(),
email=dict(),
key_path=dict(),
zone=dict(),
ssl_verify_peer=dict(type='bool'),
)),
state=dict(type='str', default='present', choices=['present', 'absent', 'present_with_defaults']),
),
required_if=(
['state', 'present_with_defaults', ['provider', 'provider_params']],
),
)
if not module.desired_absent:
if 'provider' in module.foreman_params:
module.foreman_params['provider'], provider_param_keys = get_provider_info(provider=module.foreman_params['provider'])
provider_params = module.foreman_params.pop('provider_params', {})
for key in provider_param_keys:
if key in provider_params:
module.foreman_params[key] = provider_params.pop(key)
if provider_params:
module.fail_json(msg="Provider {0} does not support the following given parameters: {1}".format(
module.foreman_params['provider'], list(provider_params.keys())))
with module.api_connection():
entity = module.lookup_entity('entity')
if not module.desired_absent and 'provider' not in module.foreman_params and entity is None:
module.fail_json(msg='To create a compute resource a valid provider must be supplied')
module.run()
if __name__ == '__main__':
main()
| 29.486486 | 130 | 0.639947 |
4a1acbab106fcde8b224a650af22269e14498eee
| 646 |
py
|
Python
|
tests/fontamental/glyphslib_test.py
|
fadox/fontamental
|
feb0d904a74d739148abf5b9d8e47a034e563dd8
|
[
"BSD-2-Clause"
] | 13 |
2017-06-10T13:32:36.000Z
|
2018-08-31T07:31:56.000Z
|
tests/fontamental/glyphslib_test.py
|
fadox/fontamental
|
feb0d904a74d739148abf5b9d8e47a034e563dd8
|
[
"BSD-2-Clause"
] | 2 |
2018-08-27T16:41:28.000Z
|
2019-07-24T09:15:24.000Z
|
tests/fontamental/glyphslib_test.py
|
fadox/fontamental
|
feb0d904a74d739148abf5b9d8e47a034e563dd8
|
[
"BSD-2-Clause"
] | 3 |
2018-02-11T13:55:04.000Z
|
2018-08-28T12:35:58.000Z
|
import sys
import unittest
from fontamental.glyphslib import GlyphsLib
class IndexTest(unittest.TestCase):
def __init__(self, methodName):
unittest.TestCase.__init__(self, methodName)
def setUp(self):
# self.infoTab = NameTabWidget(None)
options = {
'buildFea': True,
}
self.gl = GlyphsLib(options)
def tearDown(self):
# self.fontInfo.close()
pass
def test_sample1(self):
val = self.gl.Prod2Decimal
self.assertTrue(len(val) >= 1)
def test_sample2(self):
self.assertEqual(2, 2)
if __name__ == "__main__":
unittest.main()
| 19.575758 | 52 | 0.617647 |
4a1acbfb87cc2090275664cab54514341350a01b
| 26,583 |
py
|
Python
|
src/ecs/packing_method.py
|
luojie1024/Huawei_CodeCraft_2018
|
f7fc6db09c65d9b19c773d3a8933109084ec0489
|
[
"Apache-2.0"
] | 3 |
2019-03-01T12:16:02.000Z
|
2019-12-19T07:59:07.000Z
|
src/ecs/packing_method.py
|
luojie1024/Huawei_CodeCraft_2018
|
f7fc6db09c65d9b19c773d3a8933109084ec0489
|
[
"Apache-2.0"
] | null | null | null |
src/ecs/packing_method.py
|
luojie1024/Huawei_CodeCraft_2018
|
f7fc6db09c65d9b19c773d3a8933109084ec0489
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
import copy
from math import ceil
import packing_utils_v2
from const_map import VM_TYPE_DIRT, VM_PARAM, VM_CPU_QU, VM_MEM_QU, PM_TYPE
# Add a simulated-annealing based algorithm
def pack_model1(vmPicker, machineGroup, opt_target="CPU"):
vm_cpu_size, vm_mem_size = vmPicker.origin_cpu_mem_sum()
C = machineGroup.machine_info["CPU"]
M = machineGroup.machine_info["MEM"]
num = max(vm_cpu_size * 1.0 / C, vm_mem_size * 1.0 / M)
    T = 100.0  # initial annealing temperature
    Tmin = 1  # stopping temperature
    r = 0.9999  # cooling coefficient
pass
def pack_model(vmWorker, serverObj, opt_target='CPU'):
'''
    Concrete packing scheme 1 (packing1): memory/CPU (M/U) weight allocation
    '''
    # obtain the placement order
    vm_orders = [[],  # vm_types
                 []]  # counts
    weightes = [1, 2, 4]
    cpu = [1, 2, 4, 8, 16, 32]
    vm_cpu_size, vm_mem_size = vmWorker.origin_cpu_mem_sum()
    if vm_cpu_size == 0: return  # nothing to pack, done
    pw = vm_mem_size * 1.0 / vm_cpu_size
    C = serverObj.server_info['CPU']  # CPU cores per physical machine
    M = serverObj.server_info['MEM']  # memory per physical machine
    bw = M * 1.0 / C  # physical machine weight (MEM/CPU)
#######################################
print 'pw=%.2f,bw=%.2f' % (pw, bw)
#
num = max(vm_cpu_size * 1.0 / C, vm_mem_size * 1.0 / M)
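    # Lower bound on the number of machines: e.g. with C=56, M=128 and a demand of
    # 300 vCPUs / 500 GB (illustrative numbers), num = max(300/56, 500/128) ~= 5.36,
    # so at least 6 physical machines are required.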
print 'num=%d' % (ceil(num))
print 'cpu%%=%.2f mem%%=%.2f' % (vm_cpu_size * 100.0 / (num * C),
vm_mem_size * 100.0 / (num * M))
#######################################
    # create the minimum number of physical machines; one already exists in the
    # cluster, so only num - 1 new ones are needed
    serverObj.new_physic_machine(num=num - 1)
    # pick VMs ordered by CPU from large to small
pick_func = vmWorker.get_vm_by_cpu
dirt = cpu
start = len(dirt) - 1
end = -1
step = -1
order = 0
for i in range(start, end, step):
tmp = pick_func(dirt[i], order)
if tmp != None:
vm_orders[0].extend(tmp[0])
vm_orders[1].extend(tmp[1])
if opt_target == 'CPU':
opt_index = 0
elif opt_target == 'MEM':
opt_index = 1
else:
opt_index = 2
vm_type_size = len(vm_orders[0])
    if vm_type_size == 0: return  # no items to pack, done
for vm_index in range(vm_type_size):
vm_type = vm_orders[0][vm_index]
vm_cot = vm_orders[1][vm_index]
pm_size = serverObj.pm_size
for rept in range(vm_cot):
in_id = -1
max_opt = -1
for pm_id in range(pm_size):
ok, re_items = serverObj.test_put_vm(pm_id, vm_type)
if not ok: continue
if max_opt < re_items[opt_index]:
max_opt = re_items[opt_index]
in_id = pm_id
            if in_id < 0:  # the VM cannot fit on any existing physical machine
pm_size = serverObj.new_physic_machine()
re_items = serverObj.put_vm(pm_size - 1, vm_type)
if re_items == None:
raise ValueError('ENDLESS LOOP ! ')
else:
serverObj.put_vm(in_id, vm_type)
return (vm_cpu_size * 100.0 / (num * C), vm_mem_size * 100.0 / (num * M))
def pack_model2(vmWorker, serverObj,target_c_m=None):
'''
    Concrete packing scheme 2: memory/CPU (M/U) weight allocation
    :param vmWorker: virtual machine worker (predicted VM demand)
    :param serverObj: physical server group
    :return:
    '''
    # obtain the placement order
    vm_orders = [[],  # vm_types
                 []]  # counts
    weightes = [1, 2, 4]
    cpu = [1, 2, 4, 8, 16, 32]
    # physical machine id
    pm_id = 0
    # total CPU and memory required
    vm_cpu_size, vm_mem_size = vmWorker.origin_cpu_mem_sum()
    # nothing to pack, done
    if vm_cpu_size == 0:
        return
    # baseline CPU/MEM ratio
    baseline_C_M = 56.0 / 128
    print ('baseline C/M %.2f \n' % (baseline_C_M))
    # placement queues, keyed by CPU/MEM ratio:
    '''
    [[vm_type],[count]]
    vm_orders['1.0']
    vm_orders['2.0']
    vm_orders['4.0']
    '''
    # fetch the placement queues
    vm_orders = vmWorker.get_vm_order(cpu[-1])
    # empty queue, done
    if len(vm_orders) == 0:
        return  # no items to pack, done
    # while there are still VMs to place
while (serverObj.is_packing()):
        # overall CPU/MEM ratio of the remaining demand
        C_M = serverObj.get_sum_C_M()
        vm_cpu_size, vm_mem_size=serverObj.get_lave_cpu_mem_sum()
        # with a large amount of VMs left, prefer the big physical machines first
        if (vm_cpu_size >= 112 and vm_mem_size >= 256):
            if (C_M > baseline_C_M):  # CPU-heavy: use a High-Performance machine
                print ('C/M %.2f > baseline C/M %.2f \n' % (C_M, baseline_C_M))
                pm_id = serverObj.new_physic_machine('High-Performance')
            else:  # memory-heavy: use a Large-Memory machine
                pm_id = serverObj.new_physic_machine('Large-Memory')
        # elif (vm_cpu_size > 56 and vm_cpu_size <= 84 and vm_mem_size > 192 and vm_mem_size <=256):  # final resource optimisation strategy
        #     pm_id = serverObj.new_physic_machine('Large-Memory')
        # elif (vm_cpu_size > 84 and vm_cpu_size <= 112 and vm_mem_size > 128 and vm_mem_size <= 192):  # final resource optimisation strategy
        #     pm_id = serverObj.new_physic_machine('High-Performance')
        # else:  # final resource optimisation strategy
        #     pm_id = serverObj.new_physic_machine('General')
elif vm_cpu_size>=112 and vm_mem_size<256 and vm_mem_size>=192:
pm_id=serverObj.new_physic_machine("High-Performance")
elif vm_cpu_size>=112 and vm_mem_size<192 and vm_mem_size>128:
pm_id=serverObj.new_physic_machine("General")
elif vm_cpu_size<112 and vm_cpu_size>=84 and vm_mem_size>=256:
pm_id=serverObj.new_physic_machine("Large-Memory")
elif vm_cpu_size<112 and vm_cpu_size>=84 and vm_mem_size<256 and vm_mem_size>=192:
pm_id=serverObj.new_physic_machine("General")
elif vm_cpu_size<112 and vm_cpu_size>=84 and vm_mem_size<192 and vm_mem_size>=128:
pm_id=serverObj.new_physic_machine("High-Performance")
elif vm_cpu_size<112 and vm_cpu_size>=84 and vm_mem_size<128:
pm_id=serverObj.new_physic_machine("High-Performance")
elif vm_cpu_size<84 and vm_cpu_size>=56 and vm_mem_size>=256:
pm_id=serverObj.new_physic_machine("General")
elif vm_cpu_size<84 and vm_cpu_size>=56 and vm_mem_size<256 and vm_mem_size>=192:
pm_id=serverObj.new_physic_machine("Large-Memory")
elif vm_cpu_size<84 and vm_cpu_size>=56 and vm_mem_size<192 and vm_mem_size>=128:
pm_id=serverObj.new_physic_machine("Large-Memory")
elif vm_cpu_size<84 and vm_cpu_size>=56 and vm_mem_size<128:
pm_id = serverObj.new_physic_machine("High-Performance")
        elif vm_cpu_size<56 and vm_mem_size>=192 and vm_mem_size<256:
pm_id = serverObj.new_physic_machine("Large-Memory")
elif vm_cpu_size<56 and vm_mem_size>=128 and vm_mem_size<192:
pm_id = serverObj.new_physic_machine("High-Performance")
elif vm_cpu_size <= 56 and vm_mem_size <= 128:
pm_id = serverObj.new_physic_machine("General")
else:
pm_id = serverObj.new_physic_machine("General")
# pm_id = serverObj.new_physic_machine('High-Performance')
# pass
        # whether every VM type has already been tried
        is_all_picked = False
        # marks which queue the chosen VM came from
        vm_index = ''
        # while this machine still has free space and not every type has been tried
        while (serverObj.is_free(pm_id) and not is_all_picked):
            # choose the placement according to this machine's CPU/MEM ratio
            c_m = serverObj.get_pm_c_m(pm_id)
            # pick the nearest optimisation target ratio
            if target_c_m==None:
                target_c_m = serverObj.get_nearest_distance(c_m)
            # else:
            #     target_c_m=0.5
            # to approach the target ratio, place memory-heavy / CPU-light VMs first
            # if c_m < target_c_m:
            # distance to the optimisation target
            distance_target = 10
            # the VM type that should be placed
in_type = None
for i in range(len(vm_orders['4.0'][0])):
vm_type = vm_orders['4.0'][0][i]
                # count greater than zero
if vm_orders['4.0'][1][i] > 0:
ok, re_items = serverObj.test_put_vm(pm_id, vm_type)
if not ok: continue
                    # keep this type if it moves the ratio closer to the target
if distance_target > abs(re_items[2] - target_c_m):
distance_target = abs(re_items[2] - target_c_m)
in_type = vm_type
vm_index = '4.0'
break
for i in range(len(vm_orders['2.0'][0])):
vm_type = vm_orders['2.0'][0][i]
                # count greater than zero
if vm_orders['2.0'][1][i] > 0:
ok, re_items = serverObj.test_put_vm(pm_id, vm_type)
if not ok: continue
                    # keep this type if it moves the ratio closer to the target
if distance_target > abs(re_items[2] - target_c_m):
distance_target = abs(re_items[2] - target_c_m)
in_type = vm_type
vm_index = '2.0'
break
for i in range(len(vm_orders['1.0'][0])):
vm_type = vm_orders['1.0'][0][i]
                # count greater than zero
if vm_orders['1.0'][1][i] > 0:
ok, re_items = serverObj.test_put_vm(pm_id, vm_type)
if not ok: continue
                    # keep this type if it moves the ratio closer to the target
if distance_target > abs(re_items[2] - target_c_m):
distance_target = abs(re_items[2] - target_c_m)
in_type = vm_type
vm_index = '1.0'
break
            # no VM type fits any more; break out and open a new physical machine
            if in_type == None:
                is_all_picked = True
            else:  # place the best-fitting VM
                serverObj.put_vm(pm_id, in_type)
                postion = vm_orders[vm_index][0].index(in_type)
                # decrement the count in the corresponding queue
if vm_orders[vm_index][1][postion] > 0:
vm_orders[vm_index][1][postion] -= 1
else:
print('error: in_type 0 \n')
return vm_cpu_size * 100.0
###########################################################################################
def pack_model3(vmWorker, serverObj,target_c_m=None):
'''
    Concrete packing scheme 3: memory/CPU (M/U) weight allocation
    :param vmWorker: virtual machine worker (predicted VM demand)
    :param serverObj: physical server group
    :return:
'''
    # obtain the placement order
    vm_orders = [[],  # vm_types
                 []]  # counts
weightes = [1, 2, 4]
cpu = [1, 2, 4, 8, 16, 32]
    # physical machine id
pm_id = 0
    # total CPU and memory required
vm_cpu_size, vm_mem_size = vmWorker.origin_cpu_mem_sum()
    # nothing to pack, done
if vm_cpu_size == 0:
return
    # baseline CPU/MEM ratio
baseline_C_M = 56.0 / 128
print ('baseline C/M %.2f \n' % (baseline_C_M))
    # placement queues, keyed by CPU/MEM ratio:
'''
[[vm_type],[count]]
vm_orders['1.0']
vm_orders['2.0']
vm_orders['4.0']
'''
    # fetch the placement queues (already ordered)
    vm_orders = vmWorker.get_vm_order(cpu[-1])
    # empty queue, done
    # if len(vm_orders) == 0:
    #     return # no items to pack, done
if len(vm_orders["4.0"][0])==0 and len(vm_orders["2.0"][0])==0 and len(vm_orders["1.0"][0])==0:
return
    # while there are still VMs to place
while (serverObj.is_packing()):
        # overall CPU/MEM ratio of the remaining demand
C_M = serverObj.get_sum_C_M()
vm_cpu_size, vm_mem_size=serverObj.get_lave_cpu_mem_sum()
if vm_cpu_size>=112 and vm_mem_size>=256:
if C_M>2:
                print ('C/M %.2f > 2 \n' % C_M)
pm_id=serverObj.new_physic_machine("Large-Memory")
else:
pm_id=serverObj.new_physic_machine("High-Performance")
elif vm_cpu_size<=56 and vm_mem_size<=128:
pm_id=serverObj.new_physic_machine("General")
elif vm_cpu_size>=112 and vm_mem_size<256 and vm_mem_size>=192:
pm_id=serverObj.new_physic_machine("High-Performance")
elif vm_cpu_size>=112 and vm_mem_size<192 and vm_mem_size>128:
pm_id=serverObj.new_physic_machine("General")
elif vm_cpu_size<112 and vm_cpu_size>=84 and vm_mem_size>=256:
pm_id=serverObj.new_physic_machine("Large-Memory")
elif vm_cpu_size<112 and vm_cpu_size>=84 and vm_mem_size<256 and vm_mem_size>=192:
pm_id=serverObj.new_physic_machine("General")
elif vm_cpu_size<112 and vm_cpu_size>=84 and vm_mem_size<192 and vm_mem_size>=128:
pm_id=serverObj.new_physic_machine("High-Performance")
elif vm_cpu_size<112 and vm_cpu_size>=84 and vm_mem_size<128:
pm_id=serverObj.new_physic_machine("High-Performance")
elif vm_cpu_size<84 and vm_cpu_size>=56 and vm_mem_size>=256:
pm_id=serverObj.new_physic_machine("General")
elif vm_cpu_size<84 and vm_cpu_size>=56 and vm_mem_size<256 and vm_mem_size>=192:
pm_id=serverObj.new_physic_machine("Large-Memory")
elif vm_cpu_size<84 and vm_cpu_size>=56 and vm_mem_size<192 and vm_mem_size>=128:
pm_id=serverObj.new_physic_machine("Large-Memory")
elif vm_cpu_size<84 and vm_cpu_size>=56 and vm_mem_size<128:
pm_id = serverObj.new_physic_machine("High-Performance")
        elif vm_cpu_size<56 and vm_mem_size>=192 and vm_mem_size<256:
pm_id = serverObj.new_physic_machine("Large-Memory")
elif vm_cpu_size<56 and vm_mem_size>=128 and vm_mem_size<192:
pm_id = serverObj.new_physic_machine("High-Performance")
        # with a large amount of VMs left, prefer the big physical machines first
# if (vm_cpu_size >= 112 and vm_mem_size >= 256):
        #     if (C_M > baseline_C_M):  # CPU-heavy: use a High-Performance machine
        # c
        #         pm_id = serverObj.new_physic_machine('High-Performance')
        #     elif (C_M<=baseline_C_M and C_M>1.5):
        #         pm_id=serverObj.new_physic_machine("Large-Memory")
        #     else:
        #         pm_id=serverObj.new_physic_machine("High-Performance")
        #     #
        #     # else:  # memory-heavy: use a Large-Memory machine
        #     #     pm_id = serverObj.new_physic_machine('Large-Memory')
        # elif (vm_cpu_size > 56 and vm_cpu_size <= 84 and vm_mem_size > 192 and vm_mem_size <=256):  # final resource optimisation strategy
        #     pm_id = serverObj.new_physic_machine('Large-Memory')
        # elif (vm_cpu_size > 84 and vm_cpu_size <= 112 and vm_mem_size > 128 and vm_mem_size <= 192):  # final resource optimisation strategy
        #     pm_id = serverObj.new_physic_machine('High-Performance')
        # else:  # final resource optimisation strategy
# pm_id = serverObj.new_physic_machine('General')
# pm_id = serverObj.new_physic_machine('High-Performance')
# pass
        # whether every VM type has already been tried
        is_all_picked = False
        # marks which queue the chosen VM came from
        vm_index = ''
        # while this machine still has free space and not every type has been tried
        while (serverObj.is_free(pm_id) and not is_all_picked):
            # choose the placement according to this machine's CPU/MEM ratio
            c_m = serverObj.get_pm_c_m(pm_id)
            # pick the nearest optimisation target ratio
            if target_c_m==None:
                target_c_m = serverObj.get_nearest_distance(c_m)
            # else:
            #     target_c_m=0.5
            # to approach the target ratio, place memory-heavy / CPU-light VMs first
            # if c_m < target_c_m:
            # distance to the optimisation target
            distance_target = 10
            # the VM type that should be placed
in_type = None
for i in range(len(vm_orders['4.0'][0])):
vm_type = vm_orders['4.0'][0][i]
                # count greater than zero
if vm_orders['4.0'][1][i] > 0:
for j in range(vm_orders["4.0"][1][i]):
ok, re_items = serverObj.test_put_vm(pm_id, vm_type)
if not ok: continue
                        # keep this type if it moves the ratio closer to the target
if distance_target > abs(re_items[2] - target_c_m):
distance_target = abs(re_items[2] - target_c_m)
in_type = vm_type
vm_index = '4.0'
break
for i in range(len(vm_orders['2.0'][0])):
vm_type = vm_orders['2.0'][0][i]
                # count greater than zero
if vm_orders['2.0'][1][i] > 0:
for j in range(vm_orders["2.0"][1][i]):
ok, re_items = serverObj.test_put_vm(pm_id, vm_type)
if not ok: continue
                        # keep this type if it moves the ratio closer to the target
if distance_target > abs(re_items[2] - target_c_m):
distance_target = abs(re_items[2] - target_c_m)
in_type = vm_type
vm_index = '2.0'
break
for i in range(len(vm_orders['1.0'][0])):
vm_type = vm_orders['1.0'][0][i]
                # count greater than zero
if vm_orders['1.0'][1][i] > 0:
for j in range(vm_orders["1.0"][1][i]):
ok, re_items = serverObj.test_put_vm(pm_id, vm_type)
if not ok: continue
                        # keep this type if it moves the ratio closer to the target
if distance_target > abs(re_items[2] - target_c_m):
distance_target = abs(re_items[2] - target_c_m)
in_type = vm_type
vm_index = '1.0'
break
            # no VM type fits any more; break out and open a new physical machine
            if in_type == None:
                is_all_picked = True
            else:  # place the best-fitting VM
                serverObj.put_vm(pm_id, in_type)
                postion = vm_orders[vm_index][0].index(in_type)
                # decrement the count in the corresponding queue
if vm_orders[vm_index][1][postion] > 0:
vm_orders[vm_index][1][postion] -= 1
else:
print('error: in_type 0 \n')
return vm_cpu_size * 100.0
################################################# optimisation schemes ########################################
#
# def search_maximum_way1(dataObj, predict_result):
# global res_use_pro
# global vm_size
# global vm
# global pm_size
# global pm
# global try_result
# global other_res_use_pro
# global vm_map
# vm_size, vm, pm_size, pm, res_use_pro, other_res_use_pro, _ = packing_utils.pack_api(dataObj, predict_result)
# pading_que = []
#
# # 搜索优先级
# if dataObj.opt_target == 'CPU':
# pading_que = [1.0, 2.0, 4.0]
# else:
# pading_que = [4.0, 2.0, 1.0]
#
# # 根据数量初始化队列
# # vm_que=init_que(caseInfo)
#
# try_result = copy.deepcopy(predict_result)
#
# end_vm_pos = 0
# # 找到第一个非0位[1,15]
# for vm_type_index in range(len(VM_TYPE_DIRT) - 1, -1, -1):
# if try_result.has_key(VM_TYPE_DIRT[vm_type_index]) and try_result[VM_TYPE_DIRT[vm_type_index]] > 0: # 键值对存在
# end_vm_pos = vm_type_index
# break
# for que in range(3):
# # 在有数量的区间内填充[1,8]
# for vm_type in range(end_vm_pos, -1, -1):
# if try_result.has_key(VM_TYPE_DIRT[vm_type]) and VM_PARAM[VM_TYPE_DIRT[vm_type]][2] == pading_que[
# que]: # 键值对存在,C/M比相等
# if try_result[VM_TYPE_DIRT[vm_type]] > 0:
# result_modify1(try_result, dataObj, 1, VM_TYPE_DIRT[vm_type], vm_map)
# result_modify1(try_result, dataObj, -1, VM_TYPE_DIRT[vm_type], vm_map)
# else:
# # 找到非0的,最大,虚拟机
# result_modify1(try_result, dataObj, 1, VM_TYPE_DIRT[vm_type], vm_map)
#
#
# def search_maximum_way2(caseInfo, predict_result):
# global res_use_pro
# global vm_size
# global vm
# global pm_size
# global pm
# global try_result
# global other_res_use_pro
# vm_size, vm, pm_size, pm, res_use_pro, other_res_use_pro = packing_utils.pack_api(caseInfo, predict_result)
# pading_que = []
#
# # 搜索优先级
# if caseInfo.opt_target == 'CPU':
# pading_que = [1.0, 2.0, 4.0]
# else:
# pading_que = [4.0, 2.0, 1.0]
#
# # 根据数量初始化队列
# # vm_que=init_que(caseInfo)
#
# # 震荡范围
# value_range = 3
# # 范围表
# data_range = [[value_range] * caseInfo.vm_types_size]
# # 虚拟机类型
# vm_type = caseInfo.vm_types
# # 虚拟机震荡表
# vm_range = dict(zip(vm_type, data_range))
#
# try_result = copy.deepcopy(predict_result)
# end_vm_pos = 0
# # 找到第一个非0位[1,15]
# for vm_type_index in range(len(VM_TYPE_DIRT) - 1, -1, -1):
# if try_result.has_key(VM_TYPE_DIRT[vm_type_index]) and try_result[VM_TYPE_DIRT[vm_type_index]] > 0: # 键值对存在
# end_vm_pos = vm_type_index
# break
# for que in range(3):
# # 在有数量的区间内填充[1,8]
# for vm_type in range(end_vm_pos, -1, -1):
# if try_result.has_key(VM_TYPE_DIRT[vm_type]) and VM_PARAM[VM_TYPE_DIRT[vm_type]][2] == pading_que[
# que]: # 键值对存在,C/M比相等
# # 数量
# if try_result[VM_TYPE_DIRT[vm_type]] > 0:
# result_modify1(try_result, caseInfo, 1, VM_TYPE_DIRT[vm_type])
# result_modify1(try_result, caseInfo, -1, VM_TYPE_DIRT[vm_type])
# else:
# # 找到非0的,最大,虚拟机
# result_modify1(try_result, caseInfo, 1, VM_TYPE_DIRT[vm_type])
#
#
# def result_modify1(predict_result, caseInfo, try_value, vm_type, try_vm_map):
# '''
# :param predict_result: 虚拟机预测结果 贪心搜索局部优解
# :param caseInfo: 训练集信息
# :param try_value: 尝试值
# :param vm_type: 虚拟机类型
# :return:
# '''
# global other_res_use_pro
# global res_use_pro
# global vm_size
# global vm
# global pm_size
# global pm
# global try_result
# global vm_map
# try_predict = copy.deepcopy(predict_result)
# try_vm_map = copy.deepcopy(vm_map)
# try_predict[vm_type][0] = try_predict[vm_type][0] + try_value
# if try_predict[vm_type][0] < 0: # 小于0没有意义
# return
# try_vm_size, try_vm, try_pm_size, try_pm, try_res_use_pro, try_other_res_use_pro, _ = packing_utils.pack_api(
# caseInfo, try_predict)
# if try_res_use_pro > res_use_pro and try_pm_size <= pm_size: # 如果结果优,物理机数量相等或者 【更小,利用率更高 】保存最优结果
# vm_size, vm, pm_size, pm, res_use_pro, other_res_use_pro = try_vm_size, try_vm, try_pm_size, try_pm, try_res_use_pro, try_other_res_use_pro
# try_result = try_predict
# try_vm_map[vm_type] += try_value
# vm_map = try_vm_map
# # 继续深度搜索
# result_modify1(try_predict, caseInfo, try_value, vm_type, try_vm_map)
# elif try_res_use_pro == res_use_pro and try_other_res_use_pro > other_res_use_pro: # 如果没有当前的好,则返回
# vm_size, vm, pm_size, pm, res_use_pro, other_res_use_pro = try_vm_size, try_vm, try_pm_size, try_pm, try_res_use_pro, try_other_res_use_pro
# try_result = try_predict
# try_vm_map[vm_type] += try_value
# vm_map = try_vm_map
# # 继续深度搜索
# result_modify1(try_predict, caseInfo, try_value, vm_type, try_vm_map)
# else:
# return
#
#
# def result_smooth(vm_size, vm, pm_size, pm, dataObje, pm_free):
# '''
# 平滑填充结果集
# :param vm:虚拟机列表
# :param pm_size:虚拟机数量
# :param pm:物理机列表
# :param dataObje:数据对象
# :return:
# '''
# vm_types = dataObje.vm_types
# res_use_pro = 0.0
# other_res_use_pro = 0.0
# VM_QUE = []
# free_cpu = 0.0
# free_mem = 0.0
# # 初始化填充队列
# if dataObje.opt_target == 'CPU':
# VM_QUE = VM_CPU_QU
# res_use_pro = dataObje.CPU * pm
# other_res_use_pro = dataObje.MEM * pm
# else:
# VM_QUE = VM_MEM_QU
# res_use_pro = dataObje.MEM * pm
# other_res_use_pro = dataObje.CPU * pm
#
# epoch = 2
# # 遍历物理机
# for i in range(pm_size):
# M_C = 0.0
# # 进行多轮赋值,防止漏空
# for e in range(epoch): # CPU 内存均有空间
# if pm_free[i][0] and pm_free[i][1]:
# # 计算占比
# M_C = computer_MC(pm_free[i])
# while (M_C >= 1 and pm_free[i][0] and pm_free[i][1]): # CPU 内存均有空间
# # 3轮不同比例的检索
# for vm_type_index in range(len(VM_PARAM) - 1, -1, -1):
# # 比例匹配,并且是属于预测列表的最大资源虚拟机
# if VM_PARAM[VM_TYPE_DIRT[vm_type_index]][2] == M_C and (
# VM_TYPE_DIRT[vm_type_index] in vm_types):
# # CPU 内存均有空间放入该虚拟机
# if VM_PARAM[VM_TYPE_DIRT[vm_type_index]][0] <= pm_free[i][0] and \
# VM_PARAM[VM_TYPE_DIRT[vm_type_index]][1] <= pm_free[i][1]:
# # increase the VM count
# vm_size += 1
# # increment the count in the result list
# vm[VM_TYPE_DIRT[vm_type_index]] += 1
# # add it to this physical machine's list
# if isContainKey(pm[i], VM_TYPE_DIRT[vm_type_index]):
# pm[i][VM_TYPE_DIRT[vm_type_index]] += 1
# else:
# pm[i][VM_TYPE_DIRT[vm_type_index]] = 1
# # deduct from the free-capacity counters
# pm_free[i][0] = pm_free[i][0] - VM_PARAM[VM_TYPE_DIRT[vm_type_index]][0]
# pm_free[i][1] = pm_free[i][1] - VM_PARAM[VM_TYPE_DIRT[vm_type_index]][1]
# # no free resources left, break out of the loop
# if pm_free[i][0] == 0 or pm_free[i][1] == 0:
# break
# # halve the ratio
# M_C = M_C / 2.0
# free_cpu += pm_free[i][0]
# free_mem += pm_free[i][1]
# print('i:cpu:%d mem:%d' % (pm_free[i][0], pm_free[i][1]))
# if dataObje.opt_target == 'CPU':
# res_use_pro = free_cpu / (dataObje.CPU * pm_size)
# other_res_use_pro = free_mem / (dataObje.MEM * pm_size)
# else:
# res_use_pro = free_mem / (dataObje.MEM * pm_size)
# other_res_use_pro = free_cpu / (dataObje.CPU * pm_size)
#
# res_use_pro = (1.0 - res_use_pro) * 100
# other_res_use_pro = (1.0 - other_res_use_pro) * 100
# return vm_size, vm, pm_size, pm, res_use_pro, other_res_use_pro
#
#
# def res_average(vm_size, vm, pm_size, pm, res_use_pro, other_res_use_pro, pm_free, vm_map, dataObj, predict_result):
# avg_predict_result = copy.deepcopy(predict_result)
#
# vm_types = dataObj.vm_types
#
# avg_value = -1
# M_C = 0.0
# if dataObj.opt_target == 'CPU':
# M_C = 4.0
# else:
# M_C = 1.0
#
# if res_use_pro < other_res_use_pro:
# for vm_type in vm_types:
# if VM_PARAM[vm_type][2] == M_C and avg_predict_result[vm_type][0] >= -avg_value:
# avg_predict_result[vm_type][0] += avg_value
#
# return avg_predict_result
#
#
# # check whether the dict contains the key
# def isContainKey(dic, key):
# return key in dic
#
#
# def computer_MC(CM_free):
# # compute the memory/CPU ratio
# M_C = CM_free[1] / CM_free[0]
# if M_C >= 4:
# M_C = 4.0
# elif M_C >= 2:
# M_C = 2.0
# else:
# M_C = 1.0
# return M_C
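# The commented-out smoothing pass above tops up leftover host capacity by
# bucketing each host's free memory/CPU ratio into 4, 2 or 1 and trying the
# matching VM flavours first. A minimal standalone sketch of that bucketing
# (illustrative only; the helper below is local to this sketch and is not used
# elsewhere in this module):
def _ratio_bucket_sketch(free_cpu, free_mem):
    """Return the memory/CPU ratio bucket the smoothing pass would try first."""
    ratio = float(free_mem) / free_cpu
    if ratio >= 4:
        return 4.0
    elif ratio >= 2:
        return 2.0
    return 1.0
# e.g. _ratio_bucket_sketch(2, 7) == 2.0, so 1:2 flavours are tried before 1:1.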
#########################################
# Select the packing scheme
used_func = pack_model2
#########################################
| 37.179021 | 149 | 0.544596 |
4a1acd456f02ea00c85ad9179fd6b3eb10a01ba6
| 10,729 |
py
|
Python
|
cms/models/base.py
|
cresset-group/cms
|
727b81e40dd1196e85c240e728a7824121163d4d
|
[
"BSD-3-Clause"
] | null | null | null |
cms/models/base.py
|
cresset-group/cms
|
727b81e40dd1196e85c240e728a7824121163d4d
|
[
"BSD-3-Clause"
] | null | null | null |
cms/models/base.py
|
cresset-group/cms
|
727b81e40dd1196e85c240e728a7824121163d4d
|
[
"BSD-3-Clause"
] | null | null | null |
"""Abstract base models used by the page management application."""
from django.db import models
from django.shortcuts import render
from django.utils.crypto import constant_time_compare, salted_hmac
from watson.search import SearchAdapter
from cms.apps.media.models import ImageRefField
from cms.models.managers import (OnlineBaseManager, PageBaseManager,
PublishedBaseManager, SearchMetaBaseManager)
class PathTokenGenerator:
'''
A simple token generator that takes a path and generates a hash for it.
Intended for use by the CMS publication middleware and OnlineBase derivatives.
In reality it just takes a string so it can be used for other purposes.
'''
key_salt = "django.contrib.auth.tokens.PasswordResetTokenGenerator"
def make_token(self, path):
return salted_hmac(
self.key_salt,
path,
).hexdigest()[::2]
def check_token(self, token, path):
return constant_time_compare(
token,
salted_hmac(self.key_salt, path).hexdigest()[::2]
)
path_token_generator = PathTokenGenerator()
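# A minimal usage sketch (illustrative only, not part of the public API): the
# token is derived solely from the path, so a preview link is only valid for
# the page it was generated for. Assumes configured Django settings (SECRET_KEY).
def _preview_token_sketch():
    token = path_token_generator.make_token("/about-us/")
    assert path_token_generator.check_token(token, "/about-us/")
    assert not path_token_generator.check_token(token, "/another-page/")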
class PublishedBase(models.Model):
"""A model with publication controls."""
objects = PublishedBaseManager()
class Meta:
abstract = True
class PublishedBaseSearchAdapter(SearchAdapter):
"""Base search adapter for PublishedBase derivatives."""
def get_live_queryset(self):
"""Selects only live models."""
return self.model.objects.all()
class OnlineBase(PublishedBase):
objects = OnlineBaseManager()
is_online = models.BooleanField(
"online",
default=True,
help_text=(
"Uncheck this box to remove the page from the public website. "
"Logged-in admin users will still be able to view this page by clicking the 'view on site' button."
),
)
def get_preview_url(self):
if not hasattr(self, 'get_absolute_url'):
return None
return f'{self.get_absolute_url()}?preview={path_token_generator.make_token(self.get_absolute_url())}'
class Meta:
abstract = True
class OnlineBaseSearchAdapter(PublishedBaseSearchAdapter):
"""Base search adapter for OnlineBase derivatives."""
class SearchMetaBase(OnlineBase):
"""Base model for models used to generate a standalone HTML page."""
objects = SearchMetaBaseManager()
# SEO fields.
browser_title = models.CharField(
max_length=1000,
blank=True,
help_text=(
"The heading to use in the user's web browser. "
"Leave blank to use the page title. "
"Search engines pay particular attention to this attribute."
)
)
meta_description = models.TextField(
"description",
blank=True,
help_text="A brief description of the contents of this page.",
)
sitemap_priority = models.FloatField(
"priority",
choices=(
(1.0, "Very high"),
(0.8, "High"),
(0.5, "Medium"),
(0.3, "Low"),
(0.0, "Very low"),
),
default=None,
blank=True,
null=True,
help_text=(
"The relative importance of this content on your site. Search engines use this "
"as a hint when ranking the pages within your site."
),
)
sitemap_changefreq = models.IntegerField(
"change frequency",
choices=(
(1, "Always"),
(2, "Hourly"),
(3, "Daily"),
(4, "Weekly"),
(5, "Monthly"),
(6, "Yearly"),
(7, "Never")
),
default=None,
blank=True,
null=True,
help_text=(
"How frequently you expect this content to be updated. "
"Search engines use this as a hint when scanning your site for updates."
),
)
robots_index = models.BooleanField(
"allow indexing",
default=True,
help_text=(
"Uncheck to prevent search engines from indexing this page. "
"Do this only if the page contains information which you do not wish "
"to show up in search results."
),
)
robots_follow = models.BooleanField(
"follow links",
default=True,
help_text=(
"Uncheck to prevent search engines from following any links they find in this page. "
"Do this only if the page contains links to other sites that you do not wish to "
"publicise."
),
)
robots_archive = models.BooleanField(
"allow archiving",
default=True,
        help_text=(
            "Uncheck this to prevent search engines from archiving this page. "
            "Do this only if the page is likely to change on a very regular basis. "
),
)
# Open Graph fields
og_title = models.CharField(
verbose_name='title',
blank=True,
max_length=100,
help_text='Title that will appear on social media posts. This is limited to 100 characters, '
'but Facebook will truncate the title to 88 characters.'
)
og_description = models.TextField(
verbose_name='description',
blank=True,
max_length=300,
help_text='Description that will appear on social media posts. It is limited to 300 '
'characters, but it is recommended that you do not use anything over 200.'
)
og_image = ImageRefField(
verbose_name='image',
blank=True,
null=True,
help_text='The recommended image size is 1200x627 (1.91:1 ratio); this gives you a big '
                  'stand-out thumbnail. Using an image smaller than 400x209 will give you a '
                  'small thumbnail and will split posts into 2 columns. '
'If you have text on the image make sure it is centered.'
)
# Twitter card fields
# If you make a change here, you'll also need to update the lookup dict in
# pages/templatetages/pages.py where used.
twitter_card = models.IntegerField(
verbose_name='card',
choices=[
(0, 'Summary'),
(1, 'Photo'),
(2, 'Video'),
(3, 'Product'),
(4, 'App'),
(5, 'Gallery'),
(6, 'Large Summary'),
],
blank=True,
null=True,
default=None,
help_text='The type of content on the page. Most of the time "Summary" will suffice. '
'Before you can benefit from any of these fields make sure to go to '
'https://dev.twitter.com/docs/cards/validation/validator and get approved.'
)
twitter_title = models.CharField(
verbose_name='title',
blank=True,
max_length=70,
help_text='The title that appears on the Twitter card, it is limited to 70 characters.'
)
twitter_description = models.TextField(
verbose_name='description',
blank=True,
max_length=200,
help_text='Description that will appear on Twitter cards. It is limited '
                  'to 200 characters. This doesn\'t affect SEO, so focus on copy '
'that complements the tweet and title rather than on keywords.'
)
twitter_image = ImageRefField(
verbose_name='image',
blank=True,
null=True,
        help_text='The minimum size it needs to be is 280x150. If you want to use a larger image '
'make sure the card type is set to "Large Summary".'
)
def get_context_data(self):
"""Returns the SEO context data for this page."""
title = str(self)
# Return the context.
return {
"meta_description": self.meta_description,
"robots_index": self.robots_index,
"robots_archive": self.robots_archive,
"robots_follow": self.robots_follow,
"title": self.browser_title or title,
"header": title,
"og_title": self.og_title,
"og_description": self.og_description,
"og_image": self.og_image,
"twitter_card": self.twitter_card,
"twitter_title": self.twitter_title,
"twitter_description": self.twitter_description,
"twitter_image": self.twitter_image
}
def render(self, request, template, context=None, **kwargs):
"""Renders a template as a HttpResponse using the context of this page."""
page_context = self.get_context_data()
page_context.update(context or {})
return render(request, template, page_context, **kwargs)
class Meta:
abstract = True
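# Illustrative sketch only (not part of this module): a concrete page type built
# on SearchMetaBase picks up the SEO context above automatically by rendering
# through ``render``; the model name and template path here are made up.
class _ExampleArticle(SearchMetaBase):
    title = models.CharField(max_length=200)
    def article_view(self, request):
        # "article.html" receives meta_description, robots_*, og_* and twitter_*
        # values from get_context_data(), plus the extra context passed here.
        return self.render(request, "article.html", {"article": self})
    def __str__(self):
        return self.title
    class Meta:
        abstract = True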
class SearchMetaBaseSearchAdapter(OnlineBaseSearchAdapter):
"""Search adapter for SearchMetaBase derivatives."""
def get_description(self, obj):
"""Returns the meta description."""
return obj.meta_description
def get_live_queryset(self):
"""Selects only live models."""
return super().get_live_queryset().filter(robots_index=True)
class PageBase(SearchMetaBase):
"""
An enhanced SearchMetaBase with a sensible set of common features suitable for
most pages.
"""
objects = PageBaseManager()
# Base fields.
slug = models.SlugField(
max_length=150,
help_text='A unique portion of the URL that is used to identify this '
'specific page using human-readable keywords (e.g., about-us)'
)
title = models.CharField(
max_length=1000,
)
# Navigation fields.
short_title = models.CharField(
max_length=200,
blank=True,
help_text=(
"A shorter version of the title that will be used in site navigation. "
"Leave blank to use the full-length title."
),
)
# SEO fields.
def get_context_data(self):
"""Returns the SEO context data for this page."""
context_data = super().get_context_data()
context_data.update({
"title": self.browser_title or self.title,
"header": self.title,
})
return context_data
# Base model methods.
def __str__(self):
"""
Returns the short title of this page, falling back to the standard
title.
"""
return self.short_title or self.title
class Meta:
abstract = True
class PageBaseSearchAdapter(SearchMetaBaseSearchAdapter):
"""Search adapter for PageBase derivatives."""
def get_title(self, obj):
"""Returns the title of the page."""
return obj.title
| 30.480114 | 111 | 0.605369 |
4a1acd630a8f2039e0d497b1f75a28cddae2871e
| 707 |
py
|
Python
|
lab/refactoring/replace_magic_numbers_with_named_constants.py
|
Andre-Williams22/SPD-2.31-Testing-and-Architecture
|
a28abb56c7b0c920144867f5aa138f70aae65260
|
[
"MIT"
] | null | null | null |
lab/refactoring/replace_magic_numbers_with_named_constants.py
|
Andre-Williams22/SPD-2.31-Testing-and-Architecture
|
a28abb56c7b0c920144867f5aa138f70aae65260
|
[
"MIT"
] | null | null | null |
lab/refactoring/replace_magic_numbers_with_named_constants.py
|
Andre-Williams22/SPD-2.31-Testing-and-Architecture
|
a28abb56c7b0c920144867f5aa138f70aae65260
|
[
"MIT"
] | null | null | null |
# by Kami Bigdely
# Replace magic numbers with named constants
COULOMB_CONSTANT = 8.9875517923e9  # Coulomb's constant k, in N*m^2/C^2
def calculation(charge1, charge2, distance):
    return COULOMB_CONSTANT * charge1 * charge2 / (distance**2)
# First Section
# Given two point charges, calculate the electric force between them.
q1 = int(input('Enter a value of charge q1: '))
q2 = int(input('Enter a value of charge q2: '))
distance = int(input("Enter the distance between two charges: "))
print("Electric Force between q1 and q2 is: ",calculation(q1, q2, distance), "Newton")
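# Worked example (illustrative): for q1 = 2 C, q2 = 3 C and d = 10 m,
# F = k*q1*q2/d^2 = 8.9875517923e9 * 2 * 3 / 100, which is about 5.39e8 Newton.
assert abs(calculation(2, 3, 10) - 5.39253107538e8) < 1e3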
# Second Section
num = int(input('Enter an integer number: '))
if num % 2 == 0:
print(num, "is an even number.")
else:
print(num, "is an odd number.")
| 33.666667 | 86 | 0.691655 |
4a1acdb0fbbcd31a4a69258a508d19496ea52d45
| 95,177 |
py
|
Python
|
moto/s3/responses.py
|
185504a9/moto
|
efc440ef143d25894844a6bc7f7257e2a2ccb95a
|
[
"Apache-2.0"
] | null | null | null |
moto/s3/responses.py
|
185504a9/moto
|
efc440ef143d25894844a6bc7f7257e2a2ccb95a
|
[
"Apache-2.0"
] | null | null | null |
moto/s3/responses.py
|
185504a9/moto
|
efc440ef143d25894844a6bc7f7257e2a2ccb95a
|
[
"Apache-2.0"
] | null | null | null |
from __future__ import unicode_literals
import os
import re
import sys
from botocore.awsrequest import AWSPreparedRequest
from moto.core.utils import (
str_to_rfc_1123_datetime,
py2_strip_unicode_keys,
unix_time_millis,
)
from urllib.parse import (
parse_qs,
parse_qsl,
urlparse,
unquote,
urlencode,
urlunparse,
)
import xmltodict
from moto.packages.httpretty.core import HTTPrettyRequest
from moto.core.responses import _TemplateEnvironmentMixin, ActionAuthenticatorMixin
from moto.core.utils import path_url
from moto.core import ACCOUNT_ID
from moto.settings import S3_IGNORE_SUBDOMAIN_BUCKETNAME
from moto.s3bucket_path.utils import (
bucket_name_from_url as bucketpath_bucket_name_from_url,
parse_key_name as bucketpath_parse_key_name,
is_delete_keys as bucketpath_is_delete_keys,
)
from .exceptions import (
BucketAlreadyExists,
DuplicateTagKeys,
InvalidContinuationToken,
S3ClientError,
MissingBucket,
MissingKey,
MissingVersion,
InvalidPartOrder,
MalformedXML,
MalformedACLError,
IllegalLocationConstraintException,
InvalidNotificationARN,
InvalidNotificationEvent,
ObjectNotInActiveTierError,
NoSystemTags,
PreconditionFailed,
InvalidRange,
)
from .models import (
s3_backend,
get_canned_acl,
FakeGrantee,
FakeGrant,
FakeAcl,
FakeKey,
FakeMultipart,
)
from .utils import (
bucket_name_from_url,
clean_key_name,
undo_clean_key_name,
metadata_from_headers,
parse_region_from_url,
)
from xml.dom import minidom
DEFAULT_REGION_NAME = "us-east-1"
ACTION_MAP = {
"BUCKET": {
"GET": {
"uploads": "ListBucketMultipartUploads",
"location": "GetBucketLocation",
"lifecycle": "GetLifecycleConfiguration",
"versioning": "GetBucketVersioning",
"policy": "GetBucketPolicy",
"website": "GetBucketWebsite",
"acl": "GetBucketAcl",
"tagging": "GetBucketTagging",
"logging": "GetBucketLogging",
"cors": "GetBucketCORS",
"notification": "GetBucketNotification",
"accelerate": "GetAccelerateConfiguration",
"versions": "ListBucketVersions",
"public_access_block": "GetPublicAccessBlock",
"DEFAULT": "ListBucket",
},
"PUT": {
"lifecycle": "PutLifecycleConfiguration",
"versioning": "PutBucketVersioning",
"policy": "PutBucketPolicy",
"website": "PutBucketWebsite",
"acl": "PutBucketAcl",
"tagging": "PutBucketTagging",
"logging": "PutBucketLogging",
"cors": "PutBucketCORS",
"notification": "PutBucketNotification",
"accelerate": "PutAccelerateConfiguration",
"public_access_block": "PutPublicAccessBlock",
"DEFAULT": "CreateBucket",
},
"DELETE": {
"lifecycle": "PutLifecycleConfiguration",
"policy": "DeleteBucketPolicy",
"website": "DeleteBucketWebsite",
"tagging": "PutBucketTagging",
"cors": "PutBucketCORS",
"public_access_block": "DeletePublicAccessBlock",
"DEFAULT": "DeleteBucket",
},
},
"KEY": {
"GET": {
"uploadId": "ListMultipartUploadParts",
"acl": "GetObjectAcl",
"tagging": "GetObjectTagging",
"versionId": "GetObjectVersion",
"DEFAULT": "GetObject",
},
"PUT": {
"acl": "PutObjectAcl",
"tagging": "PutObjectTagging",
"DEFAULT": "PutObject",
        },
        "DELETE": {
            "uploadId": "AbortMultipartUpload",
            "versionId": "DeleteObjectVersion",
            "DEFAULT": "DeleteObject",
},
"POST": {
"uploads": "PutObject",
"restore": "RestoreObject",
"uploadId": "PutObject",
},
},
"CONTROL": {
"GET": {"publicAccessBlock": "GetPublicAccessBlock"},
"PUT": {"publicAccessBlock": "PutPublicAccessBlock"},
"DELETE": {"publicAccessBlock": "DeletePublicAccessBlock"},
},
}
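# Illustrative lookups (not part of upstream moto) showing how the map above
# turns a querystring parameter into an action name; ``_set_action`` below does
# the same lookup and falls back to the "DEFAULT" entry.
assert ACTION_MAP["BUCKET"]["GET"]["acl"] == "GetBucketAcl"
assert ACTION_MAP["KEY"]["PUT"]["DEFAULT"] == "PutObject"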
def parse_key_name(pth):
# strip the first '/' left by urlparse
return pth[1:] if pth.startswith("/") else pth
def is_delete_keys(request, path, bucket_name):
    # Golang sends a request as url/?delete= (treating it as a normal key=value, even if the value is empty)
# Python sends a request as url/?delete (treating it as a flag)
# https://github.com/spulec/moto/issues/2937
return (
path == "/?delete"
or path == "/?delete="
or (path == "/" and getattr(request, "query_string", "") == "delete")
)
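# A small self-contained illustration (not in upstream moto) of the three
# request shapes accepted above; SimpleNamespace stands in for a real request
# object and is only used in this sketch.
def _is_delete_keys_sketch():
    from types import SimpleNamespace
    assert is_delete_keys(SimpleNamespace(), "/?delete", "bucket")
    assert is_delete_keys(SimpleNamespace(), "/?delete=", "bucket")
    assert is_delete_keys(SimpleNamespace(query_string="delete"), "/", "bucket")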
class ResponseObject(_TemplateEnvironmentMixin, ActionAuthenticatorMixin):
def __init__(self, backend):
super(ResponseObject, self).__init__()
self.backend = backend
self.method = ""
self.path = ""
self.data = {}
self.headers = {}
@property
def should_autoescape(self):
return True
def all_buckets(self):
self.data["Action"] = "ListAllMyBuckets"
self._authenticate_and_authorize_s3_action()
# No bucket specified. Listing all buckets
all_buckets = self.backend.get_all_buckets()
template = self.response_template(S3_ALL_BUCKETS)
return template.render(buckets=all_buckets)
def subdomain_based_buckets(self, request):
if S3_IGNORE_SUBDOMAIN_BUCKETNAME:
return False
host = request.headers.get("host", request.headers.get("Host"))
if not host:
host = urlparse(request.url).netloc
if (
not host
or host.startswith("localhost")
or host.startswith("localstack")
or re.match(r"^[^.]+$", host)
or re.match(r"^.*\.svc\.cluster\.local:?\d*$", host)
):
# Default to path-based buckets for (1) localhost, (2) localstack hosts (e.g. localstack.dev),
# (3) local host names that do not contain a "." (e.g., Docker container host names), or
# (4) kubernetes host names
return False
match = re.match(r"^([^\[\]:]+)(:\d+)?$", host)
if match:
match = re.match(
r"((25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)(\.|$)){4}", match.groups()[0]
)
if match:
return False
match = re.match(r"^\[(.+)\](:\d+)?$", host)
if match:
match = re.match(
r"^(((?=.*(::))(?!.*\3.+\3))\3?|[\dA-F]{1,4}:)([\dA-F]{1,4}(\3|:\b)|\2){5}(([\dA-F]{1,4}(\3|:\b|$)|\2){2}|(((2[0-4]|1\d|[1-9])?\d|25[0-5])\.?\b){4})\Z",
match.groups()[0],
re.IGNORECASE,
)
if match:
return False
path_based = host == "s3.amazonaws.com" or re.match(
r"s3[\.\-]([^.]*)\.amazonaws\.com", host
)
return not path_based
def is_delete_keys(self, request, path, bucket_name):
if self.subdomain_based_buckets(request):
return is_delete_keys(request, path, bucket_name)
else:
return bucketpath_is_delete_keys(request, path, bucket_name)
def parse_bucket_name_from_url(self, request, url):
if self.subdomain_based_buckets(request):
return bucket_name_from_url(url)
else:
return bucketpath_bucket_name_from_url(url)
def parse_key_name(self, request, url):
if self.subdomain_based_buckets(request):
return parse_key_name(url)
else:
return bucketpath_parse_key_name(url)
def ambiguous_response(self, request, full_url, headers):
# Depending on which calling format the client is using, we don't know
# if this is a bucket or key request so we have to check
if self.subdomain_based_buckets(request):
return self.key_or_control_response(request, full_url, headers)
else:
# Using path-based buckets
return self.bucket_response(request, full_url, headers)
def bucket_response(self, request, full_url, headers):
self.method = request.method
self.path = self._get_path(request)
self.headers = request.headers
if "host" not in self.headers:
self.headers["host"] = urlparse(full_url).netloc
try:
response = self._bucket_response(request, full_url, headers)
except S3ClientError as s3error:
response = s3error.code, {}, s3error.description
return self._send_response(response)
@staticmethod
def _send_response(response):
if isinstance(response, str):
return 200, {}, response.encode("utf-8")
else:
status_code, headers, response_content = response
if not isinstance(response_content, bytes):
response_content = response_content.encode("utf-8")
return status_code, headers, response_content
def _bucket_response(self, request, full_url, headers):
querystring = self._get_querystring(full_url)
method = request.method
region_name = parse_region_from_url(full_url)
bucket_name = self.parse_bucket_name_from_url(request, full_url)
if not bucket_name:
# If no bucket specified, list all buckets
return self.all_buckets()
self.data["BucketName"] = bucket_name
if hasattr(request, "body"):
# Boto
body = request.body
else:
# Flask server
body = request.data
if body is None:
body = b""
if isinstance(body, bytes):
body = body.decode("utf-8")
body = "{0}".format(body).encode("utf-8")
if method == "HEAD":
return self._bucket_response_head(bucket_name)
elif method == "GET":
return self._bucket_response_get(bucket_name, querystring)
elif method == "PUT":
return self._bucket_response_put(
request, body, region_name, bucket_name, querystring
)
elif method == "DELETE":
return self._bucket_response_delete(body, bucket_name, querystring)
elif method == "POST":
return self._bucket_response_post(request, body, bucket_name)
else:
raise NotImplementedError(
"Method {0} has not been implemented in the S3 backend yet".format(
method
)
)
@staticmethod
def _get_querystring(full_url):
parsed_url = urlparse(full_url)
querystring = parse_qs(parsed_url.query, keep_blank_values=True)
return querystring
def _bucket_response_head(self, bucket_name):
try:
self.backend.get_bucket(bucket_name)
except MissingBucket:
# Unless we do this, boto3 does not raise ClientError on
# HEAD (which the real API responds with), and instead
# raises NoSuchBucket, leading to inconsistency in
# error response between real and mocked responses.
return 404, {}, ""
return 200, {}, ""
def _bucket_response_get(self, bucket_name, querystring):
self._set_action("BUCKET", "GET", querystring)
self._authenticate_and_authorize_s3_action()
if "uploads" in querystring:
for unsup in ("delimiter", "max-uploads"):
if unsup in querystring:
raise NotImplementedError(
"Listing multipart uploads with {} has not been implemented yet.".format(
unsup
)
)
multiparts = list(self.backend.get_all_multiparts(bucket_name).values())
if "prefix" in querystring:
prefix = querystring.get("prefix", [None])[0]
multiparts = [
upload
for upload in multiparts
if upload.key_name.startswith(prefix)
]
template = self.response_template(S3_ALL_MULTIPARTS)
return template.render(bucket_name=bucket_name, uploads=multiparts)
elif "location" in querystring:
bucket = self.backend.get_bucket(bucket_name)
template = self.response_template(S3_BUCKET_LOCATION)
location = bucket.location
# us-east-1 is different - returns a None location
if location == DEFAULT_REGION_NAME:
location = None
return template.render(location=location)
elif "lifecycle" in querystring:
bucket = self.backend.get_bucket(bucket_name)
if not bucket.rules:
template = self.response_template(S3_NO_LIFECYCLE)
return 404, {}, template.render(bucket_name=bucket_name)
template = self.response_template(S3_BUCKET_LIFECYCLE_CONFIGURATION)
return template.render(rules=bucket.rules)
elif "versioning" in querystring:
versioning = self.backend.get_bucket_versioning(bucket_name)
template = self.response_template(S3_BUCKET_GET_VERSIONING)
return template.render(status=versioning)
elif "policy" in querystring:
policy = self.backend.get_bucket_policy(bucket_name)
if not policy:
template = self.response_template(S3_NO_POLICY)
return 404, {}, template.render(bucket_name=bucket_name)
return 200, {}, policy
elif "website" in querystring:
website_configuration = self.backend.get_bucket_website_configuration(
bucket_name
)
if not website_configuration:
template = self.response_template(S3_NO_BUCKET_WEBSITE_CONFIG)
return 404, {}, template.render(bucket_name=bucket_name)
return 200, {}, website_configuration
elif "acl" in querystring:
bucket = self.backend.get_bucket(bucket_name)
template = self.response_template(S3_OBJECT_ACL_RESPONSE)
return template.render(obj=bucket)
elif "tagging" in querystring:
tags = self.backend.get_bucket_tagging(bucket_name)["Tags"]
# "Special Error" if no tags:
if len(tags) == 0:
template = self.response_template(S3_NO_BUCKET_TAGGING)
return 404, {}, template.render(bucket_name=bucket_name)
template = self.response_template(S3_OBJECT_TAGGING_RESPONSE)
return template.render(tags=tags)
elif "logging" in querystring:
logging = self.backend.get_bucket_logging(bucket_name)
if not logging:
template = self.response_template(S3_NO_LOGGING_CONFIG)
return 200, {}, template.render()
template = self.response_template(S3_LOGGING_CONFIG)
return 200, {}, template.render(logging=logging)
elif "cors" in querystring:
cors = self.backend.get_bucket_cors(bucket_name)
if len(cors) == 0:
template = self.response_template(S3_NO_CORS_CONFIG)
return 404, {}, template.render(bucket_name=bucket_name)
template = self.response_template(S3_BUCKET_CORS_RESPONSE)
return template.render(cors=cors)
elif "notification" in querystring:
notification_configuration = self.backend.get_bucket_notification_configuration(
bucket_name
)
if not notification_configuration:
return 200, {}, ""
template = self.response_template(S3_GET_BUCKET_NOTIFICATION_CONFIG)
return template.render(config=notification_configuration)
elif "accelerate" in querystring:
bucket = self.backend.get_bucket(bucket_name)
if bucket.accelerate_configuration is None:
template = self.response_template(S3_BUCKET_ACCELERATE_NOT_SET)
return 200, {}, template.render()
template = self.response_template(S3_BUCKET_ACCELERATE)
return template.render(bucket=bucket)
elif "publicAccessBlock" in querystring:
public_block_config = self.backend.get_bucket_public_access_block(
bucket_name
)
template = self.response_template(S3_PUBLIC_ACCESS_BLOCK_CONFIGURATION)
return template.render(public_block_config=public_block_config)
elif "versions" in querystring:
delimiter = querystring.get("delimiter", [None])[0]
encoding_type = querystring.get("encoding-type", [None])[0]
key_marker = querystring.get("key-marker", [None])[0]
max_keys = querystring.get("max-keys", [None])[0]
prefix = querystring.get("prefix", [""])[0]
version_id_marker = querystring.get("version-id-marker", [None])[0]
bucket = self.backend.get_bucket(bucket_name)
versions = self.backend.get_bucket_versions(
bucket_name,
delimiter=delimiter,
encoding_type=encoding_type,
key_marker=key_marker,
max_keys=max_keys,
version_id_marker=version_id_marker,
prefix=prefix,
)
latest_versions = self.backend.get_bucket_latest_versions(
bucket_name=bucket_name
)
key_list = []
delete_marker_list = []
for version in versions:
if isinstance(version, FakeKey):
key_list.append(version)
else:
delete_marker_list.append(version)
template = self.response_template(S3_BUCKET_GET_VERSIONS)
key_list.sort(key=lambda r: (r.name, -unix_time_millis(r.last_modified)))
return (
200,
{},
template.render(
key_list=key_list,
delete_marker_list=delete_marker_list,
latest_versions=latest_versions,
bucket=bucket,
prefix=prefix,
max_keys=1000,
delimiter="",
is_truncated="false",
),
)
elif "encryption" in querystring:
encryption = self.backend.get_bucket_encryption(bucket_name)
if not encryption:
template = self.response_template(S3_NO_ENCRYPTION)
return 404, {}, template.render(bucket_name=bucket_name)
template = self.response_template(S3_ENCRYPTION_CONFIG)
return 200, {}, template.render(encryption=encryption)
elif querystring.get("list-type", [None])[0] == "2":
return 200, {}, self._handle_list_objects_v2(bucket_name, querystring)
bucket = self.backend.get_bucket(bucket_name)
prefix = querystring.get("prefix", [None])[0]
if prefix and isinstance(prefix, bytes):
prefix = prefix.decode("utf-8")
delimiter = querystring.get("delimiter", [None])[0]
max_keys = int(querystring.get("max-keys", [1000])[0])
marker = querystring.get("marker", [None])[0]
result_keys, result_folders = self.backend.prefix_query(
bucket, prefix, delimiter
)
if marker:
result_keys = self._get_results_from_token(result_keys, marker)
result_keys, is_truncated, next_marker = self._truncate_result(
result_keys, max_keys
)
template = self.response_template(S3_BUCKET_GET_RESPONSE)
return (
200,
{},
template.render(
bucket=bucket,
prefix=prefix,
delimiter=delimiter,
result_keys=result_keys,
result_folders=result_folders,
is_truncated=is_truncated,
next_marker=next_marker,
max_keys=max_keys,
),
)
def _set_action(self, action_resource_type, method, querystring):
action_set = False
for action_in_querystring, action in ACTION_MAP[action_resource_type][
method
].items():
if action_in_querystring in querystring:
self.data["Action"] = action
action_set = True
if not action_set:
self.data["Action"] = ACTION_MAP[action_resource_type][method]["DEFAULT"]
def _handle_list_objects_v2(self, bucket_name, querystring):
template = self.response_template(S3_BUCKET_GET_RESPONSE_V2)
bucket = self.backend.get_bucket(bucket_name)
continuation_token = querystring.get("continuation-token", [None])[0]
if continuation_token is not None and continuation_token == "":
raise InvalidContinuationToken()
prefix = querystring.get("prefix", [None])[0]
if prefix and isinstance(prefix, bytes):
prefix = prefix.decode("utf-8")
delimiter = querystring.get("delimiter", [None])[0]
result_keys, result_folders = self.backend.prefix_query(
bucket, prefix, delimiter
)
fetch_owner = querystring.get("fetch-owner", [False])[0]
max_keys = int(querystring.get("max-keys", [1000])[0])
start_after = querystring.get("start-after", [None])[0]
# sort the combination of folders and keys into lexicographical order
all_keys = result_keys + result_folders
all_keys.sort(key=self._get_name)
if continuation_token or start_after:
limit = continuation_token or start_after
all_keys = self._get_results_from_token(all_keys, limit)
truncated_keys, is_truncated, next_continuation_token = self._truncate_result(
all_keys, max_keys
)
result_keys, result_folders = self._split_truncated_keys(truncated_keys)
key_count = len(result_keys) + len(result_folders)
return template.render(
bucket=bucket,
prefix=prefix or "",
delimiter=delimiter,
key_count=key_count,
result_keys=result_keys,
result_folders=result_folders,
fetch_owner=fetch_owner,
max_keys=max_keys,
is_truncated=is_truncated,
next_continuation_token=next_continuation_token,
start_after=None if continuation_token else start_after,
)
@staticmethod
def _get_name(key):
if isinstance(key, FakeKey):
return key.name
else:
return key
@staticmethod
def _split_truncated_keys(truncated_keys):
result_keys = []
result_folders = []
for key in truncated_keys:
if isinstance(key, FakeKey):
result_keys.append(key)
else:
result_folders.append(key)
return result_keys, result_folders
def _get_results_from_token(self, result_keys, token):
continuation_index = 0
for key in result_keys:
if (key.name if isinstance(key, FakeKey) else key) > token:
break
continuation_index += 1
return result_keys[continuation_index:]
def _truncate_result(self, result_keys, max_keys):
if len(result_keys) > max_keys:
is_truncated = "true"
result_keys = result_keys[:max_keys]
item = result_keys[-1]
next_continuation_token = item.name if isinstance(item, FakeKey) else item
else:
is_truncated = "false"
next_continuation_token = None
return result_keys, is_truncated, next_continuation_token
def _body_contains_location_constraint(self, body):
if body:
try:
xmltodict.parse(body)["CreateBucketConfiguration"]["LocationConstraint"]
return True
except KeyError:
pass
return False
def _create_bucket_configuration_is_empty(self, body):
if body:
try:
create_bucket_configuration = xmltodict.parse(body)[
"CreateBucketConfiguration"
]
del create_bucket_configuration["@xmlns"]
if len(create_bucket_configuration) == 0:
return True
except KeyError:
pass
return False
def _parse_pab_config(self, body):
parsed_xml = xmltodict.parse(body)
parsed_xml["PublicAccessBlockConfiguration"].pop("@xmlns", None)
# If Python 2, fix the unicode strings:
if sys.version_info[0] < 3:
parsed_xml = {
"PublicAccessBlockConfiguration": py2_strip_unicode_keys(
dict(parsed_xml["PublicAccessBlockConfiguration"])
)
}
return parsed_xml
def _bucket_response_put(
self, request, body, region_name, bucket_name, querystring
):
if not request.headers.get("Content-Length"):
return 411, {}, "Content-Length required"
self._set_action("BUCKET", "PUT", querystring)
self._authenticate_and_authorize_s3_action()
if "versioning" in querystring:
ver = re.search("<Status>([A-Za-z]+)</Status>", body.decode())
if ver:
self.backend.set_bucket_versioning(bucket_name, ver.group(1))
template = self.response_template(S3_BUCKET_VERSIONING)
return template.render(bucket_versioning_status=ver.group(1))
else:
return 404, {}, ""
elif "lifecycle" in querystring:
rules = xmltodict.parse(body)["LifecycleConfiguration"]["Rule"]
if not isinstance(rules, list):
# If there is only one rule, xmldict returns just the item
rules = [rules]
self.backend.set_bucket_lifecycle(bucket_name, rules)
return ""
elif "policy" in querystring:
self.backend.set_bucket_policy(bucket_name, body)
return "True"
elif "acl" in querystring:
# Headers are first. If not set, then look at the body (consistent with the documentation):
acls = self._acl_from_headers(request.headers)
if not acls:
acls = self._acl_from_xml(body)
self.backend.set_bucket_acl(bucket_name, acls)
return ""
elif "tagging" in querystring:
tagging = self._bucket_tagging_from_xml(body)
self.backend.put_bucket_tagging(bucket_name, tagging)
return ""
elif "website" in querystring:
self.backend.set_bucket_website_configuration(bucket_name, body)
return ""
elif "cors" in querystring:
try:
self.backend.put_bucket_cors(bucket_name, self._cors_from_xml(body))
return ""
except KeyError:
raise MalformedXML()
elif "logging" in querystring:
try:
self.backend.put_bucket_logging(
bucket_name, self._logging_from_xml(body)
)
return ""
except KeyError:
raise MalformedXML()
elif "notification" in querystring:
try:
self.backend.put_bucket_notification_configuration(
bucket_name, self._notification_config_from_xml(body)
)
return ""
except KeyError:
raise MalformedXML()
except Exception as e:
raise e
elif "accelerate" in querystring:
try:
accelerate_status = self._accelerate_config_from_xml(body)
self.backend.put_bucket_accelerate_configuration(
bucket_name, accelerate_status
)
return ""
except KeyError:
raise MalformedXML()
except Exception as e:
raise e
elif "publicAccessBlock" in querystring:
pab_config = self._parse_pab_config(body)
self.backend.put_bucket_public_access_block(
bucket_name, pab_config["PublicAccessBlockConfiguration"]
)
return ""
elif "encryption" in querystring:
try:
self.backend.put_bucket_encryption(
bucket_name, self._encryption_config_from_xml(body)
)
return ""
except KeyError:
raise MalformedXML()
except Exception as e:
raise e
else:
# us-east-1, the default AWS region behaves a bit differently
# - you should not use it as a location constraint --> it fails
# - querying the location constraint returns None
# - LocationConstraint has to be specified if outside us-east-1
if (
region_name != DEFAULT_REGION_NAME
and not self._body_contains_location_constraint(body)
):
raise IllegalLocationConstraintException()
if body:
if self._create_bucket_configuration_is_empty(body):
raise MalformedXML()
try:
forced_region = xmltodict.parse(body)["CreateBucketConfiguration"][
"LocationConstraint"
]
if forced_region == DEFAULT_REGION_NAME:
raise S3ClientError(
"InvalidLocationConstraint",
"The specified location-constraint is not valid",
)
else:
region_name = forced_region
except KeyError:
pass
try:
new_bucket = self.backend.create_bucket(bucket_name, region_name)
except BucketAlreadyExists:
if region_name == DEFAULT_REGION_NAME:
# us-east-1 has different behavior
new_bucket = self.backend.get_bucket(bucket_name)
else:
raise
if "x-amz-acl" in request.headers:
# TODO: Support the XML-based ACL format
self.backend.set_bucket_acl(
bucket_name, self._acl_from_headers(request.headers)
)
template = self.response_template(S3_BUCKET_CREATE_RESPONSE)
return 200, {}, template.render(bucket=new_bucket)
def _bucket_response_delete(self, body, bucket_name, querystring):
self._set_action("BUCKET", "DELETE", querystring)
self._authenticate_and_authorize_s3_action()
if "policy" in querystring:
self.backend.delete_bucket_policy(bucket_name, body)
return 204, {}, ""
elif "tagging" in querystring:
self.backend.delete_bucket_tagging(bucket_name)
return 204, {}, ""
elif "website" in querystring:
self.backend.delete_bucket_website(bucket_name)
return 204, {}, ""
elif "cors" in querystring:
self.backend.delete_bucket_cors(bucket_name)
return 204, {}, ""
elif "lifecycle" in querystring:
bucket = self.backend.get_bucket(bucket_name)
bucket.delete_lifecycle()
return 204, {}, ""
elif "publicAccessBlock" in querystring:
self.backend.delete_bucket_public_access_block(bucket_name)
return 204, {}, ""
elif "encryption" in querystring:
bucket = self.backend.delete_bucket_encryption(bucket_name)
return 204, {}, ""
removed_bucket = self.backend.delete_bucket(bucket_name)
if removed_bucket:
# Bucket exists
template = self.response_template(S3_DELETE_BUCKET_SUCCESS)
return 204, {}, template.render(bucket=removed_bucket)
else:
# Tried to delete a bucket that still has keys
template = self.response_template(S3_DELETE_BUCKET_WITH_ITEMS_ERROR)
return 409, {}, template.render(bucket=removed_bucket)
def _bucket_response_post(self, request, body, bucket_name):
response_headers = {}
if not request.headers.get("Content-Length"):
return 411, {}, "Content-Length required"
path = self._get_path(request)
if self.is_delete_keys(request, path, bucket_name):
self.data["Action"] = "DeleteObject"
self._authenticate_and_authorize_s3_action()
return self._bucket_response_delete_keys(request, body, bucket_name)
self.data["Action"] = "PutObject"
self._authenticate_and_authorize_s3_action()
# POST to bucket-url should create file from form
if hasattr(request, "form"):
# Not HTTPretty
form = request.form
else:
# HTTPretty, build new form object
body = body.decode()
form = dict(parse_qsl(body))
key = form["key"]
if "file" in form:
f = form["file"]
else:
fobj = request.files["file"]
f = fobj.stream.read()
key = key.replace("${filename}", os.path.basename(fobj.filename))
if "success_action_redirect" in form:
redirect = form["success_action_redirect"]
parts = urlparse(redirect)
queryargs = parse_qs(parts.query)
queryargs["key"] = key
queryargs["bucket"] = bucket_name
redirect_queryargs = urlencode(queryargs, doseq=True)
newparts = (
parts.scheme,
parts.netloc,
parts.path,
parts.params,
redirect_queryargs,
parts.fragment,
)
fixed_redirect = urlunparse(newparts)
response_headers["Location"] = fixed_redirect
if "success_action_status" in form:
status_code = form["success_action_status"]
elif "success_action_redirect" in form:
status_code = 303
else:
status_code = 204
new_key = self.backend.set_object(bucket_name, key, f)
if form.get("acl"):
acl = get_canned_acl(form.get("acl"))
new_key.set_acl(acl)
# Metadata
metadata = metadata_from_headers(form)
new_key.set_metadata(metadata)
return status_code, response_headers, ""
@staticmethod
def _get_path(request):
if isinstance(request, HTTPrettyRequest):
path = request.path
else:
path = (
request.full_path
if hasattr(request, "full_path")
else path_url(request.url)
)
return path
def _bucket_response_delete_keys(self, request, body, bucket_name):
template = self.response_template(S3_DELETE_KEYS_RESPONSE)
body_dict = xmltodict.parse(body)
objects = body_dict["Delete"].get("Object", [])
if not isinstance(objects, list):
# We expect a list of objects, but when there is a single <Object> node xmltodict does not
# return a list.
objects = [objects]
if len(objects) == 0:
raise MalformedXML()
deleted_objects = []
error_names = []
for object_ in objects:
key_name = object_["Key"]
version_id = object_.get("VersionId", None)
self.backend.delete_object(
bucket_name, undo_clean_key_name(key_name), version_id=version_id
)
deleted_objects.append((key_name, version_id))
return (
200,
{},
template.render(deleted=deleted_objects, delete_errors=error_names),
)
def _handle_range_header(self, request, headers, response_content):
response_headers = {}
length = len(response_content)
last = length - 1
_, rspec = request.headers.get("range").split("=")
if "," in rspec:
raise NotImplementedError("Multiple range specifiers not supported")
def toint(i):
return int(i) if i else None
begin, end = map(toint, rspec.split("-"))
if begin is not None: # byte range
end = last if end is None else min(end, last)
elif end is not None: # suffix byte range
begin = length - min(end, length)
end = last
else:
return 400, response_headers, ""
if begin < 0 or end > last or begin > min(end, last):
raise InvalidRange(
actual_size=str(length), range_requested=request.headers.get("range")
)
response_headers["content-range"] = "bytes {0}-{1}/{2}".format(
begin, end, length
)
content = response_content[begin : end + 1]
response_headers["content-length"] = len(content)
return 206, response_headers, content
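    # Hypothetical illustration (not in upstream moto) of the range arithmetic
    # implemented above: on a 10-byte body, "bytes=2-5" selects bytes 2..5 and
    # yields "Content-Range: bytes 2-5/10"; the suffix form "bytes=-3" selects
    # the last three bytes.
    @staticmethod
    def _range_header_worked_example(content=b"0123456789"):
        length = len(content)
        begin, end = 2, 5  # "bytes=2-5"
        assert content[begin : end + 1] == b"2345"
        assert "bytes {0}-{1}/{2}".format(begin, end, length) == "bytes 2-5/10"
        suffix = 3  # "bytes=-3"
        assert content[length - suffix :] == b"789"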
def key_or_control_response(self, request, full_url, headers):
# Key and Control are lumped in because splitting out the regex is too much of a pain :/
self.method = request.method
self.path = self._get_path(request)
self.headers = request.headers
if "host" not in self.headers:
self.headers["host"] = urlparse(full_url).netloc
response_headers = {}
try:
# Is this an S3 control response?
if isinstance(request, AWSPreparedRequest) and "s3-control" in request.url:
response = self._control_response(request, full_url, headers)
else:
response = self._key_response(request, full_url, headers)
except S3ClientError as s3error:
response = s3error.code, {}, s3error.description
if isinstance(response, str):
status_code = 200
response_content = response
else:
status_code, response_headers, response_content = response
if (
status_code == 200
and "range" in request.headers
and request.headers["range"] != ""
):
try:
return self._handle_range_header(
request, response_headers, response_content
)
except S3ClientError as s3error:
return s3error.code, {}, s3error.description
return status_code, response_headers, response_content
def _control_response(self, request, full_url, headers):
parsed_url = urlparse(full_url)
query = parse_qs(parsed_url.query, keep_blank_values=True)
method = request.method
if hasattr(request, "body"):
# Boto
body = request.body
if hasattr(body, "read"):
body = body.read()
else:
# Flask server
body = request.data
if body is None:
body = b""
if method == "GET":
return self._control_response_get(request, query, headers)
elif method == "PUT":
return self._control_response_put(request, body, query, headers)
elif method == "DELETE":
return self._control_response_delete(request, query, headers)
else:
raise NotImplementedError(
"Method {0} has not been implemented in the S3 backend yet".format(
method
)
)
def _control_response_get(self, request, query, headers):
action = self.path.split("?")[0].split("/")[
-1
] # Gets the action out of the URL sans query params.
self._set_action("CONTROL", "GET", action)
self._authenticate_and_authorize_s3_action()
response_headers = {}
if "publicAccessBlock" in action:
public_block_config = self.backend.get_account_public_access_block(
headers["x-amz-account-id"]
)
template = self.response_template(S3_PUBLIC_ACCESS_BLOCK_CONFIGURATION)
return (
200,
response_headers,
template.render(public_block_config=public_block_config),
)
raise NotImplementedError(
"Method {0} has not been implemented in the S3 backend yet".format(action)
)
def _control_response_put(self, request, body, query, headers):
action = self.path.split("?")[0].split("/")[
-1
] # Gets the action out of the URL sans query params.
self._set_action("CONTROL", "PUT", action)
self._authenticate_and_authorize_s3_action()
response_headers = {}
if "publicAccessBlock" in action:
pab_config = self._parse_pab_config(body)
self.backend.put_account_public_access_block(
headers["x-amz-account-id"],
pab_config["PublicAccessBlockConfiguration"],
)
return 200, response_headers, ""
raise NotImplementedError(
"Method {0} has not been implemented in the S3 backend yet".format(action)
)
def _control_response_delete(self, request, query, headers):
action = self.path.split("?")[0].split("/")[
-1
] # Gets the action out of the URL sans query params.
self._set_action("CONTROL", "DELETE", action)
self._authenticate_and_authorize_s3_action()
response_headers = {}
if "publicAccessBlock" in action:
self.backend.delete_account_public_access_block(headers["x-amz-account-id"])
return 200, response_headers, ""
raise NotImplementedError(
"Method {0} has not been implemented in the S3 backend yet".format(action)
)
def _key_response(self, request, full_url, headers):
parsed_url = urlparse(full_url)
query = parse_qs(parsed_url.query, keep_blank_values=True)
method = request.method
key_name = self.parse_key_name(request, parsed_url.path)
bucket_name = self.parse_bucket_name_from_url(request, full_url)
# Because we patch the requests library the boto/boto3 API
# requests go through this method but so do
# `requests.get("https://bucket-name.s3.amazonaws.com/file-name")`
# Here we deny public access to private files by checking the
# ACL and checking for the mere presence of an Authorization
# header.
if "Authorization" not in request.headers:
if hasattr(request, "url"):
signed_url = "Signature=" in request.url
elif hasattr(request, "requestline"):
signed_url = "Signature=" in request.path
key = self.backend.get_object(bucket_name, key_name)
if key:
if not key.acl.public_read and not signed_url:
return 403, {}, ""
elif signed_url:
# coming in from requests.get(s3.generate_presigned_url())
if self._invalid_headers(request.url, dict(request.headers)):
return 403, {}, S3_INVALID_PRESIGNED_PARAMETERS
if hasattr(request, "body"):
# Boto
body = request.body
if hasattr(body, "read"):
body = body.read()
else:
# Flask server
body = request.data
# when the data is being passed as a file
if request.files and not body:
for _, value in request.files.items():
body = value.stream.read()
if body is None:
body = b""
if method == "GET":
return self._key_response_get(
bucket_name, query, key_name, headers=request.headers
)
elif method == "PUT":
return self._key_response_put(
request, body, bucket_name, query, key_name, headers
)
elif method == "HEAD":
return self._key_response_head(
bucket_name, query, key_name, headers=request.headers
)
elif method == "DELETE":
return self._key_response_delete(bucket_name, query, key_name)
elif method == "POST":
return self._key_response_post(request, body, bucket_name, query, key_name)
else:
raise NotImplementedError(
"Method {0} has not been implemented in the S3 backend yet".format(
method
)
)
def _key_response_get(self, bucket_name, query, key_name, headers):
self._set_action("KEY", "GET", query)
self._authenticate_and_authorize_s3_action()
response_headers = {}
if query.get("uploadId"):
upload_id = query["uploadId"][0]
parts = self.backend.list_multipart(bucket_name, upload_id)
template = self.response_template(S3_MULTIPART_LIST_RESPONSE)
return (
200,
response_headers,
template.render(
bucket_name=bucket_name,
key_name=key_name,
upload_id=upload_id,
count=len(parts),
parts=parts,
),
)
version_id = query.get("versionId", [None])[0]
if_modified_since = headers.get("If-Modified-Since", None)
if_match = headers.get("If-Match", None)
if_none_match = headers.get("If-None-Match", None)
if_unmodified_since = headers.get("If-Unmodified-Since", None)
key = self.backend.get_object(bucket_name, key_name, version_id=version_id)
if key is None and version_id is None:
raise MissingKey(key_name)
elif key is None:
raise MissingVersion()
if if_unmodified_since:
if_unmodified_since = str_to_rfc_1123_datetime(if_unmodified_since)
if key.last_modified > if_unmodified_since:
raise PreconditionFailed("If-Unmodified-Since")
if if_match and key.etag not in [if_match, '"{0}"'.format(if_match)]:
raise PreconditionFailed("If-Match")
if if_modified_since:
if_modified_since = str_to_rfc_1123_datetime(if_modified_since)
if key.last_modified < if_modified_since:
return 304, response_headers, "Not Modified"
if if_none_match and key.etag == if_none_match:
return 304, response_headers, "Not Modified"
if "acl" in query:
template = self.response_template(S3_OBJECT_ACL_RESPONSE)
return 200, response_headers, template.render(obj=key)
if "tagging" in query:
tags = self.backend.get_key_tags(key)["Tags"]
template = self.response_template(S3_OBJECT_TAGGING_RESPONSE)
return 200, response_headers, template.render(tags=tags)
response_headers.update(key.metadata)
response_headers.update(key.response_dict)
return 200, response_headers, key.value
def _key_response_put(self, request, body, bucket_name, query, key_name, headers):
self._set_action("KEY", "PUT", query)
self._authenticate_and_authorize_s3_action()
response_headers = {}
if query.get("uploadId") and query.get("partNumber"):
upload_id = query["uploadId"][0]
part_number = int(query["partNumber"][0])
if "x-amz-copy-source" in request.headers:
src = unquote(request.headers.get("x-amz-copy-source")).lstrip("/")
src_bucket, src_key = src.split("/", 1)
src_key, src_version_id = (
src_key.split("?versionId=")
if "?versionId=" in src_key
else (src_key, None)
)
src_range = request.headers.get("x-amz-copy-source-range", "").split(
"bytes="
)[-1]
try:
start_byte, end_byte = src_range.split("-")
start_byte, end_byte = int(start_byte), int(end_byte)
except ValueError:
start_byte, end_byte = None, None
if self.backend.get_object(
src_bucket, src_key, version_id=src_version_id
):
key = self.backend.copy_part(
bucket_name,
upload_id,
part_number,
src_bucket,
src_key,
src_version_id,
start_byte,
end_byte,
)
else:
return 404, response_headers, ""
template = self.response_template(S3_MULTIPART_UPLOAD_RESPONSE)
response = template.render(part=key)
else:
key = self.backend.set_part(bucket_name, upload_id, part_number, body)
response = ""
response_headers.update(key.response_dict)
return 200, response_headers, response
storage_class = request.headers.get("x-amz-storage-class", "STANDARD")
encryption = request.headers.get("x-amz-server-side-encryption", None)
kms_key_id = request.headers.get(
"x-amz-server-side-encryption-aws-kms-key-id", None
)
bucket_key_enabled = request.headers.get(
"x-amz-server-side-encryption-bucket-key-enabled", None
)
if bucket_key_enabled is not None:
bucket_key_enabled = str(bucket_key_enabled).lower()
acl = self._acl_from_headers(request.headers)
if acl is None:
acl = self.backend.get_bucket(bucket_name).acl
tagging = self._tagging_from_headers(request.headers)
if "acl" in query:
key = self.backend.get_object(bucket_name, key_name)
# TODO: Support the XML-based ACL format
if key is not None:
key.set_acl(acl)
return 200, response_headers, ""
else:
raise MissingKey(key_name)
if "tagging" in query:
if "versionId" in query:
version_id = query["versionId"][0]
else:
version_id = None
key = self.backend.get_object(bucket_name, key_name, version_id=version_id)
tagging = self._tagging_from_xml(body)
self.backend.set_key_tags(key, tagging, key_name)
return 200, response_headers, ""
if "x-amz-copy-source" in request.headers:
# Copy key
            # the copy source may be URL-quoted and can carry a ?versionId=<id>
            # query, so parse it before unquoting the key name
src_key = request.headers.get("x-amz-copy-source")
if isinstance(src_key, bytes):
src_key = src_key.decode("utf-8")
src_key_parsed = urlparse(src_key)
src_bucket, src_key = (
clean_key_name(src_key_parsed.path).lstrip("/").split("/", 1)
)
src_version_id = parse_qs(src_key_parsed.query).get("versionId", [None])[0]
key = self.backend.get_object(
src_bucket, src_key, version_id=src_version_id
)
if key is not None:
if key.storage_class in ["GLACIER", "DEEP_ARCHIVE"]:
if key.response_dict.get(
"x-amz-restore"
) is None or 'ongoing-request="true"' in key.response_dict.get(
"x-amz-restore"
):
raise ObjectNotInActiveTierError(key)
self.backend.copy_key(
src_bucket,
src_key,
bucket_name,
key_name,
storage=storage_class,
acl=acl,
src_version_id=src_version_id,
)
else:
return 404, response_headers, ""
new_key = self.backend.get_object(bucket_name, key_name)
mdirective = request.headers.get("x-amz-metadata-directive")
if mdirective is not None and mdirective == "REPLACE":
metadata = metadata_from_headers(request.headers)
new_key.set_metadata(metadata, replace=True)
tdirective = request.headers.get("x-amz-tagging-directive")
if tdirective == "REPLACE":
tagging = self._tagging_from_headers(request.headers)
self.backend.set_key_tags(new_key, tagging)
template = self.response_template(S3_OBJECT_COPY_RESPONSE)
response_headers.update(new_key.response_dict)
return 200, response_headers, template.render(key=new_key)
streaming_request = hasattr(request, "streaming") and request.streaming
closing_connection = headers.get("connection") == "close"
if closing_connection and streaming_request:
# Closing the connection of a streaming request. No more data
new_key = self.backend.get_object(bucket_name, key_name)
elif streaming_request:
# Streaming request, more data
new_key = self.backend.append_to_key(bucket_name, key_name, body)
else:
# Initial data
new_key = self.backend.set_object(
bucket_name,
key_name,
body,
storage=storage_class,
encryption=encryption,
kms_key_id=kms_key_id,
bucket_key_enabled=bucket_key_enabled,
)
request.streaming = True
metadata = metadata_from_headers(request.headers)
metadata.update(metadata_from_headers(query))
new_key.set_metadata(metadata)
new_key.set_acl(acl)
new_key.website_redirect_location = request.headers.get(
"x-amz-website-redirect-location"
)
self.backend.set_key_tags(new_key, tagging)
response_headers.update(new_key.response_dict)
return 200, response_headers, ""
def _key_response_head(self, bucket_name, query, key_name, headers):
response_headers = {}
version_id = query.get("versionId", [None])[0]
part_number = query.get("partNumber", [None])[0]
if part_number:
part_number = int(part_number)
if_modified_since = headers.get("If-Modified-Since", None)
if_match = headers.get("If-Match", None)
if_none_match = headers.get("If-None-Match", None)
if_unmodified_since = headers.get("If-Unmodified-Since", None)
key = self.backend.get_object(
bucket_name, key_name, version_id=version_id, part_number=part_number
)
if key:
response_headers.update(key.metadata)
response_headers.update(key.response_dict)
if if_unmodified_since:
if_unmodified_since = str_to_rfc_1123_datetime(if_unmodified_since)
if key.last_modified > if_unmodified_since:
return 412, response_headers, ""
if if_match and key.etag != if_match:
return 412, response_headers, ""
if if_modified_since:
if_modified_since = str_to_rfc_1123_datetime(if_modified_since)
if key.last_modified < if_modified_since:
return 304, response_headers, "Not Modified"
if if_none_match and key.etag == if_none_match:
return 304, response_headers, "Not Modified"
return 200, response_headers, ""
else:
return 404, response_headers, ""
def _acl_from_xml(self, xml):
parsed_xml = xmltodict.parse(xml)
if not parsed_xml.get("AccessControlPolicy"):
raise MalformedACLError()
# The owner is needed for some reason...
if not parsed_xml["AccessControlPolicy"].get("Owner"):
# TODO: Validate that the Owner is actually correct.
raise MalformedACLError()
# If empty, then no ACLs:
if parsed_xml["AccessControlPolicy"].get("AccessControlList") is None:
return []
if not parsed_xml["AccessControlPolicy"]["AccessControlList"].get("Grant"):
raise MalformedACLError()
permissions = ["READ", "WRITE", "READ_ACP", "WRITE_ACP", "FULL_CONTROL"]
if not isinstance(
parsed_xml["AccessControlPolicy"]["AccessControlList"]["Grant"], list
):
parsed_xml["AccessControlPolicy"]["AccessControlList"]["Grant"] = [
parsed_xml["AccessControlPolicy"]["AccessControlList"]["Grant"]
]
grants = self._get_grants_from_xml(
parsed_xml["AccessControlPolicy"]["AccessControlList"]["Grant"],
MalformedACLError,
permissions,
)
return FakeAcl(grants)
def _get_grants_from_xml(self, grant_list, exception_type, permissions):
grants = []
for grant in grant_list:
if grant.get("Permission", "") not in permissions:
raise exception_type()
if grant["Grantee"].get("@xsi:type", "") not in [
"CanonicalUser",
"AmazonCustomerByEmail",
"Group",
]:
raise exception_type()
# TODO: Verify that the proper grantee data is supplied based on the type.
grants.append(
FakeGrant(
[
FakeGrantee(
id=grant["Grantee"].get("ID", ""),
display_name=grant["Grantee"].get("DisplayName", ""),
uri=grant["Grantee"].get("URI", ""),
)
],
[grant["Permission"]],
)
)
return grants
def _acl_from_headers(self, headers):
canned_acl = headers.get("x-amz-acl", "")
if canned_acl:
return get_canned_acl(canned_acl)
grants = []
for header, value in headers.items():
header = header.lower()
if not header.startswith("x-amz-grant-"):
continue
permission = {
"read": "READ",
"write": "WRITE",
"read-acp": "READ_ACP",
"write-acp": "WRITE_ACP",
"full-control": "FULL_CONTROL",
}[header[len("x-amz-grant-") :]]
grantees = []
for key_and_value in value.split(","):
key, value = re.match(
'([^=]+)="?([^"]+)"?', key_and_value.strip()
).groups()
if key.lower() == "id":
grantees.append(FakeGrantee(id=value))
else:
grantees.append(FakeGrantee(uri=value))
grants.append(FakeGrant(grantees, [permission]))
if grants:
return FakeAcl(grants)
else:
return None
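# Illustrative example (assumed request shape, not taken from the moto source):
# a header such as
#   x-amz-grant-read: id="111122223333", uri="http://acs.amazonaws.com/groups/global/AllUsers"
# is split on commas above, so it yields two FakeGrantee entries that share the
# READ permission in the resulting FakeAcl.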
def _tagging_from_headers(self, headers):
tags = {}
if headers.get("x-amz-tagging"):
parsed_header = parse_qs(headers["x-amz-tagging"], keep_blank_values=True)
for tag in parsed_header.items():
tags[tag[0]] = tag[1][0]
return tags
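# Illustrative example (assumed header value): an object PUT that carries
#   x-amz-tagging: project=moto&stage=test
# is parsed by parse_qs into {"project": ["moto"], "stage": ["test"]} and then
# flattened above into {"project": "moto", "stage": "test"}.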
def _tagging_from_xml(self, xml):
parsed_xml = xmltodict.parse(xml, force_list={"Tag": True})
tags = {}
for tag in parsed_xml["Tagging"]["TagSet"]["Tag"]:
tags[tag["Key"]] = tag["Value"]
return tags
def _bucket_tagging_from_xml(self, xml):
parsed_xml = xmltodict.parse(xml)
tags = {}
# Optional if no tags are being sent:
if parsed_xml["Tagging"].get("TagSet"):
# If there is only 1 tag, then it's not a list:
if not isinstance(parsed_xml["Tagging"]["TagSet"]["Tag"], list):
tags[parsed_xml["Tagging"]["TagSet"]["Tag"]["Key"]] = parsed_xml[
"Tagging"
]["TagSet"]["Tag"]["Value"]
else:
for tag in parsed_xml["Tagging"]["TagSet"]["Tag"]:
if tag["Key"] in tags:
raise DuplicateTagKeys()
tags[tag["Key"]] = tag["Value"]
# Verify that "aws:" is not in the tags. If so, then this is a problem:
for key, _ in tags.items():
if key.startswith("aws:"):
raise NoSystemTags()
return tags
def _cors_from_xml(self, xml):
parsed_xml = xmltodict.parse(xml)
if isinstance(parsed_xml["CORSConfiguration"]["CORSRule"], list):
return [cors for cors in parsed_xml["CORSConfiguration"]["CORSRule"]]
return [parsed_xml["CORSConfiguration"]["CORSRule"]]
def _encryption_config_from_xml(self, xml):
parsed_xml = xmltodict.parse(xml)
if (
not parsed_xml["ServerSideEncryptionConfiguration"].get("Rule")
or not parsed_xml["ServerSideEncryptionConfiguration"]["Rule"].get(
"ApplyServerSideEncryptionByDefault"
)
or not parsed_xml["ServerSideEncryptionConfiguration"]["Rule"][
"ApplyServerSideEncryptionByDefault"
].get("SSEAlgorithm")
):
raise MalformedXML()
return [parsed_xml["ServerSideEncryptionConfiguration"]]
def _logging_from_xml(self, xml):
parsed_xml = xmltodict.parse(xml)
if not parsed_xml["BucketLoggingStatus"].get("LoggingEnabled"):
return {}
if not parsed_xml["BucketLoggingStatus"]["LoggingEnabled"].get("TargetBucket"):
raise MalformedXML()
if not parsed_xml["BucketLoggingStatus"]["LoggingEnabled"].get("TargetPrefix"):
parsed_xml["BucketLoggingStatus"]["LoggingEnabled"]["TargetPrefix"] = ""
# Get the ACLs:
if parsed_xml["BucketLoggingStatus"]["LoggingEnabled"].get("TargetGrants"):
permissions = ["READ", "WRITE", "FULL_CONTROL"]
if not isinstance(
parsed_xml["BucketLoggingStatus"]["LoggingEnabled"]["TargetGrants"][
"Grant"
],
list,
):
target_grants = self._get_grants_from_xml(
[
parsed_xml["BucketLoggingStatus"]["LoggingEnabled"][
"TargetGrants"
]["Grant"]
],
MalformedXML,
permissions,
)
else:
target_grants = self._get_grants_from_xml(
parsed_xml["BucketLoggingStatus"]["LoggingEnabled"]["TargetGrants"][
"Grant"
],
MalformedXML,
permissions,
)
parsed_xml["BucketLoggingStatus"]["LoggingEnabled"][
"TargetGrants"
] = target_grants
return parsed_xml["BucketLoggingStatus"]["LoggingEnabled"]
def _notification_config_from_xml(self, xml):
parsed_xml = xmltodict.parse(xml)
if not len(parsed_xml["NotificationConfiguration"]):
return {}
# The types of notifications, and their required fields (apparently lambda is categorized by the API as
# "CloudFunction"):
notification_fields = [
("Topic", "sns"),
("Queue", "sqs"),
("CloudFunction", "lambda"),
]
event_names = [
"s3:ReducedRedundancyLostObject",
"s3:ObjectCreated:*",
"s3:ObjectCreated:Put",
"s3:ObjectCreated:Post",
"s3:ObjectCreated:Copy",
"s3:ObjectCreated:CompleteMultipartUpload",
"s3:ObjectRemoved:*",
"s3:ObjectRemoved:Delete",
"s3:ObjectRemoved:DeleteMarkerCreated",
]
found_notifications = (
0 # Tripwire -- if this is not ever set, then there were no notifications
)
for name, arn_string in notification_fields:
# 1st verify that the proper notification configuration has been passed in (with an ARN that is close
# to being correct -- nothing too complex in the ARN logic):
the_notification = parsed_xml["NotificationConfiguration"].get(
"{}Configuration".format(name)
)
if the_notification:
found_notifications += 1
if not isinstance(the_notification, list):
the_notification = parsed_xml["NotificationConfiguration"][
"{}Configuration".format(name)
] = [the_notification]
for n in the_notification:
if not n[name].startswith("arn:aws:{}:".format(arn_string)):
raise InvalidNotificationARN()
# 2nd, verify that the Events list is correct:
assert n["Event"]
if not isinstance(n["Event"], list):
n["Event"] = [n["Event"]]
for event in n["Event"]:
if event not in event_names:
raise InvalidNotificationEvent()
# Parse out the filters:
if n.get("Filter"):
# Error if S3Key is blank:
if not n["Filter"]["S3Key"]:
raise KeyError()
if not isinstance(n["Filter"]["S3Key"]["FilterRule"], list):
n["Filter"]["S3Key"]["FilterRule"] = [
n["Filter"]["S3Key"]["FilterRule"]
]
for filter_rule in n["Filter"]["S3Key"]["FilterRule"]:
assert filter_rule["Name"] in ["suffix", "prefix"]
assert filter_rule["Value"]
if not found_notifications:
return {}
return parsed_xml["NotificationConfiguration"]
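# Illustrative example (assumed configuration, not from the moto tests): a
# TopicConfiguration whose <Topic> is arn:aws:sns:us-east-1:123456789012:mytopic
# passes the prefix check above, whereas an ARN that does not start with
# "arn:aws:sns:" raises InvalidNotificationARN, and an unknown <Event> value
# raises InvalidNotificationEvent.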
def _accelerate_config_from_xml(self, xml):
parsed_xml = xmltodict.parse(xml)
config = parsed_xml["AccelerateConfiguration"]
return config["Status"]
def _key_response_delete(self, bucket_name, query, key_name):
self._set_action("KEY", "DELETE", query)
self._authenticate_and_authorize_s3_action()
if query.get("uploadId"):
upload_id = query["uploadId"][0]
self.backend.cancel_multipart(bucket_name, upload_id)
return 204, {}, ""
version_id = query.get("versionId", [None])[0]
if "tagging" in query:
self.backend.delete_object_tagging(
bucket_name, key_name, version_id=version_id
)
template = self.response_template(S3_DELETE_KEY_TAGGING_RESPONSE)
return 204, {}, template.render(version_id=version_id)
success, response_meta = self.backend.delete_object(
bucket_name, key_name, version_id=version_id
)
response_headers = {}
if response_meta is not None:
for k in response_meta:
response_headers["x-amz-{}".format(k)] = response_meta[k]
return 204, response_headers, ""
def _complete_multipart_body(self, body):
ps = minidom.parseString(body).getElementsByTagName("Part")
prev = 0
for p in ps:
pn = int(p.getElementsByTagName("PartNumber")[0].firstChild.wholeText)
if pn <= prev:
raise InvalidPartOrder()
yield (pn, p.getElementsByTagName("ETag")[0].firstChild.wholeText)
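# Illustrative example (assumed request body): _complete_multipart_body parses
# the standard CompleteMultipartUpload payload, e.g.
#   <CompleteMultipartUpload>
#     <Part><PartNumber>1</PartNumber><ETag>"etag-1"</ETag></Part>
#     <Part><PartNumber>2</PartNumber><ETag>"etag-2"</ETag></Part>
#   </CompleteMultipartUpload>
# yielding (part_number, etag) pairs and raising InvalidPartOrder when a
# PartNumber fails the pn <= prev check above.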
def _key_response_post(self, request, body, bucket_name, query, key_name):
self._set_action("KEY", "POST", query)
self._authenticate_and_authorize_s3_action()
if body == b"" and "uploads" in query:
metadata = metadata_from_headers(request.headers)
multipart = FakeMultipart(key_name, metadata)
multipart.storage = request.headers.get("x-amz-storage-class", "STANDARD")
bucket = self.backend.get_bucket(bucket_name)
bucket.multiparts[multipart.id] = multipart
template = self.response_template(S3_MULTIPART_INITIATE_RESPONSE)
response = template.render(
bucket_name=bucket_name, key_name=key_name, upload_id=multipart.id
)
return 200, {}, response
if query.get("uploadId"):
body = self._complete_multipart_body(body)
multipart_id = query["uploadId"][0]
bucket = self.backend.get_bucket(bucket_name)
multipart = bucket.multiparts[multipart_id]
value, etag = multipart.complete(body)
if value is None:
return 400, {}, ""
del bucket.multiparts[multipart_id]
key = self.backend.set_object(
bucket_name,
multipart.key_name,
value,
storage=multipart.storage,
etag=etag,
multipart=multipart,
)
key.set_metadata(multipart.metadata)
template = self.response_template(S3_MULTIPART_COMPLETE_RESPONSE)
headers = {}
if key.version_id:
headers["x-amz-version-id"] = key.version_id
return (
200,
headers,
template.render(
bucket_name=bucket_name, key_name=key.name, etag=key.etag
),
)
elif "restore" in query:
es = minidom.parseString(body).getElementsByTagName("Days")
days = es[0].childNodes[0].wholeText
key = self.backend.get_object(bucket_name, key_name)
r = 202
if key.expiry_date is not None:
r = 200
key.restore(int(days))
return r, {}, ""
else:
raise NotImplementedError(
"Method POST had only been implemented for multipart uploads and restore operations, so far"
)
def _invalid_headers(self, url, headers):
"""
Verify whether the provided metadata in the URL is also present in the headers
:param url: .../file.txt&content-type=app%2Fjson&Signature=..
:param headers: Content-Type=app/json
:return: True or False
"""
metadata_to_check = {
"content-disposition": "Content-Disposition",
"content-encoding": "Content-Encoding",
"content-language": "Content-Language",
"content-length": "Content-Length",
"content-md5": "Content-MD5",
"content-type": "Content-Type",
}
for url_key, header_key in metadata_to_check.items():
metadata_in_url = re.search(url_key + "=(.+?)(&.+$|$)", url)
if metadata_in_url:
url_value = unquote(metadata_in_url.group(1))
if header_key not in headers or (url_value != headers[header_key]):
return True
return False
S3ResponseInstance = ResponseObject(s3_backend)
S3_ALL_BUCKETS = """<ListAllMyBucketsResult xmlns="http://s3.amazonaws.com/doc/2006-03-01">
<Owner>
<ID>bcaf1ffd86f41161ca5fb16fd081034f</ID>
<DisplayName>webfile</DisplayName>
</Owner>
<Buckets>
{% for bucket in buckets %}
<Bucket>
<Name>{{ bucket.name }}</Name>
<CreationDate>{{ bucket.creation_date_ISO8601 }}</CreationDate>
</Bucket>
{% endfor %}
</Buckets>
</ListAllMyBucketsResult>"""
S3_BUCKET_GET_RESPONSE = """<?xml version="1.0" encoding="UTF-8"?>
<ListBucketResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
<Name>{{ bucket.name }}</Name>
{% if prefix != None %}
<Prefix>{{ prefix }}</Prefix>
{% endif %}
<MaxKeys>{{ max_keys }}</MaxKeys>
{% if delimiter %}
<Delimiter>{{ delimiter }}</Delimiter>
{% endif %}
<IsTruncated>{{ is_truncated }}</IsTruncated>
{% if next_marker %}
<NextMarker>{{ next_marker }}</NextMarker>
{% endif %}
{% for key in result_keys %}
<Contents>
<Key>{{ key.name }}</Key>
<LastModified>{{ key.last_modified_ISO8601 }}</LastModified>
<ETag>{{ key.etag }}</ETag>
<Size>{{ key.size }}</Size>
<StorageClass>{{ key.storage_class }}</StorageClass>
<Owner>
<ID>75aa57f09aa0c8caeab4f8c24e99d10f8e7faeebf76c078efc7c6caea54ba06a</ID>
<DisplayName>webfile</DisplayName>
</Owner>
</Contents>
{% endfor %}
{% if delimiter %}
{% for folder in result_folders %}
<CommonPrefixes>
<Prefix>{{ folder }}</Prefix>
</CommonPrefixes>
{% endfor %}
{% endif %}
</ListBucketResult>"""
S3_BUCKET_GET_RESPONSE_V2 = """<?xml version="1.0" encoding="UTF-8"?>
<ListBucketResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
<Name>{{ bucket.name }}</Name>
{% if prefix != None %}
<Prefix>{{ prefix }}</Prefix>
{% endif %}
<MaxKeys>{{ max_keys }}</MaxKeys>
<KeyCount>{{ key_count }}</KeyCount>
{% if delimiter %}
<Delimiter>{{ delimiter }}</Delimiter>
{% endif %}
<IsTruncated>{{ is_truncated }}</IsTruncated>
{% if next_continuation_token %}
<NextContinuationToken>{{ next_continuation_token }}</NextContinuationToken>
{% endif %}
{% if start_after %}
<StartAfter>{{ start_after }}</StartAfter>
{% endif %}
{% for key in result_keys %}
<Contents>
<Key>{{ key.name }}</Key>
<LastModified>{{ key.last_modified_ISO8601 }}</LastModified>
<ETag>{{ key.etag }}</ETag>
<Size>{{ key.size }}</Size>
<StorageClass>{{ key.storage_class }}</StorageClass>
{% if fetch_owner %}
<Owner>
<ID>75aa57f09aa0c8caeab4f8c24e99d10f8e7faeebf76c078efc7c6caea54ba06a</ID>
<DisplayName>webfile</DisplayName>
</Owner>
{% endif %}
</Contents>
{% endfor %}
{% if delimiter %}
{% for folder in result_folders %}
<CommonPrefixes>
<Prefix>{{ folder }}</Prefix>
</CommonPrefixes>
{% endfor %}
{% endif %}
</ListBucketResult>"""
S3_BUCKET_CREATE_RESPONSE = """<CreateBucketResponse xmlns="http://s3.amazonaws.com/doc/2006-03-01">
<CreateBucketResponse>
<Bucket>{{ bucket.name }}</Bucket>
</CreateBucketResponse>
</CreateBucketResponse>"""
S3_DELETE_BUCKET_SUCCESS = """<DeleteBucketResponse xmlns="http://s3.amazonaws.com/doc/2006-03-01">
<DeleteBucketResponse>
<Code>204</Code>
<Description>No Content</Description>
</DeleteBucketResponse>
</DeleteBucketResponse>"""
S3_DELETE_BUCKET_WITH_ITEMS_ERROR = """<?xml version="1.0" encoding="UTF-8"?>
<Error><Code>BucketNotEmpty</Code>
<Message>The bucket you tried to delete is not empty</Message>
<BucketName>{{ bucket.name }}</BucketName>
<RequestId>asdfasdfsdafds</RequestId>
<HostId>sdfgdsfgdsfgdfsdsfgdfs</HostId>
</Error>"""
S3_BUCKET_LOCATION = """<?xml version="1.0" encoding="UTF-8"?>
<LocationConstraint xmlns="http://s3.amazonaws.com/doc/2006-03-01/">{% if location != None %}{{ location }}{% endif %}</LocationConstraint>"""
S3_BUCKET_LIFECYCLE_CONFIGURATION = """<?xml version="1.0" encoding="UTF-8"?>
<LifecycleConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
{% for rule in rules %}
<Rule>
<ID>{{ rule.id }}</ID>
{% if rule.filter %}
<Filter>
{% if rule.filter.prefix != None %}
<Prefix>{{ rule.filter.prefix }}</Prefix>
{% endif %}
{% if rule.filter.tag_key %}
<Tag>
<Key>{{ rule.filter.tag_key }}</Key>
<Value>{{ rule.filter.tag_value }}</Value>
</Tag>
{% endif %}
{% if rule.filter.and_filter %}
<And>
{% if rule.filter.and_filter.prefix != None %}
<Prefix>{{ rule.filter.and_filter.prefix }}</Prefix>
{% endif %}
{% for key, value in rule.filter.and_filter.tags.items() %}
<Tag>
<Key>{{ key }}</Key>
<Value>{{ value }}</Value>
</Tag>
{% endfor %}
</And>
{% endif %}
</Filter>
{% else %}
{% if rule.prefix != None %}
<Prefix>{{ rule.prefix }}</Prefix>
{% endif %}
{% endif %}
<Status>{{ rule.status }}</Status>
{% if rule.storage_class %}
<Transition>
{% if rule.transition_days %}
<Days>{{ rule.transition_days }}</Days>
{% endif %}
{% if rule.transition_date %}
<Date>{{ rule.transition_date }}</Date>
{% endif %}
<StorageClass>{{ rule.storage_class }}</StorageClass>
</Transition>
{% endif %}
{% if rule.expiration_days or rule.expiration_date or rule.expired_object_delete_marker %}
<Expiration>
{% if rule.expiration_days %}
<Days>{{ rule.expiration_days }}</Days>
{% endif %}
{% if rule.expiration_date %}
<Date>{{ rule.expiration_date }}</Date>
{% endif %}
{% if rule.expired_object_delete_marker %}
<ExpiredObjectDeleteMarker>{{ rule.expired_object_delete_marker }}</ExpiredObjectDeleteMarker>
{% endif %}
</Expiration>
{% endif %}
{% if rule.nvt_noncurrent_days and rule.nvt_storage_class %}
<NoncurrentVersionTransition>
<NoncurrentDays>{{ rule.nvt_noncurrent_days }}</NoncurrentDays>
<StorageClass>{{ rule.nvt_storage_class }}</StorageClass>
</NoncurrentVersionTransition>
{% endif %}
{% if rule.nve_noncurrent_days %}
<NoncurrentVersionExpiration>
<NoncurrentDays>{{ rule.nve_noncurrent_days }}</NoncurrentDays>
</NoncurrentVersionExpiration>
{% endif %}
{% if rule.aimu_days %}
<AbortIncompleteMultipartUpload>
<DaysAfterInitiation>{{ rule.aimu_days }}</DaysAfterInitiation>
</AbortIncompleteMultipartUpload>
{% endif %}
</Rule>
{% endfor %}
</LifecycleConfiguration>
"""
S3_BUCKET_VERSIONING = """<?xml version="1.0" encoding="UTF-8"?>
<VersioningConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
<Status>{{ bucket_versioning_status }}</Status>
</VersioningConfiguration>
"""
S3_BUCKET_GET_VERSIONING = """<?xml version="1.0" encoding="UTF-8"?>
{% if status is none %}
<VersioningConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/"/>
{% else %}
<VersioningConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
<Status>{{ status }}</Status>
</VersioningConfiguration>
{% endif %}
"""
S3_BUCKET_GET_VERSIONS = """<?xml version="1.0" encoding="UTF-8"?>
<ListVersionsResult xmlns="http://s3.amazonaws.com/doc/2006-03-01">
<Name>{{ bucket.name }}</Name>
{% if prefix != None %}
<Prefix>{{ prefix }}</Prefix>
{% endif %}
<KeyMarker>{{ key_marker }}</KeyMarker>
<MaxKeys>{{ max_keys }}</MaxKeys>
<IsTruncated>{{ is_truncated }}</IsTruncated>
{% for key in key_list %}
<Version>
<Key>{{ key.name }}</Key>
<VersionId>{% if key.version_id is none %}null{% else %}{{ key.version_id }}{% endif %}</VersionId>
<IsLatest>{% if latest_versions[key.name] == key.version_id %}true{% else %}false{% endif %}</IsLatest>
<LastModified>{{ key.last_modified_ISO8601 }}</LastModified>
<ETag>{{ key.etag }}</ETag>
<Size>{{ key.size }}</Size>
<StorageClass>{{ key.storage_class }}</StorageClass>
<Owner>
<ID>75aa57f09aa0c8caeab4f8c24e99d10f8e7faeebf76c078efc7c6caea54ba06a</ID>
<DisplayName>webfile</DisplayName>
</Owner>
</Version>
{% endfor %}
{% for marker in delete_marker_list %}
<DeleteMarker>
<Key>{{ marker.name }}</Key>
<VersionId>{{ marker.version_id }}</VersionId>
<IsLatest>{% if latest_versions[marker.name] == marker.version_id %}true{% else %}false{% endif %}</IsLatest>
<LastModified>{{ marker.last_modified_ISO8601 }}</LastModified>
<Owner>
<ID>75aa57f09aa0c8caeab4f8c24e99d10f8e7faeebf76c078efc7c6caea54ba06a</ID>
<DisplayName>webfile</DisplayName>
</Owner>
</DeleteMarker>
{% endfor %}
</ListVersionsResult>
"""
S3_DELETE_KEYS_RESPONSE = """<?xml version="1.0" encoding="UTF-8"?>
<DeleteResult xmlns="http://s3.amazonaws.com/doc/2006-03-01">
{% for k, v in deleted %}
<Deleted>
<Key>{{k}}</Key>
{% if v %}<VersionId>{{v}}</VersionId>{% endif %}
</Deleted>
{% endfor %}
{% for k in delete_errors %}
<Error>
<Key>{{k}}</Key>
</Error>
{% endfor %}
</DeleteResult>"""
S3_DELETE_KEY_TAGGING_RESPONSE = """<?xml version="1.0" encoding="UTF-8"?>
<DeleteObjectTaggingResult xmlns="http://s3.amazonaws.com/doc/2006-03-01">
<VersionId>{{version_id}}</VersionId>
</DeleteObjectTaggingResult>
"""
S3_OBJECT_ACL_RESPONSE = """<?xml version="1.0" encoding="UTF-8"?>
<AccessControlPolicy xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
<Owner>
<ID>75aa57f09aa0c8caeab4f8c24e99d10f8e7faeebf76c078efc7c6caea54ba06a</ID>
<DisplayName>webfile</DisplayName>
</Owner>
<AccessControlList>
{% for grant in obj.acl.grants %}
<Grant>
{% for grantee in grant.grantees %}
<Grantee xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:type="{{ grantee.type }}">
{% if grantee.uri %}
<URI>{{ grantee.uri }}</URI>
{% endif %}
{% if grantee.id %}
<ID>{{ grantee.id }}</ID>
{% endif %}
{% if grantee.display_name %}
<DisplayName>{{ grantee.display_name }}</DisplayName>
{% endif %}
</Grantee>
{% endfor %}
{% for permission in grant.permissions %}
<Permission>{{ permission }}</Permission>
{% endfor %}
</Grant>
{% endfor %}
</AccessControlList>
</AccessControlPolicy>"""
S3_OBJECT_TAGGING_RESPONSE = """\
<?xml version="1.0" encoding="UTF-8"?>
<Tagging xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
<TagSet>
{% for tag in tags %}
<Tag>
<Key>{{ tag.Key }}</Key>
<Value>{{ tag.Value }}</Value>
</Tag>
{% endfor %}
</TagSet>
</Tagging>"""
S3_BUCKET_CORS_RESPONSE = """<?xml version="1.0" encoding="UTF-8"?>
<CORSConfiguration>
{% for cors in cors %}
<CORSRule>
{% for origin in cors.allowed_origins %}
<AllowedOrigin>{{ origin }}</AllowedOrigin>
{% endfor %}
{% for method in cors.allowed_methods %}
<AllowedMethod>{{ method }}</AllowedMethod>
{% endfor %}
{% if cors.allowed_headers is not none %}
{% for header in cors.allowed_headers %}
<AllowedHeader>{{ header }}</AllowedHeader>
{% endfor %}
{% endif %}
{% if cors.exposed_headers is not none %}
{% for header in cors.exposed_headers %}
<ExposedHeader>{{ header }}</ExposedHeader>
{% endfor %}
{% endif %}
{% if cors.max_age_seconds is not none %}
<MaxAgeSeconds>{{ cors.max_age_seconds }}</MaxAgeSeconds>
{% endif %}
</CORSRule>
{% endfor %}
</CORSConfiguration>
"""
S3_OBJECT_COPY_RESPONSE = """\
<CopyObjectResult xmlns="http://doc.s3.amazonaws.com/2006-03-01">
<ETag>{{ key.etag }}</ETag>
<LastModified>{{ key.last_modified_ISO8601 }}</LastModified>
</CopyObjectResult>"""
S3_MULTIPART_INITIATE_RESPONSE = """<?xml version="1.0" encoding="UTF-8"?>
<InitiateMultipartUploadResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
<Bucket>{{ bucket_name }}</Bucket>
<Key>{{ key_name }}</Key>
<UploadId>{{ upload_id }}</UploadId>
</InitiateMultipartUploadResult>"""
S3_MULTIPART_UPLOAD_RESPONSE = """<?xml version="1.0" encoding="UTF-8"?>
<CopyPartResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
<LastModified>{{ part.last_modified_ISO8601 }}</LastModified>
<ETag>{{ part.etag }}</ETag>
</CopyPartResult>"""
S3_MULTIPART_LIST_RESPONSE = """<?xml version="1.0" encoding="UTF-8"?>
<ListPartsResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
<Bucket>{{ bucket_name }}</Bucket>
<Key>{{ key_name }}</Key>
<UploadId>{{ upload_id }}</UploadId>
<StorageClass>STANDARD</StorageClass>
<Initiator>
<ID>75aa57f09aa0c8caeab4f8c24e99d10f8e7faeebf76c078efc7c6caea54ba06a</ID>
<DisplayName>webfile</DisplayName>
</Initiator>
<Owner>
<ID>75aa57f09aa0c8caeab4f8c24e99d10f8e7faeebf76c078efc7c6caea54ba06a</ID>
<DisplayName>webfile</DisplayName>
</Owner>
<PartNumberMarker>1</PartNumberMarker>
<NextPartNumberMarker>{{ count }}</NextPartNumberMarker>
<MaxParts>{{ count }}</MaxParts>
<IsTruncated>false</IsTruncated>
{% for part in parts %}
<Part>
<PartNumber>{{ part.name }}</PartNumber>
<LastModified>{{ part.last_modified_ISO8601 }}</LastModified>
<ETag>{{ part.etag }}</ETag>
<Size>{{ part.size }}</Size>
</Part>
{% endfor %}
</ListPartsResult>"""
S3_MULTIPART_COMPLETE_RESPONSE = """<?xml version="1.0" encoding="UTF-8"?>
<CompleteMultipartUploadResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
<Location>http://{{ bucket_name }}.s3.amazonaws.com/{{ key_name }}</Location>
<Bucket>{{ bucket_name }}</Bucket>
<Key>{{ key_name }}</Key>
<ETag>{{ etag }}</ETag>
</CompleteMultipartUploadResult>
"""
S3_ALL_MULTIPARTS = (
"""<?xml version="1.0" encoding="UTF-8"?>
<ListMultipartUploadsResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
<Bucket>{{ bucket_name }}</Bucket>
<KeyMarker></KeyMarker>
<UploadIdMarker></UploadIdMarker>
<MaxUploads>1000</MaxUploads>
<IsTruncated>false</IsTruncated>
{% for upload in uploads %}
<Upload>
<Key>{{ upload.key_name }}</Key>
<UploadId>{{ upload.id }}</UploadId>
<Initiator>
<ID>arn:aws:iam::"""
+ ACCOUNT_ID
+ """:user/user1-11111a31-17b5-4fb7-9df5-b111111f13de</ID>
<DisplayName>user1-11111a31-17b5-4fb7-9df5-b111111f13de</DisplayName>
</Initiator>
<Owner>
<ID>75aa57f09aa0c8caeab4f8c24e99d10f8e7faeebf76c078efc7c6caea54ba06a</ID>
<DisplayName>webfile</DisplayName>
</Owner>
<StorageClass>STANDARD</StorageClass>
<Initiated>2010-11-10T20:48:33.000Z</Initiated>
</Upload>
{% endfor %}
</ListMultipartUploadsResult>
"""
)
S3_NO_POLICY = """<?xml version="1.0" encoding="UTF-8"?>
<Error>
<Code>NoSuchBucketPolicy</Code>
<Message>The bucket policy does not exist</Message>
<BucketName>{{ bucket_name }}</BucketName>
<RequestId>0D68A23BB2E2215B</RequestId>
<HostId>9Gjjt1m+cjU4OPvX9O9/8RuvnG41MRb/18Oux2o5H5MY7ISNTlXN+Dz9IG62/ILVxhAGI0qyPfg=</HostId>
</Error>
"""
S3_NO_LIFECYCLE = """<?xml version="1.0" encoding="UTF-8"?>
<Error>
<Code>NoSuchLifecycleConfiguration</Code>
<Message>The lifecycle configuration does not exist</Message>
<BucketName>{{ bucket_name }}</BucketName>
<RequestId>44425877V1D0A2F9</RequestId>
<HostId>9Gjjt1m+cjU4OPvX9O9/8RuvnG41MRb/18Oux2o5H5MY7ISNTlXN+Dz9IG62/ILVxhAGI0qyPfg=</HostId>
</Error>
"""
S3_NO_BUCKET_TAGGING = """<?xml version="1.0" encoding="UTF-8"?>
<Error>
<Code>NoSuchTagSet</Code>
<Message>The TagSet does not exist</Message>
<BucketName>{{ bucket_name }}</BucketName>
<RequestId>44425877V1D0A2F9</RequestId>
<HostId>9Gjjt1m+cjU4OPvX9O9/8RuvnG41MRb/18Oux2o5H5MY7ISNTlXN+Dz9IG62/ILVxhAGI0qyPfg=</HostId>
</Error>
"""
S3_NO_BUCKET_WEBSITE_CONFIG = """<?xml version="1.0" encoding="UTF-8"?>
<Error>
<Code>NoSuchWebsiteConfiguration</Code>
<Message>The specified bucket does not have a website configuration</Message>
<BucketName>{{ bucket_name }}</BucketName>
<RequestId>44425877V1D0A2F9</RequestId>
<HostId>9Gjjt1m+cjU4OPvX9O9/8RuvnG41MRb/18Oux2o5H5MY7ISNTlXN+Dz9IG62/ILVxhAGI0qyPfg=</HostId>
</Error>
"""
S3_INVALID_CORS_REQUEST = """<?xml version="1.0" encoding="UTF-8"?>
<Error>
<Code>NoSuchWebsiteConfiguration</Code>
<Message>The specified bucket does not have a website configuration</Message>
<BucketName>{{ bucket_name }}</BucketName>
<RequestId>44425877V1D0A2F9</RequestId>
<HostId>9Gjjt1m+cjU4OPvX9O9/8RuvnG41MRb/18Oux2o5H5MY7ISNTlXN+Dz9IG62/ILVxhAGI0qyPfg=</HostId>
</Error>
"""
S3_NO_CORS_CONFIG = """<?xml version="1.0" encoding="UTF-8"?>
<Error>
<Code>NoSuchCORSConfiguration</Code>
<Message>The CORS configuration does not exist</Message>
<BucketName>{{ bucket_name }}</BucketName>
<RequestId>44425877V1D0A2F9</RequestId>
<HostId>9Gjjt1m+cjU4OPvX9O9/8RuvnG41MRb/18Oux2o5H5MY7ISNTlXN+Dz9IG62/ILVxhAGI0qyPfg=</HostId>
</Error>
"""
S3_LOGGING_CONFIG = """<?xml version="1.0" encoding="UTF-8"?>
<BucketLoggingStatus xmlns="http://doc.s3.amazonaws.com/2006-03-01">
<LoggingEnabled>
<TargetBucket>{{ logging["TargetBucket"] }}</TargetBucket>
<TargetPrefix>{{ logging["TargetPrefix"] }}</TargetPrefix>
{% if logging.get("TargetGrants") %}
<TargetGrants>
{% for grant in logging["TargetGrants"] %}
<Grant>
<Grantee xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:type="{{ grant.grantees[0].type }}">
{% if grant.grantees[0].uri %}
<URI>{{ grant.grantees[0].uri }}</URI>
{% endif %}
{% if grant.grantees[0].id %}
<ID>{{ grant.grantees[0].id }}</ID>
{% endif %}
{% if grant.grantees[0].display_name %}
<DisplayName>{{ grant.grantees[0].display_name }}</DisplayName>
{% endif %}
</Grantee>
<Permission>{{ grant.permissions[0] }}</Permission>
</Grant>
{% endfor %}
</TargetGrants>
{% endif %}
</LoggingEnabled>
</BucketLoggingStatus>
"""
S3_NO_LOGGING_CONFIG = """<?xml version="1.0" encoding="UTF-8"?>
<BucketLoggingStatus xmlns="http://doc.s3.amazonaws.com/2006-03-01" />
"""
S3_ENCRYPTION_CONFIG = """<?xml version="1.0" encoding="UTF-8"?>
<ServerSideEncryptionConfiguration xmlns="http://doc.s3.amazonaws.com/2006-03-01">
{% for entry in encryption %}
<Rule>
<ApplyServerSideEncryptionByDefault>
<SSEAlgorithm>{{ entry["Rule"]["ApplyServerSideEncryptionByDefault"]["SSEAlgorithm"] }}</SSEAlgorithm>
{% if entry["Rule"]["ApplyServerSideEncryptionByDefault"].get("KMSMasterKeyID") %}
<KMSMasterKeyID>{{ entry["Rule"]["ApplyServerSideEncryptionByDefault"]["KMSMasterKeyID"] }}</KMSMasterKeyID>
{% endif %}
</ApplyServerSideEncryptionByDefault>
<BucketKeyEnabled>{{ 'true' if entry["Rule"].get("BucketKeyEnabled") == 'true' else 'false' }}</BucketKeyEnabled>
</Rule>
{% endfor %}
</ServerSideEncryptionConfiguration>
"""
S3_INVALID_PRESIGNED_PARAMETERS = """<?xml version="1.0" encoding="UTF-8"?>
<Error>
<Code>SignatureDoesNotMatch</Code>
<Message>The request signature we calculated does not match the signature you provided. Check your key and signing method.</Message>
<RequestId>0D68A23BB2E2215B</RequestId>
<HostId>9Gjjt1m+cjU4OPvX9O9/8RuvnG41MRb/18Oux2o5H5MY7ISNTlXN+Dz9IG62/ILVxhAGI0qyPfg=</HostId>
</Error>
"""
S3_NO_ENCRYPTION = """<?xml version="1.0" encoding="UTF-8"?>
<Error>
<Code>ServerSideEncryptionConfigurationNotFoundError</Code>
<Message>The server side encryption configuration was not found</Message>
<BucketName>{{ bucket_name }}</BucketName>
<RequestId>0D68A23BB2E2215B</RequestId>
<HostId>9Gjjt1m+cjU4OPvX9O9/8RuvnG41MRb/18Oux2o5H5MY7ISNTlXN+Dz9IG62/ILVxhAGI0qyPfg=</HostId>
</Error>
"""
S3_GET_BUCKET_NOTIFICATION_CONFIG = """<?xml version="1.0" encoding="UTF-8"?>
<NotificationConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
{% for topic in config.topic %}
<TopicConfiguration>
<Id>{{ topic.id }}</Id>
<Topic>{{ topic.arn }}</Topic>
{% for event in topic.events %}
<Event>{{ event }}</Event>
{% endfor %}
{% if topic.filters %}
<Filter>
<S3Key>
{% for rule in topic.filters["S3Key"]["FilterRule"] %}
<FilterRule>
<Name>{{ rule["Name"] }}</Name>
<Value>{{ rule["Value"] }}</Value>
</FilterRule>
{% endfor %}
</S3Key>
</Filter>
{% endif %}
</TopicConfiguration>
{% endfor %}
{% for queue in config.queue %}
<QueueConfiguration>
<Id>{{ queue.id }}</Id>
<Queue>{{ queue.arn }}</Queue>
{% for event in queue.events %}
<Event>{{ event }}</Event>
{% endfor %}
{% if queue.filters %}
<Filter>
<S3Key>
{% for rule in queue.filters["S3Key"]["FilterRule"] %}
<FilterRule>
<Name>{{ rule["Name"] }}</Name>
<Value>{{ rule["Value"] }}</Value>
</FilterRule>
{% endfor %}
</S3Key>
</Filter>
{% endif %}
</QueueConfiguration>
{% endfor %}
{% for cf in config.cloud_function %}
<CloudFunctionConfiguration>
<Id>{{ cf.id }}</Id>
<CloudFunction>{{ cf.arn }}</CloudFunction>
{% for event in cf.events %}
<Event>{{ event }}</Event>
{% endfor %}
{% if cf.filters %}
<Filter>
<S3Key>
{% for rule in cf.filters["S3Key"]["FilterRule"] %}
<FilterRule>
<Name>{{ rule["Name"] }}</Name>
<Value>{{ rule["Value"] }}</Value>
</FilterRule>
{% endfor %}
</S3Key>
</Filter>
{% endif %}
</CloudFunctionConfiguration>
{% endfor %}
</NotificationConfiguration>
"""
S3_BUCKET_ACCELERATE = """
<AccelerateConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
<Status>{{ bucket.accelerate_configuration }}</Status>
</AccelerateConfiguration>
"""
S3_BUCKET_ACCELERATE_NOT_SET = """
<AccelerateConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/"/>
"""
S3_PUBLIC_ACCESS_BLOCK_CONFIGURATION = """
<PublicAccessBlockConfiguration>
<BlockPublicAcls>{{public_block_config.block_public_acls}}</BlockPublicAcls>
<IgnorePublicAcls>{{public_block_config.ignore_public_acls}}</IgnorePublicAcls>
<BlockPublicPolicy>{{public_block_config.block_public_policy}}</BlockPublicPolicy>
<RestrictPublicBuckets>{{public_block_config.restrict_public_buckets}}</RestrictPublicBuckets>
</PublicAccessBlockConfiguration>
"""
| 37.949362 | 168 | 0.586959 |
4a1acea2328fb83abf77597b21f1ee178e0f51a7
| 11,664 |
py
|
Python
|
Polaritonic-Quantum-Chemistry/helper_CQED_RHF.py
|
cheechonghian/psi4numpy
|
1fcd4885c62beadda67d448cf66227c9ea493daa
|
[
"BSD-3-Clause"
] | 214 |
2017-03-01T08:04:48.000Z
|
2022-03-23T08:52:04.000Z
|
Polaritonic-Quantum-Chemistry/helper_CQED_RHF.py
|
cheechonghian/psi4numpy
|
1fcd4885c62beadda67d448cf66227c9ea493daa
|
[
"BSD-3-Clause"
] | 100 |
2017-03-03T13:20:20.000Z
|
2022-03-05T18:20:27.000Z
|
Polaritonic-Quantum-Chemistry/helper_CQED_RHF.py
|
cheechonghian/psi4numpy
|
1fcd4885c62beadda67d448cf66227c9ea493daa
|
[
"BSD-3-Clause"
] | 150 |
2017-02-17T19:44:47.000Z
|
2022-03-22T05:52:43.000Z
|
"""
Helper function for CQED_RHF
References:
Equations and algorithms from
[Haugland:2020:041043], [DePrince:2021:094112], and [McTague:2021:ChemRxiv]
"""
__authors__ = ["Jon McTague", "Jonathan Foley"]
__credits__ = ["Jon McTague", "Jonathan Foley"]
__copyright__ = "(c) 2014-2018, The Psi4NumPy Developers"
__license__ = "BSD-3-Clause"
__date__ = "2021-08-19"
# ==> Import Psi4, NumPy, & SciPy <==
import psi4
import numpy as np
import time
def cqed_rhf(lambda_vector, molecule_string, psi4_options_dict):
"""Computes the QED-RHF energy and density
Arguments
---------
lambda_vector : 1 x 3 array of floats
the electric field vector, see e.g. Eq. (1) in [DePrince:2021:094112]
and (15) in [Haugland:2020:041043]
molecule_string : string
specifies the molecular geometry
psi4_options_dict : dictionary
specifies the psi4 options to be used in running the canonical RHF
Returns
-------
cqed_rhf_dictionary : dictionary
Contains important quantities from the cqed_rhf calculation, with keys including:
'RHF ENERGY' -> result of canonical RHF calculation using psi4 defined by molecule_string and psi4_options_dict
'CQED-RHF ENERGY' -> result of CQED-RHF calculation, see Eq. (13) of [McTague:2021:ChemRxiv]
'CQED-RHF C' -> orbitals resulting from CQED-RHF calculation
'CQED-RHF DENSITY MATRIX' -> density matrix resulting from CQED-RHF calculation
'CQED-RHF EPS' -> orbital energies from CQED-RHF calculation
'PSI4 WFN' -> wavefunction object from psi4 canonical RHF calculation
'CQED-RHF DIPOLE MOMENT' -> total dipole moment from CQED-RHF calculation (1x3 numpy array)
'NUCLEAR DIPOLE MOMENT' -> nuclear dipole moment (1x3 numpy array)
'DIPOLE ENERGY' -> See Eq. (14) of [McTague:2021:ChemRxiv]
'NUCLEAR REPULSION ENERGY' -> Total nuclear repulsion energy
Example
-------
>>> cqed_rhf_dictionary = cqed_rhf([0., 0., 1e-2], '''\nMg\nH 1 1.7\nsymmetry c1\n1 1\n''', psi4_options_dictionary)
"""
# define geometry using the molecule_string
mol = psi4.geometry(molecule_string)
# define options for the calculation
psi4.set_options(psi4_options_dict)
# run psi4 to get ordinary scf energy and wavefunction object
psi4_rhf_energy, wfn = psi4.energy("scf", return_wfn=True)
# Create instance of MintsHelper class
mints = psi4.core.MintsHelper(wfn.basisset())
# Grab data from wavefunction
# number of doubly occupied orbitals
ndocc = wfn.nalpha()
# grab all transformation vectors and store to a numpy array
C = np.asarray(wfn.Ca())
# use canonical RHF orbitals for guess CQED-RHF orbitals
Cocc = C[:, :ndocc]
# form guess density
D = np.einsum("pi,qi->pq", Cocc, Cocc) # [Szabo:1996] Eqn. 3.145, pp. 139
# Integrals required for CQED-RHF
# Ordinary integrals first
V = np.asarray(mints.ao_potential())
T = np.asarray(mints.ao_kinetic())
I = np.asarray(mints.ao_eri())
# Extra terms for Pauli-Fierz Hamiltonian
# nuclear dipole
mu_nuc_x = mol.nuclear_dipole()[0]
mu_nuc_y = mol.nuclear_dipole()[1]
mu_nuc_z = mol.nuclear_dipole()[2]
# electronic dipole integrals in AO basis
mu_ao_x = np.asarray(mints.ao_dipole()[0])
mu_ao_y = np.asarray(mints.ao_dipole()[1])
mu_ao_z = np.asarray(mints.ao_dipole()[2])
# \lambda \cdot \mu_el (see within the sum of line 3 of Eq. (9) in [McTague:2021:ChemRxiv])
l_dot_mu_el = lambda_vector[0] * mu_ao_x
l_dot_mu_el += lambda_vector[1] * mu_ao_y
l_dot_mu_el += lambda_vector[2] * mu_ao_z
# compute electronic dipole expectation value with
# canonical RHF density
mu_exp_x = np.einsum("pq,pq->", 2 * mu_ao_x, D)
mu_exp_y = np.einsum("pq,pq->", 2 * mu_ao_y, D)
mu_exp_z = np.einsum("pq,pq->", 2 * mu_ao_z, D)
# need to add the nuclear term to the sum over the electronic dipole integrals
mu_exp_x += mu_nuc_x
mu_exp_y += mu_nuc_y
mu_exp_z += mu_nuc_z
rhf_dipole_moment = np.array([mu_exp_x, mu_exp_y, mu_exp_z])
# We need to carry around the electric field dotted into the nuclear dipole moment
# and the electric field dotted into the RHF electronic dipole expectation value
# see prefactor to sum of Line 3 of Eq. (9) in [McTague:2021:ChemRxiv]
# \lambda_vector \cdot \mu_{nuc}
l_dot_mu_nuc = (
lambda_vector[0] * mu_nuc_x
+ lambda_vector[1] * mu_nuc_y
+ lambda_vector[2] * mu_nuc_z
)
# \lambda_vector \cdot <\mu> where <\mu> contains electronic and nuclear contributions
l_dot_mu_exp = (
lambda_vector[0] * mu_exp_x
+ lambda_vector[1] * mu_exp_y
+ lambda_vector[2] * mu_exp_z
)
# dipole energy, Eq. (14) in [McTague:2021:ChemRxiv]
# 0.5 * (\lambda_vector \cdot \mu_{nuc})** 2
# - (\lambda_vector \cdot <\mu> ) ( \lambda_vector\cdot \mu_{nuc})
# +0.5 * (\lambda_vector \cdot <\mu>) ** 2
d_c = (
0.5 * l_dot_mu_nuc ** 2 - l_dot_mu_nuc * l_dot_mu_exp + 0.5 * l_dot_mu_exp ** 2
)
# quadrupole arrays
Q_ao_xx = np.asarray(mints.ao_quadrupole()[0])
Q_ao_xy = np.asarray(mints.ao_quadrupole()[1])
Q_ao_xz = np.asarray(mints.ao_quadrupole()[2])
Q_ao_yy = np.asarray(mints.ao_quadrupole()[3])
Q_ao_yz = np.asarray(mints.ao_quadrupole()[4])
Q_ao_zz = np.asarray(mints.ao_quadrupole()[5])
# Pauli-Fierz 1-e quadrupole terms, Line 2 of Eq. (9) in [McTague:2021:ChemRxiv]
Q_PF = -0.5 * lambda_vector[0] * lambda_vector[0] * Q_ao_xx
Q_PF -= 0.5 * lambda_vector[1] * lambda_vector[1] * Q_ao_yy
Q_PF -= 0.5 * lambda_vector[2] * lambda_vector[2] * Q_ao_zz
# accounting for the fact that Q_ij = Q_ji
# by weighting Q_ij x 2 which cancels factor of 1/2
Q_PF -= lambda_vector[0] * lambda_vector[1] * Q_ao_xy
Q_PF -= lambda_vector[0] * lambda_vector[2] * Q_ao_xz
Q_PF -= lambda_vector[1] * lambda_vector[2] * Q_ao_yz
# Pauli-Fierz 1-e dipole terms scaled by
# (\lambda_vector \cdot \mu_{nuc} - \lambda_vector \cdot <\mu>)
# Line 3 in full of Eq. (9) in [McTague:2021:ChemRxiv]
d_PF = (l_dot_mu_nuc - l_dot_mu_exp) * l_dot_mu_el
# ordinary H_core
H_0 = T + V
# Add Pauli-Fierz terms to H_core
# Eq. (11) in [McTague:2021:ChemRxiv]
H = H_0 + Q_PF + d_PF
# Overlap for DIIS
S = mints.ao_overlap()
# Orthogonalizer A = S^(-1/2) using Psi4's matrix power.
A = mints.ao_overlap()
A.power(-0.5, 1.0e-16)
A = np.asarray(A)
print("\nStart SCF iterations:\n")
t = time.time()
E = 0.0
Enuc = mol.nuclear_repulsion_energy()
Eold = 0.0
E_1el_crhf = np.einsum("pq,pq->", H_0 + H_0, D)
E_1el = np.einsum("pq,pq->", H + H, D)
print("Canonical RHF One-electron energy = %4.16f" % E_1el_crhf)
print("CQED-RHF One-electron energy = %4.16f" % E_1el)
print("Nuclear repulsion energy = %4.16f" % Enuc)
print("Dipole energy = %4.16f" % d_c)
# Set convergence criteria from psi4_options_dict
if "e_convergence" in psi4_options_dict:
E_conv = psi4_options_dict["e_convergence"]
else:
E_conv = 1.0e-7
if "d_convergence" in psi4_options_dict:
D_conv = psi4_options_dict["d_convergence"]
else:
D_conv = 1.0e-5
t = time.time()
# maxiter
maxiter = 500
for SCF_ITER in range(1, maxiter + 1):
# Build fock matrix: [Szabo:1996] Eqn. 3.154, pp. 141
J = np.einsum("pqrs,rs->pq", I, D)
K = np.einsum("prqs,rs->pq", I, D)
# Pauli-Fierz 2-e dipole-dipole terms, line 2 of Eq. (12) in [McTague:2021:ChemRxiv]
M = np.einsum("pq,rs,rs->pq", l_dot_mu_el, l_dot_mu_el, D)
N = np.einsum("pr,qs,rs->pq", l_dot_mu_el, l_dot_mu_el, D)
# Build fock matrix: [Szabo:1996] Eqn. 3.154, pp. 141
# plus Pauli-Fierz terms Eq. (12) in [McTague:2021:ChemRxiv]
F = H + J * 2 - K + 2 * M - N
diis_e = np.einsum("ij,jk,kl->il", F, D, S) - np.einsum("ij,jk,kl->il", S, D, F)
diis_e = A.dot(diis_e).dot(A)
dRMS = np.mean(diis_e ** 2) ** 0.5
# SCF energy and update: [Szabo:1996], Eqn. 3.184, pp. 150
# Pauli-Fierz terms Eq. 13 of [McTague:2021:ChemRxiv]
SCF_E = np.einsum("pq,pq->", F + H, D) + Enuc + d_c
print(
"SCF Iteration %3d: Energy = %4.16f dE = % 1.5E dRMS = %1.5E"
% (SCF_ITER, SCF_E, (SCF_E - Eold), dRMS)
)
if (abs(SCF_E - Eold) < E_conv) and (dRMS < D_conv):
break
Eold = SCF_E
# Diagonalize Fock matrix: [Szabo:1996] pp. 145
Fp = A.dot(F).dot(A) # Eqn. 3.177
e, C2 = np.linalg.eigh(Fp) # Solving Eqn. 3.178
C = A.dot(C2) # Back transform, Eqn. 3.174
Cocc = C[:, :ndocc]
D = np.einsum("pi,qi->pq", Cocc, Cocc) # [Szabo:1996] Eqn. 3.145, pp. 139
# update electronic dipole expectation value
mu_exp_x = np.einsum("pq,pq->", 2 * mu_ao_x, D)
mu_exp_y = np.einsum("pq,pq->", 2 * mu_ao_y, D)
mu_exp_z = np.einsum("pq,pq->", 2 * mu_ao_z, D)
mu_exp_x += mu_nuc_x
mu_exp_y += mu_nuc_y
mu_exp_z += mu_nuc_z
# update \lambda \cdot <\mu>
l_dot_mu_exp = (
lambda_vector[0] * mu_exp_x
+ lambda_vector[1] * mu_exp_y
+ lambda_vector[2] * mu_exp_z
)
# Line 3 in full of Eq. (9) in [McTague:2021:ChemRxiv]
d_PF = (l_dot_mu_nuc - l_dot_mu_exp) * l_dot_mu_el
# update Core Hamiltonian
H = H_0 + Q_PF + d_PF
# update dipole energetic contribution, Eq. (14) in [McTague:2021:ChemRxiv]
d_c = (
0.5 * l_dot_mu_nuc ** 2
- l_dot_mu_nuc * l_dot_mu_exp
+ 0.5 * l_dot_mu_exp ** 2
)
if SCF_ITER == maxiter:
psi4.core.clean()
raise Exception("Maximum number of SCF cycles exceeded.")
print("Total time for SCF iterations: %.3f seconds \n" % (time.time() - t))
print("QED-RHF energy: %.8f hartree" % SCF_E)
print("Psi4 SCF energy: %.8f hartree" % psi4_rhf_energy)
rhf_one_e_cont = (
2 * H_0
) # note using H_0 which is just T + V, and does not include Q_PF and d_PF
rhf_two_e_cont = (
J * 2 - K
) # note using just J and K that would contribute to ordinary RHF 2-electron energy
pf_two_e_cont = 2 * M - N
SCF_E_One = np.einsum("pq,pq->", rhf_one_e_cont, D)
SCF_E_Two = np.einsum("pq,pq->", rhf_two_e_cont, D)
CQED_SCF_E_Two = np.einsum("pq,pq->", pf_two_e_cont, D)
CQED_SCF_E_D_PF = np.einsum("pq,pq->", 2 * d_PF, D)
CQED_SCF_E_Q_PF = np.einsum("pq,pq->", 2 * Q_PF, D)
assert np.isclose(
SCF_E_One + SCF_E_Two + CQED_SCF_E_D_PF + CQED_SCF_E_Q_PF + CQED_SCF_E_Two,
SCF_E - d_c - Enuc,
)
cqed_rhf_dict = {
"RHF ENERGY": psi4_rhf_energy,
"CQED-RHF ENERGY": SCF_E,
"1E ENERGY": SCF_E_One,
"2E ENERGY": SCF_E_Two,
"1E DIPOLE ENERGY": CQED_SCF_E_D_PF,
"1E QUADRUPOLE ENERGY": CQED_SCF_E_Q_PF,
"2E DIPOLE ENERGY": CQED_SCF_E_Two,
"CQED-RHF C": C,
"CQED-RHF DENSITY MATRIX": D,
"CQED-RHF EPS": e,
"PSI4 WFN": wfn,
"RHF DIPOLE MOMENT": rhf_dipole_moment,
"CQED-RHF DIPOLE MOMENT": np.array([mu_exp_x, mu_exp_y, mu_exp_z]),
"NUCLEAR DIPOLE MOMENT": np.array([mu_nuc_x, mu_nuc_y, mu_nuc_z]),
"DIPOLE ENERGY": d_c,
"NUCLEAR REPULSION ENERGY": Enuc,
}
return cqed_rhf_dict
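# Minimal usage sketch (illustrative only -- the geometry, basis set and
# coupling strength below are placeholder assumptions, not values taken from
# the cited references):
#
#   h2o_string = """
#   O 0.0 0.0 0.0
#   H 0.0 0.757 0.587
#   H 0.0 -0.757 0.587
#   symmetry c1
#   """
#   psi4_options = {"basis": "sto-3g", "e_convergence": 1e-8, "d_convergence": 1e-7}
#   result = cqed_rhf([0.0, 0.0, 0.01], h2o_string, psi4_options)
#   print(result["RHF ENERGY"], result["CQED-RHF ENERGY"], result["DIPOLE ENERGY"])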
| 36.564263 | 123 | 0.618313 |
4a1acf47235969105d2a914f4669d8e83bd424c1
| 22,030 |
py
|
Python
|
gym_vertical_landing/envs/vertical_landing_env.py
|
bbueno5000/gym-vertical-landing
|
385dab5f38220ac704d0c082e0c458ad2b3c0a6a
|
[
"Apache-2.0"
] | null | null | null |
gym_vertical_landing/envs/vertical_landing_env.py
|
bbueno5000/gym-vertical-landing
|
385dab5f38220ac704d0c082e0c458ad2b3c0a6a
|
[
"Apache-2.0"
] | null | null | null |
gym_vertical_landing/envs/vertical_landing_env.py
|
bbueno5000/gym-vertical-landing
|
385dab5f38220ac704d0c082e0c458ad2b3c0a6a
|
[
"Apache-2.0"
] | null | null | null |
"""
The objective of this environment is to land a rocket on a platform.
STATE VARIABLES
---------------
The state consists of the following variables:
- x position
- y position
- angle
- first leg ground contact indicator
- second leg ground contact indicator
- throttle
- engine gimbal
If vel_state is set to true, the velocity values are included:
- x velocity
- y velocity
- angular velocity
all state variables are roughly in the range [-1, 1]
CONTROL INPUTS
--------------
Discrete Control Inputs:
- gimbal left
- gimbal right
- throttle up
- throttle down
- use first control thruster
- use second control thruster
- no action
Continuous Control Inputs:
- gimbal (left/right)
- throttle (up/down)
- control thruster (left/right)
"""
import Box2D
import gym
import gym.spaces as spaces
import gym.utils.seeding
import numpy as np
FPS = 60
class ContactDetector(Box2D.b2ContactListener):
def __init__(self, env):
super(ContactDetector, self).__init__()
self.env = env
def BeginContact(self, contact):
if self.env.water in [contact.fixtureA.body, contact.fixtureB.body] \
or self.env.lander in [contact.fixtureA.body, contact.fixtureB.body] \
or self.env.containers[0] in [contact.fixtureA.body, contact.fixtureB.body] \
or self.env.containers[1] in [contact.fixtureA.body, contact.fixtureB.body]:
self.env.game_over = True
else:
for i in range(2):
if self.env.legs[i] in [contact.fixtureA.body, contact.fixtureB.body]:
self.env.legs[i].ground_contact = True
def EndContact(self, contact):
for i in range(2):
if self.env.legs[i] in [contact.fixtureA.body, contact.fixtureB.body]:
self.env.legs[i].ground_contact = False
class VerticalLandingEnv(gym.Env):
metadata = {'render.modes': ['human', 'rgb_array'],
'video.frames_per_second': FPS}
def __init__(self):
self.continuous = True
self.initial_random = 0.4 # Random scaling of initial velocity, higher is more difficult
self.scale_s = 0.35 # Temporal Scaling, lower is faster - adjust forces appropriately
self.start_height = 1000.0
self.start_speed = 80.0
self.vel_state = True # Add velocity info to state
# ROCKET PARAMETERS
self.gimbal_threshold = 0.4
self.main_engine_power = 1600 * self.scale_s
self.min_throttle = 0.4
self.rocket_width = 3.66 * self.scale_s
self.rocket_height = self.rocket_width / 3.7 * 47.9
self.engine_height = self.rocket_width * 0.5
self.engine_width = self.engine_height * 0.7
self.side_engine_power = 100 / FPS * self.scale_s
self.thruster_height = self.rocket_height * 0.86
# LEG PARAMETERS
self.base_angle = -0.27
self.leg_away = self.rocket_width / 2
self.leg_length = self.rocket_width * 2.2
self.spring_angle = 0.27
# SHIP PARAMETERS
self.ship_height = self.rocket_width
self.ship_width = self.ship_height * 40
# VIEWPORT PARAMETERS
self.viewport_h = 720
self.viewport_w = 500
self.H = 1.1 * self.start_height * self.scale_s
self.W = float(self.viewport_w) / self.viewport_h * self.H
# SMOKE FOR VISUALS PARAMETERS
self.max_smoke_lifetime = 2 * FPS
self.mean = np.array([-0.034, -0.15, -0.016, 0.0024, 0.0024, 0.137, -0.02, -0.01, -0.8, 0.002])
self.var = np.sqrt(np.array([0.08, 0.33, 0.0073, 0.0023, 0.0023, 0.8, 0.085, 0.0088, 0.063, 0.076]))
# GENERAL PARAMETERS
self._seed()
self.engine = None
self.episode_number = 0
self.lander = None
self.legs = []
self.ship = None
self.viewer = None
self.water = None
self.world = Box2D.b2World()
high = np.array([1, 1, 1, 1, 1, 1, 1, np.inf, np.inf, np.inf], dtype=np.float32)
low = -high
if not self.vel_state:
high = high[0:7]
low = low[0:7]
self.observation_space = spaces.Box(low, high, dtype=np.float32)
if self.continuous:
self.action_space = spaces.Box(-1.0, +1.0, (3,), dtype=np.float32)
else:
self.action_space = spaces.Discrete(7)
self.reset()
def _destroy(self):
if not self.water:
return
self.world.contactListener = None
self.world.DestroyBody(self.containers[0])
self.world.DestroyBody(self.containers[1])
self.world.DestroyBody(self.lander)
self.world.DestroyBody(self.legs[0])
self.world.DestroyBody(self.legs[1])
self.world.DestroyBody(self.ship)
self.world.DestroyBody(self.water)
self.containers = []
self.lander = None
self.legs = []
self.ship = None
self.water = None
def _seed(self, seed=None):
self.np_random, seed = gym.utils.seeding.np_random(seed)
return [seed]
def render(self, mode='human', close=False):
import gym.envs.classic_control.rendering as rendering
if close:
if self.viewer is not None:
self.viewer.close()
self.viewer = None
return
if self.viewer is None:
self.viewer = rendering.Viewer(self.viewport_w, self.viewport_h)
self.viewer.set_bounds(0, self.W, 0, self.H)
sky = rendering.FilledPolygon(((0, 0), (0, self.H), (self.W, self.H), (self.W, 0)))
self.sky_color = self.rgb(126, 150, 233)
sky.set_color(*self.sky_color)
self.sky_color_half_transparent = np.array((np.array(self.sky_color) + self.rgb(255, 255, 255))) / 2
self.viewer.add_geom(sky)
self.rockettrans = rendering.Transform()
engine = rendering.FilledPolygon(((0, 0),
(self.engine_width / 2, -self.engine_height),
(-self.engine_width / 2, -self.engine_height)))
self.enginetrans = rendering.Transform()
engine.add_attr(self.enginetrans)
engine.add_attr(self.rockettrans)
engine.set_color(.4, .4, .4)
self.viewer.add_geom(engine)
self.fire = rendering.FilledPolygon(((self.engine_width * 0.4, 0),
(-self.engine_width * 0.4, 0),
(-self.engine_width * 1.2, -self.engine_height * 5),
(0, -self.engine_height * 8),
(self.engine_width * 1.2, -self.engine_height * 5)))
self.fire.set_color(*self.rgb(255, 230, 107))
self.firescale = rendering.Transform(scale=(1, 1))
self.firetrans = rendering.Transform(translation=(0, -self.engine_height))
self.fire.add_attr(self.firescale)
self.fire.add_attr(self.firetrans)
self.fire.add_attr(self.enginetrans)
self.fire.add_attr(self.rockettrans)
smoke = rendering.FilledPolygon(((self.rocket_width / 2, self.thruster_height * 1),
(self.rocket_width * 3, self.thruster_height * 1.03),
(self.rocket_width * 4, self.thruster_height * 1),
(self.rocket_width * 3, self.thruster_height * 0.97)))
smoke.set_color(*self.sky_color_half_transparent)
self.smokescale = rendering.Transform(scale=(1, 1))
smoke.add_attr(self.smokescale)
smoke.add_attr(self.rockettrans)
self.viewer.add_geom(smoke)
self.gridfins = []
for i in (-1, 1):
finpoly = ((i * self.rocket_width * 1.1, self.thruster_height * 1.01),
(i * self.rocket_width * 0.4, self.thruster_height * 1.01),
(i * self.rocket_width * 0.4, self.thruster_height * 0.99),
(i * self.rocket_width * 1.1, self.thruster_height * 0.99))
gridfin = rendering.FilledPolygon(finpoly)
gridfin.add_attr(self.rockettrans)
gridfin.set_color(0.25, 0.25, 0.25)
self.gridfins.append(gridfin)
if self.stepnumber % round(FPS / 10) == 0 and self.power > 0:
s = [self.max_smoke_lifetime * self.power, # total lifetime
0, # current lifetime
self.power * (1 + 0.2 * np.random.random()), # size
np.array(self.lander.position)
+ self.power * self.rocket_width * 10 * np.array((np.sin(self.lander.angle + self.gimbal),
-np.cos(self.lander.angle + self.gimbal)))
+ self.power * 5 * (np.random.random(2) - 0.5)] # position
self.smoke.append(s)
for s in self.smoke:
s[1] += 1
if s[1] > s[0]:
self.smoke.remove(s)
continue
t = rendering.Transform(translation=(s[3][0], s[3][1] + self.H * s[1] / 2000))
self.viewer.draw_circle(radius=0.05 * s[1] + s[2],
color=self.sky_color + (1 - (2 * s[1] / s[0] - 1) ** 2) / 3 * (self.sky_color_half_transparent - self.sky_color)).add_attr(t)
self.viewer.add_onetime(self.fire)
for g in self.gridfins:
self.viewer.add_onetime(g)
for obj in self.drawlist:
for f in obj.fixtures:
trans = f.body.transform
path = [trans * v for v in f.shape.vertices]
self.viewer.draw_polygon(path, color=obj.color1)
for l in zip(self.legs, [-1, 1]):
path = [self.lander.fixtures[0].body.transform * (l[1] * self.rocket_width / 2,
self.rocket_height / 8),
l[0].fixtures[0].body.transform * (l[1] * self.leg_length * 0.8, 0)]
self.viewer.draw_polyline(path, color=self.ship.color1, linewidth=1 if self.start_height > 500 else 2)
self.viewer.draw_polyline(((self.helipad_x2, self.terranheight + self.ship_height),
(self.helipad_x1, self.terranheight + self.ship_height)),
color=self.rgb(206, 206, 2),
linewidth=1)
self.rockettrans.set_translation(*self.lander.position)
self.rockettrans.set_rotation(self.lander.angle)
self.enginetrans.set_rotation(self.gimbal)
self.firescale.set_scale(newx=1, newy=self.power * np.random.uniform(1, 1.3))
self.smokescale.set_scale(newx=self.force_dir, newy=1)
return self.viewer.render(return_rgb_array=mode == 'rgb_array')
def rgb(self, red, green, blue):
return float(red) / 255, float(green) / 255, float(blue) / 255
def reset(self):
self._destroy()
self.world.contactListener_keepref = ContactDetector(self)
self.world.contactListener = self.world.contactListener_keepref
self.game_over = False
self.gimbal = 0.0
self.landed_ticks = 0
self.prev_shaping = None
self.smoke = []
self.stepnumber = 0
self.throttle = 0
self.terranheight = self.H / 20
self.shipheight = self.terranheight + self.ship_height
# ship_pos = self.np_random.uniform(0, self.ship_width / SCALE) + self.ship_width / SCALE
ship_pos = self.W / 2
self.helipad_x1 = ship_pos - self.ship_width / 2
self.helipad_x2 = self.helipad_x1 + self.ship_width
self.helipad_y = self.terranheight + self.ship_height
self.water = self.world.CreateStaticBody(
fixtures=Box2D.b2FixtureDef(
shape=Box2D.b2PolygonShape(
vertices=((0, 0),
(self.W, 0),
(self.W, self.terranheight),
(0, self.terranheight))),
friction=0.1,
restitution=0.0))
self.water.color1 = self.rgb(70, 96, 176)
self.ship = self.world.CreateStaticBody(
fixtures=Box2D.b2FixtureDef(
shape=Box2D.b2PolygonShape(
vertices=((self.helipad_x1, self.terranheight),
(self.helipad_x2, self.terranheight),
(self.helipad_x2, self.terranheight + self.ship_height),
(self.helipad_x1, self.terranheight + self.ship_height))),
friction=0.5,
restitution=0.0))
self.containers = []
for side in [-1, 1]:
self.containers.append(self.world.CreateStaticBody(
fixtures=Box2D.b2FixtureDef(
shape=Box2D.b2PolygonShape(
vertices=((ship_pos + side * 0.95 * self.ship_width / 2, self.helipad_y),
(ship_pos + side * 0.95 * self.ship_width / 2, self.helipad_y + self.ship_height),
(ship_pos + side * 0.95 * self.ship_width / 2 - side * self.ship_height,
self.helipad_y + self.ship_height),
(ship_pos + side * 0.95 * self.ship_width / 2 - side * self.ship_height, self.helipad_y))),
friction=0.2,
restitution=0.0)))
self.containers[-1].color1 = self.rgb(206, 206, 2)
self.ship.color1 = (0.2, 0.2, 0.2)
initial_x = self.W / 2 + self.W * np.random.uniform(-0.3, 0.3)
initial_y = self.H * 0.95
self.lander = self.world.CreateDynamicBody(
position=(initial_x, initial_y),
angle=0.0,
fixtures=Box2D.b2FixtureDef(
shape=Box2D.b2PolygonShape(
vertices=((-self.rocket_width / 2, 0),
(self.rocket_width / 2, 0),
(self.rocket_width / 2, self.rocket_height),
(-self.rocket_width / 2, self.rocket_height))),
density=1.0,
friction=0.5,
categoryBits=0x0010,
maskBits=0x001,
restitution=0.0))
self.lander.color1 = self.rgb(230, 230, 230)
for i in [-1, +1]:
leg = self.world.CreateDynamicBody(
position=(initial_x - i * self.leg_away, initial_y + self.rocket_width * 0.2),
angle=(i * self.base_angle),
fixtures=Box2D.b2FixtureDef(
shape=Box2D.b2PolygonShape(
vertices=((0, 0),
(0, self.leg_length / 25),
(i * self.leg_length, 0),
(i * self.leg_length, -self.leg_length / 20),
(i * self.leg_length / 3, -self.leg_length / 7))),
density=1,
restitution=0.0,
friction=0.2,
categoryBits=0x0020,
maskBits=0x001))
leg.ground_contact = False
leg.color1 = (0.25, 0.25, 0.25)
rjd = Box2D.b2RevoluteJointDef(bodyA=self.lander,
bodyB=leg,
localAnchorA=(i * self.leg_away, self.rocket_width * 0.2),
localAnchorB=(0, 0),
enableLimit=True,
maxMotorTorque=2500.0,
motorSpeed=-0.05 * i,
enableMotor=True)
djd = Box2D.b2DistanceJointDef(bodyA=self.lander,
bodyB=leg,
anchorA=(i * self.leg_away, self.rocket_height / 8),
anchorB=leg.fixtures[0].body.transform * (i * self.leg_length, 0),
collideConnected=False,
frequencyHz=0.01,
dampingRatio=0.9)
if i == 1:
rjd.lowerAngle = -self.spring_angle
rjd.upperAngle = 0
else:
rjd.lowerAngle = 0
rjd.upperAngle = + self.spring_angle
leg.joint = self.world.CreateJoint(rjd)
leg.joint2 = self.world.CreateJoint(djd)
self.legs.append(leg)
self.lander.linearVelocity = (
-self.np_random.uniform(0, self.initial_random) * \
self.start_speed * (initial_x - self.W / 2) / self.W, -self.start_speed)
self.lander.angularVelocity = (1 + self.initial_random) * np.random.uniform(-1, 1)
self.drawlist = self.legs + [self.water] + [self.ship] + self.containers + [self.lander]
if self.continuous:
return self.step([0, 0, 0])[0]
else:
return self.step(6)[0]
def step(self, action):
self.force_dir = 0
if self.continuous:
action = np.clip(action, -1, 1)  # np.clip returns a new array, so assign it back for the bounds to take effect
self.gimbal += action[0] * 0.15 / FPS
self.throttle += action[1] * 0.5 / FPS
if action[2] > 0.5:
self.force_dir = 1
elif action[2] < -0.5:
self.force_dir = -1
else:
if action == 0:
self.gimbal += 0.01
elif action == 1:
self.gimbal -= 0.01
elif action == 2:
self.throttle += 0.01
elif action == 3:
self.throttle -= 0.01
elif action == 4: # left
self.force_dir = -1
elif action == 5: # right
self.force_dir = 1
self.gimbal = np.clip(self.gimbal, -self.gimbal_threshold, self.gimbal_threshold)
self.throttle = np.clip(self.throttle, 0.0, 1.0)
self.power = 0 if self.throttle == 0.0 else self.min_throttle + self.throttle * (1 - self.min_throttle)
# main engine force
force_pos = (self.lander.position[0], self.lander.position[1])
force = (-np.sin(self.lander.angle + self.gimbal) * self.main_engine_power * self.power,
np.cos(self.lander.angle + self.gimbal) * self.main_engine_power * self.power)
self.lander.ApplyForce(force=force, point=force_pos, wake=False)
# control thruster force
force_pos_c = self.lander.position + self.thruster_height * np.array((np.sin(self.lander.angle), np.cos(self.lander.angle)))
force_c = (-self.force_dir * np.cos(self.lander.angle) * self.side_engine_power,
self.force_dir * np.sin(self.lander.angle) * self.side_engine_power)
self.lander.ApplyLinearImpulse(impulse=force_c, point=force_pos_c, wake=False)
self.world.Step(1.0 / FPS, 60, 60)
pos = self.lander.position
vel_l = np.array(self.lander.linearVelocity) / self.start_speed
vel_a = self.lander.angularVelocity
x_distance = (pos.x - self.W / 2) / self.W
y_distance = (pos.y - self.shipheight) / (self.H - self.shipheight)
angle = (self.lander.angle / np.pi) % 2
if angle > 1:
angle -= 2
state = [2 * x_distance,
2 * (y_distance - 0.5),
angle,
1.0 if self.legs[0].ground_contact else 0.0,
1.0 if self.legs[1].ground_contact else 0.0,
2 * (self.throttle - 0.5),
(self.gimbal / self.gimbal_threshold)]
if self.vel_state:
state.extend([vel_l[0], vel_l[1], vel_a])
# REWARD BEGINS -----
# state variables for reward
distance = np.linalg.norm((3 * x_distance, y_distance)) # weight x position more
speed = np.linalg.norm(vel_l)
groundcontact = self.legs[0].ground_contact or self.legs[1].ground_contact
brokenleg = (self.legs[0].joint.angle < 0 or self.legs[1].joint.angle > -0) and groundcontact
outside = abs(pos.x - self.W / 2) > self.W / 2 or pos.y > self.H
fuelcost = 0.1 * (0 * self.power + abs(self.force_dir)) / FPS
landed = self.legs[0].ground_contact and self.legs[1].ground_contact and speed < 0.1
done = False
reward = -fuelcost
if outside or brokenleg:
self.game_over = True
if self.game_over:
done = True
else:
# reward shaping
shaping = -0.5 * (distance + speed + abs(angle) ** 2)
shaping += 0.1 * (self.legs[0].ground_contact + self.legs[1].ground_contact)
if self.prev_shaping is not None:
reward += shaping - self.prev_shaping
self.prev_shaping = shaping
if landed:
self.landed_ticks += 1
else:
self.landed_ticks = 0
if self.landed_ticks == FPS:
reward = 1.0
done = True
if done:
reward += max(-1, 0 - 2 * (speed + distance + abs(angle) + abs(vel_a)))
elif not groundcontact:
reward -= 0.25 / FPS
reward = np.clip(reward, -1, 1)
# REWARD ENDS -----
self.stepnumber += 1
state = (state - self.mean[:len(state)]) / self.var[:len(state)]
return np.array(state), reward, done, {}
| 43.537549 | 161 | 0.529914 |
4a1acf4be6ca102f4baf445d0fba2edff00a16b0
| 1,807 |
py
|
Python
|
kerMIT/kerMIT/legacyCode/learner.py
|
ART-Group-it/KERMIT
|
e3ed25bb0d67e77fc3afdef04007b34df1805ba1
|
[
"MIT"
] | 45 |
2020-10-07T13:22:14.000Z
|
2022-03-22T03:49:44.000Z
|
kerMIT/kerMIT/legacyCode/learner.py
|
ART-Group-it/kerMIT
|
ff309ce3154c5292602c53cd19633d789bf759e2
|
[
"MIT"
] | 1 |
2020-12-27T13:01:03.000Z
|
2020-12-28T09:49:33.000Z
|
kerMIT/kerMIT/legacyCode/learner.py
|
ART-Group-it/KERMIT
|
e3ed25bb0d67e77fc3afdef04007b34df1805ba1
|
[
"MIT"
] | 5 |
2020-11-20T16:53:48.000Z
|
2022-02-07T09:30:48.000Z
|
__author__ = 'lorenzo'
import os
import numpy as np
import dataset_reader as dr
from sklearn import svm
from sklearn import tree
from dtk import DT
from tree import Tree
from semantic_vector import SemanticVector
import math
from feature import Feature
# class Learner:
# def __init__(self, dev, test, func):
# self.func = func
# self.dev = dr.Dataset(dev, func)
# self.test = dr.Dataset(test, func)
#
# def fit(self):
# clf = svm.SVC()
# clf.fit(self.dev.X, self.dev.y)
#
# @staticmethod
# def poly_kernel(a,b):
# p = (1 + np.dot(a[:,-1].reshape((len(a),1)),b[:,-1].reshape((len(b),1)).T))**2
# #print(p.shape)
# return p
if __name__ == "__main__":
dir = "/Users/lorenzo/Documents/Universita/PHD/Lavori/DSTK/RTE"
dev = "RTE3_dev_processed.xml"
test = "RTE3_test_processed.xml"
dir_ = "/Users/lorenzo/Documents/Universita/PHD/Lavori/DSTK/SVM/"
matrix = "single-target.dm"
sm = os.path.join(dir_, matrix)
#sv = SemanticVector(sm, True)
def poly_kernel(a,b):
p = (1 + np.dot(a[:,-1].reshape((len(a),1)),b[:,-1].reshape((len(b),1)).T))**2
#print(p.shape)
return p
def my_kernel(a, b):
p = np.dot(a[:,:-1], b[:,:-1].T)
#print(p.shape)
return p + poly_kernel(a,b)
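    # Reading of the two kernels above (an interpretation of the slicing, not
    # documented behaviour): each feature row appears to carry the distributed
    # tree-kernel features in the leading columns and a single semantic score
    # in the last column, so my_kernel adds a linear kernel over the tree
    # features to a degree-2 polynomial kernel over that last score.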
F = Feature(True, True, sm)
dev = dr.Dataset2(os.path.join(dir, dev), F.dst, processed=True)
test = dr.Dataset2(os.path.join(dir, test), F.dst, processed=True)
X = dev.X
y = dev.y
clf = svm.SVC(C = 0.3, kernel=my_kernel)
clf.fit(X,y)
# clf = tree.DecisionTreeClassifier()
# clf = clf.fit(X,y)
results = clf.predict(test.X)
#print(results)
mistakes = sum(results != test.y)
print(((800 - mistakes)/800) * 100)
| 22.873418 | 88 | 0.591588 |
4a1acf4d14bd69043249d1b70a9212f74e6c11ff
| 7,466 |
py
|
Python
|
Statstool-Web/statstool_web/main/outsource.py
|
Declaminius/EU4-MP-Statstool
|
2df7b7f08f1c97257dec325322a2e491ea856432
|
[
"MIT"
] | 1 |
2020-10-06T14:48:32.000Z
|
2020-10-06T14:48:32.000Z
|
Statstool-Web/statstool_web/main/outsource.py
|
Declaminius/EU4-MP-Statstool
|
2df7b7f08f1c97257dec325322a2e491ea856432
|
[
"MIT"
] | 3 |
2021-09-08T02:36:13.000Z
|
2022-03-12T00:50:09.000Z
|
Statstool-Web/statstool_web/main/outsource.py
|
Declaminius/EU4-MP-Statstool
|
2df7b7f08f1c97257dec325322a2e491ea856432
|
[
"MIT"
] | 1 |
2020-09-26T15:31:24.000Z
|
2020-09-26T15:31:24.000Z
|
from statstool_web.models import *
from math import ceil
def get_nation_info(savegame):
nation_tags = [x.tag for x in savegame.player_nations]
nation_names = [NationSavegameData.query.filter_by(
savegame_id=savegame.id, nation_tag=tag).first().nation_name for tag in nation_tags]
nation_colors_hex = [NationSavegameData.query.filter_by(nation_tag=tag,
savegame_id=savegame.id).first().color for tag in nation_tags]
nation_colors_hsl = [NationSavegameData.query.filter_by(nation_tag=tag,
savegame_id=savegame.id).first().color.hsl for tag in nation_tags]
return zip(nation_names, nation_tags, nation_colors_hex, nation_colors_hsl)
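# Usage sketch for get_nation_info (illustrative only -- it yields parallel
# tuples, typically consumed by a template; the loop variable names below are
# assumptions, not part of the original module):
#
#   for name, tag, color_hex, color_hsl in get_nation_info(savegame):
#       print(name, tag, color_hex, color_hsl)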
def mp3_data(mp_id, savegame):
institutions = ("colonialism", "printing_press", "global_trade",
"manufactories", "enlightenment", "industrialization")
payload = {"mp_id": mp_id, "current_mp": MP.query.get(mp_id)}
payload["header_labels"] = ["Team", "Provinzen", "Kolonialismus", "Druckerpresse",
"Globaler Handel", "Manufakturen", "Aufklärung", "Industrialisierung",
"erster Spielerkrieg-Sieger", "Globaler Handel-Sieger", "Gesamt"]
payload["num_columns"] = len(payload["header_labels"])
two_vp_province_ids = {382: "Damaskus", 227: "Lissabon", 333: "Majorca",
1765: "Sofia", 177: "Maine", 45: "Lübeck", 257: "Warschau", 295: "Moskau"}
one_vp_province_ids = {361: "Kairo", 223: "Granada", 151: "Konstantinopel",
341: "Tunis", 231: "Porto", 170: "Finistère", 183: "Paris", 50: "Berlin",
153: "Pest", 1756: "Bessarabien", 4142: "Ostjylland", 41: "Königsberg"}
free_provinces_one_vp = [x for x in one_vp_province_ids.values()]
free_provinces_two_vp = [x for x in two_vp_province_ids.values()]
teams = sorted(MP.query.get(mp_id).teams, key = lambda team: team.id)
team_names = []
for team in teams:
team_names.append("Team {}".format(team.id))
team_colors_hex = ["#ffffff"]*len(teams)
team_colors_hsl = [(0, 0, 100)]*len(teams)
team_ids = [team.id for team in teams]
payload["nation_info"] = zip(team_names, team_ids, team_colors_hex, team_colors_hsl)
data = {}
for team in teams:
data[team.id] = [0]*(len(payload["header_labels"])-2)
for institution, column in zip(institutions, range(1, 7)):
for team in teams:
if (result := VictoryPoint.query.filter_by(mp_id=mp_id, category=institution, team_id=team.id).first()):
data[team.id][column] = result.points
province_points_dict = {}
for team in teams:
if (vp := VictoryPoint.query.filter_by(mp_id=mp_id, team_id=team.id, category="provinces").first()):
vp.points = 0
province_points_dict[team.id] = vp
else:
vp = VictoryPoint(mp_id=mp_id, team_id=team.id,
category="provinces", points=0)
db.session.add(vp)
province_points_dict[team.id] = vp
team_province_dict = {team.id: [[], []] for team in teams}
for (id, name) in two_vp_province_ids.items():
        ProvinceData = NationSavegameProvinces.query.filter_by(
            savegame_id=savegame.id, province_id=id).first()
        owner = ProvinceData.nation_tag
for team in teams:
if owner in (team.team_tag1, team.team_tag2):
province_points_dict[team.id].points += 2
team_province_dict[team.id][0].append(name)
free_provinces_two_vp.remove(name)
for (id, name) in one_vp_province_ids.items():
        ProvinceData = NationSavegameProvinces.query.filter_by(
            savegame_id=savegame.id, province_id=id).first()
        owner = ProvinceData.nation_tag
for team in teams:
if owner in (team.team_tag1, team.team_tag2):
province_points_dict[team.id].points += 1
team_province_dict[team.id][1].append(name)
free_provinces_one_vp.remove(name)
db.session.commit()
payload["num_free_rows"] = max([ceil(len(free_provinces_one_vp)/3), ceil(len(free_provinces_two_vp)/3)])
payload["free_provinces_one_vp"] = free_provinces_one_vp
payload["free_provinces_two_vp"] = free_provinces_two_vp
payload["team_province_dict"] = team_province_dict
for team in teams:
data[team.id][0] = VictoryPoint.query.filter_by(
mp_id=mp_id, team_id=team.id, category="provinces").first().points
vp = VictoryPoint.query.filter_by(
mp_id=mp_id, team_id=team.id, category="first_player_war").first()
if vp:
data[team.id][-2] = vp.points
vp = VictoryPoint.query.filter_by(
mp_id=mp_id, team_id=team.id, category="global_trade_spawn").first()
if vp:
data[team.id][-1] = vp.points
data[team.id].append(sum(data[team.id]))
payload["data"] = data
return payload
def mp2_data(mp_id, savegame):
payload = {"mp_id": mp_id, "current_mp": MP.query.get(mp_id)}
payload["header_labels"] = ["Nation", "Basis", "Kriege", "Kolonialismus", "Druckerpresse", \
"Globaler Handel", "Manufakturen", "Aufklärung", "Industrialisierung", \
"erster Spielerkrieg-Sieger", "erste Weltumseglung", "Armee-Professionalität", \
"Großmacht", "Hegemonie", "Gesamt"]
payload["num_columns"] = len(payload["header_labels"])
data = {}
nation_tags = [x.tag for x in savegame.player_nations]
for tag in nation_tags:
data[tag] = [2] + [0]*(len(payload["header_labels"])-3)
institutions = ("colonialism", "printing_press", "global_trade", "manufactories", "enlightenment","industrialization")
for institution, column in zip(institutions, range(2,8)):
for tag in nation_tags:
if (result := VictoryPoints.query.filter_by(mp_id = mp_id, institution = institution, nation_tag = tag).first()):
data[tag][column] = result.victory_points
#erster Spielerkrieg
#data["D00"][8] = 1
#global_trade
data["D05"][4] += 2
#great_power
data["MPK"][11] = 1
#army_prof
data["D03"][10] = 2
#hegemony
data["MPK"][12] = 2
data["D05"][12] = 2
data["D08"][12] = 1
#wars
data["D03"][1] = 1
data["MPK"][1] = 1
data["D02"][1] = -1
data["D07"][1] = -1
for tag in data.keys():
data[tag].append(sum(data[tag]))
payload["data"] = data
return payload
def mp1_data(mp_id):
payload = {"mp_id": mp_id, "current_mp": MP.query.get(mp_id)}
payload["header_labels"] = ["Nation", "Basis", "Kriege", "Renaissance", "Kolonialismus", \
"Druckerpresse", "Globaler Handel", "Manufakturen", "Aufklärung", "Industrialisierung", "Gesamt"]
payload["num_columns"] = len(payload["header_labels"])
data = {}
data["SPA"] = [2,0,0,0,0,0,0,0,0]
data["FRA"] = [2,-2,0,0,0,0,0,0,0]
data["GBR"] = [2,-2,0,2,3,2,0,0,0]
data["NED"] = [2,2,0,0,0,1,1,3,0]
data["HAB"] = [2,4,0,0,0,0,3,1,0]
data["SWE"] = [2,-2,0,0,0,0,0,0,0]
data["PLC"] = [2,-1,0,0,0,0,0,1,0]
data["TUR"] = [1,-3,2,1,1,2,2,0,0]
data["RUS"] = [1,4,0,0,1,1,0,1,0]
for tag in data.keys():
data[tag].append(sum(data[tag]))
payload["data"] = data
return payload
| 39.089005 | 126 | 0.610501 |
4a1acff401cb9d73723a901bb73fd7ba8d8b90ad
| 1,039 |
py
|
Python
|
osdria/views/components/graph_view.py
|
soberleitner/osdriaApp
|
8b4638e6edc2d265afd09098cfc4a8db34403cce
|
[
"MIT"
] | null | null | null |
osdria/views/components/graph_view.py
|
soberleitner/osdriaApp
|
8b4638e6edc2d265afd09098cfc4a8db34403cce
|
[
"MIT"
] | 20 |
2019-01-17T10:31:10.000Z
|
2019-03-15T07:12:11.000Z
|
osdria/views/components/graph_view.py
|
soberleitner/osdriaApp
|
8b4638e6edc2d265afd09098cfc4a8db34403cce
|
[
"MIT"
] | null | null | null |
from PySide2.QtCharts import QtCharts
class GraphView(QtCharts.QChartView):
"""define functionality of chart view in dataset dialog"""
def __init__(self, parent):
super(GraphView, self).__init__(parent)
self._model = None
self._series = QtCharts.QLineSeries()
def set_model(self, model):
"""set dataset model for displaying in line chart"""
self._model = model
model.dataChanged.connect(self.reset_chart)
model.modelReset.connect(self.reset_chart)
chart = QtCharts.QChart()
self.setChart(chart)
def reset_chart(self, start_index=0, end_index=0):
self.chart().removeAllSeries()
self._series = QtCharts.QLineSeries()
for index, data in enumerate(self._model.retrieve_data()):
try:
self._series.append(index, float(data))
except ValueError:
pass
self.chart().addSeries(self._series)
self.chart().createDefaultAxes()
self.chart().legend().hide()
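# Minimal usage sketch (assumes a running Qt application and a dataset model
# exposing retrieve_data(), dataChanged and modelReset as used above; the names
# `parent_widget` and `dataset_model` are hypothetical):
#
#   view = GraphView(parent_widget)
#   view.set_model(dataset_model)
#   view.show()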
| 33.516129 | 66 | 0.636189 |
4a1ad04c01ff54bbd49a3161719dd38b92fa248d
| 2,135 |
py
|
Python
|
stream_viewer/widgets/control_panel.py
|
intheon/stream_viewer
|
386b9e27d5cd7e66eece0dc2e4977e917ef94877
|
[
"MIT"
] | 2 |
2022-01-07T11:38:48.000Z
|
2022-02-23T09:07:58.000Z
|
stream_viewer/widgets/control_panel.py
|
intheon/stream_viewer
|
386b9e27d5cd7e66eece0dc2e4977e917ef94877
|
[
"MIT"
] | null | null | null |
stream_viewer/widgets/control_panel.py
|
intheon/stream_viewer
|
386b9e27d5cd7e66eece0dc2e4977e917ef94877
|
[
"MIT"
] | 1 |
2022-02-23T09:08:06.000Z
|
2022-02-23T09:08:06.000Z
|
from qtpy import QtWidgets
from qtpy import QtCore
from stream_viewer.widgets.interface import IControlPanel
class GenericControlPanel(IControlPanel):
pass
class NoChansControlPanel(IControlPanel):
def reset_widgets(self, renderer):
super().reset_widgets(renderer)
# Disable a couple standard widgets that we can't use.
_tree = self.findChild(QtWidgets.QTreeWidget, name="Chans_TreeWidget")
_tree.setEnabled(False)
_tree.setVisible(False)
_checkbox = self.findChild(QtWidgets.QCheckBox, name="ShowNames_CheckBox")
_checkbox.setEnabled(False)
class HidableCtrlWrapWidget(QtWidgets.QWidget):
def __init__(self, control_panel, name="HidableCtrlWrapWidget", is_visible=False, **kwargs):
super().__init__(**kwargs)
self.setSizePolicy(QtWidgets.QSizePolicy.Maximum, QtWidgets.QSizePolicy.MinimumExpanding)
self.setObjectName(name)
# push button to show/hide everything in control_panel.
showhide_pb = QtWidgets.QPushButton()
showhide_pb.setObjectName("ShowHide_PushButton")
showhide_pb.clicked.connect(self.handle_showhide)
showhide_pb.setIcon(self.style().standardIcon(
QtWidgets.QStyle.SP_ArrowLeft if is_visible else QtWidgets.QStyle.SP_ArrowRight, None, self))
self._vis_toggle = is_visible
self._ctrl_panel = control_panel
self._ctrl_panel.setVisible(self._vis_toggle)
self.setLayout(QtWidgets.QVBoxLayout())
self.layout().addWidget(showhide_pb)
self.layout().addWidget(self._ctrl_panel)
@QtCore.Slot(bool)
def handle_showhide(self, checked):
self._ctrl_panel.setVisible(not self._vis_toggle)
# Update pushbutton icon
_pb = self.findChild(QtWidgets.QPushButton, "ShowHide_PushButton")
_pb.setIcon(self.style().standardIcon(
QtWidgets.QStyle.SP_ArrowRight if self._vis_toggle else QtWidgets.QStyle.SP_ArrowLeft,
None, self))
# Save state
self._vis_toggle = not self._vis_toggle
@property
def control_panel(self):
return self._ctrl_panel
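# Usage sketch (hypothetical names; assumes a running QApplication, a renderer
# object accepted by IControlPanel.reset_widgets, and a parent layout):
#
#   panel = NoChansControlPanel()
#   panel.reset_widgets(renderer)
#   layout.addWidget(HidableCtrlWrapWidget(panel, is_visible=True))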
| 37.45614 | 105 | 0.715222 |
4a1ad0a089356fd65e8c3e74d17ed3a4ef5c8399
| 1,705 |
py
|
Python
|
Experiment_2/src/NPBDAA_LM/Unigram_generator.py
|
RyoOzaki/myResearch
|
a8f8ec63e1b1802517b12db2b6f3e286a3984bcf
|
[
"MIT"
] | null | null | null |
Experiment_2/src/NPBDAA_LM/Unigram_generator.py
|
RyoOzaki/myResearch
|
a8f8ec63e1b1802517b12db2b6f3e286a3984bcf
|
[
"MIT"
] | null | null | null |
Experiment_2/src/NPBDAA_LM/Unigram_generator.py
|
RyoOzaki/myResearch
|
a8f8ec63e1b1802517b12db2b6f3e286a3984bcf
|
[
"MIT"
] | null | null | null |
import numpy as np
def make_unigram(sentences, depth):
cnt_matrix = np.zeros((depth, ))
words = sentences[sentences != -1]
for w in words:
cnt_matrix[int(w)] += 1
cnt_matrix /= cnt_matrix.sum()
return cnt_matrix
class Unigram_generator(object):
def __init__(self, sentences_file):
sentences_npz = np.load(sentences_file)
sentences = sentences_npz["sentences"]
depth = sentences_npz["depth"]
BOS_index = sentences_npz["BOS"]
EOS_index = sentences_npz["EOS"]
unigram = make_unigram(sentences, depth)
# merged_bigram /= merged_bigram.sum(axis=1, keepdims=True)
self.depth = depth
self.unigram = unigram
self.BOS_index = BOS_index
self.EOS_index = EOS_index
def generate(self, size=1, unique=False):
sentences = []
if unique:
i = 0
while i < size:
wrd = self.BOS_index
snt = [wrd, ]
while wrd != self.EOS_index:
wrd = np.random.choice(self.depth, p=self.unigram)
if wrd == self.BOS_index:
continue
snt.append(wrd)
if snt[1:-1] in sentences or len(snt) <= 2:
continue
sentences.append(snt[1:-1])
i += 1
else:
for s in range(size):
wrd = self.BOS_index
snt = [wrd, ]
while wrd != self.EOS_index:
wrd = np.random.choice(self.depth, p=self.unigram)
snt.append(wrd)
sentences.append(snt[1:-1])
return sentences
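if __name__ == "__main__":
    # Minimal usage sketch: "sentences.npz" is an assumed example path; the
    # file only has to provide the "sentences", "depth", "BOS" and "EOS"
    # arrays read by __init__ above.
    generator = Unigram_generator("sentences.npz")
    for sentence in generator.generate(size=3, unique=True):
        print(sentence)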
| 31.574074 | 70 | 0.520235 |
4a1ad124e8ed14f85252eb714f31ef6e4459c105
| 15,473 |
py
|
Python
|
tests/api/test_tokens.py
|
dsg-bielefeld/slurk
|
e3e86fbd6bdfee70d50644a03564da629a37c75e
|
[
"BSD-3-Clause"
] | 2 |
2019-01-21T02:44:50.000Z
|
2019-03-26T20:54:38.000Z
|
tests/api/test_tokens.py
|
dsg-bielefeld/slurk
|
e3e86fbd6bdfee70d50644a03564da629a37c75e
|
[
"BSD-3-Clause"
] | 44 |
2018-09-30T22:09:18.000Z
|
2019-04-02T13:09:25.000Z
|
tests/api/test_tokens.py
|
dsg-bielefeld/slurk
|
e3e86fbd6bdfee70d50644a03564da629a37c75e
|
[
"BSD-3-Clause"
] | 8 |
2018-09-30T21:16:14.000Z
|
2018-12-13T14:50:40.000Z
|
# -*- coding: utf-8 -*-
"""Test requests to the `tokens` table."""
from http import HTTPStatus
import json
import os
import pytest
from .. import parse_error
from tests.api import InvalidWithEtagTemplate, RequestOptionsTemplate
PREFIX = f'{__name__.replace(".", os.sep)}.py'
class TokensTable:
@property
def table_name(self):
return "tokens"
class TestRequestOptions(TokensTable, RequestOptionsTemplate):
pass
@pytest.mark.depends(
on=[
f"{PREFIX}::TestRequestOptions::test_request_option[GET]",
f"{PREFIX}::TestPostValid",
]
)
class TestGetValid:
def test_valid_request(self, client, tokens):
response = client.get("/slurk/api/tokens")
assert response.status_code == HTTPStatus.OK, parse_error(response)
# check that the posted table instance is included
def retr_by_id(inst):
return inst["id"] == tokens.json["id"]
retr_inst = next(filter(retr_by_id, response.json), None)
assert retr_inst == tokens.json
# check that the `get` request did not alter the database
response = client.get(
"/slurk/api/tokens", headers={"If-None-Match": response.headers["ETag"]}
)
assert response.status_code == HTTPStatus.NOT_MODIFIED
@pytest.mark.depends(
on=[
f"{PREFIX}::TestRequestOptions::test_request_option[POST]",
"tests/api/test_permissions.py::TestPostValid",
"tests/api/test_tasks.py::TestPostValid",
"tests/api/test_rooms.py::TestPostValid",
]
)
class TestPostValid:
REQUEST_CONTENT = [
{"json": {"permissions_id": -1}},
{"json": {"permissions_id": -1, "registrations_left": 3, "task_id": -1}},
{"json": {"permissions_id": -1, "room_id": -1, "task_id": -1}},
{
"data": {"permissions_id": -1},
"headers": {"Content-Type": "application/json"},
},
]
@pytest.mark.parametrize("content", REQUEST_CONTENT)
def test_valid_request(self, client, content, permissions, rooms, tasks):
# replace placeholder ids with valid ones
for key in content:
if content[key].get("permissions_id") == -1:
content[key]["permissions_id"] = permissions.json["id"]
if content[key].get("room_id") == -1:
content[key]["room_id"] = rooms.json["id"]
if content[key].get("task_id") == -1:
content[key]["task_id"] = tasks.json["id"]
data = content.get("json", {}) or content.get("data", {})
# convert dictionary to json
if "data" in content:
content["data"] = json.dumps(content["data"])
response = client.post("/slurk/api/tokens", **content)
assert response.status_code == HTTPStatus.CREATED, parse_error(response)
token = response.json
assert token["date_modified"] is None
assert token["permissions_id"] == data.get("permissions_id")
assert token["registrations_left"] == data.get("registrations_left", 1)
assert token["task_id"] == data.get("task_id", None)
assert token["room_id"] == data.get("room_id", None)
@pytest.mark.depends(
on=[
f"{PREFIX}::TestRequestOptions::test_request_option[POST]",
"tests/api/test_permissions.py::TestPostValid",
]
)
class TestPostInvalid:
REQUEST_CONTENT = [
({"json": {}}, HTTPStatus.UNPROCESSABLE_ENTITY),
(
{"json": {"permissions_id": -1, "registrations_left": -2}},
HTTPStatus.UNPROCESSABLE_ENTITY,
),
(
{"json": {"permissions_id": -1, "room_id": -42}},
HTTPStatus.UNPROCESSABLE_ENTITY,
),
(
{"json": {"permissions_id": -1, "task_id": "Test Task"}},
HTTPStatus.UNPROCESSABLE_ENTITY,
),
({"data": {"permissions_id": -1}}, HTTPStatus.UNSUPPORTED_MEDIA_TYPE),
]
@pytest.mark.parametrize("content, status", REQUEST_CONTENT)
def test_invalid_request(self, client, content, status, permissions):
# replace placeholder ids with valid ones
for key in content:
if (
"permissions_id" in content[key]
and content[key]["permissions_id"] == -1
):
content[key]["permissions_id"] = permissions.json["id"]
response = client.post("/slurk/api/tokens", **content)
assert response.status_code == status, parse_error(response)
@pytest.mark.depends(on=["tests/api/test_tokens.py::TestPostValid"])
def test_unauthorized_access(self, client, tokens, permissions):
response = client.post(
"/slurk/api/tokens",
json={"permissions_id": permissions.json["id"]},
headers={"Authorization": f'Bearer {tokens.json["id"]}'},
)
assert response.status_code == HTTPStatus.UNAUTHORIZED, parse_error(response)
def test_unauthenticated_access(self, client, permissions):
response = client.post(
"/slurk/api/tokens",
json={"permissions_id": permissions.json["id"]},
headers={"Authorization": "Bearer invalid_token"},
)
assert response.status_code == HTTPStatus.UNAUTHORIZED, parse_error(response)
@pytest.mark.depends(
on=[
f"{PREFIX}::TestRequestOptions::test_request_option_with_id[GET]",
f"{PREFIX}::TestPostValid",
]
)
class TestGetIdValid:
def test_valid_request(self, client, tokens):
response = client.get(f'/slurk/api/tokens/{tokens.json["id"]}')
assert response.status_code == HTTPStatus.OK, parse_error(response)
assert response.json == tokens.json
# check that the `get` request did not alter the database
response = client.get(
f'/slurk/api/tokens/{tokens.json["id"]}',
headers={"If-None-Match": response.headers["ETag"]},
)
assert response.status_code == HTTPStatus.NOT_MODIFIED
@pytest.mark.depends(
on=[f"{PREFIX}::TestRequestOptions::test_request_option_with_id[GET]"]
)
class TestGetIdInvalid:
def test_not_existing(self, client):
response = client.get("/slurk/api/tokens/invalid_id")
assert response.status_code == HTTPStatus.NOT_FOUND, parse_error(response)
@pytest.mark.depends(
on=[
f"{PREFIX}::TestRequestOptions::test_request_option_with_id[PUT]",
f"{PREFIX}::TestPostValid",
"tests/api/test_permissions.py::TestPostValid",
"tests/api/test_tasks.py::TestPostValid",
"tests/api/test_rooms.py::TestPostValid",
]
)
class TestPutValid:
REQUEST_CONTENT = [
{"json": {"permissions_id": -1, "registrations_left": -1}},
{"json": {"permissions_id": -1, "registrations_left": 5, "room_id": -1}},
{
"json": {
"permissions_id": -1,
"registrations_left": 0,
"room_id": -1,
"task_id": -1,
}
},
{
"data": {"permissions_id": -1, "registrations_left": 1},
"headers": {"Content-Type": "application/json"},
},
]
@pytest.mark.parametrize("content", REQUEST_CONTENT)
def test_valid_request(self, client, tokens, content, permissions, rooms, tasks):
# replace placeholder ids with valid ones
for key in content:
if content[key].get("permissions_id") == -1:
content[key]["permissions_id"] = permissions.json["id"]
if content[key].get("room_id") == -1:
content[key]["room_id"] = rooms.json["id"]
if content[key].get("task_id") == -1:
content[key]["task_id"] = tasks.json["id"]
data = content.get("json", {}) or content.get("data", {})
# serialize data content to json
if "data" in content:
content["data"] = json.dumps(content["data"])
# set the etag
content.setdefault("headers", {}).update({"If-Match": tokens.headers["ETag"]})
response = client.put(f'/slurk/api/tokens/{tokens.json["id"]}', **content)
assert response.status_code == HTTPStatus.OK, parse_error(response)
new_tokens = response.json
# check that a modification was performed
assert new_tokens["id"] == tokens.json["id"]
assert new_tokens["date_created"] == tokens.json["date_created"]
assert response.json["date_modified"] is not None
assert response.headers["ETag"] != tokens.headers["ETag"]
assert new_tokens["permissions_id"] == data.get("permissions_id")
assert new_tokens["registrations_left"] == data.get("registrations_left", 1)
assert new_tokens["task_id"] == data.get("task_id", None)
assert new_tokens["room_id"] == data.get("room_id", None)
@pytest.mark.depends(
on=[
f"{PREFIX}::TestRequestOptions::test_request_option_with_id[PUT]",
f"{PREFIX}::TestPostValid",
"tests/api/test_permissions.py::TestPostValid",
]
)
class TestPutInvalid(TokensTable, InvalidWithEtagTemplate):
@property
def request_method(self):
return "put"
def json(self, request):
permissions = request.getfixturevalue("permissions")
return {"json": {"permissions_id": permissions.json["id"]}}
REQUEST_CONTENT = [
({"json": {"permissions_id": None}}, HTTPStatus.UNPROCESSABLE_ENTITY),
(
{"json": {"permissions_id": -1, "registrations_left": 2**63}},
HTTPStatus.UNPROCESSABLE_ENTITY,
),
(
{"json": {"permissions_id": -1, "task_id": -42}},
HTTPStatus.UNPROCESSABLE_ENTITY,
),
]
@pytest.mark.parametrize("content, status", REQUEST_CONTENT)
def test_invalid_request(self, client, tokens, content, status, permissions):
# replace placeholder ids with valid ones
for key in content:
if content[key].get("permissions_id") == -1:
content[key]["permissions_id"] = permissions.json["id"]
# set the etag
content.setdefault("headers", {}).update({"If-Match": tokens.headers["ETag"]})
response = client.put(f'/slurk/api/tokens/{tokens.json["id"]}', **content)
assert response.status_code == status, parse_error(response)
@pytest.mark.depends(
on=[
f"{PREFIX}::TestRequestOptions::test_request_option_with_id[DELETE]",
f"{PREFIX}::TestPostValid",
]
)
class TestDeleteValid:
def test_valid_request(self, client, tokens):
response = client.delete(
f'/slurk/api/tokens/{tokens.json["id"]}',
headers={"If-Match": tokens.headers["ETag"]},
)
assert response.status_code == HTTPStatus.NO_CONTENT, parse_error(response)
@pytest.mark.depends(
on=[
f"{PREFIX}::TestRequestOptions::test_request_option_with_id[DELETE]",
f"{PREFIX}::TestPostValid",
]
)
class TestDeleteInvalid(TokensTable, InvalidWithEtagTemplate):
@property
def request_method(self):
return "delete"
@pytest.mark.depends(
on=[
f"{PREFIX}::TestGetIdValid",
# TODO 'tests/api/test_users.py::TestPostValid',
# TODO 'tests/api/test_users.py::TestDeleteValid'
]
)
def test_deletion_of_token_in_user(self, client, tokens):
# create user that uses the token
user = client.post(
"/slurk/api/users",
json={"name": "Test User", "token_id": tokens.json["id"]},
)
token_uri = f'/slurk/api/tokens/{tokens.json["id"]}'
# the deletion of a tokens entry that is in use should fail
response = client.delete(
token_uri, headers={"If-Match": client.head(token_uri).headers["ETag"]}
)
assert response.status_code == HTTPStatus.UNPROCESSABLE_ENTITY, parse_error(
response
)
# free the token entry by deleting the users
client.delete(
f'/slurk/api/users/{user.json["id"]}',
headers={"If-Match": user.headers["ETag"]},
)
# now one should be able to delete the token
response = client.delete(
token_uri, headers={"If-Match": client.head(token_uri).headers["ETag"]}
)
assert response.status_code == HTTPStatus.NO_CONTENT, parse_error(response)
@pytest.mark.depends(
on=[
f"{PREFIX}::TestRequestOptions::test_request_option_with_id[PATCH]",
f"{PREFIX}::TestPostValid",
"tests/api/test_tasks.py::TestPostValid",
]
)
class TestPatchValid:
REQUEST_CONTENT = [
{"json": {"registrations_left": 0}},
{"json": {"registrations_left": 2**63 - 1}},
{"json": {"room_id": None}},
{"json": {"task_id": -1, "room_id": None}},
]
@pytest.mark.parametrize("content", REQUEST_CONTENT)
def test_valid_request(self, client, tokens, content, tasks):
# replace placeholder ids with valid ones
for key in content:
if content[key].get("task_id") == -1:
content[key]["task_id"] = tasks.json["id"]
data = content.get("json", {}) or content.get("data", {})
# set the etag
content.setdefault("headers", {}).update({"If-Match": tokens.headers["ETag"]})
response = client.patch(f'/slurk/api/tokens/{tokens.json["id"]}', **content)
assert response.status_code == HTTPStatus.OK, parse_error(response)
new_tokens = response.json
# check that a modification was performed without creating new table entry
assert new_tokens["id"] == tokens.json["id"]
assert new_tokens["date_created"] == tokens.json["date_created"]
assert response.json["date_modified"] is not None
assert response.headers["ETag"] != tokens.headers["ETag"]
expected_permissions_id = data.get(
"permissions_id", tokens.json["permissions_id"]
)
assert new_tokens["permissions_id"] == expected_permissions_id
expected_registrations_left = data.get(
"registrations_left", tokens.json["registrations_left"]
)
assert new_tokens["registrations_left"] == expected_registrations_left
expected_task_id = data.get("task_id", tokens.json["task_id"])
assert new_tokens["task_id"] == expected_task_id
expected_room_id = data.get("room_id", tokens.json["room_id"])
assert new_tokens["room_id"] == expected_room_id
@pytest.mark.depends(
on=[
f"{PREFIX}::TestRequestOptions::test_request_option_with_id[PATCH]",
f"{PREFIX}::TestPostValid",
]
)
class TestPatchInvalid(TokensTable, InvalidWithEtagTemplate):
@property
def request_method(self):
return "patch"
REQUEST_CONTENT = [
({"json": {"permissions_id": None}}, HTTPStatus.UNPROCESSABLE_ENTITY),
({"json": {"room_id": -42}}, HTTPStatus.UNPROCESSABLE_ENTITY),
({"json": {"registrations_left": -2}}, HTTPStatus.UNPROCESSABLE_ENTITY),
({"json": {"id": 2}}, HTTPStatus.UNPROCESSABLE_ENTITY),
]
@pytest.mark.parametrize("content, status", REQUEST_CONTENT)
def test_invalid_request(self, client, tokens, content, status):
# set the etag
content.setdefault("headers", {}).update({"If-Match": tokens.headers["ETag"]})
response = client.patch(f'/slurk/api/tokens/{tokens.json["id"]}', **content)
assert response.status_code == status, parse_error(response)
| 36.236534 | 86 | 0.614425 |
4a1ad37b9e22ce66f2a23ddde73e15c78dfad955
| 961 |
py
|
Python
|
solaredge/api/equipment.py
|
chrisjohnson00/pysolaredge
|
c8fa314ed59109c0b3032f88e5ccdcd16da41cc3
|
[
"BSD-2-Clause"
] | 8 |
2020-05-25T09:35:28.000Z
|
2021-06-08T04:00:40.000Z
|
solaredge/api/equipment.py
|
chrisjohnson00/pysolaredge
|
c8fa314ed59109c0b3032f88e5ccdcd16da41cc3
|
[
"BSD-2-Clause"
] | 2 |
2020-11-23T21:37:52.000Z
|
2020-12-04T21:35:41.000Z
|
solaredge/api/equipment.py
|
chrisjohnson00/pysolaredge
|
c8fa314ed59109c0b3032f88e5ccdcd16da41cc3
|
[
"BSD-2-Clause"
] | 6 |
2020-06-28T02:50:32.000Z
|
2022-01-01T23:02:03.000Z
|
import requests
from ..api.error import IdentifierError
BASE_URL = 'https://monitoringapi.solaredge.com'
class Equipment(object):
"""
    Object for getting API details about equipment related to a site
"""
def __init__(self, client):
self.client = client
def get_site_equipment(self, site_id):
"""
        Returns the equipment for a specific site.
Parameters:
site_id (int): the ID of a site location (can be fetched from the get_sites function)
Returns:
response (JSON): a JSON dictionary containing the equipment data
"""
if not site_id:
raise IdentifierError("This API call needs to have a site_id.")
api_endpoint = '/equipment/%s/list' % site_id
full_api_url = BASE_URL + api_endpoint
response = requests.get(full_api_url, params={'api_key': self.client.get_api_key()})
return response.json()
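# Usage sketch (the client only needs the get_api_key() method used above; the
# site id is a made-up example value):
#
#   equipment_api = Equipment(client)
#   inverters = equipment_api.get_site_equipment(123456)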
| 34.321429 | 101 | 0.635796 |
4a1ad3ab563f45e09492227f8d05547c61fba920
| 670 |
py
|
Python
|
AppServer/lib/django-1.4/tests/modeltests/validation/__init__.py
|
loftwah/appscale
|
586fc1347ebc743d7a632de698f4dbfb09ae38d6
|
[
"Apache-2.0"
] | 790 |
2015-01-03T02:13:39.000Z
|
2020-05-10T19:53:57.000Z
|
AppServer/lib/django-1.4/tests/modeltests/validation/__init__.py
|
nlake44/appscale
|
6944af660ca4cb772c9b6c2332ab28e5ef4d849f
|
[
"Apache-2.0"
] | 1,361 |
2015-01-08T23:09:40.000Z
|
2020-04-14T00:03:04.000Z
|
AppServer/lib/django-1.4/tests/modeltests/validation/__init__.py
|
nlake44/appscale
|
6944af660ca4cb772c9b6c2332ab28e5ef4d849f
|
[
"Apache-2.0"
] | 155 |
2015-01-08T22:59:31.000Z
|
2020-04-08T08:01:53.000Z
|
from __future__ import with_statement
from django.core.exceptions import ValidationError
from django.test import TestCase
class ValidationTestCase(TestCase):
def assertFailsValidation(self, clean, failed_fields):
with self.assertRaises(ValidationError) as cm:
clean()
self.assertEqual(sorted(failed_fields), sorted(cm.exception.message_dict))
def assertFieldFailsValidationWithMessage(self, clean, field_name, message):
with self.assertRaises(ValidationError) as cm:
clean()
self.assertIn(field_name, cm.exception.message_dict)
self.assertEqual(message, cm.exception.message_dict[field_name])
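# Usage sketch (hypothetical model and test case, shown only to illustrate the
# helpers above):
#
#   class ArticleValidationTests(ValidationTestCase):
#       def test_missing_title(self):
#           article = Article(title='')
#           self.assertFailsValidation(article.full_clean, ['title'])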
| 35.263158 | 82 | 0.747761 |
4a1ad3cee46df7665ee9c59b1a293cf660cdd4a9
| 544 |
py
|
Python
|
src/models/user.py
|
FernandoZnga/flask-api-setup
|
220ad881e61432d017a1eb34fead461e2042309d
|
[
"MIT"
] | null | null | null |
src/models/user.py
|
FernandoZnga/flask-api-setup
|
220ad881e61432d017a1eb34fead461e2042309d
|
[
"MIT"
] | null | null | null |
src/models/user.py
|
FernandoZnga/flask-api-setup
|
220ad881e61432d017a1eb34fead461e2042309d
|
[
"MIT"
] | null | null | null |
"""
Define the User model
"""
from . import db
from .abc import BaseModel
class User(db.Model, BaseModel):
""" The User model """
__tablename__ = 'user'
first_name = db.Column(db.String(300), primary_key=True)
last_name = db.Column(db.String(300), primary_key=True)
# The age of our user
age = db.Column(db.Integer, nullable=True)
def __init__(self, first_name, last_name, age=None):
""" Create a new User """
self.first_name = first_name
self.last_name = last_name
self.age = age
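# Usage sketch (assumes an application context with the db session configured):
#
#   user = User(first_name='Ada', last_name='Lovelace', age=36)
#   db.session.add(user)
#   db.session.commit()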
| 25.904762 | 60 | 0.645221 |
4a1ad448877fab50353ba4d6dcd89ac2c9834cdf
| 37,207 |
py
|
Python
|
tes/models.py
|
gurza/alfastrah-tes-python-sdk
|
092c01e3b58f5f866037241e94a74fc9bab7026e
|
[
"MIT"
] | null | null | null |
tes/models.py
|
gurza/alfastrah-tes-python-sdk
|
092c01e3b58f5f866037241e94a74fc9bab7026e
|
[
"MIT"
] | null | null | null |
tes/models.py
|
gurza/alfastrah-tes-python-sdk
|
092c01e3b58f5f866037241e94a74fc9bab7026e
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
tes.models
~~~~~~~~~~
This module contains the primary objects.
"""
import datetime
import numbers
import sys
import typing
from enum import Enum
from decimal import Decimal
PRODUCT_TYPES = ['AIR']
class AcquisitionChannel(Enum):
"""Acquisition (data collection) channel."""
DESKTOP = 1
MOBILE_SITE = 2
MOBILE_APP = 3
CROSS_SALE = 4
class CancellationType(Enum):
"""Cancellation type."""
TRIP_CANCELLATION = 1
TECH_CANCELLATION = 2
INSURANCE_CANCELLATION = 3
class DocumentType(Enum):
"""Document type."""
PASSPORT = 1
INTERNATIONAL = 2
IDCARD = 3
MILITARY = 4
FOREIGNER = 5
JURIDICAL = 6
ERGUL = 7
DRIVER_LICENCE = 8
BIRTHCERTIFICATE = 9
class FareType(Enum):
"""Fare type (refundability)."""
REFUNDABLE = 1
NO_RETURN = 2
class FlightDirection(Enum):
"""Flight direction."""
OW = 1 # One way
RT = 2 # Round trip
class Gender(Enum):
"""Gender."""
MALE = 1
FEMALE = 2
class LuggageType(Enum):
"""Luggage type."""
STANDARD = 1
class Opt(Enum):
"""Option state."""
OPT_IN = 1
OPT_OUT = 2
SMART_OPT_IN = 3
SMART_OPT_OUT = 4
class PhoneType(Enum):
"""Phone type"""
MOBILE = 1
HOME = 2
OFFICE = 3
OTHER = 4
class PolicyStatus(Enum):
"""Policy status."""
ISSUING = 1
CONFIRMED = 2
CANCELLED = 3
DELETED = 4
class SellingPage(Enum):
"""Selling page."""
CROSS_SALE = 1
BOOKING_EDITION = 2
WEB_CHECK_IN = 3
STANDALONE = 4
class ServiceClass(Enum):
"""Service class."""
ECONOM = 1
COMFORT = 2
BUSINESS = 3
class SportKind(Enum):
"""Insured sport."""
COMMON_SPORT = 1
DANGEROUS_SPORT = 2
class RiskType(Enum):
"""Risk type."""
RISK_MR = 1
RISK_NSP = 2
RISK_NS = 3
RISK_FLIGHT_DELAYS_PERSONAL = 4
RISK_SPORT = 5
RISK_LOSS_LUGGAGE_PERSONAL = 6
RISK_DELAYED_LUGGAGE_PERSONAL = 7
RISK_GO = 8
RISK_LUGGAGE_MASSIVE = 9
RISK_FLIGHT_DELAYS_MASSIVE = 10
RISK_NR = 11
RISK_PROPERTY = 12
RISK_EVENT = 13
RISK_LOSS_RESTORE_DOCUMENTS = 14
RISK_CL = 15
RISK_LUGGAGE_DAMAGE = 16
RISK_COVID = 17
class TravelType(Enum):
"""Travel type."""
SINGLE = 1
MULTIPLE = 2
class BaseModel(object):
"""Base model."""
__attrs__ = {}
def __init__(self, *args, **kwargs):
pass
def encode(self):
"""Translates a class instance into a string in JSON format.
:return: JSON representation of a class instance.
:rtype: str
"""
json = dict()
if not hasattr(self, '__attrs__') or not isinstance(self.__attrs__, dict):
return json
for attr in self.__attrs__.keys():
if not hasattr(self, attr) or self.__getattribute__(attr) is None:
continue
if isinstance(self.__getattribute__(attr), Enum):
json[attr] = self.__getattribute__(attr).name
else:
json[attr] = self.__getattribute__(attr)
return json
@classmethod
def decode(cls, dct):
"""Makes a class instance from the given dict.
:param dct: JSON representation of a class instance.
:type dct: dict
:return: Class instance.
"""
def cast(json_value, target_type):
if json_value is None:
return None
if target_type == Decimal:
return Decimal(json_value)
if isinstance(json_value, bool) or isinstance(json_value, numbers.Number):
return json_value
if isinstance(json_value, str if sys.version_info[0] == 3 else basestring):
if target_type == datetime.date:
return datetime.datetime.strptime(json_value, '%Y-%m-%d').date()
if target_type == datetime.datetime:
return datetime.datetime.strptime(json_value, '%Y-%m-%dT%H:%M:%S')
if issubclass(target_type, Enum):
return target_type[json_value]
return json_value
if issubclass(target_type, BaseModel):
return target_type.decode(json_value)
raise NotImplementedError
def get_list_args(tp):
"""get_list_args: typing.List[int] -> (<class 'int'>,)"""
if sys.version_info[:3] >= (3, 7, 0):
if isinstance(tp, typing._GenericAlias) and tp.__origin__ == list:
return tp.__args__
else:
if isinstance(tp, typing.GenericMeta) and tp.__origin__ == typing.List:
return tp.__args__
return ()
params = {}
for attr_name, attr_type in cls.__attrs__.items():
type_args = get_list_args(attr_type)
if len(type_args):
params[attr_name] = [cast(o, type_args[0]) for o in dct.get(attr_name, [])]
else:
params[attr_name] = cast(dct.get(attr_name), attr_type)
return cls(**params)
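# Round-trip sketch for BaseModel.encode()/decode() (illustrative values; the
# Amount model is defined further below in this module):
#
#   amount = Amount(value=Decimal('35000'), currency='RUB')
#   amount.encode()         # {'value': Decimal('35000'), 'currency': 'RUB'}
#   Amount.decode({'value': '35000', 'currency': 'RUB'})
#   # decode() casts each field to the type declared in __attrs__
#   # (Decimal, date/datetime, Enum, nested BaseModel, or typed lists).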
class ApiRequest:
"""API request base class."""
class ApiProblem(BaseModel):
"""Description of the error that occurred while handling your request."""
__attrs__ = {
'title': str,
'status': str,
'detail': str,
}
def __init__(self, title=None, status=None, detail=None):
"""Init.
:param title: Short error description, e.g. 'POLICY_NOT_FOUND'.
:type title: str or None
:param status: Status code, e.g. 'PNF_002'.
:type status: str or None
:param detail: Full error description,
e.g. 'Policy with id 12345 not found or does not belong to agent'.
:type detail: str or None
"""
BaseModel.__init__(self)
self.title = title
self.status = status
self.detail = detail
class InsuranceProduct(BaseModel):
"""Insurance product."""
# __attrs__ = [
# 'code', 'type', 'description', 'currency',
# ]
__attrs__ = {
'code': str,
'type': str,
'description': str,
'currency': str,
}
def __init__(self, code, type=None, description=None, currency=None):
"""Init.
:param code: Code of insurance product, e.g. 'ON_ANTICOVID_AVIA_1'.
:type code: str
        :param type: Type of insurance product, one of ``PRODUCT_TYPES``, e.g. 'AIR'.
:type type: str or None
:param description: Description of insurance product, e.g. 'Страховка от риска медицинских расходов'.
:type description: str or None
:param currency: (obsolete) Currency code of the product, ISO 4217, e.g. 'RUB'.
:type currency: str or None
"""
BaseModel.__init__(self)
self.code = code
self.type = type
self.description = description
self.currency = currency
class Amount(BaseModel):
"""Amount."""
__attrs__ = {
'value': Decimal, 'currency': str,
}
def __init__(self, value, currency=None):
"""Init.
:param value: Value, e.g. 35000.
:type value: Decimal
:param currency: Currency code, ISO 4217, e.g. 'RUB'.
:type currency: str or None
"""
BaseModel.__init__(self)
self.value = value
self.currency = currency
class Operator(BaseModel):
"""Operator."""
__attrs__ = {
'code': str,
}
def __init__(self, code):
"""Init.
:param code: Operator code.
:type code: str
"""
BaseModel.__init__(self)
self.code = code
class SubAgent(BaseModel):
"""Subagent."""
__attrs__ = {
'code': str,
}
def __init__(self, code):
"""Init.
:param code: Subagent code.
:type code: str
"""
BaseModel.__init__(self)
self.code = code
class Agent(BaseModel):
"""Agent."""
__attrs__ = {
'code': str, 'sub': SubAgent,
}
def __init__(self, code, sub=None):
"""Init.
:param code: Agent code, e.g. 'TestTravelFlightAgent'.
:type code: str
:param sub: Subagent.
The subagent code is used to split sales across different channels or divisions within the same agent.
:type sub: SubAgent or None
"""
BaseModel.__init__(self)
self.code = code
self.sub = sub
class Cancellation(BaseModel):
"""Policy cancellation."""
__attrs__ = {
'reason': str, 'amount': Amount,
}
def __init__(self, reason=None, amount=None):
"""Init.
:param reason: Reason for cancellation of the insurance policy.
:type reason: str or None
:param amount: Cancellation (refund) amount.
:type amount: Amount or None
"""
BaseModel.__init__(self)
self.reason = reason
self.amount = amount
class ServiceCompany:
pass
class Phone(BaseModel):
"""Phone."""
__attrs__ = {
'number': str, 'type': PhoneType,
}
def __init__(self, number=None, type=None):
"""Init.
:param number: Phone number, e.g. '89101234567'.
:type number: str or None
:param type: Phone type.
:type type: PhoneType or None
"""
BaseModel.__init__(self)
self.number = number
self.type = type
class Document(BaseModel):
"""Document ID."""
__attrs__ = {
'type': DocumentType, 'number': str, 'country': str,
}
def __init__(self, type=None, number=None, country=None):
"""Init.
:param type: Document type.
:type type: DocumentType or None
:param number: Document number, e.g. '2901178356'.
:type number: str or None
:param country: Code of the country where the document was issued, ISO 3166-1, e.g. 'RU'.
:type country: str or None
"""
BaseModel.__init__(self)
self.type = type
self.number = number
self.country = country
class Ticket(BaseModel):
"""Ticket."""
__attrs__ = {
'number': str, 'price': Amount, 'issue_date': datetime.date,
}
def __init__(self, number=None, price=None, issue_date=None):
"""Init.
:param number: Ticket number, e.g. '5723574320584'.
:type number: str or None
:param price: Ticket price.
:type price: Amount or None
:param issue_date: Issue date.
:type issue_date: datetime.date or None
"""
BaseModel.__init__(self)
self.number = number
self.price = price
self.issue_date = issue_date
class Risk(BaseModel):
"""Risk."""
__attrs__ = {
'type': RiskType, 'coverage': Amount, 'franchise': Amount,
}
def __init__(self, type=None, coverage=None, franchise=None):
"""Init.
:param type: Risk type.
:type type: RiskType or None
:param coverage: Insurance amount.
:type coverage: Amount or None
:param franchise: Franchise amount.
:type franchise: Amount or None
"""
BaseModel.__init__(self)
self.type = type
self.coverage = coverage
self.franchise = franchise
class Person(BaseModel):
"""Person."""
__attrs__ = {
'first_name': str, 'last_name': str, 'patronymic': str,
'nick_name': str, 'gender': Gender, 'birth_date': datetime.date, 'email': str,
'address': str, 'infant': bool, 'nationality': str, 'id_card': str,
'phone': Phone, 'document': Document, 'ticket': Ticket, 'risks': typing.List[Risk],
}
def __init__(self, first_name=None, last_name=None, patronymic=None,
nick_name=None, gender=None, birth_date=None, email=None,
address=None, infant=None, nationality=None, id_card=None,
phone=None, document=None, ticket=None, risks=None):
"""Init.
:param first_name: First name, e.g. 'Федор'.
:type first_name: str or None
:param last_name: Last name, e.g. 'Васильев'.
:type last_name: str or None
:param patronymic: Patronymic, e.g. 'Иванович'.
:type patronymic: str or None
:param nick_name: Nick, e.g. 'Васильев Федор Иванович'.
:type nick_name: str or None
:param gender: Gender.
:type gender: Gender or None
:param birth_date: Birth date.
:type birth_date: datetime.date
:param email: Email, e.g. 'fedor.vasilyev@email.com'.
:type email: str or None
:param address: Address, e.g. 'г. Москва, ул. Иванова, д. 4, кв. 198'.
:type address: str or None
:param infant: True if the person is an infant.
:type infant: bool or None
:param nationality: Code of country, ISO 3166-1, e.g. 'RU'.
:type nationality: str or None
:param id_card: Number of additional document ID, e.g. '5456876321656'.
:type id_card: str or None
:param phone: Contact phone.
:type phone: Phone or None
:param document: Document ID.
:type document: Document or None
:param ticket: Ticket information.
:type ticket: Ticket or None
:param risks: Information about risks.
:type risks: list[Risk] or None
"""
BaseModel.__init__(self)
self.first_name = first_name
self.last_name = last_name
self.patronymic = patronymic
self.nick_name = nick_name
self.gender = gender
self.birth_date = birth_date
self.email = email
self.address = address
self.infant = infant
self.nationality = nationality
self.id_card = id_card
self.phone = phone
self.document = document
self.ticket = ticket
self.risks = risks if risks is not None else []
class Point(BaseModel):
"""Departure or arrival point."""
__attrs__ = {
'date': datetime.datetime, 'point': str, 'country': str,
}
def __init__(self, date=None, point=None, country=None):
"""Init.
:param date: Datetime of departure/arrival.
:type date: datetime.datetime or None
:param point: Code of departure/arrival point, e.g. 'SVO'.
:type point: str or None
:param country: Code of country, ISO 3166-1, e.g. 'RU'.
:type country: str or None
"""
BaseModel.__init__(self)
self.date = date
self.point = point
self.country = country
class Segment(BaseModel):
"""Travel segment."""
__attrs__ = {
'transport_operator_code': str, 'route_number': str, 'service_class': ServiceClass,
'connection_time': int, 'departure': Point, 'arrival': Point, 'place_number': str,
'car_number': str, 'car_type': str, 'connecting_flight': bool, 'flight_direction': FlightDirection,
}
def __init__(self, transport_operator_code=None, route_number=None, service_class=None,
connection_time=None, departure=None, arrival=None, place_number=None,
car_number=None, car_type=None, connecting_flight=None, flight_direction=None):
"""Init.
:param transport_operator_code: Carrier code, e.g. 'SU'.
:type transport_operator_code: str or None
:param route_number: Route number (flight number, train number, etc), e.g. '1490'.
:type route_number: str or None
:param service_class: Service class.
:type service_class: ServiceClass or None
:param connection_time: Connection time in minutes, e.g. 120.
:type connection_time: int or None
:param departure: Departure point.
:type departure: Point or None
:param arrival: Arrival point.
:type arrival: Point or None
:param place_number: Place or seat number, e.g. '56b'.
:type place_number: str or None
:param car_number: Train car number, e.g. '12'.
:type car_number: str or None
:param car_type: Train car type, e.g. 'SV'.
:type car_type: str or None
:param connecting_flight: True if flight is connecting.
:type connecting_flight: bool or None
:param flight_direction: Flight direction.
:type flight_direction: FlightDirection or None
"""
BaseModel.__init__(self)
self.transport_operator_code = transport_operator_code
self.route_number = route_number
self.service_class = service_class
self.connection_time = connection_time
self.departure = departure
self.arrival = arrival
self.place_number = place_number
self.car_number = car_number
self.car_type = car_type
self.connecting_flight = connecting_flight
self.flight_direction = flight_direction
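# Construction sketch for a one-way flight segment (all values are illustrative):
#
#   segment = Segment(
#       transport_operator_code='SU', route_number='1490',
#       service_class=ServiceClass.ECONOM,
#       departure=Point(date=datetime.datetime(2021, 5, 1, 10, 30), point='SVO', country='RU'),
#       arrival=Point(date=datetime.datetime(2021, 5, 1, 12, 45), point='LED', country='RU'),
#       flight_direction=FlightDirection.OW)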
class Policy(BaseModel):
"""Insurance policy."""
__attrs__ = {
'policy_id': int, 'product': InsuranceProduct, 'insured': Person,
'insurer': Person, 'customer_email': str, 'customer_phone': str, 'pnr': str,
'series': str, 'payment_type': str, 'sale_session': str, 'issuance_city': str,
'external_id': str, 'commentary': str, 'description': str, 'resources': typing.List[str],
'travel_type': TravelType, 'sport': typing.List[SportKind], 'service_company': str,
'segments': typing.List[Segment],
'ticket': Ticket, 'rate': typing.List[Amount], 'discounted_rate': typing.List[Amount],
'begin_date': datetime.datetime,
'end_date': datetime.datetime, 'period_of_validity': int, 'risks': typing.List[Risk], 'status': PolicyStatus,
'created_at': datetime.datetime, 'update_at': datetime.datetime, 'fare_type': FareType,
'luggage_type': LuggageType,
'fare_code': str, 'cancellation': Cancellation, 'operator': Operator, 'agent': Agent,
'manager_name': str, 'manager_code': str, 'opt': Opt, 'selling_page': SellingPage,
'service_class': ServiceClass, 'age_group': str, 'acquisition_channel': AcquisitionChannel, 'error': str,
}
def __init__(self, policy_id=None, product=None, insured=None,
insurer=None, customer_email=None, customer_phone=None, pnr=None,
series=None, payment_type=None, sale_session=None, issuance_city=None,
external_id=None, commentary=None, description=None, resources=None,
travel_type=None, sport=None, service_company=None, segments=None,
ticket=None, rate=None, discounted_rate=None, begin_date=None,
end_date=None, period_of_validity=None, risks=None, status=None,
created_at=None, update_at=None, fare_type=None, luggage_type=None,
fare_code=None, cancellation=None, operator=None, agent=None,
manager_name=None, manager_code=None, opt=None, selling_page=None,
service_class=None, age_group=None, acquisition_channel=None, error=None):
"""Init.
:param policy_id: Policy ID, e.g. 21684956.
:type policy_id: int or None
:param product: Insurance product.
:type product: InsuranceProduct or None
:param insured: Insured person.
:type insured: Person or None
:param insurer: Insurer.
:type insurer: Person or None
:param customer_email: Customer contact email, e.g. 'example@mail.com'.
:type customer_email: str or None
:param customer_phone: Customer contact phone, e.g. '+79876543210'.
:type customer_phone: str or None
:param pnr: Booking number, e.g. 'TR097S'.
:type pnr: str or None
:param series: Policy series, e.g. '247.F'.
:type series: str or None
:param payment_type: Form of payment, e.g. 'CARD'.
:type payment_type: str or None
:param sale_session: Sale session, e.g. 'PQGWIXCLPY4613323570'.
:type sale_session: str or None
:param issuance_city: City where the policy was issued, e.g. 'Moscow'.
:type issuance_city: str or None
:param external_id: Policy ID in partner system, e.g. 'FQU/12324264/546546654'.
:type external_id: str or None
:param commentary: Comment, e.g. 'PQGWIXCLPY4613323570'.
:type commentary: str or None
:param description: Description: risks and insurance premium, e.g.
'Несчастный случай - 500 000 RUBПотеря багажа - 35 000 RUBПовреждение багажа - 25 000 RUB...'.
:type description: str or None
:param resources: Resources, e.g. ['resource1.pdf', 'resource2.pdf'].
:type resources: list[str] or None
:param travel_type: Travel type.
:type travel_type: TravelType or None
:param sport: Insured sports kind.
:type sport: list[SportKind] or None
:param service_company: Service company.
:type service_company: str or None
:param segments: Travel segments.
:type segments: list[Segment] or None
:param ticket: Ticket.
:type ticket: Ticket or None
:param rate: Rates in different currencies.
:type rate: list[Amount] or None
:param discounted_rate: Discounted rates in different currencies.
:type discounted_rate: list[Amount] or None
:param begin_date: Start date of the policy.
:type begin_date: datetime.datetime or None
:param end_date: Expiry date of the policy.
:type end_date: datetime.datetime or None
:param period_of_validity: Policy validity period in days, e.g. 14.
:type period_of_validity: int or None
:param risks: Information about risks.
:type risks: list[Risk] or None
:param status: Policy status.
:type status: PolicyStatus or None
:param created_at: Policy created datetime.
:type created_at: datetime.datetime or None
:param update_at: Policy updated datetime.
:type update_at: datetime.datetime or None
:param fare_type: Refundability.
:type fare_type: FareType or None
:param luggage_type: Luggage type.
:type luggage_type: LuggageType or None
:param fare_code: Fare code (fare basis), e.g. 'BPXOWRF'.
:type fare_code: str or None
:param cancellation: Reason for cancellation of the insurance policy.
:type cancellation: Cancellation or None
:param operator: Operator who created the insurance policy.
:type operator: Operator or None
:param agent: Agent who owns this policy.
:type agent: Agent or None
:param manager_name: Manager (cashier) code, e.g. 'Ivanova A.A.'.
:type manager_name: str or None
:param manager_code: Manager (cashier) code, e.g. '1q2w3e4r'.
:type manager_code: str or None
:param opt: Option state.
:type opt: Opt or None
:param selling_page: Policy selling page.
:type selling_page: SellingPage or None
:param service_class: Service class.
:type service_class: ServiceClass or None
:param age_group: Age group, e.g. '0-75'.
:type age_group: str or None
:param acquisition_channel: Acquisition (data collection) channel.
:type acquisition_channel: AcquisitionChannel or None
:param error: Error message.
:type error: str or None
"""
BaseModel.__init__(self)
self.policy_id = policy_id
self.product = product
self.insured = insured
self.insurer = insurer
self.customer_email = customer_email
self.customer_phone = customer_phone
self.pnr = pnr
self.series = series
self.payment_type = payment_type
self.sale_session = sale_session
self.issuance_city = issuance_city
self.external_id = external_id
self.commentary = commentary
self.description = description
self.resources = resources if resources is not None else []
self.travel_type = travel_type
self.sport = sport if sport is not None else []
self.service_company = service_company
self.segments = segments if segments is not None else []
self.ticket = ticket
self.rate = rate if rate is not None else []
self.discounted_rate = discounted_rate if discounted_rate is not None else []
self.begin_date = begin_date
self.end_date = end_date
self.period_of_validity = period_of_validity
self.risks = risks if risks is not None else []
self.status = status
self.created_at = created_at
self.update_at = update_at
self.fare_type = fare_type
self.luggage_type = luggage_type
self.fare_code = fare_code
self.cancellation = cancellation
self.operator = operator
self.agent = agent
self.manager_name = manager_name
self.manager_code = manager_code
self.opt = opt
self.selling_page = selling_page
self.service_class = service_class
self.age_group = age_group
self.acquisition_channel = acquisition_channel
self.error = error
class Declaration(BaseModel):
"""Client's application information."""
__attrs__ = {
'number': str, 'date': datetime.datetime,
}
def __init__(self, number, date):
"""Init.
:param number: Client's application number.
:type number: str
:param date: Application date.
:type date: datetime.datetime
"""
        BaseModel.__init__(self)
        self.number = number
self.date = date
class Quote(BaseModel):
"""Quote/Calculating."""
__attrs__ = {
'policies': typing.List[Policy], 'error': str,
}
def __init__(self, policies=None, error=None):
"""Init.
:param policies: List of policies.
:type policies: list[Policy]
:param error: Policy calculating error.
:type error: str or None
"""
BaseModel.__init__(self)
self.policies = policies if policies is not None else []
self.error = error
class ConfirmRequest(BaseModel, ApiRequest):
"""Request for confirmation of insurance policy."""
__attrs__ = {
'session_id': str,
}
def __init__(self, session_id=None):
"""Init.
:param session_id: Session id, e.g. '88c70099-8e11-4325-9239-9c027195c069'.
:type session_id: str or None
"""
BaseModel.__init__(self)
self.session_id = session_id
class CreateRequest(BaseModel, ApiRequest):
"""Request for creating one or more insurance policies."""
__attrs__ = {
'insureds': typing.List[Person], 'session_id': str, 'product': InsuranceProduct,
'insurer': Person, 'segments': typing.List[Segment], 'booking_price': Amount, 'currency': str,
'discounted_rate': typing.List[Amount], 'service_class': ServiceClass, 'pnr': str, 'customer_email': str,
'customer_phone': str, 'payment_type': str, 'sale_session': str, 'country': str,
'issuance_city': str, 'sport': typing.List[SportKind], 'fare_type': FareType, 'luggage_type': LuggageType,
'fare_code': str, 'manager_name': str, 'manager_code': str, 'begin_date': datetime.datetime,
'end_date': datetime.datetime, 'external_id': str, 'opt': Opt, 'selling_page': SellingPage,
'acquisition_channel': AcquisitionChannel,
}
def __init__(self, insureds, session_id=None, product=None,
insurer=None, segments=None, booking_price=None, currency=None,
discounted_rate=None, service_class=None, pnr=None, customer_email=None,
customer_phone=None, payment_type=None, sale_session=None, country=None,
issuance_city=None, sport=None, fare_type=None, luggage_type=None,
fare_code=None, manager_name=None, manager_code=None, begin_date=None,
end_date=None, external_id=None, opt=None, selling_page=None,
acquisition_channel=None):
"""Init.
:param insureds: List of insured persons.
:type insureds: list[Person]
:param session_id: Session id, e.g. '88c70099-8e11-4325-9239-9c027195c069'.
:type session_id: str or None
:param product: Insurance product.
:type product: InsuranceProduct or None
:param insurer: Insurer.
:type insurer: Person or None
:param segments: Travel segments.
:type segments: list[Segment] or None
:param booking_price: Total price of the booking.
:type booking_price: Amount or None
:param currency: Quote currency code, ISO 4217, e.g. 'RUB'.
:type currency: str or None
:param discounted_rate: Discounted rates in different currencies.
:type discounted_rate: list[Amount] or None
:param service_class: Service class.
:type service_class: ServiceClass or None
:param pnr: Booking number, e.g. 'TR097S'.
:type pnr: str or None
:param customer_email: Customer contact email, e.g. 'example@mail.com'.
:type customer_email: str or None
:param customer_phone: Customer contact phone, e.g. '+79876543210'.
:type customer_phone: str or None
:param payment_type: Form of payment, e.g. 'CARD'.
:type payment_type: str or None
:param sale_session: Sale session, e.g. 'PQGWIXCLPY4613323570'.
:type sale_session: str or None
:param country: Code of the country where the document was issued, ISO 3166-1, e.g. 'RU'.
:type country: str or None
:param issuance_city: City where the policy was issued, e.g. 'Moscow'.
:type issuance_city: str or None
:param sport: Insured sports kind.
:type sport: list[SportKind] or None
:param fare_type: Refundability.
:type fare_type: FareType or None
:param luggage_type: Luggage type.
:type luggage_type: LuggageType or None
:param fare_code: Fare code (fare basis), e.g. 'BPXOWRF'.
:type fare_code: str or None
:param manager_name: Manager (cashier) code, e.g. 'Ivanova A.A.'.
:type manager_name: str or None
:param manager_code: Manager (cashier) code, e.g. '1q2w3e4r'.
:type manager_code: str or None
:param begin_date: Start date of the policy.
:type begin_date: datetime.datetime or None
:param end_date: Expiry date of the policy.
:type end_date: datetime.datetime or None
:param external_id: Policy ID in partner system, e.g. 'FQU/12324264/546546654'.
:type external_id: str or None
:param opt: Option state.
:type opt: Opt or None
:param selling_page: Policy selling page.
:type selling_page: SellingPage or None
:param acquisition_channel: Acquisition (data collection) channel.
:type acquisition_channel: AcquisitionChannel or None
"""
BaseModel.__init__(self)
self.insureds = insureds
self.session_id = session_id
self.product = product
self.insurer = insurer
self.segments = segments if segments is not None else []
self.booking_price = booking_price
self.currency = currency
# self.discounted_rate = discounted_rate if discounted_rate is not None else []
# "discounted_rate": []
# Invalid JSON. Cannot deserialize instance of `java.math.BigDecimal` out of START_ARRAY token
self.discounted_rate = discounted_rate
self.service_class = service_class
self.pnr = pnr
self.customer_email = customer_email
self.customer_phone = customer_phone
self.payment_type = payment_type
self.sale_session = sale_session
self.country = country
self.issuance_city = issuance_city
self.sport = sport if sport is not None else []
self.fare_type = fare_type
self.luggage_type = luggage_type
self.fare_code = fare_code
self.manager_name = manager_name
self.manager_code = manager_code
self.begin_date = begin_date
self.end_date = end_date
self.external_id = external_id
self.opt = opt
self.selling_page = selling_page
self.acquisition_channel = acquisition_channel
class CreateResponse(BaseModel):
"""Create response."""
__attrs__ = {
'policies': typing.List[Policy],
}
def __init__(self, policies=None):
"""Init.
:param policies: List of policies.
:type policies: list[Policy]
"""
BaseModel.__init__(self)
self.policies = policies if policies is not None else []
class QuoteRequest(BaseModel, ApiRequest):
"""Request for calculating one or more insurance policies."""
__attrs__ = {
'session_id': str, 'product': InsuranceProduct, 'insureds': typing.List[Person],
'segments': typing.List[Segment], 'booking_price': Amount, 'currency': str, 'service_class': ServiceClass,
'country': str, 'sport': typing.List[SportKind], 'fare_type': FareType, 'luggage_type': LuggageType,
'fare_code': str, 'manager_name': str, 'manager_code': str, 'opt': Opt,
'selling_page': SellingPage, 'end_date': datetime.datetime, 'acquisition_channel': AcquisitionChannel,
}
def __init__(self, session_id=None, product=None, insureds=None,
segments=None, booking_price=None, currency=None, service_class=None,
country=None, sport=None, fare_type=None, luggage_type=None,
fare_code=None, manager_name=None, manager_code=None, opt=None,
selling_page=None, end_date=None, acquisition_channel=None):
"""Init.
:param session_id: Session id, e.g. '88c70099-8e11-4325-9239-9c027195c069'.
:type session_id: str or None
:param product: Insurance product.
:type product: InsuranceProduct or None
:param insureds: List of insured persons.
:type insureds: list[Person] or None
:param segments: List of travel segments, e.g. list of flights.
:type segments: list[Segment] or None
:param booking_price: Total price of the booking.
:type booking_price: Amount or None
:param currency: Quote currency code, ISO 4217, e.g. 'RUB'.
:type currency: str or None
:param service_class: Service class.
:type service_class: ServiceClass or None
:param country: Country code where the insurance policy will be paid for, ISO 3166-1, e.g. 'RU'.
:type country: str or None
:param sport: Insured sports kind.
:type sport: list[SportKind] or None
:param fare_type: Refundability.
:type fare_type: FareType or None
:param luggage_type: Luggage type.
:type luggage_type: LuggageType or None
:param fare_code: Fare code (fare basis), e.g. 'BPXOWRF'.
:type fare_code: str or None
        :param manager_name: Manager (cashier) name, e.g. 'Ivanova A.A.'.
:type manager_name: str or None
:param manager_code: Manager (cashier) code, e.g. '1q2w3e4r'.
:type manager_code: str or None
:param opt: Option state.
:type opt: Opt or None
:param selling_page: Policy selling page.
:type selling_page: SellingPage or None
:param end_date: Expiry date of the policy.
:type end_date: datetime.datetime or None
:param acquisition_channel: Acquisition (data collection) channel.
:type acquisition_channel: AcquisitionChannel or None
"""
BaseModel.__init__(self)
self.session_id = session_id
self.product = product
self.insureds = insureds if insureds is not None else []
self.segments = segments if segments is not None else []
self.booking_price = booking_price
self.currency = currency
self.service_class = service_class
self.country = country
self.sport = sport
self.fare_type = fare_type
self.luggage_type = luggage_type
self.fare_code = fare_code
self.manager_name = manager_name
self.manager_code = manager_code
self.opt = opt
self.selling_page = selling_page
self.end_date = end_date
self.acquisition_channel = acquisition_channel
class QuoteResponse(BaseModel):
"""Quote response."""
__attrs__ = {
'session_id': str, 'quotes': typing.List[Quote],
}
def __init__(self, session_id=None, quotes=None):
"""Init.
:param session_id: Session id, e.g. '88c70099-8e11-4325-9239-9c027195c069'.
:type session_id: str or None
:param quotes: List of policies for each insured person.
:type quotes: list[Quote] or None
"""
BaseModel.__init__(self)
self.session_id = session_id
self.quotes = quotes if quotes is not None else []
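# Usage sketch (added for illustration; not part of the original module): a
# minimal quote request built from scalar fields only. Every identifier and
# code below is an invented placeholder; a real request would normally also
# attach insureds (Person), segments (Segment) and an InsuranceProduct.
def _example_quote_request():
    """Return a minimal, illustrative QuoteRequest."""
    return QuoteRequest(
        session_id='88c70099-8e11-4325-9239-9c027195c069',  # placeholder session id
        currency='RUB',
        country='RU',
        fare_code='BPXOWRF',
    )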
class SaleWithoutInsuranceRequest(ApiRequest):
pass
class SaleWithoutInsuranceResponse:
pass
class UpdateRequest(ApiRequest):
pass
class UpdateResponse:
pass
| 33.978995 | 117 | 0.621872 |
4a1ad479806b07ac223ac85e3bdc09c56d0ea4c0
| 1,281 |
py
|
Python
|
setup.py
|
maprihoda/binpacking
|
418226a09bbad9a464d2ef7a9b05ecf02f77e02f
|
[
"MIT"
] | null | null | null |
setup.py
|
maprihoda/binpacking
|
418226a09bbad9a464d2ef7a9b05ecf02f77e02f
|
[
"MIT"
] | null | null | null |
setup.py
|
maprihoda/binpacking
|
418226a09bbad9a464d2ef7a9b05ecf02f77e02f
|
[
"MIT"
] | null | null | null |
from setuptools import setup
setup(name='binpacking',
version='1.5.1',
description='Heuristic distribution of weighted items to bins (either a fixed number of bins or a fixed number of volume per bin). Data may be in form of list, dictionary, list of tuples or csv-file.',
url='https://www.github.com/benmaier/binpacking',
author='Benjamin F. Maier',
author_email='bfmaier@physik.hu-berlin.de',
license='MIT',
packages=['binpacking'],
setup_requires=['pytest-runner'],
install_requires=[
'future',
],
tests_require=['pytest', 'pytest-cov'],
dependency_links=[
],
entry_points = {
'console_scripts': [
'binpacking = binpacking.binpacking_binary:main',
],
},
classifiers=['License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
],
zip_safe=False)
| 40.03125 | 207 | 0.551132 |
4a1ad5731fbd87e4290205c541e4049926b3f301
| 11,374 |
py
|
Python
|
python/sklearn/sklearn/decomposition/tests/test_pca.py
|
seckcoder/lang-learn
|
1e0d6f412bbd7f89b1af00293fd907ddb3c1b571
|
[
"Unlicense"
] | 1 |
2017-10-14T04:23:45.000Z
|
2017-10-14T04:23:45.000Z
|
python/sklearn/sklearn/decomposition/tests/test_pca.py
|
seckcoder/lang-learn
|
1e0d6f412bbd7f89b1af00293fd907ddb3c1b571
|
[
"Unlicense"
] | null | null | null |
python/sklearn/sklearn/decomposition/tests/test_pca.py
|
seckcoder/lang-learn
|
1e0d6f412bbd7f89b1af00293fd907ddb3c1b571
|
[
"Unlicense"
] | null | null | null |
import numpy as np
from scipy.sparse import csr_matrix
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_less, assert_greater
from sklearn import datasets
from sklearn.decomposition import PCA
from sklearn.decomposition import ProbabilisticPCA
from sklearn.decomposition import RandomizedPCA
from sklearn.decomposition.pca import _assess_dimension_
from sklearn.decomposition.pca import _infer_dimension_
iris = datasets.load_iris()
def test_pca():
"""PCA on dense arrays"""
pca = PCA(n_components=2)
X = iris.data
X_r = pca.fit(X).transform(X)
np.testing.assert_equal(X_r.shape[1], 2)
X_r2 = pca.fit_transform(X)
assert_array_almost_equal(X_r, X_r2)
pca = PCA()
pca.fit(X)
assert_almost_equal(pca.explained_variance_ratio_.sum(), 1.0, 3)
X_r = pca.transform(X)
X_r2 = pca.fit_transform(X)
assert_array_almost_equal(X_r, X_r2)
def test_whitening():
"""Check that PCA output has unit-variance"""
rng = np.random.RandomState(0)
n_samples = 100
n_features = 80
n_components = 30
rank = 50
# some low rank data with correlated features
X = np.dot(rng.randn(n_samples, rank),
np.dot(np.diag(np.linspace(10.0, 1.0, rank)),
rng.randn(rank, n_features)))
# the component-wise variance of the first 50 features is 3 times the
    # mean component-wise variance of the remaining 30 features
X[:, :50] *= 3
assert_equal(X.shape, (n_samples, n_features))
# the component-wise variance is thus highly varying:
assert_almost_equal(X.std(axis=0).std(), 43.9, 1)
# whiten the data while projecting to the lower dim subspace
pca = PCA(n_components=n_components, whiten=True)
# test fit_transform
X_whitened = pca.fit_transform(X)
assert_equal(X_whitened.shape, (n_samples, n_components))
X_whitened2 = pca.transform(X)
assert_array_almost_equal(X_whitened, X_whitened2)
    # all output components have unit variance
assert_almost_equal(X_whitened.std(axis=0), np.ones(n_components))
# is possible to project on the low dim space without scaling by the
# singular values
pca = PCA(n_components=n_components, whiten=False).fit(X)
X_unwhitened = pca.transform(X)
assert_equal(X_unwhitened.shape, (n_samples, n_components))
# in that case the output components still have varying variances
assert_almost_equal(X_unwhitened.std(axis=0).std(), 74.1, 1)
def test_pca_check_projection():
"""Test that the projection of data is correct"""
rng = np.random.RandomState(0)
n, p = 100, 3
X = rng.randn(n, p) * .1
X[:10] += np.array([3, 4, 5])
Xt = 0.1 * rng.randn(1, p) + np.array([3, 4, 5])
Yt = PCA(n_components=2).fit(X).transform(Xt)
Yt /= np.sqrt((Yt ** 2).sum())
assert_almost_equal(np.abs(Yt[0][0]), 1., 1)
def test_pca_inverse():
"""Test that the projection of data can be inverted"""
rng = np.random.RandomState(0)
n, p = 50, 3
X = rng.randn(n, p) # spherical data
X[:, 1] *= .00001 # make middle component relatively small
X += [5, 4, 3] # make a large mean
    # sanity check that we can recover the original data from the transformed
# signal (since the data is almost of rank n_components)
pca = PCA(n_components=2).fit(X)
Y = pca.transform(X)
Y_inverse = pca.inverse_transform(Y)
assert_almost_equal(X, Y_inverse, decimal=3)
# same as above with whitening (approximate reconstruction)
pca = PCA(n_components=2, whiten=True)
pca.fit(X)
Y = pca.transform(X)
Y_inverse = pca.inverse_transform(Y)
relative_max_delta = (np.abs(X - Y_inverse) / np.abs(X).mean()).max()
assert_almost_equal(relative_max_delta, 0.11, decimal=2)
def test_randomized_pca_check_projection():
"""Test that the projection by RandomizedPCA on dense data is correct"""
rng = np.random.RandomState(0)
n, p = 100, 3
X = rng.randn(n, p) * .1
X[:10] += np.array([3, 4, 5])
Xt = 0.1 * rng.randn(1, p) + np.array([3, 4, 5])
Yt = RandomizedPCA(n_components=2, random_state=0).fit(X).transform(Xt)
Yt /= np.sqrt((Yt ** 2).sum())
assert_almost_equal(np.abs(Yt[0][0]), 1., 1)
def test_randomized_pca_check_list():
"""Test that the projection by RandomizedPCA on list data is correct"""
X = [[1.0, 0.0], [0.0, 1.0]]
X_transformed = RandomizedPCA(n_components=1, random_state=0
).fit(X).transform(X)
assert_equal(X_transformed.shape, (2, 1))
assert_almost_equal(X_transformed.mean(), 0.00, 2)
assert_almost_equal(X_transformed.std(), 0.71, 2)
def test_randomized_pca_inverse():
"""Test that RandomizedPCA is inversible on dense data"""
rng = np.random.RandomState(0)
n, p = 50, 3
X = rng.randn(n, p) # spherical data
X[:, 1] *= .00001 # make middle component relatively small
X += [5, 4, 3] # make a large mean
    # sanity check that we can recover the original data from the transformed signal
# (since the data is almost of rank n_components)
pca = RandomizedPCA(n_components=2, random_state=0).fit(X)
Y = pca.transform(X)
Y_inverse = pca.inverse_transform(Y)
assert_almost_equal(X, Y_inverse, decimal=2)
# same as above with whitening (approximate reconstruction)
pca = RandomizedPCA(n_components=2, whiten=True,
random_state=0).fit(X)
Y = pca.transform(X)
Y_inverse = pca.inverse_transform(Y)
relative_max_delta = (np.abs(X - Y_inverse) / np.abs(X).mean()).max()
assert_almost_equal(relative_max_delta, 0.11, decimal=2)
def test_sparse_randomized_pca_check_projection():
"""Test that the projection by RandomizedPCA on sparse data is correct"""
rng = np.random.RandomState(0)
n, p = 100, 3
X = rng.randn(n, p) * .1
X[:10] += np.array([3, 4, 5])
X = csr_matrix(X)
Xt = 0.1 * rng.randn(1, p) + np.array([3, 4, 5])
Xt = csr_matrix(Xt)
Yt = RandomizedPCA(n_components=2, random_state=0).fit(X).transform(Xt)
Yt /= np.sqrt((Yt ** 2).sum())
np.testing.assert_almost_equal(np.abs(Yt[0][0]), 1., 1)
def test_sparse_randomized_pca_inverse():
"""Test that RandomizedPCA is inversible on sparse data"""
rng = np.random.RandomState(0)
n, p = 50, 3
X = rng.randn(n, p) # spherical data
X[:, 1] *= .00001 # make middle component relatively small
# no large means because the sparse version of randomized pca does not do
# centering to avoid breaking the sparsity
X = csr_matrix(X)
    # sanity check that we can recover the original data from the transformed signal
# (since the data is almost of rank n_components)
pca = RandomizedPCA(n_components=2, random_state=0).fit(X)
Y = pca.transform(X)
Y_inverse = pca.inverse_transform(Y)
assert_almost_equal(X.todense(), Y_inverse, decimal=2)
# same as above with whitening (approximate reconstruction)
pca = RandomizedPCA(n_components=2, whiten=True,
random_state=0).fit(X)
Y = pca.transform(X)
Y_inverse = pca.inverse_transform(Y)
relative_max_delta = (np.abs(X.todense() - Y_inverse)
/ np.abs(X).mean()).max()
    # XXX: this does not seem to work as expected:
assert_almost_equal(relative_max_delta, 0.91, decimal=2)
def test_pca_dim():
"""Check automated dimensionality setting"""
rng = np.random.RandomState(0)
n, p = 100, 5
X = rng.randn(n, p) * .1
X[:10] += np.array([3, 4, 5, 1, 2])
pca = PCA(n_components='mle').fit(X)
assert_equal(pca.n_components, 1)
def test_infer_dim_1():
"""TODO: explain what this is testing
Or at least use explicit variable names...
"""
n, p = 1000, 5
rng = np.random.RandomState(0)
X = rng.randn(n, p) * .1 + rng.randn(n, 1) * np.array([3, 4, 5, 1, 2]) \
+ np.array([1, 0, 7, 4, 6])
pca = PCA(n_components=p)
pca.fit(X)
spect = pca.explained_variance_
ll = []
for k in range(p):
ll.append(_assess_dimension_(spect, k, n, p))
ll = np.array(ll)
assert_greater(ll[1], ll.max() - .01 * n)
def test_infer_dim_2():
"""TODO: explain what this is testing
Or at least use explicit variable names...
"""
n, p = 1000, 5
rng = np.random.RandomState(0)
X = rng.randn(n, p) * .1
X[:10] += np.array([3, 4, 5, 1, 2])
X[10:20] += np.array([6, 0, 7, 2, -1])
pca = PCA(n_components=p)
pca.fit(X)
spect = pca.explained_variance_
assert_greater(_infer_dimension_(spect, n, p), 1)
def test_infer_dim_3():
"""
"""
n, p = 100, 5
rng = np.random.RandomState(0)
X = rng.randn(n, p) * .1
X[:10] += np.array([3, 4, 5, 1, 2])
X[10:20] += np.array([6, 0, 7, 2, -1])
X[30:40] += 2 * np.array([-1, 1, -1, 1, -1])
pca = PCA(n_components=p)
pca.fit(X)
spect = pca.explained_variance_
assert_greater(_infer_dimension_(spect, n, p), 2)
def test_infer_dim_by_explained_variance():
X = iris.data
pca = PCA(n_components=0.95)
pca.fit(X)
assert_equal(pca.n_components, 2)
pca = PCA(n_components=0.01)
pca.fit(X)
assert_equal(pca.n_components, 1)
rng = np.random.RandomState(0)
# more features than samples
X = rng.rand(5, 20)
pca = PCA(n_components=.5).fit(X)
assert_equal(pca.n_components, 2)
def test_probabilistic_pca_1():
"""Test that probabilistic PCA yields a reasonable score"""
n, p = 1000, 3
rng = np.random.RandomState(0)
X = rng.randn(n, p) * .1 + np.array([3, 4, 5])
ppca = ProbabilisticPCA(n_components=2)
ppca.fit(X)
ll1 = ppca.score(X)
h = -0.5 * np.log(2 * np.pi * np.exp(1) * 0.1 ** 2) * p
np.testing.assert_almost_equal(ll1.mean() / h, 1, 0)
def test_probabilistic_pca_2():
"""Test that probabilistic PCA correctly separated different datasets"""
n, p = 100, 3
rng = np.random.RandomState(0)
X = rng.randn(n, p) * .1 + np.array([3, 4, 5])
ppca = ProbabilisticPCA(n_components=2)
ppca.fit(X)
ll1 = ppca.score(X)
ll2 = ppca.score(rng.randn(n, p) * .2 + np.array([3, 4, 5]))
assert_greater(ll1.mean(), ll2.mean())
def test_probabilistic_pca_3():
"""The homoscedastic model should work slightly worth
than the heteroscedastic one in over-fitting condition
"""
n, p = 100, 3
rng = np.random.RandomState(0)
X = rng.randn(n, p) * .1 + np.array([3, 4, 5])
ppca = ProbabilisticPCA(n_components=2)
ppca.fit(X)
ll1 = ppca.score(X)
ppca.fit(X, homoscedastic=False)
ll2 = ppca.score(X)
assert_less(ll1.mean(), ll2.mean())
def test_probabilistic_pca_4():
"""Check that ppca select the right model"""
n, p = 200, 3
rng = np.random.RandomState(0)
Xl = (rng.randn(n, p) + rng.randn(n, 1) * np.array([3, 4, 5])
+ np.array([1, 0, 7]))
Xt = (rng.randn(n, p) + rng.randn(n, 1) * np.array([3, 4, 5])
+ np.array([1, 0, 7]))
ll = np.zeros(p)
for k in range(p):
ppca = ProbabilisticPCA(n_components=k)
ppca.fit(Xl)
ll[k] = ppca.score(Xt).mean()
assert_true(ll.argmax() == 1)
if __name__ == '__main__':
import nose
nose.run(argv=['', __file__])
| 32.590258 | 79 | 0.644364 |
4a1ad5aa03c23dec8237297912266861c6be06e1
| 220 |
py
|
Python
|
suitcase/dataexchange/tests/conftest.py
|
danielballan/suitcase-dataexchange
|
91821b8d06baf2dbf910b8b232b1d5ebd0ebbf5f
|
[
"BSD-3-Clause"
] | null | null | null |
suitcase/dataexchange/tests/conftest.py
|
danielballan/suitcase-dataexchange
|
91821b8d06baf2dbf910b8b232b1d5ebd0ebbf5f
|
[
"BSD-3-Clause"
] | 2 |
2020-07-27T18:49:26.000Z
|
2020-12-02T16:39:51.000Z
|
suitcase/dataexchange/tests/conftest.py
|
danielballan/suitcase-dataexchange
|
91821b8d06baf2dbf910b8b232b1d5ebd0ebbf5f
|
[
"BSD-3-Clause"
] | 3 |
2019-07-02T22:17:13.000Z
|
2019-09-16T07:28:37.000Z
|
from bluesky.tests.conftest import RE # noqa
from ophyd.tests.conftest import hw # noqa
from suitcase.utils.tests.conftest import ( # noqa
example_data, generate_data, plan_type, detector_list, event_type) # noqa
| 44 | 77 | 0.777273 |
4a1ad5ebc9ab12c40fb5342292ca71ae7ced5ed2
| 15,206 |
py
|
Python
|
gs_api_client/swagger/models/serverin_ip.py
|
gridscale/gridscale_api_client_python
|
755b8e8a017784a4f5c6b3a577338ff988c41a9a
|
[
"MIT"
] | 7 |
2019-07-12T13:59:45.000Z
|
2021-03-16T08:46:20.000Z
|
gs_api_client/swagger/models/serverin_ip.py
|
gridscale/gridscale_api_client_python
|
755b8e8a017784a4f5c6b3a577338ff988c41a9a
|
[
"MIT"
] | 13 |
2020-01-23T07:50:29.000Z
|
2022-03-21T14:32:40.000Z
|
gs_api_client/swagger/models/serverin_ip.py
|
gridscale/gridscale_api_client_python
|
755b8e8a017784a4f5c6b3a577338ff988c41a9a
|
[
"MIT"
] | null | null | null |
# coding: utf-8
"""
API Specification
# Introduction Welcome to gridscales API documentation. A REST API is a programming interface that allows you to access and send data directly to our systems using HTTPS requests, without the need to use a web GUI. All the functionality you are already familiar with in your control panel is accessible through the API, including expert methods that are only available through the API. Allowing you to script any actions you require, regardless of their complexity. First we will start with a general overview about how the API works, followed by an extensive list of each endpoint, describing them in great detail. ## Requests For security, gridscale requires all API requests are made through the HTTPS protocol so that traffic is encrypted. The following table displays the different type of requests that the interface responds to, depending on the action you require. | Method | Description | | --- | --- | | GET | A simple search of information. The response is a JSON object. Requests using GET are always read-only. | | POST | Adds new objects and object relations. The POST request must contain all the required parameters in the form of a JSON object. | | PATCH | Changes an object or an object relation. The parameters in PATCH requests are usually optional, so only the changed parameters must be specified in a JSON object. | | DELETE | Deletes an object or object relation. The object is deleted if it exists. | | OPTIONS | Get an extensive list of the servers support methods and characteristics. We will not give example OPTION requests on each endpoint, as they are extensive and self-descriptive. | <aside class=\"notice\"> The methods PATCH and DELETE are idempotent - that is, a request with identical parameters can be sent several times, and it doesn't change the result. </aside> ## Status Codes | HTTP Status | `Message` | Description | | --- | --- | --- | | 200 | `OK` | The request has been successfully processed and the result of the request is transmitted in the response. | | 202 | `Accepted` | The request has been accepted, but will run at a later date. Meaning we can not guarantee the success of the request. You should poll the request to be notified once the resource has been provisioned - see the requests endpoint on how to poll. | | 204 | `No Content` | The request was successful, but the answer deliberately contains no data. | | 400 | `Bad Request` | The request message was built incorrectly. | | 401 | `Unauthorised` | The request can not be performed without a valid authentication. X-Auth UserId or X-Auth token HTTP header is not set or the userID / token is invalid. | | 402 | `Payment Required` | Action can not be executed - not provided any or invalid payment methods. | | 403 | `Forbidden` | The request was not carried out due to lack of authorization of the user or because an impossible action was requested. | | 404 | `Not Found` | The requested resource was not found. Will also be used if you do a resource exists, but the user does not have permission for it. | | 405 | `Method Not Allowed` | The request may be made only with other HTTP methods (eg GET rather than POST). | | 409 | `Conflict` | The request was made under false assumptions. For example, a user can not be created twice with the same email. | | 415 | `Unsupported Media Type` | The contents of the request have been submitted with an invalid media type. All POST or PATCH requests must have \"Content-Type : application / json\" as a header, and send a JSON object as a payload. 
| | 416 | `Requested Range Not Satisfiable` | The request could not be fulfilled. It is possible that a resource limit was reached or an IPv4 address pool is exhausted. | | 424 | `Failed Dependency` | The request could not be performed because the object is in the wrong status. | | 429 | `Too Many Requests` | The request has been rejected because rate limits have been exceeded. | <aside class=\"success\"> Status 200-204 indicates that the request has been accepted and is processed. </aside> <aside class=\"notice\"> Status 400-429 indicates that there was a problem with the request that originated on the client. You will find more information about the problem in the body of 4xx response. </aside> <aside class=\"warning\"> A status 500 means that there was a server-side problem and your request can not be processed now. </aside> ## Request Headers | Header | Description | | --- | --- | | Content-Type | Always \"application/json\". | | X-Auth-userId | The user UUID. This can be found in the panel under \"API\" and will never change ( even after the change of user e-mail). | | X-Auth-Token | Is generated from the API hash and must be sent with all API requests. Both the token and its permissions can be configured in the panel.| ## Response Headers | Header | Description | | --- | --- | | Content-Type | Always \"application/json\". | | X-Time-Provisioning | The time taken to process the request (in ms). | | X-Api-Identity | The currently active Provisioning API version. Useful when reporting bugs to us. | | X-Request-Id | The unique identifier of the request, be sure to include it when referring to a request. | | RateLimit-Limit | The number of requests that can be made per minute. | | RateLimit-Remaining | The number of requests that still remain before you hit your request limit. | | RateLimit-Reset | A [Unix timestamp](https://en.wikipedia.org/wiki/Unix_time) in milliseconds of when the rate limit will reset, or the time at which a request no longer will return 429 - Too Many Requests. | ## Timestamp Format All timestamps follow <a href=\"https://de.wikipedia.org/wiki/ISO_8601\" target=\"_blank_\">ISO 8601</a> and issued in <a href=\"https://www.timeanddate.de/zeitzonen/utc-gmt\" target=\"_blank_\">UTC</a> ## CORS ### Cross Origin Resource Sharing To allow API access from other domains that supports the API CORS (Cross Origin Resource Sharing). See: enable-cors.org/ . This allows direct use the API in the browser running a JavaScript web control panel. All this is done in the background by the browser. The following HTTP headers are set by the API: Header | Parameter | Description --- | --- | --- Access-Control-Allow-Methods | GET, POST, PUT, PATCH, DELETE, OPTIONS | Contains all available methods that may be used for queries. Access-Control-Allow-Credentials | true | Is set to \"true\". Allows the browser to send the authentication data via X-Auth HTTP header. Access-Control-Allow-Headers | Origin, X-Requested-With, Content-Type, Accept, X-Auth-UserId, X-Auth-Token, X-Exec-Time, X-API-Version, X-Api-Client | The HTTP headers available for requests. Access-Control-Allow-Origin | * | The domain sent by the browser as a source of demand. Access-Control-Expose-Headers | X-Exec-Time, X-Api-Version | The HTTP headers that can be used by a browser application. ## Rate Limits The number of requests that can be made through our API is currently limited to 210 requests per 60 seconds. The current state of rate limiting is returned within the response headers of each request. 
The relevant response headers are - RateLimit-Limit - RateLimit-Remaining - RateLimit-Reset See the Response Headers section for details. As long as the `RateLimit-Remaining` count is above zero, you will be able to make further requests. As soon as the `RateLimit-Remaining` header value is zero, subsequent requests will return the 429 status code. This will stay until the timestamp given in `RateLimit-Reset` has been reached. ### Example rate limiting response ```shell HTTP/1.0 429 TOO MANY REQUESTS Content-Length: 66 Content-Type: application/json; charset=utf-8 Date: Mon, 11 Nov 2019 11:11:33 GMT RateLimit-Limit: 210 RateLimit-Remaining: 0 RateLimit-Reset: 1573468299256 { \"id\": \"too_many_requests\", \"message\": \"API Rate limit exceeded.\" } ``` It is important to understand how rate limits are reset in order to use the API efficiently. Rate limits are reset for all counted requests at once. This means that that once the timestamp `RateLimit-Remaining` has arrived all counted request are reset and you can again start sending requests to the API. This allows for short burst of traffic. The downside is once you have hit the request limit no more requests are allowed until the rate limit duration is reset. ## Object Relations Relationships describe resource objects (storages, networks, IPs, etc.) that are connected to a server. These relationships are treated like objects themselves and can have properties specific to this relation. One example would be, that the MAC address of a private network connected to a server (Server-to-Network relation) can be found as property of the relation itself - the relation is the _network interface_ in the server. Another example is storage, where the SCSI LUN is also part of the Server-to-Storage relation object. This information is especially interesting if some kind of network boot is used on the servers, where the properties of the server need to be known beforehand. ## Deleted Objects Objects that are deleted are no longer visible on their *regular* endpoints. For historical reasons these objects are still available read-only on a special endpoint named /deleted. If objects have been deleted but have not yet been billed in the current period, the yet-to-be-billed price is still shown. <!-- #strip_js --> ## Node.js / Javascript Library We have a JavaScript library for you to use our API with ease. <a href=\"https://badge.fury.io/js/%40gridscale%2Fgsclient-js\"><img src=\"https://badge.fury.io/js/%40gridscale%2Fgsclient-js.svg\" alt=\"npm version\" height=\"18\"></a> <aside class=\"success\"> We want to make it even easier for you to manage your Infrastructure via our API - so feel free to contact us with any ideas, or languages you would like to see included. </aside> Requests with our Node.js lib return a little differently. Everything is the same except it allows you to add URL parameters to customize your requests. To get started <a href=\"https://www.npmjs.com/package/@gridscale/gsclient-js\" target=\"_blank\">click here</a> . <!-- #strip_js_end --> <!-- #strip_go --> ## Golang Library We also have a Golang library for Gophers. Requests with our Golang lib return a little differently. Everything is the same except it allows you to add URL parameters to customize your requests. To get started <a href=\"https://github.com/gridscale/gsclient-go\" target=\"_blank\">click here</a> . 
<!-- #strip_go_end --> <!-- #strip_python --> ## Python Library We have a Python library, that optionally also simplifies handling of asynchronous requests by mimicking synchronous blocking behaviour. To get started <a href=\"https://pypi.org/project/gs-api-client/\" target=\"_blank\">click here</a> . <!-- #strip_python_end --> # Authentication In order to use the API, the User-UUID and an API_Token are required. Both are available via the web GUI which can be found here on <a href=\"https://my.gridscale.io/APIs/\" target=\"_blank\">Your Account</a> <aside class=\"success\"> If you are logged in, your UUID and Token will be pulled dynamically from your account, so you can copy request examples straight into your code. </aside> The User-UUID remains the same, even if the users email address is changed. The API_Token is a randomly generated hash that allows read/write access. ## API_Token <table class=\"security-details\"><tbody><tr><th> Security scheme type: </th><td> API Key </td></tr><tr><th> header parameter name:</th><td> X-Auth-Token </td></tr></tbody></table> ## User_UUID <table class=\"security-details\"><tbody><tr><th> Security scheme type: </th><td> API Key </td></tr><tr><th> header parameter name:</th><td> X-Auth-UserId </td></tr></tbody></table> ## Examples <!-- #strip_js --> > Node.js ``` // to get started // read the docs @ https://www.npmjs.com/package/@gs_js_auth/api var gs_js_auth = require('@gs_js_auth/api').gs_js_auth; var client = new gs_js_auth.Client(\"##API_TOKEN##\",\"##USER_UUID##\"); ``` <!-- #strip_js_end --> <!-- #strip_go --> > Golang ``` // to get started // read the docs @ https://github.com/gridscale/gsclient-go config := gsclient.NewConfiguration( \"https://api.gridscale.io\", \"##USER_UUID##\", \"##API_TOKEN##\", false, //set debug mode ) client := gsclient.NewClient(config) ``` <!-- #strip_go_end --> > Shell Authentication Headers ``` -H \"X-Auth-UserId: ##USER_UUID##\" \\ -H \"X-Auth-Token: ##API_TOKEN##\" \\ ``` > Setting Authentication in your Environment variables ``` export API_TOKEN=\"##API_TOKEN##\" USER_UUID=\"##USER_UUID##\" ``` <aside class=\"notice\"> You must replace <code>USER_UUID</code> and <code>API_Token</code> with your personal UUID and API key respectively. </aside> # noqa: E501
OpenAPI spec version: 1.0.50
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from gs_api_client.swagger.models.serverin_ip_inner import ServerinIpInner # noqa: F401,E501
class ServerinIp(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
}
attribute_map = {
}
def __init__(self): # noqa: E501
"""ServerinIp - a model defined in Swagger""" # noqa: E501
self.discriminator = None
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(ServerinIp, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ServerinIp):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
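# Usage sketch (added for illustration; not generated code): a small helper that
# retries a call when the API answers with the 429 rate-limit status described in
# the module docstring. Only documented headers are used (X-Auth-UserId and
# X-Auth-Token for authentication, RateLimit-Reset for the back-off); the endpoint
# URL and the credentials passed in are placeholders.
def _example_get_with_rate_limit_retry(user_uuid, api_token,
                                       url="https://api.gridscale.io/objects/servers"):
    import time
    import requests
    headers = {
        "X-Auth-UserId": user_uuid,
        "X-Auth-Token": api_token,
        "Content-Type": "application/json",
    }
    while True:
        response = requests.get(url, headers=headers)
        if response.status_code != 429:
            return response
        # RateLimit-Reset is a Unix timestamp in milliseconds; wait until it passes.
        reset_ms = int(response.headers.get("RateLimit-Reset", "0"))
        time.sleep(max(reset_ms / 1000.0 - time.time(), 1.0))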
| 168.955556 | 12,787 | 0.71406 |
4a1ad6ce1764e8ccc1ceb9a0b7bba7cd5594848e
| 2,403 |
py
|
Python
|
tests/unit/legacy/api/test_pypi.py
|
tony/warehouse
|
0c260a285cf9d95728756a643a404883f2bb3bfb
|
[
"Apache-2.0"
] | 4 |
2017-12-07T17:45:12.000Z
|
2021-11-15T11:14:44.000Z
|
tests/unit/legacy/api/test_pypi.py
|
tony/warehouse
|
0c260a285cf9d95728756a643a404883f2bb3bfb
|
[
"Apache-2.0"
] | 11 |
2020-01-06T18:55:57.000Z
|
2022-03-11T23:27:05.000Z
|
tests/unit/legacy/api/test_pypi.py
|
startnayit/warehouse
|
b89517891ff5b7da49e6ebdda299d7870a7811dd
|
[
"Apache-2.0"
] | 2 |
2017-12-07T17:45:15.000Z
|
2019-11-25T23:47:20.000Z
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pretend
import pytest
from pyramid.httpexceptions import HTTPBadRequest
from warehouse.legacy.api import pypi
from ....common.db.classifiers import ClassifierFactory
def test_exc_with_message():
exc = pypi._exc_with_message(HTTPBadRequest, "My Test Message.")
assert isinstance(exc, HTTPBadRequest)
assert exc.status_code == 400
assert exc.status == "400 My Test Message."
@pytest.mark.parametrize(
("settings", "expected_domain"),
[
({}, "example.com"),
({"warehouse.domain": "w.example.com"}, "w.example.com"),
(
{
"forklift.domain": "f.example.com",
"warehouse.domain": "w.example.com",
},
"f.example.com",
),
],
)
def test_forklifted(settings, expected_domain):
request = pretend.stub(
domain="example.com",
registry=pretend.stub(settings=settings),
)
information_url = "TODO"
resp = pypi.forklifted(request)
assert resp.status_code == 410
assert resp.status == (
"410 This API has moved to https://{}/legacy/. See {} for more "
"information."
).format(expected_domain, information_url)
def test_doap(pyramid_request):
resp = pypi.doap(pyramid_request)
assert resp.status_code == 410
assert resp.status == "410 DOAP is no longer supported."
def test_forbidden_legacy():
exc, request = pretend.stub(), pretend.stub()
resp = pypi.forbidden_legacy(exc, request)
assert resp is exc
def test_list_classifiers(db_request):
ClassifierFactory.create(classifier="foo :: bar")
ClassifierFactory.create(classifier="foo :: baz")
ClassifierFactory.create(classifier="fiz :: buz")
resp = pypi.list_classifiers(db_request)
assert resp.status_code == 200
assert resp.text == "fiz :: buz\nfoo :: bar\nfoo :: baz"
| 28.951807 | 74 | 0.677903 |
4a1ad6f6f801cbf566ca53e0b728b357ce3f68c0
| 19,658 |
py
|
Python
|
composer/utils/file_helpers.py
|
growlix/composer
|
27418a3c65dca26d90ac09c6ae67cbd5d0202ccf
|
[
"Apache-2.0"
] | null | null | null |
composer/utils/file_helpers.py
|
growlix/composer
|
27418a3c65dca26d90ac09c6ae67cbd5d0202ccf
|
[
"Apache-2.0"
] | null | null | null |
composer/utils/file_helpers.py
|
growlix/composer
|
27418a3c65dca26d90ac09c6ae67cbd5d0202ccf
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
"""Helpers for working with files."""
from __future__ import annotations
import os
import pathlib
import re
import uuid
from typing import TYPE_CHECKING, Optional, Union
import requests
import tqdm
from composer.core.time import Time, Timestamp
from composer.utils import dist
from composer.utils.iter_helpers import iterate_with_callback
from composer.utils.object_store import ObjectStore
if TYPE_CHECKING:
from composer.loggers import LoggerDestination
__all__ = [
'get_file',
'ensure_folder_is_empty',
'ensure_folder_has_no_conflicting_files',
'format_name_with_dist',
'format_name_with_dist_and_time',
'is_tar',
]
def is_tar(name: Union[str, pathlib.Path]) -> bool:
"""Returns whether ``name`` has a tar-like extension.
Args:
name (str | pathlib.Path): The name to check.
Returns:
bool: Whether ``name`` is a tarball.
"""
return any(str(name).endswith(x) for x in (".tar", ".tgz", ".tar.gz", ".tar.bz2", ".tar.lzma"))
def ensure_folder_is_empty(folder_name: Union[str, pathlib.Path]):
"""Ensure that the given folder is empty.
    Hidden files and folders (those beginning with ``.``) are ignored. Sub-folders are checked recursively.
Args:
folder_name (str | pathlib.Path): The folder to ensure is empty.
Raises:
FileExistsError: If ``folder_name`` contains any non-hidden files, recursively.
"""
for root, dirs, files in os.walk(folder_name, topdown=True):
# Filter out hidden folders
dirs[:] = (x for x in dirs if not x.startswith('.'))
for file in files:
if not file.startswith("."):
raise FileExistsError(f"{folder_name} is not empty; {os.path.join(root, file)} exists.")
def ensure_folder_has_no_conflicting_files(folder_name: Union[str, pathlib.Path], filename: str, timestamp: Timestamp):
"""Ensure that the given folder does not have any files conflicting with the ``filename`` format string.
    If the folder contains a file whose name matches ``filename`` formatted with a timestamp whose epoch, batch,
    sample, or token counts are after ``timestamp``, a ``FileExistsError`` will be raised.
Args:
folder_name (str | pathlib.Path): The folder to inspect.
filename (str): The pattern string for potential files.
timestamp (Timestamp): Ignore any files that occur before the provided timestamp.
Raises:
FileExistsError: If ``folder_name`` contains any files matching the ``filename`` template before ``timestamp``.
"""
# Prepare regex pattern by replacing f-string formatting with regex.
pattern = f"^{filename}$"
# Format time vars for capture
time_names = ["epoch", "batch", "sample", "token", "batch_in_epoch", "sample_in_epoch", "token_in_epoch"]
captured_names = {time_name: f"{{{time_name}}}" in filename for time_name in time_names}
for time_name, is_captured in captured_names.items():
if is_captured:
pattern = pattern.replace(f"{{{time_name}}}", f"(?P<{time_name}>\\d+)")
# Format rank information
pattern = pattern.format(rank=dist.get_global_rank(),
local_rank=dist.get_local_rank(),
world_size=dist.get_world_size(),
local_world_size=dist.get_local_world_size(),
node_rank=dist.get_node_rank())
template = re.compile(pattern)
for file in os.listdir(folder_name):
match = template.match(file)
# Encountered an invalid match
if match is not None:
valid_match = True
# Check each base unit of time and flag later checkpoints
if captured_names["token"] and Time.from_token(int(match.group("token"))) > timestamp.token:
valid_match = False
elif captured_names["sample"] and Time.from_sample(int(match.group("sample"))) > timestamp.sample:
valid_match = False
elif captured_names["batch"] and Time.from_batch(int(match.group("batch"))) > timestamp.batch:
valid_match = False
elif captured_names["epoch"] and Time.from_epoch(int(match.group("epoch"))) > timestamp.epoch:
valid_match = False
# If epoch count is same, check batch_in_epoch, sample_in_epoch, token_in_epoch
elif captured_names["epoch"] and Time.from_epoch(int(match.group("epoch"))) == timestamp.epoch:
if captured_names["token_in_epoch"] and Time.from_token(int(
match.group("token_in_epoch"))) > timestamp.token_in_epoch:
valid_match = False
elif captured_names["sample_in_epoch"] and Time.from_sample(int(
match.group("sample_in_epoch"))) > timestamp.sample_in_epoch:
valid_match = False
elif captured_names["batch_in_epoch"] and Time.from_batch(int(
match.group("batch_in_epoch"))) > timestamp.batch_in_epoch:
valid_match = False
if not valid_match:
raise FileExistsError(
f"{os.path.join(folder_name, file)} exists and conflicts in namespace with a future checkpoint of the current run."
)
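# Usage sketch (added for illustration): guard a checkpoint folder against files
# from a "future" point in training. It assumes that ``Timestamp()`` default-constructs
# with all counters at zero (the state at the start of a run) and that a folder named
# 'checkpoints' exists; both are illustrative assumptions, not part of the original module.
def _example_conflict_check():
    ensure_folder_has_no_conflicting_files(
        folder_name='checkpoints',
        filename='ep{epoch}-ba{batch}-rank{rank}.pt',
        timestamp=Timestamp(),
    )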
FORMAT_NAME_WITH_DIST_TABLE = """
+------------------------+-------------------------------------------------------+
| Variable | Description |
+========================+=======================================================+
| ``{run_name}`` | The name of the training run. See |
| | :attr:`~composer.loggers.logger.Logger.run_name`. |
+------------------------+-------------------------------------------------------+
| ``{rank}`` | The global rank, as returned by |
| | :func:`~composer.utils.dist.get_global_rank`. |
+------------------------+-------------------------------------------------------+
| ``{local_rank}`` | The local rank of the process, as returned by |
| | :func:`~composer.utils.dist.get_local_rank`. |
+------------------------+-------------------------------------------------------+
| ``{world_size}`` | The world size, as returned by |
| | :func:`~composer.utils.dist.get_world_size`. |
+------------------------+-------------------------------------------------------+
| ``{local_world_size}`` | The local world size, as returned by |
| | :func:`~composer.utils.dist.get_local_world_size`. |
+------------------------+-------------------------------------------------------+
| ``{node_rank}`` | The node rank, as returned by |
| | :func:`~composer.utils.dist.get_node_rank`. |
+------------------------+-------------------------------------------------------+
"""
def format_name_with_dist(format_str: str, run_name: str, **extra_format_kwargs: object): # noqa: D103
formatted_str = format_str.format(
run_name=run_name,
rank=dist.get_global_rank(),
local_rank=dist.get_local_rank(),
world_size=dist.get_world_size(),
local_world_size=dist.get_local_world_size(),
node_rank=dist.get_node_rank(),
**extra_format_kwargs,
)
return formatted_str
format_name_with_dist.__doc__ = f"""
Format ``format_str`` with the ``run_name``, distributed variables, and ``extra_format_kwargs``.
The following format variables are available:
{FORMAT_NAME_WITH_DIST_TABLE}
For example, assume that the rank is ``0``. Then:
>>> from composer.utils import format_name_with_dist
>>> format_str = '{{run_name}}/rank{{rank}}.{{extension}}'
>>> format_name_with_dist(
... format_str,
... run_name='awesome_training_run',
... extension='json',
... )
'awesome_training_run/rank0.json'
Args:
format_str (str): The format string for the checkpoint filename.
run_name (str): The value for the ``{{run_name}}`` format variable.
extra_format_kwargs (object): Any additional :meth:`~str.format` kwargs.
"""
FORMAT_NAME_WITH_DIST_AND_TIME_TABLE = """
+----------------------------+------------------------------------------------------------+
| Variable | Description |
+============================+============================================================+
| ``{run_name}`` | The name of the training run. See |
| | :attr:`~composer.loggers.logger.Logger.run_name`. |
+----------------------------+------------------------------------------------------------+
| ``{rank}`` | The global rank, as returned by |
| | :func:`~composer.utils.dist.get_global_rank`. |
+----------------------------+------------------------------------------------------------+
| ``{local_rank}`` | The local rank of the process, as returned by |
| | :func:`~composer.utils.dist.get_local_rank`. |
+----------------------------+------------------------------------------------------------+
| ``{world_size}`` | The world size, as returned by |
| | :func:`~composer.utils.dist.get_world_size`. |
+----------------------------+------------------------------------------------------------+
| ``{local_world_size}`` | The local world size, as returned by |
| | :func:`~composer.utils.dist.get_local_world_size`. |
+----------------------------+------------------------------------------------------------+
| ``{node_rank}`` | The node rank, as returned by |
| | :func:`~composer.utils.dist.get_node_rank`. |
+----------------------------+------------------------------------------------------------+
| ``{epoch}`` | The total epoch count, as returned by |
| | :meth:`~composer.core.time.Timestamp.epoch`. |
+----------------------------+------------------------------------------------------------+
| ``{batch}`` | The total batch count, as returned by |
| | :meth:`~composer.core.time.Timestamp.batch`. |
+----------------------------+------------------------------------------------------------+
| ``{batch_in_epoch}`` | The batch count in the current epoch, as returned by |
| | :meth:`~composer.core.time.Timestamp.batch_in_epoch`. |
+----------------------------+------------------------------------------------------------+
| ``{sample}`` | The total sample count, as returned by |
| | :meth:`~composer.core.time.Timestamp.sample`. |
+----------------------------+------------------------------------------------------------+
| ``{sample_in_epoch}`` | The sample count in the current epoch, as returned by |
| | :meth:`~composer.core.time.Timestamp.sample_in_epoch`. |
+----------------------------+------------------------------------------------------------+
| ``{token}`` | The total token count, as returned by |
| | :meth:`~composer.core.time.Timestamp.token`. |
+----------------------------+------------------------------------------------------------+
| ``{token_in_epoch}`` | The token count in the current epoch, as returned by |
| | :meth:`~composer.core.time.Timestamp.token_in_epoch`. |
+----------------------------+------------------------------------------------------------+
| ``{total_wct}`` | The total training duration in seconds, as returned by |
| | :meth:`~composer.core.time.Timestamp.total_wct`. |
+----------------------------+------------------------------------------------------------+
| ``{epoch_wct}`` | The epoch duration in seconds, as returned by |
| | :meth:`~composer.core.time.Timestamp.epoch_wct`. |
+----------------------------+------------------------------------------------------------+
| ``{batch_wct}`` | The batch duration in seconds, as returned by |
| | :meth:`~composer.core.time.Timestamp.batch_wct`. |
+----------------------------+------------------------------------------------------------+
"""
def format_name_with_dist_and_time(
format_str: str,
run_name: str,
timestamp: Timestamp,
**extra_format_kwargs: object,
): # noqa: D103
formatted_str = format_str.format(
run_name=run_name,
rank=dist.get_global_rank(),
local_rank=dist.get_local_rank(),
world_size=dist.get_world_size(),
local_world_size=dist.get_local_world_size(),
node_rank=dist.get_node_rank(),
epoch=int(timestamp.epoch),
batch=int(timestamp.batch),
batch_in_epoch=int(timestamp.batch_in_epoch),
sample=int(timestamp.sample),
sample_in_epoch=int(timestamp.sample_in_epoch),
token=int(timestamp.token),
token_in_epoch=int(timestamp.token_in_epoch),
total_wct=timestamp.total_wct.total_seconds(),
epoch_wct=timestamp.epoch_wct.total_seconds(),
batch_wct=timestamp.batch_wct.total_seconds(),
**extra_format_kwargs,
)
return formatted_str
format_name_with_dist_and_time.__doc__ = f"""\
Format ``format_str`` with the ``run_name``, distributed variables, ``timestamp``, and ``extra_format_kwargs``.
In addition to the variables specified via ``extra_format_kwargs``, the following format variables are available:
{FORMAT_NAME_WITH_DIST_AND_TIME_TABLE}
For example, assume that the current epoch is ``0``, batch is ``0``, and rank is ``0``. Then:
>>> from composer.utils import format_name_with_dist_and_time
>>> format_str = '{{run_name}}/ep{{epoch}}-ba{{batch}}-rank{{rank}}.{{extension}}'
>>> format_name_with_dist_and_time(
... format_str,
... run_name='awesome_training_run',
... timestamp=state.timestamp,
... extension='json',
... )
'awesome_training_run/ep0-ba0-rank0.json'
Args:
format_str (str): The format string for the checkpoint filename.
run_name (str): The value for the ``{{run_name}}`` format variable.
timestamp (Timestamp): The timestamp.
extra_format_kwargs (object): Any additional :meth:`~str.format` kwargs.
"""
def get_file(
path: str,
destination: str,
object_store: Optional[Union[ObjectStore, LoggerDestination]] = None,
overwrite: bool = False,
progress_bar: bool = True,
):
"""Get a file from a local folder, URL, or object store.
Args:
path (str): The path to the file to retrieve.
* If ``object_store`` is specified, then the ``path`` should be the object name for the file to get.
              Do not include the cloud provider or bucket name.
* If ``object_store`` is not specified but the ``path`` begins with ``http://`` or ``https://``,
the object at this URL will be downloaded.
* Otherwise, ``path`` is presumed to be a local filepath.
destination (str): The destination filepath.
If ``path`` is a local filepath, then a symlink to ``path`` at ``destination`` will be created.
Otherwise, ``path`` will be downloaded to a file at ``destination``.
object_store (ObjectStore, optional): An :class:`~.ObjectStore`, if ``path`` is located inside
an object store (i.e. AWS S3 or Google Cloud Storage). (default: ``None``)
This :class:`~.ObjectStore` instance will be used to retrieve the file. The ``path`` parameter
should be set to the object name within the object store.
Set this parameter to ``None`` (the default) if ``path`` is a URL or a local file.
overwrite (bool): Whether to overwrite an existing file at ``destination``. (default: ``False``)
progress_bar (bool, optional): Whether to show a progress bar. Ignored if ``path`` is a local file.
(default: ``True``)
Raises:
FileNotFoundError: If the ``path`` does not exist.
"""
if object_store is not None:
if isinstance(object_store, ObjectStore):
total_size_in_bytes = object_store.get_object_size(path)
object_store.download_object(
object_name=path,
filename=destination,
callback=_get_callback(f"Downloading {path}") if progress_bar else None,
overwrite=overwrite,
)
else:
# Type LoggerDestination
object_store.get_file_artifact(
artifact_name=path,
destination=destination,
progress_bar=progress_bar,
overwrite=overwrite,
)
return
if path.lower().startswith("http://") or path.lower().startswith("https://"):
# it's a url
with requests.get(path, stream=True) as r:
try:
r.raise_for_status()
except requests.exceptions.HTTPError as e:
if r.status_code == 404:
raise FileNotFoundError(f"URL {path} not found") from e
raise e
total_size_in_bytes = r.headers.get('content-length')
if total_size_in_bytes is not None:
total_size_in_bytes = int(total_size_in_bytes)
else:
total_size_in_bytes = 0
tmp_path = destination + f".{uuid.uuid4()}.tmp"
try:
with open(tmp_path, "wb") as f:
for data in iterate_with_callback(
r.iter_content(2**20),
total_size_in_bytes,
callback=_get_callback(f"Downloading {path}") if progress_bar else None,
):
f.write(data)
except:
# The download failed for some reason. Make a best-effort attempt to remove the temporary file.
try:
os.remove(tmp_path)
except OSError:
pass
else:
os.rename(tmp_path, destination)
return
# It's a local filepath
if not os.path.exists(path):
raise FileNotFoundError(f"Local path {path} does not exist")
os.symlink(os.path.abspath(path), destination)
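# Usage sketch (added for illustration): download a file over HTTPS into the working
# directory. The URL is a placeholder; a local path, or an object name together with an
# ``object_store``, follows the same call shape as described in the docstring above.
def _example_get_file():
    get_file(
        path="https://example.com/checkpoints/ep0-ba0-rank0.pt",  # placeholder URL
        destination="ep0-ba0-rank0.pt",
        overwrite=True,
        progress_bar=False,
    )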
def _get_callback(description: str):
if len(description) > 60:
description = description[:42] + "..." + description[-15:]
pbar = None
def callback(num_bytes: int, total_size: int):
nonlocal pbar
if num_bytes == 0 or pbar is None:
pbar = tqdm.tqdm(desc=description, total=total_size, unit='iB', unit_scale=True)
pbar.update(num_bytes)
return callback
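# Usage sketch (added for illustration): the closure returned by ``_get_callback`` keeps
# its tqdm bar in the enclosing scope; the first call (``num_bytes == 0`` or no bar yet)
# creates the bar, and every call advances it by the number of bytes just processed.
def _example_progress_callback():
    callback = _get_callback("Downloading example artifact")
    callback(0, 1024)    # creates a bar sized to 1024 bytes
    callback(512, 1024)  # advances the bar by 512 bytes
    callback(512, 1024)  # advances the bar to completion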
| 47.598063 | 135 | 0.51811 |
4a1ad7596c7fdf5d5c6ccd738151268f3f09c9cb
| 1,463 |
py
|
Python
|
python/static-server.py
|
tbremer/dotfiles
|
6a726e32599c6755e52a10bb21d749a320044474
|
[
"MIT"
] | 5 |
2015-10-21T15:47:43.000Z
|
2018-08-03T10:36:51.000Z
|
python/static-server.py
|
tbremer/dotfiles
|
6a726e32599c6755e52a10bb21d749a320044474
|
[
"MIT"
] | 2 |
2020-07-11T00:04:43.000Z
|
2020-07-11T00:04:51.000Z
|
python/static-server.py
|
tbremer/dotfiles
|
6a726e32599c6755e52a10bb21d749a320044474
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
"""
Static file server with History API fallback
"""
from http.server import BaseHTTPRequestHandler, HTTPServer
import ssl
import os
import sys
import urllib
import time
import mimetypes
host = 'localhost'
try:
port = int(sys.argv[1])
except IndexError:
port = 4443
class Handler(BaseHTTPRequestHandler):
def do_GET(self):
urlparts = urllib.parse.urlparse(self.path)
request_file_path = urlparts.path.strip('/')
try:
with open(request_file_path, 'rb') as file:
filecontents = file.read()
mime = mimetypes.guess_type(request_file_path)
        except Exception:
            # History API fallback: serve index.html for any path that does not
            # map to an existing file.
            with open('index.html', 'rb') as file:
                filecontents = file.read()
            mime = ('text/html', None)  # keep the (type, encoding) shape returned by guess_type()
        self.send_response(200)
        self.send_header("Content-Type", mime[0] or 'application/octet-stream')
self.end_headers()
self.wfile.write(filecontents)
myServer = HTTPServer((host, port), Handler)
print(time.asctime(time.localtime()))
print("Server Starts - http://%s:%s" % (host, port))
print("----")
try:
    try:
        # Serve over HTTPS if a ./server.pem certificate is present.
        myServer.socket = ssl.wrap_socket(myServer.socket, certfile='./server.pem', server_side=True)
    except (OSError, ssl.SSLError):
        # No usable certificate: fall back to plain HTTP.
        pass
myServer.serve_forever()
except KeyboardInterrupt:
print("")
pass
myServer.server_close()
print("----")
print(time.asctime(time.localtime()))
print("Server Stopped - http://%s:%s" % (host, port))
print("----")
| 22.859375 | 101 | 0.637047 |
4a1ad86aa745323d3dbe2b950e10def6a06879bb
| 1,132 |
py
|
Python
|
tests/forward_time.py
|
tkianai/Facenet.Inference
|
d16f8ba938441875a30f11c0ff910d52571d9568
|
[
"MIT"
] | 2 |
2020-04-22T23:27:33.000Z
|
2020-06-08T11:43:41.000Z
|
tests/forward_time.py
|
tkianai/Facenet.Inference
|
d16f8ba938441875a30f11c0ff910d52571d9568
|
[
"MIT"
] | null | null | null |
tests/forward_time.py
|
tkianai/Facenet.Inference
|
d16f8ba938441875a30f11c0ff910d52571d9568
|
[
"MIT"
] | null | null | null |
import sys
sys.path.append('../')
from det_rec import build_det_model
import time
import torch
torch.set_grad_enabled(False)
model = build_det_model('../checkpoint/det_model.pth')
model.cuda()
model.eval()
batch_size = 32
image_size = 800
for _ in range(10):
print("Start single image forward...")
data = torch.Tensor(1, 3, image_size, image_size).cuda()
torch.cuda.synchronize()
_start = time.time()
for i in range(batch_size):
model(data)
torch.cuda.synchronize()
_end = time.time()
print("Used: {}".format(_end - _start))
print("Start batch image forward...")
data = torch.Tensor(batch_size, 3, image_size, image_size).cuda()
torch.cuda.synchronize()
_start = time.time()
model(data)
torch.cuda.synchronize()
_end = time.time()
print("Used: {}".format(_end - _start))
'''
batch_size | image size | single/batch
16 | 1600 | 1.3/1.2
8 | 1600 | 0.64/0.6
8 | 800 | 0.19/0.15
16 | 800 | 0.37/0.31
32 | 800 | 0.75/0.66
'''
| 22.64 | 69 | 0.572438 |
4a1adb41d2fe501e252cfb2a5c35bba0373a27b4
| 6,455 |
py
|
Python
|
DQM/MuonMonitor/test/mutracking_dqm_sourceclient-live_cfg.py
|
jkiesele/cmssw
|
e626860d26692de5880c52c7c80aec7b859a0c60
|
[
"Apache-2.0"
] | null | null | null |
DQM/MuonMonitor/test/mutracking_dqm_sourceclient-live_cfg.py
|
jkiesele/cmssw
|
e626860d26692de5880c52c7c80aec7b859a0c60
|
[
"Apache-2.0"
] | null | null | null |
DQM/MuonMonitor/test/mutracking_dqm_sourceclient-live_cfg.py
|
jkiesele/cmssw
|
e626860d26692de5880c52c7c80aec7b859a0c60
|
[
"Apache-2.0"
] | null | null | null |
from __future__ import print_function
import FWCore.ParameterSet.Config as cms
from Configuration.Eras.Era_Run2_2018_pp_on_AA_cff import Run2_2018_pp_on_AA
process = cms.Process("MUTRKDQM", Run2_2018_pp_on_AA)
# Input source
process.source = cms.Source("PoolSource",
fileNames = cms.untracked.vstring('file:/eos/cms/store/express/Commissioning2019/ExpressCosmics/FEVT/Express-v1/000/331/571/00000/35501AC0-29E7-EA4C-AC1C-194D9B2F12D9.root'),
secondaryFileNames = cms.untracked.vstring()
)
#----------------------------
#### DQM Environment
#----------------------------
process.load("DQMServices.Components.DQMEnvironment_cfi")
process.load("DQM.Integration.config.environment_cfi")
#----------------------------
# DQM Live Environment
#-----------------------------
dqmRunConfigDefaults = {
'userarea': cms.PSet(
type = cms.untracked.string("userarea"),
collectorPort = cms.untracked.int32(9190),
collectorHost = cms.untracked.string('lxplus748'),
),
}
dqmRunConfigType = "userarea"
dqmRunConfig = dqmRunConfigDefaults[dqmRunConfigType]
process.load("DQMServices.Core.DQMStore_cfi")
process.DQM = cms.Service("DQM",
debug = cms.untracked.bool(False),
publishFrequency = cms.untracked.double(5.0),
collectorPort = dqmRunConfig.collectorPort,
collectorHost = dqmRunConfig.collectorHost,
filter = cms.untracked.string(''),
)
process.DQMMonitoringService = cms.Service("DQMMonitoringService")
process.load("DQMServices.Components.DQMEventInfo_cfi")
process.load("DQMServices.FileIO.DQMFileSaverOnline_cfi")
#upload should be either a directory or a symlink for dqm gui destination
process.dqmSaver.path = "."
process.dqmSaver.producer = 'MUTRKDQM'
process.dqmSaver.backupLumiCount = 15
TAG = "Muons"
process.dqmEnv.subSystemFolder =TAG
process.dqmSaver.tag = TAG
# Imports
#-------------------------------------------------
# GEOMETRY
#-------------------------------------------------
process.load("Configuration.StandardSequences.GeometryRecoDB_cff")
#-----------------------------
# Magnetic Field
#-----------------------------
process.load("Configuration.StandardSequences.MagneticField_cff")
#-----------------------------
# Cosmics muon reco sequence
#-----------------------------
process.load("Configuration.StandardSequences.RawToDigi_Data_cff")
process.load("Configuration.StandardSequences.ReconstructionCosmics_cff")
process.load("L1Trigger.Configuration.L1TRawToDigi_cff")
#-------------------------------------------------
# GLOBALTAG
#-------------------------------------------------
# Condition for P5 cluster
process.load('Configuration.StandardSequences.FrontierConditions_GlobalTag_cff')
from Configuration.AlCa.GlobalTag import GlobalTag as gtCustomise
process.GlobalTag = gtCustomise(process.GlobalTag, 'auto:run2_data', '')
# message logger
process.MessageLogger = cms.Service("MessageLogger",
destinations = cms.untracked.vstring('cout'),
cout = cms.untracked.PSet(threshold = cms.untracked.string('DEBUG'),
)
)
process.dqmmodules = cms.Sequence(process.dqmEnv + process.dqmSaver)
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(100)
)
# output module
#
process.load("Configuration.EventContent.EventContentCosmics_cff")
process.RECOoutput = cms.OutputModule("PoolOutputModule",
outputCommands = process.RecoMuonRECO.outputCommands, fileName = cms.untracked.string('promptrecoCosmics.root')
)
process.output = cms.EndPath(process.RECOoutput)
#------------------------------------
# Cosmic muons reconstruction modules
#------------------------------------
#1 RAW-TO-DIGI
process.muRawToDigi = cms.Sequence(process.L1TRawToDigi +
process.muonCSCDigis +
process.muonDTDigis +
process.muonRPCDigis +
process.muonGEMDigis)
#2 STA RECO
## From cmssw/RecoMuon/Configuration/python/RecoMuonCosmics_cff.py
process.muSTAreco = cms.Sequence(process.STAmuontrackingforcosmics)
#--------------------------
# Filters
#--------------------------
# HLT Filter
# 0=random, 1=physics, 2=calibration, 3=technical
process.hltTriggerTypeFilter = cms.EDFilter("HLTTriggerTypeFilter",
SelectedTriggerType = cms.int32(1)
)
# HLT trigger selection (HLT_ZeroBias)
# modified for 0 Tesla HLT menu (no ZeroBias_*)
process.load('HLTrigger.HLTfilters.hltHighLevel_cfi')
process.hltHighLevel.HLTPaths = cms.vstring('HLT*Mu*','HLT_*Physics*')
process.hltHighLevel.andOr = cms.bool(True)
process.hltHighLevel.throw = cms.bool(False)
#-----------------------------
# DQM monitor modules
#-----------------------------
process.load("DQM.MuonMonitor.muonCosmicAnalyzer_cff")
process.muonDQM = cms.Sequence(process.muonCosmicAnalyzer)
#--------------------------
# Scheduling
#--------------------------
process.allReco = cms.Sequence(process.muRawToDigi*process.muSTAreco)
process.allDQM = cms.Sequence(process.muonDQM*process.dqmEnv*process.dqmSaver)
process.allPaths = cms.Path(process.hltHighLevel * process.hltTriggerTypeFilter * process.allReco * process.allDQM)
from DQM.Integration.config.online_customizations_cfi import *
process = customise(process)
#--------------------------
# Service
#--------------------------
process.AdaptorConfig = cms.Service("AdaptorConfig")
| 33.102564 | 230 | 0.546863 |
4a1adb9d1c2a87c048891359266dd64fb7fd0b9a
| 208 |
py
|
Python
|
hotrecharge/__init__.py
|
takumade/Hot-Recharge-ZW
|
9d59bcdb83c0ad38c7a4b944bc74e82ed20adda1
|
[
"MIT"
] | 8 |
2020-07-22T07:25:48.000Z
|
2021-12-18T17:38:45.000Z
|
hotrecharge/__init__.py
|
takumade/Hot-Recharge-ZW
|
9d59bcdb83c0ad38c7a4b944bc74e82ed20adda1
|
[
"MIT"
] | 2 |
2020-12-28T07:31:49.000Z
|
2021-09-15T20:49:34.000Z
|
hotrecharge/__init__.py
|
DonnC/Hot-Recharge-ZW
|
51bf6bf09aea879dc06f99f6ac7752b354ed3be8
|
[
"MIT"
] | 2 |
2021-02-04T10:01:23.000Z
|
2021-02-13T23:17:20.000Z
|
# __init__.py
from hotrecharge import *
from .HotRecharge import HotRecharge, HRAuthConfig
from .HotRechargeException import *
__author__ = "Donald Chinhuru"
__version__ = "3.3.1"
__name__ = "hot-recharge"
| 23.111111 | 50 | 0.778846 |
4a1adc557f9ba5a6ab9803f17109ca47a75f0211
| 12,516 |
py
|
Python
|
Data_Processor.py
|
SeTheory/Text_Mining
|
b54036bc29777a51f1087a65c0157d9511aaa585
|
[
"MIT"
] | 1 |
2021-11-29T10:38:02.000Z
|
2021-11-29T10:38:02.000Z
|
Data_Processor.py
|
SeTheory/Text_Mining
|
b54036bc29777a51f1087a65c0157d9511aaa585
|
[
"MIT"
] | null | null | null |
Data_Processor.py
|
SeTheory/Text_Mining
|
b54036bc29777a51f1087a65c0157d9511aaa585
|
[
"MIT"
] | null | null | null |
#! /user/bin/evn python
# -*- coding:utf8 -*-
"""
Data_Processor
======
A class for something.
@author: Guoxiu He
@contact: gxhe@fem.ecnu.edu.cn
@site: https://scholar.google.com/citations?user=2NVhxpAAAAAJ
@time: 20:44, 2021/11/22
@copyright: "Copyright (c) 2021 Guoxiu He. All Rights Reserved"
"""
import os
import sys
import argparse
import datetime
import json
import random
import nltk
class Data_Processor(object):
def __init__(self):
print('Init...')
self.data_root = './Datasets/'
self.original_root = self.data_root + 'original/'
self.aapr_root = self.original_root + 'AAPR_Dataset/'
self.exp_root = './exp/'
##############################
# AAPR
##############################
def show_json_data(self):
for i in range(4):
path = self.aapr_root + 'data{}'.format(i+1)
with open(path, 'r') as fp:
data = json.load(fp)
print(len(data))
for paper_id, info in data.items():
for key, value in info.items():
print(key)
break
def extract_abs_label(self):
abs_list = []
category_list = []
category_dict = {}
venue_list = []
venue_dict = {}
label_list = []
count = 0
error_count = 0
for i in range(4):
path = self.aapr_root + 'data{}'.format(i+1)
with open(path, 'r') as fp:
data = json.load(fp)
for paper_id, info in data.items():
abs = info['abstract'].strip()
category = info['category'].strip()
venue = info['venue'].strip()
if abs and category and venue:
abs_list.append(abs)
category_list.append(category)
if category not in category_dict:
category_dict[category] = 1
else:
category_dict[category] += 1
venue_list.append(venue)
if venue not in venue_dict:
venue_dict[venue] = 1
else:
venue_dict[venue] += 1
if venue in {'CoRR', 'No'}:
label_list.append('0')
else:
label_list.append('1')
else:
print("Error abs: {}".format(abs))
print("Error label: {}".format(category))
print("Error venue: {}".format(venue))
error_count += 1
count += 1
top_num = 5
print("Print top {} abs:".format(top_num))
for abs in abs_list[:top_num]:
print(abs)
print("Print top {} category:".format(top_num))
for category in category_list[:top_num]:
print(category)
print("Print top {} venue:".format(top_num))
for venue in venue_list[:top_num]:
print(venue)
print("category_dict:\n", category_dict)
print("venue_dict:\n", venue_dict)
print("There are {} papers.".format(count))
print("There are {} error abs or labels.".format(error_count))
return abs_list, label_list
def save_single(self, data, path, clean=0):
count = 0
with open(path, 'w') as fw:
for line in data:
if clean:
line = self.clean_line(line)
fw.write(line + '\n')
count += 1
print("Done for saving {} lines to {}.".format(count, path))
def save_pair(self, data_input, data_output, input_path, output_path, clean=0):
self.save_single(data_input, input_path, clean=clean)
self.save_single(data_output, output_path, clean=clean)
def save_abs_label(self):
save_path = self.data_root + 'aapr/'
if not os.path.exists(save_path):
os.mkdir(save_path)
abs_list, label_list = self.extract_abs_label()
input_path = save_path + 'data.input'
output_path = save_path + 'data.output'
self.save_pair(data_input=abs_list, data_output=label_list, input_path=input_path, output_path=output_path)
print("There are {} 1 labels.".format(sum(list(map(int, label_list)))/len(label_list)))
################################################################################
def clean_line(self, line):
new_line = nltk.word_tokenize(line.lower())
return ' '.join(new_line)
def split_data(self, data_name='aapr', fold=10, split_rate=0.7, clean=0, *args, **kwargs):
with open(self.data_root + '{}/data.input'.format(data_name), 'r') as fp:
data_input = list(map(lambda x: x.strip(), fp.readlines()))
print("Successfully load input data from {}.".format(self.data_root + '{}/data.input'.format(data_name)))
with open(self.data_root + '{}/data.output'.format(data_name), 'r') as fp:
data_output = list(map(lambda x: x.strip(), fp.readlines()))
print("Successfully load output data from {}.".format(self.data_root + '{}/data.output'.format(data_name)))
for i in range(fold):
print("Processing fold {}...".format(i))
random.seed(i)
data = list(zip(data_input, data_output))
random.shuffle(data)
data_input, data_output = zip(*data)
data_size = len(data_output)
train_input = data_input[:int(data_size*split_rate)]
train_output = data_output[:int(data_size*split_rate)]
val_input = data_input[int(data_size*split_rate): int(data_size*(split_rate+(1-split_rate)/2))]
val_output = data_output[int(data_size*split_rate): int(data_size*(split_rate+(1-split_rate)/2))]
test_input = data_input[int(data_size*(split_rate+(1-split_rate)/2)):]
test_output = data_output[int(data_size*(split_rate+(1-split_rate)/2)):]
data_folder = self.data_root + '{}/'.format(data_name)
data_fold_folder = data_folder + '{}/'.format(i)
if not os.path.exists(data_fold_folder):
os.mkdir(data_fold_folder)
if clean:
mode = '_'.join(['clean'])
train_input_path = data_fold_folder + 'train_{}_{}.input'.format(mode, i)
train_output_path = data_fold_folder + 'train_{}_{}.output'.format(mode, i)
else:
train_input_path = data_fold_folder + 'train_{}.input'.format(i)
train_output_path = data_fold_folder + 'train_{}.output'.format(i)
self.save_pair(data_input=train_input, data_output=train_output,
input_path=train_input_path, output_path=train_output_path,
clean=clean)
print("There are {} 1 labels.".format(sum(list(map(int, train_output)))/len(train_output)))
if clean:
mode = '_'.join(['clean'])
val_input_path = data_fold_folder + 'val_{}_{}.input'.format(mode, i)
val_output_path = data_fold_folder + 'val_{}_{}.output'.format(mode, i)
else:
val_input_path = data_fold_folder + 'val_{}.input'.format(i)
val_output_path = data_fold_folder + 'val_{}.output'.format(i)
self.save_pair(data_input=val_input, data_output=val_output,
input_path=val_input_path, output_path=val_output_path, clean=clean)
print("There are {} 1 labels.".format(sum(list(map(int, val_output))) / len(val_output)))
if clean:
mode = '_'.join(['clean'])
test_input_path = data_fold_folder + '/test_{}_{}.input'.format(mode, i)
test_output_path = data_fold_folder + '/test_{}_{}.output'.format(mode, i)
else:
test_input_path = data_fold_folder + '/test_{}.input'.format(i)
test_output_path = data_fold_folder + '/test_{}.output'.format(i)
self.save_pair(data_input=test_input, data_output=test_output,
input_path=test_input_path, output_path=test_output_path, clean=clean)
print("There are {} 1 labels.".format(sum(list(map(int, test_output))) / len(test_output)))
def get_vocab(self, data_name='aapr', fold=10, clean=0, cover_rate=1, mincount=0, *args, **kwargs):
data_folder = self.data_root + '{}/'.format(data_name)
for i in range(fold):
data_fold_folder = data_folder + '{}/'.format(i)
if clean:
mode = '_'.join(['clean'])
train_input_path = data_fold_folder + 'train_{}_{}.input'.format(mode, i)
else:
train_input_path = data_fold_folder + 'train_{}.input'.format(i)
word_count_dict = {}
total_word_count = 0
with open(train_input_path, 'r') as fp:
for line in fp.readlines():
for word in line.strip().split():
total_word_count += 1
if word not in word_count_dict:
word_count_dict[word] = 1
else:
word_count_dict[word] += 1
sorted_word_count_dict = sorted(word_count_dict.items(), key=lambda x: x[1], reverse=True)
print("There are {} words originally.".format(len(sorted_word_count_dict)))
word_dict = {'PAD': 0, 'UNK': 1, 'SOS': 2, 'EOS': 3}
tmp_word_count = 0
for word, count in sorted_word_count_dict:
tmp_word_count += count
current_rate = tmp_word_count / total_word_count
if count > mincount and current_rate < cover_rate:
word_dict[word] = len(word_dict)
print("There are {} words finally.".format(len(word_dict)))
exp_data_folder = self.exp_root + '{}/'.format(data_name)
if not os.path.exists(exp_data_folder):
os.mkdir(exp_data_folder)
exp_data_dl_folder = exp_data_folder + 'dl/'
if not os.path.exists(exp_data_dl_folder):
os.mkdir(exp_data_dl_folder)
vocal_data_dl_folder = exp_data_dl_folder + 'vocab/'
if not os.path.exists(vocal_data_dl_folder):
os.mkdir(vocal_data_dl_folder)
word_dict_path = vocal_data_dl_folder + 'vocab.cover{}.min{}.{}.json'.format(cover_rate, mincount, i)
with open(word_dict_path, 'w') as fw:
json.dump(word_dict, fw)
print("Successfully save word dict to {}.".format(word_dict_path))
if __name__ == '__main__':
start_time = datetime.datetime.now()
parser = argparse.ArgumentParser(description='Process some description.')
parser.add_argument('--phase', default='test', help='the function name.')
data_processor = Data_Processor()
args = parser.parse_args()
if args.phase == 'test':
print('This is a test process.')
elif args.phase == 'show_json_data':
data_processor.show_json_data()
elif args.phase == 'extract_abs_label':
data_processor.extract_abs_label()
elif args.phase == 'save_abs_label':
data_processor.save_abs_label()
elif args.phase.split('+')[0] == 'split_data':
config_name = args.phase.split('+')[1]
data_name = config_name.strip().split('.')[0]
model_cate = config_name.strip().split('.')[1]
config_path = './config/{}/{}/{}.json'.format(data_name, model_cate, config_name)
config = json.load(open(config_path, 'r'))
data_processor.split_data(**config)
elif args.phase.split('+')[0] == 'get_vocab':
config_name = args.phase.split('+')[1]
data_name = config_name.strip().split('.')[0]
model_cate = config_name.strip().split('.')[1]
config_path = './config/{}/{}/{}.json'.format(data_name, model_cate, config_name)
config = json.load(open(config_path, 'r'))
data_processor.get_vocab(**config)
else:
print("What the F**K! There is no {} function.".format(args.phase))
end_time = datetime.datetime.now()
print('{} takes {} seconds.'.format(args.phase, (end_time - start_time).seconds))
print('Done data_processor!')
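# Usage sketch (added for illustration; 'aapr.dl.split' and 'aapr.dl.vocab' are hypothetical
# config names that must correspond to JSON files under ./config/aapr/dl/):
#
#   python Data_Processor.py --phase save_abs_label
#   python Data_Processor.py --phase split_data+aapr.dl.split
#   python Data_Processor.py --phase get_vocab+aapr.dl.vocab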
| 43.609756 | 119 | 0.559604 |
4a1ade300303d853f37909f48551cf60fb68fafc
| 419 |
py
|
Python
|
tests/test_beautifulsoup.py
|
ht-loefflad/hidden_pdfs_heilbronn
|
662edefc42256ee661095ea1a525decf543bd75d
|
[
"CC-BY-4.0"
] | null | null | null |
tests/test_beautifulsoup.py
|
ht-loefflad/hidden_pdfs_heilbronn
|
662edefc42256ee661095ea1a525decf543bd75d
|
[
"CC-BY-4.0"
] | 3 |
2022-03-16T15:20:16.000Z
|
2022-03-22T08:43:34.000Z
|
tests/test_beautifulsoup.py
|
ht-loefflad/hidden_pdfs_heilbronn
|
662edefc42256ee661095ea1a525decf543bd75d
|
[
"CC-BY-4.0"
] | null | null | null |
import pytest
from src.crawlers.beatifulsoup import BeatifulSoupCrawler
class TestBeautifulSoupCrawler:
@pytest.fixture
def bs_crawler(self):
crawler = BeatifulSoupCrawler()
return crawler
def test_get_links(self, bs_crawler):
soup = bs_crawler._get_links('https://www.heilbronn.de/sitemap.html')
print(soup)
def test_run(self, bs_crawler):
bs_crawler.run()
| 23.277778 | 77 | 0.701671 |
4a1adeb0b3bea5d544822f6511531de5c64cd771
| 635 |
py
|
Python
|
extras/encrypt.py
|
jbygdell/LocalEGA
|
e0e5f9ee5c7e18cd4fbb1c8e89a77832c7c122b9
|
[
"Apache-2.0"
] | null | null | null |
extras/encrypt.py
|
jbygdell/LocalEGA
|
e0e5f9ee5c7e18cd4fbb1c8e89a77832c7c122b9
|
[
"Apache-2.0"
] | 3 |
2019-01-17T14:13:33.000Z
|
2019-01-17T14:45:42.000Z
|
extras/encrypt.py
|
secureb2share/secureb2share-localega
|
329dce036114945787bfb138321c99ca9170a601
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python
# -*- coding: utf-8 -*-
'''Encrypt a relatively small file, since it loads it in memory.'''
import sys
import argparse
from pgpy import PGPMessage, PGPKey
def main():
parser = argparse.ArgumentParser(description='''Encrypting a relatively small message''')
parser.add_argument('pubkey', help='PGP public key')
parser.add_argument('file', help='File to encrypt')
args = parser.parse_args()
message = PGPMessage.new(args.file, file=True)
key, _ = PGPKey.from_file(args.pubkey)
enc = key.encrypt(message)
sys.stdout.buffer.write(bytes(enc))
if __name__ == '__main__':
main()
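# Usage sketch (added for illustration; the file names are hypothetical):
#
#   python encrypt.py recipient_pubkey.asc small_file.txt > small_file.txt.pgp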
| 23.518519 | 93 | 0.688189 |
4a1adef0e6576eb4b4a9f4e509af99fcb94b526a
| 3,005 |
py
|
Python
|
app/forms.py
|
Zyntab/recept
|
9f11d734d6dcdc2a61f6984372f40f98446358cf
|
[
"MIT"
] | null | null | null |
app/forms.py
|
Zyntab/recept
|
9f11d734d6dcdc2a61f6984372f40f98446358cf
|
[
"MIT"
] | null | null | null |
app/forms.py
|
Zyntab/recept
|
9f11d734d6dcdc2a61f6984372f40f98446358cf
|
[
"MIT"
] | null | null | null |
from flask_wtf import FlaskForm
from wtforms import StringField, PasswordField, BooleanField, SubmitField, TextAreaField
from wtforms.validators import DataRequired, ValidationError, Email, EqualTo
from app.models import User
class LoginForm(FlaskForm):
username = StringField('Användarnamn eller email',
validators=[DataRequired(message='Du måste ange ett användarnamn')])
password = PasswordField('Lösenord',
validators=[DataRequired(message='Du måste ange ett lösenord')])
remember_me = BooleanField('Kom ihåg mig')
submit = SubmitField('Logga in')
class RegistrationForm(FlaskForm):
username = StringField('Användarnamn',
validators=[DataRequired(message='Du måste ange ett användarnamn')])
email = StringField('Email',
validators=[DataRequired(message='Du måste ange en email-adress'),
Email(message='Detta verkar inte vara en email-adress')])
password = PasswordField('Lösenord',
validators=[DataRequired(message='Du måste ange ett lösenord')])
password2 = PasswordField('Upprepa lösenord',
validators=[DataRequired(message='Måste vara ifyllt'),
EqualTo('password',
message='Lösenordet måste skrivas likadant två gånger')])
submit = SubmitField('Registrera')
def validate_username(self, username):
if '@' in username.data:
raise ValidationError('Användarnamnet får inte innehålla "@".')
user = User.query.filter_by(username=username.data).first()
if user is not None:
raise ValidationError('Användarnamnet är upptaget. Välj ett annat.')
def validate_email(self, email):
user = User.query.filter_by(email=email.data).first()
if user is not None:
raise ValidationError('Det finns redan en användare med den email-adressen.')
class EditProfileForm(FlaskForm):
username = StringField('Användarnamn',
validators=[DataRequired(
message='Måste vara ifyllt')])
submit = SubmitField('Spara')
def __init__(self, original_username, *args, **kwargs):
super(EditProfileForm, self).__init__(*args, **kwargs)
self.original_username = original_username
def validate_username(self, username):
if username.data != self.original_username:
user = User.query.filter_by(username=self.username.data).first()
if user is not None:
raise ValidationError('Användarnamnet är upptaget. Vänligen välj ett annat.')
class RecipeForm(FlaskForm):
name = StringField('Namn',
validators=[DataRequired(
message='Du måste ge receptet ett namn')])
notes = TextAreaField('Anteckningar')
submit = SubmitField('Spara')
| 48.467742 | 107 | 0.627953 |
4a1ae1dd653d22583d8127351f9a00a7d6473e2e
| 4,970 |
py
|
Python
|
layers.py
|
ashi22998/minor-project
|
740dd42423feebd0eb7891a151d74a2392d758f3
|
[
"MIT"
] | null | null | null |
layers.py
|
ashi22998/minor-project
|
740dd42423feebd0eb7891a151d74a2392d758f3
|
[
"MIT"
] | null | null | null |
layers.py
|
ashi22998/minor-project
|
740dd42423feebd0eb7891a151d74a2392d758f3
|
[
"MIT"
] | null | null | null |
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
class GraphAttentionLayer(nn.Module):
"""
Simple GAT layer, similar to https://arxiv.org/abs/1710.10903
"""
def __init__(self, in_features, out_features, dropout, alpha, concat=True):
super(GraphAttentionLayer, self).__init__()
self.dropout = dropout
self.in_features = in_features
self.out_features = out_features
self.alpha = alpha
self.concat = concat
self.W = nn.Parameter(torch.zeros(size=(in_features, out_features)))
nn.init.xavier_uniform_(self.W.data, gain=1.414)
self.a = nn.Parameter(torch.zeros(size=(2*out_features, 1)))
nn.init.xavier_uniform_(self.a.data, gain=1.414)
self.leakyrelu = nn.LeakyReLU(self.alpha)
def forward(self, input, adj):
h = torch.mm(input, self.W)
N = h.size()[0]
# torch.cuda.empty_cache()
# print(torch.cuda.memory_allocated(device=None))
# print(torch.cuda.max_memory_cached(device=None))
a_input = torch.cat([h.repeat(1, N).view(N * N, -1), h.repeat(N, 1)], dim=1).view(N, -1, 2 * self.out_features)
e = self.leakyrelu(torch.matmul(a_input, self.a).squeeze(2))
# torch.cuda.empty_cache()
zero_vec = -9e15*torch.ones_like(e)
attention = torch.where(adj > 0, e, zero_vec)
attention = F.softmax(attention, dim=1)
attention = F.dropout(attention, self.dropout, training=self.training)
h_prime = torch.matmul(attention, h)
# torch.cuda.empty_cache()
if self.concat:
return F.elu(h_prime)
else:
return h_prime
def __repr__(self):
return self.__class__.__name__ + ' (' + str(self.in_features) + ' -> ' + str(self.out_features) + ')'
class SpecialSpmmFunction(torch.autograd.Function):
"""Special function for only sparse region backpropataion layer."""
@staticmethod
def forward(ctx, indices, values, shape, b):
assert indices.requires_grad == False
a = torch.sparse_coo_tensor(indices, values, shape)
ctx.save_for_backward(a, b)
ctx.N = shape[0]
return torch.matmul(a, b)
@staticmethod
def backward(ctx, grad_output):
a, b = ctx.saved_tensors
grad_values = grad_b = None
if ctx.needs_input_grad[1]:
grad_a_dense = grad_output.matmul(b.t())
edge_idx = a._indices()[0, :] * ctx.N + a._indices()[1, :]
grad_values = grad_a_dense.view(-1)[edge_idx]
if ctx.needs_input_grad[3]:
grad_b = a.t().matmul(grad_output)
return None, grad_values, None, grad_b
class SpecialSpmm(nn.Module):
def forward(self, indices, values, shape, b):
return SpecialSpmmFunction.apply(indices, values, shape, b)
class SpGraphAttentionLayer(nn.Module):
"""
Sparse version GAT layer, similar to https://arxiv.org/abs/1710.10903
"""
def __init__(self, in_features, out_features, dropout, alpha, concat=True):
super(SpGraphAttentionLayer, self).__init__()
self.in_features = in_features
self.out_features = out_features
self.alpha = alpha
self.concat = concat
self.W = nn.Parameter(torch.zeros(size=(in_features, out_features)))
nn.init.xavier_normal_(self.W.data, gain=1.414)
self.a = nn.Parameter(torch.zeros(size=(1, 2*out_features)))
nn.init.xavier_normal_(self.a.data, gain=1.414)
self.dropout = nn.Dropout(dropout)
self.leakyrelu = nn.LeakyReLU(self.alpha)
self.special_spmm = SpecialSpmm()
def forward(self, input, adj):
dv = 'cuda' if input.is_cuda else 'cpu'
N = input.size()[0]
edge = adj.nonzero().t()
h = torch.mm(input, self.W)
# h: N x out
assert not torch.isnan(h).any()
# Self-attention on the nodes - Shared attention mechanism
edge_h = torch.cat((h[edge[0, :], :], h[edge[1, :], :]), dim=1).t()
# edge: 2*D x E
edge_e = torch.exp(-self.leakyrelu(self.a.mm(edge_h).squeeze()))
assert not torch.isnan(edge_e).any()
# edge_e: E
e_rowsum = self.special_spmm(edge, edge_e, torch.Size([N, N]), torch.ones(size=(N,1), device=dv))
# e_rowsum: N x 1
edge_e = self.dropout(edge_e)
# edge_e: E
h_prime = self.special_spmm(edge, edge_e, torch.Size([N, N]), h)
assert not torch.isnan(h_prime).any()
# h_prime: N x out
h_prime = h_prime.div(e_rowsum)
# h_prime: N x out
assert not torch.isnan(h_prime).any()
if self.concat:
# if this layer is not last layer,
return F.elu(h_prime)
else:
# if this layer is last layer,
return h_prime
def __repr__(self):
return self.__class__.__name__ + ' (' + str(self.in_features) + ' -> ' + str(self.out_features) + ')'
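# Minimal usage sketch (added for illustration; the sizes below are arbitrary assumptions):
#
#   layer = GraphAttentionLayer(in_features=16, out_features=8, dropout=0.5, alpha=0.2)
#   x = torch.rand(5, 16)     # 5 nodes with 16-dimensional features
#   adj = torch.ones(5, 5)    # dense adjacency matrix; non-zero entries are edges
#   out = layer(x, adj)       # -> tensor of shape (5, 8)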
| 35 | 119 | 0.613682 |
4a1ae23ef0319e427d681fd0b925d4b53a75b8f2
| 1,456 |
py
|
Python
|
src/sentry/api/serializers/base.py
|
mastacheata/sentry
|
cc4536901db0323d1e6433416abf1d0ecd977d61
|
[
"BSD-3-Clause"
] | null | null | null |
src/sentry/api/serializers/base.py
|
mastacheata/sentry
|
cc4536901db0323d1e6433416abf1d0ecd977d61
|
[
"BSD-3-Clause"
] | null | null | null |
src/sentry/api/serializers/base.py
|
mastacheata/sentry
|
cc4536901db0323d1e6433416abf1d0ecd977d61
|
[
"BSD-3-Clause"
] | null | null | null |
from __future__ import absolute_import
from django.contrib.auth.models import AnonymousUser
registry = {}
def serialize(objects, user=None, serializer=None):
if user is None:
user = AnonymousUser()
if not objects:
return objects
# sets aren't predictable, so generally you should use a list, but it's
# supported out of convenience
elif not isinstance(objects, (list, tuple, set, frozenset)):
return serialize([objects], user=user, serializer=serializer)[0]
# elif isinstance(obj, dict):
# return dict((k, serialize(v, request=request)) for k, v in obj.iteritems())
if serializer is None:
# find the first object that is in the registry
for o in objects:
try:
serializer = registry[type(o)]
break
except KeyError:
pass
else:
return objects
attrs = serializer.get_attrs(item_list=objects, user=user)
return [serializer(o, attrs=attrs.get(o, {}), user=user) for o in objects]
def register(type):
def wrapped(cls):
registry[type] = cls()
return cls
return wrapped
class Serializer(object):
def __call__(self, obj, attrs, user):
if obj is None:
return
return self.serialize(obj, attrs, user)
def get_attrs(self, item_list, user):
return {}
def serialize(self, obj, attrs, user):
return {}
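# Usage sketch (added for illustration; MyModel, its serializer, and request.user are hypothetical):
#
#   @register(MyModel)
#   class MyModelSerializer(Serializer):
#       def serialize(self, obj, attrs, user):
#           return {"id": obj.id}
#
#   data = serialize(list(MyModel.objects.all()[:10]), user=request.user)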
| 26 | 85 | 0.616758 |
4a1ae2670eb28f8ca21e2d54676eaf06e5b8b818
| 1,057 |
py
|
Python
|
python/288_Unique_Word_Abbreviation.py
|
dvlpsh/leetcode-1
|
f965328af72113ac8a5a9d6624868c1502be937b
|
[
"MIT"
] | 4,416 |
2016-03-30T15:02:26.000Z
|
2022-03-31T16:31:03.000Z
|
python/288_Unique_Word_Abbreviation.py
|
YinpuLi/leetcode-6
|
1371de2631d745efba39de41b51c3424e35da434
|
[
"MIT"
] | 20 |
2018-11-17T13:46:25.000Z
|
2022-03-13T05:37:06.000Z
|
python/288_Unique_Word_Abbreviation.py
|
YinpuLi/leetcode-6
|
1371de2631d745efba39de41b51c3424e35da434
|
[
"MIT"
] | 1,374 |
2017-05-26T15:44:30.000Z
|
2022-03-30T19:21:02.000Z
|
class ValidWordAbbr(object):
def __init__(self, dictionary):
"""
initialize your data structure here.
:type dictionary: List[str]
"""
self.dictionary = set(dictionary)
self.abb_dic = {}
for s in self.dictionary:
curr = self.getAbb(s)
if curr in self.abb_dic:
self.abb_dic[curr] = False
else:
self.abb_dic[curr] = True
def isUnique(self, word):
"""
check if a word is unique.
:type word: str
:rtype: bool
"""
abb = self.getAbb(word)
hasAbbr = self.abb_dic.get(abb, None)
return hasAbbr == None or (hasAbbr and word in self.dictionary)
def getAbb(self, word):
if len(word) <= 2:
return word
return word[0] + str(len(word) - 2) + word[-1]
# Your ValidWordAbbr object will be instantiated and called as such:
# vwa = ValidWordAbbr(dictionary)
# vwa.isUnique("word")
# vwa.isUnique("anotherWord")
| 28.567568 | 76 | 0.541154 |
4a1ae2822de16383e9835da6152db257d13d8a7c
| 28,134 |
py
|
Python
|
udify/dataset_readers/conll18_ud_eval.py
|
annaproxy/udify
|
acdacd30c0a9cf7e3f2ba9982d6abbb42aa65af8
|
[
"MIT"
] | null | null | null |
udify/dataset_readers/conll18_ud_eval.py
|
annaproxy/udify
|
acdacd30c0a9cf7e3f2ba9982d6abbb42aa65af8
|
[
"MIT"
] | null | null | null |
udify/dataset_readers/conll18_ud_eval.py
|
annaproxy/udify
|
acdacd30c0a9cf7e3f2ba9982d6abbb42aa65af8
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Compatible with Python 2.7 and 3.2+, can be used either as a module
# or a standalone executable.
#
# Copyright 2017, 2018 Institute of Formal and Applied Linguistics (UFAL),
# Faculty of Mathematics and Physics, Charles University, Czech Republic.
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
#
# Authors: Milan Straka, Martin Popel <surname@ufal.mff.cuni.cz>
#
# Changelog:
# - [12 Apr 2018] Version 0.9: Initial release.
# - [19 Apr 2018] Version 1.0: Fix bug in MLAS (duplicate entries in functional_children).
# Add --counts option.
# - [02 May 2018] Version 1.1: When removing spaces to match gold and system characters,
# consider all Unicode characters of category Zs instead of
# just ASCII space.
# - [25 Jun 2018] Version 1.2: Use python3 in the she-bang (instead of python).
# In Python2, make the whole computation use `unicode` strings.
# Command line usage
# ------------------
# conll18_ud_eval.py [-v] gold_conllu_file system_conllu_file
#
# - if no -v is given, only the official CoNLL18 UD Shared Task evaluation metrics
# are printed
# - if -v is given, more metrics are printed (as precision, recall, F1 score,
# and in case the metric is computed on aligned words also accuracy on these):
# - Tokens: how well do the gold tokens match system tokens
# - Sentences: how well do the gold sentences match system sentences
# - Words: how well can the gold words be aligned to system words
# - UPOS: using aligned words, how well does UPOS match
# - XPOS: using aligned words, how well does XPOS match
# - UFeats: using aligned words, how well does universal FEATS match
# - AllTags: using aligned words, how well does UPOS+XPOS+FEATS match
# - Lemmas: using aligned words, how well does LEMMA match
# - UAS: using aligned words, how well does HEAD match
# - LAS: using aligned words, how well does HEAD+DEPREL(ignoring subtypes) match
# - CLAS: using aligned words with content DEPREL, how well does
# HEAD+DEPREL(ignoring subtypes) match
# - MLAS: using aligned words with content DEPREL, how well does
# HEAD+DEPREL(ignoring subtypes)+UPOS+UFEATS+FunctionalChildren(DEPREL+UPOS+UFEATS) match
# - BLEX: using aligned words with content DEPREL, how well does
# HEAD+DEPREL(ignoring subtypes)+LEMMAS match
# - if -c is given, raw counts of correct/gold_total/system_total/aligned words are printed
# instead of precision/recall/F1/AlignedAccuracy for all metrics.
# API usage
# ---------
# - load_conllu(file)
# - loads CoNLL-U file from given file object to an internal representation
# - the file object should return str in both Python 2 and Python 3
# - raises UDError exception if the given file cannot be loaded
# - evaluate(gold_ud, system_ud)
# - evaluate the given gold and system CoNLL-U files (loaded with load_conllu)
# - raises UDError if the concatenated tokens of gold and system file do not match
# - returns a dictionary with the metrics described above, each metric having
# three fields: precision, recall and f1
# Description of token matching
# -----------------------------
# In order to match tokens of gold file and system file, we consider the text
# resulting from concatenation of gold tokens and text resulting from
# concatenation of system tokens. These texts should match -- if they do not,
# the evaluation fails.
#
# If the texts do match, every token is represented as a range in this original
# text, and tokens are equal only if their range is the same.
# Description of word matching
# ----------------------------
# When matching words of gold file and system file, we first match the tokens.
# The words which are also tokens are matched as tokens, but words in multi-word
# tokens have to be handled differently.
#
# To handle multi-word tokens, we start by finding "multi-word spans".
# Multi-word span is a span in the original text such that
# - it contains at least one multi-word token
# - all multi-word tokens in the span (considering both gold and system ones)
# are completely inside the span (i.e., they do not "stick out")
# - the multi-word span is as small as possible
#
# For every multi-word span, we align the gold and system words completely
# inside this span using LCS on their FORMs. The words not intersecting
# (even partially) any multi-word span are then aligned as tokens.
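# A minimal usage sketch of the API described above (added for illustration;
# the file names are hypothetical):
#
#   with open("gold.conllu", encoding="utf-8") as g, open("system.conllu", encoding="utf-8") as s:
#       gold_ud, system_ud = load_conllu(g), load_conllu(s)
#   scores = evaluate(gold_ud, system_ud)
#   print("LAS F1: {:.2f}".format(100 * scores["LAS"].f1))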
from __future__ import division
from __future__ import print_function
import argparse
import io
import sys
import unicodedata
import unittest
# CoNLL-U column names
ID, FORM, LEMMA, UPOS, XPOS, FEATS, HEAD, DEPREL, DEPS, MISC = range(10)
# Content and functional relations
CONTENT_DEPRELS = {
"nsubj", "obj", "iobj", "csubj", "ccomp", "xcomp", "obl", "vocative",
"expl", "dislocated", "advcl", "advmod", "discourse", "nmod", "appos",
"nummod", "acl", "amod", "conj", "fixed", "flat", "compound", "list",
"parataxis", "orphan", "goeswith", "reparandum", "root", "dep"
}
FUNCTIONAL_DEPRELS = {
"aux", "cop", "mark", "det", "clf", "case", "cc"
}
UNIVERSAL_FEATURES = {
"PronType", "NumType", "Poss", "Reflex", "Foreign", "Abbr", "Gender",
"Animacy", "Number", "Case", "Definite", "Degree", "VerbForm", "Mood",
"Tense", "Aspect", "Voice", "Evident", "Polarity", "Person", "Polite"
}
# UD Error is used when raising exceptions in this module
class UDError(Exception):
pass
# Conversion methods handling `str` <-> `unicode` conversions in Python2
def _decode(text):
return text if sys.version_info[0] >= 3 or not isinstance(text, str) else text.decode("utf-8")
def _encode(text):
return text if sys.version_info[0] >= 3 or not isinstance(text, unicode) else text.encode("utf-8")
# Load given CoNLL-U file into internal representation
def load_conllu(file):
# Internal representation classes
class UDRepresentation:
def __init__(self):
# Characters of all the tokens in the whole file.
# Whitespace between tokens is not included.
self.characters = []
# List of UDSpan instances with start&end indices into `characters`.
self.tokens = []
# List of UDWord instances.
self.words = []
# List of UDSpan instances with start&end indices into `characters`.
self.sentences = []
class UDSpan:
def __init__(self, start, end):
self.start = start
# Note that self.end marks the first position **after the end** of span,
# so we can use characters[start:end] or range(start, end).
self.end = end
class UDWord:
def __init__(self, span, columns, is_multiword):
# Span of this word (or MWT, see below) within ud_representation.characters.
self.span = span
# 10 columns of the CoNLL-U file: ID, FORM, LEMMA,...
self.columns = columns
# is_multiword==True means that this word is part of a multi-word token.
# In that case, self.span marks the span of the whole multi-word token.
self.is_multiword = is_multiword
# Reference to the UDWord instance representing the HEAD (or None if root).
self.parent = None
# List of references to UDWord instances representing functional-deprel children.
self.functional_children = []
# Only consider universal FEATS.
self.columns[FEATS] = "|".join(sorted(feat for feat in columns[FEATS].split("|")
if feat.split("=", 1)[0] in UNIVERSAL_FEATURES))
# Let's ignore language-specific deprel subtypes.
self.columns[DEPREL] = columns[DEPREL].split(":")[0]
# Precompute which deprels are CONTENT_DEPRELS and which FUNCTIONAL_DEPRELS
self.is_content_deprel = self.columns[DEPREL] in CONTENT_DEPRELS
self.is_functional_deprel = self.columns[DEPREL] in FUNCTIONAL_DEPRELS
ud = UDRepresentation()
# Load the CoNLL-U file
index, sentence_start = 0, None
while True:
line = file.readline()
if not line:
break
line = _decode(line.rstrip("\r\n"))
# Handle sentence start boundaries
if sentence_start is None:
# Skip comments
if line.startswith("#"):
continue
# Start a new sentence
ud.sentences.append(UDSpan(index, 0))
sentence_start = len(ud.words)
if not line:
# Add parent and children UDWord links and check there are no cycles
def process_word(word):
if word.parent == "remapping":
raise UDError("There is a cycle in a sentence")
if word.parent is None:
try:
head = int(word.columns[HEAD])
except:
head = 0
if head < 0 or head > len(ud.words) - sentence_start:
raise UDError("HEAD '{}' points outside of the sentence".format(_encode(word.columns[HEAD])))
if head:
parent = ud.words[sentence_start + head - 1]
word.parent = "remapping"
process_word(parent)
word.parent = parent
for word in ud.words[sentence_start:]:
process_word(word)
# func_children cannot be assigned within process_word
# because it is called recursively and may result in adding one child twice.
for word in ud.words[sentence_start:]:
if word.parent and word.is_functional_deprel:
word.parent.functional_children.append(word)
# Check there is a single root node
# if len([word for word in ud.words[sentence_start:] if word.parent is None]) != 1:
# raise UDError("There are multiple roots in a sentence")
# End the sentence
ud.sentences[-1].end = index
sentence_start = None
continue
# Read next token/word
columns = line.split("\t")
if len(columns) != 10:
raise UDError("The CoNLL-U line does not contain 10 tab-separated columns: '{}'".format(_encode(line)))
# Skip empty nodes
if "." in columns[ID]:
continue
# Delete spaces from FORM, so gold.characters == system.characters
# even if one of them tokenizes the space. Use any Unicode character
# with category Zs.
columns[FORM] = "".join(filter(lambda c: unicodedata.category(c) != "Zs", columns[FORM]))
if not columns[FORM]:
raise UDError("There is an empty FORM in the CoNLL-U file")
# Save token
ud.characters.extend(columns[FORM])
ud.tokens.append(UDSpan(index, index + len(columns[FORM])))
index += len(columns[FORM])
# Handle multi-word tokens to save word(s)
if "-" in columns[ID]:
try:
start, end = map(int, columns[ID].split("-"))
except:
raise UDError("Cannot parse multi-word token ID '{}'".format(_encode(columns[ID])))
for _ in range(start, end + 1):
word_line = _decode(file.readline().rstrip("\r\n"))
word_columns = word_line.split("\t")
if len(word_columns) != 10:
raise UDError("The CoNLL-U line does not contain 10 tab-separated columns: '{}'".format(_encode(word_line)))
ud.words.append(UDWord(ud.tokens[-1], word_columns, is_multiword=True))
# Basic tokens/words
else:
try:
word_id = int(columns[ID])
except:
raise UDError("Cannot parse word ID '{}'".format(_encode(columns[ID])))
if word_id != len(ud.words) - sentence_start + 1:
raise UDError("Incorrect word ID '{}' for word '{}', expected '{}'".format(
_encode(columns[ID]), _encode(columns[FORM]), len(ud.words) - sentence_start + 1))
try:
head_id = int(columns[HEAD])
except:
head_id = 0
# raise UDError("Cannot parse HEAD '{}'".format(_encode(columns[HEAD])))
if head_id < 0:
raise UDError("HEAD cannot be negative")
ud.words.append(UDWord(ud.tokens[-1], columns, is_multiword=False))
if sentence_start is not None:
raise UDError("The CoNLL-U file does not end with empty line")
return ud
# Evaluate the gold and system treebanks (loaded using load_conllu).
def evaluate(gold_ud, system_ud):
class Score:
def __init__(self, gold_total, system_total, correct, aligned_total=None):
self.correct = correct
self.gold_total = gold_total
self.system_total = system_total
self.aligned_total = aligned_total
self.precision = correct / system_total if system_total else 0.0
self.recall = correct / gold_total if gold_total else 0.0
self.f1 = 2 * correct / (system_total + gold_total) if system_total + gold_total else 0.0
self.aligned_accuracy = correct / aligned_total if aligned_total else aligned_total
class AlignmentWord:
def __init__(self, gold_word, system_word):
self.gold_word = gold_word
self.system_word = system_word
class Alignment:
def __init__(self, gold_words, system_words):
self.gold_words = gold_words
self.system_words = system_words
self.matched_words = []
self.matched_words_map = {}
def append_aligned_words(self, gold_word, system_word):
self.matched_words.append(AlignmentWord(gold_word, system_word))
self.matched_words_map[system_word] = gold_word
def spans_score(gold_spans, system_spans):
correct, gi, si = 0, 0, 0
while gi < len(gold_spans) and si < len(system_spans):
if system_spans[si].start < gold_spans[gi].start:
si += 1
elif gold_spans[gi].start < system_spans[si].start:
gi += 1
else:
correct += gold_spans[gi].end == system_spans[si].end
si += 1
gi += 1
return Score(len(gold_spans), len(system_spans), correct)
def alignment_score(alignment, key_fn=None, filter_fn=None):
if filter_fn is not None:
gold = sum(1 for gold in alignment.gold_words if filter_fn(gold))
system = sum(1 for system in alignment.system_words if filter_fn(system))
aligned = sum(1 for word in alignment.matched_words if filter_fn(word.gold_word))
else:
gold = len(alignment.gold_words)
system = len(alignment.system_words)
aligned = len(alignment.matched_words)
if key_fn is None:
# Return score for whole aligned words
return Score(gold, system, aligned)
def gold_aligned_gold(word):
return word
def gold_aligned_system(word):
return alignment.matched_words_map.get(word, "NotAligned") if word is not None else None
correct = 0
for words in alignment.matched_words:
if filter_fn is None or filter_fn(words.gold_word):
if key_fn(words.gold_word, gold_aligned_gold) == key_fn(words.system_word, gold_aligned_system):
correct += 1
return Score(gold, system, correct, aligned)
def beyond_end(words, i, multiword_span_end):
if i >= len(words):
return True
if words[i].is_multiword:
return words[i].span.start >= multiword_span_end
return words[i].span.end > multiword_span_end
def extend_end(word, multiword_span_end):
if word.is_multiword and word.span.end > multiword_span_end:
return word.span.end
return multiword_span_end
def find_multiword_span(gold_words, system_words, gi, si):
# We know gold_words[gi].is_multiword or system_words[si].is_multiword.
# Find the start of the multiword span (gs, ss), so the multiword span is minimal.
# Initialize multiword_span_end characters index.
if gold_words[gi].is_multiword:
multiword_span_end = gold_words[gi].span.end
if not system_words[si].is_multiword and system_words[si].span.start < gold_words[gi].span.start:
si += 1
else: # if system_words[si].is_multiword
multiword_span_end = system_words[si].span.end
if not gold_words[gi].is_multiword and gold_words[gi].span.start < system_words[si].span.start:
gi += 1
gs, ss = gi, si
# Find the end of the multiword span
# (so both gi and si are pointing to the word following the multiword span end).
while not beyond_end(gold_words, gi, multiword_span_end) or \
not beyond_end(system_words, si, multiword_span_end):
if gi < len(gold_words) and (si >= len(system_words) or
gold_words[gi].span.start <= system_words[si].span.start):
multiword_span_end = extend_end(gold_words[gi], multiword_span_end)
gi += 1
else:
multiword_span_end = extend_end(system_words[si], multiword_span_end)
si += 1
return gs, ss, gi, si
def compute_lcs(gold_words, system_words, gi, si, gs, ss):
lcs = [[0] * (si - ss) for i in range(gi - gs)]
for g in reversed(range(gi - gs)):
for s in reversed(range(si - ss)):
if gold_words[gs + g].columns[FORM].lower() == system_words[ss + s].columns[FORM].lower():
lcs[g][s] = 1 + (lcs[g+1][s+1] if g+1 < gi-gs and s+1 < si-ss else 0)
lcs[g][s] = max(lcs[g][s], lcs[g+1][s] if g+1 < gi-gs else 0)
lcs[g][s] = max(lcs[g][s], lcs[g][s+1] if s+1 < si-ss else 0)
return lcs
def align_words(gold_words, system_words):
alignment = Alignment(gold_words, system_words)
gi, si = 0, 0
while gi < len(gold_words) and si < len(system_words):
if gold_words[gi].is_multiword or system_words[si].is_multiword:
# A: Multi-word tokens => align via LCS within the whole "multiword span".
gs, ss, gi, si = find_multiword_span(gold_words, system_words, gi, si)
if si > ss and gi > gs:
lcs = compute_lcs(gold_words, system_words, gi, si, gs, ss)
# Store aligned words
s, g = 0, 0
while g < gi - gs and s < si - ss:
if gold_words[gs + g].columns[FORM].lower() == system_words[ss + s].columns[FORM].lower():
alignment.append_aligned_words(gold_words[gs+g], system_words[ss+s])
g += 1
s += 1
elif lcs[g][s] == (lcs[g+1][s] if g+1 < gi-gs else 0):
g += 1
else:
s += 1
else:
# B: No multi-word token => align according to spans.
if (gold_words[gi].span.start, gold_words[gi].span.end) == (system_words[si].span.start, system_words[si].span.end):
alignment.append_aligned_words(gold_words[gi], system_words[si])
gi += 1
si += 1
elif gold_words[gi].span.start <= system_words[si].span.start:
gi += 1
else:
si += 1
return alignment
# Check that the underlying character sequences do match.
if gold_ud.characters != system_ud.characters:
index = 0
while index < len(gold_ud.characters) and index < len(system_ud.characters) and \
gold_ud.characters[index] == system_ud.characters[index]:
index += 1
raise UDError(
"The concatenation of tokens in gold file and in system file differ!\n" +
"First 20 differing characters in gold file: '{}' and system file: '{}'".format(
"".join(map(_encode, gold_ud.characters[index:index + 20])),
"".join(map(_encode, system_ud.characters[index:index + 20]))
)
)
# Align words
alignment = align_words(gold_ud.words, system_ud.words)
# Compute the F1-scores
return {
"Tokens": spans_score(gold_ud.tokens, system_ud.tokens),
"Sentences": spans_score(gold_ud.sentences, system_ud.sentences),
"Words": alignment_score(alignment),
"UPOS": alignment_score(alignment, lambda w, _: w.columns[UPOS]),
"XPOS": alignment_score(alignment, lambda w, _: w.columns[XPOS]),
"UFeats": alignment_score(alignment, lambda w, _: w.columns[FEATS]),
"AllTags": alignment_score(alignment, lambda w, _: (w.columns[UPOS], w.columns[XPOS], w.columns[FEATS])),
"Lemmas": alignment_score(alignment, lambda w, ga: w.columns[LEMMA] if ga(w).columns[LEMMA] != "_" else "_"),
"UAS": alignment_score(alignment, lambda w, ga: ga(w.parent)),
"LAS": alignment_score(alignment, lambda w, ga: (ga(w.parent), w.columns[DEPREL])),
"CLAS": alignment_score(alignment, lambda w, ga: (ga(w.parent), w.columns[DEPREL]),
filter_fn=lambda w: w.is_content_deprel),
"MLAS": alignment_score(alignment, lambda w, ga: (ga(w.parent), w.columns[DEPREL], w.columns[UPOS], w.columns[FEATS],
[(ga(c), c.columns[DEPREL], c.columns[UPOS], c.columns[FEATS])
for c in w.functional_children]),
filter_fn=lambda w: w.is_content_deprel),
"BLEX": alignment_score(alignment, lambda w, ga: (ga(w.parent), w.columns[DEPREL],
w.columns[LEMMA] if ga(w).columns[LEMMA] != "_" else "_"),
filter_fn=lambda w: w.is_content_deprel),
}
def load_conllu_file(path):
"""
    Workaround attempted for the Lisa cluster (disabled below; it did not work).
    Justification: the gold-standard file could not be read with utf-8 on Lisa;
    on any updated Python version it is read with utf-8 automatically.
"""
#if 'predict' not in path:
# _file = open(path, mode="r")
#else:
_file = open(path, mode="r", encoding='utf-8')
return load_conllu(_file)
def evaluate_wrapper(args):
# Load CoNLL-U files
gold_ud = load_conllu_file(args.gold_file)
system_ud = load_conllu_file(args.system_file)
return evaluate(gold_ud, system_ud)
def main():
# Parse arguments
parser = argparse.ArgumentParser()
parser.add_argument("gold_file", type=str,
help="Name of the CoNLL-U file with the gold data.")
parser.add_argument("system_file", type=str,
help="Name of the CoNLL-U file with the predicted data.")
parser.add_argument("--verbose", "-v", default=False, action="store_true",
help="Print all metrics.")
parser.add_argument("--counts", "-c", default=False, action="store_true",
help="Print raw counts of correct/gold/system/aligned words instead of prec/rec/F1 for all metrics.")
args = parser.parse_args()
# Evaluate
evaluation = evaluate_wrapper(args)
# Print the evaluation
if not args.verbose and not args.counts:
print("LAS F1 Score: {:.2f}".format(100 * evaluation["LAS"].f1))
print("MLAS Score: {:.2f}".format(100 * evaluation["MLAS"].f1))
print("BLEX Score: {:.2f}".format(100 * evaluation["BLEX"].f1))
else:
if args.counts:
print("Metric | Correct | Gold | Predicted | Aligned")
else:
print("Metric | Precision | Recall | F1 Score | AligndAcc")
print("-----------+-----------+-----------+-----------+-----------")
for metric in["Tokens", "Sentences", "Words", "UPOS", "XPOS", "UFeats", "AllTags", "Lemmas", "UAS", "LAS", "CLAS", "MLAS", "BLEX"]:
if args.counts:
print("{:11}|{:10} |{:10} |{:10} |{:10}".format(
metric,
evaluation[metric].correct,
evaluation[metric].gold_total,
evaluation[metric].system_total,
evaluation[metric].aligned_total or (evaluation[metric].correct if metric == "Words" else "")
))
else:
print("{:11}|{:10.2f} |{:10.2f} |{:10.2f} |{}".format(
metric,
100 * evaluation[metric].precision,
100 * evaluation[metric].recall,
100 * evaluation[metric].f1,
"{:10.2f}".format(100 * evaluation[metric].aligned_accuracy) if evaluation[metric].aligned_accuracy is not None else ""
))
if __name__ == "__main__":
main()
# Tests, which can be executed with `python -m unittest conll18_ud_eval`.
class TestAlignment(unittest.TestCase):
@staticmethod
def _load_words(words):
"""Prepare fake CoNLL-U files with fake HEAD to prevent multiple roots errors."""
lines, num_words = [], 0
for w in words:
parts = w.split(" ")
if len(parts) == 1:
num_words += 1
lines.append("{}\t{}\t_\t_\t_\t_\t{}\t_\t_\t_".format(num_words, parts[0], int(num_words>1)))
else:
lines.append("{}-{}\t{}\t_\t_\t_\t_\t_\t_\t_\t_".format(num_words + 1, num_words + len(parts) - 1, parts[0]))
for part in parts[1:]:
num_words += 1
lines.append("{}\t{}\t_\t_\t_\t_\t{}\t_\t_\t_".format(num_words, part, int(num_words>1)))
return load_conllu((io.StringIO if sys.version_info >= (3, 0) else io.BytesIO)("\n".join(lines+["\n"])))
def _test_exception(self, gold, system):
self.assertRaises(UDError, evaluate, self._load_words(gold), self._load_words(system))
def _test_ok(self, gold, system, correct):
metrics = evaluate(self._load_words(gold), self._load_words(system))
gold_words = sum((max(1, len(word.split(" ")) - 1) for word in gold))
system_words = sum((max(1, len(word.split(" ")) - 1) for word in system))
self.assertEqual((metrics["Words"].precision, metrics["Words"].recall, metrics["Words"].f1),
(correct / system_words, correct / gold_words, 2 * correct / (gold_words + system_words)))
def test_exception(self):
self._test_exception(["a"], ["b"])
def test_equal(self):
self._test_ok(["a"], ["a"], 1)
self._test_ok(["a", "b", "c"], ["a", "b", "c"], 3)
def test_equal_with_multiword(self):
self._test_ok(["abc a b c"], ["a", "b", "c"], 3)
self._test_ok(["a", "bc b c", "d"], ["a", "b", "c", "d"], 4)
self._test_ok(["abcd a b c d"], ["ab a b", "cd c d"], 4)
self._test_ok(["abc a b c", "de d e"], ["a", "bcd b c d", "e"], 5)
def test_alignment(self):
self._test_ok(["abcd"], ["a", "b", "c", "d"], 0)
self._test_ok(["abc", "d"], ["a", "b", "c", "d"], 1)
self._test_ok(["a", "bc", "d"], ["a", "b", "c", "d"], 2)
self._test_ok(["a", "bc b c", "d"], ["a", "b", "cd"], 2)
self._test_ok(["abc a BX c", "def d EX f"], ["ab a b", "cd c d", "ef e f"], 4)
self._test_ok(["ab a b", "cd bc d"], ["a", "bc", "d"], 2)
self._test_ok(["a", "bc b c", "d"], ["ab AX BX", "cd CX a"], 1)
| 47.284034 | 139 | 0.592557 |
4a1ae2cca35fb0408a0dbb028f8a7b10e0bb6b08
| 2,103 |
py
|
Python
|
tests/integration/misc/FileSizeLimit.py
|
rainoftime/yinyang
|
5061be266226b8a5933321d98e68c6d9ee8137dd
|
[
"MIT"
] | 143 |
2020-10-30T18:52:48.000Z
|
2022-03-15T11:30:23.000Z
|
tests/integration/misc/FileSizeLimit.py
|
rainoftime/yinyang
|
5061be266226b8a5933321d98e68c6d9ee8137dd
|
[
"MIT"
] | 28 |
2020-11-11T12:11:39.000Z
|
2022-01-11T19:29:17.000Z
|
tests/integration/misc/FileSizeLimit.py
|
rainoftime/yinyang
|
5061be266226b8a5933321d98e68c6d9ee8137dd
|
[
"MIT"
] | 16 |
2020-10-31T22:54:42.000Z
|
2022-03-26T11:31:10.000Z
|
# MIT License
#
# Copyright (c) [2020 - 2021] The yinyang authors
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import os
import subprocess
import sys
python = sys.executable
def call_fuzzer(first_config, second_config, fn, opts):
cmd = (
python
+ " bin/opfuzz "
+ '"'
+ first_config
+ ";"
+ second_config
+ '" '
+ opts
+ " "
+ fn
)
output = subprocess.getoutput(cmd)
return output, cmd
def create_mocksolver_msg(msg, script_fn):
code = "#! /usr/bin/env python3\n"
code += 'msg="""' + msg + '"""\n'
code += "print(msg)"
open(script_fn, "w").write(code)
os.system("chmod +x " + script_fn)
solver = "solver.py"
msg = "sat"
create_mocksolver_msg(msg, solver)
first_config = os.path.abspath(solver)
second_config = os.path.abspath(solver)
opts = "-i 1 -m 1"
FN = os.path.dirname(os.path.realpath(__file__)) + "/too_large.smt2"
out, cmd = call_fuzzer(first_config, second_config, FN, opts)
if "1 seeds processed, 0 valid, 1 invalid" not in out:
print(cmd)
exit(1)
| 31.38806 | 79 | 0.69282 |
4a1ae3e4d04b12248dc19fa2281c97fc681ef0ec
| 3,495 |
py
|
Python
|
merge XY FISH results.py
|
webjo099/FlyFISH-and-DNA
|
7690e991501961c449ca8496ee1aaaab25c5ac6b
|
[
"MIT"
] | null | null | null |
merge XY FISH results.py
|
webjo099/FlyFISH-and-DNA
|
7690e991501961c449ca8496ee1aaaab25c5ac6b
|
[
"MIT"
] | null | null | null |
merge XY FISH results.py
|
webjo099/FlyFISH-and-DNA
|
7690e991501961c449ca8496ee1aaaab25c5ac6b
|
[
"MIT"
] | null | null | null |
#load python included modules
import ntpath
import os
import re
import tkinter as tk
from tkinter import filedialog, simpledialog, messagebox
#load additional python modules
import numpy as np
import pandas as pd
# define a function to get the list of all the filename in a directory and its subdirectories
def getListOfFiles(dirName):
# create a list
listOfFile = os.listdir(dirName)
allFiles = list()
# Iterate over all the entries
for entry in listOfFile:
# Create full path
fullPath = os.path.join(dirName, entry)
# If entry is a directory run the function on that directory
if os.path.isdir(fullPath):
allFiles = allFiles + getListOfFiles(fullPath)
# If entry is a file then add the filename to the list
else:
allFiles.append(fullPath)
return allFiles
# define a function to search for XY FISH results files and merge them
def findAndMerge(filelist):
# Prepare a dataframe
df_out = pd.DataFrame(columns=['directory', 'image', 'nul', 'X', 'Y', 'XY','genotype'])
for file in filelist:
file_name = ntpath.basename(file)
dir_name = ntpath.dirname(file)
#if a FISH results file extract the data and add it to the dataframe as a new row
if "FISH results.txt" in file_name:
print("I found a results table!")
text_file = open(file, "r")
lines = text_file.readlines()
text_file.close()
nul = int(re.findall('\d+',lines[-4])[0])
X = int(re.findall('\d+',lines[-3])[0])
Y = int(re.findall('\d+',lines[-2])[0])
XY = int(re.findall('\d+',lines[-1])[0])
imagename = file.replace('\\', '/').replace(dirName, '').split(' FISH results.txt')[0]
df_temp = pd.DataFrame([[dirName, imagename, nul, X, Y, XY]], columns=['directory', 'image', 'nul', 'X', 'Y', 'XY'])
df_out = df_out.append(df_temp, sort=False, ignore_index=True)
print("It contains " + str(nul) + " nuls, " + str(X) + " X, " + str(Y) + " Y and " + str(XY) + " XY signal containing nuclei :o")
return df_out
#required for the dialog boxes
root = tk.Tk()
root.withdraw()
# Prepare a dataframe
df_merged = pd.DataFrame(columns=['directory', 'image', 'nul', 'X', 'Y', 'XY','genotype'])
#loop until all genotypes are merged
go_on = True
while(go_on):
#ask for a directory
dirName = filedialog.askdirectory(title = "Choose a folder containing results from 1 genotype")
#get filelist and search for chr3 FISH results files
filelist = getListOfFiles(dirName)
df_out = findAndMerge(filelist)
#ask user to specify the genotype
genotype = simpledialog.askstring(title = None, prompt = "Enter genotype")
df_out["genotype"] = genotype
df_merged = df_merged.append(df_out, sort = False, ignore_index = True)
go_on = messagebox.askyesnocancel(title = None, message="Add another genotype?")
#calculate the aneuploidy ratio of spermatids
df_merged['aneuploidy ratio'] = ((2*df_merged['XY'])/(df_merged['Y']+df_merged['X']+2*df_merged['XY']))
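# Worked example: with X=45, Y=40 and XY=5 nuclei, the ratio is (2*5)/(40+45+2*5) = 10/95 ≈ 0.105.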
#ask the user where to save the final dataframe
save_path = filedialog.asksaveasfilename(title='Save compiled results as ...', defaultextension='.xlsx', initialdir=dirName, initialfile="compiled FISH results")
df_merged.to_excel(save_path, index=False)
print('done')
| 42.108434 | 167 | 0.637768 |
4a1ae3e5fca8fb54f187e0edbb879c97179ef693
| 11,463 |
py
|
Python
|
fetchData.py
|
duyunhe/vehAnalysis
|
a3788c3851b786683ed11a3b2f88dc7502a163d3
|
[
"Apache-2.0"
] | 1 |
2020-05-26T11:13:17.000Z
|
2020-05-26T11:13:17.000Z
|
fetchData.py
|
duyunhe/vehAnalysis
|
a3788c3851b786683ed11a3b2f88dc7502a163d3
|
[
"Apache-2.0"
] | null | null | null |
fetchData.py
|
duyunhe/vehAnalysis
|
a3788c3851b786683ed11a3b2f88dc7502a163d3
|
[
"Apache-2.0"
] | 1 |
2021-06-24T13:13:00.000Z
|
2021-06-24T13:13:00.000Z
|
# -*- coding: utf-8 -*-
# @Time : 2019/5/17 16:12
# @Author : yhdu@tongwoo.cn
# @Description : fetch GPS data
# @File : fetchData.py
import cx_Oracle
from datetime import timedelta, datetime
from coord import bl2xy
from geo import calc_dist
from time import clock
import json
import os
import redis
from taxiStruct import TaxiData
os.environ['NLS_LANG'] = 'SIMPLIFIED CHINESE_CHINA.UTF8'
def debug_time(func):
def wrapper(*args, **kwargs):
bt = clock()
a = func(*args, **kwargs)
et = clock()
print "fetch.py", func.__name__, "cost", round(et - bt, 2), "secs"
return a
return wrapper
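# Apply @debug_time to a function to print its wall-clock running time (used on the fetch functions below).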
@debug_time
def get_all_data(all_data=False, begin_time=None, end_time=None):
if begin_time is None and end_time is None:
begin_time = datetime(2018, 5, 1, 12, 0, 0)
end_time = begin_time + timedelta(minutes=60)
conn = cx_Oracle.connect('hz/hz@192.168.11.88:1521/orcl')
if all_data:
sql = "select px, py, speed_time, state, speed, carstate, direction, vehicle_num from " \
"TB_GPS_1805 t where speed_time >= :1 " \
"and speed_time < :2 order by speed_time "
else:
sql = "select px, py, speed_time, state, speed, carstate, direction, vehicle_num from " \
"TB_GPS_1805 t where speed_time >= :1 " \
"and speed_time < :2 and vehicle_num = '浙ALT002' order by speed_time "
tup = (begin_time, end_time)
cursor = conn.cursor()
cursor.execute(sql, tup)
veh_trace = {}
for item in cursor.fetchall():
lng, lat = map(float, item[0:2])
if 119 < lng < 121 and 29 < lat < 31:
px, py = bl2xy(lat, lng)
state = int(item[3])
stime = item[2]
speed = float(item[4])
car_state = int(item[5])
ort = float(item[6])
veh = item[7][-6:]
veh_head = veh[:2]
# if veh_head != 'AT' and veh_head != 'AL':
# continue
# if veh != 'AT0956':
# continue
taxi_data = TaxiData(veh, px, py, stime, state, speed, car_state, ort)
try:
veh_trace[veh].append(taxi_data)
except KeyError:
veh_trace[veh] = [taxi_data]
cursor.close()
conn.close()
return veh_trace
def get_all_on():
conn = cx_Oracle.connect("hz/hz@192.168.11.88/orcl")
cur = conn.cursor()
sql = "select vehicle_num, during_time from tb_during_on_time"
cur.execute(sql)
veh_dict = {}
for item in cur:
veh, on_time = item
veh_dict[veh] = on_time
cur.close()
conn.close()
return veh_dict
@debug_time
def get_formal_data(all_data=False, begin_time=None, end_time=None):
conn = cx_Oracle.connect('hzczdsj/tw85450077@192.168.0.80:1521/orcl')
on_dict = get_all_on()
if all_data:
sql = "select px, py, speed_time, state, carstate, vehicle_num from " \
"TB_GPS_TEMP t where speed_time >= :1 " \
"and speed_time < :2 and carstate = 0 order by speed_time "
else:
sql = "select px, py, speed_time, state, carstate, vehicle_num from " \
"TB_GPS_TEMP t where speed_time >= :1 " \
"and speed_time < :2 and vehicle_num = '浙ALT002' and state = 1 order by speed_time "
tup = (begin_time, end_time)
cursor = conn.cursor()
cursor.execute(sql, tup)
veh_trace = {}
for item in cursor.fetchall():
lng, lat = map(float, item[0:2])
if 119 < lng < 121 and 29 < lat < 31:
px, py = bl2xy(lat, lng)
state = int(item[3])
stime = item[2]
speed = 0
car_state = int(item[4])
ort = 0
veh = item[5][-6:]
veh_head = veh[:2]
# if veh_head != 'AT' and veh_head != 'AL':
# continue
# if veh in on_set:
# continue
taxi_data = TaxiData(veh, px, py, stime, state, speed, car_state, ort)
try:
veh_trace[veh].append(taxi_data)
except KeyError:
veh_trace[veh] = [taxi_data]
new_dict = {}
for veh, trace in veh_trace.iteritems():
new_trace = []
last_data = None
try:
total_itv = on_dict[veh]
except KeyError:
total_itv = 0
for data in trace:
esti = True
if data.state == 0:
esti = False
if last_data is not None:
dist = calc_dist([data.x, data.y], [last_data.x, last_data.y])
                # filter out abnormal points
                if dist < 10:  # GPS error is around 10 meters, so such a small move is unreliable
esti = False
if data.state == 0:
total_itv = 0
else:
total_itv += data - last_data
last_data = data
if esti:
new_trace.append(data)
# print i, dist
# i += 1
        # if the occupied (passenger on board) time is too long (over two hours), the data is probably bad
if total_itv < 7200:
new_dict[veh] = new_trace
on_dict[veh] = total_itv
# print "all car:{0}, ave:{1}".format(len(static_num), len(trace) / len(static_num))
cursor.close()
conn.close()
save_all_on(on_dict)
return new_dict, on_dict
def save_all_on(on_dict):
"""
:param on_dict: {veh: during_time(seconds)}
:return:
"""
conn = cx_Oracle.connect('hz/hz@192.168.11.88:1521/orcl')
cursor = conn.cursor()
sql = "delete from tb_during_on_time"
cursor.execute(sql)
tup_list = []
sql = "insert into tb_during_on_time values(:1,:2)"
for veh, on_time in on_dict.items():
tup_list.append((veh, on_time))
cursor.executemany(sql, tup_list)
conn.commit()
cursor.close()
conn.close()
@debug_time
def get_gps_data(all_data=False, begin_time=None, end_time=None):
"""
    Historical data: use two hours of GPS data
:param all_data:
:param begin_time:
:param end_time:
:return:
"""
if begin_time is None and end_time is None:
begin_time = datetime(2018, 5, 1, 12, 0, 0)
end_time = begin_time + timedelta(minutes=60)
conn = cx_Oracle.connect('hz/hz@192.168.11.88:1521/orcl')
if all_data:
sql = "select px, py, speed_time, state, speed, carstate, direction, vehicle_num from " \
"TB_GPS_1805 t where speed_time >= :1 " \
"and speed_time < :2 and carstate = '0' order by speed_time "
else:
sql = "select px, py, speed_time, state, speed, carstate, direction, vehicle_num from " \
"TB_GPS_1805 t where speed_time >= :1 " \
"and speed_time < :2 and vehicle_num = '浙AT7484' and carstate = '0' order by speed_time "
tup = (begin_time, end_time)
cursor = conn.cursor()
cursor.execute(sql, tup)
veh_trace = {}
for item in cursor.fetchall():
lng, lat = map(float, item[0:2])
if 119 < lng < 121 and 29 < lat < 31:
px, py = bl2xy(lat, lng)
state = int(item[3])
stime = item[2]
speed = float(item[4])
car_state = int(item[5])
ort = float(item[6])
veh = item[7][-6:]
veh_head = veh[:2]
# if veh_head != 'AT' and veh_head != 'AL':
# continue
# if veh != 'AT0956':
# continue
taxi_data = TaxiData(veh, px, py, stime, state, speed, car_state, ort)
try:
veh_trace[veh].append(taxi_data)
except KeyError:
veh_trace[veh] = [taxi_data]
new_dict = {}
for veh, trace in veh_trace.iteritems():
new_trace = []
last_data = None
on_cnt, off_cnt = 0, 0
for data in trace:
esti = True
if data.state == 1:
on_cnt += 1
else:
off_cnt += 1
if last_data is not None:
dist = calc_dist([data.x, data.y], [last_data.x, last_data.y])
                # filter out abnormal points
if data.state == 0:
esti = False
                if dist < 10:  # GPS error is around 10 meters, so such a small move is unreliable
esti = False
last_data = data
if esti:
new_trace.append(data)
# print i, dist
# i += 1
per = float(on_cnt) / (on_cnt + off_cnt)
if per > 0.9:
continue
new_dict[veh] = new_trace
# print "all car:{0}, ave:{1}".format(len(static_num), len(trace) / len(static_num))
cursor.close()
conn.close()
return new_dict
@debug_time
def trans2redis(trace_dict):
conn = redis.Redis(host="192.168.11.229", port=6300, db=0)
conn.flushdb()
idx = 0
for veh, trace in trace_dict.iteritems():
msg = {}
for data in trace:
x, y, spd, speed_time, pos, load, ort = data.x, data.y, data.speed, \
data.stime, data.car_state, data.state, data.direction
speed_time = speed_time.strftime("%Y-%m-%d %H:%M:%S")
msg_dict = {'isu': veh, 'x': x, 'y': y, 'speed': spd, 'speed_time': speed_time, 'pos': pos, 'load': load,
'ort': ort}
msg_json = json.dumps(msg_dict)
msg_key = "{0}".format(idx)
idx += 1
msg[msg_key] = msg_json
conn.mset(msg)
def redis2redis():
conn = redis.Redis(host="192.168.11.229", port=6300, db=1)
conn2 = redis.Redis(host="192.168.11.229", port=6300, db=2)
conn2.flushdb()
keys = conn.keys()
res = conn.mget(keys)
with conn2.pipeline() as p:
for i, key in enumerate(keys):
if res[i] is not None:
p.set(key, res[i])
p.execute()
def main():
trace_dict = get_gps_data(False)
trans2redis(trace_dict)
def get_gps_list(trace_dict, history=False):
"""
:param trace_dict:
    :param history: whether to apply the historical-data filtering
:return:
"""
trace_list = []
pt_cnt = 0
for veh, trace in trace_dict.iteritems():
new_trace = trace
last_data = None
x_trace = []
for data in new_trace:
if last_data is not None:
itv = data - last_data
if itv > 300:
if len(x_trace) > 1:
dist = calc_dist(x_trace[0], x_trace[-1])
if history:
if dist > 1000:
trace_list.append(x_trace)
else:
trace_list.append(x_trace)
x_trace = [data]
else:
x_trace.append(data)
else:
x_trace.append(data)
last_data = data
if len(x_trace) > 1:
dist = calc_dist(x_trace[0], x_trace[-1])
if history:
if dist > 1000:
trace_list.append(x_trace)
else:
trace_list.append(x_trace)
for trace in trace_list:
pt_cnt += len(trace)
return trace_list, pt_cnt
def get_def_speed():
conn = cx_Oracle.connect('hz/hz@192.168.11.88:1521/orcl')
cursor = conn.cursor()
sql = "select rid, speed from tb_road_def_speed"
cursor.execute(sql)
def_speed = {}
for item in cursor:
rid, speed = item
def_speed[rid] = speed
cursor.close()
conn.close()
return def_speed
| 32.473088 | 117 | 0.529617 |
4a1ae45da69e1320bd7ccbc9f6bfa9b429002a21
| 3,920 |
py
|
Python
|
dbops_venv/lib/python3.5/site-packages/selenium/webdriver/ie/service.py
|
fractal520/dbops
|
20c6b6b5669e09b43cd19e6f3fa0448bc7d5eaac
|
[
"MIT"
] | 15 |
2016-02-24T06:32:57.000Z
|
2020-06-17T05:06:36.000Z
|
dbops_venv/lib/python3.5/site-packages/selenium/webdriver/ie/service.py
|
fractal520/dbops
|
20c6b6b5669e09b43cd19e6f3fa0448bc7d5eaac
|
[
"MIT"
] | 5 |
2020-03-24T15:33:06.000Z
|
2021-02-02T21:42:36.000Z
|
dbops_venv/lib/python3.5/site-packages/selenium/webdriver/ie/service.py
|
fractal520/dbops
|
20c6b6b5669e09b43cd19e6f3fa0448bc7d5eaac
|
[
"MIT"
] | 4 |
2017-02-04T13:45:31.000Z
|
2018-07-05T11:57:24.000Z
|
#!/usr/bin/python
#
# Copyright 2012 Webdriver_name committers
# Copyright 2012 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import subprocess
from subprocess import PIPE
import time
from selenium.common.exceptions import WebDriverException
from selenium.webdriver.common import utils
class Service(object):
"""
Object that manages the starting and stopping of the IEDriver
"""
def __init__(self, executable_path, port=0, host=None, log_level=None, log_file=None):
"""
Creates a new instance of the Service
:Args:
- executable_path : Path to the IEDriver
- port : Port the service is running on
- host : IP address the service port is bound
- log_level : Level of logging of service, may be "FATAL", "ERROR", "WARN", "INFO", "DEBUG", "TRACE".
Default is "FATAL".
- log_file : Target of logging of service, may be "stdout", "stderr" or file path.
Default is "stdout"."""
self.port = port
self.path = executable_path
if self.port == 0:
self.port = utils.free_port()
self.host = host
self.log_level = log_level
self.log_file = log_file
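    # Example (illustrative executable path and values):
    #   service = Service("C:\\WebDriver\\IEDriverServer.exe", log_level="DEBUG")
    #   service.start()
    #   ...
    #   service.stop()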
def start(self):
"""
Starts the IEDriver Service.
:Exceptions:
- WebDriverException : Raised either when it can't start the service
or when it can't connect to the service
"""
try:
cmd = [self.path, "--port=%d" % self.port]
if self.host is not None:
cmd.append("--host=%s" % self.host)
if self.log_level is not None:
cmd.append("--log-level=%s" % self.log_level)
if self.log_file is not None:
cmd.append("--log-file=%s" % self.log_file)
self.process = subprocess.Popen(cmd,
stdout=PIPE, stderr=PIPE)
except TypeError:
raise
except:
raise WebDriverException(
"IEDriver executable needs to be available in the path. "
"Please download from http://selenium-release.storage.googleapis.com/index.html "
"and read up at http://code.google.com/p/selenium/wiki/InternetExplorerDriver")
count = 0
while not utils.is_url_connectable(self.port):
count += 1
time.sleep(1)
if count == 30:
raise WebDriverException("Can not connect to the IEDriver")
def stop(self):
"""
Tells the IEDriver to stop and cleans up the process
"""
        # If it's dead, don't worry
if self.process is None:
return
#Tell the Server to die!
try:
from urllib import request as url_request
except ImportError:
import urllib2 as url_request
url_request.urlopen("http://127.0.0.1:%d/shutdown" % self.port)
count = 0
while utils.is_connectable(self.port):
if count == 30:
break
count += 1
time.sleep(1)
        # Tell the Server to properly die, just in case
try:
if self.process:
self.process.kill()
self.process.wait()
except WindowsError:
# kill may not be available under windows environment
pass
| 35.315315 | 110 | 0.592347 |
4a1ae5549341618f081eca605526ccc552378008
| 1,577 |
py
|
Python
|
utils/log/handlers.py
|
goztrk/django-htk
|
c56bf112e5d627780d2f4288460eae5cce80fa9e
|
[
"MIT"
] | null | null | null |
utils/log/handlers.py
|
goztrk/django-htk
|
c56bf112e5d627780d2f4288460eae5cce80fa9e
|
[
"MIT"
] | null | null | null |
utils/log/handlers.py
|
goztrk/django-htk
|
c56bf112e5d627780d2f4288460eae5cce80fa9e
|
[
"MIT"
] | null | null | null |
# Python Standard Library Imports
import logging
import traceback
# Third Party (PyPI) Imports
import rollbar
# Django Imports
from django.views.debug import ExceptionReporter
# HTK Imports
from htk.utils.debug import slack_debug
class RollbarHandler(logging.Handler):
"""An exception log handler that emits log entries to Rollbar
"""
def __init__(self):
logging.Handler.__init__(self)
def emit(self, record):
exc_info = record.exc_info
request = record.request
rollbar.report_exc_info(exc_info=exc_info, request=request)
class SlackDebugHandler(logging.Handler):
"""An exception log handler that emits log entries to Slack
"""
def __init__(self):
logging.Handler.__init__(self)
def emit(self, record):
exc_type, exc_value, exc_traceback = record.exc_info
request = record.request
reporter = ExceptionReporter(request, exc_type, exc_value, exc_traceback, is_email=True)
try:
exc = reporter.format_exception()
except AttributeError:
frames = reporter.get_traceback_frames()
tb = [(f['filename'], f['lineno'], f['function'], f['context_line']) for f in frames]
exc = ['Traceback (most recent call last):\n']
exc += traceback.format_list(tb)
exc += traceback.format_exception_only(exc_type, exc_value)
message = '*%s* at `%s`\n\n```%s```' % (
exc[-1].strip(),
request.path_info,
''.join(exc[:-1]),
)
slack_debug(message)
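# Example wiring (a sketch; assumes this module is importable as htk.utils.log.handlers):
# LOGGING = {
#     'version': 1,
#     'handlers': {
#         'rollbar': {'level': 'ERROR', 'class': 'htk.utils.log.handlers.RollbarHandler'},
#         'slack_debug': {'level': 'ERROR', 'class': 'htk.utils.log.handlers.SlackDebugHandler'},
#     },
#     'loggers': {
#         'django.request': {'handlers': ['rollbar', 'slack_debug'], 'level': 'ERROR'},
#     },
# }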
| 27.666667 | 97 | 0.640457 |
4a1ae5e607f7f238cedab1fcfa807dbddc797935
| 30,882 |
py
|
Python
|
selfdrive/controls/lib/events.py
|
ImpressionsAK/openpilot
|
e828ec437b81c48913abc67bf43af4b7f742d6a5
|
[
"MIT"
] | null | null | null |
selfdrive/controls/lib/events.py
|
ImpressionsAK/openpilot
|
e828ec437b81c48913abc67bf43af4b7f742d6a5
|
[
"MIT"
] | null | null | null |
selfdrive/controls/lib/events.py
|
ImpressionsAK/openpilot
|
e828ec437b81c48913abc67bf43af4b7f742d6a5
|
[
"MIT"
] | null | null | null |
from enum import IntEnum
from typing import Dict, Union, Callable
from cereal import log, car
import cereal.messaging as messaging
from common.realtime import DT_CTRL
from selfdrive.config import Conversions as CV
from selfdrive.locationd.calibrationd import MIN_SPEED_FILTER
AlertSize = log.ControlsState.AlertSize
AlertStatus = log.ControlsState.AlertStatus
VisualAlert = car.CarControl.HUDControl.VisualAlert
AudibleAlert = car.CarControl.HUDControl.AudibleAlert
EventName = car.CarEvent.EventName
# Alert priorities
class Priority(IntEnum):
LOWEST = 0
LOWER = 1
LOW = 2
MID = 3
HIGH = 4
HIGHEST = 5
# Event types
class ET:
ENABLE = 'enable'
PRE_ENABLE = 'preEnable'
NO_ENTRY = 'noEntry'
WARNING = 'warning'
USER_DISABLE = 'userDisable'
SOFT_DISABLE = 'softDisable'
IMMEDIATE_DISABLE = 'immediateDisable'
PERMANENT = 'permanent'
# get event name from enum
EVENT_NAME = {v: k for k, v in EventName.schema.enumerants.items()}
class Events:
def __init__(self):
self.events = []
self.static_events = []
self.events_prev = dict.fromkeys(EVENTS.keys(), 0)
@property
def names(self):
return self.events
def __len__(self):
return len(self.events)
def add(self, event_name, static=False):
if static:
self.static_events.append(event_name)
self.events.append(event_name)
def clear(self):
self.events_prev = {k: (v + 1 if k in self.events else 0) for k, v in self.events_prev.items()}
self.events = self.static_events.copy()
def any(self, event_type):
for e in self.events:
if event_type in EVENTS.get(e, {}).keys():
return True
return False
def create_alerts(self, event_types, callback_args=None):
if callback_args is None:
callback_args = []
ret = []
for e in self.events:
types = EVENTS[e].keys()
for et in event_types:
if et in types:
alert = EVENTS[e][et]
if not isinstance(alert, Alert):
alert = alert(*callback_args)
if DT_CTRL * (self.events_prev[e] + 1) >= alert.creation_delay:
alert.alert_type = f"{EVENT_NAME[e]}/{et}"
alert.event_type = et
ret.append(alert)
return ret
def add_from_msg(self, events):
for e in events:
self.events.append(e.name.raw)
def to_msg(self):
ret = []
for event_name in self.events:
event = car.CarEvent.new_message()
event.name = event_name
for event_type in EVENTS.get(event_name, {}).keys():
setattr(event, event_type, True)
ret.append(event)
return ret
class Alert:
def __init__(self,
alert_text_1: str,
alert_text_2: str,
alert_status: log.ControlsState.AlertStatus,
alert_size: log.ControlsState.AlertSize,
priority: Priority,
visual_alert: car.CarControl.HUDControl.VisualAlert,
audible_alert: car.CarControl.HUDControl.AudibleAlert,
duration: float,
alert_rate: float = 0.,
creation_delay: float = 0.):
self.alert_text_1 = alert_text_1
self.alert_text_2 = alert_text_2
self.alert_status = alert_status
self.alert_size = alert_size
self.priority = priority
self.visual_alert = visual_alert
self.audible_alert = audible_alert
self.duration = int(duration / DT_CTRL)
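    # duration is given in seconds and stored as a count of control frames (DT_CTRL seconds per frame)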
self.alert_rate = alert_rate
self.creation_delay = creation_delay
self.alert_type = ""
self.event_type = None
def __str__(self) -> str:
return f"{self.alert_text_1}/{self.alert_text_2} {self.priority} {self.visual_alert} {self.audible_alert}"
def __gt__(self, alert2) -> bool:
return self.priority > alert2.priority
class NoEntryAlert(Alert):
def __init__(self, alert_text_2, visual_alert=VisualAlert.none):
super().__init__("openpilot Unavailable", alert_text_2, AlertStatus.normal,
AlertSize.mid, Priority.LOW, visual_alert,
AudibleAlert.refuse, 3.)
class SoftDisableAlert(Alert):
def __init__(self, alert_text_2):
super().__init__("TAKE CONTROL IMMEDIATELY", alert_text_2,
AlertStatus.userPrompt, AlertSize.full,
Priority.MID, VisualAlert.steerRequired,
AudibleAlert.warningSoft, 2.),
# less harsh version of SoftDisable, where the condition is user-triggered
class UserSoftDisableAlert(SoftDisableAlert):
def __init__(self, alert_text_2):
super().__init__(alert_text_2),
self.alert_text_1 = "openpilot will disengage"
class ImmediateDisableAlert(Alert):
def __init__(self, alert_text_2):
super().__init__("TAKE CONTROL IMMEDIATELY", alert_text_2,
AlertStatus.critical, AlertSize.full,
Priority.HIGHEST, VisualAlert.steerRequired,
AudibleAlert.warningImmediate, 4.),
class EngagementAlert(Alert):
def __init__(self, audible_alert: car.CarControl.HUDControl.AudibleAlert):
super().__init__("", "",
AlertStatus.normal, AlertSize.none,
Priority.MID, VisualAlert.none,
audible_alert, .2),
class NormalPermanentAlert(Alert):
def __init__(self, alert_text_1: str, alert_text_2: str = "", duration: float = 0.2, priority: Priority = Priority.LOWER, creation_delay: float = 0.):
super().__init__(alert_text_1, alert_text_2,
AlertStatus.normal, AlertSize.mid if len(alert_text_2) else AlertSize.small,
priority, VisualAlert.none, AudibleAlert.none, duration, creation_delay=creation_delay),
class StartupAlert(Alert):
def __init__(self, alert_text_1: str, alert_text_2: str = "Always keep hands on wheel and eyes on road", alert_status=AlertStatus.normal):
super().__init__(alert_text_1, alert_text_2,
alert_status, AlertSize.mid,
Priority.LOWER, VisualAlert.none, AudibleAlert.none, 10.),
# ********** helper functions **********
def get_display_speed(speed_ms: float, metric: bool) -> str:
speed = int(round(speed_ms * (CV.MS_TO_KPH if metric else CV.MS_TO_MPH)))
unit = 'km/h' if metric else 'mph'
return f"{speed} {unit}"
# ********** alert callback functions **********
AlertCallbackType = Callable[[car.CarParams, messaging.SubMaster, bool, int], Alert]
def soft_disable_alert(alert_text_2: str) -> AlertCallbackType:
def func(CP: car.CarParams, sm: messaging.SubMaster, metric: bool, soft_disable_time: int) -> Alert:
if soft_disable_time < int(0.5 / DT_CTRL):
return ImmediateDisableAlert(alert_text_2)
return SoftDisableAlert(alert_text_2)
return func
def user_soft_disable_alert(alert_text_2: str) -> AlertCallbackType:
def func(CP: car.CarParams, sm: messaging.SubMaster, metric: bool, soft_disable_time: int) -> Alert:
if soft_disable_time < int(0.5 / DT_CTRL):
return ImmediateDisableAlert(alert_text_2)
return UserSoftDisableAlert(alert_text_2)
return func
def below_engage_speed_alert(CP: car.CarParams, sm: messaging.SubMaster, metric: bool, soft_disable_time: int) -> Alert:
return NoEntryAlert(f"Speed Below {get_display_speed(CP.minEnableSpeed, metric)}")
def below_steer_speed_alert(CP: car.CarParams, sm: messaging.SubMaster, metric: bool, soft_disable_time: int) -> Alert:
return Alert(
f"Steer Unavailable Below {get_display_speed(CP.minSteerSpeed, metric)}",
"",
AlertStatus.userPrompt, AlertSize.small,
Priority.MID, VisualAlert.steerRequired, AudibleAlert.prompt, 0.4)
def calibration_incomplete_alert(CP: car.CarParams, sm: messaging.SubMaster, metric: bool, soft_disable_time: int) -> Alert:
return Alert(
"Calibration in Progress: %d%%" % sm['liveCalibration'].calPerc,
f"Drive Above {get_display_speed(MIN_SPEED_FILTER, metric)}",
AlertStatus.normal, AlertSize.mid,
Priority.LOWEST, VisualAlert.none, AudibleAlert.none, .2)
def no_gps_alert(CP: car.CarParams, sm: messaging.SubMaster, metric: bool, soft_disable_time: int) -> Alert:
gps_integrated = sm['peripheralState'].pandaType in [log.PandaState.PandaType.uno, log.PandaState.PandaType.dos]
return Alert(
"Poor GPS reception",
"If sky is visible, contact support" if gps_integrated else "",
AlertStatus.normal, AlertSize.small,
Priority.LOWER, VisualAlert.none, AudibleAlert.none, .2, creation_delay=300.)
def wrong_car_mode_alert(CP: car.CarParams, sm: messaging.SubMaster, metric: bool, soft_disable_time: int) -> Alert:
text = "Cruise Mode Disabled"
if CP.carName == "honda":
text = "Main Switch Off"
return NoEntryAlert(text)
def joystick_alert(CP: car.CarParams, sm: messaging.SubMaster, metric: bool, soft_disable_time: int) -> Alert:
axes = sm['testJoystick'].axes
gb, steer = list(axes)[:2] if len(axes) else (0., 0.)
vals = f"Gas: {round(gb * 100.)}%, Steer: {round(steer * 100.)}%"
return NormalPermanentAlert("Joystick Mode", vals)
EVENTS: Dict[int, Dict[str, Union[Alert, AlertCallbackType]]] = {
# ********** events with no alerts **********
EventName.stockFcw: {},
# ********** events only containing alerts displayed in all states **********
EventName.joystickDebug: {
ET.WARNING: joystick_alert,
ET.PERMANENT: NormalPermanentAlert("Joystick Mode"),
},
EventName.controlsInitializing: {
ET.NO_ENTRY: NoEntryAlert("System Initializing"),
},
EventName.startup: {
ET.PERMANENT: StartupAlert("Be ready to take over at any time")
},
EventName.startupMaster: {
ET.PERMANENT: StartupAlert("Welcome Back, Austin.",
alert_status=AlertStatus.userPrompt),
},
# Car is recognized, but marked as dashcam only
EventName.startupNoControl: {
ET.PERMANENT: StartupAlert("Dashcam mode"),
},
# Car is not recognized
EventName.startupNoCar: {
ET.PERMANENT: StartupAlert("Dashcam mode for unsupported car"),
},
EventName.startupNoFw: {
ET.PERMANENT: StartupAlert("Car Unrecognized",
"Check comma power connections",
alert_status=AlertStatus.userPrompt),
},
EventName.dashcamMode: {
ET.PERMANENT: NormalPermanentAlert("Dashcam Mode",
priority=Priority.LOWEST),
},
EventName.invalidLkasSetting: {
ET.PERMANENT: NormalPermanentAlert("Stock LKAS is on",
"Turn off stock LKAS to engage"),
},
EventName.cruiseMismatch: {
#ET.PERMANENT: ImmediateDisableAlert("openpilot failed to cancel cruise"),
},
# Some features or cars are marked as community features. If openpilot
# detects the use of a community feature it switches to dashcam mode
# until these features are allowed using a toggle in settings.
EventName.communityFeatureDisallowed: {
ET.PERMANENT: NormalPermanentAlert("openpilot Unavailable",
"Enable Community Features in Settings"),
},
# openpilot doesn't recognize the car. This switches openpilot into a
# read-only mode. This can be solved by adding your fingerprint.
# See https://github.com/commaai/openpilot/wiki/Fingerprinting for more information
EventName.carUnrecognized: {
ET.PERMANENT: NormalPermanentAlert("Dashcam Mode",
"Car Unrecognized",
priority=Priority.LOWEST),
},
EventName.stockAeb: {
ET.PERMANENT: Alert(
"BRAKE!",
"Stock AEB: Risk of Collision",
AlertStatus.critical, AlertSize.full,
Priority.HIGHEST, VisualAlert.fcw, AudibleAlert.none, 2.),
ET.NO_ENTRY: NoEntryAlert("Stock AEB: Risk of Collision"),
},
EventName.fcw: {
ET.PERMANENT: Alert(
"BRAKE!",
"Risk of Collision",
AlertStatus.critical, AlertSize.full,
Priority.HIGHEST, VisualAlert.fcw, AudibleAlert.warningSoft, 2.),
},
EventName.ldw: {
ET.PERMANENT: Alert(
"Lane Departure Detected",
"",
AlertStatus.userPrompt, AlertSize.small,
Priority.LOW, VisualAlert.ldw, AudibleAlert.prompt, 3.),
},
# ********** events only containing alerts that display while engaged **********
EventName.gasPressed: {
ET.PRE_ENABLE: Alert(
"Release Gas Pedal to Engage",
"",
AlertStatus.normal, AlertSize.small,
Priority.LOWEST, VisualAlert.none, AudibleAlert.none, .1, creation_delay=1.),
},
# openpilot tries to learn certain parameters about your car by observing
# how the car behaves to steering inputs from both human and openpilot driving.
# This includes:
# - steer ratio: gear ratio of the steering rack. Steering angle divided by tire angle
# - tire stiffness: how much grip your tires have
# - angle offset: most steering angle sensors are offset and measure a non zero angle when driving straight
# This alert is thrown when any of these values exceed a sanity check. This can be caused by
# bad alignment or bad sensor data. If this happens consistently consider creating an issue on GitHub
EventName.vehicleModelInvalid: {
ET.NO_ENTRY: NoEntryAlert("Vehicle Parameter Identification Failed"),
ET.SOFT_DISABLE: soft_disable_alert("Vehicle Parameter Identification Failed"),
},
EventName.steerTempUnavailableSilent: {
ET.WARNING: Alert(
"Autosteer still enabled",
"",
AlertStatus.userPrompt, AlertSize.small,
Priority.LOW, VisualAlert.none, AudibleAlert.none, 1.),
},
EventName.preDriverDistracted: {
ET.WARNING: Alert(
"Pay Attention",
"",
AlertStatus.normal, AlertSize.small,
Priority.LOW, VisualAlert.none, AudibleAlert.none, .1),
},
EventName.promptDriverDistracted: {
ET.WARNING: Alert(
"Pay Attention",
"Driver Distracted",
AlertStatus.userPrompt, AlertSize.mid,
Priority.MID, VisualAlert.steerRequired, AudibleAlert.promptDistracted, .1),
},
EventName.driverDistracted: {
ET.WARNING: Alert(
"DISENGAGE IMMEDIATELY",
"Driver Distracted",
AlertStatus.critical, AlertSize.full,
Priority.HIGH, VisualAlert.steerRequired, AudibleAlert.warningImmediate, .1),
},
EventName.preDriverUnresponsive: {
ET.WARNING: Alert(
"Touch Steering Wheel: No Face Detected",
"",
AlertStatus.normal, AlertSize.small,
Priority.LOW, VisualAlert.steerRequired, AudibleAlert.none, .1, alert_rate=0.75),
},
EventName.promptDriverUnresponsive: {
ET.WARNING: Alert(
"Touch Steering Wheel",
"Driver Unresponsive",
AlertStatus.userPrompt, AlertSize.mid,
Priority.MID, VisualAlert.steerRequired, AudibleAlert.promptDistracted, .1),
},
EventName.driverUnresponsive: {
ET.WARNING: Alert(
"DISENGAGE IMMEDIATELY",
"Driver Unresponsive",
AlertStatus.critical, AlertSize.full,
Priority.HIGH, VisualAlert.steerRequired, AudibleAlert.warningImmediate, .1),
},
EventName.manualRestart: {
ET.WARNING: Alert(
"TAKE CONTROL",
"Resume Driving Manually",
AlertStatus.userPrompt, AlertSize.mid,
Priority.LOW, VisualAlert.none, AudibleAlert.none, .2),
},
EventName.resumeRequired: {
ET.WARNING: Alert(
"STOPPED",
"Press Resume to Go",
AlertStatus.userPrompt, AlertSize.mid,
Priority.LOW, VisualAlert.none, AudibleAlert.none, .2),
},
EventName.belowSteerSpeed: {
ET.WARNING: below_steer_speed_alert,
},
EventName.preLaneChangeLeft: {
ET.WARNING: Alert(
"Steer Left to Start Lane Change Once Safe",
"",
AlertStatus.normal, AlertSize.small,
Priority.LOW, VisualAlert.none, AudibleAlert.none, .1, alert_rate=0.75),
},
EventName.preLaneChangeRight: {
ET.WARNING: Alert(
"Steer Right to Start Lane Change Once Safe",
"",
AlertStatus.normal, AlertSize.small,
Priority.LOW, VisualAlert.none, AudibleAlert.none, .1, alert_rate=0.75),
},
EventName.laneChangeBlocked: {
ET.WARNING: Alert(
"Car Detected in Blindspot",
"",
AlertStatus.userPrompt, AlertSize.small,
Priority.LOW, VisualAlert.none, AudibleAlert.prompt, .1),
},
EventName.laneChange: {
ET.WARNING: Alert(
"Changing Lanes",
"",
AlertStatus.normal, AlertSize.small,
Priority.LOW, VisualAlert.none, AudibleAlert.none, .1),
},
EventName.manualSteeringRequired: {
ET.WARNING: Alert(
"Lane Keeping Assist is OFF",
"Manual Steering Required",
AlertStatus.normal, AlertSize.mid,
Priority.LOW, VisualAlert.none, AudibleAlert.disengage, 1.),
},
EventName.manualLongitudinalRequired: {
ET.WARNING: Alert(
"Adaptive Cruise Control is OFF",
"Manual Gas/Brakes Required",
AlertStatus.normal, AlertSize.mid,
Priority.LOW, VisualAlert.none, AudibleAlert.none, 1.),
},
EventName.steerSaturated: {
ET.WARNING: Alert(
"Take Control",
"Turn Exceeds Steering Limit",
AlertStatus.userPrompt, AlertSize.mid,
Priority.LOW, VisualAlert.steerRequired, AudibleAlert.none, 1.),
},
# Thrown when the fan is driven at >50% but is not rotating
EventName.fanMalfunction: {
ET.PERMANENT: NormalPermanentAlert("Fan Malfunction", "Contact Support"),
},
# Camera is not outputting frames at a constant framerate
EventName.cameraMalfunction: {
ET.PERMANENT: NormalPermanentAlert("Camera Malfunction", "Contact Support"),
},
# Unused
EventName.gpsMalfunction: {
ET.PERMANENT: NormalPermanentAlert("GPS Malfunction", "Contact Support"),
},
# When the GPS position and localizer diverge the localizer is reset to the
# current GPS position. This alert is thrown when the localizer is reset
# more often than expected.
EventName.localizerMalfunction: {
ET.PERMANENT: NormalPermanentAlert("Sensor Malfunction", "Contact Support"),
},
# ********** events that affect controls state transitions **********
EventName.pcmEnable: {
ET.ENABLE: EngagementAlert(AudibleAlert.engage),
},
EventName.buttonEnable: {
ET.ENABLE: EngagementAlert(AudibleAlert.engage),
},
EventName.silentButtonEnable: {
ET.ENABLE: Alert(
"",
"",
AlertStatus.normal, AlertSize.none,
Priority.MID, VisualAlert.none, AudibleAlert.none, .2, 0., 0.),
},
EventName.pcmDisable: {
ET.USER_DISABLE: EngagementAlert(AudibleAlert.disengage),
},
EventName.buttonCancel: {
ET.USER_DISABLE: EngagementAlert(AudibleAlert.disengage),
},
EventName.brakeHold: {
ET.USER_DISABLE: EngagementAlert(AudibleAlert.disengage),
ET.NO_ENTRY: NoEntryAlert("Brake Hold Active"),
},
EventName.silentBrakeHold: {
ET.USER_DISABLE: Alert(
"",
"",
AlertStatus.normal, AlertSize.none,
Priority.MID, VisualAlert.none, AudibleAlert.none, .2, 0., 0.),
ET.NO_ENTRY: NoEntryAlert("Brake Hold Active"),
},
EventName.parkBrake: {
ET.USER_DISABLE: EngagementAlert(AudibleAlert.disengage),
ET.NO_ENTRY: NoEntryAlert("Parking Brake Engaged"),
},
EventName.pedalPressed: {
ET.WARNING: Alert(
"",
"",
AlertStatus.normal, AlertSize.none,
Priority.MID, VisualAlert.none, AudibleAlert.none, .2, 0., 0.),
},
EventName.silentPedalPressed: {
ET.WARNING: Alert(
"",
"",
AlertStatus.normal, AlertSize.none,
Priority.MID, VisualAlert.none, AudibleAlert.none, .2, 0., 0.),
},
EventName.wrongCarMode: {
ET.USER_DISABLE: EngagementAlert(AudibleAlert.disengage),
ET.NO_ENTRY: wrong_car_mode_alert,
},
EventName.wrongCruiseMode: {
ET.USER_DISABLE: EngagementAlert(AudibleAlert.disengage),
ET.NO_ENTRY: NoEntryAlert("Adaptive Cruise Disabled"),
},
EventName.steerTempUnavailable: {
ET.WARNING: Alert(
"Autosteer still enabled",
"Large steer override by driver.",
AlertStatus.userPrompt, AlertSize.mid,
Priority.LOW, VisualAlert.none, AudibleAlert.none, 1.),
},
EventName.outOfSpace: {
ET.PERMANENT: NormalPermanentAlert("Out of Storage"),
ET.NO_ENTRY: NoEntryAlert("Out of Storage"),
},
EventName.belowEngageSpeed: {
ET.NO_ENTRY: below_engage_speed_alert,
},
EventName.sensorDataInvalid: {
ET.PERMANENT: Alert(
"No Data from Device Sensors",
"Reboot your Device",
AlertStatus.normal, AlertSize.mid,
Priority.LOWER, VisualAlert.none, AudibleAlert.none, .2, creation_delay=1.),
ET.NO_ENTRY: NoEntryAlert("No Data from Device Sensors"),
},
EventName.noGps: {
ET.PERMANENT: no_gps_alert,
},
EventName.soundsUnavailable: {
ET.PERMANENT: NormalPermanentAlert("Speaker not found", "Reboot your Device"),
ET.NO_ENTRY: NoEntryAlert("Speaker not found"),
},
EventName.tooDistracted: {
ET.NO_ENTRY: NoEntryAlert("Distraction Level Too High"),
},
EventName.overheat: {
    ET.PERMANENT: Alert(
"System Overheated",
"",
AlertStatus.normal, AlertSize.small,
Priority.LOWER, VisualAlert.none, AudibleAlert.none, 0., 0., .2),
ET.SOFT_DISABLE: SoftDisableAlert("System Overheated"),
ET.NO_ENTRY: NoEntryAlert("System Overheated"),
},
EventName.wrongGear: {
ET.SOFT_DISABLE: user_soft_disable_alert("Gear not D"),
ET.NO_ENTRY: NoEntryAlert("Gear not D"),
},
EventName.silentWrongGear: {
ET.SOFT_DISABLE: Alert(
"Gear not D",
"openpilot Unavailable",
AlertStatus.normal, AlertSize.mid,
Priority.LOW, VisualAlert.none, AudibleAlert.none, 0., 2., 3.),
ET.NO_ENTRY: Alert(
"Gear not D",
"openpilot Unavailable",
AlertStatus.normal, AlertSize.mid,
Priority.LOW, VisualAlert.none, AudibleAlert.none, 0., 2., 3.),
},
# This alert is thrown when the calibration angles are outside of the acceptable range.
# For example if the device is pointed too much to the left or the right.
# Usually this can only be solved by removing the mount from the windshield completely,
# and attaching while making sure the device is pointed straight forward and is level.
# See https://comma.ai/setup for more information
EventName.calibrationInvalid: {
ET.PERMANENT: NormalPermanentAlert("Calibration Invalid", "Remount Device and Recalibrate"),
ET.SOFT_DISABLE: soft_disable_alert("Calibration Invalid: Remount Device & Recalibrate"),
ET.NO_ENTRY: NoEntryAlert("Calibration Invalid: Remount Device & Recalibrate"),
},
EventName.calibrationIncomplete: {
ET.PERMANENT: calibration_incomplete_alert,
ET.SOFT_DISABLE: soft_disable_alert("Calibration in Progress"),
ET.NO_ENTRY: NoEntryAlert("Calibration in Progress"),
},
EventName.doorOpen: {
ET.SOFT_DISABLE: user_soft_disable_alert("Door Open"),
ET.NO_ENTRY: NoEntryAlert("Door Open"),
},
EventName.seatbeltNotLatched: {
ET.WARNING: Alert(
"",
"",
AlertStatus.normal, AlertSize.none,
Priority.MID, VisualAlert.none, AudibleAlert.none, .2, 0., 0.),
},
EventName.espDisabled: {
ET.SOFT_DISABLE: soft_disable_alert("ESP Off"),
ET.NO_ENTRY: NoEntryAlert("ESP Off"),
},
EventName.lowBattery: {
ET.SOFT_DISABLE: soft_disable_alert("Low Battery"),
ET.NO_ENTRY: NoEntryAlert("Low Battery"),
},
# Different openpilot services communicate between each other at a certain
# interval. If communication does not follow the regular schedule this alert
# is thrown. This can mean a service crashed, did not broadcast a message for
# ten times the regular interval, or the average interval is more than 10% too high.
EventName.commIssue: {
ET.SOFT_DISABLE: soft_disable_alert("Communication Issue between Processes"),
ET.NO_ENTRY: NoEntryAlert("Communication Issue between Processes"),
},
# Thrown when manager detects a service exited unexpectedly while driving
EventName.processNotRunning: {
ET.NO_ENTRY: NoEntryAlert("System Malfunction: Reboot Your Device"),
},
EventName.radarFault: {
ET.SOFT_DISABLE: soft_disable_alert("Radar Error: Restart the Car"),
ET.NO_ENTRY: NoEntryAlert("Radar Error: Restart the Car"),
},
# Every frame from the camera should be processed by the model. If modeld
# is not processing frames fast enough they have to be dropped. This alert is
# thrown when over 20% of frames are dropped.
EventName.modeldLagging: {
ET.SOFT_DISABLE: soft_disable_alert("Driving model lagging"),
ET.NO_ENTRY: NoEntryAlert("Driving model lagging"),
},
# Besides predicting the path, lane lines and lead car data the model also
# predicts the current velocity and rotation speed of the car. If the model is
# very uncertain about the current velocity while the car is moving, this
# usually means the model has trouble understanding the scene. This is used
# as a heuristic to warn the driver.
EventName.posenetInvalid: {
ET.SOFT_DISABLE: soft_disable_alert("Model Output Uncertain"),
ET.NO_ENTRY: NoEntryAlert("Model Output Uncertain"),
},
# When the localizer detects an acceleration of more than 40 m/s^2 (~4G) we
# alert the driver the device might have fallen from the windshield.
EventName.deviceFalling: {
ET.SOFT_DISABLE: soft_disable_alert("Device Fell Off Mount"),
ET.NO_ENTRY: NoEntryAlert("Device Fell Off Mount"),
},
EventName.lowMemory: {
ET.SOFT_DISABLE: soft_disable_alert("Low Memory: Reboot Your Device"),
ET.PERMANENT: NormalPermanentAlert("Low Memory", "Reboot your Device"),
ET.NO_ENTRY: NoEntryAlert("Low Memory: Reboot Your Device"),
},
EventName.highCpuUsage: {
#ET.SOFT_DISABLE: soft_disable_alert("System Malfunction: Reboot Your Device"),
#ET.PERMANENT: NormalPermanentAlert("System Malfunction", "Reboot your Device"),
ET.NO_ENTRY: NoEntryAlert("System Malfunction: Reboot Your Device"),
},
EventName.accFaulted: {
ET.IMMEDIATE_DISABLE: ImmediateDisableAlert("Cruise Faulted"),
ET.PERMANENT: NormalPermanentAlert("Cruise Faulted", ""),
ET.NO_ENTRY: NoEntryAlert("Cruise Faulted"),
},
EventName.controlsMismatch: {
ET.IMMEDIATE_DISABLE: ImmediateDisableAlert("Controls Mismatch"),
},
EventName.roadCameraError: {
ET.PERMANENT: NormalPermanentAlert("Camera Error",
duration=1.,
creation_delay=30.),
},
EventName.driverCameraError: {
ET.PERMANENT: NormalPermanentAlert("Camera Error",
duration=1.,
creation_delay=30.),
},
EventName.wideRoadCameraError: {
ET.PERMANENT: NormalPermanentAlert("Camera Error",
duration=1.,
creation_delay=30.),
},
# Sometimes the USB stack on the device can get into a bad state
# causing the connection to the panda to be lost
EventName.usbError: {
ET.SOFT_DISABLE: soft_disable_alert("USB Error: Reboot Your Device"),
ET.PERMANENT: NormalPermanentAlert("USB Error: Reboot Your Device", ""),
ET.NO_ENTRY: NoEntryAlert("USB Error: Reboot Your Device"),
},
# This alert can be thrown for the following reasons:
# - No CAN data received at all
# - CAN data is received, but some message are not received at the right frequency
# If you're not writing a new car port, this is usually cause by faulty wiring
EventName.canError: {
ET.IMMEDIATE_DISABLE: ImmediateDisableAlert("CAN Error: Check Connections"),
ET.PERMANENT: Alert(
"CAN Error: Check Connections",
"",
AlertStatus.normal, AlertSize.small,
Priority.LOW, VisualAlert.none, AudibleAlert.none, 1., creation_delay=1.),
ET.NO_ENTRY: NoEntryAlert("CAN Error: Check Connections"),
},
EventName.steerUnavailable: {
ET.IMMEDIATE_DISABLE: ImmediateDisableAlert("LKAS Fault: Restart the Car"),
ET.PERMANENT: NormalPermanentAlert("LKAS Fault: Restart the car to engage"),
ET.NO_ENTRY: NoEntryAlert("LKAS Fault: Restart the Car"),
},
EventName.brakeUnavailable: {
ET.IMMEDIATE_DISABLE: ImmediateDisableAlert("Cruise Fault: Restart the Car"),
ET.PERMANENT: NormalPermanentAlert("Cruise Fault: Restart the car to engage"),
ET.NO_ENTRY: NoEntryAlert("Cruise Fault: Restart the Car"),
},
EventName.reverseGear: {
ET.PERMANENT: Alert(
"Reverse\nGear",
"",
AlertStatus.normal, AlertSize.full,
Priority.LOWEST, VisualAlert.none, AudibleAlert.none, .2, creation_delay=0.5),
ET.USER_DISABLE: EngagementAlert(AudibleAlert.none),
ET.NO_ENTRY: NoEntryAlert("Reverse Gear"),
},
# On cars that use stock ACC the car can decide to cancel ACC for various reasons.
# When this happens we can no long control the car so the user needs to be warned immediately.
EventName.cruiseDisabled: {
ET.IMMEDIATE_DISABLE: ImmediateDisableAlert("Cruise Is Off"),
},
# For planning the trajectory Model Predictive Control (MPC) is used. This is
# an optimization algorithm that is not guaranteed to find a feasible solution.
# If no solution is found or the solution has a very high cost this alert is thrown.
EventName.plannerError: {
ET.IMMEDIATE_DISABLE: ImmediateDisableAlert("Planner Solution Error"),
ET.NO_ENTRY: NoEntryAlert("Planner Solution Error"),
},
# When the relay in the harness box opens the CAN bus between the LKAS camera
# and the rest of the car is separated. When messages from the LKAS camera
# are received on the car side this usually means the relay hasn't opened correctly
# and this alert is thrown.
EventName.relayMalfunction: {
ET.IMMEDIATE_DISABLE: ImmediateDisableAlert("Harness Malfunction"),
ET.PERMANENT: NormalPermanentAlert("Harness Malfunction", "Check Hardware"),
ET.NO_ENTRY: NoEntryAlert("Harness Malfunction"),
},
EventName.noTarget: {
ET.IMMEDIATE_DISABLE: Alert(
"openpilot Canceled",
"No close lead car",
AlertStatus.normal, AlertSize.mid,
Priority.HIGH, VisualAlert.none, AudibleAlert.disengage, 3.),
ET.NO_ENTRY: NoEntryAlert("No Close Lead Car"),
},
EventName.speedTooLow: {
ET.IMMEDIATE_DISABLE: Alert(
"openpilot Canceled",
"Speed too low",
AlertStatus.normal, AlertSize.mid,
Priority.HIGH, VisualAlert.none, AudibleAlert.disengage, 3.),
},
# When the car is driving faster than most cars in the training data, the model outputs can be unpredictable.
EventName.speedTooHigh: {
ET.WARNING: Alert(
"Speed Too High",
"Model uncertain at this speed",
AlertStatus.userPrompt, AlertSize.mid,
Priority.HIGH, VisualAlert.steerRequired, AudibleAlert.promptRepeat, 4.),
ET.NO_ENTRY: NoEntryAlert("Slow down to engage"),
},
EventName.lowSpeedLockout: {
ET.PERMANENT: NormalPermanentAlert("Cruise Fault: Restart the car to engage"),
ET.NO_ENTRY: NoEntryAlert("Cruise Fault: Restart the Car"),
},
}
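# Minimal usage sketch (CP, sm, metric and soft_disable_time are assumed to come from the caller, e.g. controlsd):
#   events = Events()
#   events.add(EventName.doorOpen)
#   alerts = events.create_alerts([ET.NO_ENTRY, ET.PERMANENT], [CP, sm, metric, soft_disable_time])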
| 34.237251 | 152 | 0.687747 |
4a1ae785fe38d3c0a21b0293768ac5b7ef28ff55
| 5,710 |
py
|
Python
|
tests/backend/test_email_queue.py
|
plastr/extrasolar-game
|
1aad5971556d498e3617afe75f27e2f4132d4668
|
[
"MIT",
"Unlicense"
] | null | null | null |
tests/backend/test_email_queue.py
|
plastr/extrasolar-game
|
1aad5971556d498e3617afe75f27e2f4132d4668
|
[
"MIT",
"Unlicense"
] | null | null | null |
tests/backend/test_email_queue.py
|
plastr/extrasolar-game
|
1aad5971556d498e3617afe75f27e2f4132d4668
|
[
"MIT",
"Unlicense"
] | null | null | null |
# Copyright (c) 2010-2011 Lazy 8 Studios, LLC.
# All rights reserved.
from front.backend import email_queue
from front.lib import db, email_module
from front.tests import base
class TestEmailQueue(base.TestCase):
def test_enqueue_email_message(self):
test_email = email_module.EmailMessage('fromuser@example.com', 'touser@example.com', 'Test Subject', 'Test Body pa\xd9\xad')
with db.commit_or_rollback(self.get_ctx()) as ctx:
with db.conn(ctx) as ctx:
# Add an email to the queue.
email_queue.enqueue_email_message(ctx, test_email)
self.assertEqual(len(self.get_sent_emails()), 0)
# And then process the queue.
processed = email_queue.process_email_queue(ctx)
self.assertEqual(processed, 1)
self.assertEqual(len(self.get_sent_emails()), 1)
self.assertEqual(self.get_sent_emails()[0].email_from, test_email.email_from)
self.assertEqual(self.get_sent_emails()[0].email_to, test_email.email_to)
self.assertEqual(self.get_sent_emails()[0].subject, test_email.subject)
self.assertEqual(self.get_sent_emails()[0].body_html, test_email.body_html)
self.clear_sent_emails()
# Should be no more work to do on the queue.
processed = email_queue.process_email_queue(ctx)
self.assertEqual(processed, 0)
self.assertEqual(len(self.get_sent_emails()), 0)
def test_email_module_queue_mode(self):
self.create_user('testuser@example.com', 'pw', first_name="EmailUserFirst", last_name="EmailUserLast")
# Put the email_module into queue dispatch mode.
email_module.set_queue_dispatcher()
# Send an email 'now' which will put it on the queue.
self.assertEqual(len(self.get_sent_emails()), 0)
with db.commit_or_rollback(self.get_ctx()) as ctx:
with db.conn(ctx) as ctx:
user = self.get_logged_in_user(ctx=ctx)
email_module.send_now(ctx, user, 'EMAIL_TEST')
self.assertEqual(len(self.get_sent_emails()), 0)
# Now process the queue which should send the email.
with db.commit_or_rollback(self.get_ctx()) as ctx:
with db.conn(ctx) as ctx:
processed = email_queue.process_email_queue(ctx)
self.assertEqual(processed, 1)
self.assertEqual(len(self.get_sent_emails()), 1)
self.assertEqual(self.get_sent_emails()[0].email_from, '"Test Sender" <test@example.com>')
self.assertEqual(self.get_sent_emails()[0].email_to, 'testuser@example.com')
self.assertTrue("Test message" in self.get_sent_emails()[0].subject)
self.assertTrue("Hello EmailUserFirst" in self.get_sent_emails()[0].body_html)
self.clear_sent_emails()
# Should be no more work to do on the queue.
processed = email_queue.process_email_queue(ctx)
self.assertEqual(processed, 0)
self.assertEqual(len(self.get_sent_emails()), 0)
# Sending an alarm however should bypass the queue.
self.clear_sent_emails()
self.assertEqual(len(self.get_sent_emails()), 0)
with db.commit_or_rollback(self.get_ctx()) as ctx:
with db.conn(ctx) as ctx:
email_module.send_alarm("toalarm@example.com", 'EMAIL_TEST_ALARM')
self.assertEqual(len(self.get_sent_emails()), 1)
self.assertEqual(self.get_sent_emails()[0].email_to, 'toalarm@example.com')
self.assertTrue("Test message for an alarm" in self.get_sent_emails()[0].subject)
        # An alarm email should not end up in the queue.
with db.commit_or_rollback(self.get_ctx()) as ctx:
with db.conn(ctx) as ctx:
processed = email_queue.process_email_queue(ctx)
self.assertEqual(processed, 0)
def test_delivery_fail(self):
self.create_user('testuser@example.com', 'pw')
# Put the email_module into queue dispatch mode.
email_module.set_queue_dispatcher()
# Signal to the unit test base class to raise an exception when sending an email via email_ses.
self._fail_email_delivery = True
# Send an email 'now' which will put it on the queue.
self.assertEqual(len(self.get_sent_emails()), 0)
with db.commit_or_rollback(self.get_ctx()) as ctx:
with db.conn(ctx) as ctx:
user = self.get_logged_in_user(ctx=ctx)
email_module.send_now(ctx, user, 'EMAIL_TEST')
# Attempt to process the queue, which would log an exception and rollback.
self.expect_log('front.backend.email_queue', '.*Sending queued email failed.*')
with db.commit_or_rollback(self.get_ctx()) as ctx:
with db.conn(ctx) as ctx:
processed = email_queue.process_email_queue(ctx)
# No email should have been processed because an exception should have occurred and rolled back the
# transaction.
self.assertEqual(processed, 0)
self.assertEqual(len(self.get_sent_emails()), 0)
# And now remove the exception raising.
self._fail_email_delivery = False
with db.commit_or_rollback(self.get_ctx()) as ctx:
with db.conn(ctx) as ctx:
processed = email_queue.process_email_queue(ctx)
                # Should now be able to process the email on the queue.
self.assertEqual(processed, 1)
self.assertEqual(len(self.get_sent_emails()), 1)
| 51.441441 | 132 | 0.638354 |
4a1ae7f1eb994ff67444bcdc892f19055d3ca452
| 887 |
py
|
Python
|
community_app/community_app/users/tests/test_drf_views.py
|
otivedani/monorepo_community_web
|
a0a719500c5d2cf8426c8502a181d7ceb50830fb
|
[
"MIT"
] | null | null | null |
community_app/community_app/users/tests/test_drf_views.py
|
otivedani/monorepo_community_web
|
a0a719500c5d2cf8426c8502a181d7ceb50830fb
|
[
"MIT"
] | 2 |
2021-02-04T14:34:11.000Z
|
2021-02-04T14:36:31.000Z
|
community_app/community_app/users/tests/test_drf_views.py
|
otivedani/monorepo_community_web
|
a0a719500c5d2cf8426c8502a181d7ceb50830fb
|
[
"MIT"
] | 2 |
2021-02-04T14:37:37.000Z
|
2021-02-06T04:39:00.000Z
|
import pytest
from django.test import RequestFactory
from community_app.users.api.views import UserViewSet
from community_app.users.models import User
pytestmark = pytest.mark.django_db
class TestUserViewSet:
def test_get_queryset(self, user: User, rf: RequestFactory):
view = UserViewSet()
request = rf.get("/fake-url/")
request.user = user
view.request = request
assert user in view.get_queryset()
def test_me(self, user: User, rf: RequestFactory):
view = UserViewSet()
request = rf.get("/fake-url/")
request.user = user
view.request = request
response = view.me(request)
assert response.data == {
"username": user.username,
"email": user.email,
"name": user.name,
"url": f"http://testserver/api/users/{user.username}/",
}
| 25.342857 | 67 | 0.621195 |
4a1ae80200996e87020ba900969dbdcbc8340b46
| 2,404 |
py
|
Python
|
python/torch_mlir/compiler_utils.py
|
ramiro050/torch-mlir
|
a34dad2e077592deb497a9077fc3188b6e1154d5
|
[
"Apache-2.0"
] | 2 |
2022-02-16T21:56:00.000Z
|
2022-02-20T17:34:47.000Z
|
python/torch_mlir/compiler_utils.py
|
makslevental/torch-mlir
|
5cff40c88a1468752a23a7c9971cfa6a5c045351
|
[
"Apache-2.0"
] | null | null | null |
python/torch_mlir/compiler_utils.py
|
makslevental/torch-mlir
|
5cff40c88a1468752a23a7c9971cfa6a5c045351
|
[
"Apache-2.0"
] | null | null | null |
# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
# Also available under a BSD-style license. See LICENSE.
from io import StringIO
import os
import sys
import tempfile
from torch_mlir.passmanager import PassManager
from torch_mlir.ir import StringAttr
def get_module_name_for_debug_dump(module):
"""Gets a name suitable for a debug dump.
The name is not guaranteed to be unique.
"""
if not "torch.debug_module_name" in module.operation.attributes:
return "UnnammedModule"
return StringAttr(module.operation.attributes["torch.debug_module_name"]).value
def run_pipeline_with_repro_report(module,
pipeline: str,
description: str):
"""Runs `pipeline` on `module`, with a nice repro report if it fails."""
module_name = get_module_name_for_debug_dump(module)
try:
original_stderr = sys.stderr
sys.stderr = StringIO()
asm_for_error_report = module.operation.get_asm(
large_elements_limit=10, enable_debug_info=True)
# Lower module in place to make it ready for compiler backends.
with module.context:
pm = PassManager.parse(pipeline)
pm.run(module)
except Exception as e:
# TODO: More robust.
# - don't arbitrarily clutter up /tmp. When a test suite has many
# tests, this can be a big disk cost (also, /tmp/ is frequently a
# RAM fs, which increases worries about capacity).
# - don't have colliding filenames (hard to do without cluttering
# up /tmp)
        # - if we do have colliding filenames, writes should at least
# avoid being racy.
filename = os.path.join(tempfile.gettempdir(), module_name + ".mlir")
with open(filename, 'w') as f:
f.write(asm_for_error_report)
debug_options="-mlir-print-ir-after-all -mlir-disable-threading"
raise Exception(f"""
{description} failed with the following diagnostics:
{sys.stderr.getvalue()}
Error can be reproduced with:
$ torch-mlir-opt -pass-pipeline='{pipeline}' {filename}
Add '{debug_options}' to get the IR dump for debugging purpose.
""") from None
finally:
sys.stderr = original_stderr
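# Usage sketch (the pipeline string and description are illustrative, not a specific torch-mlir pipeline):
#   run_pipeline_with_repro_report(
#       module,
#       "builtin.module(canonicalize)",
#       "Canonicalizing the module")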
| 40.745763 | 83 | 0.674709 |
4a1aea19fd57e1bef2a59121de2ac85014f131e4
| 5,850 |
py
|
Python
|
process_idear.py
|
Multiscale-Genomics/mg-process-fastq
|
50c7115c0c1a6af48dc34f275e469d1b9eb02999
|
[
"Apache-2.0"
] | 2 |
2017-07-31T11:45:46.000Z
|
2017-08-09T09:32:35.000Z
|
process_idear.py
|
Multiscale-Genomics/mg-process-fastq
|
50c7115c0c1a6af48dc34f275e469d1b9eb02999
|
[
"Apache-2.0"
] | 28 |
2016-11-17T11:12:32.000Z
|
2018-11-02T14:09:13.000Z
|
process_idear.py
|
Multiscale-Genomics/mg-process-fastq
|
50c7115c0c1a6af48dc34f275e469d1b9eb02999
|
[
"Apache-2.0"
] | 4 |
2017-02-12T17:47:21.000Z
|
2018-05-29T08:16:27.000Z
|
#!/usr/bin/env python
"""
.. See the NOTICE file distributed with this work for additional information
regarding copyright ownership.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import print_function
import argparse
from basic_modules.workflow import Workflow
from utils import logger
from tool.idear import idearTool
# ------------------------------------------------------------------------------
class process_idear(Workflow):
"""
    Functions for processing DamID-seq FastQ files. Files are aligned,
    filtered and analysed for peak calling.
"""
def __init__(self, configuration=None):
"""
Initialise the class
Parameters
----------
configuration : dict
a dictionary containing parameters that define how the operation
should be carried out, which are specific to each Tool.
"""
logger.info("Processing DamID-Seq")
if configuration is None:
configuration = {}
self.configuration.update(configuration)
def run(self, input_files, metadata, output_files):
"""
Main run function for processing DamID-seq FastQ data. Pipeline aligns
the FASTQ files to the genome using BWA. iDEAR is then used for peak
calling to identify transcription factor binding sites within the
genome.
Currently this can only handle a single data file and a single
background file.
Parameters
----------
input_files : dict
Location of the initial input files required by the workflow
bsgenome : str
BSgenome index file
bam_1 : str
Location of the FASTQ aligned reads files
bam_2 : str
Location of the FASTQ repeat aligned reads files
bg_bam_1 : str
Location of the background FASTQ aligned reads files
bg_bam_2 : str
Location of the background FASTQ repeat aligned reads files
metadata : dict
Input file meta data associated with their roles
bsgenome : str
bam_1 : str
bam_2 : str
bg_bam_1 : str
bg_bam_2 : str
output_files : dict
Output file locations
bigwig : str
Returns
-------
output_files : dict
Output file locations associated with their roles, for the output
bigwig : str
Location of the bigwig peaks
output_metadata : dict
Output metadata for the associated files in output_files
bigwig : Metadata
"""
output_files_generated = {}
output_metadata = {}
# Add in BSgenome section
logger.info("PROCESS DAMIDSEQ - DEFINED OUTPUT:", output_files)
# iDEAR to call peaks
idear_caller = idearTool(self.configuration)
logger.progress("iDEAR Peak Caller", status="RUNNING")
idear_caller.run(
{
"bam": input_files["bam"],
"bg_bam": input_files["bg_bam"],
"bsgenome": input_files["bsgenome"]
}, {
"bam": metadata["bam"],
"bg_bam": metadata["bg_bam"],
"bsgenome": metadata["bsgenome"]
}, {
"bigwig": output_files["bigwig"],
}
)
logger.progress("iDEAR Peak Caller", status="DONE")
print("DAMID-SEQ RESULTS:", output_metadata)
return output_files_generated, output_metadata
# ------------------------------------------------------------------------------
def main_json(config, in_metadata, out_metadata):
"""
Alternative main function
-------------
This function launches the app using configuration written in
two json files: config.json and input_metadata.json.
"""
# 1. Instantiate and launch the App
print("1. Instantiate and launch the App")
from apps.jsonapp import JSONApp
app = JSONApp()
result = app.launch(process_idear,
config,
in_metadata,
out_metadata)
# 2. The App has finished
print("2. Execution finished; see " + out_metadata)
print(result)
return result
# ------------------------------------------------------------------------------
if __name__ == "__main__":
# Set up the command line parameters
PARSER = argparse.ArgumentParser(
description="iDEAR iDamID-seq peak calling")
PARSER.add_argument("--config", help="Configuration file")
PARSER.add_argument(
"--in_metadata", help="Location of input metadata file")
PARSER.add_argument(
"--out_metadata", help="Location of output metadata file")
PARSER.add_argument(
"--local", action="store_const", const=True, default=False)
# Get the matching parameters from the command line
ARGS = PARSER.parse_args()
CONFIG = ARGS.config
IN_METADATA = ARGS.in_metadata
OUT_METADATA = ARGS.out_metadata
LOCAL = ARGS.local
if LOCAL:
import sys
sys._run_from_cmdl = True # pylint: disable=protected-access
RESULTS = main_json(CONFIG, IN_METADATA, OUT_METADATA)
print(RESULTS)
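# Example invocation (file names are placeholders):
#   python process_idear.py --config config.json --in_metadata input_metadata.json \
#       --out_metadata output_metadata.json --local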
| 29.396985 | 80 | 0.59265 |