column            type           values
author            int64          658 .. 755k
date              stringlengths  19 .. 19
timezone          int64          -46,800 .. 43.2k
hash              stringlengths  40 .. 40
message           stringlengths  5 .. 490
mods              list
language          stringclasses  20 values
license           stringclasses  3 values
repo              stringlengths  5 .. 68
original_message  stringlengths  12 .. 491
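The rows below repeat these ten fields in order, one value per line. As a minimal sketch only (field meanings are inferred from the schema above and the sample rows below, not stated by the source), one record could be held in a structure like this:

```python
from dataclasses import dataclass
from typing import Any, Dict, List

# Minimal sketch only: the field comments are inferred from the schema above and
# the sample rows below; nothing here is an authoritative description of the dataset.
@dataclass
class CommitRecord:
    author: int                   # anonymized author id (int64)
    date: str                     # "DD.MM.YYYY HH:MM:SS", always 19 characters
    timezone: int                 # commit timezone offset, apparently in seconds
    hash: str                     # 40-character git commit hash
    message: str                  # cleaned commit message
    mods: List[Dict[str, Any]]    # per-file changes: change_type, old_path, new_path, diff
    language: str                 # repository language (one of 20 classes)
    license: str                  # repository license (one of 3 classes)
    repo: str                     # "owner/name", e.g. paddlepaddle/paddledetection
    original_message: str         # raw commit message, typically including the PR number
```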
499,331
21.05.2021 19:14:41
-28,800
8c7126bfa059713f2a44d2fdac7a1081c8fd61e4
fix doc, set min_subgraph_size of s2anet
[ { "change_type": "MODIFY", "old_path": "ppdet/engine/export_utils.py", "new_path": "ppdet/engine/export_utils.py", "diff": "@@ -31,7 +31,7 @@ TRT_MIN_SUBGRAPH = {\n'SSD': 60,\n'RCNN': 40,\n'RetinaNet': 40,\n- 'S2ANet': 40,\n+ 'S2ANet': 80,\n'EfficientDet': 40,\n'Face': 3,\n'TTFNet': 60,\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
fix doc, set min_subgraph_size of s2anet (#3112)
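Each `mods` value above is a JSON list of per-file changes with `change_type`, `old_path`, `new_path`, and `diff` keys. A minimal sketch of reading one back, assuming the field is stored as the JSON string shown (the diff text here is shortened for illustration, not the full record):

```python
import json

# Assumption: `mods` is a JSON-encoded list of file changes, as printed in the record above.
# The diff string is truncated here; a real record carries the full unified diff.
mods_json = ('[{"change_type": "MODIFY", '
             '"old_path": "ppdet/engine/export_utils.py", '
             '"new_path": "ppdet/engine/export_utils.py", '
             '"diff": "@@ -31,7 +31,7 @@ TRT_MIN_SUBGRAPH = {\\n..."}]')

for mod in json.loads(mods_json):
    # change_type is e.g. MODIFY or ADD; old_path is null for added files
    print(mod["change_type"], mod["old_path"], "->", mod["new_path"])
    print(mod["diff"])  # unified-diff hunks for this file
```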
499,298
23.05.2021 00:38:51
-28,800
7a732663bb8e982013a0740f041ef38256e0c447
[MOT] Fix video fairmot reader
[ { "change_type": "MODIFY", "old_path": "configs/mot/fairmot/_base_/fairmot_reader_1088x608.yml", "new_path": "configs/mot/fairmot/_base_/fairmot_reader_1088x608.yml", "diff": "@@ -34,7 +34,6 @@ TestMOTReader:\ninputs_def:\nimage_shape: [3, 608, 1088]\nsample_transforms:\n- - Decode: {to_rgb: False}\n- LetterBoxResize: {target_size: [608, 1088]}\n- NormalizeImage: {mean: [0, 0, 0], std: [1, 1, 1]}\n- Permute: {to_rgb: True}\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
[MOT] Fix video fairmot reader (#3130)
499,333
24.05.2021 10:19:31
-28,800
d6f36b14577daa4e6dc730de737eaf5d129410f9
support BatchRandomResize for 2-D input
[ { "change_type": "MODIFY", "old_path": "ppdet/data/transform/batch_operators.py", "new_path": "ppdet/data/transform/batch_operators.py", "diff": "@@ -136,7 +136,8 @@ class BatchRandomResize(BaseOperator):\ndef __call__(self, samples, context=None):\nif self.random_size:\n- target_size = np.random.choice(self.target_size)\n+ index = np.random.choice(len(self.target_size))\n+ target_size = self.target_size[index]\nelse:\ntarget_size = self.target_size\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
support BatchRandomResize for 2-D input (#3132)
499,304
24.05.2021 16:03:35
-28,800
a1acbce3f9f1b52885aace6306b35b6027a455d9
fix readme and fcos docs
[ { "change_type": "MODIFY", "old_path": "README_en.md", "new_path": "README_en.md", "diff": "@@ -18,6 +18,7 @@ After a long time of industry practice polishing, PaddleDetection has had smooth\n<div align=\"center\">\n<img src=\"static/docs/images/football.gif\" width='800'/>\n+ <img src=\"docs/images/mot_pose_demo_640x360.gif\" width='800'/>\n</div>\n### Product news\n@@ -247,6 +248,13 @@ The relationship between COCO mAP and FPS on Tesla V100 of representative models\n- [SOLOv2](configs/solov2/README.md)\n- Rotation object detection\n- [S2ANet](configs/dota/README.md)\n+- [Keypoint detection](configs/keypoint)\n+ - HigherHRNet\n+ - HRNeet\n+- [Multi-Object Tracking](configs/mot/README.md)\n+ - [DeepSORT](configs/mot/deepsort/README.md)\n+ - [JDE](configs/mot/jde/README.md)\n+ - [FairMOT](configs/mot/fairmot/README.md)\n- Vertical field\n- [Face detection](configs/face_detection/README.md)\n- [Pedestrian detection](configs/pedestrian/README.md)\n" }, { "change_type": "MODIFY", "old_path": "configs/fcos/README.md", "new_path": "configs/fcos/README.md", "diff": "@@ -15,6 +15,7 @@ FCOS (Fully Convolutional One-Stage Object Detection) is a fast anchor-free obje\n| ResNet50-FPN | FCOS | 2 | 1x | ---- | 39.6 | [download](https://paddledet.bj.bcebos.com/models/fcos_r50_fpn_1x_coco.pdparams) | [config](https://github.com/PaddlePaddle/PaddleDetection/tree/develop/configs/fcos/fcos_r50_fpn_1x_coco.yml) |\n| ResNet50-FPN | FCOS+DCN | 2 | 1x | ---- | 44.3 | [download](https://paddledet.bj.bcebos.com/models/fcos_dcn_r50_fpn_1x_coco.pdparams) | [config](https://github.com/PaddlePaddle/PaddleDetection/tree/develop/configs/fcos/fcos_dcn_r50_fpn_1x_coco.yml) |\n| ResNet50-FPN | FCOS+multiscale_train | 2 | 2x | ---- | 41.8 | [download](https://paddledet.bj.bcebos.com/models/fcos_r50_fpn_multiscale_2x_coco.pdparams) | [config](https://github.com/PaddlePaddle/PaddleDetection/tree/develop/configs/fcos/fcos_r50_fpn_multiscale_2x_coco.yml) |\n+\n**Notes:**\n- FCOS is trained on COCO train2017 dataset and evaluated on val2017 results of `mAP(IoU=0.5:0.95)`.\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
fix readme and fcos docs (#3144)
499,331
25.05.2021 21:07:26
-28,800
5f59a4b506d86a1a595ceb25636673fea8a20c9f
fix bug of predict video
[ { "change_type": "MODIFY", "old_path": "deploy/python/infer.py", "new_path": "deploy/python/infer.py", "diff": "@@ -531,6 +531,8 @@ def predict_video(detector, camera_id):\ncapture = cv2.VideoCapture(FLAGS.video_file)\nvideo_name = os.path.split(FLAGS.video_file)[-1]\nfps = 30\n+ frame_count = int(capture.get(cv2.CAP_PROP_FRAME_COUNT))\n+ print('frame_count', frame_count)\nwidth = int(capture.get(cv2.CAP_PROP_FRAME_WIDTH))\nheight = int(capture.get(cv2.CAP_PROP_FRAME_HEIGHT))\n# yapf: disable\n@@ -547,7 +549,7 @@ def predict_video(detector, camera_id):\nbreak\nprint('detect frame:%d' % (index))\nindex += 1\n- results = detector.predict(frame, FLAGS.threshold)\n+ results = detector.predict([frame], FLAGS.threshold)\nim = visualize_box_mask(\nframe,\nresults,\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
fix bug of predict video (#3158)
499,304
28.05.2021 12:19:28
-28,800
fa6c5a11820e550ce31419d83960b8fcd18fc748
fix num_classes in solov2
[ { "change_type": "MODIFY", "old_path": "ppdet/modeling/heads/solov2_head.py", "new_path": "ppdet/modeling/heads/solov2_head.py", "diff": "@@ -462,7 +462,8 @@ class SOLOv2Head(nn.Layer):\n# cate_labels & kernel_preds\ncate_labels = inds[:, 1]\nkernel_preds = paddle.gather(kernel_preds, index=inds[:, 0])\n- cate_score_idx = paddle.add(inds[:, 0] * 80, cate_labels)\n+ cate_score_idx = paddle.add(inds[:, 0] * self.cate_out_channels,\n+ cate_labels)\ncate_scores = paddle.gather(cate_preds, index=cate_score_idx)\nsize_trans = np.power(self.seg_num_grids, 2)\n" }, { "change_type": "MODIFY", "old_path": "static/ppdet/modeling/anchor_heads/solov2_head.py", "new_path": "static/ppdet/modeling/anchor_heads/solov2_head.py", "diff": "@@ -367,8 +367,8 @@ class SOLOv2Head(object):\n# cate_labels & kernel_preds\ncate_labels = inds[:, 1]\nkernel_preds = fluid.layers.gather(kernel_preds, index=inds[:, 0])\n- cate_score_idx = fluid.layers.elementwise_add(inds[:, 0] * 80,\n- cate_labels)\n+ cate_score_idx = fluid.layers.elementwise_add(\n+ inds[:, 0] * self.cate_out_channels, cate_labels)\ncate_scores = fluid.layers.gather(cate_preds, index=cate_score_idx)\nsize_trans = np.power(self.seg_num_grids, 2)\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
fix num_classes in solov2 (#3193)
499,333
28.05.2021 19:25:37
-28,800
6b74c7622cdc6d13c40c657affd9346a221e33c2
fix solov2 deploy
[ { "change_type": "MODIFY", "old_path": "deploy/python/infer.py", "new_path": "deploy/python/infer.py", "diff": "@@ -242,17 +242,20 @@ class DetectorSOLOv2(Detector):\nfor i in range(warmup):\nself.predictor.run()\noutput_names = self.predictor.get_output_names()\n+ np_boxes_num = self.predictor.get_output_handle(output_names[\n+ 0]).copy_to_cpu()\nnp_label = self.predictor.get_output_handle(output_names[\n1]).copy_to_cpu()\nnp_score = self.predictor.get_output_handle(output_names[\n2]).copy_to_cpu()\nnp_segms = self.predictor.get_output_handle(output_names[\n3]).copy_to_cpu()\n-\nself.det_times.inference_time_s.start()\nfor i in range(repeats):\nself.predictor.run()\noutput_names = self.predictor.get_output_names()\n+ np_boxes_num = self.predictor.get_output_handle(output_names[\n+ 0]).copy_to_cpu()\nnp_label = self.predictor.get_output_handle(output_names[\n1]).copy_to_cpu()\nnp_score = self.predictor.get_output_handle(output_names[\n@@ -262,7 +265,11 @@ class DetectorSOLOv2(Detector):\nself.det_times.inference_time_s.end(repeats=repeats)\nself.det_times.img_num += 1\n- return dict(segm=np_segms, label=np_label, score=np_score)\n+ return dict(\n+ segm=np_segms,\n+ label=np_label,\n+ score=np_score,\n+ boxes_num=np_boxes_num)\ndef create_inputs(imgs, im_info):\n@@ -481,6 +488,13 @@ def visualize(image_list, results, labels, output_dir='output/', threshold=0.5):\nif 'segm' in results:\nim_results['segm'] = results['segm'][start_idx:start_idx +\nim_bboxes_num, :]\n+ if 'label' in results:\n+ im_results['label'] = results['label'][start_idx:start_idx +\n+ im_bboxes_num]\n+ if 'score' in results:\n+ im_results['score'] = results['score'][start_idx:start_idx +\n+ im_bboxes_num]\n+\nstart_idx += im_bboxes_num\nim = visualize_box_mask(\nimage_file, im_results, labels, threshold=threshold)\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
fix solov2 deploy (#3200)
499,298
29.05.2021 15:41:24
-28,800
d585c310ea6f7328bf24c0f2500c3e5d587585f5
fix decode permute rgb
[ { "change_type": "MODIFY", "old_path": "configs/mot/fairmot/_base_/fairmot_reader_1088x608.yml", "new_path": "configs/mot/fairmot/_base_/fairmot_reader_1088x608.yml", "diff": "@@ -3,15 +3,17 @@ TrainReader:\ninputs_def:\nimage_shape: [3, 608, 1088]\nsample_transforms:\n- - Decode: {to_rgb: False}\n- - AugmentHSV: {is_bgr: True}\n+ - Decode: {}\n+ - RGBReverse: {}\n+ - AugmentHSV: {}\n- LetterBoxResize: {target_size: [608, 1088]}\n- MOTRandomAffine: {reject_outside: False}\n- RandomFlip: {}\n- BboxXYXY2XYWH: {}\n- NormalizeBox: {}\n- NormalizeImage: {mean: [0, 0, 0], std: [1, 1, 1]}\n- - Permute: {to_rgb: True}\n+ - RGBReverse: {}\n+ - Permute: {}\nbatch_transforms:\n- Gt2FairMOTTarget: {}\nbatch_size: 6\n@@ -23,10 +25,10 @@ EvalMOTReader:\ninputs_def:\nimage_shape: [3, 608, 1088]\nsample_transforms:\n- - Decode: {to_rgb: False}\n+ - Decode: {}\n- LetterBoxResize: {target_size: [608, 1088]}\n- NormalizeImage: {mean: [0, 0, 0], std: [1, 1, 1]}\n- - Permute: {to_rgb: True}\n+ - Permute: {}\nbatch_size: 1\n@@ -36,5 +38,5 @@ TestMOTReader:\nsample_transforms:\n- LetterBoxResize: {target_size: [608, 1088]}\n- NormalizeImage: {mean: [0, 0, 0], std: [1, 1, 1]}\n- - Permute: {to_rgb: True}\n+ - Permute: {}\nbatch_size: 1\n" }, { "change_type": "MODIFY", "old_path": "configs/mot/jde/_base_/jde_reader_1088x608.yml", "new_path": "configs/mot/jde/_base_/jde_reader_1088x608.yml", "diff": "@@ -2,6 +2,7 @@ worker_num: 2\nTrainReader:\nsample_transforms:\n- Decode: {}\n+ - RGBReverse: {}\n- AugmentHSV: {}\n- LetterBoxResize: {target_size: [608, 1088]}\n- MOTRandomAffine: {}\n@@ -9,6 +10,7 @@ TrainReader:\n- BboxXYXY2XYWH: {}\n- NormalizeBox: {}\n- NormalizeImage: {mean: [0, 0, 0], std: [1, 1, 1], is_scale: True}\n+ - RGBReverse: {}\n- Permute: {}\nbatch_transforms:\n- Gt2JDETargetThres:\n" }, { "change_type": "MODIFY", "old_path": "configs/mot/jde/_base_/jde_reader_576x320.yml", "new_path": "configs/mot/jde/_base_/jde_reader_576x320.yml", "diff": "@@ -2,6 +2,7 @@ worker_num: 2\nTrainReader:\nsample_transforms:\n- Decode: {}\n+ - RGBReverse: {}\n- AugmentHSV: {}\n- LetterBoxResize: {target_size: [320, 576]}\n- MOTRandomAffine: {}\n@@ -9,11 +10,12 @@ TrainReader:\n- BboxXYXY2XYWH: {}\n- NormalizeBox: {}\n- NormalizeImage: {mean: [0, 0, 0], std: [1, 1, 1], is_scale: True}\n+ - RGBReverse: {}\n- Permute: {}\nbatch_transforms:\n- Gt2JDETargetThres:\nanchor_masks: [[0, 1, 2, 3], [4, 5, 6, 7], [8, 9, 10, 11]]\n- anchors: [[[85,255], [120,320], [170,320], [340,320]],\n+ anchors: [[[85,255], [120,360], [170,420], [340,420]],\n[[21,64], [30,90], [43,128], [60,180]],\n[[6,16], [8,23], [11,32], [16,45]]]\ndownsample_ratios: [32, 16, 8]\n" }, { "change_type": "MODIFY", "old_path": "configs/mot/jde/_base_/jde_reader_864x480.yml", "new_path": "configs/mot/jde/_base_/jde_reader_864x480.yml", "diff": "@@ -2,6 +2,7 @@ worker_num: 2\nTrainReader:\nsample_transforms:\n- Decode: {}\n+ - RGBReverse: {}\n- AugmentHSV: {}\n- LetterBoxResize: {target_size: [480, 864]}\n- MOTRandomAffine: {}\n@@ -9,6 +10,7 @@ TrainReader:\n- BboxXYXY2XYWH: {}\n- NormalizeBox: {}\n- NormalizeImage: {mean: [0, 0, 0], std: [1, 1, 1], is_scale: True}\n+ - RGBReverse: {}\n- Permute: {}\nbatch_transforms:\n- Gt2JDETargetThres:\n" }, { "change_type": "MODIFY", "old_path": "configs/mot/jde/jde_darknet53_30e_576x320.yml", "new_path": "configs/mot/jde/jde_darknet53_30e_576x320.yml", "diff": "@@ -20,7 +20,7 @@ YOLOv3:\nfor_mot: True\nYOLOv3Head:\n- anchors: [[85,255], [120,320], [170,320], [340,320],\n+ anchors: [[85,255], [120,360], [170,420], 
[340,420],\n[21,64], [30,90], [43,128], [60,180],\n[6,16], [8,23], [11,32], [16,45]]\nanchor_masks: [[0, 1, 2, 3], [4, 5, 6, 7], [8, 9, 10, 11]]\n" }, { "change_type": "MODIFY", "old_path": "ppdet/data/transform/mot_operators.py", "new_path": "ppdet/data/transform/mot_operators.py", "diff": "@@ -36,11 +36,25 @@ from ppdet.utils.logger import setup_logger\nlogger = setup_logger(__name__)\n__all__ = [\n- 'LetterBoxResize', 'MOTRandomAffine', 'Gt2JDETargetThres',\n+ 'RGBReverse', 'LetterBoxResize', 'MOTRandomAffine', 'Gt2JDETargetThres',\n'Gt2JDETargetMax', 'Gt2FairMOTTarget'\n]\n+@register_op\n+class RGBReverse(BaseOperator):\n+ \"\"\"RGB to BGR, or BGR to RGB, sensitive to MOTRandomAffine\n+ \"\"\"\n+\n+ def __init__(self):\n+ super(RGBReverse, self).__init__()\n+\n+ def apply(self, sample, context=None):\n+ im = sample['image']\n+ sample['image'] = np.ascontiguousarray(im[:, :, ::-1])\n+ return sample\n+\n+\n@register_op\nclass LetterBoxResize(BaseOperator):\ndef __init__(self, target_size):\n" }, { "change_type": "MODIFY", "old_path": "ppdet/data/transform/operators.py", "new_path": "ppdet/data/transform/operators.py", "diff": "@@ -107,12 +107,10 @@ class BaseOperator(object):\n@register_op\nclass Decode(BaseOperator):\n- def __init__(self, to_rgb=True):\n+ def __init__(self):\n\"\"\" Transform the image data to numpy format following the rgb format\n\"\"\"\nsuper(Decode, self).__init__()\n- # TODO: remove this parameter\n- self.to_rgb = to_rgb\ndef apply(self, sample, context=None):\n\"\"\" load image if 'im_file' field is not empty but 'image' is\"\"\"\n@@ -126,7 +124,6 @@ class Decode(BaseOperator):\nim = cv2.imdecode(data, 1) # BGR mode, but need RGB mode\nif 'keep_ori_im' in sample and sample['keep_ori_im']:\nsample['ori_image'] = im\n- if self.to_rgb:\nim = cv2.cvtColor(im, cv2.COLOR_BGR2RGB)\nsample['image'] = im\n@@ -154,18 +151,14 @@ class Decode(BaseOperator):\n@register_op\nclass Permute(BaseOperator):\n- def __init__(self, to_rgb=False):\n+ def __init__(self):\n\"\"\"\nChange the channel to be (C, H, W)\n\"\"\"\nsuper(Permute, self).__init__()\n- # TODO: remove this parameter\n- self.to_rgb = to_rgb\ndef apply(self, sample, context=None):\nim = sample['image']\n- if self.to_rgb:\n- im = np.ascontiguousarray(im[:, :, ::-1])\nim = im.transpose((2, 0, 1))\nsample['image'] = im\nreturn sample\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
fix decode permute rgb (#3198)
499,333
31.05.2021 10:47:10
-28,800
3d669f97f995d17a66c554cea6e436d4c50dab74
fix pafnet_lite
[ { "change_type": "MODIFY", "old_path": "configs/ttfnet/_base_/pafnet_lite_reader.yml", "new_path": "configs/ttfnet/_base_/pafnet_lite_reader.yml", "diff": "@@ -18,6 +18,7 @@ TrainReader:\nshuffle: true\ndrop_last: true\nuse_shared_memory: true\n+ cutmix_epoch: 200\nEvalReader:\nsample_transforms:\n" }, { "change_type": "MODIFY", "old_path": "ppdet/modeling/heads/ttf_head.py", "new_path": "ppdet/modeling/heads/ttf_head.py", "diff": "@@ -86,11 +86,13 @@ class HMHead(nn.Layer):\nhead_conv.add_sublayer(name + '.act', nn.ReLU())\nself.feat = head_conv\nbias_init = float(-np.log((1 - 0.01) / 0.01))\n+ weight_attr = None if lite_head else ParamAttr(initializer=Normal(0,\n+ 0.01))\nself.head = nn.Conv2D(\nin_channels=ch_out,\nout_channels=num_classes,\nkernel_size=1,\n- weight_attr=ParamAttr(initializer=Normal(0, 0.01)),\n+ weight_attr=weight_attr,\nbias_attr=ParamAttr(\nlearning_rate=2.,\nregularizer=L2Decay(0.),\n@@ -160,12 +162,14 @@ class WHHead(nn.Layer):\nlearning_rate=2., regularizer=L2Decay(0.))))\nhead_conv.add_sublayer(name + '.act', nn.ReLU())\n+ weight_attr = None if lite_head else ParamAttr(initializer=Normal(0,\n+ 0.01))\nself.feat = head_conv\nself.head = nn.Conv2D(\nin_channels=ch_out,\nout_channels=4,\nkernel_size=1,\n- weight_attr=ParamAttr(initializer=Normal(0, 0.001)),\n+ weight_attr=weight_attr,\nbias_attr=ParamAttr(\nlearning_rate=2., regularizer=L2Decay(0.)))\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
fix pafnet_lite (#3196)
499,304
03.06.2021 12:32:52
-28,800
00848aea70834b86a04db012b11ae45426428b9f
fix voc dataset download
[ { "change_type": "MODIFY", "old_path": "ppdet/utils/download.py", "new_path": "ppdet/utils/download.py", "diff": "@@ -68,6 +68,9 @@ DATASETS = {\n(\n'http://host.robots.ox.ac.uk/pascal/VOC/voc2007/VOCtest_06-Nov-2007.tar',\n'b6e924de25625d8de591ea690078ad9f', ),\n+ (\n+ 'https://paddledet.bj.bcebos.com/data/label_list.txt',\n+ '5ae5d62183cfb6f6d3ac109359d06a1b', ),\n], [\"VOCdevkit/VOC2012\", \"VOCdevkit/VOC2007\"]),\n'wider_face': ([\n(\n@@ -346,7 +349,6 @@ def _download(url, path, md5sum=None):\nlogger.info(\"Downloading {} from {}\".format(fname, url))\n-\n# NOTE: windows path join may incur \\, which is invalid in url\nif sys.platform == \"win32\":\nurl = url.replace('\\\\', '/')\n@@ -439,6 +441,8 @@ def _decompress(fname):\nelif fname.find('zip') >= 0:\nwith zipfile.ZipFile(fname) as zf:\nzf.extractall(path=fpath_tmp)\n+ elif fname.find('.txt') >= 0:\n+ return\nelse:\nraise TypeError(\"Unsupport compress file type {}\".format(fname))\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
fix voc dataset download (#3252)
499,333
03.06.2021 14:33:59
-28,800
b7e44ebf94fd48d45f759e5a1aa51798328e04c7
correct pafnet head
[ { "change_type": "MODIFY", "old_path": "ppdet/modeling/heads/ttf_head.py", "new_path": "ppdet/modeling/heads/ttf_head.py", "diff": "@@ -62,7 +62,6 @@ class HMHead(nn.Layer):\nin_channels=ch_in if i == 0 else ch_out,\nout_channels=ch_out,\nnorm_type=norm_type))\n- head_conv.add_sublayer(lite_name + '.act', nn.ReLU6())\nelse:\nif dcn_head:\nhead_conv.add_sublayer(\n@@ -139,7 +138,6 @@ class WHHead(nn.Layer):\nin_channels=ch_in if i == 0 else ch_out,\nout_channels=ch_out,\nnorm_type=norm_type))\n- head_conv.add_sublayer(lite_name + '.act', nn.ReLU6())\nelse:\nif dcn_head:\nhead_conv.add_sublayer(\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
correct pafnet head (#3261)
499,313
03.06.2021 16:46:54
-28,800
44b5115dee9865b3537d9e19796bc6399239888e
fix typo in RandomErasing op
[ { "change_type": "MODIFY", "old_path": "ppdet/data/transform/operators.py", "new_path": "ppdet/data/transform/operators.py", "diff": "@@ -200,7 +200,7 @@ class RandomErasingImage(BaseOperator):\nsuper(RandomErasingImage, self).__init__()\nself.prob = prob\nself.lower = lower\n- self.heigher = heigher\n+ self.higher = higher\nself.aspect_ratio = aspect_ratio\ndef apply(self, sample):\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
fix typo in RandomErasing op (#3190)
499,304
04.06.2021 09:49:03
-28,800
edd7bc4d2b1c0eabd37fce757c34ed197f9d4837
fix segmentation is None in coco
[ { "change_type": "MODIFY", "old_path": "ppdet/data/source/coco.py", "new_path": "ppdet/data/source/coco.py", "diff": "@@ -175,7 +175,6 @@ class COCODataSet(DetDataset):\ngt_theta = np.zeros((num_bbox, 1), dtype=np.int32)\ngt_class = np.zeros((num_bbox, 1), dtype=np.int32)\nis_crowd = np.zeros((num_bbox, 1), dtype=np.int32)\n- difficult = np.zeros((num_bbox, 1), dtype=np.int32)\ngt_poly = [None] * num_bbox\nhas_segmentation = False\n@@ -189,8 +188,16 @@ class COCODataSet(DetDataset):\nis_crowd[i][0] = box['iscrowd']\n# check RLE format\nif 'segmentation' in box and box['iscrowd'] == 1:\n- gt_poly[i] = [[0.0, 0.0], ]\n+ gt_poly[i] = [[0.0, 0.0, 0.0, 0.0, 0.0, 0.0]]\nelif 'segmentation' in box and box['segmentation']:\n+ if not np.array(box['segmentation']\n+ ).size > 0 and not self.allow_empty:\n+ bboxes.pop(i)\n+ gt_poly.pop(i)\n+ np.delete(is_crowd, i)\n+ np.delete(gt_class, i)\n+ np.delete(gt_bbox, i)\n+ else:\ngt_poly[i] = box['segmentation']\nhas_segmentation = True\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
fix segmentation is None in coco (#3272)
499,333
04.06.2021 16:15:30
-28,800
9f9db4b706c8d2e2809a94e1ffd17cb46ddd7d8e
fix match weight
[ { "change_type": "MODIFY", "old_path": "ppdet/utils/checkpoint.py", "new_path": "ppdet/utils/checkpoint.py", "diff": "@@ -164,7 +164,7 @@ def match_state_dict(model_state_dict, weight_state_dict):\n# In Faster RCNN, res5 pretrained weights have prefix of backbone,\n# however, the corresponding model weights have difficult prefix,\n# bbox_head.\n- b = b.strip('backbone.')\n+ b = b[9:]\nreturn a == b or a.endswith(\".\" + b)\nmatch_matrix = np.zeros([len(model_keys), len(weight_keys)])\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
fix match weight (#3280)
499,304
04.06.2021 20:13:51
-28,800
12ae16e4c40c649b67455b2357468795b05bc519
fix trt inference when bs > 1
[ { "change_type": "MODIFY", "old_path": "deploy/python/infer.py", "new_path": "deploy/python/infer.py", "diff": "@@ -425,9 +425,15 @@ def load_predictor(model_dir,\nuse_calib_mode=trt_calib_mode)\nif use_dynamic_shape:\n- min_input_shape = {'image': [1, 3, trt_min_shape, trt_min_shape]}\n- max_input_shape = {'image': [1, 3, trt_max_shape, trt_max_shape]}\n- opt_input_shape = {'image': [1, 3, trt_opt_shape, trt_opt_shape]}\n+ min_input_shape = {\n+ 'image': [batch_size, 3, trt_min_shape, trt_min_shape]\n+ }\n+ max_input_shape = {\n+ 'image': [batch_size, 3, trt_max_shape, trt_max_shape]\n+ }\n+ opt_input_shape = {\n+ 'image': [batch_size, 3, trt_opt_shape, trt_opt_shape]\n+ }\nconfig.set_trt_dynamic_shape_info(min_input_shape, max_input_shape,\nopt_input_shape)\nprint('trt set dynamic shape done!')\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
fix trt inference when bs > 1 (#3283)
499,304
04.06.2021 21:00:08
-28,800
32abf1a06fc80258aa5bc0c5be206da9b54bd896
Remove redundant func in json_results
[ { "change_type": "MODIFY", "old_path": "ppdet/metrics/json_results.py", "new_path": "ppdet/metrics/json_results.py", "diff": "@@ -55,31 +55,7 @@ def get_det_poly_res(bboxes, bbox_nums, image_id, label_to_cat_id_map, bias=0):\nnum_id, score, x1, y1, x2, y2, x3, y3, x4, y4 = dt.tolist()\nif int(num_id) < 0:\ncontinue\n- category_id = int(num_id)\n- rbox = [x1, y1, x2, y2, x3, y3, x4, y4]\n- dt_res = {\n- 'image_id': cur_image_id,\n- 'category_id': category_id,\n- 'bbox': rbox,\n- 'score': score\n- }\n- det_res.append(dt_res)\n- return det_res\n-\n-\n-def get_det_poly_res(bboxes, bbox_nums, image_id, label_to_cat_id_map, bias=0):\n- det_res = []\n- k = 0\n- for i in range(len(bbox_nums)):\n- cur_image_id = int(image_id[i][0])\n- det_nums = bbox_nums[i]\n- for j in range(det_nums):\n- dt = bboxes[k]\n- k = k + 1\n- num_id, score, x1, y1, x2, y2, x3, y3, x4, y4 = dt.tolist()\n- if int(num_id) < 0:\n- continue\n- category_id = int(num_id)\n+ category_id = label_to_cat_id_map[int(num_id)]\nrbox = [x1, y1, x2, y2, x3, y3, x4, y4]\ndt_res = {\n'image_id': cur_image_id,\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
Remove redundant func in json_results (#3284)
499,304
05.06.2021 21:54:40
-28,800
1adb26eff092ed60c8189446ff2d0b2483fc90e7
add solov2_r101vd model
[ { "change_type": "MODIFY", "old_path": "configs/solov2/README.md", "new_path": "configs/solov2/README.md", "diff": "@@ -21,6 +21,7 @@ SOLOv2 (Segmenting Objects by Locations) is a fast instance segmentation framewo\n| SOLOv2 (Paper) | X101-DCN-FPN | True | 3x | 42.4 | 5.9 | V100 | - | - |\n| SOLOv2 | R50-FPN | False | 1x | 35.5 | 21.9 | V100 | [model](https://paddledet.bj.bcebos.com/models/solov2_r50_fpn_1x_coco.pdparams) | [config](https://github.com/PaddlePaddle/PaddleDetection/tree/develop/configs/solov2/solov2_r50_fpn_1x_coco.yml) |\n| SOLOv2 | R50-FPN | True | 3x | 38.0 | 21.9 | V100 | [model](https://paddledet.bj.bcebos.com/models/solov2_r50_fpn_3x_coco.pdparams) | [config](https://github.com/PaddlePaddle/PaddleDetection/tree/develop/configs/solov2/solov2_r50_fpn_3x_coco.yml) |\n+| SOLOv2 | R101vd-FPN | True | 3x | 42.7 | 12.1 | V100 | [model](https://paddledet.bj.bcebos.com/models/solov2_r101_vd_fpn_3x_coco.pdparams) | [config](https://github.com/PaddlePaddle/PaddleDetection/tree/develop/configs/solov2/solov2_r101_vd_fpn_3x_coco.yml) |\n**Notes:**\n" }, { "change_type": "MODIFY", "old_path": "configs/solov2/_base_/solov2_r50_fpn.yml", "new_path": "configs/solov2/_base_/solov2_r50_fpn.yml", "diff": "@@ -9,7 +9,6 @@ SOLOv2:\nResNet:\ndepth: 50\n- norm_type: bn\nfreeze_at: 0\nreturn_idx: [0,1,2,3]\nnum_stages: 4\n" }, { "change_type": "ADD", "old_path": null, "new_path": "configs/solov2/solov2_r101_vd_fpn_3x_coco.yml", "diff": "+_BASE_: [\n+ '../datasets/coco_instance.yml',\n+ '../runtime.yml',\n+ '_base_/solov2_r50_fpn.yml',\n+ '_base_/optimizer_1x.yml',\n+ '_base_/solov2_reader.yml',\n+]\n+pretrain_weights: https://paddledet.bj.bcebos.com/models/pretrained/ResNet101_vd_pretrained.pdparams\n+weights: output/solov2_r101_vd_fpn_3x_coco/model_final\n+epoch: 36\n+use_ema: true\n+ema_decay: 0.9998\n+\n+ResNet:\n+ depth: 101\n+ variant: d\n+ freeze_at: 0\n+ return_idx: [0,1,2,3]\n+ dcn_v2_stages: [1,2,3]\n+ num_stages: 4\n+\n+SOLOv2Head:\n+ seg_feat_channels: 512\n+ stacked_convs: 4\n+ num_grids: [40, 36, 24, 16, 12]\n+ kernel_out_channels: 256\n+ solov2_loss: SOLOv2Loss\n+ mask_nms: MaskMatrixNMS\n+ dcn_v2_stages: [0, 1, 2, 3]\n+\n+SOLOv2MaskHead:\n+ mid_channels: 128\n+ out_channels: 256\n+ start_level: 0\n+ end_level: 3\n+ use_dcn_in_tower: True\n+\n+\n+LearningRate:\n+ base_lr: 0.01\n+ schedulers:\n+ - !PiecewiseDecay\n+ gamma: 0.1\n+ milestones: [24, 33]\n+ - !LinearWarmup\n+ start_factor: 0.\n+ steps: 2000\n+\n+TrainReader:\n+ sample_transforms:\n+ - Decode: {}\n+ - Poly2Mask: {}\n+ - RandomResize: {interp: 1,\n+ target_size: [[640, 1333], [672, 1333], [704, 1333], [736, 1333], [768, 1333], [800, 1333]],\n+ keep_ratio: True}\n+ - RandomFlip: {}\n+ - NormalizeImage: {is_scale: true, mean: [0.485,0.456,0.406], std: [0.229, 0.224,0.225]}\n+ - Permute: {}\n+ batch_transforms:\n+ - PadBatch: {pad_to_stride: 32}\n+ - Gt2Solov2Target: {num_grids: [40, 36, 24, 16, 12],\n+ scale_ranges: [[1, 96], [48, 192], [96, 384], [192, 768], [384, 2048]],\n+ coord_sigma: 0.2}\n+ batch_size: 2\n+ shuffle: true\n+ drop_last: true\n" }, { "change_type": "MODIFY", "old_path": "ppdet/modeling/heads/solov2_head.py", "new_path": "ppdet/modeling/heads/solov2_head.py", "diff": "@@ -43,6 +43,7 @@ class SOLOv2MaskHead(nn.Layer):\nend_level (int): The position where the input ends.\nuse_dcn_in_tower (bool): Whether to use dcn in tower or not.\n\"\"\"\n+ __shared__ = ['norm_type']\ndef __init__(self,\nin_channels=256,\n@@ -50,7 +51,8 @@ class 
SOLOv2MaskHead(nn.Layer):\nout_channels=256,\nstart_level=0,\nend_level=3,\n- use_dcn_in_tower=False):\n+ use_dcn_in_tower=False,\n+ norm_type='gn'):\nsuper(SOLOv2MaskHead, self).__init__()\nassert start_level >= 0 and end_level >= start_level\nself.in_channels = in_channels\n@@ -58,24 +60,22 @@ class SOLOv2MaskHead(nn.Layer):\nself.mid_channels = mid_channels\nself.use_dcn_in_tower = use_dcn_in_tower\nself.range_level = end_level - start_level + 1\n- # TODO: add DeformConvNorm\n- conv_type = [ConvNormLayer]\n- self.conv_func = conv_type[0]\n- if self.use_dcn_in_tower:\n- self.conv_func = conv_type[1]\n+ self.use_dcn = True if self.use_dcn_in_tower else False\nself.convs_all_levels = []\n+ self.norm_type = norm_type\nfor i in range(start_level, end_level + 1):\nconv_feat_name = 'mask_feat_head.convs_all_levels.{}'.format(i)\nconv_pre_feat = nn.Sequential()\nif i == start_level:\nconv_pre_feat.add_sublayer(\nconv_feat_name + '.conv' + str(i),\n- self.conv_func(\n+ ConvNormLayer(\nch_in=self.in_channels,\nch_out=self.mid_channels,\nfilter_size=3,\nstride=1,\n- norm_type='gn'))\n+ use_dcn=self.use_dcn,\n+ norm_type=self.norm_type))\nself.add_sublayer('conv_pre_feat' + str(i), conv_pre_feat)\nself.convs_all_levels.append(conv_pre_feat)\nelse:\n@@ -87,12 +87,13 @@ class SOLOv2MaskHead(nn.Layer):\nch_in = self.mid_channels\nconv_pre_feat.add_sublayer(\nconv_feat_name + '.conv' + str(j),\n- self.conv_func(\n+ ConvNormLayer(\nch_in=ch_in,\nch_out=self.mid_channels,\nfilter_size=3,\nstride=1,\n- norm_type='gn'))\n+ use_dcn=self.use_dcn,\n+ norm_type=self.norm_type))\nconv_pre_feat.add_sublayer(\nconv_feat_name + '.conv' + str(j) + 'act', nn.ReLU())\nconv_pre_feat.add_sublayer(\n@@ -105,12 +106,13 @@ class SOLOv2MaskHead(nn.Layer):\nconv_pred_name = 'mask_feat_head.conv_pred.0'\nself.conv_pred = self.add_sublayer(\nconv_pred_name,\n- self.conv_func(\n+ ConvNormLayer(\nch_in=self.mid_channels,\nch_out=self.out_channels,\nfilter_size=1,\nstride=1,\n- norm_type='gn'))\n+ use_dcn=self.use_dcn,\n+ norm_type=self.norm_type))\ndef forward(self, inputs):\n\"\"\"\n@@ -165,7 +167,7 @@ class SOLOv2Head(nn.Layer):\nmask_nms (object): MaskMatrixNMS instance.\n\"\"\"\n__inject__ = ['solov2_loss', 'mask_nms']\n- __shared__ = ['num_classes']\n+ __shared__ = ['norm_type', 'num_classes']\ndef __init__(self,\nnum_classes=80,\n@@ -179,7 +181,8 @@ class SOLOv2Head(nn.Layer):\nsolov2_loss=None,\nscore_threshold=0.1,\nmask_threshold=0.5,\n- mask_nms=None):\n+ mask_nms=None,\n+ norm_type='gn'):\nsuper(SOLOv2Head, self).__init__()\nself.num_classes = num_classes\nself.in_channels = in_channels\n@@ -194,33 +197,33 @@ class SOLOv2Head(nn.Layer):\nself.mask_nms = mask_nms\nself.score_threshold = score_threshold\nself.mask_threshold = mask_threshold\n+ self.norm_type = norm_type\n- conv_type = [ConvNormLayer]\n- self.conv_func = conv_type[0]\nself.kernel_pred_convs = []\nself.cate_pred_convs = []\nfor i in range(self.stacked_convs):\n- if i in self.dcn_v2_stages:\n- self.conv_func = conv_type[1]\n+ use_dcn = True if i in self.dcn_v2_stages else False\nch_in = self.in_channels + 2 if i == 0 else self.seg_feat_channels\nkernel_conv = self.add_sublayer(\n'bbox_head.kernel_convs.' + str(i),\n- self.conv_func(\n+ ConvNormLayer(\nch_in=ch_in,\nch_out=self.seg_feat_channels,\nfilter_size=3,\nstride=1,\n- norm_type='gn'))\n+ use_dcn=use_dcn,\n+ norm_type=self.norm_type))\nself.kernel_pred_convs.append(kernel_conv)\nch_in = self.in_channels if i == 0 else self.seg_feat_channels\ncate_conv = self.add_sublayer(\n'bbox_head.cate_convs.' 
+ str(i),\n- self.conv_func(\n+ ConvNormLayer(\nch_in=ch_in,\nch_out=self.seg_feat_channels,\nfilter_size=3,\nstride=1,\n- norm_type='gn'))\n+ use_dcn=use_dcn,\n+ norm_type=self.norm_type))\nself.cate_pred_convs.append(cate_conv)\nself.solo_kernel = self.add_sublayer(\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
add solov2_r101vd model (#3286)
499,304
08.06.2021 10:32:43
-28,800
c2bc4b4afaab03c39150e62247dcc447d039cdfc
add armv7 in lite demo
[ { "change_type": "ADD", "old_path": null, "new_path": "deploy/lite/Makefile_armv7", "diff": "+ARM_ABI = arm7\n+export ARM_ABI\n+\n+include ../Makefile.def\n+\n+LITE_ROOT=../../../\n+\n+THIRD_PARTY_DIR=${LITE_ROOT}/third_party\n+\n+OPENCV_VERSION=opencv4.1.0\n+\n+OPENCV_LIBS = ${THIRD_PARTY_DIR}/${OPENCV_VERSION}/armeabi-v7a/libs/libopencv_imgcodecs.a \\\n+ ${THIRD_PARTY_DIR}/${OPENCV_VERSION}/armeabi-v7a/libs/libopencv_imgproc.a \\\n+ ${THIRD_PARTY_DIR}/${OPENCV_VERSION}/armeabi-v7a/libs/libopencv_core.a \\\n+ ${THIRD_PARTY_DIR}/${OPENCV_VERSION}/armeabi-v7a/3rdparty/libs/libtegra_hal.a \\\n+ ${THIRD_PARTY_DIR}/${OPENCV_VERSION}/armeabi-v7a/3rdparty/libs/liblibjpeg-turbo.a \\\n+ ${THIRD_PARTY_DIR}/${OPENCV_VERSION}/armeabi-v7a/3rdparty/libs/liblibwebp.a \\\n+ ${THIRD_PARTY_DIR}/${OPENCV_VERSION}/armeabi-v7a/3rdparty/libs/liblibpng.a \\\n+ ${THIRD_PARTY_DIR}/${OPENCV_VERSION}/armeabi-v7a/3rdparty/libs/liblibjasper.a \\\n+ ${THIRD_PARTY_DIR}/${OPENCV_VERSION}/armeabi-v7a/3rdparty/libs/liblibtiff.a \\\n+ ${THIRD_PARTY_DIR}/${OPENCV_VERSION}/armeabi-v7a/3rdparty/libs/libIlmImf.a \\\n+ ${THIRD_PARTY_DIR}/${OPENCV_VERSION}/armeabi-v7a/3rdparty/libs/libtbb.a \\\n+ ${THIRD_PARTY_DIR}/${OPENCV_VERSION}/armeabi-v7a/3rdparty/libs/libcpufeatures.a\n+\n+OPENCV_INCLUDE = -I../../../third_party/${OPENCV_VERSION}/armeabi-v7a/include\n+\n+CXX_INCLUDES = $(INCLUDES) ${OPENCV_INCLUDE} -I$(LITE_ROOT)/cxx/include\n+\n+CXX_LIBS = ${OPENCV_LIBS} -L$(LITE_ROOT)/cxx/lib/ -lpaddle_light_api_shared $(SYSTEM_LIBS)\n+\n+###############################################################\n+# How to use one of static libaray: #\n+# `libpaddle_api_full_bundled.a` #\n+# `libpaddle_api_light_bundled.a` #\n+###############################################################\n+# Note: default use lite's shared library. #\n+###############################################################\n+# 1. Comment above line using `libpaddle_light_api_shared.so`\n+# 2. 
Undo comment below line using `libpaddle_api_light_bundled.a`\n+\n+#CXX_LIBS = $(LITE_ROOT)/cxx/lib/libpaddle_api_light_bundled.a $(SYSTEM_LIBS)\n+\n+detect_system: fetch_opencv detect_system.o\n+ $(CC) $(SYSROOT_LINK) $(CXXFLAGS_LINK) detect_system.o -o detect_system $(CXX_LIBS) $(LDFLAGS)\n+\n+detect_system.o: run_detection.cc\n+ $(CC) $(SYSROOT_COMPLILE) $(CXX_DEFINES) $(CXX_INCLUDES) $(CXX_FLAGS) -o detect_system.o -c run_detection.cc\n+\n+fetch_opencv:\n+ @ test -d ${THIRD_PARTY_DIR} || mkdir ${THIRD_PARTY_DIR}\n+ @ test -e ${THIRD_PARTY_DIR}/${OPENCV_VERSION}.tar.gz || \\\n+ (echo \"fetch opencv libs\" && \\\n+ wget -P ${THIRD_PARTY_DIR} https://paddle-inference-dist.bj.bcebos.com/${OPENCV_VERSION}.tar.gz)\n+ @ test -d ${THIRD_PARTY_DIR}/${OPENCV_VERSION} || \\\n+ tar -zxvf ${THIRD_PARTY_DIR}/${OPENCV_VERSION}.tar.gz -C ${THIRD_PARTY_DIR}\n+\n+\n+.PHONY: clean\n+clean:\n+ rm -f detect_system.o\n+ rm -f detect_system\n" }, { "change_type": "MODIFY", "old_path": "deploy/lite/run_detection.cc", "new_path": "deploy/lite/run_detection.cc", "diff": "@@ -35,7 +35,7 @@ struct Object {\n// Object for storing all preprocessed data\nstruct ImageBlob {\n// image width and height\n- std::vector<float> im_shape_;\n+ std::vector<int> im_shape_;\n// Buffer for image data after preprocessing\nconst float* im_data_;\n// Scale factor for image size to origin image size\n@@ -270,8 +270,8 @@ ImageBlob prepare_imgdata(const cv::Mat& img,\nint width = target_size_[0];\nint height = target_size_[1];\nimg_data.im_shape_ = {\n- static_cast<float>(target_size_[0]),\n- static_cast<float>(target_size_[1])\n+ static_cast<int>(target_size_[0]),\n+ static_cast<int>(target_size_[1])\n};\nimg_data.scale_factor_ = {\n" }, { "change_type": "ADD", "old_path": null, "new_path": "static/deploy/lite/Makefile_armv7", "diff": "+ARM_ABI = arm7\n+export ARM_ABI\n+\n+include ../Makefile.def\n+\n+LITE_ROOT=../../../\n+\n+THIRD_PARTY_DIR=${LITE_ROOT}/third_party\n+\n+OPENCV_VERSION=opencv4.1.0\n+\n+OPENCV_LIBS = ${THIRD_PARTY_DIR}/${OPENCV_VERSION}/armeabi-v7a/libs/libopencv_imgcodecs.a \\\n+ ${THIRD_PARTY_DIR}/${OPENCV_VERSION}/armeabi-v7a/libs/libopencv_imgproc.a \\\n+ ${THIRD_PARTY_DIR}/${OPENCV_VERSION}/armeabi-v7a/libs/libopencv_core.a \\\n+ ${THIRD_PARTY_DIR}/${OPENCV_VERSION}/armeabi-v7a/3rdparty/libs/libtegra_hal.a \\\n+ ${THIRD_PARTY_DIR}/${OPENCV_VERSION}/armeabi-v7a/3rdparty/libs/liblibjpeg-turbo.a \\\n+ ${THIRD_PARTY_DIR}/${OPENCV_VERSION}/armeabi-v7a/3rdparty/libs/liblibwebp.a \\\n+ ${THIRD_PARTY_DIR}/${OPENCV_VERSION}/armeabi-v7a/3rdparty/libs/liblibpng.a \\\n+ ${THIRD_PARTY_DIR}/${OPENCV_VERSION}/armeabi-v7a/3rdparty/libs/liblibjasper.a \\\n+ ${THIRD_PARTY_DIR}/${OPENCV_VERSION}/armeabi-v7a/3rdparty/libs/liblibtiff.a \\\n+ ${THIRD_PARTY_DIR}/${OPENCV_VERSION}/armeabi-v7a/3rdparty/libs/libIlmImf.a \\\n+ ${THIRD_PARTY_DIR}/${OPENCV_VERSION}/armeabi-v7a/3rdparty/libs/libtbb.a \\\n+ ${THIRD_PARTY_DIR}/${OPENCV_VERSION}/armeabi-v7a/3rdparty/libs/libcpufeatures.a\n+\n+OPENCV_INCLUDE = -I../../../third_party/${OPENCV_VERSION}/armeabi-v7a/include\n+\n+CXX_INCLUDES = $(INCLUDES) ${OPENCV_INCLUDE} -I$(LITE_ROOT)/cxx/include\n+\n+CXX_LIBS = ${OPENCV_LIBS} -L$(LITE_ROOT)/cxx/lib/ -lpaddle_light_api_shared $(SYSTEM_LIBS)\n+\n+###############################################################\n+# How to use one of static libaray: #\n+# `libpaddle_api_full_bundled.a` #\n+# `libpaddle_api_light_bundled.a` #\n+###############################################################\n+# Note: default use lite's shared library. 
#\n+###############################################################\n+# 1. Comment above line using `libpaddle_light_api_shared.so`\n+# 2. Undo comment below line using `libpaddle_api_light_bundled.a`\n+\n+#CXX_LIBS = $(LITE_ROOT)/cxx/lib/libpaddle_api_light_bundled.a $(SYSTEM_LIBS)\n+\n+detect_system: fetch_opencv detect_system.o\n+ $(CC) $(SYSROOT_LINK) $(CXXFLAGS_LINK) detect_system.o -o detect_system $(CXX_LIBS) $(LDFLAGS)\n+\n+detect_system.o: run_detection.cc\n+ $(CC) $(SYSROOT_COMPLILE) $(CXX_DEFINES) $(CXX_INCLUDES) $(CXX_FLAGS) -o detect_system.o -c run_detection.cc\n+\n+fetch_opencv:\n+ @ test -d ${THIRD_PARTY_DIR} || mkdir ${THIRD_PARTY_DIR}\n+ @ test -e ${THIRD_PARTY_DIR}/${OPENCV_VERSION}.tar.gz || \\\n+ (echo \"fetch opencv libs\" && \\\n+ wget -P ${THIRD_PARTY_DIR} https://paddle-inference-dist.bj.bcebos.com/${OPENCV_VERSION}.tar.gz)\n+ @ test -d ${THIRD_PARTY_DIR}/${OPENCV_VERSION} || \\\n+ tar -zxvf ${THIRD_PARTY_DIR}/${OPENCV_VERSION}.tar.gz -C ${THIRD_PARTY_DIR}\n+\n+\n+.PHONY: clean\n+clean:\n+ rm -f detect_system.o\n+ rm -f detect_system\n" }, { "change_type": "MODIFY", "old_path": "static/deploy/lite/run_detection.cc", "new_path": "static/deploy/lite/run_detection.cc", "diff": "@@ -35,7 +35,7 @@ struct Object {\n// Object for storing all preprocessed data\nstruct ImageBlob {\n// image width and height\n- std::vector<float> im_shape_;\n+ std::vector<int> im_shape_;\n// Buffer for image data after preprocessing\nconst float* im_data_;\nstd::vector<float> mean_;\n@@ -268,8 +268,8 @@ ImageBlob prepare_imgdata(const cv::Mat& img,\nint width = target_size_[0];\nint height = target_size_[1];\nimg_data.im_shape_ = {\n- static_cast<float>(target_size_[0]),\n- static_cast<float>(target_size_[1])\n+ static_cast<int>(target_size_[0]),\n+ static_cast<int>(target_size_[1])\n};\nstd::vector<float> mean_;\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
add armv7 in lite demo (#3306)
499,333
08.06.2021 21:30:25
-28,800
e5ebac111bd908164f4a3822fb46161be2329d73
fix pafnet reader
[ { "change_type": "MODIFY", "old_path": "configs/ttfnet/_base_/pafnet_lite_reader.yml", "new_path": "configs/ttfnet/_base_/pafnet_lite_reader.yml", "diff": "@@ -2,8 +2,7 @@ worker_num: 2\nTrainReader:\nsample_transforms:\n- Decode: {}\n- - Cutmix: {alpha: 1.5, beta: 1.5}\n- - RandomDistort: {}\n+ - RandomDistort: {brightness: [-32., 32., 0.5], random_apply: False, random_channel: True}\n- RandomExpand: {fill_value: [123.675, 116.28, 103.53]}\n- RandomCrop: {aspect_ratio: NULL, cover_all_box: True}\n- RandomFlip: {}\n@@ -18,7 +17,6 @@ TrainReader:\nshuffle: true\ndrop_last: true\nuse_shared_memory: true\n- cutmix_epoch: 200\nEvalReader:\nsample_transforms:\n" }, { "change_type": "MODIFY", "old_path": "configs/ttfnet/_base_/pafnet_reader.yml", "new_path": "configs/ttfnet/_base_/pafnet_reader.yml", "diff": "@@ -2,8 +2,7 @@ worker_num: 2\nTrainReader:\nsample_transforms:\n- Decode: {}\n- - Cutmix: {alpha: 1.5, beta: 1.5}\n- - RandomDistort: {random_apply: false, random_channel: true}\n+ - RandomDistort: {brightness: [-32., 32., 0.5], random_apply: false, random_channel: true}\n- RandomExpand: {fill_value: [123.675, 116.28, 103.53]}\n- RandomCrop: {aspect_ratio: NULL, cover_all_box: True}\n- RandomFlip: {prob: 0.5}\n@@ -17,7 +16,6 @@ TrainReader:\nshuffle: true\ndrop_last: true\nuse_shared_memory: true\n- mixup_epoch: 100\nEvalReader:\nsample_transforms:\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
fix pafnet reader (#3329)
499,304
09.06.2021 10:54:09
-28,800
796874cfaaf63ee3d5ffcaca1da15937b7600c11
cherry-pick some pr
[ { "change_type": "MODIFY", "old_path": ".gitignore", "new_path": ".gitignore", "diff": "@@ -61,7 +61,6 @@ coverage.xml\n# Sphinx documentation\n/docs/_build/\n-*.json\n*.tar\n*.pyc\n" }, { "change_type": "MODIFY", "old_path": "ppdet/modeling/post_process.py", "new_path": "ppdet/modeling/post_process.py", "diff": "@@ -50,7 +50,6 @@ class BBoxPostProcess(nn.Layer):\ndtype='float32'))\nself.fake_bbox_num = paddle.to_tensor(np.array([1], dtype='int32'))\n-\ndef forward(self, head_out, rois, im_shape, scale_factor):\n\"\"\"\nDecode the bbox and do NMS if needed.\n@@ -84,7 +83,7 @@ class BBoxPostProcess(nn.Layer):\nCurrently only support bs = 1.\nArgs:\n- bbox_pred (Tensor): The output bboxes with shape [N, 6] after decode\n+ bboxes (Tensor): The output bboxes with shape [N, 6] after decode\nand NMS, including labels, scores and bboxes.\nbbox_num (Tensor): The number of prediction boxes of each batch with\nshape [1], and is N.\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
cherry-pick some pr (#3333)
499,396
09.06.2021 12:58:48
-28,800
18596b0146c6a0fd8afce2b74b5f8912797eccb4
fix bug in Pad transform
[ { "change_type": "MODIFY", "old_path": "ppdet/data/transform/operators.py", "new_path": "ppdet/data/transform/operators.py", "diff": "@@ -1845,7 +1845,8 @@ class Pad(BaseOperator):\nassert pad_mode in [\n-1, 0, 1, 2\n], 'currently only supports four modes [-1, 0, 1, 2]'\n- assert pad_mode == -1 and offsets, 'if pad_mode is -1, offsets should not be None'\n+ if pad_mode == -1:\n+ assert offsets, 'if pad_mode is -1, offsets should not be None'\nself.size = size\nself.size_divisor = size_divisor\n@@ -1912,7 +1913,7 @@ class Pad(BaseOperator):\nim_h < h and im_w < w\n), '(h, w) of target size should be greater than (im_h, im_w)'\nelse:\n- h = np.ceil(im_h // self.size_divisor) * self.size_divisor\n+ h = np.ceil(im_h / self.size_divisor) * self.size_divisor\nw = np.ceil(im_w / self.size_divisor) * self.size_divisor\nif h == im_h and w == im_w:\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
fix bug in Pad transform (#2569)
499,304
10.06.2021 13:19:23
-28,800
9d942c50663d47173eb78b4450ce6779e9aeb243
fix np.int warning in operator
[ { "change_type": "MODIFY", "old_path": "ppdet/data/transform/operators.py", "new_path": "ppdet/data/transform/operators.py", "diff": "@@ -1506,8 +1506,8 @@ class Cutmix(BaseOperator):\nw = max(img1.shape[1], img2.shape[1])\ncut_rat = np.sqrt(1. - factor)\n- cut_w = np.int(w * cut_rat)\n- cut_h = np.int(h * cut_rat)\n+ cut_w = np.int32(w * cut_rat)\n+ cut_h = np.int32(h * cut_rat)\n# uniform\ncx = np.random.randint(w)\n" }, { "change_type": "MODIFY", "old_path": "static/ppdet/data/transform/operators.py", "new_path": "static/ppdet/data/transform/operators.py", "diff": "@@ -1343,8 +1343,8 @@ class CutmixImage(BaseOperator):\nw = max(img1.shape[1], img2.shape[1])\ncut_rat = np.sqrt(1. - factor)\n- cut_w = np.int(w * cut_rat)\n- cut_h = np.int(h * cut_rat)\n+ cut_w = np.int32(w * cut_rat)\n+ cut_h = np.int32(h * cut_rat)\n# uniform\ncx = np.random.randint(w)\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
fix np.int warning in operator (#3344)
499,304
11.06.2021 17:20:58
-28,800
6a849313125114d869dcafad003f70a203bb4fe0
fix distill_prune export model
[ { "change_type": "MODIFY", "old_path": "ppdet/slim/__init__.py", "new_path": "ppdet/slim/__init__.py", "diff": "@@ -47,6 +47,7 @@ def build_slim_model(cfg, slim_cfg, mode='train'):\nmodel = pruner(model)\nload_pretrain_weight(model, weights)\ncfg['model'] = model\n+ cfg['slim_type'] = cfg.slim\nelse:\nload_config(slim_cfg)\nmodel = create(cfg.architecture)\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
fix distill_prune export model (#3362)
499,395
13.06.2021 15:51:03
-28,800
990390258339ec1fd92890235036db56a6ff8acd
fix ema copy problem and correct config of ppyolo in static graph
[ { "change_type": "MODIFY", "old_path": "ppdet/engine/trainer.py", "new_path": "ppdet/engine/trainer.py", "diff": "@@ -18,6 +18,7 @@ from __future__ import print_function\nimport os\nimport sys\n+import copy\nimport time\nimport random\nimport datetime\n@@ -333,7 +334,7 @@ class Trainer(object):\n# apply ema weight on model\nif self.use_ema:\n- weight = self.model.state_dict()\n+ weight = copy.deepcopy(self.model.state_dict())\nself.model.set_dict(self.ema.apply())\nself._compose_callback.on_epoch_end(self.status)\n" }, { "change_type": "MODIFY", "old_path": "static/configs/ppyolo/ppyolov2_reader.yml", "new_path": "static/configs/ppyolo/ppyolov2_reader.yml", "diff": "@@ -17,7 +17,6 @@ TrainReader:\nbeta: 1.5\n- !ColorDistort {}\n- !RandomExpand\n- ratio: 2.0\nfill_value: [123.675, 116.28, 103.53]\n- !RandomCrop {}\n- !RandomFlipImage\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
fix ema copy problem and correct config of ppyolo in static graph (#3370)
499,348
15.06.2021 13:54:32
-28,800
a30e9869ded4e9677cdc4163b5c2535664b818c6
add dark model_zoo; add config dark_hrnet_w32_384x288.yml; test=document_fix;
[ { "change_type": "ADD", "old_path": null, "new_path": "configs/keypoint/hrnet/dark_hrnet_w32_384x288.yml", "diff": "+use_gpu: true\n+log_iter: 5\n+save_dir: output\n+snapshot_epoch: 10\n+weights: output/hrnet_w32_384x288/model_final\n+epoch: 210\n+num_joints: &num_joints 17\n+pixel_std: &pixel_std 200\n+metric: KeyPointTopDownCOCOEval\n+num_classes: 1\n+train_height: &train_height 384\n+train_width: &train_width 288\n+trainsize: &trainsize [*train_width, *train_height]\n+hmsize: &hmsize [72, 96]\n+flip_perm: &flip_perm [[1, 2], [3, 4], [5, 6], [7, 8], [9, 10], [11, 12], [13, 14], [15, 16]]\n+\n+\n+#####model\n+architecture: TopDownHRNet\n+pretrain_weights: https://paddledet.bj.bcebos.com/models/pretrained/Trunc_HRNet_W32_C_pretrained.pdparams\n+\n+TopDownHRNet:\n+ backbone: HRNet\n+ post_process: HRNetPostProcess\n+ flip_perm: *flip_perm\n+ num_joints: *num_joints\n+ width: &width 32\n+ loss: KeyPointMSELoss\n+ flip: true\n+\n+HRNet:\n+ width: *width\n+ freeze_at: -1\n+ freeze_norm: false\n+ return_idx: [0]\n+\n+KeyPointMSELoss:\n+ use_target_weight: true\n+\n+\n+#####optimizer\n+LearningRate:\n+ base_lr: 0.0005\n+ schedulers:\n+ - !PiecewiseDecay\n+ milestones: [170, 200]\n+ gamma: 0.1\n+ - !LinearWarmup\n+ start_factor: 0.001\n+ steps: 1000\n+\n+OptimizerBuilder:\n+ optimizer:\n+ type: Adam\n+ regularizer:\n+ factor: 0.0\n+ type: L2\n+\n+\n+#####data\n+TrainDataset:\n+ !KeypointTopDownCocoDataset\n+ image_dir: train2017\n+ anno_path: annotations/person_keypoints_train2017.json\n+ dataset_dir: dataset/coco\n+ num_joints: *num_joints\n+ trainsize: *trainsize\n+ pixel_std: *pixel_std\n+ use_gt_bbox: True\n+\n+\n+EvalDataset:\n+ !KeypointTopDownCocoDataset\n+ image_dir: val2017\n+ anno_path: annotations/person_keypoints_val2017.json\n+ dataset_dir: dataset/coco\n+ bbox_file: bbox.json\n+ num_joints: *num_joints\n+ trainsize: *trainsize\n+ pixel_std: *pixel_std\n+ use_gt_bbox: True\n+ image_thre: 0.0\n+\n+\n+TestDataset:\n+ !ImageFolder\n+ anno_path: dataset/coco/keypoint_imagelist.txt\n+\n+worker_num: 2\n+global_mean: &global_mean [0.485, 0.456, 0.406]\n+global_std: &global_std [0.229, 0.224, 0.225]\n+TrainReader:\n+ sample_transforms:\n+ - RandomFlipHalfBodyTransform:\n+ scale: 0.5\n+ rot: 40\n+ num_joints_half_body: 8\n+ prob_half_body: 0.3\n+ pixel_std: *pixel_std\n+ trainsize: *trainsize\n+ upper_body_ids: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]\n+ flip_pairs: *flip_perm\n+ - TopDownAffine:\n+ trainsize: *trainsize\n+ - ToHeatmapsTopDown_DARK:\n+ hmsize: *hmsize\n+ sigma: 2\n+ batch_transforms:\n+ - NormalizeImage:\n+ mean: *global_mean\n+ std: *global_std\n+ is_scale: true\n+ - Permute: {}\n+ batch_size: 64\n+ shuffle: true\n+ drop_last: false\n+\n+EvalReader:\n+ sample_transforms:\n+ - TopDownAffine:\n+ trainsize: *trainsize\n+ - ToHeatmapsTopDown_DARK:\n+ hmsize: *hmsize\n+ sigma: 2\n+ batch_transforms:\n+ - NormalizeImage:\n+ mean: *global_mean\n+ std: *global_std\n+ is_scale: true\n+ - Permute: {}\n+ batch_size: 16\n+ drop_empty: false\n+\n+TestReader:\n+ sample_transforms:\n+ - Decode: {}\n+ - TopDownEvalAffine:\n+ trainsize: *trainsize\n+ - NormalizeImage:\n+ mean: *global_mean\n+ std: *global_std\n+ is_scale: true\n+ - Permute: {}\n+ batch_size: 1\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
add dark model_zoo; (#3369) add config dark_hrnet_w32_384x288.yml; test=document_fix;
499,304
16.06.2021 12:33:20
-28,800
1dc5c01476905d5f8d613c5efa6be5fd116f745c
fix benchmark in deploy with solov2
[ { "change_type": "MODIFY", "old_path": "deploy/python/infer.py", "new_path": "deploy/python/infer.py", "diff": "@@ -213,6 +213,7 @@ class DetectorSOLOv2(Detector):\ncpu_threads=cpu_threads,\nenable_mkldnn=enable_mkldnn)\nself.det_times = Timer()\n+ self.cpu_mem, self.gpu_mem, self.gpu_util = 0, 0, 0\ndef predict(self, image, threshold=0.5, warmup=0, repeats=1):\n'''\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
fix benchmark in deploy with solov2 (#3391)
499,301
17.06.2021 18:13:45
-28,800
ff42349823cc3476c18dccf3cfb809d667446542
voc data format
[ { "change_type": "MODIFY", "old_path": "tools/x2coco.py", "new_path": "tools/x2coco.py", "diff": "@@ -195,7 +195,8 @@ def voc_get_label_anno(ann_dir_path, ann_ids_path, labels_path):\nlabels_ids = list(range(1, len(labels_str) + 1))\nwith open(ann_ids_path, 'r') as f:\n- ann_ids = f.read().split()\n+ ann_ids = [lin.strip().split(' ')[-1] for lin in f.readlines()]\n+\nann_paths = []\nfor aid in ann_ids:\nif aid.endswith('xml'):\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
voc data format (#3399)
499,298
19.06.2021 23:52:07
-28,800
d214d9ece7a7d435df405febb8ad8fad7c4e9efa
[MOT] add jde fairmot deploy doc
[ { "change_type": "MODIFY", "old_path": "configs/mot/README.md", "new_path": "configs/mot/README.md", "diff": "@@ -246,6 +246,21 @@ CUDA_VISIBLE_DEVICES=0 python tools/infer_mot.py -c configs/mot/fairmot/fairmot_\nPlease make sure that [ffmpeg](https://ffmpeg.org/ffmpeg.html) is installed first, on Linux(Ubuntu) platform you can directly install it by the following command:`apt-get update && apt-get install -y ffmpeg`.\n+### 4. Export model\n+\n+```bash\n+CUDA_VISIBLE_DEVICES=0 python tools/export_model.py -c configs/mot/fairmot/fairmot_dla34_30e_1088x608.yml -o weights=https://paddledet.bj.bcebos.com/models/mot/fairmot_dla34_30e_1088x608.pdparams\n+```\n+\n+### 5. Using exported model for python inference\n+\n+```bash\n+python deploy/python/mot_infer.py --model_dir=output_inference/fairmot_dla34_30e_1088x608 --video_file={your video name}.mp4 --device=GPU --use_gpu=True --save_results\n+```\n+**Notes:**\n+The tracking model is used to predict the video, and does not support the prediction of a single image. The visualization video of the tracking results is saved by default. You can add `--save_results` to save the txt result file, or `--save_images` to save the visualization images.\n+\n+\n## Citations\n```\n@inproceedings{Wojke2017simple,\n" }, { "change_type": "MODIFY", "old_path": "configs/mot/fairmot/README.md", "new_path": "configs/mot/fairmot/README.md", "diff": "@@ -77,6 +77,21 @@ CUDA_VISIBLE_DEVICES=0 python tools/infer_mot.py -c configs/mot/fairmot/fairmot_\nPlease make sure that [ffmpeg](https://ffmpeg.org/ffmpeg.html) is installed first, on Linux(Ubuntu) platform you can directly install it by the following command:`apt-get update && apt-get install -y ffmpeg`.\n+### 4. Export model\n+\n+```bash\n+CUDA_VISIBLE_DEVICES=0 python tools/export_model.py -c configs/mot/fairmot/fairmot_dla34_30e_1088x608.yml -o weights=https://paddledet.bj.bcebos.com/models/mot/fairmot_dla34_30e_1088x608.pdparams\n+```\n+\n+### 5. Using exported model for python inference\n+\n+```bash\n+python deploy/python/mot_infer.py --model_dir=output_inference/fairmot_dla34_30e_1088x608 --video_file={your video name}.mp4 --device=GPU --use_gpu=True --save_results\n+```\n+**Notes:**\n+The tracking model is used to predict the video, and does not support the prediction of a single image. The visualization video of the tracking results is saved by default. You can add `--save_results` to save the txt result file, or `--save_images` to save the visualization images.\n+\n+\n## Citations\n```\n@article{zhang2020fair,\n" }, { "change_type": "MODIFY", "old_path": "configs/mot/jde/README.md", "new_path": "configs/mot/jde/README.md", "diff": "@@ -83,6 +83,21 @@ CUDA_VISIBLE_DEVICES=0 python tools/infer_mot.py -c configs/mot/jde/jde_darknet5\nPlease make sure that [ffmpeg](https://ffmpeg.org/ffmpeg.html) is installed first, on Linux(Ubuntu) platform you can directly install it by the following command:`apt-get update && apt-get install -y ffmpeg`.\n+### 4. Export model\n+\n+```bash\n+CUDA_VISIBLE_DEVICES=0 python tools/export_model.py -c configs/mot/jde/jde_darknet53_30e_1088x608.yml -o weights=https://paddledet.bj.bcebos.com/models/mot/jde_darknet53_30e_1088x608.pdparams\n+```\n+\n+### 5. 
Using exported model for python inference\n+\n+```bash\n+python deploy/python/mot_infer.py --model_dir=output_inference/jde_darknet53_30e_1088x608 --video_file={your video name}.mp4 --device=GPU --use_gpu=True --save_results\n+```\n+**Notes:**\n+The tracking model is used to predict the video, and does not support the prediction of a single image. The visualization video of the tracking results is saved by default. You can add `--save_results` to save the txt result file, or `--save_images` to save the visualization images.\n+\n+\n## Citations\n```\n@article{wang2019towards,\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
[MOT] add jde fairmot deploy doc (#3421)
499,369
21.06.2021 11:40:05
-28,800
a8015e4b320b2425c4f89067b0fab7395dddebcd
fix train diff for fairmot
[ { "change_type": "MODIFY", "old_path": "ppdet/data/transform/operators.py", "new_path": "ppdet/data/transform/operators.py", "diff": "@@ -2015,12 +2015,12 @@ class Rbox2Poly(BaseOperator):\n@register_op\nclass AugmentHSV(BaseOperator):\n- def __init__(self, fraction=0.50, is_bgr=False):\n+ def __init__(self, fraction=0.50, is_bgr=True):\n\"\"\"\nAugment the SV channel of image data.\nArgs:\n- fraction (float): the fraction for augment\n- is_bgr (bool): whether the image is BGR mode\n+ fraction (float): the fraction for augment. Default: 0.5.\n+ is_bgr (bool): whether the image is BGR mode. Default: True.\n\"\"\"\nsuper(AugmentHSV, self).__init__()\nself.fraction = fraction\n" }, { "change_type": "MODIFY", "old_path": "ppdet/modeling/layers.py", "new_path": "ppdet/modeling/layers.py", "diff": "@@ -163,6 +163,8 @@ class ConvNormLayer(nn.Layer):\nbias_attr=True,\nlr_scale=dcn_lr_scale,\nregularizer=dcn_regularizer,\n+ dcn_bias_regularizer=dcn_regularizer,\n+ dcn_bias_lr_scale=dcn_lr_scale,\nskip_quant=skip_quant)\nnorm_lr = 0. if freeze_norm else 1.\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
fix train diff for fairmot (#3436)
499,333
22.06.2021 19:16:01
-28,800
6063d163aa84eea09323bdab53f52704011a69a0
unify int32 in op type
[ { "change_type": "MODIFY", "old_path": "ppdet/modeling/ops.py", "new_path": "ppdet/modeling/ops.py", "diff": "@@ -993,7 +993,7 @@ def multiclass_nms(bboxes,\nelse:\noutput = helper.create_variable_for_type_inference(dtype=bboxes.dtype)\n- index = helper.create_variable_for_type_inference(dtype='int')\n+ index = helper.create_variable_for_type_inference(dtype='int32')\ninputs = {'BBoxes': bboxes, 'Scores': scores}\noutputs = {'Out': output, 'Index': index}\n@@ -1136,7 +1136,7 @@ def matrix_nms(bboxes,\nelse:\nhelper = LayerHelper('matrix_nms', **locals())\noutput = helper.create_variable_for_type_inference(dtype=bboxes.dtype)\n- index = helper.create_variable_for_type_inference(dtype='int')\n+ index = helper.create_variable_for_type_inference(dtype='int32')\noutputs = {'Out': output, 'Index': index}\nif return_rois_num:\nrois_num = helper.create_variable_for_type_inference(dtype='int32')\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
unify int32 in op type (#3467)
499,348
23.06.2021 16:40:21
-28,800
35e6657241e176863309830ffe6784f84a7365e0
add hrnet mpii dataset; add dark deploy supported, mpii deploy supported;
[ { "change_type": "MODIFY", "old_path": "configs/keypoint/hrnet/dark_hrnet_w32_256x192.yml", "new_path": "configs/keypoint/hrnet/dark_hrnet_w32_256x192.yml", "diff": "@@ -118,9 +118,6 @@ EvalReader:\nsample_transforms:\n- TopDownAffine:\ntrainsize: *trainsize\n- - ToHeatmapsTopDown_DARK:\n- hmsize: *hmsize\n- sigma: 2\nbatch_transforms:\n- NormalizeImage:\nmean: *global_mean\n" }, { "change_type": "MODIFY", "old_path": "configs/keypoint/hrnet/dark_hrnet_w48_256x192.yml", "new_path": "configs/keypoint/hrnet/dark_hrnet_w48_256x192.yml", "diff": "@@ -118,9 +118,6 @@ EvalReader:\nsample_transforms:\n- TopDownAffine:\ntrainsize: *trainsize\n- - ToHeatmapsTopDown_DARK:\n- hmsize: *hmsize\n- sigma: 2\nbatch_transforms:\n- NormalizeImage:\nmean: *global_mean\n" }, { "change_type": "MODIFY", "old_path": "configs/keypoint/hrnet/hrnet_w32_256x192.yml", "new_path": "configs/keypoint/hrnet/hrnet_w32_256x192.yml", "diff": "@@ -118,9 +118,6 @@ EvalReader:\nsample_transforms:\n- TopDownAffine:\ntrainsize: *trainsize\n- - ToHeatmapsTopDown:\n- hmsize: *hmsize\n- sigma: 2\nbatch_transforms:\n- NormalizeImage:\nmean: *global_mean\n" }, { "change_type": "ADD", "old_path": null, "new_path": "configs/keypoint/hrnet/hrnet_w32_256x256_mpii.yml", "diff": "+use_gpu: true\n+log_iter: 5\n+save_dir: output\n+snapshot_epoch: 10\n+weights: output/hrnet_w32_256x256_mpii/model_final\n+epoch: 210\n+num_joints: &num_joints 16\n+pixel_std: &pixel_std 200\n+metric: KeyPointTopDownMPIIEval\n+num_classes: 1\n+train_height: &train_height 256\n+train_width: &train_width 256\n+trainsize: &trainsize [*train_width, *train_height]\n+hmsize: &hmsize [64, 64]\n+flip_perm: &flip_perm [[0, 5], [1, 4], [2, 3], [10, 15], [11, 14], [12, 13]]\n+\n+#####model\n+architecture: TopDownHRNet\n+pretrain_weights: https://paddledet.bj.bcebos.com/models/pretrained/Trunc_HRNet_W32_C_pretrained.pdparams\n+\n+TopDownHRNet:\n+ backbone: HRNet\n+ post_process: HRNetPostProcess\n+ flip_perm: *flip_perm\n+ num_joints: *num_joints\n+ width: &width 32\n+ loss: KeyPointMSELoss\n+\n+HRNet:\n+ width: *width\n+ freeze_at: -1\n+ freeze_norm: false\n+ return_idx: [0]\n+\n+KeyPointMSELoss:\n+ use_target_weight: true\n+\n+\n+#####optimizer\n+LearningRate:\n+ base_lr: 0.0005\n+ schedulers:\n+ - !PiecewiseDecay\n+ milestones: [170, 200]\n+ gamma: 0.1\n+ - !LinearWarmup\n+ start_factor: 0.001\n+ steps: 1000\n+\n+OptimizerBuilder:\n+ optimizer:\n+ type: Adam\n+ regularizer:\n+ factor: 0.0\n+ type: L2\n+\n+\n+#####data\n+TrainDataset:\n+ !KeypointTopDownMPIIDataset\n+ image_dir: images\n+ anno_path: annotations/mpii_train.json\n+ dataset_dir: dataset/mpii\n+ num_joints: *num_joints\n+\n+\n+EvalDataset:\n+ !KeypointTopDownMPIIDataset\n+ image_dir: images\n+ anno_path: annotations/mpii_val.json\n+ dataset_dir: dataset/mpii\n+ num_joints: *num_joints\n+\n+\n+TestDataset:\n+ !ImageFolder\n+ anno_path: dataset/coco/keypoint_imagelist.txt\n+\n+worker_num: 4\n+global_mean: &global_mean [0.485, 0.456, 0.406]\n+global_std: &global_std [0.229, 0.224, 0.225]\n+TrainReader:\n+ sample_transforms:\n+ - RandomFlipHalfBodyTransform:\n+ scale: 0.5\n+ rot: 40\n+ num_joints_half_body: 8\n+ prob_half_body: 0.3\n+ pixel_std: *pixel_std\n+ trainsize: *trainsize\n+ upper_body_ids: [7, 8, 9, 10, 11, 12, 13, 14, 15]\n+ flip_pairs: *flip_perm\n+ - TopDownAffine:\n+ trainsize: *trainsize\n+ - ToHeatmapsTopDown:\n+ hmsize: *hmsize\n+ sigma: 2\n+ batch_transforms:\n+ - NormalizeImage:\n+ mean: *global_mean\n+ std: *global_std\n+ is_scale: true\n+ - Permute: {}\n+ batch_size: 64\n+ shuffle: true\n+ 
drop_last: false\n+\n+EvalReader:\n+ sample_transforms:\n+ - TopDownAffine:\n+ trainsize: *trainsize\n+ batch_transforms:\n+ - NormalizeImage:\n+ mean: *global_mean\n+ std: *global_std\n+ is_scale: true\n+ - Permute: {}\n+ batch_size: 16\n+\n+TestReader:\n+ sample_transforms:\n+ - Decode: {}\n+ - TopDownEvalAffine:\n+ trainsize: *trainsize\n+ - NormalizeImage:\n+ mean: *global_mean\n+ std: *global_std\n+ is_scale: true\n+ - Permute: {}\n+ batch_size: 1\n" }, { "change_type": "MODIFY", "old_path": "configs/keypoint/hrnet/hrnet_w32_384x288.yml", "new_path": "configs/keypoint/hrnet/hrnet_w32_384x288.yml", "diff": "@@ -119,9 +119,6 @@ EvalReader:\nsample_transforms:\n- TopDownAffine:\ntrainsize: *trainsize\n- - ToHeatmapsTopDown:\n- hmsize: *hmsize\n- sigma: 2\nbatch_transforms:\n- NormalizeImage:\nmean: *global_mean\n" }, { "change_type": "MODIFY", "old_path": "deploy/python/keypoint_det_unite_infer.py", "new_path": "deploy/python/keypoint_det_unite_infer.py", "diff": "@@ -178,7 +178,8 @@ def main():\ntrt_opt_shape=FLAGS.trt_opt_shape,\ntrt_calib_mode=FLAGS.trt_calib_mode,\ncpu_threads=FLAGS.cpu_threads,\n- enable_mkldnn=FLAGS.enable_mkldnn)\n+ enable_mkldnn=FLAGS.enable_mkldnn,\n+ use_dark=FLAGS.use_dark)\n# predict from video file or camera video stream\nif FLAGS.video_file is not None or FLAGS.camera_id != -1:\n" }, { "change_type": "MODIFY", "old_path": "deploy/python/keypoint_infer.py", "new_path": "deploy/python/keypoint_infer.py", "diff": "@@ -63,7 +63,8 @@ class KeyPoint_Detector(object):\ntrt_opt_shape=640,\ntrt_calib_mode=False,\ncpu_threads=1,\n- enable_mkldnn=False):\n+ enable_mkldnn=False,\n+ use_dark=True):\nself.pred_config = pred_config\nself.predictor, self.config = load_predictor(\nmodel_dir,\n@@ -79,6 +80,7 @@ class KeyPoint_Detector(object):\nenable_mkldnn=enable_mkldnn)\nself.det_times = Timer()\nself.cpu_mem, self.gpu_mem, self.gpu_util = 0, 0, 0\n+ self.use_dark = use_dark\ndef preprocess(self, im):\npreprocess_ops = []\n@@ -109,7 +111,7 @@ class KeyPoint_Detector(object):\nimshape = inputs['im_shape'][:, ::-1]\ncenter = np.round(imshape / 2.)\nscale = imshape / 200.\n- keypoint_postprocess = HRNetPostProcess()\n+ keypoint_postprocess = HRNetPostProcess(use_dark=self.use_dark)\nresults['keypoint'] = keypoint_postprocess(np_boxes, center, scale)\nreturn results\nelse:\n@@ -390,7 +392,8 @@ def main():\ntrt_opt_shape=FLAGS.trt_opt_shape,\ntrt_calib_mode=FLAGS.trt_calib_mode,\ncpu_threads=FLAGS.cpu_threads,\n- enable_mkldnn=FLAGS.enable_mkldnn)\n+ enable_mkldnn=FLAGS.enable_mkldnn,\n+ use_dark=FLAGS.use_dark)\n# predict from video file or camera video stream\nif FLAGS.video_file is not None or FLAGS.camera_id != -1:\n" }, { "change_type": "MODIFY", "old_path": "deploy/python/keypoint_postprocess.py", "new_path": "deploy/python/keypoint_postprocess.py", "diff": "from scipy.optimize import linear_sum_assignment\nfrom collections import abc, defaultdict\n+import cv2\nimport numpy as np\nimport math\nimport paddle\n@@ -193,6 +194,9 @@ def warp_affine_joints(joints, mat):\nclass HRNetPostProcess(object):\n+ def __init__(self, use_dark=True):\n+ self.use_dark = use_dark\n+\ndef flip_back(self, output_flipped, matched_parts):\nassert output_flipped.ndim == 4,\\\n'output_flipped should be [batch_size, num_joints, height, width]'\n@@ -242,7 +246,54 @@ class HRNetPostProcess(object):\nreturn preds, maxvals\n- def get_final_preds(self, heatmaps, center, scale):\n+ def gaussian_blur(self, heatmap, kernel):\n+ border = (kernel - 1) // 2\n+ batch_size = heatmap.shape[0]\n+ num_joints = 
heatmap.shape[1]\n+ height = heatmap.shape[2]\n+ width = heatmap.shape[3]\n+ for i in range(batch_size):\n+ for j in range(num_joints):\n+ origin_max = np.max(heatmap[i, j])\n+ dr = np.zeros((height + 2 * border, width + 2 * border))\n+ dr[border:-border, border:-border] = heatmap[i, j].copy()\n+ dr = cv2.GaussianBlur(dr, (kernel, kernel), 0)\n+ heatmap[i, j] = dr[border:-border, border:-border].copy()\n+ heatmap[i, j] *= origin_max / np.max(heatmap[i, j])\n+ return heatmap\n+\n+ def dark_parse(self, hm, coord):\n+ heatmap_height = hm.shape[0]\n+ heatmap_width = hm.shape[1]\n+ px = int(coord[0])\n+ py = int(coord[1])\n+ if 1 < px < heatmap_width - 2 and 1 < py < heatmap_height - 2:\n+ dx = 0.5 * (hm[py][px + 1] - hm[py][px - 1])\n+ dy = 0.5 * (hm[py + 1][px] - hm[py - 1][px])\n+ dxx = 0.25 * (hm[py][px + 2] - 2 * hm[py][px] + hm[py][px - 2])\n+ dxy = 0.25 * (hm[py+1][px+1] - hm[py-1][px+1] - hm[py+1][px-1] \\\n+ + hm[py-1][px-1])\n+ dyy = 0.25 * (\n+ hm[py + 2 * 1][px] - 2 * hm[py][px] + hm[py - 2 * 1][px])\n+ derivative = np.matrix([[dx], [dy]])\n+ hessian = np.matrix([[dxx, dxy], [dxy, dyy]])\n+ if dxx * dyy - dxy**2 != 0:\n+ hessianinv = hessian.I\n+ offset = -hessianinv * derivative\n+ offset = np.squeeze(np.array(offset.T), axis=0)\n+ coord += offset\n+ return coord\n+\n+ def dark_postprocess(self, hm, coords, kernelsize):\n+ hm = self.gaussian_blur(hm, kernelsize)\n+ hm = np.maximum(hm, 1e-10)\n+ hm = np.log(hm)\n+ for n in range(coords.shape[0]):\n+ for p in range(coords.shape[1]):\n+ coords[n, p] = self.dark_parse(hm[n][p], coords[n][p])\n+ return coords\n+\n+ def get_final_preds(self, heatmaps, center, scale, kernelsize=3):\n\"\"\"the highest heatvalue location with a quarter offset in the\ndirection from the highest response to the second highest response.\n@@ -261,6 +312,9 @@ class HRNetPostProcess(object):\nheatmap_height = heatmaps.shape[2]\nheatmap_width = heatmaps.shape[3]\n+ if self.use_dark:\n+ coords = self.dark_postprocess(heatmaps, coords, kernelsize)\n+ else:\nfor n in range(coords.shape[0]):\nfor p in range(coords.shape[1]):\nhm = heatmaps[n][p]\n" }, { "change_type": "MODIFY", "old_path": "deploy/python/keypoint_visualize.py", "new_path": "deploy/python/keypoint_visualize.py", "diff": "@@ -34,9 +34,16 @@ def draw_pose(imgfile,\n'for example: `pip install matplotlib`.')\nraise e\n+ skeletons, scores = results['keypoint']\n+ kpt_nums = len(skeletons[0])\n+ if kpt_nums == 17: #plot coco keypoint\nEDGES = [(0, 1), (0, 2), (1, 3), (2, 4), (3, 5), (4, 6), (5, 7), (6, 8),\n- (7, 9), (8, 10), (5, 11), (6, 12), (11, 13), (12, 14), (13, 15),\n- (14, 16), (11, 12)]\n+ (7, 9), (8, 10), (5, 11), (6, 12), (11, 13), (12, 14),\n+ (13, 15), (14, 16), (11, 12)]\n+ else: #plot mpii keypoint\n+ EDGES = [(0, 1), (1, 2), (3, 4), (4, 5), (2, 6), (3, 6), (6, 7), (7, 8),\n+ (8, 9), (10, 11), (11, 12), (13, 14), (14, 15), (8, 12),\n+ (8, 13)]\nNUM_EDGES = len(EDGES)\ncolors = [[255, 0, 0], [255, 85, 0], [255, 170, 0], [255, 255, 0], [170, 255, 0], [85, 255, 0], [0, 255, 0], \\\n@@ -46,7 +53,7 @@ def draw_pose(imgfile,\nplt.figure()\nimg = cv2.imread(imgfile) if type(imgfile) == str else imgfile\n- skeletons, scores = results['keypoint']\n+\ncolor_set = results['colors'] if 'colors' in results else None\nif 'bbox' in results:\n@@ -58,7 +65,7 @@ def draw_pose(imgfile,\ncv2.rectangle(img, (xmin, ymin), (xmax, ymax), color, 1)\ncanvas = img.copy()\n- for i in range(17):\n+ for i in range(kpt_nums):\nfor j in range(len(skeletons)):\nif skeletons[j][i, 2] < visual_thread:\ncontinue\n" }, { 
"change_type": "MODIFY", "old_path": "deploy/python/topdown_unite_utils.py", "new_path": "deploy/python/topdown_unite_utils.py", "diff": "@@ -103,5 +103,10 @@ def argsparser():\ndefault=False,\nhelp=\"If the model is produced by TRT offline quantitative \"\n\"calibration, trt_calib_mode need to set True.\")\n+ parser.add_argument(\n+ '--use_dark',\n+ type=bool,\n+ default=True,\n+ help='whether to use darkpose to get better keypoint position predict ')\nreturn parser\n" }, { "change_type": "MODIFY", "old_path": "deploy/python/utils.py", "new_path": "deploy/python/utils.py", "diff": "@@ -108,6 +108,11 @@ def argsparser():\n'--save_results',\naction='store_true',\nhelp='Save tracking results (txt).')\n+ parser.add_argument(\n+ '--use_dark',\n+ type=bool,\n+ default=True,\n+ help='whether to use darkpose to get better keypoint position predict ')\nreturn parser\n" }, { "change_type": "MODIFY", "old_path": "ppdet/data/source/keypoint_coco.py", "new_path": "ppdet/data/source/keypoint_coco.py", "diff": "@@ -25,7 +25,8 @@ from ppdet.core.workspace import register, serializable\n@serializable\nclass KeypointBottomUpBaseDataset(DetDataset):\n- \"\"\"Base class for bottom-up datasets.\n+ \"\"\"Base class for bottom-up datasets. Adapted from\n+ https://github.com/open-mmlab/mmpose\nAll datasets should subclass it.\nAll subclasses should overwrite:\n@@ -86,7 +87,8 @@ class KeypointBottomUpBaseDataset(DetDataset):\n@register\n@serializable\nclass KeypointBottomUpCocoDataset(KeypointBottomUpBaseDataset):\n- \"\"\"COCO dataset for bottom-up pose estimation.\n+ \"\"\"COCO dataset for bottom-up pose estimation. Adapted from\n+ https://github.com/open-mmlab/mmpose\nThe dataset loads raw features and apply specified transforms\nto return a dict containing the image tensors and other information.\n@@ -253,7 +255,8 @@ class KeypointBottomUpCocoDataset(KeypointBottomUpBaseDataset):\n@register\n@serializable\nclass KeypointBottomUpCrowdPoseDataset(KeypointBottomUpCocoDataset):\n- \"\"\"CrowdPose dataset for bottom-up pose estimation.\n+ \"\"\"CrowdPose dataset for bottom-up pose estimation. Adapted from\n+ https://github.com/open-mmlab/mmpose\nThe dataset loads raw features and apply specified transforms\nto return a dict containing the image tensors and other information.\n@@ -374,7 +377,9 @@ class KeypointTopDownBaseDataset(DetDataset):\n@register\n@serializable\nclass KeypointTopDownCocoDataset(KeypointTopDownBaseDataset):\n- \"\"\"COCO dataset for top-down pose estimation.\n+ \"\"\"COCO dataset for top-down pose estimation. Adapted from\n+ https://github.com/leoxiaobin/deep-high-resolution-net.pytorch\n+ Copyright (c) Microsoft, under the MIT License.\nThe dataset loads raw features and apply specified transforms\nto return a dict containing the image tensors and other information.\n@@ -567,7 +572,9 @@ class KeypointTopDownCocoDataset(KeypointTopDownBaseDataset):\n@register\n@serializable\nclass KeypointTopDownMPIIDataset(KeypointTopDownBaseDataset):\n- \"\"\"MPII dataset for topdown pose estimation.\n+ \"\"\"MPII dataset for topdown pose estimation. 
Adapted from\n+ https://github.com/leoxiaobin/deep-high-resolution-net.pytorch\n+ Copyright (c) Microsoft, under the MIT License.\nThe dataset loads raw features and apply specified transforms\nto return a dict containing the image tensors and other information.\n@@ -653,4 +660,5 @@ class KeypointTopDownMPIIDataset(KeypointTopDownBaseDataset):\n'joints': joints,\n'joints_vis': joints_vis\n})\n+ print(\"number length: {}\".format(len(gt_db)))\nself.db = gt_db\n" }, { "change_type": "MODIFY", "old_path": "ppdet/engine/trainer.py", "new_path": "ppdet/engine/trainer.py", "diff": "@@ -35,7 +35,7 @@ from ppdet.optimizer import ModelEMA\nfrom ppdet.core.workspace import create\nfrom ppdet.utils.checkpoint import load_weight, load_pretrain_weight\nfrom ppdet.utils.visualizer import visualize_results, save_result\n-from ppdet.metrics import Metric, COCOMetric, VOCMetric, WiderFaceMetric, get_infer_results, KeyPointTopDownCOCOEval\n+from ppdet.metrics import Metric, COCOMetric, VOCMetric, WiderFaceMetric, get_infer_results, KeyPointTopDownCOCOEval, KeyPointTopDownMPIIEval\nfrom ppdet.metrics import RBoxMetric\nfrom ppdet.data.source.category import get_categories\nimport ppdet.utils.stats as stats\n@@ -234,6 +234,15 @@ class Trainer(object):\nlen(eval_dataset), self.cfg.num_joints,\nself.cfg.save_dir)\n]\n+ elif self.cfg.metric == 'KeyPointTopDownMPIIEval':\n+ eval_dataset = self.cfg['EvalDataset']\n+ eval_dataset.check_or_download_dataset()\n+ anno_file = eval_dataset.get_anno()\n+ self._metrics = [\n+ KeyPointTopDownMPIIEval(anno_file,\n+ len(eval_dataset), self.cfg.num_joints,\n+ self.cfg.save_dir)\n+ ]\nelse:\nlogger.warn(\"Metric not support for metric type {}\".format(\nself.cfg.metric))\n" }, { "change_type": "MODIFY", "old_path": "ppdet/metrics/keypoint_metrics.py", "new_path": "ppdet/metrics/keypoint_metrics.py", "diff": "@@ -21,11 +21,18 @@ import numpy as np\nfrom pycocotools.coco import COCO\nfrom pycocotools.cocoeval import COCOeval\nfrom ..modeling.keypoint_utils import oks_nms\n+from scipy.io import loadmat, savemat\n-__all__ = ['KeyPointTopDownCOCOEval']\n+__all__ = ['KeyPointTopDownCOCOEval', 'KeyPointTopDownMPIIEval']\nclass KeyPointTopDownCOCOEval(object):\n+ '''\n+ Adapted from\n+ https://github.com/leoxiaobin/deep-high-resolution-net.pytorch\n+ Copyright (c) Microsoft, under the MIT License.\n+ '''\n+\ndef __init__(self,\nanno_file,\nnum_samples,\n@@ -200,3 +207,161 @@ class KeyPointTopDownCOCOEval(object):\ndef get_results(self):\nreturn self.eval_results\n+\n+\n+class KeyPointTopDownMPIIEval(object):\n+ def __init__(self,\n+ anno_file,\n+ num_samples,\n+ num_joints,\n+ output_eval,\n+ oks_thre=0.9):\n+ super(KeyPointTopDownMPIIEval, self).__init__()\n+ self.ann_file = anno_file\n+ self.reset()\n+\n+ def reset(self):\n+ self.results = []\n+ self.eval_results = {}\n+ self.idx = 0\n+\n+ def update(self, inputs, outputs):\n+ kpts, _ = outputs['keypoint'][0]\n+\n+ num_images = inputs['image'].shape[0]\n+ results = {}\n+ results['preds'] = kpts[:, :, 0:3]\n+ results['boxes'] = np.zeros((num_images, 6))\n+ results['boxes'][:, 0:2] = inputs['center'].numpy()[:, 0:2]\n+ results['boxes'][:, 2:4] = inputs['scale'].numpy()[:, 0:2]\n+ results['boxes'][:, 4] = np.prod(inputs['scale'].numpy() * 200, 1)\n+ results['boxes'][:, 5] = np.squeeze(inputs['score'].numpy())\n+ results['image_path'] = inputs['image_file']\n+\n+ self.results.append(results)\n+\n+ def accumulate(self):\n+ self.eval_results = self.evaluate(self.results)\n+\n+ def log(self):\n+ for item, value in 
self.eval_results.items():\n+ print(\"{} : {}\".format(item, value))\n+\n+ def get_results(self):\n+ return self.eval_results\n+\n+ def evaluate(self, outputs, savepath=None):\n+ \"\"\"Evaluate PCKh for MPII dataset. Adapted from\n+ https://github.com/leoxiaobin/deep-high-resolution-net.pytorch\n+ Copyright (c) Microsoft, under the MIT License.\n+\n+ Args:\n+ outputs(list(preds, boxes)):\n+\n+ * preds (np.ndarray[N,K,3]): The first two dimensions are\n+ coordinates, score is the third dimension of the array.\n+ * boxes (np.ndarray[N,6]): [center[0], center[1], scale[0]\n+ , scale[1],area, score]\n+\n+ Returns:\n+ dict: PCKh for each joint\n+ \"\"\"\n+\n+ kpts = []\n+ for output in outputs:\n+ preds = output['preds']\n+ batch_size = preds.shape[0]\n+ for i in range(batch_size):\n+ kpts.append({'keypoints': preds[i]})\n+\n+ preds = np.stack([kpt['keypoints'] for kpt in kpts])\n+\n+ # convert 0-based index to 1-based index,\n+ # and get the first two dimensions.\n+ preds = preds[..., :2] + 1.0\n+\n+ if savepath is not None:\n+ pred_file = os.path.join(savepath, 'pred.mat')\n+ savemat(pred_file, mdict={'preds': preds})\n+\n+ SC_BIAS = 0.6\n+ threshold = 0.5\n+\n+ gt_file = os.path.join(\n+ os.path.dirname(self.ann_file), 'mpii_gt_val.mat')\n+ gt_dict = loadmat(gt_file)\n+ dataset_joints = gt_dict['dataset_joints']\n+ jnt_missing = gt_dict['jnt_missing']\n+ pos_gt_src = gt_dict['pos_gt_src']\n+ headboxes_src = gt_dict['headboxes_src']\n+\n+ pos_pred_src = np.transpose(preds, [1, 2, 0])\n+\n+ head = np.where(dataset_joints == 'head')[1][0]\n+ lsho = np.where(dataset_joints == 'lsho')[1][0]\n+ lelb = np.where(dataset_joints == 'lelb')[1][0]\n+ lwri = np.where(dataset_joints == 'lwri')[1][0]\n+ lhip = np.where(dataset_joints == 'lhip')[1][0]\n+ lkne = np.where(dataset_joints == 'lkne')[1][0]\n+ lank = np.where(dataset_joints == 'lank')[1][0]\n+\n+ rsho = np.where(dataset_joints == 'rsho')[1][0]\n+ relb = np.where(dataset_joints == 'relb')[1][0]\n+ rwri = np.where(dataset_joints == 'rwri')[1][0]\n+ rkne = np.where(dataset_joints == 'rkne')[1][0]\n+ rank = np.where(dataset_joints == 'rank')[1][0]\n+ rhip = np.where(dataset_joints == 'rhip')[1][0]\n+\n+ jnt_visible = 1 - jnt_missing\n+ uv_error = pos_pred_src - pos_gt_src\n+ uv_err = np.linalg.norm(uv_error, axis=1)\n+ headsizes = headboxes_src[1, :, :] - headboxes_src[0, :, :]\n+ headsizes = np.linalg.norm(headsizes, axis=0)\n+ headsizes *= SC_BIAS\n+ scale = headsizes * np.ones((len(uv_err), 1), dtype=np.float32)\n+ scaled_uv_err = uv_err / scale\n+ scaled_uv_err = scaled_uv_err * jnt_visible\n+ jnt_count = np.sum(jnt_visible, axis=1)\n+ less_than_threshold = (scaled_uv_err <= threshold) * jnt_visible\n+ PCKh = 100. * np.sum(less_than_threshold, axis=1) / jnt_count\n+\n+ # save\n+ rng = np.arange(0, 0.5 + 0.01, 0.01)\n+ pckAll = np.zeros((len(rng), 16), dtype=np.float32)\n+\n+ for r, threshold in enumerate(rng):\n+ less_than_threshold = (scaled_uv_err <= threshold) * jnt_visible\n+ pckAll[r, :] = 100. 
* np.sum(less_than_threshold,\n+ axis=1) / jnt_count\n+\n+ PCKh = np.ma.array(PCKh, mask=False)\n+ PCKh.mask[6:8] = True\n+\n+ jnt_count = np.ma.array(jnt_count, mask=False)\n+ jnt_count.mask[6:8] = True\n+ jnt_ratio = jnt_count / np.sum(jnt_count).astype(np.float64)\n+\n+ name_value = [ #noqa\n+ ('Head', PCKh[head]),\n+ ('Shoulder', 0.5 * (PCKh[lsho] + PCKh[rsho])),\n+ ('Elbow', 0.5 * (PCKh[lelb] + PCKh[relb])),\n+ ('Wrist', 0.5 * (PCKh[lwri] + PCKh[rwri])),\n+ ('Hip', 0.5 * (PCKh[lhip] + PCKh[rhip])),\n+ ('Knee', 0.5 * (PCKh[lkne] + PCKh[rkne])),\n+ ('Ankle', 0.5 * (PCKh[lank] + PCKh[rank])),\n+ ('PCKh', np.sum(PCKh * jnt_ratio)),\n+ ('PCKh@0.1', np.sum(pckAll[11, :] * jnt_ratio))\n+ ]\n+ name_value = OrderedDict(name_value)\n+\n+ return name_value\n+\n+ def _sort_and_unique_bboxes(self, kpts, key='bbox_id'):\n+ \"\"\"sort kpts and remove the repeated ones.\"\"\"\n+ kpts = sorted(kpts, key=lambda x: x[key])\n+ num = len(kpts)\n+ for i in range(num - 1, 0, -1):\n+ if kpts[i][key] == kpts[i - 1][key]:\n+ del kpts[i]\n+\n+ return kpts\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
add hrnet mpii dataset; (#3460) add dark deploy supported, mpii deploy supported;
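The keypoint postprocess added in this record implements DARK-style decoding: blur the heatmap, take its log, and move the integer argmax by the Newton step -H^-1*grad of a local second-order Taylor expansion. A self-contained sketch of that refinement on a toy Gaussian heatmap (function name and test data are illustrative, not taken from the repo):

```python
# Sketch of DARK sub-pixel refinement for one joint (illustrative, not the deploy API).
import numpy as np

def dark_refine(logmap, x, y):
    """Shift the integer peak (x, y) by the Newton step of a 2nd-order Taylor expansion."""
    dx = 0.5 * (logmap[y, x + 1] - logmap[y, x - 1])
    dy = 0.5 * (logmap[y + 1, x] - logmap[y - 1, x])
    dxx = 0.25 * (logmap[y, x + 2] - 2 * logmap[y, x] + logmap[y, x - 2])
    dyy = 0.25 * (logmap[y + 2, x] - 2 * logmap[y, x] + logmap[y - 2, x])
    dxy = 0.25 * (logmap[y + 1, x + 1] - logmap[y - 1, x + 1]
                  - logmap[y + 1, x - 1] + logmap[y - 1, x - 1])
    grad = np.array([dx, dy])
    hess = np.array([[dxx, dxy], [dxy, dyy]])
    if abs(np.linalg.det(hess)) > 1e-12:
        return np.array([x, y], dtype=float) - np.linalg.solve(hess, grad)  # Newton step
    return np.array([x, y], dtype=float)

# Toy heatmap with a Gaussian peak at (5.3, 4.7); the refinement recovers the fraction.
ys, xs = np.mgrid[0:12, 0:12]
hm = np.exp(-((xs - 5.3) ** 2 + (ys - 4.7) ** 2) / 2.0)
print(dark_refine(np.log(np.maximum(hm, 1e-10)), 5, 5))  # ~[5.3 4.7]
```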
499,331
28.06.2021 20:06:39
-28,800
51574ea1de1c367fbfee0395a4222fe49d5c6355
[dev] reorganize code of s2anet_head * reorganize the code, and fix bug * fix typo * remve comment * reorganize code * Revert "reorganize code" This reverts commit * set weight * set default config * fix code style
[ { "change_type": "MODIFY", "old_path": "configs/dota/_base_/s2anet.yml", "new_path": "configs/dota/_base_/s2anet.yml", "diff": "@@ -36,6 +36,8 @@ S2ANetHead:\nalign_conv_type: 'Conv' # AlignConv Conv\nalign_conv_size: 3\nuse_sigmoid_cls: True\n+ reg_loss_weight: [ 1.0, 1.0, 1.0, 1.0, 1.1 ]\n+ cls_loss_weight: [ 1.1, 1.05 ]\nRBoxAssigner:\npos_iou_thr: 0.5\n@@ -52,4 +54,3 @@ S2ANetBBoxPostProcess:\nscore_threshold: 0.05\nnms_threshold: 0.1\nnormalized: False\n- #background_label: -1\n" }, { "change_type": "MODIFY", "old_path": "configs/dota/s2anet_1x_dota.yml", "new_path": "configs/dota/s2anet_1x_dota.yml", "diff": "-_BASE_: [\n+it _BASE_: [\n'../datasets/dota.yml',\n'../runtime.yml',\n'_base_/s2anet_optimizer_1x.yml',\n@@ -6,3 +6,18 @@ _BASE_: [\n'_base_/s2anet_reader.yml',\n]\nweights: output/s2anet_1x_dota/model_final\n+\n+S2ANetHead:\n+ anchor_strides: [8, 16, 32, 64, 128]\n+ anchor_scales: [4]\n+ anchor_ratios: [1.0]\n+ anchor_assign: RBoxAssigner\n+ stacked_convs: 2\n+ feat_in: 256\n+ feat_out: 256\n+ num_classes: 15\n+ align_conv_type: 'AlignConv' # AlignConv Conv\n+ align_conv_size: 3\n+ use_sigmoid_cls: True\n+ reg_loss_weight: [1.0, 1.0, 1.0, 1.0, 1.1]\n+ cls_loss_weight: [1.1, 1.05]\n" }, { "change_type": "MODIFY", "old_path": "configs/dota/s2anet_conv_1x_dota.yml", "new_path": "configs/dota/s2anet_conv_1x_dota.yml", "diff": "@@ -19,3 +19,5 @@ S2ANetHead:\nalign_conv_type: 'Conv' # AlignConv Conv\nalign_conv_size: 3\nuse_sigmoid_cls: True\n+ reg_loss_weight: [1.0, 1.0, 1.0, 1.0, 1.1]\n+ cls_loss_weight: [1.1, 1.05]\n" }, { "change_type": "MODIFY", "old_path": "ppdet/modeling/bbox_utils.py", "new_path": "ppdet/modeling/bbox_utils.py", "diff": "@@ -267,6 +267,150 @@ def bbox_iou(box1, box2, giou=False, diou=False, ciou=False, eps=1e-9):\nreturn iou\n+def rect2rbox(bboxes):\n+ \"\"\"\n+ :param bboxes: shape (n, 4) (xmin, ymin, xmax, ymax)\n+ :return: dbboxes: shape (n, 5) (x_ctr, y_ctr, w, h, angle)\n+ \"\"\"\n+ bboxes = bboxes.reshape(-1, 4)\n+ num_boxes = bboxes.shape[0]\n+\n+ x_ctr = (bboxes[:, 2] + bboxes[:, 0]) / 2.0\n+ y_ctr = (bboxes[:, 3] + bboxes[:, 1]) / 2.0\n+ edges1 = np.abs(bboxes[:, 2] - bboxes[:, 0])\n+ edges2 = np.abs(bboxes[:, 3] - bboxes[:, 1])\n+ angles = np.zeros([num_boxes], dtype=bboxes.dtype)\n+\n+ inds = edges1 < edges2\n+\n+ rboxes = np.stack((x_ctr, y_ctr, edges1, edges2, angles), axis=1)\n+ rboxes[inds, 2] = edges2[inds]\n+ rboxes[inds, 3] = edges1[inds]\n+ rboxes[inds, 4] = np.pi / 2.0\n+ return rboxes\n+\n+\n+def delta2rbox(rrois,\n+ deltas,\n+ means=[0, 0, 0, 0, 0],\n+ stds=[1, 1, 1, 1, 1],\n+ wh_ratio_clip=1e-6):\n+ \"\"\"\n+ :param rrois: (cx, cy, w, h, theta)\n+ :param deltas: (dx, dy, dw, dh, dtheta)\n+ :param means:\n+ :param stds:\n+ :param wh_ratio_clip:\n+ :return:\n+ \"\"\"\n+ means = paddle.to_tensor(means)\n+ stds = paddle.to_tensor(stds)\n+ deltas = paddle.reshape(deltas, [-1, deltas.shape[-1]])\n+ denorm_deltas = deltas * stds + means\n+\n+ dx = denorm_deltas[:, 0]\n+ dy = denorm_deltas[:, 1]\n+ dw = denorm_deltas[:, 2]\n+ dh = denorm_deltas[:, 3]\n+ dangle = denorm_deltas[:, 4]\n+\n+ max_ratio = np.abs(np.log(wh_ratio_clip))\n+ dw = paddle.clip(dw, min=-max_ratio, max=max_ratio)\n+ dh = paddle.clip(dh, min=-max_ratio, max=max_ratio)\n+\n+ rroi_x = rrois[:, 0]\n+ rroi_y = rrois[:, 1]\n+ rroi_w = rrois[:, 2]\n+ rroi_h = rrois[:, 3]\n+ rroi_angle = rrois[:, 4]\n+\n+ gx = dx * rroi_w * paddle.cos(rroi_angle) - dy * rroi_h * paddle.sin(\n+ rroi_angle) + rroi_x\n+ gy = dx * rroi_w * paddle.sin(rroi_angle) + dy * rroi_h * paddle.cos(\n+ 
rroi_angle) + rroi_y\n+ gw = rroi_w * dw.exp()\n+ gh = rroi_h * dh.exp()\n+ ga = np.pi * dangle + rroi_angle\n+ ga = (ga + np.pi / 4) % np.pi - np.pi / 4\n+ ga = paddle.to_tensor(ga)\n+\n+ gw = paddle.to_tensor(gw, dtype='float32')\n+ gh = paddle.to_tensor(gh, dtype='float32')\n+ bboxes = paddle.stack([gx, gy, gw, gh, ga], axis=-1)\n+ return bboxes\n+\n+\n+def rbox2delta(proposals, gt, means=[0, 0, 0, 0, 0], stds=[1, 1, 1, 1, 1]):\n+ \"\"\"\n+\n+ Args:\n+ proposals:\n+ gt:\n+ means: 1x5\n+ stds: 1x5\n+\n+ Returns:\n+\n+ \"\"\"\n+ proposals = proposals.astype(np.float64)\n+\n+ PI = np.pi\n+\n+ gt_widths = gt[..., 2]\n+ gt_heights = gt[..., 3]\n+ gt_angle = gt[..., 4]\n+\n+ proposals_widths = proposals[..., 2]\n+ proposals_heights = proposals[..., 3]\n+ proposals_angle = proposals[..., 4]\n+\n+ coord = gt[..., 0:2] - proposals[..., 0:2]\n+ dx = (np.cos(proposals[..., 4]) * coord[..., 0] + np.sin(proposals[..., 4])\n+ * coord[..., 1]) / proposals_widths\n+ dy = (-np.sin(proposals[..., 4]) * coord[..., 0] + np.cos(proposals[..., 4])\n+ * coord[..., 1]) / proposals_heights\n+ dw = np.log(gt_widths / proposals_widths)\n+ dh = np.log(gt_heights / proposals_heights)\n+ da = (gt_angle - proposals_angle)\n+\n+ da = (da + PI / 4) % PI - PI / 4\n+ da /= PI\n+\n+ deltas = np.stack([dx, dy, dw, dh, da], axis=-1)\n+ means = np.array(means, dtype=deltas.dtype)\n+ stds = np.array(stds, dtype=deltas.dtype)\n+ deltas = (deltas - means) / stds\n+ deltas = deltas.astype(np.float32)\n+ return deltas\n+\n+\n+def bbox_decode(bbox_preds,\n+ anchors,\n+ means=[0, 0, 0, 0, 0],\n+ stds=[1, 1, 1, 1, 1]):\n+ \"\"\"decode bbox from deltas\n+ Args:\n+ bbox_preds: [N,H,W,5]\n+ anchors: [H*W,5]\n+ return:\n+ bboxes: [N,H,W,5]\n+ \"\"\"\n+ means = paddle.to_tensor(means)\n+ stds = paddle.to_tensor(stds)\n+ num_imgs, H, W, _ = bbox_preds.shape\n+ bboxes_list = []\n+ for img_id in range(num_imgs):\n+ bbox_pred = bbox_preds[img_id]\n+ # bbox_pred.shape=[5,H,W]\n+ bbox_delta = bbox_pred\n+ anchors = paddle.to_tensor(anchors)\n+ bboxes = delta2rbox(\n+ anchors, bbox_delta, means, stds, wh_ratio_clip=1e-6)\n+ bboxes = paddle.reshape(bboxes, [H, W, 5])\n+ bboxes_list.append(bboxes)\n+ return paddle.stack(bboxes_list, axis=0)\n+\n+\ndef poly2rbox(polys):\n\"\"\"\npoly:[x0,y0,x1,y1,x2,y2,x3,y3]\n" }, { "change_type": "MODIFY", "old_path": "ppdet/modeling/heads/s2anet_head.py", "new_path": "ppdet/modeling/heads/s2anet_head.py", "diff": "@@ -17,21 +17,26 @@ import paddle.nn as nn\nimport paddle.nn.functional as F\nfrom paddle.nn.initializer import Normal, Constant\nfrom ppdet.core.workspace import register\n+from ppdet.modeling import ops\nfrom ppdet.modeling import bbox_utils\nfrom ppdet.modeling.proposal_generator.target_layer import RBoxAssigner\nimport numpy as np\n-class S2ANetAnchorGenerator(nn.Layer):\n+class S2ANetAnchorGenerator(object):\n\"\"\"\n- AnchorGenerator by paddle\n+ S2ANetAnchorGenerator by np\n\"\"\"\n- def __init__(self, base_size, scales, ratios, scale_major=True, ctr=None):\n- super(S2ANetAnchorGenerator, self).__init__()\n+ def __init__(self,\n+ base_size=8,\n+ scales=1.0,\n+ ratios=1.0,\n+ scale_major=True,\n+ ctr=None):\nself.base_size = base_size\n- self.scales = paddle.to_tensor(scales)\n- self.ratios = paddle.to_tensor(ratios)\n+ self.scales = scales\n+ self.ratios = ratios\nself.scale_major = scale_major\nself.ctr = ctr\nself.base_anchors = self.gen_base_anchors()\n@@ -49,7 +54,7 @@ class S2ANetAnchorGenerator(nn.Layer):\nelse:\nx_ctr, y_ctr = self.ctr\n- h_ratios = paddle.sqrt(self.ratios)\n+ h_ratios = 
np.sqrt(self.ratios)\nw_ratios = 1 / h_ratios\nif self.scale_major:\nws = (w * w_ratios[:] * self.scales[:]).reshape([-1])\n@@ -58,51 +63,53 @@ class S2ANetAnchorGenerator(nn.Layer):\nws = (w * self.scales[:] * w_ratios[:]).reshape([-1])\nhs = (h * self.scales[:] * h_ratios[:]).reshape([-1])\n- base_anchors = paddle.stack(\n+ # yapf: disable\n+ base_anchors = np.stack(\n[\nx_ctr - 0.5 * (ws - 1), y_ctr - 0.5 * (hs - 1),\nx_ctr + 0.5 * (ws - 1), y_ctr + 0.5 * (hs - 1)\n],\naxis=-1)\n- base_anchors = paddle.round(base_anchors)\n+ base_anchors = np.round(base_anchors)\n+ # yapf: enable\n+\nreturn base_anchors\ndef _meshgrid(self, x, y, row_major=True):\n- yy, xx = paddle.meshgrid(x, y)\n- yy = yy.reshape([-1])\n- xx = xx.reshape([-1])\n+ xx, yy = np.meshgrid(x, y)\n+ xx = xx.reshape(-1)\n+ yy = yy.reshape(-1)\nif row_major:\nreturn xx, yy\nelse:\nreturn yy, xx\n- def forward(self, featmap_size, stride=16):\n+ def grid_anchors(self, featmap_size, stride=16):\n# featmap_size*stride project it to original area\nbase_anchors = self.base_anchors\n-\n- feat_h = featmap_size[0]\n- feat_w = featmap_size[1]\n- shift_x = paddle.arange(0, feat_w, 1, 'int32') * stride\n- shift_y = paddle.arange(0, feat_h, 1, 'int32') * stride\n+ feat_h, feat_w = featmap_size\n+ shift_x = np.arange(0, feat_w, 1, 'int32') * stride\n+ shift_y = np.arange(0, feat_h, 1, 'int32') * stride\nshift_xx, shift_yy = self._meshgrid(shift_x, shift_y)\n- shifts = paddle.stack([shift_xx, shift_yy, shift_xx, shift_yy], axis=-1)\n+ shifts = np.stack([shift_xx, shift_yy, shift_xx, shift_yy], axis=-1)\n- all_anchors = base_anchors[:, :] + shifts[:, :]\n- all_anchors = all_anchors.reshape([feat_h * feat_w, 4])\n+ all_anchors = base_anchors[None, :, :] + shifts[:, None, :]\nreturn all_anchors\ndef valid_flags(self, featmap_size, valid_size):\nfeat_h, feat_w = featmap_size\nvalid_h, valid_w = valid_size\nassert valid_h <= feat_h and valid_w <= feat_w\n- valid_x = paddle.zeros([feat_w], dtype='uint8')\n- valid_y = paddle.zeros([feat_h], dtype='uint8')\n+ valid_x = np.zeros([feat_w], dtype='uint8')\n+ valid_y = np.zeros([feat_h], dtype='uint8')\nvalid_x[:valid_w] = 1\nvalid_y[:valid_h] = 1\nvalid_xx, valid_yy = self._meshgrid(valid_x, valid_y)\nvalid = valid_xx & valid_yy\n- valid = valid[:, None].expand(\n- [valid.size(0), self.num_base_anchors]).reshape([-1])\n+ valid = valid.reshape([-1])\n+\n+ # valid = valid[:, None].expand(\n+ # [valid.size(0), self.num_base_anchors]).reshape([-1])\nreturn valid\n@@ -225,8 +232,8 @@ class S2ANetHead(nn.Layer):\nanchor_strides=[8, 16, 32, 64, 128],\nanchor_scales=[4],\nanchor_ratios=[1.0],\n- target_means=0.0,\n- target_stds=1.0,\n+ target_means=(.0, .0, .0, .0, .0),\n+ target_stds=(1.0, 1.0, 1.0, 1.0, 1.0),\nalign_conv_type='AlignConv',\nalign_conv_size=3,\nuse_sigmoid_cls=True,\n@@ -263,8 +270,6 @@ class S2ANetHead(nn.Layer):\nself.anchor_generators.append(\nS2ANetAnchorGenerator(anchor_base, anchor_scales,\nanchor_ratios))\n- self.anchor_generators = paddle.nn.LayerList(self.anchor_generators)\n- self.add_sublayer('s2anet_anchor_gen', self.anchor_generators)\nself.fam_cls_convs = nn.Sequential()\nself.fam_reg_convs = nn.Sequential()\n@@ -399,9 +404,9 @@ class S2ANetHead(nn.Layer):\nweight_attr=ParamAttr(initializer=Normal(0.0, 0.01)),\nbias_attr=ParamAttr(initializer=Constant(0)))\n- self.featmap_size_list = []\n- self.init_anchors_list = []\n- self.rbox_anchors_list = []\n+ self.base_anchors = dict()\n+ self.featmap_sizes = dict()\n+ self.base_anchors = dict()\nself.refine_anchor_list = []\ndef 
forward(self, feats):\n@@ -411,27 +416,13 @@ class S2ANetHead(nn.Layer):\nodm_reg_branch_list = []\nodm_cls_branch_list = []\n- fam_reg1_branch_list = []\n-\n- self.featmap_size_list = []\n- self.init_anchors_list = []\n- self.rbox_anchors_list = []\n+ self.featmap_sizes = dict()\n+ self.base_anchors = dict()\nself.refine_anchor_list = []\nfor i, feat in enumerate(feats):\n- # prepare anchor\n- featmap_size = paddle.shape(feat)[-2:]\n- self.featmap_size_list.append(featmap_size)\n- init_anchors = self.anchor_generators[i](featmap_size,\n- self.anchor_strides[i])\n- init_anchors = paddle.reshape(\n- init_anchors, [featmap_size[0] * featmap_size[1], 4])\n- self.init_anchors_list.append(init_anchors)\n-\n- rbox_anchors = self.rect2rbox(init_anchors)\n- self.rbox_anchors_list.append(rbox_anchors)\n-\nfam_cls_feat = self.fam_cls_convs(feat)\n+\nfam_cls = self.fam_cls(fam_cls_feat)\n# [N, CLS, H, W] --> [N, H, W, CLS]\nfam_cls = fam_cls.transpose([0, 2, 3, 1])\n@@ -447,13 +438,21 @@ class S2ANetHead(nn.Layer):\nfam_reg_reshape = paddle.reshape(fam_reg, [fam_reg.shape[0], -1, 5])\nfam_reg_branch_list.append(fam_reg_reshape)\n- # refine anchors\n- fam_reg1 = fam_reg.clone()\n- fam_reg1.stop_gradient = True\n- rbox_anchors.stop_gradient = True\n- fam_reg1_branch_list.append(fam_reg1)\n- refine_anchor = self.bbox_decode(\n- fam_reg1, rbox_anchors, self.target_stds, self.target_means)\n+ # prepare anchor\n+ featmap_size = feat.shape[-2:]\n+ self.featmap_sizes[i] = featmap_size\n+ init_anchors = self.anchor_generators[i].grid_anchors(\n+ featmap_size, self.anchor_strides[i])\n+\n+ init_anchors = bbox_utils.rect2rbox(init_anchors)\n+ self.base_anchors[(i, featmap_size[0])] = init_anchors\n+\n+ #fam_reg1 = fam_reg\n+ #fam_reg1.stop_gradient = True\n+ refine_anchor = bbox_utils.bbox_decode(\n+ fam_reg.detach(), init_anchors, self.target_means,\n+ self.target_stds)\n+\nself.refine_anchor_list.append(refine_anchor)\nif self.align_conv_type == 'AlignConv':\n@@ -493,87 +492,6 @@ class S2ANetHead(nn.Layer):\nodm_cls_branch_list, odm_reg_branch_list)\nreturn self.s2anet_head_out\n- def rect2rbox(self, bboxes):\n- \"\"\"\n- :param bboxes: shape (n, 4) (xmin, ymin, xmax, ymax)\n- :return: dbboxes: shape (n, 5) (x_ctr, y_ctr, w, h, angle)\n- \"\"\"\n- num_boxes = paddle.shape(bboxes)[0]\n- x_ctr = (bboxes[:, 2] + bboxes[:, 0]) / 2.0\n- y_ctr = (bboxes[:, 3] + bboxes[:, 1]) / 2.0\n- edges1 = paddle.abs(bboxes[:, 2] - bboxes[:, 0])\n- edges2 = paddle.abs(bboxes[:, 3] - bboxes[:, 1])\n-\n- rbox_w = paddle.maximum(edges1, edges2)\n- rbox_h = paddle.minimum(edges1, edges2)\n-\n- # set angle\n- inds = edges1 < edges2\n- inds = paddle.cast(inds, 'int32')\n- inds1 = inds * paddle.arange(0, num_boxes)\n- rboxes_angle = inds1 * np.pi / 2.0\n-\n- rboxes = paddle.stack(\n- (x_ctr, y_ctr, rbox_w, rbox_h, rboxes_angle), axis=1)\n- return rboxes\n-\n- # deltas to rbox\n- def delta2rbox(self, rrois, deltas, means, stds, wh_ratio_clip=1e-6):\n- \"\"\"\n- :param rrois: (cx, cy, w, h, theta)\n- :param deltas: (dx, dy, dw, dh, dtheta)\n- :param means: means of anchor\n- :param stds: stds of anchor\n- :param wh_ratio_clip: clip threshold of wh_ratio\n- :return:\n- \"\"\"\n- deltas = paddle.reshape(deltas, [-1, 5])\n- rrois = paddle.reshape(rrois, [-1, 5])\n- pd_means = paddle.ones(shape=[5]) * means\n- pd_stds = paddle.ones(shape=[5]) * stds\n- denorm_deltas = deltas * pd_stds + pd_means\n-\n- dx = denorm_deltas[:, 0]\n- dy = denorm_deltas[:, 1]\n- dw = denorm_deltas[:, 2]\n- dh = denorm_deltas[:, 3]\n- dangle = denorm_deltas[:, 
4]\n- max_ratio = np.abs(np.log(wh_ratio_clip))\n- dw = paddle.clip(dw, min=-max_ratio, max=max_ratio)\n- dh = paddle.clip(dh, min=-max_ratio, max=max_ratio)\n-\n- rroi_x = rrois[:, 0]\n- rroi_y = rrois[:, 1]\n- rroi_w = rrois[:, 2]\n- rroi_h = rrois[:, 3]\n- rroi_angle = rrois[:, 4]\n-\n- gx = dx * rroi_w * paddle.cos(rroi_angle) - dy * rroi_h * paddle.sin(\n- rroi_angle) + rroi_x\n- gy = dx * rroi_w * paddle.sin(rroi_angle) + dy * rroi_h * paddle.cos(\n- rroi_angle) + rroi_y\n- gw = rroi_w * dw.exp()\n- gh = rroi_h * dh.exp()\n- ga = np.pi * dangle + rroi_angle\n- ga = (ga + np.pi / 4) % np.pi - np.pi / 4\n- bboxes = paddle.stack([gx, gy, gw, gh, ga], axis=-1)\n- return bboxes\n-\n- def bbox_decode(self, bbox_preds, anchors, stds, means, wh_ratio_clip=1e-6):\n- \"\"\"decode bbox from deltas\n- Args:\n- bbox_preds: bbox_preds, shape=[N,H,W,5]\n- anchors: anchors, shape=[H,W,5]\n- return:\n- bboxes: return decoded bboxes, shape=[N*H*W,5]\n- \"\"\"\n-\n- num_imgs, H, W, _ = bbox_preds.shape\n- bbox_delta = paddle.reshape(bbox_preds, [-1, 5])\n- bboxes = self.delta2rbox(anchors, bbox_delta, means, stds,\n- wh_ratio_clip)\n- return bboxes\n-\ndef get_prediction(self, nms_pre):\nrefine_anchors = self.refine_anchor_list\nfam_cls_branch_list, fam_reg_branch_list, odm_cls_branch_list, odm_reg_branch_list = self.s2anet_head_out\n@@ -584,7 +502,6 @@ class S2ANetHead(nn.Layer):\nnms_pre,\ncls_out_channels=self.cls_out_channels,\nuse_sigmoid_cls=self.use_sigmoid_cls)\n-\nreturn pred_scores, pred_bboxes\ndef smooth_l1_loss(self, pred, label, delta=1.0 / 9.0):\n@@ -603,23 +520,40 @@ class S2ANetHead(nn.Layer):\nreturn loss\ndef get_fam_loss(self, fam_target, s2anet_head_out):\n- (feat_labels, feat_label_weights, feat_bbox_targets, feat_bbox_weights,\n- pos_inds, neg_inds) = fam_target\n- fam_cls_score, fam_bbox_pred = s2anet_head_out\n-\n- # step1: sample count\n+ (labels, label_weights, bbox_targets, bbox_weights, pos_inds,\n+ neg_inds) = fam_target\n+ fam_cls_branch_list, fam_reg_branch_list, odm_cls_branch_list, odm_reg_branch_list = s2anet_head_out\n+\n+ fam_cls_losses = []\n+ fam_bbox_losses = []\n+ st_idx = 0\n+ featmap_sizes = [self.featmap_sizes[e] for e in self.featmap_sizes]\nnum_total_samples = len(pos_inds) + len(\nneg_inds) if self.sampling else len(pos_inds)\nnum_total_samples = max(1, num_total_samples)\n+ for idx, feat_size in enumerate(featmap_sizes):\n+ feat_anchor_num = feat_size[0] * feat_size[1]\n+\n+ # step1: get data\n+ feat_labels = labels[st_idx:st_idx + feat_anchor_num]\n+ feat_label_weights = label_weights[st_idx:st_idx + feat_anchor_num]\n+\n+ feat_bbox_targets = bbox_targets[st_idx:st_idx + feat_anchor_num, :]\n+ feat_bbox_weights = bbox_weights[st_idx:st_idx + feat_anchor_num, :]\n+ st_idx += feat_anchor_num\n+\n# step2: calc cls loss\nfeat_labels = feat_labels.reshape(-1)\nfeat_label_weights = feat_label_weights.reshape(-1)\n+\n+ fam_cls_score = fam_cls_branch_list[idx]\nfam_cls_score = paddle.squeeze(fam_cls_score, axis=0)\nfam_cls_score1 = fam_cls_score\nfeat_labels = paddle.to_tensor(feat_labels)\n- feat_labels_one_hot = F.one_hot(feat_labels, self.cls_out_channels + 1)\n+ feat_labels_one_hot = paddle.nn.functional.one_hot(\n+ feat_labels, self.cls_out_channels + 1)\nfeat_labels_one_hot = feat_labels_one_hot[:, 1:]\nfeat_labels_one_hot.stop_gradient = True\n@@ -641,11 +575,15 @@ class S2ANetHead(nn.Layer):\nfam_cls = fam_cls * feat_label_weights\nfam_cls_total = paddle.sum(fam_cls)\n+ fam_cls_losses.append(fam_cls_total)\n# step3: regression loss\n+ fam_bbox_pred = 
fam_reg_branch_list[idx]\nfeat_bbox_targets = paddle.to_tensor(\nfeat_bbox_targets, dtype='float32', stop_gradient=True)\nfeat_bbox_targets = paddle.reshape(feat_bbox_targets, [-1, 5])\n+\n+ fam_bbox_pred = fam_reg_branch_list[idx]\nfam_bbox_pred = paddle.squeeze(fam_bbox_pred, axis=0)\nfam_bbox_pred = paddle.reshape(fam_bbox_pred, [-1, 5])\nfam_bbox = self.smooth_l1_loss(fam_bbox_pred, feat_bbox_targets)\n@@ -657,36 +595,54 @@ class S2ANetHead(nn.Layer):\nfam_bbox = fam_bbox * feat_bbox_weights\nfam_bbox_total = paddle.sum(fam_bbox) / num_total_samples\n+ fam_bbox_losses.append(fam_bbox_total)\n+\n+ fam_cls_loss = paddle.add_n(fam_cls_losses)\nfam_cls_loss_weight = paddle.to_tensor(\nself.cls_loss_weight[0], dtype='float32', stop_gradient=True)\n- fam_cls_loss = fam_cls_total * fam_cls_loss_weight\n- fam_reg_loss = paddle.add_n(fam_bbox_total)\n+ fam_cls_loss = fam_cls_loss * fam_cls_loss_weight\n+ fam_reg_loss = paddle.add_n(fam_bbox_losses)\nreturn fam_cls_loss, fam_reg_loss\ndef get_odm_loss(self, odm_target, s2anet_head_out):\n- (feat_labels, feat_label_weights, feat_bbox_targets, feat_bbox_weights,\n- pos_inds, neg_inds) = odm_target\n- odm_cls_score, odm_bbox_pred = s2anet_head_out\n-\n- # step1: sample count\n+ (labels, label_weights, bbox_targets, bbox_weights, pos_inds,\n+ neg_inds) = odm_target\n+ fam_cls_branch_list, fam_reg_branch_list, odm_cls_branch_list, odm_reg_branch_list = s2anet_head_out\n+\n+ odm_cls_losses = []\n+ odm_bbox_losses = []\n+ st_idx = 0\n+ featmap_sizes = [self.featmap_sizes[e] for e in self.featmap_sizes]\nnum_total_samples = len(pos_inds) + len(\nneg_inds) if self.sampling else len(pos_inds)\nnum_total_samples = max(1, num_total_samples)\n+ for idx, feat_size in enumerate(featmap_sizes):\n+ feat_anchor_num = feat_size[0] * feat_size[1]\n+\n+ # step1: get data\n+ feat_labels = labels[st_idx:st_idx + feat_anchor_num]\n+ feat_label_weights = label_weights[st_idx:st_idx + feat_anchor_num]\n+\n+ feat_bbox_targets = bbox_targets[st_idx:st_idx + feat_anchor_num, :]\n+ feat_bbox_weights = bbox_weights[st_idx:st_idx + feat_anchor_num, :]\n+ st_idx += feat_anchor_num\n# step2: calc cls loss\nfeat_labels = feat_labels.reshape(-1)\nfeat_label_weights = feat_label_weights.reshape(-1)\n+\n+ odm_cls_score = odm_cls_branch_list[idx]\nodm_cls_score = paddle.squeeze(odm_cls_score, axis=0)\nodm_cls_score1 = odm_cls_score\nfeat_labels = paddle.to_tensor(feat_labels)\n- feat_labels_one_hot = F.one_hot(feat_labels, self.cls_out_channels + 1)\n+ feat_labels_one_hot = paddle.nn.functional.one_hot(\n+ feat_labels, self.cls_out_channels + 1)\nfeat_labels_one_hot = feat_labels_one_hot[:, 1:]\nfeat_labels_one_hot.stop_gradient = True\nnum_total_samples = paddle.to_tensor(\nnum_total_samples, dtype='float32', stop_gradient=True)\n-\nodm_cls = F.sigmoid_focal_loss(\nodm_cls_score1,\nfeat_labels_one_hot,\n@@ -697,16 +653,20 @@ class S2ANetHead(nn.Layer):\nfeat_label_weights.shape[0], 1)\nfeat_label_weights = np.repeat(\nfeat_label_weights, self.cls_out_channels, axis=1)\n- feat_label_weights = paddle.to_tensor(\n- feat_label_weights, stop_gradient=True)\n+ feat_label_weights = paddle.to_tensor(feat_label_weights)\n+ feat_label_weights.stop_gradient = True\nodm_cls = odm_cls * feat_label_weights\nodm_cls_total = paddle.sum(odm_cls)\n+ odm_cls_losses.append(odm_cls_total)\n- # step3: regression loss\n+ # # step3: regression loss\nfeat_bbox_targets = paddle.to_tensor(\n- feat_bbox_targets, dtype='float32', stop_gradient=True)\n+ feat_bbox_targets, dtype='float32')\nfeat_bbox_targets = 
paddle.reshape(feat_bbox_targets, [-1, 5])\n+ feat_bbox_targets.stop_gradient = True\n+\n+ odm_bbox_pred = odm_reg_branch_list[idx]\nodm_bbox_pred = paddle.squeeze(odm_bbox_pred, axis=0)\nodm_bbox_pred = paddle.reshape(odm_bbox_pred, [-1, 5])\nodm_bbox = self.smooth_l1_loss(odm_bbox_pred, feat_bbox_targets)\n@@ -717,11 +677,13 @@ class S2ANetHead(nn.Layer):\nfeat_bbox_weights, stop_gradient=True)\nodm_bbox = odm_bbox * feat_bbox_weights\nodm_bbox_total = paddle.sum(odm_bbox) / num_total_samples\n+ odm_bbox_losses.append(odm_bbox_total)\n+ odm_cls_loss = paddle.add_n(odm_cls_losses)\nodm_cls_loss_weight = paddle.to_tensor(\n- self.cls_loss_weight[0], dtype='float32', stop_gradient=True)\n- odm_cls_loss = odm_cls_total * odm_cls_loss_weight\n- odm_reg_loss = paddle.add_n(odm_bbox_total)\n+ self.cls_loss_weight[1], dtype='float32', stop_gradient=True)\n+ odm_cls_loss = odm_cls_loss * odm_cls_loss_weight\n+ odm_reg_loss = paddle.add_n(odm_bbox_losses)\nreturn odm_cls_loss, odm_reg_loss\ndef get_loss(self, inputs):\n@@ -743,38 +705,46 @@ class S2ANetHead(nn.Layer):\nis_crowd = inputs['is_crowd'][im_id].numpy()\ngt_labels = gt_labels + 1\n+ # featmap_sizes\n+ featmap_sizes = [self.featmap_sizes[e] for e in self.featmap_sizes]\n+ anchors_list, valid_flag_list = self.get_init_anchors(featmap_sizes,\n+ np_im_shape)\n+ anchors_list_all = []\n+ for ii, anchor in enumerate(anchors_list):\n+ anchor = anchor.reshape(-1, 4)\n+ anchor = bbox_utils.rect2rbox(anchor)\n+ anchors_list_all.extend(anchor)\n+ anchors_list_all = np.array(anchors_list_all)\n+\n+ # get im_feat\n+ fam_cls_feats_list = [e[im_id] for e in self.s2anet_head_out[0]]\n+ fam_reg_feats_list = [e[im_id] for e in self.s2anet_head_out[1]]\n+ odm_cls_feats_list = [e[im_id] for e in self.s2anet_head_out[2]]\n+ odm_reg_feats_list = [e[im_id] for e in self.s2anet_head_out[3]]\n+ im_s2anet_head_out = (fam_cls_feats_list, fam_reg_feats_list,\n+ odm_cls_feats_list, odm_reg_feats_list)\n+\n# FAM\n- for idx, rbox_anchors in enumerate(self.rbox_anchors_list):\n- rbox_anchors = rbox_anchors.numpy()\n- rbox_anchors = rbox_anchors.reshape(-1, 5)\n- im_fam_target = self.anchor_assign(rbox_anchors, gt_bboxes,\n+ im_fam_target = self.anchor_assign(anchors_list_all, gt_bboxes,\ngt_labels, is_crowd)\n- # feat\n- fam_cls_feat = self.s2anet_head_out[0][idx][im_id]\n- fam_reg_feat = self.s2anet_head_out[1][idx][im_id]\n-\n- im_s2anet_fam_feat = (fam_cls_feat, fam_reg_feat)\n+ if im_fam_target is not None:\nim_fam_cls_loss, im_fam_reg_loss = self.get_fam_loss(\n- im_fam_target, im_s2anet_fam_feat)\n+ im_fam_target, im_s2anet_head_out)\nfam_cls_loss_lst.append(im_fam_cls_loss)\nfam_reg_loss_lst.append(im_fam_reg_loss)\n# ODM\n- for idx, refine_anchors in enumerate(self.refine_anchor_list):\n- refine_anchors = refine_anchors.numpy()\n- refine_anchors = refine_anchors.reshape(-1, 5)\n- im_odm_target = self.anchor_assign(refine_anchors, gt_bboxes,\n+ refine_anchors_list, valid_flag_list = self.get_refine_anchors(\n+ featmap_sizes, image_shape=np_im_shape)\n+ refine_anchors_list = np.array(refine_anchors_list)\n+ im_odm_target = self.anchor_assign(refine_anchors_list, gt_bboxes,\ngt_labels, is_crowd)\n- odm_cls_feat = self.s2anet_head_out[2][idx][im_id]\n- odm_reg_feat = self.s2anet_head_out[3][idx][im_id]\n-\n- im_s2anet_odm_feat = (odm_cls_feat, odm_reg_feat)\n+ if im_odm_target is not None:\nim_odm_cls_loss, im_odm_reg_loss = self.get_odm_loss(\n- im_odm_target, im_s2anet_odm_feat)\n+ im_odm_target, 
im_s2anet_head_out)\nodm_cls_loss_lst.append(im_odm_cls_loss)\nodm_reg_loss_lst.append(im_odm_reg_loss)\n-\nfam_cls_loss = paddle.add_n(fam_cls_loss_lst)\nfam_reg_loss = paddle.add_n(fam_reg_loss_lst)\nodm_cls_loss = paddle.add_n(odm_cls_loss_lst)\n@@ -786,6 +756,65 @@ class S2ANetHead(nn.Layer):\n'odm_reg_loss': odm_reg_loss\n}\n+ def get_init_anchors(self, featmap_sizes, image_shape):\n+ \"\"\"Get anchors according to feature map sizes.\n+\n+ Args:\n+ featmap_sizes (list[tuple]): Multi-level feature map sizes.\n+ image_shape (list[dict]): Image meta info.\n+ Returns:\n+ tuple: anchors of each image, valid flags of each image\n+ \"\"\"\n+ num_levels = len(featmap_sizes)\n+\n+ # since feature map sizes of all images are the same, we only compute\n+ # anchors for one time\n+ anchor_list = []\n+ for i in range(num_levels):\n+ anchors = self.anchor_generators[i].grid_anchors(\n+ featmap_sizes[i], self.anchor_strides[i])\n+ anchor_list.append(anchors)\n+\n+ # for each image, we compute valid flags of multi level anchors\n+ valid_flag_list = []\n+ for i in range(num_levels):\n+ anchor_stride = self.anchor_strides[i]\n+ feat_h, feat_w = featmap_sizes[i]\n+ h, w = image_shape\n+ valid_feat_h = min(int(np.ceil(h / anchor_stride)), feat_h)\n+ valid_feat_w = min(int(np.ceil(w / anchor_stride)), feat_w)\n+ flags = self.anchor_generators[i].valid_flags(\n+ (feat_h, feat_w), (valid_feat_h, valid_feat_w))\n+ valid_flag_list.append(flags)\n+\n+ return anchor_list, valid_flag_list\n+\n+ def get_refine_anchors(self, featmap_sizes, image_shape):\n+ num_levels = len(featmap_sizes)\n+\n+ refine_anchors_list = []\n+ for i in range(num_levels):\n+ refine_anchor = self.refine_anchor_list[i]\n+ refine_anchor = paddle.squeeze(refine_anchor, axis=0)\n+ refine_anchor = refine_anchor.numpy()\n+ refine_anchor = np.reshape(refine_anchor,\n+ [-1, refine_anchor.shape[-1]])\n+ refine_anchors_list.extend(refine_anchor)\n+\n+ # for each image, we compute valid flags of multi level anchors\n+ valid_flag_list = []\n+ for i in range(num_levels):\n+ anchor_stride = self.anchor_strides[i]\n+ feat_h, feat_w = featmap_sizes[i]\n+ h, w = image_shape\n+ valid_feat_h = min(int(np.ceil(h / anchor_stride)), feat_h)\n+ valid_feat_w = min(int(np.ceil(w / anchor_stride)), feat_w)\n+ flags = self.anchor_generators[i].valid_flags(\n+ (feat_h, feat_w), (valid_feat_h, valid_feat_w))\n+ valid_flag_list.append(flags)\n+\n+ return refine_anchors_list, valid_flag_list\n+\ndef get_bboxes(self, cls_score_list, bbox_pred_list, mlvl_anchors, nms_pre,\ncls_out_channels, use_sigmoid_cls):\nassert len(cls_score_list) == len(bbox_pred_list) == len(mlvl_anchors)\n@@ -819,8 +848,10 @@ class S2ANetHead(nn.Layer):\nbbox_pred = paddle.gather(bbox_pred, topk_inds)\nscores = paddle.gather(scores, topk_inds)\n- bboxes = self.delta2rbox(anchors, bbox_pred, self.target_means,\n- self.target_stds)\n+ target_means = (.0, .0, .0, .0, .0)\n+ target_stds = (1.0, 1.0, 1.0, 1.0, 1.0)\n+ bboxes = bbox_utils.delta2rbox(anchors, bbox_pred, target_means,\n+ target_stds)\nmlvl_bboxes.append(bboxes)\nmlvl_scores.append(scores)\n" }, { "change_type": "MODIFY", "old_path": "ppdet/modeling/proposal_generator/target_layer.py", "new_path": "ppdet/modeling/proposal_generator/target_layer.py", "diff": "@@ -296,7 +296,7 @@ class RBoxAssigner(object):\nanchors = anchors.reshape(-1, anchors.shape[-1])\nassert anchors.ndim == 2\nanchor_num = anchors.shape[0]\n- anchor_valid = np.ones((anchor_num), np.uint8)\n+ anchor_valid = np.ones((anchor_num), np.int32)\nanchor_inds = 
np.arange(anchor_num)\nreturn anchor_inds\n@@ -371,9 +371,8 @@ class RBoxAssigner(object):\n# calc rbox iou\nanchors_xc_yc = anchors_xc_yc.astype(np.float32)\ngt_bboxes_xc_yc = gt_bboxes_xc_yc.astype(np.float32)\n- anchors_xc_yc = paddle.to_tensor(anchors_xc_yc, place=paddle.CPUPlace())\n- gt_bboxes_xc_yc = paddle.to_tensor(\n- gt_bboxes_xc_yc, place=paddle.CPUPlace())\n+ anchors_xc_yc = paddle.to_tensor(anchors_xc_yc)\n+ gt_bboxes_xc_yc = paddle.to_tensor(gt_bboxes_xc_yc)\ntry:\nfrom rbox_iou_ops import rbox_iou\n@@ -433,8 +432,7 @@ class RBoxAssigner(object):\nignore_iof_thr = self.ignore_iof_thr\nanchor_num = anchors.shape[0]\n- anchors_inds = self.anchor_valid(anchors)\n- anchors = anchors[anchors_inds]\n+\ngt_bboxes = gt_bboxes\nis_crowd_slice = is_crowd\nnot_crowd_inds = np.where(is_crowd_slice == 0)\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
[dev] reorganize code of s2anet_head (#3481) * reorganize the code, and fix bug * fix typo * remve comment * reorganize code * Revert "reorganize code" This reverts commit 4f928c3a46b264ba8815978b914bf81a46b66b62. * set weight * set default config * fix code style
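The bbox_utils helpers moved in this record define the rect-to-rotated-box convention the head relies on: the longer edge becomes w and a vertical box gets angle pi/2. A condensed, runnable restatement with a small usage check (it mirrors the numpy helper in the diff; the standalone form is only for illustration):

```python
# Runnable restatement of the (xmin, ymin, xmax, ymax) -> (cx, cy, w, h, angle) convention.
import numpy as np

def rect2rbox(bboxes):
    bboxes = bboxes.reshape(-1, 4)
    x_ctr = (bboxes[:, 0] + bboxes[:, 2]) / 2.0
    y_ctr = (bboxes[:, 1] + bboxes[:, 3]) / 2.0
    edges1 = np.abs(bboxes[:, 2] - bboxes[:, 0])      # horizontal edge
    edges2 = np.abs(bboxes[:, 3] - bboxes[:, 1])      # vertical edge
    angles = np.zeros(len(bboxes), dtype=bboxes.dtype)
    rboxes = np.stack((x_ctr, y_ctr, edges1, edges2, angles), axis=1)
    tall = edges1 < edges2                             # keep the long edge as w
    rboxes[tall, 2] = edges2[tall]
    rboxes[tall, 3] = edges1[tall]
    rboxes[tall, 4] = np.pi / 2.0
    return rboxes

print(rect2rbox(np.array([[0., 0., 40., 10.],    # wide box -> angle 0
                          [0., 0., 10., 40.]]))) # tall box -> angle pi/2
```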
499,333
28.06.2021 21:00:48
-28,800
7f8eb5a2b1c5847d39b7091965c319911a043066
fix fleet train
[ { "change_type": "MODIFY", "old_path": "ppdet/engine/trainer.py", "new_path": "ppdet/engine/trainer.py", "diff": "@@ -301,8 +301,7 @@ class Trainer(object):\nmodel = self.model\nif self.cfg.get('fleet', False):\nmodel = fleet.distributed_model(model)\n- self.optimizer = fleet.distributed_optimizer(\n- self.optimizer).user_defined_optimizer\n+ self.optimizer = fleet.distributed_optimizer(self.optimizer)\nelif self._nranks > 1:\nfind_unused_parameters = self.cfg[\n'find_unused_parameters'] if 'find_unused_parameters' in self.cfg else False\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
fix fleet train (#3527)
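The one-line change keeps the object returned by fleet.distributed_optimizer instead of unwrapping it, so updates actually go through the fleet wrapper. A minimal dygraph usage sketch under the standard collective setup (placeholder model and data; meant to be launched with `python -m paddle.distributed.launch` on multiple cards):

```python
# Minimal collective-training sketch with paddle.distributed.fleet (illustrative).
import paddle
from paddle.distributed import fleet

fleet.init(is_collective=True)

model = paddle.nn.Linear(10, 10)                                     # placeholder model
opt = paddle.optimizer.SGD(learning_rate=0.01, parameters=model.parameters())

model = fleet.distributed_model(model)    # wraps forward/backward for gradient sync
opt = fleet.distributed_optimizer(opt)    # keep this wrapper; do not unwrap it

loss = model(paddle.randn([4, 10])).mean()
loss.backward()
opt.step()
opt.clear_grad()
```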
499,395
29.06.2021 20:47:27
-28,800
1cae81443cddbfabc2c4b68761aed9a7398d9ef3
add mosaic data augmentation
[ { "change_type": "MODIFY", "old_path": "ppdet/data/transform/op_helper.py", "new_path": "ppdet/data/transform/op_helper.py", "diff": "@@ -462,3 +462,62 @@ def gaussian2D(shape, sigma_x=1, sigma_y=1):\nsigma_y)))\nh[h < np.finfo(h.dtype).eps * h.max()] = 0\nreturn h\n+\n+\n+def transform_bbox(sample,\n+ M,\n+ w,\n+ h,\n+ area_thr=0.25,\n+ wh_thr=2,\n+ ar_thr=20,\n+ perspective=False):\n+ \"\"\"\n+ transfrom bbox according to tranformation matrix M,\n+ refer to https://github.com/ultralytics/yolov5/blob/develop/utils/datasets.py\n+ \"\"\"\n+ bbox = sample['gt_bbox']\n+ label = sample['gt_class']\n+ # rotate bbox\n+ n = len(bbox)\n+ xy = np.ones((n * 4, 3), dtype=np.float32)\n+ xy[:, :2] = bbox[:, [0, 1, 2, 3, 0, 3, 2, 1]].reshape(n * 4, 2)\n+ # xy = xy @ M.T\n+ xy = np.matmul(xy, M.T)\n+ if perspective:\n+ xy = (xy[:, :2] / xy[:, 2:3]).reshape(n, 8)\n+ else:\n+ xy = xy[:, :2].reshape(n, 8)\n+ # get new bboxes\n+ x = xy[:, [0, 2, 4, 6]]\n+ y = xy[:, [1, 3, 5, 7]]\n+ bbox = np.concatenate(\n+ (x.min(1), y.min(1), x.max(1), y.max(1))).reshape(4, n).T\n+ # clip boxes\n+ mask = filter_bbox(bbox, w, h, area_thr)\n+ sample['gt_bbox'] = bbox[mask]\n+ sample['gt_class'] = sample['gt_class'][mask]\n+ if 'is_crowd' in sample:\n+ sample['is_crowd'] = sample['is_crowd'][mask]\n+ if 'difficult' in sample:\n+ sample['difficult'] = sample['difficult'][mask]\n+ return sample\n+\n+\n+def filter_bbox(bbox, w, h, area_thr=0.25, wh_thr=2, ar_thr=20):\n+ \"\"\"\n+ filter bbox, refer to https://github.com/ultralytics/yolov5/blob/develop/utils/datasets.py\n+ \"\"\"\n+ # clip boxes\n+ area1 = (bbox[:, 2:4] - bbox[:, 0:2]).prod(1)\n+ bbox[:, [0, 2]] = bbox[:, [0, 2]].clip(0, w)\n+ bbox[:, [1, 3]] = bbox[:, [1, 3]].clip(0, h)\n+ # compute\n+ area2 = (bbox[:, 2:4] - bbox[:, 0:2]).prod(1)\n+ area_ratio = area2 / (area1 + 1e-16)\n+ wh = bbox[:, 2:4] - bbox[:, 0:2]\n+ ar_ratio = np.maximum(wh[:, 1] / (wh[:, 0] + 1e-16),\n+ wh[:, 0] / (wh[:, 1] + 1e-16))\n+ mask = (area_ratio > area_thr) & (\n+ (wh > wh_thr).all(1)) & (ar_ratio < ar_thr)\n+ return mask\n" }, { "change_type": "MODIFY", "old_path": "ppdet/data/transform/operators.py", "new_path": "ppdet/data/transform/operators.py", "diff": "@@ -45,7 +45,7 @@ from .op_helper import (satisfy_sample_constraint, filter_and_process,\ngenerate_sample_bbox, clip_bbox, data_anchor_sampling,\nsatisfy_sample_constraint_coverage, crop_image_sampling,\ngenerate_sample_bbox_square, bbox_area_sampling,\n- is_poly, gaussian_radius, draw_gaussian)\n+ is_poly, gaussian_radius, draw_gaussian, transform_bbox)\nfrom ppdet.utils.logger import setup_logger\nlogger = setup_logger(__name__)\n@@ -1767,8 +1767,8 @@ class DebugVisibleImage(BaseOperator):\nraise TypeError(\"{}: input type is invalid.\".format(self))\ndef apply(self, sample, context=None):\n- image = Image.open(sample['im_file']).convert('RGB')\n- out_file_name = sample['im_file'].split('/')[-1]\n+ image = Image.fromarray(sample['image'].astype(np.uint8))\n+ out_file_name = '{:012d}.jpg'.format(sample['im_id'][0])\nwidth = sample['w']\nheight = sample['h']\ngt_bbox = sample['gt_bbox']\n@@ -2350,3 +2350,181 @@ class RandomResizeCrop(BaseOperator):\nsample['gt_segm'] = np.asarray(masks).astype(np.uint8)\nreturn sample\n+\n+\n+class RandomPerspective(BaseOperator):\n+ \"\"\"\n+ Rotate, tranlate, scale, shear and perspect image and bboxes randomly,\n+ refer to https://github.com/ultralytics/yolov5/blob/develop/utils/datasets.py\n+\n+ Args:\n+ degree (int): rotation degree, uniformly sampled in [-degree, degree]\n+ translate (float): 
translate fraction, translate_x and translate_y are uniformly sampled\n+ in [0.5 - translate, 0.5 + translate]\n+ scale (float): scale factor, uniformly sampled in [1 - scale, 1 + scale]\n+ shear (int): shear degree, shear_x and shear_y are uniformly sampled in [-shear, shear]\n+ perspective (float): perspective_x and perspective_y are uniformly sampled in [-perspective, perspective]\n+ area_thr (float): the area threshold of bbox to be kept after transformation, default 0.25\n+ fill_value (tuple): value used in case of a constant border, default (114, 114, 114)\n+ \"\"\"\n+\n+ def __init__(self,\n+ degree=10,\n+ translate=0.1,\n+ scale=0.1,\n+ shear=10,\n+ perspective=0.0,\n+ border=[0, 0],\n+ area_thr=0.25,\n+ fill_value=(114, 114, 114)):\n+ super(RandomPerspective, self).__init__()\n+ self.degree = degree\n+ self.translate = translate\n+ self.scale = scale\n+ self.shear = shear\n+ self.perspective = perspective\n+ self.border = border\n+ self.area_thr = area_thr\n+ self.fill_value = fill_value\n+\n+ def apply(self, sample, context=None):\n+ im = sample['image']\n+ height = im.shape[0] + self.border[0] * 2\n+ width = im.shape[1] + self.border[1] * 2\n+\n+ # center\n+ C = np.eye(3)\n+ C[0, 2] = -im.shape[1] / 2\n+ C[1, 2] = -im.shape[0] / 2\n+\n+ # perspective\n+ P = np.eye(3)\n+ P[2, 0] = random.uniform(-self.perspective, self.perspective)\n+ P[2, 1] = random.uniform(-self.perspective, self.perspective)\n+\n+ # Rotation and scale\n+ R = np.eye(3)\n+ a = random.uniform(-self.degree, self.degree)\n+ s = random.uniform(1 - self.scale, 1 + self.scale)\n+ R[:2] = cv2.getRotationMatrix2D(angle=a, center=(0, 0), scale=s)\n+\n+ # Shear\n+ S = np.eye(3)\n+ # shear x (deg)\n+ S[0, 1] = math.tan(\n+ random.uniform(-self.shear, self.shear) * math.pi / 180)\n+ # shear y (deg)\n+ S[1, 0] = math.tan(\n+ random.uniform(-self.shear, self.shear) * math.pi / 180)\n+\n+ # Translation\n+ T = np.eye(3)\n+ T[0, 2] = random.uniform(0.5 - self.translate,\n+ 0.5 + self.translate) * width\n+ T[1, 2] = random.uniform(0.5 - self.translate,\n+ 0.5 + self.translate) * height\n+\n+ # matmul\n+ # M = T @ S @ R @ P @ C\n+ M = np.eye(3)\n+ for cM in [T, S, R, P, C]:\n+ M = np.matmul(M, cM)\n+\n+ if (self.border[0] != 0) or (self.border[1] != 0) or (\n+ M != np.eye(3)).any():\n+ if self.perspective:\n+ im = cv2.warpPerspective(\n+ im, M, dsize=(width, height), borderValue=self.fill_value)\n+ else:\n+ im = cv2.warpAffine(\n+ im,\n+ M[:2],\n+ dsize=(width, height),\n+ borderValue=self.fill_value)\n+\n+ sample['image'] = im\n+ if sample['gt_bbox'].shape[0] > 0:\n+ sample = transform_bbox(\n+ sample,\n+ M,\n+ width,\n+ height,\n+ area_thr=self.area_thr,\n+ perspective=self.perspective)\n+\n+ return sample\n+\n+\n+@register_op\n+class Mosaic(BaseOperator):\n+ \"\"\"\n+ Mosaic Data Augmentation, refer to https://github.com/ultralytics/yolov5/blob/develop/utils/datasets.py\n+\n+ \"\"\"\n+\n+ def __init__(self,\n+ target_size,\n+ mosaic_border=None,\n+ fill_value=(114, 114, 114)):\n+ super(Mosaic, self).__init__()\n+ self.target_size = target_size\n+ if mosaic_border is None:\n+ mosaic_border = (-target_size // 2, -target_size // 2)\n+ self.mosaic_border = mosaic_border\n+ self.fill_value = fill_value\n+\n+ def __call__(self, sample, context=None):\n+ if not isinstance(sample, Sequence):\n+ return sample\n+\n+ s = self.target_size\n+ yc, xc = [\n+ int(random.uniform(-x, 2 * s + x)) for x in self.mosaic_border\n+ ]\n+ boxes = [x['gt_bbox'] for x in sample]\n+ labels = [x['gt_class'] for x in sample]\n+ for i in 
range(len(sample)):\n+ im = sample[i]['image']\n+ h, w, c = im.shape\n+\n+ if i == 0: # top left\n+ image = np.ones(\n+ (s * 2, s * 2, c), dtype=np.uint8) * self.fill_value\n+ # xmin, ymin, xmax, ymax (dst image)\n+ x1a, y1a, x2a, y2a = max(xc - w, 0), max(yc - h, 0), xc, yc\n+ # xmin, ymin, xmax, ymax (src image)\n+ x1b, y1b, x2b, y2b = w - (x2a - x1a), h - (y2a - y1a), w, h\n+ elif i == 1: # top right\n+ x1a, y1a, x2a, y2a = xc, max(yc - h, 0), min(xc + w, s * 2), yc\n+ x1b, y1b, x2b, y2b = 0, h - (y2a - y1a), min(w, x2a - x1a), h\n+ elif i == 2: # bottom left\n+ x1a, y1a, x2a, y2a = max(xc - w, 0), yc, xc, min(s * 2, yc + h)\n+ x1b, y1b, x2b, y2b = w - (x2a - x1a), 0, max(xc, w), min(\n+ y2a - y1a, h)\n+ elif i == 3: # bottom right\n+ x1a, y1a, x2a, y2a = xc, yc, min(xc + w,\n+ s * 2), min(s * 2, yc + h)\n+ x1b, y1b, x2b, y2b = 0, 0, min(w, x2a - x1a), min(y2a - y1a, h)\n+\n+ image[y1a:y2a, x1a:x2a] = im[y1b:y2b, x1b:x2b]\n+ padw = x1a - x1b\n+ padh = y1a - y1b\n+ boxes[i] = boxes[i] + (padw, padh, padw, padh)\n+\n+ boxes = np.concatenate(boxes, axis=0)\n+ boxes = np.clip(boxes, 0, s * 2)\n+ labels = np.concatenate(labels, axis=0)\n+ if 'is_crowd' in sample[0]:\n+ is_crowd = np.concatenate([x['is_crowd'] for x in sample], axis=0)\n+ if 'difficult' in sample[0]:\n+ difficult = np.concatenate([x['difficult'] for x in sample], axis=0)\n+ sample = sample[0]\n+ sample['image'] = image.astype(np.uint8)\n+ sample['gt_bbox'] = boxes\n+ sample['gt_class'] = labels\n+ if 'is_crowd' in sample:\n+ sample['is_crowd'] = is_crowd\n+ if 'difficult' in sample:\n+ sample['difficult'] = difficult\n+\n+ return sample\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
add mosaic data augmentation (#3185)
499,333
30.06.2021 01:18:06
-28,800
39ff9f2f4729d1727966670bc7d80766b44713c8
fix score threshold in mot_infer
[ { "change_type": "MODIFY", "old_path": "deploy/python/mot_infer.py", "new_path": "deploy/python/mot_infer.py", "diff": "@@ -93,7 +93,7 @@ class MOT_Detector(object):\ninputs = create_inputs(im, im_info)\nreturn inputs\n- def postprocess(self, pred_dets, pred_embs):\n+ def postprocess(self, pred_dets, pred_embs, threshold):\nonline_targets = self.tracker.update(pred_dets, pred_embs)\nonline_tlwhs, online_ids = [], []\nonline_scores = []\n@@ -101,6 +101,7 @@ class MOT_Detector(object):\ntlwh = t.tlwh\ntid = t.track_id\ntscore = t.score\n+ if tscore < threshold: continue\nvertical = tlwh[2] / tlwh[3] > 1.6\nif tlwh[2] * tlwh[3] > self.tracker.min_box_area and not vertical:\nonline_tlwhs.append(tlwh)\n@@ -137,8 +138,8 @@ class MOT_Detector(object):\nself.det_times.inference_time_s.end(repeats=repeats)\nself.det_times.postprocess_time_s.start()\n- online_tlwhs, online_scores, online_ids = self.postprocess(pred_dets,\n- pred_embs)\n+ online_tlwhs, online_scores, online_ids = self.postprocess(\n+ pred_dets, pred_embs, threshold)\nself.det_times.postprocess_time_s.end()\nself.det_times.img_num += 1\nreturn online_tlwhs, online_scores, online_ids\n@@ -363,7 +364,8 @@ def predict_video(detector, camera_id):\nonline_ids,\nonline_scores,\nframe_id=frame_id,\n- fps=fps)\n+ fps=fps,\n+ threhold=FLAGS.threshold)\nif FLAGS.save_images:\nsave_dir = os.path.join(FLAGS.output_dir, video_name.split('.')[-2])\nif not os.path.exists(save_dir):\n" }, { "change_type": "MODIFY", "old_path": "ppdet/engine/tracker.py", "new_path": "ppdet/engine/tracker.py", "diff": "@@ -112,7 +112,8 @@ class Tracker(object):\ndataloader,\nsave_dir=None,\nshow_image=False,\n- frame_rate=30):\n+ frame_rate=30,\n+ draw_threshold=0):\nif save_dir:\nif not os.path.exists(save_dir): os.makedirs(save_dir)\ntracker = self.model.tracker\n@@ -140,6 +141,7 @@ class Tracker(object):\ntlwh = t.tlwh\ntid = t.track_id\ntscore = t.score\n+ if tscore < draw_threshold: continue\nvertical = tlwh[2] / tlwh[3] > 1.6\nif tlwh[2] * tlwh[3] > tracker.min_box_area and not vertical:\nonline_tlwhs.append(tlwh)\n@@ -162,7 +164,8 @@ class Tracker(object):\nsave_dir=None,\nshow_image=False,\nframe_rate=30,\n- det_file=''):\n+ det_file='',\n+ draw_threshold=0):\nif save_dir:\nif not os.path.exists(save_dir): os.makedirs(save_dir)\ntracker = self.model.tracker\n@@ -191,6 +194,7 @@ class Tracker(object):\ndets = dets_list[frame_id]\nbbox_tlwh = paddle.to_tensor(dets['bbox'], dtype='float32')\npred_scores = paddle.to_tensor(dets['score'], dtype='float32')\n+ if pred_scores < draw_threshold: continue\nif bbox_tlwh.shape[0] > 0:\npred_bboxes = paddle.concat(\n(bbox_tlwh[:, 0:2],\n@@ -343,7 +347,8 @@ class Tracker(object):\nsave_images=False,\nsave_videos=True,\nshow_image=False,\n- det_results_dir=''):\n+ det_results_dir='',\n+ draw_threshold=0.5):\nif not os.path.exists(output_dir): os.makedirs(output_dir)\nresult_root = os.path.join(output_dir, 'mot_results')\nif not os.path.exists(result_root): os.makedirs(result_root)\n@@ -369,7 +374,8 @@ class Tracker(object):\ndataloader,\nsave_dir=save_dir,\nshow_image=show_image,\n- frame_rate=frame_rate)\n+ frame_rate=frame_rate,\n+ draw_threshold=draw_threshold)\nelif model_type in ['DeepSORT']:\nresults, nf, ta, tc = self._eval_seq_sde(\ndataloader,\n@@ -377,7 +383,8 @@ class Tracker(object):\nshow_image=show_image,\nframe_rate=frame_rate,\ndet_file=os.path.join(det_results_dir,\n- '{}.txt'.format(seq)))\n+ '{}.txt'.format(seq)),\n+ draw_threshold=draw_threshold)\nelse:\nraise ValueError(model_type)\n" }, { "change_type": 
"MODIFY", "old_path": "tools/infer_mot.py", "new_path": "tools/infer_mot.py", "diff": "@@ -68,6 +68,11 @@ def parse_args():\n'--show_image',\naction='store_true',\nhelp='Show tracking results (image).')\n+ parser.add_argument(\n+ \"--draw_threshold\",\n+ type=float,\n+ default=0.5,\n+ help=\"Threshold to reserve the result for visualization.\")\nargs = parser.parse_args()\nreturn args\n@@ -94,7 +99,8 @@ def run(FLAGS, cfg):\nsave_images=FLAGS.save_images,\nsave_videos=FLAGS.save_videos,\nshow_image=FLAGS.show_image,\n- det_results_dir=FLAGS.det_results_dir)\n+ det_results_dir=FLAGS.det_results_dir,\n+ draw_threshold=FLAGS.draw_threshold)\ndef main():\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
fix score threshold in mot_infer (#3444)
499,333
30.06.2021 15:45:11
-28,800
3a497c5f4c031e164ddf95acd75dad70d6ea9a3e
add image_shape in testreader for topdown models
[ { "change_type": "MODIFY", "old_path": "configs/keypoint/hrnet/dark_hrnet_w32_256x192.yml", "new_path": "configs/keypoint/hrnet/dark_hrnet_w32_256x192.yml", "diff": "@@ -128,6 +128,8 @@ EvalReader:\ndrop_empty: false\nTestReader:\n+ inputs_def:\n+ image_shape: [3, *train_height, *train_width]\nsample_transforms:\n- Decode: {}\n- TopDownEvalAffine:\n" }, { "change_type": "MODIFY", "old_path": "configs/keypoint/hrnet/dark_hrnet_w32_384x288.yml", "new_path": "configs/keypoint/hrnet/dark_hrnet_w32_384x288.yml", "diff": "@@ -132,6 +132,8 @@ EvalReader:\ndrop_empty: false\nTestReader:\n+ inputs_def:\n+ image_shape: [3, *train_height, *train_width]\nsample_transforms:\n- Decode: {}\n- TopDownEvalAffine:\n" }, { "change_type": "MODIFY", "old_path": "configs/keypoint/hrnet/dark_hrnet_w48_256x192.yml", "new_path": "configs/keypoint/hrnet/dark_hrnet_w48_256x192.yml", "diff": "@@ -128,6 +128,8 @@ EvalReader:\ndrop_empty: false\nTestReader:\n+ inputs_def:\n+ image_shape: [3, *train_height, *train_width]\nsample_transforms:\n- Decode: {}\n- TopDownEvalAffine:\n" }, { "change_type": "MODIFY", "old_path": "configs/keypoint/hrnet/hrnet_w32_256x192.yml", "new_path": "configs/keypoint/hrnet/hrnet_w32_256x192.yml", "diff": "@@ -127,6 +127,8 @@ EvalReader:\nbatch_size: 16\nTestReader:\n+ inputs_def:\n+ image_shape: [3, *train_height, *train_width]\nsample_transforms:\n- Decode: {}\n- TopDownEvalAffine:\n" }, { "change_type": "MODIFY", "old_path": "configs/keypoint/hrnet/hrnet_w32_256x256_mpii.yml", "new_path": "configs/keypoint/hrnet/hrnet_w32_256x256_mpii.yml", "diff": "@@ -118,6 +118,8 @@ EvalReader:\nbatch_size: 16\nTestReader:\n+ inputs_def:\n+ image_shape: [3, *train_height, *train_width]\nsample_transforms:\n- Decode: {}\n- TopDownEvalAffine:\n" }, { "change_type": "MODIFY", "old_path": "configs/keypoint/hrnet/hrnet_w32_384x288.yml", "new_path": "configs/keypoint/hrnet/hrnet_w32_384x288.yml", "diff": "@@ -128,6 +128,8 @@ EvalReader:\nbatch_size: 16\nTestReader:\n+ inputs_def:\n+ image_shape: [3, *train_height, *train_width]\nsample_transforms:\n- Decode: {}\n- TopDownEvalAffine:\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
add image_shape in testreader for topdown models (#3543)
499,339
03.07.2021 16:16:46
-28,800
cb56821264f6a3290085dcc28a27b8b60cfcfd59
add find_unused_parameters in fleet init
[ { "change_type": "MODIFY", "old_path": "ppdet/engine/env.py", "new_path": "ppdet/engine/env.py", "diff": "@@ -26,8 +26,10 @@ from paddle.distributed import fleet\n__all__ = ['init_parallel_env', 'set_random_seed', 'init_fleet_env']\n-def init_fleet_env():\n- fleet.init(is_collective=True)\n+def init_fleet_env(find_unused_parameters=False):\n+ strategy = fleet.DistributedStrategy()\n+ strategy.find_unused_parameters = find_unused_parameters\n+ fleet.init(is_collective=True, strategy=strategy)\ndef init_parallel_env():\n" }, { "change_type": "MODIFY", "old_path": "tools/train.py", "new_path": "tools/train.py", "diff": "@@ -90,7 +90,7 @@ def parse_args():\ndef run(FLAGS, cfg):\n# init fleet environment\nif cfg.fleet:\n- init_fleet_env()\n+ init_fleet_env(cfg.get('find_unused_parameters', False))\nelse:\n# init parallel environment if nranks > 1\ninit_parallel_env()\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
add find_unused_parameters in fleet init (#3570)
499,304
05.07.2021 15:26:17
-28,800
4619f414e43e6fad5b0cd5bc31e2fdc783b8e975
fix export model with solov2_enhance
[ { "change_type": "MODIFY", "old_path": "ppdet/modeling/heads/solov2_head.py", "new_path": "ppdet/modeling/heads/solov2_head.py", "diff": "@@ -252,7 +252,7 @@ class SOLOv2Head(nn.Layer):\nbias_attr=ParamAttr(initializer=Constant(\nvalue=float(-np.log((1 - 0.01) / 0.01))))))\n- if self.drop_block:\n+ if self.drop_block and self.training:\nself.drop_block_fun = DropBlock(\nblock_size=3, keep_prob=0.9, name='solo_cate.dropblock')\n@@ -324,13 +324,13 @@ class SOLOv2Head(nn.Layer):\nfor kernel_layer in self.kernel_pred_convs:\nkernel_feat = F.relu(kernel_layer(kernel_feat))\n- if self.drop_block:\n+ if self.drop_block and self.training:\nkernel_feat = self.drop_block_fun(kernel_feat)\nkernel_pred = self.solo_kernel(kernel_feat)\n# cate branch\nfor cate_layer in self.cate_pred_convs:\ncate_feat = F.relu(cate_layer(cate_feat))\n- if self.drop_block:\n+ if self.drop_block and self.training:\ncate_feat = self.drop_block_fun(cate_feat)\ncate_pred = self.solo_cate(cate_feat)\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
fix export model with solov2_enhance (#3590)
499,304
06.07.2021 10:32:43
-28,800
2c57d210e4e00fb69f887bb6a855616529e714b6
add faster rcnn prune config
[ { "change_type": "ADD", "old_path": null, "new_path": "configs/slim/prune/faster_rcnn_r50_fpn_prune_fpgm.yml", "diff": "+pretrain_weights: https://paddledet.bj.bcebos.com/models/faster_rcnn_r50_fpn_1x_coco.pdparams\n+slim: Pruner\n+\n+Pruner:\n+ criterion: fpgm\n+ pruned_params: ['conv2d_27.w_0', 'conv2d_28.w_0', 'conv2d_29.w_0',\n+ 'conv2d_30.w_0', 'conv2d_31.w_0', 'conv2d_32.w_0',\n+ 'conv2d_33.w_0', 'conv2d_34.w_0', 'conv2d_35.w_0',\n+ 'conv2d_36.w_0', 'conv2d_37.w_0', 'conv2d_38.w_0',\n+ 'conv2d_39.w_0', 'conv2d_40.w_0', 'conv2d_41.w_0',\n+ 'conv2d_42.w_0', 'conv2d_43.w_0', 'conv2d_44.w_0',\n+ 'conv2d_45.w_0', 'conv2d_46.w_0', 'conv2d_47.w_0',\n+ 'conv2d_48.w_0', 'conv2d_49.w_0', 'conv2d_50.w_0',\n+ 'conv2d_51.w_0', 'conv2d_52.w_0']\n+ pruned_ratios: [0.1,0.2,0.2,0.2,0.2,0.1,0.2,0.3,0.3,0.3,0.2,0.1,0.3,0.4,0.4,0.4,0.4,0.3,0.4,0.4,0.4,0.4,0.4,0.4,0.4,0.4]\n+ print_params: False\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
add faster rcnn prune config (#3599)
499,298
07.07.2021 11:38:30
-28,800
84f1cc971fade5c41fa593cfabb4549f340d3906
[MOT] fix mot identity gttxt doc
[ { "change_type": "MODIFY", "old_path": "configs/mot/README.md", "new_path": "configs/mot/README.md", "diff": "@@ -163,7 +163,7 @@ In the annotation text, each line is describing a bounding box and has the follo\n```\n**Notes:**\n- `class` should be `0`. Only single-class multi-object tracking is supported now.\n-- `identity` is an integer from `0` to `num_identities - 1`(`num_identities` is the total number of instances of objects in the dataset), or `-1` if this box has no identity annotation.\n+- `identity` is an integer from `1` to `num_identities`(`num_identities` is the total number of instances of objects in the dataset), or `-1` if this box has no identity annotation.\n- `[x_center] [y_center] [width] [height]` are the center coordinates, width and height, note that they are normalized by the width/height of the image, so they are floating point numbers ranging from 0 to 1.\n### Dataset Directory\n" }, { "change_type": "MODIFY", "old_path": "docs/tutorials/PrepareMOTDataSet.md", "new_path": "docs/tutorials/PrepareMOTDataSet.md", "diff": "@@ -39,7 +39,7 @@ In the annotation text, each line is describing a bounding box and has the follo\n```\n**Notes:**\n- `class` should be `0`. Only single-class multi-object tracking is supported now.\n-- `identity` is an integer from `0` to `num_identities - 1`(`num_identities` is the total number of instances of objects in the dataset), or `-1` if this box has no identity annotation.\n+- `identity` is an integer from `1` to `num_identities`(`num_identities` is the total number of instances of objects in the dataset), or `-1` if this box has no identity annotation.\n- `[x_center] [y_center] [width] [height]` are the center coordinates, width and height, note that they are normalized by the width/height of the image, so they are floating point numbers ranging from 0 to 1.\n@@ -124,16 +124,18 @@ imExt=.jpg\nEach line in `gt.txt` describes a bounding box, with the format as follows:\n```\n-[frame_id],[identity],[bb_left],[bb_top],[width],[height],[x],[y],[z]\n+[frame_id],[identity],[bb_left],[bb_top],[width],[height],[score],[label],[vis_ratio]\n```\n**Notes:**:\n- `frame_id` is the current frame id.\n-- `identity` is an integer from `0` to `num_identities - 1`(`num_identities` is the total number of instances of objects in the dataset), or `-1` if this box has no identity annotation.\n+- `identity` is an integer from `1` to `num_identities`(`num_identities` is the total number of instances of objects in the dataset), or `-1` if this box has no identity annotation.\n- `bb_left` is the x coordinate of the left boundary of the target box\n- `bb_top` is the Y coordinate of the upper boundary of the target box\n- `width, height` are the pixel width and height\n-- `x,y,z` are only used in 3D, default to `-1` in 2D.\n-\n+- `score` acts as a flag whether the entry is to be considered. A value of 0 means that this particular instance is ignored in the evaluation, while a value of 1 is used to mark it as active. `1` by default.\n+- `label` is the type of object annotated, use `1` as default because only single-class multi-object tracking is supported now. There are other classes of object in MOT-16, but they are treated as ignore.\n+- `vis_ratio` is the visibility ratio of each bounding box. This can be due to occlusion by another\n+static or moving object, or due to image border cropping. `1` by default.\n#### labels_with_ids\nAnnotations of these datasets are provided in a unified format. Every image has a corresponding annotation text. 
Given an image path, the annotation text path can be generated by replacing the string `images` with `labels_with_ids` and replacing `.jpg` with `.txt`.\n@@ -144,7 +146,7 @@ In the annotation text, each line is describing a bounding box and has the follo\n```\n**Notes:**\n- `class` should be `0`. Only single-class multi-object tracking is supported now.\n-- `identity` is an integer from `0` to `num_identities - 1`(`num_identities` is the total number of instances of objects in the dataset), or `-1` if this box has no identity annotation.\n+- `identity` is an integer from `1` to `num_identities`(`num_identities` is the total number of instances of objects in the dataset), or `-1` if this box has no identity annotation.\n- `[x_center] [y_center] [width] [height]` are the center coordinates, width and height, note that they are normalized by the width/height of the image, so they are floating point numbers ranging from 0 to 1.\nGenerate the corresponding `labels_with_ids` with following command:\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
[MOT] fix mot identity gttxt doc (#3584)
499,298
07.07.2021 11:38:56
-28,800
43515234055b75115c1b623316c50e1a98b78590
[MOT] fix read_mot_results
[ { "change_type": "MODIFY", "old_path": "ppdet/metrics/mot_metrics.py", "new_path": "ppdet/metrics/mot_metrics.py", "diff": "@@ -48,14 +48,14 @@ def read_mot_results(filename, is_gt=False, is_ignore=False):\nbox_size = float(linelist[4]) * float(linelist[5])\nif is_gt:\n- if 'MOT16-' in filename or 'MOT17-' in filename:\n+ if 'MOT16-' in filename or 'MOT17-' in filename or 'MOT15-' in filename or 'MOT20-' in filename:\nlabel = int(float(linelist[7]))\nmark = int(float(linelist[6]))\nif mark == 0 or label not in valid_labels:\ncontinue\nscore = 1\nelif is_ignore:\n- if 'MOT16-' in filename or 'MOT17-' in filename:\n+ if 'MOT16-' in filename or 'MOT17-' in filename or 'MOT15-' in filename or 'MOT20-' in filename:\nlabel = int(float(linelist[7]))\nvis_ratio = float(linelist[8])\nif label not in ignore_labels and vis_ratio >= 0:\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
[MOT] fix read_mot_results (#3586)
499,298
08.07.2021 00:30:48
-28,800
2ecc6525a478f278060c4b531e6d6d5927ff6371
[MOT] fix jde tensorrt deploy
[ { "change_type": "MODIFY", "old_path": "ppdet/engine/export_utils.py", "new_path": "ppdet/engine/export_utils.py", "diff": "@@ -40,7 +40,7 @@ TRT_MIN_SUBGRAPH = {\n'HigherHRNet': 3,\n'HRNet': 3,\n'DeepSORT': 3,\n- 'JDE': 3,\n+ 'JDE': 10,\n'FairMOT': 5,\n}\n" }, { "change_type": "MODIFY", "old_path": "ppdet/modeling/layers.py", "new_path": "ppdet/modeling/layers.py", "diff": "@@ -927,37 +927,41 @@ class JDEBox(object):\ngy2 = gy + gh * 0.5\nreturn paddle.stack([gx1, gy1, gx2, gy2], axis=1)\n- def decode_delta_map(self, delta_map, anchors):\n- nB, nA, nGh, nGw, _ = delta_map.shape[:]\n- anchor_mesh = self.generate_anchor(nGh, nGw, anchors)\n- # only support bs=1\n+ def decode_delta_map(self, nA, nGh, nGw, delta_map, anchor_vec):\n+ anchor_mesh = self.generate_anchor(nGh, nGw, anchor_vec)\nanchor_mesh = paddle.unsqueeze(anchor_mesh, 0)\n-\npred_list = self.decode_delta(\npaddle.reshape(\ndelta_map, shape=[-1, 4]),\npaddle.reshape(\nanchor_mesh, shape=[-1, 4]))\n- pred_map = paddle.reshape(pred_list, shape=[nB, nA * nGh * nGw, 4])\n+ pred_map = paddle.reshape(pred_list, shape=[nA * nGh * nGw, 4])\nreturn pred_map\ndef _postprocessing_by_level(self, nA, stride, head_out, anchor_vec):\n- boxes_shape = head_out.shape\n- nB, nGh, nGw = 1, boxes_shape[-2], boxes_shape[-1]\n- # only support bs=1\n+ boxes_shape = head_out.shape # [nB, nA*6, nGh, nGw]\n+ nGh, nGw = boxes_shape[-2], boxes_shape[-1]\n+ nB = 1 # TODO: only support bs=1 now\n+ boxes_list, scores_list = [], []\n+ for idx in range(nB):\np = paddle.reshape(\n- head_out, shape=[nB, nA, self.num_classes + 5, nGh, nGw])\n- p = paddle.transpose(p, perm=[0, 1, 3, 4, 2]) # [nB, 4, nGh, nGw, 6]\n- p_box = p[:, :, :, :, :4]\n- boxes = self.decode_delta_map(p_box, anchor_vec) # [nB, 4*nGh*nGw, 4]\n- boxes = boxes * stride\n+ head_out[idx], shape=[nA, self.num_classes + 5, nGh, nGw])\n+ p = paddle.transpose(p, perm=[0, 2, 3, 1]) # [nA, nGh, nGw, 6]\n+ delta_map = p[:, :, :, :4]\n+ boxes = self.decode_delta_map(nA, nGh, nGw, delta_map, anchor_vec)\n+ # [nA * nGh * nGw, 4]\n+ boxes_list.append(boxes * stride)\np_conf = paddle.transpose(\n- p[:, :, :, :, 4:6], perm=[0, 4, 1, 2, 3]) # [nB, 2, 4, 19, 34]\n+ p[:, :, :, 4:6], perm=[3, 0, 1, 2]) # [2, nA, nGh, nGw]\np_conf = F.softmax(\n- p_conf, axis=1)[:, 1, :, :, :].unsqueeze(-1) # [nB, 4, 19, 34, 1]\n- scores = paddle.reshape(p_conf, shape=[nB, nA * nGh * nGw, 1])\n- return boxes, scores\n+ p_conf, axis=0)[1, :, :, :].unsqueeze(-1) # [nA, nGh, nGw, 1]\n+ scores = paddle.reshape(p_conf, shape=[nA * nGh * nGw, 1])\n+ scores_list.append(scores)\n+\n+ boxes_results = paddle.stack(boxes_list)\n+ scores_results = paddle.stack(scores_list)\n+ return boxes_results, scores_results\ndef __call__(self, yolo_head_out, anchors):\nbbox_pred_list = []\n" }, { "change_type": "MODIFY", "old_path": "ppdet/modeling/reid/jde_embedding_head.py", "new_path": "ppdet/modeling/reid/jde_embedding_head.py", "diff": "@@ -175,8 +175,7 @@ class JDEEmbeddingHead(nn.Layer):\nfor i, p_ide in enumerate(ide_outs):\np_ide = p_ide.transpose((0, 2, 3, 1))\n- p_ide_repeat = paddle.tile(\n- p_ide.unsqueeze(axis=0), [1, self.anchor_scales, 1, 1, 1])\n+ p_ide_repeat = paddle.tile(p_ide, [self.anchor_scales, 1, 1, 1])\nembedding = F.normalize(p_ide_repeat, axis=-1)\nemb = paddle.reshape(embedding, [-1, self.embedding_dim])\nemb_outs.append(emb)\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
[MOT] fix jde tensorrt deploy (#3617)
499,304
08.07.2021 11:17:01
-28,800
c101056c64e88d8014888dfadea5d6122b93f77e
thanks for Mandroide contributions
[ { "change_type": "MODIFY", "old_path": "README_en.md", "new_path": "README_en.md", "diff": "@@ -282,6 +282,7 @@ PaddleDetection is released under the [Apache 2.0 license](LICENSE).\n## Contributing\nContributions are highly welcomed and we would really appreciate your feedback!!\n+- Thanks [Mandroide](https://github.com/Mandroide) for cleaning the code and unifying some function interface.\n## Citation\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
thanks for Mandroide contributions (#3632)
499,301
08.07.2021 12:45:23
-28,800
c8f8a3b0347a955d7da0023824f06962a40d3ebe
Thanks for FL77N for contribution of sparse-rcnn
[ { "change_type": "MODIFY", "old_path": "README_en.md", "new_path": "README_en.md", "diff": "@@ -283,6 +283,7 @@ PaddleDetection is released under the [Apache 2.0 license](LICENSE).\nContributions are highly welcomed and we would really appreciate your feedback!!\n- Thanks [Mandroide](https://github.com/Mandroide) for cleaning the code and unifying some function interface.\n+- Thanks [FL77N](https://github.com/FL77N/) for contributing the code of `Sparse-RCNN` model.\n## Citation\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
Thanks for FL77N for contribution of sparse-rcnn (#3634)
499,301
08.07.2021 12:46:49
-28,800
a13180fc409af6cf47bbb87ce07f3b90cabc50c1
fix import problem of _convert_attention_mask
[ { "change_type": "MODIFY", "old_path": "ppdet/modeling/layers.py", "new_path": "ppdet/modeling/layers.py", "diff": "@@ -31,8 +31,6 @@ from . import ops\nfrom .initializer import xavier_uniform_, constant_\nfrom paddle.vision.ops import DeformConv2D\n-from paddle.nn.layer import transformer\n-_convert_attention_mask = transformer._convert_attention_mask\ndef _to_list(l):\n@@ -1195,6 +1193,27 @@ class Concat(nn.Layer):\nreturn 'dim={}'.format(self.dim)\n+def _convert_attention_mask(attn_mask, dtype):\n+ \"\"\"\n+ Convert the attention mask to the target dtype we expect.\n+ Parameters:\n+ attn_mask (Tensor, optional): A tensor used in multi-head attention\n+ to prevents attention to some unwanted positions, usually the\n+ paddings or the subsequent positions. It is a tensor with shape\n+ broadcasted to `[batch_size, n_head, sequence_length, sequence_length]`.\n+ When the data type is bool, the unwanted positions have `False`\n+ values and the others have `True` values. When the data type is\n+ int, the unwanted positions have 0 values and the others have 1\n+ values. When the data type is float, the unwanted positions have\n+ `-INF` values and the others have 0 values. It can be None when\n+ nothing wanted or needed to be prevented attention to. Default None.\n+ dtype (VarType): The target type of `attn_mask` we expect.\n+ Returns:\n+ Tensor: A Tensor with shape same as input `attn_mask`, with data type `dtype`.\n+ \"\"\"\n+ return nn.layer.transformer._convert_attention_mask(attn_mask, dtype)\n+\n+\nclass MultiHeadAttention(nn.Layer):\n\"\"\"\nAttention mapps queries and a set of key-value pairs to outputs, and\n" }, { "change_type": "MODIFY", "old_path": "ppdet/modeling/transformers/detr_transformer.py", "new_path": "ppdet/modeling/transformers/detr_transformer.py", "diff": "@@ -18,11 +18,10 @@ from __future__ import print_function\nimport paddle\nimport paddle.nn as nn\n-from paddle.nn.layer.transformer import _convert_attention_mask\nimport paddle.nn.functional as F\nfrom ppdet.core.workspace import register\n-from ..layers import MultiHeadAttention\n+from ..layers import MultiHeadAttention, _convert_attention_mask\nfrom .position_encoding import PositionEmbedding\nfrom .utils import *\nfrom ..initializer import *\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
fix import problem of _convert_attention_mask (#3631)
499,331
08.07.2021 14:06:33
-28,800
1f6087ad38992977827427751f4c90ca824531fc
update spine_cooc data
[ { "change_type": "MODIFY", "old_path": "configs/dota/s2anet_1x_spine.yml", "new_path": "configs/dota/s2anet_1x_spine.yml", "diff": "@@ -21,6 +21,8 @@ S2ANetHead:\nfeat_in: 256\nfeat_out: 256\nnum_classes: 9\n- align_conv_type: 'DCN' # AlignConv Conv\n+ align_conv_type: 'AlignConv' # AlignConv Conv\nalign_conv_size: 3\nuse_sigmoid_cls: True\n+ reg_loss_weight: [1.0, 1.0, 1.0, 1.0, 1.05]\n+ cls_loss_weight: [1.05, 1.0]\n" }, { "change_type": "MODIFY", "old_path": "ppdet/utils/download.py", "new_path": "ppdet/utils/download.py", "diff": "@@ -95,7 +95,7 @@ DATASETS = {\n'49ce5a9b5ad0d6266163cd01de4b018e', ), ], ['annotations', 'images']),\n'spine_coco': ([(\n'https://paddledet.bj.bcebos.com/data/spine_coco.tar',\n- '03030f42d9b6202a6e425d4becefda0d', ), ], ['annotations', 'images']),\n+ '7ed69ae73f842cd2a8cf4f58dc3c5535', ), ], ['annotations', 'images']),\n'mot': (),\n'objects365': ()\n}\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
update spine_cooc data (#3635)
499,339
08.07.2021 17:08:18
-28,800
b40f00cbc9960107201e5ce748991a704b15d01e
fix deprecated api in ssd loss
[ { "change_type": "MODIFY", "old_path": "ppdet/modeling/losses/ssd_loss.py", "new_path": "ppdet/modeling/losses/ssd_loss.py", "diff": "@@ -67,18 +67,15 @@ class SSDLoss(nn.Layer):\nious = iou_similarity(gt_bbox.reshape((-1, 4)), prior_boxes).reshape(\n(batch_size, -1, num_priors))\n- # Calculate the number of object per sample.\n- num_object = (ious.sum(axis=-1) > 0).astype('int64').sum(axis=-1)\n-\n# For each prior box, get the max IoU of all GTs.\nprior_max_iou, prior_argmax_iou = ious.max(axis=1), ious.argmax(axis=1)\n# For each GT, get the max IoU of all prior boxes.\ngt_max_iou, gt_argmax_iou = ious.max(axis=2), ious.argmax(axis=2)\n# Gather target bbox and label according to 'prior_argmax_iou' index.\n- batch_ind = paddle.arange(\n- 0, batch_size, dtype='int64').unsqueeze(-1).tile([1, num_priors])\n- prior_argmax_iou = paddle.stack([batch_ind, prior_argmax_iou], axis=-1)\n+ batch_ind = paddle.arange(end=batch_size, dtype='int64').unsqueeze(-1)\n+ prior_argmax_iou = paddle.stack(\n+ [batch_ind.tile([1, num_priors]), prior_argmax_iou], axis=-1)\ntargets_bbox = paddle.gather_nd(gt_bbox, prior_argmax_iou)\ntargets_label = paddle.gather_nd(gt_label, prior_argmax_iou)\n# Assign negative\n@@ -89,14 +86,14 @@ class SSDLoss(nn.Layer):\nbg_index_tensor, targets_label)\n# Ensure each GT can match the max IoU prior box.\n- for i in range(batch_size):\n- if num_object[i] > 0:\n- targets_bbox[i] = paddle.scatter(\n- targets_bbox[i], gt_argmax_iou[i, :int(num_object[i])],\n- gt_bbox[i, :int(num_object[i])])\n- targets_label[i] = paddle.scatter(\n- targets_label[i], gt_argmax_iou[i, :int(num_object[i])],\n- gt_label[i, :int(num_object[i])])\n+ batch_ind = (batch_ind * num_priors + gt_argmax_iou).flatten()\n+ targets_bbox = paddle.scatter(\n+ targets_bbox.reshape([-1, 4]), batch_ind,\n+ gt_bbox.reshape([-1, 4])).reshape([batch_size, -1, 4])\n+ targets_label = paddle.scatter(\n+ targets_label.reshape([-1, 1]), batch_ind,\n+ gt_label.reshape([-1, 1])).reshape([batch_size, -1, 1])\n+ targets_label[:, :1] = bg_index\n# Encode box\nprior_boxes = prior_boxes.unsqueeze(0).tile([batch_size, 1, 1])\n@@ -107,12 +104,16 @@ class SSDLoss(nn.Layer):\nreturn targets_bbox, targets_label\n- def _mine_hard_example(self, conf_loss, targets_label, bg_index):\n+ def _mine_hard_example(self,\n+ conf_loss,\n+ targets_label,\n+ bg_index,\n+ mine_neg_ratio=0.01):\npos = (targets_label != bg_index).astype(conf_loss.dtype)\nnum_pos = pos.sum(axis=1, keepdim=True)\nneg = (targets_label == bg_index).astype(conf_loss.dtype)\n- conf_loss = conf_loss.clone() * neg\n+ conf_loss = conf_loss.detach() * neg\nloss_idx = conf_loss.argsort(axis=1, descending=True)\nidx_rank = loss_idx.argsort(axis=1)\nnum_negs = []\n@@ -120,9 +121,11 @@ class SSDLoss(nn.Layer):\ncur_num_pos = num_pos[i]\nnum_neg = paddle.clip(\ncur_num_pos * self.neg_pos_ratio, max=pos.shape[1])\n+ num_neg = num_neg if num_neg > 0 else paddle.to_tensor(\n+ [pos.shape[1] * mine_neg_ratio])\nnum_negs.append(num_neg)\n- num_neg = paddle.stack(num_negs).expand_as(idx_rank)\n- neg_mask = (idx_rank < num_neg).astype(conf_loss.dtype)\n+ num_negs = paddle.stack(num_negs).expand_as(idx_rank)\n+ neg_mask = (idx_rank < num_negs).astype(conf_loss.dtype)\nreturn (neg_mask + pos).astype('bool')\n@@ -141,22 +144,26 @@ class SSDLoss(nn.Layer):\n# Compute regression loss.\n# Select positive samples.\n- bbox_mask = (targets_label != bg_index).astype(boxes.dtype)\n- loc_loss = bbox_mask * F.smooth_l1_loss(\n- boxes, targets_bbox, reduction='none')\n- loc_loss = loc_loss.sum() * 
self.loc_loss_weight\n+ bbox_mask = paddle.tile(targets_label != bg_index, [1, 1, 4])\n+ if bbox_mask.astype(boxes.dtype).sum() > 0:\n+ location = paddle.masked_select(boxes, bbox_mask)\n+ targets_bbox = paddle.masked_select(targets_bbox, bbox_mask)\n+ loc_loss = F.smooth_l1_loss(location, targets_bbox, reduction='sum')\n+ loc_loss = loc_loss * self.loc_loss_weight\n+ else:\n+ loc_loss = paddle.zeros([1])\n# Compute confidence loss.\n- conf_loss = F.softmax_with_cross_entropy(scores, targets_label)\n+ conf_loss = F.cross_entropy(scores, targets_label, reduction=\"none\")\n# Mining hard examples.\nlabel_mask = self._mine_hard_example(\nconf_loss.squeeze(-1), targets_label.squeeze(-1), bg_index)\n- conf_loss = conf_loss * label_mask.unsqueeze(-1).astype(conf_loss.dtype)\n+ conf_loss = paddle.masked_select(conf_loss, label_mask.unsqueeze(-1))\nconf_loss = conf_loss.sum() * self.conf_loss_weight\n# Compute overall weighted loss.\nnormalizer = (targets_label != bg_index).astype('float32').sum().clip(\nmin=1)\n- loss = (conf_loss + loc_loss) / (normalizer + 1e-9)\n+ loss = (conf_loss + loc_loss) / normalizer\nreturn loss\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
fix deprecated api in ssd loss (#3592)
499,298
09.07.2021 19:20:23
-28,800
a82faaa2302c7035a7512876cab50e0a4b510668
[MOT] fix deepsort deploy trt infer
[ { "change_type": "MODIFY", "old_path": "deploy/python/mot_sde_infer.py", "new_path": "deploy/python/mot_sde_infer.py", "diff": "@@ -193,6 +193,7 @@ class SDE_ReID(object):\nmodel_dir,\ndevice='CPU',\nrun_mode='fluid',\n+ batch_size=50,\ntrt_min_shape=1,\ntrt_max_shape=1088,\ntrt_opt_shape=608,\n@@ -203,6 +204,7 @@ class SDE_ReID(object):\nself.predictor, self.config = load_predictor(\nmodel_dir,\nrun_mode=run_mode,\n+ batch_size=batch_size,\nmin_subgraph_size=self.pred_config.min_subgraph_size,\ndevice=device,\nuse_dynamic_shape=self.pred_config.use_dynamic_shape,\n@@ -214,10 +216,12 @@ class SDE_ReID(object):\nenable_mkldnn=enable_mkldnn)\nself.det_times = Timer()\nself.cpu_mem, self.gpu_mem, self.gpu_util = 0, 0, 0\n+ self.batch_size = batch_size\nassert pred_config.tracker, \"Tracking model should have tracker\"\nself.tracker = DeepSORTTracker()\ndef preprocess(self, crops):\n+ crops = crops[:self.batch_size]\ninputs = {}\ninputs['crops'] = np.array(crops).astype('float32')\nreturn inputs\n@@ -423,6 +427,7 @@ def main():\nFLAGS.reid_model_dir,\ndevice=FLAGS.device,\nrun_mode=FLAGS.run_mode,\n+ batch_size=FLAGS.reid_batch_size,\ntrt_min_shape=FLAGS.trt_min_shape,\ntrt_max_shape=FLAGS.trt_max_shape,\ntrt_opt_shape=FLAGS.trt_opt_shape,\n" }, { "change_type": "MODIFY", "old_path": "deploy/python/utils.py", "new_path": "deploy/python/utils.py", "diff": "@@ -114,6 +114,11 @@ def argsparser():\ndefault=None,\nhelp=(\"Directory include:'model.pdiparams', 'model.pdmodel', \"\n\"'infer_cfg.yml', created by tools/export_model.py.\"))\n+ parser.add_argument(\n+ \"--reid_batch_size\",\n+ type=int,\n+ default=50,\n+ help=\"max batch_size for reid model inference.\")\nparser.add_argument(\n'--use_dark',\ntype=bool,\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
[MOT] fix deepsort deploy trt infer (#3652)
499,333
12.07.2021 15:57:49
-28,800
9a3bb36664c2b13f8fcefdd19ebf8fc55a821941
support no_labeling training on voc
[ { "change_type": "MODIFY", "old_path": "ppdet/data/source/coco.py", "new_path": "ppdet/data/source/coco.py", "diff": "@@ -38,7 +38,7 @@ class COCODataSet(DetDataset):\nallow_empty (bool): whether to load empty entry. False as default\nempty_ratio (float): the ratio of empty record number to total\nrecord's, if empty_ratio is out of [0. ,1.), do not sample the\n- records. 1. as default\n+ records and use all the empty entries. 1. as default\n\"\"\"\ndef __init__(self,\n@@ -63,7 +63,8 @@ class COCODataSet(DetDataset):\nif self.empty_ratio < 0. or self.empty_ratio >= 1.:\nreturn records\nimport random\n- sample_num = int(num * self.empty_ratio / (1 - self.empty_ratio))\n+ sample_num = min(\n+ int(num * self.empty_ratio / (1 - self.empty_ratio)), len(records))\nrecords = random.sample(records, sample_num)\nreturn records\n" }, { "change_type": "MODIFY", "old_path": "ppdet/data/source/voc.py", "new_path": "ppdet/data/source/voc.py", "diff": "@@ -42,6 +42,10 @@ class VOCDataSet(DetDataset):\nsample_num (int): number of samples to load, -1 means all.\nlabel_list (str): if use_default_label is False, will load\nmapping between category and class index.\n+ allow_empty (bool): whether to load empty entry. False as default\n+ empty_ratio (float): the ratio of empty record number to total\n+ record's, if empty_ratio is out of [0. ,1.), do not sample the\n+ records and use all the empty entries. 1. as default\n\"\"\"\ndef __init__(self,\n@@ -50,7 +54,9 @@ class VOCDataSet(DetDataset):\nanno_path=None,\ndata_fields=['image'],\nsample_num=-1,\n- label_list=None):\n+ label_list=None,\n+ allow_empty=False,\n+ empty_ratio=1.):\nsuper(VOCDataSet, self).__init__(\ndataset_dir=dataset_dir,\nimage_dir=image_dir,\n@@ -58,6 +64,18 @@ class VOCDataSet(DetDataset):\ndata_fields=data_fields,\nsample_num=sample_num)\nself.label_list = label_list\n+ self.allow_empty = allow_empty\n+ self.empty_ratio = empty_ratio\n+\n+ def _sample_empty(self, records, num):\n+ # if empty_ratio is out of [0. ,1.), do not sample the records\n+ if self.empty_ratio < 0. or self.empty_ratio >= 1.:\n+ return records\n+ import random\n+ sample_num = min(\n+ int(num * self.empty_ratio / (1 - self.empty_ratio)), len(records))\n+ records = random.sample(records, sample_num)\n+ return records\ndef parse_dataset(self, ):\nanno_path = os.path.join(self.dataset_dir, self.anno_path)\n@@ -66,6 +84,7 @@ class VOCDataSet(DetDataset):\n# mapping category name to class id\n# first_class:0, second_class:1, ...\nrecords = []\n+ empty_records = []\nct = 0\ncname2cid = {}\nif self.label_list:\n@@ -164,15 +183,19 @@ class VOCDataSet(DetDataset):\nif k in self.data_fields:\nvoc_rec[k] = v\n- if len(objs) != 0:\n+ if len(objs) == 0:\n+ empty_records.append(voc_rec)\n+ else:\nrecords.append(voc_rec)\nct += 1\nif self.sample_num > 0 and ct >= self.sample_num:\nbreak\n- assert len(records) > 0, 'not found any voc record in %s' % (\n- self.anno_path)\n+ assert ct > 0, 'not found any voc record in %s' % (self.anno_path)\nlogger.debug('{} samples in file {}'.format(ct, anno_path))\n+ if len(empty_records) > 0:\n+ empty_records = self._sample_empty(empty_records, len(records))\n+ records += empty_records\nself.roidbs, self.cname2cid = records, cname2cid\ndef get_label_list(self):\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
support no_labeling training on voc (#3668)
499,333
13.07.2021 19:26:49
-28,800
37572ab7b034258ff3492ed059ef30ae624835d3
fix output in cpp infer
[ { "change_type": "MODIFY", "old_path": "deploy/cpp/src/main.cc", "new_path": "deploy/cpp/src/main.cc", "diff": "@@ -248,15 +248,17 @@ void PredictImage(const std::vector<std::string> all_img_paths,\nint item_start_idx = 0;\nfor (int i = 0; i < left_image_cnt; i++) {\n- std::cout << all_img_paths.at(idx * batch_size + i) << \" bbox_num \" << bbox_num[i] << std::endl;\n- if (bbox_num[i] <= 1) {\n- continue;\n- }\n+ cv::Mat im = batch_imgs[i];\n+ std::vector<PaddleDetection::ObjectResult> im_result;\n+ int detect_num = 0;\n+\nfor (int j = 0; j < bbox_num[i]; j++) {\nPaddleDetection::ObjectResult item = result[item_start_idx + j];\n- if (item.confidence < threshold) {\n+ if (item.confidence < threshold || item.class_id == -1) {\ncontinue;\n}\n+ detect_num += 1;\n+ im_result.push_back(item);\nif (item.rect.size() > 6){\nis_rbox = true;\nprintf(\"class=%d confidence=%.4f rect=[%d %d %d %d %d %d %d %d]\\n\",\n@@ -281,20 +283,9 @@ void PredictImage(const std::vector<std::string> all_img_paths,\nitem.rect[3]);\n}\n}\n+ std::cout << all_img_paths.at(idx * batch_size + i) << \" The number of detected box: \" << detect_num << std::endl;\nitem_start_idx = item_start_idx + bbox_num[i];\n- }\n// Visualization result\n- int bbox_idx = 0;\n- for (int bs = 0; bs < batch_imgs.size(); bs++) {\n- if (bbox_num[bs] <= 1) {\n- continue;\n- }\n- cv::Mat im = batch_imgs[bs];\n- std::vector<PaddleDetection::ObjectResult> im_result;\n- for (int k = 0; k < bbox_num[bs]; k++) {\n- im_result.push_back(result[bbox_idx+k]);\n- }\n- bbox_idx += bbox_num[bs];\ncv::Mat vis_img = PaddleDetection::VisualizeResult(\nim, im_result, labels, colormap, is_rbox);\nstd::vector<int> compression_params;\n@@ -304,7 +295,7 @@ void PredictImage(const std::vector<std::string> all_img_paths,\nif (output_dir.rfind(OS_PATH_SEP) != output_dir.size() - 1) {\noutput_path += OS_PATH_SEP;\n}\n- std::string image_file_path = all_img_paths.at(idx * batch_size + bs);\n+ std::string image_file_path = all_img_paths.at(idx * batch_size + i);\noutput_path += image_file_path.substr(image_file_path.find_last_of('/') + 1);\ncv::imwrite(output_path, vis_img, compression_params);\nprintf(\"Visualized output saved as %s\\n\", output_path.c_str());\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
fix output in cpp infer (#3676)
499,339
14.07.2021 18:51:57
-28,800
dfc40ee0ae4794fe623eff850a5a8d7b8df609c9
add DETR
[ { "change_type": "ADD", "old_path": null, "new_path": "configs/detr/_base_/detr_r50.yml", "diff": "+architecture: DETR\n+pretrain_weights: https://paddledet.bj.bcebos.com/models/pretrained/ResNet50_cos_pretrained.pdparams\n+hidden_dim: 256\n+\n+\n+DETR:\n+ backbone: ResNet\n+ transformer: DETRTransformer\n+ detr_head: DETRHead\n+ post_process: DETRBBoxPostProcess\n+\n+\n+ResNet:\n+ # index 0 stands for res2\n+ depth: 50\n+ norm_type: bn\n+ freeze_at: 0\n+ return_idx: [3]\n+ lr_mult_list: [0.0, 0.1, 0.1, 0.1]\n+ num_stages: 4\n+\n+\n+DETRTransformer:\n+ num_queries: 100\n+ position_embed_type: sine\n+ nhead: 8\n+ num_encoder_layers: 6\n+ num_decoder_layers: 6\n+ dim_feedforward: 2048\n+ dropout: 0.1\n+ activation: relu\n+\n+\n+DETRHead:\n+ num_mlp_layers: 3\n+\n+\n+DETRLoss:\n+ loss_coeff: {class: 1, bbox: 5, giou: 2, no_object: 0.1, mask: 1, dice: 1}\n+ aux_loss: True\n+\n+\n+HungarianMatcher:\n+ matcher_coeff: {class: 1, bbox: 5, giou: 2}\n" }, { "change_type": "ADD", "old_path": null, "new_path": "configs/detr/_base_/detr_reader.yml", "diff": "+worker_num: 0\n+TrainReader:\n+ sample_transforms:\n+ - Decode: {}\n+ - RandomFlip: {prob: 0.5}\n+ - RandomSelect: { transforms1: [ RandomShortSideResize: { short_side_sizes: [ 480, 512, 544, 576, 608, 640, 672, 704, 736, 768, 800 ], max_size: 1333 } ],\n+ transforms2: [\n+ RandomShortSideResize: { short_side_sizes: [ 400, 500, 600 ] },\n+ RandomSizeCrop: { min_size: 384, max_size: 600 },\n+ RandomShortSideResize: { short_side_sizes: [ 480, 512, 544, 576, 608, 640, 672, 704, 736, 768, 800 ], max_size: 1333 } ]\n+ }\n+ - NormalizeImage: {is_scale: true, mean: [0.485,0.456,0.406], std: [0.229, 0.224,0.225]}\n+ - NormalizeBox: {}\n+ - BboxXYXY2XYWH: {}\n+ - Permute: {}\n+ batch_transforms:\n+ - PadMaskBatch: {pad_to_stride: -1, return_pad_mask: true}\n+ batch_size: 2\n+ shuffle: true\n+ drop_last: true\n+ collate_batch: false\n+ use_shared_memory: false\n+\n+\n+EvalReader:\n+ sample_transforms:\n+ - Decode: {}\n+ - Resize: {target_size: [800, 1333], keep_ratio: True}\n+ - NormalizeImage: {is_scale: true, mean: [0.485,0.456,0.406], std: [0.229, 0.224,0.225]}\n+ - Permute: {}\n+ batch_transforms:\n+ - PadMaskBatch: {pad_to_stride: -1, return_pad_mask: true}\n+ batch_size: 1\n+ shuffle: false\n+ drop_last: false\n+ drop_empty: false\n+\n+\n+TestReader:\n+ sample_transforms:\n+ - Decode: {}\n+ - Resize: {target_size: [800, 1333], keep_ratio: True}\n+ - NormalizeImage: {is_scale: true, mean: [0.485,0.456,0.406], std: [0.229, 0.224,0.225]}\n+ - Permute: {}\n+ batch_transforms:\n+ - PadMaskBatch: {pad_to_stride: -1, return_pad_mask: true}\n+ batch_size: 1\n+ shuffle: false\n+ drop_last: false\n" }, { "change_type": "ADD", "old_path": null, "new_path": "configs/detr/_base_/optimizer_1x.yml", "diff": "+epoch: 500\n+\n+LearningRate:\n+ base_lr: 0.0001\n+ schedulers:\n+ - !PiecewiseDecay\n+ gamma: 0.1\n+ milestones: [400]\n+ use_warmup: false\n+\n+OptimizerBuilder:\n+ clip_grad_by_norm: 0.1\n+ regularizer: false\n+ optimizer:\n+ type: AdamW\n+ weight_decay: 0.0001\n" }, { "change_type": "ADD", "old_path": null, "new_path": "configs/detr/detr_r50_1x_coco.yml", "diff": "+_BASE_: [\n+ '../datasets/coco_detection.yml',\n+ '../runtime.yml',\n+ '_base_/optimizer_1x.yml',\n+ '_base_/detr_r50.yml',\n+ '_base_/detr_reader.yml',\n+]\n+weights: output/detr_r50_1x_coco/model_final\n" }, { "change_type": "MODIFY", "old_path": "ppdet/data/transform/batch_operators.py", "new_path": "ppdet/data/transform/batch_operators.py", "diff": "@@ -33,7 +33,7 @@ logger = 
setup_logger(__name__)\n__all__ = [\n'PadBatch', 'BatchRandomResize', 'Gt2YoloTarget', 'Gt2FCOSTarget',\n- 'Gt2TTFTarget', 'Gt2Solov2Target', 'Gt2SparseRCNNTarget'\n+ 'Gt2TTFTarget', 'Gt2Solov2Target', 'Gt2SparseRCNNTarget', 'PadMaskBatch'\n]\n@@ -764,10 +764,79 @@ class Gt2SparseRCNNTarget(BaseOperator):\nimg_whwh = np.array([w, h, w, h], dtype=np.int32)\nsample[\"img_whwh\"] = img_whwh\nif \"scale_factor\" in sample:\n- sample[\"scale_factor_wh\"] = np.array([sample[\"scale_factor\"][1], sample[\"scale_factor\"][0]],\n+ sample[\"scale_factor_wh\"] = np.array(\n+ [sample[\"scale_factor\"][1], sample[\"scale_factor\"][0]],\ndtype=np.float32)\nsample.pop(\"scale_factor\")\nelse:\n- sample[\"scale_factor_wh\"] = np.array([1.0, 1.0], dtype=np.float32)\n+ sample[\"scale_factor_wh\"] = np.array(\n+ [1.0, 1.0], dtype=np.float32)\n+\n+ return samples\n+\n+\n+@register_op\n+class PadMaskBatch(BaseOperator):\n+ \"\"\"\n+ Pad a batch of samples so they can be divisible by a stride.\n+ The layout of each image should be 'CHW'.\n+ Args:\n+ pad_to_stride (int): If `pad_to_stride > 0`, pad zeros to ensure\n+ height and width is divisible by `pad_to_stride`.\n+ return_pad_mask (bool): If `return_pad_mask = True`, return\n+ `pad_mask` for transformer.\n+ \"\"\"\n+\n+ def __init__(self, pad_to_stride=0, return_pad_mask=False):\n+ super(PadMaskBatch, self).__init__()\n+ self.pad_to_stride = pad_to_stride\n+ self.return_pad_mask = return_pad_mask\n+\n+ def __call__(self, samples, context=None):\n+ \"\"\"\n+ Args:\n+ samples (list): a batch of sample, each is dict.\n+ \"\"\"\n+ coarsest_stride = self.pad_to_stride\n+\n+ max_shape = np.array([data['image'].shape for data in samples]).max(\n+ axis=0)\n+ if coarsest_stride > 0:\n+ max_shape[1] = int(\n+ np.ceil(max_shape[1] / coarsest_stride) * coarsest_stride)\n+ max_shape[2] = int(\n+ np.ceil(max_shape[2] / coarsest_stride) * coarsest_stride)\n+\n+ for data in samples:\n+ im = data['image']\n+ im_c, im_h, im_w = im.shape[:]\n+ padding_im = np.zeros(\n+ (im_c, max_shape[1], max_shape[2]), dtype=np.float32)\n+ padding_im[:, :im_h, :im_w] = im\n+ data['image'] = padding_im\n+ if 'semantic' in data and data['semantic'] is not None:\n+ semantic = data['semantic']\n+ padding_sem = np.zeros(\n+ (1, max_shape[1], max_shape[2]), dtype=np.float32)\n+ padding_sem[:, :im_h, :im_w] = semantic\n+ data['semantic'] = padding_sem\n+ if 'gt_segm' in data and data['gt_segm'] is not None:\n+ gt_segm = data['gt_segm']\n+ padding_segm = np.zeros(\n+ (gt_segm.shape[0], max_shape[1], max_shape[2]),\n+ dtype=np.uint8)\n+ padding_segm[:, :im_h, :im_w] = gt_segm\n+ data['gt_segm'] = padding_segm\n+ if self.return_pad_mask:\n+ padding_mask = np.zeros(\n+ (max_shape[1], max_shape[2]), dtype=np.float32)\n+ padding_mask[:im_h, :im_w] = 1.\n+ data['pad_mask'] = padding_mask\n+\n+ if 'gt_rbox2poly' in data and data['gt_rbox2poly'] is not None:\n+ # ploy to rbox\n+ polys = data['gt_rbox2poly']\n+ rbox = bbox_utils.poly2rbox(polys)\n+ data['gt_rbox'] = rbox\nreturn samples\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
add DETR (#3690)
499,395
19.07.2021 10:42:25
-28,800
b33838c0f56489d33a35348217925180c2b79d36
add yolov3 original model
[ { "change_type": "ADD", "old_path": null, "new_path": "configs/yolov3/yolov3_darknet53_original_270e_coco.yml", "diff": "+_BASE_: [\n+ '../datasets/coco_detection.yml',\n+ '../runtime.yml',\n+ '_base_/optimizer_270e.yml',\n+ '_base_/yolov3_darknet53.yml',\n+ '_base_/yolov3_reader.yml',\n+]\n+\n+snapshot_epoch: 5\n+weights: output/yolov3_darknet53_270e_coco/model_final\n+\n+norm_type: bn\n+\n+YOLOv3Loss:\n+ ignore_thresh: 0.5\n+ downsample: [32, 16, 8]\n+ label_smooth: false\n+\n+TrainReader:\n+ inputs_def:\n+ num_max_boxes: 50\n+ sample_transforms:\n+ - Decode: {}\n+ - RandomDistort: {}\n+ - RandomExpand: {fill_value: [123.675, 116.28, 103.53], ratio: 2.0}\n+ - RandomCrop: {}\n+ - RandomFlip: {}\n+ batch_transforms:\n+ - BatchRandomResize: {target_size: [320, 352, 384, 416, 448, 480, 512, 544, 576, 608], random_size: True, random_interp: True, keep_ratio: False}\n+ - NormalizeBox: {}\n+ - PadBox: {num_max_boxes: 50}\n+ - BboxXYXY2XYWH: {}\n+ - NormalizeImage: {mean: [0.485, 0.456, 0.406], std: [0.229, 0.224, 0.225], is_scale: True}\n+ - Permute: {}\n+ - Gt2YoloTarget: {anchor_masks: [[6, 7, 8], [3, 4, 5], [0, 1, 2]], anchors: [[10, 13], [16, 30], [33, 23], [30, 61], [62, 45], [59, 119], [116, 90], [156, 198], [373, 326]], downsample_ratios: [32, 16, 8], iou_thresh: 0.5}\n+ batch_size: 8\n+ shuffle: true\n+ drop_last: true\n+ mixup_epoch: -1\n+ use_shared_memory: true\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
add yolov3 original model (#3643)
499,339
19.07.2021 18:56:24
-28,800
e8aeb802a901730ccc11ae1653a4a71249f9b46e
[transformer] add Deformable DETR base code
[ { "change_type": "MODIFY", "old_path": "ppdet/modeling/heads/detr_head.py", "new_path": "ppdet/modeling/heads/detr_head.py", "diff": "@@ -21,9 +21,10 @@ import paddle.nn as nn\nimport paddle.nn.functional as F\nfrom ppdet.core.workspace import register\nimport pycocotools.mask as mask_util\n-from ..initializer import linear_init_\n+from ..initializer import linear_init_, constant_\n+from ..transformers.utils import inverse_sigmoid\n-__all__ = ['DETRHead']\n+__all__ = ['DETRHead', 'DeformableDETRHead']\nclass MLP(nn.Layer):\n@@ -275,3 +276,77 @@ class DETRHead(nn.Layer):\ngt_mask=gt_mask)\nelse:\nreturn (outputs_bbox[-1], outputs_logit[-1], outputs_seg)\n+\n+\n+@register\n+class DeformableDETRHead(nn.Layer):\n+ __shared__ = ['num_classes', 'hidden_dim']\n+ __inject__ = ['loss']\n+\n+ def __init__(self,\n+ num_classes=80,\n+ hidden_dim=512,\n+ nhead=8,\n+ num_mlp_layers=3,\n+ loss='DETRLoss'):\n+ super(DeformableDETRHead, self).__init__()\n+ self.num_classes = num_classes\n+ self.hidden_dim = hidden_dim\n+ self.nhead = nhead\n+ self.loss = loss\n+\n+ self.score_head = nn.Linear(hidden_dim, self.num_classes)\n+ self.bbox_head = MLP(hidden_dim,\n+ hidden_dim,\n+ output_dim=4,\n+ num_layers=num_mlp_layers)\n+\n+ self._reset_parameters()\n+\n+ def _reset_parameters(self):\n+ linear_init_(self.score_head)\n+ constant_(self.score_head.bias, -4.595)\n+ constant_(self.bbox_head.layers[-1].weight)\n+ bias = paddle.zeros_like(self.bbox_head.layers[-1].bias)\n+ bias[2:] = -2.0\n+ self.bbox_head.layers[-1].bias.set_value(bias)\n+\n+ @classmethod\n+ def from_config(cls, cfg, hidden_dim, nhead, input_shape):\n+ return {'hidden_dim': hidden_dim, 'nhead': nhead}\n+\n+ def forward(self, out_transformer, body_feats, inputs=None):\n+ r\"\"\"\n+ Args:\n+ out_transformer (Tuple): (feats: [num_levels, batch_size,\n+ num_queries, hidden_dim],\n+ memory: [batch_size,\n+ \\sum_{l=0}^{L-1} H_l \\cdot W_l, hidden_dim],\n+ reference_points: [batch_size, num_queries, 2])\n+ body_feats (List(Tensor)): list[[B, C, H, W]]\n+ inputs (dict): dict(inputs)\n+ \"\"\"\n+ feats, memory, reference_points = out_transformer\n+ reference_points = inverse_sigmoid(reference_points.unsqueeze(0))\n+ outputs_bbox = self.bbox_head(feats)\n+\n+ # It's equivalent to \"outputs_bbox[:, :, :, :2] += reference_points\",\n+ # but the gradient is wrong in paddle.\n+ outputs_bbox = paddle.concat(\n+ [\n+ outputs_bbox[:, :, :, :2] + reference_points,\n+ outputs_bbox[:, :, :, 2:]\n+ ],\n+ axis=-1)\n+\n+ outputs_bbox = F.sigmoid(outputs_bbox)\n+ outputs_logit = self.score_head(feats)\n+\n+ if self.training:\n+ assert inputs is not None\n+ assert 'gt_bbox' in inputs and 'gt_class' in inputs\n+\n+ return self.loss(outputs_bbox, outputs_logit, inputs['gt_bbox'],\n+ inputs['gt_class'])\n+ else:\n+ return (outputs_bbox[-1], outputs_logit[-1], None)\n" }, { "change_type": "MODIFY", "old_path": "ppdet/modeling/post_process.py", "new_path": "ppdet/modeling/post_process.py", "diff": "@@ -532,14 +532,25 @@ class DETRBBoxPostProcess(object):\nscores = F.sigmoid(logits) if self.use_focal_loss else F.softmax(\nlogits)[:, :, :-1]\n- scores, labels = scores.max(-1), scores.argmax(-1)\n+ if not self.use_focal_loss:\n+ scores, labels = scores.max(-1), scores.argmax(-1)\nif scores.shape[1] > self.num_top_queries:\n- scores, index = paddle.topk(scores, self.num_top_queries, axis=-1)\n+ scores, index = paddle.topk(\n+ scores, self.num_top_queries, axis=-1)\nlabels = paddle.stack(\n[paddle.gather(l, i) for l, i in zip(labels, index)])\nbbox_pred = 
paddle.stack(\n[paddle.gather(b, i) for b, i in zip(bbox_pred, index)])\n+ else:\n+ scores, index = paddle.topk(\n+ scores.reshape([logits.shape[0], -1]),\n+ self.num_top_queries,\n+ axis=-1)\n+ labels = index % logits.shape[2]\n+ index = index // logits.shape[2]\n+ bbox_pred = paddle.stack(\n+ [paddle.gather(b, i) for b, i in zip(bbox_pred, index)])\nbbox_pred = paddle.concat(\n[\n" }, { "change_type": "MODIFY", "old_path": "ppdet/modeling/transformers/__init__.py", "new_path": "ppdet/modeling/transformers/__init__.py", "diff": "@@ -16,8 +16,10 @@ from . import detr_transformer\nfrom . import utils\nfrom . import matchers\nfrom . import position_encoding\n+from . import deformable_transformer\nfrom .detr_transformer import *\nfrom .utils import *\nfrom .matchers import *\nfrom .position_encoding import *\n+from .deformable_transformer import *\n" }, { "change_type": "ADD", "old_path": null, "new_path": "ppdet/modeling/transformers/deformable_transformer.py", "diff": "+# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n+from __future__ import absolute_import\n+from __future__ import division\n+from __future__ import print_function\n+\n+import math\n+import paddle\n+import paddle.nn as nn\n+import paddle.nn.functional as F\n+from paddle import ParamAttr\n+\n+from ppdet.core.workspace import register\n+from ..layers import MultiHeadAttention\n+from .position_encoding import PositionEmbedding\n+from .utils import _get_clones, deformable_attention_core_func\n+from ..initializer import linear_init_, constant_, xavier_uniform_, normal_\n+\n+__all__ = ['DeformableTransformer']\n+\n+\n+class MSDeformableAttention(nn.Layer):\n+ def __init__(self,\n+ embed_dim=256,\n+ num_heads=8,\n+ num_levels=4,\n+ num_points=4,\n+ lr_mult=0.1):\n+ \"\"\"\n+ Multi-Scale Deformable Attention Module\n+ \"\"\"\n+ super(MSDeformableAttention, self).__init__()\n+ self.embed_dim = embed_dim\n+ self.num_heads = num_heads\n+ self.num_levels = num_levels\n+ self.num_points = num_points\n+ self.total_points = num_heads * num_levels * num_points\n+\n+ self.head_dim = embed_dim // num_heads\n+ assert self.head_dim * num_heads == self.embed_dim, \"embed_dim must be divisible by num_heads\"\n+\n+ self.sampling_offsets = nn.Linear(\n+ embed_dim,\n+ self.total_points * 2,\n+ weight_attr=ParamAttr(learning_rate=lr_mult),\n+ bias_attr=ParamAttr(learning_rate=lr_mult))\n+\n+ self.attention_weights = nn.Linear(embed_dim, self.total_points)\n+ self.value_proj = nn.Linear(embed_dim, embed_dim)\n+ self.output_proj = nn.Linear(embed_dim, embed_dim)\n+\n+ self._reset_parameters()\n+\n+ def _reset_parameters(self):\n+ # sampling_offsets\n+ constant_(self.sampling_offsets.weight)\n+ thetas = paddle.arange(\n+ self.num_heads,\n+ dtype=paddle.float32) * (2.0 * math.pi / self.num_heads)\n+ grid_init = paddle.stack([thetas.cos(), thetas.sin()], -1)\n+ grid_init = grid_init / grid_init.abs().max(-1, keepdim=True)\n+ grid_init = grid_init.reshape([self.num_heads, 1, 1, 
2]).tile(\n+ [1, self.num_levels, self.num_points, 1])\n+ scaling = paddle.arange(\n+ 1, self.num_points + 1,\n+ dtype=paddle.float32).reshape([1, 1, -1, 1])\n+ grid_init *= scaling\n+ self.sampling_offsets.bias.set_value(grid_init.flatten())\n+ # attention_weights\n+ constant_(self.attention_weights.weight)\n+ constant_(self.attention_weights.bias)\n+ # proj\n+ xavier_uniform_(self.value_proj.weight)\n+ constant_(self.value_proj.bias)\n+ xavier_uniform_(self.output_proj.weight)\n+ constant_(self.output_proj.bias)\n+\n+ def forward(self,\n+ query,\n+ reference_points,\n+ value,\n+ value_spatial_shapes,\n+ value_mask=None):\n+ \"\"\"\n+ Args:\n+ query (Tensor): [bs, query_length, C]\n+ reference_points (Tensor): [bs, query_length, n_levels, 2], range in [0, 1], top-left (0,0),\n+ bottom-right (1, 1), including padding area\n+ value (Tensor): [bs, value_length, C]\n+ value_spatial_shapes (Tensor): [n_levels, 2], [(H_0, W_0), (H_1, W_1), ..., (H_{L-1}, W_{L-1})]\n+ value_mask (Tensor): [bs, value_length], True for non-padding elements, False for padding elements\n+\n+ Returns:\n+ output (Tensor): [bs, Length_{query}, C]\n+ \"\"\"\n+ bs, Len_q = query.shape[:2]\n+ Len_v = value.shape[1]\n+ assert int(value_spatial_shapes.prod(1).sum()) == Len_v\n+\n+ value = self.value_proj(value)\n+ if value_mask is not None:\n+ value_mask = value_mask.astype(value.dtype).unsqueeze(-1)\n+ value *= value_mask\n+ value = value.reshape([bs, Len_v, self.num_heads, self.head_dim])\n+\n+ sampling_offsets = self.sampling_offsets(query).reshape(\n+ [bs, Len_q, self.num_heads, self.num_levels, self.num_points, 2])\n+ attention_weights = self.attention_weights(query).reshape(\n+ [bs, Len_q, self.num_heads, self.num_levels * self.num_points])\n+ attention_weights = F.softmax(attention_weights, -1).reshape(\n+ [bs, Len_q, self.num_heads, self.num_levels, self.num_points])\n+\n+ offset_normalizer = value_spatial_shapes.flip([1]).reshape(\n+ [1, 1, 1, self.num_levels, 1, 2])\n+ sampling_locations = reference_points.reshape([\n+ bs, Len_q, 1, self.num_levels, 1, 2\n+ ]) + sampling_offsets / offset_normalizer\n+\n+ output = deformable_attention_core_func(\n+ value, value_spatial_shapes, sampling_locations, attention_weights)\n+ output = self.output_proj(output)\n+\n+ return output\n+\n+\n+class DeformableTransformerEncoderLayer(nn.Layer):\n+ def __init__(self,\n+ d_model=256,\n+ n_head=8,\n+ dim_feedforward=1024,\n+ dropout=0.1,\n+ activation=\"relu\",\n+ n_levels=4,\n+ n_points=4,\n+ weight_attr=None,\n+ bias_attr=None):\n+ super(DeformableTransformerEncoderLayer, self).__init__()\n+ # self attention\n+ self.self_attn = MSDeformableAttention(d_model, n_head, n_levels,\n+ n_points)\n+ self.dropout1 = nn.Dropout(dropout)\n+ self.norm1 = nn.LayerNorm(d_model)\n+ # ffn\n+ self.linear1 = nn.Linear(d_model, dim_feedforward, weight_attr,\n+ bias_attr)\n+ self.activation = getattr(F, activation)\n+ self.dropout2 = nn.Dropout(dropout)\n+ self.linear2 = nn.Linear(dim_feedforward, d_model, weight_attr,\n+ bias_attr)\n+ self.dropout3 = nn.Dropout(dropout)\n+ self.norm2 = nn.LayerNorm(d_model)\n+ self._reset_parameters()\n+\n+ def _reset_parameters(self):\n+ linear_init_(self.linear1)\n+ linear_init_(self.linear2)\n+ xavier_uniform_(self.linear1.weight)\n+ xavier_uniform_(self.linear2.weight)\n+\n+ def with_pos_embed(self, tensor, pos):\n+ return tensor if pos is None else tensor + pos\n+\n+ def forward_ffn(self, src):\n+ src2 = self.linear2(self.dropout2(self.activation(self.linear1(src))))\n+ src = src + self.dropout3(src2)\n+ src = 
self.norm2(src)\n+ return src\n+\n+ def forward(self,\n+ src,\n+ reference_points,\n+ spatial_shapes,\n+ src_mask=None,\n+ pos_embed=None):\n+ # self attention\n+ src2 = self.self_attn(\n+ self.with_pos_embed(src, pos_embed), reference_points, src,\n+ spatial_shapes, src_mask)\n+ src = src + self.dropout1(src2)\n+ src = self.norm1(src)\n+ # ffn\n+ src = self.forward_ffn(src)\n+\n+ return src\n+\n+\n+class DeformableTransformerEncoder(nn.Layer):\n+ def __init__(self, encoder_layer, num_layers):\n+ super(DeformableTransformerEncoder, self).__init__()\n+ self.layers = _get_clones(encoder_layer, num_layers)\n+ self.num_layers = num_layers\n+\n+ @staticmethod\n+ def get_reference_points(spatial_shapes, valid_ratios):\n+ valid_ratios = valid_ratios.unsqueeze(1)\n+ reference_points = []\n+ for i, (H, W) in enumerate(spatial_shapes.tolist()):\n+ ref_y, ref_x = paddle.meshgrid(\n+ paddle.linspace(0.5, H - 0.5, H),\n+ paddle.linspace(0.5, W - 0.5, W))\n+ ref_y = ref_y.flatten().unsqueeze(0) / (valid_ratios[:, :, i, 1] *\n+ H)\n+ ref_x = ref_x.flatten().unsqueeze(0) / (valid_ratios[:, :, i, 0] *\n+ W)\n+ reference_points.append(paddle.stack((ref_x, ref_y), axis=-1))\n+ reference_points = paddle.concat(reference_points, 1).unsqueeze(2)\n+ reference_points = reference_points * valid_ratios\n+ return reference_points\n+\n+ def forward(self,\n+ src,\n+ spatial_shapes,\n+ src_mask=None,\n+ pos_embed=None,\n+ valid_ratios=None):\n+ output = src\n+ if valid_ratios is None:\n+ valid_ratios = paddle.ones(\n+ [src.shape[0], spatial_shapes.shape[0], 2])\n+ reference_points = self.get_reference_points(spatial_shapes,\n+ valid_ratios)\n+ for layer in self.layers:\n+ output = layer(output, reference_points, spatial_shapes, src_mask,\n+ pos_embed)\n+\n+ return output\n+\n+\n+class DeformableTransformerDecoderLayer(nn.Layer):\n+ def __init__(self,\n+ d_model=256,\n+ n_head=8,\n+ dim_feedforward=1024,\n+ dropout=0.1,\n+ activation=\"relu\",\n+ n_levels=4,\n+ n_points=4,\n+ weight_attr=None,\n+ bias_attr=None):\n+ super(DeformableTransformerDecoderLayer, self).__init__()\n+\n+ # self attention\n+ self.self_attn = MultiHeadAttention(d_model, n_head, dropout=dropout)\n+ self.dropout1 = nn.Dropout(dropout)\n+ self.norm1 = nn.LayerNorm(d_model)\n+\n+ # cross attention\n+ self.cross_attn = MSDeformableAttention(d_model, n_head, n_levels,\n+ n_points)\n+ self.dropout2 = nn.Dropout(dropout)\n+ self.norm2 = nn.LayerNorm(d_model)\n+\n+ # ffn\n+ self.linear1 = nn.Linear(d_model, dim_feedforward, weight_attr,\n+ bias_attr)\n+ self.activation = getattr(F, activation)\n+ self.dropout3 = nn.Dropout(dropout)\n+ self.linear2 = nn.Linear(dim_feedforward, d_model, weight_attr,\n+ bias_attr)\n+ self.dropout4 = nn.Dropout(dropout)\n+ self.norm3 = nn.LayerNorm(d_model)\n+ self._reset_parameters()\n+\n+ def _reset_parameters(self):\n+ linear_init_(self.linear1)\n+ linear_init_(self.linear2)\n+ xavier_uniform_(self.linear1.weight)\n+ xavier_uniform_(self.linear2.weight)\n+\n+ def with_pos_embed(self, tensor, pos):\n+ return tensor if pos is None else tensor + pos\n+\n+ def forward_ffn(self, tgt):\n+ tgt2 = self.linear2(self.dropout3(self.activation(self.linear1(tgt))))\n+ tgt = tgt + self.dropout4(tgt2)\n+ tgt = self.norm3(tgt)\n+ return tgt\n+\n+ def forward(self,\n+ tgt,\n+ reference_points,\n+ memory,\n+ memory_spatial_shapes,\n+ memory_mask=None,\n+ query_pos_embed=None):\n+ # self attention\n+ q = k = self.with_pos_embed(tgt, query_pos_embed)\n+ tgt2 = self.self_attn(q, k, value=tgt)\n+ tgt = tgt + self.dropout1(tgt2)\n+ tgt = 
self.norm1(tgt)\n+\n+ # cross attention\n+ tgt2 = self.cross_attn(\n+ self.with_pos_embed(tgt, query_pos_embed), reference_points, memory,\n+ memory_spatial_shapes, memory_mask)\n+ tgt = tgt + self.dropout2(tgt2)\n+ tgt = self.norm2(tgt)\n+\n+ # ffn\n+ tgt = self.forward_ffn(tgt)\n+\n+ return tgt\n+\n+\n+class DeformableTransformerDecoder(nn.Layer):\n+ def __init__(self, decoder_layer, num_layers, return_intermediate=False):\n+ super(DeformableTransformerDecoder, self).__init__()\n+ self.layers = _get_clones(decoder_layer, num_layers)\n+ self.num_layers = num_layers\n+ self.return_intermediate = return_intermediate\n+\n+ def forward(self,\n+ tgt,\n+ reference_points,\n+ memory,\n+ memory_spatial_shapes,\n+ memory_mask=None,\n+ query_pos_embed=None):\n+ output = tgt\n+ intermediate = []\n+ for lid, layer in enumerate(self.layers):\n+ output = layer(output, reference_points, memory,\n+ memory_spatial_shapes, memory_mask, query_pos_embed)\n+\n+ if self.return_intermediate:\n+ intermediate.append(output)\n+\n+ if self.return_intermediate:\n+ return paddle.stack(intermediate)\n+\n+ return output.unsqueeze(0)\n+\n+\n+@register\n+class DeformableTransformer(nn.Layer):\n+ __shared__ = ['hidden_dim']\n+\n+ def __init__(self,\n+ num_queries=300,\n+ position_embed_type='sine',\n+ return_intermediate_dec=True,\n+ backbone_num_channels=[512, 1024, 2048],\n+ num_feature_levels=4,\n+ num_encoder_points=4,\n+ num_decoder_points=4,\n+ hidden_dim=256,\n+ nhead=8,\n+ num_encoder_layers=6,\n+ num_decoder_layers=6,\n+ dim_feedforward=1024,\n+ dropout=0.1,\n+ activation=\"relu\",\n+ lr_mult=0.1,\n+ weight_attr=None,\n+ bias_attr=None):\n+ super(DeformableTransformer, self).__init__()\n+ assert position_embed_type in ['sine', 'learned'], \\\n+ f'ValueError: position_embed_type not supported {position_embed_type}!'\n+ assert len(backbone_num_channels) <= num_feature_levels\n+\n+ self.hidden_dim = hidden_dim\n+ self.nhead = nhead\n+ self.num_feature_levels = num_feature_levels\n+\n+ encoder_layer = DeformableTransformerEncoderLayer(\n+ hidden_dim, nhead, dim_feedforward, dropout, activation,\n+ num_feature_levels, num_encoder_points, weight_attr, bias_attr)\n+ self.encoder = DeformableTransformerEncoder(encoder_layer,\n+ num_encoder_layers)\n+\n+ decoder_layer = DeformableTransformerDecoderLayer(\n+ hidden_dim, nhead, dim_feedforward, dropout, activation,\n+ num_feature_levels, num_decoder_points, weight_attr, bias_attr)\n+ self.decoder = DeformableTransformerDecoder(\n+ decoder_layer, num_decoder_layers, return_intermediate_dec)\n+\n+ self.level_embed = nn.Embedding(num_feature_levels, hidden_dim)\n+ self.tgt_embed = nn.Embedding(num_queries, hidden_dim)\n+ self.query_pos_embed = nn.Embedding(num_queries, hidden_dim)\n+\n+ self.reference_points = nn.Linear(\n+ hidden_dim,\n+ 2,\n+ weight_attr=ParamAttr(learning_rate=lr_mult),\n+ bias_attr=ParamAttr(learning_rate=lr_mult))\n+\n+ self.input_proj = nn.LayerList()\n+ for in_channels in backbone_num_channels:\n+ self.input_proj.append(\n+ nn.Sequential(\n+ nn.Conv2D(\n+ in_channels,\n+ hidden_dim,\n+ kernel_size=1,\n+ weight_attr=weight_attr,\n+ bias_attr=bias_attr),\n+ nn.GroupNorm(32, hidden_dim)))\n+ in_channels = backbone_num_channels[-1]\n+ for _ in range(num_feature_levels - len(backbone_num_channels)):\n+ self.input_proj.append(\n+ nn.Sequential(\n+ nn.Conv2D(\n+ in_channels,\n+ hidden_dim,\n+ kernel_size=3,\n+ stride=2,\n+ padding=1,\n+ weight_attr=weight_attr,\n+ bias_attr=bias_attr),\n+ nn.GroupNorm(32, hidden_dim)))\n+ in_channels = hidden_dim\n+\n+ 
self.position_embedding = PositionEmbedding(\n+ hidden_dim // 2,\n+ normalize=True if position_embed_type == 'sine' else False,\n+ embed_type=position_embed_type,\n+ offset=-0.5)\n+\n+ self._reset_parameters()\n+\n+ def _reset_parameters(self):\n+ normal_(self.level_embed.weight)\n+ normal_(self.tgt_embed.weight)\n+ normal_(self.query_pos_embed.weight)\n+ xavier_uniform_(self.reference_points.weight)\n+ constant_(self.reference_points.bias)\n+ for l in self.input_proj:\n+ xavier_uniform_(l[0].weight)\n+ constant_(l[0].bias)\n+\n+ @classmethod\n+ def from_config(cls, cfg, input_shape):\n+ return {'backbone_num_channels': [i.channels for i in input_shape], }\n+\n+ def _get_valid_ratio(self, mask):\n+ mask = mask.astype(paddle.float32)\n+ _, H, W = mask.shape\n+ valid_ratio_h = paddle.sum(mask[:, :, 0], 1) / H\n+ valid_ratio_w = paddle.sum(mask[:, 0, :], 1) / W\n+ valid_ratio = paddle.stack([valid_ratio_w, valid_ratio_h], -1)\n+ return valid_ratio\n+\n+ def forward(self, src_feats, src_mask=None):\n+ srcs = []\n+ for i in range(len(src_feats)):\n+ srcs.append(self.input_proj[i](src_feats[i]))\n+ if self.num_feature_levels > len(srcs):\n+ len_srcs = len(srcs)\n+ for i in range(len_srcs, self.num_feature_levels):\n+ if i == len_srcs:\n+ srcs.append(self.input_proj[i](src_feats[-1]))\n+ else:\n+ srcs.append(self.input_proj[i](srcs[-1]))\n+ src_flatten = []\n+ mask_flatten = []\n+ lvl_pos_embed_flatten = []\n+ spatial_shapes = []\n+ valid_ratios = []\n+ for level, src in enumerate(srcs):\n+ bs, c, h, w = src.shape\n+ spatial_shapes.append([h, w])\n+ src = src.flatten(2).transpose([0, 2, 1])\n+ src_flatten.append(src)\n+ if src_mask is not None:\n+ mask = F.interpolate(\n+ src_mask.unsqueeze(0).astype(src.dtype),\n+ size=(h, w))[0].astype('bool')\n+ else:\n+ mask = paddle.ones([bs, h, w], dtype='bool')\n+ valid_ratios.append(self._get_valid_ratio(mask))\n+ pos_embed = self.position_embedding(mask).flatten(2).transpose(\n+ [0, 2, 1])\n+ lvl_pos_embed = pos_embed + self.level_embed.weight[level].reshape(\n+ [1, 1, -1])\n+ lvl_pos_embed_flatten.append(lvl_pos_embed)\n+ mask = mask.astype(src.dtype).flatten(1)\n+ mask_flatten.append(mask)\n+ src_flatten = paddle.concat(src_flatten, 1)\n+ mask_flatten = paddle.concat(mask_flatten, 1)\n+ lvl_pos_embed_flatten = paddle.concat(lvl_pos_embed_flatten, 1)\n+ # [l, 2]\n+ spatial_shapes = paddle.to_tensor(spatial_shapes, dtype='int64')\n+ # [b, l, 2]\n+ valid_ratios = paddle.stack(valid_ratios, 1)\n+\n+ # encoder\n+ memory = self.encoder(src_flatten, spatial_shapes, mask_flatten,\n+ lvl_pos_embed_flatten, valid_ratios)\n+\n+ # prepare input for decoder\n+ bs, _, c = memory.shape\n+ query_embed = self.query_pos_embed.weight.unsqueeze(0).tile([bs, 1, 1])\n+ tgt = self.tgt_embed.weight.unsqueeze(0).tile([bs, 1, 1])\n+ reference_points = F.sigmoid(self.reference_points(query_embed))\n+ reference_points_input = reference_points.unsqueeze(\n+ 2) * valid_ratios.unsqueeze(1)\n+\n+ # decoder\n+ hs = self.decoder(tgt, reference_points_input, memory, spatial_shapes,\n+ mask_flatten, query_embed)\n+\n+ return (hs, memory, reference_points)\n" }, { "change_type": "MODIFY", "old_path": "ppdet/modeling/transformers/position_encoding.py", "new_path": "ppdet/modeling/transformers/position_encoding.py", "diff": "@@ -32,11 +32,14 @@ class PositionEmbedding(nn.Layer):\nnormalize=True,\nscale=None,\nembed_type='sine',\n- num_embeddings=50):\n+ num_embeddings=50,\n+ offset=0.):\nsuper(PositionEmbedding, self).__init__()\nassert embed_type in ['sine', 'learned']\nself.embed_type = 
embed_type\n+ self.offset = offset\n+ self.eps = 1e-6\nif self.embed_type == 'sine':\nself.num_pos_feats = num_pos_feats\nself.temperature = temperature\n@@ -65,9 +68,10 @@ class PositionEmbedding(nn.Layer):\ny_embed = mask.cumsum(1, dtype='float32')\nx_embed = mask.cumsum(2, dtype='float32')\nif self.normalize:\n- eps = 1e-6\n- y_embed = y_embed / (y_embed[:, -1:, :] + eps) * self.scale\n- x_embed = x_embed / (x_embed[:, :, -1:] + eps) * self.scale\n+ y_embed = (y_embed + self.offset) / (\n+ y_embed[:, -1:, :] + self.eps) * self.scale\n+ x_embed = (x_embed + self.offset) / (\n+ x_embed[:, :, -1:] + self.eps) * self.scale\ndim_t = 2 * (paddle.arange(self.num_pos_feats) //\n2).astype('float32')\n" }, { "change_type": "MODIFY", "old_path": "ppdet/modeling/transformers/utils.py", "new_path": "ppdet/modeling/transformers/utils.py", "diff": "@@ -25,7 +25,8 @@ from ..bbox_utils import bbox_overlaps\n__all__ = [\n'_get_clones', 'bbox_overlaps', 'bbox_cxcywh_to_xyxy',\n- 'bbox_xyxy_to_cxcywh', 'sigmoid_focal_loss'\n+ 'bbox_xyxy_to_cxcywh', 'sigmoid_focal_loss', 'inverse_sigmoid',\n+ 'deformable_attention_core_func'\n]\n@@ -55,3 +56,51 @@ def sigmoid_focal_loss(logit, label, normalizer=1.0, alpha=0.25, gamma=2.0):\nalpha_t = alpha * label + (1 - alpha) * (1 - label)\nloss = alpha_t * loss\nreturn loss.mean(1).sum() / normalizer\n+\n+\n+def inverse_sigmoid(x, eps=1e-6):\n+ x = x.clip(min=0., max=1.)\n+ return paddle.log(x / (1 - x + eps) + eps)\n+\n+\n+def deformable_attention_core_func(value, value_spatial_shapes,\n+ sampling_locations, attention_weights):\n+ \"\"\"\n+ Args:\n+ value (Tensor): [bs, value_length, n_head, c]\n+ value_spatial_shapes (Tensor): [n_levels, 2]\n+ sampling_locations (Tensor): [bs, query_length, n_head, n_levels, n_points, 2]\n+ attention_weights (Tensor): [bs, query_length, n_head, n_levels, n_points]\n+\n+ Returns:\n+ output (Tensor): [bs, Length_{query}, C]\n+ \"\"\"\n+ bs, Len_v, n_head, c = value.shape\n+ _, Len_q, n_head, n_levels, n_points, _ = sampling_locations.shape\n+\n+ value_list = value.split(value_spatial_shapes.prod(1).tolist(), axis=1)\n+ sampling_grids = 2 * sampling_locations - 1\n+ sampling_value_list = []\n+ for level, (h, w) in enumerate(value_spatial_shapes.tolist()):\n+ # N_, H_*W_, M_, D_ -> N_, H_*W_, M_*D_ -> N_, M_*D_, H_*W_ -> N_*M_, D_, H_, W_\n+ value_l_ = value_list[level].flatten(2).transpose(\n+ [0, 2, 1]).reshape([bs * n_head, c, h, w])\n+ # N_, Lq_, M_, P_, 2 -> N_, M_, Lq_, P_, 2 -> N_*M_, Lq_, P_, 2\n+ sampling_grid_l_ = sampling_grids[:, :, :, level].transpose(\n+ [0, 2, 1, 3, 4]).flatten(0, 1)\n+ # N_*M_, D_, Lq_, P_\n+ sampling_value_l_ = F.grid_sample(\n+ value_l_,\n+ sampling_grid_l_,\n+ mode='bilinear',\n+ padding_mode='zeros',\n+ align_corners=False)\n+ sampling_value_list.append(sampling_value_l_)\n+ # (N_, Lq_, M_, L_, P_) -> (N_, M_, Lq_, L_, P_) -> (N_*M_, 1, Lq_, L_*P_)\n+ attention_weights = attention_weights.transpose([0, 2, 1, 3, 4]).reshape(\n+ [bs * n_head, 1, Len_q, n_levels * n_points])\n+ output = (paddle.stack(\n+ sampling_value_list, axis=-2).flatten(-2) *\n+ attention_weights).sum(-1).reshape([bs, n_head * c, Len_q])\n+\n+ return output.transpose([0, 2, 1])\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
[transformer] add Deformable DETR base code (#3718)
499,339
20.07.2021 15:10:46
-28,800
320c6eeab7930a0350f928fb35ddfc43e18a6c84
[transformer] add readme and deformable configs
[ { "change_type": "ADD", "old_path": null, "new_path": "configs/deformable_detr/README.md", "diff": "+# Deformable DETR\n+\n+## Introduction\n+\n+\n+Deformable DETR is an object detection model based on DETR. We reproduced the model of the paper.\n+\n+\n+## Model Zoo\n+\n+| Backbone | Model | Images/GPU | Inf time (fps) | Box AP | Config | Download |\n+|:------:|:--------:|:--------:|:--------------:|:------:|:------:|:--------:|\n+| R-50 | Deformable DETR | 2 | --- | 44.1 | [config](https://github.com/PaddlePaddle/PaddleDetection/blob/develop/configs/deformable_detr/deformable_detr_r50_1x_coco.yml) | [model](https://paddledet.bj.bcebos.com/models/deformable_detr_r50_1x_coco.pdparams) |\n+\n+**Notes:**\n+\n+- Deformable DETR is trained on COCO train2017 dataset and evaluated on val2017 results of `mAP(IoU=0.5:0.95)`.\n+- Deformable DETR uses 8GPU to train 50 epochs.\n+\n+## Citations\n+```\n+@inproceedings{\n+zhu2021deformable,\n+title={Deformable DETR: Deformable Transformers for End-to-End Object Detection},\n+author={Xizhou Zhu and Weijie Su and Lewei Lu and Bin Li and Xiaogang Wang and Jifeng Dai},\n+booktitle={International Conference on Learning Representations},\n+year={2021},\n+url={https://openreview.net/forum?id=gZ9hCDWe6ke}\n+}\n+```\n" }, { "change_type": "ADD", "old_path": null, "new_path": "configs/deformable_detr/_base_/deformable_detr_r50.yml", "diff": "+architecture: DETR\n+pretrain_weights: https://paddledet.bj.bcebos.com/models/pretrained/ResNet50_vb_normal_pretrained.pdparams\n+hidden_dim: 256\n+use_focal_loss: True\n+\n+\n+DETR:\n+ backbone: ResNet\n+ transformer: DeformableTransformer\n+ detr_head: DeformableDETRHead\n+ post_process: DETRBBoxPostProcess\n+\n+\n+ResNet:\n+ # index 0 stands for res2\n+ depth: 50\n+ norm_type: bn\n+ freeze_at: 0\n+ return_idx: [1, 2, 3]\n+ lr_mult_list: [0.0, 0.1, 0.1, 0.1]\n+ num_stages: 4\n+\n+\n+DeformableTransformer:\n+ num_queries: 300\n+ position_embed_type: sine\n+ nhead: 8\n+ num_encoder_layers: 6\n+ num_decoder_layers: 6\n+ dim_feedforward: 1024\n+ dropout: 0.1\n+ activation: relu\n+ num_feature_levels: 4\n+ num_encoder_points: 4\n+ num_decoder_points: 4\n+\n+\n+DeformableDETRHead:\n+ num_mlp_layers: 3\n+\n+\n+DETRLoss:\n+ loss_coeff: {class: 2, bbox: 5, giou: 2, mask: 1, dice: 1}\n+ aux_loss: True\n+\n+\n+HungarianMatcher:\n+ matcher_coeff: {class: 2, bbox: 5, giou: 2}\n" }, { "change_type": "ADD", "old_path": null, "new_path": "configs/deformable_detr/_base_/deformable_detr_reader.yml", "diff": "+worker_num: 0\n+TrainReader:\n+ sample_transforms:\n+ - Decode: {}\n+ - RandomFlip: {prob: 0.5}\n+ - RandomSelect: { transforms1: [ RandomShortSideResize: { short_side_sizes: [ 480, 512, 544, 576, 608, 640, 672, 704, 736, 768, 800 ], max_size: 1333 } ],\n+ transforms2: [\n+ RandomShortSideResize: { short_side_sizes: [ 400, 500, 600 ] },\n+ RandomSizeCrop: { min_size: 384, max_size: 600 },\n+ RandomShortSideResize: { short_side_sizes: [ 480, 512, 544, 576, 608, 640, 672, 704, 736, 768, 800 ], max_size: 1333 } ]\n+ }\n+ - NormalizeImage: {is_scale: true, mean: [0.485,0.456,0.406], std: [0.229, 0.224,0.225]}\n+ - NormalizeBox: {}\n+ - BboxXYXY2XYWH: {}\n+ - Permute: {}\n+ batch_transforms:\n+ - PadMaskBatch: {pad_to_stride: -1, return_pad_mask: true}\n+ batch_size: 2\n+ shuffle: true\n+ drop_last: true\n+ collate_batch: false\n+ use_shared_memory: false\n+\n+\n+EvalReader:\n+ sample_transforms:\n+ - Decode: {}\n+ - Resize: {target_size: [800, 1333], keep_ratio: True}\n+ - NormalizeImage: {is_scale: true, mean: [0.485,0.456,0.406], std: 
[0.229, 0.224,0.225]}\n+ - Permute: {}\n+ batch_transforms:\n+ - PadMaskBatch: {pad_to_stride: -1, return_pad_mask: true}\n+ batch_size: 1\n+ shuffle: false\n+ drop_last: false\n+ drop_empty: false\n+\n+\n+TestReader:\n+ sample_transforms:\n+ - Decode: {}\n+ - Resize: {target_size: [800, 1333], keep_ratio: True}\n+ - NormalizeImage: {is_scale: true, mean: [0.485,0.456,0.406], std: [0.229, 0.224,0.225]}\n+ - Permute: {}\n+ batch_transforms:\n+ - PadMaskBatch: {pad_to_stride: -1, return_pad_mask: true}\n+ batch_size: 1\n+ shuffle: false\n+ drop_last: false\n" }, { "change_type": "ADD", "old_path": null, "new_path": "configs/deformable_detr/_base_/deformable_optimizer_1x.yml", "diff": "+epoch: 50\n+\n+LearningRate:\n+ base_lr: 0.0002\n+ schedulers:\n+ - !PiecewiseDecay\n+ gamma: 0.1\n+ milestones: [40]\n+ use_warmup: false\n+\n+OptimizerBuilder:\n+ clip_grad_by_norm: 0.1\n+ regularizer: false\n+ optimizer:\n+ type: AdamW\n+ weight_decay: 0.0001\n" }, { "change_type": "ADD", "old_path": null, "new_path": "configs/deformable_detr/deformable_detr_r50_1x_coco.yml", "diff": "+_BASE_: [\n+ '../datasets/coco_detection.yml',\n+ '../runtime.yml',\n+ '_base_/deformable_optimizer_1x.yml',\n+ '_base_/deformable_detr_r50.yml',\n+ '_base_/deformable_detr_reader.yml',\n+]\n+weights: output/deformable_detr_r50_1x_coco/model_final\n" }, { "change_type": "ADD", "old_path": null, "new_path": "configs/detr/README.md", "diff": "+# DETR\n+\n+## Introduction\n+\n+\n+DETR is an object detection model based on transformer. We reproduced the model of the paper.\n+\n+\n+## Model Zoo\n+\n+| Backbone | Model | Images/GPU | Inf time (fps) | Box AP | Config | Download |\n+|:------:|:--------:|:--------:|:--------------:|:------:|:------:|:--------:|\n+| R-50 | DETR | 4 | --- | 42.3 | [config](https://github.com/PaddlePaddle/PaddleDetection/blob/develop/configs/detr/detr_r50_1x_coco.yml) | [model](https://paddledet.bj.bcebos.com/models/detr_r50_1x_coco.pdparams) |\n+\n+**Notes:**\n+\n+- DETR is trained on COCO train2017 dataset and evaluated on val2017 results of `mAP(IoU=0.5:0.95)`.\n+- DETR uses 8GPU to train 500 epochs.\n+\n+## Citations\n+```\n+@inproceedings{detr,\n+ author = {Nicolas Carion and\n+ Francisco Massa and\n+ Gabriel Synnaeve and\n+ Nicolas Usunier and\n+ Alexander Kirillov and\n+ Sergey Zagoruyko},\n+ title = {End-to-End Object Detection with Transformers},\n+ booktitle = {ECCV},\n+ year = {2020}\n+}\n+```\n" }, { "change_type": "MODIFY", "old_path": "configs/detr/_base_/detr_r50.yml", "new_path": "configs/detr/_base_/detr_r50.yml", "diff": "architecture: DETR\n-pretrain_weights: https://paddledet.bj.bcebos.com/models/pretrained/ResNet50_cos_pretrained.pdparams\n+pretrain_weights: https://paddledet.bj.bcebos.com/models/pretrained/ResNet50_vb_normal_pretrained.pdparams\nhidden_dim: 256\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
[transformer] add readme and deformable configs (#3720)
499,298
20.07.2021 18:25:01
-28,800
35e02553b2c18a10085a3d0f41cb66ae2380e72b
[MOT] update install dependency
[ { "change_type": "MODIFY", "old_path": "requirements.txt", "new_path": "requirements.txt", "diff": "@@ -14,3 +14,4 @@ lap\nsklearn\nmotmetrics\nopenpyxl\n+cython_bbox\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
[MOT] update install dependency (#3726)
499,298
21.07.2021 10:12:01
-28,800
11e4d27e66458697a965c0332fd993e70791cd8b
fix operators typo
[ { "change_type": "MODIFY", "old_path": "ppdet/data/transform/operators.py", "new_path": "ppdet/data/transform/operators.py", "diff": "@@ -33,7 +33,7 @@ import math\nimport numpy as np\nimport os\nimport copy\n-\n+import logging\nimport cv2\nfrom PIL import Image, ImageDraw\n@@ -194,7 +194,7 @@ class RandomErasingImage(BaseOperator):\nArgs:\nprob (float): probability to carry out random erasing\nlower (float): lower limit of the erasing area ratio\n- heigher (float): upper limit of the erasing area ratio\n+ higher (float): upper limit of the erasing area ratio\naspect_ratio (float): aspect ratio of the erasing region\n\"\"\"\nsuper(RandomErasingImage, self).__init__()\n@@ -646,7 +646,7 @@ class Resize(BaseOperator):\nmask = mask_util.decode(rle)\nmask = cv2.resize(\n- image,\n+ mask,\nNone,\nNone,\nfx=im_scale_x,\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
fix operators typo (#3732)
499,331
21.07.2021 18:56:15
-28,800
17b2c531051cd362d57174859854583a08f7e701
set mAP=0.0 as default
[ { "change_type": "MODIFY", "old_path": "ppdet/metrics/map_utils.py", "new_path": "ppdet/metrics/map_utils.py", "diff": "@@ -238,7 +238,7 @@ class DetectionMAP(object):\n\"\"\"\nself.class_score_poss = [[] for _ in range(self.class_num)]\nself.class_gt_counts = [0] * self.class_num\n- self.mAP = None\n+ self.mAP = 0.0\ndef accumulate(self):\n\"\"\"\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
set mAP=0.0 as default (#3739)
499,395
22.07.2021 14:28:56
-28,800
1ff63c09d6294b60650fd899f6687545810ed7da
fix expand shape problem
[ { "change_type": "MODIFY", "old_path": "ppdet/modeling/necks/yolo_fpn.py", "new_path": "ppdet/modeling/necks/yolo_fpn.py", "diff": "@@ -24,28 +24,23 @@ __all__ = ['YOLOv3FPN', 'PPYOLOFPN', 'PPYOLOTinyFPN', 'PPYOLOPAN']\ndef add_coord(x, data_format):\n- b = x.shape[0]\nif data_format == 'NCHW':\n- h = x.shape[2]\n- w = x.shape[3]\n+ b, _, h, w = paddle.shape(x)\nelse:\n- h = x.shape[1]\n- w = x.shape[2]\n+ b, h, w, _ = paddle.shape(x)\n- gx = paddle.arange(w, dtype='float32') / (w - 1.) * 2.0 - 1.\n- if data_format == 'NCHW':\n- gx = gx.reshape([1, 1, 1, w]).expand([b, 1, h, w])\n- else:\n- gx = gx.reshape([1, 1, w, 1]).expand([b, h, w, 1])\n- gx.stop_gradient = True\n+ gx = paddle.arange(w, dtype=x.dtype) / ((w - 1.) * 2.0) - 1.\n+ gy = paddle.arange(h, dtype=x.dtype) / ((h - 1.) * 2.0) - 1.\n- gy = paddle.arange(h, dtype='float32') / (h - 1.) * 2.0 - 1.\nif data_format == 'NCHW':\n+ gx = gx.reshape([1, 1, 1, w]).expand([b, 1, h, w])\ngy = gy.reshape([1, 1, h, 1]).expand([b, 1, h, w])\nelse:\n+ gx = gx.reshape([1, 1, w, 1]).expand([b, h, w, 1])\ngy = gy.reshape([1, h, 1, 1]).expand([b, h, w, 1])\n- gy.stop_gradient = True\n+ gx.stop_gradient = True\n+ gy.stop_gradient = True\nreturn gx, gy\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
fix expand shape problem (#3744)
499,395
23.07.2021 11:17:52
-28,800
8019fa9e547ccba9dd0183b33b2e081716d6e545
fix expand problem again
[ { "change_type": "MODIFY", "old_path": "ppdet/modeling/necks/yolo_fpn.py", "new_path": "ppdet/modeling/necks/yolo_fpn.py", "diff": "@@ -24,10 +24,11 @@ __all__ = ['YOLOv3FPN', 'PPYOLOFPN', 'PPYOLOTinyFPN', 'PPYOLOPAN']\ndef add_coord(x, data_format):\n+ shape = paddle.shape(x)\nif data_format == 'NCHW':\n- b, _, h, w = paddle.shape(x)\n+ b, h, w = shape[0], shape[2], shape[3]\nelse:\n- b, h, w, _ = paddle.shape(x)\n+ b, h, w = shape[0], shape[1], shape[2]\ngx = paddle.arange(w, dtype=x.dtype) / ((w - 1.) * 2.0) - 1.\ngy = paddle.arange(h, dtype=x.dtype) / ((h - 1.) * 2.0) - 1.\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
fix expand problem again (#3752)
499,348
27.07.2021 10:17:09
-28,800
4b1e40309c24d99b8b1139b4b3210f4b48c79803
change dark_w32_384x288 batchsize to 32; modify np.pad to support lower numpy version
[ { "change_type": "MODIFY", "old_path": "configs/keypoint/hrnet/dark_hrnet_w32_384x288.yml", "new_path": "configs/keypoint/hrnet/dark_hrnet_w32_384x288.yml", "diff": "@@ -111,7 +111,7 @@ TrainReader:\nstd: *global_std\nis_scale: true\n- Permute: {}\n- batch_size: 64\n+ batch_size: 32\nshuffle: true\ndrop_last: false\n" }, { "change_type": "MODIFY", "old_path": "deploy/python/keypoint_postprocess.py", "new_path": "deploy/python/keypoint_postprocess.py", "diff": "@@ -106,6 +106,7 @@ class HrHRNetPostProcess(object):\n# pad the cost matrix, otherwise new pose are ignored\nif num_valid > num_clusters:\ncost = np.pad(cost, ((0, 0), (0, num_valid - num_clusters)),\n+ 'constant',\nconstant_values=((0, 0), (0, 1e-10)))\nrows, cols = linear_sum_assignment(cost)\nfor y, x in zip(rows, cols):\n" }, { "change_type": "MODIFY", "old_path": "ppdet/modeling/architectures/keypoint_hrhrnet.py", "new_path": "ppdet/modeling/architectures/keypoint_hrhrnet.py", "diff": "@@ -35,7 +35,7 @@ class HigherHRNet(BaseArch):\ndef __init__(self,\nbackbone='HRNet',\n- hrhrnet_head='HigherHRNetHead',\n+ hrhrnet_head='HrHRNetHead',\npost_process='HrHRNetPostProcess',\neval_flip=True,\nflip_perm=None,\n@@ -224,6 +224,7 @@ class HrHRNetPostProcess(object):\n# pad the cost matrix, otherwise new pose are ignored\nif num_valid > num_clusters:\ncost = np.pad(cost, ((0, 0), (0, num_valid - num_clusters)),\n+ 'constant',\nconstant_values=((0, 0), (0, 1e-10)))\nrows, cols = linear_sum_assignment(cost)\nfor y, x in zip(rows, cols):\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
change dark_w32_384x288 batchsize to 32; modify np.pad to support lower numpy version (#3786)
499,331
27.07.2021 20:15:42
-28,800
4ea5b435c62d0f8c8cd5476285fda4e20537e63f
add ce script, and add coco_ce dataset
[ { "change_type": "MODIFY", "old_path": "ppdet/utils/download.py", "new_path": "ppdet/utils/download.py", "diff": "@@ -97,7 +97,10 @@ DATASETS = {\n'https://paddledet.bj.bcebos.com/data/spine_coco.tar',\n'7ed69ae73f842cd2a8cf4f58dc3c5535', ), ], ['annotations', 'images']),\n'mot': (),\n- 'objects365': ()\n+ 'objects365': (),\n+ 'coco_ce': ([(\n+ 'https://paddledet.bj.bcebos.com/data/coco_ce.tar',\n+ 'eadd1b79bc2f069f2744b1dd4e0c0329', ), ], [])\n}\nDOWNLOAD_RETRY_LIMIT = 3\n" }, { "change_type": "ADD", "old_path": null, "new_path": "test/ppdet_params.txt", "diff": "+===========================train_params===========================\n+model_name:yolov3_darknet53_270e_coco\n+python:python3.7\n+gpu_list:0|0,1\n+Global.use_gpu:True|True\n+Global.auto_cast:False\n+Global.epoch_num:lite_train_infer=2|whole_train_infer=300\n+Global.save_model_dir:./output/\n+Train.loader.batch_size_per_card:lite_train_infer=2|whole_train_infer=4\n+Global.pretrained_model:null\n+train_model_name:latest\n+train_infer_img_dir:./dataset/coco_ce/\n+null:null\n+##\n+trainer:norm_train|pact_train\n+norm_train:tools/train.py -c configs/yolov3/yolov3_darknet53_270e_coco.yml -o\n+pact_train:null\n+fpgm_train:null\n+distill_train:null\n+null:null\n+null:null\n+##\n+===========================eval_params===========================\n+eval:tools/eval.py -c configs/yolov3/yolov3_darknet53_270e_coco.yml -o\n+null:null\n+##\n+===========================infer_params===========================\n+Global.save_inference_dir:./output/\n+Global.pretrained_model:\n+norm_export:tools/export_model.py -c configs/yolov3/yolov3_darknet53_270e_coco.yml -o\n+quant_export:deploy/slim/quantization/export_model.py -c configs/yolov3/yolov3_darknet53_270e_coco.yml -o\n+fpgm_export:deploy/slim/prune/export_prune_model.py\n+distill_export:null\n+null:null\n+null:null\n+##\n+inference:deploy/python/infer.py\n+--device:cpu|gpu\n+--enable_mkldnn:False|True\n+--cpu_threads:1|4\n+--batch_size:1|2\n+--use_tensorrt:null\n+--run_mode:fluid\n+--model_dir:./output_inference/yolov3_darknet53_270e_coco/\n+--image_dir:./demo1/\n+--save_log_path:null\n+--run_benchmark:True\n+null:null\n" }, { "change_type": "ADD", "old_path": null, "new_path": "test/test.sh", "diff": "+#!/bin/bash\n+FILENAME=$1\n+# MODE be one of ['lite_train_infer' 'whole_infer' 'whole_train_infer', 'infer']\n+MODE=$2\n+\n+dataline=$(cat ${FILENAME})\n+\n+# parser params\n+IFS=$'\\n'\n+lines=(${dataline})\n+\n+function func_parser_key(){\n+ strs=$1\n+ IFS=\":\"\n+ array=(${strs})\n+ tmp=${array[0]}\n+ echo ${tmp}\n+}\n+function func_parser_value(){\n+ strs=$1\n+ IFS=\":\"\n+ array=(${strs})\n+ tmp=${array[1]}\n+ echo ${tmp}\n+}\n+function func_set_params(){\n+ key=$1\n+ value=$2\n+ if [ ${key} = \"null\" ];then\n+ echo \" \"\n+ elif [[ ${value} = \"null\" ]] || [[ ${value} = \" \" ]] || [ ${#value} -le 0 ];then\n+ echo \" \"\n+ else\n+ echo \"${key}=${value}\"\n+ fi\n+}\n+function func_parser_params(){\n+ strs=$1\n+ IFS=\":\"\n+ array=(${strs})\n+ key=${array[0]}\n+ tmp=${array[1]}\n+ IFS=\"|\"\n+ res=\"\"\n+ for _params in ${tmp[*]}; do\n+ IFS=\"=\"\n+ array=(${_params})\n+ mode=${array[0]}\n+ value=${array[1]}\n+ if [[ ${mode} = ${MODE} ]]; then\n+ IFS=\"|\"\n+ #echo $(func_set_params \"${mode}\" \"${value}\")\n+ echo $value\n+ break\n+ fi\n+ IFS=\"|\"\n+ done\n+ echo ${res}\n+}\n+function status_check(){\n+ last_status=$1 # the exit code\n+ run_command=$2\n+ run_log=$3\n+ if [ $last_status -eq 0 ]; then\n+ echo -e \"\\033[33m Run successfully with command - ${run_command}! 
\\033[0m\" | tee -a ${run_log}\n+ else\n+ echo -e \"\\033[33m Run failed with command - ${run_command}! \\033[0m\" | tee -a ${run_log}\n+ fi\n+}\n+\n+IFS=$'\\n'\n+# The training params\n+model_name=$(func_parser_value \"${lines[1]}\")\n+python=$(func_parser_value \"${lines[2]}\")\n+gpu_list=$(func_parser_value \"${lines[3]}\")\n+train_use_gpu_key=$(func_parser_key \"${lines[4]}\")\n+train_use_gpu_value=$(func_parser_value \"${lines[4]}\")\n+autocast_list=$(func_parser_value \"${lines[5]}\")\n+autocast_key=$(func_parser_key \"${lines[5]}\")\n+epoch_key=$(func_parser_key \"${lines[6]}\")\n+epoch_num=$(func_parser_params \"${lines[6]}\")\n+save_model_key=$(func_parser_key \"${lines[7]}\")\n+train_batch_key=$(func_parser_key \"${lines[8]}\")\n+train_batch_value=$(func_parser_params \"${lines[8]}\")\n+pretrain_model_key=$(func_parser_key \"${lines[9]}\")\n+pretrain_model_value=$(func_parser_value \"${lines[9]}\")\n+train_model_name=$(func_parser_value \"${lines[10]}\")\n+train_infer_img_dir=$(func_parser_value \"${lines[11]}\")\n+train_param_key1=$(func_parser_key \"${lines[12]}\")\n+train_param_value1=$(func_parser_value \"${lines[12]}\")\n+\n+trainer_list=$(func_parser_value \"${lines[14]}\")\n+trainer_norm=$(func_parser_key \"${lines[15]}\")\n+norm_trainer=$(func_parser_value \"${lines[15]}\")\n+pact_key=$(func_parser_key \"${lines[16]}\")\n+pact_trainer=$(func_parser_value \"${lines[16]}\")\n+fpgm_key=$(func_parser_key \"${lines[17]}\")\n+fpgm_trainer=$(func_parser_value \"${lines[17]}\")\n+distill_key=$(func_parser_key \"${lines[18]}\")\n+distill_trainer=$(func_parser_value \"${lines[18]}\")\n+trainer_key1=$(func_parser_key \"${lines[19]}\")\n+trainer_value1=$(func_parser_value \"${lines[19]}\")\n+trainer_key2=$(func_parser_key \"${lines[20]}\")\n+trainer_value2=$(func_parser_value \"${lines[20]}\")\n+\n+eval_py=$(func_parser_value \"${lines[23]}\")\n+eval_key1=$(func_parser_key \"${lines[24]}\")\n+eval_value1=$(func_parser_value \"${lines[24]}\")\n+\n+save_infer_key=$(func_parser_key \"${lines[27]}\")\n+export_weight=$(func_parser_key \"${lines[28]}\")\n+norm_export=$(func_parser_value \"${lines[29]}\")\n+pact_export=$(func_parser_value \"${lines[30]}\")\n+fpgm_export=$(func_parser_value \"${lines[31]}\")\n+distill_export=$(func_parser_value \"${lines[32]}\")\n+export_key1=$(func_parser_key \"${lines[33]}\")\n+export_value1=$(func_parser_value \"${lines[33]}\")\n+export_key2=$(func_parser_key \"${lines[34]}\")\n+export_value2=$(func_parser_value \"${lines[34]}\")\n+\n+inference_py=$(func_parser_value \"${lines[36]}\")\n+use_gpu_key=$(func_parser_key \"${lines[37]}\")\n+use_gpu_list=$(func_parser_value \"${lines[37]}\")\n+use_mkldnn_key=$(func_parser_key \"${lines[38]}\")\n+use_mkldnn_list=$(func_parser_value \"${lines[38]}\")\n+cpu_threads_key=$(func_parser_key \"${lines[39]}\")\n+cpu_threads_list=$(func_parser_value \"${lines[39]}\")\n+batch_size_key=$(func_parser_key \"${lines[40]}\")\n+batch_size_list=$(func_parser_value \"${lines[40]}\")\n+use_trt_key=$(func_parser_key \"${lines[41]}\")\n+use_trt_list=$(func_parser_value \"${lines[41]}\")\n+precision_key=$(func_parser_key \"${lines[42]}\")\n+precision_list=$(func_parser_value \"${lines[42]}\")\n+infer_model_key=$(func_parser_key \"${lines[43]}\")\n+infer_model=$(func_parser_value \"${lines[43]}\")\n+image_dir_key=$(func_parser_key \"${lines[44]}\")\n+infer_img_dir=$(func_parser_value \"${lines[44]}\")\n+save_log_key=$(func_parser_key \"${lines[45]}\")\n+benchmark_key=$(func_parser_key 
\"${lines[46]}\")\n+benchmark_value=$(func_parser_value \"${lines[46]}\")\n+infer_key1=$(func_parser_key \"${lines[47]}\")\n+infer_value1=$(func_parser_value \"${lines[47]}\")\n+\n+LOG_PATH=\"./tests/output\"\n+mkdir -p ${LOG_PATH}\n+status_log=\"${LOG_PATH}/results.log\"\n+\n+\n+function func_inference(){\n+ IFS='|'\n+ _python=$1\n+ _script=$2\n+ _model_dir=$3\n+ _log_path=$4\n+ _img_dir=$5\n+ _flag_quant=$6\n+ # inference\n+ for use_gpu in ${use_gpu_list[*]}; do\n+ if [ ${use_gpu} = \"False\" ] || [ ${use_gpu} = \"cpu\" ]; then\n+ for use_mkldnn in ${use_mkldnn_list[*]}; do\n+ if [ ${use_mkldnn} = \"False\" ] && [ ${_flag_quant} = \"True\" ]; then\n+ continue\n+ fi\n+ for threads in ${cpu_threads_list[*]}; do\n+ for batch_size in ${batch_size_list[*]}; do\n+ _save_log_path=\"${_log_path}/infer_cpu_usemkldnn_${use_mkldnn}_threads_${threads}_batchsize_${batch_size}.log\"\n+ set_infer_data=$(func_set_params \"${image_dir_key}\" \"${_img_dir}\")\n+ set_benchmark=$(func_set_params \"${benchmark_key}\" \"${benchmark_value}\")\n+ set_batchsize=$(func_set_params \"${batch_size_key}\" \"${batch_size}\")\n+ set_cpu_threads=$(func_set_params \"${cpu_threads_key}\" \"${threads}\")\n+ set_model_dir=$(func_set_params \"${infer_model_key}\" \"${_model_dir}\")\n+ set_infer_params1=$(func_set_params \"${infer_key1}\" \"${infer_value1}\")\n+ command=\"${_python} ${_script} ${use_gpu_key}=${use_gpu} ${use_mkldnn_key}=${use_mkldnn} ${set_cpu_threads} ${set_model_dir} ${set_batchsize} ${set_infer_data} ${set_benchmark} ${set_infer_params1} 2>&1 | tee ${_save_log_path} \"\n+ echo $command\n+ #eval $command\n+ #status_check $? \"${command}\" \"${status_log}\"\n+ done\n+ done\n+ done\n+ elif [ ${use_gpu} = \"True\" ] || [ ${use_gpu} = \"gpu\" ]; then\n+ for use_trt in ${use_trt_list[*]}; do\n+ for precision in ${precision_list[*]}; do\n+ if [ ${use_trt} = \"False\" ] && [ ${precision} != \"fp32\" ]; then\n+ continue\n+ fi\n+ if [[ ${use_trt} = \"False\" || ${precision} != \"int8\" ]] && [ ${_flag_quant} = \"True\" ]; then\n+ continue\n+ fi\n+ for batch_size in ${batch_size_list[*]}; do\n+ _save_log_path=\"${_log_path}/infer_gpu_usetrt_${use_trt}_precision_${precision}_batchsize_${batch_size}.log\"\n+ set_infer_data=$(func_set_params \"${image_dir_key}\" \"${_img_dir}\")\n+ set_benchmark=$(func_set_params \"${benchmark_key}\" \"${benchmark_value}\")\n+ set_batchsize=$(func_set_params \"${batch_size_key}\" \"${batch_size}\")\n+ set_tensorrt=$(func_set_params \"${use_trt_key}\" \"${use_trt}\")\n+ set_precision=$(func_set_params \"${precision_key}\" \"${precision}\")\n+ set_model_dir=$(func_set_params \"${infer_model_key}\" \"${_model_dir}\")\n+ #command=\"${_python} ${_script} ${use_gpu_key}=${use_gpu} ${set_tensorrt} ${set_precision} ${set_model_dir} ${set_batchsize} ${set_infer_data} ${set_benchmark} 2>&1 | tee ${_save_log_path}\"\n+ command=\"${_python} ${_script} ${use_gpu_key}=${use_gpu} ${set_tensorrt} ${set_precision} ${set_model_dir} ${set_batchsize} ${set_infer_data} ${set_benchmark} > ${_save_log_path}\"\n+ eval $command\n+ status_check $? 
\"${command}\" \"${status_log}\"\n+ done\n+ done\n+ done\n+ else\n+ echo \"Currently does not support hardware other than CPU and GPU\"\n+ fi\n+ done\n+}\n+\n+if [ ${MODE} = \"infer\" ]; then\n+ GPUID=$3\n+ if [ ${#GPUID} -le 0 ];then\n+ env=\" \"\n+ else\n+ env=\"export CUDA_VISIBLE_DEVICES=${GPUID}\"\n+ fi\n+ echo $env\n+ #run inference\n+ func_inference \"${python}\" \"${inference_py}\" \"${infer_model}\" \"${LOG_PATH}\" \"${infer_img_dir}\" \"False\"\n+\n+else\n+ IFS=\"|\"\n+ export Count=0\n+ USE_GPU_KEY=(${train_use_gpu_value})\n+ for gpu in ${gpu_list[*]}; do\n+ use_gpu=${USE_GPU_KEY[Count]}\n+ Count=$(($Count + 1))\n+ if [ ${gpu} = \"-1\" ];then\n+ env=\"\"\n+ elif [ ${#gpu} -le 1 ];then\n+ env=\"export CUDA_VISIBLE_DEVICES=${gpu}\"\n+ eval ${env}\n+ elif [ ${#gpu} -le 15 ];then\n+ IFS=\",\"\n+ array=(${gpu})\n+ env=\"export CUDA_VISIBLE_DEVICES=${array[0]}\"\n+ IFS=\"|\"\n+ else\n+ IFS=\";\"\n+ array=(${gpu})\n+ ips=${array[0]}\n+ gpu=${array[1]}\n+ IFS=\"|\"\n+ env=\" \"\n+ fi\n+ for autocast in ${autocast_list[*]}; do\n+ for trainer in ${trainer_list[*]}; do\n+ flag_quant=False\n+ if [ ${trainer} = ${pact_key} ]; then\n+ run_train=${pact_trainer}\n+ run_export=${pact_export}\n+ flag_quant=True\n+ elif [ ${trainer} = \"${fpgm_key}\" ]; then\n+ run_train=${fpgm_trainer}\n+ run_export=${fpgm_export}\n+ elif [ ${trainer} = \"${distill_key}\" ]; then\n+ run_train=${distill_trainer}\n+ run_export=${distill_export}\n+ elif [ ${trainer} = ${trainer_key1} ]; then\n+ run_train=${trainer_value1}\n+ run_export=${export_value1}\n+ elif [[ ${trainer} = ${trainer_key2} ]]; then\n+ run_train=${trainer_value2}\n+ run_export=${export_value2}\n+ else\n+ run_train=${norm_trainer}\n+ run_export=${norm_export}\n+ fi\n+\n+ if [ ${run_train} = \"null\" ]; then\n+ continue\n+ fi\n+\n+ set_autocast=$(func_set_params \"${autocast_key}\" \"${autocast}\")\n+ set_epoch=$(func_set_params \"${epoch_key}\" \"${epoch_num}\")\n+ set_pretrain=$(func_set_params \"${pretrain_model_key}\" \"${pretrain_model_value}\")\n+ set_batchsize=$(func_set_params \"${train_batch_key}\" \"${train_batch_value}\")\n+ set_train_params1=$(func_set_params \"${train_param_key1}\" \"${train_param_value1}\")\n+ set_use_gpu=$(func_set_params \"${train_use_gpu_key}\" \"${use_gpu}\")\n+ save_log=\"${LOG_PATH}/${trainer}_gpus_${gpu}_autocast_${autocast}\"\n+\n+ # load pretrain from norm training if current trainer is pact or fpgm trainer\n+ if [ ${trainer} = ${pact_key} ] || [ ${trainer} = ${fpgm_key} ]; then\n+ set_pretrain=\"${load_norm_train_model}\"\n+ fi\n+\n+ set_save_model=$(func_set_params \"${save_model_key}\" \"${save_log}\")\n+ if [ ${#gpu} -le 2 ];then # train with cpu or single gpu\n+ cmd=\"${python} ${run_train} ${set_use_gpu} ${set_save_model} ${set_epoch} ${set_pretrain} ${set_autocast} ${set_batchsize} ${set_train_params1} \"\n+ elif [ ${#gpu} -le 15 ];then # train with multi-gpu\n+ cmd=\"${python} -m paddle.distributed.launch --gpus=${gpu} ${run_train} ${set_save_model} ${set_epoch} ${set_pretrain} ${set_autocast} ${set_batchsize} ${set_train_params1}\"\n+ else # train with multi-machine\n+ cmd=\"${python} -m paddle.distributed.launch --ips=${ips} --gpus=${gpu} ${run_train} ${set_save_model} ${set_pretrain} ${set_epoch} ${set_autocast} ${set_batchsize} ${set_train_params1}\"\n+ fi\n+ # run train\n+ eval \"unset CUDA_VISIBLE_DEVICES\"\n+ eval $cmd\n+ status_check $? 
\"${cmd}\" \"${status_log}\"\n+\n+ set_eval_pretrain=$(func_set_params \"${pretrain_model_key}\" \"${save_log}/${train_model_name}\")\n+ # save norm trained models to set pretrain for pact training and fpgm training\n+ if [ ${trainer} = ${trainer_norm} ]; then\n+ load_norm_train_model=${set_eval_pretrain}\n+ fi\n+ # run eval\n+ if [ ${eval_py} != \"null\" ]; then\n+ set_eval_params1=$(func_set_params \"${eval_key1}\" \"${eval_value1}\")\n+ eval_cmd=\"${python} ${eval_py} ${set_eval_pretrain} ${set_use_gpu} ${set_eval_params1}\"\n+ eval $eval_cmd\n+ status_check $? \"${eval_cmd}\" \"${status_log}\"\n+ fi\n+ # run export model\n+ if [ ${run_export} != \"null\" ]; then\n+ # run export model\n+ save_infer_path=\"${save_log}\"\n+ export_cmd=\"${python} ${run_export} ${export_weight}=${save_log}/${train_model_name} ${save_infer_key}=${save_infer_path}\"\n+ eval $export_cmd\n+ status_check $? \"${export_cmd}\" \"${status_log}\"\n+\n+ #run inference\n+ eval $env\n+ save_infer_path=\"${save_log}\"\n+ func_inference \"${python}\" \"${inference_py}\" \"${save_infer_path}\" \"${LOG_PATH}\" \"${train_infer_img_dir}\" \"${flag_quant}\"\n+ eval \"unset CUDA_VISIBLE_DEVICES\"\n+ fi\n+ done # done with: for trainer in ${trainer_list[*]}; do\n+ done # done with: for autocast in ${autocast_list[*]}; do\n+ done # done with: for gpu in ${gpu_list[*]}; do\n+fi # end if [ ${MODE} = \"infer\" ]; then\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
add ce script, and add coco_ce dataset (#3755)
499,333
28.07.2021 15:21:50
-28,800
36b48e9eb9e8f05bf3a95bdaca9342d30581f209
clean param name
[ { "change_type": "MODIFY", "old_path": "ppdet/modeling/backbones/blazenet.py", "new_path": "ppdet/modeling/backbones/blazenet.py", "diff": "@@ -55,25 +55,14 @@ class ConvBNLayer(nn.Layer):\npadding=padding,\ngroups=num_groups,\nweight_attr=ParamAttr(\n- learning_rate=conv_lr,\n- initializer=KaimingNormal(),\n- name=name + \"_weights\"),\n+ learning_rate=conv_lr, initializer=KaimingNormal()),\nbias_attr=False)\n- param_attr = ParamAttr(name=name + \"_bn_scale\")\n- bias_attr = ParamAttr(name=name + \"_bn_offset\")\nif norm_type == 'sync_bn':\n- self._batch_norm = nn.SyncBatchNorm(\n- out_channels, weight_attr=param_attr, bias_attr=bias_attr)\n+ self._batch_norm = nn.SyncBatchNorm(out_channels)\nelse:\nself._batch_norm = nn.BatchNorm(\n- out_channels,\n- act=None,\n- param_attr=param_attr,\n- bias_attr=bias_attr,\n- use_global_stats=False,\n- moving_mean_name=name + '_bn_mean',\n- moving_variance_name=name + '_bn_variance')\n+ out_channels, act=None, use_global_stats=False)\ndef forward(self, x):\nx = self._conv(x)\n" }, { "change_type": "MODIFY", "old_path": "ppdet/modeling/backbones/ghostnet.py", "new_path": "ppdet/modeling/backbones/ghostnet.py", "diff": "@@ -100,21 +100,15 @@ class SEBlock(nn.Layer):\nnum_channels,\nmed_ch,\nweight_attr=ParamAttr(\n- learning_rate=lr_mult,\n- initializer=Uniform(-stdv, stdv),\n- name=name + \"_1_weights\"),\n- bias_attr=ParamAttr(\n- learning_rate=lr_mult, name=name + \"_1_offset\"))\n+ learning_rate=lr_mult, initializer=Uniform(-stdv, stdv)),\n+ bias_attr=ParamAttr(learning_rate=lr_mult))\nstdv = 1.0 / math.sqrt(med_ch * 1.0)\nself.excitation = Linear(\nmed_ch,\nnum_channels,\nweight_attr=ParamAttr(\n- learning_rate=lr_mult,\n- initializer=Uniform(-stdv, stdv),\n- name=name + \"_2_weights\"),\n- bias_attr=ParamAttr(\n- learning_rate=lr_mult, name=name + \"_2_offset\"))\n+ learning_rate=lr_mult, initializer=Uniform(-stdv, stdv)),\n+ bias_attr=ParamAttr(learning_rate=lr_mult))\ndef forward(self, inputs):\npool = self.pool2d_gap(inputs)\n" }, { "change_type": "MODIFY", "old_path": "ppdet/modeling/backbones/hrnet.py", "new_path": "ppdet/modeling/backbones/hrnet.py", "diff": "@@ -51,31 +51,23 @@ class ConvNormLayer(nn.Layer):\nstride=stride,\npadding=(filter_size - 1) // 2,\ngroups=1,\n- weight_attr=ParamAttr(\n- name=name + \"_weights\", initializer=Normal(\n+ weight_attr=ParamAttr(initializer=Normal(\nmean=0., std=0.01)),\nbias_attr=False)\nnorm_lr = 0. 
if freeze_norm else 1.\n- norm_name = name + '_bn'\nparam_attr = ParamAttr(\n- name=norm_name + \"_scale\",\n- learning_rate=norm_lr,\n- regularizer=L2Decay(norm_decay))\n+ learning_rate=norm_lr, regularizer=L2Decay(norm_decay))\nbias_attr = ParamAttr(\n- name=norm_name + \"_offset\",\n- learning_rate=norm_lr,\n- regularizer=L2Decay(norm_decay))\n+ learning_rate=norm_lr, regularizer=L2Decay(norm_decay))\nglobal_stats = True if freeze_norm else False\nif norm_type in ['bn', 'sync_bn']:\nself.norm = nn.BatchNorm(\nch_out,\nparam_attr=param_attr,\nbias_attr=bias_attr,\n- use_global_stats=global_stats,\n- moving_mean_name=norm_name + '_mean',\n- moving_variance_name=norm_name + '_variance')\n+ use_global_stats=global_stats)\nelif norm_type == 'gn':\nself.norm = nn.GroupNorm(\nnum_groups=norm_groups,\n@@ -375,17 +367,13 @@ class SELayer(nn.Layer):\nself.squeeze = Linear(\nnum_channels,\nmed_ch,\n- weight_attr=ParamAttr(\n- initializer=Uniform(-stdv, stdv), name=name + \"_sqz_weights\"),\n- bias_attr=ParamAttr(name=name + '_sqz_offset'))\n+ weight_attr=ParamAttr(initializer=Uniform(-stdv, stdv)))\nstdv = 1.0 / math.sqrt(med_ch * 1.0)\nself.excitation = Linear(\nmed_ch,\nnum_filters,\n- weight_attr=ParamAttr(\n- initializer=Uniform(-stdv, stdv), name=name + \"_exc_weights\"),\n- bias_attr=ParamAttr(name=name + '_exc_offset'))\n+ weight_attr=ParamAttr(initializer=Uniform(-stdv, stdv)))\ndef forward(self, input):\npool = self.pool2d_gap(input)\n" }, { "change_type": "MODIFY", "old_path": "ppdet/modeling/backbones/mobilenet_v3.py", "new_path": "ppdet/modeling/backbones/mobilenet_v3.py", "diff": "@@ -62,21 +62,17 @@ class ConvBNLayer(nn.Layer):\npadding=padding,\ngroups=num_groups,\nweight_attr=ParamAttr(\n- learning_rate=lr_mult,\n- regularizer=L2Decay(conv_decay),\n- name=name + \"_weights\"),\n+ learning_rate=lr_mult, regularizer=L2Decay(conv_decay)),\nbias_attr=False)\nnorm_lr = 0. 
if freeze_norm else lr_mult\nparam_attr = ParamAttr(\nlearning_rate=norm_lr,\nregularizer=L2Decay(norm_decay),\n- name=name + \"_bn_scale\",\ntrainable=False if freeze_norm else True)\nbias_attr = ParamAttr(\nlearning_rate=norm_lr,\nregularizer=L2Decay(norm_decay),\n- name=name + \"_bn_offset\",\ntrainable=False if freeze_norm else True)\nglobal_stats = True if freeze_norm else False\nif norm_type == 'sync_bn':\n@@ -88,9 +84,7 @@ class ConvBNLayer(nn.Layer):\nact=None,\nparam_attr=param_attr,\nbias_attr=bias_attr,\n- use_global_stats=global_stats,\n- moving_mean_name=name + '_bn_mean',\n- moving_variance_name=name + '_bn_variance')\n+ use_global_stats=global_stats)\nnorm_params = self.bn.parameters()\nif freeze_norm:\nfor param in norm_params:\n@@ -203,13 +197,9 @@ class SEModule(nn.Layer):\nstride=1,\npadding=0,\nweight_attr=ParamAttr(\n- learning_rate=lr_mult,\n- regularizer=L2Decay(conv_decay),\n- name=name + \"_1_weights\"),\n+ learning_rate=lr_mult, regularizer=L2Decay(conv_decay)),\nbias_attr=ParamAttr(\n- learning_rate=lr_mult,\n- regularizer=L2Decay(conv_decay),\n- name=name + \"_1_offset\"))\n+ learning_rate=lr_mult, regularizer=L2Decay(conv_decay)))\nself.conv2 = nn.Conv2D(\nin_channels=mid_channels,\nout_channels=channel,\n@@ -217,13 +207,9 @@ class SEModule(nn.Layer):\nstride=1,\npadding=0,\nweight_attr=ParamAttr(\n- learning_rate=lr_mult,\n- regularizer=L2Decay(conv_decay),\n- name=name + \"_2_weights\"),\n+ learning_rate=lr_mult, regularizer=L2Decay(conv_decay)),\nbias_attr=ParamAttr(\n- learning_rate=lr_mult,\n- regularizer=L2Decay(conv_decay),\n- name=name + \"_2_offset\"))\n+ learning_rate=lr_mult, regularizer=L2Decay(conv_decay)))\ndef forward(self, inputs):\noutputs = self.avg_pool(inputs)\n" }, { "change_type": "MODIFY", "old_path": "ppdet/modeling/backbones/vgg.py", "new_path": "ppdet/modeling/backbones/vgg.py", "diff": "@@ -30,9 +30,7 @@ class ConvBlock(nn.Layer):\nout_channels=out_channels,\nkernel_size=3,\nstride=1,\n- padding=1,\n- weight_attr=ParamAttr(name=name + \"1_weights\"),\n- bias_attr=ParamAttr(name=name + \"1_bias\"))\n+ padding=1)\nself.conv_out_list = []\nfor i in range(1, groups):\nconv_out = self.add_sublayer(\n@@ -42,10 +40,7 @@ class ConvBlock(nn.Layer):\nout_channels=out_channels,\nkernel_size=3,\nstride=1,\n- padding=1,\n- weight_attr=ParamAttr(\n- name=name + \"{}_weights\".format(i + 1)),\n- bias_attr=ParamAttr(name=name + \"{}_bias\".format(i + 1))))\n+ padding=1))\nself.conv_out_list.append(conv_out)\nself.pool = MaxPool2D(\n" }, { "change_type": "MODIFY", "old_path": "ppdet/modeling/heads/fcos_head.py", "new_path": "ppdet/modeling/heads/fcos_head.py", "diff": "@@ -151,12 +151,9 @@ class FCOSHead(nn.Layer):\nkernel_size=3,\nstride=1,\npadding=1,\n- weight_attr=ParamAttr(\n- name=conv_cls_name + \"_weights\",\n- initializer=Normal(\n+ weight_attr=ParamAttr(initializer=Normal(\nmean=0., std=0.01)),\nbias_attr=ParamAttr(\n- name=conv_cls_name + \"_bias\",\ninitializer=Constant(value=bias_init_value))))\nconv_reg_name = \"fcos_head_reg\"\n@@ -168,13 +165,9 @@ class FCOSHead(nn.Layer):\nkernel_size=3,\nstride=1,\npadding=1,\n- weight_attr=ParamAttr(\n- name=conv_reg_name + \"_weights\",\n- initializer=Normal(\n+ weight_attr=ParamAttr(initializer=Normal(\nmean=0., std=0.01)),\n- bias_attr=ParamAttr(\n- name=conv_reg_name + \"_bias\",\n- initializer=Constant(value=0))))\n+ bias_attr=ParamAttr(initializer=Constant(value=0))))\nconv_centerness_name = \"fcos_head_centerness\"\nself.fcos_head_centerness = self.add_sublayer(\n@@ -185,13 +178,9 @@ class 
FCOSHead(nn.Layer):\nkernel_size=3,\nstride=1,\npadding=1,\n- weight_attr=ParamAttr(\n- name=conv_centerness_name + \"_weights\",\n- initializer=Normal(\n+ weight_attr=ParamAttr(initializer=Normal(\nmean=0., std=0.01)),\n- bias_attr=ParamAttr(\n- name=conv_centerness_name + \"_bias\",\n- initializer=Constant(value=0))))\n+ bias_attr=ParamAttr(initializer=Constant(value=0))))\nself.scales_regs = []\nfor i in range(len(self.fpn_stride)):\n" }, { "change_type": "MODIFY", "old_path": "ppdet/modeling/necks/blazeface_fpn.py", "new_path": "ppdet/modeling/necks/blazeface_fpn.py", "diff": "@@ -51,25 +51,14 @@ class ConvBNLayer(nn.Layer):\npadding=padding,\ngroups=num_groups,\nweight_attr=ParamAttr(\n- learning_rate=conv_lr,\n- initializer=KaimingNormal(),\n- name=name + \"_weights\"),\n+ learning_rate=conv_lr, initializer=KaimingNormal()),\nbias_attr=False)\n- param_attr = ParamAttr(name=name + \"_bn_scale\")\n- bias_attr = ParamAttr(name=name + \"_bn_offset\")\nif norm_type == 'sync_bn':\n- self._batch_norm = nn.SyncBatchNorm(\n- out_channels, weight_attr=param_attr, bias_attr=bias_attr)\n+ self._batch_norm = nn.SyncBatchNorm(out_channels)\nelse:\nself._batch_norm = nn.BatchNorm(\n- out_channels,\n- act=None,\n- param_attr=param_attr,\n- bias_attr=bias_attr,\n- use_global_stats=False,\n- moving_mean_name=name + '_bn_mean',\n- moving_variance_name=name + '_bn_variance')\n+ out_channels, act=None, use_global_stats=False)\ndef forward(self, x):\nx = self._conv(x)\n" }, { "change_type": "MODIFY", "old_path": "ppdet/modeling/necks/hrfpn.py", "new_path": "ppdet/modeling/necks/hrfpn.py", "diff": "import paddle\nimport paddle.nn.functional as F\n-from paddle import ParamAttr\nimport paddle.nn as nn\nfrom ppdet.core.workspace import register\nfrom ..shape_spec import ShapeSpec\n@@ -53,7 +52,6 @@ class HRFPN(nn.Layer):\nin_channels=in_channel,\nout_channels=out_channel,\nkernel_size=1,\n- weight_attr=ParamAttr(name='hrfpn_reduction_weights'),\nbias_attr=False)\nif share_conv:\n@@ -62,7 +60,6 @@ class HRFPN(nn.Layer):\nout_channels=out_channel,\nkernel_size=3,\npadding=1,\n- weight_attr=ParamAttr(name='fpn_conv_weights'),\nbias_attr=False)\nelse:\nself.fpn_conv = []\n@@ -75,7 +72,6 @@ class HRFPN(nn.Layer):\nout_channels=out_channel,\nkernel_size=3,\npadding=1,\n- weight_attr=ParamAttr(name=conv_name + \"_weights\"),\nbias_attr=False))\nself.fpn_conv.append(conv)\n" }, { "change_type": "MODIFY", "old_path": "ppdet/modeling/reid/jde_embedding_head.py", "new_path": "ppdet/modeling/reid/jde_embedding_head.py", "diff": "@@ -92,9 +92,7 @@ class JDEEmbeddingHead(nn.Layer):\nkernel_size=3,\nstride=1,\npadding=1,\n- weight_attr=ParamAttr(name=name + '.conv.weights'),\n- bias_attr=ParamAttr(\n- name=name + '.conv.bias', regularizer=L2Decay(0.))))\n+ bias_attr=ParamAttr(regularizer=L2Decay(0.))))\nself.identify_outputs.append(identify_output)\nloss_p_cls = self.add_sublayer('cls.{}'.format(i), LossParam(-4.15))\n" }, { "change_type": "MODIFY", "old_path": "ppdet/modeling/reid/pyramidal_embedding.py", "new_path": "ppdet/modeling/reid/pyramidal_embedding.py", "diff": "@@ -89,16 +89,12 @@ class PCBPyramid(nn.Layer):\nif idx_branches >= sum(self.num_in_each_level[0:idx_levels + 1]):\nidx_levels += 1\n- name = \"Linear_branch_id_{}\".format(idx_branches)\nfc = nn.Linear(\nin_features=num_conv_out_channels,\nout_features=self.num_classes,\n- weight_attr=ParamAttr(\n- name=name + \"_weights\",\n- initializer=Normal(\n+ weight_attr=ParamAttr(initializer=Normal(\nmean=0., std=0.001)),\n- bias_attr=ParamAttr(\n- name=name + 
\"_bias\", initializer=Constant(value=0.)))\n+ bias_attr=ParamAttr(initializer=Constant(value=0.)))\npyramid_fc_list.append(fc)\nreturn pyramid_conv_list, pyramid_fc_list\n" }, { "change_type": "MODIFY", "old_path": "ppdet/modeling/reid/resnet.py", "new_path": "ppdet/modeling/reid/resnet.py", "diff": "@@ -50,23 +50,13 @@ class ConvBNLayer(nn.Layer):\ndilation=dilation,\ngroups=groups,\nweight_attr=ParamAttr(\n- name=name + \"_weights\",\nlearning_rate=lr_mult,\ninitializer=Normal(0, math.sqrt(2. / conv_stdv))),\nbias_attr=False,\ndata_format=data_format)\n- if name == \"conv1\":\n- bn_name = \"bn_\" + name\n- else:\n- bn_name = \"bn\" + name[3:]\n+\nself._batch_norm = nn.BatchNorm(\n- num_filters,\n- act=act,\n- param_attr=ParamAttr(name=bn_name + \"_scale\"),\n- bias_attr=ParamAttr(bn_name + \"_offset\"),\n- moving_mean_name=bn_name + \"_mean\",\n- moving_variance_name=bn_name + \"_variance\",\n- data_layout=data_format)\n+ num_filters, act=act, data_layout=data_format)\ndef forward(self, inputs):\ny = self._conv(inputs)\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
clean param name (#3799)
499,298
29.07.2021 12:29:21
-28,800
9a109fbf4f1b57e4122b60e9613619aa9b49fd42
[MOT] fix mot infer video
[ { "change_type": "MODIFY", "old_path": "deploy/python/mot_jde_infer.py", "new_path": "deploy/python/mot_jde_infer.py", "diff": "@@ -221,6 +221,7 @@ def predict_video(detector, camera_id):\nif not os.path.exists(FLAGS.output_dir):\nos.makedirs(FLAGS.output_dir)\nout_path = os.path.join(FLAGS.output_dir, video_name)\n+ if not FLAGS.save_images:\nwriter = cv2.VideoWriter(out_path, fourcc, fps, (width, height))\nframe_id = 0\ntimer = MOTTimer()\n@@ -236,7 +237,7 @@ def predict_video(detector, camera_id):\nresults.append((frame_id + 1, online_tlwhs, online_scores, online_ids))\nfps = 1. / timer.average_time\n- online_im = mot_vis.plot_tracking(\n+ im = mot_vis.plot_tracking(\nframe,\nonline_tlwhs,\nonline_ids,\n@@ -249,11 +250,11 @@ def predict_video(detector, camera_id):\nos.makedirs(save_dir)\ncv2.imwrite(\nos.path.join(save_dir, '{:05d}.jpg'.format(frame_id)),\n- online_im)\n+ im)\n+ else:\n+ writer.write(im)\nframe_id += 1\nprint('detect frame:%d' % (frame_id))\n- im = np.array(online_im)\n- writer.write(im)\nif camera_id != -1:\ncv2.imshow('Tracking Detection', im)\nif cv2.waitKey(1) & 0xFF == ord('q'):\n@@ -262,6 +263,14 @@ def predict_video(detector, camera_id):\nresult_filename = os.path.join(FLAGS.output_dir,\nvideo_name.split('.')[-2] + '.txt')\nwrite_mot_results(result_filename, results)\n+\n+ if FLAGS.save_images:\n+ save_dir = os.path.join(FLAGS.output_dir, video_name.split('.')[-2])\n+ cmd_str = 'ffmpeg -f image2 -i {}/%05d.jpg -vf \"scale=trunc(iw/2)*2:trunc(ih/2)*2\" {}'.format(\n+ save_dir, out_path)\n+ os.system(cmd_str)\n+ print('Save video in {}.'.format(out_path))\n+ else:\nwriter.release()\n" }, { "change_type": "MODIFY", "old_path": "deploy/python/mot_keypoint_unite_infer.py", "new_path": "deploy/python/mot_keypoint_unite_infer.py", "diff": "@@ -137,6 +137,7 @@ def mot_keypoint_unite_predict_video(mot_model,\nif not os.path.exists(FLAGS.output_dir):\nos.makedirs(FLAGS.output_dir)\nout_path = os.path.join(FLAGS.output_dir, video_name)\n+ if not FLAGS.save_images:\nwriter = cv2.VideoWriter(out_path, fourcc, fps, (width, height))\nframe_id = 0\ntimer_mot = FPSTimer()\n@@ -202,7 +203,7 @@ def mot_keypoint_unite_predict_video(mot_model,\nos.makedirs(save_dir)\ncv2.imwrite(\nos.path.join(save_dir, '{:05d}.jpg'.format(frame_id)), im)\n-\n+ else:\nwriter.write(im)\nif camera_id != -1:\ncv2.imshow('Tracking and keypoint results', im)\n@@ -212,6 +213,14 @@ def mot_keypoint_unite_predict_video(mot_model,\nresult_filename = os.path.join(FLAGS.output_dir,\nvideo_name.split('.')[-2] + '.txt')\nwrite_mot_results(result_filename, mot_results)\n+\n+ if FLAGS.save_images:\n+ save_dir = os.path.join(FLAGS.output_dir, video_name.split('.')[-2])\n+ cmd_str = 'ffmpeg -f image2 -i {}/%05d.jpg -vf \"scale=trunc(iw/2)*2:trunc(ih/2)*2\" {}'.format(\n+ save_dir, out_path)\n+ os.system(cmd_str)\n+ print('Save video in {}.'.format(out_path))\n+ else:\nwriter.release()\n" }, { "change_type": "MODIFY", "old_path": "deploy/python/mot_sde_infer.py", "new_path": "deploy/python/mot_sde_infer.py", "diff": "@@ -356,6 +356,7 @@ def predict_video(detector, reid_model, camera_id):\nif not os.path.exists(FLAGS.output_dir):\nos.makedirs(FLAGS.output_dir)\nout_path = os.path.join(FLAGS.output_dir, video_name)\n+ if not FLAGS.save_images:\nwriter = cv2.VideoWriter(out_path, fourcc, fps, (width, height))\nframe_id = 0\ntimer = MOTTimer()\n@@ -379,7 +380,7 @@ def predict_video(detector, reid_model, camera_id):\nresults.append((frame_id + 1, online_tlwhs, online_scores, online_ids))\nfps = 1. 
/ timer.average_time\n- online_im = mot_vis.plot_tracking(\n+ im = mot_vis.plot_tracking(\nframe,\nonline_tlwhs,\nonline_ids,\n@@ -392,11 +393,11 @@ def predict_video(detector, reid_model, camera_id):\nos.makedirs(save_dir)\ncv2.imwrite(\nos.path.join(save_dir, '{:05d}.jpg'.format(frame_id)),\n- online_im)\n+ im)\n+ else:\n+ writer.write(im)\nframe_id += 1\nprint('detect frame:%d' % (frame_id))\n- im = np.array(online_im)\n- writer.write(im)\nif camera_id != -1:\ncv2.imshow('Tracking Detection', im)\nif cv2.waitKey(1) & 0xFF == ord('q'):\n@@ -405,6 +406,14 @@ def predict_video(detector, reid_model, camera_id):\nresult_filename = os.path.join(FLAGS.output_dir,\nvideo_name.split('.')[-2] + '.txt')\nwrite_mot_results(result_filename, results)\n+\n+ if FLAGS.save_images:\n+ save_dir = os.path.join(FLAGS.output_dir, video_name.split('.')[-2])\n+ cmd_str = 'ffmpeg -f image2 -i {}/%05d.jpg -vf \"scale=trunc(iw/2)*2:trunc(ih/2)*2\" {}'.format(\n+ save_dir, out_path)\n+ os.system(cmd_str)\n+ print('Save video in {}.'.format(out_path))\n+ else:\nwriter.release()\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
[MOT] fix mot infer video (#3805)
499,301
29.07.2021 14:18:32
-28,800
383ffa1491269c7aaa9f2df95029a82c028a7140
fix weights path error
[ { "change_type": "MODIFY", "old_path": "configs/sparse_rcnn/sparse_rcnn_r50_fpn_3x_pro100_coco.yml", "new_path": "configs/sparse_rcnn/sparse_rcnn_r50_fpn_3x_pro100_coco.yml", "diff": "@@ -7,4 +7,4 @@ _BASE_: [\n]\nnum_classes: 80\n-weights: output/sparse_rcnn_r50b_fpn_pro100/model_final\n+weights: output/sparse_rcnn_r50_fpn_3x_pro100_coco/model_final\n" }, { "change_type": "MODIFY", "old_path": "configs/sparse_rcnn/sparse_rcnn_r50_fpn_3x_pro300_coco.yml", "new_path": "configs/sparse_rcnn/sparse_rcnn_r50_fpn_3x_pro300_coco.yml", "diff": "@@ -7,7 +7,7 @@ _BASE_: [\n]\nnum_classes: 80\n-weights: output/sparse_rcnn_r50b_fpn_pro300/model_final\n+weights: output/sparse_rcnn_r50_fpn_3x_pro300_coco/model_final\nsnapshot_epoch: 1\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
fix weights path error (#3812)
499,298
30.07.2021 18:57:00
-28,800
0a2153c9bf584086670abda5d741b904577c60bf
[mot] fix mot pose unite deploy cpu mkldnn
[ { "change_type": "MODIFY", "old_path": "deploy/python/mot_jde_infer.py", "new_path": "deploy/python/mot_jde_infer.py", "diff": "@@ -91,6 +91,8 @@ class JDE_Detector(Detector):\ndef postprocess(self, pred_dets, pred_embs, threshold):\nonline_targets = self.tracker.update(pred_dets, pred_embs)\n+ if online_targets == []:\n+ return [pred_dets[0][:4]], [pred_dets[0][4]], [1]\nonline_tlwhs, online_ids = [], []\nonline_scores = []\nfor t in online_targets:\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
[mot] fix mot pose unite deploy cpu mkldnn (#3837)
499,298
30.07.2021 20:20:27
-28,800
25d4a853aca1db3f1ccbb33193562e86847ce054
fix pose deploy infer time bug
[ { "change_type": "MODIFY", "old_path": "deploy/python/utils.py", "new_path": "deploy/python/utils.py", "diff": "@@ -170,15 +170,16 @@ class Timer(Times):\nprint(\"total_time(ms): {}, img_num: {}\".format(total_time * 1000,\nself.img_num))\npreprocess_time = round(\n- self.preprocess_time_s.value() / self.img_num,\n+ self.preprocess_time_s.value() / max(1, self.img_num),\n4) if average else self.preprocess_time_s.value()\npostprocess_time = round(\n- self.postprocess_time_s.value() / self.img_num,\n+ self.postprocess_time_s.value() / max(1, self.img_num),\n4) if average else self.postprocess_time_s.value()\n- inference_time = round(self.inference_time_s.value() / self.img_num,\n+ inference_time = round(self.inference_time_s.value() /\n+ max(1, self.img_num),\n4) if average else self.inference_time_s.value()\n- average_latency = total_time / self.img_num\n+ average_latency = total_time / max(1, self.img_num)\nprint(\"average latency time(ms): {:.2f}, QPS: {:2f}\".format(\naverage_latency * 1000, 1 / average_latency))\nprint(\n@@ -189,13 +190,13 @@ class Timer(Times):\ndef report(self, average=False):\ndic = {}\ndic['preprocess_time_s'] = round(\n- self.preprocess_time_s.value() / self.img_num,\n+ self.preprocess_time_s.value() / max(1, self.img_num),\n4) if average else self.preprocess_time_s.value()\ndic['postprocess_time_s'] = round(\n- self.postprocess_time_s.value() / self.img_num,\n+ self.postprocess_time_s.value() / max(1, self.img_num),\n4) if average else self.postprocess_time_s.value()\ndic['inference_time_s'] = round(\n- self.inference_time_s.value() / self.img_num,\n+ self.inference_time_s.value() / max(1, self.img_num),\n4) if average else self.inference_time_s.value()\ndic['img_num'] = self.img_num\ntotal_time = self.preprocess_time_s.value(\n@@ -228,3 +229,4 @@ def get_current_memory_mb():\nmeminfo = pynvml.nvmlDeviceGetMemoryInfo(handle)\ngpu_mem = meminfo.used / 1024. / 1024.\nreturn round(cpu_mem, 4), round(gpu_mem, 4), round(gpu_percent, 4)\n+\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
fix pose deploy infer timebug (#3841)
499,298
02.08.2021 11:03:30
-28,800
1adca6bf677b9d8c4d9640c00e1cbf5df35ca334
fix hrnet SELayer import
[ { "change_type": "MODIFY", "old_path": "ppdet/modeling/backbones/hrnet.py", "new_path": "ppdet/modeling/backbones/hrnet.py", "diff": "import paddle\nimport paddle.nn as nn\nimport paddle.nn.functional as F\n+from paddle.nn import AdaptiveAvgPool2D, Linear, Uniform\nfrom paddle.regularizer import L2Decay\nfrom paddle import ParamAttr\nfrom paddle.nn.initializer import Normal\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
fix hrnet SELayer import (#3855)
499,298
02.08.2021 15:35:15
-28,800
ed0cd8da117fcc23349fc648abe1bac94ee2eeaf
fix hrnet import Uniform
[ { "change_type": "MODIFY", "old_path": "ppdet/modeling/backbones/hrnet.py", "new_path": "ppdet/modeling/backbones/hrnet.py", "diff": "import paddle\nimport paddle.nn as nn\nimport paddle.nn.functional as F\n-from paddle.nn import AdaptiveAvgPool2D, Linear, Uniform\n+from paddle.nn import AdaptiveAvgPool2D, Linear\nfrom paddle.regularizer import L2Decay\nfrom paddle import ParamAttr\n-from paddle.nn.initializer import Normal\n+from paddle.nn.initializer import Normal, Uniform\nfrom numbers import Integral\nimport math\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
fix hrnet import Uniform (#3859)
499,301
03.08.2021 13:10:00
-28,800
63ead1e4980738742c188078af24a2d889180dd7
Fix args for infer
[ { "change_type": "MODIFY", "old_path": "configs/sparse_rcnn/_base_/sparse_rcnn_reader.yml", "new_path": "configs/sparse_rcnn/_base_/sparse_rcnn_reader.yml", "diff": "@@ -39,6 +39,6 @@ TestReader:\n- Permute: {}\nbatch_transforms:\n- PadBatch: {pad_to_stride: 32}\n- - Gt2SparseRCNNTarget: {train: false}\n+ - Gt2SparseRCNNTarget: {}\nbatch_size: 1\nshuffle: false\n" }, { "change_type": "MODIFY", "old_path": "ppdet/data/transform/batch_operators.py", "new_path": "ppdet/data/transform/batch_operators.py", "diff": "@@ -895,7 +895,6 @@ class Gt2SparseRCNNTarget(BaseOperator):\nsample[\"scale_factor_wh\"] = np.array(\n[sample[\"scale_factor\"][1], sample[\"scale_factor\"][0]],\ndtype=np.float32)\n- sample.pop(\"scale_factor\")\nelse:\nsample[\"scale_factor_wh\"] = np.array(\n[1.0, 1.0], dtype=np.float32)\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
Fix args for infer (#3839)
499,313
03.08.2021 15:48:09
-28,800
6e8607b4c042453dc8ea5c2ffb244cc7e792dbc6
unify dist download
[ { "change_type": "MODIFY", "old_path": "ppdet/utils/checkpoint.py", "new_path": "ppdet/utils/checkpoint.py", "diff": "@@ -55,41 +55,6 @@ def _get_unique_endpoints(trainer_endpoints):\nreturn unique_endpoints\n-def get_weights_path_dist(path):\n- env = os.environ\n- if 'PADDLE_TRAINERS_NUM' in env and 'PADDLE_TRAINER_ID' in env:\n- trainer_id = int(env['PADDLE_TRAINER_ID'])\n- num_trainers = int(env['PADDLE_TRAINERS_NUM'])\n- if num_trainers <= 1:\n- path = get_weights_path(path)\n- else:\n- from ppdet.utils.download import map_path, WEIGHTS_HOME\n- weight_path = map_path(path, WEIGHTS_HOME)\n- lock_path = weight_path + '.lock'\n- if not os.path.exists(weight_path):\n- from paddle.distributed import ParallelEnv\n- unique_endpoints = _get_unique_endpoints(ParallelEnv()\n- .trainer_endpoints[:])\n- try:\n- os.makedirs(os.path.dirname(weight_path))\n- except OSError as e:\n- if e.errno != errno.EEXIST:\n- raise\n- with open(lock_path, 'w'): # touch\n- os.utime(lock_path, None)\n- if ParallelEnv().current_endpoint in unique_endpoints:\n- get_weights_path(path)\n- os.remove(lock_path)\n- else:\n- while os.path.exists(lock_path):\n- time.sleep(1)\n- path = weight_path\n- else:\n- path = get_weights_path(path)\n-\n- return path\n-\n-\ndef _strip_postfix(path):\npath, ext = os.path.splitext(path)\nassert ext in ['', '.pdparams', '.pdopt', '.pdmodel'], \\\n@@ -99,7 +64,7 @@ def _strip_postfix(path):\ndef load_weight(model, weight, optimizer=None):\nif is_url(weight):\n- weight = get_weights_path_dist(weight)\n+ weight = get_weights_path(weight)\npath = _strip_postfix(weight)\npdparam_path = path + '.pdparams'\n@@ -205,7 +170,7 @@ def match_state_dict(model_state_dict, weight_state_dict):\ndef load_pretrain_weight(model, pretrain_weight):\nif is_url(pretrain_weight):\n- pretrain_weight = get_weights_path_dist(pretrain_weight)\n+ pretrain_weight = get_weights_path(pretrain_weight)\npath = _strip_postfix(pretrain_weight)\nif not (os.path.isdir(path) or os.path.isfile(path) or\n" }, { "change_type": "MODIFY", "old_path": "ppdet/utils/download.py", "new_path": "ppdet/utils/download.py", "diff": "@@ -20,6 +20,7 @@ import os\nimport os.path as osp\nimport sys\nimport yaml\n+import time\nimport shutil\nimport requests\nimport tqdm\n@@ -29,6 +30,7 @@ import binascii\nimport tarfile\nimport zipfile\n+from paddle.utils.download import _get_unique_endpoints\nfrom ppdet.core.workspace import BASE_KEY\nfrom .logger import setup_logger\nfrom .voc_utils import create_list\n@@ -147,8 +149,8 @@ def get_config_path(url):\ncfg_url = parse_url(cfg_url)\n# 3. download and decompress\n- cfg_fullname = _download(cfg_url, osp.dirname(CONFIGS_HOME))\n- _decompress(cfg_fullname)\n+ cfg_fullname = _download_dist(cfg_url, osp.dirname(CONFIGS_HOME))\n+ _decompress_dist(cfg_fullname)\n# 4. 
check config file existing\nif os.path.isfile(path):\n@@ -284,12 +286,12 @@ def get_path(url, root_dir, md5sum=None, check_exist=True):\nelse:\nos.remove(fullpath)\n- fullname = _download(url, root_dir, md5sum)\n+ fullname = _download_dist(url, root_dir, md5sum)\n# new weights format which postfix is 'pdparams' not\n# need to decompress\nif osp.splitext(fullname)[-1] not in ['.pdparams', '.yml']:\n- _decompress(fullname)\n+ _decompress_dist(fullname)\nreturn fullpath, False\n@@ -384,6 +386,38 @@ def _download(url, path, md5sum=None):\nreturn fullname\n+def _download_dist(url, path, md5sum=None):\n+ env = os.environ\n+ if 'PADDLE_TRAINERS_NUM' in env and 'PADDLE_TRAINER_ID' in env:\n+ trainer_id = int(env['PADDLE_TRAINER_ID'])\n+ num_trainers = int(env['PADDLE_TRAINERS_NUM'])\n+ if num_trainers <= 1:\n+ return _download(url, path, md5sum)\n+ else:\n+ fname = osp.split(url)[-1]\n+ fullname = osp.join(path, fname)\n+ lock_path = fullname + '.download.lock'\n+\n+ if not osp.isdir(path):\n+ os.makedirs(path)\n+\n+ if not osp.exists(fullname):\n+ from paddle.distributed import ParallelEnv\n+ unique_endpoints = _get_unique_endpoints(ParallelEnv()\n+ .trainer_endpoints[:])\n+ with open(lock_path, 'w'): # touch\n+ os.utime(lock_path, None)\n+ if ParallelEnv().current_endpoint in unique_endpoints:\n+ _download(url, path, md5sum)\n+ os.remove(lock_path)\n+ else:\n+ while os.path.exists(lock_path):\n+ time.sleep(1)\n+ return fullname\n+ else:\n+ return _download(url, path, md5sum)\n+\n+\ndef _check_exist_file_md5(filename, md5sum, url):\n# if md5sum is None, and file to check is weights file,\n# read md5um from url and check, else check md5sum directly\n@@ -461,6 +495,30 @@ def _decompress(fname):\nos.remove(fname)\n+def _decompress_dist(fname):\n+ env = os.environ\n+ if 'PADDLE_TRAINERS_NUM' in env and 'PADDLE_TRAINER_ID' in env:\n+ trainer_id = int(env['PADDLE_TRAINER_ID'])\n+ num_trainers = int(env['PADDLE_TRAINERS_NUM'])\n+ if num_trainers <= 1:\n+ _decompress(fname)\n+ else:\n+ lock_path = fname + '.decompress.lock'\n+ from paddle.distributed import ParallelEnv\n+ unique_endpoints = _get_unique_endpoints(ParallelEnv()\n+ .trainer_endpoints[:])\n+ with open(lock_path, 'w'): # touch\n+ os.utime(lock_path, None)\n+ if ParallelEnv().current_endpoint in unique_endpoints:\n+ _decompress(fname)\n+ os.remove(lock_path)\n+ else:\n+ while os.path.exists(lock_path):\n+ time.sleep(1)\n+ else:\n+ _decompress(fname)\n+\n+\ndef _move_and_merge_tree(src, dst):\n\"\"\"\nMove src directory to dst, if dst is already exists,\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
unify dist download (#3866)
499,339
03.08.2021 19:56:25
-28,800
2c47839bd75d8b70c4f4b976bfeea15a82586b84
[doc fix] fix detr docs, test=document_fix
[ { "change_type": "MODIFY", "old_path": "configs/deformable_detr/README.md", "new_path": "configs/deformable_detr/README.md", "diff": "@@ -17,6 +17,12 @@ Deformable DETR is an object detection model based on DETR. We reproduced the mo\n- Deformable DETR is trained on COCO train2017 dataset and evaluated on val2017 results of `mAP(IoU=0.5:0.95)`.\n- Deformable DETR uses 8GPU to train 50 epochs.\n+GPU multi-card training\n+```bash\n+export CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7\n+python -m paddle.distributed.launch --gpus 0,1,2,3,4,5,6,7 tools/train.py -c configs/deformable_detr/deformable_detr_r50_1x_coco.yml --fleet -o find_unused_parameters=True\n+```\n+\n## Citations\n```\n@inproceedings{\n" }, { "change_type": "MODIFY", "old_path": "configs/detr/README.md", "new_path": "configs/detr/README.md", "diff": "@@ -17,6 +17,12 @@ DETR is an object detection model based on transformer. We reproduced the model\n- DETR is trained on COCO train2017 dataset and evaluated on val2017 results of `mAP(IoU=0.5:0.95)`.\n- DETR uses 8GPU to train 500 epochs.\n+GPU multi-card training\n+```bash\n+export CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7\n+python -m paddle.distributed.launch --gpus 0,1,2,3,4,5,6,7 tools/train.py -c configs/detr/detr_r50_1x_coco.yml --fleet -o find_unused_parameters=True\n+```\n+\n## Citations\n```\n@inproceedings{detr,\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
[doc fix] fix detr docs, test=document_fix (#3872)
499,313
04.08.2021 15:18:15
-28,800
23674478c477534013d4a83555b66c54409252d9
fix download hang
[ { "change_type": "MODIFY", "old_path": "ppdet/utils/download.py", "new_path": "ppdet/utils/download.py", "diff": "@@ -412,7 +412,7 @@ def _download_dist(url, path, md5sum=None):\nos.remove(lock_path)\nelse:\nwhile os.path.exists(lock_path):\n- time.sleep(1)\n+ time.sleep(0.5)\nreturn fullname\nelse:\nreturn _download(url, path, md5sum)\n@@ -507,14 +507,26 @@ def _decompress_dist(fname):\nfrom paddle.distributed import ParallelEnv\nunique_endpoints = _get_unique_endpoints(ParallelEnv()\n.trainer_endpoints[:])\n+ # NOTE(dkp): _decompress_dist always performed after\n+ # _download_dist, in _download_dist sub-trainers is waiting\n+ # for download lock file release with sleeping, if decompress\n+ # prograss is very fast and finished with in the sleeping gap\n+ # time, e.g in tiny dataset such as coco_ce, spine_coco, main\n+ # trainer may finish decompress and release lock file, so we\n+ # only craete lock file in main trainer and all sub-trainer\n+ # wait 1s for main trainer to create lock file, for 1s is\n+ # twice as sleeping gap, this waiting time can keep all\n+ # trainer pipeline in order\n+ # **change this if you have more elegent methods**\n+ if ParallelEnv().current_endpoint in unique_endpoints:\nwith open(lock_path, 'w'): # touch\nos.utime(lock_path, None)\n- if ParallelEnv().current_endpoint in unique_endpoints:\n_decompress(fname)\nos.remove(lock_path)\nelse:\n- while os.path.exists(lock_path):\ntime.sleep(1)\n+ while os.path.exists(lock_path):\n+ time.sleep(0.5)\nelse:\n_decompress(fname)\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
fix download hang (#3874)
499,333
05.08.2021 15:43:29
-28,800
77c616d08c0f2d822b533d020bfc58df5206b52e
fix no label training in bs2
[ { "change_type": "MODIFY", "old_path": "ppdet/modeling/proposal_generator/target.py", "new_path": "ppdet/modeling/proposal_generator/target.py", "diff": "@@ -215,7 +215,8 @@ def generate_proposal_target(rpn_rois,\nif gt_bbox.shape[0] > 0:\nsampled_bbox = paddle.gather(gt_bbox, sampled_gt_ind)\nelse:\n- sampled_bbox = paddle.zeros([0, 4], dtype='float32')\n+ num = rois_per_image.shape[0]\n+ sampled_bbox = paddle.zeros([num, 4], dtype='float32')\nrois_per_image.stop_gradient = True\nsampled_gt_ind.stop_gradient = True\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
fix no label training in bs2 (#3890)
499,331
08.08.2021 00:59:36
-28,800
c7ef5eb0a8e4f03c8fd67ec4b3cd3674ae2d2ab0
fix detach bug
[ { "change_type": "MODIFY", "old_path": "configs/dota/s2anet_1x_spine.yml", "new_path": "configs/dota/s2anet_1x_spine.yml", "diff": "@@ -27,3 +27,4 @@ S2ANetHead:\nreg_loss_weight: [1.0, 1.0, 1.0, 1.0, 1.05]\ncls_loss_weight: [1.05, 1.0]\nreg_loss_type: 'l1'\n+ is_training: True\n" }, { "change_type": "MODIFY", "old_path": "configs/dota/s2anet_alignconv_2x_dota.yml", "new_path": "configs/dota/s2anet_alignconv_2x_dota.yml", "diff": "@@ -24,3 +24,4 @@ S2ANetHead:\nreg_loss_weight: [1.0, 1.0, 1.0, 1.0, 1.05]\ncls_loss_weight: [1.05, 1.0]\nreg_loss_type: 'l1'\n+ is_training: True\n" }, { "change_type": "MODIFY", "old_path": "configs/dota/s2anet_conv_2x_dota.yml", "new_path": "configs/dota/s2anet_conv_2x_dota.yml", "diff": "@@ -21,3 +21,4 @@ S2ANetHead:\nuse_sigmoid_cls: True\nreg_loss_weight: [1.0, 1.0, 1.0, 1.0, 1.1]\ncls_loss_weight: [1.1, 1.05]\n+ is_training: True\n" }, { "change_type": "MODIFY", "old_path": "ppdet/modeling/heads/s2anet_head.py", "new_path": "ppdet/modeling/heads/s2anet_head.py", "diff": "@@ -228,9 +228,10 @@ class S2ANetHead(nn.Layer):\nalign_conv_size=3,\nuse_sigmoid_cls=True,\nanchor_assign=RBoxAssigner().__dict__,\n- reg_loss_weight=[1.0, 1.0, 1.0, 1.0, 1.0],\n- cls_loss_weight=[1.0, 1.0],\n- reg_loss_type='l1'):\n+ reg_loss_weight=[1.0, 1.0, 1.0, 1.0, 1.1],\n+ cls_loss_weight=[1.1, 1.05],\n+ reg_loss_type='l1',\n+ is_training=True):\nsuper(S2ANetHead, self).__init__()\nself.stacked_convs = stacked_convs\nself.feat_in = feat_in\n@@ -256,6 +257,7 @@ class S2ANetHead(nn.Layer):\nself.alpha = 1.0\nself.beta = 1.0\nself.reg_loss_type = reg_loss_type\n+ self.is_training = is_training\nself.s2anet_head_out = None\n@@ -446,10 +448,12 @@ class S2ANetHead(nn.Layer):\ninit_anchors = self.rect2rbox(init_anchors)\nself.base_anchors_list.append(init_anchors)\n- fam_reg1 = fam_reg\n+ if self.is_training:\n+ refine_anchor = self.bbox_decode(fam_reg.detach(), init_anchors)\n+ else:\n+ fam_reg1 = fam_reg.clone()\nfam_reg1.stop_gradient = True\nrefine_anchor = self.bbox_decode(fam_reg1, init_anchors)\n- #refine_anchor = self.bbox_decode(fam_reg.detach(), init_anchors)\nself.refine_anchor_list.append(refine_anchor)\n@@ -615,19 +619,13 @@ class S2ANetHead(nn.Layer):\niou = rbox_iou(fam_bbox_decode, bbox_gt_bboxes)\niou = paddle.diag(iou)\n- if reg_loss_type == 'iou':\n- EPS = paddle.to_tensor(\n- 1e-8, dtype='float32', stop_gradient=True)\n- iou_factor = -1.0 * paddle.log(iou + EPS) / (fam_bbox + EPS)\n- iou_factor.stop_gradient = True\n- #fam_bbox = fam_bbox * iou_factor\n- elif reg_loss_type == 'gwd':\n+ if reg_loss_type == 'gwd':\nbbox_gt_bboxes_level = bbox_gt_bboxes[st_idx:st_idx +\nfeat_anchor_num, :]\nfam_bbox_total = self.gwd_loss(fam_bbox_decode,\nbbox_gt_bboxes_level)\nfam_bbox_total = fam_bbox_total * feat_bbox_weights\n- fam_bbox_total = paddle.sum(fam_bbox_total)\n+ fam_bbox_total = paddle.sum(fam_bbox_total) / num_total_samples\nfam_bbox_losses.append(fam_bbox_total)\nst_idx += feat_anchor_num\n@@ -735,19 +733,13 @@ class S2ANetHead(nn.Layer):\niou = rbox_iou(odm_bbox_decode, bbox_gt_bboxes)\niou = paddle.diag(iou)\n- if reg_loss_type == 'iou':\n- EPS = paddle.to_tensor(\n- 1e-8, dtype='float32', stop_gradient=True)\n- iou_factor = -1.0 * paddle.log(iou + EPS) / (odm_bbox + EPS)\n- iou_factor.stop_gradient = True\n- # odm_bbox = odm_bbox * iou_factor\n- elif reg_loss_type == 'gwd':\n+ if reg_loss_type == 'gwd':\nbbox_gt_bboxes_level = bbox_gt_bboxes[st_idx:st_idx +\nfeat_anchor_num, :]\nodm_bbox_total = self.gwd_loss(odm_bbox_decode,\nbbox_gt_bboxes_level)\nodm_bbox_total = 
odm_bbox_total * feat_bbox_weights\n- odm_bbox_total = paddle.sum(odm_bbox_total)\n+ odm_bbox_total = paddle.sum(odm_bbox_total) / num_total_samples\nodm_bbox_losses.append(odm_bbox_total)\nst_idx += feat_anchor_num\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
fix detach bug (#3912)
499,299
09.08.2021 10:45:56
-28,800
879c90b6d0420410973f5e22932417d174ef45a9
add lite-hrnet_256x192 to keypoint model_zoo
[ { "change_type": "MODIFY", "old_path": "README_en.md", "new_path": "README_en.md", "diff": "@@ -251,6 +251,7 @@ The relationship between COCO mAP and FPS on Tesla V100 of representative models\n- [Keypoint detection](configs/keypoint)\n- HigherHRNet\n- HRNet\n+ - LiteHRNet\n- [Multi-Object Tracking](configs/mot/README.md)\n- [DeepSORT](configs/mot/deepsort/README.md)\n- [JDE](configs/mot/jde/README.md)\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
add lite-hrnet_256x192 to keypoint model_zoo (#3907)
499,313
10.08.2021 19:43:24
-28,800
78e6c09bd17d4174b48dc6f3205871af8f6a297a
fix anno not found in infer mode
[ { "change_type": "MODIFY", "old_path": "ppdet/data/source/dataset.py", "new_path": "ppdet/data/source/dataset.py", "diff": "@@ -149,7 +149,12 @@ class ImageFolder(DetDataset):\nself.sample_num = sample_num\ndef check_or_download_dataset(self):\n- return\n+ if self.dataset_dir:\n+ # NOTE: ImageFolder is only used for prediction, in\n+ # infer mode, image_dir is set by set_images\n+ # so we only check anno_path here\n+ self.dataset_dir = get_dataset_path(self.dataset_dir,\n+ self.anno_path, None)\ndef parse_dataset(self, ):\nif not self.roidbs:\n" }, { "change_type": "MODIFY", "old_path": "ppdet/utils/download.py", "new_path": "ppdet/utils/download.py", "diff": "@@ -195,6 +195,10 @@ def get_dataset_path(path, annotation, image_dir):\n\"Please apply and download the dataset following docs/tutorials/PrepareMOTDataSet.md\".\nformat(name))\n+ if name == \"spine_coco\":\n+ if _dataset_exists(data_dir, annotation, image_dir):\n+ return data_dir\n+\n# For voc, only check dir VOCdevkit/VOC2012, VOCdevkit/VOC2007\nif name in ['voc', 'fruit', 'roadsign_voc']:\nexists = True\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
fix anno not found in infer mode (#3929)
499,339
11.08.2021 15:05:29
-28,800
b273cf279ea0a3558f47d64a5433dc32b349ae9b
add merge_config
[ { "change_type": "MODIFY", "old_path": "tools/export_model.py", "new_path": "tools/export_model.py", "diff": "@@ -103,6 +103,8 @@ def main():\nif FLAGS.slim_config:\ncfg = build_slim_model(cfg, FLAGS.slim_config, mode='test')\n+ # FIXME: Temporarily solve the priority problem of FLAGS.opt\n+ merge_config(FLAGS.opt)\ncheck_config(cfg)\ncheck_gpu(cfg.use_gpu)\ncheck_version()\n" }, { "change_type": "MODIFY", "old_path": "tools/train.py", "new_path": "tools/train.py", "diff": "@@ -128,6 +128,8 @@ def main():\nif FLAGS.slim_config:\ncfg = build_slim_model(cfg, FLAGS.slim_config)\n+ # FIXME: Temporarily solve the priority problem of FLAGS.opt\n+ merge_config(FLAGS.opt)\ncheck.check_config(cfg)\ncheck.check_gpu(cfg.use_gpu)\ncheck.check_version()\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
add merge_config (#3938)
499,298
11.08.2021 17:28:51
-28,800
32b1929ce16dda9eb62da0c70526476cd4f1d787
add BasicBlock dcn
[ { "change_type": "MODIFY", "old_path": "ppdet/modeling/backbones/resnet.py", "new_path": "ppdet/modeling/backbones/resnet.py", "diff": "@@ -186,7 +186,6 @@ class BasicBlock(nn.Layer):\ndcn_v2=False,\nstd_senet=False):\nsuper(BasicBlock, self).__init__()\n- assert dcn_v2 is False, \"Not implemented yet.\"\nassert groups == 1 and base_width == 64, 'BasicBlock only supports groups=1 and base_width=64'\nself.shortcut = shortcut\n@@ -239,7 +238,8 @@ class BasicBlock(nn.Layer):\nnorm_type=norm_type,\nnorm_decay=norm_decay,\nfreeze_norm=freeze_norm,\n- lr=lr)\n+ lr=lr,\n+ dcn_v2=dcn_v2)\nself.std_senet = std_senet\nif self.std_senet:\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
add BasicBlock dcn (#3947)
499,298
12.08.2021 10:05:30
-28,800
d1c6c1e1a7f7dda73a957f301209567d0848fcb8
fix mot typo
[ { "change_type": "MODIFY", "old_path": "deploy/python/mot_jde_infer.py", "new_path": "deploy/python/mot_jde_infer.py", "diff": "@@ -251,8 +251,7 @@ def predict_video(detector, camera_id):\nif not os.path.exists(save_dir):\nos.makedirs(save_dir)\ncv2.imwrite(\n- os.path.join(save_dir, '{:05d}.jpg'.format(frame_id)),\n- im)\n+ os.path.join(save_dir, '{:05d}.jpg'.format(frame_id)), im)\nelse:\nwriter.write(im)\nframe_id += 1\n" }, { "change_type": "MODIFY", "old_path": "deploy/python/mot_sde_infer.py", "new_path": "deploy/python/mot_sde_infer.py", "diff": "@@ -392,8 +392,7 @@ def predict_video(detector, reid_model, camera_id):\nif not os.path.exists(save_dir):\nos.makedirs(save_dir)\ncv2.imwrite(\n- os.path.join(save_dir, '{:05d}.jpg'.format(frame_id)),\n- im)\n+ os.path.join(save_dir, '{:05d}.jpg'.format(frame_id)), im)\nelse:\nwriter.write(im)\nframe_id += 1\n" }, { "change_type": "MODIFY", "old_path": "deploy/python/utils.py", "new_path": "deploy/python/utils.py", "diff": "@@ -229,4 +229,3 @@ def get_current_memory_mb():\nmeminfo = pynvml.nvmlDeviceGetMemoryInfo(handle)\ngpu_mem = meminfo.used / 1024. / 1024.\nreturn round(cpu_mem, 4), round(gpu_mem, 4), round(gpu_percent, 4)\n-\n" }, { "change_type": "MODIFY", "old_path": "deploy/serving/test_client.py", "new_path": "deploy/serving/test_client.py", "diff": "@@ -41,4 +41,3 @@ fetch_map = client.predict(\nprint(fetch_map)\nfetch_map[\"image\"] = sys.argv[1]\npostprocess(fetch_map)\n-\n" }, { "change_type": "MODIFY", "old_path": "ppdet/data/transform/mot_operators.py", "new_path": "ppdet/data/transform/mot_operators.py", "diff": "@@ -108,7 +108,9 @@ class LetterBoxResize(BaseOperator):\nif not isinstance(im, np.ndarray):\nraise TypeError(\"{}: image type is not numpy.\".format(self))\nif len(im.shape) != 3:\n- raise ImageError('{}: image is not 3-dimensional.'.format(self))\n+ from PIL import UnidentifiedImageError\n+ raise UnidentifiedImageError(\n+ '{}: image is not 3-dimensional.'.format(self))\n# apply image\nheight, width = self.target_size\n@@ -135,7 +137,7 @@ class MOTRandomAffine(BaseOperator):\nArgs:\ndegrees (list[2]): the rotate range to apply, transform range is [min, max]\n- translate (list[2]): the translate range to apply, ransform range is [min, max]\n+ translate (list[2]): the translate range to apply, transform range is [min, max]\nscale (list[2]): the scale range to apply, transform range is [min, max]\nshear (list[2]): the shear range to apply, transform range is [min, max]\nborderValue (list[3]): value used in case of a constant border when appling\n" }, { "change_type": "MODIFY", "old_path": "ppdet/engine/trainer.py", "new_path": "ppdet/engine/trainer.py", "diff": "@@ -17,6 +17,7 @@ from __future__ import division\nfrom __future__ import print_function\nimport os\n+import sys\nimport copy\nimport time\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
fix mot typo (#3953)
499,395
13.08.2021 12:06:15
-28,800
6abad1604639ebcefdfdca139aa6b2dc8f2afe35
fix shape to avoid trt problem
[ { "change_type": "MODIFY", "old_path": "ppdet/modeling/necks/yolo_fpn.py", "new_path": "ppdet/modeling/necks/yolo_fpn.py", "diff": "@@ -24,11 +24,11 @@ __all__ = ['YOLOv3FPN', 'PPYOLOFPN', 'PPYOLOTinyFPN', 'PPYOLOPAN']\ndef add_coord(x, data_format):\n- shape = paddle.shape(x)\n+ b = paddle.shape(x)[0]\nif data_format == 'NCHW':\n- b, h, w = shape[0], shape[2], shape[3]\n+ h, w = x.shape[2], x.shape[3]\nelse:\n- b, h, w = shape[0], shape[1], shape[2]\n+ h, w = x.shape[1], x.shape[2]\ngx = paddle.arange(w, dtype=x.dtype) / ((w - 1.) * 2.0) - 1.\ngy = paddle.arange(h, dtype=x.dtype) / ((h - 1.) * 2.0) - 1.\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
fix shape to avoid trt problem (#3962)
499,298
13.08.2021 12:55:08
-28,800
0140dbef4bdc6e515a28a1771834f0729dd7163b
update fairmot doc
[ { "change_type": "MODIFY", "old_path": "configs/mot/fairmot/_base_/optimizer_30e.yml", "new_path": "configs/mot/fairmot/_base_/optimizer_30e.yml", "diff": "epoch: 30\nLearningRate:\n- base_lr: 0.0004\n+ base_lr: 0.0001\nschedulers:\n- !PiecewiseDecay\ngamma: 0.1\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
update fairmot doc (#3970)
499,299
16.08.2021 17:51:13
-28,800
7d8a5d9f2d1c74ee87d8606031bc89aea24e900e
add two lite-hrnet models
[ { "change_type": "ADD", "old_path": null, "new_path": "configs/keypoint/lite_hrnet/lite_hrnet_18_384x288_coco.yml", "diff": "+use_gpu: true\n+log_iter: 5\n+save_dir: output\n+snapshot_epoch: 10\n+weights: output/lite_hrnet_18_384x288_coco/model_final\n+epoch: 210\n+num_joints: &num_joints 17\n+pixel_std: &pixel_std 200\n+metric: KeyPointTopDownCOCOEval\n+num_classes: 1\n+train_height: &train_height 384\n+train_width: &train_width 288\n+trainsize: &trainsize [*train_width, *train_height]\n+hmsize: &hmsize [72, 96]\n+flip_perm: &flip_perm [[1, 2], [3, 4], [5, 6], [7, 8], [9, 10], [11, 12], [13, 14], [15, 16]]\n+\n+\n+#####model\n+architecture: TopDownHRNet\n+\n+TopDownHRNet:\n+ backbone: LiteHRNet\n+ post_process: HRNetPostProcess\n+ flip_perm: *flip_perm\n+ num_joints: *num_joints\n+ width: &width 40\n+ loss: KeyPointMSELoss\n+ use_dark: false\n+\n+LiteHRNet:\n+ network_type: lite_18\n+ freeze_at: -1\n+ freeze_norm: false\n+ return_idx: [0]\n+\n+KeyPointMSELoss:\n+ use_target_weight: true\n+ loss_scale: 1.0\n+\n+#####optimizer\n+LearningRate:\n+ base_lr: 0.002\n+ schedulers:\n+ - !PiecewiseDecay\n+ milestones: [170, 200]\n+ gamma: 0.1\n+ - !LinearWarmup\n+ start_factor: 0.001\n+ steps: 500\n+\n+OptimizerBuilder:\n+ optimizer:\n+ type: Adam\n+ regularizer:\n+ factor: 0.0\n+ type: L2\n+\n+\n+#####data\n+TrainDataset:\n+ !KeypointTopDownCocoDataset\n+ image_dir: train2017\n+ anno_path: annotations/person_keypoints_train2017.json\n+ dataset_dir: dataset/coco\n+ num_joints: *num_joints\n+ trainsize: *trainsize\n+ pixel_std: *pixel_std\n+ use_gt_bbox: True\n+\n+\n+EvalDataset:\n+ !KeypointTopDownCocoDataset\n+ image_dir: val2017\n+ anno_path: annotations/person_keypoints_val2017.json\n+ dataset_dir: dataset/coco\n+ num_joints: *num_joints\n+ trainsize: *trainsize\n+ pixel_std: *pixel_std\n+ use_gt_bbox: True\n+ image_thre: 0.0\n+\n+\n+TestDataset:\n+ !ImageFolder\n+ anno_path: dataset/coco/keypoint_imagelist.txt\n+\n+worker_num: 2\n+global_mean: &global_mean [0.485, 0.456, 0.406]\n+global_std: &global_std [0.229, 0.224, 0.225]\n+TrainReader:\n+ sample_transforms:\n+ - RandomFlipHalfBodyTransform:\n+ scale: 0.25\n+ rot: 30\n+ num_joints_half_body: 8\n+ prob_half_body: 0.3\n+ pixel_std: *pixel_std\n+ trainsize: *trainsize\n+ upper_body_ids: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]\n+ flip_pairs: *flip_perm\n+ - TopDownAffine:\n+ trainsize: *trainsize\n+ - ToHeatmapsTopDown:\n+ hmsize: *hmsize\n+ sigma: 3\n+ batch_transforms:\n+ - NormalizeImage:\n+ mean: *global_mean\n+ std: *global_std\n+ is_scale: true\n+ - Permute: {}\n+ batch_size: 32\n+ shuffle: true\n+ drop_last: false\n+\n+EvalReader:\n+ sample_transforms:\n+ - TopDownAffine:\n+ trainsize: *trainsize\n+ batch_transforms:\n+ - NormalizeImage:\n+ mean: *global_mean\n+ std: *global_std\n+ is_scale: true\n+ - Permute: {}\n+ batch_size: 16\n+\n+TestReader:\n+ inputs_def:\n+ image_shape: [3, *train_height, *train_width]\n+ sample_transforms:\n+ - Decode: {}\n+ - TopDownEvalAffine:\n+ trainsize: *trainsize\n+ - NormalizeImage:\n+ mean: *global_mean\n+ std: *global_std\n+ is_scale: true\n+ - Permute: {}\n+ batch_size: 1\n" }, { "change_type": "ADD", "old_path": null, "new_path": "configs/keypoint/lite_hrnet/lite_hrnet_30_384x288_coco.yml", "diff": "+use_gpu: true\n+log_iter: 5\n+save_dir: output\n+snapshot_epoch: 10\n+weights: output/lite_hrnet_30_384x288_coco/model_final\n+epoch: 210\n+num_joints: &num_joints 17\n+pixel_std: &pixel_std 200\n+metric: KeyPointTopDownCOCOEval\n+num_classes: 1\n+train_height: &train_height 384\n+train_width: &train_width 
288\n+trainsize: &trainsize [*train_width, *train_height]\n+hmsize: &hmsize [72, 96]\n+flip_perm: &flip_perm [[1, 2], [3, 4], [5, 6], [7, 8], [9, 10], [11, 12], [13, 14], [15, 16]]\n+\n+\n+#####model\n+architecture: TopDownHRNet\n+\n+TopDownHRNet:\n+ backbone: LiteHRNet\n+ post_process: HRNetPostProcess\n+ flip_perm: *flip_perm\n+ num_joints: *num_joints\n+ width: &width 40\n+ loss: KeyPointMSELoss\n+ use_dark: false\n+\n+LiteHRNet:\n+ network_type: lite_30\n+ freeze_at: -1\n+ freeze_norm: false\n+ return_idx: [0]\n+\n+KeyPointMSELoss:\n+ use_target_weight: true\n+ loss_scale: 1.0\n+\n+#####optimizer\n+LearningRate:\n+ base_lr: 0.002\n+ schedulers:\n+ - !PiecewiseDecay\n+ milestones: [170, 200]\n+ gamma: 0.1\n+ - !LinearWarmup\n+ start_factor: 0.001\n+ steps: 500\n+\n+OptimizerBuilder:\n+ optimizer:\n+ type: Adam\n+ regularizer:\n+ factor: 0.0\n+ type: L2\n+\n+\n+#####data\n+TrainDataset:\n+ !KeypointTopDownCocoDataset\n+ image_dir: train2017\n+ anno_path: annotations/person_keypoints_train2017.json\n+ dataset_dir: dataset/coco\n+ num_joints: *num_joints\n+ trainsize: *trainsize\n+ pixel_std: *pixel_std\n+ use_gt_bbox: True\n+\n+\n+EvalDataset:\n+ !KeypointTopDownCocoDataset\n+ image_dir: val2017\n+ anno_path: annotations/person_keypoints_val2017.json\n+ dataset_dir: dataset/coco\n+ num_joints: *num_joints\n+ trainsize: *trainsize\n+ pixel_std: *pixel_std\n+ use_gt_bbox: True\n+ image_thre: 0.0\n+\n+\n+TestDataset:\n+ !ImageFolder\n+ anno_path: dataset/coco/keypoint_imagelist.txt\n+\n+worker_num: 2\n+global_mean: &global_mean [0.485, 0.456, 0.406]\n+global_std: &global_std [0.229, 0.224, 0.225]\n+TrainReader:\n+ sample_transforms:\n+ - RandomFlipHalfBodyTransform:\n+ scale: 0.25\n+ rot: 30\n+ num_joints_half_body: 8\n+ prob_half_body: 0.3\n+ pixel_std: *pixel_std\n+ trainsize: *trainsize\n+ upper_body_ids: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]\n+ flip_pairs: *flip_perm\n+ - TopDownAffine:\n+ trainsize: *trainsize\n+ - ToHeatmapsTopDown:\n+ hmsize: *hmsize\n+ sigma: 3\n+ batch_transforms:\n+ - NormalizeImage:\n+ mean: *global_mean\n+ std: *global_std\n+ is_scale: true\n+ - Permute: {}\n+ batch_size: 32\n+ shuffle: true\n+ drop_last: false\n+\n+EvalReader:\n+ sample_transforms:\n+ - TopDownAffine:\n+ trainsize: *trainsize\n+ batch_transforms:\n+ - NormalizeImage:\n+ mean: *global_mean\n+ std: *global_std\n+ is_scale: true\n+ - Permute: {}\n+ batch_size: 16\n+\n+TestReader:\n+ inputs_def:\n+ image_shape: [3, *train_height, *train_width]\n+ sample_transforms:\n+ - Decode: {}\n+ - TopDownEvalAffine:\n+ trainsize: *trainsize\n+ - NormalizeImage:\n+ mean: *global_mean\n+ std: *global_std\n+ is_scale: true\n+ - Permute: {}\n+ batch_size: 1\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
add two lite-hrnet models (#3983)
499,298
17.08.2021 14:14:38
-28,800
ab5e750d1ead2b49a6f6047d896683034046bbc1
[MOT] fix doc gpu nums and ffmpeg
[ { "change_type": "MODIFY", "old_path": "configs/mot/README.md", "new_path": "configs/mot/README.md", "diff": "@@ -42,6 +42,7 @@ pip install -r requirements.txt\n**Notes:**\n- Install `cython_bbox` for Windows: `pip install -e git+https://github.com/samson-wang/cython_bbox.git#egg=cython-bbox`. You can refer to this [tutorial](https://stackoverflow.com/questions/60349980/is-there-a-way-to-install-cython-bbox-for-windows).\n- Evaluation on Windows CUDA 11 environment may not be normally. It will be repaired as soon as possible. You can change to CUDA 10.2 or CUDA 10.1 environment for normal evaluation.\n+- Please make sure that [ffmpeg](https://ffmpeg.org/ffmpeg.html) is installed first, on Linux(Ubuntu) platform you can directly install it by the following command:`apt-get update && apt-get install -y ffmpeg`.\n## Model Zoo\n@@ -129,7 +130,7 @@ If you use a stronger detection model, you can get better results. Each txt is t\n| DLA-34 | 1088x608 | 74.8 | 74.4 | 930 | 7038 | 37994 | - | [model](https://paddledet.bj.bcebos.com/models/mot/fairmot_dla34_30e_1088x608.pdparams) | [config](https://github.com/PaddlePaddle/PaddleDetection/tree/develop/configs/mot/fairmot/fairmot_dla34_30e_1088x608.yml) |\n**Notes:**\n- FairMOT used 8 GPUs for training and mini-batch size as 6 on each GPU, and trained for 30 epoches.\n+ FairMOT used 2 GPUs for training and mini-batch size as 6 on each GPU, and trained for 30 epoches.\n## Feature Tracking Model\n@@ -229,10 +230,10 @@ dataset/mot\n### 1. Training\n-Training FairMOT on 8 GPUs with following command\n+Training FairMOT on 2 GPUs with following command\n```bash\n-python -m paddle.distributed.launch --log_dir=./fairmot_dla34_30e_1088x608/ --gpus 0,1,2,3,4,5,6,7 tools/train.py -c configs/mot/fairmot/fairmot_dla34_30e_1088x608.yml\n+python -m paddle.distributed.launch --log_dir=./fairmot_dla34_30e_1088x608/ --gpus 0,1 tools/train.py -c configs/mot/fairmot/fairmot_dla34_30e_1088x608.yml\n```\n### 2. Evaluation\n" }, { "change_type": "MODIFY", "old_path": "ppdet/data/source/mot.py", "new_path": "ppdet/data/source/mot.py", "diff": "@@ -376,11 +376,8 @@ def video2frames(video_path, outpath, frame_rate, **kargs):\n]\ncmd = ''.join(cmd) + _dict2str(kargs)\n- try:\n- os.system(cmd)\n- except:\n- raise RuntimeError('ffmpeg process video: {} error'.format(vid_name))\n- sys.stdout.flush()\n+ if os.system(cmd) != 0:\n+ raise RuntimeError('ffmpeg process video: {} error'.format(video_path))\nsys.exit(-1)\nsys.stdout.flush()\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
[MOT] fix doc gpu nums and ffmpeg (#3995)
499,313
25.08.2021 15:03:27
-28,800
3c9bcfab868f50636685f4921fed483159ff5393
update version require & fix typo
[ { "change_type": "MODIFY", "old_path": "README_en.md", "new_path": "README_en.md", "diff": "@@ -96,7 +96,7 @@ Based on the high performance core of PaddlePaddle, advantages of training speed\n</ul>\n</ul>\n<ul>\n- <li><b>Face-Detction</b></li>\n+ <li><b>Face-Detection</b></li>\n<ul>\n<li>FaceBoxes</li>\n<li>BlazeFace</li>\n@@ -262,7 +262,7 @@ The relationship between COCO mAP and FPS on Tesla V100 of representative models\n- [Vehicle detection](configs/vehicle/README.md)\n- Competition Plan\n- [Objects365 2019 Challenge champion model](static/docs/featured_model/champion_model/CACascadeRCNN.md)\n- - [Best single model of Open Images 2019-Object Detction](static/docs/featured_model/champion_model/OIDV5_BASELINE_MODEL.md)\n+ - [Best single model of Open Images 2019-Object Detection](static/docs/featured_model/champion_model/OIDV5_BASELINE_MODEL.md)\n## Applications\n" }, { "change_type": "MODIFY", "old_path": "docs/tutorials/INSTALL.md", "new_path": "docs/tutorials/INSTALL.md", "diff": "@@ -22,6 +22,7 @@ Dependency of PaddleDetection and PaddlePaddle:\n| PaddleDetection version | PaddlePaddle version | tips |\n| :----------------: | :---------------: | :-------: |\n+| release/2.2 | >= 2.1.2 | Dygraph mode is set as default |\n| release/2.1 | >= 2.1.0 | Dygraph mode is set as default |\n| release/2.0 | >= 2.0.1 | Dygraph mode is set as default |\n| release/2.0-rc | >= 2.0.1 | -- |\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
udpate verison require & fix typo (#4023)
499,313
26.08.2021 10:38:42
-28,800
18a1f7303e61d25761fceb0e0f71bf753c80adc5
set highest import priority for current dir
[ { "change_type": "MODIFY", "old_path": "tools/anchor_cluster.py", "new_path": "tools/anchor_cluster.py", "diff": "@@ -20,8 +20,7 @@ import os\nimport sys\n# add python path of PadleDetection to sys.path\nparent_path = os.path.abspath(os.path.join(__file__, *(['..'] * 2)))\n-if parent_path not in sys.path:\n- sys.path.append(parent_path)\n+sys.path.insert(0, parent_path)\nfrom ppdet.utils.logger import setup_logger\nlogger = setup_logger('ppdet.anchor_cluster')\n" }, { "change_type": "MODIFY", "old_path": "tools/eval.py", "new_path": "tools/eval.py", "diff": "@@ -21,8 +21,7 @@ import sys\n# add python path of PadleDetection to sys.path\nparent_path = os.path.abspath(os.path.join(__file__, *(['..'] * 2)))\n-if parent_path not in sys.path:\n- sys.path.append(parent_path)\n+sys.path.insert(0, parent_path)\n# ignore warning log\nimport warnings\n" }, { "change_type": "MODIFY", "old_path": "tools/eval_mot.py", "new_path": "tools/eval_mot.py", "diff": "@@ -21,8 +21,7 @@ import sys\n# add python path of PadleDetection to sys.path\nparent_path = os.path.abspath(os.path.join(__file__, *(['..'] * 2)))\n-if parent_path not in sys.path:\n- sys.path.append(parent_path)\n+sys.path.insert(0, parent_path)\n# ignore warning log\nimport warnings\n" }, { "change_type": "MODIFY", "old_path": "tools/export_model.py", "new_path": "tools/export_model.py", "diff": "@@ -21,8 +21,7 @@ import sys\n# add python path of PadleDetection to sys.path\nparent_path = os.path.abspath(os.path.join(__file__, *(['..'] * 2)))\n-if parent_path not in sys.path:\n- sys.path.append(parent_path)\n+sys.path.insert(0, parent_path)\n# ignore warning log\nimport warnings\n" }, { "change_type": "MODIFY", "old_path": "tools/infer.py", "new_path": "tools/infer.py", "diff": "@@ -21,8 +21,7 @@ import sys\n# add python path of PadleDetection to sys.path\nparent_path = os.path.abspath(os.path.join(__file__, *(['..'] * 2)))\n-if parent_path not in sys.path:\n- sys.path.append(parent_path)\n+sys.path.insert(0, parent_path)\n# ignore warning log\nimport warnings\n" }, { "change_type": "MODIFY", "old_path": "tools/infer_mot.py", "new_path": "tools/infer_mot.py", "diff": "@@ -21,8 +21,7 @@ import sys\n# add python path of PadleDetection to sys.path\nparent_path = os.path.abspath(os.path.join(__file__, *(['..'] * 2)))\n-if parent_path not in sys.path:\n- sys.path.append(parent_path)\n+sys.path.insert(0, parent_path)\n# ignore warning log\nimport warnings\n" }, { "change_type": "MODIFY", "old_path": "tools/train.py", "new_path": "tools/train.py", "diff": "@@ -21,8 +21,7 @@ import sys\n# add python path of PadleDetection to sys.path\nparent_path = os.path.abspath(os.path.join(__file__, *(['..'] * 2)))\n-if parent_path not in sys.path:\n- sys.path.append(parent_path)\n+sys.path.insert(0, parent_path)\n# ignore warning log\nimport warnings\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
set highest import priority for current dir (#4019)
499,339
26.08.2021 16:44:41
-28,800
3b564170a8cd8ee05ca4aa1f23db9e3ea8a45f3e
[ssd] add MLPerf ssd model
[ { "change_type": "ADD", "old_path": null, "new_path": "configs/ssd/_base_/optimizer_70e.yml", "diff": "+epoch: 70\n+\n+LearningRate:\n+ base_lr: 0.05\n+ schedulers:\n+ - !PiecewiseDecay\n+ milestones: [48, 60]\n+ gamma: [0.1, 0.1]\n+ use_warmup: false\n+\n+OptimizerBuilder:\n+ optimizer:\n+ momentum: 0.9\n+ type: Momentum\n+ regularizer:\n+ factor: 0.0005\n+ type: L2\n" }, { "change_type": "ADD", "old_path": null, "new_path": "configs/ssd/_base_/ssd_r34_300.yml", "diff": "+architecture: SSD\n+pretrain_weights: https://paddledet.bj.bcebos.com/models/pretrained/ResNet34_pretrained.pdparams\n+\n+SSD:\n+ backbone: ResNet\n+ ssd_head: SSDHead\n+ post_process: BBoxPostProcess\n+ r34_backbone: True\n+\n+ResNet:\n+ # index 0 stands for res2\n+ depth: 34\n+ norm_type: bn\n+ freeze_norm: False\n+ freeze_at: -1\n+ return_idx: [2]\n+ num_stages: 3\n+\n+SSDHead:\n+ anchor_generator:\n+ steps: [8, 16, 32, 64, 100, 300]\n+ aspect_ratios: [[2.], [2., 3.], [2., 3.], [2., 3.], [2.], [2.]]\n+ min_sizes: [21.0, 45.0, 99.0, 153.0, 207.0, 261.0]\n+ max_sizes: [45.0, 99.0, 153.0, 207.0, 261.0, 315.0]\n+ offset: 0.5\n+ clip: True\n+ min_max_aspect_ratios_order: True\n+ use_extra_head: True\n+\n+BBoxPostProcess:\n+ decode:\n+ name: SSDBox\n+ nms:\n+ name: MultiClassNMS\n+ keep_top_k: 200\n+ score_threshold: 0.05\n+ nms_threshold: 0.5\n+ nms_top_k: 400\n" }, { "change_type": "ADD", "old_path": null, "new_path": "configs/ssd/_base_/ssd_r34_reader.yml", "diff": "+worker_num: 3\n+TrainReader:\n+ inputs_def:\n+ num_max_boxes: 90\n+ sample_transforms:\n+ - Decode: {}\n+ - RandomCrop: {num_attempts: 1}\n+ - RandomFlip: {}\n+ - Resize: {target_size: [300, 300], keep_ratio: False, interp: 1}\n+ - RandomDistort: {brightness: [0.875, 1.125, 0.5], random_apply: False}\n+ - NormalizeBox: {}\n+ - PadBox: {num_max_boxes: 90}\n+ - NormalizeImage: {mean: [0.485, 0.456, 0.406], std: [0.229, 0.224, 0.225], is_scale: true}\n+ - Permute: {}\n+ batch_size: 64\n+ shuffle: true\n+ drop_last: true\n+ use_shared_memory: true\n+\n+\n+EvalReader:\n+ sample_transforms:\n+ - Decode: {}\n+ - Resize: {target_size: [300, 300], keep_ratio: False, interp: 1}\n+ - NormalizeImage: {mean: [0.485, 0.456, 0.406], std: [0.229, 0.224, 0.225], is_scale: true}\n+ - Permute: {}\n+ batch_size: 1\n+\n+\n+TestReader:\n+ inputs_def:\n+ image_shape: [3, 300, 300]\n+ sample_transforms:\n+ - Decode: {}\n+ - Resize: {target_size: [300, 300], keep_ratio: False, interp: 1}\n+ - NormalizeImage: {mean: [0.485, 0.456, 0.406], std: [0.229, 0.224, 0.225], is_scale: true}\n+ - Permute: {}\n+ batch_size: 1\n" }, { "change_type": "ADD", "old_path": null, "new_path": "configs/ssd/ssd_r34_70e_coco.yml", "diff": "+_BASE_: [\n+ '../datasets/coco_detection.yml',\n+ '../runtime.yml',\n+ '_base_/optimizer_70e.yml',\n+ '_base_/ssd_r34_300.yml',\n+ '_base_/ssd_r34_reader.yml',\n+]\n+weights: output/ssd_r34_70e_coco/model_final\n+\n+log_iter: 100\n+snapshot_epoch: 5\n" }, { "change_type": "MODIFY", "old_path": "ppdet/modeling/architectures/ssd.py", "new_path": "ppdet/modeling/architectures/ssd.py", "diff": "@@ -36,11 +36,19 @@ class SSD(BaseArch):\n__category__ = 'architecture'\n__inject__ = ['post_process']\n- def __init__(self, backbone, ssd_head, post_process):\n+ def __init__(self, backbone, ssd_head, post_process, r34_backbone=False):\nsuper(SSD, self).__init__()\nself.backbone = backbone\nself.ssd_head = ssd_head\nself.post_process = post_process\n+ self.r34_backbone = r34_backbone\n+ if self.r34_backbone:\n+ from ppdet.modeling.backbones.resnet import ResNet\n+ assert 
isinstance(self.backbone, ResNet) and \\\n+ self.backbone.depth == 34, \\\n+ \"If you set r34_backbone=True, please use ResNet-34 as backbone.\"\n+ self.backbone.res_layers[2].blocks[0].branch2a.conv._stride = [1, 1]\n+ self.backbone.res_layers[2].blocks[0].short.conv._stride = [1, 1]\n@classmethod\ndef from_config(cls, cfg, *args, **kwargs):\n" }, { "change_type": "MODIFY", "old_path": "ppdet/modeling/heads/ssd_head.py", "new_path": "ppdet/modeling/heads/ssd_head.py", "diff": "@@ -28,7 +28,7 @@ class SepConvLayer(nn.Layer):\nout_channels,\nkernel_size=3,\npadding=1,\n- conv_decay=0):\n+ conv_decay=0.):\nsuper(SepConvLayer, self).__init__()\nself.dw_conv = nn.Conv2D(\nin_channels=in_channels,\n@@ -61,6 +61,35 @@ class SepConvLayer(nn.Layer):\nreturn x\n+class SSDExtraHead(nn.Layer):\n+ def __init__(self,\n+ in_channels=256,\n+ out_channels=([256, 512], [256, 512], [128, 256], [128, 256],\n+ [128, 256]),\n+ strides=(2, 2, 2, 1, 1),\n+ paddings=(1, 1, 1, 0, 0)):\n+ super(SSDExtraHead, self).__init__()\n+ self.convs = nn.LayerList()\n+ for out_channel, stride, padding in zip(out_channels, strides,\n+ paddings):\n+ self.convs.append(\n+ self._make_layers(in_channels, out_channel[0], out_channel[1],\n+ stride, padding))\n+ in_channels = out_channel[-1]\n+\n+ def _make_layers(self, c_in, c_hidden, c_out, stride_3x3, padding_3x3):\n+ return nn.Sequential(\n+ nn.Conv2D(c_in, c_hidden, 1),\n+ nn.ReLU(),\n+ nn.Conv2D(c_hidden, c_out, 3, stride_3x3, padding_3x3), nn.ReLU())\n+\n+ def forward(self, x):\n+ out = [x]\n+ for conv_layer in self.convs:\n+ out.append(conv_layer(out[-1]))\n+ return out\n+\n+\n@register\nclass SSDHead(nn.Layer):\n\"\"\"\n@@ -75,6 +104,7 @@ class SSDHead(nn.Layer):\nuse_sepconv (bool): Use SepConvLayer if true\nconv_decay (float): Conv regularization coeff\nloss (object): 'SSDLoss' instance\n+ use_extra_head (bool): If use ResNet34 as baskbone, you should set `use_extra_head`=True\n\"\"\"\n__shared__ = ['num_classes']\n@@ -88,13 +118,19 @@ class SSDHead(nn.Layer):\npadding=1,\nuse_sepconv=False,\nconv_decay=0.,\n- loss='SSDLoss'):\n+ loss='SSDLoss',\n+ use_extra_head=False):\nsuper(SSDHead, self).__init__()\n# add background class\nself.num_classes = num_classes + 1\nself.in_channels = in_channels\nself.anchor_generator = anchor_generator\nself.loss = loss\n+ self.use_extra_head = use_extra_head\n+\n+ if self.use_extra_head:\n+ self.ssd_extra_head = SSDExtraHead()\n+ self.in_channels = [256, 512, 512, 256, 256, 256]\nif isinstance(anchor_generator, dict):\nself.anchor_generator = AnchorGeneratorSSD(**anchor_generator)\n@@ -108,7 +144,7 @@ class SSDHead(nn.Layer):\nbox_conv = self.add_sublayer(\nbox_conv_name,\nnn.Conv2D(\n- in_channels=in_channels[i],\n+ in_channels=self.in_channels[i],\nout_channels=num_prior * 4,\nkernel_size=kernel_size,\npadding=padding))\n@@ -116,7 +152,7 @@ class SSDHead(nn.Layer):\nbox_conv = self.add_sublayer(\nbox_conv_name,\nSepConvLayer(\n- in_channels=in_channels[i],\n+ in_channels=self.in_channels[i],\nout_channels=num_prior * 4,\nkernel_size=kernel_size,\npadding=padding,\n@@ -128,7 +164,7 @@ class SSDHead(nn.Layer):\nscore_conv = self.add_sublayer(\nscore_conv_name,\nnn.Conv2D(\n- in_channels=in_channels[i],\n+ in_channels=self.in_channels[i],\nout_channels=num_prior * self.num_classes,\nkernel_size=kernel_size,\npadding=padding))\n@@ -136,7 +172,7 @@ class SSDHead(nn.Layer):\nscore_conv = self.add_sublayer(\nscore_conv_name,\nSepConvLayer(\n- in_channels=in_channels[i],\n+ in_channels=self.in_channels[i],\nout_channels=num_prior * 
self.num_classes,\nkernel_size=kernel_size,\npadding=padding,\n@@ -148,9 +184,13 @@ class SSDHead(nn.Layer):\nreturn {'in_channels': [i.channels for i in input_shape], }\ndef forward(self, feats, image, gt_bbox=None, gt_class=None):\n+ if self.use_extra_head:\n+ assert len(feats) == 1, \\\n+ (\"If you set use_extra_head=True, backbone feature \"\n+ \"list length should be 1.\")\n+ feats = self.ssd_extra_head(feats[0])\nbox_preds = []\ncls_scores = []\n- prior_boxes = []\nfor feat, box_conv, score_conv in zip(feats, self.box_convs,\nself.score_convs):\nbox_pred = box_conv(feat)\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
[ssd] add MLPerf ssd model (#4055)
499,304
30.08.2021 13:59:48
-28,800
ea5c89dd8bed191bfcc2fea70d6946ffbcf17491
fix GFL export model with slice
[ { "change_type": "MODIFY", "old_path": "ppdet/modeling/heads/gfl_head.py", "new_path": "ppdet/modeling/heads/gfl_head.py", "diff": "@@ -419,7 +419,9 @@ class GFLHead(nn.Layer):\nmlvl_scores = []\nfor stride, cls_score, bbox_pred in zip(self.fpn_stride, cls_scores,\nbbox_preds):\n- featmap_size = cls_score.shape[:2]\n+ featmap_size = [\n+ paddle.shape(cls_score)[0], paddle.shape(cls_score)[1]\n+ ]\ny, x = self.get_single_level_center_point(\nfeatmap_size, stride, cell_offset=cell_offset)\ncenter_points = paddle.stack([x, y], axis=-1)\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
fix GFL export model with slice (#4083)
499,298
30.08.2021 14:04:15
-28,800
abad2953a8e1a5dd2f4e691752e53eadd6a2fade
[MOT] fix ffmpeg
[ { "change_type": "MODIFY", "old_path": "deploy/python/mot_jde_infer.py", "new_path": "deploy/python/mot_jde_infer.py", "diff": "@@ -267,7 +267,7 @@ def predict_video(detector, camera_id):\nif FLAGS.save_images:\nsave_dir = os.path.join(FLAGS.output_dir, video_name.split('.')[-2])\n- cmd_str = 'ffmpeg -f image2 -i {}/%05d.jpg -vf \"scale=trunc(iw/2)*2:trunc(ih/2)*2\" {}'.format(\n+ cmd_str = 'ffmpeg -f image2 -i {}/%05d.jpg {}'.format(\nsave_dir, out_path)\nos.system(cmd_str)\nprint('Save video in {}.'.format(out_path))\n" }, { "change_type": "MODIFY", "old_path": "deploy/python/mot_keypoint_unite_infer.py", "new_path": "deploy/python/mot_keypoint_unite_infer.py", "diff": "@@ -216,7 +216,7 @@ def mot_keypoint_unite_predict_video(mot_model,\nif FLAGS.save_images:\nsave_dir = os.path.join(FLAGS.output_dir, video_name.split('.')[-2])\n- cmd_str = 'ffmpeg -f image2 -i {}/%05d.jpg -vf \"scale=trunc(iw/2)*2:trunc(ih/2)*2\" {}'.format(\n+ cmd_str = 'ffmpeg -f image2 -i {}/%05d.jpg {}'.format(\nsave_dir, out_path)\nos.system(cmd_str)\nprint('Save video in {}.'.format(out_path))\n" }, { "change_type": "MODIFY", "old_path": "deploy/python/mot_sde_infer.py", "new_path": "deploy/python/mot_sde_infer.py", "diff": "@@ -408,7 +408,7 @@ def predict_video(detector, reid_model, camera_id):\nif FLAGS.save_images:\nsave_dir = os.path.join(FLAGS.output_dir, video_name.split('.')[-2])\n- cmd_str = 'ffmpeg -f image2 -i {}/%05d.jpg -vf \"scale=trunc(iw/2)*2:trunc(ih/2)*2\" {}'.format(\n+ cmd_str = 'ffmpeg -f image2 -i {}/%05d.jpg {}'.format(\nsave_dir, out_path)\nos.system(cmd_str)\nprint('Save video in {}.'.format(out_path))\n" }, { "change_type": "MODIFY", "old_path": "ppdet/engine/tracker.py", "new_path": "ppdet/engine/tracker.py", "diff": "@@ -331,7 +331,7 @@ class Tracker(object):\nif save_videos:\noutput_video_path = os.path.join(save_dir, '..',\n'{}_vis.mp4'.format(seq))\n- cmd_str = 'ffmpeg -f image2 -i {}/%05d.jpg -vf \"scale=trunc(iw/2)*2:trunc(ih/2)*2\" {}'.format(\n+ cmd_str = 'ffmpeg -f image2 -i {}/%05d.jpg {}'.format(\nsave_dir, output_video_path)\nos.system(cmd_str)\nlogger.info('Save video in {}.'.format(output_video_path))\n@@ -449,7 +449,7 @@ class Tracker(object):\nif save_videos:\noutput_video_path = os.path.join(save_dir, '..',\n'{}_vis.mp4'.format(seq))\n- cmd_str = 'ffmpeg -f image2 -i {}/%05d.jpg -vf \"scale=trunc(iw/2)*2:trunc(ih/2)*2\" {}'.format(\n+ cmd_str = 'ffmpeg -f image2 -i {}/%05d.jpg {}'.format(\nsave_dir, output_video_path)\nos.system(cmd_str)\nlogger.info('Save video in {}'.format(output_video_path))\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
[MOT] fix ffmpeg (#4078)
499,298
30.08.2021 14:05:34
-28,800
2c2eaf93b44e5905096ac5311332dff34ae1ba89
[MOT] update mot kitticars modelzoo
[ { "change_type": "MODIFY", "old_path": "configs/mot/README.md", "new_path": "configs/mot/README.md", "diff": "@@ -118,7 +118,7 @@ If you use a stronger detection model, you can get better results. Each txt is t\n| backbone | input shape | MOTA | IDF1 | IDS | FP | FN | FPS | download | config |\n| :--------------| :------- | :----: | :----: | :----: | :----: | :----: | :------: | :----: |:-----: |\n| DLA-34(paper) | 1088x608 | 83.3 | 81.9 | 544 | 3822 | 14095 | - | - | - |\n-| DLA-34 | 1088x608 | 83.7 | 83.3 | 435 | 3829 | 13764 | - | [model](https://paddledet.bj.bcebos.com/models/mot/fairmot_dla34_30e_1088x608.pdparams) | [config](https://github.com/PaddlePaddle/PaddleDetection/tree/develop/configs/mot/fairmot/fairmot_dla34_30e_1088x608.yml) |\n+| DLA-34 | 1088x608 | 83.2 | 83.1 | 499 | 3861 | 14223 | - | [model](https://paddledet.bj.bcebos.com/models/mot/fairmot_dla34_30e_1088x608.pdparams) | [config](https://github.com/PaddlePaddle/PaddleDetection/tree/develop/configs/mot/fairmot/fairmot_dla34_30e_1088x608.yml) |\n### FairMOT Results on MOT-16 Test Set\n@@ -126,7 +126,7 @@ If you use a stronger detection model, you can get better results. Each txt is t\n| backbone | input shape | MOTA | IDF1 | IDS | FP | FN | FPS | download | config |\n| :--------------| :------- | :----: | :----: | :----: | :----: | :----: | :------: | :----: |:-----: |\n| DLA-34(paper) | 1088x608 | 74.9 | 72.8 | 1074 | - | - | 25.9 | - | - |\n-| DLA-34 | 1088x608 | 74.8 | 74.4 | 930 | 7038 | 37994 | - | [model](https://paddledet.bj.bcebos.com/models/mot/fairmot_dla34_30e_1088x608.pdparams) | [config](https://github.com/PaddlePaddle/PaddleDetection/tree/develop/configs/mot/fairmot/fairmot_dla34_30e_1088x608.yml) |\n+| DLA-34 | 1088x608 | 75.0 | 74.7 | 919 | 7934 | 36747 | - | [model](https://paddledet.bj.bcebos.com/models/mot/fairmot_dla34_30e_1088x608.pdparams) | [config](https://github.com/PaddlePaddle/PaddleDetection/tree/develop/configs/mot/fairmot/fairmot_dla34_30e_1088x608.yml) |\n**Notes:**\nFairMOT used 2 GPUs for training and mini-batch size as 6 on each GPU, and trained for 30 epoches.\n@@ -151,7 +151,7 @@ If you use a stronger detection model, you can get better results. Each txt is t\n| backbone | input shape | MOTA | FPS | download | config |\n| :--------------| :------- | :-----: | :-----: | :------: | :----: |\n-| DLA-34 | 1088x608 | 67.9 | - |[model](https://paddledet.bj.bcebos.com/models/mot/fairmot_dla34_30e_1088x608_kitticars.pdparams) | [config](https://github.com/PaddlePaddle/PaddleDetection/tree/develop/configs/mot/kitticars/fairmot_dla34_30e_1088x608_kitticars.yml) |\n+| DLA-34 | 1088x608 | 53.9 | - |[model](https://paddledet.bj.bcebos.com/models/mot/fairmot_dla34_30e_1088x608_kitticars.pdparams) | [config](https://github.com/PaddlePaddle/PaddleDetection/tree/develop/configs/mot/kitticars/fairmot_dla34_30e_1088x608_kitticars.yml) |\n## Dataset Preparation\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
[MOT] update mot kitticars modelzoo (#4084)
499,304
30.08.2021 19:29:28
-28,800
f74aa666c54d200f790a477cd0fc349d0478c8ed
fix cascade_mask_rcnn export model
[ { "change_type": "MODIFY", "old_path": "ppdet/engine/export_utils.py", "new_path": "ppdet/engine/export_utils.py", "diff": "@@ -122,7 +122,8 @@ def _dump_infer_config(config, path, image_shape, model):\nformat(infer_arch) +\n'Please set TRT_MIN_SUBGRAPH in ppdet/engine/export_utils.py')\nos._exit(0)\n- if 'Mask' in infer_arch:\n+ if 'mask_head' in config[config['architecture']] and config[config[\n+ 'architecture']]['mask_head']:\ninfer_cfg['mask'] = True\nlabel_arch = 'detection_arch'\nif infer_arch in KEYPOINT_ARCH:\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
fix cascade_mask_rcnn export model (#4088)
499,348
31.08.2021 14:18:11
-28,800
8d6e1137ff75bc55a701c8700b0f36414a1f3bb2
delay coco data load
[ { "change_type": "MODIFY", "old_path": "ppdet/data/source/keypoint_coco.py", "new_path": "ppdet/data/source/keypoint_coco.py", "diff": "@@ -63,6 +63,9 @@ class KeypointBottomUpBaseDataset(DetDataset):\nself.ann_info['num_joints'] = num_joints\nself.img_ids = []\n+ def parse_dataset(self):\n+ pass\n+\ndef __len__(self):\n\"\"\"Get dataset length.\"\"\"\nreturn len(self.img_ids)\n@@ -136,26 +139,30 @@ class KeypointBottomUpCocoDataset(KeypointBottomUpBaseDataset):\nsuper().__init__(dataset_dir, image_dir, anno_path, num_joints,\ntransform, shard, test_mode)\n- ann_file = os.path.join(dataset_dir, anno_path)\n- self.coco = COCO(ann_file)\n+ self.ann_file = os.path.join(dataset_dir, anno_path)\n+ self.shard = shard\n+ self.test_mode = test_mode\n+\n+ def parse_dataset(self):\n+ self.coco = COCO(self.ann_file)\nself.img_ids = self.coco.getImgIds()\n- if not test_mode:\n+ if not self.test_mode:\nself.img_ids = [\nimg_id for img_id in self.img_ids\nif len(self.coco.getAnnIds(\nimgIds=img_id, iscrowd=None)) > 0\n]\n- blocknum = int(len(self.img_ids) / shard[1])\n- self.img_ids = self.img_ids[(blocknum * shard[0]):(blocknum * (shard[0]\n- + 1))]\n+ blocknum = int(len(self.img_ids) / self.shard[1])\n+ self.img_ids = self.img_ids[(blocknum * self.shard[0]):(blocknum * (\n+ self.shard[0] + 1))]\nself.num_images = len(self.img_ids)\nself.id2name, self.name2id = self._get_mapping_id_name(self.coco.imgs)\nself.dataset_name = 'coco'\ncat_ids = self.coco.getCatIds()\nself.catid2clsid = dict({catid: i for i, catid in enumerate(cat_ids)})\n- print(f'=> num_images: {self.num_images}')\n+ print('=> num_images: {}'.format(self.num_images))\n@staticmethod\ndef _get_mapping_id_name(imgs):\n@@ -301,20 +308,23 @@ class KeypointBottomUpCrowdPoseDataset(KeypointBottomUpCocoDataset):\nsuper().__init__(dataset_dir, image_dir, anno_path, num_joints,\ntransform, shard, test_mode)\n- ann_file = os.path.join(dataset_dir, anno_path)\n+ self.ann_file = os.path.join(dataset_dir, anno_path)\n+ self.shard = shard\n+ self.test_mode = test_mode\n- self.coco = COCO(ann_file)\n+ def parse_dataset(self):\n+ self.coco = COCO(self.ann_file)\nself.img_ids = self.coco.getImgIds()\n- if not test_mode:\n+ if not self.test_mode:\nself.img_ids = [\nimg_id for img_id in self.img_ids\nif len(self.coco.getAnnIds(\nimgIds=img_id, iscrowd=None)) > 0\n]\n- blocknum = int(len(self.img_ids) / shard[1])\n- self.img_ids = self.img_ids[(blocknum * shard[0]):(blocknum * (shard[0]\n- + 1))]\n+ blocknum = int(len(self.img_ids) / self.shard[1])\n+ self.img_ids = self.img_ids[(blocknum * self.shard[0]):(blocknum * (\n+ self.shard[0] + 1))]\nself.num_images = len(self.img_ids)\nself.id2name, self.name2id = self._get_mapping_id_name(self.coco.imgs)\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
delay coco data load (#4095)
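The refactor above moves the COCO annotation load out of the dataset constructor and into parse_dataset(), so building the dataset object stays cheap and the expensive parsing only happens in the process that actually iterates the data. The skeleton below illustrates the deferred-parsing pattern on its own; the class name and the placeholder loader are made up and are not PaddleDetection code.

import os

# Sketch of deferred parsing: the constructor only records paths, and the
# heavy annotation load happens when parse_dataset() is called explicitly.
class LazyKeypointDataset:
    def __init__(self, dataset_dir, anno_path):
        # Cheap: remember where the annotations live, load nothing yet.
        self.ann_file = os.path.join(dataset_dir, anno_path)
        self.img_ids = None

    def parse_dataset(self):
        # Expensive work (e.g. building COCO(self.ann_file)) belongs here,
        # so it runs once, on demand, rather than at construction time.
        if self.img_ids is None:
            self.img_ids = self._load_annotations()

    def _load_annotations(self):
        # Placeholder for the real COCO parsing.
        return []

    def __len__(self):
        assert self.img_ids is not None, "call parse_dataset() first"
        return len(self.img_ids)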
499,333
01.09.2021 10:37:46
-28,800
69e909b4c918bb6ce6a4a9486e040c054ee88b4a
assign on cpu
[ { "change_type": "MODIFY", "old_path": "ppdet/modeling/proposal_generator/target.py", "new_path": "ppdet/modeling/proposal_generator/target.py", "diff": "@@ -27,7 +27,8 @@ def rpn_anchor_target(anchors,\nbatch_size=1,\nignore_thresh=-1,\nis_crowd=None,\n- weights=[1., 1., 1., 1.]):\n+ weights=[1., 1., 1., 1.],\n+ assign_on_cpu=False):\ntgt_labels = []\ntgt_bboxes = []\ntgt_deltas = []\n@@ -37,7 +38,7 @@ def rpn_anchor_target(anchors,\n# Step1: match anchor and gt_bbox\nmatches, match_labels = label_box(\nanchors, gt_bbox, rpn_positive_overlap, rpn_negative_overlap, True,\n- ignore_thresh, is_crowd_i)\n+ ignore_thresh, is_crowd_i, assign_on_cpu)\n# Step2: sample anchor\nfg_inds, bg_inds = subsample_labels(match_labels, rpn_batch_size_per_im,\nrpn_fg_fraction, 0, use_random)\n@@ -70,7 +71,12 @@ def label_box(anchors,\nnegative_overlap,\nallow_low_quality,\nignore_thresh,\n- is_crowd=None):\n+ is_crowd=None,\n+ assign_on_cpu=False):\n+ if assign_on_cpu:\n+ with paddle.fluid.framework._dygraph_place_guard(paddle.CPUPlace()):\n+ iou = bbox_overlaps(gt_boxes, anchors)\n+ else:\niou = bbox_overlaps(gt_boxes, anchors)\nn_gt = gt_boxes.shape[0]\nif n_gt == 0 or is_crowd is None:\n@@ -176,7 +182,8 @@ def generate_proposal_target(rpn_rois,\nis_crowd=None,\nuse_random=True,\nis_cascade=False,\n- cascade_iou=0.5):\n+ cascade_iou=0.5,\n+ assign_on_cpu=False):\nrois_with_gt = []\ntgt_labels = []\n@@ -201,7 +208,8 @@ def generate_proposal_target(rpn_rois,\n# Step1: label bbox\nmatches, match_labels = label_box(bbox, gt_bbox, fg_thresh, bg_thresh,\n- False, ignore_thresh, is_crowd_i)\n+ False, ignore_thresh, is_crowd_i,\n+ assign_on_cpu)\n# Step2: sample bbox\nsampled_inds, sampled_gt_classes = sample_bbox(\nmatches, match_labels, gt_class, batch_size_per_im, fg_fraction,\n" }, { "change_type": "MODIFY", "old_path": "ppdet/modeling/proposal_generator/target_layer.py", "new_path": "ppdet/modeling/proposal_generator/target_layer.py", "diff": "@@ -22,6 +22,7 @@ import numpy as np\n@register\n@serializable\nclass RPNTargetAssign(object):\n+ __shared__ = ['assign_on_cpu']\n\"\"\"\nRPN targets assignment module\n@@ -48,6 +49,8 @@ class RPNTargetAssign(object):\nif the value is larger than zero.\nuse_random (bool): Use random sampling to choose foreground and\nbackground boxes, default true.\n+ assign_on_cpu (bool): In case the number of gt box is too large,\n+ compute IoU on CPU, default false.\n\"\"\"\ndef __init__(self,\n@@ -56,7 +59,8 @@ class RPNTargetAssign(object):\npositive_overlap=0.7,\nnegative_overlap=0.3,\nignore_thresh=-1.,\n- use_random=True):\n+ use_random=True,\n+ assign_on_cpu=False):\nsuper(RPNTargetAssign, self).__init__()\nself.batch_size_per_im = batch_size_per_im\nself.fg_fraction = fg_fraction\n@@ -64,6 +68,7 @@ class RPNTargetAssign(object):\nself.negative_overlap = negative_overlap\nself.ignore_thresh = ignore_thresh\nself.use_random = use_random\n+ self.assign_on_cpu = assign_on_cpu\ndef __call__(self, inputs, anchors):\n\"\"\"\n@@ -74,9 +79,17 @@ class RPNTargetAssign(object):\nis_crowd = inputs.get('is_crowd', None)\nbatch_size = len(gt_boxes)\ntgt_labels, tgt_bboxes, tgt_deltas = rpn_anchor_target(\n- anchors, gt_boxes, self.batch_size_per_im, self.positive_overlap,\n- self.negative_overlap, self.fg_fraction, self.use_random,\n- batch_size, self.ignore_thresh, is_crowd)\n+ anchors,\n+ gt_boxes,\n+ self.batch_size_per_im,\n+ self.positive_overlap,\n+ self.negative_overlap,\n+ self.fg_fraction,\n+ self.use_random,\n+ batch_size,\n+ self.ignore_thresh,\n+ is_crowd,\n+ 
assign_on_cpu=self.assign_on_cpu)\nnorm = self.batch_size_per_im * batch_size\nreturn tgt_labels, tgt_bboxes, tgt_deltas, norm\n@@ -84,7 +97,7 @@ class RPNTargetAssign(object):\n@register\nclass BBoxAssigner(object):\n- __shared__ = ['num_classes']\n+ __shared__ = ['num_classes', 'assign_on_cpu']\n\"\"\"\nRCNN targets assignment module\n@@ -113,6 +126,8 @@ class BBoxAssigner(object):\ncascade_iou (list[iou]): The list of overlap to select foreground and\nbackground of each stage, which is only used In Cascade RCNN.\nnum_classes (int): The number of class.\n+ assign_on_cpu (bool): In case the number of gt box is too large,\n+ compute IoU on CPU, default false.\n\"\"\"\ndef __init__(self,\n@@ -123,7 +138,8 @@ class BBoxAssigner(object):\nignore_thresh=-1.,\nuse_random=True,\ncascade_iou=[0.5, 0.6, 0.7],\n- num_classes=80):\n+ num_classes=80,\n+ assign_on_cpu=False):\nsuper(BBoxAssigner, self).__init__()\nself.batch_size_per_im = batch_size_per_im\nself.fg_fraction = fg_fraction\n@@ -133,6 +149,7 @@ class BBoxAssigner(object):\nself.use_random = use_random\nself.cascade_iou = cascade_iou\nself.num_classes = num_classes\n+ self.assign_on_cpu = assign_on_cpu\ndef __call__(self,\nrpn_rois,\n@@ -149,7 +166,7 @@ class BBoxAssigner(object):\nrpn_rois, gt_classes, gt_boxes, self.batch_size_per_im,\nself.fg_fraction, self.fg_thresh, self.bg_thresh, self.num_classes,\nself.ignore_thresh, is_crowd, self.use_random, is_cascade,\n- self.cascade_iou[stage])\n+ self.cascade_iou[stage], self.assign_on_cpu)\nrois = outs[0]\nrois_num = outs[-1]\n# tgt_labels, tgt_bboxes, tgt_gt_inds\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
assign on cpu (#4099)
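The assign_on_cpu flag added above exists because label assignment builds a dense n_gt by n_anchor IoU matrix between ground-truth boxes and anchors; for crowded images that single tensor can exhaust GPU memory even though it fits easily in host RAM, so only the IoU step is routed to the CPU. A device-agnostic sketch of the quantity being guarded is below, written in NumPy as a stand-in for ppdet's bbox_overlaps rather than the repository's implementation.

import numpy as np

# Stand-in for bbox_overlaps: dense pairwise IoU between two box sets,
# boxes given as (x1, y1, x2, y2) rows.
def pairwise_iou(boxes_a, boxes_b):
    area_a = (boxes_a[:, 2] - boxes_a[:, 0]) * (boxes_a[:, 3] - boxes_a[:, 1])
    area_b = (boxes_b[:, 2] - boxes_b[:, 0]) * (boxes_b[:, 3] - boxes_b[:, 1])
    lt = np.maximum(boxes_a[:, None, :2], boxes_b[None, :, :2])
    rb = np.minimum(boxes_a[:, None, 2:], boxes_b[None, :, 2:])
    wh = np.clip(rb - lt, 0, None)
    inter = wh[..., 0] * wh[..., 1]
    return inter / (area_a[:, None] + area_b[None, :] - inter + 1e-9)

gt = np.array([[0, 0, 10, 10]], dtype=np.float32)
anchors = np.array([[0, 0, 10, 10], [5, 5, 15, 15]], dtype=np.float32)
iou = pairwise_iou(gt, anchors)   # shape (1, 2): roughly 1.0 and 25 / 175

# The full matrix is n_gt x n_anchor float32 values; with e.g. 2000 gt boxes
# and 200k anchors that is about 1.6 GB for this one tensor, which is the
# scenario the assign_on_cpu switch is meant to keep off the GPU.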
499,333
01.09.2021 12:25:58
-28,800
e49827047e1aee55530edc59554c88965f6c77fa
refine set_device
[ { "change_type": "MODIFY", "old_path": "ppdet/modeling/proposal_generator/target.py", "new_path": "ppdet/modeling/proposal_generator/target.py", "diff": "@@ -74,8 +74,9 @@ def label_box(anchors,\nis_crowd=None,\nassign_on_cpu=False):\nif assign_on_cpu:\n- with paddle.fluid.framework._dygraph_place_guard(paddle.CPUPlace()):\n+ paddle.set_device(\"cpu\")\niou = bbox_overlaps(gt_boxes, anchors)\n+ paddle.set_device(\"gpu\")\nelse:\niou = bbox_overlaps(gt_boxes, anchors)\nn_gt = gt_boxes.shape[0]\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
refine set_device (#4102)
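The follow-up above swaps the private paddle.fluid.framework._dygraph_place_guard for the public paddle.set_device calls, at the cost of hard-coding the switch back to "gpu". When the previously active device is not known in advance, restoring whatever device was set before is a common alternative; the sketch below is one possible shape for such a guard, assuming paddle.get_device() and paddle.set_device(), and it is not part of the PaddleDetection codebase.

import contextlib
import paddle

@contextlib.contextmanager
def device_guard(device="cpu"):
    # Remember the currently active device (e.g. "gpu:0" or "cpu"),
    # switch for the duration of the block, then restore it.
    prev = paddle.get_device()
    try:
        paddle.set_device(device)
        yield
    finally:
        paddle.set_device(prev)

# Usage sketch: run one memory-heavy op on the CPU, then continue as before.
# with device_guard("cpu"):
#     iou = bbox_overlaps(gt_boxes, anchors)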
499,395
02.09.2021 14:21:41
-28,800
90d4a0659bf3e3b806b8f7fa7e081142715b1e2c
modify VOCDataSet and default value of allow_empty
[ { "change_type": "MODIFY", "old_path": "ppdet/data/source/coco.py", "new_path": "ppdet/data/source/coco.py", "diff": "@@ -48,7 +48,7 @@ class COCODataSet(DetDataset):\ndata_fields=['image'],\nsample_num=-1,\nload_crowd=False,\n- allow_empty=False,\n+ allow_empty=True,\nempty_ratio=1.):\nsuper(COCODataSet, self).__init__(dataset_dir, image_dir, anno_path,\ndata_fields, sample_num)\n@@ -243,7 +243,7 @@ class COCODataSet(DetDataset):\nbreak\nassert ct > 0, 'not found any coco record in %s' % (anno_path)\nlogger.debug('{} samples in file {}'.format(ct, anno_path))\n- if len(empty_records) > 0:\n+ if self.allow_empty and len(empty_records) > 0:\nempty_records = self._sample_empty(empty_records, len(records))\nrecords += empty_records\nself.roidbs = records\n" }, { "change_type": "MODIFY", "old_path": "ppdet/data/source/voc.py", "new_path": "ppdet/data/source/voc.py", "diff": "@@ -55,7 +55,7 @@ class VOCDataSet(DetDataset):\ndata_fields=['image'],\nsample_num=-1,\nlabel_list=None,\n- allow_empty=False,\n+ allow_empty=True,\nempty_ratio=1.):\nsuper(VOCDataSet, self).__init__(\ndataset_dir=dataset_dir,\n@@ -131,11 +131,13 @@ class VOCDataSet(DetDataset):\n'Illegal width: {} or height: {} in annotation, '\n'and {} will be ignored'.format(im_w, im_h, xml_file))\ncontinue\n- gt_bbox = []\n- gt_class = []\n- gt_score = []\n- difficult = []\n- for i, obj in enumerate(objs):\n+\n+ num_bbox, i = len(objs), 0\n+ gt_bbox = np.zeros((num_bbox, 4), dtype=np.float32)\n+ gt_class = np.zeros((num_bbox, 1), dtype=np.int32)\n+ gt_score = np.zeros((num_bbox, 1), dtype=np.float32)\n+ difficult = np.zeros((num_bbox, 1), dtype=np.int32)\n+ for obj in objs:\ncname = obj.find('name').text\n# user dataset may not contain difficult field\n@@ -152,19 +154,20 @@ class VOCDataSet(DetDataset):\nx2 = min(im_w - 1, x2)\ny2 = min(im_h - 1, y2)\nif x2 > x1 and y2 > y1:\n- gt_bbox.append([x1, y1, x2, y2])\n- gt_class.append([cname2cid[cname]])\n- gt_score.append([1.])\n- difficult.append([_difficult])\n+ gt_bbox[i, :] = [x1, y1, x2, y2]\n+ gt_class[i, 0] = cname2cid[cname]\n+ gt_score[i, 0] = 1.\n+ difficult[i, 0] = _difficult\n+ i += 1\nelse:\nlogger.warning(\n'Found an invalid bbox in annotations: xml_file: {}'\n', x1: {}, y1: {}, x2: {}, y2: {}.'.format(\nxml_file, x1, y1, x2, y2))\n- gt_bbox = np.array(gt_bbox).astype('float32')\n- gt_class = np.array(gt_class).astype('int32')\n- gt_score = np.array(gt_score).astype('float32')\n- difficult = np.array(difficult).astype('int32')\n+ gt_bbox = gt_bbox[:i, :]\n+ gt_class = gt_class[:i, :]\n+ gt_score = gt_score[:i, :]\n+ difficult = difficult[:i, :]\nvoc_rec = {\n'im_file': img_file,\n@@ -193,7 +196,7 @@ class VOCDataSet(DetDataset):\nbreak\nassert ct > 0, 'not found any voc record in %s' % (self.anno_path)\nlogger.debug('{} samples in file {}'.format(ct, anno_path))\n- if len(empty_records) > 0:\n+ if self.allow_empty and len(empty_records) > 0:\nempty_records = self._sample_empty(empty_records, len(records))\nrecords += empty_records\nself.roidbs, self.cname2cid = records, cname2cid\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
modify VOCDataSet and default value of allow_empty (#4096)
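Besides flipping the allow_empty default, the VOC change above replaces Python list appends with NumPy arrays preallocated to the number of object entries, filled only for valid boxes and then truncated. The standalone sketch below shows just that preallocate-then-truncate pattern; the helper name and example boxes are made up for illustration.

import numpy as np

# Size the array to the number of candidate objects, keep only well-formed
# boxes, then slice off the unused tail.
def collect_valid_boxes(candidates, im_w, im_h):
    n = len(candidates)
    gt_bbox = np.zeros((n, 4), dtype=np.float32)
    i = 0
    for x1, y1, x2, y2 in candidates:
        # Clip to the image and drop degenerate boxes.
        x1, y1 = max(0, x1), max(0, y1)
        x2, y2 = min(im_w - 1, x2), min(im_h - 1, y2)
        if x2 > x1 and y2 > y1:
            gt_bbox[i, :] = [x1, y1, x2, y2]
            i += 1
    return gt_bbox[:i, :]

# Example: the second box is degenerate and gets dropped.
boxes = collect_valid_boxes([(10, 10, 50, 60), (30, 30, 20, 20)], 100, 100)
assert boxes.shape == (1, 4)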
499,395
03.09.2021 16:27:10
-28,800
aff7391f685915c1e589c4db0f8125d4e5bb45af
add image shape for TestReader of s2anet
[ { "change_type": "MODIFY", "old_path": "configs/dota/_base_/s2anet_reader.yml", "new_path": "configs/dota/_base_/s2anet_reader.yml", "diff": "@@ -29,6 +29,8 @@ EvalReader:\nTestReader:\n+ inputs_def:\n+ image_shape: [3, 1024, 1024]\nsample_transforms:\n- Decode: {}\n- Resize: {interp: 2, target_size: [1024, 1024], keep_ratio: True}\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
add image shape for TestReader of s2anet (#4109)
499,301
05.09.2021 10:20:42
-28,800
339fe7168a8982588c2fed10bd4cea4ea9947565
update drop_path_rate
[ { "change_type": "MODIFY", "old_path": "configs/faster_rcnn/_base_/faster_rcnn_swin_transformer.yml", "new_path": "configs/faster_rcnn/_base_/faster_rcnn_swin_transformer.yml", "diff": "@@ -13,10 +13,9 @@ SwinTransformer:\nnum_heads: [3, 6, 12, 24]\nwindow_size: 7\nape: false\n- drop_path_rate: 0.1\n+ drop_path_rate: 0.2\npatch_norm: true\nout_indices: [0,1,2,3]\n- drop_path_rate: 0.1\npretrained: https://paddledet.bj.bcebos.com/models/pretrained/swin_tiny_patch4_window7_224.pdparams\nFPN:\n" }, { "change_type": "MODIFY", "old_path": "ppdet/modeling/backbones/swin_transformer.py", "new_path": "ppdet/modeling/backbones/swin_transformer.py", "diff": "@@ -673,9 +673,6 @@ class SwinTransformer(nn.Layer):\nelse: #model in local path\npath = pretrained\nself.set_state_dict(paddle.load(path))\n- print('###################################################')\n- print('###############Success load the mode###############')\n- print('###################################################')\ndef _freeze_stages(self):\nif self.frozen_stages >= 0:\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
update drop_path_rate (#4101)
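Besides raising the value to 0.2, the diff above removes a duplicated drop_path_rate key from the YAML. With PyYAML-style loading, duplicate mapping keys are not an error and the last occurrence silently wins, so a stray second key can quietly override the one you meant to edit; whether PaddleDetection's own loader behaves identically is an assumption here. A quick demonstration, assuming PyYAML is available:

import yaml

snippet = """
drop_path_rate: 0.2
out_indices: [0, 1, 2, 3]
drop_path_rate: 0.1
"""
cfg = yaml.safe_load(snippet)
# The later duplicate silently overrides the earlier value.
assert cfg["drop_path_rate"] == 0.1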
499,299
07.09.2021 20:02:22
-28,800
00beb736c0091347957be826bb9aef2bd79e04bb
fix gather op out_of_range_error in Solov2 cpu inference
[ { "change_type": "MODIFY", "old_path": "ppdet/modeling/heads/solov2_head.py", "new_path": "ppdet/modeling/heads/solov2_head.py", "diff": "@@ -488,6 +488,9 @@ class SOLOv2Head(nn.Layer):\nfill_value=self.segm_strides[_ind],\ndtype=\"int32\"))\nstrides = paddle.concat(strides)\n+ strides = paddle.concat(\n+ [strides, paddle.zeros(\n+ shape=[1], dtype='int32')])\nstrides = paddle.gather(strides, index=inds[:, 0])\n# mask encoding.\n" }, { "change_type": "MODIFY", "old_path": "static/ppdet/modeling/anchor_heads/solov2_head.py", "new_path": "static/ppdet/modeling/anchor_heads/solov2_head.py", "diff": "@@ -379,6 +379,9 @@ class SOLOv2Head(object):\ndtype=\"float32\",\nvalue=self.segm_strides[_ind]))\nstrides = fluid.layers.concat(strides)\n+ strides = fluid.layers.concat(\n+ [strides, fluid.layers.zeros(\n+ shape=[1], dtype='float32')])\nstrides = fluid.layers.gather(strides, index=inds[:, 0])\n# mask encoding.\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
fix gather op out_of_range_error in Solov2 cpu inference (#4140)
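The one-line fix above pads the strides tensor with a single zero before the gather. When no instances survive filtering, the index tensor can hold a sentinel that points one past the real data, and the CPU gather kernel rejects that as out of range; with one dummy element appended, the sentinel resolves to a harmless zero instead. A trimmed-down version of the pattern, assuming a Paddle install (the tensors are toy values, not SOLOv2 outputs):

import paddle

# Make a possibly out-of-range sentinel index safe by appending one dummy
# element before gathering. Mirrors the shape of the fix, not the actual
# SOLOv2 head code.
strides = paddle.to_tensor([8, 8, 16, 32], dtype='int32')
inds = paddle.to_tensor([4], dtype='int32')   # sentinel: one past the end

padded = paddle.concat([strides, paddle.zeros(shape=[1], dtype='int32')])
picked = paddle.gather(padded, index=inds)    # index 4 now hits the padding 0
assert int(picked[0]) == 0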
499,333
08.09.2021 19:35:17
-28,800
f2ab17c056c4832327fa59796613c3d272bfda97
revert default value of allow_empty
[ { "change_type": "MODIFY", "old_path": "ppdet/data/source/coco.py", "new_path": "ppdet/data/source/coco.py", "diff": "@@ -48,7 +48,7 @@ class COCODataSet(DetDataset):\ndata_fields=['image'],\nsample_num=-1,\nload_crowd=False,\n- allow_empty=True,\n+ allow_empty=False,\nempty_ratio=1.):\nsuper(COCODataSet, self).__init__(dataset_dir, image_dir, anno_path,\ndata_fields, sample_num)\n" }, { "change_type": "MODIFY", "old_path": "ppdet/data/source/voc.py", "new_path": "ppdet/data/source/voc.py", "diff": "@@ -55,7 +55,7 @@ class VOCDataSet(DetDataset):\ndata_fields=['image'],\nsample_num=-1,\nlabel_list=None,\n- allow_empty=True,\n+ allow_empty=False,\nempty_ratio=1.):\nsuper(VOCDataSet, self).__init__(\ndataset_dir=dataset_dir,\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
revert default value of allow_empty (#4150)
499,339
09.09.2021 11:01:49
-28,800
197b5a9b348e463958715c83cfa12ca840dd29ed
[dev] add bifpn in necks
[ { "change_type": "MODIFY", "old_path": "ppdet/modeling/necks/__init__.py", "new_path": "ppdet/modeling/necks/__init__.py", "diff": "@@ -18,6 +18,7 @@ from . import hrfpn\nfrom . import ttf_fpn\nfrom . import centernet_fpn\nfrom . import pan\n+from . import bifpn\nfrom .fpn import *\nfrom .yolo_fpn import *\n@@ -26,3 +27,4 @@ from .ttf_fpn import *\nfrom .centernet_fpn import *\nfrom .blazeface_fpn import *\nfrom .pan import *\n+from .bifpn import *\n" }, { "change_type": "ADD", "old_path": null, "new_path": "ppdet/modeling/necks/bifpn.py", "diff": "+# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n+import paddle\n+import paddle.nn as nn\n+import paddle.nn.functional as F\n+from paddle import ParamAttr\n+from paddle.nn.initializer import Constant\n+\n+from ppdet.core.workspace import register, serializable\n+from ppdet.modeling.layers import ConvNormLayer\n+from ..shape_spec import ShapeSpec\n+\n+__all__ = ['BiFPN']\n+\n+\n+class SeparableConvLayer(nn.Layer):\n+ def __init__(self,\n+ in_channels,\n+ out_channels=None,\n+ kernel_size=3,\n+ norm_type='bn',\n+ norm_groups=32,\n+ act='swish'):\n+ super(SeparableConvLayer, self).__init__()\n+ assert norm_type in ['bn', 'sync_bn', 'gn', None]\n+ assert act in ['swish', 'relu', None]\n+\n+ self.in_channels = in_channels\n+ if out_channels is None:\n+ self.out_channels = self.in_channels\n+ self.norm_type = norm_type\n+ self.norm_groups = norm_groups\n+ self.depthwise_conv = nn.Conv2D(\n+ in_channels,\n+ in_channels,\n+ kernel_size,\n+ padding=kernel_size // 2,\n+ groups=in_channels,\n+ bias_attr=False)\n+ self.pointwise_conv = nn.Conv2D(in_channels, self.out_channels, 1)\n+\n+ # norm type\n+ if self.norm_type == 'bn':\n+ self.norm = nn.BatchNorm2D(self.out_channels)\n+ elif self.norm_type == 'sync_bn':\n+ self.norm = nn.SyncBatchNorm(self.out_channels)\n+ elif self.norm_type == 'gn':\n+ self.norm = nn.GroupNorm(\n+ num_groups=self.norm_groups, num_channels=self.out_channels)\n+\n+ # activation\n+ if act == 'swish':\n+ self.act = nn.Swish()\n+ elif act == 'relu':\n+ self.act = nn.ReLU()\n+\n+ def forward(self, x):\n+ if self.act is not None:\n+ x = self.act(x)\n+ out = self.depthwise_conv(x)\n+ out = self.pointwise_conv(out)\n+ if self.norm_type is not None:\n+ out = self.norm(out)\n+ return out\n+\n+\n+class BiFPNCell(nn.Layer):\n+ def __init__(self,\n+ channels=256,\n+ num_levels=5,\n+ eps=1e-5,\n+ use_weighted_fusion=True,\n+ kernel_size=3,\n+ norm_type='bn',\n+ norm_groups=32,\n+ act='swish'):\n+ super(BiFPNCell, self).__init__()\n+ self.channels = channels\n+ self.num_levels = num_levels\n+ self.eps = eps\n+ self.use_weighted_fusion = use_weighted_fusion\n+\n+ # up\n+ self.conv_up = nn.LayerList([\n+ SeparableConvLayer(\n+ self.channels,\n+ kernel_size=kernel_size,\n+ norm_type=norm_type,\n+ norm_groups=norm_groups,\n+ act=act) for _ in range(self.num_levels - 1)\n+ ])\n+ # down\n+ self.conv_down = nn.LayerList([\n+ SeparableConvLayer(\n+ self.channels,\n+ 
kernel_size=kernel_size,\n+ norm_type=norm_type,\n+ norm_groups=norm_groups,\n+ act=act) for _ in range(self.num_levels - 1)\n+ ])\n+\n+ if self.use_weighted_fusion:\n+ self.up_weights = self.create_parameter(\n+ shape=[self.num_levels - 1, 2],\n+ attr=ParamAttr(initializer=Constant(1.)))\n+ self.down_weights = self.create_parameter(\n+ shape=[self.num_levels - 1, 3],\n+ attr=ParamAttr(initializer=Constant(1.)))\n+\n+ def _feature_fusion_cell(self,\n+ conv_layer,\n+ lateral_feat,\n+ sampling_feat,\n+ route_feat=None,\n+ weights=None):\n+ if self.use_weighted_fusion:\n+ weights = F.relu(weights)\n+ weights = weights / (weights.sum() + self.eps)\n+ if route_feat is not None:\n+ out_feat = weights[0] * lateral_feat + \\\n+ weights[1] * sampling_feat + \\\n+ weights[2] * route_feat\n+ else:\n+ out_feat = weights[0] * lateral_feat + \\\n+ weights[1] * sampling_feat\n+ else:\n+ if route_feat is not None:\n+ out_feat = lateral_feat + sampling_feat + route_feat\n+ else:\n+ out_feat = lateral_feat + sampling_feat\n+\n+ out_feat = conv_layer(out_feat)\n+ return out_feat\n+\n+ def forward(self, feats):\n+ # feats: [P3 - P7]\n+ lateral_feats = []\n+\n+ # up\n+ up_feature = feats[-1]\n+ for i, feature in enumerate(feats[::-1]):\n+ if i == 0:\n+ lateral_feats.append(feature)\n+ else:\n+ shape = paddle.shape(feature)\n+ up_feature = F.interpolate(\n+ up_feature, size=[shape[2], shape[3]])\n+ lateral_feature = self._feature_fusion_cell(\n+ self.conv_up[i - 1],\n+ feature,\n+ up_feature,\n+ weights=self.up_weights[i - 1]\n+ if self.use_weighted_fusion else None)\n+ lateral_feats.append(lateral_feature)\n+ up_feature = lateral_feature\n+\n+ out_feats = []\n+ # down\n+ down_feature = lateral_feats[-1]\n+ for i, (lateral_feature,\n+ route_feature) in enumerate(zip(lateral_feats[::-1], feats)):\n+ if i == 0:\n+ out_feats.append(lateral_feature)\n+ else:\n+ down_feature = F.max_pool2d(down_feature, 3, 2, 1)\n+ if i == len(feats) - 1:\n+ route_feature = None\n+ weights = self.down_weights[\n+ i - 1][:2] if self.use_weighted_fusion else None\n+ else:\n+ weights = self.down_weights[\n+ i - 1] if self.use_weighted_fusion else None\n+ out_feature = self._feature_fusion_cell(\n+ self.conv_down[i - 1],\n+ lateral_feature,\n+ down_feature,\n+ route_feature,\n+ weights=weights)\n+ out_feats.append(out_feature)\n+ down_feature = out_feature\n+\n+ return out_feats\n+\n+\n+@register\n+@serializable\n+class BiFPN(nn.Layer):\n+ \"\"\"\n+ Bidirectional Feature Pyramid Network, see https://arxiv.org/abs/1911.09070\n+\n+ Args:\n+ in_channels (list[int]): input channels of each level which can be\n+ derived from the output shape of backbone by from_config.\n+ out_channel (int): output channel of each level.\n+ num_extra_levels (int): the number of extra stages added to the last level.\n+ default: 2\n+ fpn_strides (List): The stride of each level.\n+ num_stacks (int): the number of stacks for BiFPN, default: 1.\n+ use_weighted_fusion (bool): use weighted feature fusion in BiFPN, default: True.\n+ norm_type (string|None): the normalization type in BiFPN module. If\n+ norm_type is None, norm will not be used after conv and if\n+ norm_type is string, bn, gn, sync_bn are available. 
default: bn.\n+ norm_groups (int): if you use gn, set this param.\n+ act (string|None): the activation function of BiFPN.\n+ \"\"\"\n+\n+ def __init__(self,\n+ in_channels=(512, 1024, 2048),\n+ out_channel=256,\n+ num_extra_levels=2,\n+ fpn_strides=[8, 16, 32, 64, 128],\n+ num_stacks=1,\n+ use_weighted_fusion=True,\n+ norm_type='bn',\n+ norm_groups=32,\n+ act='swish'):\n+ super(BiFPN, self).__init__()\n+ assert num_stacks > 0, \"The number of stacks of BiFPN is at least 1.\"\n+ assert norm_type in ['bn', 'sync_bn', 'gn', None]\n+ assert act in ['swish', 'relu', None]\n+ assert num_extra_levels >= 0, \\\n+ \"The `num_extra_levels` must be non negative(>=0).\"\n+\n+ self.in_channels = in_channels\n+ self.out_channel = out_channel\n+ self.num_extra_levels = num_extra_levels\n+ self.num_stacks = num_stacks\n+ self.use_weighted_fusion = use_weighted_fusion\n+ self.norm_type = norm_type\n+ self.norm_groups = norm_groups\n+ self.act = act\n+ self.num_levels = len(self.in_channels) + self.num_extra_levels\n+ if len(fpn_strides) != self.num_levels:\n+ for i in range(self.num_extra_levels):\n+ fpn_strides += [fpn_strides[-1] * 2]\n+ self.fpn_strides = fpn_strides\n+\n+ self.lateral_convs = nn.LayerList()\n+ for in_c in in_channels:\n+ self.lateral_convs.append(\n+ ConvNormLayer(in_c, self.out_channel, 1, 1))\n+ if self.num_extra_levels > 0:\n+ self.extra_convs = nn.LayerList()\n+ for i in range(self.num_extra_levels):\n+ if i == 0:\n+ self.extra_convs.append(\n+ ConvNormLayer(self.in_channels[-1], self.out_channel, 3,\n+ 2))\n+ else:\n+ self.extra_convs.append(nn.MaxPool2D(3, 2, 1))\n+\n+ self.bifpn_cells = nn.LayerList()\n+ for i in range(self.num_stacks):\n+ self.bifpn_cells.append(\n+ BiFPNCell(\n+ self.out_channel,\n+ self.num_levels,\n+ use_weighted_fusion=self.use_weighted_fusion,\n+ norm_type=self.norm_type,\n+ norm_groups=self.norm_groups,\n+ act=self.act))\n+\n+ @classmethod\n+ def from_config(cls, cfg, input_shape):\n+ return {\n+ 'in_channels': [i.channels for i in input_shape],\n+ 'fpn_strides': [i.stride for i in input_shape]\n+ }\n+\n+ @property\n+ def out_shape(self):\n+ return [\n+ ShapeSpec(\n+ channels=self.out_channel, stride=s) for s in self.fpn_strides\n+ ]\n+\n+ def forward(self, feats):\n+ assert len(feats) == len(self.in_channels)\n+ fpn_feats = []\n+ for conv_layer, feature in zip(self.lateral_convs, feats):\n+ fpn_feats.append(conv_layer(feature))\n+ if self.num_extra_levels > 0:\n+ feat = feats[-1]\n+ for conv_layer in self.extra_convs:\n+ feat = conv_layer(feat)\n+ fpn_feats.append(feat)\n+\n+ for bifpn_cell in self.bifpn_cells:\n+ fpn_feats = bifpn_cell(fpn_feats)\n+ return fpn_feats\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
[dev] add bifpn in necks (#4148)
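The BiFPNCell added above uses the paper's fast normalized fusion: each incoming feature map gets a learnable scalar weight, the weights pass through ReLU and are normalized by their sum plus a small epsilon, and the weighted sum is then refined by a separable convolution (https://arxiv.org/abs/1911.09070). A minimal NumPy rendering of just the fusion step, leaving out the convolutions and the top-down/bottom-up wiring:

import numpy as np

# Fast normalized fusion: non-negative weights normalized by their sum,
# cheaper than a softmax over the inputs.
def fast_normalized_fusion(feats, weights, eps=1e-5):
    w = np.maximum(weights, 0.0)      # ReLU keeps every weight >= 0
    w = w / (w.sum() + eps)           # normalize instead of softmax
    return sum(wi * f for wi, f in zip(w, feats))

f_lateral = np.ones((1, 256, 32, 32), dtype=np.float32)
f_upsampled = 2 * np.ones((1, 256, 32, 32), dtype=np.float32)
fused = fast_normalized_fusion([f_lateral, f_upsampled], np.array([1.0, 1.0]))
# With equal weights the result is (almost exactly) the mean of the inputs.
assert abs(float(fused[0, 0, 0, 0]) - 1.5) < 1e-3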
499,339
14.09.2021 15:56:15
-28,800
b19807f852b337e61d20e53ac1fa5e0bc88464b7
[fp16] fix bug when use drop block in fp16
[ { "change_type": "MODIFY", "old_path": "ppdet/modeling/layers.py", "new_path": "ppdet/modeling/layers.py", "diff": "@@ -279,7 +279,7 @@ class DropBlock(nn.Layer):\nfor s in shape:\ngamma *= s / (s - self.block_size + 1)\n- matrix = paddle.cast(paddle.rand(x.shape, x.dtype) < gamma, x.dtype)\n+ matrix = paddle.cast(paddle.rand(x.shape) < gamma, x.dtype)\nmask_inv = F.max_pool2d(\nmatrix,\nself.block_size,\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
[fp16] fix bug when use drop block in fp16 (#4178)
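The fp16 fix above drops the explicit dtype argument from paddle.rand: under mixed-precision training x.dtype is float16, which the uniform sampler apparently does not accept in the Paddle versions targeted here, so the random seed mask is drawn in the default float32 and only the result of the comparison is cast back to the activation's dtype. A trimmed illustration of the masking step, with a float32 stand-in activation:

import paddle

# Fp16-safe DropBlock seed mask: sample in default precision, compare against
# gamma, then cast the {0, 1} mask to the activation's dtype.
x = paddle.ones([2, 8, 16, 16], dtype='float32')   # stand-in activation
gamma = 0.1
matrix = paddle.cast(paddle.rand(x.shape) < gamma, x.dtype)
assert matrix.dtype == x.dtype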