author (int64, 658 to 755k) | date (string, length 19) | timezone (int64, -46,800 to 43.2k) | hash (string, length 40) | message (string, length 5 to 490) | mods (list) | language (string, 20 classes) | license (string, 3 classes) | repo (string, length 5 to 68) | original_message (string, length 12 to 491) |
---|---|---|---|---|---|---|---|---|---|
499,298 | 13.02.2021 23:21:04 | -28,800 | 28e5a3abc6999b00ed36dc54ba74937223a79b29 | fix RCNNBox, test=dygraph |
[
{
"change_type": "MODIFY",
"old_path": "dygraph/ppdet/modeling/layers.py",
"new_path": "dygraph/ppdet/modeling/layers.py",
"diff": "@@ -318,7 +318,8 @@ class RCNNBox(object):\norigin_shape = paddle.concat(origin_shape_list)\n- # [N, C*4]\n+ # bbox_pred.shape: [N, C*4]\n+ # C=num_classes in faster/mask rcnn(bbox_head), C=1 in cascade rcnn(cascade_head)\nbbox = paddle.concat(roi)\nif bbox.shape[0] == 0:\nbbox = paddle.zeros([0, bbox_pred.shape[1]], dtype='float32')\n@@ -326,10 +327,9 @@ class RCNNBox(object):\nbbox = delta2bbox(bbox_pred, bbox, self.prior_box_var)\nscores = cls_prob[:, :-1]\n- # [N*C, 4]\n-\n- bbox_num_class = bbox.shape[1] // 4\n- bbox = paddle.reshape(bbox, [-1, bbox_num_class, 4])\n+ # bbox.shape: [N, C, 4]\n+ # bbox.shape[1] must be equal to scores.shape[1]\n+ bbox_num_class = bbox.shape[1]\nif bbox_num_class == 1:\nbbox = paddle.tile(bbox, [1, self.num_classes, 1])\n"
}
] | Python | Apache License 2.0 | paddlepaddle/paddledetection | fix RCNNBox, test=dygraph (#2215) |
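The diff above mainly documents tensor shapes: `delta2bbox` already returns boxes as `[N, C, 4]`, so the old reshape from `C*4` is dropped and only the class-agnostic case (`C == 1`, cascade head) is tiled up to `num_classes`. A minimal sketch of that shape handling, written with numpy instead of paddle purely for illustration:

```python
import numpy as np

num_classes = 80                                  # assumed class count
bbox = np.zeros((100, 1, 4), dtype="float32")     # decoded boxes, C == 1 (cascade head)
scores = np.zeros((100, num_classes), dtype="float32")

bbox_num_class = bbox.shape[1]                    # must end up equal to scores.shape[1]
if bbox_num_class == 1:
    # class-agnostic regression: repeat the single box for every class
    bbox = np.tile(bbox, (1, num_classes, 1))

assert bbox.shape == (100, num_classes, 4)
```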
499,315 | 15.02.2021 19:19:29 | -28,800 | edbad8ed3aeb4a4ccd7730dc7d2faba71a6a02a2 | replace plain_formatter |
[
{
"change_type": "MODIFY",
"old_path": "dygraph/ppdet/utils/logger.py",
"new_path": "dygraph/ppdet/utils/logger.py",
"diff": "@@ -65,7 +65,7 @@ def setup_logger(name=\"ppdet\", output=None):\nos.makedirs(os.path.dirname(filename))\nfh = logging.FileHandler(filename, mode='a')\nfh.setLevel(logging.DEBUG)\n- fh.setFormatter(plain_formatter)\n+ fh.setFormatter(logging.Formatter())\nlogger.addHandler(fh)\nlogger_initialized.append(name)\nreturn logger\n"
}
] | Python | Apache License 2.0 | paddlepaddle/paddledetection | replace plain_formatter (#2212) |
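For reference, a self-contained sketch of the handler setup after this change, using only the standard library: the file handler now gets a bare `logging.Formatter()`, whose default format is just the message (logger name and log path below are hypothetical).

```python
import logging

logger = logging.getLogger("ppdet_example")        # hypothetical logger name
fh = logging.FileHandler("workdir.log", mode="a")  # hypothetical log path
fh.setLevel(logging.DEBUG)
fh.setFormatter(logging.Formatter())               # default format: '%(message)s'
logger.addHandler(fh)
logger.warning("loss diverged")                    # written to workdir.log verbatim
```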
499,298 | 17.02.2021 18:19:07 | -28,800 | 5b6bebf23e223aef97e8a1f49f0e9e210ca981e4 | fix dcn r101vd config, test=dygraph |
[
{
"change_type": "MODIFY",
"old_path": "dygraph/configs/dcn/faster_rcnn_dcn_r101_vd_fpn_1x_coco.yml",
"new_path": "dygraph/configs/dcn/faster_rcnn_dcn_r101_vd_fpn_1x_coco.yml",
"diff": "_BASE_: [\n'faster_rcnn_dcn_r50_fpn_1x_coco.yml',\n]\n-pretrain_weights: https://paddle-imagenet-models-name.bj.bcebos.com/ResNet101_pretrained.tar\n-weights: output/faster_rcnn_dcn_r101_fpn_1x_coco/model_final\n+pretrain_weights: https://paddle-imagenet-models-name.bj.bcebos.com/ResNet101_vd_pretrained.tar\n+weights: output/faster_rcnn_dcn_r101_vd_fpn_1x_coco/model_final\nResNet:\n# index 0 stands for res2\n"
},
{
"change_type": "MODIFY",
"old_path": "dygraph/configs/dcn/mask_rcnn_dcn_r101_vd_fpn_1x_coco.yml",
"new_path": "dygraph/configs/dcn/mask_rcnn_dcn_r101_vd_fpn_1x_coco.yml",
"diff": "_BASE_: [\n'mask_rcnn_dcn_r50_fpn_1x_coco.yml',\n]\n-pretrain_weights: https://paddle-imagenet-models-name.bj.bcebos.com/ResNet101_pretrained.tar\n-weights: output/mask_rcnn_dcn_r101_fpn_1x_coco/model_final\n+pretrain_weights: https://paddle-imagenet-models-name.bj.bcebos.com/ResNet101_vd_pretrained.tar\n+weights: output/mask_rcnn_dcn_r101_vd_fpn_1x_coco/model_final\nResNet:\n# index 0 stands for res2\n"
}
] | Python | Apache License 2.0 | paddlepaddle/paddledetection | fix dcn r101vd config, test=dygraph (#2223) |
499,395 | 20.02.2021 19:59:32 | -28,800 | 5164dfd267bff8f598349475e74a8d7c92f377ec | fix icafe 21567: DIoULossYolo passing parameters problem |
[
{
"change_type": "MODIFY",
"old_path": "ppdet/modeling/losses/diou_loss_yolo.py",
"new_path": "ppdet/modeling/losses/diou_loss_yolo.py",
"diff": "@@ -54,7 +54,8 @@ class DiouLossYolo(IouLoss):\nanchors,\ndownsample_ratio,\nbatch_size,\n- eps=1.e-10):\n+ eps=1.e-10,\n+ **kwargs):\n'''\nArgs:\nx | y | w | h ([Variables]): the output of yolov3 for encoded x|y|w|h\n"
},
{
"change_type": "MODIFY",
"old_path": "ppdet/modeling/losses/yolo_loss.py",
"new_path": "ppdet/modeling/losses/yolo_loss.py",
"diff": "@@ -180,9 +180,19 @@ class YOLOv3Loss(object):\nloss_h = fluid.layers.abs(h - th) * tscale_tobj\nloss_h = fluid.layers.reduce_sum(loss_h, dim=[1, 2, 3])\nif self._iou_loss is not None:\n- loss_iou = self._iou_loss(x, y, w, h, tx, ty, tw, th, anchors,\n- downsample, self._train_batch_size,\n- scale_x_y)\n+ loss_iou = self._iou_loss(\n+ x,\n+ y,\n+ w,\n+ h,\n+ tx,\n+ ty,\n+ tw,\n+ th,\n+ anchors,\n+ downsample,\n+ self._train_batch_size,\n+ scale_x_y=scale_x_y)\nloss_iou = loss_iou * tscale_tobj\nloss_iou = fluid.layers.reduce_sum(loss_iou, dim=[1, 2, 3])\nloss_ious.append(fluid.layers.reduce_mean(loss_iou))\n"
}
] | Python | Apache License 2.0 | paddlepaddle/paddledetection | fix icafe 21567: DIoULossYolo passing parameters problem (#2159) |
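The fix is a signature change: the DIoU loss gains a `**kwargs` catch-all so the YOLOv3 loss can always pass `scale_x_y` as a keyword, whether or not the configured IoU-loss variant uses it. A toy sketch of the pattern (function names and bodies are illustrative, not the repository's code):

```python
def iou_loss(x, y, w, h, anchors, downsample_ratio, batch_size,
             eps=1e-10, scale_x_y=1.0):
    return 0.0                       # placeholder body

def diou_loss(x, y, w, h, anchors, downsample_ratio, batch_size,
              eps=1e-10, **kwargs):  # extra keywords (e.g. scale_x_y) are absorbed
    return 0.0                       # placeholder body

# the caller no longer has to know which variant is configured:
for loss_fn in (iou_loss, diou_loss):
    loss_fn(0, 0, 0, 0, [[10, 13]], 32, 8, scale_x_y=1.05)
```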
499,304 | 20.02.2021 20:09:15 | -28,800 | 79b1d807dbc55bb336fb95f9244165cd03e75761 | add attr pixel_offset ingenerate_proposals,distribute_fpn_proposals, test=dygraph |
[
{
"change_type": "MODIFY",
"old_path": "dygraph/ppdet/modeling/ops.py",
"new_path": "dygraph/ppdet/modeling/ops.py",
"diff": "@@ -451,6 +451,7 @@ def distribute_fpn_proposals(fpn_rois,\nmax_level,\nrefer_level,\nrefer_scale,\n+ pixel_offset=False,\nrois_num=None,\nname=None):\n\"\"\"\n@@ -525,7 +526,8 @@ def distribute_fpn_proposals(fpn_rois,\nif in_dygraph_mode():\nassert rois_num is not None, \"rois_num should not be None in dygraph mode.\"\nattrs = ('min_level', min_level, 'max_level', max_level, 'refer_level',\n- refer_level, 'refer_scale', refer_scale)\n+ refer_level, 'refer_scale', refer_scale, 'pixel_offset',\n+ pixel_offset)\nmulti_rois, restore_ind, rois_num_per_level = core.ops.distribute_fpn_proposals(\nfpn_rois, rois_num, num_lvl, num_lvl, *attrs)\nreturn multi_rois, restore_ind, rois_num_per_level\n@@ -564,7 +566,8 @@ def distribute_fpn_proposals(fpn_rois,\n'min_level': min_level,\n'max_level': max_level,\n'refer_level': refer_level,\n- 'refer_scale': refer_scale\n+ 'refer_scale': refer_scale,\n+ 'pixel_offset': pixel_offset\n})\nreturn multi_rois, restore_ind, rois_num_per_level\n@@ -1409,6 +1412,7 @@ def generate_proposals(scores,\nnms_thresh=0.5,\nmin_size=0.1,\neta=1.0,\n+ pixel_offset=False,\nreturn_rois_num=False,\nname=None):\n\"\"\"\n@@ -1483,7 +1487,8 @@ def generate_proposals(scores,\nif in_dygraph_mode():\nassert return_rois_num, \"return_rois_num should be True in dygraph mode.\"\nattrs = ('pre_nms_topN', pre_nms_top_n, 'post_nms_topN', post_nms_top_n,\n- 'nms_thresh', nms_thresh, 'min_size', min_size, 'eta', eta)\n+ 'nms_thresh', nms_thresh, 'min_size', min_size, 'eta', eta,\n+ 'pixel_offset', pixel_offset)\nrpn_rois, rpn_roi_probs, rpn_rois_num = core.ops.generate_proposals_v2(\nscores, bbox_deltas, im_shape, anchors, variances, *attrs)\nreturn rpn_rois, rpn_roi_probs, rpn_rois_num\n@@ -1530,7 +1535,8 @@ def generate_proposals(scores,\n'post_nms_topN': post_nms_top_n,\n'nms_thresh': nms_thresh,\n'min_size': min_size,\n- 'eta': eta\n+ 'eta': eta,\n+ 'pixel_offset': pixel_offset\n},\noutputs=outputs)\nrpn_rois.stop_gradient = True\n"
}
] | Python | Apache License 2.0 | paddlepaddle/paddledetection | add attr pixel_offset ingenerate_proposals,distribute_fpn_proposals, test=dygraph (#2245) |
499,304 | 21.02.2021 20:41:31 | -28,800 | 79e2436a8be51ee468f226c31f40e8dc72656df3 | fix distribute train in cascade_rcnn |
[
{
"change_type": "MODIFY",
"old_path": "dygraph/ppdet/modeling/heads/bbox_head.py",
"new_path": "dygraph/ppdet/modeling/heads/bbox_head.py",
"diff": "@@ -265,10 +265,6 @@ class BBoxHead(nn.Layer):\nreg_name = 'loss_bbox_reg'\nloss_bbox = {}\n- if fg_inds.numel() == 0:\n- loss_bbox[cls_name] = paddle.to_tensor(0., dtype='float32')\n- loss_bbox[reg_name] = paddle.to_tensor(0., dtype='float32')\n- return loss_bbox\nif cls_agnostic_bbox_reg:\nreg_delta = paddle.gather(deltas, fg_inds)\nelse:\n"
},
{
"change_type": "MODIFY",
"old_path": "dygraph/ppdet/modeling/heads/cascade_head.py",
"new_path": "dygraph/ppdet/modeling/heads/cascade_head.py",
"diff": "@@ -196,6 +196,16 @@ class CascadeHead(BBoxHead):\nif self.training:\nrois, rois_num, targets = self.bbox_assigner(\nrois, rois_num, inputs, i, is_cascade=True)\n+ tgt_labels = targets[0]\n+ tgt_labels = paddle.concat(tgt_labels) if len(\n+ tgt_labels) > 1 else tgt_labels[0]\n+ tgt_labels.stop_gradient = True\n+ fg_inds = paddle.nonzero(\n+ paddle.logical_and(tgt_labels >= 0, tgt_labels <\n+ self.num_classes)).flatten()\n+ if fg_inds.numel() == 0:\n+ targets_list.append(targets_list[-1])\n+ else:\ntargets_list.append(targets)\nrois_feat = self.roi_extractor(body_feats, rois, rois_num)\n@@ -227,6 +237,8 @@ class CascadeHead(BBoxHead):\nclip_box = clip_bbox(boxes_per_image, im_shape[i])\nif self.training:\nkeep = nonempty_bbox(clip_box)\n+ if keep.shape[0] == 0:\n+ continue\nclip_box = paddle.gather(clip_box, keep)\nrois.append(clip_box)\nrois_num = paddle.concat([paddle.shape(r)[0] for r in rois])\n"
}
] | Python | Apache License 2.0 | paddlepaddle/paddledetection | fix distribute train in cascade_rcnn (#2246) |
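The guard added here handles a distributed corner case: a cascade stage may sample no foreground RoIs on some card, which would make the box-regression gather empty. A rough numpy sketch of the decision the diff encodes, reusing the previous stage's targets when that happens (labels equal to `num_classes` denote background):

```python
import numpy as np

def pick_stage_targets(tgt_labels, prev_targets, new_targets, num_classes=80):
    # foreground = label in [0, num_classes); num_classes itself means background
    fg_inds = np.nonzero((tgt_labels >= 0) & (tgt_labels < num_classes))[0]
    return prev_targets if fg_inds.size == 0 else new_targets

labels = np.array([80, 80, 80])     # every sampled RoI is background on this card
print(pick_stage_targets(labels, "targets[stage - 1]", "targets[stage]"))
# -> targets[stage - 1]
```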
499,313 | 22.02.2021 13:27:55 | -28,800 | 07b75a88bd36ae3f70c5ad0c7add8508fc79af67 | fix switch_ir_optim in deploy/infer |
[
{
"change_type": "MODIFY",
"old_path": "dygraph/deploy/python/infer.py",
"new_path": "dygraph/deploy/python/infer.py",
"diff": "@@ -316,7 +316,11 @@ def load_predictor(model_dir,\n# initial GPU memory(M), device ID\nconfig.enable_use_gpu(200, 0)\n# optimize graph and fuse op\n- config.switch_ir_optim(True)\n+ # FIXME(dkp): ir optimize may prune variable inside graph\n+ # and incur error in Paddle 2.0, e.g. in SSDLite\n+ # FCOS model, set as False currently and should\n+ # be set as True after switch_ir_optim fixed\n+ config.switch_ir_optim(False)\nelse:\nconfig.disable_gpu()\n"
}
] | Python | Apache License 2.0 | paddlepaddle/paddledetection | fix switch_ir_optim in deploy/infer (#2230) |
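For context, a hedged sketch of the deploy-side predictor setup this diff touches, assuming the Paddle 2.0 inference `Config` API and hypothetical model paths: IR optimization is switched off because graph fusion/pruning could drop variables that some exported models (e.g. SSDLite, FCOS) still need.

```python
from paddle.inference import Config, create_predictor   # requires paddlepaddle

config = Config("output_inference/model.pdmodel",        # hypothetical paths
                "output_inference/model.pdiparams")
config.enable_use_gpu(200, 0)       # 200 MB initial GPU memory pool on device 0
config.switch_ir_optim(False)       # keep the graph un-fused until the IR bug is fixed
predictor = create_predictor(config)
```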
499,304 | 23.02.2021 15:55:41 | -28,800 | 7c3def89a2d92a565417d0ca110bccf5fbcc5a5f | fix some model link in dygraph |
[
{
"change_type": "ADD",
"old_path": null,
"new_path": "dygraph/configs/dcn/cascade_rcnn_dcn_r50_fpn_1x_coco.yml",
"diff": "+_BASE_: [\n+ '../datasets/coco_detection.yml',\n+ '../runtime.yml',\n+ '../cascade_rcnn/_base_/optimizer_1x.yml',\n+ '../cascade_rcnn/_base_/cascade_rcnn_r50_fpn.yml',\n+ '../cascade_rcnn/_base_/cascade_fpn_reader.yml',\n+]\n+weights: output/cascade_rcnn_dcn_r50_fpn_1x_coco/model_final\n+\n+ResNet:\n+ depth: 50\n+ norm_type: bn\n+ freeze_at: 0\n+ return_idx: [0,1,2,3]\n+ num_stages: 4\n+ dcn_v2_stages: [1,2,3]\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "dygraph/configs/dcn/cascade_rcnn_dcn_x101_vd_64x4d_fpn_1x_coco.yml",
"diff": "+_BASE_: [\n+ 'cascade_rcnn_dcn_r50_fpn_1x_coco.yml',\n+]\n+pretrain_weights: https://paddle-imagenet-models-name.bj.bcebos.com/ResNeXt101_vd_64x4d_pretrained.tar\n+weights: output/cascade_rcnn_dcn_x101_vd_64x4d_fpn_1x_coco/model_final\n+\n+ResNet:\n+ depth: 101\n+ groups: 64\n+ base_width: 4\n+ base_channels: 64\n+ variant: d\n+ norm_type: bn\n+ freeze_at: 0\n+ return_idx: [0,1,2,3]\n+ num_stages: 4\n+ dcn_v2_stages: [1,2,3]\n"
},
{
"change_type": "MODIFY",
"old_path": "dygraph/configs/hrnet/README.md",
"new_path": "dygraph/configs/hrnet/README.md",
"diff": "| Backbone | Type | Image/gpu | Lr schd | Inf time (fps) | Box AP | Mask AP | Download | Configs |\n| :---------------------- | :------------- | :-------: | :-----: | :------------: | :----: | :-----: | :----------------------------------------------------------: | :-----: |\n-| HRNetV2p_W18 | Faster | 1 | 1x | - | 36.8 | - | [model](https://paddlemodels.bj.bcebos.com/object_detection/dygraph/faster_rcnn_hrnetv2p_w18_1x_coco.pdparams) | [config](https://github.com/PaddlePaddle/PaddleDetection/tree/master/dygraph/configs/hrnet/faster_rcnn_hrnetv2p_w18_1x_coco.yml) |\n-| HRNetV2p_W18 | Faster | 1 | 2x | - | 39.0 | - | [model](https://paddlemodels.bj.bcebos.com/object_detection/dygraph/faster_rcnn_hrnetv2p_w18_2x_coco.pdparams) | [config](https://github.com/PaddlePaddle/PaddleDetection/tree/master/dygraph/configs/hrnet/faster_rcnn_hrnetv2p_w18_2x_coco.yml) |\n+| HRNetV2p_W18 | Faster | 1 | 1x | - | 36.8 | - | [model](https://paddledet.bj.bcebos.com/models/faster_rcnn_hrnetv2p_w18_1x_coco.pdparams) | [config](https://github.com/PaddlePaddle/PaddleDetection/tree/master/dygraph/configs/hrnet/faster_rcnn_hrnetv2p_w18_1x_coco.yml) |\n+| HRNetV2p_W18 | Faster | 1 | 2x | - | 39.0 | - | [model](https://paddledet.bj.bcebos.com/models/faster_rcnn_hrnetv2p_w18_2x_coco.pdparams) | [config](https://github.com/PaddlePaddle/PaddleDetection/tree/master/dygraph/configs/hrnet/faster_rcnn_hrnetv2p_w18_2x_coco.yml) |\n"
},
{
"change_type": "MODIFY",
"old_path": "dygraph/configs/mask_rcnn/_base_/mask_rcnn_r50.yml",
"new_path": "dygraph/configs/mask_rcnn/_base_/mask_rcnn_r50.yml",
"diff": "@@ -79,6 +79,7 @@ MaskHead:\nshare_bbox_feat: true\nMaskFeat:\n+ num_convs: 0\nout_channels: 256\nMaskAssigner:\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "dygraph/configs/mask_rcnn/mask_rcnn_r50_vd_fpn_1x_coco.yml",
"diff": "+_BASE_: [\n+ 'mask_rcnn_r50_fpn_1x_coco.yml',\n+]\n+\n+pretrain_weights: https://paddle-imagenet-models-name.bj.bcebos.com/ResNet50_vd_pretrained.tar\n+weights: output/mask_rcnn_r50_vd_fpn_1x_coco/model_final\n+\n+ResNet:\n+ # index 0 stands for res2\n+ depth: 50\n+ variant: d\n+ norm_type: bn\n+ freeze_at: 0\n+ return_idx: [0,1,2,3]\n+ num_stages: 4\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "dygraph/configs/mask_rcnn/mask_rcnn_x101_vd_64x4d_fpn_2x_coco.yml",
"diff": "+_BASE_: [\n+ 'mask_rcnn_r50_fpn_1x_coco.yml',\n+]\n+\n+pretrain_weights: https://paddle-imagenet-models-name.bj.bcebos.com/ResNeXt101_vd_64x4d_pretrained.tar\n+weights: output/mask_rcnn_x101_vd_64x4d_fpn_2x_coco/model_final\n+\n+ResNet:\n+ # for ResNeXt: groups, base_width, base_channels\n+ depth: 101\n+ variant: d\n+ groups: 64\n+ base_width: 4\n+ base_channels: 64\n+ norm_type: bn\n+ freeze_at: 0\n+ return_idx: [0,1,2,3]\n+ num_stages: 4\n+\n+epoch: 24\n+LearningRate:\n+ base_lr: 0.01\n+ schedulers:\n+ - !PiecewiseDecay\n+ gamma: 0.1\n+ milestones: [16, 22]\n+ - !LinearWarmup\n+ start_factor: 0.1\n+ steps: 1000\n"
},
{
"change_type": "MODIFY",
"old_path": "dygraph/configs/solov2/README.md",
"new_path": "dygraph/configs/solov2/README.md",
"diff": "@@ -19,8 +19,8 @@ SOLOv2 (Segmenting Objects by Locations) is a fast instance segmentation framewo\n| BlendMask | R50-FPN | True | 3x | 37.8 | 13.5 | V100 | - | - |\n| SOLOv2 (Paper) | R50-FPN | False | 1x | 34.8 | 18.5 | V100 | - | - |\n| SOLOv2 (Paper) | X101-DCN-FPN | True | 3x | 42.4 | 5.9 | V100 | - | - |\n-| SOLOv2 | R50-FPN | False | 1x | 35.5 | 21.9 | V100 | [model](https://paddlemodels.bj.bcebos.com/object_detection/dygraph/solov2_r50_fpn_1x_coco.pdparams) | [config](https://github.com/PaddlePaddle/PaddleDetection/tree/master/dygraph/configs/solov2/solov2_r50_fpn_1x_coco.yml) |\n-| SOLOv2 | R50-FPN | True | 3x | 37.9 | 21.9 | V100 | [model](https://paddlemodels.bj.bcebos.com/object_detection/dygraph/solov2_r50_fpn_3x_coco.pdparams) | [config](https://github.com/PaddlePaddle/PaddleDetection/tree/master/dygraph/configs/solov2/solov2_r50_fpn_3x_coco.yml) |\n+| SOLOv2 | R50-FPN | False | 1x | 35.5 | 21.9 | V100 | [model](https://paddledet.bj.bcebos.com/models/solov2_r50_fpn_1x_coco.pdparams) | [config](https://github.com/PaddlePaddle/PaddleDetection/tree/master/dygraph/configs/solov2/solov2_r50_fpn_1x_coco.yml) |\n+| SOLOv2 | R50-FPN | True | 3x | 38.0 | 21.9 | V100 | [model](https://paddledet.bj.bcebos.com/models/solov2_r50_fpn_3x_coco.pdparams) | [config](https://github.com/PaddlePaddle/PaddleDetection/tree/master/dygraph/configs/solov2/solov2_r50_fpn_3x_coco.yml) |\n**Notes:**\n"
},
{
"change_type": "MODIFY",
"old_path": "dygraph/ppdet/modeling/heads/cascade_head.py",
"new_path": "dygraph/ppdet/modeling/heads/cascade_head.py",
"diff": "@@ -237,8 +237,6 @@ class CascadeHead(BBoxHead):\nclip_box = clip_bbox(boxes_per_image, im_shape[i])\nif self.training:\nkeep = nonempty_bbox(clip_box)\n- if keep.shape[0] == 0:\n- continue\nclip_box = paddle.gather(clip_box, keep)\nrois.append(clip_box)\nrois_num = paddle.concat([paddle.shape(r)[0] for r in rois])\n"
}
] | Python | Apache License 2.0 | paddlepaddle/paddledetection | fix some model link in dygraph (#2256) |
499,313 | 23.02.2021 15:58:37 | -28,800 | 1a66a9d292f65fec0a355264e627e3bcc3d80870 | add 2.0rc changelog |
[
{
"change_type": "MODIFY",
"old_path": "README_en.md",
"new_path": "README_en.md",
"diff": "@@ -260,7 +260,7 @@ All these models can be get in [Model Zoo](#ModelZoo)\n## Updates\n-v0.5.0 was released at `11/2020`, add SOLOv2, etc. And add Android mobile demo, add PACT quantization strategy, add object detection full-process documentation and Jetson platform deployment tutorials, and improved ease of use, fix many known bugs, etc. Please refer to [change log](docs/CHANGELOG.md) for details.\n+v2.0-rc was released at `02/2021`, add dygraph version, which supports RCNN, YOLOv3, PP-YOLO, SSD/SSDLite, FCOS, TTFNet, SOLOv2, etc. supports model pruning and quantization, supports deploying and accelerating by TensorRT, etc. Please refer to [change log](docs/CHANGELOG.md) for details.\n## License\n"
},
{
"change_type": "MODIFY",
"old_path": "dygraph/configs/fcos/README.md",
"new_path": "dygraph/configs/fcos/README.md",
"diff": "@@ -19,6 +19,7 @@ FCOS (Fully Convolutional One-Stage Object Detection) is a fast anchor-free obje\n**Notes:**\n- FCOS is trained on COCO train2017 dataset and evaluated on val2017 results of `mAP(IoU=0.5:0.95)`.\n+- FCOS training performace is dependented on Paddle develop branch, performance reproduction shoule based on [Paddle daily version](https://www.paddlepaddle.org.cn/documentation/docs/zh/install/Tables.html#whl-dev) or Paddle 2.0.1(will be published on 2021.03), performace will loss slightly is training base on Paddle 2.0.0\n## Citations\n```\n"
},
{
"change_type": "MODIFY",
"old_path": "dygraph/configs/solov2/README.md",
"new_path": "dygraph/configs/solov2/README.md",
"diff": "@@ -25,6 +25,7 @@ SOLOv2 (Segmenting Objects by Locations) is a fast instance segmentation framewo\n**Notes:**\n- SOLOv2 is trained on COCO train2017 dataset and evaluated on val2017 results of `mAP(IoU=0.5:0.95)`.\n+- SOLOv2 training performace is dependented on Paddle develop branch, performance reproduction shoule based on [Paddle daily version](https://www.paddlepaddle.org.cn/documentation/docs/zh/install/Tables.html#whl-dev) or Paddle 2.0.1(will be published on 2021.03), performace will loss slightly is training base on Paddle 2.0.0\n## Citations\n```\n"
}
] | Python | Apache License 2.0 | paddlepaddle/paddledetection | add 2.0rc changelog (#2254) |
499,395 | 23.02.2021 21:30:12 | -28,800 | 72b7f9ef644d0b7dc9cb3c3c96eb8d26b8d61cc3 | modify transform op of yolo and solo configs |
[
{
"change_type": "MODIFY",
"old_path": "dygraph/configs/fcos/fcos_r50_fpn_multiscale_2x_coco.yml",
"new_path": "dygraph/configs/fcos/fcos_r50_fpn_multiscale_2x_coco.yml",
"diff": "@@ -10,17 +10,13 @@ weights: output/fcos_r50_fpn_multiscale_2x_coco/model_final\nTrainReader:\nsample_transforms:\n- - DecodeOp: {}\n- - RandomFlipOp: {prob: 0.5}\n- - NormalizeImageOp: {is_scale: true, mean: [0.485,0.456,0.406], std: [0.229, 0.224,0.225]}\n- - ResizeImage:\n- target_size: [640, 672, 704, 736, 768, 800]\n- max_size: 1333\n- interp: 1\n- use_cv2: true\n- - PermuteOp: {}\n+ - Decode: {}\n+ - RandomFlip: {prob: 0.5}\n+ - NormalizeImage: {is_scale: true, mean: [0.485,0.456,0.406], std: [0.229, 0.224,0.225]}\n+ - RandomResize: {target_size: [[640, 1333], [672, 1333], [704, 1333], [736, 1333], [768, 1333], [800, 1333]], keep_ratio: true, interp: 1}\n+ - Permute: {}\nbatch_transforms:\n- - PadBatchOp: {pad_to_stride: 128}\n+ - PadBatch: {pad_to_stride: 128}\n- Gt2FCOSTarget:\nobject_sizes_boundary: [64, 128, 256, 512]\ncenter_sampling_radius: 1.5\n"
},
{
"change_type": "MODIFY",
"old_path": "dygraph/configs/ppyolo/ppyolo_mbv3_large_coco.yml",
"new_path": "dygraph/configs/ppyolo/ppyolo_mbv3_large_coco.yml",
"diff": "@@ -13,24 +13,24 @@ TrainReader:\ninputs_def:\nnum_max_boxes: 90\nsample_transforms:\n- - DecodeOp: {}\n- - MixupOp: {alpha: 1.5, beta: 1.5}\n- - RandomDistortOp: {}\n- - RandomExpandOp: {fill_value: [123.675, 116.28, 103.53]}\n- - RandomCropOp: {}\n- - RandomFlipOp: {}\n+ - Decode: {}\n+ - Mixup: {alpha: 1.5, beta: 1.5}\n+ - RandomDistort: {}\n+ - RandomExpand: {fill_value: [123.675, 116.28, 103.53]}\n+ - RandomCrop: {}\n+ - RandomFlip: {}\nbatch_transforms:\n- - BatchRandomResizeOp:\n+ - BatchRandomResize:\ntarget_size: [224, 256, 288, 320, 352, 384, 416, 448, 480, 512]\nrandom_size: True\nrandom_interp: True\nkeep_ratio: False\n- - NormalizeBoxOp: {}\n- - PadBoxOp: {num_max_boxes: 90}\n- - BboxXYXY2XYWHOp: {}\n- - NormalizeImageOp: {mean: [0.485, 0.456, 0.406], std: [0.229, 0.224, 0.225], is_scale: True}\n- - PermuteOp: {}\n- - Gt2YoloTargetOp:\n+ - NormalizeBox: {}\n+ - PadBox: {num_max_boxes: 90}\n+ - BboxXYXY2XYWH: {}\n+ - NormalizeImage: {mean: [0.485, 0.456, 0.406], std: [0.229, 0.224, 0.225], is_scale: True}\n+ - Permute: {}\n+ - Gt2YoloTarget:\nanchor_masks: [[3, 4, 5], [0, 1, 2]]\nanchors: [[11, 18], [34, 47], [51, 126], [115, 71], [120, 195], [254, 235]]\ndownsample_ratios: [32, 16]\n@@ -42,10 +42,10 @@ TrainReader:\nEvalReader:\nsample_transforms:\n- - DecodeOp: {}\n- - ResizeOp: {target_size: [320, 320], keep_ratio: False, interp: 2}\n- - NormalizeImageOp: {mean: [0.485, 0.456, 0.406], std: [0.229, 0.224, 0.225], is_scale: True}\n- - PermuteOp: {}\n+ - Decode: {}\n+ - Resize: {target_size: [320, 320], keep_ratio: False, interp: 2}\n+ - NormalizeImage: {mean: [0.485, 0.456, 0.406], std: [0.229, 0.224, 0.225], is_scale: True}\n+ - Permute: {}\nbatch_size: 8\ndrop_empty: false\n@@ -53,10 +53,10 @@ TestReader:\ninputs_def:\nimage_shape: [3, 320, 320]\nsample_transforms:\n- - DecodeOp: {}\n- - ResizeOp: {target_size: [320, 320], keep_ratio: False, interp: 2}\n- - NormalizeImageOp: {mean: [0.485, 0.456, 0.406], std: [0.229, 0.224, 0.225], is_scale: True}\n- - PermuteOp: {}\n+ - Decode: {}\n+ - Resize: {target_size: [320, 320], keep_ratio: False, interp: 2}\n+ - NormalizeImage: {mean: [0.485, 0.456, 0.406], std: [0.229, 0.224, 0.225], is_scale: True}\n+ - Permute: {}\nbatch_size: 1\nepoch: 270\n"
},
{
"change_type": "MODIFY",
"old_path": "dygraph/configs/ppyolo/ppyolo_mbv3_small_coco.yml",
"new_path": "dygraph/configs/ppyolo/ppyolo_mbv3_small_coco.yml",
"diff": "@@ -13,24 +13,24 @@ TrainReader:\ninputs_def:\nnum_max_boxes: 90\nsample_transforms:\n- - DecodeOp: {}\n- - MixupOp: {alpha: 1.5, beta: 1.5}\n- - RandomDistortOp: {}\n- - RandomExpandOp: {fill_value: [123.675, 116.28, 103.53]}\n- - RandomCropOp: {}\n- - RandomFlipOp: {}\n+ - Decode: {}\n+ - Mixup: {alpha: 1.5, beta: 1.5}\n+ - RandomDistort: {}\n+ - RandomExpand: {fill_value: [123.675, 116.28, 103.53]}\n+ - RandomCrop: {}\n+ - RandomFlip: {}\nbatch_transforms:\n- - BatchRandomResizeOp:\n+ - BatchRandomResize:\ntarget_size: [224, 256, 288, 320, 352, 384, 416, 448, 480, 512]\nrandom_size: True\nrandom_interp: True\nkeep_ratio: False\n- - NormalizeBoxOp: {}\n- - PadBoxOp: {num_max_boxes: 90}\n- - BboxXYXY2XYWHOp: {}\n- - NormalizeImageOp: {mean: [0.485, 0.456, 0.406], std: [0.229, 0.224, 0.225], is_scale: True}\n- - PermuteOp: {}\n- - Gt2YoloTargetOp:\n+ - NormalizeBox: {}\n+ - PadBox: {num_max_boxes: 90}\n+ - BboxXYXY2XYWH: {}\n+ - NormalizeImage: {mean: [0.485, 0.456, 0.406], std: [0.229, 0.224, 0.225], is_scale: True}\n+ - Permute: {}\n+ - Gt2YoloTarget:\nanchor_masks: [[3, 4, 5], [0, 1, 2]]\nanchors: [[11, 18], [34, 47], [51, 126], [115, 71], [120, 195], [254, 235]]\ndownsample_ratios: [32, 16]\n@@ -42,10 +42,10 @@ TrainReader:\nEvalReader:\nsample_transforms:\n- - DecodeOp: {}\n- - ResizeOp: {target_size: [320, 320], keep_ratio: False, interp: 2}\n- - NormalizeImageOp: {mean: [0.485, 0.456, 0.406], std: [0.229, 0.224, 0.225], is_scale: True}\n- - PermuteOp: {}\n+ - Decode: {}\n+ - Resize: {target_size: [320, 320], keep_ratio: False, interp: 2}\n+ - NormalizeImage: {mean: [0.485, 0.456, 0.406], std: [0.229, 0.224, 0.225], is_scale: True}\n+ - Permute: {}\nbatch_size: 8\ndrop_empty: false\n@@ -53,10 +53,10 @@ TestReader:\ninputs_def:\nimage_shape: [3, 320, 320]\nsample_transforms:\n- - DecodeOp: {}\n- - ResizeOp: {target_size: [320, 320], keep_ratio: False, interp: 2}\n- - NormalizeImageOp: {mean: [0.485, 0.456, 0.406], std: [0.229, 0.224, 0.225], is_scale: True}\n- - PermuteOp: {}\n+ - Decode: {}\n+ - Resize: {target_size: [320, 320], keep_ratio: False, interp: 2}\n+ - NormalizeImage: {mean: [0.485, 0.456, 0.406], std: [0.229, 0.224, 0.225], is_scale: True}\n+ - Permute: {}\nbatch_size: 1\nepoch: 270\n"
},
{
"change_type": "MODIFY",
"old_path": "dygraph/configs/ppyolo/ppyolo_r18vd_coco.yml",
"new_path": "dygraph/configs/ppyolo/ppyolo_r18vd_coco.yml",
"diff": "@@ -11,27 +11,27 @@ weights: output/ppyolo_r18vd_coco/model_final\nTrainReader:\nsample_transforms:\n- - DecodeOp: {}\n- - MixupOp: {alpha: 1.5, beta: 1.5}\n- - RandomDistortOp: {}\n- - RandomExpandOp: {fill_value: [123.675, 116.28, 103.53]}\n- - RandomCropOp: {}\n- - RandomFlipOp: {}\n+ - Decode: {}\n+ - Mixup: {alpha: 1.5, beta: 1.5}\n+ - RandomDistort: {}\n+ - RandomExpand: {fill_value: [123.675, 116.28, 103.53]}\n+ - RandomCrop: {}\n+ - RandomFlip: {}\nbatch_transforms:\n- - BatchRandomResizeOp:\n+ - BatchRandomResize:\ntarget_size: [320, 352, 384, 416, 448, 480, 512, 544, 576, 608]\nrandom_size: True\nrandom_interp: True\nkeep_ratio: False\n- - NormalizeBoxOp: {}\n- - PadBoxOp: {num_max_boxes: 50}\n- - BboxXYXY2XYWHOp: {}\n- - NormalizeImageOp:\n+ - NormalizeBox: {}\n+ - PadBox: {num_max_boxes: 50}\n+ - BboxXYXY2XYWH: {}\n+ - NormalizeImage:\nmean: [0.485, 0.456, 0.406]\nstd: [0.229, 0.224, 0.225]\nis_scale: True\n- - PermuteOp: {}\n- - Gt2YoloTargetOp:\n+ - Permute: {}\n+ - Gt2YoloTarget:\nanchor_masks: [[3, 4, 5], [0, 1, 2]]\nanchors: [[10, 14], [23, 27], [37, 58], [81, 82], [135, 169], [344, 319]]\ndownsample_ratios: [32, 16]\n@@ -42,10 +42,10 @@ TrainReader:\nEvalReader:\nsample_transforms:\n- - DecodeOp: {}\n- - ResizeOp: {target_size: [512, 512], keep_ratio: False, interp: 2}\n- - NormalizeImageOp: {mean: [0.485, 0.456, 0.406], std: [0.229, 0.224, 0.225], is_scale: True}\n- - PermuteOp: {}\n+ - Decode: {}\n+ - Resize: {target_size: [512, 512], keep_ratio: False, interp: 2}\n+ - NormalizeImage: {mean: [0.485, 0.456, 0.406], std: [0.229, 0.224, 0.225], is_scale: True}\n+ - Permute: {}\nbatch_size: 8\ndrop_empty: false\n@@ -53,10 +53,10 @@ TestReader:\ninputs_def:\nimage_shape: [3, 512, 512]\nsample_transforms:\n- - DecodeOp: {}\n- - ResizeOp: {target_size: [512, 512], keep_ratio: False, interp: 2}\n- - NormalizeImageOp: {mean: [0.485, 0.456, 0.406], std: [0.229, 0.224, 0.225], is_scale: True}\n- - PermuteOp: {}\n+ - Decode: {}\n+ - Resize: {target_size: [512, 512], keep_ratio: False, interp: 2}\n+ - NormalizeImage: {mean: [0.485, 0.456, 0.406], std: [0.229, 0.224, 0.225], is_scale: True}\n+ - Permute: {}\nbatch_size: 1\nepoch: 270\n"
},
{
"change_type": "MODIFY",
"old_path": "dygraph/configs/solov2/solov2_r50_fpn_3x_coco.yml",
"new_path": "dygraph/configs/solov2/solov2_r50_fpn_3x_coco.yml",
"diff": "@@ -20,17 +20,17 @@ LearningRate:\nTrainReader:\nsample_transforms:\n- - DecodeOp: {}\n+ - Decode: {}\n- Poly2Mask: {}\n- - RandomResizeOp: {interp: 1,\n+ - RandomResize: {interp: 1,\ntarget_size: [[640, 1333], [672, 1333], [704, 1333], [736, 1333], [768, 1333], [800, 1333]],\nkeep_ratio: True}\n- - RandomFlipOp: {}\n- - NormalizeImageOp: {is_scale: true, mean: [0.485,0.456,0.406], std: [0.229, 0.224,0.225]}\n- - PermuteOp: {}\n+ - RandomFlip: {}\n+ - NormalizeImage: {is_scale: true, mean: [0.485,0.456,0.406], std: [0.229, 0.224,0.225]}\n+ - Permute: {}\nbatch_transforms:\n- - PadBatchOp: {pad_to_stride: 32}\n- - Gt2Solov2TargetOp: {num_grids: [40, 36, 24, 16, 12],\n+ - PadBatch: {pad_to_stride: 32}\n+ - Gt2Solov2Target: {num_grids: [40, 36, 24, 16, 12],\nscale_ranges: [[1, 96], [48, 192], [96, 384], [192, 768], [384, 2048]],\ncoord_sigma: 0.2}\nbatch_size: 2\n"
},
{
"change_type": "MODIFY",
"old_path": "dygraph/configs/yolov3/yolov3_mobilenet_v1_270e_voc.yml",
"new_path": "dygraph/configs/yolov3/yolov3_mobilenet_v1_270e_voc.yml",
"diff": "@@ -13,24 +13,24 @@ TrainReader:\ninputs_def:\nnum_max_boxes: 50\nsample_transforms:\n- - DecodeOp: {}\n- - MixupOp: {alpha: 1.5, beta: 1.5}\n- - RandomDistortOp: {}\n- - RandomExpandOp: {fill_value: [123.675, 116.28, 103.53]}\n- - RandomCropOp: {}\n- - RandomFlipOp: {}\n+ - Decode: {}\n+ - Mixup: {alpha: 1.5, beta: 1.5}\n+ - RandomDistort: {}\n+ - RandomExpand: {fill_value: [123.675, 116.28, 103.53]}\n+ - RandomCrop: {}\n+ - RandomFlip: {}\nbatch_transforms:\n- - BatchRandomResizeOp:\n+ - BatchRandomResize:\ntarget_size: [320, 352, 384, 416, 448, 480, 512, 544, 576, 608]\nrandom_size: True\nrandom_interp: True\nkeep_ratio: False\n- - NormalizeBoxOp: {}\n- - PadBoxOp: {num_max_boxes: 50}\n- - BboxXYXY2XYWHOp: {}\n- - NormalizeImageOp: {mean: [0.485, 0.456, 0.406], std: [0.229, 0.224, 0.225], is_scale: True}\n- - PermuteOp: {}\n- - Gt2YoloTargetOp:\n+ - NormalizeBox: {}\n+ - PadBox: {num_max_boxes: 50}\n+ - BboxXYXY2XYWH: {}\n+ - NormalizeImage: {mean: [0.485, 0.456, 0.406], std: [0.229, 0.224, 0.225], is_scale: True}\n+ - Permute: {}\n+ - Gt2YoloTarget:\nanchor_masks: [[6, 7, 8], [3, 4, 5], [0, 1, 2]]\nanchors: [[10, 13], [16, 30], [33, 23], [30, 61], [62, 45], [59, 119], [116, 90], [156, 198], [373, 326]]\ndownsample_ratios: [32, 16, 8]\n"
},
{
"change_type": "MODIFY",
"old_path": "dygraph/configs/yolov3/yolov3_mobilenet_v1_roadsign.yml",
"new_path": "dygraph/configs/yolov3/yolov3_mobilenet_v1_roadsign.yml",
"diff": "@@ -19,24 +19,24 @@ TrainReader:\ninputs_def:\nnum_max_boxes: 50\nsample_transforms:\n- - DecodeOp: {}\n- - MixupOp: {alpha: 1.5, beta: 1.5}\n- - RandomDistortOp: {}\n- - RandomExpandOp: {fill_value: [123.675, 116.28, 103.53]}\n- - RandomCropOp: {}\n- - RandomFlipOp: {}\n+ - Decode: {}\n+ - Mixup: {alpha: 1.5, beta: 1.5}\n+ - RandomDistort: {}\n+ - RandomExpand: {fill_value: [123.675, 116.28, 103.53]}\n+ - RandomCrop: {}\n+ - RandomFlip: {}\nbatch_transforms:\n- - BatchRandomResizeOp:\n+ - BatchRandomResize:\ntarget_size: [320, 352, 384, 416, 448, 480, 512, 544, 576, 608]\nrandom_size: True\nrandom_interp: True\nkeep_ratio: False\n- - NormalizeBoxOp: {}\n- - PadBoxOp: {num_max_boxes: 50}\n- - BboxXYXY2XYWHOp: {}\n- - NormalizeImageOp: {mean: [0.485, 0.456, 0.406], std: [0.229, 0.224, 0.225], is_scale: True}\n- - PermuteOp: {}\n- - Gt2YoloTargetOp:\n+ - NormalizeBox: {}\n+ - PadBox: {num_max_boxes: 50}\n+ - BboxXYXY2XYWH: {}\n+ - NormalizeImage: {mean: [0.485, 0.456, 0.406], std: [0.229, 0.224, 0.225], is_scale: True}\n+ - Permute: {}\n+ - Gt2YoloTarget:\nanchor_masks: [[6, 7, 8], [3, 4, 5], [0, 1, 2]]\nanchors: [[10, 13], [16, 30], [33, 23], [30, 61], [62, 45], [59, 119], [116, 90], [156, 198], [373, 326]]\ndownsample_ratios: [32, 16, 8]\n"
},
{
"change_type": "MODIFY",
"old_path": "dygraph/configs/yolov3/yolov3_mobilenet_v3_large_270e_voc.yml",
"new_path": "dygraph/configs/yolov3/yolov3_mobilenet_v3_large_270e_voc.yml",
"diff": "@@ -13,24 +13,24 @@ TrainReader:\ninputs_def:\nnum_max_boxes: 50\nsample_transforms:\n- - DecodeOp: {}\n- - MixupOp: {alpha: 1.5, beta: 1.5}\n- - RandomDistortOp: {}\n- - RandomExpandOp: {fill_value: [123.675, 116.28, 103.53]}\n- - RandomCropOp: {}\n- - RandomFlipOp: {}\n+ - Decode: {}\n+ - Mixup: {alpha: 1.5, beta: 1.5}\n+ - RandomDistort: {}\n+ - RandomExpand: {fill_value: [123.675, 116.28, 103.53]}\n+ - RandomCrop: {}\n+ - RandomFlip: {}\nbatch_transforms:\n- - BatchRandomResizeOp:\n+ - BatchRandomResize:\ntarget_size: [320, 352, 384, 416, 448, 480, 512, 544, 576, 608]\nrandom_size: True\nrandom_interp: True\nkeep_ratio: False\n- - NormalizeBoxOp: {}\n- - PadBoxOp: {num_max_boxes: 50}\n- - BboxXYXY2XYWHOp: {}\n- - NormalizeImageOp: {mean: [0.485, 0.456, 0.406], std: [0.229, 0.224, 0.225], is_scale: True}\n- - PermuteOp: {}\n- - Gt2YoloTargetOp:\n+ - NormalizeBox: {}\n+ - PadBox: {num_max_boxes: 50}\n+ - BboxXYXY2XYWH: {}\n+ - NormalizeImage: {mean: [0.485, 0.456, 0.406], std: [0.229, 0.224, 0.225], is_scale: True}\n+ - Permute: {}\n+ - Gt2YoloTarget:\nanchor_masks: [[6, 7, 8], [3, 4, 5], [0, 1, 2]]\nanchors: [[10, 13], [16, 30], [33, 23], [30, 61], [62, 45], [59, 119], [116, 90], [156, 198], [373, 326]]\ndownsample_ratios: [32, 16, 8]\n"
}
] | Python | Apache License 2.0 | paddlepaddle/paddledetection | modify transform op of yolo and solo configs (#2260) |
499,304 | 02.03.2021 20:05:01 | -28,800 | be4269fbaace8f2dad5a23e60a1451e82b12b946 | fix RCNN model deploy infer |
[
{
"change_type": "MODIFY",
"old_path": "dygraph/ppdet/modeling/heads/roi_extractor.py",
"new_path": "dygraph/ppdet/modeling/heads/roi_extractor.py",
"diff": "@@ -79,7 +79,6 @@ class RoIAlign(object):\nsampling_ratio=self.sampling_ratio,\nrois_num=rois_num_dist[lvl],\naligned=self.aligned)\n- if roi_feat.shape[0] > 0:\nrois_feat_list.append(roi_feat)\nrois_feat_shuffle = paddle.concat(rois_feat_list)\nrois_feat = paddle.gather(rois_feat_shuffle, restore_index)\n"
},
{
"change_type": "MODIFY",
"old_path": "dygraph/ppdet/modeling/post_process.py",
"new_path": "dygraph/ppdet/modeling/post_process.py",
"diff": "@@ -85,9 +85,6 @@ class BBoxPostProcess(object):\nscale_y, scale_x = scale_factor[i][0], scale_factor[i][1]\nscale = paddle.concat([scale_x, scale_y, scale_x, scale_y])\nexpand_scale = paddle.expand(scale, [bbox_num[i], 4])\n- # TODO: Because paddle.expand transform error when dygraph\n- # to static, use reshape to avoid mistakes.\n- expand_scale = paddle.reshape(expand_scale, [bbox_num[i], 4])\norigin_shape_list.append(expand_shape)\nscale_factor_list.append(expand_scale)\n@@ -158,7 +155,7 @@ class MaskPostProcess(object):\n# TODO: support bs > 1 and mask output dtype is bool\npred_result = paddle.zeros(\n[num_mask, origin_shape[0][0], origin_shape[0][1]], dtype='int32')\n- if bboxes.shape[0] == 0:\n+ if bbox_num == 1 and bboxes[0][0] == -1:\nreturn pred_result\n# TODO: optimize chunk paste\n"
}
] | Python | Apache License 2.0 | paddlepaddle/paddledetection | fix RCNN model deploy infer (#2276) |
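The post-process change relies on an export-friendly "no detections" convention: instead of a zero-length tensor (awkward for static-graph deployment), an empty result is encoded as a single dummy row whose first value is -1. A small numpy sketch of that check:

```python
import numpy as np

def has_detections(bboxes, bbox_num):
    # bboxes: [M, 6] rows assumed as (class, score, x0, y0, x1, y1); bbox_num: rows per image
    return not (bbox_num == 1 and bboxes[0][0] == -1)

empty = np.array([[-1., 0., 0., 0., 0., 0.]], dtype="float32")
print(has_detections(empty, 1))      # False -> skip mask pasting entirely
```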
499,304 | 10.03.2021 18:41:57 | -28,800 | 0f47172d4985e22f48f8211e197b9f66b9378232 | fix x2coco shell for windows |
[
{
"change_type": "MODIFY",
"old_path": "tools/x2coco.py",
"new_path": "tools/x2coco.py",
"diff": "@@ -45,17 +45,14 @@ class MyEncoder(json.JSONEncoder):\nreturn super(MyEncoder, self).default(obj)\n-def getbbox(self, points):\n- polygons = points\n- mask = self.polygons_to_mask([self.height, self.width], polygons)\n- return self.mask2box(mask)\n-\n-\ndef images_labelme(data, num):\nimage = {}\nimage['height'] = data['imageHeight']\nimage['width'] = data['imageWidth']\nimage['id'] = num + 1\n+ if '\\\\' in data['imagePath']:\n+ image['file_name'] = data['imagePath'].split('\\\\')[-1]\n+ else:\nimage['file_name'] = data['imagePath'].split('/')[-1]\nreturn image\n@@ -369,20 +366,26 @@ def main():\ntotal_num = len(glob.glob(osp.join(args.json_input_dir, '*.json')))\nif args.train_proportion != 0:\ntrain_num = int(total_num * args.train_proportion)\n- os.makedirs(args.output_dir + '/train')\n+ out_dir = args.output_dir + '/train'\n+ if not os.path.exists(out_dir):\n+ os.makedirs(out_dir)\nelse:\ntrain_num = 0\nif args.val_proportion == 0.0:\nval_num = 0\ntest_num = total_num - train_num\n- if args.test_proportion != 0.0:\n- os.makedirs(args.output_dir + '/test')\n+ out_dir = args.output_dir + '/test'\n+ if args.test_proportion != 0.0 and not os.path.exists(out_dir):\n+ os.makedirs(out_dir)\nelse:\nval_num = int(total_num * args.val_proportion)\ntest_num = total_num - train_num - val_num\n- os.makedirs(args.output_dir + '/val')\n- if args.test_proportion != 0.0:\n- os.makedirs(args.output_dir + '/test')\n+ val_out_dir = args.output_dir + '/val'\n+ if not os.path.exists(val_out_dir):\n+ os.makedirs(val_out_dir)\n+ test_out_dir = args.output_dir + '/test'\n+ if args.test_proportion != 0.0 and not os.path.exists(test_out_dir):\n+ os.makedirs(test_out_dir)\ncount = 1\nfor img_name in os.listdir(args.image_input_dir):\nif count <= train_num:\n"
}
] | Python | Apache License 2.0 | paddlepaddle/paddledetection | fix x2coco shell for windows (#2322) |
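The Windows fix boils down to tolerating both path separators in labelme's `imagePath` when deriving the image file name; a standalone sketch of that logic is below. The same commit also guards each `os.makedirs` call with an existence check so re-running the conversion does not crash on already-created output directories.

```python
def image_file_name(image_path: str) -> str:
    # labelme JSONs written on Windows store back-slash paths
    if '\\' in image_path:
        return image_path.split('\\')[-1]
    return image_path.split('/')[-1]

assert image_file_name(r'imgs\train\001.jpg') == '001.jpg'
assert image_file_name('imgs/train/001.jpg') == '001.jpg'
```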
499,304 | 11.03.2021 15:17:14 | -28,800 | d82484d5a8182d542358e4a65a151085c6d91044 | fix some voc2coco mistakes in x2coco shell |
[
{
"change_type": "MODIFY",
"old_path": "tools/x2coco.py",
"new_path": "tools/x2coco.py",
"diff": "@@ -230,8 +230,8 @@ def voc_get_coco_annotation(obj, label2id):\nassert label in label2id, \"label is not in label2id.\"\ncategory_id = label2id[label]\nbndbox = obj.find('bndbox')\n- xmin = float(bndbox.findtext('xmin')) - 1\n- ymin = float(bndbox.findtext('ymin')) - 1\n+ xmin = float(bndbox.findtext('xmin'))\n+ ymin = float(bndbox.findtext('ymin'))\nxmax = float(bndbox.findtext('xmax'))\nymax = float(bndbox.findtext('ymax'))\nassert xmax > xmin and ymax > ymin, \"Box size error.\"\n@@ -263,15 +263,14 @@ def voc_xmls_to_cocojson(annotation_paths, label2id, output_dir, output_file):\nann_root = ann_tree.getroot()\nimg_info = voc_get_image_info(ann_root, im_id)\n- im_id += 1\n- img_id = img_info['id']\noutput_json_dict['images'].append(img_info)\nfor obj in ann_root.findall('object'):\nann = voc_get_coco_annotation(obj=obj, label2id=label2id)\n- ann.update({'image_id': img_id, 'id': bnd_id})\n+ ann.update({'image_id': im_id, 'id': bnd_id})\noutput_json_dict['annotations'].append(ann)\nbnd_id = bnd_id + 1\n+ im_id += 1\nfor label, label_id in label2id.items():\ncategory_info = {'supercategory': 'none', 'id': label_id, 'name': label}\n@@ -285,7 +284,9 @@ def voc_xmls_to_cocojson(annotation_paths, label2id, output_dir, output_file):\ndef main():\nparser = argparse.ArgumentParser(\nformatter_class=argparse.ArgumentDefaultsHelpFormatter)\n- parser.add_argument('--dataset_type', help='the type of dataset')\n+ parser.add_argument(\n+ '--dataset_type',\n+ help='the type of dataset, can be `voc`, `labelme` or `cityscape`')\nparser.add_argument('--json_input_dir', help='input annotated directory')\nparser.add_argument('--image_input_dir', help='image directory')\nparser.add_argument(\n"
}
] | Python | Apache License 2.0 | paddlepaddle/paddledetection | fix some voc2coco mistakes in x2coco shell (#2327) |
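Two bookkeeping details are fixed here: VOC boxes keep their original `xmin`/`ymin` (no `-1` shift), and the image id is advanced only after all objects of a file have been written, so every annotation references the correct image record. A compact sketch of the corrected id loop (names are illustrative):

```python
def voc_to_coco_ids(files_with_boxes):
    """files_with_boxes: list of per-image lists of (xmin, ymin, xmax, ymax)."""
    images, annotations = [], []
    im_id, bnd_id = 1, 1
    for boxes in files_with_boxes:
        images.append({'id': im_id})
        for xmin, ymin, xmax, ymax in boxes:
            annotations.append({'id': bnd_id, 'image_id': im_id,
                                'bbox': [xmin, ymin, xmax - xmin, ymax - ymin]})
            bnd_id += 1
        im_id += 1                   # only after the image's objects are emitted
    return images, annotations

imgs, anns = voc_to_coco_ids([[(1, 1, 50, 60)], [(10, 10, 30, 40)]])
assert anns[1]['image_id'] == 2
```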
499,395 | 11.03.2021 19:44:29 | -28,800 | 04340a59b1232d7ffa359d3f9b3a42d20941c13e | fix hang bugs while training using multi machine |
[
{
"change_type": "MODIFY",
"old_path": "dygraph/ppdet/utils/checkpoint.py",
"new_path": "dygraph/ppdet/utils/checkpoint.py",
"diff": "@@ -41,6 +41,21 @@ def is_url(path):\nor path.startswith('ppdet://')\n+def _get_unique_endpoints(trainer_endpoints):\n+ # Sorting is to avoid different environmental variables for each card\n+ trainer_endpoints.sort()\n+ ips = set()\n+ unique_endpoints = set()\n+ for endpoint in trainer_endpoints:\n+ ip = endpoint.split(\":\")[0]\n+ if ip in ips:\n+ continue\n+ ips.add(ip)\n+ unique_endpoints.add(endpoint)\n+ logger.info(\"unique_endpoints {}\".format(unique_endpoints))\n+ return unique_endpoints\n+\n+\ndef get_weights_path_dist(path):\nenv = os.environ\nif 'PADDLE_TRAINERS_NUM' in env and 'PADDLE_TRAINER_ID' in env:\n@@ -53,6 +68,9 @@ def get_weights_path_dist(path):\nweight_path = map_path(path, WEIGHTS_HOME)\nlock_path = weight_path + '.lock'\nif not os.path.exists(weight_path):\n+ from paddle.distributed import ParallelEnv\n+ unique_endpoints = _get_unique_endpoints(ParallelEnv()\n+ .trainer_endpoints[:])\ntry:\nos.makedirs(os.path.dirname(weight_path))\nexcept OSError as e:\n@@ -60,7 +78,7 @@ def get_weights_path_dist(path):\nraise\nwith open(lock_path, 'w'): # touch\nos.utime(lock_path, None)\n- if trainer_id == 0:\n+ if ParallelEnv().current_endpoint in unique_endpoints:\nget_weights_path(path)\nos.remove(lock_path)\nelse:\n"
}
] | Python | Apache License 2.0 | paddlepaddle/paddledetection | fix hang bugs while training using multi machine (#2284) |
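The hang happened because only trainer 0 downloaded pretrained weights, while ranks on other machines waited on a lock file that was never created on their own filesystem. The fix downloads once per machine by keeping one endpoint per IP; a minimal sketch of that selection:

```python
def get_unique_endpoints(trainer_endpoints):
    trainer_endpoints = sorted(trainer_endpoints)   # same order on every rank
    ips, unique = set(), set()
    for endpoint in trainer_endpoints:
        ip = endpoint.split(':')[0]
        if ip in ips:
            continue                                # another rank on the same machine
        ips.add(ip)
        unique.add(endpoint)
    return unique

eps = ['10.0.0.1:8170', '10.0.0.1:8171', '10.0.0.2:8170', '10.0.0.2:8171']
print(get_unique_endpoints(eps))   # {'10.0.0.1:8170', '10.0.0.2:8170'}
```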
499,304 | 11.03.2021 20:07:19 | -28,800 | aa2500cf4d18d4b00c669be2b9d272c57e660709 | fix obj365 label and config |
[
{
"change_type": "MODIFY",
"old_path": "configs/obj365/cascade_rcnn_cls_aware_r200_vd_fpn_dcnv2_nonlocal_softnms.yml",
"new_path": "configs/obj365/cascade_rcnn_cls_aware_r200_vd_fpn_dcnv2_nonlocal_softnms.yml",
"diff": "@@ -189,7 +189,7 @@ TestReader:\nfields: ['image', 'im_info', 'im_id', 'im_shape']\ndataset:\n!ImageFolder\n- anno_path: annotations/instances_val2017.json\n+ anno_path: dataset/coco/objects365_label.txt\nsample_transforms:\n- !DecodeImage\nto_rgb: true\n"
},
{
"change_type": "MODIFY",
"old_path": "configs/obj365/cascade_rcnn_dcnv2_se154_vd_fpn_gn_cas.yml",
"new_path": "configs/obj365/cascade_rcnn_dcnv2_se154_vd_fpn_gn_cas.yml",
"diff": "@@ -221,10 +221,12 @@ EvalReader:\nworker_num: 2\nTestReader:\n+ inputs_def:\n+ fields: ['image', 'im_info', 'im_id', 'im_shape']\nbatch_size: 1\ndataset:\n!ImageFolder\n- anno_path: dataset/obj365/annotations/val.json\n+ anno_path: dataset/coco/objects365_label.txt\nsample_transforms:\n- !DecodeImage\nto_rgb: True\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "dataset/coco/objects365_label.txt",
"diff": "+person\n+sneakers\n+chair\n+hat\n+lamp\n+bottle\n+cabinet/shelf\n+cup\n+car\n+glasses\n+picture/frame\n+desk\n+handbag\n+street lights\n+book\n+plate\n+helmet\n+leather shoes\n+pillow\n+glove\n+potted plant\n+bracelet\n+flower\n+tv\n+storage box\n+vase\n+bench\n+wine glass\n+boots\n+bowl\n+dining table\n+umbrella\n+boat\n+flag\n+speaker\n+trash bin/can\n+stool\n+backpack\n+couch\n+belt\n+carpet\n+basket\n+towel/napkin\n+slippers\n+barrel/bucket\n+coffee table\n+suv\n+toy\n+tie\n+bed\n+traffic light\n+pen/pencil\n+microphone\n+sandals\n+canned\n+necklace\n+mirror\n+faucet\n+bicycle\n+bread\n+high heels\n+ring\n+van\n+watch\n+sink\n+horse\n+fish\n+apple\n+camera\n+candle\n+teddy bear\n+cake\n+motorcycle\n+wild bird\n+laptop\n+knife\n+traffic sign\n+cell phone\n+paddle\n+truck\n+cow\n+power outlet\n+clock\n+drum\n+fork\n+bus\n+hanger\n+nightstand\n+pot/pan\n+sheep\n+guitar\n+traffic cone\n+tea pot\n+keyboard\n+tripod\n+hockey\n+fan\n+dog\n+spoon\n+blackboard/whiteboard\n+balloon\n+air conditioner\n+cymbal\n+mouse\n+telephone\n+pickup truck\n+orange\n+banana\n+airplane\n+luggage\n+skis\n+soccer\n+trolley\n+oven\n+remote\n+baseball glove\n+paper towel\n+refrigerator\n+train\n+tomato\n+machinery vehicle\n+tent\n+shampoo/shower gel\n+head phone\n+lantern\n+donut\n+cleaning products\n+sailboat\n+tangerine\n+pizza\n+kite\n+computer box\n+elephant\n+toiletries\n+gas stove\n+broccoli\n+toilet\n+stroller\n+shovel\n+baseball bat\n+microwave\n+skateboard\n+surfboard\n+surveillance camera\n+gun\n+life saver\n+cat\n+lemon\n+liquid soap\n+zebra\n+duck\n+sports car\n+giraffe\n+pumpkin\n+piano\n+stop sign\n+radiator\n+converter\n+tissue\n+carrot\n+washing machine\n+vent\n+cookies\n+cutting/chopping board\n+tennis racket\n+candy\n+skating and skiing shoes\n+scissors\n+folder\n+baseball\n+strawberry\n+bow tie\n+pigeon\n+pepper\n+coffee machine\n+bathtub\n+snowboard\n+suitcase\n+grapes\n+ladder\n+pear\n+american football\n+basketball\n+potato\n+paint brush\n+printer\n+billiards\n+fire hydrant\n+goose\n+projector\n+sausage\n+fire extinguisher\n+extension cord\n+facial mask\n+tennis ball\n+chopsticks\n+electronic stove and gas stove\n+pie\n+frisbee\n+kettle\n+hamburger\n+golf club\n+cucumber\n+clutch\n+blender\n+tong\n+slide\n+hot dog\n+toothbrush\n+facial cleanser\n+mango\n+deer\n+egg\n+violin\n+marker\n+ship\n+chicken\n+onion\n+ice cream\n+tape\n+wheelchair\n+plum\n+bar soap\n+scale\n+watermelon\n+cabbage\n+router/modem\n+golf ball\n+pine apple\n+crane\n+fire truck\n+peach\n+cello\n+notepaper\n+tricycle\n+toaster\n+helicopter\n+green beans\n+brush\n+carriage\n+cigar\n+earphone\n+penguin\n+hurdle\n+swing\n+radio\n+CD\n+parking meter\n+swan\n+garlic\n+french fries\n+horn\n+avocado\n+saxophone\n+trumpet\n+sandwich\n+cue\n+kiwi fruit\n+bear\n+fishing rod\n+cherry\n+tablet\n+green vegetables\n+nuts\n+corn\n+key\n+screwdriver\n+globe\n+broom\n+pliers\n+volleyball\n+hammer\n+eggplant\n+trophy\n+dates\n+board eraser\n+rice\n+tape measure/ruler\n+dumbbell\n+hamimelon\n+stapler\n+camel\n+lettuce\n+goldfish\n+meat balls\n+medal\n+toothpaste\n+antelope\n+shrimp\n+rickshaw\n+trombone\n+pomegranate\n+coconut\n+jellyfish\n+mushroom\n+calculator\n+treadmill\n+butterfly\n+egg tart\n+cheese\n+pig\n+pomelo\n+race car\n+rice cooker\n+tuba\n+crosswalk sign\n+papaya\n+hair drier\n+green onion\n+chips\n+dolphin\n+sushi\n+urinal\n+donkey\n+electric drill\n+spring rolls\n+tortoise/turtle\n+parrot\n+flute\n+measuring cup\n+shark\n+steak\n+poker 
card\n+binoculars\n+llama\n+radish\n+noodles\n+yak\n+mop\n+crab\n+microscope\n+barbell\n+bread/bun\n+baozi\n+lion\n+red cabbage\n+polar bear\n+lighter\n+seal\n+mangosteen\n+comb\n+eraser\n+pitaya\n+scallop\n+pencil case\n+saw\n+table tennis paddle\n+okra\n+starfish\n+eagle\n+monkey\n+durian\n+game board\n+rabbit\n+french horn\n+ambulance\n+asparagus\n+hoverboard\n+pasta\n+target\n+hotair balloon\n+chainsaw\n+lobster\n+iron\n+flashlight\n\\ No newline at end of file\n"
},
{
"change_type": "MODIFY",
"old_path": "ppdet/utils/coco_eval.py",
"new_path": "ppdet/utils/coco_eval.py",
"diff": "@@ -492,7 +492,34 @@ def get_category_info(anno_file=None,\nreturn coco17_category_info(with_background)\nelse:\nlogger.info(\"Load categories from {}\".format(anno_file))\n+ if anno_file.endswith('.json'):\nreturn get_category_info_from_anno(anno_file, with_background)\n+ else:\n+ return get_category_info_from_txt(anno_file, with_background)\n+\n+\n+def get_category_info_from_txt(anno_file, with_background=True):\n+ \"\"\"\n+ Get class id to category id map and category id\n+ to category name map from txt file.\n+\n+ args:\n+ anno_file (str): label txt file path.\n+ with_background (bool, default True):\n+ whether load background as class 0.\n+ \"\"\"\n+ with open(anno_file, \"r\") as f:\n+ catid_list = f.readlines()\n+ clsid2catid = {}\n+ catid2name = {}\n+ for i, catid in enumerate(catid_list):\n+ catid = catid.strip('\\n\\t\\r')\n+ clsid2catid[i + int(with_background)] = i + 1\n+ catid2name[i + int(with_background)] = catid\n+ if with_background:\n+ clsid2catid.update({0: 0})\n+ catid2name.update({0: 'background'})\n+ return clsid2catid, catid2name\ndef get_category_info_from_anno(anno_file, with_background=True):\n"
}
] | Python | Apache License 2.0 | paddlepaddle/paddledetection | fix obj365 label and config (#2330) |
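The eval-side change teaches category loading to accept a plain label list (one category name per line, as in the added `objects365_label.txt`) instead of a COCO JSON. A self-contained sketch of that mapping, mirroring the logic in the diff:

```python
def category_maps_from_txt(label_file, with_background=True):
    with open(label_file) as f:
        names = [line.strip('\n\t\r ') for line in f if line.strip()]
    offset = int(with_background)                      # class 0 reserved for background
    clsid2catid = {i + offset: i + 1 for i in range(len(names))}
    catid2name = {i + offset: name for i, name in enumerate(names)}
    if with_background:
        clsid2catid[0] = 0
        catid2name[0] = 'background'
    return clsid2catid, catid2name

# usage: clsid2catid, catid2name = category_maps_from_txt('dataset/coco/objects365_label.txt')
```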
499,304 | 15.03.2021 10:22:38 | -28,800 | 73acfad8c91d427b63ee8a2091ec18f913993523 | support compute per-category AP and PR curve |
[
{
"change_type": "MODIFY",
"old_path": "dygraph/ppdet/engine/trainer.py",
"new_path": "dygraph/ppdet/engine/trainer.py",
"diff": "@@ -115,19 +115,23 @@ class Trainer(object):\nif self.mode == 'test':\nself._metrics = []\nreturn\n+ classwise = self.cfg['classwise'] if 'classwise' in self.cfg else False\nif self.cfg.metric == 'COCO':\n# TODO: bias should be unified\nbias = self.cfg['bias'] if 'bias' in self.cfg else 0\nself._metrics = [\nCOCOMetric(\n- anno_file=self.dataset.get_anno(), bias=bias)\n+ anno_file=self.dataset.get_anno(),\n+ classwise=classwise,\n+ bias=bias)\n]\nelif self.cfg.metric == 'VOC':\nself._metrics = [\nVOCMetric(\n- anno_file=self.dataset.get_anno(),\n+ label_list=self.dataset.get_label_list(),\nclass_num=self.cfg.num_classes,\n- map_type=self.cfg.map_type)\n+ map_type=self.cfg.map_type,\n+ classwise=classwise)\n]\nelif self.cfg.metric == 'WiderFace':\nmulti_scale = self.cfg.multi_scale_eval if 'multi_scale_eval' in self.cfg else True\n"
},
{
"change_type": "MODIFY",
"old_path": "dygraph/ppdet/metrics/coco_utils.py",
"new_path": "dygraph/ppdet/metrics/coco_utils.py",
"diff": "@@ -17,8 +17,12 @@ from __future__ import division\nfrom __future__ import print_function\nimport os\n+import sys\n+import numpy as np\n+import itertools\nfrom ppdet.py_op.post_process import get_det_res, get_seg_res, get_solov2_segm_res\n+from ppdet.metrics.map_utils import draw_pr_curve\nfrom ppdet.utils.logger import setup_logger\nlogger = setup_logger(__name__)\n@@ -59,7 +63,8 @@ def cocoapi_eval(jsonfile,\nstyle,\ncoco_gt=None,\nanno_file=None,\n- max_dets=(100, 300, 1000)):\n+ max_dets=(100, 300, 1000),\n+ classwise=False):\n\"\"\"\nArgs:\njsonfile: Evaluation json file, eg: bbox.json, mask.json.\n@@ -68,6 +73,7 @@ def cocoapi_eval(jsonfile,\neg: coco_gt = COCO(anno_file)\nanno_file: COCO annotations file.\nmax_dets: COCO evaluation maxDets.\n+ classwise: whether per-category AP and draw P-R Curve or not.\n\"\"\"\nassert coco_gt != None or anno_file != None\nfrom pycocotools.coco import COCO\n@@ -86,4 +92,51 @@ def cocoapi_eval(jsonfile,\ncoco_eval.evaluate()\ncoco_eval.accumulate()\ncoco_eval.summarize()\n+ if classwise:\n+ # Compute per-category AP and PR curve\n+ try:\n+ from terminaltables import AsciiTable\n+ except Exception as e:\n+ logger.error(\n+ 'terminaltables not found, plaese install terminaltables. '\n+ 'for example: `pip install terminaltables`.')\n+ raise e\n+ precisions = coco_eval.eval['precision']\n+ cat_ids = coco_gt.getCatIds()\n+ # precision: (iou, recall, cls, area range, max dets)\n+ assert len(cat_ids) == precisions.shape[2]\n+ results_per_category = []\n+ for idx, catId in enumerate(cat_ids):\n+ # area range index 0: all area ranges\n+ # max dets index -1: typically 100 per image\n+ nm = coco_gt.loadCats(catId)[0]\n+ precision = precisions[:, :, idx, 0, -1]\n+ precision = precision[precision > -1]\n+ if precision.size:\n+ ap = np.mean(precision)\n+ else:\n+ ap = float('nan')\n+ results_per_category.append(\n+ (str(nm[\"name\"]), '{:0.3f}'.format(float(ap))))\n+ pr_array = precisions[0, :, idx, 0, 2]\n+ recall_array = np.arange(0.0, 1.01, 0.01)\n+ draw_pr_curve(\n+ pr_array,\n+ recall_array,\n+ out_dir=style + '_pr_curve',\n+ file_name='{}_precision_recall_curve.jpg'.format(nm[\"name\"]))\n+\n+ num_columns = min(6, len(results_per_category) * 2)\n+ results_flatten = list(itertools.chain(*results_per_category))\n+ headers = ['category', 'AP'] * (num_columns // 2)\n+ results_2d = itertools.zip_longest(\n+ * [results_flatten[i::num_columns] for i in range(num_columns)])\n+ table_data = [headers]\n+ table_data += [result for result in results_2d]\n+ table = AsciiTable(table_data)\n+ logger.info('Per-category of {} AP: \\n{}'.format(style, table.table))\n+ logger.info(\"per-category PR curve has output to {} folder.\".format(\n+ style + '_pr_curve'))\n+ # flush coco evaluation result\n+ sys.stdout.flush()\nreturn coco_eval.stats\n"
},
{
"change_type": "MODIFY",
"old_path": "dygraph/ppdet/metrics/map_utils.py",
"new_path": "dygraph/ppdet/metrics/map_utils.py",
"diff": "@@ -17,13 +17,42 @@ from __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n+import os\nimport sys\nimport numpy as np\n+import itertools\nfrom ppdet.utils.logger import setup_logger\nlogger = setup_logger(__name__)\n-__all__ = ['bbox_area', 'jaccard_overlap', 'prune_zero_padding', 'DetectionMAP']\n+__all__ = [\n+ 'draw_pr_curve', 'bbox_area', 'jaccard_overlap', 'prune_zero_padding',\n+ 'DetectionMAP'\n+]\n+\n+\n+def draw_pr_curve(precision,\n+ recall,\n+ iou=0.5,\n+ out_dir='pr_curve',\n+ file_name='precision_recall_curve.jpg'):\n+ if not os.path.exists(out_dir):\n+ os.makedirs(out_dir)\n+ output_path = os.path.join(out_dir, file_name)\n+ try:\n+ import matplotlib.pyplot as plt\n+ except Exception as e:\n+ logger.error('Matplotlib not found, plaese install matplotlib.'\n+ 'for example: `pip install matplotlib`.')\n+ raise e\n+ plt.cla()\n+ plt.figure('P-R Curve')\n+ plt.title('Precision/Recall Curve(IoU={})'.format(iou))\n+ plt.xlabel('Recall')\n+ plt.ylabel('Precision')\n+ plt.grid(True)\n+ plt.plot(recall, precision)\n+ plt.savefig(output_path)\ndef bbox_area(bbox, is_bbox_normalized):\n@@ -84,6 +113,8 @@ class DetectionMAP(object):\nis normalized to range[0, 1]. Default False.\nevaluate_difficult (bool): whether to evaluate\ndifficult bounding boxes. Default False.\n+ classwise (bool): whether per-category AP and draw\n+ P-R Curve or not.\n\"\"\"\ndef __init__(self,\n@@ -91,7 +122,9 @@ class DetectionMAP(object):\noverlap_thresh=0.5,\nmap_type='11point',\nis_bbox_normalized=False,\n- evaluate_difficult=False):\n+ evaluate_difficult=False,\n+ catid2name=None,\n+ classwise=False):\nself.class_num = class_num\nself.overlap_thresh = overlap_thresh\nassert map_type in ['11point', 'integral'], \\\n@@ -100,6 +133,10 @@ class DetectionMAP(object):\nself.map_type = map_type\nself.is_bbox_normalized = is_bbox_normalized\nself.evaluate_difficult = evaluate_difficult\n+ self.classwise = classwise\n+ self.classes = []\n+ for cname in catid2name.values():\n+ self.classes.append(cname)\nself.reset()\ndef update(self, bbox, score, label, gt_box, gt_label, difficult=None):\n@@ -155,6 +192,7 @@ class DetectionMAP(object):\n\"\"\"\nmAP = 0.\nvalid_cnt = 0\n+ eval_results = []\nfor score_pos, count in zip(self.class_score_poss,\nself.class_gt_counts):\nif count == 0: continue\n@@ -170,6 +208,7 @@ class DetectionMAP(object):\nprecision.append(float(ac_tp) / (ac_tp + ac_fp))\nrecall.append(float(ac_tp) / count)\n+ one_class_ap = 0.0\nif self.map_type == '11point':\nmax_precisions = [0.] 
* 11\nstart_idx = len(precision) - 1\n@@ -183,23 +222,29 @@ class DetectionMAP(object):\nelse:\nif max_precisions[j] < precision[i]:\nmax_precisions[j] = precision[i]\n- mAP += sum(max_precisions) / 11.\n+ one_class_ap = sum(max_precisions) / 11.\n+ mAP += one_class_ap\nvalid_cnt += 1\nelif self.map_type == 'integral':\nimport math\n- ap = 0.\nprev_recall = 0.\nfor i in range(len(precision)):\nrecall_gap = math.fabs(recall[i] - prev_recall)\nif recall_gap > 1e-6:\n- ap += precision[i] * recall_gap\n+ one_class_ap += precision[i] * recall_gap\nprev_recall = recall[i]\n- mAP += ap\n+ mAP += one_class_ap\nvalid_cnt += 1\nelse:\nlogger.error(\"Unspported mAP type {}\".format(self.map_type))\nsys.exit(1)\n-\n+ eval_results.append({\n+ 'class': self.classes[valid_cnt - 1],\n+ 'ap': one_class_ap,\n+ 'precision': precision,\n+ 'recall': recall,\n+ })\n+ self.eval_results = eval_results\nself.mAP = mAP / float(valid_cnt) if valid_cnt > 0 else mAP\ndef get_map(self):\n@@ -208,6 +253,39 @@ class DetectionMAP(object):\n\"\"\"\nif self.mAP is None:\nlogger.error(\"mAP is not calculated.\")\n+ if self.classwise:\n+ # Compute per-category AP and PR curve\n+ try:\n+ from terminaltables import AsciiTable\n+ except Exception as e:\n+ logger.error(\n+ 'terminaltables not found, plaese install terminaltables. '\n+ 'for example: `pip install terminaltables`.')\n+ raise e\n+ results_per_category = []\n+ for eval_result in self.eval_results:\n+ results_per_category.append(\n+ (str(eval_result['class']),\n+ '{:0.3f}'.format(float(eval_result['ap']))))\n+ draw_pr_curve(\n+ eval_result['precision'],\n+ eval_result['recall'],\n+ out_dir='voc_pr_curve',\n+ file_name='{}_precision_recall_curve.jpg'.format(\n+ eval_result['class']))\n+\n+ num_columns = min(6, len(results_per_category) * 2)\n+ results_flatten = list(itertools.chain(*results_per_category))\n+ headers = ['category', 'AP'] * (num_columns // 2)\n+ results_2d = itertools.zip_longest(* [\n+ results_flatten[i::num_columns] for i in range(num_columns)\n+ ])\n+ table_data = [headers]\n+ table_data += [result for result in results_2d]\n+ table = AsciiTable(table_data)\n+ logger.info('Per-category of VOC AP: \\n{}'.format(table.table))\n+ logger.info(\n+ \"per-category PR curve has output to voc_pr_curve folder.\")\nreturn self.mAP\ndef _get_tp_fp_accum(self, score_pos_list):\n"
},
{
"change_type": "MODIFY",
"old_path": "dygraph/ppdet/metrics/metrics.py",
"new_path": "dygraph/ppdet/metrics/metrics.py",
"diff": "@@ -63,6 +63,7 @@ class COCOMetric(Metric):\n\"anno_file {} not a file\".format(anno_file)\nself.anno_file = anno_file\nself.clsid2catid, self.catid2name = get_categories('COCO', anno_file)\n+ self.classwise = kwargs.get('classwise', False)\n# TODO: bias should be unified\nself.bias = kwargs.get('bias', 0)\nself.reset()\n@@ -98,7 +99,10 @@ class COCOMetric(Metric):\nlogger.info('The bbox result is saved to bbox.json.')\nbbox_stats = cocoapi_eval(\n- 'bbox.json', 'bbox', anno_file=self.anno_file)\n+ 'bbox.json',\n+ 'bbox',\n+ anno_file=self.anno_file,\n+ classwise=self.classwise)\nself.eval_results['bbox'] = bbox_stats\nsys.stdout.flush()\n@@ -108,7 +112,10 @@ class COCOMetric(Metric):\nlogger.info('The mask result is saved to mask.json.')\nseg_stats = cocoapi_eval(\n- 'mask.json', 'segm', anno_file=self.anno_file)\n+ 'mask.json',\n+ 'segm',\n+ anno_file=self.anno_file,\n+ classwise=self.classwise)\nself.eval_results['mask'] = seg_stats\nsys.stdout.flush()\n@@ -118,7 +125,10 @@ class COCOMetric(Metric):\nlogger.info('The segm result is saved to segm.json.')\nseg_stats = cocoapi_eval(\n- 'segm.json', 'segm', anno_file=self.anno_file)\n+ 'segm.json',\n+ 'segm',\n+ anno_file=self.anno_file,\n+ classwise=self.classwise)\nself.eval_results['mask'] = seg_stats\nsys.stdout.flush()\n@@ -131,16 +141,16 @@ class COCOMetric(Metric):\nclass VOCMetric(Metric):\ndef __init__(self,\n- anno_file,\n+ label_list,\nclass_num=20,\noverlap_thresh=0.5,\nmap_type='11point',\nis_bbox_normalized=False,\n- evaluate_difficult=False):\n- assert os.path.isfile(anno_file), \\\n- \"anno_file {} not a file\".format(anno_file)\n- self.anno_file = anno_file\n- self.clsid2catid, self.catid2name = get_categories('VOC', anno_file)\n+ evaluate_difficult=False,\n+ classwise=False):\n+ assert os.path.isfile(label_list), \\\n+ \"label_list {} not a file\".format(label_list)\n+ self.clsid2catid, self.catid2name = get_categories('VOC', label_list)\nself.overlap_thresh = overlap_thresh\nself.map_type = map_type\n@@ -150,7 +160,9 @@ class VOCMetric(Metric):\noverlap_thresh=overlap_thresh,\nmap_type=map_type,\nis_bbox_normalized=is_bbox_normalized,\n- evaluate_difficult=evaluate_difficult)\n+ evaluate_difficult=evaluate_difficult,\n+ catid2name=self.catid2name,\n+ classwise=classwise)\nself.reset()\n"
},
{
"change_type": "MODIFY",
"old_path": "dygraph/requirements.txt",
"new_path": "dygraph/requirements.txt",
"diff": "@@ -5,3 +5,4 @@ opencv-python\nPyYAML\nshapely\nscipy\n+terminaltables\n\\ No newline at end of file\n"
},
{
"change_type": "MODIFY",
"old_path": "dygraph/tools/eval.py",
"new_path": "dygraph/tools/eval.py",
"diff": "@@ -64,6 +64,11 @@ def parse_args():\naction=\"store_true\",\nhelp=\"whether add bias or not while getting w and h\")\n+ parser.add_argument(\n+ \"--classwise\",\n+ action=\"store_true\",\n+ help=\"whether per-category AP and draw P-R Curve or not.\")\n+\nargs = parser.parse_args()\nreturn args\n@@ -88,6 +93,7 @@ def main():\ncfg = load_config(FLAGS.config)\n# TODO: bias should be unified\ncfg['bias'] = 1 if FLAGS.bias else 0\n+ cfg['classwise'] = True if FLAGS.classwise else False\nmerge_config(FLAGS.opt)\nif FLAGS.slim_config:\nslim_cfg = load_config(FLAGS.slim_config)\n"
}
] |
Python
|
Apache License 2.0
|
paddlepaddle/paddledetection
|
support compute per-category AP and PR curve (#2346)
|
499,400 |
15.03.2021 17:35:29
| -28,800 |
8635791f0699a98e70de7b3b096ab60c9cdce443
|
Fix pruning export
|
[
{
"change_type": "MODIFY",
"old_path": "slim/prune/export_model.py",
"new_path": "slim/prune/export_model.py",
"diff": "@@ -63,6 +63,9 @@ def main():\ntest_fetches = model.test(feed_vars)\ninfer_prog = infer_prog.clone(True)\n+ exe.run(startup_prog)\n+ checkpoint.load_checkpoint(exe, infer_prog, cfg.weights)\n+\npruned_params = FLAGS.pruned_params\nassert (\nFLAGS.pruned_params is not None\n@@ -90,13 +93,9 @@ def main():\nlogger.info(\"pruned FLOPS: {}\".format(\nfloat(base_flops - pruned_flops) / base_flops))\n- exe.run(startup_prog)\n- checkpoint.load_checkpoint(exe, infer_prog, cfg.weights)\n-\ndump_infer_config(FLAGS, cfg)\nsave_infer_model(FLAGS, exe, feed_vars, test_fetches, infer_prog)\n-\nif __name__ == '__main__':\nenable_static_mode()\nparser = ArgsParser()\n"
}
] |
Python
|
Apache License 2.0
|
paddlepaddle/paddledetection
|
Fix pruning export (#2350)
|
499,304 |
17.03.2021 12:45:46
| -28,800 |
4f6ffb4092b896f39191ab46ef646c16d86aa3a8
|
fix use vdl bug in infer and export model
|
[
{
"change_type": "MODIFY",
"old_path": "dygraph/ppdet/engine/trainer.py",
"new_path": "dygraph/ppdet/engine/trainer.py",
"diff": "@@ -101,7 +101,7 @@ class Trainer(object):\ndef _init_callbacks(self):\nif self.mode == 'train':\nself._callbacks = [LogPrinter(self), Checkpointer(self)]\n- if self.cfg.use_vdl:\n+ if 'use_vdl' in self.cfg and self.cfg.use_vdl:\nself._callbacks.append(VisualDLWriter(self))\nself._compose_callback = ComposeCallback(self._callbacks)\nelif self.mode == 'eval':\n@@ -109,7 +109,7 @@ class Trainer(object):\nif self.cfg.metric == 'WiderFace':\nself._callbacks.append(WiferFaceEval(self))\nself._compose_callback = ComposeCallback(self._callbacks)\n- elif self.mode == 'test' and self.cfg.use_vdl:\n+ elif self.mode == 'test' and 'use_vdl' in self.cfg and self.cfg.use_vdl:\nself._callbacks = [VisualDLWriter(self)]\nself._compose_callback = ComposeCallback(self._callbacks)\nelse:\n@@ -349,6 +349,7 @@ class Trainer(object):\nint(outs['im_id']), catid2name,\ndraw_threshold)\nself.status['result_image'] = np.array(image.copy())\n+ if self._compose_callback:\nself._compose_callback.on_step_end(self.status)\n# save image with detection\nsave_name = self._get_save_image_name(output_dir, image_path)\n"
}
] |
Python
|
Apache License 2.0
|
paddlepaddle/paddledetection
|
fix use vdl bug in infer and export model (#2365)
|
499,301 |
18.03.2021 09:43:58
| -28,800 |
2b8638d1d6fdf4b17276594c8c0eb35fdbe0652d
|
fix errors of usage and some names
|
[
{
"change_type": "MODIFY",
"old_path": "dygraph/ppdet/data/source/dataset.py",
"new_path": "dygraph/ppdet/data/source/dataset.py",
"diff": "@@ -79,7 +79,7 @@ class DetDataset(Dataset):\nself._epoch = epoch_id\ndef parse_dataset(self, ):\n- raise NotImplemented(\n+ raise NotImplementedError(\n\"Need to implement parse_dataset method of Dataset\")\ndef get_anno(self):\n@@ -94,13 +94,13 @@ def _is_valid_file(f, extensions=('.jpg', '.jpeg', '.png', '.bmp')):\ndef _make_dataset(dir):\ndir = os.path.expanduser(dir)\n- if not os.path.isdir(d):\n+ if not os.path.isdir(dir):\nraise ('{} should be a dir'.format(dir))\nimages = []\nfor root, _, fnames in sorted(os.walk(dir, followlinks=True)):\nfor fname in sorted(fnames):\npath = os.path.join(root, fname)\n- if is_valid_file(path):\n+ if _is_valid_file(path):\nimages.append(path)\nreturn images\n"
}
] |
Python
|
Apache License 2.0
|
paddlepaddle/paddledetection
|
fix errors of usage and some names (#2351)
|
499,395 |
18.03.2021 14:45:59
| -28,800 |
626d2fa159efb392bac4102f8acd0148b24290f5
|
support nhwc data_format in yolov3
|
[
{
"change_type": "MODIFY",
"old_path": "dygraph/ppdet/modeling/architectures/meta_arch.py",
"new_path": "dygraph/ppdet/modeling/architectures/meta_arch.py",
"diff": "@@ -12,10 +12,14 @@ __all__ = ['BaseArch']\n@register\nclass BaseArch(nn.Layer):\n- def __init__(self):\n+ def __init__(self, data_format='NCHW'):\nsuper(BaseArch, self).__init__()\n+ self.data_format = data_format\ndef forward(self, inputs):\n+ if self.data_format == 'NHWC':\n+ image = inputs['image']\n+ inputs['image'] = paddle.transpose(image, [0, 2, 3, 1])\nself.inputs = inputs\nself.model_arch()\n"
},
{
"change_type": "MODIFY",
"old_path": "dygraph/ppdet/modeling/architectures/yolo.py",
"new_path": "dygraph/ppdet/modeling/architectures/yolo.py",
"diff": "@@ -11,14 +11,16 @@ __all__ = ['YOLOv3']\n@register\nclass YOLOv3(BaseArch):\n__category__ = 'architecture'\n+ __shared__ = ['data_format']\n__inject__ = ['post_process']\ndef __init__(self,\nbackbone='DarkNet',\nneck='YOLOv3FPN',\nyolo_head='YOLOv3Head',\n- post_process='BBoxPostProcess'):\n- super(YOLOv3, self).__init__()\n+ post_process='BBoxPostProcess',\n+ data_format='NCHW'):\n+ super(YOLOv3, self).__init__(data_format=data_format)\nself.backbone = backbone\nself.neck = neck\nself.yolo_head = yolo_head\n"
},
{
"change_type": "MODIFY",
"old_path": "dygraph/ppdet/modeling/backbones/darknet.py",
"new_path": "dygraph/ppdet/modeling/backbones/darknet.py",
"diff": "@@ -35,7 +35,8 @@ class ConvBNLayer(nn.Layer):\nnorm_type='bn',\nnorm_decay=0.,\nact=\"leaky\",\n- name=None):\n+ name=None,\n+ data_format='NCHW'):\nsuper(ConvBNLayer, self).__init__()\nself.conv = nn.Conv2D(\n@@ -46,9 +47,14 @@ class ConvBNLayer(nn.Layer):\npadding=padding,\ngroups=groups,\nweight_attr=ParamAttr(name=name + '.conv.weights'),\n+ data_format=data_format,\nbias_attr=False)\nself.batch_norm = batch_norm(\n- ch_out, norm_type=norm_type, norm_decay=norm_decay, name=name)\n+ ch_out,\n+ norm_type=norm_type,\n+ norm_decay=norm_decay,\n+ name=name,\n+ data_format=data_format)\nself.act = act\ndef forward(self, inputs):\n@@ -68,7 +74,8 @@ class DownSample(nn.Layer):\npadding=1,\nnorm_type='bn',\nnorm_decay=0.,\n- name=None):\n+ name=None,\n+ data_format='NCHW'):\nsuper(DownSample, self).__init__()\n@@ -80,6 +87,7 @@ class DownSample(nn.Layer):\npadding=padding,\nnorm_type=norm_type,\nnorm_decay=norm_decay,\n+ data_format=data_format,\nname=name)\nself.ch_out = ch_out\n@@ -89,7 +97,13 @@ class DownSample(nn.Layer):\nclass BasicBlock(nn.Layer):\n- def __init__(self, ch_in, ch_out, norm_type='bn', norm_decay=0., name=None):\n+ def __init__(self,\n+ ch_in,\n+ ch_out,\n+ norm_type='bn',\n+ norm_decay=0.,\n+ name=None,\n+ data_format='NCHW'):\nsuper(BasicBlock, self).__init__()\nself.conv1 = ConvBNLayer(\n@@ -100,6 +114,7 @@ class BasicBlock(nn.Layer):\npadding=0,\nnorm_type=norm_type,\nnorm_decay=norm_decay,\n+ data_format=data_format,\nname=name + '.0')\nself.conv2 = ConvBNLayer(\nch_in=ch_out,\n@@ -109,6 +124,7 @@ class BasicBlock(nn.Layer):\npadding=1,\nnorm_type=norm_type,\nnorm_decay=norm_decay,\n+ data_format=data_format,\nname=name + '.1')\ndef forward(self, inputs):\n@@ -125,7 +141,8 @@ class Blocks(nn.Layer):\ncount,\nnorm_type='bn',\nnorm_decay=0.,\n- name=None):\n+ name=None,\n+ data_format='NCHW'):\nsuper(Blocks, self).__init__()\nself.basicblock0 = BasicBlock(\n@@ -133,6 +150,7 @@ class Blocks(nn.Layer):\nch_out,\nnorm_type=norm_type,\nnorm_decay=norm_decay,\n+ data_format=data_format,\nname=name + '.0')\nself.res_out_list = []\nfor i in range(1, count):\n@@ -144,6 +162,7 @@ class Blocks(nn.Layer):\nch_out,\nnorm_type=norm_type,\nnorm_decay=norm_decay,\n+ data_format=data_format,\nname=block_name))\nself.res_out_list.append(res_out)\nself.ch_out = ch_out\n@@ -161,7 +180,7 @@ DarkNet_cfg = {53: ([1, 2, 8, 8, 4])}\n@register\n@serializable\nclass DarkNet(nn.Layer):\n- __shared__ = ['norm_type']\n+ __shared__ = ['norm_type', 'data_format']\ndef __init__(self,\ndepth=53,\n@@ -169,7 +188,8 @@ class DarkNet(nn.Layer):\nreturn_idx=[2, 3, 4],\nnum_stages=5,\nnorm_type='bn',\n- norm_decay=0.):\n+ norm_decay=0.,\n+ data_format='NCHW'):\nsuper(DarkNet, self).__init__()\nself.depth = depth\nself.freeze_at = freeze_at\n@@ -185,6 +205,7 @@ class DarkNet(nn.Layer):\npadding=1,\nnorm_type=norm_type,\nnorm_decay=norm_decay,\n+ data_format=data_format,\nname='yolo_input')\nself.downsample0 = DownSample(\n@@ -192,6 +213,7 @@ class DarkNet(nn.Layer):\nch_out=32 * 2,\nnorm_type=norm_type,\nnorm_decay=norm_decay,\n+ data_format=data_format,\nname='yolo_input.downsample')\nself._out_channels = []\n@@ -208,6 +230,7 @@ class DarkNet(nn.Layer):\nstage,\nnorm_type=norm_type,\nnorm_decay=norm_decay,\n+ data_format=data_format,\nname=name))\nself.darknet_conv_block_list.append(conv_block)\nif i in return_idx:\n@@ -221,6 +244,7 @@ class DarkNet(nn.Layer):\nch_out=32 * (2**(i + 2)),\nnorm_type=norm_type,\nnorm_decay=norm_decay,\n+ 
data_format=data_format,\nname=down_name))\nself.downsample_list.append(downsample)\n"
},
{
"change_type": "MODIFY",
"old_path": "dygraph/ppdet/modeling/heads/yolo_head.py",
"new_path": "dygraph/ppdet/modeling/heads/yolo_head.py",
"diff": "@@ -16,7 +16,7 @@ def _de_sigmoid(x, eps=1e-7):\n@register\nclass YOLOv3Head(nn.Layer):\n- __shared__ = ['num_classes']\n+ __shared__ = ['num_classes', 'data_format']\n__inject__ = ['loss']\ndef __init__(self,\n@@ -26,7 +26,8 @@ class YOLOv3Head(nn.Layer):\nnum_classes=80,\nloss='YOLOv3Loss',\niou_aware=False,\n- iou_aware_factor=0.4):\n+ iou_aware_factor=0.4,\n+ data_format='NCHW'):\nsuper(YOLOv3Head, self).__init__()\nself.num_classes = num_classes\nself.loss = loss\n@@ -36,6 +37,7 @@ class YOLOv3Head(nn.Layer):\nself.parse_anchor(anchors, anchor_masks)\nself.num_outputs = len(self.anchors)\n+ self.data_format = data_format\nself.yolo_outputs = []\nfor i in range(len(self.anchors)):\n@@ -53,6 +55,7 @@ class YOLOv3Head(nn.Layer):\nkernel_size=1,\nstride=1,\npadding=0,\n+ data_format=data_format,\nweight_attr=ParamAttr(name=name + '.conv.weights'),\nbias_attr=ParamAttr(\nname=name + '.conv.bias', regularizer=L2Decay(0.))))\n@@ -73,6 +76,8 @@ class YOLOv3Head(nn.Layer):\nyolo_outputs = []\nfor i, feat in enumerate(feats):\nyolo_output = self.yolo_outputs[i](feat)\n+ if self.data_format == 'NHWC':\n+ yolo_output = paddle.transpose(yolo_output, [0, 3, 1, 2])\nyolo_outputs.append(yolo_output)\nif self.training:\n"
},
{
"change_type": "MODIFY",
"old_path": "dygraph/ppdet/modeling/necks/yolo_fpn.py",
"new_path": "dygraph/ppdet/modeling/necks/yolo_fpn.py",
"diff": "@@ -26,7 +26,7 @@ __all__ = ['YOLOv3FPN', 'PPYOLOFPN']\nclass YoloDetBlock(nn.Layer):\n- def __init__(self, ch_in, channel, norm_type, name):\n+ def __init__(self, ch_in, channel, norm_type, name, data_format='NCHW'):\nsuper(YoloDetBlock, self).__init__()\nself.ch_in = ch_in\nself.channel = channel\n@@ -51,6 +51,7 @@ class YoloDetBlock(nn.Layer):\nfilter_size=filter_size,\npadding=(filter_size - 1) // 2,\nnorm_type=norm_type,\n+ data_format=data_format,\nname=name + post_name))\nself.tip = ConvBNLayer(\n@@ -59,6 +60,7 @@ class YoloDetBlock(nn.Layer):\nfilter_size=3,\npadding=1,\nnorm_type=norm_type,\n+ data_format=data_format,\nname=name + '.tip')\ndef forward(self, inputs):\n@@ -68,7 +70,14 @@ class YoloDetBlock(nn.Layer):\nclass SPP(nn.Layer):\n- def __init__(self, ch_in, ch_out, k, pool_size, norm_type, name):\n+ def __init__(self,\n+ ch_in,\n+ ch_out,\n+ k,\n+ pool_size,\n+ norm_type,\n+ name,\n+ data_format='NCHW'):\nsuper(SPP, self).__init__()\nself.pool = []\nfor size in pool_size:\n@@ -78,10 +87,17 @@ class SPP(nn.Layer):\nkernel_size=size,\nstride=1,\npadding=size // 2,\n+ data_format=data_format,\nceil_mode=False))\nself.pool.append(pool)\nself.conv = ConvBNLayer(\n- ch_in, ch_out, k, padding=k // 2, norm_type=norm_type, name=name)\n+ ch_in,\n+ ch_out,\n+ k,\n+ padding=k // 2,\n+ norm_type=norm_type,\n+ name=name,\n+ data_format=data_format)\ndef forward(self, x):\nouts = [x]\n@@ -93,30 +109,46 @@ class SPP(nn.Layer):\nclass DropBlock(nn.Layer):\n- def __init__(self, block_size, keep_prob, name):\n+ def __init__(self, block_size, keep_prob, name, data_format='NCHW'):\nsuper(DropBlock, self).__init__()\nself.block_size = block_size\nself.keep_prob = keep_prob\nself.name = name\n+ self.data_format = data_format\ndef forward(self, x):\nif not self.training or self.keep_prob == 1:\nreturn x\nelse:\ngamma = (1. - self.keep_prob) / (self.block_size**2)\n- for s in x.shape[2:]:\n+ if self.data_format == 'NCHW':\n+ shape = x.shape[2:]\n+ else:\n+ shape = x.shape[1:3]\n+ for s in shape:\ngamma *= s / (s - self.block_size + 1)\nmatrix = paddle.cast(paddle.rand(x.shape, x.dtype) < gamma, x.dtype)\nmask_inv = F.max_pool2d(\n- matrix, self.block_size, stride=1, padding=self.block_size // 2)\n+ matrix,\n+ self.block_size,\n+ stride=1,\n+ padding=self.block_size // 2,\n+ data_format=self.data_format)\nmask = 1. - mask_inv\ny = x * mask * (mask.numel() / mask.sum())\nreturn y\nclass CoordConv(nn.Layer):\n- def __init__(self, ch_in, ch_out, filter_size, padding, norm_type, name):\n+ def __init__(self,\n+ ch_in,\n+ ch_out,\n+ filter_size,\n+ padding,\n+ norm_type,\n+ name,\n+ data_format='NCHW'):\nsuper(CoordConv, self).__init__()\nself.conv = ConvBNLayer(\nch_in + 2,\n@@ -124,36 +156,53 @@ class CoordConv(nn.Layer):\nfilter_size=filter_size,\npadding=padding,\nnorm_type=norm_type,\n+ data_format=data_format,\nname=name)\n+ self.data_format = data_format\ndef forward(self, x):\nb = x.shape[0]\n+ if self.data_format == 'NCHW':\nh = x.shape[2]\nw = x.shape[3]\n+ else:\n+ h = x.shape[1]\n+ w = x.shape[2]\ngx = paddle.arange(w, dtype='float32') / (w - 1.) * 2.0 - 1.\n+ if self.data_format == 'NCHW':\ngx = gx.reshape([1, 1, 1, w]).expand([b, 1, h, w])\n+ else:\n+ gx = gx.reshape([1, 1, w, 1]).expand([b, h, w, 1])\ngx.stop_gradient = True\ngy = paddle.arange(h, dtype='float32') / (h - 1.) 
* 2.0 - 1.\n+ if self.data_format == 'NCHW':\ngy = gy.reshape([1, 1, h, 1]).expand([b, 1, h, w])\n+ else:\n+ gy = gy.reshape([1, h, 1, 1]).expand([b, h, w, 1])\ngy.stop_gradient = True\n+ if self.data_format == 'NCHW':\ny = paddle.concat([x, gx, gy], axis=1)\n+ else:\n+ y = paddle.concat([x, gx, gy], axis=-1)\ny = self.conv(y)\nreturn y\nclass PPYOLODetBlock(nn.Layer):\n- def __init__(self, cfg, name):\n+ def __init__(self, cfg, name, data_format='NCHW'):\nsuper(PPYOLODetBlock, self).__init__()\nself.conv_module = nn.Sequential()\nfor idx, (conv_name, layer, args, kwargs) in enumerate(cfg[:-1]):\n- kwargs.update(name='{}.{}'.format(name, conv_name))\n+ kwargs.update(\n+ name='{}.{}'.format(name, conv_name), data_format=data_format)\nself.conv_module.add_sublayer(conv_name, layer(*args, **kwargs))\nconv_name, layer, args, kwargs = cfg[-1]\n- kwargs.update(name='{}.{}'.format(name, conv_name))\n+ kwargs.update(\n+ name='{}.{}'.format(name, conv_name), data_format=data_format)\nself.tip = layer(*args, **kwargs)\ndef forward(self, inputs):\n@@ -165,9 +214,12 @@ class PPYOLODetBlock(nn.Layer):\n@register\n@serializable\nclass YOLOv3FPN(nn.Layer):\n- __shared__ = ['norm_type']\n+ __shared__ = ['norm_type', 'data_format']\n- def __init__(self, in_channels=[256, 512, 1024], norm_type='bn'):\n+ def __init__(self,\n+ in_channels=[256, 512, 1024],\n+ norm_type='bn',\n+ data_format='NCHW'):\nsuper(YOLOv3FPN, self).__init__()\nassert len(in_channels) > 0, \"in_channels length should > 0\"\nself.in_channels = in_channels\n@@ -176,6 +228,7 @@ class YOLOv3FPN(nn.Layer):\nself._out_channels = []\nself.yolo_blocks = []\nself.routes = []\n+ self.data_format = data_format\nfor i in range(self.num_blocks):\nname = 'yolo_block.{}'.format(i)\nin_channel = in_channels[-i - 1]\n@@ -187,6 +240,7 @@ class YOLOv3FPN(nn.Layer):\nin_channel,\nchannel=512 // (2**i),\nnorm_type=norm_type,\n+ data_format=data_format,\nname=name))\nself.yolo_blocks.append(yolo_block)\n# tip layer output channel doubled\n@@ -203,6 +257,7 @@ class YOLOv3FPN(nn.Layer):\nstride=1,\npadding=0,\nnorm_type=norm_type,\n+ data_format=data_format,\nname=name))\nself.routes.append(route)\n@@ -212,13 +267,17 @@ class YOLOv3FPN(nn.Layer):\nyolo_feats = []\nfor i, block in enumerate(blocks):\nif i > 0:\n+ if self.data_format == 'NCHW':\nblock = paddle.concat([route, block], axis=1)\n+ else:\n+ block = paddle.concat([route, block], axis=-1)\nroute, tip = self.yolo_blocks[i](block)\nyolo_feats.append(tip)\nif i < self.num_blocks - 1:\nroute = self.routes[i](route)\n- route = F.interpolate(route, scale_factor=2.)\n+ route = F.interpolate(\n+ route, scale_factor=2., data_format=self.data_format)\nreturn yolo_feats\n@@ -234,9 +293,13 @@ class YOLOv3FPN(nn.Layer):\n@register\n@serializable\nclass PPYOLOFPN(nn.Layer):\n- __shared__ = ['norm_type']\n+ __shared__ = ['norm_type', 'data_format']\n- def __init__(self, in_channels=[512, 1024, 2048], norm_type='bn', **kwargs):\n+ def __init__(self,\n+ in_channels=[512, 1024, 2048],\n+ norm_type='bn',\n+ data_format='NCHW',\n+ **kwargs):\nsuper(PPYOLOFPN, self).__init__()\nassert len(in_channels) > 0, \"in_channels length should > 0\"\nself.in_channels = in_channels\n@@ -332,6 +395,7 @@ class PPYOLOFPN(nn.Layer):\nstride=1,\npadding=0,\nnorm_type=norm_type,\n+ data_format=data_format,\nname=name))\nself.routes.append(route)\n@@ -341,13 +405,17 @@ class PPYOLOFPN(nn.Layer):\nyolo_feats = []\nfor i, block in enumerate(blocks):\nif i > 0:\n+ if self.data_format == 'NCHW':\nblock = paddle.concat([route, block], axis=1)\n+ 
else:\n+ block = paddle.concat([route, block], axis=-1)\nroute, tip = self.yolo_blocks[i](block)\nyolo_feats.append(tip)\nif i < self.num_blocks - 1:\nroute = self.routes[i](route)\n- route = F.interpolate(route, scale_factor=2.)\n+ route = F.interpolate(\n+ route, scale_factor=2., data_format=self.data_format)\nreturn yolo_feats\n"
},
{
"change_type": "MODIFY",
"old_path": "dygraph/ppdet/modeling/ops.py",
"new_path": "dygraph/ppdet/modeling/ops.py",
"diff": "@@ -44,7 +44,12 @@ __all__ = [\n]\n-def batch_norm(ch, norm_type='bn', norm_decay=0., initializer=None, name=None):\n+def batch_norm(ch,\n+ norm_type='bn',\n+ norm_decay=0.,\n+ initializer=None,\n+ name=None,\n+ data_format='NCHW'):\nbn_name = name + '.bn'\nif norm_type == 'sync_bn':\nbatch_norm = nn.SyncBatchNorm\n@@ -58,7 +63,8 @@ def batch_norm(ch, norm_type='bn', norm_decay=0., initializer=None, name=None):\ninitializer=initializer,\nregularizer=L2Decay(norm_decay)),\nbias_attr=ParamAttr(\n- name=bn_name + '.offset', regularizer=L2Decay(norm_decay)))\n+ name=bn_name + '.offset', regularizer=L2Decay(norm_decay)),\n+ data_format=data_format)\n@paddle.jit.not_to_static\n"
}
] |
Python
|
Apache License 2.0
|
paddlepaddle/paddledetection
|
support nhwc data_format in yolov3 (#2336)
|
499,333 |
19.03.2021 15:28:30
| -28,800 |
588d78b4bd3b4588805356e183080478df6a989e
|
cherry-pick fix cascade
|
[
{
"change_type": "MODIFY",
"old_path": "dygraph/ppdet/modeling/heads/bbox_head.py",
"new_path": "dygraph/ppdet/modeling/heads/bbox_head.py",
"diff": "@@ -265,6 +265,11 @@ class BBoxHead(nn.Layer):\nreg_name = 'loss_bbox_reg'\nloss_bbox = {}\n+ loss_weight = 1.\n+ if fg_inds.numel() == 0:\n+ fg_inds = paddle.zeros([1], dtype='int32')\n+ loss_weight = 0.\n+\nif cls_agnostic_bbox_reg:\nreg_delta = paddle.gather(deltas, fg_inds)\nelse:\n@@ -291,8 +296,8 @@ class BBoxHead(nn.Layer):\nloss_bbox_reg = paddle.abs(reg_delta - reg_target).sum(\n) / tgt_labels.shape[0]\n- loss_bbox[cls_name] = loss_bbox_cls\n- loss_bbox[reg_name] = loss_bbox_reg\n+ loss_bbox[cls_name] = loss_bbox_cls * loss_weight\n+ loss_bbox[reg_name] = loss_bbox_reg * loss_weight\nreturn loss_bbox\n"
},
{
"change_type": "MODIFY",
"old_path": "dygraph/ppdet/modeling/heads/cascade_head.py",
"new_path": "dygraph/ppdet/modeling/heads/cascade_head.py",
"diff": "@@ -196,16 +196,6 @@ class CascadeHead(BBoxHead):\nif self.training:\nrois, rois_num, targets = self.bbox_assigner(\nrois, rois_num, inputs, i, is_cascade=True)\n- tgt_labels = targets[0]\n- tgt_labels = paddle.concat(tgt_labels) if len(\n- tgt_labels) > 1 else tgt_labels[0]\n- tgt_labels.stop_gradient = True\n- fg_inds = paddle.nonzero(\n- paddle.logical_and(tgt_labels >= 0, tgt_labels <\n- self.num_classes)).flatten()\n- if fg_inds.numel() == 0:\n- targets_list.append(targets_list[-1])\n- else:\ntargets_list.append(targets)\nrois_feat = self.roi_extractor(body_feats, rois, rois_num)\n"
}
] |
Python
|
Apache License 2.0
|
paddlepaddle/paddledetection
|
cherry-pick fix cascade (#2381)
|
499,333 |
19.03.2021 20:50:19
| -28,800 |
a2f57beba49e9c65c03bb9fd83c58bcbbdc4fbf2
|
fix training in cascade_rcnn
|
[
{
"change_type": "MODIFY",
"old_path": "dygraph/ppdet/modeling/heads/cascade_head.py",
"new_path": "dygraph/ppdet/modeling/heads/cascade_head.py",
"diff": "@@ -227,6 +227,8 @@ class CascadeHead(BBoxHead):\nclip_box = clip_bbox(boxes_per_image, im_shape[i])\nif self.training:\nkeep = nonempty_bbox(clip_box)\n+ if keep.shape[0] == 0:\n+ keep = paddle.zeros([1], dtype='int32')\nclip_box = paddle.gather(clip_box, keep)\nrois.append(clip_box)\nrois_num = paddle.concat([paddle.shape(r)[0] for r in rois])\n"
}
] |
Python
|
Apache License 2.0
|
paddlepaddle/paddledetection
|
fix training in cascade_rcnn (#2386)
|
499,395 |
22.03.2021 21:49:03
| -28,800 |
118e8b75233651205630e9aa3d9fa1cc5944d2a2
|
fix data format in ppyolo
|
[
{
"change_type": "MODIFY",
"old_path": "dygraph/ppdet/modeling/necks/yolo_fpn.py",
"new_path": "dygraph/ppdet/modeling/necks/yolo_fpn.py",
"diff": "@@ -313,6 +313,7 @@ class PPYOLOFPN(nn.Layer):\nself.spp = kwargs.get('spp', False)\nself.conv_block_num = kwargs.get('conv_block_num', 2)\n+ self.data_format = data_format\nif self.coord_conv:\nConvLayer = CoordConv\nelse:\n"
}
] |
Python
|
Apache License 2.0
|
paddlepaddle/paddledetection
|
fix data format in ppyolo (#2403)
|
499,304 |
26.03.2021 19:51:11
| -28,800 |
9262d2767fef89044c7e7b3610b6b4a21b40638c
|
add SSLD model
|
[
{
"change_type": "MODIFY",
"old_path": "configs/solov2/README.md",
"new_path": "configs/solov2/README.md",
"diff": "@@ -19,8 +19,8 @@ SOLOv2 (Segmenting Objects by Locations) is a fast instance segmentation framewo\n| BlendMask | R50-FPN | True | 3x | 37.8 | 13.5 | V100 | - | - |\n| SOLOv2 (Paper) | R50-FPN | False | 1x | 34.8 | 18.5 | V100 | - | - |\n| SOLOv2 (Paper) | X101-DCN-FPN | True | 3x | 42.4 | 5.9 | V100 | - | - |\n-| SOLOv2 | R50-FPN | False | 1x | 35.5 | 21.9 | V100 | [model](https://paddledet.bj.bcebos.com/models/solov2_r50_fpn_1x_coco.pdparams) | [config](https://github.com/PaddlePaddle/PaddleDetection/tree/master/dygraph/configs/solov2/solov2_r50_fpn_1x_coco.yml) |\n-| SOLOv2 | R50-FPN | True | 3x | 38.0 | 21.9 | V100 | [model](https://paddledet.bj.bcebos.com/models/solov2_r50_fpn_3x_coco.pdparams) | [config](https://github.com/PaddlePaddle/PaddleDetection/tree/master/dygraph/configs/solov2/solov2_r50_fpn_3x_coco.yml) |\n+| SOLOv2 | R50-FPN | False | 1x | 35.5 | 21.9 | V100 | [model](https://paddledet.bj.bcebos.com/models/solov2_r50_fpn_1x_coco.pdparams) | [config](https://github.com/PaddlePaddle/PaddleDetection/tree/develop/configs/solov2/solov2_r50_fpn_1x_coco.yml) |\n+| SOLOv2 | R50-FPN | True | 3x | 38.0 | 21.9 | V100 | [model](https://paddledet.bj.bcebos.com/models/solov2_r50_fpn_3x_coco.pdparams) | [config](https://github.com/PaddlePaddle/PaddleDetection/tree/develop/configs/solov2/solov2_r50_fpn_3x_coco.yml) |\n**Notes:**\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "configs/ssld/cascade_mask_rcnn_r50_vd_fpn_ssld_1x_coco.yml",
"diff": "+_BASE_: [\n+ '../datasets/coco_instance.yml',\n+ '../runtime.yml',\n+ '../cascade_rcnn/_base_/optimizer_1x.yml',\n+ '../cascade_rcnn/_base_/cascade_mask_rcnn_r50_fpn.yml',\n+ '../cascade_rcnn/_base_/cascade_mask_fpn_reader.yml',\n+]\n+pretrain_weights: https://paddledet.bj.bcebos.com/models/pretrained/ResNet50_vd_ssld_v2_pretrained.pdparams\n+weights: output/cascade_mask_rcnn_r50_vd_fpn_ssld_1x_coco/model_final\n+\n+ResNet:\n+ depth: 50\n+ variant: d\n+ norm_type: bn\n+ freeze_at: 0\n+ return_idx: [0,1,2,3]\n+ num_stages: 4\n+ lr_mult_list: [0.05, 0.05, 0.1, 0.15]\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "configs/ssld/cascade_mask_rcnn_r50_vd_fpn_ssld_2x_coco.yml",
"diff": "+_BASE_: [\n+ '../datasets/coco_instance.yml',\n+ '../runtime.yml',\n+ '../cascade_rcnn/_base_/optimizer_1x.yml',\n+ '../cascade_rcnn/_base_/cascade_mask_rcnn_r50_fpn.yml',\n+ '../cascade_rcnn/_base_/cascade_mask_fpn_reader.yml',\n+]\n+pretrain_weights: https://paddledet.bj.bcebos.com/models/pretrained/ResNet50_vd_ssld_v2_pretrained.pdparams\n+weights: output/cascade_mask_rcnn_r50_vd_fpn_ssld_2x_coco/model_final\n+\n+ResNet:\n+ depth: 50\n+ variant: d\n+ norm_type: bn\n+ freeze_at: 0\n+ return_idx: [0,1,2,3]\n+ num_stages: 4\n+ lr_mult_list: [0.05, 0.05, 0.1, 0.15]\n+\n+epoch: 24\n+LearningRate:\n+ base_lr: 0.01\n+ schedulers:\n+ - !PiecewiseDecay\n+ gamma: 0.1\n+ milestones: [12, 22]\n+ - !LinearWarmup\n+ start_factor: 0.1\n+ steps: 1000\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "configs/ssld/cascade_rcnn_r50_vd_fpn_ssld_1x_coco.yml",
"diff": "+_BASE_: [\n+ '../datasets/coco_detection.yml',\n+ '../runtime.yml',\n+ '../cascade_rcnn/_base_/optimizer_1x.yml',\n+ '../cascade_rcnn/_base_/cascade_rcnn_r50_fpn.yml',\n+ '../cascade_rcnn/_base_/cascade_fpn_reader.yml',\n+]\n+pretrain_weights: https://paddledet.bj.bcebos.com/models/pretrained/ResNet50_vd_ssld_v2_pretrained.pdparams\n+weights: output/cascade_rcnn_r50_vd_fpn_ssld_1x_coco/model_final\n+\n+ResNet:\n+ depth: 50\n+ variant: d\n+ norm_type: bn\n+ freeze_at: 0\n+ return_idx: [0,1,2,3]\n+ num_stages: 4\n+ lr_mult_list: [0.05, 0.05, 0.1, 0.15]\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "configs/ssld/cascade_rcnn_r50_vd_fpn_ssld_2x_coco.yml",
"diff": "+_BASE_: [\n+ '../datasets/coco_detection.yml',\n+ '../runtime.yml',\n+ '../cascade_rcnn/_base_/optimizer_1x.yml',\n+ '../cascade_rcnn/_base_/cascade_rcnn_r50_fpn.yml',\n+ '../cascade_rcnn/_base_/cascade_fpn_reader.yml',\n+]\n+pretrain_weights: https://paddledet.bj.bcebos.com/models/pretrained/ResNet50_vd_ssld_v2_pretrained.pdparams\n+weights: output/cascade_rcnn_r50_vd_fpn_ssld_2x_coco/model_final\n+\n+ResNet:\n+ depth: 50\n+ variant: d\n+ norm_type: bn\n+ freeze_at: 0\n+ return_idx: [0,1,2,3]\n+ num_stages: 4\n+ lr_mult_list: [0.05, 0.05, 0.1, 0.15]\n+\n+epoch: 24\n+LearningRate:\n+ base_lr: 0.01\n+ schedulers:\n+ - !PiecewiseDecay\n+ gamma: 0.1\n+ milestones: [12, 22]\n+ - !LinearWarmup\n+ start_factor: 0.1\n+ steps: 1000\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "configs/ssld/faster_rcnn_r50_vd_fpn_ssld_1x_coco.yml",
"diff": "+_BASE_: [\n+ '../datasets/coco_detection.yml',\n+ '../runtime.yml',\n+ '../faster_rcnn/_base_/optimizer_1x.yml',\n+ '../faster_rcnn/_base_/faster_rcnn_r50_fpn.yml',\n+ '../faster_rcnn/_base_/faster_fpn_reader.yml',\n+]\n+pretrain_weights: https://paddledet.bj.bcebos.com/models/pretrained/ResNet50_vd_ssld_v2_pretrained.pdparams\n+weights: output/faster_rcnn_r50_vd_fpn_ssld_1x_coco/model_final\n+\n+ResNet:\n+ depth: 50\n+ variant: d\n+ norm_type: bn\n+ freeze_at: 0\n+ return_idx: [0,1,2,3]\n+ num_stages: 4\n+ lr_mult_list: [0.05, 0.05, 0.1, 0.15]\n+\n+epoch: 12\n+LearningRate:\n+ base_lr: 0.01\n+ schedulers:\n+ - !PiecewiseDecay\n+ gamma: 0.1\n+ milestones: [8, 11]\n+ - !LinearWarmup\n+ start_factor: 0.1\n+ steps: 1000\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "configs/ssld/faster_rcnn_r50_vd_fpn_ssld_2x_coco.yml",
"diff": "+_BASE_: [\n+ '../datasets/coco_detection.yml',\n+ '../runtime.yml',\n+ '../faster_rcnn/_base_/optimizer_1x.yml',\n+ '../faster_rcnn/_base_/faster_rcnn_r50_fpn.yml',\n+ '../faster_rcnn/_base_/faster_fpn_reader.yml',\n+]\n+pretrain_weights: https://paddledet.bj.bcebos.com/models/pretrained/ResNet50_vd_ssld_v2_pretrained.pdparams\n+weights: output/faster_rcnn_r50_vd_fpn_ssld_2x_coco/model_final\n+\n+ResNet:\n+ depth: 50\n+ variant: d\n+ norm_type: bn\n+ freeze_at: 0\n+ return_idx: [0,1,2,3]\n+ num_stages: 4\n+ lr_mult_list: [0.05, 0.05, 0.1, 0.15]\n+\n+epoch: 24\n+LearningRate:\n+ base_lr: 0.01\n+ schedulers:\n+ - !PiecewiseDecay\n+ gamma: 0.1\n+ milestones: [12, 22]\n+ - !LinearWarmup\n+ start_factor: 0.1\n+ steps: 1000\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "configs/ssld/mask_rcnn_r50_vd_fpn_ssld_1x_coco.yml",
"diff": "+_BASE_: [\n+ '../datasets/coco_instance.yml',\n+ '../runtime.yml',\n+ '../mask_rcnn/_base_/optimizer_1x.yml',\n+ '../mask_rcnn/_base_/mask_rcnn_r50_fpn.yml',\n+ '../mask_rcnn/_base_/mask_fpn_reader.yml',\n+]\n+pretrain_weights: https://paddledet.bj.bcebos.com/models/pretrained/ResNet50_vd_ssld_v2_pretrained.pdparams\n+weights: output/mask_rcnn_r50_vd_fpn_ssld_1x_coco/model_final\n+\n+ResNet:\n+ depth: 50\n+ variant: d\n+ norm_type: bn\n+ freeze_at: 0\n+ return_idx: [0,1,2,3]\n+ num_stages: 4\n+ lr_mult_list: [0.05, 0.05, 0.1, 0.15]\n+\n+epoch: 12\n+LearningRate:\n+ base_lr: 0.01\n+ schedulers:\n+ - !PiecewiseDecay\n+ gamma: 0.1\n+ milestones: [8, 11]\n+ - !LinearWarmup\n+ start_factor: 0.1\n+ steps: 1000\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "configs/ssld/mask_rcnn_r50_vd_fpn_ssld_2x_coco.yml",
"diff": "+_BASE_: [\n+ '../datasets/coco_instance.yml',\n+ '../runtime.yml',\n+ '../mask_rcnn/_base_/optimizer_1x.yml',\n+ '../mask_rcnn/_base_/mask_rcnn_r50_fpn.yml',\n+ '../mask_rcnn/_base_/mask_fpn_reader.yml',\n+]\n+pretrain_weights: https://paddledet.bj.bcebos.com/models/pretrained/ResNet50_vd_ssld_v2_pretrained.pdparams\n+weights: output/mask_rcnn_r50_vd_fpn_ssld_2x_coco/model_final\n+\n+ResNet:\n+ depth: 50\n+ variant: d\n+ norm_type: bn\n+ freeze_at: 0\n+ return_idx: [0,1,2,3]\n+ num_stages: 4\n+ lr_mult_list: [0.05, 0.05, 0.1, 0.15]\n+\n+epoch: 24\n+LearningRate:\n+ base_lr: 0.01\n+ schedulers:\n+ - !PiecewiseDecay\n+ gamma: 0.1\n+ milestones: [12, 22]\n+ - !LinearWarmup\n+ start_factor: 0.1\n+ steps: 1000\n"
},
{
"change_type": "ADD",
"old_path": "docs/images/ssld_model.png",
"new_path": "docs/images/ssld_model.png",
"diff": "Binary files /dev/null and b/docs/images/ssld_model.png differ\n"
}
] |
Python
|
Apache License 2.0
|
paddlepaddle/paddledetection
|
add SSLD model (#2421)
|
499,333 |
27.03.2021 12:49:52
| -28,800 |
5ba7e85a4eb68cf0d88b942e9f537b9d2b34f049
|
update install doc, test=document_fix
|
[
{
"change_type": "MODIFY",
"old_path": "ppdet/data/source/dataset.py",
"new_path": "ppdet/data/source/dataset.py",
"diff": "@@ -129,6 +129,9 @@ class ImageFolder(DetDataset):\nself.roidbs = None\nself.sample_num = sample_num\n+ def check_or_download_dataset(self):\n+ return\n+\ndef parse_dataset(self, ):\nif not self.roidbs:\nself.roidbs = self._load_images()\n"
},
{
"change_type": "MODIFY",
"old_path": "requirements.txt",
"new_path": "requirements.txt",
"diff": "@@ -6,3 +6,4 @@ PyYAML\nshapely\nscipy\nterminaltables\n+pycocotools\n"
}
] |
Python
|
Apache License 2.0
|
paddlepaddle/paddledetection
|
update install doc, test=document_fix (#2441)
|
499,333 |
29.03.2021 14:36:47
| -28,800 |
f5aba69cce87dda09f8d577e51039c7b8208c1cb
|
update dist api
|
[
{
"change_type": "MODIFY",
"old_path": "ppdet/engine/callbacks.py",
"new_path": "ppdet/engine/callbacks.py",
"diff": "@@ -23,7 +23,7 @@ import six\nimport numpy as np\nimport paddle\n-from paddle.distributed import ParallelEnv\n+import paddle.distributed as dist\nfrom ppdet.utils.checkpoint import save_model\nfrom ppdet.optimizer import ModelEMA\n@@ -81,7 +81,7 @@ class LogPrinter(Callback):\nsuper(LogPrinter, self).__init__(model)\ndef on_step_end(self, status):\n- if ParallelEnv().nranks < 2 or ParallelEnv().local_rank == 0:\n+ if dist.get_world_size() < 2 or dist.get_rank() == 0:\nmode = status['mode']\nif mode == 'train':\nepoch_id = status['epoch_id']\n@@ -129,7 +129,7 @@ class LogPrinter(Callback):\nlogger.info(\"Eval iter: {}\".format(step_id))\ndef on_epoch_end(self, status):\n- if ParallelEnv().nranks < 2 or ParallelEnv().local_rank == 0:\n+ if dist.get_world_size() < 2 or dist.get_rank() == 0:\nmode = status['mode']\nif mode == 'eval':\nsample_num = status['sample_num']\n@@ -160,7 +160,7 @@ class Checkpointer(Callback):\nepoch_id = status['epoch_id']\nweight = None\nsave_name = None\n- if ParallelEnv().nranks < 2 or ParallelEnv().local_rank == 0:\n+ if dist.get_world_size() < 2 or dist.get_rank() == 0:\nif mode == 'train':\nend_epoch = self.model.cfg.epoch\nif epoch_id % self.model.cfg.snapshot_epoch == 0 or epoch_id == end_epoch - 1:\n@@ -224,7 +224,7 @@ class VisualDLWriter(Callback):\ndef on_step_end(self, status):\nmode = status['mode']\n- if ParallelEnv().nranks < 2 or ParallelEnv().local_rank == 0:\n+ if dist.get_world_size() < 2 or dist.get_rank() == 0:\nif mode == 'train':\ntraining_staus = status['training_staus']\nfor loss_name, loss_value in training_staus.get().items():\n@@ -248,7 +248,7 @@ class VisualDLWriter(Callback):\ndef on_epoch_end(self, status):\nmode = status['mode']\n- if ParallelEnv().nranks < 2 or ParallelEnv().local_rank == 0:\n+ if dist.get_world_size() < 2 or dist.get_rank() == 0:\nif mode == 'eval':\nfor metric in self.model._metrics:\nfor key, map_value in metric.get_results().items():\n"
},
{
"change_type": "MODIFY",
"old_path": "ppdet/engine/env.py",
"new_path": "ppdet/engine/env.py",
"diff": "@@ -21,7 +21,7 @@ import random\nimport numpy as np\nimport paddle\n-from paddle.distributed import ParallelEnv, fleet\n+from paddle.distributed import fleet\n__all__ = ['init_parallel_env', 'set_random_seed', 'init_fleet_env']\n"
},
{
"change_type": "MODIFY",
"old_path": "ppdet/engine/trainer.py",
"new_path": "ppdet/engine/trainer.py",
"diff": "@@ -24,7 +24,8 @@ import numpy as np\nfrom PIL import Image\nimport paddle\n-from paddle.distributed import ParallelEnv, fleet\n+import paddle.distributed as dist\n+from paddle.distributed import fleet\nfrom paddle import amp\nfrom paddle.static import InputSpec\n@@ -84,8 +85,8 @@ class Trainer(object):\nself.optimizer = create('OptimizerBuilder')(self.lr,\nself.model.parameters())\n- self._nranks = ParallelEnv().nranks\n- self._local_rank = ParallelEnv().local_rank\n+ self._nranks = dist.get_world_size()\n+ self._local_rank = dist.get_rank()\nself.status = {}\n"
},
{
"change_type": "MODIFY",
"old_path": "ppdet/utils/logger.py",
"new_path": "ppdet/utils/logger.py",
"diff": "@@ -17,7 +17,7 @@ import logging\nimport os\nimport sys\n-from paddle.distributed import ParallelEnv\n+import paddle.distributed as dist\n__all__ = ['setup_logger']\n@@ -47,7 +47,7 @@ def setup_logger(name=\"ppdet\", output=None):\n\"[%(asctime)s] %(name)s %(levelname)s: %(message)s\",\ndatefmt=\"%m/%d %H:%M:%S\")\n# stdout logging: master only\n- local_rank = ParallelEnv().local_rank\n+ local_rank = dist.get_rank()\nif local_rank == 0:\nch = logging.StreamHandler(stream=sys.stdout)\nch.setLevel(logging.DEBUG)\n"
},
{
"change_type": "MODIFY",
"old_path": "tools/eval.py",
"new_path": "tools/eval.py",
"diff": "@@ -27,7 +27,6 @@ import warnings\nwarnings.filterwarnings('ignore')\nimport paddle\n-from paddle.distributed import ParallelEnv\nfrom ppdet.core.workspace import load_config, merge_config\nfrom ppdet.utils.check import check_gpu, check_version, check_config\n@@ -115,8 +114,7 @@ def main():\ncheck_gpu(cfg.use_gpu)\ncheck_version()\n- place = 'gpu:{}'.format(ParallelEnv().dev_id) if cfg.use_gpu else 'cpu'\n- place = paddle.set_device(place)\n+ place = paddle.set_device('gpu' if cfg.use_gpu else 'cpu')\nrun(FLAGS, cfg)\n"
},
{
"change_type": "MODIFY",
"old_path": "tools/infer.py",
"new_path": "tools/infer.py",
"diff": "@@ -27,7 +27,6 @@ warnings.filterwarnings('ignore')\nimport glob\nimport paddle\n-from paddle.distributed import ParallelEnv\nfrom ppdet.core.workspace import load_config, merge_config\nfrom ppdet.engine import Trainer\nfrom ppdet.utils.check import check_gpu, check_version, check_config\n@@ -140,8 +139,7 @@ def main():\ncheck_gpu(cfg.use_gpu)\ncheck_version()\n- place = 'gpu:{}'.format(ParallelEnv().dev_id) if cfg.use_gpu else 'cpu'\n- place = paddle.set_device(place)\n+ place = paddle.set_device('gpu' if cfg.use_gpu else 'cpu')\nrun(FLAGS, cfg)\n"
},
{
"change_type": "MODIFY",
"old_path": "tools/train.py",
"new_path": "tools/train.py",
"diff": "@@ -29,7 +29,6 @@ import random\nimport numpy as np\nimport paddle\n-from paddle.distributed import ParallelEnv\nfrom ppdet.core.workspace import load_config, merge_config, create\nfrom ppdet.utils.checkpoint import load_weight, load_pretrain_weight\n@@ -122,8 +121,7 @@ def main():\ncheck.check_gpu(cfg.use_gpu)\ncheck.check_version()\n- place = 'gpu:{}'.format(ParallelEnv().dev_id) if cfg.use_gpu else 'cpu'\n- place = paddle.set_device(place)\n+ place = paddle.set_device('gpu' if cfg.use_gpu else 'cpu')\nrun(FLAGS, cfg)\n"
}
] |
Python
|
Apache License 2.0
|
paddlepaddle/paddledetection
|
update dist api (#2443)
|
499,331 |
29.03.2021 15:56:26
| -28,800 |
c8e4a3d792212ed37c084ab62dd25a7042dd328e
|
fix tensorrt bug of ssd, update doc of tensorrt, test=documet_fix
|
[
{
"change_type": "MODIFY",
"old_path": "ppdet/engine/export_utils.py",
"new_path": "ppdet/engine/export_utils.py",
"diff": "@@ -28,7 +28,7 @@ logger = setup_logger('ppdet.engine')\n# Global dictionary\nTRT_MIN_SUBGRAPH = {\n'YOLO': 3,\n- 'SSD': 40,\n+ 'SSD': 60,\n'RCNN': 40,\n'RetinaNet': 40,\n'EfficientDet': 40,\n"
}
] |
Python
|
Apache License 2.0
|
paddlepaddle/paddledetection
|
fix tensorrt bug of ssd, update doc of tensorrt, test=documet_fix (#2410)
|
499,396 |
29.03.2021 20:37:08
| -28,800 |
cfbba55cf0e7cf0bfab78d2e4234898c5bd125d6
|
fix the issue in function _get_shared_memory_size_in_M in the case that there are multiple 'shm' spaces
|
[
{
"change_type": "ADD",
"old_path": "ppdet/.DS_Store",
"new_path": "ppdet/.DS_Store",
"diff": "Binary files /dev/null and b/ppdet/.DS_Store differ\n"
},
{
"change_type": "MODIFY",
"old_path": "ppdet/data/shm_utils.py",
"new_path": "ppdet/data/shm_utils.py",
"diff": "@@ -58,10 +58,10 @@ def _get_shared_memory_size_in_M():\nelif len(shm_infos) == 1:\nreturn _parse_size_in_M(shm_infos[0][3])\nelse:\n- shm_infos = [si for si in shm_infos \\\n- if si[-1] == SHM_DEFAULT_MOUNT]\n- if len(shm_infos) == 0:\n- return _parse_size_in_M(shm_infos[0][3])\n+ default_mount_infos = [\n+ si for si in shm_infos if si[-1] == SHM_DEFAULT_MOUNT\n+ ]\n+ if default_mount_infos:\n+ return _parse_size_in_M(default_mount_infos[0][3])\nelse:\n- return max([_parse_size_in_M(si[3]) \\\n- for si in shm_infos])\n+ return max([_parse_size_in_M(si[3]) for si in shm_infos])\n"
}
] |
Python
|
Apache License 2.0
|
paddlepaddle/paddledetection
|
fix the issue in function _get_shared_memory_size_in_M in the case that there are multiple 'shm' spaces (#2434)
|
499,304 |
30.03.2021 21:49:02
| -28,800 |
707461b935d327b23d3a0f3c9dedf540663f3831
|
support python eval in face_detection
|
[
{
"change_type": "MODIFY",
"old_path": ".gitignore",
"new_path": ".gitignore",
"diff": "@@ -26,7 +26,7 @@ __pycache__/\n/lib64/\n/output/\n/inference_model/\n-/dygraph/output_inference/\n+/output_inference/\n/parts/\n/sdist/\n/var/\n"
}
] |
Python
|
Apache License 2.0
|
paddlepaddle/paddledetection
|
support python eval in face_detection (#2469)
|
499,304 |
31.03.2021 09:48:11
| -28,800 |
d9f3d9c6721f7eb7e615ebbbc6e79dd69b0013e8
|
fix christmas app bug
|
[
{
"change_type": "ADD",
"old_path": null,
"new_path": "static/application/christmas/element_source/background/1.json",
"diff": "+{\"path\":\"/Users/yuzhiliang/Downloads/docsmall-2/12.png\",\"outputs\":{\"object\":[{\"name\":\"local\",\"bndbox\":{\"xmin\":282,\"ymin\":366,\"xmax\":3451,\"ymax\":4603}}]},\"time_labeled\":1608631688933,\"labeled\":true,\"size\":{\"width\":3714,\"height\":5725,\"depth\":3}}\n\\ No newline at end of file\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "static/application/christmas/element_source/background/2.json",
"diff": "+{\"path\":\"/Users/yuzhiliang/Downloads/docsmall-2/2.png\",\"outputs\":{\"object\":[{\"name\":\"local\",\"bndbox\":{\"xmin\":336,\"ymin\":512,\"xmax\":3416,\"ymax\":4672}}]},\"time_labeled\":1608631696021,\"labeled\":true,\"size\":{\"width\":3714,\"height\":5275,\"depth\":3}}\n\\ No newline at end of file\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "static/application/christmas/element_source/background/3.json",
"diff": "+{\"path\":\"/Users/yuzhiliang/Downloads/docsmall-2/3.png\",\"outputs\":{\"object\":[{\"name\":\"local\",\"bndbox\":{\"xmin\":376,\"ymin\":352,\"xmax\":3448,\"ymax\":4544}}]},\"time_labeled\":1608631701740,\"labeled\":true,\"size\":{\"width\":3714,\"height\":5275,\"depth\":3}}\n\\ No newline at end of file\n"
}
] |
Python
|
Apache License 2.0
|
paddlepaddle/paddledetection
|
fix christmas app bug (#2474)
|
499,339 |
31.03.2021 21:11:58
| -28,800 |
8847982855db9ee8cca350215c6a8549ddcbe43e
|
add ssd comments test=develop
|
[
{
"change_type": "MODIFY",
"old_path": "ppdet/modeling/architectures/ssd.py",
"new_path": "ppdet/modeling/architectures/ssd.py",
"diff": "@@ -24,6 +24,15 @@ __all__ = ['SSD']\n@register\nclass SSD(BaseArch):\n+ \"\"\"\n+ Single Shot MultiBox Detector, see https://arxiv.org/abs/1512.02325\n+\n+ Args:\n+ backbone (nn.Layer): backbone instance\n+ ssd_head (nn.Layer): `SSDHead` instance\n+ post_process (object): `BBoxPostProcess` instance\n+ \"\"\"\n+\n__category__ = 'architecture'\n__inject__ = ['post_process']\n"
},
{
"change_type": "MODIFY",
"old_path": "ppdet/modeling/heads/ssd_head.py",
"new_path": "ppdet/modeling/heads/ssd_head.py",
"diff": "@@ -68,6 +68,20 @@ class SepConvLayer(nn.Layer):\n@register\nclass SSDHead(nn.Layer):\n+ \"\"\"\n+ SSDHead\n+\n+ Args:\n+ num_classes (int): Number of classes\n+ in_channels (list): Number of channels per input feature\n+ anchor_generator (dict): Configuration of 'AnchorGeneratorSSD' instance\n+ kernel_size (int): Conv kernel size\n+ padding (int): Conv padding\n+ use_sepconv (bool): Use SepConvLayer if true\n+ conv_decay (float): Conv regularization coeff\n+ loss (object): 'SSDLoss' instance\n+ \"\"\"\n+\n__shared__ = ['num_classes']\n__inject__ = ['anchor_generator', 'loss']\n"
},
{
"change_type": "MODIFY",
"old_path": "ppdet/modeling/losses/ssd_loss.py",
"new_path": "ppdet/modeling/losses/ssd_loss.py",
"diff": "@@ -28,6 +28,21 @@ __all__ = ['SSDLoss']\n@register\nclass SSDLoss(nn.Layer):\n+ \"\"\"\n+ SSDLoss\n+\n+ Args:\n+ match_type (str): The type of matching method, should be\n+ 'bipartite' or 'per_prediction'. None ('bipartite') by default.\n+ overlap_threshold (float32, optional): If `match_type` is 'per_prediction',\n+ this threshold is to determine the extra matching bboxes based\n+ on the maximum distance, 0.5 by default.\n+ neg_pos_ratio (float): The ratio of negative samples / positive samples.\n+ neg_overlap (float): The overlap threshold of negative samples.\n+ loc_loss_weight (float): The weight of loc_loss.\n+ conf_loss_weight (float): The weight of conf_loss.\n+ \"\"\"\n+\ndef __init__(self,\nmatch_type='per_prediction',\noverlap_threshold=0.5,\n"
}
] |
Python
|
Apache License 2.0
|
paddlepaddle/paddledetection
|
add ssd comments test=develop (#2466)
|
499,313 |
01.04.2021 11:09:27
| -28,800 |
46ecfc35b6fd4e9b78a4bf3203119e194f411e40
|
fix COCO error in validation in train
|
[
{
"change_type": "MODIFY",
"old_path": "ppdet/engine/trainer.py",
"new_path": "ppdet/engine/trainer.py",
"diff": "@@ -128,11 +128,23 @@ class Trainer(object):\nbias = self.cfg['bias'] if 'bias' in self.cfg else 0\noutput_eval = self.cfg['output_eval'] \\\nif 'output_eval' in self.cfg else None\n+\n+ # pass clsid2catid info to metric instance to avoid multiple loading\n+ # annotation file\nclsid2catid = {v: k for k, v in self.dataset.catid2clsid.items()} \\\nif self.mode == 'eval' else None\n+\n+ # when do validation in train, annotation file should be get from\n+ # EvalReader instead of self.dataset(which is TrainReader)\n+ anno_file = self.dataset.get_anno()\n+ if self.mode == 'train' and validate:\n+ eval_dataset = self.cfg['EvalDataset']\n+ eval_dataset.check_or_download_dataset()\n+ anno_file = eval_dataset.get_anno()\n+\nself._metrics = [\nCOCOMetric(\n- anno_file=self.dataset.get_anno(),\n+ anno_file=anno_file,\nclsid2catid=clsid2catid,\nclasswise=classwise,\noutput_eval=output_eval,\n"
}
] |
Python
|
Apache License 2.0
|
paddlepaddle/paddledetection
|
fix COCO error in validation in train (#2483)
|
499,395 |
06.04.2021 14:34:07
| -28,800 |
e6e8603b8e5a42f198965b8b512f094b5eff4f6d
|
fix cpp infer for lack of gflags
|
[
{
"change_type": "MODIFY",
"old_path": "deploy/cpp/src/main.cc",
"new_path": "deploy/cpp/src/main.cc",
"diff": "#endif\n#include \"include/object_detector.h\"\n+#include <gflags/gflags.h>\nDEFINE_string(model_dir, \"\", \"Path of inference model\");\n"
},
{
"change_type": "MODIFY",
"old_path": "static/deploy/cpp/src/main.cc",
"new_path": "static/deploy/cpp/src/main.cc",
"diff": "#endif\n#include \"include/object_detector.h\"\n+#include <gflags/gflags.h>\nDEFINE_string(model_dir, \"\", \"Path of inference model\");\n"
}
] |
Python
|
Apache License 2.0
|
paddlepaddle/paddledetection
|
fix cpp infer for lack of gflags (#2499)
|
499,395 |
08.04.2021 14:35:41
| -28,800 |
4d566f1c8a6b7226fce6f4557f57cbd0fbdeb5b9
|
yolo annotations
|
[
{
"change_type": "MODIFY",
"old_path": "ppdet/modeling/architectures/yolo.py",
"new_path": "ppdet/modeling/architectures/yolo.py",
"diff": "@@ -20,6 +20,16 @@ class YOLOv3(BaseArch):\nyolo_head='YOLOv3Head',\npost_process='BBoxPostProcess',\ndata_format='NCHW'):\n+ \"\"\"\n+ YOLOv3 network, see https://arxiv.org/abs/1804.02767\n+\n+ Args:\n+ backbone (nn.Layer): backbone instance\n+ neck (nn.Layer): neck instance\n+ yolo_head (nn.Layer): anchor_head instance\n+ bbox_post_process (object): `BBoxPostProcess` instance\n+ data_format (str): data format, NCHW or NHWC\n+ \"\"\"\nsuper(YOLOv3, self).__init__(data_format=data_format)\nself.backbone = backbone\nself.neck = neck\n"
},
{
"change_type": "MODIFY",
"old_path": "ppdet/modeling/backbones/darknet.py",
"new_path": "ppdet/modeling/backbones/darknet.py",
"diff": "@@ -37,6 +37,22 @@ class ConvBNLayer(nn.Layer):\nact=\"leaky\",\nname=None,\ndata_format='NCHW'):\n+ \"\"\"\n+ conv + bn + activation layer\n+\n+ Args:\n+ ch_in (int): input channel\n+ ch_out (int): output channel\n+ filter_size (int): filter size, default 3\n+ stride (int): stride, default 1\n+ groups (int): number of groups of conv layer, default 1\n+ padding (int): padding size, default 0\n+ norm_type (str): batch norm type, default bn\n+ norm_decay (str): decay for weight and bias of batch norm layer, default 0.\n+ act (str): activation function type, default 'leaky', which means leaky_relu\n+ name (str): layer name\n+ data_format (str): data format, NCHW or NHWC\n+ \"\"\"\nsuper(ConvBNLayer, self).__init__()\nself.conv = nn.Conv2D(\n@@ -75,6 +91,20 @@ class DownSample(nn.Layer):\nnorm_decay=0.,\nname=None,\ndata_format='NCHW'):\n+ \"\"\"\n+ downsample layer\n+\n+ Args:\n+ ch_in (int): input channel\n+ ch_out (int): output channel\n+ filter_size (int): filter size, default 3\n+ stride (int): stride, default 2\n+ padding (int): padding size, default 1\n+ norm_type (str): batch norm type, default bn\n+ norm_decay (str): decay for weight and bias of batch norm layer, default 0.\n+ name (str): layer name\n+ data_format (str): data format, NCHW or NHWC\n+ \"\"\"\nsuper(DownSample, self).__init__()\n@@ -103,6 +133,18 @@ class BasicBlock(nn.Layer):\nnorm_decay=0.,\nname=None,\ndata_format='NCHW'):\n+ \"\"\"\n+ BasicBlock layer of DarkNet\n+\n+ Args:\n+ ch_in (int): input channel\n+ ch_out (int): output channel\n+ norm_type (str): batch norm type, default bn\n+ norm_decay (str): decay for weight and bias of batch norm layer, default 0.\n+ name (str): layer name\n+ data_format (str): data format, NCHW or NHWC\n+ \"\"\"\n+\nsuper(BasicBlock, self).__init__()\nself.conv1 = ConvBNLayer(\n@@ -142,6 +184,18 @@ class Blocks(nn.Layer):\nnorm_decay=0.,\nname=None,\ndata_format='NCHW'):\n+ \"\"\"\n+ Blocks layer, which consist of some BaickBlock layers\n+\n+ Args:\n+ ch_in (int): input channel\n+ ch_out (int): output channel\n+ count (int): number of BasicBlock layer\n+ norm_type (str): batch norm type, default bn\n+ norm_decay (str): decay for weight and bias of batch norm layer, default 0.\n+ name (str): layer name\n+ data_format (str): data format, NCHW or NHWC\n+ \"\"\"\nsuper(Blocks, self).__init__()\nself.basicblock0 = BasicBlock(\n@@ -189,6 +243,18 @@ class DarkNet(nn.Layer):\nnorm_type='bn',\nnorm_decay=0.,\ndata_format='NCHW'):\n+ \"\"\"\n+ Darknet, see https://pjreddie.com/darknet/yolo/\n+\n+ Args:\n+ depth (int): depth of network\n+ freeze_at (int): freeze the backbone at which stage\n+ filter_size (int): filter size, default 3\n+ return_idx (list): index of stages whose feature maps are returned\n+ norm_type (str): batch norm type, default bn\n+ norm_decay (str): decay for weight and bias of batch norm layer, default 0.\n+ data_format (str): data format, NCHW or NHWC\n+ \"\"\"\nsuper(DarkNet, self).__init__()\nself.depth = depth\nself.freeze_at = freeze_at\n"
},
{
"change_type": "MODIFY",
"old_path": "ppdet/modeling/heads/yolo_head.py",
"new_path": "ppdet/modeling/heads/yolo_head.py",
"diff": "@@ -28,6 +28,18 @@ class YOLOv3Head(nn.Layer):\niou_aware=False,\niou_aware_factor=0.4,\ndata_format='NCHW'):\n+ \"\"\"\n+ Head for YOLOv3 network\n+\n+ Args:\n+ num_classes (int): number of foreground classes\n+ anchors (list): anchors\n+ anchor_masks (list): anchor masks\n+ loss (object): YOLOv3Loss instance\n+ iou_aware (bool): whether to use iou_aware\n+ iou_aware_factor (float): iou aware factor\n+ data_format (str): data format, NCHW or NHWC\n+ \"\"\"\nsuper(YOLOv3Head, self).__init__()\nself.num_classes = num_classes\nself.loss = loss\n"
},
{
"change_type": "MODIFY",
"old_path": "ppdet/modeling/losses/yolo_loss.py",
"new_path": "ppdet/modeling/losses/yolo_loss.py",
"diff": "@@ -46,6 +46,18 @@ class YOLOv3Loss(nn.Layer):\nscale_x_y=1.,\niou_loss=None,\niou_aware_loss=None):\n+ \"\"\"\n+ YOLOv3Loss layer\n+\n+ Args:\n+ num_calsses (int): number of foreground classes\n+ ignore_thresh (float): threshold to ignore confidence loss\n+ label_smooth (bool): whether to use label smoothing\n+ downsample (list): downsample ratio for each detection block\n+ scale_x_y (float): scale_x_y factor\n+ iou_loss (object): IoULoss instance\n+ iou_aware_loss (object): IouAwareLoss instance\n+ \"\"\"\nsuper(YOLOv3Loss, self).__init__()\nself.num_classes = num_classes\nself.ignore_thresh = ignore_thresh\n"
},
{
"change_type": "MODIFY",
"old_path": "ppdet/modeling/necks/yolo_fpn.py",
"new_path": "ppdet/modeling/necks/yolo_fpn.py",
"diff": "@@ -27,6 +27,16 @@ __all__ = ['YOLOv3FPN', 'PPYOLOFPN']\nclass YoloDetBlock(nn.Layer):\ndef __init__(self, ch_in, channel, norm_type, name, data_format='NCHW'):\n+ \"\"\"\n+ YOLODetBlock layer for yolov3, see https://arxiv.org/abs/1804.02767\n+\n+ Args:\n+ ch_in (int): input channel\n+ channel (int): base channel\n+ norm_type (str): batch norm type\n+ name (str): layer name\n+ data_format (str): data format, NCHW or NHWC\n+ \"\"\"\nsuper(YoloDetBlock, self).__init__()\nself.ch_in = ch_in\nself.channel = channel\n@@ -78,6 +88,17 @@ class SPP(nn.Layer):\nnorm_type,\nname,\ndata_format='NCHW'):\n+ \"\"\"\n+ SPP layer, which consist of four pooling layer follwed by conv layer\n+\n+ Args:\n+ ch_in (int): input channel of conv layer\n+ ch_out (int): output channel of conv layer\n+ k (int): kernel size of conv layer\n+ norm_type (str): batch norm type\n+ name (str): layer name\n+ data_format (str): data format, NCHW or NHWC\n+ \"\"\"\nsuper(SPP, self).__init__()\nself.pool = []\nfor size in pool_size:\n@@ -110,6 +131,15 @@ class SPP(nn.Layer):\nclass DropBlock(nn.Layer):\ndef __init__(self, block_size, keep_prob, name, data_format='NCHW'):\n+ \"\"\"\n+ DropBlock layer, see https://arxiv.org/abs/1810.12890\n+\n+ Args:\n+ block_size (int): block size\n+ keep_prob (int): keep probability\n+ name (str): layer name\n+ data_format (str): data format, NCHW or NHWC\n+ \"\"\"\nsuper(DropBlock, self).__init__()\nself.block_size = block_size\nself.keep_prob = keep_prob\n@@ -149,6 +179,19 @@ class CoordConv(nn.Layer):\nnorm_type,\nname,\ndata_format='NCHW'):\n+ \"\"\"\n+ CoordConv layer\n+\n+ Args:\n+ ch_in (int): input channel\n+ ch_out (int): output channel\n+ filter_size (int): filter size, default 3\n+ padding (int): padding size, default 0\n+ norm_type (str): batch norm type, default bn\n+ name (str): layer name\n+ data_format (str): data format, NCHW or NHWC\n+\n+ \"\"\"\nsuper(CoordConv, self).__init__()\nself.conv = ConvBNLayer(\nch_in + 2,\n@@ -193,6 +236,14 @@ class CoordConv(nn.Layer):\nclass PPYOLODetBlock(nn.Layer):\ndef __init__(self, cfg, name, data_format='NCHW'):\n+ \"\"\"\n+ PPYOLODetBlock layer\n+\n+ Args:\n+ cfg (list): layer configs for this block\n+ name (str): block name\n+ data_format (str): data format, NCHW or NHWC\n+ \"\"\"\nsuper(PPYOLODetBlock, self).__init__()\nself.conv_module = nn.Sequential()\nfor idx, (conv_name, layer, args, kwargs) in enumerate(cfg[:-1]):\n@@ -220,6 +271,15 @@ class YOLOv3FPN(nn.Layer):\nin_channels=[256, 512, 1024],\nnorm_type='bn',\ndata_format='NCHW'):\n+ \"\"\"\n+ YOLOv3FPN layer\n+\n+ Args:\n+ in_channels (list): input channels for fpn\n+ norm_type (str): batch norm type, default bn\n+ data_format (str): data format, NCHW or NHWC\n+\n+ \"\"\"\nsuper(YOLOv3FPN, self).__init__()\nassert len(in_channels) > 0, \"in_channels length should > 0\"\nself.in_channels = in_channels\n@@ -300,6 +360,16 @@ class PPYOLOFPN(nn.Layer):\nnorm_type='bn',\ndata_format='NCHW',\n**kwargs):\n+ \"\"\"\n+ PPYOLOFPN layer\n+\n+ Args:\n+ in_channels (list): input channels for fpn\n+ norm_type (str): batch norm type, default bn\n+ data_format (str): data format, NCHW or NHWC\n+ kwargs: extra key-value pairs, such as parameter of DropBlock and spp\n+\n+ \"\"\"\nsuper(PPYOLOFPN, self).__init__()\nassert len(in_channels) > 0, \"in_channels length should > 0\"\nself.in_channels = in_channels\n"
}
] |
Python
|
Apache License 2.0
|
paddlepaddle/paddledetection
|
yolo annotations (#2542)
|
499,395 |
10.04.2021 14:59:54
| -28,800 |
7b8c9eab565e997013db1e02ef575d04a50093a0
|
fix COCODataset while eval without annotations
|
[
{
"change_type": "MODIFY",
"old_path": "ppdet/data/source/coco.py",
"new_path": "ppdet/data/source/coco.py",
"diff": "@@ -90,6 +90,13 @@ class COCODataSet(DetDataset):\nim_w, im_h, img_id))\ncontinue\n+ coco_rec = {\n+ 'im_file': im_path,\n+ 'im_id': np.array([img_id]),\n+ 'h': im_h,\n+ 'w': im_w,\n+ } if 'image' in self.data_fields else {}\n+\nif not self.load_image_only:\nins_anno_ids = coco.getAnnIds(imgIds=[img_id], iscrowd=False)\ninstances = coco.loadAnns(ins_anno_ids)\n@@ -161,13 +168,6 @@ class COCODataSet(DetDataset):\nif has_segmentation and not any(gt_poly):\ncontinue\n- coco_rec = {\n- 'im_file': im_path,\n- 'im_id': np.array([img_id]),\n- 'h': im_h,\n- 'w': im_w,\n- } if 'image' in self.data_fields else {}\n-\nif is_rbox_anno:\ngt_rec = {\n'is_crowd': is_crowd,\n"
}
] |
Python
|
Apache License 2.0
|
paddlepaddle/paddledetection
|
fix COCODataset while eval without annotations (#2571)
|
499,395 |
12.04.2021 11:09:50
| -28,800 |
e3d703d7feb1ce3bae19abe68925339149fda93c
|
fix gridmask op and modify some comments
|
[
{
"change_type": "MODIFY",
"old_path": "ppdet/data/transform/gridmask_utils.py",
"new_path": "ppdet/data/transform/gridmask_utils.py",
"diff": "@@ -45,7 +45,7 @@ class GridMask(object):\nself.prob = self.st_prob * min(1, 1.0 * curr_iter / self.upper_iter)\nif np.random.rand() > self.prob:\nreturn x\n- _, h, w = x.shape\n+ h, w, _ = x.shape\nhh = int(1.5 * h)\nww = int(1.5 * w)\nd = np.random.randint(2, h)\n"
},
{
"change_type": "MODIFY",
"old_path": "ppdet/data/transform/operators.py",
"new_path": "ppdet/data/transform/operators.py",
"diff": "@@ -165,7 +165,7 @@ class Permute(BaseOperator):\n@register_op\nclass Lighting(BaseOperator):\n\"\"\"\n- Lighting the imagen by eigenvalues and eigenvectors\n+ Lighting the image by eigenvalues and eigenvectors\nArgs:\neigval (list): eigenvalues\neigvec (list): eigenvectors\n@@ -1799,12 +1799,13 @@ class Pad(BaseOperator):\noffsets=None,\nfill_value=(127.5, 127.5, 127.5)):\n\"\"\"\n- Pad image to a specified size or multiple of size_divisor. random target_size and interpolation method\n+ Pad image to a specified size or multiple of size_divisor.\nArgs:\nsize (int, Sequence): image target size, if None, pad to multiple of size_divisor, default None\nsize_divisor (int): size divisor, default 32\npad_mode (int): pad mode, currently only supports four modes [-1, 0, 1, 2]. if -1, use specified offsets\nif 0, only pad to right and bottom. if 1, pad according to center. if 2, only pad left and top\n+ offsets (list): [offset_x, offset_y], specify offset while padding, only supported pad_mode=-1\nfill_value (bool): rgb value of pad area, default (127.5, 127.5, 127.5)\n\"\"\"\nsuper(Pad, self).__init__()\n"
},
{
"change_type": "MODIFY",
"old_path": "static/ppdet/data/transform/gridmask_utils.py",
"new_path": "static/ppdet/data/transform/gridmask_utils.py",
"diff": "@@ -45,7 +45,7 @@ class GridMask(object):\nself.prob = self.st_prob * min(1, 1.0 * curr_iter / self.upper_iter)\nif np.random.rand() > self.prob:\nreturn x\n- _, h, w = x.shape\n+ h, w, _ = x.shape\nhh = int(1.5 * h)\nww = int(1.5 * w)\nd = np.random.randint(2, h)\n"
},
{
"change_type": "MODIFY",
"old_path": "static/ppdet/data/transform/operators.py",
"new_path": "static/ppdet/data/transform/operators.py",
"diff": "@@ -626,7 +626,7 @@ class GridMaskOp(BaseOperator):\nsample['curr_iter'])\nif not batch_input:\nsamples = samples[0]\n- return sample\n+ return samples\n@register_op\n@@ -2100,7 +2100,7 @@ class BboxXYXY2XYWH(BaseOperator):\n@register_op\nclass Lighting(BaseOperator):\n\"\"\"\n- Lighting the imagen by eigenvalues and eigenvectors\n+ Lighting the image by eigenvalues and eigenvectors\nArgs:\neigval (list): eigenvalues\neigvec (list): eigenvectors\n"
}
] |
Python
|
Apache License 2.0
|
paddlepaddle/paddledetection
|
fix gridmask op and modify some comments (#2558)
|
499,339 |
13.04.2021 19:17:49
| -28,800 |
62d8263650adf499188e8236bc32894d25ad8c91
|
add save only in eval/train test=develop
|
[
{
"change_type": "MODIFY",
"old_path": "ppdet/engine/trainer.py",
"new_path": "ppdet/engine/trainer.py",
"diff": "@@ -125,6 +125,8 @@ class Trainer(object):\nbias = self.cfg['bias'] if 'bias' in self.cfg else 0\noutput_eval = self.cfg['output_eval'] \\\nif 'output_eval' in self.cfg else None\n+ save_prediction_only = self.cfg['save_prediction_only'] \\\n+ if 'save_prediction_only' in self.cfg else False\n# pass clsid2catid info to metric instance to avoid multiple loading\n# annotation file\n@@ -145,7 +147,8 @@ class Trainer(object):\nclsid2catid=clsid2catid,\nclasswise=classwise,\noutput_eval=output_eval,\n- bias=bias)\n+ bias=bias,\n+ save_prediction_only=save_prediction_only)\n]\nelif self.cfg.metric == 'VOC':\nself._metrics = [\n"
},
{
"change_type": "MODIFY",
"old_path": "ppdet/metrics/metrics.py",
"new_path": "ppdet/metrics/metrics.py",
"diff": "@@ -69,6 +69,7 @@ class COCOMetric(Metric):\nself.output_eval = kwargs.get('output_eval', None)\n# TODO: bias should be unified\nself.bias = kwargs.get('bias', 0)\n+ self.save_prediction_only = kwargs.get('save_prediction_only', False)\nself.reset()\ndef reset(self):\n@@ -104,6 +105,10 @@ class COCOMetric(Metric):\njson.dump(self.results['bbox'], f)\nlogger.info('The bbox result is saved to bbox.json.')\n+ if self.save_prediction_only:\n+ logger.info('The bbox result is saved to {} and do not '\n+ 'evaluate the mAP.'.format(output))\n+ else:\nbbox_stats = cocoapi_eval(\noutput,\n'bbox',\n@@ -120,6 +125,10 @@ class COCOMetric(Metric):\njson.dump(self.results['mask'], f)\nlogger.info('The mask result is saved to mask.json.')\n+ if self.save_prediction_only:\n+ logger.info('The mask result is saved to {} and do not '\n+ 'evaluate the mAP.'.format(output))\n+ else:\nseg_stats = cocoapi_eval(\noutput,\n'segm',\n@@ -136,6 +145,10 @@ class COCOMetric(Metric):\njson.dump(self.results['segm'], f)\nlogger.info('The segm result is saved to segm.json.')\n+ if self.save_prediction_only:\n+ logger.info('The segm result is saved to {} and do not '\n+ 'evaluate the mAP.'.format(output))\n+ else:\nseg_stats = cocoapi_eval(\noutput,\n'segm',\n"
},
{
"change_type": "MODIFY",
"old_path": "tools/eval.py",
"new_path": "tools/eval.py",
"diff": "@@ -66,6 +66,12 @@ def parse_args():\naction=\"store_true\",\nhelp=\"whether per-category AP and draw P-R Curve or not.\")\n+ parser.add_argument(\n+ '--save_prediction_only',\n+ action='store_true',\n+ default=False,\n+ help='Whether to save the evaluation results only')\n+\nargs = parser.parse_args()\nreturn args\n@@ -102,6 +108,7 @@ def main():\ncfg['bias'] = 1 if FLAGS.bias else 0\ncfg['classwise'] = True if FLAGS.classwise else False\ncfg['output_eval'] = FLAGS.output_eval\n+ cfg['save_prediction_only'] = FLAGS.save_prediction_only\nmerge_config(FLAGS.opt)\nplace = paddle.set_device('gpu' if cfg.use_gpu else 'cpu')\n"
},
{
"change_type": "MODIFY",
"old_path": "tools/train.py",
"new_path": "tools/train.py",
"diff": "@@ -75,6 +75,11 @@ def parse_args():\ntype=str,\ndefault=\"vdl_log_dir/scalar\",\nhelp='VisualDL logging directory for scalar.')\n+ parser.add_argument(\n+ '--save_prediction_only',\n+ action='store_true',\n+ default=False,\n+ help='Whether to save the evaluation results only')\nargs = parser.parse_args()\nreturn args\n@@ -110,6 +115,7 @@ def main():\ncfg['fleet'] = FLAGS.fleet\ncfg['use_vdl'] = FLAGS.use_vdl\ncfg['vdl_log_dir'] = FLAGS.vdl_log_dir\n+ cfg['save_prediction_only'] = FLAGS.save_prediction_only\nmerge_config(FLAGS.opt)\nplace = paddle.set_device('gpu' if cfg.use_gpu else 'cpu')\n"
}
] |
Python
|
Apache License 2.0
|
paddlepaddle/paddledetection
|
add save only in eval/train test=develop (#2604)
|
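The `save_prediction_only` flag added above dumps the raw results and skips mAP computation. A hedged sketch of that control flow; `evaluate_fn` is a placeholder callback, not PaddleDetection's actual API:

```python
import json


def finish_eval(results, output_path, save_prediction_only, evaluate_fn):
    # always persist the raw predictions first
    with open(output_path, 'w') as f:
        json.dump(results, f)
    if save_prediction_only:
        print('Predictions saved to {}; skipping mAP evaluation.'.format(output_path))
        return None
    # otherwise run the usual COCO-style evaluation on the dumped file
    return evaluate_fn(output_path)
```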
499,304 |
13.04.2021 20:01:10
| -28,800 |
0ed43ce60423632fb5218da476bebad5c740c17c
|
fix quant export_model
|
[
{
"change_type": "MODIFY",
"old_path": "ppdet/slim/__init__.py",
"new_path": "ppdet/slim/__init__.py",
"diff": "@@ -54,6 +54,7 @@ def build_slim_model(cfg, slim_cfg, mode='train'):\nload_pretrain_weight(model, cfg.pretrain_weights)\nslim = create(cfg.slim)\ncfg['model'] = slim(model)\n+ cfg['slim'] = slim\nif mode != 'train':\nload_pretrain_weight(cfg['model'], cfg.weights)\n"
}
] |
Python
|
Apache License 2.0
|
paddlepaddle/paddledetection
|
fix quant export_model (#2606)
|
499,304 |
14.04.2021 11:27:58
| -28,800 |
2d949d191966a55f5a36f0407d3110871e642add
|
fix bbox_loss in cascade rcnn
|
[
{
"change_type": "MODIFY",
"old_path": "ppdet/modeling/heads/cascade_head.py",
"new_path": "ppdet/modeling/heads/cascade_head.py",
"diff": "@@ -137,7 +137,7 @@ class CascadeXConvNormHead(nn.Layer):\n@register\nclass CascadeHead(BBoxHead):\n__shared__ = ['num_classes', 'num_cascade_stages']\n- __inject__ = ['bbox_assigner']\n+ __inject__ = ['bbox_assigner', 'bbox_loss']\n\"\"\"\nCascade RCNN bbox head\n@@ -161,7 +161,8 @@ class CascadeHead(BBoxHead):\nnum_classes=80,\nbbox_weight=[[10., 10., 5., 5.], [20.0, 20.0, 10.0, 10.0],\n[30.0, 30.0, 15.0, 15.0]],\n- num_cascade_stages=3):\n+ num_cascade_stages=3,\n+ bbox_loss=None):\nnn.Layer.__init__(self, )\nself.head = head\nself.roi_extractor = roi_extractor\n@@ -172,6 +173,7 @@ class CascadeHead(BBoxHead):\nself.num_classes = num_classes\nself.bbox_weight = bbox_weight\nself.num_cascade_stages = num_cascade_stages\n+ self.bbox_loss = bbox_loss\nself.bbox_score_list = []\nself.bbox_delta_list = []\n"
}
] |
Python
|
Apache License 2.0
|
paddlepaddle/paddledetection
|
fix bbox_loss in cascade rcnn (#2614)
|
499,313 |
14.04.2021 20:40:41
| -28,800 |
f24c1b05e6070bbf571e538aed217e32ba873072
|
fix re-download not trigger when weights update
|
[
{
"change_type": "MODIFY",
"old_path": "ppdet/utils/download.py",
"new_path": "ppdet/utils/download.py",
"diff": "@@ -23,6 +23,8 @@ import shutil\nimport requests\nimport tqdm\nimport hashlib\n+import base64\n+import binascii\nimport tarfile\nimport zipfile\n@@ -257,12 +259,13 @@ def get_path(url, root_dir, md5sum=None, check_exist=True):\nif fullpath.find(k) >= 0:\nfullpath = osp.join(osp.split(fullpath)[0], v)\n- exist_flag = False\nif osp.exists(fullpath) and check_exist:\n- exist_flag = True\n+ if _check_exist_file_md5(fullpath, md5sum, url):\nlogger.debug(\"Found {}\".format(fullpath))\n+ return fullpath, True\nelse:\n- exist_flag = False\n+ os.remove(fullpath)\n+\nfullname = _download(url, root_dir, md5sum)\n# new weights format which postfix is 'pdparams' not\n@@ -270,7 +273,7 @@ def get_path(url, root_dir, md5sum=None, check_exist=True):\nif osp.splitext(fullname)[-1] not in ['.pdparams', '.yml']:\n_decompress(fullname)\n- return fullpath, exist_flag\n+ return fullpath, False\ndef download_dataset(path, dataset=None):\n@@ -324,7 +327,8 @@ def _download(url, path, md5sum=None):\nfullname = osp.join(path, fname)\nretry_cnt = 0\n- while not (osp.exists(fullname) and _md5check(fullname, md5sum)):\n+ while not (osp.exists(fullname) and _check_exist_file_md5(fullname, md5sum,\n+ url)):\nif retry_cnt < DOWNLOAD_RETRY_LIMIT:\nretry_cnt += 1\nelse:\n@@ -355,10 +359,32 @@ def _download(url, path, md5sum=None):\nif chunk:\nf.write(chunk)\nshutil.move(tmp_fullname, fullname)\n-\nreturn fullname\n+def _check_exist_file_md5(filename, md5sum, url):\n+ # if md5sum is None, and file to check is weights file,\n+ # read md5um from url and check, else check md5sum directly\n+ return _md5check_from_url(filename, url) if md5sum is None \\\n+ and filename.endswith('pdparams') \\\n+ else _md5check(filename, md5sum)\n+\n+\n+def _md5check_from_url(filename, url):\n+ # For weights in bcebos URLs, MD5 value is contained\n+ # in request header as 'content_md5'\n+ req = requests.get(url, stream=True)\n+ content_md5 = req.headers.get('content-md5')\n+ req.close()\n+ if not content_md5 or _md5check(\n+ filename,\n+ binascii.hexlify(base64.b64decode(content_md5.strip('\"'))).decode(\n+ )):\n+ return True\n+ else:\n+ return False\n+\n+\ndef _md5check(fullname, md5sum=None):\nif md5sum is None:\nreturn True\n"
}
] |
Python
|
Apache License 2.0
|
paddlepaddle/paddledetection
|
fix re-download not trigger when weights update (#2617)
|
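The re-download fix above compares a cached weights file against the MD5 carried in the bcebos `content-md5` response header. A minimal standalone sketch of that check, assuming the header holds base64 of the raw digest; helper names here are illustrative:

```python
import base64
import binascii
import hashlib

import requests


def md5_of_file(path, chunk_size=8192):
    md5 = hashlib.md5()
    with open(path, 'rb') as f:
        for chunk in iter(lambda: f.read(chunk_size), b''):
            md5.update(chunk)
    return md5.hexdigest()


def matches_remote_md5(path, url):
    # bcebos publishes base64(raw md5 digest) in the content-md5 header
    resp = requests.get(url, stream=True)
    content_md5 = resp.headers.get('content-md5')
    resp.close()
    if not content_md5:
        return True  # nothing to compare against; keep the cached file
    remote_hex = binascii.hexlify(
        base64.b64decode(content_md5.strip('"'))).decode()
    return md5_of_file(path) == remote_hex
```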
499,304 |
15.04.2021 10:29:17
| -28,800 |
133375ebda742812cc298cacfa0080b772f4b5b4
|
supplement trt_int8 function
|
[
{
"change_type": "MODIFY",
"old_path": "deploy/cpp/src/main.cc",
"new_path": "deploy/cpp/src/main.cc",
"diff": "@@ -37,7 +37,7 @@ DEFINE_string(image_path, \"\", \"Path of input image\");\nDEFINE_string(video_path, \"\", \"Path of input video\");\nDEFINE_bool(use_gpu, false, \"Infering with GPU or CPU\");\nDEFINE_bool(use_camera, false, \"Use camera or not\");\n-DEFINE_string(run_mode, \"fluid\", \"Mode of running(fluid/trt_fp32/trt_fp16)\");\n+DEFINE_string(run_mode, \"fluid\", \"Mode of running(fluid/trt_fp32/trt_fp16/trt_int8)\");\nDEFINE_int32(gpu_id, 0, \"Device id of GPU to execute\");\nDEFINE_int32(camera_id, -1, \"Device id of camera to predict\");\nDEFINE_bool(run_benchmark, false, \"Whether to predict a image_file repeatedly for benchmark\");\n"
},
{
"change_type": "MODIFY",
"old_path": "deploy/python/infer.py",
"new_path": "deploy/python/infer.py",
"diff": "@@ -321,7 +321,7 @@ def load_predictor(model_dir,\nArgs:\nmodel_dir (str): root path of __model__ and __params__\nuse_gpu (bool): whether use gpu\n- run_mode (str): mode of running(fluid/trt_fp32/trt_fp16)\n+ run_mode (str): mode of running(fluid/trt_fp32/trt_fp16/trt_int8)\nuse_dynamic_shape (bool): use dynamic shape or not\ntrt_min_shape (int): min shape for dynamic shape in trt\ntrt_max_shape (int): max shape for dynamic shape in trt\n@@ -335,11 +335,6 @@ def load_predictor(model_dir,\nraise ValueError(\n\"Predict by TensorRT mode: {}, expect use_gpu==True, but use_gpu == {}\"\n.format(run_mode, use_gpu))\n- if run_mode == 'trt_int8' and not os.path.exists(\n- os.path.join(model_dir, '_opt_cache')):\n- raise ValueError(\n- \"TensorRT int8 must calibration first, and model_dir must has _opt_cache dir\"\n- )\nuse_calib_mode = True if run_mode == 'trt_int8' else False\nconfig = Config(\nos.path.join(model_dir, 'model.pdmodel'),\n@@ -512,7 +507,7 @@ if __name__ == '__main__':\n\"--run_mode\",\ntype=str,\ndefault='fluid',\n- help=\"mode of running(fluid/trt_fp32/trt_fp16)\")\n+ help=\"mode of running(fluid/trt_fp32/trt_fp16/trt_int8)\")\nparser.add_argument(\n\"--use_gpu\",\ntype=ast.literal_eval,\n"
},
{
"change_type": "DELETE",
"old_path": "deploy/python/trt_int8_calib.py",
"new_path": null,
"diff": "-# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.\n-#\n-# Licensed under the Apache License, Version 2.0 (the \"License\");\n-# you may not use this file except in compliance with the License.\n-# You may obtain a copy of the License at\n-#\n-# http://www.apache.org/licenses/LICENSE-2.0\n-#\n-# Unless required by applicable law or agreed to in writing, software\n-# distributed under the License is distributed on an \"AS IS\" BASIS,\n-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n-# See the License for the specific language governing permissions and\n-# limitations under the License.\n-\n-import os\n-import argparse\n-import time\n-import yaml\n-import ast\n-from functools import reduce\n-\n-from PIL import Image\n-import cv2\n-import numpy as np\n-import glob\n-import paddle\n-from preprocess import preprocess, Resize, NormalizeImage, Permute, PadStride\n-from visualize import visualize_box_mask\n-from paddle.inference import Config\n-from paddle.inference import create_predictor\n-\n-# Global dictionary\n-SUPPORT_MODELS = {\n- 'YOLO',\n- 'RCNN',\n- 'SSD',\n- 'FCOS',\n- 'SOLOv2',\n- 'TTFNet',\n-}\n-\n-\n-class Detector(object):\n- \"\"\"\n- Args:\n- config (object): config of model, defined by `Config(model_dir)`\n- model_dir (str): root path of model.pdiparams, model.pdmodel and infer_cfg.yml\n- use_gpu (bool): whether use gpu\n- \"\"\"\n-\n- def __init__(self, pred_config, model_dir, use_gpu=False):\n- self.pred_config = pred_config\n- self.predictor = load_predictor(\n- model_dir,\n- min_subgraph_size=self.pred_config.min_subgraph_size,\n- use_gpu=use_gpu)\n-\n- def preprocess(self, im):\n- preprocess_ops = []\n- for op_info in self.pred_config.preprocess_infos:\n- new_op_info = op_info.copy()\n- op_type = new_op_info.pop('type')\n- preprocess_ops.append(eval(op_type)(**new_op_info))\n- im, im_info = preprocess(im, preprocess_ops,\n- self.pred_config.input_shape)\n- inputs = create_inputs(im, im_info)\n- return inputs\n-\n- def postprocess(self, np_boxes, np_masks, inputs, threshold=0.5):\n- # postprocess output of predictor\n- results = {}\n- if self.pred_config.arch in ['Face']:\n- h, w = inputs['im_shape']\n- scale_y, scale_x = inputs['scale_factor']\n- w, h = float(h) / scale_y, float(w) / scale_x\n- np_boxes[:, 2] *= h\n- np_boxes[:, 3] *= w\n- np_boxes[:, 4] *= h\n- np_boxes[:, 5] *= w\n- results['boxes'] = np_boxes\n- if np_masks is not None:\n- results['masks'] = np_masks\n- return results\n-\n- def predict(self,\n- image,\n- threshold=0.5,\n- warmup=0,\n- repeats=1,\n- run_benchmark=False):\n- '''\n- Args:\n- image (str/np.ndarray): path of image/ np.ndarray read by cv2\n- threshold (float): threshold of predicted box' score\n- Returns:\n- results (dict): include 'boxes': np.ndarray: shape:[N,6], N: number of box,\n- matix element:[class, score, x_min, y_min, x_max, y_max]\n- MaskRCNN's results include 'masks': np.ndarray:\n- shape: [N, im_h, im_w]\n- '''\n- inputs = self.preprocess(image)\n- np_boxes, np_masks = None, None\n- input_names = self.predictor.get_input_names()\n- for i in range(len(input_names)):\n- input_tensor = self.predictor.get_input_handle(input_names[i])\n- input_tensor.copy_from_cpu(inputs[input_names[i]])\n-\n- for i in range(warmup):\n- self.predictor.run()\n- output_names = self.predictor.get_output_names()\n- boxes_tensor = self.predictor.get_output_handle(output_names[0])\n- np_boxes = boxes_tensor.copy_to_cpu()\n- if self.pred_config.mask:\n- masks_tensor = 
self.predictor.get_output_handle(output_names[2])\n- np_masks = masks_tensor.copy_to_cpu()\n-\n- t1 = time.time()\n- for i in range(repeats):\n- self.predictor.run()\n- output_names = self.predictor.get_output_names()\n- boxes_tensor = self.predictor.get_output_handle(output_names[0])\n- np_boxes = boxes_tensor.copy_to_cpu()\n- if self.pred_config.mask:\n- masks_tensor = self.predictor.get_output_handle(output_names[2])\n- np_masks = masks_tensor.copy_to_cpu()\n- t2 = time.time()\n- ms = (t2 - t1) * 1000.0 / repeats\n- print(\"Inference: {} ms per batch image\".format(ms))\n-\n- # do not perform postprocess in benchmark mode\n- results = []\n- if not run_benchmark:\n- if reduce(lambda x, y: x * y, np_boxes.shape) < 6:\n- print('[WARNNING] No object detected.')\n- results = {'boxes': np.array([])}\n- else:\n- results = self.postprocess(\n- np_boxes, np_masks, inputs, threshold=threshold)\n-\n- return results\n-\n-\n-def create_inputs(im, im_info):\n- \"\"\"generate input for different model type\n- Args:\n- im (np.ndarray): image (np.ndarray)\n- im_info (dict): info of image\n- model_arch (str): model type\n- Returns:\n- inputs (dict): input of model\n- \"\"\"\n- inputs = {}\n- inputs['image'] = np.array((im, )).astype('float32')\n- inputs['im_shape'] = np.array((im_info['im_shape'], )).astype('float32')\n- inputs['scale_factor'] = np.array(\n- (im_info['scale_factor'], )).astype('float32')\n-\n- return inputs\n-\n-\n-class PredictConfig():\n- \"\"\"set config of preprocess, postprocess and visualize\n- Args:\n- model_dir (str): root path of model.yml\n- \"\"\"\n-\n- def __init__(self, model_dir):\n- # parsing Yaml config for Preprocess\n- deploy_file = os.path.join(model_dir, 'infer_cfg.yml')\n- with open(deploy_file) as f:\n- yml_conf = yaml.safe_load(f)\n- self.check_model(yml_conf)\n- self.arch = yml_conf['arch']\n- self.preprocess_infos = yml_conf['Preprocess']\n- self.min_subgraph_size = yml_conf['min_subgraph_size']\n- self.labels = yml_conf['label_list']\n- self.mask = False\n- if 'mask' in yml_conf:\n- self.mask = yml_conf['mask']\n- self.input_shape = yml_conf['image_shape']\n- self.print_config()\n-\n- def check_model(self, yml_conf):\n- \"\"\"\n- Raises:\n- ValueError: loaded model not in supported model type\n- \"\"\"\n- for support_model in SUPPORT_MODELS:\n- if support_model in yml_conf['arch']:\n- return True\n- raise ValueError(\"Unsupported arch: {}, expect {}\".format(yml_conf[\n- 'arch'], SUPPORT_MODELS))\n-\n- def print_config(self):\n- print('----------- Model Configuration -----------')\n- print('%s: %s' % ('Model Arch', self.arch))\n- print('%s: ' % ('Transform Order'))\n- for op_info in self.preprocess_infos:\n- print('--%s: %s' % ('transform op', op_info['type']))\n- print('--------------------------------------------')\n-\n-\n-def load_predictor(model_dir, batch_size=1, use_gpu=False, min_subgraph_size=3):\n- \"\"\"set AnalysisConfig, generate AnalysisPredictor\n- Args:\n- model_dir (str): root path of __model__ and __params__\n- use_gpu (bool): whether use gpu\n- Returns:\n- predictor (PaddlePredictor): AnalysisPredictor\n- Raises:\n- ValueError: predict by TensorRT need use_gpu == True.\n- \"\"\"\n- run_mode = 'trt_int8'\n- if not use_gpu and not run_mode == 'fluid':\n- raise ValueError(\n- \"Predict by TensorRT mode: {}, expect use_gpu==True, but use_gpu == {}\"\n- .format(run_mode, use_gpu))\n- config = Config(\n- os.path.join(model_dir, 'model.pdmodel'),\n- os.path.join(model_dir, 'model.pdiparams'))\n- precision_map = {\n- 'trt_int8': 
Config.Precision.Int8,\n- 'trt_fp32': Config.Precision.Float32,\n- 'trt_fp16': Config.Precision.Half\n- }\n- if use_gpu:\n- # initial GPU memory(M), device ID\n- config.enable_use_gpu(200, 0)\n- # optimize graph and fuse op\n- config.switch_ir_optim(True)\n- else:\n- config.disable_gpu()\n-\n- if run_mode in precision_map.keys():\n- config.enable_tensorrt_engine(\n- workspace_size=1 << 10,\n- max_batch_size=batch_size,\n- min_subgraph_size=min_subgraph_size,\n- precision_mode=precision_map[run_mode],\n- use_static=False,\n- use_calib_mode=True)\n-\n- # disable print log when predict\n- config.disable_glog_info()\n- # enable shared memory\n- config.enable_memory_optim()\n- # disable feed, fetch OP, needed by zero_copy_run\n- config.switch_use_feed_fetch_ops(False)\n- predictor = create_predictor(config)\n- return predictor\n-\n-\n-def print_arguments(args):\n- print('----------- Running Arguments -----------')\n- for arg, value in sorted(vars(args).items()):\n- print('%s: %s' % (arg, value))\n- print('------------------------------------------')\n-\n-\n-def predict_image_dir(detector):\n- for image_file in glob.glob(FLAGS.image_dir + '/*.jpg'):\n- print('image_file is', image_file)\n- results = detector.predict(image_file, threshold=0.5)\n-\n-\n-def main():\n- pred_config = PredictConfig(FLAGS.model_dir)\n- detector = Detector(pred_config, FLAGS.model_dir, use_gpu=FLAGS.use_gpu)\n- # predict from image\n- if FLAGS.image_dir != '':\n- predict_image_dir(detector)\n-\n-\n-if __name__ == '__main__':\n- paddle.enable_static()\n- parser = argparse.ArgumentParser(description=__doc__)\n- parser.add_argument(\n- \"--model_dir\",\n- type=str,\n- default=None,\n- help=(\"Directory include:'model.pdiparams', 'model.pdmodel', \"\n- \"'infer_cfg.yml', created by tools/export_model.py.\"),\n- required=True)\n- parser.add_argument(\n- \"--image_dir\", type=str, default='', help=\"Directory of image file.\")\n- parser.add_argument(\n- \"--use_gpu\",\n- type=ast.literal_eval,\n- default=False,\n- help=\"Whether to predict with GPU.\")\n- print('err?')\n- parser.add_argument(\n- \"--output_dir\",\n- type=str,\n- default=\"output\",\n- help=\"Directory of output visualization files.\")\n- FLAGS = parser.parse_args()\n- print_arguments(FLAGS)\n-\n- main()\n"
},
{
"change_type": "MODIFY",
"old_path": "static/deploy/cpp/src/main.cc",
"new_path": "static/deploy/cpp/src/main.cc",
"diff": "@@ -199,8 +199,8 @@ int main(int argc, char** argv) {\nreturn -1;\n}\nif (!(FLAGS_run_mode == \"fluid\" || FLAGS_run_mode == \"trt_fp32\"\n- || FLAGS_run_mode == \"trt_fp16\")) {\n- std::cout << \"run_mode should be 'fluid', 'trt_fp32' or 'trt_fp16'.\";\n+ || FLAGS_run_mode == \"trt_fp16\" || FLAGS_run_mode == \"trt_int8\")) {\n+ std::cout << \"run_mode should be 'fluid', 'trt_fp32', 'trt_fp16' or 'trt_int8'.\";\nreturn -1;\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "static/deploy/cpp/src/object_detector.cc",
"new_path": "static/deploy/cpp/src/object_detector.cc",
"diff": "@@ -32,17 +32,16 @@ void ObjectDetector::LoadModel(const std::string& model_dir,\nconfig.SetModel(prog_file, params_file);\nif (use_gpu) {\nconfig.EnableUseGpu(100, gpu_id);\n+ config.SwitchIrOptim(true);\nif (run_mode != \"fluid\") {\nauto precision = paddle::AnalysisConfig::Precision::kFloat32;\nif (run_mode == \"trt_fp16\") {\nprecision = paddle::AnalysisConfig::Precision::kHalf;\n} else if (run_mode == \"trt_int8\") {\n- printf(\"TensorRT int8 mode is not supported now, \"\n- \"please use 'trt_fp32' or 'trt_fp16' instead\");\n+ precision = paddle::AnalysisConfig::Precision::kInt8;\n+ use_calib_mode = true;\n} else {\n- if (run_mode != \"trt_fp32\") {\n- printf(\"run_mode should be 'fluid', 'trt_fp32' or 'trt_fp16'\");\n- }\n+ printf(\"run_mode should be 'fluid', 'trt_fp32', 'trt_fp16' or 'trt_int8'\");\n}\nconfig.EnableTensorRtEngine(\n1 << 10,\n@@ -50,7 +49,7 @@ void ObjectDetector::LoadModel(const std::string& model_dir,\nmin_subgraph_size,\nprecision,\nfalse,\n- false);\n+ use_calib_mode);\n}\n} else {\nconfig.DisableGpu();\n"
},
{
"change_type": "MODIFY",
"old_path": "static/deploy/python/infer.py",
"new_path": "static/deploy/python/infer.py",
"diff": "@@ -393,9 +393,7 @@ def load_predictor(model_dir,\nraise ValueError(\n\"Predict by TensorRT mode: {}, expect use_gpu==True, but use_gpu == {}\"\n.format(run_mode, use_gpu))\n- if run_mode == 'trt_int8':\n- raise ValueError(\"TensorRT int8 mode is not supported now, \"\n- \"please use trt_fp32 or trt_fp16 instead.\")\n+ use_calib_mode = True if run_mode == 'trt_int8' else False\nprecision_map = {\n'trt_int8': fluid.core.AnalysisConfig.Precision.Int8,\n'trt_fp32': fluid.core.AnalysisConfig.Precision.Float32,\n@@ -419,7 +417,7 @@ def load_predictor(model_dir,\nmin_subgraph_size=min_subgraph_size,\nprecision_mode=precision_map[run_mode],\nuse_static=False,\n- use_calib_mode=False)\n+ use_calib_mode=use_calib_mode)\n# disable print log when predict\nconfig.disable_glog_info()\n@@ -574,7 +572,7 @@ if __name__ == '__main__':\n\"--run_mode\",\ntype=str,\ndefault='fluid',\n- help=\"mode of running(fluid/trt_fp32/trt_fp16)\")\n+ help=\"mode of running(fluid/trt_fp32/trt_fp16/trt_int8)\")\nparser.add_argument(\n\"--use_gpu\",\ntype=ast.literal_eval,\n"
}
] |
Python
|
Apache License 2.0
|
paddlepaddle/paddledetection
|
supplement trt_int8 function (#2619)
|
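The trt_int8 support above hinges on passing `use_calib_mode=True` to `enable_tensorrt_engine`. A condensed sketch of the predictor setup using the same Paddle Inference calls that appear in the diffs; defaults are illustrative:

```python
import os

from paddle.inference import Config, create_predictor


def build_predictor(model_dir, run_mode='trt_int8', batch_size=1, min_subgraph_size=3):
    config = Config(
        os.path.join(model_dir, 'model.pdmodel'),
        os.path.join(model_dir, 'model.pdiparams'))
    config.enable_use_gpu(200, 0)  # initial GPU memory (MB), device id
    precision_map = {
        'trt_int8': Config.Precision.Int8,
        'trt_fp32': Config.Precision.Float32,
        'trt_fp16': Config.Precision.Half,
    }
    if run_mode in precision_map:
        config.enable_tensorrt_engine(
            workspace_size=1 << 10,
            max_batch_size=batch_size,
            min_subgraph_size=min_subgraph_size,
            precision_mode=precision_map[run_mode],
            use_static=False,
            use_calib_mode=(run_mode == 'trt_int8'))  # int8 calibrates on first runs
    config.switch_use_feed_fetch_ops(False)
    return create_predictor(config)
```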
499,333 |
15.04.2021 11:14:45
| -28,800 |
bd5d850981add643c276dd372ec8256d855fa73a
|
fix res5
|
[
{
"change_type": "MODIFY",
"old_path": "ppdet/modeling/backbones/resnet.py",
"new_path": "ppdet/modeling/backbones/resnet.py",
"diff": "@@ -590,8 +590,9 @@ class Res5Head(nn.Layer):\nif depth < 50:\nfeat_in = 256\nna = NameAdapter(self)\n+ block = BottleNeck if depth >= 50 else BasicBlock\nself.res5 = Blocks(\n- depth, feat_in, feat_out, count=3, name_adapter=na, stage_num=5)\n+ block, feat_in, feat_out, count=3, name_adapter=na, stage_num=5)\nself.feat_out = feat_out if depth < 50 else feat_out * 4\n@property\n"
}
] |
Python
|
Apache License 2.0
|
paddlepaddle/paddledetection
|
fix res5 (#2631)
|
499,304 |
15.04.2021 13:58:28
| -28,800 |
ac701833d3421d5f0ffc88b1dc6df6fc5053e61b
|
fix yolo config
|
[
{
"change_type": "MODIFY",
"old_path": "configs/ppyolo/ppyolo_r50vd_dcn_voc.yml",
"new_path": "configs/ppyolo/ppyolo_r50vd_dcn_voc.yml",
"diff": "@@ -11,13 +11,13 @@ weights: output/ppyolo_r50vd_dcn_voc/model_final\nTrainReader:\nbatch_transforms:\n- - BatchRandomResizeOp: {target_size: [320, 352, 384, 416, 448, 480, 512, 544, 576, 608], random_size: True, random_interp: True, keep_ratio: False}\n- - NormalizeBoxOp: {}\n- - PadBoxOp: {num_max_boxes: 50}\n- - BboxXYXY2XYWHOp: {}\n- - NormalizeImageOp: {mean: [0.485, 0.456, 0.406], std: [0.229, 0.224, 0.225], is_scale: True}\n- - PermuteOp: {}\n- - Gt2YoloTargetOp: {anchor_masks: [[6, 7, 8], [3, 4, 5], [0, 1, 2]], anchors: [[10, 13], [16, 30], [33, 23], [30, 61], [62, 45], [59, 119], [116, 90], [156, 198], [373, 326]], downsample_ratios: [32, 16, 8], num_classes: 20}\n+ - BatchRandomResize: {target_size: [320, 352, 384, 416, 448, 480, 512, 544, 576, 608], random_size: True, random_interp: True, keep_ratio: False}\n+ - NormalizeBox: {}\n+ - PadBox: {num_max_boxes: 50}\n+ - BboxXYXY2XYWH: {}\n+ - NormalizeImage: {mean: [0.485, 0.456, 0.406], std: [0.229, 0.224, 0.225], is_scale: True}\n+ - Permute: {}\n+ - Gt2YoloTarget: {anchor_masks: [[6, 7, 8], [3, 4, 5], [0, 1, 2]], anchors: [[10, 13], [16, 30], [33, 23], [30, 61], [62, 45], [59, 119], [116, 90], [156, 198], [373, 326]], downsample_ratios: [32, 16, 8], num_classes: 20}\nmixup_epoch: 350\nbatch_size: 12\n"
},
{
"change_type": "MODIFY",
"old_path": "configs/slim/prune/yolov3_prune_fpgm.yml",
"new_path": "configs/slim/prune/yolov3_prune_fpgm.yml",
"diff": "# Weights of yolov3_mobilenet_v1_voc\npretrain_weights: https://paddledet.bj.bcebos.com/models/yolov3_mobilenet_v1_270e_voc.pdparams\n-weight_type: resume\nslim: Pruner\nPruner:\n"
},
{
"change_type": "MODIFY",
"old_path": "configs/slim/prune/yolov3_prune_l1_norm.yml",
"new_path": "configs/slim/prune/yolov3_prune_l1_norm.yml",
"diff": "# Weights of yolov3_mobilenet_v1_voc\npretrain_weights: https://paddledet.bj.bcebos.com/models/yolov3_mobilenet_v1_270e_voc.pdparams\n-weight_type: resume\nslim: Pruner\nPruner:\n"
}
] |
Python
|
Apache License 2.0
|
paddlepaddle/paddledetection
|
fix yolo config (#2635)
|
499,304 |
15.04.2021 20:28:09
| -28,800 |
6ad5973d0fbefbe2ce16c50974f49b5b37c87251
|
fix ignore warning
|
[
{
"change_type": "MODIFY",
"old_path": "tools/eval.py",
"new_path": "tools/eval.py",
"diff": "@@ -22,6 +22,10 @@ parent_path = os.path.abspath(os.path.join(__file__, *(['..'] * 2)))\nif parent_path not in sys.path:\nsys.path.append(parent_path)\n+# ignore warning log\n+import warnings\n+warnings.filterwarnings('ignore')\n+\nimport paddle\nfrom ppdet.core.workspace import load_config, merge_config\n"
},
{
"change_type": "MODIFY",
"old_path": "tools/export_model.py",
"new_path": "tools/export_model.py",
"diff": "@@ -21,6 +21,10 @@ parent_path = os.path.abspath(os.path.join(__file__, *(['..'] * 2)))\nif parent_path not in sys.path:\nsys.path.append(parent_path)\n+# ignore warning log\n+import warnings\n+warnings.filterwarnings('ignore')\n+\nimport paddle\nfrom ppdet.core.workspace import load_config, merge_config\n"
},
{
"change_type": "MODIFY",
"old_path": "tools/infer.py",
"new_path": "tools/infer.py",
"diff": "@@ -21,6 +21,9 @@ parent_path = os.path.abspath(os.path.join(__file__, *(['..'] * 2)))\nif parent_path not in sys.path:\nsys.path.append(parent_path)\n+# ignore warning log\n+import warnings\n+warnings.filterwarnings('ignore')\nimport glob\nimport paddle\n"
},
{
"change_type": "MODIFY",
"old_path": "tools/train.py",
"new_path": "tools/train.py",
"diff": "@@ -24,6 +24,9 @@ if parent_path not in sys.path:\nimport random\nimport numpy as np\n+# ignore warning log\n+import warnings\n+warnings.filterwarnings('ignore')\nimport paddle\n"
}
] |
Python
|
Apache License 2.0
|
paddlepaddle/paddledetection
|
fix ignore warning (#2647)
|
499,331 |
15.04.2021 22:45:43
| -28,800 |
19e1e65201bc9d84ab231454e767c820fcad7e97
|
fix bug of import and update doc
|
[
{
"change_type": "MODIFY",
"old_path": "configs/datasets/dota.yml",
"new_path": "configs/datasets/dota.yml",
"diff": "@@ -5,16 +5,16 @@ TrainDataset:\n!COCODataSet\nimage_dir: trainval_split/images\nanno_path: trainval_split/s2anet_trainval_paddle_coco.json\n- dataset_dir: /paddle/dataset/DOTA_1024_s2anet\n+ dataset_dir: dataset/DOTA_1024_s2anet\ndata_fields: ['image', 'gt_bbox', 'gt_class', 'is_crowd', 'gt_rbox']\nEvalDataset:\n!COCODataSet\nimage_dir: trainval_split/images\nanno_path: trainval_split/s2anet_trainval_paddle_coco.json\n- dataset_dir: /paddle/dataset/DOTA_1024_s2anet/\n+ dataset_dir: dataset/DOTA_1024_s2anet/\nTestDataset:\n!ImageFolder\nanno_path: trainval_split/s2anet_trainval_paddle_coco.json\n- dataset_dir: /paddle/dataset/DOTA_1024_s2anet/\n+ dataset_dir: dataset/DOTA_1024_s2anet/\n"
},
{
"change_type": "MODIFY",
"old_path": "ppdet/data/transform/batch_operators.py",
"new_path": "ppdet/data/transform/batch_operators.py",
"diff": "@@ -27,6 +27,7 @@ from .operators import register_op, BaseOperator, Resize\nfrom .op_helper import jaccard_overlap, gaussian2D\nfrom scipy import ndimage\n+from ppdet.modeling import bbox_utils\nfrom ppdet.utils.logger import setup_logger\nlogger = setup_logger(__name__)\n"
},
{
"change_type": "MODIFY",
"old_path": "ppdet/data/transform/operators.py",
"new_path": "ppdet/data/transform/operators.py",
"diff": "@@ -39,6 +39,7 @@ from PIL import Image, ImageEnhance, ImageDraw\nfrom ppdet.core.workspace import serializable\nfrom ppdet.modeling.layers import AnchorGrid\n+from ppdet.modeling import bbox_utils\nfrom .op_helper import (satisfy_sample_constraint, filter_and_process,\ngenerate_sample_bbox, clip_bbox, data_anchor_sampling,\n"
}
] |
Python
|
Apache License 2.0
|
paddlepaddle/paddledetection
|
fix bug of import and update doc (#2642)
|
499,304 |
15.04.2021 22:49:03
| -28,800 |
2bf965f0a2cc33cdb7b7d38c1daf274a92547c7f
|
fix use_calib_mode in cpp infer
|
[
{
"change_type": "MODIFY",
"old_path": "static/deploy/cpp/src/object_detector.cc",
"new_path": "static/deploy/cpp/src/object_detector.cc",
"diff": "@@ -33,6 +33,7 @@ void ObjectDetector::LoadModel(const std::string& model_dir,\nif (use_gpu) {\nconfig.EnableUseGpu(100, gpu_id);\nconfig.SwitchIrOptim(true);\n+ bool use_calib_mode = false;\nif (run_mode != \"fluid\") {\nauto precision = paddle::AnalysisConfig::Precision::kFloat32;\nif (run_mode == \"trt_fp16\") {\n"
}
] |
Python
|
Apache License 2.0
|
paddlepaddle/paddledetection
|
fix use_calib_mode in cpp infer (#2650)
|
499,333 |
16.04.2021 10:23:14
| -28,800 |
6c79e88ccec2f649fc19d39cd3f85b86e90851d0
|
fix ttfhead & doc link
|
[
{
"change_type": "MODIFY",
"old_path": "configs/fcos/README.md",
"new_path": "configs/fcos/README.md",
"diff": "@@ -19,7 +19,6 @@ FCOS (Fully Convolutional One-Stage Object Detection) is a fast anchor-free obje\n**Notes:**\n- FCOS is trained on COCO train2017 dataset and evaluated on val2017 results of `mAP(IoU=0.5:0.95)`.\n-- FCOS training performace is dependented on Paddle develop branch, performance reproduction shoule based on [Paddle daily version](https://www.paddlepaddle.org.cn/documentation/docs/zh/install/Tables.html#whl-dev) or Paddle 2.0.1(will be published on 2021.03), performace will loss slightly is training base on Paddle 2.0.0\n## Citations\n```\n"
},
{
"change_type": "MODIFY",
"old_path": "configs/pedestrian/README.md",
"new_path": "configs/pedestrian/README.md",
"diff": "@@ -5,7 +5,7 @@ We provide some models implemented by PaddlePaddle to detect objects in specific\n| Task | Algorithm | Box AP | Download | Configs |\n|:---------------------|:---------:|:------:| :-------------------------------------------------------------------------------------: |:------:|\n-| Pedestrian Detection | YOLOv3 | 51.8 | [model](https://paddledet.bj.bcebos.com/models/pedestrian_yolov3_darknet.pdparams) | [config](https://github.com/PaddlePaddle/PaddleDetection/tree/master/dygraph/configs/pedestrian/pedestrian_yolov3_darknet.yml) |\n+| Pedestrian Detection | YOLOv3 | 51.8 | [model](https://paddledet.bj.bcebos.com/models/pedestrian_yolov3_darknet.pdparams) | [config](https://github.com/PaddlePaddle/PaddleDetection/tree/develop/configs/pedestrian/pedestrian_yolov3_darknet.yml) |\n## Pedestrian Detection\n@@ -17,7 +17,7 @@ The network for detecting vehicles is YOLOv3, the backbone of which is Dacknet53\n### 2. Configuration for training\n-PaddleDetection provides users with a configuration file [yolov3_darknet53_270e_coco.yml](https://github.com/PaddlePaddle/PaddleDetection/blob/master/dygraph/configs/yolov3/yolov3_darknet53_270e_coco.yml) to train YOLOv3 on the COCO dataset, compared with this file, we modify some parameters as followed to conduct the training for pedestrian detection:\n+PaddleDetection provides users with a configuration file [yolov3_darknet53_270e_coco.yml](https://github.com/PaddlePaddle/PaddleDetection/blob/develop/configs/yolov3/yolov3_darknet53_270e_coco.yml) to train YOLOv3 on the COCO dataset, compared with this file, we modify some parameters as followed to conduct the training for pedestrian detection:\n* num_classes: 1\n* dataset_dir: dataset/pedestrian\n@@ -45,6 +45,6 @@ python -u tools/infer.py -c configs/pedestrian/pedestrian_yolov3_darknet.yml \\\nSome inference results are visualized below:\n-\n+\n-\n+\n"
},
{
"change_type": "MODIFY",
"old_path": "ppdet/data/source/dataset.py",
"new_path": "ppdet/data/source/dataset.py",
"diff": "@@ -77,6 +77,11 @@ class DetDataset(Dataset):\ncopy.deepcopy(self.roidbs[np.random.randint(n)])\nfor _ in range(3)\n]\n+ if isinstance(roidb, Sequence):\n+ for r in roidb:\n+ r['curr_iter'] = self._curr_iter\n+ else:\n+ roidb['curr_iter'] = self._curr_iter\nroidb['curr_iter'] = self._curr_iter\nself._curr_iter += 1\n"
},
{
"change_type": "MODIFY",
"old_path": "ppdet/modeling/heads/ttf_head.py",
"new_path": "ppdet/modeling/heads/ttf_head.py",
"diff": "@@ -72,8 +72,7 @@ class HMHead(nn.Layer):\nin_channels=ch_in if i == 0 else ch_out,\nout_channels=ch_out,\nkernel_size=3,\n- weight_attr=ParamAttr(initializer=Normal(0, 0.01)),\n- name='hm.' + name))\n+ weight_attr=ParamAttr(initializer=Normal(0, 0.01))))\nelse:\nhead_conv.add_sublayer(\nname,\n@@ -151,8 +150,7 @@ class WHHead(nn.Layer):\nin_channels=ch_in if i == 0 else ch_out,\nout_channels=ch_out,\nkernel_size=3,\n- weight_attr=ParamAttr(initializer=Normal(0, 0.01)),\n- name='wh.' + name))\n+ weight_attr=ParamAttr(initializer=Normal(0, 0.01))))\nelse:\nhead_conv.add_sublayer(\nname,\n"
}
] |
Python
|
Apache License 2.0
|
paddlepaddle/paddledetection
|
fix ttfhead & doc link (#2654)
|
499,331 |
16.04.2021 10:53:08
| -28,800 |
5aafa1b674a0a037531639e485b6311bebfb88da
|
fix stride data type
|
[
{
"change_type": "MODIFY",
"old_path": "configs/faster_rcnn/_base_/faster_reader.yml",
"new_path": "configs/faster_rcnn/_base_/faster_reader.yml",
"diff": "@@ -7,7 +7,7 @@ TrainReader:\n- NormalizeImage: {is_scale: true, mean: [0.485,0.456,0.406], std: [0.229, 0.224,0.225]}\n- Permute: {}\nbatch_transforms:\n- - PadBatch: {pad_to_stride: -1.}\n+ - PadBatch: {pad_to_stride: -1}\nbatch_size: 1\nshuffle: true\ndrop_last: true\n@@ -21,7 +21,7 @@ EvalReader:\n- NormalizeImage: {is_scale: true, mean: [0.485,0.456,0.406], std: [0.229, 0.224,0.225]}\n- Permute: {}\nbatch_transforms:\n- - PadBatch: {pad_to_stride: -1.}\n+ - PadBatch: {pad_to_stride: -1}\nbatch_size: 1\nshuffle: false\ndrop_last: false\n@@ -35,7 +35,7 @@ TestReader:\n- NormalizeImage: {is_scale: true, mean: [0.485,0.456,0.406], std: [0.229, 0.224,0.225]}\n- Permute: {}\nbatch_transforms:\n- - PadBatch: {pad_to_stride: -1.}\n+ - PadBatch: {pad_to_stride: -1}\nbatch_size: 1\nshuffle: false\ndrop_last: false\n"
},
{
"change_type": "MODIFY",
"old_path": "configs/mask_rcnn/_base_/mask_reader.yml",
"new_path": "configs/mask_rcnn/_base_/mask_reader.yml",
"diff": "@@ -7,7 +7,7 @@ TrainReader:\n- NormalizeImage: {is_scale: true, mean: [0.485,0.456,0.406], std: [0.229, 0.224,0.225]}\n- Permute: {}\nbatch_transforms:\n- - PadBatch: {pad_to_stride: -1.}\n+ - PadBatch: {pad_to_stride: -1}\nbatch_size: 1\nshuffle: true\ndrop_last: true\n@@ -22,7 +22,7 @@ EvalReader:\n- NormalizeImage: {is_scale: true, mean: [0.485,0.456,0.406], std: [0.229, 0.224,0.225]}\n- Permute: {}\nbatch_transforms:\n- - PadBatch: {pad_to_stride: -1.}\n+ - PadBatch: {pad_to_stride: -1}\nbatch_size: 1\nshuffle: false\ndrop_last: false\n@@ -36,7 +36,7 @@ TestReader:\n- NormalizeImage: {is_scale: true, mean: [0.485,0.456,0.406], std: [0.229, 0.224,0.225]}\n- Permute: {}\nbatch_transforms:\n- - PadBatch: {pad_to_stride: -1.}\n+ - PadBatch: {pad_to_stride: -1}\nbatch_size: 1\nshuffle: false\ndrop_last: false\n"
},
{
"change_type": "MODIFY",
"old_path": "static/configs/faster_reader.yml",
"new_path": "static/configs/faster_reader.yml",
"diff": "@@ -26,7 +26,7 @@ TrainReader:\nchannel_first: true\nbatch_transforms:\n- !PadBatch\n- pad_to_stride: -1.\n+ pad_to_stride: -1\nuse_padded_im_info: false\nbatch_size: 1\nshuffle: true\n"
},
{
"change_type": "MODIFY",
"old_path": "static/configs/mask_reader.yml",
"new_path": "static/configs/mask_reader.yml",
"diff": "@@ -27,7 +27,7 @@ TrainReader:\nchannel_first: true\nbatch_transforms:\n- !PadBatch\n- pad_to_stride: -1.\n+ pad_to_stride: -1\nuse_padded_im_info: false\nbatch_size: 1\nshuffle: true\n"
},
{
"change_type": "MODIFY",
"old_path": "static/configs/mask_reader_cocome.yml",
"new_path": "static/configs/mask_reader_cocome.yml",
"diff": "@@ -27,7 +27,7 @@ TrainReader:\nchannel_first: true\nbatch_transforms:\n- !PadBatch\n- pad_to_stride: -1.\n+ pad_to_stride: -1\nuse_padded_im_info: false\nbatch_size: 1\nshuffle: true\n"
}
] |
Python
|
Apache License 2.0
|
paddlepaddle/paddledetection
|
fix stride data type (#2644)
|
499,298 |
16.04.2021 22:17:12
| -28,800 |
abd1aefb7c7270d0c30947dd6060cfcafea31eac
|
fix yolo config modelzoo, test=document_fix
|
[
{
"change_type": "MODIFY",
"old_path": "configs/ppyolo/_base_/ppyolo_mbv3_large.yml",
"new_path": "configs/ppyolo/_base_/ppyolo_mbv3_large.yml",
"diff": "@@ -18,7 +18,7 @@ MobileNetV3:\nfeature_maps: [13, 16]\nPPYOLOFPN:\n- feat_channels: [160, 368]\n+ in_channels: [160, 368]\ncoord_conv: true\nconv_block_num: 0\nspp: true\n"
},
{
"change_type": "MODIFY",
"old_path": "configs/ppyolo/_base_/ppyolo_mbv3_small.yml",
"new_path": "configs/ppyolo/_base_/ppyolo_mbv3_small.yml",
"diff": "@@ -18,7 +18,7 @@ MobileNetV3:\nfeature_maps: [9, 12]\nPPYOLOFPN:\n- feat_channels: [96, 304]\n+ in_channels: [96, 304]\ncoord_conv: true\nconv_block_num: 0\nspp: true\n"
}
] |
Python
|
Apache License 2.0
|
paddlepaddle/paddledetection
|
fix yolo config modelzoo, test=document_fix (#2663)
|
499,395 |
16.04.2021 22:27:55
| -28,800 |
56bb6f7660597449f1a169b3f4f3744db5d92cb2
|
modify ppyolo docs, test=document_fix
|
[
{
"change_type": "MODIFY",
"old_path": "configs/ppyolo/README.md",
"new_path": "configs/ppyolo/README.md",
"diff": "@@ -49,8 +49,8 @@ PP-YOLO improved performance and speed of YOLOv3 with following methods:\n| PP-YOLO | 4 | 32 | ResNet18vd | 512 | 29.2 | 29.5 | 357.1 | 657.9 | [model](https://paddledet.bj.bcebos.com/models/ppyolo_r18vd_coco.pdparams) | [config](https://github.com/PaddlePaddle/PaddleDetection/tree/develop/configs/ppyolo/ppyolo_r18vd_coco.yml) |\n| PP-YOLO | 4 | 32 | ResNet18vd | 416 | 28.6 | 28.9 | 409.8 | 719.4 | [model](https://paddledet.bj.bcebos.com/models/ppyolo_r18vd_coco.pdparams) | [config](https://github.com/PaddlePaddle/PaddleDetection/tree/develop/configs/ppyolo/ppyolo_r18vd_coco.yml) |\n| PP-YOLO | 4 | 32 | ResNet18vd | 320 | 26.2 | 26.4 | 480.7 | 763.4 | [model](https://paddledet.bj.bcebos.com/models/ppyolo_r18vd_coco.pdparams) | [config](https://github.com/PaddlePaddle/PaddleDetection/tree/develop/configs/ppyolo/ppyolo_r18vd_coco.yml) |\n-| PP-YOLOv2 | 8 | 12 | ResNet50vd | 640 | 49.1 | 49.5 | - | - | [model](https://paddledet.bj.bcebos.com/models/ppyolov2_r50vd_dcn_365e_coco.pdparams) | [config](https://github.com/PaddlePaddle/PaddleDetection/tree/develop/configs/ppyolo/ppyolov2_r50vd_dcn_365e_coco.yml) |\n-| PP-YOLOv2 | 8 | 12 | ResNet101vd | 640 | 49.7 | 50.1 | - | - | [model](https://paddledet.bj.bcebos.com/models/ppyolov2_r101vd_dcn_365e_coco.pdparams) | [config](https://github.com/PaddlePaddle/PaddleDetection/tree/develop/configs/ppyolo/ppyolov2_r101vd_dcn_365e_coco.yml) |\n+| PP-YOLOv2 | 8 | 12 | ResNet50vd | 640 | 49.1 | 49.5 | 68.9 | 106.5 | [model](https://paddledet.bj.bcebos.com/models/ppyolov2_r50vd_dcn_365e_coco.pdparams) | [config](https://github.com/PaddlePaddle/PaddleDetection/tree/develop/configs/ppyolo/ppyolov2_r50vd_dcn_365e_coco.yml) |\n+| PP-YOLOv2 | 8 | 12 | ResNet101vd | 640 | 49.7 | 50.3 | 49.5 | 87.0 | [model](https://paddledet.bj.bcebos.com/models/ppyolov2_r101vd_dcn_365e_coco.pdparams) | [config](https://github.com/PaddlePaddle/PaddleDetection/tree/develop/configs/ppyolo/ppyolov2_r101vd_dcn_365e_coco.yml) |\n**Notes:**\n@@ -140,10 +140,10 @@ Inference images in single GPU with following commands, use `--infer_img` to inf\n```bash\n# inference single image\n-CUDA_VISIBLE_DEVICES=0 python tools/infer.py configs/ppyolo/ppyolo_r50vd_dcn_1x_coco.yml -o weights=https://paddledet.bj.bcebos.com/models/ppyolo_r50vd_dcn_1x_coco.pdparams --infer_img=../demo/000000014439_640x640.jpg\n+CUDA_VISIBLE_DEVICES=0 python tools/infer.py -c configs/ppyolo/ppyolo_r50vd_dcn_1x_coco.yml -o weights=https://paddledet.bj.bcebos.com/models/ppyolo_r50vd_dcn_1x_coco.pdparams --infer_img=demo/000000014439_640x640.jpg\n# inference all images in the directory\n-CUDA_VISIBLE_DEVICES=0 python tools/infer.py configs/ppyolo/ppyolo_r50vd_dcn_1x_coco.yml -o weights=https://paddledet.bj.bcebos.com/models/ppyolo_r50vd_dcn_1x_coco.pdparams --infer_dir=../demo\n+CUDA_VISIBLE_DEVICES=0 python tools/infer.py -c configs/ppyolo/ppyolo_r50vd_dcn_1x_coco.yml -o weights=https://paddledet.bj.bcebos.com/models/ppyolo_r50vd_dcn_1x_coco.pdparams --infer_dir=demo\n```\n### 4. Inferece deployment\n"
}
] |
Python
|
Apache License 2.0
|
paddlepaddle/paddledetection
|
modify ppyolo docs, test=document_fix (#2666)
|
499,395 |
17.04.2021 00:31:08
| -28,800 |
762d0152a9c4ee05fc42028ec42a972a594ed704
|
modify link and fps of ppyolo, test=document_fix
|
[
{
"change_type": "MODIFY",
"old_path": "static/configs/ppyolo/README.md",
"new_path": "static/configs/ppyolo/README.md",
"diff": "@@ -53,8 +53,8 @@ PP-YOLO improved performance and speed of YOLOv3 with following methods:\n| PP-YOLO | 4 | 32 | ResNet18vd | 512 | 29.3 | 29.5 | 357.1 | 657.9 | [model](https://paddlemodels.bj.bcebos.com/object_detection/ppyolo_r18vd.pdparams) | [config](https://github.com/PaddlePaddle/PaddleDetection/tree/develop/static/configs/ppyolo/ppyolo_r18vd.yml) |\n| PP-YOLO | 4 | 32 | ResNet18vd | 416 | 28.6 | 28.9 | 409.8 | 719.4 | [model](https://paddlemodels.bj.bcebos.com/object_detection/ppyolo_r18vd.pdparams) | [config](https://github.com/PaddlePaddle/PaddleDetection/tree/develop/static/configs/ppyolo/ppyolo_r18vd.yml) |\n| PP-YOLO | 4 | 32 | ResNet18vd | 320 | 26.2 | 26.4 | 480.7 | 763.4 | [model](https://paddlemodels.bj.bcebos.com/object_detection/ppyolo_r18vd.pdparams) | [config](https://github.com/PaddlePaddle/PaddleDetection/tree/develop/static/configs/ppyolo/ppyolo_r18vd.yml) |\n-| PP-YOLOv2 | 8 | 12 | ResNet50vd | 640 | 49.1 | 49.5 | - | - | [model](https://paddlemodels.bj.bcebos.com/object_detection/ppyolov2_r50vd_dcn.pdparams) | [config](https://github.com/PaddlePaddle/PaddleDetection/tree/develop/static/configs/ppyolo/ppyolov2_r50vd_dcn.yml) |\n-| PP-YOLOv2 | 8 | 12 | ResNet101vd | 640 | 49.7 | 50.1 | - | - | [model](https://paddlemodels.bj.bcebos.com/object_detection/ppyolov2_r101vd_dcn.pdparams) | [config](https://github.com/PaddlePaddle/PaddleDetection/tree/develop/static/configs/ppyolo/ppyolov2_r101vd_dcn.yml) |\n+| PP-YOLOv2 | 8 | 12 | ResNet50vd | 640 | 49.1 | 49.5 | 68.9 | 106.5 | [model](https://paddlemodels.bj.bcebos.com/object_detection/ppyolov2_r50vd_dcn.pdparams) | [config](https://github.com/PaddlePaddle/PaddleDetection/tree/develop/static/configs/ppyolo/ppyolov2_r50vd_dcn.yml) |\n+| PP-YOLOv2 | 8 | 12 | ResNet101vd | 640 | 49.7 | 50.3 | 49.5 | 87.0 | [model](https://paddlemodels.bj.bcebos.com/object_detection/ppyolov2_r101vd_dcn.pdparams) | [config](https://github.com/PaddlePaddle/PaddleDetection/tree/develop/static/configs/ppyolo/ppyolov2_r101vd_dcn.yml) |\n**Notes:**\n"
}
] |
Python
|
Apache License 2.0
|
paddlepaddle/paddledetection
|
modify link and fps of ppyolo, test=document_fix (#2671)
|
499,333 |
17.04.2021 00:42:47
| -28,800 |
edd24ca12844364b8feb50e6b180722403f9501f
|
simplify log of loading weights
|
[
{
"change_type": "MODIFY",
"old_path": "README_en.md",
"new_path": "README_en.md",
"diff": "@@ -188,7 +188,7 @@ The relationship between COCO mAP and FPS on Tesla V100 of representative models\n- `PP-YOLO` achieves mAP of 45.9% on COCO and 72.9FPS on Tesla V100. Both precision and speed surpass [YOLOv4](https://arxiv.org/abs/2004.10934)\n-- `PP-YOLO v2` is optimized version of `PP-YOLO` which has mAP of 49.5% and 60FPS on Tesla V100\n+- `PP-YOLO v2` is optimized version of `PP-YOLO` which has mAP of 49.5% and 68.9FPS on Tesla V100\n- All these models can be get in [Model Zoo](#ModelZoo)\n"
},
{
"change_type": "MODIFY",
"old_path": "ppdet/data/source/dataset.py",
"new_path": "ppdet/data/source/dataset.py",
"diff": "@@ -82,7 +82,6 @@ class DetDataset(Dataset):\nr['curr_iter'] = self._curr_iter\nelse:\nroidb['curr_iter'] = self._curr_iter\n- roidb['curr_iter'] = self._curr_iter\nself._curr_iter += 1\nreturn self.transform(roidb)\n"
},
{
"change_type": "MODIFY",
"old_path": "ppdet/utils/checkpoint.py",
"new_path": "ppdet/utils/checkpoint.py",
"diff": "@@ -157,28 +157,21 @@ def load_pretrain_weight(model, pretrain_weight):\nweights_path = path + '.pdparams'\nparam_state_dict = paddle.load(weights_path)\n- lack_backbone_weights_cnt = 0\n- lack_modules = set()\n- for name, weight in model_dict.items():\n- if name in param_state_dict.keys():\n- if weight.shape != list(param_state_dict[name].shape):\n+ ignore_weights = set()\n+\n+ for name, weight in param_state_dict.items():\n+ if name in model_dict.keys():\n+ if list(weight.shape) != list(model_dict[name].shape):\nlogger.info(\n'{} not used, shape {} unmatched with {} in model.'.format(\n- name, list(param_state_dict[name].shape), weight.shape))\n- param_state_dict.pop(name, None)\n+ name, weight.shape, list(model_dict[name].shape)))\n+ ignore_weights.add(name)\nelse:\n- lack_modules.add(name.split('.')[0])\n- if name.find('backbone') >= 0:\n- logger.info('Lack backbone weights: {}'.format(name))\n- lack_backbone_weights_cnt += 1\n-\n- if lack_backbone_weights_cnt > 0:\n- logger.info('Lack {} weights in backbone.'.format(\n- lack_backbone_weights_cnt))\n-\n- if len(lack_modules) > 0:\n- logger.info('Lack weights of modules: {}'.format(', '.join(\n- list(lack_modules))))\n+ logger.info('Redundant weight {} and ignore it.'.format(name))\n+ ignore_weights.add(name)\n+\n+ for weight in ignore_weights:\n+ param_state_dict.pop(weight, None)\nmodel.set_dict(param_state_dict)\nlogger.info('Finish loading model weights: {}'.format(weights_path))\n"
}
] |
Python
|
Apache License 2.0
|
paddlepaddle/paddledetection
|
simplify log of loading weights (#2673)
|
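The checkpoint change above keeps only pretrained tensors whose names and shapes match the target model before calling `set_dict`. A minimal sketch of that filtering, with the per-weight logging omitted:

```python
import paddle


def load_matching_weights(model, weights_path):
    model_dict = model.state_dict()
    param_state_dict = paddle.load(weights_path)
    kept = {}
    for name, weight in param_state_dict.items():
        # keep a tensor only if the model has it and the shapes agree
        if name in model_dict and list(weight.shape) == list(model_dict[name].shape):
            kept[name] = weight
    model.set_dict(kept)
    return len(kept), len(param_state_dict)
```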
499,298 |
17.04.2021 22:48:20
| -28,800 |
f0bf5be8d401334ff755725354c5990e2d132166
|
add bias test doc for yolo ppyolo pafnet(#2675)
|
[
{
"change_type": "MODIFY",
"old_path": "configs/ppyolo/README.md",
"new_path": "configs/ppyolo/README.md",
"diff": "@@ -132,7 +132,27 @@ CUDA_VISIBLE_DEVICES=0 python tools/eval.py -c configs/ppyolo/ppyolo_test.yml -o\nEvaluation results will be saved in `bbox.json`, compress it into a `zip` package and upload to [COCO dataset evaluation](https://competitions.codalab.org/competitions/20794#participate) to evaluate.\n-**NOTE:** `configs/ppyolo/ppyolo_test.yml` is only used for evaluation on COCO test-dev2017 dataset, could not be used for training or COCO val2017 dataset evaluating.\n+**NOTE 1:** `configs/ppyolo/ppyolo_test.yml` is only used for evaluation on COCO test-dev2017 dataset, could not be used for training or COCO val2017 dataset evaluating.\n+\n+**NOTE 2:** Due to the overall upgrade of the dynamic graph framework, the following weight models published by paddledetection need to be evaluated by adding the -- bias field, such as\n+\n+```bash\n+# use weights released in PaddleDetection model zoo\n+CUDA_VISIBLE_DEVICES=0 python tools/eval.py -c configs/ppyolo/ppyolo_r50vd_dcn_1x_coco.yml -o weights=https://paddledet.bj.bcebos.com/models/ppyolo_r50vd_dcn_1x_coco.pdparams --bias\n+```\n+These models are:\n+\n+1.ppyolo_r50vd_dcn_1x_coco\n+\n+2.ppyolo_r50vd_dcn_voc\n+\n+3.ppyolo_r18vd_coco\n+\n+4.ppyolo_mbv3_large_coco\n+\n+5.ppyolo_mbv3_small_coco\n+\n+6.ppyolo_tiny_650e_coco\n### 3. Inference\n"
}
] |
Python
|
Apache License 2.0
|
paddlepaddle/paddledetection
|
add bias test doc for yolo ppyolo pafnet(#2675)
|
499,304 |
19.04.2021 11:12:34
| -28,800 |
56aa92d5e8e106374975d6ff5668f774e23aa1ef
|
fix wider_face init
|
[
{
"change_type": "MODIFY",
"old_path": "ppdet/data/source/__init__.py",
"new_path": "ppdet/data/source/__init__.py",
"diff": "# limitations under the License.\nfrom . import coco\n-# TODO add voc and widerface dataset\nfrom . import voc\n-#from . import widerface\n+from . import widerface\nfrom . import category\nfrom .coco import *\nfrom .voc import *\n-#from .widerface import *\n+from .widerface import *\nfrom .category import *\n"
}
] |
Python
|
Apache License 2.0
|
paddlepaddle/paddledetection
|
fix wider_face init (#2686)
|
499,395 |
19.04.2021 11:20:40
| -28,800 |
085791a225315cd0653c63fa7580ac520602a2a9
|
fix ppyolo docs, test=document_fix
|
[
{
"change_type": "MODIFY",
"old_path": "configs/ppyolo/README.md",
"new_path": "configs/ppyolo/README.md",
"diff": "@@ -175,7 +175,7 @@ For inference deployment or benchmard, model exported with `tools/export_model.p\npython tools/export_model.py -c configs/ppyolo/ppyolo_r50vd_dcn_1x_coco.yml -o weights=https://paddledet.bj.bcebos.com/models/ppyolo_r50vd_dcn_1x_coco.pdparams\n# inference with Paddle Inference library\n-CUDA_VISIBLE_DEVICES=0 python deploy/python/infer.py --model_dir=output_inference/ppyolo_r50vd_dcn_1x_coco --image_file=../demo/000000014439_640x640.jpg --use_gpu=True\n+CUDA_VISIBLE_DEVICES=0 python deploy/python/infer.py --model_dir=output_inference/ppyolo_r50vd_dcn_1x_coco --image_file=demo/000000014439_640x640.jpg --use_gpu=True\n```\n"
}
] |
Python
|
Apache License 2.0
|
paddlepaddle/paddledetection
|
fix ppyolo docs, test=document_fix (#2689)
|
499,395 |
19.04.2021 11:27:21
| -28,800 |
ecc33160dc7fa54ecc0f9aeb9fc9f50cfab61c74
|
modify gridmask op
|
[
{
"change_type": "MODIFY",
"old_path": "ppdet/data/transform/gridmask_utils.py",
"new_path": "ppdet/data/transform/gridmask_utils.py",
"diff": "@@ -45,7 +45,8 @@ class Gridmask(object):\nself.prob = self.st_prob * min(1, 1.0 * curr_iter / self.upper_iter)\nif np.random.rand() > self.prob:\nreturn x\n- h, w, _ = x.shape\n+ # image should be C, H, W format\n+ _, h, w = x.shape\nhh = int(1.5 * h)\nww = int(1.5 * w)\nd = np.random.randint(2, h)\n"
},
{
"change_type": "MODIFY",
"old_path": "static/ppdet/data/transform/gridmask_utils.py",
"new_path": "static/ppdet/data/transform/gridmask_utils.py",
"diff": "@@ -45,7 +45,8 @@ class GridMask(object):\nself.prob = self.st_prob * min(1, 1.0 * curr_iter / self.upper_iter)\nif np.random.rand() > self.prob:\nreturn x\n- h, w, _ = x.shape\n+ # image should be C, H, W format\n+ _, h, w = x.shape\nhh = int(1.5 * h)\nww = int(1.5 * w)\nd = np.random.randint(2, h)\n"
}
] |
Python
|
Apache License 2.0
|
paddlepaddle/paddledetection
|
modify gridmask op (#2692)
|
499,333 |
19.04.2021 13:21:39
| -28,800 |
67e0d761e1afa8d274d5b3a75fbe3ccccf49244a
|
fix gridmask
|
[
{
"change_type": "MODIFY",
"old_path": "ppdet/data/transform/gridmask_utils.py",
"new_path": "ppdet/data/transform/gridmask_utils.py",
"diff": "@@ -45,8 +45,7 @@ class Gridmask(object):\nself.prob = self.st_prob * min(1, 1.0 * curr_iter / self.upper_iter)\nif np.random.rand() > self.prob:\nreturn x\n- # image should be C, H, W format\n- _, h, w = x.shape\n+ h, w, _ = x.shape\nhh = int(1.5 * h)\nww = int(1.5 * w)\nd = np.random.randint(2, h)\n@@ -74,7 +73,7 @@ class Gridmask(object):\nif self.mode == 1:\nmask = 1 - mask\n- mask = np.expand_dims(mask, axis=0)\n+ mask = np.expand_dims(mask, axis=-1)\nif self.offset:\noffset = (2 * (np.random.rand(h, w) - 0.5)).astype(np.float32)\nx = (x * mask + offset * (1 - mask)).astype(x.dtype)\n"
},
{
"change_type": "MODIFY",
"old_path": "static/ppdet/data/transform/gridmask_utils.py",
"new_path": "static/ppdet/data/transform/gridmask_utils.py",
"diff": "@@ -46,7 +46,7 @@ class GridMask(object):\nif np.random.rand() > self.prob:\nreturn x\n# image should be C, H, W format\n- _, h, w = x.shape\n+ h, w, _ = x.shape\nhh = int(1.5 * h)\nww = int(1.5 * w)\nd = np.random.randint(2, h)\n@@ -74,7 +74,7 @@ class GridMask(object):\nif self.mode == 1:\nmask = 1 - mask\n- mask = np.expand_dims(mask, axis=0)\n+ mask = np.expand_dims(mask, axis=-1)\nif self.offset:\noffset = (2 * (np.random.rand(h, w) - 0.5)).astype(np.float32)\nx = (x * mask + offset * (1 - mask)).astype(x.dtype)\n"
}
] |
Python
|
Apache License 2.0
|
paddlepaddle/paddledetection
|
fix gridmask (#2694)
|
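The final GridMask fix above expands the 2-D mask on the last axis so it broadcasts over an H, W, C image. A tiny numpy sketch of that broadcast, for illustration only:

```python
import numpy as np


def apply_grid_mask(image_hwc, mask_hw):
    # expand on the last axis so the mask broadcasts over the channel dimension
    mask = np.expand_dims(mask_hw, axis=-1).astype(image_hwc.dtype)
    return image_hwc * mask
```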
499,313 |
21.04.2021 10:12:37
| -28,800 |
765e80c038caafa78816f4955067df96c4fa8e92
|
fix voc difficult not found & map_res empty
|
[
{
"change_type": "MODIFY",
"old_path": "ppdet/data/source/voc.py",
"new_path": "ppdet/data/source/voc.py",
"diff": "@@ -117,7 +117,12 @@ class VOCDataSet(DetDataset):\ndifficult = []\nfor i, obj in enumerate(objs):\ncname = obj.find('name').text\n- _difficult = int(obj.find('difficult').text)\n+\n+ # user dataset may not contain difficult field\n+ _difficult = obj.find('difficult')\n+ _difficult = int(\n+ _difficult.text) if _difficult is not None else 0\n+\nx1 = float(obj.find('bndbox').find('xmin').text)\ny1 = float(obj.find('bndbox').find('ymin').text)\nx2 = float(obj.find('bndbox').find('xmax').text)\n"
},
{
"change_type": "MODIFY",
"old_path": "ppdet/engine/callbacks.py",
"new_path": "ppdet/engine/callbacks.py",
"diff": "@@ -179,6 +179,11 @@ class Checkpointer(Callback):\nfor metric in self.model._metrics:\nmap_res = metric.get_results()\nkey = 'bbox' if 'bbox' in map_res else 'mask'\n+ if key not in map_res:\n+ logger.warn(\"Evaluation results empty, this may be due to \" \\\n+ \"training iterations being too few or not \" \\\n+ \"loading the correct weights.\")\n+ return\nif map_res[key][0] > self.best_ap:\nself.best_ap = map_res[key][0]\nsave_name = 'best_model'\n"
}
] |
Python
|
Apache License 2.0
|
paddlepaddle/paddledetection
|
fix voc difficult not found & map_res empty (#2714)
|
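The VOC fix above tolerates annotations that omit the `difficult` tag. A self-contained sketch of that defensive parse; the returned record layout is simplified and is not the dataset class itself:

```python
import xml.etree.ElementTree as ET


def parse_voc_objects(xml_path):
    objs = []
    for obj in ET.parse(xml_path).findall('object'):
        # older custom datasets may omit the <difficult> tag entirely
        node = obj.find('difficult')
        difficult = int(node.text) if node is not None else 0
        box = obj.find('bndbox')
        coords = [float(box.find(k).text) for k in ('xmin', 'ymin', 'xmax', 'ymax')]
        objs.append({'name': obj.find('name').text,
                     'difficult': difficult,
                     'bbox': coords})
    return objs
```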
499,395 |
25.04.2021 11:27:19
| -28,800 |
ee25d70189a98db4310312d5190216cb610f74b5
|
eval with ema weight while training
|
[
{
"change_type": "MODIFY",
"old_path": "ppdet/engine/callbacks.py",
"new_path": "ppdet/engine/callbacks.py",
"diff": "@@ -26,7 +26,6 @@ import paddle\nimport paddle.distributed as dist\nfrom ppdet.utils.checkpoint import save_model\n-from ppdet.optimizer import ModelEMA\nfrom ppdet.utils.logger import setup_logger\nlogger = setup_logger('ppdet.engine')\n@@ -143,20 +142,12 @@ class Checkpointer(Callback):\nsuper(Checkpointer, self).__init__(model)\ncfg = self.model.cfg\nself.best_ap = 0.\n- self.use_ema = ('use_ema' in cfg and cfg['use_ema'])\nself.save_dir = os.path.join(self.model.cfg.save_dir,\nself.model.cfg.filename)\nif hasattr(self.model.model, 'student_model'):\nself.weight = self.model.model.student_model\nelse:\nself.weight = self.model.model\n- if self.use_ema:\n- self.ema = ModelEMA(\n- cfg['ema_decay'], self.weight, use_thres_step=True)\n-\n- def on_step_end(self, status):\n- if self.use_ema:\n- self.ema.update(self.weight)\ndef on_epoch_end(self, status):\n# Checkpointer only performed during training\n@@ -170,9 +161,6 @@ class Checkpointer(Callback):\nif epoch_id % self.model.cfg.snapshot_epoch == 0 or epoch_id == end_epoch - 1:\nsave_name = str(\nepoch_id) if epoch_id != end_epoch - 1 else \"model_final\"\n- if self.use_ema:\n- weight = self.ema.apply()\n- else:\nweight = self.weight\nelif mode == 'eval':\nif 'save_best_model' in status and status['save_best_model']:\n@@ -187,9 +175,6 @@ class Checkpointer(Callback):\nif map_res[key][0] > self.best_ap:\nself.best_ap = map_res[key][0]\nsave_name = 'best_model'\n- if self.use_ema:\n- weight = self.ema.apply()\n- else:\nweight = self.weight\nlogger.info(\"Best test {} ap is {:0.3f}.\".format(\nkey, self.best_ap))\n"
},
{
"change_type": "MODIFY",
"old_path": "ppdet/engine/trainer.py",
"new_path": "ppdet/engine/trainer.py",
"diff": "@@ -28,6 +28,7 @@ import paddle.distributed as dist\nfrom paddle.distributed import fleet\nfrom paddle import amp\nfrom paddle.static import InputSpec\n+from ppdet.optimizer import ModelEMA\nfrom ppdet.core.workspace import create\nfrom ppdet.utils.checkpoint import load_weight, load_pretrain_weight\n@@ -61,6 +62,11 @@ class Trainer(object):\nself.model = self.cfg.model\nself.is_loaded_weights = True\n+ self.use_ema = ('use_ema' in cfg and cfg['use_ema'])\n+ if self.use_ema:\n+ self.ema = ModelEMA(\n+ cfg['ema_decay'], self.model, use_thres_step=True)\n+\n# build data loader\nself.dataset = cfg['{}Dataset'.format(self.mode.capitalize())]\nif self.mode == 'train':\n@@ -281,8 +287,15 @@ class Trainer(object):\nself.status['batch_time'].update(time.time() - iter_tic)\nself._compose_callback.on_step_end(self.status)\n+ if self.use_ema:\n+ self.ema.update(self.model)\niter_tic = time.time()\n+ # apply ema weight on model\n+ if self.use_ema:\n+ weight = self.model.state_dict()\n+ self.model.set_dict(self.ema.apply())\n+\nself._compose_callback.on_epoch_end(self.status)\nif validate and (self._nranks < 2 or self._local_rank == 0) \\\n@@ -303,6 +316,10 @@ class Trainer(object):\nself.status['save_best_model'] = True\nself._eval_with_loader(self._eval_loader)\n+ # restore origin weight on model\n+ if self.use_ema:\n+ self.model.set_dict(weight)\n+\ndef _eval_with_loader(self, loader):\nsample_num = 0\ntic = time.time()\n"
}
] |
Python
|
Apache License 2.0
|
paddlepaddle/paddledetection
|
eval with ema weight while training (#2747)
|
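With the trainer change above, EMA weights are applied to the model right before in-training evaluation and the raw training weights are restored afterwards. A rough sketch of that swap–evaluate–restore pattern; `model`, `ema_state` and `evaluate` are generic stand-ins rather than PaddleDetection's exact objects:

```python
def eval_with_ema(model, ema_state, evaluate):
    """Evaluate on the smoothed (EMA) weights, then restore the raw weights."""
    raw_state = model.state_dict()     # keep the weights training will continue from
    model.set_dict(ema_state)          # swap in the EMA weights
    try:
        metrics = evaluate(model)
    finally:
        model.set_dict(raw_state)      # training resumes on the raw weights
    return metrics
```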
499,348 |
26.04.2021 16:26:54
| -28,800 |
e76236514896a5a8a90ab90a9bb4d46b87d2725b
|
pose bottomup higherhrnet: model
|
[
{
"change_type": "MODIFY",
"old_path": "ppdet/modeling/architectures/__init__.py",
"new_path": "ppdet/modeling/architectures/__init__.py",
"diff": "@@ -15,6 +15,7 @@ from . import fcos\nfrom . import solov2\nfrom . import ttfnet\nfrom . import s2anet\n+from . import keypoint_hrhrnet\nfrom .meta_arch import *\nfrom .faster_rcnn import *\n@@ -26,3 +27,4 @@ from .fcos import *\nfrom .solov2 import *\nfrom .ttfnet import *\nfrom .s2anet import *\n+from .keypoint_hrhrnet import *\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "ppdet/modeling/architectures/keypoint_hrhrnet.py",
"diff": "+# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n+from __future__ import absolute_import\n+from __future__ import division\n+from __future__ import print_function\n+\n+from scipy.optimize import linear_sum_assignment\n+from collections import abc, defaultdict\n+import numpy as np\n+import paddle\n+\n+from ppdet.core.workspace import register, create, serializable\n+from .meta_arch import BaseArch\n+from .. import layers as L\n+from ..keypoint_utils import transpred\n+\n+__all__ = ['HigherHrnet']\n+\n+\n+@register\n+class HigherHrnet(BaseArch):\n+ __category__ = 'architecture'\n+\n+ def __init__(self,\n+ backbone='Hrnet',\n+ hrhrnet_head='HigherHrnetHead',\n+ post_process='HrHrnetPostProcess',\n+ eval_flip=True,\n+ flip_perm=None):\n+ \"\"\"\n+ HigherHrnet network, see https://arxiv.org/abs/\n+\n+ Args:\n+ backbone (nn.Layer): backbone instance\n+ hrhrnet_head (nn.Layer): keypoint_head instance\n+ bbox_post_process (object): `BBoxPostProcess` instance\n+ \"\"\"\n+ super(HigherHrnet, self).__init__()\n+ self.backbone = backbone\n+ self.hrhrnet_head = hrhrnet_head\n+ self.post_process = HrHrnetPostProcess()\n+ self.flip = eval_flip\n+ self.flip_perm = paddle.to_tensor(flip_perm)\n+ self.deploy = False\n+\n+ @classmethod\n+ def from_config(cls, cfg, *args, **kwargs):\n+ # backbone\n+ backbone = create(cfg['backbone'])\n+ # head\n+ kwargs = {'input_shape': backbone.out_shape}\n+ hrhrnet_head = create(cfg['hrhrnet_head'], **kwargs)\n+ post_process = create(cfg['post_process'])\n+\n+ return {\n+ 'backbone': backbone,\n+ \"hrhrnet_head\": hrhrnet_head,\n+ \"post_process\": post_process,\n+ }\n+\n+ def _forward(self):\n+ batchsize = self.inputs['image'].shape[0]\n+ if self.flip and not self.training and not self.deploy:\n+ self.inputs['image'] = paddle.concat(\n+ (self.inputs['image'], paddle.flip(self.inputs['image'], [3])))\n+ body_feats = self.backbone(self.inputs)\n+\n+ if self.training:\n+ return self.hrhrnet_head(body_feats, self.inputs)\n+ else:\n+ outputs = self.hrhrnet_head(body_feats)\n+ if self.deploy:\n+ return outputs, [1]\n+ if self.flip:\n+ outputs = [paddle.split(o, 2) for o in outputs]\n+ output_rflip = [\n+ paddle.flip(paddle.gather(o[1], self.flip_perm, 1), [3])\n+ for o in outputs\n+ ]\n+ output1 = [o[0] for o in outputs]\n+ heatmap = (output1[0] + output_rflip[0]) / 2.\n+ tagmaps = [output1[1], output_rflip[1]]\n+ outputs = [heatmap] + tagmaps\n+\n+ res_lst = []\n+ bboxnums = []\n+ for idx in range(batchsize):\n+ item = [o[idx:(idx + 1)] for o in outputs]\n+\n+ h = self.inputs['im_shape'][idx, 0].numpy().item()\n+ w = self.inputs['im_shape'][idx, 1].numpy().item()\n+ kpts, scores = self.post_process(item, h, w)\n+ res_lst.append([kpts, scores])\n+ bboxnums.append(1)\n+\n+ return res_lst, bboxnums\n+\n+ def get_loss(self):\n+ return self._forward()\n+\n+ def get_pred(self):\n+ outputs = {}\n+ res_lst, bboxnums = self._forward()\n+ outputs['keypoint'] = res_lst\n+ outputs['bbox_num'] = 
bboxnums\n+ return outputs\n+\n+\n+@register\n+@serializable\n+class HrHrnetPostProcess(object):\n+ def __init__(self, max_num_people=30, heat_thresh=0.2, tag_thresh=1.):\n+ self.interpolate = L.Upsample(2, mode='bilinear')\n+ self.pool = L.MaxPool(5, 1, 2)\n+ self.max_num_people = max_num_people\n+ self.heat_thresh = heat_thresh\n+ self.tag_thresh = tag_thresh\n+\n+ def lerp(self, j, y, x, heatmap):\n+ H, W = heatmap.shape[-2:]\n+ left = np.clip(x - 1, 0, W - 1)\n+ right = np.clip(x + 1, 0, W - 1)\n+ up = np.clip(y - 1, 0, H - 1)\n+ down = np.clip(y + 1, 0, H - 1)\n+ offset_y = np.where(heatmap[j, down, x] > heatmap[j, up, x], 0.25,\n+ -0.25)\n+ offset_x = np.where(heatmap[j, y, right] > heatmap[j, y, left], 0.25,\n+ -0.25)\n+ return offset_y + 0.5, offset_x + 0.5\n+\n+ def __call__(self, inputs, original_height, original_width):\n+\n+ # resize to image size\n+ inputs = [self.interpolate(x) for x in inputs]\n+ # aggregate\n+ heatmap = inputs[0]\n+ if len(inputs) == 3:\n+ tagmap = paddle.concat(\n+ (inputs[1].unsqueeze(4), inputs[2].unsqueeze(4)), axis=4)\n+ else:\n+ tagmap = inputs[1].unsqueeze(4)\n+\n+ N, J, H, W = heatmap.shape\n+ assert N == 1, \"only support batch size 1\"\n+ # topk\n+ maximum = self.pool(heatmap)\n+ maxmap = heatmap * (heatmap == maximum)\n+ maxmap = maxmap.reshape([N, J, -1])\n+ heat_k, inds_k = maxmap.topk(self.max_num_people, axis=2)\n+ heatmap = heatmap[0].cpu().detach().numpy()\n+ tagmap = tagmap[0].cpu().detach().numpy()\n+ heats = heat_k[0].cpu().detach().numpy()\n+ inds_np = inds_k[0].cpu().detach().numpy()\n+ y = inds_np // W\n+ x = inds_np % W\n+ tags = tagmap[np.arange(J)[None, :].repeat(self.max_num_people),\n+ y.flatten(), x.flatten()].reshape(J, -1, tagmap.shape[-1])\n+ coords = np.stack((y, x), axis=2)\n+ # threshold\n+ mask = heats > self.heat_thresh\n+ # cluster\n+ cluster = defaultdict(lambda: {\n+ 'coords': np.zeros((J, 2), dtype=np.float32),\n+ 'scores': np.zeros(J, dtype=np.float32),\n+ 'tags': []\n+ })\n+ for jid, m in enumerate(mask):\n+ num_valid = m.sum()\n+ if num_valid == 0:\n+ continue\n+ valid_inds = np.where(m)[0]\n+ valid_tags = tags[jid, m, :]\n+ if len(cluster) == 0: # initialize\n+ for i in valid_inds:\n+ tag = tags[jid, i]\n+ key = tag[0]\n+ cluster[key]['tags'].append(tag)\n+ cluster[key]['scores'][jid] = heats[jid, i]\n+ cluster[key]['coords'][jid] = coords[jid, i]\n+ continue\n+ candidates = list(cluster.keys())[:self.max_num_people]\n+ centroids = [\n+ np.mean(\n+ cluster[k]['tags'], axis=0) for k in candidates\n+ ]\n+ num_clusters = len(centroids)\n+ # shape is (num_valid, num_clusters, tag_dim)\n+ dist = valid_tags[:, None, :] - np.array(centroids)[None, ...]\n+ l2_dist = np.linalg.norm(dist, ord=2, axis=2)\n+ # modulate dist with heat value, see `use_detection_val`\n+ cost = np.round(l2_dist) * 100 - heats[jid, m, None]\n+ # pad the cost matrix, otherwise new pose are ignored\n+ if num_valid > num_clusters:\n+ cost = np.pad(cost, ((0, 0), (0, num_valid - num_clusters)),\n+ constant_values=((0, 0), (0, 1e-10)))\n+ rows, cols = linear_sum_assignment(cost)\n+ for y, x in zip(rows, cols):\n+ tag = tags[jid, y]\n+ if y < num_valid and x < num_clusters and \\\n+ l2_dist[y, x] < self.tag_thresh:\n+ key = candidates[x] # merge to cluster\n+ else:\n+ key = tag[0] # initialize new cluster\n+ cluster[key]['tags'].append(tag)\n+ cluster[key]['scores'][jid] = heats[jid, y]\n+ cluster[key]['coords'][jid] = coords[jid, y]\n+\n+ # shape is [k, J, 2] and [k, J]\n+ pose_tags = np.array([cluster[k]['tags'] for k in cluster])\n+ pose_coords = 
np.array([cluster[k]['coords'] for k in cluster])\n+ pose_scores = np.array([cluster[k]['scores'] for k in cluster])\n+ valid = pose_scores > 0\n+\n+ pose_kpts = np.zeros((pose_scores.shape[0], J, 3), dtype=np.float32)\n+ if valid.sum() == 0:\n+ return pose_kpts, pose_kpts\n+\n+ # refine coords\n+ valid_coords = pose_coords[valid].astype(np.int32)\n+ y = valid_coords[..., 0].flatten()\n+ x = valid_coords[..., 1].flatten()\n+ _, j = np.nonzero(valid)\n+ offsets = self.lerp(j, y, x, heatmap)\n+ pose_coords[valid, 0] += offsets[0]\n+ pose_coords[valid, 1] += offsets[1]\n+\n+ # mean score before salvage\n+ mean_score = pose_scores.mean(axis=1)\n+ pose_kpts[valid, 2] = pose_scores[valid]\n+\n+ # TODO can we remove the outermost loop altogether\n+ # salvage missing joints\n+\n+ if True:\n+ for pid, coords in enumerate(pose_coords):\n+ # vj = np.nonzero(valid[pid])[0]\n+ # vyx = coords[valid[pid]].astype(np.int32)\n+ # tag_mean = tagmap[vj, vyx[:, 0], vyx[:, 1]].mean(axis=0)\n+\n+ tag_mean = np.array(pose_tags[pid]).mean(\n+ axis=0) #TODO: replace tagmap sample by history record\n+\n+ norm = np.sum((tagmap - tag_mean)**2, axis=3)**0.5\n+ score = heatmap - np.round(norm) # (J, H, W)\n+ flat_score = score.reshape(J, -1)\n+ max_inds = np.argmax(flat_score, axis=1)\n+ max_scores = np.max(flat_score, axis=1)\n+ salvage_joints = (pose_scores[pid] == 0) & (max_scores > 0)\n+ if salvage_joints.sum() == 0:\n+ continue\n+ y = max_inds[salvage_joints] // W\n+ x = max_inds[salvage_joints] % W\n+ offsets = self.lerp(salvage_joints.nonzero()[0], y, x, heatmap)\n+ y = y.astype(np.float32) + offsets[0]\n+ x = x.astype(np.float32) + offsets[1]\n+ pose_coords[pid][salvage_joints, 0] = y\n+ pose_coords[pid][salvage_joints, 1] = x\n+ pose_kpts[pid][salvage_joints, 2] = max_scores[salvage_joints]\n+ pose_kpts[..., :2] = transpred(pose_coords[..., :2][..., ::-1],\n+ original_height, original_width,\n+ min(H, W))\n+ return pose_kpts, mean_score\n"
},
{
"change_type": "MODIFY",
"old_path": "ppdet/modeling/backbones/hrnet.py",
"new_path": "ppdet/modeling/backbones/hrnet.py",
"diff": "@@ -688,6 +688,7 @@ class HRNet(nn.Layer):\nhas_se=self.has_se,\nnorm_decay=norm_decay,\nfreeze_norm=freeze_norm,\n+ multi_scale_output=len(return_idx) > 1,\nname=\"st4\")\ndef forward(self, inputs):\n"
},
{
"change_type": "MODIFY",
"old_path": "ppdet/modeling/heads/__init__.py",
"new_path": "ppdet/modeling/heads/__init__.py",
"diff": "@@ -23,6 +23,7 @@ from . import ttf_head\nfrom . import cascade_head\nfrom . import face_head\nfrom . import s2anet_head\n+from . import keypoint_hrhrnet_head\nfrom .bbox_head import *\nfrom .mask_head import *\n@@ -35,3 +36,4 @@ from .ttf_head import *\nfrom .cascade_head import *\nfrom .face_head import *\nfrom .s2anet_head import *\n+from .keypoint_hrhrnet_head import *\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "ppdet/modeling/heads/keypoint_hrhrnet_head.py",
"diff": "+# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n+import paddle\n+import paddle.nn as nn\n+import paddle.nn.functional as F\n+from ppdet.core.workspace import register\n+from .. import layers as L\n+from ..backbones.hrnet import BasicBlock\n+\n+\n+@register\n+class HrHrnetHead(nn.Layer):\n+ __inject__ = ['loss']\n+\n+ def __init__(self, num_joints, loss='HrHrnetLoss', swahr=False, width=32):\n+ \"\"\"\n+ Head for HigherHrnet network\n+\n+ Args:\n+ num_joints (int): number of keypoints\n+ hrloss (object): HrHrnetLoss instance\n+ swahr (bool): whether to use swahr\n+ width (int): hrnet channel width\n+ \"\"\"\n+ super(HrHrnetHead, self).__init__()\n+ self.loss = loss\n+\n+ self.num_joints = num_joints\n+ num_featout1 = num_joints * 2\n+ num_featout2 = num_joints\n+ self.swahr = swahr\n+ self.conv1 = L.Conv2d(width, num_featout1, 1, 1, 0, bias=True)\n+ self.conv2 = L.Conv2d(width, num_featout2, 1, 1, 0, bias=True)\n+ self.deconv = nn.Sequential(\n+ L.ConvTranspose2d(\n+ num_featout1 + width, width, 4, 2, 1, 0, bias=False),\n+ L.BatchNorm2d(width),\n+ L.ReLU())\n+ self.blocks = nn.Sequential(*(BasicBlock(\n+ num_channels=width,\n+ num_filters=width,\n+ has_se=False,\n+ freeze_norm=False,\n+ name='HrHrnetHead_{}'.format(i)) for i in range(4)))\n+\n+ self.interpolate = L.Upsample(2, mode='bilinear')\n+ self.concat = L.Concat(dim=1)\n+ if swahr:\n+ self.scalelayer0 = nn.Sequential(\n+ L.Conv2d(\n+ width, num_joints, 1, 1, 0, bias=True),\n+ L.BatchNorm2d(num_joints),\n+ L.ReLU(),\n+ L.Conv2d(\n+ num_joints,\n+ num_joints,\n+ 9,\n+ 1,\n+ 4,\n+ groups=num_joints,\n+ bias=True))\n+ self.scalelayer1 = nn.Sequential(\n+ L.Conv2d(\n+ width, num_joints, 1, 1, 0, bias=True),\n+ L.BatchNorm2d(num_joints),\n+ L.ReLU(),\n+ L.Conv2d(\n+ num_joints,\n+ num_joints,\n+ 9,\n+ 1,\n+ 4,\n+ groups=num_joints,\n+ bias=True))\n+\n+ def forward(self, feats, targets=None):\n+ x1 = feats[0]\n+ xo1 = self.conv1(x1)\n+ x2 = self.blocks(self.deconv(self.concat((x1, xo1))))\n+ xo2 = self.conv2(x2)\n+ num_joints = self.num_joints\n+ if self.training:\n+ if self.swahr:\n+ so1 = self.scalelayer0(x1)\n+ so2 = self.scalelayer1(x2)\n+ hrhrnet_outputs = ([xo1[:, :num_joints], so1], [xo2, so2],\n+ xo1[:, num_joints:])\n+ return self.loss(hrhrnet_outputs, targets)\n+ else:\n+ hrhrnet_outputs = (xo1[:, :num_joints], xo2,\n+ xo1[:, num_joints:])\n+ return self.loss(hrhrnet_outputs, targets)\n+\n+ # averaged heatmap, upsampled tagmap\n+ upsampled = self.interpolate(xo1)\n+ avg = (upsampled[:, :num_joints] + xo2[:, :num_joints]) / 2\n+ return avg, upsampled[:, num_joints:]\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "ppdet/modeling/keypoint_utils.py",
"diff": "+# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n+import cv2\n+import numpy as np\n+\n+\n+def get_affine_mat_kernel(h, w, s, inv=False):\n+ if w < h:\n+ w_ = s\n+ h_ = int(np.ceil((s / w * h) / 64.) * 64)\n+ scale_w = w\n+ scale_h = h_ / w_ * w\n+\n+ else:\n+ h_ = s\n+ w_ = int(np.ceil((s / h * w) / 64.) * 64)\n+ scale_h = h\n+ scale_w = w_ / h_ * h\n+\n+ center = np.array([np.round(w / 2.), np.round(h / 2.)])\n+\n+ size_resized = (w_, h_)\n+ trans = get_affine_transform(\n+ center, np.array([scale_w, scale_h]), 0, size_resized, inv=inv)\n+\n+ return trans, size_resized\n+\n+\n+def get_affine_transform(center,\n+ input_size,\n+ rot,\n+ output_size,\n+ shift=(0., 0.),\n+ inv=False):\n+ \"\"\"Get the affine transform matrix, given the center/scale/rot/output_size.\n+\n+ Args:\n+ center (np.ndarray[2, ]): Center of the bounding box (x, y).\n+ scale (np.ndarray[2, ]): Scale of the bounding box\n+ wrt [width, height].\n+ rot (float): Rotation angle (degree).\n+ output_size (np.ndarray[2, ]): Size of the destination heatmaps.\n+ shift (0-100%): Shift translation ratio wrt the width/height.\n+ Default (0., 0.).\n+ inv (bool): Option to inverse the affine transform direction.\n+ (inv=False: src->dst or inv=True: dst->src)\n+\n+ Returns:\n+ np.ndarray: The transform matrix.\n+ \"\"\"\n+ assert len(center) == 2\n+ assert len(input_size) == 2\n+ assert len(output_size) == 2\n+ assert len(shift) == 2\n+\n+ scale_tmp = input_size\n+\n+ shift = np.array(shift)\n+ src_w = scale_tmp[0]\n+ dst_w = output_size[0]\n+ dst_h = output_size[1]\n+\n+ rot_rad = np.pi * rot / 180\n+ src_dir = rotate_point([0., src_w * -0.5], rot_rad)\n+ dst_dir = np.array([0., dst_w * -0.5])\n+\n+ src = np.zeros((3, 2), dtype=np.float32)\n+ src[0, :] = center + scale_tmp * shift\n+ src[1, :] = center + src_dir + scale_tmp * shift\n+ src[2, :] = _get_3rd_point(src[0, :], src[1, :])\n+\n+ dst = np.zeros((3, 2), dtype=np.float32)\n+ dst[0, :] = [dst_w * 0.5, dst_h * 0.5]\n+ dst[1, :] = np.array([dst_w * 0.5, dst_h * 0.5]) + dst_dir\n+ dst[2, :] = _get_3rd_point(dst[0, :], dst[1, :])\n+\n+ if inv:\n+ trans = cv2.getAffineTransform(np.float32(dst), np.float32(src))\n+ else:\n+ trans = cv2.getAffineTransform(np.float32(src), np.float32(dst))\n+\n+ return trans\n+\n+\n+def _get_3rd_point(a, b):\n+ \"\"\"To calculate the affine matrix, three pairs of points are required. 
This\n+ function is used to get the 3rd point, given 2D points a & b.\n+\n+ The 3rd point is defined by rotating vector `a - b` by 90 degrees\n+ anticlockwise, using b as the rotation center.\n+\n+ Args:\n+ a (np.ndarray): point(x,y)\n+ b (np.ndarray): point(x,y)\n+\n+ Returns:\n+ np.ndarray: The 3rd point.\n+ \"\"\"\n+ assert len(a) == 2\n+ assert len(b) == 2\n+ direction = a - b\n+ third_pt = b + np.array([-direction[1], direction[0]], dtype=np.float32)\n+\n+ return third_pt\n+\n+\n+def rotate_point(pt, angle_rad):\n+ \"\"\"Rotate a point by an angle.\n+\n+ Args:\n+ pt (list[float]): 2 dimensional point to be rotated\n+ angle_rad (float): rotation angle by radian\n+\n+ Returns:\n+ list[float]: Rotated point.\n+ \"\"\"\n+ assert len(pt) == 2\n+ sn, cs = np.sin(angle_rad), np.cos(angle_rad)\n+ new_x = pt[0] * cs - pt[1] * sn\n+ new_y = pt[0] * sn + pt[1] * cs\n+ rotated_pt = [new_x, new_y]\n+\n+ return rotated_pt\n+\n+\n+def transpred(kpts, h, w, s):\n+ trans, _ = get_affine_mat_kernel(h, w, s, inv=True)\n+\n+ return warp_affine_joints(kpts[..., :2].copy(), trans)\n+\n+\n+def warp_affine_joints(joints, mat):\n+ \"\"\"Apply affine transformation defined by the transform matrix on the\n+ joints.\n+\n+ Args:\n+ joints (np.ndarray[..., 2]): Origin coordinate of joints.\n+ mat (np.ndarray[3, 2]): The affine matrix.\n+\n+ Returns:\n+ matrix (np.ndarray[..., 2]): Result coordinate of joints.\n+ \"\"\"\n+ joints = np.array(joints)\n+ shape = joints.shape\n+ joints = joints.reshape(-1, 2)\n+ return np.dot(np.concatenate(\n+ (joints, joints[:, 0:1] * 0 + 1), axis=1),\n+ mat.T).reshape(shape)\n"
},
{
"change_type": "MODIFY",
"old_path": "ppdet/modeling/layers.py",
"new_path": "ppdet/modeling/layers.py",
"diff": "@@ -950,3 +950,101 @@ class MaskMatrixNMS(object):\ncate_scores = paddle.gather(cate_scores, index=sort_inds)\ncate_labels = paddle.gather(cate_labels, index=sort_inds)\nreturn seg_preds, cate_scores, cate_labels\n+\n+\n+def Conv2d(in_channels,\n+ out_channels,\n+ kernel_size,\n+ stride=1,\n+ padding=0,\n+ dilation=1,\n+ groups=1,\n+ bias=True,\n+ weight_init=Normal(std=0.001),\n+ bias_init=Constant(0.)):\n+ weight_attr = paddle.framework.ParamAttr(initializer=weight_init)\n+ if bias:\n+ bias_attr = paddle.framework.ParamAttr(initializer=bias_init)\n+ else:\n+ bias_attr = False\n+ conv = nn.Conv2D(\n+ in_channels,\n+ out_channels,\n+ kernel_size,\n+ stride,\n+ padding,\n+ dilation,\n+ groups,\n+ weight_attr=weight_attr,\n+ bias_attr=bias_attr)\n+ return conv\n+\n+\n+def ConvTranspose2d(in_channels,\n+ out_channels,\n+ kernel_size,\n+ stride=1,\n+ padding=0,\n+ output_padding=0,\n+ groups=1,\n+ bias=True,\n+ dilation=1,\n+ weight_init=Normal(std=0.001),\n+ bias_init=Constant(0.)):\n+ weight_attr = paddle.framework.ParamAttr(initializer=weight_init)\n+ if bias:\n+ bias_attr = paddle.framework.ParamAttr(initializer=bias_init)\n+ else:\n+ bias_attr = False\n+ conv = nn.Conv2DTranspose(\n+ in_channels,\n+ out_channels,\n+ kernel_size,\n+ stride,\n+ padding,\n+ output_padding,\n+ dilation,\n+ groups,\n+ weight_attr=weight_attr,\n+ bias_attr=bias_attr)\n+ return conv\n+\n+\n+def BatchNorm2d(num_features, eps=1e-05, momentum=0.9, affine=True):\n+ if not affine:\n+ weight_attr = False\n+ bias_attr = False\n+ else:\n+ weight_attr = None\n+ bias_attr = None\n+ batchnorm = nn.BatchNorm2D(\n+ num_features,\n+ momentum,\n+ eps,\n+ weight_attr=weight_attr,\n+ bias_attr=bias_attr)\n+ return batchnorm\n+\n+\n+def ReLU():\n+ return nn.ReLU()\n+\n+\n+def Upsample(scale_factor=None, mode='nearest', align_corners=False):\n+ return nn.Upsample(None, scale_factor, mode, align_corners)\n+\n+\n+def MaxPool(kernel_size, stride, padding, ceil_mode=False):\n+ return nn.MaxPool2D(kernel_size, stride, padding, ceil_mode=ceil_mode)\n+\n+\n+class Concat(nn.Layer):\n+ def __init__(self, dim=0):\n+ super(Concat, self).__init__()\n+ self.dim = dim\n+\n+ def forward(self, inputs):\n+ return paddle.concat(inputs, axis=self.dim)\n+\n+ def extra_repr(self):\n+ return 'dim={}'.format(self.dim)\n"
},
{
"change_type": "MODIFY",
"old_path": "ppdet/modeling/losses/__init__.py",
"new_path": "ppdet/modeling/losses/__init__.py",
"diff": "@@ -19,6 +19,7 @@ from . import ssd_loss\nfrom . import fcos_loss\nfrom . import solov2_loss\nfrom . import ctfocal_loss\n+from . import keypoint_loss\nfrom .yolo_loss import *\nfrom .iou_aware_loss import *\n@@ -27,3 +28,4 @@ from .ssd_loss import *\nfrom .fcos_loss import *\nfrom .solov2_loss import *\nfrom .ctfocal_loss import *\n+from .keypoint_loss import *\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "ppdet/modeling/losses/keypoint_loss.py",
"diff": "+# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n+from __future__ import absolute_import\n+from __future__ import division\n+from __future__ import print_function\n+\n+from itertools import cycle, islice\n+from collections import abc\n+import paddle\n+import paddle.nn as nn\n+import paddle.nn.functional as F\n+from ppdet.core.workspace import register, serializable\n+\n+__all__ = ['HrHrnetLoss']\n+\n+\n+@register\n+@serializable\n+class HrHrnetLoss(nn.Layer):\n+ def __init__(self, num_joints, swahr):\n+ \"\"\"\n+ HrHrnetLoss layer\n+\n+ Args:\n+ num_joints (int): number of keypoints\n+ \"\"\"\n+ super(HrHrnetLoss, self).__init__()\n+ if swahr:\n+ self.heatmaploss = HeatMapSWAHRLoss(num_joints)\n+ else:\n+ self.heatmaploss = HeatMapLoss()\n+ self.aeloss = AELoss()\n+ self.ziploss = ZipLoss(\n+ [self.heatmaploss, self.heatmaploss, self.aeloss])\n+\n+ def forward(self, inputs, records):\n+ targets = []\n+ targets.append([records['heatmap_gt1x'], records['mask_1x']])\n+ targets.append([records['heatmap_gt2x'], records['mask_2x']])\n+ targets.append(records['tagmap'])\n+ keypoint_losses = dict()\n+ loss = self.ziploss(inputs, targets)\n+ keypoint_losses['heatmap_loss'] = loss[0] + loss[1]\n+ keypoint_losses['pull_loss'] = loss[2][0]\n+ keypoint_losses['push_loss'] = loss[2][1]\n+ keypoint_losses['loss'] = recursive_sum(loss)\n+ return keypoint_losses\n+\n+\n+class HeatMapLoss(object):\n+ def __init__(self, loss_factor=1.0):\n+ super(HeatMapLoss, self).__init__()\n+ self.loss_factor = loss_factor\n+\n+ def __call__(self, preds, targets):\n+ heatmap, mask = targets\n+ loss = ((preds - heatmap)**2 * mask.cast('float').unsqueeze(1))\n+ loss = paddle.clip(loss, min=0, max=2).mean()\n+ loss *= self.loss_factor\n+ return loss\n+\n+\n+class HeatMapSWAHRLoss(object):\n+ def __init__(self, num_joints, loss_factor=1.0):\n+ super(HeatMapSWAHRLoss, self).__init__()\n+ self.loss_factor = loss_factor\n+ self.num_joints = num_joints\n+\n+ def __call__(self, preds, targets):\n+ heatmaps_gt, mask = targets\n+ heatmaps_pred = preds[0]\n+ scalemaps_pred = preds[1]\n+\n+ heatmaps_scaled_gt = paddle.where(heatmaps_gt > 0, 0.5 * heatmaps_gt * (\n+ 1 + (1 +\n+ (scalemaps_pred - 1.) * paddle.log(heatmaps_gt + 1e-10))**2),\n+ heatmaps_gt)\n+\n+ regularizer_loss = paddle.mean(\n+ paddle.pow((scalemaps_pred - 1.) 
* (heatmaps_gt > 0).astype(float),\n+ 2))\n+ omiga = 0.01\n+ # thres = 2**(-1/omiga), threshold for positive weight\n+ hm_weight = heatmaps_scaled_gt**(\n+ omiga\n+ ) * paddle.abs(1 - heatmaps_pred) + paddle.abs(heatmaps_pred) * (\n+ 1 - heatmaps_scaled_gt**(omiga))\n+\n+ loss = (((heatmaps_pred - heatmaps_scaled_gt)**2) *\n+ mask.cast('float').unsqueeze(1)) * hm_weight\n+ loss = loss.mean()\n+ loss = self.loss_factor * (loss + 1.0 * regularizer_loss)\n+ return loss\n+\n+\n+class AELoss(object):\n+ def __init__(self, pull_factor=0.001, push_factor=0.001):\n+ super(AELoss, self).__init__()\n+ self.pull_factor = pull_factor\n+ self.push_factor = push_factor\n+\n+ def apply_single(self, pred, tagmap):\n+ if tagmap.numpy()[:, :, 3].sum() == 0:\n+ return (paddle.zeros([1]), paddle.zeros([1]))\n+ nonzero = paddle.nonzero(tagmap[:, :, 3] > 0)\n+ if nonzero.shape[0] == 0:\n+ return (paddle.zeros([1]), paddle.zeros([1]))\n+ p_inds = paddle.unique(nonzero[:, 0])\n+ num_person = p_inds.shape[0]\n+ if num_person == 0:\n+ return (paddle.zeros([1]), paddle.zeros([1]))\n+\n+ pull = 0\n+ tagpull_num = 0\n+ embs_all = []\n+ person_unvalid = 0\n+ for person_idx in p_inds.numpy():\n+ valid_single = tagmap[person_idx.item()]\n+ validkpts = paddle.nonzero(valid_single[:, 3] > 0)\n+ valid_single = paddle.index_select(valid_single, validkpts)\n+ emb = paddle.gather_nd(pred, valid_single[:, :3])\n+ if emb.shape[0] == 1:\n+ person_unvalid += 1\n+ mean = paddle.mean(emb, axis=0)\n+ embs_all.append(mean)\n+ pull += paddle.mean(paddle.pow(emb - mean, 2), axis=0)\n+ tagpull_num += emb.shape[0]\n+ pull /= max(num_person - person_unvalid, 1)\n+ if num_person < 2:\n+ return pull, paddle.zeros([1])\n+\n+ embs_all = paddle.stack(embs_all)\n+ A = embs_all.expand([num_person, num_person])\n+ B = A.transpose([1, 0])\n+ diff = A - B\n+\n+ diff = paddle.pow(diff, 2)\n+ push = paddle.exp(-diff)\n+ push = paddle.sum(push) - num_person\n+\n+ push /= 2 * num_person * (num_person - 1)\n+ return pull, push\n+\n+ def __call__(self, preds, tagmaps):\n+ bs = preds.shape[0]\n+ losses = [self.apply_single(preds[i], tagmaps[i]) for i in range(bs)]\n+ pull = self.pull_factor * sum(loss[0] for loss in losses) / len(losses)\n+ push = self.push_factor * sum(loss[1] for loss in losses) / len(losses)\n+ return pull, push\n+\n+\n+class ZipLoss(object):\n+ def __init__(self, loss_funcs):\n+ super(ZipLoss, self).__init__()\n+ self.loss_funcs = loss_funcs\n+\n+ def __call__(self, inputs, targets):\n+ assert len(self.loss_funcs) == len(targets) >= len(inputs)\n+\n+ def zip_repeat(*args):\n+ longest = max(map(len, args))\n+ filled = [islice(cycle(x), longest) for x in args]\n+ return zip(*filled)\n+\n+ return tuple(\n+ fn(x, y)\n+ for x, y, fn in zip_repeat(inputs, targets, self.loss_funcs))\n+\n+\n+def recursive_sum(inputs):\n+ if isinstance(inputs, abc.Sequence):\n+ return sum([recursive_sum(x) for x in inputs])\n+ return inputs\n"
},
{
"change_type": "MODIFY",
"old_path": "requirements.txt",
"new_path": "requirements.txt",
"diff": "@@ -7,4 +7,5 @@ shapely\nscipy\nterminaltables\npycocotools\n+xtcocotools==1.6\nsetuptools>=42.0.0\n"
}
] |
Python
|
Apache License 2.0
|
paddlepaddle/paddledetection
|
pose bottomup higherhrnet: model (#2638)
|
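Inside `HrHrnetPostProcess`, joints of one keypoint type are matched to existing person clusters by associative-embedding tag distance, modulated by heat score and solved with the Hungarian algorithm. A toy version of that matching step with made-up tags and heats; only `scipy.optimize.linear_sum_assignment` is a real dependency:

```python
import numpy as np
from scipy.optimize import linear_sum_assignment

valid_tags = np.array([[0.11], [0.92], [0.13]])   # 3 detected joints (1-d tags)
centroids  = np.array([[0.10], [0.90]])           # 2 existing person clusters
heats      = np.array([0.8, 0.7, 0.6])            # detection confidences

l2 = np.linalg.norm(valid_tags[:, None, :] - centroids[None, ...], ord=2, axis=2)
cost = np.round(l2) * 100 - heats[:, None]        # hotter joints win ties

# pad so an unmatched joint can open a new cluster, as in the post-process
num_valid, num_clusters = cost.shape
if num_valid > num_clusters:
    cost = np.pad(cost, ((0, 0), (0, num_valid - num_clusters)),
                  constant_values=1e-10)

rows, cols = linear_sum_assignment(cost)
print(list(zip(rows.tolist(), cols.tolist())))    # joint index -> cluster (or new slot)
```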
499,395 |
26.04.2021 18:02:22
| -28,800 |
5298ee05b398e19980a52e89a5f12e6e63d3afe7
|
fix feat_channel and polish voc config
|
[
{
"change_type": "MODIFY",
"old_path": "configs/ppyolo/_base_/ppyolo_r18vd.yml",
"new_path": "configs/ppyolo/_base_/ppyolo_r18vd.yml",
"diff": "@@ -19,7 +19,6 @@ ResNet:\nnorm_decay: 0.\nPPYOLOFPN:\n- feat_channels: [512, 512]\ndrop_block: true\nblock_size: 3\nkeep_prob: 0.9\n"
},
{
"change_type": "MODIFY",
"old_path": "configs/ppyolo/ppyolo_r50vd_dcn_voc.yml",
"new_path": "configs/ppyolo/ppyolo_r50vd_dcn_voc.yml",
"diff": "@@ -10,14 +10,6 @@ snapshot_epoch: 83\nweights: output/ppyolo_r50vd_dcn_voc/model_final\nTrainReader:\n- batch_transforms:\n- - BatchRandomResize: {target_size: [320, 352, 384, 416, 448, 480, 512, 544, 576, 608], random_size: True, random_interp: True, keep_ratio: False}\n- - NormalizeBox: {}\n- - PadBox: {num_max_boxes: 50}\n- - BboxXYXY2XYWH: {}\n- - NormalizeImage: {mean: [0.485, 0.456, 0.406], std: [0.229, 0.224, 0.225], is_scale: True}\n- - Permute: {}\n- - Gt2YoloTarget: {anchor_masks: [[6, 7, 8], [3, 4, 5], [0, 1, 2]], anchors: [[10, 13], [16, 30], [33, 23], [30, 61], [62, 45], [59, 119], [116, 90], [156, 198], [373, 326]], downsample_ratios: [32, 16, 8], num_classes: 20}\nmixup_epoch: 350\nbatch_size: 12\n"
}
] |
Python
|
Apache License 2.0
|
paddlepaddle/paddledetection
|
fix feat_channel and polish voc config (#2765)
|
499,395 |
26.04.2021 18:04:16
| -28,800 |
63b9d6ee0d8f4b51c1adea4e68a383efb2a19b9f
|
add anchor_cluster and modify docs
|
[
{
"change_type": "MODIFY",
"old_path": "configs/ppyolo/README.md",
"new_path": "configs/ppyolo/README.md",
"diff": "@@ -108,6 +108,12 @@ Training PP-YOLO on 8 GPUs with following command(all commands should be run und\npython -m paddle.distributed.launch --log_dir=./ppyolo_dygraph/ --gpus 0,1,2,3,4,5,6,7 tools/train.py -c configs/ppyolo/ppyolo_r50vd_dcn_1x_coco.yml &>ppyolo_dygraph.log 2>&1 &\n```\n+optional: Run `tools/anchor_cluster.py` to get anchors suitable for your dataset, and modify the anchor setting in model configuration file and reader configuration file, such as `configs/ppyolo/_base_/ppyolo_tiny.yml` and `configs/ppyolo/_base_/ppyolo_tiny_reader.yml`.\n+\n+``` bash\n+python tools/anchor_cluster.py -c configs/ppyolo/ppyolo_tiny_650e_coco.yml -n 9 -s 320 -m v2 -i 1000\n+```\n+\n### 2. Evaluation\nEvaluating PP-YOLO on COCO val2017 dataset in single GPU with following commands:\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "tools/anchor_cluster.py",
"diff": "+# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n+from __future__ import absolute_import\n+from __future__ import division\n+from __future__ import print_function\n+\n+import os\n+import sys\n+# add python path of PadleDetection to sys.path\n+parent_path = os.path.abspath(os.path.join(__file__, *(['..'] * 2)))\n+if parent_path not in sys.path:\n+ sys.path.append(parent_path)\n+\n+from ppdet.utils.logger import setup_logger\n+logger = setup_logger('ppdet.anchor_cluster')\n+\n+from scipy.cluster.vq import kmeans\n+import random\n+import numpy as np\n+from tqdm import tqdm\n+\n+from ppdet.utils.cli import ArgsParser\n+from ppdet.utils.check import check_gpu, check_version, check_config\n+from ppdet.core.workspace import load_config, merge_config, create\n+\n+\n+class BaseAnchorCluster(object):\n+ def __init__(self, n, cache_path, cache, verbose=True):\n+ \"\"\"\n+ Base Anchor Cluster\n+\n+ Args:\n+ n (int): number of clusters\n+ cache_path (str): cache directory path\n+ cache (bool): whether using cache\n+ verbose (bool): whether print results\n+ \"\"\"\n+ super(BaseAnchorCluster, self).__init__()\n+ self.n = n\n+ self.cache_path = cache_path\n+ self.cache = cache\n+ self.verbose = verbose\n+\n+ def print_result(self, centers):\n+ raise NotImplementedError('%s.print_result is not available' %\n+ self.__class__.__name__)\n+\n+ def get_whs(self):\n+ whs_cache_path = os.path.join(self.cache_path, 'whs.npy')\n+ shapes_cache_path = os.path.join(self.cache_path, 'shapes.npy')\n+ if self.cache and os.path.exists(whs_cache_path) and os.path.exists(\n+ shapes_cache_path):\n+ self.whs = np.load(whs_cache_path)\n+ self.shapes = np.load(shapes_cache_path)\n+ return self.whs, self.shapes\n+ whs = np.zeros((0, 2))\n+ shapes = np.zeros((0, 2))\n+ self.dataset.parse_dataset()\n+ roidbs = self.dataset.roidbs\n+ for rec in tqdm(roidbs):\n+ h, w = rec['h'], rec['w']\n+ bbox = rec['gt_bbox']\n+ wh = bbox[:, 2:4] - bbox[:, 0:2] + 1\n+ wh = wh / np.array([[w, h]])\n+ shape = np.ones_like(wh) * np.array([[w, h]])\n+ whs = np.vstack((whs, wh))\n+ shapes = np.vstack((shapes, shape))\n+\n+ if self.cache:\n+ os.makedirs(self.cache_path, exist_ok=True)\n+ np.save(whs_cache_path, whs)\n+ np.save(shapes_cache_path, shapes)\n+\n+ self.whs = whs\n+ self.shapes = shapes\n+ return self.whs, self.shapes\n+\n+ def calc_anchors(self):\n+ raise NotImplementedError('%s.calc_anchors is not available' %\n+ self.__class__.__name__)\n+\n+ def __call__(self):\n+ self.get_whs()\n+ centers = self.calc_anchors()\n+ if self.verbose:\n+ self.print_result(centers)\n+ return centers\n+\n+\n+class YOLOv2AnchorCluster(BaseAnchorCluster):\n+ def __init__(self,\n+ n,\n+ dataset,\n+ size,\n+ cache_path,\n+ cache,\n+ iters=1000,\n+ verbose=True):\n+ super(YOLOv2AnchorCluster, self).__init__(\n+ n, cache_path, cache, verbose=verbose)\n+ \"\"\"\n+ YOLOv2 Anchor Cluster\n+\n+ Reference:\n+ 
https://github.com/AlexeyAB/darknet/blob/master/scripts/gen_anchors.py\n+\n+ Args:\n+ n (int): number of clusters\n+ dataset (DataSet): DataSet instance, VOC or COCO\n+ size (list): [w, h]\n+ cache_path (str): cache directory path\n+ cache (bool): whether using cache\n+ iters (int): kmeans algorithm iters\n+ verbose (bool): whether print results\n+ \"\"\"\n+ self.dataset = dataset\n+ self.size = size\n+ self.iters = iters\n+\n+ def print_result(self, centers):\n+ logger.info('%d anchor cluster result: [w, h]' % self.n)\n+ for w, h in centers:\n+ logger.info('[%d, %d]' % (round(w), round(h)))\n+\n+ def metric(self, whs, centers):\n+ wh1 = whs[:, None]\n+ wh2 = centers[None]\n+ inter = np.minimum(wh1, wh2).prod(2)\n+ return inter / (wh1.prod(2) + wh2.prod(2) - inter)\n+\n+ def kmeans_expectation(self, whs, centers, assignments):\n+ dist = self.metric(whs, centers)\n+ new_assignments = dist.argmax(1)\n+ converged = (new_assignments == assignments).all()\n+ return converged, new_assignments\n+\n+ def kmeans_maximizations(self, whs, centers, assignments):\n+ new_centers = np.zeros_like(centers)\n+ for i in range(centers.shape[0]):\n+ mask = (assignments == i)\n+ if mask.sum():\n+ new_centers[i, :] = whs[mask].mean(0)\n+ return new_centers\n+\n+ def calc_anchors(self):\n+ self.whs = self.whs * np.array([self.size])\n+ # random select k centers\n+ whs, n, iters = self.whs, self.n, self.iters\n+ logger.info('Running kmeans for %d anchors on %d points...' %\n+ (n, len(whs)))\n+ idx = np.random.choice(whs.shape[0], size=n, replace=False)\n+ centers = whs[idx]\n+ assignments = np.zeros(whs.shape[0:1]) * -1\n+ # kmeans\n+ if n == 1:\n+ return self.kmeans_maximizations(whs, centers, assignments)\n+\n+ pbar = tqdm(range(iters), desc='Cluster anchors with k-means algorithm')\n+ for _ in pbar:\n+ # E step\n+ converged, assignments = self.kmeans_expectation(whs, centers,\n+ assignments)\n+ if converged:\n+ logger.info('kmeans algorithm has converged')\n+ break\n+ # M step\n+ centers = self.kmeans_maximizations(whs, centers, assignments)\n+ ious = self.metric(whs, centers)\n+ pbar.desc = 'avg_iou: %.4f' % (ious.max(1).mean())\n+\n+ centers = sorted(centers, key=lambda x: x[0] * x[1])\n+ return centers\n+\n+\n+class YOLOv5AnchorCluster(BaseAnchorCluster):\n+ def __init__(self,\n+ n,\n+ dataset,\n+ size,\n+ cache_path,\n+ cache,\n+ iters=300,\n+ gen_iters=1000,\n+ thresh=0.25,\n+ verbose=True):\n+ super(YOLOv5AnchorCluster, self).__init__(\n+ n, cache_path, cache, verbose=verbose)\n+ \"\"\"\n+ YOLOv5 Anchor Cluster\n+\n+ Reference:\n+ https://github.com/ultralytics/yolov5/blob/master/utils/general.py\n+\n+ Args:\n+ n (int): number of clusters\n+ dataset (DataSet): DataSet instance, VOC or COCO\n+ size (list): [w, h]\n+ cache_path (str): cache directory path\n+ cache (bool): whether using cache\n+ iters (int): iters of kmeans algorithm\n+ gen_iters (int): iters of genetic algorithm\n+ threshold (float): anchor scale threshold\n+ verbose (bool): whether print results\n+ \"\"\"\n+ self.dataset = dataset\n+ self.size = size\n+ self.iters = iters\n+ self.gen_iters = gen_iters\n+ self.thresh = thresh\n+\n+ def print_result(self, centers):\n+ whs = self.whs\n+ centers = centers[np.argsort(centers.prod(1))]\n+ x, best = self.metric(whs, centers)\n+ bpr, aat = (\n+ best > self.thresh).mean(), (x > self.thresh).mean() * self.n\n+ logger.info(\n+ 'thresh=%.2f: %.4f best possible recall, %.2f anchors past thr' %\n+ (self.thresh, bpr, aat))\n+ logger.info(\n+ 'n=%g, img_size=%s, metric_all=%.3f/%.3f-mean/best, 
past_thresh=%.3f-mean: '\n+ % (self.n, self.size, x.mean(), best.mean(),\n+ x[x > self.thresh].mean()))\n+ logger.info('%d anchor cluster result: [w, h]' % self.n)\n+ for w, h in centers:\n+ logger.info('[%d, %d]' % (round(w), round(h)))\n+\n+ def metric(self, whs, centers):\n+ r = whs[:, None] / centers[None]\n+ x = np.minimum(r, 1. / r).min(2)\n+ return x, x.max(1)\n+\n+ def fitness(self, whs, centers):\n+ _, best = self.metric(whs, centers)\n+ return (best * (best > self.thresh)).mean()\n+\n+ def calc_anchors(self):\n+ self.whs = self.whs * self.shapes / self.shapes.max(\n+ 1, keepdims=True) * np.array([self.size])\n+ wh0 = self.whs\n+ i = (wh0 < 3.0).any(1).sum()\n+ if i:\n+ logger.warn('Extremely small objects found. %d of %d'\n+ 'labels are < 3 pixels in width or height' %\n+ (i, len(wh0)))\n+\n+ wh = wh0[(wh0 >= 2.0).any(1)]\n+ logger.info('Running kmeans for %g anchors on %g points...' %\n+ (self.n, len(wh)))\n+ s = wh.std(0)\n+ centers, dist = kmeans(wh / s, self.n, iter=self.iters)\n+ centers *= s\n+\n+ f, sh, mp, s = self.fitness(wh, centers), centers.shape, 0.9, 0.1\n+ pbar = tqdm(\n+ range(self.gen_iters),\n+ desc='Evolving anchors with Genetic Algorithm')\n+ for _ in pbar:\n+ v = np.ones(sh)\n+ while (v == 1).all():\n+ v = ((np.random.random(sh) < mp) * np.random.random() *\n+ np.random.randn(*sh) * s + 1).clip(0.3, 3.0)\n+ new_centers = (centers.copy() * v).clip(min=2.0)\n+ new_f = self.fitness(wh, new_centers)\n+ if new_f > f:\n+ f, centers = new_f, new_centers.copy()\n+ pbar.desc = 'Evolving anchors with Genetic Algorithm: fitness = %.4f' % f\n+\n+ return centers\n+\n+\n+def main():\n+ parser = ArgsParser()\n+ parser.add_argument(\n+ '--n', '-n', default=9, type=int, help='num of clusters')\n+ parser.add_argument(\n+ '--iters',\n+ '-i',\n+ default=1000,\n+ type=int,\n+ help='num of iterations for kmeans')\n+ parser.add_argument(\n+ '--gen_iters',\n+ '-gi',\n+ default=1000,\n+ type=int,\n+ help='num of iterations for genetic algorithm')\n+ parser.add_argument(\n+ '--thresh',\n+ '-t',\n+ default=0.25,\n+ type=float,\n+ help='anchor scale threshold')\n+ parser.add_argument(\n+ '--verbose', '-v', default=True, type=bool, help='whether print result')\n+ parser.add_argument(\n+ '--size',\n+ '-s',\n+ default=None,\n+ type=str,\n+ help='image size: w,h, using comma as delimiter')\n+ parser.add_argument(\n+ '--method',\n+ '-m',\n+ default='v2',\n+ type=str,\n+ help='cluster method, [v2, v5] are supported now')\n+ parser.add_argument(\n+ '--cache_path', default='cache', type=str, help='cache path')\n+ parser.add_argument(\n+ '--cache', action='store_true', help='whether use cache')\n+ FLAGS = parser.parse_args()\n+\n+ cfg = load_config(FLAGS.config)\n+ merge_config(FLAGS.opt)\n+ check_config(cfg)\n+ # check if set use_gpu=True in paddlepaddle cpu version\n+ check_gpu(cfg.use_gpu)\n+ # check if paddlepaddle version is satisfied\n+ check_version()\n+\n+ # get dataset\n+ dataset = cfg['TrainDataset']\n+ if FLAGS.size:\n+ if ',' in FLAGS.size:\n+ size = list(map(int, FLAGS.size.split(',')))\n+ assert len(size) == 2, \"the format of size is incorrect\"\n+ else:\n+ size = int(FLAGS.size)\n+ size = [size, size]\n+ elif 'inputs_def' in cfg['TrainReader'] and 'image_shape' in cfg[\n+ 'TrainReader']['inputs_def']:\n+ size = cfg['TrainReader']['inputs_def']['image_shape'][1:]\n+ else:\n+ raise ValueError('size is not specified')\n+\n+ if FLAGS.method == 'v2':\n+ cluster = YOLOv2AnchorCluster(FLAGS.n, dataset, size, FLAGS.cache_path,\n+ FLAGS.cache, FLAGS.iters, FLAGS.verbose)\n+ elif 
FLAGS.method == 'v5':\n+ cluster = YOLOv5AnchorCluster(FLAGS.n, dataset, size, FLAGS.cache_path,\n+ FLAGS.cache, FLAGS.iters, FLAGS.gen_iters,\n+ FLAGS.thresh, FLAGS.verbose)\n+ else:\n+ raise ValueError('cluster method: %s is not supported' % FLAGS.method)\n+\n+ anchors = cluster()\n+\n+\n+if __name__ == \"__main__\":\n+ main()\n"
}
] |
Python
|
Apache License 2.0
|
paddlepaddle/paddledetection
|
add anchor_cluster and mmodify docs (#2768)
|
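The new `tools/anchor_cluster.py` estimates dataset-specific anchors, either with the YOLOv2-style IoU k-means or the YOLOv5-style genetic refinement. A compact sketch of the IoU-distance k-means it implements, run on random widths/heights (the data and the 9-anchor choice are illustrative):

```python
import numpy as np

def wh_iou(whs, centers):
    # IoU of corner-aligned boxes, the "distance" used by the v2 cluster method
    inter = np.minimum(whs[:, None], centers[None]).prod(2)
    return inter / (whs[:, None].prod(2) + centers[None].prod(2) - inter)

rng = np.random.default_rng(0)
whs = rng.uniform(4, 300, size=(500, 2))            # fake box sizes in pixels
centers = whs[rng.choice(len(whs), 9, replace=False)]

for _ in range(20):                                  # a few E/M iterations
    assign = wh_iou(whs, centers).argmax(1)          # E step: nearest anchor by IoU
    centers = np.stack([whs[assign == i].mean(0) if (assign == i).any() else centers[i]
                        for i in range(len(centers))])   # M step: recompute means

print(sorted(centers.round(1).tolist(), key=lambda c: c[0] * c[1]))
```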
499,304 |
27.04.2021 17:01:14
| -28,800 |
c0404286126561daffd66718d5d5dad8a8544715
|
fix download weights dir of static model
|
[
{
"change_type": "MODIFY",
"old_path": "static/ppdet/utils/download.py",
"new_path": "static/ppdet/utils/download.py",
"diff": "@@ -37,7 +37,7 @@ __all__ = [\n'create_voc_list'\n]\n-WEIGHTS_HOME = osp.expanduser(\"~/.cache/paddle/weights\")\n+WEIGHTS_HOME = osp.expanduser(\"~/.cache/paddle/weights/static\")\nDATASET_HOME = osp.expanduser(\"~/.cache/paddle/dataset\")\n# dict of {dataset_name: (download_info, sub_dirs)}\n"
}
] |
Python
|
Apache License 2.0
|
paddlepaddle/paddledetection
|
fix downlaod weights dir of static model (#2775)
|
499,395 |
06.05.2021 11:08:49
| -28,800 |
2b7209c386d71691cbc25d0fd5b61358f728a084
|
fix num_classes
|
[
{
"change_type": "MODIFY",
"old_path": "ppdet/data/reader.py",
"new_path": "ppdet/data/reader.py",
"diff": "@@ -44,9 +44,11 @@ class Compose(object):\nfor t in self.transforms:\nfor k, v in t.items():\nop_cls = getattr(transform, k)\n- self.transforms_cls.append(op_cls(**v))\n- if hasattr(op_cls, 'num_classes'):\n- op_cls.num_classes = num_classes\n+ f = op_cls(**v)\n+ if hasattr(f, 'num_classes'):\n+ f.num_classes = num_classes\n+\n+ self.transforms_cls.append(f)\ndef __call__(self, data):\nfor f in self.transforms_cls:\n"
}
] |
Python
|
Apache License 2.0
|
paddlepaddle/paddledetection
|
fix num_classes (#2844)
|
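The reader fix above sets `num_classes` on the constructed transform object instead of on the transform class, so one dataset's class count no longer leaks into every later instance. A minimal illustration of the difference; the `Transform` class here is hypothetical:

```python
class Transform:
    num_classes = 80                          # class-level default

def build(num_classes, patched=True):
    t = Transform()
    if patched:
        t.num_classes = num_classes           # per-instance, as in the fixed Compose
    else:
        Transform.num_classes = num_classes   # old bug: mutates the class globally
    return t

a = build(20, patched=False)
print(Transform.num_classes)                  # 20 -- leaked to every future instance
b = build(80, patched=True)
print(Transform.num_classes, b.num_classes)   # 20 80
```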
499,333 |
06.05.2021 13:34:10
| -28,800 |
22c6e80aab3c5512fd3a461bbba405254a63d353
|
support voc format in fcos & ttfnet
|
[
{
"change_type": "MODIFY",
"old_path": "ppdet/data/transform/batch_operators.py",
"new_path": "ppdet/data/transform/batch_operators.py",
"diff": "@@ -452,9 +452,10 @@ class Gt2FCOSTarget(BaseOperator):\nsample['centerness{}'.format(lvl)] = np.reshape(\nctn_targets_by_level[lvl], newshape=[grid_h, grid_w, 1])\n- sample.pop('is_crowd')\n- sample.pop('gt_class')\n- sample.pop('gt_bbox')\n+ sample.pop('is_crowd', None)\n+ sample.pop('difficult', None)\n+ sample.pop('gt_class', None)\n+ sample.pop('gt_bbox', None)\nreturn samples\n@@ -531,11 +532,11 @@ class Gt2TTFTarget(BaseOperator):\nsample['ttf_heatmap'] = heatmap\nsample['ttf_box_target'] = box_target\nsample['ttf_reg_weight'] = reg_weight\n- sample.pop('is_crowd')\n- sample.pop('gt_class')\n- sample.pop('gt_bbox')\n- if 'gt_score' in sample:\n- sample.pop('gt_score')\n+ sample.pop('is_crowd', None)\n+ sample.pop('difficult', None)\n+ sample.pop('gt_class', None)\n+ sample.pop('gt_bbox', None)\n+ sample.pop('gt_score', None)\nreturn samples\ndef draw_truncate_gaussian(self, heatmap, center, h_radius, w_radius):\n"
}
] |
Python
|
Apache License 2.0
|
paddlepaddle/paddledetection
|
support voc format in fcos & ttfnet (#2875)
|
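The batch-operator change above swaps bare `sample.pop(key)` calls for `sample.pop(key, None)`, since VOC-style records may lack keys that COCO records carry (and carry `difficult`, which COCO lacks). The pattern in isolation:

```python
sample = {'gt_bbox': [[0, 0, 10, 10]], 'gt_class': [3], 'difficult': [0]}

# pop with a default never raises KeyError, whatever the annotation format
for key in ('is_crowd', 'difficult', 'gt_class', 'gt_bbox', 'gt_score'):
    sample.pop(key, None)

print(sample)   # {}
```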
499,395 |
07.05.2021 11:52:16
| -28,800 |
0adc553aaa10f105bb338d4505c8bdc447025724
|
fix problem while applying ema without training
|
[
{
"change_type": "MODIFY",
"old_path": "ppdet/optimizer.py",
"new_path": "ppdet/optimizer.py",
"diff": "@@ -248,6 +248,8 @@ class ModelEMA(object):\nself.step += 1\ndef apply(self):\n+ if self.step == 0:\n+ return self.state_dict\nstate_dict = dict()\nfor k, v in self.state_dict.items():\nv = v / (1 - self._decay**self.step)\n"
}
] |
Python
|
Apache License 2.0
|
paddlepaddle/paddledetection
|
fix problem while applying ema without training (#2839)
|
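`ModelEMA.apply` debiases the accumulated average by dividing by `1 - decay**step`; at `step == 0` that denominator is zero, so the patch above returns the raw state instead. A sketch of the same guard on a plain dict of floats (the decay value and numbers are illustrative):

```python
def ema_apply(state, step, decay=0.9998):
    """Bias-corrected EMA read-out, with the step == 0 guard added above."""
    if step == 0:
        return dict(state)                            # nothing accumulated yet
    return {k: v / (1 - decay ** step) for k, v in state.items()}

print(ema_apply({'w': 0.5}, step=0))                  # {'w': 0.5}, no ZeroDivisionError
print(ema_apply({'w': 0.0099}, step=100))             # debiased value, roughly 0.5
```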
499,304 |
07.05.2021 12:46:34
| -28,800 |
99579e6a04d0245d38f4535aa4bb81c07a7d3506
|
remove condition block && some todo
|
[
{
"change_type": "MODIFY",
"old_path": "deploy/python/visualize.py",
"new_path": "deploy/python/visualize.py",
"diff": "@@ -38,10 +38,10 @@ def visualize_box_mask(im, results, labels, threshold=0.5):\nim = Image.open(im).convert('RGB')\nelse:\nim = Image.fromarray(im)\n- if 'masks' in results and 'boxes' in results:\n+ if 'masks' in results and 'boxes' in results and len(results['boxes']) > 0:\nim = draw_mask(\nim, results['boxes'], results['masks'], labels, threshold=threshold)\n- if 'boxes' in results:\n+ if 'boxes' in results and len(results['boxes']) > 0:\nim = draw_box(im, results['boxes'], labels, threshold=threshold)\nif 'segm' in results:\nim = draw_segm(\n"
},
{
"change_type": "MODIFY",
"old_path": "ppdet/modeling/heads/solov2_head.py",
"new_path": "ppdet/modeling/heads/solov2_head.py",
"diff": "@@ -446,9 +446,6 @@ class SOLOv2Head(nn.Layer):\ny = paddle.zeros(shape=paddle.shape(cate_preds), dtype='float32')\ninds = paddle.where(cate_preds > self.score_threshold, cate_preds, y)\ninds = paddle.nonzero(inds)\n- if paddle.shape(inds)[0] == 0:\n- out = paddle.full(shape=[1], fill_value=-1)\n- return out, out, out\ncate_preds = paddle.reshape(cate_preds, shape=[-1])\n# Prevent empty and increase fake data\nind_a = paddle.cast(paddle.shape(kernel_preds)[0], 'int64')\n@@ -530,6 +527,5 @@ class SOLOv2Head(nn.Layer):\nalign_corners=False,\nalign_mode=0),\naxis=[0])\n- # TODO: support bool type\n- seg_masks = paddle.cast(seg_masks > self.mask_threshold, 'int32')\n+ seg_masks = paddle.cast(seg_masks > self.mask_threshold, 'uint8')\nreturn seg_masks, cate_labels, cate_scores\n"
},
{
"change_type": "MODIFY",
"old_path": "ppdet/modeling/post_process.py",
"new_path": "ppdet/modeling/post_process.py",
"diff": "@@ -60,14 +60,6 @@ class BBoxPostProcess(object):\nelse:\nbbox_pred, bbox_num = self.decode(head_out, rois, im_shape,\nscale_factor)\n-\n- # Prevent empty bbox_pred from decode or NMS.\n- # Bboxes and score before NMS may be empty due to the score threshold.\n- if bbox_pred.shape[0] == 0:\n- bbox_pred = paddle.to_tensor(\n- np.array(\n- [[-1, 0.0, 0.0, 0.0, 0.0, 0.0]], dtype='float32'))\n- bbox_num = paddle.to_tensor(np.array([1], dtype='int32'))\nreturn bbox_pred, bbox_num\ndef get_pred(self, bboxes, bbox_num, im_shape, scale_factor):\n@@ -155,10 +147,6 @@ class MaskPostProcess(object):\ngx = paddle.expand(img_x, [N, img_y.shape[1], img_x.shape[2]])\ngy = paddle.expand(img_y, [N, img_y.shape[1], img_x.shape[2]])\n- # TODO: Because paddle.expand transform error when dygraph\n- # to static, use reshape to avoid mistakes.\n- gx = paddle.reshape(gx, [N, img_y.shape[1], img_x.shape[2]])\n- gy = paddle.reshape(gy, [N, img_y.shape[1], img_x.shape[2]])\ngrid = paddle.stack([gx, gy], axis=3)\nimg_masks = F.grid_sample(masks, grid, align_corners=False)\nreturn img_masks[:, 0]\n"
}
] |
Python
|
Apache License 2.0
|
paddlepaddle/paddledetection
|
remove condition block && some todo (#2882)
|
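The deploy-side change above only draws boxes/masks when the detector actually returned detections, instead of relying on a fake `[-1, 0, ...]` row injected by the post-process. The guard in isolation; `draw_box` is a stand-in callback:

```python
import numpy as np

def visualize(results, draw_box):
    # draw only when at least one detection survived the score threshold
    if 'boxes' in results and len(results['boxes']) > 0:
        draw_box(results['boxes'])
    else:
        print('no detections to draw')

visualize({'boxes': np.zeros((0, 6))}, draw_box=print)
visualize({'boxes': np.array([[0, 0.9, 1, 1, 5, 5]])}, draw_box=print)
```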
499,348 |
07.05.2021 17:48:24
| -28,800 |
90bfe0094f7e60237fac7c2142ced53f8ab6f4be
|
add hrnet modeling;
rename all Hrnet to HRNet
|
[
{
"change_type": "MODIFY",
"old_path": "ppdet/modeling/architectures/__init__.py",
"new_path": "ppdet/modeling/architectures/__init__.py",
"diff": "@@ -16,6 +16,7 @@ from . import solov2\nfrom . import ttfnet\nfrom . import s2anet\nfrom . import keypoint_hrhrnet\n+from . import keypoint_hrnet\nfrom .meta_arch import *\nfrom .faster_rcnn import *\n@@ -28,3 +29,4 @@ from .solov2 import *\nfrom .ttfnet import *\nfrom .s2anet import *\nfrom .keypoint_hrhrnet import *\n+from .keypoint_hrnet import *\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "ppdet/modeling/architectures/keypoint_hrnet.py",
"diff": "+# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n+from __future__ import absolute_import\n+from __future__ import division\n+from __future__ import print_function\n+\n+import paddle\n+import numpy as np\n+import math\n+from ppdet.core.workspace import register, create\n+from .meta_arch import BaseArch\n+from ..keypoint_utils import transform_preds\n+from .. import layers as L\n+\n+__all__ = ['TopDownHRNet']\n+\n+\n+@register\n+class TopDownHRNet(BaseArch):\n+ __category__ = 'architecture'\n+ __inject__ = ['loss']\n+\n+ def __init__(self,\n+ width,\n+ num_joints,\n+ backbone='HRNet',\n+ loss='KeyPointMSELoss',\n+ post_process='HRNetPostProcess',\n+ flip_perm=None,\n+ flip=False,\n+ shift_heatmap=True):\n+ \"\"\"\n+ HRNnet network, see https://arxiv.org/abs/1902.09212\n+\n+ Args:\n+ backbone (nn.Layer): backbone instance\n+ post_process (object): `HRNetPostProcess` instance\n+ flip_perm (list): The left-right joints exchange order list\n+ \"\"\"\n+ super(TopDownHRNet, self).__init__()\n+ self.backbone = backbone\n+ self.post_process = HRNetPostProcess()\n+ self.loss = loss\n+ self.flip_perm = flip_perm\n+ self.flip = flip\n+ self.final_conv = L.Conv2d(width, num_joints, 1, 1, 0, bias=True)\n+ self.shift_heatmap = shift_heatmap\n+\n+ @classmethod\n+ def from_config(cls, cfg, *args, **kwargs):\n+ # backbone\n+ backbone = create(cfg['backbone'])\n+\n+ return {'backbone': backbone, }\n+\n+ def _forward(self):\n+ feats = self.backbone(self.inputs)\n+ hrnet_outputs = self.final_conv(feats[0])\n+\n+ if self.training:\n+ return self.loss(hrnet_outputs, self.inputs)\n+ else:\n+ if self.flip:\n+ self.inputs['image'] = self.inputs['image'].flip([3])\n+ feats = backbone(inputs)\n+ output_flipped = self.final_conv(feats)\n+ output_flipped = self.flip_back(output_flipped.numpy(),\n+ flip_perm)\n+ output_flipped = paddle.to_tensor(output_flipped.copy())\n+ if self.shift_heatmap:\n+ output_flipped[:, :, :, 1:] = output_flipped.clone(\n+ )[:, :, :, 0:-1]\n+ output = (output + output_flipped) * 0.5\n+ preds, maxvals = self.post_process(hrnet_outputs, self.inputs)\n+ return preds, maxvals\n+\n+ def get_loss(self):\n+ return self._forward()\n+\n+ def get_pred(self):\n+ preds, maxvals = self._forward()\n+ output = {'kpt_coord': preds, 'kpt_score': maxvals}\n+ return output\n+\n+\n+class HRNetPostProcess(object):\n+ def flip_back(self, output_flipped, matched_parts):\n+ assert output_flipped.ndim == 4,\\\n+ 'output_flipped should be [batch_size, num_joints, height, width]'\n+\n+ output_flipped = output_flipped[:, :, :, ::-1]\n+\n+ for pair in matched_parts:\n+ tmp = output_flipped[:, pair[0], :, :].copy()\n+ output_flipped[:, pair[0], :, :] = output_flipped[:, pair[1], :, :]\n+ output_flipped[:, pair[1], :, :] = tmp\n+\n+ return output_flipped\n+\n+ def get_max_preds(self, heatmaps):\n+ '''get predictions from score maps\n+\n+ Args:\n+ heatmaps: numpy.ndarray([batch_size, num_joints, height, width])\n+\n+ Returns:\n+ preds: 
numpy.ndarray([batch_size, num_joints, 2]), keypoints coords\n+ maxvals: numpy.ndarray([batch_size, num_joints, 2]), the maximum confidence of the keypoints\n+ '''\n+ assert isinstance(heatmaps,\n+ np.ndarray), 'heatmaps should be numpy.ndarray'\n+ assert heatmaps.ndim == 4, 'batch_images should be 4-ndim'\n+\n+ batch_size = heatmaps.shape[0]\n+ num_joints = heatmaps.shape[1]\n+ width = heatmaps.shape[3]\n+ heatmaps_reshaped = heatmaps.reshape((batch_size, num_joints, -1))\n+ idx = np.argmax(heatmaps_reshaped, 2)\n+ maxvals = np.amax(heatmaps_reshaped, 2)\n+\n+ maxvals = maxvals.reshape((batch_size, num_joints, 1))\n+ idx = idx.reshape((batch_size, num_joints, 1))\n+\n+ preds = np.tile(idx, (1, 1, 2)).astype(np.float32)\n+\n+ preds[:, :, 0] = (preds[:, :, 0]) % width\n+ preds[:, :, 1] = np.floor((preds[:, :, 1]) / width)\n+\n+ pred_mask = np.tile(np.greater(maxvals, 0.0), (1, 1, 2))\n+ pred_mask = pred_mask.astype(np.float32)\n+\n+ preds *= pred_mask\n+\n+ return preds, maxvals\n+\n+ def get_final_preds(self, heatmaps, center, scale):\n+ \"\"\"the highest heatvalue location with a quarter offset in the\n+ direction from the highest response to the second highest response.\n+\n+ Args:\n+ heatmaps (numpy.ndarray): The predicted heatmaps\n+ center (numpy.ndarray): The boxes center\n+ scale (numpy.ndarray): The scale factor\n+\n+ Returns:\n+ preds: numpy.ndarray([batch_size, num_joints, 2]), keypoints coords\n+ maxvals: numpy.ndarray([batch_size, num_joints, 2]), the maximum confidence of the keypoints\n+ \"\"\"\n+\n+ coords, maxvals = self.get_max_preds(heatmaps)\n+\n+ heatmap_height = heatmaps.shape[2]\n+ heatmap_width = heatmaps.shape[3]\n+\n+ for n in range(coords.shape[0]):\n+ for p in range(coords.shape[1]):\n+ hm = heatmaps[n][p]\n+ px = int(math.floor(coords[n][p][0] + 0.5))\n+ py = int(math.floor(coords[n][p][1] + 0.5))\n+ if 1 < px < heatmap_width - 1 and 1 < py < heatmap_height - 1:\n+ diff = np.array([\n+ hm[py][px + 1] - hm[py][px - 1],\n+ hm[py + 1][px] - hm[py - 1][px]\n+ ])\n+ coords[n][p] += np.sign(diff) * .25\n+ preds = coords.copy()\n+\n+ # Transform back\n+ for i in range(coords.shape[0]):\n+ preds[i] = transform_preds(coords[i], center[i], scale[i],\n+ [heatmap_width, heatmap_height])\n+\n+ return preds, maxvals\n+\n+ def __call__(self, output, inputs):\n+ preds, maxvals = self.get_final_preds(\n+ output.numpy(), inputs['center'].numpy(), inputs['scale'].numpy())\n+\n+ return preds, maxvals\n"
},
{
"change_type": "MODIFY",
"old_path": "ppdet/modeling/heads/keypoint_hrhrnet_head.py",
"new_path": "ppdet/modeling/heads/keypoint_hrhrnet_head.py",
"diff": "@@ -21,20 +21,20 @@ from ..backbones.hrnet import BasicBlock\n@register\n-class HrHrnetHead(nn.Layer):\n+class HrHRNetHead(nn.Layer):\n__inject__ = ['loss']\n- def __init__(self, num_joints, loss='HrHrnetLoss', swahr=False, width=32):\n+ def __init__(self, num_joints, loss='HrHRNetLoss', swahr=False, width=32):\n\"\"\"\n- Head for HigherHrnet network\n+ Head for HigherHRNet network\nArgs:\nnum_joints (int): number of keypoints\n- hrloss (object): HrHrnetLoss instance\n+ hrloss (object): HrHRNetLoss instance\nswahr (bool): whether to use swahr\nwidth (int): hrnet channel width\n\"\"\"\n- super(HrHrnetHead, self).__init__()\n+ super(HrHRNetHead, self).__init__()\nself.loss = loss\nself.num_joints = num_joints\n@@ -53,7 +53,7 @@ class HrHrnetHead(nn.Layer):\nnum_filters=width,\nhas_se=False,\nfreeze_norm=False,\n- name='HrHrnetHead_{}'.format(i)) for i in range(4)))\n+ name='HrHRNetHead_{}'.format(i)) for i in range(4)))\nself.interpolate = L.Upsample(2, mode='bilinear')\nself.concat = L.Concat(dim=1)\n"
},
{
"change_type": "MODIFY",
"old_path": "ppdet/modeling/keypoint_utils.py",
"new_path": "ppdet/modeling/keypoint_utils.py",
"diff": "@@ -158,3 +158,145 @@ def warp_affine_joints(joints, mat):\nreturn np.dot(np.concatenate(\n(joints, joints[:, 0:1] * 0 + 1), axis=1),\nmat.T).reshape(shape)\n+\n+\n+def affine_transform(pt, t):\n+ new_pt = np.array([pt[0], pt[1], 1.]).T\n+ new_pt = np.dot(t, new_pt)\n+ return new_pt[:2]\n+\n+\n+def transform_preds(coords, center, scale, output_size):\n+ target_coords = np.zeros(coords.shape)\n+ trans = get_affine_transform(center, scale * 200, 0, output_size, inv=1)\n+ for p in range(coords.shape[0]):\n+ target_coords[p, 0:2] = affine_transform(coords[p, 0:2], trans)\n+ return target_coords\n+\n+\n+def oks_iou(g, d, a_g, a_d, sigmas=None, in_vis_thre=None):\n+ if not isinstance(sigmas, np.ndarray):\n+ sigmas = np.array([\n+ .26, .25, .25, .35, .35, .79, .79, .72, .72, .62, .62, 1.07, 1.07,\n+ .87, .87, .89, .89\n+ ]) / 10.0\n+ vars = (sigmas * 2)**2\n+ xg = g[0::3]\n+ yg = g[1::3]\n+ vg = g[2::3]\n+ ious = np.zeros((d.shape[0]))\n+ for n_d in range(0, d.shape[0]):\n+ xd = d[n_d, 0::3]\n+ yd = d[n_d, 1::3]\n+ vd = d[n_d, 2::3]\n+ dx = xd - xg\n+ dy = yd - yg\n+ e = (dx**2 + dy**2) / vars / ((a_g + a_d[n_d]) / 2 + np.spacing(1)) / 2\n+ if in_vis_thre is not None:\n+ ind = list(vg > in_vis_thre) and list(vd > in_vis_thre)\n+ e = e[ind]\n+ ious[n_d] = np.sum(np.exp(-e)) / e.shape[0] if e.shape[0] != 0 else 0.0\n+ return ious\n+\n+\n+def oks_nms(kpts_db, thresh, sigmas=None, in_vis_thre=None):\n+ \"\"\"greedily select boxes with high confidence and overlap with current maximum <= thresh\n+ rule out overlap >= thresh\n+\n+ Args:\n+ kpts_db (list): The predicted keypoints within the image\n+ thresh (float): The threshold to select the boxes\n+ sigmas (np.array): The variance to calculate the oks iou\n+ Default: None\n+ in_vis_thre (float): The threshold to select the high confidence boxes\n+ Default: None\n+\n+ Return:\n+ keep (list): indexes to keep\n+ \"\"\"\n+\n+ if len(kpts_db) == 0:\n+ return []\n+\n+ scores = np.array([kpts_db[i]['score'] for i in range(len(kpts_db))])\n+ kpts = np.array(\n+ [kpts_db[i]['keypoints'].flatten() for i in range(len(kpts_db))])\n+ areas = np.array([kpts_db[i]['area'] for i in range(len(kpts_db))])\n+\n+ order = scores.argsort()[::-1]\n+\n+ keep = []\n+ while order.size > 0:\n+ i = order[0]\n+ keep.append(i)\n+\n+ oks_ovr = oks_iou(kpts[i], kpts[order[1:]], areas[i], areas[order[1:]],\n+ sigmas, in_vis_thre)\n+\n+ inds = np.where(oks_ovr <= thresh)[0]\n+ order = order[inds + 1]\n+\n+ return keep\n+\n+\n+def rescore(overlap, scores, thresh, type='gaussian'):\n+ assert overlap.shape[0] == scores.shape[0]\n+ if type == 'linear':\n+ inds = np.where(overlap >= thresh)[0]\n+ scores[inds] = scores[inds] * (1 - overlap[inds])\n+ else:\n+ scores = scores * np.exp(-overlap**2 / thresh)\n+\n+ return scores\n+\n+\n+def soft_oks_nms(kpts_db, thresh, sigmas=None, in_vis_thre=None):\n+ \"\"\"greedily select boxes with high confidence and overlap with current maximum <= thresh\n+ rule out overlap >= thresh\n+\n+ Args:\n+ kpts_db (list): The predicted keypoints within the image\n+ thresh (float): The threshold to select the boxes\n+ sigmas (np.array): The variance to calculate the oks iou\n+ Default: None\n+ in_vis_thre (float): The threshold to select the high confidence boxes\n+ Default: None\n+\n+ Return:\n+ keep (list): indexes to keep\n+ \"\"\"\n+\n+ if len(kpts_db) == 0:\n+ return []\n+\n+ scores = np.array([kpts_db[i]['score'] for i in range(len(kpts_db))])\n+ kpts = np.array(\n+ [kpts_db[i]['keypoints'].flatten() for i in range(len(kpts_db))])\n+ areas = 
np.array([kpts_db[i]['area'] for i in range(len(kpts_db))])\n+\n+ order = scores.argsort()[::-1]\n+ scores = scores[order]\n+\n+ # max_dets = order.size\n+ max_dets = 20\n+ keep = np.zeros(max_dets, dtype=np.intp)\n+ keep_cnt = 0\n+ while order.size > 0 and keep_cnt < max_dets:\n+ i = order[0]\n+\n+ oks_ovr = oks_iou(kpts[i], kpts[order[1:]], areas[i], areas[order[1:]],\n+ sigmas, in_vis_thre)\n+\n+ order = order[1:]\n+ scores = rescore(oks_ovr, scores[1:], thresh)\n+\n+ tmp = scores.argsort()[::-1]\n+ order = order[tmp]\n+ scores = scores[tmp]\n+\n+ keep[keep_cnt] = i\n+ keep_cnt += 1\n+\n+ keep = keep[:keep_cnt]\n+\n+ return keep\n"
},
{
"change_type": "MODIFY",
"old_path": "ppdet/modeling/losses/keypoint_loss.py",
"new_path": "ppdet/modeling/losses/keypoint_loss.py",
"diff": "@@ -23,20 +23,59 @@ import paddle.nn as nn\nimport paddle.nn.functional as F\nfrom ppdet.core.workspace import register, serializable\n-__all__ = ['HrHrnetLoss']\n+__all__ = ['HrHRNetLoss', 'KeyPointMSELoss']\n@register\n@serializable\n-class HrHrnetLoss(nn.Layer):\n+class KeyPointMSELoss(nn.Layer):\n+ def __init__(self, use_target_weight=True):\n+ \"\"\"\n+ KeyPointMSELoss layer\n+\n+ Args:\n+ use_target_weight (bool): whether to use target weight\n+ \"\"\"\n+ super(KeyPointMSELoss, self).__init__()\n+ self.criterion = nn.MSELoss(reduction='mean')\n+ self.use_target_weight = use_target_weight\n+\n+ def forward(self, output, records):\n+ target = records['target']\n+ target_weight = records['target_weight']\n+ batch_size = output.shape[0]\n+ num_joints = output.shape[1]\n+ heatmaps_pred = output.reshape(\n+ (batch_size, num_joints, -1)).split(num_joints, 1)\n+ heatmaps_gt = target.reshape(\n+ (batch_size, num_joints, -1)).split(num_joints, 1)\n+ loss = 0\n+\n+ for idx in range(num_joints):\n+ heatmap_pred = heatmaps_pred[idx].squeeze()\n+ heatmap_gt = heatmaps_gt[idx].squeeze()\n+ if self.use_target_weight:\n+ loss += 0.5 * self.criterion(\n+ heatmap_pred.multiply(target_weight[:, idx]),\n+ heatmap_gt.multiply(target_weight[:, idx]))\n+ else:\n+ loss += 0.5 * self.criterion(heatmap_pred, heatmap_gt)\n+ keypoint_losses = dict()\n+ keypoint_losses['loss'] = loss / num_joints\n+ return keypoint_losses\n+\n+\n+@register\n+@serializable\n+class HrHRNetLoss(nn.Layer):\ndef __init__(self, num_joints, swahr):\n\"\"\"\n- HrHrnetLoss layer\n+ HrHRNetLoss layer\nArgs:\nnum_joints (int): number of keypoints\n\"\"\"\n- super(HrHrnetLoss, self).__init__()\n+ super(HrHRNetLoss, self).__init__()\nif swahr:\nself.heatmaploss = HeatMapSWAHRLoss(num_joints)\nelse:\n"
}
] |
Python
|
Apache License 2.0
|
paddlepaddle/paddledetection
|
add hrnet modeling; (#2889)
rename all Hrnet to HRNet
|
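The commit above wires up the top-down keypoint decoding (get_max_preds / get_final_preds) and the OKS-based NMS helpers. As a condensed illustration of the argmax decoding idea in that diff — a toy numpy sketch with a made-up heatmap, not the library code itself — the peak of each joint heatmap is recovered as follows:

    import numpy as np

    # toy batch: 1 image, 2 joints, 8x8 heatmaps with known peaks
    heatmaps = np.zeros((1, 2, 8, 8), dtype=np.float32)
    heatmaps[0, 0, 3, 5] = 1.0   # joint 0 peaks at (x=5, y=3)
    heatmaps[0, 1, 6, 2] = 0.7   # joint 1 peaks at (x=2, y=6)

    n, j, h, w = heatmaps.shape
    flat = heatmaps.reshape(n, j, -1)
    idx = flat.argmax(axis=2)      # flattened argmax per joint
    maxvals = flat.max(axis=2)     # peak confidence per joint
    coords = np.stack([idx % w, idx // w], axis=-1).astype(np.float32)

    print(coords)    # -> joint 0 at (5, 3), joint 1 at (2, 6)
    print(maxvals)   # -> peak confidences 1.0 and 0.7

get_final_preds then nudges each coordinate a quarter pixel toward the second-highest neighbouring response and maps it back to the original image with the inverse affine transform.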
499,348 |
07.05.2021 20:44:20
| -28,800 |
a06a6258fa0227a90c6406bc0372132c10f40aea
|
add hrnet data, engine, metrics; rename Hrnet to HRNet
|
[
{
"change_type": "MODIFY",
"old_path": "ppdet/data/source/keypoint_coco.py",
"new_path": "ppdet/data/source/keypoint_coco.py",
"diff": "import os\nimport cv2\nimport numpy as np\n+import json\nimport copy\n-# TODO: unify xtococotools and pycocotools\nimport pycocotools\nfrom pycocotools.coco import COCO\nfrom .dataset import DetDataset\n@@ -317,4 +317,341 @@ class KeypointBottomUpCrowdPoseDataset(KeypointBottomUpCocoDataset):\nself.id2name, self.name2id = self._get_mapping_id_name(self.coco.imgs)\nself.dataset_name = 'crowdpose'\n- print(f'=> num_images: {self.num_images}')\n+ print('=> num_images: {}'.format(self.num_images))\n+\n+\n+@serializable\n+class KeypointTopDownBaseDataset(DetDataset):\n+ \"\"\"Base class for top_down datasets.\n+\n+ All datasets should subclass it.\n+ All subclasses should overwrite:\n+ Methods:`_get_db`\n+\n+ Args:\n+ dataset_dir (str): Root path to the dataset.\n+ image_dir (str): Path to a directory where images are held.\n+ anno_path (str): Relative path to the annotation file.\n+ num_joints (int): keypoint numbers\n+ transform (composed(operators)): A sequence of data transforms.\n+ \"\"\"\n+\n+ def __init__(self,\n+ dataset_dir,\n+ image_dir,\n+ anno_path,\n+ num_joints,\n+ transform=[]):\n+ super().__init__(dataset_dir, image_dir, anno_path)\n+ self.image_info = {}\n+ self.ann_info = {}\n+\n+ self.img_prefix = os.path.join(dataset_dir, image_dir)\n+ self.transform = transform\n+\n+ self.ann_info['num_joints'] = num_joints\n+ self.db = []\n+\n+ def __len__(self):\n+ \"\"\"Get dataset length.\"\"\"\n+ return len(self.db)\n+\n+ def _get_db(self):\n+ \"\"\"Get a sample\"\"\"\n+ raise NotImplementedError\n+\n+ def __getitem__(self, idx):\n+ \"\"\"Prepare sample for training given the index.\"\"\"\n+ records = copy.deepcopy(self.db[idx])\n+ records['image'] = cv2.imread(records['image_file'], cv2.IMREAD_COLOR |\n+ cv2.IMREAD_IGNORE_ORIENTATION)\n+ records['image'] = cv2.cvtColor(records['image'], cv2.COLOR_BGR2RGB)\n+ records['score'] = records['score'] if 'score' in records else 1\n+ records = self.transform(records)\n+ # print('records', records)\n+ return records\n+\n+\n+@register\n+@serializable\n+class KeypointTopDownCocoDataset(KeypointTopDownBaseDataset):\n+ \"\"\"COCO dataset for top-down pose estimation.\n+\n+ The dataset loads raw features and apply specified transforms\n+ to return a dict containing the image tensors and other information.\n+\n+ COCO keypoint indexes:\n+\n+ 0: 'nose',\n+ 1: 'left_eye',\n+ 2: 'right_eye',\n+ 3: 'left_ear',\n+ 4: 'right_ear',\n+ 5: 'left_shoulder',\n+ 6: 'right_shoulder',\n+ 7: 'left_elbow',\n+ 8: 'right_elbow',\n+ 9: 'left_wrist',\n+ 10: 'right_wrist',\n+ 11: 'left_hip',\n+ 12: 'right_hip',\n+ 13: 'left_knee',\n+ 14: 'right_knee',\n+ 15: 'left_ankle',\n+ 16: 'right_ankle'\n+\n+ Args:\n+ dataset_dir (str): Root path to the dataset.\n+ image_dir (str): Path to a directory where images are held.\n+ anno_path (str): Relative path to the annotation file.\n+ num_joints (int): Keypoint numbers\n+ trainsize (list):[w, h] Image target size\n+ transform (composed(operators)): A sequence of data transforms.\n+ bbox_file (str): Path to a detection bbox file\n+ Default: None.\n+ use_gt_bbox (bool): Whether to use ground truth bbox\n+ Default: True.\n+ pixel_std (int): The pixel std of the scale\n+ Default: 200.\n+ image_thre (float): The threshold to filter the detection box\n+ Default: 0.0.\n+ \"\"\"\n+\n+ def __init__(self,\n+ dataset_dir,\n+ image_dir,\n+ anno_path,\n+ num_joints,\n+ trainsize,\n+ transform=[],\n+ bbox_file=None,\n+ use_gt_bbox=True,\n+ pixel_std=200,\n+ image_thre=0.0):\n+ super().__init__(dataset_dir, image_dir, anno_path, 
num_joints,\n+ transform)\n+\n+ self.bbox_file = bbox_file\n+ self.use_gt_bbox = use_gt_bbox\n+ self.trainsize = trainsize\n+ self.pixel_std = pixel_std\n+ self.image_thre = image_thre\n+ self.dataset_name = 'coco'\n+\n+ def parse_dataset(self):\n+ if self.use_gt_bbox:\n+ self.db = self._load_coco_keypoint_annotations()\n+ else:\n+ self.db = self._load_coco_person_detection_results()\n+\n+ def _load_coco_keypoint_annotations(self):\n+ coco = COCO(self.get_anno())\n+ img_ids = coco.getImgIds()\n+ gt_db = []\n+ for index in img_ids:\n+ im_ann = coco.loadImgs(index)[0]\n+ width = im_ann['width']\n+ height = im_ann['height']\n+ file_name = im_ann['file_name']\n+ im_id = int(im_ann[\"id\"])\n+\n+ annIds = coco.getAnnIds(imgIds=index, iscrowd=False)\n+ objs = coco.loadAnns(annIds)\n+\n+ valid_objs = []\n+ for obj in objs:\n+ x, y, w, h = obj['bbox']\n+ x1 = np.max((0, x))\n+ y1 = np.max((0, y))\n+ x2 = np.min((width - 1, x1 + np.max((0, w - 1))))\n+ y2 = np.min((height - 1, y1 + np.max((0, h - 1))))\n+ if obj['area'] > 0 and x2 >= x1 and y2 >= y1:\n+ obj['clean_bbox'] = [x1, y1, x2 - x1, y2 - y1]\n+ valid_objs.append(obj)\n+ objs = valid_objs\n+\n+ rec = []\n+ for obj in objs:\n+ if max(obj['keypoints']) == 0:\n+ continue\n+\n+ joints = np.zeros(\n+ (self.ann_info['num_joints'], 3), dtype=np.float)\n+ joints_vis = np.zeros(\n+ (self.ann_info['num_joints'], 3), dtype=np.float)\n+ for ipt in range(self.ann_info['num_joints']):\n+ joints[ipt, 0] = obj['keypoints'][ipt * 3 + 0]\n+ joints[ipt, 1] = obj['keypoints'][ipt * 3 + 1]\n+ joints[ipt, 2] = 0\n+ t_vis = obj['keypoints'][ipt * 3 + 2]\n+ if t_vis > 1:\n+ t_vis = 1\n+ joints_vis[ipt, 0] = t_vis\n+ joints_vis[ipt, 1] = t_vis\n+ joints_vis[ipt, 2] = 0\n+\n+ center, scale = self._box2cs(obj['clean_bbox'][:4])\n+ rec.append({\n+ 'image_file': os.path.join(self.img_prefix, file_name),\n+ 'center': center,\n+ 'scale': scale,\n+ 'joints': joints,\n+ 'joints_vis': joints_vis,\n+ 'im_id': im_id,\n+ })\n+ gt_db.extend(rec)\n+\n+ return gt_db\n+\n+ def _box2cs(self, box):\n+ x, y, w, h = box[:4]\n+ center = np.zeros((2), dtype=np.float32)\n+ center[0] = x + w * 0.5\n+ center[1] = y + h * 0.5\n+ aspect_ratio = self.trainsize[0] * 1.0 / self.trainsize[1]\n+\n+ if w > aspect_ratio * h:\n+ h = w * 1.0 / aspect_ratio\n+ elif w < aspect_ratio * h:\n+ w = h * aspect_ratio\n+ scale = np.array(\n+ [w * 1.0 / self.pixel_std, h * 1.0 / self.pixel_std],\n+ dtype=np.float32)\n+ if center[0] != -1:\n+ scale = scale * 1.25\n+\n+ return center, scale\n+\n+ def _load_coco_person_detection_results(self):\n+ all_boxes = None\n+ bbox_file_path = os.path.join(self.dataset_dir, self.bbox_file)\n+ with open(bbox_file_path, 'r') as f:\n+ all_boxes = json.load(f)\n+\n+ if not all_boxes:\n+ print('=> Load %s fail!' 
% bbox_file_path)\n+ return None\n+\n+ kpt_db = []\n+ for n_img in range(0, len(all_boxes)):\n+ det_res = all_boxes[n_img]\n+ if det_res['category_id'] != 1:\n+ continue\n+ file_name = det_res[\n+ 'filename'] if 'filename' in det_res else '%012d.jpg' % det_res[\n+ 'image_id']\n+ img_name = os.path.join(self.img_prefix, file_name)\n+ box = det_res['bbox']\n+ score = det_res['score']\n+ im_id = int(det_res['image_id'])\n+\n+ if score < self.image_thre:\n+ continue\n+\n+ center, scale = self._box2cs(box)\n+ joints = np.zeros((self.ann_info['num_joints'], 3), dtype=np.float)\n+ joints_vis = np.ones(\n+ (self.ann_info['num_joints'], 3), dtype=np.float)\n+ kpt_db.append({\n+ 'image_file': img_name,\n+ 'im_id': im_id,\n+ 'center': center,\n+ 'scale': scale,\n+ 'score': score,\n+ 'joints': joints,\n+ 'joints_vis': joints_vis,\n+ })\n+\n+ return kpt_db\n+\n+\n+@register\n+@serializable\n+class KeypointTopDownMPIIDataset(KeypointTopDownBaseDataset):\n+ \"\"\"MPII dataset for topdown pose estimation.\n+\n+ The dataset loads raw features and apply specified transforms\n+ to return a dict containing the image tensors and other information.\n+\n+ MPII keypoint indexes::\n+\n+ 0: 'right_ankle',\n+ 1: 'right_knee',\n+ 2: 'right_hip',\n+ 3: 'left_hip',\n+ 4: 'left_knee',\n+ 5: 'left_ankle',\n+ 6: 'pelvis',\n+ 7: 'thorax',\n+ 8: 'upper_neck',\n+ 9: 'head_top',\n+ 10: 'right_wrist',\n+ 11: 'right_elbow',\n+ 12: 'right_shoulder',\n+ 13: 'left_shoulder',\n+ 14: 'left_elbow',\n+ 15: 'left_wrist',\n+\n+ Args:\n+ dataset_dir (str): Root path to the dataset.\n+ image_dir (str): Path to a directory where images are held.\n+ anno_path (str): Relative path to the annotation file.\n+ num_joints (int): Keypoint numbers\n+ trainsize (list):[w, h] Image target size\n+ transform (composed(operators)): A sequence of data transforms.\n+ \"\"\"\n+\n+ def __init__(self,\n+ dataset_dir,\n+ image_dir,\n+ anno_path,\n+ num_joints,\n+ transform=[]):\n+ super().__init__(dataset_dir, image_dir, anno_path, num_joints,\n+ transform)\n+\n+ self.dataset_name = 'mpii'\n+\n+ def parse_dataset(self):\n+ with open(self.get_anno()) as anno_file:\n+ anno = json.load(anno_file)\n+\n+ gt_db = []\n+ for a in anno:\n+ image_name = a['image']\n+ im_id = a['image_id'] if 'image_id' in a else int(\n+ os.path.splitext(image_name)[0])\n+\n+ c = np.array(a['center'], dtype=np.float)\n+ s = np.array([a['scale'], a['scale']], dtype=np.float)\n+\n+ # Adjust center/scale slightly to avoid cropping limbs\n+ if c[0] != -1:\n+ c[1] = c[1] + 15 * s[1]\n+ s = s * 1.25\n+ c = c - 1\n+\n+ joints = np.zeros((self.ann_info['num_joints'], 3), dtype=np.float)\n+ joints_vis = np.zeros(\n+ (self.ann_info['num_joints'], 3), dtype=np.float)\n+ if 'joints' in a:\n+ joints_ = np.array(a['joints'])\n+ joints_[:, 0:2] = joints_[:, 0:2] - 1\n+ joints_vis_ = np.array(a['joints_vis'])\n+ assert len(joints_) == self.ann_info[\n+ 'num_joints'], 'joint num diff: {} vs {}'.format(\n+ len(joints_), self.ann_info['num_joints'])\n+\n+ joints[:, 0:2] = joints_[:, 0:2]\n+ joints_vis[:, 0] = joints_vis_[:]\n+ joints_vis[:, 1] = joints_vis_[:]\n+\n+ gt_db.append({\n+ 'image_file': os.path.join(self.img_prefix, image_name),\n+ 'im_id': im_id,\n+ 'center': c,\n+ 'scale': s,\n+ 'joints': joints,\n+ 'joints_vis': joints_vis\n+ })\n+ self.db = gt_db\n"
},
{
"change_type": "MODIFY",
"old_path": "ppdet/data/transform/keypoint_operators.py",
"new_path": "ppdet/data/transform/keypoint_operators.py",
"diff": "@@ -29,7 +29,7 @@ import math\nimport copy\nimport os\n-from ...modeling.keypoint_utils import get_affine_mat_kernel, warp_affine_joints\n+from ...modeling.keypoint_utils import get_affine_mat_kernel, warp_affine_joints, get_affine_transform, affine_transform\nfrom ppdet.core.workspace import serializable\nfrom ppdet.utils.logger import setup_logger\nlogger = setup_logger(__name__)\n@@ -38,7 +38,8 @@ registered_ops = []\n__all__ = [\n'RandomAffine', 'KeyPointFlip', 'TagGenerate', 'ToHeatmaps',\n- 'NormalizePermute', 'EvalAffine'\n+ 'NormalizePermute', 'EvalAffine', 'RandomFlipHalfBodyTransform',\n+ 'TopDownAffine', 'ToHeatmapsTopDown'\n]\n@@ -403,3 +404,229 @@ class ToHeatmaps(object):\nrecords['mask_{}x'.format(idx + 1)] = mask\ndel records['mask']\nreturn records\n+\n+\n+@register_keypointop\n+class RandomFlipHalfBodyTransform(object):\n+ \"\"\"apply data augment to image and coords\n+ to achieve the flip, scale, rotate and half body transform effect for training image\n+\n+ Args:\n+ trainsize (list):[w, h], Image target size\n+ upper_body_ids (list): The upper body joint ids\n+ flip_pairs (list): The left-right joints exchange order list\n+ pixel_std (int): The pixel std of the scale\n+ scale (float): The scale factor to transform the image\n+ rot (int): The rotate factor to transform the image\n+ num_joints_half_body (int): The joints threshold of the half body transform\n+ prob_half_body (float): The threshold of the half body transform\n+ flip (bool): Whether to flip the image\n+\n+ Returns:\n+ records(dict): contain the image and coords after tranformed\n+\n+ \"\"\"\n+\n+ def __init__(self,\n+ trainsize,\n+ upper_body_ids,\n+ flip_pairs,\n+ pixel_std,\n+ scale=0.35,\n+ rot=40,\n+ num_joints_half_body=8,\n+ prob_half_body=0.3,\n+ flip=True,\n+ rot_prob=0.6):\n+ super(RandomFlipHalfBodyTransform, self).__init__()\n+ self.trainsize = trainsize\n+ self.upper_body_ids = upper_body_ids\n+ self.flip_pairs = flip_pairs\n+ self.pixel_std = pixel_std\n+ self.scale = scale\n+ self.rot = rot\n+ self.num_joints_half_body = num_joints_half_body\n+ self.prob_half_body = prob_half_body\n+ self.flip = flip\n+ self.aspect_ratio = trainsize[0] * 1.0 / trainsize[1]\n+ self.rot_prob = rot_prob\n+\n+ def halfbody_transform(self, joints, joints_vis):\n+ upper_joints = []\n+ lower_joints = []\n+ for joint_id in range(joints.shape[0]):\n+ if joints_vis[joint_id][0] > 0:\n+ if joint_id in self.upper_body_ids:\n+ upper_joints.append(joints[joint_id])\n+ else:\n+ lower_joints.append(joints[joint_id])\n+ if np.random.randn() < 0.5 and len(upper_joints) > 2:\n+ selected_joints = upper_joints\n+ else:\n+ selected_joints = lower_joints if len(\n+ lower_joints) > 2 else upper_joints\n+ if len(selected_joints) < 2:\n+ return None, None\n+ selected_joints = np.array(selected_joints, dtype=np.float32)\n+ center = selected_joints.mean(axis=0)[:2]\n+ left_top = np.amin(selected_joints, axis=0)\n+ right_bottom = np.amax(selected_joints, axis=0)\n+ w = right_bottom[0] - left_top[0]\n+ h = right_bottom[1] - left_top[1]\n+ if w > self.aspect_ratio * h:\n+ h = w * 1.0 / self.aspect_ratio\n+ elif w < self.aspect_ratio * h:\n+ w = h * self.aspect_ratio\n+ scale = np.array(\n+ [w * 1.0 / self.pixel_std, h * 1.0 / self.pixel_std],\n+ dtype=np.float32)\n+ scale = scale * 1.5\n+\n+ return center, scale\n+\n+ def flip_joints(self, joints, joints_vis, width, matched_parts):\n+ joints[:, 0] = width - joints[:, 0] - 1\n+ for pair in matched_parts:\n+ joints[pair[0], :], joints[pair[1], :] = \\\n+ joints[pair[1], :], 
joints[pair[0], :].copy()\n+ joints_vis[pair[0], :], joints_vis[pair[1], :] = \\\n+ joints_vis[pair[1], :], joints_vis[pair[0], :].copy()\n+\n+ return joints * joints_vis, joints_vis\n+\n+ def __call__(self, records):\n+ image = records['image']\n+ joints = records['joints']\n+ joints_vis = records['joints_vis']\n+ c = records['center']\n+ s = records['scale']\n+ r = 0\n+ if (np.sum(joints_vis[:, 0]) > self.num_joints_half_body and\n+ np.random.rand() < self.prob_half_body):\n+ c_half_body, s_half_body = self.halfbody_transform(joints,\n+ joints_vis)\n+ if c_half_body is not None and s_half_body is not None:\n+ c, s = c_half_body, s_half_body\n+ sf = self.scale\n+ rf = self.rot\n+ s = s * np.clip(np.random.randn() * sf + 1, 1 - sf, 1 + sf)\n+ r = np.clip(np.random.randn() * rf, -rf * 2,\n+ rf * 2) if np.random.random() <= self.rot_prob else 0\n+\n+ if self.flip and np.random.random() <= 0.5:\n+ image = image[:, ::-1, :]\n+ joints, joints_vis = self.flip_joints(\n+ joints, joints_vis, image.shape[1], self.flip_pairs)\n+ c[0] = image.shape[1] - c[0] - 1\n+ records['image'] = image\n+ records['joints'] = joints\n+ records['joints_vis'] = joints_vis\n+ records['center'] = c\n+ records['scale'] = s\n+ records['rotate'] = r\n+\n+ return records\n+\n+\n+@register_keypointop\n+class TopDownAffine(object):\n+ \"\"\"apply affine transform to image and coords\n+\n+ Args:\n+ trainsize (list): [w, h], the standard size used to train\n+ records(dict): the dict contained the image and coords\n+\n+ Returns:\n+ records (dict): contain the image and coords after tranformed\n+\n+ \"\"\"\n+\n+ def __init__(self, trainsize):\n+ self.trainsize = trainsize\n+\n+ def __call__(self, records):\n+ image = records['image']\n+ joints = records['joints']\n+ joints_vis = records['joints_vis']\n+ rot = records['rotate'] if \"rotate\" in records else 0\n+ trans = get_affine_transform(records['center'], records['scale'] * 200,\n+ rot, self.trainsize)\n+ image = cv2.warpAffine(\n+ image,\n+ trans, (int(self.trainsize[0]), int(self.trainsize[1])),\n+ flags=cv2.INTER_LINEAR)\n+ for i in range(joints.shape[0]):\n+ if joints_vis[i, 0] > 0.0:\n+ joints[i, 0:2] = affine_transform(joints[i, 0:2], trans)\n+ records['image'] = image\n+ records['joints'] = joints\n+\n+ return records\n+\n+\n+@register_keypointop\n+class ToHeatmapsTopDown(object):\n+ \"\"\"to generate the gaussin heatmaps of keypoint for heatmap loss\n+\n+ Args:\n+ hmsize (list): [w, h] output heatmap's size\n+ sigma (float): the std of gaussin kernel genereted\n+ records(dict): the dict contained the image and coords\n+\n+ Returns:\n+ records (dict): contain the heatmaps used to heatmaploss\n+\n+ \"\"\"\n+\n+ def __init__(self, hmsize, sigma):\n+ super(ToHeatmapsTopDown, self).__init__()\n+ self.hmsize = np.array(hmsize)\n+ self.sigma = sigma\n+\n+ def __call__(self, records):\n+ joints = records['joints']\n+ joints_vis = records['joints_vis']\n+ num_joints = joints.shape[0]\n+ image_size = np.array(\n+ [records['image'].shape[1], records['image'].shape[0]])\n+ target_weight = np.ones((num_joints, 1), dtype=np.float32)\n+ target_weight[:, 0] = joints_vis[:, 0]\n+ target = np.zeros(\n+ (num_joints, self.hmsize[1], self.hmsize[0]), dtype=np.float32)\n+ tmp_size = self.sigma * 3\n+ for joint_id in range(num_joints):\n+ feat_stride = image_size / self.hmsize\n+ mu_x = int(joints[joint_id][0] / feat_stride[0] + 0.5)\n+ mu_y = int(joints[joint_id][1] / feat_stride[1] + 0.5)\n+ # Check that any part of the gaussian is in-bounds\n+ ul = [int(mu_x - tmp_size), int(mu_y - 
tmp_size)]\n+ br = [int(mu_x + tmp_size + 1), int(mu_y + tmp_size + 1)]\n+ if ul[0] >= self.hmsize[0] or ul[1] >= self.hmsize[1] or br[\n+ 0] < 0 or br[1] < 0:\n+ # If not, just return the image as is\n+ target_weight[joint_id] = 0\n+ continue\n+ # # Generate gaussian\n+ size = 2 * tmp_size + 1\n+ x = np.arange(0, size, 1, np.float32)\n+ y = x[:, np.newaxis]\n+ x0 = y0 = size // 2\n+ # The gaussian is not normalized, we want the center value to equal 1\n+ g = np.exp(-((x - x0)**2 + (y - y0)**2) / (2 * self.sigma**2))\n+\n+ # Usable gaussian range\n+ g_x = max(0, -ul[0]), min(br[0], self.hmsize[0]) - ul[0]\n+ g_y = max(0, -ul[1]), min(br[1], self.hmsize[1]) - ul[1]\n+ # Image range\n+ img_x = max(0, ul[0]), min(br[0], self.hmsize[0])\n+ img_y = max(0, ul[1]), min(br[1], self.hmsize[1])\n+\n+ v = target_weight[joint_id]\n+ if v > 0.5:\n+ target[joint_id][img_y[0]:img_y[1], img_x[0]:img_x[1]] = g[g_y[\n+ 0]:g_y[1], g_x[0]:g_x[1]]\n+ records['target'] = target\n+ records['target_weight'] = target_weight\n+ del records['joints'], records['joints_vis']\n+\n+ return records\n"
},
{
"change_type": "MODIFY",
"old_path": "ppdet/engine/callbacks.py",
"new_path": "ppdet/engine/callbacks.py",
"diff": "@@ -166,7 +166,12 @@ class Checkpointer(Callback):\nif 'save_best_model' in status and status['save_best_model']:\nfor metric in self.model._metrics:\nmap_res = metric.get_results()\n- key = 'bbox' if 'bbox' in map_res else 'mask'\n+ if 'bbox' in map_res:\n+ key = 'bbox'\n+ elif 'keypoint' in map_res:\n+ key = 'keypoint'\n+ else:\n+ key = 'mask'\nif key not in map_res:\nlogger.warn(\"Evaluation results empty, this may be due to \" \\\n\"training iterations being too few or not \" \\\n"
},
{
"change_type": "MODIFY",
"old_path": "ppdet/engine/export_utils.py",
"new_path": "ppdet/engine/export_utils.py",
"diff": "@@ -37,10 +37,11 @@ TRT_MIN_SUBGRAPH = {\n'TTFNet': 3,\n'FCOS': 16,\n'SOLOv2': 60,\n- 'HigherHrnet': 40,\n+ 'HigherHRNet': 3,\n+ 'HRNet': 3,\n}\n-KEYPOINT_ARCH = ['HigherHrnet', 'Hrnet']\n+KEYPOINT_ARCH = ['HigherHRNet', 'TopDownHRNet']\ndef _parse_reader(reader_cfg, dataset_cfg, metric, arch, image_shape):\n"
},
{
"change_type": "MODIFY",
"old_path": "ppdet/engine/trainer.py",
"new_path": "ppdet/engine/trainer.py",
"diff": "@@ -33,7 +33,7 @@ from ppdet.optimizer import ModelEMA\nfrom ppdet.core.workspace import create\nfrom ppdet.utils.checkpoint import load_weight, load_pretrain_weight\nfrom ppdet.utils.visualizer import visualize_results, save_result\n-from ppdet.metrics import Metric, COCOMetric, VOCMetric, WiderFaceMetric, get_infer_results\n+from ppdet.metrics import Metric, COCOMetric, VOCMetric, WiderFaceMetric, get_infer_results, KeyPointTopDownCOCOEval\nfrom ppdet.data.source.category import get_categories\nimport ppdet.utils.stats as stats\n@@ -173,6 +173,15 @@ class Trainer(object):\nanno_file=self.dataset.get_anno(),\nmulti_scale=multi_scale)\n]\n+ elif self.cfg.metric == 'KeyPointTopDownCOCOEval':\n+ eval_dataset = self.cfg['EvalDataset']\n+ eval_dataset.check_or_download_dataset()\n+ anno_file = eval_dataset.get_anno()\n+ self._metrics = [\n+ KeyPointTopDownCOCOEval(anno_file,\n+ len(eval_dataset), self.cfg.num_joints,\n+ self.cfg.save_dir)\n+ ]\nelse:\nlogger.warn(\"Metric not support for metric type {}\".format(\nself.cfg.metric))\n@@ -374,6 +383,7 @@ class Trainer(object):\nself.status['step_id'] = step_id\n# forward\nouts = self.model(data)\n+\nfor key in ['im_shape', 'scale_factor', 'im_id']:\nouts[key] = data[key]\nfor key, value in outs.items():\n"
},
{
"change_type": "MODIFY",
"old_path": "ppdet/metrics/__init__.py",
"new_path": "ppdet/metrics/__init__.py",
"diff": "# limitations under the License.\nfrom . import metrics\n-\n+from . import keypoint_metrics\nfrom .metrics import *\n+from .keypoint_metrics import *\n-__all__ = metrics.__all__\n+__all__ = metrics.__all__ + keypoint_metrics.__all__\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "ppdet/metrics/keypoint_metrics.py",
"diff": "+# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n+import copy\n+import os\n+import json\n+from collections import OrderedDict\n+from collections import defaultdict\n+import numpy as np\n+from pycocotools.coco import COCO\n+from pycocotools.cocoeval import COCOeval\n+from ..modeling.keypoint_utils import oks_nms\n+\n+__all__ = ['KeyPointTopDownCOCOEval']\n+\n+\n+class KeyPointTopDownCOCOEval(object):\n+ def __init__(self,\n+ anno_file,\n+ num_samples,\n+ num_joints,\n+ output_eval,\n+ iou_type='keypoints',\n+ in_vis_thre=0.2,\n+ oks_thre=0.9):\n+ super(KeyPointTopDownCOCOEval, self).__init__()\n+ self.coco = COCO(anno_file)\n+ self.num_samples = num_samples\n+ self.num_joints = num_joints\n+ self.iou_type = iou_type\n+ self.in_vis_thre = in_vis_thre\n+ self.oks_thre = oks_thre\n+ self.output_eval = output_eval\n+ self.res_file = os.path.join(output_eval, \"keypoints_results.json\")\n+ self.reset()\n+\n+ def reset(self):\n+ self.results = {\n+ 'all_preds': np.zeros(\n+ (self.num_samples, self.num_joints, 3), dtype=np.float32),\n+ 'all_boxes': np.zeros((self.num_samples, 6)),\n+ 'image_path': []\n+ }\n+ self.eval_results = {}\n+ self.idx = 0\n+\n+ def update(self, inputs, outputs):\n+ kpt_coord = outputs['kpt_coord']\n+ kpt_score = outputs['kpt_score']\n+ num_images = inputs['image'].shape[0]\n+ self.results['all_preds'][self.idx:self.idx + num_images, :, 0:\n+ 2] = kpt_coord[:, :, 0:2]\n+ self.results['all_preds'][self.idx:self.idx + num_images, :, 2:\n+ 3] = kpt_score\n+ self.results['all_boxes'][self.idx:self.idx + num_images, 0:2] = inputs[\n+ 'center'].numpy()[:, 0:2]\n+ self.results['all_boxes'][self.idx:self.idx + num_images, 2:4] = inputs[\n+ 'scale'].numpy()[:, 0:2]\n+ self.results['all_boxes'][self.idx:self.idx + num_images, 4] = np.prod(\n+ inputs['scale'].numpy() * 200, 1)\n+ self.results['all_boxes'][self.idx:self.idx + num_images,\n+ 5] = np.squeeze(inputs['score'].numpy())\n+ self.results['image_path'].extend(inputs['im_id'].numpy())\n+\n+ self.idx += num_images\n+\n+ def _write_coco_keypoint_results(self, keypoints):\n+ data_pack = [{\n+ 'cat_id': 1,\n+ 'cls': 'person',\n+ 'ann_type': 'keypoints',\n+ 'keypoints': keypoints\n+ }]\n+ results = self._coco_keypoint_results_one_category_kernel(data_pack[0])\n+ if not os.path.exists(self.output_eval):\n+ os.makedirs(self.output_eval)\n+ with open(self.res_file, 'w') as f:\n+ json.dump(results, f, sort_keys=True, indent=4)\n+ try:\n+ json.load(open(self.res_file))\n+ except Exception:\n+ content = []\n+ with open(self.res_file, 'r') as f:\n+ for line in f:\n+ content.append(line)\n+ content[-1] = ']'\n+ with open(self.res_file, 'w') as f:\n+ for c in content:\n+ f.write(c)\n+\n+ def _coco_keypoint_results_one_category_kernel(self, data_pack):\n+ cat_id = data_pack['cat_id']\n+ keypoints = data_pack['keypoints']\n+ cat_results = []\n+\n+ for img_kpts in keypoints:\n+ if len(img_kpts) == 0:\n+ continue\n+\n+ _key_points = np.array(\n+ 
[img_kpts[k]['keypoints'] for k in range(len(img_kpts))])\n+ _key_points = _key_points.reshape(_key_points.shape[0], -1)\n+\n+ result = [{\n+ 'image_id': img_kpts[k]['image'],\n+ 'category_id': cat_id,\n+ 'keypoints': list(_key_points[k]),\n+ 'score': img_kpts[k]['score'],\n+ 'center': list(img_kpts[k]['center']),\n+ 'scale': list(img_kpts[k]['scale'])\n+ } for k in range(len(img_kpts))]\n+ cat_results.extend(result)\n+\n+ return cat_results\n+\n+ def get_final_results(self, preds, all_boxes, img_path):\n+ _kpts = []\n+ for idx, kpt in enumerate(preds):\n+ _kpts.append({\n+ 'keypoints': kpt,\n+ 'center': all_boxes[idx][0:2],\n+ 'scale': all_boxes[idx][2:4],\n+ 'area': all_boxes[idx][4],\n+ 'score': all_boxes[idx][5],\n+ 'image': int(img_path[idx])\n+ })\n+ # image x person x (keypoints)\n+ kpts = defaultdict(list)\n+ for kpt in _kpts:\n+ kpts[kpt['image']].append(kpt)\n+\n+ # rescoring and oks nms\n+ num_joints = preds.shape[1]\n+ in_vis_thre = self.in_vis_thre\n+ oks_thre = self.oks_thre\n+ oks_nmsed_kpts = []\n+ for img in kpts.keys():\n+ img_kpts = kpts[img]\n+ for n_p in img_kpts:\n+ box_score = n_p['score']\n+ kpt_score = 0\n+ valid_num = 0\n+ for n_jt in range(0, num_joints):\n+ t_s = n_p['keypoints'][n_jt][2]\n+ if t_s > in_vis_thre:\n+ kpt_score = kpt_score + t_s\n+ valid_num = valid_num + 1\n+ if valid_num != 0:\n+ kpt_score = kpt_score / valid_num\n+ # rescoring\n+ n_p['score'] = kpt_score * box_score\n+\n+ keep = oks_nms([img_kpts[i] for i in range(len(img_kpts))],\n+ oks_thre)\n+\n+ if len(keep) == 0:\n+ oks_nmsed_kpts.append(img_kpts)\n+ else:\n+ oks_nmsed_kpts.append([img_kpts[_keep] for _keep in keep])\n+\n+ self._write_coco_keypoint_results(oks_nmsed_kpts)\n+\n+ def accumulate(self):\n+ self.get_final_results(self.results['all_preds'],\n+ self.results['all_boxes'],\n+ self.results['image_path'])\n+ coco_dt = self.coco.loadRes(self.res_file)\n+ coco_eval = COCOeval(self.coco, coco_dt, 'keypoints')\n+ coco_eval.params.useSegm = None\n+ coco_eval.evaluate()\n+ coco_eval.accumulate()\n+ coco_eval.summarize()\n+\n+ keypoint_stats = []\n+ for ind in range(len(coco_eval.stats)):\n+ keypoint_stats.append((coco_eval.stats[ind]))\n+ self.eval_results['keypoint'] = keypoint_stats\n+\n+ def log(self):\n+ stats_names = [\n+ 'AP', 'Ap .5', 'AP .75', 'AP (M)', 'AP (L)', 'AR', 'AR .5',\n+ 'AR .75', 'AR (M)', 'AR (L)'\n+ ]\n+ num_values = len(stats_names)\n+ print(' '.join(['| {}'.format(name) for name in stats_names]) + ' |')\n+ print('|---' * (num_values + 1) + '|')\n+\n+ print(' '.join([\n+ '| {:.3f}'.format(value) for value in self.eval_results['keypoint']\n+ ]) + ' |')\n+\n+ def get_results(self):\n+ return self.eval_results\n"
},
{
"change_type": "MODIFY",
"old_path": "ppdet/optimizer.py",
"new_path": "ppdet/optimizer.py",
"diff": "@@ -139,6 +139,7 @@ class LinearWarmup(object):\nboundary = []\nvalue = []\nfor i in range(self.steps + 1):\n+ if self.steps > 0:\nalpha = i / self.steps\nfactor = self.start_factor * (1 - alpha) + alpha\nlr = base_lr * factor\n"
}
] |
Python
|
Apache License 2.0
|
paddlepaddle/paddledetection
|
add hrnet data, engine, metrics; rename Hrnet to HRNet (#2890)
|
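ToHeatmapsTopDown in the commit above paints an unnormalized Gaussian around each joint so the target peaks at exactly 1. A standalone sketch of that kernel (same formula as the transform, toy sigma, detached from the reader pipeline):

    import numpy as np

    sigma = 2
    tmp_size = sigma * 3
    size = 2 * tmp_size + 1          # 13x13 patch, as in the transform above

    x = np.arange(0, size, 1, np.float32)
    y = x[:, np.newaxis]
    x0 = y0 = size // 2
    # unnormalized Gaussian: no 1/(2*pi*sigma^2) factor, so the centre value is 1
    g = np.exp(-((x - x0) ** 2 + (y - y0) ** 2) / (2 * sigma ** 2))

    print(g.shape)                             # (13, 13)
    print(g[y0, x0])                           # 1.0 at the joint location
    print(round(float(g[y0, x0 + sigma]), 4))  # ~0.6065 one sigma from the centre

The patch is then clipped against the heatmap bounds (the ul/br arithmetic in the diff) before being copied into the joint's target channel.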
499,333 |
08.05.2021 10:22:13
| -28,800 |
c7c0568fa8df67695c38f9a27e25b069af96f12d
|
add ags module
|
[
{
"change_type": "MODIFY",
"old_path": "ppdet/modeling/heads/ttf_head.py",
"new_path": "ppdet/modeling/heads/ttf_head.py",
"diff": "@@ -200,6 +200,9 @@ class TTFHead(nn.Layer):\nlite_head(bool): whether use lite version. False by default.\nnorm_type (string): norm type, 'sync_bn', 'bn', 'gn' are optional.\nbn by default\n+ ags_module(bool): whether use AGS module to reweight location feature.\n+ false by default.\n+\n\"\"\"\n__shared__ = ['num_classes', 'down_ratio', 'norm_type']\n@@ -218,7 +221,8 @@ class TTFHead(nn.Layer):\ndown_ratio=4,\ndcn_head=False,\nlite_head=False,\n- norm_type='bn'):\n+ norm_type='bn',\n+ ags_module=False):\nsuper(TTFHead, self).__init__()\nself.in_channels = in_channels\nself.hm_head = HMHead(in_channels, hm_head_planes, num_classes,\n@@ -230,6 +234,7 @@ class TTFHead(nn.Layer):\nself.wh_offset_base = wh_offset_base\nself.down_ratio = down_ratio\n+ self.ags_module = ags_module\n@classmethod\ndef from_config(cls, cfg, input_shape):\n@@ -253,6 +258,12 @@ class TTFHead(nn.Layer):\ntarget = paddle.gather_nd(target, index)\nreturn pred, target, weight\n+ def filter_loc_by_weight(self, score, weight):\n+ index = paddle.nonzero(weight > 0)\n+ index.stop_gradient = True\n+ score = paddle.gather_nd(score, index)\n+ return score\n+\ndef get_loss(self, pred_hm, pred_wh, target_hm, box_target, target_weight):\npred_hm = paddle.clip(F.sigmoid(pred_hm), 1e-4, 1 - 1e-4)\nhm_loss = self.hm_loss(pred_hm, target_hm)\n@@ -274,10 +285,24 @@ class TTFHead(nn.Layer):\nboxes = paddle.transpose(box_target, [0, 2, 3, 1])\nboxes.stop_gradient = True\n+ if self.ags_module:\n+ pred_hm_max = paddle.max(pred_hm, axis=1, keepdim=True)\n+ pred_hm_max_softmax = F.softmax(pred_hm_max, axis=1)\n+ pred_hm_max_softmax = paddle.transpose(pred_hm_max_softmax,\n+ [0, 2, 3, 1])\n+ pred_hm_max_softmax = self.filter_loc_by_weight(pred_hm_max_softmax,\n+ mask)\n+ else:\n+ pred_hm_max_softmax = None\n+\npred_boxes, boxes, mask = self.filter_box_by_weight(pred_boxes, boxes,\nmask)\nmask.stop_gradient = True\n- wh_loss = self.wh_loss(pred_boxes, boxes, iou_weight=mask.unsqueeze(1))\n+ wh_loss = self.wh_loss(\n+ pred_boxes,\n+ boxes,\n+ iou_weight=mask.unsqueeze(1),\n+ loc_reweight=pred_hm_max_softmax)\nwh_loss = wh_loss / avg_factor\nttf_loss = {'hm_loss': hm_loss, 'wh_loss': wh_loss}\n"
},
{
"change_type": "MODIFY",
"old_path": "ppdet/modeling/losses/iou_loss.py",
"new_path": "ppdet/modeling/losses/iou_loss.py",
"diff": "@@ -110,7 +110,7 @@ class GIoULoss(object):\nreturn iou, overlap, union\n- def __call__(self, pbox, gbox, iou_weight=1.):\n+ def __call__(self, pbox, gbox, iou_weight=1., loc_reweight=None):\nx1, y1, x2, y2 = paddle.split(pbox, num_or_sections=4, axis=-1)\nx1g, y1g, x2g, y2g = paddle.split(gbox, num_or_sections=4, axis=-1)\nbox1 = [x1, y1, x2, y2]\n@@ -123,6 +123,12 @@ class GIoULoss(object):\narea_c = (xc2 - xc1) * (yc2 - yc1) + self.eps\nmiou = iou - ((area_c - union) / area_c)\n+ if loc_reweight is not None:\n+ loc_reweight = paddle.reshape(loc_reweight, shape=(-1, 1))\n+ loc_thresh = 0.9\n+ giou = 1 - (1 - loc_thresh\n+ ) * miou - loc_thresh * miou * loc_reweight\n+ else:\ngiou = 1 - miou\nif self.reduction == 'none':\nloss = giou\n"
},
{
"change_type": "MODIFY",
"old_path": "static/ppdet/modeling/anchor_heads/ttf_head.py",
"new_path": "static/ppdet/modeling/anchor_heads/ttf_head.py",
"diff": "@@ -67,6 +67,8 @@ class TTFHead(object):\nkeep_prob(float): keep_prob parameter for drop_block. 0.9 by default.\nfusion_method (string): Method to fusion upsample and lateral branch.\n'add' and 'concat' are optional, add by default\n+ ags_module(bool): whether use AGS module to reweight location feature.\n+ false by default.\n\"\"\"\n__inject__ = ['wh_loss']\n@@ -93,7 +95,8 @@ class TTFHead(object):\ndrop_block=False,\nblock_size=3,\nkeep_prob=0.9,\n- fusion_method='add'):\n+ fusion_method='add',\n+ ags_module=False):\nsuper(TTFHead, self).__init__()\nself.head_conv = head_conv\nself.num_classes = num_classes\n@@ -119,6 +122,7 @@ class TTFHead(object):\nself.block_size = block_size\nself.keep_prob = keep_prob\nself.fusion_method = fusion_method\n+ self.ags_module = ags_module\ndef shortcut(self, x, out_c, layer_num, kernel_size=3, padding=1,\nname=None):\n@@ -359,6 +363,12 @@ class TTFHead(object):\ntarget = fluid.layers.gather_nd(target, index)\nreturn pred, target, weight\n+ def filter_loc_by_weight(self, score, weight):\n+ index = fluid.layers.where(weight > 0)\n+ index.stop_gradient = True\n+ score = fluid.layers.gather_nd(score, index)\n+ return score\n+\ndef get_loss(self, pred_hm, pred_wh, target_hm, box_target, target_weight):\ntry:\npred_hm = paddle.clip(fluid.layers.sigmoid(pred_hm), 1e-4, 1 - 1e-4)\n@@ -387,11 +397,25 @@ class TTFHead(object):\nboxes = fluid.layers.transpose(box_target, [0, 2, 3, 1])\nboxes.stop_gradient = True\n+ if self.ags_module:\n+ pred_hm_max = fluid.layers.reduce_max(pred_hm, dim=1, keep_dim=True)\n+ pred_hm_max_softmax = fluid.layers.softmax(pred_hm_max, axis=1)\n+ pred_hm_max_softmax = fluid.layers.transpose(pred_hm_max_softmax,\n+ [0, 2, 3, 1])\n+ pred_hm_max_softmax = self.filter_loc_by_weight(pred_hm_max_softmax,\n+ mask)\n+ else:\n+ pred_hm_max_softmax = None\n+\npred_boxes, boxes, mask = self.filter_box_by_weight(pred_boxes, boxes,\nmask)\nmask.stop_gradient = True\nwh_loss = self.wh_loss(\n- pred_boxes, boxes, outside_weight=mask, use_transform=False)\n+ pred_boxes,\n+ boxes,\n+ loc_reweight=pred_hm_max_softmax,\n+ outside_weight=mask,\n+ use_transform=False)\nwh_loss = wh_loss / avg_factor\nttf_loss = {'hm_loss': hm_loss, 'wh_loss': wh_loss}\n"
},
{
"change_type": "MODIFY",
"old_path": "static/ppdet/modeling/losses/giou_loss.py",
"new_path": "static/ppdet/modeling/losses/giou_loss.py",
"diff": "@@ -89,6 +89,7 @@ class GiouLoss(object):\ninside_weight=None,\noutside_weight=None,\nbbox_reg_weight=[0.1, 0.1, 0.2, 0.2],\n+ loc_reweight=None,\nuse_transform=True):\neps = 1.e-10\nif use_transform:\n@@ -134,11 +135,19 @@ class GiouLoss(object):\nelif outside_weight is not None:\niou_weights = outside_weight\n+ if loc_reweight is not None:\n+ loc_reweight = fluid.layers.reshape(loc_reweight, shape=(-1, 1))\n+ loc_thresh = 0.9\n+ giou = 1 - (1 - loc_thresh\n+ ) * miouk - loc_thresh * miouk * loc_reweight\n+ else:\n+ giou = 1 - miouk\n+\nif self.do_average:\n- miouk = fluid.layers.reduce_mean((1 - miouk) * iou_weights)\n+ miouk = fluid.layers.reduce_mean(giou * iou_weights)\nelse:\niou_distance = fluid.layers.elementwise_mul(\n- 1 - miouk, iou_weights, axis=0)\n+ giou, iou_weights, axis=0)\nmiouk = fluid.layers.reduce_sum(iou_distance)\nif self.use_class_weight:\n"
}
] |
Python
|
Apache License 2.0
|
paddlepaddle/paddledetection
|
add ags module (#2885)
|
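The AGS change above keeps loc_thresh = 0.9 and blends the plain GIoU term with a location-reweighted copy: giou = 1 - (1 - t) * miou - t * miou * w. A small numeric sketch (hypothetical miou and weight values, not model output) makes the interpolation explicit:

    import numpy as np

    loc_thresh = 0.9
    miou = np.array([0.8, 0.5, 0.2])           # toy GIoU-style similarity per box
    loc_reweight = np.array([1.0, 0.5, 0.0])   # toy AGS weights from the heatmap softmax

    # plain GIoU loss would be 1 - miou; AGS lets the weight gate 90% of the miou credit
    giou_loss = 1 - (1 - loc_thresh) * miou - loc_thresh * miou * loc_reweight

    print(giou_loss)   # -> 0.2, 0.725, 0.98

With w = 1 this collapses to the ordinary 1 - miou; with w = 0 only 10% of the similarity is credited, so locations the heatmap attends to weakly are penalised harder.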
499,333 |
08.05.2021 13:57:06
| -28,800 |
00775c89df7f69aa28daf1acef7a49f02eb250f6
|
fix loading rcnn pretrain
|
[
{
"change_type": "MODIFY",
"old_path": "ppdet/utils/checkpoint.py",
"new_path": "ppdet/utils/checkpoint.py",
"diff": "@@ -159,6 +159,16 @@ def load_pretrain_weight(model, pretrain_weight):\nparam_state_dict = paddle.load(weights_path)\nignore_weights = set()\n+ # hack: fit for faster rcnn. Pretrain weights contain prefix of 'backbone'\n+ # while res5 module is located in bbox_head.head. Replace the prefix of\n+ # res5 with 'bbox_head.head' to load pretrain weights correctly.\n+ for k in param_state_dict.keys():\n+ if 'backbone.res5' in k:\n+ new_k = k.replace('backbone', 'bbox_head.head')\n+ if new_k in model_dict.keys():\n+ value = param_state_dict.pop(k)\n+ param_state_dict[new_k] = value\n+\nfor name, weight in param_state_dict.items():\nif name in model_dict.keys():\nif list(weight.shape) != list(model_dict[name].shape):\n"
}
] |
Python
|
Apache License 2.0
|
paddlepaddle/paddledetection
|
fix loading rcnn pretrain (#2897)
|
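The checkpoint fix above renames pretrain keys prefixed with backbone.res5 to bbox_head.head so they line up with where res5 actually lives in the two-stage head. A toy dict (made-up keys, not the real state dict) shows the remapping:

    param_state_dict = {
        "backbone.res5.branch2a.weights": 1,
        "backbone.res2.branch2a.weights": 2,
    }
    model_keys = {"bbox_head.head.res5.branch2a.weights",
                  "backbone.res2.branch2a.weights"}

    for k in list(param_state_dict.keys()):   # list() so popping while iterating is safe
        if "backbone.res5" in k:
            new_k = k.replace("backbone", "bbox_head.head")
            if new_k in model_keys:
                param_state_dict[new_k] = param_state_dict.pop(k)

    print(sorted(param_state_dict))
    # ['backbone.res2.branch2a.weights', 'bbox_head.head.res5.branch2a.weights']

Only keys that also exist in the model's state dict are moved; everything else falls through to the usual shape check further down.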
499,348 |
08.05.2021 15:22:22
| -28,800 |
f331f0a102b7f981866ae42daa4a140b59545cf7
|
add configs of hrhrnet&hrnet
|
[
{
"change_type": "ADD",
"old_path": null,
"new_path": "configs/keypoint/higherhrnet/higherhrnet_hrnet_w32_512.yml",
"diff": "+use_gpu: true\n+log_iter: 1\n+save_dir: output\n+snapshot_epoch: 10\n+weights: output/higherhrnet_hrnet_v1_512/290\n+epoch: 300\n+num_joints: &num_joints 17\n+flip_perm: &flip_perm [0, 2, 1, 4, 3, 6, 5, 8, 7, 10, 9, 12, 11, 14, 13, 16, 15]\n+input_size: &input_size 512\n+hm_size: &hm_size 128\n+hm_size_2x: &hm_size_2x 256\n+max_people: &max_people 30\n+metric: COCO\n+IouType: keypoints\n+num_classes: 1\n+\n+\n+#####model\n+architecture: HigherHRNet\n+pretrain_weights: https://paddledet.bj.bcebos.com/models/pretrained/Trunc_HRNet_W32_C_pretrained.pdparams\n+\n+HigherHRNet:\n+ backbone: HRNet\n+ hrhrnet_head: HrHRNetHead\n+ post_process: HrHRNetPostProcess\n+ flip_perm: *flip_perm\n+ eval_flip: true\n+\n+HRNet:\n+ width: &width 32\n+ freeze_at: -1\n+ freeze_norm: false\n+ return_idx: [0]\n+\n+HrHRNetHead:\n+ num_joints: *num_joints\n+ width: *width\n+ loss: HrHRNetLoss\n+ swahr: false\n+\n+HrHRNetLoss:\n+ num_joints: *num_joints\n+ swahr: false\n+\n+\n+#####optimizer\n+LearningRate:\n+ base_lr: 0.001\n+ schedulers:\n+ - !PiecewiseDecay\n+ milestones: [200, 260]\n+ gamma: 0.1\n+ - !LinearWarmup\n+ start_factor: 0.001\n+ steps: 1000\n+\n+OptimizerBuilder:\n+ optimizer:\n+ type: Adam\n+ regularizer:\n+\n+\n+#####data\n+TrainDataset:\n+ !KeypointBottomUpCocoDataset\n+ image_dir: train2017\n+ anno_path: annotations/person_keypoints_train2017.json\n+ dataset_dir: dataset/coco\n+ num_joints: *num_joints\n+\n+EvalDataset:\n+ !KeypointBottomUpCocoDataset\n+ image_dir: val2017\n+ anno_path: annotations/person_keypoints_val2017.json\n+ dataset_dir: dataset/coco\n+ num_joints: *num_joints\n+ test_mode: true\n+\n+TestDataset:\n+ !ImageFolder\n+ anno_path: dataset/coco/keypoint_imagelist.txt\n+\n+worker_num: 8\n+global_mean: &global_mean [0.485, 0.456, 0.406]\n+global_std: &global_std [0.229, 0.224, 0.225]\n+TrainReader:\n+ sample_transforms:\n+ - RandomAffine:\n+ max_degree: 30\n+ scale: [0.75, 1.5]\n+ max_shift: 0.2\n+ trainsize: *input_size\n+ hmsize: [*hm_size, *hm_size_2x]\n+ - KeyPointFlip:\n+ flip_prob: 0.5\n+ flip_permutation: *flip_perm\n+ hmsize: [*hm_size, *hm_size_2x]\n+ - ToHeatmaps:\n+ num_joints: *num_joints\n+ hmsize: [*hm_size, *hm_size_2x]\n+ sigma: 2\n+ - TagGenerate:\n+ num_joints: *num_joints\n+ max_people: *max_people\n+ - NormalizePermute:\n+ mean: *global_mean\n+ std: *global_std\n+ batch_size: 20\n+ shuffle: true\n+ drop_last: true\n+ use_shared_memory: true\n+\n+EvalReader:\n+ sample_transforms:\n+ - EvalAffine:\n+ size: *input_size\n+ - NormalizeImage:\n+ mean: *global_mean\n+ std: *global_std\n+ is_scale: true\n+ - Permute: {}\n+ batch_size: 1\n+ drop_empty: false\n+\n+TestReader:\n+ sample_transforms:\n+ - Decode: {}\n+ - EvalAffine:\n+ size: *input_size\n+ - NormalizeImage:\n+ mean: *global_mean\n+ std: *global_std\n+ is_scale: true\n+ - Permute: {}\n+ batch_size: 1\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "configs/keypoint/higherhrnet/higherhrnet_hrnet_w32_512_swahr.yml",
"diff": "+use_gpu: true\n+log_iter: 10\n+save_dir: output\n+snapshot_epoch: 10\n+weights: output/higherhrnet_hrnet_v1_512/model_final\n+epoch: 300\n+num_joints: &num_joints 17\n+flip_perm: &flip_perm [0, 2, 1, 4, 3, 6, 5, 8, 7, 10, 9, 12, 11, 14, 13, 16, 15]\n+input_size: &input_size 512\n+hm_size: &hm_size 128\n+hm_size_2x: &hm_size_2x 256\n+max_people: &max_people 30\n+metric: COCO\n+IouType: keypoints\n+num_classes: 1\n+\n+\n+#####model\n+architecture: HigherHRNet\n+pretrain_weights: https://paddledet.bj.bcebos.com/models/pretrained/Trunc_HRNet_W32_C_pretrained.pdparams\n+\n+HigherHRNet:\n+ backbone: HRNet\n+ hrhrnet_head: HrHRNetHead\n+ post_process: HrHRNetPostProcess\n+ flip_perm: *flip_perm\n+ eval_flip: true\n+\n+HRNet:\n+ width: &width 32\n+ freeze_at: -1\n+ freeze_norm: false\n+ return_idx: [0]\n+\n+HrHRNetHead:\n+ num_joints: *num_joints\n+ width: *width\n+ loss: HrHRNetLoss\n+ swahr: true\n+\n+HrHRNetLoss:\n+ num_joints: *num_joints\n+ swahr: true\n+\n+\n+#####optimizer\n+LearningRate:\n+ base_lr: 0.001\n+ schedulers:\n+ - !PiecewiseDecay\n+ milestones: [200, 260]\n+ gamma: 0.1\n+ - !LinearWarmup\n+ start_factor: 0.001\n+ steps: 1000\n+\n+OptimizerBuilder:\n+ optimizer:\n+ type: Adam\n+ regularizer:\n+\n+\n+#####data\n+TrainDataset:\n+ !KeypointBottomUpCocoDataset\n+ image_dir: train2017\n+ anno_path: annotations/person_keypoints_train2017.json\n+ dataset_dir: dataset/coco\n+ num_joints: *num_joints\n+\n+EvalDataset:\n+ !KeypointBottomUpCocoDataset\n+ image_dir: val2017\n+ anno_path: annotations/person_keypoints_val2017.json\n+ dataset_dir: dataset/coco\n+ num_joints: *num_joints\n+ test_mode: true\n+\n+TestDataset:\n+ !ImageFolder\n+ anno_path: dataset/coco/keypoint_imagelist.txt\n+\n+worker_num: 8\n+global_mean: &global_mean [0.485, 0.456, 0.406]\n+global_std: &global_std [0.229, 0.224, 0.225]\n+TrainReader:\n+ sample_transforms:\n+ - RandomAffine:\n+ max_degree: 30\n+ scale: [0.75, 1.5]\n+ max_shift: 0.2\n+ trainsize: *input_size\n+ hmsize: [*hm_size, *hm_size_2x]\n+ - KeyPointFlip:\n+ flip_prob: 0.5\n+ flip_permutation: *flip_perm\n+ hmsize: [*hm_size, *hm_size_2x]\n+ - ToHeatmaps:\n+ num_joints: *num_joints\n+ hmsize: [*hm_size, *hm_size_2x]\n+ sigma: 2\n+ - TagGenerate:\n+ num_joints: *num_joints\n+ max_people: *max_people\n+ - NormalizePermute:\n+ mean: *global_mean\n+ std: *global_std\n+ batch_size: 16\n+ shuffle: true\n+ drop_last: true\n+ use_shared_memory: true\n+\n+EvalReader:\n+ sample_transforms:\n+ - EvalAffine:\n+ size: *input_size\n+ - NormalizeImage:\n+ mean: *global_mean\n+ std: *global_std\n+ is_scale: true\n+ - Permute: {}\n+ batch_size: 1\n+ drop_empty: false\n+\n+TestReader:\n+ sample_transforms:\n+ - Decode: {}\n+ - EvalAffine:\n+ size: *input_size\n+ - NormalizeImage:\n+ mean: *global_mean\n+ std: *global_std\n+ is_scale: true\n+ - Permute: {}\n+ batch_size: 1\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "configs/keypoint/hrnet/hrnet_coco_256x192.yml",
"diff": "+use_gpu: true\n+log_iter: 5\n+save_dir: output\n+snapshot_epoch: 10\n+weights: output/hrnet_coco_256x192/50\n+epoch: 210\n+num_joints: &num_joints 17\n+pixel_std: &pixel_std 200\n+metric: KeyPointTopDownCOCOEval\n+num_classes: 1\n+train_height: &train_height 256\n+train_width: &train_width 192\n+trainsize: &trainsize [*train_width, *train_height]\n+hmsize: &hmsize [48, 64]\n+flip_perm: &flip_perm [[1, 2], [3, 4], [5, 6], [7, 8], [9, 10], [11, 12], [13, 14], [15, 16]]\n+\n+\n+#####model\n+architecture: TopDownHRNet\n+pretrain_weights: https://paddledet.bj.bcebos.com/models/pretrained/Trunc_HRNet_W32_C_pretrained.pdparams\n+\n+TopDownHRNet:\n+ backbone: HRNet\n+ post_process: HRNetPostProcess\n+ flip_perm: *flip_perm\n+ num_joints: *num_joints\n+ width: &width 32\n+ loss: KeyPointMSELoss\n+\n+HRNet:\n+ width: *width\n+ freeze_at: -1\n+ freeze_norm: false\n+ return_idx: [0]\n+\n+KeyPointMSELoss:\n+ use_target_weight: true\n+\n+\n+#####optimizer\n+LearningRate:\n+ base_lr: 0.0005\n+ schedulers:\n+ - !PiecewiseDecay\n+ milestones: [170, 200]\n+ gamma: 0.1\n+ - !LinearWarmup\n+ start_factor: 0.001\n+ steps: 1000\n+\n+OptimizerBuilder:\n+ optimizer:\n+ type: Adam\n+ regularizer:\n+ factor: 0.0\n+ type: L2\n+\n+\n+#####data\n+TrainDataset:\n+ !KeypointTopDownCocoDataset\n+ image_dir: train2017\n+ anno_path: annotations/person_keypoints_train2017.json\n+ dataset_dir: dataset/coco\n+ num_joints: *num_joints\n+ trainsize: *trainsize\n+ pixel_std: *pixel_std\n+ use_gt_bbox: True\n+\n+\n+EvalDataset:\n+ !KeypointTopDownCocoDataset\n+ image_dir: val2017\n+ anno_path: annotations/person_keypoints_val2017.json\n+ dataset_dir: dataset/coco\n+ bbox_file: person_detection_results/COCO_val2017_detections_AP_H_56_person.json\n+ num_joints: *num_joints\n+ trainsize: *trainsize\n+ pixel_std: *pixel_std\n+ use_gt_bbox: True\n+ image_thre: 0.0\n+\n+\n+TestDataset:\n+ !ImageFolder\n+ anno_path: dataset/coco/keypoint_imagelist.txt\n+\n+worker_num: 2\n+global_mean: &global_mean [0.485, 0.456, 0.406]\n+global_std: &global_std [0.229, 0.224, 0.225]\n+TrainReader:\n+ sample_transforms:\n+ - RandomFlipHalfBodyTransform:\n+ scale: 0.5\n+ rot: 40\n+ num_joints_half_body: 8\n+ prob_half_body: 0.3\n+ pixel_std: *pixel_std\n+ trainsize: *trainsize\n+ upper_body_ids: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]\n+ flip_pairs: *flip_perm\n+ - TopDownAffine:\n+ trainsize: *trainsize\n+ - ToHeatmapsTopDown:\n+ hmsize: *hmsize\n+ sigma: 2\n+ batch_transforms:\n+ - NormalizeImage:\n+ mean: *global_mean\n+ std: *global_std\n+ is_scale: true\n+ - Permute: {}\n+ batch_size: 64\n+ shuffle: true\n+ drop_last: false\n+\n+EvalReader:\n+ sample_transforms:\n+ - TopDownAffine:\n+ trainsize: *trainsize\n+ - ToHeatmapsTopDown:\n+ hmsize: *hmsize\n+ sigma: 2\n+ batch_transforms:\n+ - NormalizeImage:\n+ mean: *global_mean\n+ std: *global_std\n+ is_scale: true\n+ - Permute: {}\n+ batch_size: 16\n+ drop_empty: false\n+\n+TestReader:\n+ sample_transforms:\n+ - Decode: {}\n+ - TopDownEvalAffine:\n+ trainsize: *trainsize\n+ - NormalizeImage:\n+ mean: *global_mean\n+ std: *global_std\n+ is_scale: true\n+ - Permute: {}\n+ batch_size: 1\n"
}
] |
Python
|
Apache License 2.0
|
paddlepaddle/paddledetection
|
add configs of hrhrnet&hrnet (#2817)
|
499,304 |
08.05.2021 17:02:03
| -28,800 |
7ebb6f2945bcd82fd215e295313586e42cfbb4cf
|
add static lite demo
|
[
{
"change_type": "ADD",
"old_path": null,
"new_path": "static/deploy/lite/Makefile",
"diff": "+ARM_ABI = arm8\n+export ARM_ABI\n+\n+include ../Makefile.def\n+\n+LITE_ROOT=../../../\n+\n+THIRD_PARTY_DIR=${LITE_ROOT}/third_party\n+\n+OPENCV_VERSION=opencv4.1.0\n+\n+OPENCV_LIBS = ${THIRD_PARTY_DIR}/${OPENCV_VERSION}/arm64-v8a/libs/libopencv_imgcodecs.a \\\n+ ${THIRD_PARTY_DIR}/${OPENCV_VERSION}/arm64-v8a/libs/libopencv_imgproc.a \\\n+ ${THIRD_PARTY_DIR}/${OPENCV_VERSION}/arm64-v8a/libs/libopencv_core.a \\\n+ ${THIRD_PARTY_DIR}/${OPENCV_VERSION}/arm64-v8a/3rdparty/libs/libtegra_hal.a \\\n+ ${THIRD_PARTY_DIR}/${OPENCV_VERSION}/arm64-v8a/3rdparty/libs/liblibjpeg-turbo.a \\\n+ ${THIRD_PARTY_DIR}/${OPENCV_VERSION}/arm64-v8a/3rdparty/libs/liblibwebp.a \\\n+ ${THIRD_PARTY_DIR}/${OPENCV_VERSION}/arm64-v8a/3rdparty/libs/liblibpng.a \\\n+ ${THIRD_PARTY_DIR}/${OPENCV_VERSION}/arm64-v8a/3rdparty/libs/liblibjasper.a \\\n+ ${THIRD_PARTY_DIR}/${OPENCV_VERSION}/arm64-v8a/3rdparty/libs/liblibtiff.a \\\n+ ${THIRD_PARTY_DIR}/${OPENCV_VERSION}/arm64-v8a/3rdparty/libs/libIlmImf.a \\\n+ ${THIRD_PARTY_DIR}/${OPENCV_VERSION}/arm64-v8a/3rdparty/libs/libtbb.a \\\n+ ${THIRD_PARTY_DIR}/${OPENCV_VERSION}/arm64-v8a/3rdparty/libs/libcpufeatures.a\n+\n+OPENCV_INCLUDE = -I../../../third_party/${OPENCV_VERSION}/arm64-v8a/include\n+\n+CXX_INCLUDES = $(INCLUDES) ${OPENCV_INCLUDE} -I$(LITE_ROOT)/cxx/include\n+\n+CXX_LIBS = ${OPENCV_LIBS} -L$(LITE_ROOT)/cxx/lib/ -lpaddle_light_api_shared $(SYSTEM_LIBS)\n+\n+###############################################################\n+# How to use one of static libaray: #\n+# `libpaddle_api_full_bundled.a` #\n+# `libpaddle_api_light_bundled.a` #\n+###############################################################\n+# Note: default use lite's shared library. #\n+###############################################################\n+# 1. Comment above line using `libpaddle_light_api_shared.so`\n+# 2. Undo comment below line using `libpaddle_api_light_bundled.a`\n+\n+#CXX_LIBS = $(LITE_ROOT)/cxx/lib/libpaddle_api_light_bundled.a $(SYSTEM_LIBS)\n+\n+detect_system: fetch_opencv detect_system.o\n+ $(CC) $(SYSROOT_LINK) $(CXXFLAGS_LINK) detect_system.o -o detect_system $(CXX_LIBS) $(LDFLAGS)\n+\n+detect_system.o: run_detection.cc\n+ $(CC) $(SYSROOT_COMPLILE) $(CXX_DEFINES) $(CXX_INCLUDES) $(CXX_FLAGS) -o detect_system.o -c run_detection.cc\n+\n+fetch_opencv:\n+ @ test -d ${THIRD_PARTY_DIR} || mkdir ${THIRD_PARTY_DIR}\n+ @ test -e ${THIRD_PARTY_DIR}/${OPENCV_VERSION}.tar.gz || \\\n+ (echo \"fetch opencv libs\" && \\\n+ wget -P ${THIRD_PARTY_DIR} https://paddle-inference-dist.bj.bcebos.com/${OPENCV_VERSION}.tar.gz)\n+ @ test -d ${THIRD_PARTY_DIR}/${OPENCV_VERSION} || \\\n+ tar -zxvf ${THIRD_PARTY_DIR}/${OPENCV_VERSION}.tar.gz -C ${THIRD_PARTY_DIR}\n+\n+\n+.PHONY: clean\n+clean:\n+ rm -f detect_system.o\n+ rm -f detect_system\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "static/deploy/lite/coco_label_list.txt",
"diff": "+person\n+bicycle\n+car\n+motorcycle\n+airplane\n+bus\n+train\n+truck\n+boat\n+traffic light\n+fire hydrant\n+stop sign\n+parking meter\n+bench\n+bird\n+cat\n+dog\n+horse\n+sheep\n+cow\n+elephant\n+bear\n+zebra\n+giraffe\n+backpack\n+umbrella\n+handbag\n+tie\n+suitcase\n+frisbee\n+skis\n+snowboard\n+sports ball\n+kite\n+baseball bat\n+baseball glove\n+skateboard\n+surfboard\n+tennis racket\n+bottle\n+wine glass\n+cup\n+fork\n+knife\n+spoon\n+bowl\n+banana\n+apple\n+sandwich\n+orange\n+broccoli\n+carrot\n+hot dog\n+pizza\n+donut\n+cake\n+chair\n+couch\n+potted plant\n+bed\n+dining table\n+toilet\n+tv\n+laptop\n+mouse\n+remote\n+keyboard\n+cell phone\n+microwave\n+oven\n+toaster\n+sink\n+refrigerator\n+book\n+clock\n+vase\n+scissors\n+teddy bear\n+hair drier\n+toothbrush\n\\ No newline at end of file\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "static/deploy/lite/config_ppyolo_tiny.txt",
"diff": "+model_file ./ppyolo_tiny.nb\n+label_path ./coco_label_list.txt\n+num_threads 1\n+precision fp32\n+enable_benchmark 1\n+arch YOLO\n+image_shape 3,320,320\n+Resize 320,320\n+keep_ratio False\n+mean 0.485,0.456,0.406\n+std 0.229,0.224,0.225\n+PadStride 0\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "static/deploy/lite/run_detection.cc",
"diff": "+// Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.\n+//\n+// Licensed under the Apache License, Version 2.0 (the \"License\");\n+// you may not use this file except in compliance with the License.\n+// You may obtain a copy of the License at\n+//\n+// http://www.apache.org/licenses/LICENSE-2.0\n+//\n+// Unless required by applicable law or agreed to in writing, software\n+// distributed under the License is distributed on an \"AS IS\" BASIS,\n+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+// See the License for the specific language governing permissions and\n+// limitations under the License.\n+\n+#include <fstream>\n+#include <iostream>\n+#include <vector>\n+#include <chrono>\n+#include <numeric>\n+#include \"opencv2/core.hpp\"\n+#include \"opencv2/imgcodecs.hpp\"\n+#include \"opencv2/imgproc.hpp\"\n+#include \"paddle_api.h\" // NOLINT\n+\n+\n+using namespace paddle::lite_api; // NOLINT\n+using namespace std;\n+\n+struct Object {\n+ cv::Rect rec;\n+ int class_id;\n+ float prob;\n+};\n+\n+// Object for storing all preprocessed data\n+struct ImageBlob {\n+ // image width and height\n+ std::vector<float> im_shape_;\n+ // Buffer for image data after preprocessing\n+ const float* im_data_;\n+ std::vector<float> mean_;\n+ std::vector<float> scale_;\n+};\n+\n+void PrintBenchmarkLog(std::vector<double> det_time,\n+ std::map<std::string, std::string> config,\n+ int img_num) {\n+ std::cout << \"----------------- Config info ------------------\" << std::endl;\n+ std::cout << \"runtime_device: armv8\" << std::endl;\n+ std::cout << \"precision: \" << config.at(\"precision\") << std::endl;\n+\n+ std::cout << \"num_threads: \" << config.at(\"num_threads\") << std::endl;\n+ std::cout << \"---------------- Data info ---------------------\" << std::endl;\n+ std::cout << \"batch_size: \" << 1 << std::endl;\n+ std::cout << \"---------------- Model info --------------------\" << std::endl;\n+ std::cout << \"Model_name: \" << config.at(\"model_file\") << std::endl;\n+ std::cout << \"---------------- Perf info ---------------------\" << std::endl;\n+ std::cout << \"Total number of predicted data: \" << img_num\n+ << \" and total time spent(s): \"\n+ << std::accumulate(det_time.begin(), det_time.end(), 0) << std::endl;\n+ std::cout << \"preproce_time(ms): \" << det_time[0] / img_num\n+ << \", inference_time(ms): \" << det_time[1] / img_num\n+ << \", postprocess_time(ms): \" << det_time[2] << std::endl;\n+}\n+\n+std::vector<std::string> LoadLabels(const std::string &path) {\n+ std::ifstream file;\n+ std::vector<std::string> labels;\n+ file.open(path);\n+ while (file) {\n+ std::string line;\n+ std::getline(file, line);\n+ std::string::size_type pos = line.find(\" \");\n+ if (pos != std::string::npos) {\n+ line = line.substr(pos);\n+ }\n+ labels.push_back(line);\n+ }\n+ file.clear();\n+ file.close();\n+ return labels;\n+}\n+\n+std::vector<std::string> ReadDict(std::string path) {\n+ std::ifstream in(path);\n+ std::string filename;\n+ std::string line;\n+ std::vector<std::string> m_vec;\n+ if (in) {\n+ while (getline(in, line)) {\n+ m_vec.push_back(line);\n+ }\n+ } else {\n+ std::cout << \"no such file\" << std::endl;\n+ }\n+ return m_vec;\n+}\n+\n+std::vector<std::string> split(const std::string &str,\n+ const std::string &delim) {\n+ std::vector<std::string> res;\n+ if (\"\" == str)\n+ return res;\n+ char *strs = new char[str.length() + 1];\n+ std::strcpy(strs, str.c_str());\n+\n+ char *d = new char[delim.length() + 1];\n+ std::strcpy(d, 
delim.c_str());\n+\n+ char *p = std::strtok(strs, d);\n+ while (p) {\n+ string s = p;\n+ res.push_back(s);\n+ p = std::strtok(NULL, d);\n+ }\n+\n+ return res;\n+}\n+\n+std::map<std::string, std::string> LoadConfigTxt(std::string config_path) {\n+ auto config = ReadDict(config_path);\n+\n+ std::map<std::string, std::string> dict;\n+ for (int i = 0; i < config.size(); i++) {\n+ std::vector<std::string> res = split(config[i], \" \");\n+ dict[res[0]] = res[1];\n+ }\n+ return dict;\n+}\n+\n+void PrintConfig(const std::map<std::string, std::string> &config) {\n+ std::cout << \"=======PaddleDetection lite demo config======\" << std::endl;\n+ for (auto iter = config.begin(); iter != config.end(); iter++) {\n+ std::cout << iter->first << \" : \" << iter->second << std::endl;\n+ }\n+ std::cout << \"===End of PaddleDetection lite demo config===\" << std::endl;\n+}\n+\n+\n+// fill tensor with mean and scale and trans layout: nhwc -> nchw, neon speed up\n+void neon_mean_scale(const float* din,\n+ float* dout,\n+ int size,\n+ const std::vector<float> mean,\n+ const std::vector<float> scale) {\n+ if (mean.size() != 3 || scale.size() != 3) {\n+ std::cerr << \"[ERROR] mean or scale size must equal to 3\\n\";\n+ exit(1);\n+ }\n+ float32x4_t vmean0 = vdupq_n_f32(mean[0]);\n+ float32x4_t vmean1 = vdupq_n_f32(mean[1]);\n+ float32x4_t vmean2 = vdupq_n_f32(mean[2]);\n+ float32x4_t vscale0 = vdupq_n_f32(1.f / scale[0]);\n+ float32x4_t vscale1 = vdupq_n_f32(1.f / scale[1]);\n+ float32x4_t vscale2 = vdupq_n_f32(1.f / scale[2]);\n+ float* dout_c0 = dout;\n+ float* dout_c1 = dout + size;\n+ float* dout_c2 = dout + size * 2;\n+ int i = 0;\n+ for (; i < size - 3; i += 4) {\n+ float32x4x3_t vin3 = vld3q_f32(din);\n+ float32x4_t vsub0 = vsubq_f32(vin3.val[0], vmean0);\n+ float32x4_t vsub1 = vsubq_f32(vin3.val[1], vmean1);\n+ float32x4_t vsub2 = vsubq_f32(vin3.val[2], vmean2);\n+ float32x4_t vs0 = vmulq_f32(vsub0, vscale0);\n+ float32x4_t vs1 = vmulq_f32(vsub1, vscale1);\n+ float32x4_t vs2 = vmulq_f32(vsub2, vscale2);\n+ vst1q_f32(dout_c0, vs0);\n+ vst1q_f32(dout_c1, vs1);\n+ vst1q_f32(dout_c2, vs2);\n+\n+ din += 12;\n+ dout_c0 += 4;\n+ dout_c1 += 4;\n+ dout_c2 += 4;\n+ }\n+ for (; i < size; i++) {\n+ *(dout_c0++) = (*(din++) - mean[0]) * scale[0];\n+ *(dout_c0++) = (*(din++) - mean[1]) * scale[1];\n+ *(dout_c0++) = (*(din++) - mean[2]) * scale[2];\n+ }\n+}\n+\n+std::vector<Object> visualize_result(\n+ const float* data,\n+ int count,\n+ float thresh,\n+ cv::Mat& image,\n+ const std::vector<std::string> &class_names) {\n+ if (data == nullptr) {\n+ std::cerr << \"[ERROR] data can not be nullptr\\n\";\n+ exit(1);\n+ }\n+ std::vector<Object> rect_out;\n+ for (int iw = 0; iw < count; iw++) {\n+ int oriw = image.cols;\n+ int orih = image.rows;\n+ if (data[1] > thresh) {\n+ Object obj;\n+ int x = static_cast<int>(data[2]);\n+ int y = static_cast<int>(data[3]);\n+ int w = static_cast<int>(data[4] - data[2] + 1);\n+ int h = static_cast<int>(data[5] - data[3] + 1);\n+ cv::Rect rec_clip =\n+ cv::Rect(x, y, w, h) & cv::Rect(0, 0, image.cols, image.rows);\n+ obj.class_id = static_cast<int>(data[0]);\n+ obj.prob = data[1];\n+ obj.rec = rec_clip;\n+ if (w > 0 && h > 0 && obj.prob <= 1) {\n+ rect_out.push_back(obj);\n+ cv::rectangle(image, rec_clip, cv::Scalar(0, 0, 255), 1, cv::LINE_AA);\n+ std::string str_prob = std::to_string(obj.prob);\n+ std::string text = std::string(class_names[obj.class_id]) + \": \" +\n+ str_prob.substr(0, str_prob.find(\".\") + 4);\n+ int font_face = cv::FONT_HERSHEY_COMPLEX_SMALL;\n+ double font_scale = 
1.f;\n+ int thickness = 1;\n+ cv::Size text_size =\n+ cv::getTextSize(text, font_face, font_scale, thickness, nullptr);\n+ float new_font_scale = w * 0.5 * font_scale / text_size.width;\n+ text_size = cv::getTextSize(\n+ text, font_face, new_font_scale, thickness, nullptr);\n+ cv::Point origin;\n+ origin.x = x + 3;\n+ origin.y = y + text_size.height + 3;\n+ cv::putText(image,\n+ text,\n+ origin,\n+ font_face,\n+ new_font_scale,\n+ cv::Scalar(0, 255, 255),\n+ thickness,\n+ cv::LINE_AA);\n+\n+ std::cout << \"detection, image size: \" << image.cols << \", \"\n+ << image.rows\n+ << \", detect object: \" << class_names[obj.class_id]\n+ << \", score: \" << obj.prob << \", location: x=\" << x\n+ << \", y=\" << y << \", width=\" << w << \", height=\" << h\n+ << std::endl;\n+ }\n+ }\n+ data += 6;\n+ }\n+ return rect_out;\n+}\n+\n+// Load Model and create model predictor\n+std::shared_ptr<PaddlePredictor> LoadModel(std::string model_file,\n+ int num_theads) {\n+ MobileConfig config;\n+ config.set_threads(num_theads);\n+ config.set_model_from_file(model_file);\n+\n+ std::shared_ptr<PaddlePredictor> predictor =\n+ CreatePaddlePredictor<MobileConfig>(config);\n+ return predictor;\n+}\n+\n+ImageBlob prepare_imgdata(const cv::Mat& img,\n+ std::map<std::string,\n+ std::string> config) {\n+ ImageBlob img_data;\n+ std::vector<int> target_size_;\n+ std::vector<std::string> size_str = split(config.at(\"Resize\"), \",\");\n+ transform(size_str.begin(), size_str.end(), back_inserter(target_size_),\n+ [](std::string const& s){return stoi(s);});\n+ int width = target_size_[0];\n+ int height = target_size_[1];\n+ img_data.im_shape_ = {\n+ static_cast<float>(target_size_[0]),\n+ static_cast<float>(target_size_[1])\n+ };\n+\n+ std::vector<float> mean_;\n+ std::vector<float> scale_;\n+ std::vector<std::string> mean_str = split(config.at(\"mean\"), \",\");\n+ std::vector<std::string> std_str = split(config.at(\"std\"), \",\");\n+ transform(mean_str.begin(), mean_str.end(), back_inserter(mean_),\n+ [](std::string const& s){return stof(s);});\n+ transform(std_str.begin(), std_str.end(), back_inserter(scale_),\n+ [](std::string const& s){return stof(s);});\n+ img_data.mean_ = mean_;\n+ img_data.scale_ = scale_;\n+ return img_data;\n+}\n+\n+\n+void preprocess(const cv::Mat& img, const ImageBlob img_data, float* data) {\n+ cv::Mat rgb_img;\n+ cv::cvtColor(img, rgb_img, cv::COLOR_BGR2RGB);\n+ cv::resize(\n+ rgb_img, rgb_img, cv::Size(img_data.im_shape_[0],img_data.im_shape_[1]),\n+ 0.f, 0.f, cv::INTER_CUBIC);\n+ cv::Mat imgf;\n+ rgb_img.convertTo(imgf, CV_32FC3, 1 / 255.f);\n+ const float* dimg = reinterpret_cast<const float*>(imgf.data);\n+ neon_mean_scale(\n+ dimg, data, int(img_data.im_shape_[0] * img_data.im_shape_[1]),\n+ img_data.mean_, img_data.scale_);\n+}\n+\n+\n+void RunModel(std::map<std::string, std::string> config,\n+ std::string img_path,\n+ const int repeats,\n+ std::vector<double>* times) {\n+\n+ std::string model_file = config.at(\"model_file\");\n+ std::string label_path = config.at(\"label_path\");\n+ // Load Labels\n+ std::vector<std::string> class_names = LoadLabels(label_path);\n+\n+ auto predictor = LoadModel(model_file, stoi(config.at(\"num_threads\")));\n+ cv::Mat img = imread(img_path, cv::IMREAD_COLOR);\n+ auto img_data = prepare_imgdata(img, config);\n+\n+ auto preprocess_start = std::chrono::steady_clock::now();\n+ // 1. 
Prepare input data from image\n+ // input 0\n+ std::unique_ptr<Tensor> input_tensor0(std::move(predictor->GetInput(0)));\n+ input_tensor0->Resize({1, 3, img_data.im_shape_[0], img_data.im_shape_[1]});\n+ auto* data0 = input_tensor0->mutable_data<float>();\n+ preprocess(img, img_data, data0);\n+\n+ // input1\n+ std::unique_ptr<Tensor> input_tensor1(std::move(predictor->GetInput(1)));\n+ input_tensor1->Resize({1, 2});\n+ auto* data1 = input_tensor1->mutable_data<int>();\n+ data1[0] = img_data.im_shape_[0];\n+ data1[1] = img_data.im_shape_[1];\n+\n+ auto preprocess_end = std::chrono::steady_clock::now();\n+\n+ // 2. Run predictor\n+ // warm up\n+ for (int i = 0; i < repeats / 2; i++)\n+ {\n+ predictor->Run();\n+ }\n+\n+ auto inference_start = std::chrono::steady_clock::now();\n+ for (int i = 0; i < repeats; i++)\n+ {\n+ predictor->Run();\n+ }\n+ auto inference_end = std::chrono::steady_clock::now();\n+ // 3. Get output and post process\n+ auto postprocess_start = std::chrono::steady_clock::now();\n+ std::unique_ptr<const Tensor> output_tensor(\n+ std::move(predictor->GetOutput(0)));\n+ const float* outptr = output_tensor->data<float>();\n+ auto shape_out = output_tensor->shape();\n+ int64_t cnt = 1;\n+ for (auto& i : shape_out) {\n+ cnt *= i;\n+ }\n+ auto rec_out = visualize_result(\n+ outptr, static_cast<int>(cnt / 6), 0.5f, img, class_names);\n+ std::string result_name =\n+ img_path.substr(0, img_path.find(\".\")) + \"_result.jpg\";\n+ cv::imwrite(result_name, img);\n+ auto postprocess_end = std::chrono::steady_clock::now();\n+ std::chrono::duration<float> prep_diff = preprocess_end - preprocess_start;\n+ times->push_back(double(prep_diff.count() * 1000));\n+ std::chrono::duration<float> infer_diff = inference_end - inference_start;\n+ times->push_back(double(infer_diff.count() / repeats * 1000));\n+ std::chrono::duration<float> post_diff = postprocess_end - postprocess_start;\n+ times->push_back(double(post_diff.count() * 1000));\n+}\n+\n+int main(int argc, char** argv) {\n+ if (argc < 3) {\n+ std::cerr << \"[ERROR] usage: \" << argv[0] << \" config_path image_path\\n\";\n+ exit(1);\n+ }\n+ std::string config_path = argv[1];\n+ std::string img_path = argv[2];\n+\n+ // load config\n+ auto config = LoadConfigTxt(config_path);\n+ PrintConfig(config);\n+\n+ bool enable_benchmark = bool(stoi(config.at(\"enable_benchmark\")));\n+ int repeats = enable_benchmark ? 50 : 1;\n+ std::vector<double> det_times;\n+ RunModel(config, img_path, repeats, &det_times);\n+ PrintBenchmarkLog(det_times, config, 1);\n+ return 0;\n+}\n"
}
] |
Python
|
Apache License 2.0
|
paddlepaddle/paddledetection
|
add static lite demo (#2899)
|
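A minimal sketch of the plain-text config that the new `run_detection.cc` demo reads through `LoadConfigTxt` (one `key value` pair per line, comma-separated lists, parsed by splitting on spaces). The file names and concrete values below are illustrative assumptions, not part of the commit:

```python
# Hypothetical config for the Paddle-Lite detection demo; the keys mirror the
# ones run_detection.cc looks up (model_file, label_path, num_threads,
# precision, enable_benchmark, Resize, mean, std).
example_cfg = """\
model_file model.nb
label_path coco_label_list.txt
num_threads 1
precision fp32
enable_benchmark 0
Resize 320,320
mean 0.485,0.456,0.406
std 0.229,0.224,0.225
"""

def load_config_txt(text):
    # Mirrors the C++ parser: first token is the key, second token the value,
    # so list-valued entries must not contain spaces.
    cfg = {}
    for line in text.splitlines():
        if line.strip():
            key, value = line.split(" ")[0], line.split(" ")[1]
            cfg[key] = value
    return cfg

cfg = load_config_txt(example_cfg)
target_size = [int(s) for s in cfg["Resize"].split(",")]    # [320, 320]
mean = [float(s) for s in cfg["mean"].split(",")]
print(target_size, mean)
```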
499,304 |
10.05.2021 11:17:26
| -28,800 |
7aff0dcee04bfbaf15d3f1cf79ed25484eeac381
|
add ppyolov2 voc config
|
[
{
"change_type": "ADD",
"old_path": null,
"new_path": "configs/ppyolo/ppyolov2_r50vd_dcn_voc.yml",
"diff": "+_BASE_: [\n+ '../datasets/voc.yml',\n+ '../runtime.yml',\n+ './_base_/ppyolov2_r50vd_dcn.yml',\n+ './_base_/optimizer_365e.yml',\n+ './_base_/ppyolov2_reader.yml',\n+]\n+\n+snapshot_epoch: 83\n+weights: output/ppyolov2_r50vd_dcn_voc/model_final\n+\n+TrainReader:\n+ mixup_epoch: 350\n+ batch_size: 12\n+\n+# set collate_batch to false because ground-truth info is needed\n+# on voc dataset and should not collate data in batch when batch size\n+# is larger than 1.\n+EvalReader:\n+ collate_batch: false\n+\n+epoch: 583\n+\n+LearningRate:\n+ base_lr: 0.00333\n+ schedulers:\n+ - !PiecewiseDecay\n+ gamma: 0.1\n+ milestones:\n+ - 466\n+ - 516\n+ - !LinearWarmup\n+ start_factor: 0.\n+ steps: 4000\n+\n+OptimizerBuilder:\n+ optimizer:\n+ momentum: 0.9\n+ type: Momentum\n+ regularizer:\n+ factor: 0.0005\n+ type: L2\n"
}
] |
Python
|
Apache License 2.0
|
paddlepaddle/paddledetection
|
add ppyolov2 voc config (#2916)
|
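A small sketch of the learning-rate schedule declared in the new `ppyolov2_r50vd_dcn_voc.yml` (base_lr 0.00333, gamma 0.1 at epochs 466 and 516, 583 epochs total). It assumes the usual step decay where each passed milestone multiplies the rate by gamma, and it ignores the 4000-step linear warmup:

```python
# Step-decay values implied by the config above (illustrative only).
base_lr, gamma = 0.00333, 0.1
milestones = [466, 516]            # epochs

def lr_at_epoch(epoch):
    lr = base_lr
    for m in milestones:
        if epoch >= m:
            lr *= gamma
    return lr

for e in (0, 466, 516, 582):
    print(e, lr_at_epoch(e))       # 0.00333, 0.000333, 3.33e-05, 3.33e-05
```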
499,333 |
10.05.2021 17:30:08
| -28,800 |
75d29d16e953af12666c2a1e63f69bc83cc727aa
|
refine lr schedule & download message
|
[
{
"change_type": "MODIFY",
"old_path": "configs/ssd/_base_/optimizer_120e.yml",
"new_path": "configs/ssd/_base_/optimizer_120e.yml",
"diff": "@@ -5,7 +5,7 @@ LearningRate:\nschedulers:\n- !PiecewiseDecay\nmilestones: [40, 60, 80, 100]\n- values: [0.001, 0.0005, 0.00025, 0.0001, 0.00001]\n+ gamma: [0.5, 0.5, 0.4, 0.1]\nuse_warmup: false\nOptimizerBuilder:\n"
},
{
"change_type": "MODIFY",
"old_path": "ppdet/optimizer.py",
"new_path": "ppdet/optimizer.py",
"diff": "@@ -113,7 +113,7 @@ class PiecewiseDecay(object):\nreturn optimizer.lr.PiecewiseDecay(boundary, self.values)\n# value is computed by self.gamma\n- if value is not None:\n+ value = value if value is not None else [base_lr]\nfor i in self.gamma:\nvalue.append(base_lr * i)\n"
},
{
"change_type": "MODIFY",
"old_path": "ppdet/utils/download.py",
"new_path": "ppdet/utils/download.py",
"diff": "@@ -293,14 +293,14 @@ def _dataset_exists(path, annotation, image_dir):\nCheck if user define dataset exists\n\"\"\"\nif not osp.exists(path):\n- logger.debug(\"Config dataset_dir {} is not exits, \"\n+ logger.warning(\"Config dataset_dir {} is not exits, \"\n\"dataset config is not valid\".format(path))\nreturn False\nif annotation:\nannotation_path = osp.join(path, annotation)\nif not osp.isfile(annotation_path):\n- logger.debug(\"Config annotation {} is not a \"\n+ logger.warning(\"Config annotation {} is not a \"\n\"file, dataset config is not \"\n\"valid\".format(annotation_path))\nreturn False\n"
}
] |
Python
|
Apache License 2.0
|
paddlepaddle/paddledetection
|
refine lr schedule & download message (#2921)
|
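The optimizer change above lets `PiecewiseDecay` build its value list from `gamma` when no explicit `values` are given. A short sketch of the patched construction, assuming the SSD schedule's previous starting rate of 0.001 as base_lr:

```python
# Mirrors the patched branch in ppdet/optimizer.py: seed with base_lr, then
# append one value per gamma factor.
def piecewise_values(base_lr, gamma, values=None):
    values = values if values is not None else [base_lr]
    for g in gamma:
        values.append(base_lr * g)
    return values

# optimizer_120e.yml after this commit: milestones [40, 60, 80, 100]
print(piecewise_values(0.001, [0.5, 0.5, 0.4, 0.1]))
# [0.001, 0.0005, 0.0005, 0.0004, 0.0001]
```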
499,333 |
10.05.2021 18:56:54
| -28,800 |
03326eea40fcb0df271a82c08c9e9be10b710866
|
update poly resize
|
[
{
"change_type": "MODIFY",
"old_path": "ppdet/data/transform/operators.py",
"new_path": "ppdet/data/transform/operators.py",
"diff": "@@ -634,7 +634,7 @@ class Resize(BaseOperator):\ndef apply_segm(self, segms, im_size, scale):\ndef _resize_poly(poly, im_scale_x, im_scale_y):\n- resized_poly = np.array(poly)\n+ resized_poly = np.array(poly).astype('float32')\nresized_poly[0::2] *= im_scale_x\nresized_poly[1::2] *= im_scale_y\nreturn resized_poly.tolist()\n"
}
] |
Python
|
Apache License 2.0
|
paddlepaddle/paddledetection
|
update poly resize (#2929)
|
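The one-line change above casts the polygon to float32 before in-place scaling. A sketch of why that matters (integer polygons cannot be scaled in place: recent numpy raises a casting error, older releases silently truncate):

```python
import numpy as np

poly = [120, 40, 200, 80, 160, 140]            # x1, y1, x2, y2, ... in pixels
im_scale_x, im_scale_y = 0.8, 1.25

resized = np.array(poly).astype('float32')     # patched behaviour
resized[0::2] *= im_scale_x
resized[1::2] *= im_scale_y
print(resized.tolist())

try:
    bad = np.array(poly)                       # int64, pre-patch behaviour
    bad[0::2] *= im_scale_x                    # in-place float multiply
except TypeError as err:
    print("integer polygon cannot be scaled in place:", err)
```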
499,348 |
10.05.2021 19:55:08
| -28,800 |
8336430146c460d2777b89df7fbffc3e9a20fe1c
|
hrnet fix
|
[
{
"change_type": "MODIFY",
"old_path": "configs/keypoint/higherhrnet/higherhrnet_hrnet_w32_512_swahr.yml",
"new_path": "configs/keypoint/higherhrnet/higherhrnet_hrnet_w32_512_swahr.yml",
"diff": "@@ -2,7 +2,7 @@ use_gpu: true\nlog_iter: 10\nsave_dir: output\nsnapshot_epoch: 10\n-weights: output/higherhrnet_hrnet_v1_512/model_final\n+weights: output/higherhrnet_hrnet_w32_512_swahr/model_final\nepoch: 300\nnum_joints: &num_joints 17\nflip_perm: &flip_perm [0, 2, 1, 4, 3, 6, 5, 8, 7, 10, 9, 12, 11, 14, 13, 16, 15]\n"
},
{
"change_type": "MODIFY",
"old_path": "configs/keypoint/hrnet/hrnet_coco_256x192.yml",
"new_path": "configs/keypoint/hrnet/hrnet_coco_256x192.yml",
"diff": "@@ -2,7 +2,7 @@ use_gpu: true\nlog_iter: 5\nsave_dir: output\nsnapshot_epoch: 10\n-weights: output/hrnet_coco_256x192/50\n+weights: output/hrnet_coco_256x192/model_final\nepoch: 210\nnum_joints: &num_joints 17\npixel_std: &pixel_std 200\n"
},
{
"change_type": "MODIFY",
"old_path": "ppdet/data/source/category.py",
"new_path": "ppdet/data/source/category.py",
"diff": "@@ -26,7 +26,7 @@ logger = setup_logger(__name__)\n__all__ = ['get_categories']\n-def get_categories(metric_type, arch, anno_file=None):\n+def get_categories(metric_type, anno_file=None, arch=None):\n\"\"\"\nGet class id to category id map and category id\nto category name map from annotation file.\n@@ -83,6 +83,9 @@ def get_categories(metric_type, arch, anno_file=None):\nelif metric_type.lower() == 'widerface':\nreturn _widerface_category()\n+ elif metric_type.lower() == 'keypointtopdowncocoeval':\n+ return (None, {'id': 'keypoint'})\n+\nelse:\nraise ValueError(\"unknown metric type {}\".format(metric_type))\n"
},
{
"change_type": "MODIFY",
"old_path": "ppdet/data/transform/keypoint_operators.py",
"new_path": "ppdet/data/transform/keypoint_operators.py",
"diff": "@@ -39,7 +39,7 @@ registered_ops = []\n__all__ = [\n'RandomAffine', 'KeyPointFlip', 'TagGenerate', 'ToHeatmaps',\n'NormalizePermute', 'EvalAffine', 'RandomFlipHalfBodyTransform',\n- 'TopDownAffine', 'ToHeatmapsTopDown'\n+ 'TopDownAffine', 'ToHeatmapsTopDown', 'TopDownEvalAffine'\n]\n@@ -564,6 +564,38 @@ class TopDownAffine(object):\nreturn records\n+@register_keypointop\n+class TopDownEvalAffine(object):\n+ \"\"\"apply affine transform to image and coords\n+\n+ Args:\n+ trainsize (list): [w, h], the standard size used to train\n+ records(dict): the dict contained the image and coords\n+\n+ Returns:\n+ records (dict): contain the image and coords after tranformed\n+\n+ \"\"\"\n+\n+ def __init__(self, trainsize):\n+ self.trainsize = trainsize\n+\n+ def __call__(self, records):\n+ image = records['image']\n+ rot = 0\n+ imshape = records['im_shape'][::-1]\n+ center = imshape / 2.\n+ scale = imshape\n+ trans = get_affine_transform(center, scale, rot, self.trainsize)\n+ image = cv2.warpAffine(\n+ image,\n+ trans, (int(self.trainsize[0]), int(self.trainsize[1])),\n+ flags=cv2.INTER_LINEAR)\n+ records['image'] = image\n+\n+ return records\n+\n+\n@register_keypointop\nclass ToHeatmapsTopDown(object):\n\"\"\"to generate the gaussin heatmaps of keypoint for heatmap loss\n"
},
{
"change_type": "MODIFY",
"old_path": "ppdet/engine/export_utils.py",
"new_path": "ppdet/engine/export_utils.py",
"diff": "@@ -49,7 +49,7 @@ def _parse_reader(reader_cfg, dataset_cfg, metric, arch, image_shape):\nanno_file = dataset_cfg.get_anno()\n- clsid2catid, catid2name = get_categories(metric, arch, anno_file)\n+ clsid2catid, catid2name = get_categories(metric, anno_file, arch)\nlabel_list = [str(cat) for cat in catid2name.values()]\n"
},
{
"change_type": "MODIFY",
"old_path": "ppdet/engine/trainer.py",
"new_path": "ppdet/engine/trainer.py",
"diff": "@@ -392,6 +392,7 @@ class Trainer(object):\nbatch_res = get_infer_results(outs, clsid2catid)\nbbox_num = outs['bbox_num']\n+\nstart = 0\nfor i, im_id in enumerate(outs['im_id']):\nimage_path = imid2path[int(im_id)]\n"
},
{
"change_type": "MODIFY",
"old_path": "ppdet/metrics/keypoint_metrics.py",
"new_path": "ppdet/metrics/keypoint_metrics.py",
"diff": "@@ -56,13 +56,11 @@ class KeyPointTopDownCOCOEval(object):\nself.idx = 0\ndef update(self, inputs, outputs):\n- kpt_coord = outputs['kpt_coord']\n- kpt_score = outputs['kpt_score']\n+ kpts, _ = outputs['keypoint'][0]\n+\nnum_images = inputs['image'].shape[0]\nself.results['all_preds'][self.idx:self.idx + num_images, :, 0:\n- 2] = kpt_coord[:, :, 0:2]\n- self.results['all_preds'][self.idx:self.idx + num_images, :, 2:\n- 3] = kpt_score\n+ 3] = kpts[:, :, 0:3]\nself.results['all_boxes'][self.idx:self.idx + num_images, 0:2] = inputs[\n'center'].numpy()[:, 0:2]\nself.results['all_boxes'][self.idx:self.idx + num_images, 2:4] = inputs[\n@@ -115,7 +113,7 @@ class KeyPointTopDownCOCOEval(object):\nresult = [{\n'image_id': img_kpts[k]['image'],\n'category_id': cat_id,\n- 'keypoints': list(_key_points[k]),\n+ 'keypoints': _key_points[k].tolist(),\n'score': img_kpts[k]['score'],\n'center': list(img_kpts[k]['center']),\n'scale': list(img_kpts[k]['scale'])\n"
},
{
"change_type": "MODIFY",
"old_path": "ppdet/modeling/architectures/keypoint_hrnet.py",
"new_path": "ppdet/modeling/architectures/keypoint_hrnet.py",
"diff": "@@ -39,7 +39,7 @@ class TopDownHRNet(BaseArch):\nloss='KeyPointMSELoss',\npost_process='HRNetPostProcess',\nflip_perm=None,\n- flip=False,\n+ flip=True,\nshift_heatmap=True):\n\"\"\"\nHRNnet network, see https://arxiv.org/abs/1902.09212\n@@ -57,6 +57,7 @@ class TopDownHRNet(BaseArch):\nself.flip = flip\nself.final_conv = L.Conv2d(width, num_joints, 1, 1, 0, bias=True)\nself.shift_heatmap = shift_heatmap\n+ self.deploy = False\n@classmethod\ndef from_config(cls, cfg, *args, **kwargs):\n@@ -71,31 +72,37 @@ class TopDownHRNet(BaseArch):\nif self.training:\nreturn self.loss(hrnet_outputs, self.inputs)\n+ elif self.deploy:\n+ return hrnet_outputs\nelse:\nif self.flip:\nself.inputs['image'] = self.inputs['image'].flip([3])\n- feats = backbone(inputs)\n- output_flipped = self.final_conv(feats)\n+ feats = self.backbone(self.inputs)\n+ output_flipped = self.final_conv(feats[0])\noutput_flipped = self.flip_back(output_flipped.numpy(),\n- flip_perm)\n+ self.flip_perm)\noutput_flipped = paddle.to_tensor(output_flipped.copy())\nif self.shift_heatmap:\noutput_flipped[:, :, :, 1:] = output_flipped.clone(\n)[:, :, :, 0:-1]\n- output = (output + output_flipped) * 0.5\n- preds, maxvals = self.post_process(hrnet_outputs, self.inputs)\n- return preds, maxvals\n+ hrnet_outputs = (hrnet_outputs + output_flipped) * 0.5\n+ imshape = (self.inputs['im_shape'].numpy()\n+ )[:, ::-1] if 'im_shape' in self.inputs else None\n+ center = self.inputs['center'].numpy(\n+ ) if 'center' in self.inputs else np.round(imshape / 2.)\n+ scale = self.inputs['scale'].numpy(\n+ ) if 'scale' in self.inputs else imshape / 200.\n+ outputs = self.post_process(hrnet_outputs, center, scale)\n+ return outputs\ndef get_loss(self):\nreturn self._forward()\ndef get_pred(self):\n- preds, maxvals = self._forward()\n- output = {'kpt_coord': preds, 'kpt_score': maxvals}\n- return output\n-\n+ res_lst = self._forward()\n+ outputs = {'keypoint': res_lst}\n+ return outputs\n-class HRNetPostProcess(object):\ndef flip_back(self, output_flipped, matched_parts):\nassert output_flipped.ndim == 4,\\\n'output_flipped should be [batch_size, num_joints, height, width]'\n@@ -109,6 +116,8 @@ class HRNetPostProcess(object):\nreturn output_flipped\n+\n+class HRNetPostProcess(object):\ndef get_max_preds(self, heatmaps):\n'''get predictions from score maps\n@@ -156,7 +165,7 @@ class HRNetPostProcess(object):\nReturns:\npreds: numpy.ndarray([batch_size, num_joints, 2]), keypoints coords\n- maxvals: numpy.ndarray([batch_size, num_joints, 2]), the maximum confidence of the keypoints\n+ maxvals: numpy.ndarray([batch_size, num_joints, 1]), the maximum confidence of the keypoints\n\"\"\"\ncoords, maxvals = self.get_max_preds(heatmaps)\n@@ -184,8 +193,11 @@ class HRNetPostProcess(object):\nreturn preds, maxvals\n- def __call__(self, output, inputs):\n- preds, maxvals = self.get_final_preds(\n- output.numpy(), inputs['center'].numpy(), inputs['scale'].numpy())\n-\n- return preds, maxvals\n+ def __call__(self, output, center, scale):\n+ preds, maxvals = self.get_final_preds(output.numpy(), center, scale)\n+ outputs = [[\n+ np.concatenate(\n+ (preds, maxvals), axis=-1), np.mean(\n+ maxvals, axis=1)\n+ ]]\n+ return outputs\n"
},
{
"change_type": "MODIFY",
"old_path": "ppdet/utils/visualizer.py",
"new_path": "ppdet/utils/visualizer.py",
"diff": "@@ -246,7 +246,6 @@ def draw_pose(image, results, visual_thread=0.6, save_name='pose.jpg'):\nskeletons = np.array([item['keypoints'] for item in results]).reshape(-1,\n17, 3)\n- scores = [item['score'] for item in results]\nimg = np.array(image).astype('float32')\ncanvas = img.copy()\n"
}
] |
Python
|
Apache License 2.0
|
paddlepaddle/paddledetection
|
hrnet fix (#2920)
|
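Among the changes above, `HRNetPostProcess.__call__` now packs coordinates and confidences into one array and returns a per-person score. A shape-only sketch with dummy data:

```python
import numpy as np

N, K = 2, 17                                          # batch size, num_joints
preds = np.random.rand(N, K, 2).astype('float32')     # x, y from get_final_preds
maxvals = np.random.rand(N, K, 1).astype('float32')   # heatmap peak confidences

kpts = np.concatenate((preds, maxvals), axis=-1)      # [N, K, 3]
scores = np.mean(maxvals, axis=1)                     # [N, 1] per-person score
outputs = [[kpts, scores]]                            # wrapped as {'keypoint': outputs} in get_pred
print(kpts.shape, scores.shape)                       # (2, 17, 3) (2, 1)
```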
499,304 |
11.05.2021 11:32:38
| -28,800 |
385f9bbd62252d07aabb0fbd49cf33b2597d0ca0
|
fix static prune bug
|
[
{
"change_type": "MODIFY",
"old_path": "static/slim/prune/prune.py",
"new_path": "static/slim/prune/prune.py",
"diff": "@@ -203,21 +203,7 @@ def main():\nassert FLAGS.prune_criterion in ['l1_norm', 'geometry_median'], \\\n\"unsupported prune criterion {}\".format(FLAGS.prune_criterion)\npruner = Pruner(criterion=FLAGS.prune_criterion)\n- train_prog = pruner.prune(\n- train_prog,\n- fluid.global_scope(),\n- params=pruned_params,\n- ratios=pruned_ratios,\n- place=place,\n- only_graph=False)[0]\n-\n- compiled_train_prog = fluid.CompiledProgram(train_prog).with_data_parallel(\n- loss_name=loss.name,\n- build_strategy=build_strategy,\n- exec_strategy=exec_strategy)\n-\nif FLAGS.eval:\n-\nbase_flops = flops(eval_prog)\neval_prog = pruner.prune(\neval_prog,\n@@ -232,6 +218,19 @@ def main():\npruned_flops))\ncompiled_eval_prog = fluid.CompiledProgram(eval_prog)\n+ train_prog = pruner.prune(\n+ train_prog,\n+ fluid.global_scope(),\n+ params=pruned_params,\n+ ratios=pruned_ratios,\n+ place=place,\n+ only_graph=False)[0]\n+\n+ compiled_train_prog = fluid.CompiledProgram(train_prog).with_data_parallel(\n+ loss_name=loss.name,\n+ build_strategy=build_strategy,\n+ exec_strategy=exec_strategy)\n+\nif FLAGS.resume_checkpoint:\ncheckpoint.load_checkpoint(exe, train_prog, FLAGS.resume_checkpoint)\nstart_iter = checkpoint.global_step()\n"
}
] |
Python
|
Apache License 2.0
|
paddlepaddle/paddledetection
|
fix static prune bug (#2933)
|
499,395 |
11.05.2021 21:13:03
| -28,800 |
30dda1ea5e38b59f267012dcd89d643edb38dee8
|
fix some wrong links, test=document_fix
|
[
{
"change_type": "MODIFY",
"old_path": "docs/tutorials/INSTALL.md",
"new_path": "docs/tutorials/INSTALL.md",
"diff": "@@ -123,7 +123,7 @@ OK\n```\n# Predict an image by GPU\nexport CUDA_VISIBLE_DEVICES=0\n-python tools/infer.py -c configs/ppyolo/ppyolo_r50vd_dcn_1x_coco.yml -o use_gpu=true weights=https://paddlemodels.bj.bcebos.com/object_detection/ppyolo.pdparams --infer_img=demo/000000014439.jpg\n+python tools/infer.py -c configs/ppyolo/ppyolo_r50vd_dcn_1x_coco.yml -o use_gpu=true weights=https://paddledet.bj.bcebos.com/models/ppyolo_r50vd_dcn_1x_coco.pdparams --infer_img=demo/000000014439.jpg\n```\nAn image of the same name with the predicted result will be generated under the `output` folder.\n"
}
] |
Python
|
Apache License 2.0
|
paddlepaddle/paddledetection
|
fix some wrong links, test=document_fix (#2948)
|
499,304 |
12.05.2021 09:51:55
| -28,800 |
9623cb9e00e9412d51b52c55ad002bfb9b8e0120
|
fix parameter of static cpp_infer
|
[
{
"change_type": "MODIFY",
"old_path": "static/deploy/cpp/src/main.cc",
"new_path": "static/deploy/cpp/src/main.cc",
"diff": "DEFINE_string(model_dir, \"\", \"Path of inference model\");\n-DEFINE_string(image_path, \"\", \"Path of input image\");\n+DEFINE_string(image_file, \"\", \"Path of input image\");\nDEFINE_string(video_path, \"\", \"Path of input video\");\nDEFINE_bool(use_gpu, false, \"Infering with GPU or CPU\");\nDEFINE_bool(use_camera, false, \"Use camera or not\");\n@@ -193,9 +193,9 @@ int main(int argc, char** argv) {\n// Parsing command-line\ngoogle::ParseCommandLineFlags(&argc, &argv, true);\nif (FLAGS_model_dir.empty()\n- || (FLAGS_image_path.empty() && FLAGS_video_path.empty())) {\n+ || (FLAGS_image_file.empty() && FLAGS_video_path.empty())) {\nstd::cout << \"Usage: ./main --model_dir=/PATH/TO/INFERENCE_MODEL/ \"\n- << \"--image_path=/PATH/TO/INPUT/IMAGE/\" << std::endl;\n+ << \"--image_file=/PATH/TO/INPUT/IMAGE/\" << std::endl;\nreturn -1;\n}\nif (!(FLAGS_run_mode == \"fluid\" || FLAGS_run_mode == \"trt_fp32\"\n@@ -210,11 +210,11 @@ int main(int argc, char** argv) {\n// Do inference on input video or image\nif (!FLAGS_video_path.empty() || FLAGS_use_camera) {\nPredictVideo(FLAGS_video_path, &det);\n- } else if (!FLAGS_image_path.empty()) {\n+ } else if (!FLAGS_image_file.empty()) {\nif (!PathExists(FLAGS_output_dir)) {\nMkDirs(FLAGS_output_dir);\n}\n- PredictImage(FLAGS_image_path, FLAGS_threshold, FLAGS_run_benchmark, &det, FLAGS_output_dir);\n+ PredictImage(FLAGS_image_file, FLAGS_threshold, FLAGS_run_benchmark, &det, FLAGS_output_dir);\n}\nreturn 0;\n}\n"
}
] |
Python
|
Apache License 2.0
|
paddlepaddle/paddledetection
|
fix parameter of static cpp_infer (#2955)
|
499,313 |
12.05.2021 11:29:10
| -28,800 |
1aa7e2e40f0cf3a859ba89b476218108dc4b9d9a
|
fix static notice & GETTING_STARTED
|
[
{
"change_type": "MODIFY",
"old_path": "docs/tutorials/GETTING_STARTED.md",
"new_path": "docs/tutorials/GETTING_STARTED.md",
"diff": "@@ -28,9 +28,9 @@ export CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7\npython -m paddle.distributed.launch --gpus 0,1,2,3,4,5,6,7 tools/train.py -c configs/faster_rcnn/faster_rcnn_r50_fpn_1x_coco.yml\n# GPU evaluation\nexport CUDA_VISIBLE_DEVICES=0\n-python tools/eval.py -c configs/faster_rcnn/faster_rcnn_r50_fpn_1x_coco.yml\n+python tools/eval.py -c configs/faster_rcnn/faster_rcnn_r50_fpn_1x_coco.yml -o weights=https://paddledet.bj.bcebos.com/models/faster_rcnn_r50_fpn_1x_coco.pdparams\n# Inference\n-python tools/infer.py -c configs/faster_rcnn/faster_rcnn_r50_fpn_1x_coco.yml --infer_img=demo/000000570688.jpg\n+python tools/infer.py -c configs/faster_rcnn/faster_rcnn_r50_fpn_1x_coco.yml --infer_img=demo/000000570688.jpg -o weights=https://paddledet.bj.bcebos.com/models/faster_rcnn_r50_fpn_1x_coco.pdparams\n```\n### Other argument list\n"
},
{
"change_type": "MODIFY",
"old_path": "static/ppdet/modeling/tests/test_architectures.py",
"new_path": "static/ppdet/modeling/tests/test_architectures.py",
"diff": "@@ -28,9 +28,22 @@ parent_path = os.path.abspath(os.path.join(__file__, *(['..'] * 4)))\nif parent_path not in sys.path:\nsys.path.append(parent_path)\n+try:\nfrom ppdet.modeling.tests.decorator_helper import prog_scope\nfrom ppdet.core.workspace import load_config, merge_config, create\nfrom ppdet.utils.check import enable_static_mode\n+except ImportError as e:\n+ if sys.argv[0].find('static') >= 0:\n+ logger.error(\"Importing ppdet failed when running static model \"\n+ \"with error: {}\\n\"\n+ \"please try:\\n\"\n+ \"\\t1. run static model under PaddleDetection/static \"\n+ \"directory\\n\"\n+ \"\\t2. run 'pip uninstall ppdet' to uninstall ppdet \"\n+ \"dynamic version firstly.\".format(e))\n+ sys.exit(-1)\n+ else:\n+ raise e\nclass TestFasterRCNN(unittest.TestCase):\n"
}
] |
Python
|
Apache License 2.0
|
paddlepaddle/paddledetection
|
fix static notice & GETTING_STARTED (#2809)
|
499,348 |
12.05.2021 13:15:44
| -28,800 |
912833f2013685b5d1f634ed3889aab701628ecc
|
change hrhrnet eval heat_thresh=0.1; fix crowdpose eval
|
[
{
"change_type": "MODIFY",
"old_path": "ppdet/data/source/keypoint_coco.py",
"new_path": "ppdet/data/source/keypoint_coco.py",
"diff": "@@ -60,7 +60,6 @@ class KeypointBottomUpBaseDataset(DetDataset):\nself.test_mode = test_mode\nself.ann_info['num_joints'] = num_joints\n-\nself.img_ids = []\ndef __len__(self):\n"
},
{
"change_type": "MODIFY",
"old_path": "ppdet/engine/trainer.py",
"new_path": "ppdet/engine/trainer.py",
"diff": "@@ -147,6 +147,7 @@ class Trainer(object):\neval_dataset.check_or_download_dataset()\nanno_file = eval_dataset.get_anno()\n+ IouType = self.cfg['IouType'] if 'IouType' in self.cfg else 'bbox'\nself._metrics = [\nCOCOMetric(\nanno_file=anno_file,\n@@ -154,6 +155,7 @@ class Trainer(object):\nclasswise=classwise,\noutput_eval=output_eval,\nbias=bias,\n+ IouType=IouType,\nsave_prediction_only=save_prediction_only)\n]\nelif self.cfg.metric == 'VOC':\n"
},
{
"change_type": "MODIFY",
"old_path": "ppdet/metrics/coco_utils.py",
"new_path": "ppdet/metrics/coco_utils.py",
"diff": "@@ -107,7 +107,6 @@ def cocoapi_eval(jsonfile,\ncoco_eval.params.maxDets = list(max_dets)\nelif style == 'keypoints_crowd':\ncoco_eval = COCOeval(coco_gt, coco_dt, style, sigmas, use_area)\n- coco_gt.anno_file.append(\"\")\nelse:\ncoco_eval = COCOeval(coco_gt, coco_dt, style)\ncoco_eval.evaluate()\n"
},
{
"change_type": "MODIFY",
"old_path": "ppdet/modeling/architectures/keypoint_hrhrnet.py",
"new_path": "ppdet/modeling/architectures/keypoint_hrhrnet.py",
"diff": "@@ -52,7 +52,7 @@ class HigherHRNet(BaseArch):\nsuper(HigherHRNet, self).__init__()\nself.backbone = backbone\nself.hrhrnet_head = hrhrnet_head\n- self.post_process = HrHRNetPostProcess()\n+ self.post_process = post_process\nself.flip = eval_flip\nself.flip_perm = paddle.to_tensor(flip_perm)\nself.deploy = False\n@@ -85,6 +85,7 @@ class HigherHRNet(BaseArch):\nreturn self.hrhrnet_head(body_feats, self.inputs)\nelse:\noutputs = self.hrhrnet_head(body_feats)\n+\nif self.flip and not self.deploy:\noutputs = [paddle.split(o, 2) for o in outputs]\noutput_rflip = [\n@@ -105,7 +106,6 @@ class HigherHRNet(BaseArch):\nw = self.inputs['im_shape'][0, 1].numpy().item()\nkpts, scores = self.post_process(*outputs, h, w)\nres_lst.append([kpts, scores])\n-\nreturn res_lst\ndef get_loss(self):\n@@ -157,7 +157,7 @@ class HrHRNetPostProcess(object):\noriginal_height, original_width (float): the original image size\n'''\n- def __init__(self, max_num_people=30, heat_thresh=0.2, tag_thresh=1.):\n+ def __init__(self, max_num_people=30, heat_thresh=0.1, tag_thresh=1.):\nself.max_num_people = max_num_people\nself.heat_thresh = heat_thresh\nself.tag_thresh = tag_thresh\n"
}
] |
Python
|
Apache License 2.0
|
paddlepaddle/paddledetection
|
change hrhrnet eval heat_thresh=0.1; fix crowdpose eval (#2945)
|
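The trainer change above threads an optional `IouType` from the config into `COCOMetric`. A tiny sketch of the lookup with a stand-in config dict (crowdpose keypoint evaluation uses the `keypoints_crowd` style handled in `coco_utils.cocoapi_eval`):

```python
# Stand-in config; only the default-to-'bbox' lookup is the point here.
cfg = {'metric': 'COCO', 'IouType': 'keypoints_crowd'}
iou_type = cfg['IouType'] if 'IouType' in cfg else 'bbox'
print(iou_type)    # 'keypoints_crowd'; plain detection configs fall back to 'bbox'
```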
499,348 |
13.05.2021 11:28:57
| -28,800 |
33dca040b7c72c5bce52faa6c6e6594fd4b10cf9
|
fix batchsize=1 backward error
|
[
{
"change_type": "MODIFY",
"old_path": "configs/keypoint/higherhrnet/higherhrnet_hrnet_w32_512.yml",
"new_path": "configs/keypoint/higherhrnet/higherhrnet_hrnet_w32_512.yml",
"diff": "use_gpu: true\n-log_iter: 1\n+log_iter: 10\nsave_dir: output\nsnapshot_epoch: 10\n-weights: output/higherhrnet_hrnet_v1_512/290\n+weights: output/higherhrnet_hrnet_w32_512/model_final\nepoch: 300\nnum_joints: &num_joints 17\nflip_perm: &flip_perm [0, 2, 1, 4, 3, 6, 5, 8, 7, 10, 9, 12, 11, 14, 13, 16, 15]\n"
},
{
"change_type": "MODIFY",
"old_path": "ppdet/modeling/heads/keypoint_hrhrnet_head.py",
"new_path": "ppdet/modeling/heads/keypoint_hrhrnet_head.py",
"diff": "@@ -92,15 +92,14 @@ class HrHRNetHead(nn.Layer):\nxo2 = self.conv2(x2)\nnum_joints = self.num_joints\nif self.training:\n+ heatmap1, tagmap = paddle.split(xo1, 2, axis=1)\nif self.swahr:\nso1 = self.scalelayer0(x1)\nso2 = self.scalelayer1(x2)\n- hrhrnet_outputs = ([xo1[:, :num_joints], so1], [xo2, so2],\n- xo1[:, num_joints:])\n+ hrhrnet_outputs = ([heatmap1, so1], [xo2, so2], tagmap)\nreturn self.loss(hrhrnet_outputs, targets)\nelse:\n- hrhrnet_outputs = (xo1[:, :num_joints], xo2,\n- xo1[:, num_joints:])\n+ hrhrnet_outputs = (heatmap1, xo2, tagmap)\nreturn self.loss(hrhrnet_outputs, targets)\n# averaged heatmap, upsampled tagmap\n"
},
{
"change_type": "MODIFY",
"old_path": "ppdet/modeling/losses/keypoint_loss.py",
"new_path": "ppdet/modeling/losses/keypoint_loss.py",
"diff": "@@ -194,7 +194,10 @@ class AELoss(object):\ndef __call__(self, preds, tagmaps):\nbs = preds.shape[0]\n- losses = [self.apply_single(preds[i], tagmaps[i]) for i in range(bs)]\n+ losses = [\n+ self.apply_single(preds[i:i + 1].squeeze(),\n+ tagmaps[i:i + 1].squeeze()) for i in range(bs)\n+ ]\npull = self.pull_factor * sum(loss[0] for loss in losses) / len(losses)\npush = self.push_factor * sum(loss[1] for loss in losses) / len(losses)\nreturn pull, push\n"
}
] |
Python
|
Apache License 2.0
|
paddlepaddle/paddledetection
|
fix batchsize=1 backward error (#2973)
|
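In the `AELoss` change above, the per-sample prediction and tag map are now taken as a one-element slice plus `squeeze()` instead of integer indexing; the values are identical, and the sliced form is what avoids the backward error at batch size 1 (the exact failure mode is not spelled out in the commit, so that part is an assumption). A shape sketch with numpy stand-ins:

```python
import numpy as np

preds = np.random.rand(1, 128, 4).astype('float32')    # [bs, ...] with bs == 1
a = preds[0]                 # pre-patch style: integer indexing
b = preds[0:1].squeeze()     # patched style: slice, then drop the size-1 batch dim
print(a.shape, b.shape, np.allclose(a, b))              # (128, 4) (128, 4) True
```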
499,333 |
13.05.2021 14:05:30
| -28,800 |
76ab27803fce08f2a1b3743afff72966007e6d10
|
fix return_index in nms
|
[
{
"change_type": "MODIFY",
"old_path": "ppdet/modeling/ops.py",
"new_path": "ppdet/modeling/ops.py",
"diff": "@@ -987,7 +987,7 @@ def multiclass_nms(bboxes,\n'normalized', normalized)\noutput, index, nms_rois_num = core.ops.multiclass_nms3(bboxes, scores,\nrois_num, *attrs)\n- if return_index:\n+ if not return_index:\nindex = None\nreturn output, nms_rois_num, index\n"
}
] |
Python
|
Apache License 2.0
|
paddlepaddle/paddledetection
|
fix return_index in nms (#2980)
|
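A guard-only sketch of the corrected logic in `ppdet.modeling.ops.multiclass_nms`: the index tensor returned by the underlying op should be dropped only when the caller did not ask for it (the previous condition did the opposite). The stand-in values are illustrative:

```python
def finalize_nms_outputs(output, nms_rois_num, index, return_index=False):
    if not return_index:     # patched condition; previously `if return_index:`
        index = None
    return output, nms_rois_num, index

out, num, idx = finalize_nms_outputs("boxes", 3, [0, 5, 7], return_index=True)
print(idx)                   # [0, 5, 7] is now kept instead of being discarded
```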
499,339 |
13.05.2021 18:43:06
| -28,800 |
4e88bec59cc15f1f980ba9ad61f760c552062861
|
add res2net
|
[
{
"change_type": "ADD",
"old_path": null,
"new_path": "configs/res2net/README.md",
"diff": "+# Res2Net\n+\n+## Introduction\n+\n+- Res2Net: A New Multi-scale Backbone Architecture: [https://arxiv.org/abs/1904.01169](https://arxiv.org/abs/1904.01169)\n+\n+```\n+@article{DBLP:journals/corr/abs-1904-01169,\n+ author = {Shanghua Gao and\n+ Ming{-}Ming Cheng and\n+ Kai Zhao and\n+ Xinyu Zhang and\n+ Ming{-}Hsuan Yang and\n+ Philip H. S. Torr},\n+ title = {Res2Net: {A} New Multi-scale Backbone Architecture},\n+ journal = {CoRR},\n+ volume = {abs/1904.01169},\n+ year = {2019},\n+ url = {http://arxiv.org/abs/1904.01169},\n+ archivePrefix = {arXiv},\n+ eprint = {1904.01169},\n+ timestamp = {Thu, 25 Apr 2019 10:24:54 +0200},\n+ biburl = {https://dblp.org/rec/bib/journals/corr/abs-1904-01169},\n+ bibsource = {dblp computer science bibliography, https://dblp.org}\n+}\n+```\n+\n+\n+## Model Zoo\n+\n+| Backbone | Type | Image/gpu | Lr schd | Inf time (fps) | Box AP | Mask AP | Download | Configs |\n+| :---------------------- | :------------- | :-------: | :-----: | :------------: | :----: | :-----: | :----------------------------------------------------------: | :-----: |\n+| Res2Net50-FPN | Faster | 2 | 1x | - | 40.6 | - | [model](https://paddledet.bj.bcebos.com/models/faster_rcnn_res2net50_vb_26w_4s_fpn_1x_coco.pdparams) | [config](https://github.com/PaddlePaddle/PaddleDetection/develop/configs/res2net/faster_rcnn_res2net50_vb_26w_4s_fpn_1x.yml) |\n+| Res2Net50-FPN | Mask | 2 | 2x | - | 42.4 | 38.1 | [model](https://paddledet.bj.bcebos.com/models/mask_rcnn_res2net50_vb_26w_4s_fpn_2x_coco.pdparams) | [config](https://github.com/PaddlePaddle/PaddleDetection/develop/configs/res2net/mask_rcnn_res2net50_vb_26w_4s_fpn_2x_coco.yml) |\n+| Res2Net50-vd-FPN | Mask | 2 | 2x | - | 42.6 | 38.1 | [model](https://paddledet.bj.bcebos.com/models/mask_rcnn_res2net50_vd_26w_4s_fpn_2x_coco.pdparams) | [config](https://github.com/PaddlePaddle/PaddleDetection/develop/configs/res2net/mask_rcnn_res2net50_vd_26w_4s_fpn_2x_coco.yml) |\n+\n+Note: all the above models are trained with 8 gpus.\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "configs/res2net/faster_rcnn_res2net50_vb_26w_4s_fpn_1x_coco.yml",
"diff": "+_BASE_: [\n+ '../datasets/coco_detection.yml',\n+ '../runtime.yml',\n+ '../faster_rcnn/_base_/optimizer_1x.yml',\n+ '../faster_rcnn/_base_/faster_rcnn_r50_fpn.yml',\n+ '../faster_rcnn/_base_/faster_fpn_reader.yml',\n+]\n+pretrain_weights: https://paddledet.bj.bcebos.com/models/pretrained/Res2Net50_26w_4s_pretrained.pdparams\n+weights: output/faster_rcnn_res2net50_vb_26w_4s_fpn_1x_coco/model_final\n+\n+FasterRCNN:\n+ backbone: Res2Net\n+ neck: FPN\n+ rpn_head: RPNHead\n+ bbox_head: BBoxHead\n+ # post process\n+ bbox_post_process: BBoxPostProcess\n+\n+\n+Res2Net:\n+ # index 0 stands for res2\n+ depth: 50\n+ width: 26\n+ scales: 4\n+ norm_type: bn\n+ freeze_at: 0\n+ return_idx: [0,1,2,3]\n+ num_stages: 4\n+ variant: b\n+\n+\n+TrainReader:\n+ batch_size: 2\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "configs/res2net/mask_rcnn_res2net50_vb_26w_4s_fpn_2x_coco.yml",
"diff": "+_BASE_: [\n+ '../datasets/coco_instance.yml',\n+ '../runtime.yml',\n+ '../mask_rcnn/_base_/optimizer_1x.yml',\n+ '../mask_rcnn/_base_/mask_rcnn_r50_fpn.yml',\n+ '../mask_rcnn/_base_/mask_fpn_reader.yml',\n+]\n+pretrain_weights: https://paddledet.bj.bcebos.com/models/pretrained/Res2Net50_26w_4s_pretrained.pdparams\n+weights: output/mask_rcnn_res2net50_vb_26w_4s_fpn_2x_coco/model_final\n+\n+MaskRCNN:\n+ backbone: Res2Net\n+ neck: FPN\n+ rpn_head: RPNHead\n+ bbox_head: BBoxHead\n+ mask_head: MaskHead\n+ # post process\n+ bbox_post_process: BBoxPostProcess\n+ mask_post_process: MaskPostProcess\n+\n+\n+Res2Net:\n+ # index 0 stands for res2\n+ depth: 50\n+ width: 26\n+ scales: 4\n+ norm_type: bn\n+ freeze_at: 0\n+ return_idx: [0,1,2,3]\n+ num_stages: 4\n+ variant: b\n+\n+\n+epoch: 24\n+LearningRate:\n+ base_lr: 0.01\n+ schedulers:\n+ - !PiecewiseDecay\n+ gamma: 0.1\n+ milestones: [16, 22]\n+ - !LinearWarmup\n+ start_factor: 0.3333333333333333\n+ steps: 500\n+\n+\n+TrainReader:\n+ batch_size: 2\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "configs/res2net/mask_rcnn_res2net50_vd_26w_4s_fpn_2x_coco.yml",
"diff": "+_BASE_: [\n+ '../datasets/coco_instance.yml',\n+ '../runtime.yml',\n+ '../mask_rcnn/_base_/optimizer_1x.yml',\n+ '../mask_rcnn/_base_/mask_rcnn_r50_fpn.yml',\n+ '../mask_rcnn/_base_/mask_fpn_reader.yml',\n+]\n+pretrain_weights: https://paddledet.bj.bcebos.com/models/pretrained/Res2Net50_vd_26w_4s_pretrained.pdparams\n+weights: output/mask_rcnn_res2net50_vd_26w_4s_fpn_2x_coco/model_final\n+\n+MaskRCNN:\n+ backbone: Res2Net\n+ neck: FPN\n+ rpn_head: RPNHead\n+ bbox_head: BBoxHead\n+ mask_head: MaskHead\n+ # post process\n+ bbox_post_process: BBoxPostProcess\n+ mask_post_process: MaskPostProcess\n+\n+\n+Res2Net:\n+ # index 0 stands for res2\n+ depth: 50\n+ width: 26\n+ scales: 4\n+ norm_type: bn\n+ freeze_at: 0\n+ return_idx: [0,1,2,3]\n+ num_stages: 4\n+ variant: d\n+\n+\n+epoch: 24\n+LearningRate:\n+ base_lr: 0.01\n+ schedulers:\n+ - !PiecewiseDecay\n+ gamma: 0.1\n+ milestones: [16, 22]\n+ - !LinearWarmup\n+ start_factor: 0.3333333333333333\n+ steps: 500\n+\n+\n+TrainReader:\n+ batch_size: 2\n"
},
{
"change_type": "MODIFY",
"old_path": "ppdet/modeling/backbones/__init__.py",
"new_path": "ppdet/modeling/backbones/__init__.py",
"diff": "@@ -21,6 +21,7 @@ from . import hrnet\nfrom . import blazenet\nfrom . import ghostnet\nfrom . import senet\n+from . import res2net\nfrom .vgg import *\nfrom .resnet import *\n@@ -31,3 +32,4 @@ from .hrnet import *\nfrom .blazenet import *\nfrom .ghostnet import *\nfrom .senet import *\n+from .res2net import *\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "ppdet/modeling/backbones/res2net.py",
"diff": "+# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n+from numbers import Integral\n+\n+import paddle\n+import paddle.nn as nn\n+import paddle.nn.functional as F\n+from ppdet.core.workspace import register, serializable\n+from ..shape_spec import ShapeSpec\n+from .resnet import ConvNormLayer\n+\n+__all__ = ['Res2Net', 'Res2NetC5']\n+\n+Res2Net_cfg = {\n+ 50: [3, 4, 6, 3],\n+ 101: [3, 4, 23, 3],\n+ 152: [3, 8, 36, 3],\n+ 200: [3, 12, 48, 3]\n+}\n+\n+\n+class BottleNeck(nn.Layer):\n+ def __init__(self,\n+ ch_in,\n+ ch_out,\n+ stride,\n+ shortcut,\n+ width,\n+ scales=4,\n+ variant='b',\n+ groups=1,\n+ lr=1.0,\n+ norm_type='bn',\n+ norm_decay=0.,\n+ freeze_norm=True,\n+ dcn_v2=False):\n+ super(BottleNeck, self).__init__()\n+\n+ self.shortcut = shortcut\n+ self.scales = scales\n+ self.stride = stride\n+ if not shortcut:\n+ if variant == 'd' and stride == 2:\n+ self.branch1 = nn.Sequential()\n+ self.branch1.add_sublayer(\n+ 'pool',\n+ nn.AvgPool2D(\n+ kernel_size=2, stride=2, padding=0, ceil_mode=True))\n+ self.branch1.add_sublayer(\n+ 'conv',\n+ ConvNormLayer(\n+ ch_in=ch_in,\n+ ch_out=ch_out,\n+ filter_size=1,\n+ stride=1,\n+ norm_type=norm_type,\n+ norm_decay=norm_decay,\n+ freeze_norm=freeze_norm,\n+ lr=lr))\n+ else:\n+ self.branch1 = ConvNormLayer(\n+ ch_in=ch_in,\n+ ch_out=ch_out,\n+ filter_size=1,\n+ stride=stride,\n+ norm_type=norm_type,\n+ norm_decay=norm_decay,\n+ freeze_norm=freeze_norm,\n+ lr=lr)\n+\n+ self.branch2a = ConvNormLayer(\n+ ch_in=ch_in,\n+ ch_out=width * scales,\n+ filter_size=1,\n+ stride=stride if variant == 'a' else 1,\n+ groups=1,\n+ act='relu',\n+ norm_type=norm_type,\n+ norm_decay=norm_decay,\n+ freeze_norm=freeze_norm,\n+ lr=lr)\n+\n+ self.branch2b = nn.LayerList([\n+ ConvNormLayer(\n+ ch_in=width,\n+ ch_out=width,\n+ filter_size=3,\n+ stride=1 if variant == 'a' else stride,\n+ groups=groups,\n+ act='relu',\n+ norm_type=norm_type,\n+ norm_decay=norm_decay,\n+ freeze_norm=freeze_norm,\n+ lr=lr,\n+ dcn_v2=dcn_v2) for _ in range(self.scales - 1)\n+ ])\n+\n+ self.branch2c = ConvNormLayer(\n+ ch_in=width * scales,\n+ ch_out=ch_out,\n+ filter_size=1,\n+ stride=1,\n+ groups=1,\n+ norm_type=norm_type,\n+ norm_decay=norm_decay,\n+ freeze_norm=freeze_norm,\n+ lr=lr)\n+\n+ def forward(self, inputs):\n+\n+ out = self.branch2a(inputs)\n+ feature_split = paddle.split(out, self.scales, 1)\n+ out_split = []\n+ for i in range(self.scales - 1):\n+ if i == 0 or self.stride == 2:\n+ out_split.append(self.branch2b[i](feature_split[i]))\n+ else:\n+ out_split.append(self.branch2b[i](paddle.add(feature_split[i],\n+ out_split[-1])))\n+ if self.stride == 1:\n+ out_split.append(feature_split[-1])\n+ else:\n+ out_split.append(F.avg_pool2d(feature_split[-1], 3, self.stride, 1))\n+ out = self.branch2c(paddle.concat(out_split, 1))\n+\n+ if self.shortcut:\n+ short = inputs\n+ else:\n+ short = self.branch1(inputs)\n+\n+ out = paddle.add(out, short)\n+ out = F.relu(out)\n+\n+ return out\n+\n+\n+class 
Blocks(nn.Layer):\n+ def __init__(self,\n+ ch_in,\n+ ch_out,\n+ count,\n+ stage_num,\n+ width,\n+ scales=4,\n+ variant='b',\n+ groups=1,\n+ lr=1.0,\n+ norm_type='bn',\n+ norm_decay=0.,\n+ freeze_norm=True,\n+ dcn_v2=False):\n+ super(Blocks, self).__init__()\n+\n+ self.blocks = nn.Sequential()\n+ for i in range(count):\n+ self.blocks.add_sublayer(\n+ str(i),\n+ BottleNeck(\n+ ch_in=ch_in if i == 0 else ch_out,\n+ ch_out=ch_out,\n+ stride=2 if i == 0 and stage_num != 2 else 1,\n+ shortcut=False if i == 0 else True,\n+ width=width * (2**(stage_num - 2)),\n+ scales=scales,\n+ variant=variant,\n+ groups=groups,\n+ lr=lr,\n+ norm_type=norm_type,\n+ norm_decay=norm_decay,\n+ freeze_norm=freeze_norm,\n+ dcn_v2=dcn_v2))\n+\n+ def forward(self, inputs):\n+ return self.blocks(inputs)\n+\n+\n+@register\n+@serializable\n+class Res2Net(nn.Layer):\n+ \"\"\"\n+ Res2Net, see https://arxiv.org/abs/1904.01169\n+ Args:\n+ depth (int): Res2Net depth, should be 50, 101, 152, 200.\n+ width (int): Res2Net width\n+ scales (int): Res2Net scale\n+ variant (str): Res2Net variant, supports 'a', 'b', 'c', 'd' currently\n+ lr_mult_list (list): learning rate ratio of different resnet stages(2,3,4,5),\n+ lower learning rate ratio is need for pretrained model\n+ got using distillation(default as [1.0, 1.0, 1.0, 1.0]).\n+ groups (int): The groups number of the Conv Layer.\n+ norm_type (str): normalization type, 'bn' or 'sync_bn'\n+ norm_decay (float): weight decay for normalization layer weights\n+ freeze_norm (bool): freeze normalization layers\n+ freeze_at (int): freeze the backbone at which stage\n+ return_idx (list): index of stages whose feature maps are returned,\n+ index 0 stands for res2\n+ dcn_v2_stages (list): index of stages who select deformable conv v2\n+ num_stages (int): number of stages created\n+\n+ \"\"\"\n+ __shared__ = ['norm_type']\n+\n+ def __init__(self,\n+ depth=50,\n+ width=26,\n+ scales=4,\n+ variant='b',\n+ lr_mult_list=[1.0, 1.0, 1.0, 1.0],\n+ groups=1,\n+ norm_type='bn',\n+ norm_decay=0.,\n+ freeze_norm=True,\n+ freeze_at=0,\n+ return_idx=[0, 1, 2, 3],\n+ dcn_v2_stages=[-1],\n+ num_stages=4):\n+ super(Res2Net, self).__init__()\n+\n+ self._model_type = 'Res2Net' if groups == 1 else 'Res2NeXt'\n+\n+ assert depth in [50, 101, 152, 200], \\\n+ \"depth {} not in [50, 101, 152, 200]\"\n+ assert variant in ['a', 'b', 'c', 'd'], \"invalid Res2Net variant\"\n+ assert num_stages >= 1 and num_stages <= 4\n+\n+ self.depth = depth\n+ self.variant = variant\n+ self.norm_type = norm_type\n+ self.norm_decay = norm_decay\n+ self.freeze_norm = freeze_norm\n+ self.freeze_at = freeze_at\n+ if isinstance(return_idx, Integral):\n+ return_idx = [return_idx]\n+ assert max(return_idx) < num_stages, \\\n+ 'the maximum return index must smaller than num_stages, ' \\\n+ 'but received maximum return index is {} and num_stages ' \\\n+ 'is {}'.format(max(return_idx), num_stages)\n+ self.return_idx = return_idx\n+ self.num_stages = num_stages\n+ assert len(lr_mult_list) == 4, \\\n+ \"lr_mult_list length must be 4 but got {}\".format(len(lr_mult_list))\n+ if isinstance(dcn_v2_stages, Integral):\n+ dcn_v2_stages = [dcn_v2_stages]\n+ assert max(dcn_v2_stages) < num_stages\n+ self.dcn_v2_stages = dcn_v2_stages\n+\n+ block_nums = Res2Net_cfg[depth]\n+\n+ # C1 stage\n+ if self.variant in ['c', 'd']:\n+ conv_def = [\n+ [3, 32, 3, 2, \"conv1_1\"],\n+ [32, 32, 3, 1, \"conv1_2\"],\n+ [32, 64, 3, 1, \"conv1_3\"],\n+ ]\n+ else:\n+ conv_def = [[3, 64, 7, 2, \"conv1\"]]\n+ self.res1 = nn.Sequential()\n+ for (c_in, c_out, k, s, _name) in 
conv_def:\n+ self.res1.add_sublayer(\n+ _name,\n+ ConvNormLayer(\n+ ch_in=c_in,\n+ ch_out=c_out,\n+ filter_size=k,\n+ stride=s,\n+ groups=1,\n+ act='relu',\n+ norm_type=norm_type,\n+ norm_decay=norm_decay,\n+ freeze_norm=freeze_norm,\n+ lr=1.0))\n+\n+ self._in_channels = [64, 256, 512, 1024]\n+ self._out_channels = [256, 512, 1024, 2048]\n+ self._out_strides = [4, 8, 16, 32]\n+\n+ # C2-C5 stages\n+ self.res_layers = []\n+ for i in range(num_stages):\n+ lr_mult = lr_mult_list[i]\n+ stage_num = i + 2\n+ self.res_layers.append(\n+ self.add_sublayer(\n+ \"res{}\".format(stage_num),\n+ Blocks(\n+ self._in_channels[i],\n+ self._out_channels[i],\n+ count=block_nums[i],\n+ stage_num=stage_num,\n+ width=width,\n+ scales=scales,\n+ groups=groups,\n+ lr=lr_mult,\n+ norm_type=norm_type,\n+ norm_decay=norm_decay,\n+ freeze_norm=freeze_norm,\n+ dcn_v2=(i in self.dcn_v2_stages))))\n+\n+ @property\n+ def out_shape(self):\n+ return [\n+ ShapeSpec(\n+ channels=self._out_channels[i], stride=self._out_strides[i])\n+ for i in self.return_idx\n+ ]\n+\n+ def forward(self, inputs):\n+ x = inputs['image']\n+ res1 = self.res1(x)\n+ x = F.max_pool2d(res1, kernel_size=3, stride=2, padding=1)\n+ outs = []\n+ for idx, stage in enumerate(self.res_layers):\n+ x = stage(x)\n+ if idx == self.freeze_at:\n+ x.stop_gradient = True\n+ if idx in self.return_idx:\n+ outs.append(x)\n+ return outs\n+\n+\n+@register\n+class Res2NetC5(nn.Layer):\n+ def __init__(self, depth=50, width=26, scales=4, variant='b'):\n+ super(Res2NetC5, self).__init__()\n+ feat_in, feat_out = [1024, 2048]\n+ self.res5 = Blocks(\n+ feat_in,\n+ feat_out,\n+ count=3,\n+ stage_num=5,\n+ width=width,\n+ scales=scales,\n+ variant=variant)\n+ self.feat_out = feat_out\n+\n+ @property\n+ def out_shape(self):\n+ return [ShapeSpec(\n+ channels=self.feat_out,\n+ stride=32, )]\n+\n+ def forward(self, roi_feat, stage=0):\n+ y = self.res5(roi_feat)\n+ return y\n"
}
] |
Python
|
Apache License 2.0
|
paddlepaddle/paddledetection
|
add res2net (#2992)
|
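A structural sketch of the hierarchical channel split implemented by `BottleNeck.forward` in the new `res2net.py`. The 3x3 branches are replaced by an identity and only the stride-1 path is shown, so this illustrates nothing beyond how the splits feed each other:

```python
import numpy as np

def res2net_split_forward(x, scales=4, conv3x3=lambda t: t):
    splits = np.split(x, scales, axis=1)          # split along channels
    outs = []
    for i in range(scales - 1):
        inp = splits[i] if i == 0 else splits[i] + outs[-1]   # reuse previous branch
        outs.append(conv3x3(inp))
    outs.append(splits[-1])                       # last split passes through (stride 1)
    return np.concatenate(outs, axis=1)

x = np.random.rand(1, 26 * 4, 16, 16).astype('float32')      # width=26, scales=4
print(res2net_split_forward(x).shape)                         # (1, 104, 16, 16)
```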
499,304 |
14.05.2021 15:12:18
| -28,800 |
f6139c054e2a75b5871a4a4adbe4375362b7fec7
|
fix static trt_int8 inference
|
[
{
"change_type": "MODIFY",
"old_path": "ppdet/engine/callbacks.py",
"new_path": "ppdet/engine/callbacks.py",
"diff": "@@ -158,7 +158,9 @@ class Checkpointer(Callback):\nif dist.get_world_size() < 2 or dist.get_rank() == 0:\nif mode == 'train':\nend_epoch = self.model.cfg.epoch\n- if epoch_id % self.model.cfg.snapshot_epoch == 0 or epoch_id == end_epoch - 1:\n+ if (\n+ epoch_id + 1\n+ ) % self.model.cfg.snapshot_epoch == 0 or epoch_id == end_epoch - 1:\nsave_name = str(\nepoch_id) if epoch_id != end_epoch - 1 else \"model_final\"\nweight = self.weight\n"
},
{
"change_type": "MODIFY",
"old_path": "static/deploy/cpp/include/object_detector.h",
"new_path": "static/deploy/cpp/include/object_detector.h",
"diff": "@@ -58,11 +58,12 @@ class ObjectDetector {\nexplicit ObjectDetector(const std::string& model_dir,\nbool use_gpu=false,\nconst std::string& run_mode=\"fluid\",\n- const int gpu_id=0) {\n+ const int gpu_id=0,\n+ bool trt_calib_mode=false) {\nconfig_.load_config(model_dir);\nthreshold_ = config_.draw_threshold_;\npreprocessor_.Init(config_.preprocess_info_, config_.arch_);\n- LoadModel(model_dir, use_gpu, config_.min_subgraph_size_, 1, run_mode, gpu_id);\n+ LoadModel(model_dir, use_gpu, config_.min_subgraph_size_, 1, run_mode, gpu_id, trt_calib_mode);\n}\n// Load Paddle inference model\n@@ -72,7 +73,8 @@ class ObjectDetector {\nconst int min_subgraph_size,\nconst int batch_size = 1,\nconst std::string& run_mode = \"fluid\",\n- const int gpu_id=0);\n+ const int gpu_id=0,\n+ bool trt_calib_mode=false);\n// Run predictor\nvoid Predict(const cv::Mat& im,\n"
},
{
"change_type": "MODIFY",
"old_path": "static/deploy/cpp/src/main.cc",
"new_path": "static/deploy/cpp/src/main.cc",
"diff": "@@ -43,6 +43,7 @@ DEFINE_int32(camera_id, -1, \"Device id of camera to predict\");\nDEFINE_bool(run_benchmark, false, \"Whether to predict a image_file repeatedly for benchmark\");\nDEFINE_double(threshold, 0.5, \"Threshold of score.\");\nDEFINE_string(output_dir, \"output\", \"Directory of output visualization files.\");\n+DEFINE_bool(trt_calib_mode, false, \"If the model is produced by TRT offline quantitative calibration, trt_calib_mode need to set True\");\nstatic std::string DirName(const std::string &filepath) {\nauto pos = filepath.rfind(OS_PATH_SEP);\n@@ -206,7 +207,7 @@ int main(int argc, char** argv) {\n// Load model and create a object detector\nPaddleDetection::ObjectDetector det(FLAGS_model_dir, FLAGS_use_gpu,\n- FLAGS_run_mode, FLAGS_gpu_id);\n+ FLAGS_run_mode, FLAGS_gpu_id, FLAGS_trt_calib_mode);\n// Do inference on input video or image\nif (!FLAGS_video_path.empty() || FLAGS_use_camera) {\nPredictVideo(FLAGS_video_path, &det);\n"
},
{
"change_type": "MODIFY",
"old_path": "static/deploy/cpp/src/object_detector.cc",
"new_path": "static/deploy/cpp/src/object_detector.cc",
"diff": "@@ -25,7 +25,8 @@ void ObjectDetector::LoadModel(const std::string& model_dir,\nconst int min_subgraph_size,\nconst int batch_size,\nconst std::string& run_mode,\n- const int gpu_id) {\n+ const int gpu_id,\n+ bool trt_calib_mode) {\npaddle::AnalysisConfig config;\nstd::string prog_file = model_dir + OS_PATH_SEP + \"__model__\";\nstd::string params_file = model_dir + OS_PATH_SEP + \"__params__\";\n@@ -33,14 +34,12 @@ void ObjectDetector::LoadModel(const std::string& model_dir,\nif (use_gpu) {\nconfig.EnableUseGpu(100, gpu_id);\nconfig.SwitchIrOptim(true);\n- bool use_calib_mode = false;\nif (run_mode != \"fluid\") {\nauto precision = paddle::AnalysisConfig::Precision::kFloat32;\nif (run_mode == \"trt_fp16\") {\nprecision = paddle::AnalysisConfig::Precision::kHalf;\n} else if (run_mode == \"trt_int8\") {\nprecision = paddle::AnalysisConfig::Precision::kInt8;\n- use_calib_mode = true;\n} else {\nprintf(\"run_mode should be 'fluid', 'trt_fp32', 'trt_fp16' or 'trt_int8'\");\n}\n@@ -50,7 +49,7 @@ void ObjectDetector::LoadModel(const std::string& model_dir,\nmin_subgraph_size,\nprecision,\nfalse,\n- use_calib_mode);\n+ trt_calib_mode);\n}\n} else {\nconfig.DisableGpu();\n"
},
{
"change_type": "MODIFY",
"old_path": "static/deploy/python/infer.py",
"new_path": "static/deploy/python/infer.py",
"diff": "@@ -65,7 +65,8 @@ class Detector(object):\nmodel_dir,\nuse_gpu=False,\nrun_mode='fluid',\n- threshold=0.5):\n+ threshold=0.5,\n+ trt_calib_mode=False):\nself.config = config\nif self.config.use_python_inference:\nself.executor, self.program, self.fecth_targets = load_executor(\n@@ -75,7 +76,8 @@ class Detector(object):\nmodel_dir,\nrun_mode=run_mode,\nmin_subgraph_size=self.config.min_subgraph_size,\n- use_gpu=use_gpu)\n+ use_gpu=use_gpu,\n+ trt_calib_mode=trt_calib_mode)\ndef preprocess(self, im):\npreprocess_ops = []\n@@ -221,13 +223,15 @@ class DetectorSOLOv2(Detector):\nmodel_dir,\nuse_gpu=False,\nrun_mode='fluid',\n- threshold=0.5):\n+ threshold=0.5,\n+ trt_calib_mode=False):\nsuper(DetectorSOLOv2, self).__init__(\nconfig=config,\nmodel_dir=model_dir,\nuse_gpu=use_gpu,\nrun_mode=run_mode,\n- threshold=threshold)\n+ threshold=threshold,\n+ trt_calib_mode=trt_calib_mode)\ndef predict(self,\nimage,\n@@ -379,11 +383,14 @@ def load_predictor(model_dir,\nrun_mode='fluid',\nbatch_size=1,\nuse_gpu=False,\n- min_subgraph_size=3):\n+ min_subgraph_size=3,\n+ trt_calib_mode=False):\n\"\"\"set AnalysisConfig, generate AnalysisPredictor\nArgs:\nmodel_dir (str): root path of __model__ and __params__\nuse_gpu (bool): whether use gpu\n+ trt_calib_mode (bool): If the model is produced by TRT offline quantitative\n+ calibration, trt_calib_mode need to set True\nReturns:\npredictor (PaddlePredictor): AnalysisPredictor\nRaises:\n@@ -393,7 +400,6 @@ def load_predictor(model_dir,\nraise ValueError(\n\"Predict by TensorRT mode: {}, expect use_gpu==True, but use_gpu == {}\"\n.format(run_mode, use_gpu))\n- use_calib_mode = True if run_mode == 'trt_int8' else False\nprecision_map = {\n'trt_int8': fluid.core.AnalysisConfig.Precision.Int8,\n'trt_fp32': fluid.core.AnalysisConfig.Precision.Float32,\n@@ -417,7 +423,7 @@ def load_predictor(model_dir,\nmin_subgraph_size=min_subgraph_size,\nprecision_mode=precision_map[run_mode],\nuse_static=False,\n- use_calib_mode=use_calib_mode)\n+ use_calib_mode=trt_calib_mode)\n# disable print log when predict\nconfig.disable_glog_info()\n@@ -531,13 +537,18 @@ def predict_video(detector, camera_id):\ndef main():\nconfig = Config(FLAGS.model_dir)\ndetector = Detector(\n- config, FLAGS.model_dir, use_gpu=FLAGS.use_gpu, run_mode=FLAGS.run_mode)\n+ config,\n+ FLAGS.model_dir,\n+ use_gpu=FLAGS.use_gpu,\n+ run_mode=FLAGS.run_mode,\n+ trt_calib_mode=FLAGS.trt_calib_mode)\nif config.arch == 'SOLOv2':\ndetector = DetectorSOLOv2(\nconfig,\nFLAGS.model_dir,\nuse_gpu=FLAGS.use_gpu,\n- run_mode=FLAGS.run_mode)\n+ run_mode=FLAGS.run_mode,\n+ trt_calib_mode=FLAGS.trt_calib_mode)\n# predict from image\nif FLAGS.image_file != '':\npredict_image(detector)\n@@ -590,6 +601,12 @@ if __name__ == '__main__':\ntype=str,\ndefault=\"output\",\nhelp=\"Directory of output visualization files.\")\n+ parser.add_argument(\n+ \"--trt_calib_mode\",\n+ type=bool,\n+ default=False,\n+ help=\"If the model is produced by TRT offline quantitative \"\n+ \"calibration, trt_calib_mode need to set True.\")\nFLAGS = parser.parse_args()\nprint_arguments(FLAGS)\n"
}
] |
Python
|
Apache License 2.0
|
paddlepaddle/paddledetection
|
fix static trt_int8 inference (#3001)
|
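Besides the `trt_calib_mode` plumbing, the commit above also shifts the checkpoint condition to `(epoch_id + 1) % snapshot_epoch`. A quick sketch of which epochs now produce a snapshot:

```python
# With snapshot_epoch=10 and a 30-epoch run, checkpoints land after every
# tenth finished epoch plus the final one.
snapshot_epoch, end_epoch = 10, 30
saved = [e for e in range(end_epoch)
         if (e + 1) % snapshot_epoch == 0 or e == end_epoch - 1]
print(saved)    # [9, 19, 29]
```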
499,298 |
14.05.2021 19:39:01
| -28,800 |
2bf412c6406073f48515e1ec2e9bffdd1ed83d5f
|
[MOT] add JDE other scales and fix MOT doc
|
[
{
"change_type": "MODIFY",
"old_path": "configs/mot/deepsort/README.md",
"new_path": "configs/mot/deepsort/README.md",
"diff": "@@ -37,17 +37,17 @@ det_results_dir\n```bash\n# use weights released in PaddleDetection model zoo\n-CUDA_VISIBLE_DEVICES=0 python tools/eval_mot.py -c configs/mot/jde/jde_darknet53_30e_1088x608_track.yml -o metric=MOT weights=https://paddledet.bj.bcebos.com/models/mot/jde_darknet53_30e_1088x608.pdparams --output ./det_results_dir\n+CUDA_VISIBLE_DEVICES=0 python tools/eval_mot.py -c configs/mot/jde/jde_darknet53_30e_1088x608_track.yml -o metric=MOT weights=https://paddledet.bj.bcebos.com/models/mot/jde_darknet53_30e_1088x608.pdparams\n# use saved checkpoint after training\n-CUDA_VISIBLE_DEVICES=0 python tools/eval_mot.py -c configs/mot/jde/jde_darknet53_30e_1088x608_track.yml -o metric=MOT weights=output/jde_darknet53_30e_1088x608/model_final --output ./det_results_dir\n+CUDA_VISIBLE_DEVICES=0 python tools/eval_mot.py -c configs/mot/jde/jde_darknet53_30e_1088x608_track.yml -o metric=MOT weights=output/jde_darknet53_30e_1088x608/model_final\n```\n### 2. Tracking\n```bash\n# track the objects by loading detected result files\n-CUDA_VISIBLE_DEVICES=0 python tools/eval_mot.py -c configs/mot/deepsort/deepsort_pcb_pyramid_r101.yml --det_results_dir ./det_results_dir/mot_results\n+CUDA_VISIBLE_DEVICES=0 python tools/eval_mot.py -c configs/mot/deepsort/deepsort_pcb_pyramid_r101.yml --det_results_dir {your detection results}\n```\n## Citations\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "configs/mot/jde/_base_/jde_reader_576x320.yml",
"diff": "+worker_num: 2\n+TrainReader:\n+ sample_transforms:\n+ - Decode: {}\n+ - AugmentHSV: {}\n+ - LetterBoxResize: {target_size: [320, 576]}\n+ - MOTRandomAffine: {}\n+ - RandomFlip: {}\n+ - BboxXYXY2XYWH: {}\n+ - NormalizeBox: {}\n+ - NormalizeImage: {mean: [0, 0, 0], std: [1, 1, 1], is_scale: True}\n+ - Permute: {}\n+ batch_transforms:\n+ - Gt2JDETargetThres:\n+ anchor_masks: [[0, 1, 2, 3], [4, 5, 6, 7], [8, 9, 10, 11]]\n+ anchors: [[[85,255], [120,320], [170,320], [340,320]],\n+ [[21,64], [30,90], [43,128], [60,180]],\n+ [[6,16], [8,23], [11,32], [16,45]]]\n+ downsample_ratios: [32, 16, 8]\n+ ide_thresh: 0.5\n+ fg_thresh: 0.5\n+ bg_thresh: 0.4\n+ batch_size: 4\n+ shuffle: true\n+ drop_last: true\n+ use_shared_memory: true\n+\n+\n+EvalReader:\n+ sample_transforms:\n+ - Decode: {}\n+ - LetterBoxResize: {target_size: [320, 576]}\n+ - BboxXYXY2XYWH: {}\n+ - NormalizeBox: {}\n+ - NormalizeImage: {mean: [0, 0, 0], std: [1, 1, 1], is_scale: True}\n+ - Permute: {}\n+ batch_transforms:\n+ - Gt2JDETargetMax:\n+ anchor_masks: [[0, 1, 2, 3], [4, 5, 6, 7], [8, 9, 10, 11]]\n+ anchors: [[[85,255], [120,320], [170,320], [340,320]],\n+ [[21,64], [30,90], [43,128], [60,180]],\n+ [[6,16], [8,23], [11,32], [16,45]]]\n+ downsample_ratios: [32, 16, 8]\n+ max_iou_thresh: 0.60\n+ - BboxCXCYWH2XYXY: {}\n+ - Norm2PixelBbox: {}\n+ batch_size: 1\n+ drop_empty: false\n+\n+\n+TestReader:\n+ inputs_def:\n+ image_shape: [3, 320, 576]\n+ sample_transforms:\n+ - Decode: {}\n+ - LetterBoxResize: {target_size: [320, 576]}\n+ - NormalizeImage: {mean: [0, 0, 0], std: [1, 1, 1], is_scale: True}\n+ - Permute: {}\n+ batch_size: 1\n+\n+\n+EvalMOTReader:\n+ sample_transforms:\n+ - Decode: {}\n+ - LetterBoxResize: {target_size: [320, 576]}\n+ - NormalizeImage: {mean: [0, 0, 0], std: [1, 1, 1], is_scale: True}\n+ - Permute: {}\n+ batch_size: 1\n+\n+\n+TestMOTReader:\n+ inputs_def:\n+ image_shape: [3, 320, 576]\n+ sample_transforms:\n+ - Decode: {}\n+ - LetterBoxResize: {target_size: [320, 576]}\n+ - NormalizeImage: {mean: [0, 0, 0], std: [1, 1, 1], is_scale: True}\n+ - Permute: {}\n+ batch_size: 1\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "configs/mot/jde/_base_/jde_reader_864x480.yml",
"diff": "+worker_num: 2\n+TrainReader:\n+ sample_transforms:\n+ - Decode: {}\n+ - AugmentHSV: {}\n+ - LetterBoxResize: {target_size: [480, 864]}\n+ - MOTRandomAffine: {}\n+ - RandomFlip: {}\n+ - BboxXYXY2XYWH: {}\n+ - NormalizeBox: {}\n+ - NormalizeImage: {mean: [0, 0, 0], std: [1, 1, 1], is_scale: True}\n+ - Permute: {}\n+ batch_transforms:\n+ - Gt2JDETargetThres:\n+ anchor_masks: [[0, 1, 2, 3], [4, 5, 6, 7], [8, 9, 10, 11]]\n+ anchors: [[[102,305], [143, 429], [203,508], [407,508]],\n+ [[25,76], [36,107], [51,152], [71,215]],\n+ [[6,19], [9,27], [13,38], [18,54]]]\n+ downsample_ratios: [32, 16, 8]\n+ ide_thresh: 0.5\n+ fg_thresh: 0.5\n+ bg_thresh: 0.4\n+ batch_size: 4\n+ shuffle: true\n+ drop_last: true\n+ use_shared_memory: true\n+\n+\n+EvalReader:\n+ sample_transforms:\n+ - Decode: {}\n+ - LetterBoxResize: {target_size: [480, 864]}\n+ - BboxXYXY2XYWH: {}\n+ - NormalizeBox: {}\n+ - NormalizeImage: {mean: [0, 0, 0], std: [1, 1, 1], is_scale: True}\n+ - Permute: {}\n+ batch_transforms:\n+ - Gt2JDETargetMax:\n+ anchor_masks: [[0, 1, 2, 3], [4, 5, 6, 7], [8, 9, 10, 11]]\n+ anchors: [[[102,305], [143, 429], [203,508], [407,508]],\n+ [[25,76], [36,107], [51,152], [71,215]],\n+ [[6,19], [9,27], [13,38], [18,54]]]\n+ downsample_ratios: [32, 16, 8]\n+ max_iou_thresh: 0.60\n+ - BboxCXCYWH2XYXY: {}\n+ - Norm2PixelBbox: {}\n+ batch_size: 1\n+ drop_empty: false\n+\n+\n+TestReader:\n+ inputs_def:\n+ image_shape: [3, 480, 864]\n+ sample_transforms:\n+ - Decode: {}\n+ - LetterBoxResize: {target_size: [480, 864]}\n+ - NormalizeImage: {mean: [0, 0, 0], std: [1, 1, 1], is_scale: True}\n+ - Permute: {}\n+ batch_size: 1\n+\n+\n+EvalMOTReader:\n+ sample_transforms:\n+ - Decode: {}\n+ - LetterBoxResize: {target_size: [480, 864]}\n+ - NormalizeImage: {mean: [0, 0, 0], std: [1, 1, 1], is_scale: True}\n+ - Permute: {}\n+ batch_size: 1\n+\n+\n+TestMOTReader:\n+ inputs_def:\n+ image_shape: [3, 480, 864]\n+ sample_transforms:\n+ - Decode: {}\n+ - LetterBoxResize: {target_size: [480, 864]}\n+ - NormalizeImage: {mean: [0, 0, 0], std: [1, 1, 1], is_scale: True}\n+ - Permute: {}\n+ batch_size: 1\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "configs/mot/jde/jde_darknet53_30e_576x320.yml",
"diff": "+_BASE_: [\n+ '../../datasets/mot.yml',\n+ '../../runtime.yml',\n+ '_base_/optimizer_30e.yml',\n+ '_base_/jde_darknet53.yml',\n+ '_base_/jde_reader_576x320.yml',\n+]\n+weights: output/jde_darknet53_30e_576x320/model_final\n+\n+JDE:\n+ detector: YOLOv3\n+ reid: JDEEmbeddingHead\n+ tracker: JDETracker\n+\n+YOLOv3:\n+ backbone: DarkNet\n+ neck: YOLOv3FPN\n+ yolo_head: YOLOv3Head\n+ post_process: JDEBBoxPostProcess\n+ for_mot: True\n+\n+YOLOv3Head:\n+ anchors: [[85,255], [120,320], [170,320], [340,320],\n+ [21,64], [30,90], [43,128], [60,180],\n+ [6,16], [8,23], [11,32], [16,45]]\n+ anchor_masks: [[0, 1, 2, 3], [4, 5, 6, 7], [8, 9, 10, 11]]\n+ loss: JDEDetectionLoss\n+\n+JDETracker:\n+ det_thresh: 0.3\n+ track_buffer: 30\n+ min_box_area: 200\n+ motion: KalmanFilter\n+\n+JDEBBoxPostProcess:\n+ decode:\n+ name: JDEBox\n+ conf_thresh: 0.5\n+ downsample_ratio: 32\n+ nms:\n+ name: MultiClassNMS\n+ keep_top_k: 500\n+ score_threshold: 0.01\n+ nms_threshold: 0.4\n+ nms_top_k: 2000\n+ normalized: true\n+ return_index: true\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "configs/mot/jde/jde_darknet53_30e_864x480.yml",
"diff": "+_BASE_: [\n+ '../../datasets/mot.yml',\n+ '../../runtime.yml',\n+ '_base_/optimizer_30e.yml',\n+ '_base_/jde_darknet53.yml',\n+ '_base_/jde_reader_864x480.yml',\n+]\n+weights: output/jde_darknet53_30e_864x480/model_final\n+\n+JDE:\n+ detector: YOLOv3\n+ reid: JDEEmbeddingHead\n+ tracker: JDETracker\n+\n+YOLOv3:\n+ backbone: DarkNet\n+ neck: YOLOv3FPN\n+ yolo_head: YOLOv3Head\n+ post_process: JDEBBoxPostProcess\n+ for_mot: True\n+\n+YOLOv3Head:\n+ anchors: [[102,305], [143, 429], [203,508], [407,508],\n+ [25,76], [36,107], [51,152], [71,215],\n+ [6,19], [9,27], [13,38], [18,54]]\n+ anchor_masks: [[0, 1, 2, 3], [4, 5, 6, 7], [8, 9, 10, 11]]\n+ loss: JDEDetectionLoss\n+\n+JDETracker:\n+ det_thresh: 0.3\n+ track_buffer: 30\n+ min_box_area: 200\n+ motion: KalmanFilter\n+\n+JDEBBoxPostProcess:\n+ decode:\n+ name: JDEBox\n+ conf_thresh: 0.5\n+ downsample_ratio: 32\n+ nms:\n+ name: MultiClassNMS\n+ keep_top_k: 500\n+ score_threshold: 0.01\n+ nms_threshold: 0.4\n+ nms_top_k: 2000\n+ normalized: true\n+ return_index: true\n"
},
{
"change_type": "MODIFY",
"old_path": "requirements.txt",
"new_path": "requirements.txt",
"diff": "@@ -14,3 +14,4 @@ lap\nsklearn\ncython_bbox\nmotmetrics\n+openpyxl\n\\ No newline at end of file\n"
}
] |
Python
|
Apache License 2.0
|
paddlepaddle/paddledetection
|
[MOT] add JDE other scales and fix MOT doc (#3008)
|
499,333 |
14.05.2021 22:05:33
| -28,800 |
c1bd0ac245b8aa4943f3c23ae12bfbda2257eb2a
|
fix none output in rcnn
|
[
{
"change_type": "MODIFY",
"old_path": "ppdet/modeling/post_process.py",
"new_path": "ppdet/modeling/post_process.py",
"diff": "@@ -86,6 +86,13 @@ class BBoxPostProcess(object):\npred_result (Tensor): The final prediction results with shape [N, 6]\nincluding labels, scores and bboxes.\n\"\"\"\n+\n+ if bboxes.shape[0] == 0:\n+ bbox_pred = paddle.to_tensor(\n+ np.array(\n+ [[-1, 0.0, 0.0, 0.0, 0.0, 0.0]], dtype='float32'))\n+ bbox_num = paddle.to_tensor(np.array([1], dtype='int32'))\n+\norigin_shape = paddle.floor(im_shape / scale_factor + 0.5)\norigin_shape_list = []\n"
}
] |
Python
|
Apache License 2.0
|
paddlepaddle/paddledetection
|
fix none output in rcnn (#2984)
|
499,395 |
17.05.2021 10:49:27
| -28,800 |
9dadaae7e131e0f7b5f0f460601950430f6a9097
|
fix some problem in docs, test=document_fix
|
[
{
"change_type": "MODIFY",
"old_path": "configs/pedestrian/README.md",
"new_path": "configs/pedestrian/README.md",
"diff": "@@ -45,6 +45,6 @@ python -u tools/infer.py -c configs/pedestrian/pedestrian_yolov3_darknet.yml \\\nSome inference results are visualized below:\n-\n+\n-\n+\n"
},
{
"change_type": "MODIFY",
"old_path": "configs/vehicle/README.md",
"new_path": "configs/vehicle/README.md",
"diff": "@@ -48,6 +48,6 @@ python -u tools/infer.py -c configs/vehicle/vehicle_yolov3_darknet.yml \\\nSome inference results are visualized below:\n-\n+\n-\n+\n"
},
{
"change_type": "ADD",
"old_path": "docs/images/PedestrianDetection_001.png",
"new_path": "docs/images/PedestrianDetection_001.png",
"diff": "Binary files /dev/null and b/docs/images/PedestrianDetection_001.png differ\n"
},
{
"change_type": "ADD",
"old_path": "docs/images/PedestrianDetection_004.png",
"new_path": "docs/images/PedestrianDetection_004.png",
"diff": "Binary files /dev/null and b/docs/images/PedestrianDetection_004.png differ\n"
},
{
"change_type": "ADD",
"old_path": "docs/images/VehicleDetection_001.jpeg",
"new_path": "docs/images/VehicleDetection_001.jpeg",
"diff": "Binary files /dev/null and b/docs/images/VehicleDetection_001.jpeg differ\n"
},
{
"change_type": "ADD",
"old_path": "docs/images/VehicleDetection_005.png",
"new_path": "docs/images/VehicleDetection_005.png",
"diff": "Binary files /dev/null and b/docs/images/VehicleDetection_005.png differ\n"
},
{
"change_type": "ADD",
"old_path": "docs/images/ppyolo_map_fps.png",
"new_path": "docs/images/ppyolo_map_fps.png",
"diff": "Binary files /dev/null and b/docs/images/ppyolo_map_fps.png differ\n"
}
] |
Python
|
Apache License 2.0
|
paddlepaddle/paddledetection
|
fix some problem in docs, test=document_fix (#3028)
|
499,333 |
17.05.2021 21:09:24
| -28,800 |
34968d61e66197b18d9c2c06a7da2796798db95e
|
[doc] update link(#3043)
|
[
{
"change_type": "MODIFY",
"old_path": "configs/res2net/README.md",
"new_path": "configs/res2net/README.md",
"diff": "| Backbone | Type | Image/gpu | Lr schd | Inf time (fps) | Box AP | Mask AP | Download | Configs |\n| :---------------------- | :------------- | :-------: | :-----: | :------------: | :----: | :-----: | :----------------------------------------------------------: | :-----: |\n-| Res2Net50-FPN | Faster | 2 | 1x | - | 40.6 | - | [model](https://paddledet.bj.bcebos.com/models/faster_rcnn_res2net50_vb_26w_4s_fpn_1x_coco.pdparams) | [config](https://github.com/PaddlePaddle/PaddleDetection/develop/configs/res2net/faster_rcnn_res2net50_vb_26w_4s_fpn_1x.yml) |\n-| Res2Net50-FPN | Mask | 2 | 2x | - | 42.4 | 38.1 | [model](https://paddledet.bj.bcebos.com/models/mask_rcnn_res2net50_vb_26w_4s_fpn_2x_coco.pdparams) | [config](https://github.com/PaddlePaddle/PaddleDetection/develop/configs/res2net/mask_rcnn_res2net50_vb_26w_4s_fpn_2x_coco.yml) |\n-| Res2Net50-vd-FPN | Mask | 2 | 2x | - | 42.6 | 38.1 | [model](https://paddledet.bj.bcebos.com/models/mask_rcnn_res2net50_vd_26w_4s_fpn_2x_coco.pdparams) | [config](https://github.com/PaddlePaddle/PaddleDetection/develop/configs/res2net/mask_rcnn_res2net50_vd_26w_4s_fpn_2x_coco.yml) |\n+| Res2Net50-FPN | Faster | 2 | 1x | - | 40.6 | - | [model](https://paddledet.bj.bcebos.com/models/faster_rcnn_res2net50_vb_26w_4s_fpn_1x_coco.pdparams) | [config](https://github.com/PaddlePaddle/PaddleDetection/blob/develop/configs/res2net/faster_rcnn_res2net50_vb_26w_4s_fpn_1x_coco.yml) |\n+| Res2Net50-FPN | Mask | 2 | 2x | - | 42.4 | 38.1 | [model](https://paddledet.bj.bcebos.com/models/mask_rcnn_res2net50_vb_26w_4s_fpn_2x_coco.pdparams) | [config](https://github.com/PaddlePaddle/PaddleDetection/blob/develop/configs/res2net/mask_rcnn_res2net50_vb_26w_4s_fpn_2x_coco.yml) |\n+| Res2Net50-vd-FPN | Mask | 2 | 2x | - | 42.6 | 38.1 | [model](https://paddledet.bj.bcebos.com/models/mask_rcnn_res2net50_vd_26w_4s_fpn_2x_coco.pdparams) | [config](https://github.com/PaddlePaddle/PaddleDetection/blob/develop/configs/res2net/mask_rcnn_res2net50_vd_26w_4s_fpn_2x_coco.yml) |\nNote: all the above models are trained with 8 gpus.\n"
}
] |
Python
|
Apache License 2.0
|
paddlepaddle/paddledetection
|
[doc] update link(#3043)
|
499,348 |
17.05.2021 22:43:37
| -28,800 |
d7ff713eccb24adcb4d29c954f132f87435caff1
|
add pose demo
|
[
{
"change_type": "ADD",
"old_path": "configs/keypoint/football_keypoint.gif",
"new_path": "configs/keypoint/football_keypoint.gif",
"diff": "Binary files /dev/null and b/configs/keypoint/football_keypoint.gif differ\n"
},
{
"change_type": "ADD",
"old_path": "demo/hrnet_demo.jpg",
"new_path": "demo/hrnet_demo.jpg",
"diff": "Binary files /dev/null and b/demo/hrnet_demo.jpg differ\n"
},
{
"change_type": "MODIFY",
"old_path": "deploy/python/keypoint_det_unite_infer.py",
"new_path": "deploy/python/keypoint_det_unite_infer.py",
"diff": "@@ -26,19 +26,22 @@ from keypoint_infer import KeyPoint_Detector, PredictConfig_KeyPoint\nfrom keypoint_visualize import draw_pose\n-def expand_crop(images, rect, expand_ratio=0.5):\n+def expand_crop(images, rect, expand_ratio=0.3):\nimgh, imgw, c = images.shape\n- label, _, xmin, ymin, xmax, ymax = [int(x) for x in rect.tolist()]\n+ label, conf, xmin, ymin, xmax, ymax = [int(x) for x in rect.tolist()]\nif label != 0:\n- return None, None\n+ return None, None, None\n+ org_rect = [xmin, ymin, xmax, ymax]\nh_half = (ymax - ymin) * (1 + expand_ratio) / 2.\nw_half = (xmax - xmin) * (1 + expand_ratio) / 2.\n+ if h_half > w_half * 4 / 3:\n+ w_half = h_half * 0.75\ncenter = [(ymin + ymax) / 2., (xmin + xmax) / 2.]\nymin = max(0, int(center[0] - h_half))\nymax = min(imgh - 1, int(center[0] + h_half))\nxmin = max(0, int(center[1] - w_half))\nxmax = min(imgw - 1, int(center[1] + w_half))\n- return images[ymin:ymax, xmin:xmax, :], [xmin, ymin, xmax, ymax]\n+ return images[ymin:ymax, xmin:xmax, :], [xmin, ymin, xmax, ymax], org_rect\ndef get_person_from_rect(images, results):\n@@ -46,12 +49,14 @@ def get_person_from_rect(images, results):\nmask = det_results[:, 1] > FLAGS.det_threshold\nvalid_rects = det_results[mask]\nimage_buff = []\n+ org_rects = []\nfor rect in valid_rects:\n- rect_image, new_rect = expand_crop(images, rect)\n+ rect_image, new_rect, org_rect = expand_crop(images, rect)\nif rect_image is None:\ncontinue\nimage_buff.append([rect_image, new_rect])\n- return image_buff\n+ org_rects.append(org_rect)\n+ return image_buff, org_rects\ndef affine_backto_orgimages(keypoint_result, batch_records):\n@@ -65,10 +70,10 @@ def topdown_unite_predict(detector, topdown_keypoint_detector, image_list):\nfor i, img_file in enumerate(image_list):\nimage, _ = decode_image(img_file, {})\nresults = detector.predict(image, FLAGS.det_threshold)\n- batchs_images = get_person_from_rect(image, results)\n+ batchs_images, det_rects = get_person_from_rect(image, results)\nkeypoint_vector = []\nscore_vector = []\n- rect_vecotr = []\n+ rect_vecotr = det_rects\nfor batch_images, batch_records in batchs_images:\nkeypoint_result = topdown_keypoint_detector.predict(\nbatch_images, FLAGS.keypoint_threshold)\n@@ -76,14 +81,18 @@ def topdown_unite_predict(detector, topdown_keypoint_detector, image_list):\nbatch_records)\nkeypoint_vector.append(orgkeypoints)\nscore_vector.append(scores)\n- rect_vecotr.append(batch_records)\nkeypoint_res = {}\nkeypoint_res['keypoint'] = [\nnp.vstack(keypoint_vector), np.vstack(score_vector)\n]\nkeypoint_res['bbox'] = rect_vecotr\n+ if not os.path.exists(FLAGS.output_dir):\n+ os.makedirs(FLAGS.output_dir)\ndraw_pose(\n- img_file, keypoint_res, visual_thread=FLAGS.keypoint_threshold)\n+ img_file,\n+ keypoint_res,\n+ visual_thread=FLAGS.keypoint_threshold,\n+ save_dir=FLAGS.output_dir)\ndef topdown_unite_predict_video(detector, topdown_keypoint_detector, camera_id):\n@@ -92,8 +101,8 @@ def topdown_unite_predict_video(detector, topdown_keypoint_detector, camera_id):\nvideo_name = 'output.mp4'\nelse:\ncapture = cv2.VideoCapture(FLAGS.video_file)\n- video_name = os.path.basename(\n- os.path.split(FLAGS.video_file + '.mp4')[-1])\n+ video_name = os.path.splitext(os.path.basename(FLAGS.video_file))[\n+ 0] + '.mp4'\nfps = 30\nwidth = int(capture.get(cv2.CAP_PROP_FRAME_WIDTH))\nheight = int(capture.get(cv2.CAP_PROP_FRAME_HEIGHT))\n@@ -114,10 +123,9 @@ def topdown_unite_predict_video(detector, topdown_keypoint_detector, camera_id):\nframe2 = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\nresults = 
detector.predict(frame2, FLAGS.det_threshold)\n- batchs_images = get_person_from_rect(frame, results)\n+ batchs_images, rect_vecotr = get_person_from_rect(frame2, results)\nkeypoint_vector = []\nscore_vector = []\n- rect_vecotr = []\nfor batch_images, batch_records in batchs_images:\nkeypoint_result = topdown_keypoint_detector.predict(\nbatch_images, FLAGS.keypoint_threshold)\n@@ -125,7 +133,6 @@ def topdown_unite_predict_video(detector, topdown_keypoint_detector, camera_id):\nbatch_records)\nkeypoint_vector.append(orgkeypoints)\nscore_vector.append(scores)\n- rect_vecotr.append(batch_records)\nkeypoint_res = {}\nkeypoint_res['keypoint'] = [\nnp.vstack(keypoint_vector), np.vstack(score_vector)\n"
},
{
"change_type": "MODIFY",
"old_path": "deploy/python/keypoint_infer.py",
"new_path": "deploy/python/keypoint_infer.py",
"diff": "@@ -332,7 +332,13 @@ def predict_image(detector, image_list):\nprint('Test iter {}, file name:{}'.format(i, img_file))\nelse:\nresults = detector.predict(img_file, FLAGS.threshold)\n- draw_pose(img_file, results, visual_thread=FLAGS.threshold)\n+ if not os.path.exists(FLAGS.output_dir):\n+ os.makedirs(FLAGS.output_dir)\n+ draw_pose(\n+ img_file,\n+ results,\n+ visual_thread=FLAGS.threshold,\n+ save_dir=FLAGS.output_dir)\ndef predict_video(detector, camera_id):\n"
},
{
"change_type": "MODIFY",
"old_path": "deploy/python/keypoint_visualize.py",
"new_path": "deploy/python/keypoint_visualize.py",
"diff": "@@ -28,6 +28,7 @@ def draw_pose(imgfile,\nresults,\nvisual_thread=0.6,\nsave_name='pose.jpg',\n+ save_dir='output',\nreturnimg=False):\ntry:\nimport matplotlib.pyplot as plt\n@@ -56,8 +57,7 @@ def draw_pose(imgfile,\nbboxs = results['bbox']\nfor idx, rect in enumerate(bboxs):\nxmin, ymin, xmax, ymax = rect\n- cv2.rectangle(img, (xmin, ymin), (xmax, ymax),\n- colors[idx % len(colors)], 2)\n+ cv2.rectangle(img, (xmin, ymin), (xmax, ymax), colors[0], 1)\ncanvas = img.copy()\nfor i in range(17):\n@@ -100,7 +100,8 @@ def draw_pose(imgfile,\ncanvas = cv2.addWeighted(canvas, 0.4, cur_canvas, 0.6, 0)\nif returnimg:\nreturn canvas\n- save_name = 'output/' + os.path.basename(imgfile)[:-4] + '_vis.jpg'\n+ save_name = os.path.join(\n+ save_dir, os.path.splitext(os.path.basename(imgfile))[0] + '_vis.jpg')\nplt.imsave(save_name, canvas[:, :, ::-1])\nprint(\"keypoint visualize image saved to: \" + save_name)\nplt.close()\n"
}
] |
Python
|
Apache License 2.0
|
paddlepaddle/paddledetection
|
add pose demo (#3042)
|
499,304 |
18.05.2021 09:53:50
| -28,800 |
b7c50f5bd1b8d02661ae3313d5719e5bbaafb6cb
|
fix oid model export model
|
[
{
"change_type": "MODIFY",
"old_path": "static/ppdet/utils/export_utils.py",
"new_path": "static/ppdet/utils/export_utils.py",
"diff": "@@ -67,6 +67,8 @@ def parse_reader(reader_cfg, metric, arch):\nfrom ppdet.utils.voc_eval import get_category_info\nelif metric == \"WIDERFACE\":\nfrom ppdet.utils.widerface_eval_utils import get_category_info\n+ elif cfg.metric == 'OID':\n+ from ppdet.utils.oid_eval import get_category_info\nelse:\nraise ValueError(\n\"metric only supports COCO, VOC, WIDERFACE, but received {}\".format(\n"
}
] |
Python
|
Apache License 2.0
|
paddlepaddle/paddledetection
|
fix oid model export model (#3037)
|
499,357 |
18.05.2021 14:06:29
| -28,800 |
fa4741956ec9d805df40e892baebb04d30f61fcb
|
add yolov3 darknet config to develop, test=kunlun
|
[
{
"change_type": "ADD",
"old_path": null,
"new_path": "static/configs/yolov3_darknet_roadsign_kunlun.yml",
"diff": "+architecture: YOLOv3\n+use_gpu: false\n+use_xpu: true\n+max_iters: 1200\n+log_iter: 1\n+save_dir: output\n+snapshot_iter: 200\n+metric: VOC\n+map_type: integral\n+pretrain_weights: https://paddlemodels.bj.bcebos.com/object_detection/yolov3_darknet.tar\n+weights: output/yolov3_darknet_roadsign_xpu/model_final\n+num_classes: 4\n+finetune_exclude_pretrained_params: ['yolo_output']\n+use_fine_grained_loss: false\n+\n+YOLOv3:\n+ backbone: DarkNet\n+ yolo_head: YOLOv3Head\n+\n+DarkNet:\n+ norm_type: bn\n+ norm_decay: 0.\n+ depth: 53\n+\n+YOLOv3Head:\n+ anchor_masks: [[6, 7, 8], [3, 4, 5], [0, 1, 2]]\n+ anchors: [[10, 13], [16, 30], [33, 23],\n+ [30, 61], [62, 45], [59, 119],\n+ [116, 90], [156, 198], [373, 326]]\n+ norm_decay: 0.\n+ yolo_loss: YOLOv3Loss\n+ nms:\n+ background_label: -1\n+ keep_top_k: 100\n+ nms_threshold: 0.45\n+ nms_top_k: 1000\n+ normalized: false\n+ score_threshold: 0.01\n+\n+YOLOv3Loss:\n+ ignore_thresh: 0.7\n+ label_smooth: true\n+\n+LearningRate:\n+ base_lr: 0.000125 #0.00025\n+ schedulers:\n+ - !PiecewiseDecay\n+ gamma: 0.1\n+ milestones:\n+ - 800 #400\n+ - 1100 #550\n+ - !LinearWarmup\n+ start_factor: 0.\n+ steps: 200 #200\n+\n+OptimizerBuilder:\n+ optimizer:\n+ momentum: 0.9\n+ type: Momentum\n+ regularizer:\n+ factor: 0.0005\n+ type: L2\n+\n+TrainReader:\n+ inputs_def:\n+ fields: ['image', 'gt_bbox', 'gt_class', 'gt_score']\n+ num_max_boxes: 50\n+ dataset:\n+ !VOCDataSet\n+ dataset_dir: dataset/roadsign_voc\n+ anno_path: train.txt\n+ with_background: false\n+ sample_transforms:\n+ - !DecodeImage\n+ to_rgb: True\n+ with_mixup: True\n+ - !MixupImage\n+ alpha: 1.5\n+ beta: 1.5\n+ - !ColorDistort {}\n+ - !RandomExpand\n+ fill_value: [123.675, 116.28, 103.53]\n+ ratio: 1.5\n+ - !RandomCrop {}\n+ - !RandomFlipImage\n+ is_normalized: false\n+ - !NormalizeBox {}\n+ - !PadBox\n+ num_max_boxes: 50\n+ - !BboxXYXY2XYWH {}\n+ batch_transforms:\n+ - !RandomShape\n+ sizes: [320, 352, 384, 416, 448, 480, 512, 544, 576, 608]\n+ random_inter: True\n+ - !NormalizeImage\n+ mean: [0.485, 0.456, 0.406]\n+ std: [0.229, 0.224, 0.225]\n+ is_scale: True\n+ is_channel_first: false\n+ - !Permute\n+ to_bgr: false\n+ channel_first: True\n+ # Gt2YoloTarget is only used when use_fine_grained_loss set as true,\n+ # this operator will be deleted automatically if use_fine_grained_loss\n+ # is set as false\n+ - !Gt2YoloTarget\n+ anchor_masks: [[6, 7, 8], [3, 4, 5], [0, 1, 2]]\n+ anchors: [[10, 13], [16, 30], [33, 23],\n+ [30, 61], [62, 45], [59, 119],\n+ [116, 90], [156, 198], [373, 326]]\n+ downsample_ratios: [32, 16, 8]\n+ batch_size: 2\n+ shuffle: true\n+ mixup_epoch: 250\n+ drop_last: true\n+ worker_num: 2\n+ bufsize: 2\n+ use_process: false #true\n+\n+\n+EvalReader:\n+ inputs_def:\n+ fields: ['image', 'im_size', 'im_id', 'gt_bbox', 'gt_class', 'is_difficult']\n+ num_max_boxes: 50\n+ dataset:\n+ !VOCDataSet\n+ dataset_dir: dataset/roadsign_voc\n+ anno_path: valid.txt\n+ with_background: false\n+ sample_transforms:\n+ - !DecodeImage\n+ to_rgb: True\n+ - !ResizeImage\n+ target_size: 608\n+ interp: 2\n+ - !NormalizeImage\n+ mean: [0.485, 0.456, 0.406]\n+ std: [0.229, 0.224, 0.225]\n+ is_scale: True\n+ is_channel_first: false\n+ - !PadBox\n+ num_max_boxes: 50\n+ - !Permute\n+ to_bgr: false\n+ channel_first: True\n+ batch_size: 4\n+ drop_empty: false\n+ worker_num: 4\n+ bufsize: 2\n+\n+TestReader:\n+ inputs_def:\n+ image_shape: [3, 608, 608]\n+ fields: ['image', 'im_size', 'im_id']\n+ dataset:\n+ !ImageFolder\n+ anno_path: dataset/roadsign_voc/label_list.txt\n+ with_background: false\n+ 
sample_transforms:\n+ - !DecodeImage\n+ to_rgb: True\n+ - !ResizeImage\n+ target_size: 608\n+ interp: 2\n+ - !NormalizeImage\n+ mean: [0.485, 0.456, 0.406]\n+ std: [0.229, 0.224, 0.225]\n+ is_scale: True\n+ is_channel_first: false\n+ - !Permute\n+ to_bgr: false\n+ channel_first: True\n+ batch_size: 1\n"
},
{
"change_type": "MODIFY",
"old_path": "static/docs/tutorials/train_on_kunlun.md",
"new_path": "static/docs/tutorials/train_on_kunlun.md",
"diff": "## yolov3\n### Prepare data\n-Prepare data roadsign\n-\n+Prepare data roadsign:\n+```shell\n+cd PaddleDetection/static/dataset/roadsign_voc/\n+python3.7 download_roadsign_voc.py\n+```\n### Train\n```shell\n@@ -20,6 +23,19 @@ python3.7 -u tools/train.py -c configs/yolov3_mobilenet_v1_roadsign.yml -o use_g\npython3.7 -u tools/eval.py -c configs/yolov3_mobilenet_v1_roadsign.yml -o weights=output/yolov3_mobilenet_v1_roadsign/model_final.pdparams use_gpu=False use_xpu=True\n```\n+### Train on Darknet\n+```shell\n+cd static/\n+python3.7 -u tools/train.py -c configs/yolov3_datknet_roadsign_kunlun.yml -o use_gpu=False use_xpu=True\n+```\n+\n+\n+### Eval on Darknet\n+```shell\n+cd static/\n+python3.7 -u tools/eval.py -c configs/yolov3_darknet_roadsign_kunlun.yml -o weights=output/yolov3_darknet_roadsign_kunlun/model_final.pdparams use_gpu=False use_xpu=True\n+```\n+\n## ppyolo\n"
}
] |
Python
|
Apache License 2.0
|
paddlepaddle/paddledetection
|
add yolov3 darknet config to develop, test=kunlun (#3035)
|
499,331 |
18.05.2021 20:41:30
| -28,800 |
2b7a999d43933d65fc9df35a3b42c1d5f9bb7b18
|
move optimizer of roadsign config to a single file
|
[
{
"change_type": "ADD",
"old_path": null,
"new_path": "configs/yolov3/_base_/optimizer_40e.yml",
"diff": "+epoch: 40\n+\n+LearningRate:\n+ base_lr: 0.0001\n+ schedulers:\n+ - !PiecewiseDecay\n+ gamma: 0.1\n+ milestones:\n+ - 32\n+ - 36\n+ - !LinearWarmup\n+ start_factor: 0.3333333333333333\n+ steps: 100\n+\n+OptimizerBuilder:\n+ optimizer:\n+ momentum: 0.9\n+ type: Momentum\n+ regularizer:\n+ factor: 0.0005\n+ type: L2\n"
},
{
"change_type": "MODIFY",
"old_path": "configs/yolov3/yolov3_mobilenet_v1_roadsign.yml",
"new_path": "configs/yolov3/yolov3_mobilenet_v1_roadsign.yml",
"diff": "_BASE_: [\n'../datasets/roadsign_voc.yml',\n'../runtime.yml',\n+ '_base_/optimizer_40e.yml',\n'_base_/yolov3_mobilenet_v1.yml',\n'_base_/yolov3_reader.yml',\n]\n@@ -10,24 +11,3 @@ weights: output/yolov3_mobilenet_v1_roadsign/model_final\nYOLOv3Loss:\nignore_thresh: 0.7\nlabel_smooth: true\n-\n-snapshot_epoch: 2\n-epoch: 40\n-\n-LearningRate:\n- base_lr: 0.0001\n- schedulers:\n- - !PiecewiseDecay\n- gamma: 0.1\n- milestones: [32, 36]\n- - !LinearWarmup\n- start_factor: 0.3333333333333333\n- steps: 100\n-\n-OptimizerBuilder:\n- optimizer:\n- momentum: 0.9\n- type: Momentum\n- regularizer:\n- factor: 0.0005\n- type: L2\n"
}
] |
Python
|
Apache License 2.0
|
paddlepaddle/paddledetection
|
move optimizer of roadsign config to a single file (#3055)
|
499,313 |
20.05.2021 11:27:27
| -28,800 |
e0ad4dbef8e6edb60a25a107ff896f4bbce6b335
|
fix shm check failed in windows/Mac system
|
[
{
"change_type": "MODIFY",
"old_path": "ppdet/data/reader.py",
"new_path": "ppdet/data/reader.py",
"diff": "@@ -180,7 +180,10 @@ class BaseDataLoader(object):\nelse:\nself._batch_sampler = batch_sampler\n- use_shared_memory = self.use_shared_memory\n+ # DataLoader do not start sub-process in Windows and Mac\n+ # system, do not need to use shared memory\n+ use_shared_memory = self.use_shared_memory and \\\n+ sys.platform not in ['win32', 'darwin']\n# check whether shared memory size is bigger than 1G(1024M)\nif use_shared_memory:\nshm_size = _get_shared_memory_size_in_M()\n"
}
] |
Python
|
Apache License 2.0
|
paddlepaddle/paddledetection
|
fix shm check failed in windows/Mac system (#3069)
|
499,313 |
20.05.2021 15:18:10
| -28,800 |
47101cfbb875f17240f185f9378338d44a6e8540
|
fix install error on Python36
|
[
{
"change_type": "MODIFY",
"old_path": "docs/tutorials/INSTALL.md",
"new_path": "docs/tutorials/INSTALL.md",
"diff": "@@ -101,6 +101,8 @@ pip install -r requirements.txt\n```pip install git+https://github.com/philferriere/cocoapi.git#subdirectory=PythonAPI```\n+2. If you are using Python <= 3.6, `pycocotools` installing may failed with error like `distutils.errors.DistutilsError: Could not find suitable distribution for Requirement.parse('cython>=0.27.3')`, please install `cython` firstly, for example `pip install cython`\n+\nAfter installation, make sure the tests pass:\n```shell\n"
}
] |
Python
|
Apache License 2.0
|
paddlepaddle/paddledetection
|
fix install error on Python36 (#3083)
|
499,348 |
20.05.2021 16:13:09
| -28,800 |
19124833e324cf3f5ddc4799a38f50b5d1432875
|
add mot_pose_demo;sych with det benchmark codes
|
[
{
"change_type": "MODIFY",
"old_path": "deploy/python/infer.py",
"new_path": "deploy/python/infer.py",
"diff": "@@ -541,8 +541,8 @@ def main():\ndetector.det_times.info(average=True)\nelse:\nmems = {\n- 'cpu_rss': detector.cpu_mem / len(img_list),\n- 'gpu_rss': detector.gpu_mem / len(img_list),\n+ 'cpu_rss_mb': detector.cpu_mem / len(img_list),\n+ 'gpu_rss_mb': detector.gpu_mem / len(img_list),\n'gpu_util': detector.gpu_util * 100 / len(img_list)\n}\n@@ -558,8 +558,8 @@ def main():\n'shape': \"dynamic_shape\",\n'data_num': perf_info['img_num']\n}\n- det_log = PaddleInferBenchmark(\n- detector.config, model_info, data_info, perf_info, mems)\n+ det_log = PaddleInferBenchmark(detector.config, model_info,\n+ data_info, perf_info, mems)\ndet_log('Det')\n"
},
{
"change_type": "MODIFY",
"old_path": "deploy/python/keypoint_det_unite_infer.py",
"new_path": "deploy/python/keypoint_det_unite_infer.py",
"diff": "# limitations under the License.\nimport os\n-\nfrom PIL import Image\nimport cv2\nimport numpy as np\n@@ -52,7 +51,7 @@ def get_person_from_rect(images, results):\norg_rects = []\nfor rect in valid_rects:\nrect_image, new_rect, org_rect = expand_crop(images, rect)\n- if rect_image is None:\n+ if rect_image is None or rect_image.size == 0:\ncontinue\nimage_buff.append([rect_image, new_rect])\norg_rects.append(org_rect)\n@@ -113,13 +112,13 @@ def topdown_unite_predict_video(detector, topdown_keypoint_detector, camera_id):\nos.makedirs(FLAGS.output_dir)\nout_path = os.path.join(FLAGS.output_dir, video_name)\nwriter = cv2.VideoWriter(out_path, fourcc, fps, (width, height))\n- index = 1\n+ index = 0\nwhile (1):\nret, frame = capture.read()\nif not ret:\nbreak\n- print('detect frame:%d' % (index))\nindex += 1\n+ print('detect frame:%d' % (index))\nframe2 = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\nresults = detector.predict(frame2, FLAGS.det_threshold)\n@@ -136,7 +135,7 @@ def topdown_unite_predict_video(detector, topdown_keypoint_detector, camera_id):\nkeypoint_res = {}\nkeypoint_res['keypoint'] = [\nnp.vstack(keypoint_vector), np.vstack(score_vector)\n- ]\n+ ] if len(keypoint_vector) > 0 else [[], []]\nkeypoint_res['bbox'] = rect_vecotr\nim = draw_pose(\nframe,\n@@ -189,8 +188,6 @@ def main():\n# predict from image\nimg_list = get_test_images(FLAGS.image_dir, FLAGS.image_file)\ntopdown_unite_predict(detector, topdown_keypoint_detector, img_list)\n- detector.det_times.info(average=True)\n- topdown_keypoint_detector.det_times.info(average=True)\nif __name__ == '__main__':\n"
},
{
"change_type": "MODIFY",
"old_path": "deploy/python/keypoint_infer.py",
"new_path": "deploy/python/keypoint_infer.py",
"diff": "@@ -28,7 +28,8 @@ from keypoint_postprocess import HrHRNetPostProcess, HRNetPostProcess\nfrom keypoint_visualize import draw_pose\nfrom paddle.inference import Config\nfrom paddle.inference import create_predictor\n-from utils import argsparser, Timer, get_current_memory_mb, LoggerHelper\n+from utils import argsparser, Timer, get_current_memory_mb\n+from benchmark_utils import PaddleInferBenchmark\nfrom infer import get_test_images, print_arguments\n# Global dictionary\n@@ -66,7 +67,7 @@ class KeyPoint_Detector(object):\ncpu_threads=1,\nenable_mkldnn=False):\nself.pred_config = pred_config\n- self.predictor = load_predictor(\n+ self.predictor, self.config = load_predictor(\nmodel_dir,\nrun_mode=run_mode,\nmin_subgraph_size=self.pred_config.min_subgraph_size,\n@@ -129,7 +130,7 @@ class KeyPoint_Detector(object):\nMaskRCNN's results include 'masks': np.ndarray:\nshape: [N, im_h, im_w]\n'''\n- self.det_times.preprocess_time.start()\n+ self.det_times.preprocess_time_s.start()\ninputs = self.preprocess(image)\nnp_boxes, np_masks = None, None\ninput_names = self.predictor.get_input_names()\n@@ -137,7 +138,7 @@ class KeyPoint_Detector(object):\nfor i in range(len(input_names)):\ninput_tensor = self.predictor.get_input_handle(input_names[i])\ninput_tensor.copy_from_cpu(inputs[input_names[i]])\n- self.det_times.preprocess_time.end()\n+ self.det_times.preprocess_time_s.end()\nfor i in range(warmup):\nself.predictor.run()\noutput_names = self.predictor.get_output_names()\n@@ -152,7 +153,7 @@ class KeyPoint_Detector(object):\ninds_k.copy_to_cpu()\n]\n- self.det_times.inference_time.start()\n+ self.det_times.inference_time_s.start()\nfor i in range(repeats):\nself.predictor.run()\noutput_names = self.predictor.get_output_names()\n@@ -166,12 +167,12 @@ class KeyPoint_Detector(object):\nmasks_tensor.copy_to_cpu(), heat_k.copy_to_cpu(),\ninds_k.copy_to_cpu()\n]\n- self.det_times.inference_time.end(repeats=repeats)\n+ self.det_times.inference_time_s.end(repeats=repeats)\n- self.det_times.postprocess_time.start()\n+ self.det_times.postprocess_time_s.start()\nresults = self.postprocess(\nnp_boxes, np_masks, inputs, threshold=threshold)\n- self.det_times.postprocess_time.end()\n+ self.det_times.postprocess_time_s.end()\nself.det_times.img_num += 1\nreturn results\n@@ -318,7 +319,7 @@ def load_predictor(model_dir,\n# disable feed, fetch OP, needed by zero_copy_run\nconfig.switch_use_feed_fetch_ops(False)\npredictor = create_predictor(config)\n- return predictor\n+ return predictor, config\ndef predict_image(detector, image_list):\n@@ -347,7 +348,8 @@ def predict_video(detector, camera_id):\nvideo_name = 'output.mp4'\nelse:\ncapture = cv2.VideoCapture(FLAGS.video_file)\n- video_name = os.path.basename(os.path.split(FLAGS.video_file)[-1])\n+ video_name = os.path.splitext(os.path.basename(FLAGS.video_file))[\n+ 0] + '.mp4'\nfps = 30\nwidth = int(capture.get(cv2.CAP_PROP_FRAME_WIDTH))\nheight = int(capture.get(cv2.CAP_PROP_FRAME_HEIGHT))\n@@ -403,13 +405,25 @@ def main():\ndetector.det_times.info(average=True)\nelse:\nmems = {\n- 'cpu_rss': detector.cpu_mem / len(img_list),\n- 'gpu_rss': detector.gpu_mem / len(img_list),\n+ 'cpu_rss_mb': detector.cpu_mem / len(img_list),\n+ 'gpu_rss_mb': detector.gpu_mem / len(img_list),\n'gpu_util': detector.gpu_util * 100 / len(img_list)\n}\n- det_logger = LoggerHelper(\n- FLAGS, detector.det_times.report(average=True), mems)\n- det_logger.report()\n+ perf_info = detector.det_times.report(average=True)\n+ model_dir = FLAGS.model_dir\n+ mode = FLAGS.run_mode\n+ 
model_info = {\n+ 'model_name': model_dir.strip('/').split('/')[-1],\n+ 'precision': mode.split('_')[-1]\n+ }\n+ data_info = {\n+ 'batch_size': 1,\n+ 'shape': \"dynamic_shape\",\n+ 'data_num': perf_info['img_num']\n+ }\n+ det_log = PaddleInferBenchmark(detector.config, model_info,\n+ data_info, perf_info, mems)\n+ det_log('KeyPoint')\nif __name__ == '__main__':\n"
},
{
"change_type": "MODIFY",
"old_path": "deploy/python/keypoint_visualize.py",
"new_path": "deploy/python/keypoint_visualize.py",
"diff": "@@ -19,11 +19,6 @@ import numpy as np\nimport math\n-def map_coco_to_personlab(keypoints):\n- permute = [0, 6, 8, 10, 5, 7, 9, 12, 14, 16, 11, 13, 15, 2, 1, 4, 3]\n- return keypoints[:, permute, :]\n-\n-\ndef draw_pose(imgfile,\nresults,\nvisual_thread=0.6,\n@@ -39,9 +34,9 @@ def draw_pose(imgfile,\n'for example: `pip install matplotlib`.')\nraise e\n- EDGES = [(0, 14), (0, 13), (0, 4), (0, 1), (14, 16), (13, 15), (4, 10),\n- (1, 7), (10, 11), (7, 8), (11, 12), (8, 9), (4, 5), (1, 2), (5, 6),\n- (2, 3)]\n+ EDGES = [(0, 1), (0, 2), (1, 3), (2, 4), (3, 5), (4, 6), (5, 7), (6, 8),\n+ (7, 9), (8, 10), (5, 11), (6, 12), (11, 13), (12, 14), (13, 15),\n+ (14, 16), (11, 12)]\nNUM_EDGES = len(EDGES)\ncolors = [[255, 0, 0], [255, 85, 0], [255, 170, 0], [255, 255, 0], [170, 255, 0], [85, 255, 0], [0, 255, 0], \\\n@@ -52,25 +47,28 @@ def draw_pose(imgfile,\nimg = cv2.imread(imgfile) if type(imgfile) == str else imgfile\nskeletons, scores = results['keypoint']\n+ color_set = results['colors'] if 'colors' in results else None\nif 'bbox' in results:\nbboxs = results['bbox']\n- for idx, rect in enumerate(bboxs):\n+ for j, rect in enumerate(bboxs):\nxmin, ymin, xmax, ymax = rect\n- cv2.rectangle(img, (xmin, ymin), (xmax, ymax), colors[0], 1)\n+ color = colors[0] if color_set is None else colors[color_set[j] %\n+ len(colors)]\n+ cv2.rectangle(img, (xmin, ymin), (xmax, ymax), color, 1)\ncanvas = img.copy()\nfor i in range(17):\n- rgba = np.array(cmap(1 - i / 17. - 1. / 34))\n- rgba[0:3] *= 255\nfor j in range(len(skeletons)):\nif skeletons[j][i, 2] < visual_thread:\ncontinue\n+ color = colors[i] if color_set is None else colors[color_set[j] %\n+ len(colors)]\ncv2.circle(\ncanvas,\ntuple(skeletons[j][i, 0:2].astype('int32')),\n2,\n- colors[i],\n+ color,\nthickness=-1)\nto_plot = cv2.addWeighted(img, 0.3, canvas, 0.7, 0)\n@@ -78,7 +76,6 @@ def draw_pose(imgfile,\nstickwidth = 2\n- skeletons = map_coco_to_personlab(skeletons)\nfor i in range(NUM_EDGES):\nfor j in range(len(skeletons)):\nedge = EDGES[i]\n@@ -96,7 +93,9 @@ def draw_pose(imgfile,\npolygon = cv2.ellipse2Poly((int(mY), int(mX)),\n(int(length / 2), stickwidth),\nint(angle), 0, 360, 1)\n- cv2.fillConvexPoly(cur_canvas, polygon, colors[i])\n+ color = colors[i] if color_set is None else colors[color_set[j] %\n+ len(colors)]\n+ cv2.fillConvexPoly(cur_canvas, polygon, color)\ncanvas = cv2.addWeighted(canvas, 0.4, cur_canvas, 0.6, 0)\nif returnimg:\nreturn canvas\n"
},
{
"change_type": "ADD",
"old_path": "docs/images/mot_pose_demo_640x360.gif",
"new_path": "docs/images/mot_pose_demo_640x360.gif",
"diff": "Binary files /dev/null and b/docs/images/mot_pose_demo_640x360.gif differ\n"
}
] |
Python
|
Apache License 2.0
|
paddlepaddle/paddledetection
|
add mot_pose_demo;sych with det benchmark codes (#3079)
|
499,313 |
20.05.2021 17:41:17
| -28,800 |
b274f7ad271968159169c60639a8fa7375d51141
|
fix download in windows
|
[
{
"change_type": "MODIFY",
"old_path": "ppdet/utils/download.py",
"new_path": "ppdet/utils/download.py",
"diff": "@@ -18,6 +18,7 @@ from __future__ import print_function\nimport os\nimport os.path as osp\n+import sys\nimport yaml\nimport shutil\nimport requests\n@@ -349,6 +350,11 @@ def _download(url, path, md5sum=None):\nlogger.info(\"Downloading {} from {}\".format(fname, url))\n+\n+ # NOTE: windows path join may incur \\, which is invalid in url\n+ if sys.platform == \"win32\":\n+ url = url.replace('\\\\', '/')\n+\nreq = requests.get(url, stream=True)\nif req.status_code != 200:\nraise RuntimeError(\"Downloading from {} failed with code \"\n"
}
] |
Python
|
Apache License 2.0
|
paddlepaddle/paddledetection
|
fix download in windows (#3084)
|
499,333 |
20.05.2021 18:24:34
| -28,800 |
fd49465754d8d1a691c6eb57afe96836811d6af6
|
correct min_subgraph_size in ttfnet
|
[
{
"change_type": "MODIFY",
"old_path": "deploy/python/infer.py",
"new_path": "deploy/python/infer.py",
"diff": "@@ -123,14 +123,14 @@ class Detector(object):\nMaskRCNN's results include 'masks': np.ndarray:\nshape: [N, im_h, im_w]\n'''\n- self.det_times.postprocess_time_s.start()\n+ self.det_times.preprocess_time_s.start()\ninputs = self.preprocess(image)\nnp_boxes, np_masks = None, None\ninput_names = self.predictor.get_input_names()\nfor i in range(len(input_names)):\ninput_tensor = self.predictor.get_input_handle(input_names[i])\ninput_tensor.copy_from_cpu(inputs[input_names[i]])\n- self.det_times.postprocess_time_s.end()\n+ self.det_times.preprocess_time_s.end()\nfor i in range(warmup):\nself.predictor.run()\noutput_names = self.predictor.get_output_names()\n"
},
{
"change_type": "MODIFY",
"old_path": "ppdet/engine/export_utils.py",
"new_path": "ppdet/engine/export_utils.py",
"diff": "@@ -34,7 +34,7 @@ TRT_MIN_SUBGRAPH = {\n'S2ANet': 40,\n'EfficientDet': 40,\n'Face': 3,\n- 'TTFNet': 3,\n+ 'TTFNet': 60,\n'FCOS': 16,\n'SOLOv2': 60,\n'HigherHRNet': 3,\n"
}
] |
Python
|
Apache License 2.0
|
paddlepaddle/paddledetection
|
correct min_subgraph_size in ttfnet (#3088)
|
499,313 |
21.05.2021 10:31:03
| -28,800 |
875277e8c35fe4ea840ee97384a5072f622f2fa0
|
polish version log in build_wheel.sh
|
[
{
"change_type": "MODIFY",
"old_path": "scripts/build_wheel.sh",
"new_path": "scripts/build_wheel.sh",
"diff": "@@ -133,11 +133,23 @@ build_and_install\nunittest\ncleanup\n+# get Paddle version\nPADDLE_VERSION=`python -c \"import paddle; print(paddle.version.full_version)\"`\nPADDLE_COMMIT=`python -c \"import paddle; print(paddle.version.commit)\"`\n+PADDLE_COMMIT=`git rev-parse --short $PADDLE_COMMIT`\n+\n+# get PaddleDetection branch\n+PPDET_BRANCH=`git rev-parse --abbrev-ref HEAD`\n+PPDET_COMMIT=`git rev-parse --short HEAD`\n+\n+# get Python version\n+PYTHON_VERSION=`python -c \"import platform; print(platform.python_version())\"`\n+\necho -e \"\\n${GREEN}paddledet wheel compiled and checked success !${NONE}\n- ${BLUE}paddle version:${NONE} $PADDLE_VERSION\n- ${BLUE}paddle commit:${NONE} $PADDLE_COMMIT\\n\"\n+ ${BLUE}Python version:${NONE} $PYTHON_VERSION\n+ ${BLUE}Paddle version:${NONE} $PADDLE_VERSION ($PADDLE_COMMIT)\n+ ${BLUE}PaddleDetection branch:${NONE} $PPDET_BRANCH ($PPDET_COMMIT)\\n\"\n+\necho -e \"${GREEN}wheel saved under${NONE} ${RED}${BOLD}./dist\"\ntrap : 0\n"
}
] |
Python
|
Apache License 2.0
|
paddlepaddle/paddledetection
|
polish version log in build_wheel.sh (#3095)
|