Columns (name: type, observed range):
- author: int64, 658 to 755k
- date: stringlengths, 19 to 19
- timezone: int64, -46,800 to 43.2k
- hash: stringlengths, 40 to 40
- message: stringlengths, 5 to 490
- mods: list
- language: stringclasses, 20 values
- license: stringclasses, 3 values
- repo: stringlengths, 5 to 68
- original_message: stringlengths, 12 to 491
499,301
31.12.2021 15:02:26
-28,800
1a46f29fe1b42c6350cadf2b5ace66a217cdf145
params group
[ { "change_type": "MODIFY", "old_path": "configs/faster_rcnn/_base_/optimizer_swin_1x.yml", "new_path": "configs/faster_rcnn/_base_/optimizer_swin_1x.yml", "diff": "@@ -15,4 +15,8 @@ OptimizerBuilder:\noptimizer:\ntype: AdamW\nweight_decay: 0.05\n- without_weight_decay_params: ['absolute_pos_embed', 'relative_position_bias_table', 'norm']\n+\n+ param_groups:\n+ -\n+ params: ['absolute_pos_embed', 'relative_position_bias_table', 'norm']\n+ weight_decay: 0.\n" }, { "change_type": "MODIFY", "old_path": "ppdet/optimizer.py", "new_path": "ppdet/optimizer.py", "diff": "@@ -249,21 +249,37 @@ class OptimizerBuilder():\noptim_args['weight_decay'] = regularization\nop = getattr(optimizer, optim_type)\n- if 'without_weight_decay_params' in optim_args:\n- keys = optim_args['without_weight_decay_params']\n- params = [{\n- 'params': [\n- p for n, p in model.named_parameters()\n- if any([k in n for k in keys])\n- ],\n- 'weight_decay': 0.\n- }, {\n- 'params': [\n- p for n, p in model.named_parameters()\n- if all([k not in n for k in keys])\n+ if 'param_groups' in optim_args:\n+ assert isinstance(optim_args['param_groups'], list), ''\n+\n+ param_groups = optim_args.pop('param_groups')\n+\n+ params, visited = [], []\n+ for group in param_groups:\n+ assert isinstance(group,\n+ dict) and 'params' in group and isinstance(\n+ group['params'], list), ''\n+ _params = {\n+ n: p\n+ for n, p in model.named_parameters()\n+ if any([k in n for k in group['params']])\n+ }\n+ _group = group.copy()\n+ _group.update({'params': list(_params.values())})\n+\n+ params.append(_group)\n+ visited.extend(list(_params.keys()))\n+\n+ ext_params = [\n+ p for n, p in model.named_parameters() if n not in visited\n]\n- }]\n- del optim_args['without_weight_decay_params']\n+\n+ if len(ext_params) < len(model.parameters()):\n+ params.append({'params': ext_params})\n+\n+ elif len(ext_params) > len(model.parameters()):\n+ raise RuntimeError\n+\nelse:\nparams = model.parameters()\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
params group (#4955)
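The diff above replaces the one-off `without_weight_decay_params` key with a generic `param_groups` mechanism: each configured group lists parameter-name substrings, matching parameters receive that group's overrides (here `weight_decay: 0.`), and everything left over falls into a default group. A minimal standalone sketch of that grouping logic (hypothetical helper, not the ppdet API; the real code adds extra sanity checks on the leftover count):

```python
def build_param_groups(model, groups_cfg):
    # groups_cfg: list of dicts such as {'params': ['norm'], 'weight_decay': 0.}
    params, visited = [], set()
    for group in groups_cfg:
        matched = {n: p for n, p in model.named_parameters()
                   if any(key in n for key in group['params'])}
        entry = dict(group)
        entry['params'] = list(matched.values())  # substrings -> actual tensors
        params.append(entry)
        visited.update(matched.keys())
    # parameters not claimed by any group keep the optimizer defaults
    rest = [p for n, p in model.named_parameters() if n not in visited]
    if rest:
        params.append({'params': rest})
    return params
```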
499,301
04.01.2022 13:47:11
-28,800
5f198f393dba3773b85d3b212e8d8df43812dccd
update test keep ratio same as eval
[ { "change_type": "MODIFY", "old_path": "configs/faster_rcnn/_base_/faster_rcnn_swin_reader.yml", "new_path": "configs/faster_rcnn/_base_/faster_rcnn_swin_reader.yml", "diff": "@@ -33,7 +33,7 @@ TestReader:\nimage_shape: [1, 3, 640, 640]\nsample_transforms:\n- Decode: {}\n- - Resize: {interp: 2, target_size: [640, 640], keep_ratio: False}\n+ - Resize: {interp: 2, target_size: [640, 640], keep_ratio: True}\n- NormalizeImage: {is_scale: true, mean: [0.485,0.456,0.406], std: [0.229, 0.224,0.225]}\n- Permute: {}\nbatch_transforms:\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
update test keep ratio same as eval (#5051)
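For context on the toggle: `keep_ratio` controls whether `Resize` preserves the aspect ratio, so this change makes the test reader match the eval reader's geometry. A sketch of the scale computation, assuming the min/max-side rule ppdet's `Resize` op uses when `keep_ratio` is true:

```python
def resize_scale(h, w, target=(640, 640), keep_ratio=True):
    if keep_ratio:
        # one shared scale: fit the short side to min(target) without the
        # long side exceeding max(target)
        scale = min(min(target) / min(h, w), max(target) / max(h, w))
        return scale, scale
    # keep_ratio=False stretches each dimension independently
    return target[0] / h, target[1] / w
```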
499,376
05.01.2022 14:06:14
-28,800
2711a60b8773e1fdf579052dbb78988b3953a768
Change some config for det trainning performance
[ { "change_type": "MODIFY", "old_path": "configs/deformable_detr/_base_/deformable_detr_reader.yml", "new_path": "configs/deformable_detr/_base_/deformable_detr_reader.yml", "diff": "-worker_num: 0\n+worker_num: 2\nTrainReader:\nsample_transforms:\n- Decode: {}\n" }, { "change_type": "MODIFY", "old_path": "configs/mot/jde/_base_/jde_reader_1088x608.yml", "new_path": "configs/mot/jde/_base_/jde_reader_1088x608.yml", "diff": "-worker_num: 2\n+worker_num: 8\nTrainReader:\nsample_transforms:\n- Decode: {}\n" }, { "change_type": "MODIFY", "old_path": "configs/solov2/_base_/solov2_reader.yml", "new_path": "configs/solov2/_base_/solov2_reader.yml", "diff": "-worker_num: 2\n+worker_num: 8\nTrainReader:\nsample_transforms:\n- Decode: {}\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
Change some config for det trainning performance (#5055)
499,301
10.01.2022 21:41:25
-28,800
e62c687486c0881759ffd49b736afb5ccaa3d717
ema update
[ { "change_type": "MODIFY", "old_path": "ppdet/optimizer.py", "new_path": "ppdet/optimizer.py", "diff": "@@ -17,6 +17,7 @@ from __future__ import division\nfrom __future__ import print_function\nimport math\n+import weakref\nimport paddle\nimport paddle.nn as nn\n@@ -319,19 +320,31 @@ class ModelEMA(object):\nself.use_thres_step = use_thres_step\nself.cycle_epoch = cycle_epoch\n+ self._model_state = {\n+ k: weakref.ref(p)\n+ for k, p in model.state_dict().items()\n+ }\n+\ndef reset(self):\nself.step = 0\nself.epoch = 0\nfor k, v in self.state_dict.items():\nself.state_dict[k] = paddle.zeros_like(v)\n- def update(self, model):\n+ def update(self, model=None):\nif self.use_thres_step:\ndecay = min(self.decay, (1 + self.step) / (10 + self.step))\nelse:\ndecay = self.decay\nself._decay = decay\n+\n+ if model is not None:\nmodel_dict = model.state_dict()\n+ else:\n+ model_dict = {k: p() for k, p in self._model_state.items()}\n+ assert all(\n+ [v is not None for _, v in model_dict.items()]), 'python gc.'\n+\nfor k, v in self.state_dict.items():\nv = decay * v + (1 - decay) * model_dict[k]\nv.stop_gradient = True\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
ema update (#5089)
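The `weakref` change lets `update()` run without re-passing the model: the EMA object caches weak references to the parameters at construction and dereferences them on demand, asserting none were garbage-collected. A condensed sketch of the pattern (not the full `ModelEMA`, which also warms the decay up via `use_thres_step`):

```python
import weakref

import paddle


class TinyEMA:
    def __init__(self, model, decay=0.9998):
        self.decay = decay
        self.state = {k: paddle.zeros_like(v)
                      for k, v in model.state_dict().items()}
        # weak refs: update() no longer needs a model argument, and the EMA
        # object does not keep the model alive by itself
        self._model_state = {k: weakref.ref(p)
                             for k, p in model.state_dict().items()}

    def update(self, model=None):
        if model is not None:
            model_dict = model.state_dict()
        else:
            model_dict = {k: ref() for k, ref in self._model_state.items()}
            assert all(v is not None for v in model_dict.values()), \
                'a parameter was garbage-collected'
        for k, v in self.state.items():
            v = self.decay * v + (1 - self.decay) * model_dict[k]
            v.stop_gradient = True
            self.state[k] = v
```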
499,304
13.01.2022 14:25:40
-28,800
df55cb9b8cb3b3ef701738f82068e51528101748
update PicoDet and GFL post_process
[ { "change_type": "MODIFY", "old_path": "configs/picodet/_base_/picodet_esnet.yml", "new_path": "configs/picodet/_base_/picodet_esnet.yml", "diff": "architecture: PicoDet\npretrain_weights: https://paddledet.bj.bcebos.com/models/pretrained/ESNet_x1_0_pretrained.pdparams\n-export_post_process: False # Whether post-processing is included in the network\n+export_post_process: False # Whether post-processing is included in the network when export model.\nPicoDet:\nbackbone: ESNet\n" }, { "change_type": "MODIFY", "old_path": "ppdet/engine/trainer.py", "new_path": "ppdet/engine/trainer.py", "diff": "@@ -631,9 +631,12 @@ class Trainer(object):\nim_shape = [image_shape[0], 2]\nscale_factor = [image_shape[0], 2]\n- export_post_process = self.cfg.get('export_post_process', False)\n- if hasattr(self.model, 'deploy') and not export_post_process:\n+ if hasattr(self.model, 'deploy'):\nself.model.deploy = True\n+ export_post_process = self.cfg.get('export_post_process', False)\n+ if hasattr(self.model, 'export_post_process'):\n+ self.model.export_post_process = export_post_process\n+ image_shape = [None] + image_shape[1:]\nif hasattr(self.model, 'fuse_norm'):\nself.model.fuse_norm = self.cfg['TestReader'].get('fuse_normalize',\nFalse)\n" }, { "change_type": "MODIFY", "old_path": "ppdet/modeling/architectures/picodet.py", "new_path": "ppdet/modeling/architectures/picodet.py", "diff": "@@ -41,7 +41,7 @@ class PicoDet(BaseArch):\nself.backbone = backbone\nself.neck = neck\nself.head = head\n- self.deploy = False\n+ self.export_post_process = True\n@classmethod\ndef from_config(cls, cfg, *args, **kwargs):\n@@ -62,8 +62,8 @@ class PicoDet(BaseArch):\ndef _forward(self):\nbody_feats = self.backbone(self.inputs)\nfpn_feats = self.neck(body_feats)\n- head_outs = self.head(fpn_feats, self.deploy)\n- if self.training or self.deploy:\n+ head_outs = self.head(fpn_feats, self.export_post_process)\n+ if self.training or not self.export_post_process:\nreturn head_outs, None\nelse:\nim_shape = self.inputs['im_shape']\n@@ -83,7 +83,7 @@ class PicoDet(BaseArch):\nreturn loss\ndef get_pred(self):\n- if self.deploy:\n+ if not self.export_post_process:\nreturn {'picodet': self._forward()[0]}\nelse:\nbbox_pred, bbox_num = self._forward()\n" }, { "change_type": "MODIFY", "old_path": "ppdet/modeling/bbox_utils.py", "new_path": "ppdet/modeling/bbox_utils.py", "diff": "@@ -756,20 +756,22 @@ def bbox_center(boxes):\ndef batch_distance2bbox(points, distance, max_shapes=None):\n\"\"\"Decode distance prediction to bounding box for batch.\nArgs:\n- points (Tensor): [B, ..., 2]\n- distance (Tensor): [B, ..., 4]\n- max_shapes (tuple): [B, 2], \"h,w\" format, Shape of the image.\n+ points (Tensor): [B, ..., 2], \"xy\" format\n+ distance (Tensor): [B, ..., 4], \"ltrb\" format\n+ max_shapes (Tensor): [B, 2], \"h,w\" format, Shape of the image.\nReturns:\n- Tensor: Decoded bboxes.\n+ Tensor: Decoded bboxes, \"x1y1x2y2\" format.\n\"\"\"\n- x1 = points[..., 0] - distance[..., 0]\n- y1 = points[..., 1] - distance[..., 1]\n- x2 = points[..., 0] + distance[..., 2]\n- y2 = points[..., 1] + distance[..., 3]\n+ lt, rb = paddle.split(distance, 2, -1)\n+ x1y1 = points - lt\n+ x2y2 = points + rb\n+ out_bbox = paddle.concat([x1y1, x2y2], -1)\nif max_shapes is not None:\n- for i, max_shape in enumerate(max_shapes):\n- x1[i] = x1[i].clip(min=0, max=max_shape[1])\n- y1[i] = y1[i].clip(min=0, max=max_shape[0])\n- x2[i] = x2[i].clip(min=0, max=max_shape[1])\n- y2[i] = y2[i].clip(min=0, max=max_shape[0])\n- return paddle.stack([x1, y1, x2, y2], -1)\n+ 
max_shapes = max_shapes.flip(-1).tile([1, 2])\n+ delta_dim = out_bbox.ndim - max_shapes.ndim\n+ for _ in range(delta_dim):\n+ max_shapes.unsqueeze_(1)\n+ out_bbox = paddle.where(out_bbox < max_shapes, out_bbox, max_shapes)\n+ out_bbox = paddle.where(out_bbox > 0, out_bbox,\n+ paddle.zeros_like(out_bbox))\n+ return out_bbox\n" }, { "change_type": "MODIFY", "old_path": "ppdet/modeling/heads/gfl_head.py", "new_path": "ppdet/modeling/heads/gfl_head.py", "diff": "@@ -29,7 +29,7 @@ from paddle.nn.initializer import Normal, Constant\nfrom ppdet.core.workspace import register\nfrom ppdet.modeling.layers import ConvNormLayer\n-from ppdet.modeling.bbox_utils import distance2bbox, bbox2distance\n+from ppdet.modeling.bbox_utils import distance2bbox, bbox2distance, batch_distance2bbox\nfrom ppdet.data.transform.atss_assigner import bbox_overlaps\n@@ -241,18 +241,34 @@ class GFLHead(nn.Layer):\n), \"The size of fpn_feats is not equal to size of fpn_stride\"\ncls_logits_list = []\nbboxes_reg_list = []\n- for scale_reg, fpn_feat in zip(self.scales_regs, fpn_feats):\n+ for stride, scale_reg, fpn_feat in zip(self.fpn_stride,\n+ self.scales_regs, fpn_feats):\nconv_cls_feat, conv_reg_feat = self.conv_feat(fpn_feat)\n- cls_logits = self.gfl_head_cls(conv_cls_feat)\n- bbox_reg = scale_reg(self.gfl_head_reg(conv_reg_feat))\n+ cls_score = self.gfl_head_cls(conv_cls_feat)\n+ bbox_pred = scale_reg(self.gfl_head_reg(conv_reg_feat))\nif self.dgqp_module:\n- quality_score = self.dgqp_module(bbox_reg)\n- cls_logits = F.sigmoid(cls_logits) * quality_score\n+ quality_score = self.dgqp_module(bbox_pred)\n+ cls_score = F.sigmoid(cls_score) * quality_score\nif not self.training:\n- cls_logits = F.sigmoid(cls_logits.transpose([0, 2, 3, 1]))\n- bbox_reg = bbox_reg.transpose([0, 2, 3, 1])\n- cls_logits_list.append(cls_logits)\n- bboxes_reg_list.append(bbox_reg)\n+ cls_score = F.sigmoid(cls_score.transpose([0, 2, 3, 1]))\n+ bbox_pred = bbox_pred.transpose([0, 2, 3, 1])\n+ b, cell_h, cell_w, _ = paddle.shape(cls_score)\n+ y, x = self.get_single_level_center_point(\n+ [cell_h, cell_w], stride, cell_offset=self.cell_offset)\n+ center_points = paddle.stack([x, y], axis=-1)\n+ cls_score = cls_score.reshape([b, -1, self.cls_out_channels])\n+ bbox_pred = self.distribution_project(bbox_pred) * stride\n+ bbox_pred = bbox_pred.reshape([b, cell_h * cell_w, 4])\n+\n+ # NOTE: If keep_ratio=False and image shape value that\n+ # multiples of 32, distance2bbox not set max_shapes parameter\n+ # to speed up model prediction. 
If need to set max_shapes,\n+ # please use inputs['im_shape'].\n+ bbox_pred = batch_distance2bbox(\n+ center_points, bbox_pred, max_shapes=None)\n+\n+ cls_logits_list.append(cls_score)\n+ bboxes_reg_list.append(bbox_pred)\nreturn (cls_logits_list, bboxes_reg_list)\n@@ -410,71 +426,15 @@ class GFLHead(nn.Layer):\nx = x.flatten()\nreturn y, x\n- def get_bboxes_single(self,\n- cls_scores,\n- bbox_preds,\n- img_shape,\n- scale_factor,\n- rescale=True,\n- cell_offset=0):\n- assert len(cls_scores) == len(bbox_preds)\n- mlvl_bboxes = []\n- mlvl_scores = []\n- for stride, cls_score, bbox_pred in zip(self.fpn_stride, cls_scores,\n- bbox_preds):\n- featmap_size = [\n- paddle.shape(cls_score)[0], paddle.shape(cls_score)[1]\n- ]\n- y, x = self.get_single_level_center_point(\n- featmap_size, stride, cell_offset=cell_offset)\n- center_points = paddle.stack([x, y], axis=-1)\n- scores = cls_score.reshape([-1, self.cls_out_channels])\n- bbox_pred = self.distribution_project(bbox_pred) * stride\n-\n- if scores.shape[0] > self.nms_pre:\n- max_scores = scores.max(axis=1)\n- _, topk_inds = max_scores.topk(self.nms_pre)\n- center_points = center_points.gather(topk_inds)\n- bbox_pred = bbox_pred.gather(topk_inds)\n- scores = scores.gather(topk_inds)\n-\n- bboxes = distance2bbox(\n- center_points, bbox_pred, max_shape=img_shape)\n- mlvl_bboxes.append(bboxes)\n- mlvl_scores.append(scores)\n- mlvl_bboxes = paddle.concat(mlvl_bboxes)\n- if rescale:\n- # [h_scale, w_scale] to [w_scale, h_scale, w_scale, h_scale]\n- im_scale = paddle.concat([scale_factor[::-1], scale_factor[::-1]])\n- mlvl_bboxes /= im_scale\n- mlvl_scores = paddle.concat(mlvl_scores)\n- mlvl_scores = mlvl_scores.transpose([1, 0])\n- return mlvl_bboxes, mlvl_scores\n-\n- def decode(self, cls_scores, bbox_preds, im_shape, scale_factor,\n- cell_offset):\n- batch_bboxes = []\n- batch_scores = []\n- for img_id in range(cls_scores[0].shape[0]):\n- num_levels = len(cls_scores)\n- cls_score_list = [cls_scores[i][img_id] for i in range(num_levels)]\n- bbox_pred_list = [bbox_preds[i][img_id] for i in range(num_levels)]\n- bboxes, scores = self.get_bboxes_single(\n- cls_score_list,\n- bbox_pred_list,\n- im_shape[img_id],\n- scale_factor[img_id],\n- cell_offset=cell_offset)\n- batch_bboxes.append(bboxes)\n- batch_scores.append(scores)\n- batch_bboxes = paddle.stack(batch_bboxes, axis=0)\n- batch_scores = paddle.stack(batch_scores, axis=0)\n-\n- return batch_bboxes, batch_scores\n-\ndef post_process(self, gfl_head_outs, im_shape, scale_factor):\ncls_scores, bboxes_reg = gfl_head_outs\n- bboxes, score = self.decode(cls_scores, bboxes_reg, im_shape,\n- scale_factor, self.cell_offset)\n- bbox_pred, bbox_num, _ = self.nms(bboxes, score)\n+ bboxes = paddle.concat(bboxes_reg, axis=1)\n+ # rescale: [h_scale, w_scale] -> [w_scale, h_scale, w_scale, h_scale]\n+ im_scale = paddle.concat(\n+ [scale_factor[:, ::-1], scale_factor[:, ::-1]],\n+ axis=-1).unsqueeze(1)\n+ bboxes /= im_scale\n+ mlvl_scores = paddle.concat(cls_scores, axis=1)\n+ mlvl_scores = mlvl_scores.transpose([0, 2, 1])\n+ bbox_pred, bbox_num, _ = self.nms(bboxes, mlvl_scores)\nreturn bbox_pred, bbox_num\n" }, { "change_type": "MODIFY", "old_path": "ppdet/modeling/heads/pico_head.py", "new_path": "ppdet/modeling/heads/pico_head.py", "diff": "@@ -26,6 +26,7 @@ from paddle.nn.initializer import Normal, Constant\nfrom ppdet.core.workspace import register\nfrom ppdet.modeling.layers import ConvNormLayer\n+from ppdet.modeling.bbox_utils import batch_distance2bbox\nfrom .simota_head import OTAVFLHead\n@@ -238,7 
+239,7 @@ class PicoHead(OTAVFLHead):\nbias_attr=ParamAttr(initializer=Constant(value=0))))\nself.head_reg_list.append(head_reg)\n- def forward(self, fpn_feats, deploy=False):\n+ def forward(self, fpn_feats, export_post_process=True):\nassert len(fpn_feats) == len(\nself.fpn_stride\n), \"The size of fpn_feats is not equal to size of fpn_stride\"\n@@ -260,7 +261,7 @@ class PicoHead(OTAVFLHead):\nquality_score = self.dgqp_module(bbox_pred)\ncls_score = F.sigmoid(cls_score) * quality_score\n- if deploy:\n+ if not export_post_process:\n# Now only supports batch size = 1 in deploy\n# TODO(ygh): support batch size > 1\ncls_score = F.sigmoid(cls_score).reshape(\n@@ -270,6 +271,21 @@ class PicoHead(OTAVFLHead):\nelif not self.training:\ncls_score = F.sigmoid(cls_score.transpose([0, 2, 3, 1]))\nbbox_pred = bbox_pred.transpose([0, 2, 3, 1])\n+ stride = self.fpn_stride[i]\n+ b, cell_h, cell_w, _ = paddle.shape(cls_score)\n+ y, x = self.get_single_level_center_point(\n+ [cell_h, cell_w], stride, cell_offset=self.cell_offset)\n+ center_points = paddle.stack([x, y], axis=-1)\n+ cls_score = cls_score.reshape([b, -1, self.cls_out_channels])\n+ bbox_pred = self.distribution_project(bbox_pred) * stride\n+ bbox_pred = bbox_pred.reshape([b, cell_h * cell_w, 4])\n+\n+ # NOTE: If keep_ratio=False and image shape value that\n+ # multiples of 32, distance2bbox not set max_shapes parameter\n+ # to speed up model prediction. If need to set max_shapes,\n+ # please use inputs['im_shape'].\n+ bbox_pred = batch_distance2bbox(\n+ center_points, bbox_pred, max_shapes=None)\ncls_logits_list.append(cls_score)\nbboxes_reg_list.append(bbox_pred)\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
update PicoDet and GFL post_process (#5101)
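The core of this commit is the batched `batch_distance2bbox`: anchor centers are `(x, y)` points, predictions are `(l, t, r, b)` offsets, and clipping happens via broadcasting instead of the old per-image Python loop. The same math as a numpy sketch (the real function uses paddle ops and `paddle.where`):

```python
import numpy as np

def batch_distance2bbox(points, distance, max_shapes=None):
    # points: (B, L, 2) "xy"; distance: (B, L, 4) "ltrb"
    lt, rb = np.split(distance, 2, axis=-1)
    out = np.concatenate([points - lt, points + rb], axis=-1)  # x1y1x2y2
    if max_shapes is not None:
        # max_shapes: (B, 2) "h,w" -> (B, 1, 4) "w,h,w,h" for broadcasting
        wh = np.tile(max_shapes[:, ::-1], (1, 2))[:, None, :]
        out = np.clip(out, 0.0, wh)
    return out
```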
499,340
19.01.2022 11:05:46
-28,800
3233ef340b0d387641d0a6e8c173bdae14552c12
support sparse evaluation
[ { "change_type": "MODIFY", "old_path": "ppdet/engine/trainer.py", "new_path": "ppdet/engine/trainer.py", "diff": "@@ -130,6 +130,7 @@ class Trainer(object):\nself.lr = create('LearningRate')(steps_per_epoch)\nself.optimizer = create('OptimizerBuilder')(self.lr, self.model)\n+ # Unstructured pruner is only enabled in the train mode.\nif self.cfg.get('unstructured_prune'):\nself.pruner = create('UnstructuredPruner')(self.model,\nsteps_per_epoch)\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
support sparse evaluation (#5122)
499,298
28.01.2022 17:38:16
-28,800
6ee18c2b3964f50ea5a850cec224f0c06f01a403
fix reid resnet bn export, test=document_fix
[ { "change_type": "MODIFY", "old_path": "ppdet/modeling/reid/resnet.py", "new_path": "ppdet/modeling/reid/resnet.py", "diff": "@@ -55,7 +55,7 @@ class ConvBNLayer(nn.Layer):\nbias_attr=False,\ndata_format=data_format)\n- self._batch_norm = nn.BatchNorm2D(num_filters, data_layout=data_format)\n+ self._batch_norm = nn.BatchNorm2D(num_filters)\nself.act = act\ndef forward(self, inputs):\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
fix reid resnet bn export, test=document_fix (#5162)
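For reference, layout control on paddle's batch norm goes through the `data_format` argument; the removed `data_layout` keyword is not part of `nn.BatchNorm2D`'s signature, which is presumably what tripped up export. If layout selection were still needed:

```python
import paddle.nn as nn

# data_format ('NCHW' or 'NHWC') is the supported keyword; 'NCHW' is the default
bn = nn.BatchNorm2D(64, data_format='NCHW')
```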
499,339
29.01.2022 19:08:14
-28,800
0a3d768ce3464fca945ba58f0742fbe003930ec7
[dev] update assigner and tood_head
[ { "change_type": "MODIFY", "old_path": "configs/tood/_base_/tood_reader.yml", "new_path": "configs/tood/_base_/tood_reader.yml", "diff": "@@ -8,10 +8,11 @@ TrainReader:\n- Permute: {}\nbatch_transforms:\n- PadBatch: {pad_to_stride: 32}\n+ - PadGT: {}\nbatch_size: 4\nshuffle: true\ndrop_last: true\n- collate_batch: false\n+ collate_batch: true\nuse_shared_memory: true\n" }, { "change_type": "MODIFY", "old_path": "ppdet/data/transform/batch_operators.py", "new_path": "ppdet/data/transform/batch_operators.py", "diff": "@@ -47,6 +47,7 @@ __all__ = [\n'PadMaskBatch',\n'Gt2GFLTarget',\n'Gt2CenterNetTarget',\n+ 'PadGT',\n]\n@@ -72,13 +73,15 @@ class PadBatch(BaseOperator):\ncoarsest_stride = self.pad_to_stride\n# multi scale input is nested list\n- if isinstance(samples, typing.Sequence) and len(samples) > 0 and isinstance(samples[0], typing.Sequence):\n+ if isinstance(samples,\n+ typing.Sequence) and len(samples) > 0 and isinstance(\n+ samples[0], typing.Sequence):\ninner_samples = samples[0]\nelse:\ninner_samples = samples\n- max_shape = np.array([data['image'].shape for data in inner_samples]).max(\n- axis=0)\n+ max_shape = np.array(\n+ [data['image'].shape for data in inner_samples]).max(axis=0)\nif coarsest_stride > 0:\nmax_shape[1] = int(\nnp.ceil(max_shape[1] / coarsest_stride) * coarsest_stride)\n@@ -1066,3 +1069,56 @@ class Gt2CenterNetTarget(BaseOperator):\nsample['size'] = wh\nsample['offset'] = reg\nreturn sample\n+\n+\n+@register_op\n+class PadGT(BaseOperator):\n+ \"\"\"\n+ Pad 0 to `gt_class`, `gt_bbox`, `gt_score`...\n+ The num_max_boxes is the largest for batch.\n+ Args:\n+ return_gt_mask (bool): If true, return `pad_gt_mask`,\n+ 1 means bbox, 0 means no bbox.\n+ \"\"\"\n+\n+ def __init__(self, return_gt_mask=True):\n+ super(PadGT, self).__init__()\n+ self.return_gt_mask = return_gt_mask\n+\n+ def __call__(self, samples, context=None):\n+ num_max_boxes = max([len(s['gt_bbox']) for s in samples])\n+ for sample in samples:\n+ if self.return_gt_mask:\n+ sample['pad_gt_mask'] = np.zeros(\n+ (num_max_boxes, 1), dtype=np.float32)\n+ if num_max_boxes == 0:\n+ continue\n+\n+ num_gt = len(sample['gt_bbox'])\n+ pad_gt_class = np.zeros((num_max_boxes, 1), dtype=np.int32)\n+ pad_gt_bbox = np.zeros((num_max_boxes, 4), dtype=np.float32)\n+ if num_gt > 0:\n+ pad_gt_class[:num_gt] = sample['gt_class']\n+ pad_gt_bbox[:num_gt] = sample['gt_bbox']\n+ sample['gt_class'] = pad_gt_class\n+ sample['gt_bbox'] = pad_gt_bbox\n+ # pad_gt_mask\n+ if 'pad_gt_mask' in sample:\n+ sample['pad_gt_mask'][:num_gt] = 1\n+ # gt_score\n+ if 'gt_score' in sample:\n+ pad_gt_score = np.zeros((num_max_boxes, 1), dtype=np.float32)\n+ if num_gt > 0:\n+ pad_gt_score[:num_gt] = sample['gt_score']\n+ sample['gt_score'] = pad_gt_score\n+ if 'is_crowd' in sample:\n+ pad_is_crowd = np.zeros((num_max_boxes, 1), dtype=np.int32)\n+ if num_gt > 0:\n+ pad_is_crowd[:num_gt] = sample['is_crowd']\n+ sample['is_crowd'] = pad_is_crowd\n+ if 'difficult' in sample:\n+ pad_diff = np.zeros((num_max_boxes, 1), dtype=np.int32)\n+ if num_gt > 0:\n+ pad_diff[:num_gt] = sample['difficult']\n+ sample['difficult'] = pad_diff\n+ return samples\n" }, { "change_type": "MODIFY", "old_path": "ppdet/modeling/assigners/atss_assigner.py", "new_path": "ppdet/modeling/assigners/atss_assigner.py", "diff": "@@ -23,10 +23,13 @@ import paddle.nn.functional as F\nfrom ppdet.core.workspace import register\nfrom ..ops import iou_similarity\n+from ..bbox_utils import iou_similarity as batch_iou_similarity\nfrom ..bbox_utils import bbox_center\n-from .utils import 
(pad_gt, check_points_inside_bboxes, compute_max_iou_anchor,\n+from .utils import (check_points_inside_bboxes, compute_max_iou_anchor,\ncompute_max_iou_gt)\n+__all__ = ['ATSSAssigner']\n+\n@register\nclass ATSSAssigner(nn.Layer):\n@@ -77,8 +80,10 @@ class ATSSAssigner(nn.Layer):\nnum_anchors_list,\ngt_labels,\ngt_bboxes,\n+ pad_gt_mask,\nbg_index,\n- gt_scores=None):\n+ gt_scores=None,\n+ pred_bboxes=None):\nr\"\"\"This code is based on\nhttps://github.com/fcjian/TOOD/blob/master/mmdet/core/bbox/assigners/atss_assigner.py\n@@ -99,18 +104,18 @@ class ATSSAssigner(nn.Layer):\nanchor_bboxes (Tensor, float32): pre-defined anchors, shape(L, 4),\n\"xmin, xmax, ymin, ymax\" format\nnum_anchors_list (List): num of anchors in each level\n- gt_labels (Tensor|List[Tensor], int64): Label of gt_bboxes, shape(B, n, 1)\n- gt_bboxes (Tensor|List[Tensor], float32): Ground truth bboxes, shape(B, n, 4)\n+ gt_labels (Tensor, int64|int32): Label of gt_bboxes, shape(B, n, 1)\n+ gt_bboxes (Tensor, float32): Ground truth bboxes, shape(B, n, 4)\n+ pad_gt_mask (Tensor, float32): 1 means bbox, 0 means no bbox, shape(B, n, 1)\nbg_index (int): background index\n- gt_scores (Tensor|List[Tensor]|None, float32) Score of gt_bboxes,\n+ gt_scores (Tensor|None, float32) Score of gt_bboxes,\nshape(B, n, 1), if None, then it will initialize with one_hot label\n+ pred_bboxes (Tensor, float32, optional): predicted bounding boxes, shape(B, L, 4)\nReturns:\nassigned_labels (Tensor): (B, L)\nassigned_bboxes (Tensor): (B, L, 4)\n- assigned_scores (Tensor): (B, L, C)\n+ assigned_scores (Tensor): (B, L, C), if pred_bboxes is not None, then output ious\n\"\"\"\n- gt_labels, gt_bboxes, pad_gt_scores, pad_gt_mask = pad_gt(\n- gt_labels, gt_bboxes, gt_scores)\nassert gt_labels.ndim == gt_bboxes.ndim and \\\ngt_bboxes.ndim == 3\n@@ -198,9 +203,14 @@ class ATSSAssigner(nn.Layer):\nassigned_bboxes = assigned_bboxes.reshape([batch_size, num_anchors, 4])\nassigned_scores = F.one_hot(assigned_labels, self.num_classes)\n- if gt_scores is not None:\n+ if pred_bboxes is not None:\n+ # assigned iou\n+ ious = batch_iou_similarity(gt_bboxes, pred_bboxes) * mask_positive\n+ ious = ious.max(axis=-2).unsqueeze(-1)\n+ assigned_scores *= ious\n+ elif gt_scores is not None:\ngather_scores = paddle.gather(\n- pad_gt_scores.flatten(), assigned_gt_index.flatten(), axis=0)\n+ gt_scores.flatten(), assigned_gt_index.flatten(), axis=0)\ngather_scores = gather_scores.reshape([batch_size, num_anchors])\ngather_scores = paddle.where(mask_positive_sum > 0, gather_scores,\npaddle.zeros_like(gather_scores))\n" }, { "change_type": "MODIFY", "old_path": "ppdet/modeling/assigners/task_aligned_assigner.py", "new_path": "ppdet/modeling/assigners/task_aligned_assigner.py", "diff": "@@ -22,9 +22,11 @@ import paddle.nn.functional as F\nfrom ppdet.core.workspace import register\nfrom ..bbox_utils import iou_similarity\n-from .utils import (pad_gt, gather_topk_anchors, check_points_inside_bboxes,\n+from .utils import (gather_topk_anchors, check_points_inside_bboxes,\ncompute_max_iou_anchor)\n+__all__ = ['TaskAlignedAssigner']\n+\n@register\nclass TaskAlignedAssigner(nn.Layer):\n@@ -43,8 +45,10 @@ class TaskAlignedAssigner(nn.Layer):\npred_scores,\npred_bboxes,\nanchor_points,\n+ num_anchors_list,\ngt_labels,\ngt_bboxes,\n+ pad_gt_mask,\nbg_index,\ngt_scores=None):\nr\"\"\"This code is based on\n@@ -61,20 +65,18 @@ class TaskAlignedAssigner(nn.Layer):\npred_scores (Tensor, float32): predicted class probability, shape(B, L, C)\npred_bboxes (Tensor, float32): predicted bounding 
boxes, shape(B, L, 4)\nanchor_points (Tensor, float32): pre-defined anchors, shape(L, 2), \"cxcy\" format\n- gt_labels (Tensor|List[Tensor], int64): Label of gt_bboxes, shape(B, n, 1)\n- gt_bboxes (Tensor|List[Tensor], float32): Ground truth bboxes, shape(B, n, 4)\n+ num_anchors_list (List): num of anchors in each level, shape(L)\n+ gt_labels (Tensor, int64|int32): Label of gt_bboxes, shape(B, n, 1)\n+ gt_bboxes (Tensor, float32): Ground truth bboxes, shape(B, n, 4)\n+ pad_gt_mask (Tensor, float32): 1 means bbox, 0 means no bbox, shape(B, n, 1)\nbg_index (int): background index\n- gt_scores (Tensor|List[Tensor]|None, float32) Score of gt_bboxes,\n- shape(B, n, 1), if None, then it will initialize with one_hot label\n+ gt_scores (Tensor|None, float32) Score of gt_bboxes, shape(B, n, 1)\nReturns:\nassigned_labels (Tensor): (B, L)\nassigned_bboxes (Tensor): (B, L, 4)\nassigned_scores (Tensor): (B, L, C)\n\"\"\"\nassert pred_scores.ndim == pred_bboxes.ndim\n-\n- gt_labels, gt_bboxes, pad_gt_scores, pad_gt_mask = pad_gt(\n- gt_labels, gt_bboxes, gt_scores)\nassert gt_labels.ndim == gt_bboxes.ndim and \\\ngt_bboxes.ndim == 3\n" }, { "change_type": "MODIFY", "old_path": "ppdet/modeling/heads/tood_head.py", "new_path": "ppdet/modeling/heads/tood_head.py", "diff": "@@ -286,9 +286,11 @@ class TOODHead(nn.Layer):\nreturn loss\ndef get_loss(self, head_outs, gt_meta):\n- pred_scores, pred_bboxes, anchors, num_anchors_list, stride_tensor_list = head_outs\n+ pred_scores, pred_bboxes, anchors, \\\n+ num_anchors_list, stride_tensor_list = head_outs\ngt_labels = gt_meta['gt_class']\ngt_bboxes = gt_meta['gt_bbox']\n+ pad_gt_mask = gt_meta['pad_gt_mask']\n# label assignment\nif gt_meta['epoch_id'] < self.static_assigner_epoch:\nassigned_labels, assigned_bboxes, assigned_scores = self.static_assigner(\n@@ -296,6 +298,7 @@ class TOODHead(nn.Layer):\nnum_anchors_list,\ngt_labels,\ngt_bboxes,\n+ pad_gt_mask,\nbg_index=self.num_classes)\nalpha_l = 0.25\nelse:\n@@ -303,8 +306,10 @@ class TOODHead(nn.Layer):\npred_scores.detach(),\npred_bboxes.detach() * stride_tensor_list,\nbbox_center(anchors),\n+ num_anchors_list,\ngt_labels,\ngt_bboxes,\n+ pad_gt_mask,\nbg_index=self.num_classes)\nalpha_l = -1\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
[dev] update assigner and tood_head (#5169)
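The new `PadGT` operator is what makes `collate_batch: true` workable: every sample is padded to the batch-wide maximum box count, and `pad_gt_mask` marks which rows are real. The essential idea as a numpy sketch (the real operator also pads `gt_score`, `is_crowd`, and `difficult`):

```python
import numpy as np

def pad_gt(samples):
    num_max = max(len(s['gt_bbox']) for s in samples)
    for s in samples:
        n = len(s['gt_bbox'])
        cls = np.zeros((num_max, 1), dtype=np.int32)
        box = np.zeros((num_max, 4), dtype=np.float32)
        mask = np.zeros((num_max, 1), dtype=np.float32)  # 1 = real box, 0 = pad
        if n > 0:
            cls[:n] = s['gt_class']
            box[:n] = s['gt_bbox']
            mask[:n] = 1.0
        s['gt_class'], s['gt_bbox'], s['pad_gt_mask'] = cls, box, mask
    return samples
```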
499,339
14.02.2022 19:06:44
-28,800
c0b10e6e7d6865c6bc3236b4aeb58935c5b82f34
[TIPC] fix benchmark scripts
[ { "change_type": "MODIFY", "old_path": "test_tipc/benchmark_train.sh", "new_path": "test_tipc/benchmark_train.sh", "diff": "@@ -91,6 +91,7 @@ profile_option=\"${profile_option_key}:${profile_option_params}\"\nline_num=`expr $line_num + 1`\nflags_value=$(func_parser_value \"${lines[line_num]}\")\n+if [ ${flags_value} != \"null\" ];then\n# set flags\nIFS=\";\"\nflags_list=(${flags_value})\n@@ -98,6 +99,7 @@ for _flag in ${flags_list[*]}; do\ncmd=\"export ${_flag}\"\neval $cmd\ndone\n+fi\n# set log_name\nrepo_name=$(get_repo_name )\n" }, { "change_type": "MODIFY", "old_path": "test_tipc/configs/deformable_detr/deformable_detr_r50_1x_coco_train_infer_python.txt", "new_path": "test_tipc/configs/deformable_detr/deformable_detr_r50_1x_coco_train_infer_python.txt", "diff": "@@ -54,4 +54,4 @@ batch_size:2\nfp_items:fp32|fp16\nepoch:1\n--profiler_options:batch_range=[10,20];state=GPU;tracer_option=Default;profile_path=model.profile\n-flags:FLAGS_eager_delete_tensor_gb=0.0;FLAGS_fraction_of_gpu_memory_to_use=0.98;FLAGS_conv_workspace_size_limit=4096\n\\ No newline at end of file\n+flags:null\n\\ No newline at end of file\n" }, { "change_type": "MODIFY", "old_path": "test_tipc/configs/faster_rcnn/faster_rcnn_r50_fpn_1x_coco_train_infer_python.txt", "new_path": "test_tipc/configs/faster_rcnn/faster_rcnn_r50_fpn_1x_coco_train_infer_python.txt", "diff": "@@ -54,4 +54,4 @@ batch_size:2|8\nfp_items:fp32|fp16\nepoch:1\n--profiler_options:batch_range=[10,20];state=GPU;tracer_option=Default;profile_path=model.profile\n-flags:FLAGS_eager_delete_tensor_gb=0.0;FLAGS_fraction_of_gpu_memory_to_use=0.98;FLAGS_conv_workspace_size_limit=4096\n\\ No newline at end of file\n+flags:null\n\\ No newline at end of file\n" }, { "change_type": "MODIFY", "old_path": "test_tipc/configs/fcos/fcos_r50_fpn_1x_coco_train_infer_python.txt", "new_path": "test_tipc/configs/fcos/fcos_r50_fpn_1x_coco_train_infer_python.txt", "diff": "@@ -54,4 +54,4 @@ batch_size:2|8\nfp_items:fp32|fp16\nepoch:1\n--profiler_options:batch_range=[10,20];state=GPU;tracer_option=Default;profile_path=model.profile\n-flags:FLAGS_eager_delete_tensor_gb=0.0;FLAGS_fraction_of_gpu_memory_to_use=0.98;FLAGS_conv_workspace_size_limit=4096\n\\ No newline at end of file\n+flags:null\n\\ No newline at end of file\n" }, { "change_type": "MODIFY", "old_path": "test_tipc/configs/gfl/gfl_r50_fpn_1x_coco_train_infer_python.txt", "new_path": "test_tipc/configs/gfl/gfl_r50_fpn_1x_coco_train_infer_python.txt", "diff": "@@ -54,4 +54,4 @@ batch_size:2|8\nfp_items:fp32|fp16\nepoch:1\n--profiler_options:batch_range=[10,20];state=GPU;tracer_option=Default;profile_path=model.profile\n-flags:FLAGS_eager_delete_tensor_gb=0.0;FLAGS_fraction_of_gpu_memory_to_use=0.98;FLAGS_conv_workspace_size_limit=4096\n\\ No newline at end of file\n+flags:null\n\\ No newline at end of file\n" }, { "change_type": "MODIFY", "old_path": "test_tipc/configs/keypoint/higherhrnet_hrnet_w32_512_train_infer_python.txt", "new_path": "test_tipc/configs/keypoint/higherhrnet_hrnet_w32_512_train_infer_python.txt", "diff": "@@ -54,4 +54,4 @@ batch_size:20|24\nfp_items:fp32|fp16\nepoch:1\n--profiler_options:batch_range=[10,20];state=GPU;tracer_option=Default;profile_path=model.profile\n-flags:FLAGS_eager_delete_tensor_gb=0.0;FLAGS_fraction_of_gpu_memory_to_use=0.98;FLAGS_conv_workspace_size_limit=4096\n\\ No newline at end of file\n+flags:null\n\\ No newline at end of file\n" }, { "change_type": "MODIFY", "old_path": "test_tipc/configs/keypoint/hrnet_w32_256x192_train_infer_python.txt", "new_path": 
"test_tipc/configs/keypoint/hrnet_w32_256x192_train_infer_python.txt", "diff": "@@ -54,4 +54,4 @@ batch_size:64|160\nfp_items:fp32|fp16\nepoch:1\n--profiler_options:batch_range=[10,20];state=GPU;tracer_option=Default;profile_path=model.profile\n-flags:FLAGS_eager_delete_tensor_gb=0.0;FLAGS_fraction_of_gpu_memory_to_use=0.98;FLAGS_conv_workspace_size_limit=4096\n\\ No newline at end of file\n+flags:null\n\\ No newline at end of file\n" }, { "change_type": "MODIFY", "old_path": "test_tipc/configs/mot/fairmot_dla34_30e_1088x608_train_infer_python.txt", "new_path": "test_tipc/configs/mot/fairmot_dla34_30e_1088x608_train_infer_python.txt", "diff": "@@ -54,4 +54,4 @@ batch_size:6|22\nfp_items:fp32|fp16\nepoch:1\n--profiler_options:batch_range=[10,20];state=GPU;tracer_option=Default;profile_path=model.profile\n-flags:FLAGS_eager_delete_tensor_gb=0.0;FLAGS_fraction_of_gpu_memory_to_use=0.98;FLAGS_conv_workspace_size_limit=4096\n\\ No newline at end of file\n+flags:null\n\\ No newline at end of file\n" }, { "change_type": "MODIFY", "old_path": "test_tipc/configs/mot/jde_darknet53_30e_1088x608_train_infer_python.txt", "new_path": "test_tipc/configs/mot/jde_darknet53_30e_1088x608_train_infer_python.txt", "diff": "@@ -54,4 +54,4 @@ batch_size:4|14\nfp_items:fp32|fp16\nepoch:1\n--profiler_options:batch_range=[10,20];state=GPU;tracer_option=Default;profile_path=model.profile\n-flags:FLAGS_eager_delete_tensor_gb=0.0;FLAGS_fraction_of_gpu_memory_to_use=0.98;FLAGS_conv_workspace_size_limit=4096\n\\ No newline at end of file\n+flags:null\n\\ No newline at end of file\n" }, { "change_type": "MODIFY", "old_path": "test_tipc/configs/solov2/solov2_r50_fpn_1x_coco_train_infer_python.txt", "new_path": "test_tipc/configs/solov2/solov2_r50_fpn_1x_coco_train_infer_python.txt", "diff": "@@ -54,4 +54,4 @@ batch_size:2|4\nfp_items:fp32|fp16\nepoch:1\n--profiler_options:batch_range=[10,20];state=GPU;tracer_option=Default;profile_path=model.profile\n-flags:FLAGS_eager_delete_tensor_gb=0.0;FLAGS_fraction_of_gpu_memory_to_use=0.98;FLAGS_conv_workspace_size_limit=4096\n\\ No newline at end of file\n+flags:null\n\\ No newline at end of file\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
[TIPC] fix benchmark scripts (#5207)
499,299
17.02.2022 14:41:10
-28,800
9287258124f63a5fdae58e7227de2e379ba31e83
fix the order of calculation of center coordinary in ToHeatmapsTopDown
[ { "change_type": "MODIFY", "old_path": "ppdet/data/transform/keypoint_operators.py", "new_path": "ppdet/data/transform/keypoint_operators.py", "diff": "@@ -698,8 +698,8 @@ class ToHeatmapsTopDown(object):\ntmp_size = self.sigma * 3\nfeat_stride = image_size / self.hmsize\nfor joint_id in range(num_joints):\n- mu_x = int(joints[joint_id][0] + 0.5) / feat_stride[0]\n- mu_y = int(joints[joint_id][1] + 0.5) / feat_stride[1]\n+ mu_x = int(joints[joint_id][0] / feat_stride[0] + 0.5)\n+ mu_y = int(joints[joint_id][1] / feat_stride[1] + 0.5)\n# Check that any part of the gaussian is in-bounds\nul = [int(mu_x - tmp_size), int(mu_y - tmp_size)]\nbr = [int(mu_x + tmp_size + 1), int(mu_y + tmp_size + 1)]\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
fix the order of calculation of center coordinary in ToHeatmapsTopDown (#5222)
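The bug was operator order: rounding the joint to a whole pixel and then dividing by the feature stride leaves a fractional heatmap coordinate, while the fix divides first and rounds to an integer heatmap bin. A concrete case:

```python
joint_x, stride = 37.0, 4.0
old = int(joint_x + 0.5) / stride   # 37 / 4.0 = 9.25 -> fractional "center"
new = int(joint_x / stride + 0.5)   # int(9.75) = 9   -> integer heatmap bin
```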
499,339
18.02.2022 20:32:43
-28,800
7b60e7d892247c8bca9d2d12b5e2d8e3bec982f8
[TIPC] add benchmark for yolov3, mask_rcnn
[ { "change_type": "MODIFY", "old_path": "test_tipc/configs/mask_rcnn/mask_rcnn_r50_1x_coco_train_infer_python.txt", "new_path": "test_tipc/configs/mask_rcnn/mask_rcnn_r50_1x_coco_train_infer_python.txt", "diff": "@@ -49,3 +49,9 @@ inference:./deploy/python/infer.py\n--save_log_path:null\n--run_benchmark:True\n--trt_max_shape:1600\n+===========================train_benchmark_params==========================\n+batch_size:2|4\n+fp_items:fp32|fp16\n+epoch:1\n+--profiler_options:batch_range=[10,20];state=GPU;tracer_option=Default;profile_path=model.profile\n+flags:null\n\\ No newline at end of file\n" }, { "change_type": "MODIFY", "old_path": "test_tipc/configs/mask_rcnn/mask_rcnn_r50_fpn_1x_coco_train_infer_python.txt", "new_path": "test_tipc/configs/mask_rcnn/mask_rcnn_r50_fpn_1x_coco_train_infer_python.txt", "diff": "@@ -49,3 +49,9 @@ inference:./deploy/python/infer.py\n--save_log_path:null\n--run_benchmark:True\n--trt_max_shape:1600\n+===========================train_benchmark_params==========================\n+batch_size:2|4\n+fp_items:fp32|fp16\n+epoch:1\n+--profiler_options:batch_range=[10,20];state=GPU;tracer_option=Default;profile_path=model.profile\n+flags:null\n\\ No newline at end of file\n" }, { "change_type": "MODIFY", "old_path": "test_tipc/configs/yolov3/yolov3_darknet53_270e_coco_train_infer_python.txt", "new_path": "test_tipc/configs/yolov3/yolov3_darknet53_270e_coco_train_infer_python.txt", "diff": "@@ -49,3 +49,9 @@ inference:./deploy/python/infer.py\n--save_log_path:null\n--run_benchmark:True\nnull:null\n+===========================train_benchmark_params==========================\n+batch_size:8\n+fp_items:fp32|fp16\n+epoch:1\n+--profiler_options:batch_range=[10,20];state=GPU;tracer_option=Default;profile_path=model.profile\n+flags:null\n\\ No newline at end of file\n" }, { "change_type": "ADD", "old_path": null, "new_path": "test_tipc/static/mask_rcnn_r50_1x_coco/N1C1/mask_rcnn_r50_1x_coco_bs2_fp32_SingleP_DP.sh", "diff": "+model_item=mask_rcnn_r50_1x_coco\n+bs_item=2\n+fp_item=fp32\n+run_process_type=SingleP\n+run_mode=DP\n+device_num=N1C1\n+max_iter=500\n+num_workers=2\n+\n+# get data\n+bash test_tipc/static/${model_item}/benchmark_common/prepare.sh\n+# run\n+bash test_tipc/static/${model_item}/benchmark_common/run_benchmark.sh ${model_item} ${bs_item} ${fp_item} ${run_process_type} ${run_mode} ${device_num} ${max_iter} ${num_workers} 2>&1;\n+# run profiling\n+sleep 10;\n+export PROFILING=true\n+bash test_tipc/static/${model_item}/benchmark_common/run_benchmark.sh ${model_item} ${bs_item} ${fp_item} ${run_process_type} ${run_mode} ${device_num} ${max_iter} ${num_workers} 2>&1;\n" }, { "change_type": "ADD", "old_path": null, "new_path": "test_tipc/static/mask_rcnn_r50_1x_coco/N1C8/mask_rcnn_r50_1x_coco_bs2_fp32_MultiP_DP.sh", "diff": "+model_item=mask_rcnn_r50_1x_coco\n+bs_item=2\n+fp_item=fp32\n+run_process_type=MultiP\n+run_mode=DP\n+device_num=N1C8\n+max_iter=500\n+num_workers=2\n+\n+# get data\n+bash test_tipc/static/${model_item}/benchmark_common/prepare.sh\n+# run\n+bash test_tipc/static/${model_item}/benchmark_common/run_benchmark.sh ${model_item} ${bs_item} ${fp_item} ${run_process_type} ${run_mode} ${device_num} ${max_iter} ${num_workers} 2>&1;\n" }, { "change_type": "ADD", "old_path": null, "new_path": "test_tipc/static/mask_rcnn_r50_fpn_1x_coco/N1C1/mask_rcnn_r50_fpn_1x_coco_bs2_fp32_SingleP_DP.sh", "diff": 
"+model_item=mask_rcnn_r50_fpn_1x_coco\n+bs_item=2\n+fp_item=fp32\n+run_process_type=SingleP\n+run_mode=DP\n+device_num=N1C1\n+max_iter=500\n+num_workers=2\n+\n+# get data\n+bash test_tipc/static/${model_item}/benchmark_common/prepare.sh\n+# run\n+bash test_tipc/static/${model_item}/benchmark_common/run_benchmark.sh ${model_item} ${bs_item} ${fp_item} ${run_process_type} ${run_mode} ${device_num} ${max_iter} ${num_workers} 2>&1;\n+# run profiling\n+sleep 10;\n+export PROFILING=true\n+bash test_tipc/static/${model_item}/benchmark_common/run_benchmark.sh ${model_item} ${bs_item} ${fp_item} ${run_process_type} ${run_mode} ${device_num} ${max_iter} ${num_workers} 2>&1;\n" }, { "change_type": "ADD", "old_path": null, "new_path": "test_tipc/static/mask_rcnn_r50_fpn_1x_coco/N1C8/mask_rcnn_r50_fpn_1x_coco_bs2_fp32_MultiP_DP.sh", "diff": "+model_item=mask_rcnn_r50_fpn_1x_coco\n+bs_item=2\n+fp_item=fp32\n+run_process_type=MultiP\n+run_mode=DP\n+device_num=N1C8\n+max_iter=500\n+num_workers=2\n+\n+# get data\n+bash test_tipc/static/${model_item}/benchmark_common/prepare.sh\n+# run\n+bash test_tipc/static/${model_item}/benchmark_common/run_benchmark.sh ${model_item} ${bs_item} ${fp_item} ${run_process_type} ${run_mode} ${device_num} ${max_iter} ${num_workers} 2>&1;\n" }, { "change_type": "ADD", "old_path": null, "new_path": "test_tipc/static/yolov3_darknet53_270e_coco/N1C1/yolov3_darknet53_270e_coco_bs8_fp32_SingleP_DP.sh", "diff": "+model_item=yolov3_darknet53_270e_coco\n+bs_item=8\n+fp_item=fp32\n+run_process_type=SingleP\n+run_mode=DP\n+device_num=N1C1\n+max_iter=500\n+num_workers=8\n+\n+# get data\n+bash test_tipc/static/${model_item}/benchmark_common/prepare.sh\n+# run\n+bash test_tipc/static/${model_item}/benchmark_common/run_benchmark.sh ${model_item} ${bs_item} ${fp_item} ${run_process_type} ${run_mode} ${device_num} ${max_iter} ${num_workers} 2>&1;\n+# run profiling\n+sleep 10;\n+export PROFILING=true\n+bash test_tipc/static/${model_item}/benchmark_common/run_benchmark.sh ${model_item} ${bs_item} ${fp_item} ${run_process_type} ${run_mode} ${device_num} ${max_iter} ${num_workers} 2>&1;\n" }, { "change_type": "ADD", "old_path": null, "new_path": "test_tipc/static/yolov3_darknet53_270e_coco/N1C8/yolov3_darknet53_270e_coco_bs8_fp32_MultiP_DP.sh", "diff": "+model_item=yolov3_darknet53_270e_coco\n+bs_item=8\n+fp_item=fp32\n+run_process_type=MultiP\n+run_mode=DP\n+device_num=N1C8\n+max_iter=500\n+num_workers=8\n+\n+# get data\n+bash test_tipc/static/${model_item}/benchmark_common/prepare.sh\n+# run\n+bash test_tipc/static/${model_item}/benchmark_common/run_benchmark.sh ${model_item} ${bs_item} ${fp_item} ${run_process_type} ${run_mode} ${device_num} ${max_iter} ${num_workers} 2>&1;\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
[TIPC] add benchmark for yolov3, mask_rcnn (#5224)
499,352
22.02.2022 19:58:53
-28,800
7762b906a5c673a3081818a7d60f717b88ebf202
fix blazeface nas
[ { "change_type": "MODIFY", "old_path": "static/slim/nas/blazeface.yml", "new_path": "static/slim/nas/blazeface.yml", "diff": "@@ -93,3 +93,20 @@ EvalReader:\nmean: [104, 117, 123]\nstd: [127.502231, 127.502231, 127.502231]\nbatch_size: 1\n+\n+TestReader:\n+ inputs_def:\n+ fields: ['image', 'im_id', 'im_shape']\n+ dataset:\n+ !ImageFolder\n+ use_default_label: true\n+ sample_transforms:\n+ - !DecodeImage\n+ to_rgb: true\n+ - !NormalizeImage\n+ is_channel_first: false\n+ is_scale: false\n+ mean: [123, 117, 104]\n+ std: [127.502231, 127.502231, 127.502231]\n+ - !Permute {}\n+ batch_size: 1\n" }, { "change_type": "MODIFY", "old_path": "static/slim/nas/search_space/blazefacespace_nas.py", "new_path": "static/slim/nas/search_space/blazefacespace_nas.py", "diff": "@@ -33,7 +33,9 @@ class BlazeFaceNasSpace(SearchSpaceBase):\nself.mid_filter_num = np.array([8, 12, 16, 20, 24, 32])\nself.double_filter_num = np.array(\n[8, 12, 16, 24, 32, 40, 48, 64, 72, 80, 88, 96])\n- self.use_5x5kernel = np.array([0, 1])\n+ self.use_5x5kernel = np.array(\n+ [0]\n+ ) ### if constraint is latency, use 3x3 kernel, otherwise self.use_5x5kernel = np.array([0, 1])\ndef init_tokens(self):\nreturn [2, 1, 3, 8, 2, 1, 2, 1, 1]\n@@ -74,7 +76,8 @@ class BlazeFaceNasSpace(SearchSpaceBase):\nself.double_filter_num[tokens[3]]\n]]\n- is_5x5kernel = True if self.use_5x5kernel[tokens[8]] else False\n+ ### if constraint is latency, use 3x3 kernel, otherwise is_5x5kernel = True if self.use_5x5kernel[tokens[8]] else False\n+ is_5x5kernel = False ###True if self.use_5x5kernel[tokens[8]] else False\nreturn blaze_filters, double_blaze_filters, is_5x5kernel\ndef token2arch(self, tokens=None):\n" }, { "change_type": "MODIFY", "old_path": "static/slim/nas/train_nas.py", "new_path": "static/slim/nas/train_nas.py", "diff": "@@ -62,6 +62,8 @@ except ImportError as e:\nfrom paddleslim.analysis import flops, TableLatencyEvaluator\nfrom paddleslim.nas import SANAS\n+### register search space to paddleslim\n+import search_space\n@register\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
fix blazeface nas (#5241)
499,301
01.03.2022 15:14:41
-28,800
89bfcdaffaaefe7cd24241952b3fc0bc206fb38f
rename fp16 -> amp
[ { "change_type": "MODIFY", "old_path": "ppdet/engine/trainer.py", "new_path": "ppdet/engine/trainer.py", "diff": "@@ -362,8 +362,8 @@ class Trainer(object):\nmodel = paddle.DataParallel(\nself.model, find_unused_parameters=find_unused_parameters)\n- # initial fp16\n- if self.cfg.get('fp16', False):\n+ # enabel auto mixed precision mode\n+ if self.cfg.get('amp', False):\nscaler = amp.GradScaler(\nenable=self.cfg.use_gpu, init_loss_scaling=1024)\n@@ -401,7 +401,7 @@ class Trainer(object):\nself._compose_callback.on_step_begin(self.status)\ndata['epoch_id'] = epoch_id\n- if self.cfg.get('fp16', False):\n+ if self.cfg.get('amp', False):\nwith amp.auto_cast(enable=self.cfg.use_gpu):\n# model forward\noutputs = model(data)\n" }, { "change_type": "MODIFY", "old_path": "tools/train.py", "new_path": "tools/train.py", "diff": "@@ -60,10 +60,10 @@ def parse_args():\nhelp=\"If set True, enable continuous evaluation job.\"\n\"This flag is only used for internal test.\")\nparser.add_argument(\n- \"--fp16\",\n+ \"--amp\",\naction='store_true',\ndefault=False,\n- help=\"Enable mixed precision training.\")\n+ help=\"Enable auto mixed precision training.\")\nparser.add_argument(\n\"--fleet\", action='store_true', default=False, help=\"Use fleet or not\")\nparser.add_argument(\n@@ -130,7 +130,7 @@ def run(FLAGS, cfg):\ndef main():\nFLAGS = parse_args()\ncfg = load_config(FLAGS.config)\n- cfg['fp16'] = FLAGS.fp16\n+ cfg['amp'] = FLAGS.amp\ncfg['fleet'] = FLAGS.fleet\ncfg['use_vdl'] = FLAGS.use_vdl\ncfg['vdl_log_dir'] = FLAGS.vdl_log_dir\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
rename fp16 -> amp (#5268)
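The renamed flag gates the standard paddle AMP pattern visible in the diff: a `GradScaler` for loss scaling plus an `auto_cast` context around the forward pass. In isolation it looks like this (a sketch assuming `model`, `data`, and `optimizer` already exist):

```python
from paddle import amp

scaler = amp.GradScaler(enable=True, init_loss_scaling=1024)

with amp.auto_cast(enable=True):
    outputs = model(data)            # forward pass runs in mixed precision
    loss = outputs['loss']

scaled_loss = scaler.scale(loss)     # scale up to avoid fp16 gradient underflow
scaled_loss.backward()
scaler.minimize(optimizer, scaled_loss)
```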
499,298
01.03.2022 15:16:00
-28,800
629e1533b9a9bfb58df9e4595a36539b22afaa63
fix doc deadlinks
[ { "change_type": "RENAME", "old_path": "configs/centernet/centernet_mbv1_1x_140e_coco.yml", "new_path": "configs/centernet/centernet_mbv1_140e_coco.yml", "diff": "@@ -3,7 +3,7 @@ _BASE_: [\n]\npretrain_weights: https://paddledet.bj.bcebos.com/models/pretrained/MobileNetV1_pretrained.pdparams\n-weights: output/centernet_mbv1_1x_140e_coco/model_final\n+weights: output/centernet_mbv1_140e_coco/model_final\nCenterNet:\nbackbone: MobileNet\n" }, { "change_type": "RENAME", "old_path": "configs/centernet/centernet_mbv3_large_1x_140e_coco.yml", "new_path": "configs/centernet/centernet_mbv3_large_140e_coco.yml", "diff": "@@ -3,7 +3,7 @@ _BASE_: [\n]\npretrain_weights: https://paddledet.bj.bcebos.com/models/pretrained/MobileNetV3_large_x1_0_ssld_pretrained.pdparams\n-weights: output/centernet_mbv3_large_1x_140e_coco/model_final\n+weights: output/centernet_mbv3_large_140e_coco/model_final\nCenterNet:\nbackbone: MobileNetV3\n" }, { "change_type": "RENAME", "old_path": "configs/centernet/centernet_mbv3_small_1x_140e_coco.yml", "new_path": "configs/centernet/centernet_mbv3_small_140e_coco.yml", "diff": "@@ -3,7 +3,7 @@ _BASE_: [\n]\npretrain_weights: https://paddledet.bj.bcebos.com/models/pretrained/MobileNetV3_small_x1_0_ssld_pretrained.pdparams\n-weights: output/centernet_mbv3_small_1x_140e_coco/model_final\n+weights: output/centernet_mbv3_small_140e_coco/model_final\nCenterNet:\nbackbone: MobileNetV3\n@@ -24,6 +24,5 @@ CenterNetDLAFPN:\ndown_ratio: 8\ndcn_v2: False\n-\nTrainReader:\nbatch_size: 32\n" }, { "change_type": "RENAME", "old_path": "configs/centernet/centernet_shufflenetv2_1x_140e_coco.yml", "new_path": "configs/centernet/centernet_shufflenetv2_140e_coco.yml", "diff": "@@ -3,7 +3,7 @@ _BASE_: [\n]\npretrain_weights: https://paddledet.bj.bcebos.com/models/pretrained/ShuffleNetV2_x1_0_pretrained.pdparams\n-weights: output/centernet_shufflenetv2_1x_140e_coco/model_final\n+weights: output/centernet_shufflenetv2_140e_coco/model_final\nCenterNet:\nbackbone: ShuffleNetV2\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
fix doc deadlinks (#5270)
499,339
03.03.2022 18:36:04
-28,800
e9cfb1876e5dabfd116526639911905cc8e7a6fd
[DETR] fix find_unused_parameters in yaml
[ { "change_type": "MODIFY", "old_path": "configs/deformable_detr/README.md", "new_path": "configs/deformable_detr/README.md", "diff": "@@ -20,7 +20,7 @@ Deformable DETR is an object detection model based on DETR. We reproduced the mo\nGPU multi-card training\n```bash\nexport CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7\n-python -m paddle.distributed.launch --gpus 0,1,2,3,4,5,6,7 tools/train.py -c configs/deformable_detr/deformable_detr_r50_1x_coco.yml --fleet -o find_unused_parameters=True\n+python -m paddle.distributed.launch --gpus 0,1,2,3,4,5,6,7 tools/train.py -c configs/deformable_detr/deformable_detr_r50_1x_coco.yml --fleet\n```\n## Citations\n" }, { "change_type": "MODIFY", "old_path": "configs/deformable_detr/deformable_detr_r50_1x_coco.yml", "new_path": "configs/deformable_detr/deformable_detr_r50_1x_coco.yml", "diff": "@@ -6,3 +6,4 @@ _BASE_: [\n'_base_/deformable_detr_reader.yml',\n]\nweights: output/deformable_detr_r50_1x_coco/model_final\n+find_unused_parameters: True\n" }, { "change_type": "MODIFY", "old_path": "configs/detr/README.md", "new_path": "configs/detr/README.md", "diff": "@@ -20,7 +20,7 @@ DETR is an object detection model based on transformer. We reproduced the model\nGPU multi-card training\n```bash\nexport CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7\n-python -m paddle.distributed.launch --gpus 0,1,2,3,4,5,6,7 tools/train.py -c configs/detr/detr_r50_1x_coco.yml --fleet -o find_unused_parameters=True\n+python -m paddle.distributed.launch --gpus 0,1,2,3,4,5,6,7 tools/train.py -c configs/detr/detr_r50_1x_coco.yml --fleet\n```\n## Citations\n" }, { "change_type": "MODIFY", "old_path": "configs/detr/detr_r50_1x_coco.yml", "new_path": "configs/detr/detr_r50_1x_coco.yml", "diff": "@@ -6,3 +6,4 @@ _BASE_: [\n'_base_/detr_reader.yml',\n]\nweights: output/detr_r50_1x_coco/model_final\n+find_unused_parameters: True\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
[DETR] fix find_unused_parameters in yaml (#5291)
499,304
04.03.2022 09:49:49
-28,800
faa4f9a8e71bdb94ad3c6415dbefe0abd98a27ea
fix picodet post_process
[ { "change_type": "MODIFY", "old_path": "ppdet/modeling/heads/gfl_head.py", "new_path": "ppdet/modeling/heads/gfl_head.py", "diff": "@@ -430,9 +430,7 @@ class GFLHead(nn.Layer):\ncls_scores, bboxes_reg = gfl_head_outs\nbboxes = paddle.concat(bboxes_reg, axis=1)\n# rescale: [h_scale, w_scale] -> [w_scale, h_scale, w_scale, h_scale]\n- im_scale = paddle.concat(\n- [scale_factor[:, ::-1], scale_factor[:, ::-1]],\n- axis=-1).unsqueeze(1)\n+ im_scale = scale_factor.flip([1]).tile([1, 2]).unsqueeze(1)\nbboxes /= im_scale\nmlvl_scores = paddle.concat(cls_scores, axis=1)\nmlvl_scores = mlvl_scores.transpose([0, 2, 1])\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
fix picodet post_process (#5292)
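The replacement builds the same `(w_scale, h_scale, w_scale, h_scale)` rescale vector with `flip` and `tile` instead of a negative-step slice plus `concat`; a quick numpy check of the equivalence:

```python
import numpy as np

scale_factor = np.array([[0.5, 0.8]])  # per-image (h_scale, w_scale)
old = np.concatenate([scale_factor[:, ::-1]] * 2, axis=-1)[:, None, :]
new = np.tile(scale_factor[:, ::-1], (1, 2))[:, None, :]  # flip + tile
assert (old == new).all()  # both are (B, 1, 4) holding (w, h, w, h)
```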
499,339
04.03.2022 11:15:42
-28,800
161d5bbec5b8430afdeffdae716fd5dba3d7a251
[TIPC] fix benchmark static shell, test=document_fix
[ { "change_type": "MODIFY", "old_path": "test_tipc/static/mask_rcnn_r50_1x_coco/N1C1/mask_rcnn_r50_1x_coco_bs2_fp32_SingleP_DP.sh", "new_path": "test_tipc/static/mask_rcnn_r50_1x_coco/N1C1/mask_rcnn_r50_1x_coco_bs2_fp32_SingleP_DP.sh", "diff": "@@ -4,7 +4,7 @@ fp_item=fp32\nrun_process_type=SingleP\nrun_mode=DP\ndevice_num=N1C1\n-max_iter=500\n+max_iter=100\nnum_workers=2\n# get data\n@@ -14,4 +14,4 @@ bash test_tipc/static/${model_item}/benchmark_common/run_benchmark.sh ${model_it\n# run profiling\nsleep 10;\nexport PROFILING=true\n-bash test_tipc/static/${model_item}/benchmark_common/run_benchmark.sh ${model_item} ${bs_item} ${fp_item} ${run_process_type} ${run_mode} ${device_num} ${max_iter} ${num_workers} 2>&1;\n+bash test_tipc/static/${model_item}/benchmark_common/run_benchmark.sh ${model_item} ${bs_item} ${fp_item} ${run_process_type} ${run_mode} ${device_num} 11 ${num_workers} 2>&1;\n" }, { "change_type": "MODIFY", "old_path": "test_tipc/static/mask_rcnn_r50_1x_coco/N1C8/mask_rcnn_r50_1x_coco_bs2_fp32_MultiP_DP.sh", "new_path": "test_tipc/static/mask_rcnn_r50_1x_coco/N1C8/mask_rcnn_r50_1x_coco_bs2_fp32_MultiP_DP.sh", "diff": "@@ -4,7 +4,7 @@ fp_item=fp32\nrun_process_type=MultiP\nrun_mode=DP\ndevice_num=N1C8\n-max_iter=500\n+max_iter=100\nnum_workers=2\n# get data\n" }, { "change_type": "MODIFY", "old_path": "test_tipc/static/mask_rcnn_r50_fpn_1x_coco/N1C1/mask_rcnn_r50_fpn_1x_coco_bs2_fp32_SingleP_DP.sh", "new_path": "test_tipc/static/mask_rcnn_r50_fpn_1x_coco/N1C1/mask_rcnn_r50_fpn_1x_coco_bs2_fp32_SingleP_DP.sh", "diff": "@@ -4,7 +4,7 @@ fp_item=fp32\nrun_process_type=SingleP\nrun_mode=DP\ndevice_num=N1C1\n-max_iter=500\n+max_iter=100\nnum_workers=2\n# get data\n@@ -14,4 +14,4 @@ bash test_tipc/static/${model_item}/benchmark_common/run_benchmark.sh ${model_it\n# run profiling\nsleep 10;\nexport PROFILING=true\n-bash test_tipc/static/${model_item}/benchmark_common/run_benchmark.sh ${model_item} ${bs_item} ${fp_item} ${run_process_type} ${run_mode} ${device_num} ${max_iter} ${num_workers} 2>&1;\n+bash test_tipc/static/${model_item}/benchmark_common/run_benchmark.sh ${model_item} ${bs_item} ${fp_item} ${run_process_type} ${run_mode} ${device_num} 11 ${num_workers} 2>&1;\n" }, { "change_type": "MODIFY", "old_path": "test_tipc/static/mask_rcnn_r50_fpn_1x_coco/N1C8/mask_rcnn_r50_fpn_1x_coco_bs2_fp32_MultiP_DP.sh", "new_path": "test_tipc/static/mask_rcnn_r50_fpn_1x_coco/N1C8/mask_rcnn_r50_fpn_1x_coco_bs2_fp32_MultiP_DP.sh", "diff": "@@ -4,7 +4,7 @@ fp_item=fp32\nrun_process_type=MultiP\nrun_mode=DP\ndevice_num=N1C8\n-max_iter=500\n+max_iter=100\nnum_workers=2\n# get data\n" }, { "change_type": "MODIFY", "old_path": "test_tipc/static/yolov3_darknet53_270e_coco/N1C1/yolov3_darknet53_270e_coco_bs8_fp32_SingleP_DP.sh", "new_path": "test_tipc/static/yolov3_darknet53_270e_coco/N1C1/yolov3_darknet53_270e_coco_bs8_fp32_SingleP_DP.sh", "diff": "@@ -4,7 +4,7 @@ fp_item=fp32\nrun_process_type=SingleP\nrun_mode=DP\ndevice_num=N1C1\n-max_iter=500\n+max_iter=100\nnum_workers=8\n# get data\n@@ -14,4 +14,4 @@ bash test_tipc/static/${model_item}/benchmark_common/run_benchmark.sh ${model_it\n# run profiling\nsleep 10;\nexport PROFILING=true\n-bash test_tipc/static/${model_item}/benchmark_common/run_benchmark.sh ${model_item} ${bs_item} ${fp_item} ${run_process_type} ${run_mode} ${device_num} ${max_iter} ${num_workers} 2>&1;\n+bash test_tipc/static/${model_item}/benchmark_common/run_benchmark.sh ${model_item} ${bs_item} ${fp_item} ${run_process_type} ${run_mode} ${device_num} 11 ${num_workers} 2>&1;\n" 
}, { "change_type": "MODIFY", "old_path": "test_tipc/static/yolov3_darknet53_270e_coco/N1C8/yolov3_darknet53_270e_coco_bs8_fp32_MultiP_DP.sh", "new_path": "test_tipc/static/yolov3_darknet53_270e_coco/N1C8/yolov3_darknet53_270e_coco_bs8_fp32_MultiP_DP.sh", "diff": "@@ -4,7 +4,7 @@ fp_item=fp32\nrun_process_type=MultiP\nrun_mode=DP\ndevice_num=N1C8\n-max_iter=500\n+max_iter=100\nnum_workers=8\n# get data\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
[TIPC] fix benchmark static shell, test=document_fix (#5293)
499,299
05.03.2022 21:49:22
-28,800
fca31c8133ab2c13504256bbb2d843050c9cb3e8
fix python inference error when use fuse_norm keypoint model
[ { "change_type": "MODIFY", "old_path": "deploy/python/keypoint_infer.py", "new_path": "deploy/python/keypoint_infer.py", "diff": "@@ -288,7 +288,7 @@ def create_inputs(imgs, im_info):\ninputs (dict): input of model\n\"\"\"\ninputs = {}\n- inputs['image'] = np.stack(imgs, axis=0)\n+ inputs['image'] = np.stack(imgs, axis=0).astype('float32')\nim_shape = []\nfor e in im_info:\nim_shape.append(np.array((e['im_shape'])).astype('float32'))\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
fix python inference error when use fuse_norm keypoint model (#5300)
499,299
05.03.2022 21:49:33
-28,800
a4e093b799af582df74079584f1fa3ae07c1c538
fix use_dark can not set to False
[ { "change_type": "MODIFY", "old_path": "deploy/python/det_keypoint_unite_utils.py", "new_path": "deploy/python/det_keypoint_unite_utils.py", "diff": "@@ -112,7 +112,7 @@ def argsparser():\n\"calibration, trt_calib_mode need to set True.\")\nparser.add_argument(\n'--use_dark',\n- type=bool,\n+ type=ast.literal_eval,\ndefault=True,\nhelp='whether to use darkpose to get better keypoint position predict ')\nparser.add_argument(\n" }, { "change_type": "MODIFY", "old_path": "deploy/python/utils.py", "new_path": "deploy/python/utils.py", "diff": "@@ -133,7 +133,7 @@ def argsparser():\nhelp=\"max batch_size for reid model inference.\")\nparser.add_argument(\n'--use_dark',\n- type=bool,\n+ type=ast.literal_eval,\ndefault=True,\nhelp='whether to use darkpose to get better keypoint position predict ')\nreturn parser\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
fix use_dark can not set to False (#5304)
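The root cause: with `type=bool`, argparse calls `bool('False')`, and any non-empty string is truthy, so the flag could never be switched off from the command line; `ast.literal_eval` parses the literal instead. A quick demonstration:

```python
import argparse
import ast

parser = argparse.ArgumentParser()
parser.add_argument('--use_dark', type=ast.literal_eval, default=True)

print(bool('False'))                                        # True  (the bug)
print(parser.parse_args(['--use_dark', 'False']).use_dark)  # False (the fix)
```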
499,304
07.03.2022 10:48:35
-28,800
c27233d313fa7ddf3da73725eff71fac65f8d8e4
fix picodetv2 post process
[ { "change_type": "MODIFY", "old_path": "ppdet/modeling/heads/pico_head.py", "new_path": "ppdet/modeling/heads/pico_head.py", "diff": "@@ -460,18 +460,15 @@ class PicoHeadV2(GFLHead):\nact=self.act,\nuse_act_in_out=False))\n- def forward(self, fpn_feats, deploy=False):\n+ def forward(self, fpn_feats, export_post_process=True):\nassert len(fpn_feats) == len(\nself.fpn_stride\n), \"The size of fpn_feats is not equal to size of fpn_stride\"\n- anchors, _, num_anchors_list, stride_tensor_list = generate_anchors_for_grid_cell(\n- fpn_feats, self.fpn_stride, self.grid_cell_scale, self.cell_offset)\n- anchors_split = paddle.split(anchors, num_anchors_list)\ncls_score_list, reg_list, box_list = [], [], []\n- for i, fpn_feat, anchor, stride, align_cls in zip(\n- range(len(self.fpn_stride)), fpn_feats, anchors_split,\n- self.fpn_stride, self.cls_align):\n+ for i, fpn_feat, stride, align_cls in zip(\n+ range(len(self.fpn_stride)), fpn_feats, self.fpn_stride,\n+ self.cls_align):\nb, _, h, w = get_static_shape(fpn_feat)\n# task decomposition\nconv_cls_feat, se_feat = self.conv_feat(fpn_feat, i)\n@@ -485,22 +482,35 @@ class PicoHeadV2(GFLHead):\nelse:\ncls_score = F.sigmoid(cls_logit)\n- anchor_centers = bbox_center(anchor).unsqueeze(0) / stride\n- anchor_centers = anchor_centers.reshape([1, h, w, 2])\n-\n- pred_distances = self.distribution_project(\n- reg_pred.transpose([0, 2, 3, 1])).reshape([b, h, w, 4])\n- reg_bbox = batch_distance2bbox(\n- anchor_centers, pred_distances, max_shapes=None)\n- if not self.training:\n+ if not export_post_process and not self.training:\n+ # Now only supports batch size = 1 in deploy\ncls_score_list.append(\n- cls_score.transpose([0, 2, 3, 1]).reshape(\n- [b, -1, self.cls_out_channels]))\n- box_list.append(reg_bbox.reshape([b, -1, 4]) * stride)\n+ cls_score.reshape([1, self.cls_out_channels, -1]).transpose(\n+ [0, 2, 1]))\n+ box_list.append(\n+ reg_pred.reshape([1, (self.reg_max + 1) * 4, -1]).transpose(\n+ [0, 2, 1]))\nelse:\n- cls_score_list.append(cls_score.flatten(2).transpose([0, 2, 1]))\n+ cls_score_out = cls_score.transpose([0, 2, 3, 1])\n+ bbox_pred = reg_pred.transpose([0, 2, 3, 1])\n+ b, cell_h, cell_w, _ = paddle.shape(cls_score_out)\n+ y, x = self.get_single_level_center_point(\n+ [cell_h, cell_w], stride, cell_offset=self.cell_offset)\n+ center_points = paddle.stack([x, y], axis=-1)\n+ cls_score_out = cls_score_out.reshape(\n+ [b, -1, self.cls_out_channels])\n+ bbox_pred = self.distribution_project(bbox_pred) * stride\n+ bbox_pred = bbox_pred.reshape([b, cell_h * cell_w, 4])\n+ bbox_pred = batch_distance2bbox(\n+ center_points, bbox_pred, max_shapes=None)\n+ if not self.training:\n+ cls_score_list.append(cls_score_out)\n+ box_list.append(bbox_pred)\n+ else:\n+ cls_score_list.append(\n+ cls_score.flatten(2).transpose([0, 2, 1]))\nreg_list.append(reg_pred.flatten(2).transpose([0, 2, 1]))\n- box_list.append(reg_bbox.reshape([b, -1, 4]))\n+ box_list.append(bbox_pred / stride)\nif not self.training:\nreturn cls_score_list, box_list\n@@ -508,16 +518,19 @@ class PicoHeadV2(GFLHead):\ncls_score_list = paddle.concat(cls_score_list, axis=1)\nbox_list = paddle.concat(box_list, axis=1)\nreg_list = paddle.concat(reg_list, axis=1)\n- return cls_score_list, reg_list, box_list, anchors, num_anchors_list, stride_tensor_list\n+ return cls_score_list, reg_list, box_list, fpn_feats\ndef get_loss(self, head_outs, gt_meta):\n- pred_scores, pred_regs, pred_bboxes, anchors, num_anchors_list, stride_tensor_list = head_outs\n+ pred_scores, pred_regs, pred_bboxes, fpn_feats = 
head_outs\ngt_labels = gt_meta['gt_class']\ngt_bboxes = gt_meta['gt_bbox']\ngt_scores = gt_meta['gt_score'] if 'gt_score' in gt_meta else None\nnum_imgs = gt_meta['im_id'].shape[0]\npad_gt_mask = gt_meta['pad_gt_mask']\n+ anchors, _, num_anchors_list, stride_tensor_list = generate_anchors_for_grid_cell(\n+ fpn_feats, self.fpn_stride, self.grid_cell_scale, self.cell_offset)\n+\ncenters = bbox_center(anchors)\n# label assignment\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
fix picodetv2 post process (#5306)
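The change above defers anchor-center computation to the head's post-process: per-level cell centers are built from the feature-map grid, and the projected regression distances are decoded against them. A minimal numpy sketch of that decode (the helper names here are illustrative stand-ins, not the repo's actual functions):

```python
import numpy as np

def grid_center_points(feat_h, feat_w, stride, cell_offset=0.0):
    # center of each feature-map cell, in input-image coordinates
    ys = (np.arange(feat_h) + cell_offset) * stride
    xs = (np.arange(feat_w) + cell_offset) * stride
    y, x = np.meshgrid(ys, xs, indexing="ij")
    return np.stack([x.ravel(), y.ravel()], axis=-1)      # [H*W, 2]

def distance2bbox(points, distances):
    # distances are (left, top, right, bottom) offsets from each center
    lt, rb = distances[:, :2], distances[:, 2:]
    return np.concatenate([points - lt, points + rb], axis=-1)  # x1y1x2y2

centers = grid_center_points(2, 2, stride=8)
dists = np.full((4, 4), 4.0)            # each box extends 4 px in every direction
print(distance2bbox(centers, dists))    # one 8x8 box per cell
```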
499,339
07.03.2022 17:12:54
-28,800
34a74b39dc439d1f13925ca70a9418a1fee477fa
refine ema_model save
[ { "change_type": "MODIFY", "old_path": "ppdet/engine/callbacks.py", "new_path": "ppdet/engine/callbacks.py", "diff": "@@ -182,7 +182,7 @@ class Checkpointer(Callback):\n) % self.model.cfg.snapshot_epoch == 0 or epoch_id == end_epoch - 1:\nsave_name = str(\nepoch_id) if epoch_id != end_epoch - 1 else \"model_final\"\n- weight = self.weight\n+ weight = self.weight.state_dict()\nelif mode == 'eval':\nif 'save_best_model' in status and status['save_best_model']:\nfor metric in self.model._metrics:\n@@ -201,18 +201,22 @@ class Checkpointer(Callback):\nif map_res[key][0] > self.best_ap:\nself.best_ap = map_res[key][0]\nsave_name = 'best_model'\n- weight = self.weight\n+ weight = self.weight.state_dict()\nlogger.info(\"Best test {} ap is {:0.3f}.\".format(\nkey, self.best_ap))\nif weight:\nif self.model.use_ema:\n- save_model(status['weight'], self.save_dir, save_name,\n- epoch_id + 1, self.model.optimizer)\n- save_model(weight, self.save_dir,\n- '{}_ema'.format(save_name), epoch_id + 1)\n+ # save model and ema_model\n+ save_model(\n+ status['weight'],\n+ self.model.optimizer,\n+ self.save_dir,\n+ save_name,\n+ epoch_id + 1,\n+ ema_model=weight)\nelse:\n- save_model(weight, self.save_dir, save_name, epoch_id + 1,\n- self.model.optimizer)\n+ save_model(weight, self.model.optimizer, self.save_dir,\n+ save_name, epoch_id + 1)\nclass WiferFaceEval(Callback):\n" }, { "change_type": "MODIFY", "old_path": "ppdet/optimizer.py", "new_path": "ppdet/optimizer.py", "diff": "@@ -332,7 +332,7 @@ class ModelEMA(object):\nfor k, v in self.state_dict.items():\nself.state_dict[k] = paddle.zeros_like(v)\n- def resume(self, state_dict, step):\n+ def resume(self, state_dict, step=0):\nfor k, v in state_dict.items():\nself.state_dict[k] = v\nself.step = step\n" }, { "change_type": "MODIFY", "old_path": "ppdet/utils/checkpoint.py", "new_path": "ppdet/utils/checkpoint.py", "diff": "@@ -72,7 +72,14 @@ def load_weight(model, weight, optimizer=None, ema=None):\nraise ValueError(\"Model pretrain path {} does not \"\n\"exists.\".format(pdparam_path))\n+ if ema is not None and os.path.exists(path + '.pdema'):\n+ # Exchange model and ema_model to load\n+ ema_state_dict = paddle.load(pdparam_path)\n+ param_state_dict = paddle.load(path + '.pdema')\n+ else:\n+ ema_state_dict = None\nparam_state_dict = paddle.load(pdparam_path)\n+\nmodel_dict = model.state_dict()\nmodel_weight = {}\nincorrect_keys = 0\n@@ -102,10 +109,11 @@ def load_weight(model, weight, optimizer=None, ema=None):\nlast_epoch = optim_state_dict.pop('last_epoch')\noptimizer.set_state_dict(optim_state_dict)\n- if ema is not None and os.path.exists(path + '_ema.pdparams'):\n- ema_state_dict = paddle.load(path + '_ema.pdparams')\n+ if ema_state_dict is not None:\nema.resume(ema_state_dict,\noptim_state_dict['LR_Scheduler']['last_epoch'])\n+ elif ema_state_dict is not None:\n+ ema.resume(ema_state_dict)\nreturn last_epoch\n@@ -205,30 +213,42 @@ def load_pretrain_weight(model, pretrain_weight):\nlogger.info('Finish loading model weights: {}'.format(weights_path))\n-def save_model(model, save_dir, save_name, last_epoch, optimizer=None):\n+def save_model(model,\n+ optimizer,\n+ save_dir,\n+ save_name,\n+ last_epoch,\n+ ema_model=None):\n\"\"\"\nsave model into disk.\nArgs:\n- model (paddle.nn.Layer): the Layer instalce to save parameters.\n+ model (dict): the model state_dict to save parameters.\noptimizer (paddle.optimizer.Optimizer): the Optimizer instance to\nsave optimizer states.\nsave_dir (str): the directory to be saved.\nsave_name (str): the path to be 
saved.\nlast_epoch (int): the epoch index.\n+ ema_model (dict|None): the ema_model state_dict to save parameters.\n\"\"\"\nif paddle.distributed.get_rank() != 0:\nreturn\n+ assert isinstance(model, dict), (\"model is not a instance of dict, \"\n+ \"please call model.state_dict() to get.\")\nif not os.path.exists(save_dir):\nos.makedirs(save_dir)\nsave_path = os.path.join(save_dir, save_name)\n- if isinstance(model, nn.Layer):\n- paddle.save(model.state_dict(), save_path + \".pdparams\")\n- else:\n- assert isinstance(model,\n- dict), 'model is not a instance of nn.layer or dict'\n+ # save model\n+ if ema_model is None:\npaddle.save(model, save_path + \".pdparams\")\n- if optimizer is not None:\n+ else:\n+ assert isinstance(ema_model,\n+ dict), (\"ema_model is not a instance of dict, \"\n+ \"please call model.state_dict() to get.\")\n+ # Exchange model and ema_model to save\n+ paddle.save(ema_model, save_path + \".pdparams\")\n+ paddle.save(model, save_path + \".pdema\")\n+ # save optimizer\nstate_dict = optimizer.state_dict()\nstate_dict['last_epoch'] = last_epoch\npaddle.save(state_dict, save_path + \".pdopt\")\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
refine ema_model save (#5314)
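The save convention introduced here is a swap: when EMA is enabled, `<name>.pdparams` holds the EMA weights (what eval/export consumes by default) and `<name>.pdema` holds the raw trained weights; `load_weight` mirrors the swap on resume. A rough framework-agnostic sketch of the same idea, using pickle as a stand-in for `paddle.save`/`paddle.load`:

```python
import os
import pickle

def save_checkpoint(path, model_sd, ema_sd=None):
    if ema_sd is None:
        pickle.dump(model_sd, open(path + ".pdparams", "wb"))
    else:
        # EMA weights go to .pdparams so the default eval/export path uses them
        pickle.dump(ema_sd, open(path + ".pdparams", "wb"))
        pickle.dump(model_sd, open(path + ".pdema", "wb"))

def load_checkpoint(path):
    primary = pickle.load(open(path + ".pdparams", "rb"))
    if os.path.exists(path + ".pdema"):
        # undo the save-time swap: .pdema is the raw model, .pdparams was EMA
        return pickle.load(open(path + ".pdema", "rb")), primary
    return primary, None  # (model state, ema state)

save_checkpoint("model_final", {"w": 1.0}, ema_sd={"w": 0.9})
model_sd, ema_sd = load_checkpoint("model_final")
print(model_sd, ema_sd)  # {'w': 1.0} {'w': 0.9}
```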
499,298
08.03.2022 12:03:05
-28,800
56d22694f75b00e89cf694ccc069e4a027b2b6fb
[MOT] unify mot and det output format
[ { "change_type": "MODIFY", "old_path": "deploy/pptracking/python/mot/tracker/deepsort_tracker.py", "new_path": "deploy/pptracking/python/mot/tracker/deepsort_tracker.py", "diff": "@@ -90,13 +90,13 @@ class DeepSORTTracker(object):\nPerform measurement update and track management.\nArgs:\npred_dets (np.array): Detection results of the image, the shape is\n- [N, 6], means 'x0, y0, x1, y1, score, cls_id'.\n+ [N, 6], means 'cls_id, score, x0, y0, x1, y1'.\npred_embs (np.array): Embedding results of the image, the shape is\n[N, 128], usually pred_embs.shape[1] is a multiple of 128.\n\"\"\"\n- pred_tlwhs = pred_dets[:, :4]\n- pred_scores = pred_dets[:, 4:5]\n- pred_cls_ids = pred_dets[:, 5:]\n+ pred_cls_ids = pred_dets[:, 0:1]\n+ pred_scores = pred_dets[:, 1:2]\n+ pred_tlwhs = pred_dets[:, 2:6]\ndetections = [\nDetection(tlwh, score, feat, cls_id)\n" }, { "change_type": "MODIFY", "old_path": "deploy/pptracking/python/mot/tracker/jde_tracker.py", "new_path": "deploy/pptracking/python/mot/tracker/jde_tracker.py", "diff": "@@ -100,7 +100,7 @@ class JDETracker(object):\nArgs:\npred_dets (np.array): Detection results of the image, the shape is\n- [N, 6], means 'x0, y0, x1, y1, score, cls_id'.\n+ [N, 6], means 'cls_id, score, x0, y0, x1, y1'.\npred_embs (np.array): Embedding results of the image, the shape is\n[N, 128] or [N, 512].\n@@ -122,7 +122,7 @@ class JDETracker(object):\n# unify single and multi classes detection and embedding results\nfor cls_id in range(self.num_classes):\n- cls_idx = (pred_dets[:, 5:] == cls_id).squeeze(-1)\n+ cls_idx = (pred_dets[:, 0:1] == cls_id).squeeze(-1)\npred_dets_dict[cls_id] = pred_dets[cls_idx]\nif pred_embs is not None:\npred_embs_dict[cls_id] = pred_embs[cls_idx]\n@@ -133,21 +133,26 @@ class JDETracker(object):\n\"\"\" Step 1: Get detections by class\"\"\"\npred_dets_cls = pred_dets_dict[cls_id]\npred_embs_cls = pred_embs_dict[cls_id]\n- remain_inds = (pred_dets_cls[:, 4:5] > self.conf_thres).squeeze(-1)\n+ remain_inds = (pred_dets_cls[:, 1:2] > self.conf_thres).squeeze(-1)\nif remain_inds.sum() > 0:\npred_dets_cls = pred_dets_cls[remain_inds]\nif self.use_byte:\ndetections = [\nSTrack(\n- STrack.tlbr_to_tlwh(tlbrs[:4]), tlbrs[4], cls_id, 30, temp_feat=None)\n- for tlbrs in pred_dets_cls\n+ STrack.tlbr_to_tlwh(tlbrs[2:6]),\n+ tlbrs[1],\n+ cls_id,\n+ 30,\n+ temp_feat=None) for tlbrs in pred_dets_cls\n]\nelse:\npred_embs_cls = pred_embs_cls[remain_inds]\ndetections = [\nSTrack(\n- STrack.tlbr_to_tlwh(tlbrs[:4]), tlbrs[4], cls_id, 30, temp_feat)\n- for (tlbrs, temp_feat) in zip(pred_dets_cls, pred_embs_cls)\n+ STrack.tlbr_to_tlwh(tlbrs[2:6]), tlbrs[1], cls_id,\n+ 30, temp_feat)\n+ for (tlbrs, temp_feat\n+ ) in zip(pred_dets_cls, pred_embs_cls)\n]\nelse:\ndetections = []\n@@ -171,14 +176,17 @@ class JDETracker(object):\nSTrack.multi_predict(track_pool_dict[cls_id], self.motion)\nif self.use_byte:\n- dists = matching.iou_distance(track_pool_dict[cls_id], detections)\n+ dists = matching.iou_distance(track_pool_dict[cls_id],\n+ detections)\nmatches, u_track, u_detection = matching.linear_assignment(\ndists, thresh=self.match_thres) # not self.tracked_thresh\nelse:\ndists = matching.embedding_distance(\n- track_pool_dict[cls_id], detections, metric=self.metric_type)\n- dists = matching.fuse_motion(self.motion, dists,\n- track_pool_dict[cls_id], detections)\n+ track_pool_dict[cls_id],\n+ detections,\n+ metric=self.metric_type)\n+ dists = matching.fuse_motion(\n+ self.motion, dists, track_pool_dict[cls_id], detections)\nmatches, u_track, u_detection = 
matching.linear_assignment(\ndists, thresh=self.tracked_thresh)\n@@ -199,15 +207,20 @@ class JDETracker(object):\n# None of the steps below happen if there are no undetected tracks.\n\"\"\" Step 3: Second association, with IOU\"\"\"\nif self.use_byte:\n- inds_low = pred_dets_dict[cls_id][:, 4:5] > self.low_conf_thres\n- inds_high = pred_dets_dict[cls_id][:, 4:5] < self.conf_thres\n+ inds_low = pred_dets_dict[cls_id][:, 1:2] > self.low_conf_thres\n+ inds_high = pred_dets_dict[cls_id][:, 1:2] < self.conf_thres\ninds_second = np.logical_and(inds_low, inds_high).squeeze(-1)\npred_dets_cls_second = pred_dets_dict[cls_id][inds_second]\n# association the untrack to the low score detections\nif len(pred_dets_cls_second) > 0:\ndetections_second = [\n- STrack(STrack.tlbr_to_tlwh(tlbrs[:4]), tlbrs[4], cls_id, 30, temp_feat=None)\n+ STrack(\n+ STrack.tlbr_to_tlwh(tlbrs[:4]),\n+ tlbrs[4],\n+ cls_id,\n+ 30,\n+ temp_feat=None)\nfor tlbrs in pred_dets_cls_second[:, :5]\n]\nelse:\n@@ -216,7 +229,8 @@ class JDETracker(object):\ntrack_pool_dict[cls_id][i] for i in u_track\nif track_pool_dict[cls_id][i].state == TrackState.Tracked\n]\n- dists = matching.iou_distance(r_tracked_stracks, detections_second)\n+ dists = matching.iou_distance(r_tracked_stracks,\n+ detections_second)\nmatches, u_track, u_detection_second = matching.linear_assignment(\ndists, thresh=0.4) # not r_tracked_thresh\nelse:\n@@ -232,7 +246,8 @@ class JDETracker(object):\nfor i_tracked, idet in matches:\ntrack = r_tracked_stracks[i_tracked]\n- det = detections[idet] if not self.use_byte else detections_second[idet]\n+ det = detections[\n+ idet] if not self.use_byte else detections_second[idet]\nif track.state == TrackState.Tracked:\ntrack.update(det, self.frame_id)\nactivated_tracks_dict[cls_id].append(track)\n" }, { "change_type": "MODIFY", "old_path": "deploy/python/mot_jde_infer.py", "new_path": "deploy/python/mot_jde_infer.py", "diff": "@@ -115,7 +115,7 @@ class JDE_Detector(Detector):\nreturn result\ndef tracking(self, det_results):\n- pred_dets = det_results['pred_dets']\n+ pred_dets = det_results['pred_dets'] # 'cls_id, score, x0, y0, x1, y1'\npred_embs = det_results['pred_embs']\nonline_targets_dict = self.tracker.update(pred_dets, pred_embs)\n@@ -143,7 +143,7 @@ class JDE_Detector(Detector):\nrepeats (int): repeats number for prediction\nReturns:\nresult (dict): include 'pred_dets': np.ndarray: shape:[N,6], N: number of box,\n- matix element:[x_min, y_min, x_max, y_max, score, class]\n+ matix element:[class, score, x_min, y_min, x_max, y_max]\nFairMOT(JDE)'s result include 'pred_embs': np.ndarray:\nshape: [N, 128]\n'''\n" }, { "change_type": "MODIFY", "old_path": "deploy/python/mot_sde_infer.py", "new_path": "deploy/python/mot_sde_infer.py", "diff": "@@ -111,11 +111,8 @@ class SDE_Detector(Detector):\nlow_conf_thres=low_conf_thres)\ndef tracking(self, det_results):\n- pred_dets = det_results['boxes']\n+ pred_dets = det_results['boxes'] # 'cls_id, score, x0, y0, x1, y1'\npred_embs = None\n- pred_dets = np.concatenate(\n- (pred_dets[:, 2:], pred_dets[:, 1:2], pred_dets[:, 0:1]), 1)\n- # pred_dets should be 'x0, y0, x1, y1, score, cls_id'\nonline_targets_dict = self.tracker.update(pred_dets, pred_embs)\nonline_tlwhs = defaultdict(list)\n" }, { "change_type": "MODIFY", "old_path": "ppdet/engine/tracker.py", "new_path": "ppdet/engine/tracker.py", "diff": "@@ -282,14 +282,14 @@ class Tracker(object):\n# thus will not inference reid model\ncontinue\n- pred_scores = pred_scores[keep_idx[0]]\npred_cls_ids = pred_cls_ids[keep_idx[0]]\n+ 
pred_scores = pred_scores[keep_idx[0]]\npred_tlwhs = np.concatenate(\n(pred_xyxys[:, 0:2],\npred_xyxys[:, 2:4] - pred_xyxys[:, 0:2] + 1),\naxis=1)\npred_dets = np.concatenate(\n- (pred_tlwhs, pred_scores, pred_cls_ids), axis=1)\n+ (pred_cls_ids, pred_scores, pred_tlwhs), axis=1)\ntracker = self.model.tracker\ncrops = get_crops(\n" }, { "change_type": "MODIFY", "old_path": "ppdet/modeling/mot/tracker/deepsort_tracker.py", "new_path": "ppdet/modeling/mot/tracker/deepsort_tracker.py", "diff": "@@ -96,13 +96,13 @@ class DeepSORTTracker(object):\nPerform measurement update and track management.\nArgs:\npred_dets (np.array): Detection results of the image, the shape is\n- [N, 6], means 'x0, y0, x1, y1, score, cls_id'.\n+ [N, 6], means 'cls_id, score, x0, y0, x1, y1'.\npred_embs (np.array): Embedding results of the image, the shape is\n[N, 128], usually pred_embs.shape[1] is a multiple of 128.\n\"\"\"\n- pred_tlwhs = pred_dets[:, :4]\n- pred_scores = pred_dets[:, 4:5]\n- pred_cls_ids = pred_dets[:, 5:]\n+ pred_cls_ids = pred_dets[:, 0:1]\n+ pred_scores = pred_dets[:, 1:2]\n+ pred_tlwhs = pred_dets[:, 2:6]\ndetections = [\nDetection(tlwh, score, feat, cls_id)\n" }, { "change_type": "MODIFY", "old_path": "ppdet/modeling/mot/tracker/jde_tracker.py", "new_path": "ppdet/modeling/mot/tracker/jde_tracker.py", "diff": "@@ -106,7 +106,7 @@ class JDETracker(object):\nArgs:\npred_dets (np.array): Detection results of the image, the shape is\n- [N, 6], means 'x0, y0, x1, y1, score, cls_id'.\n+ [N, 6], means 'cls_id, score, x0, y0, x1, y1'.\npred_embs (np.array): Embedding results of the image, the shape is\n[N, 128] or [N, 512].\n@@ -128,7 +128,7 @@ class JDETracker(object):\n# unify single and multi classes detection and embedding results\nfor cls_id in range(self.num_classes):\n- cls_idx = (pred_dets[:, 5:] == cls_id).squeeze(-1)\n+ cls_idx = (pred_dets[:, 0:1] == cls_id).squeeze(-1)\npred_dets_dict[cls_id] = pred_dets[cls_idx]\nif pred_embs is not None:\npred_embs_dict[cls_id] = pred_embs[cls_idx]\n@@ -139,21 +139,26 @@ class JDETracker(object):\n\"\"\" Step 1: Get detections by class\"\"\"\npred_dets_cls = pred_dets_dict[cls_id]\npred_embs_cls = pred_embs_dict[cls_id]\n- remain_inds = (pred_dets_cls[:, 4:5] > self.conf_thres).squeeze(-1)\n+ remain_inds = (pred_dets_cls[:, 1:2] > self.conf_thres).squeeze(-1)\nif remain_inds.sum() > 0:\npred_dets_cls = pred_dets_cls[remain_inds]\nif self.use_byte:\ndetections = [\nSTrack(\n- STrack.tlbr_to_tlwh(tlbrs[:4]), tlbrs[4], cls_id, 30, temp_feat=None)\n- for tlbrs in pred_dets_cls\n+ STrack.tlbr_to_tlwh(tlbrs[2:6]),\n+ tlbrs[1],\n+ cls_id,\n+ 30,\n+ temp_feat=None) for tlbrs in pred_dets_cls\n]\nelse:\npred_embs_cls = pred_embs_cls[remain_inds]\ndetections = [\nSTrack(\n- STrack.tlbr_to_tlwh(tlbrs[:4]), tlbrs[4], cls_id, 30, temp_feat)\n- for (tlbrs, temp_feat) in zip(pred_dets_cls, pred_embs_cls)\n+ STrack.tlbr_to_tlwh(tlbrs[2:6]), tlbrs[1], cls_id,\n+ 30, temp_feat)\n+ for (tlbrs, temp_feat\n+ ) in zip(pred_dets_cls, pred_embs_cls)\n]\nelse:\ndetections = []\n@@ -177,14 +182,17 @@ class JDETracker(object):\nSTrack.multi_predict(track_pool_dict[cls_id], self.motion)\nif self.use_byte:\n- dists = matching.iou_distance(track_pool_dict[cls_id], detections)\n+ dists = matching.iou_distance(track_pool_dict[cls_id],\n+ detections)\nmatches, u_track, u_detection = matching.linear_assignment(\n- dists, thresh=self.match_thres) #\n+ dists, thresh=self.match_thres) # not self.tracked_thresh\nelse:\ndists = matching.embedding_distance(\n- track_pool_dict[cls_id], 
detections, metric=self.metric_type)\n- dists = matching.fuse_motion(self.motion, dists,\n- track_pool_dict[cls_id], detections)\n+ track_pool_dict[cls_id],\n+ detections,\n+ metric=self.metric_type)\n+ dists = matching.fuse_motion(\n+ self.motion, dists, track_pool_dict[cls_id], detections)\nmatches, u_track, u_detection = matching.linear_assignment(\ndists, thresh=self.tracked_thresh)\n@@ -205,15 +213,20 @@ class JDETracker(object):\n# None of the steps below happen if there are no undetected tracks.\n\"\"\" Step 3: Second association, with IOU\"\"\"\nif self.use_byte:\n- inds_low = pred_dets_dict[cls_id][:, 4:5] > self.low_conf_thres\n- inds_high = pred_dets_dict[cls_id][:, 4:5] < self.conf_thres\n+ inds_low = pred_dets_dict[cls_id][:, 1:2] > self.low_conf_thres\n+ inds_high = pred_dets_dict[cls_id][:, 1:2] < self.conf_thres\ninds_second = np.logical_and(inds_low, inds_high).squeeze(-1)\npred_dets_cls_second = pred_dets_dict[cls_id][inds_second]\n# association the untrack to the low score detections\nif len(pred_dets_cls_second) > 0:\ndetections_second = [\n- STrack(STrack.tlbr_to_tlwh(tlbrs[:4]), tlbrs[4], cls_id, 30, temp_feat=None)\n+ STrack(\n+ STrack.tlbr_to_tlwh(tlbrs[:4]),\n+ tlbrs[4],\n+ cls_id,\n+ 30,\n+ temp_feat=None)\nfor tlbrs in pred_dets_cls_second[:, :5]\n]\nelse:\n@@ -222,7 +235,8 @@ class JDETracker(object):\ntrack_pool_dict[cls_id][i] for i in u_track\nif track_pool_dict[cls_id][i].state == TrackState.Tracked\n]\n- dists = matching.iou_distance(r_tracked_stracks, detections_second)\n+ dists = matching.iou_distance(r_tracked_stracks,\n+ detections_second)\nmatches, u_track, u_detection_second = matching.linear_assignment(\ndists, thresh=0.4) # not r_tracked_thresh\nelse:\n@@ -238,7 +252,8 @@ class JDETracker(object):\nfor i_tracked, idet in matches:\ntrack = r_tracked_stracks[i_tracked]\n- det = detections[idet] if not self.use_byte else detections_second[idet]\n+ det = detections[\n+ idet] if not self.use_byte else detections_second[idet]\nif track.state == TrackState.Tracked:\ntrack.update(det, self.frame_id)\nactivated_tracks_dict[cls_id].append(track)\n" }, { "change_type": "MODIFY", "old_path": "ppdet/modeling/post_process.py", "new_path": "ppdet/modeling/post_process.py", "diff": "@@ -504,11 +504,10 @@ class CenterNetPostProcess(TTFBox):\nboxes_shape = bboxes.shape[:]\nscale_expand = paddle.expand(scale_expand, shape=boxes_shape)\nbboxes = paddle.divide(bboxes, scale_expand)\n+ results = paddle.concat([clses, scores, bboxes], axis=1)\nif self.for_mot:\n- results = paddle.concat([bboxes, scores, clses], axis=1)\nreturn results, inds, topk_clses\nelse:\n- results = paddle.concat([clses, scores, bboxes], axis=1)\nreturn results, paddle.shape(results)[0:1], topk_clses\n" }, { "change_type": "MODIFY", "old_path": "ppdet/modeling/reid/jde_embedding_head.py", "new_path": "ppdet/modeling/reid/jde_embedding_head.py", "diff": "@@ -152,9 +152,8 @@ class JDEEmbeddingHead(nn.Layer):\nscale_factor = targets['scale_factor'][0].numpy()\nbboxes[:, 2:] = self.scale_coords(bboxes[:, 2:], input_shape,\nim_shape, scale_factor)\n- # tlwhs, scores, cls_ids\n- pred_dets = paddle.concat(\n- (bboxes[:, 2:], bboxes[:, 1:2], bboxes[:, 0:1]), axis=1)\n+ # cls_ids, scores, tlwhs\n+ pred_dets = bboxes\nreturn pred_dets, pred_embs\ndef scale_coords(self, coords, input_shape, im_shape, scale_factor):\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
[MOT] unify mot and det output format (#5320)
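All the slicing changes above follow from one layout decision: detection rows are now `[cls_id, score, x0, y0, x1, y1]` everywhere, instead of `[x0, y0, x1, y1, score, cls_id]`, so MOT and detection outputs match. A one-liner (illustrative helper, not part of the repo) to migrate arrays still in the old layout:

```python
import numpy as np

def to_unified_layout(dets_old):
    # [x0, y0, x1, y1, score, cls_id] -> [cls_id, score, x0, y0, x1, y1]
    return np.concatenate(
        [dets_old[:, 5:6], dets_old[:, 4:5], dets_old[:, :4]], axis=1)

old = np.array([[10., 20., 50., 80., 0.9, 0.]])
print(to_unified_layout(old))  # [[ 0.   0.9 10.  20.  50.  80. ]]
```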
499,339
09.03.2022 21:15:30
-28,800
11c1efffc35ed2c6638abe62713b9b97700ec063
[SSD] add use_fuse_decode to SSDBox
[ { "change_type": "MODIFY", "old_path": "ppdet/modeling/layers.py", "new_path": "ppdet/modeling/layers.py", "diff": "@@ -553,9 +553,14 @@ class YOLOBox(object):\n@register\n@serializable\nclass SSDBox(object):\n- def __init__(self, is_normalized=True):\n+ def __init__(self,\n+ is_normalized=True,\n+ prior_box_var=[0.1, 0.1, 0.2, 0.2],\n+ use_fuse_decode=False):\nself.is_normalized = is_normalized\nself.norm_delta = float(not self.is_normalized)\n+ self.prior_box_var = prior_box_var\n+ self.use_fuse_decode = use_fuse_decode\ndef __call__(self,\npreds,\n@@ -564,40 +569,42 @@ class SSDBox(object):\nscale_factor,\nvar_weight=None):\nboxes, scores = preds\n- outputs = []\n- for box, score, prior_box in zip(boxes, scores, prior_boxes):\n- pb_w = prior_box[:, 2] - prior_box[:, 0] + self.norm_delta\n- pb_h = prior_box[:, 3] - prior_box[:, 1] + self.norm_delta\n- pb_x = prior_box[:, 0] + pb_w * 0.5\n- pb_y = prior_box[:, 1] + pb_h * 0.5\n- out_x = pb_x + box[:, :, 0] * pb_w * 0.1\n- out_y = pb_y + box[:, :, 1] * pb_h * 0.1\n- out_w = paddle.exp(box[:, :, 2] * 0.2) * pb_w\n- out_h = paddle.exp(box[:, :, 3] * 0.2) * pb_h\n-\n- if self.is_normalized:\n- h = paddle.unsqueeze(\n- im_shape[:, 0] / scale_factor[:, 0], axis=-1)\n- w = paddle.unsqueeze(\n- im_shape[:, 1] / scale_factor[:, 1], axis=-1)\n- output = paddle.stack(\n- [(out_x - out_w / 2.) * w, (out_y - out_h / 2.) * h,\n- (out_x + out_w / 2.) * w, (out_y + out_h / 2.) * h],\n- axis=-1)\n+ boxes = paddle.concat(boxes, axis=1)\n+ prior_boxes = paddle.concat(prior_boxes)\n+ if self.use_fuse_decode:\n+ output_boxes = ops.box_coder(\n+ prior_boxes,\n+ self.prior_box_var,\n+ boxes,\n+ code_type=\"decode_center_size\",\n+ box_normalized=self.is_normalized)\nelse:\n- output = paddle.stack(\n+ pb_w = prior_boxes[:, 2] - prior_boxes[:, 0] + self.norm_delta\n+ pb_h = prior_boxes[:, 3] - prior_boxes[:, 1] + self.norm_delta\n+ pb_x = prior_boxes[:, 0] + pb_w * 0.5\n+ pb_y = prior_boxes[:, 1] + pb_h * 0.5\n+ out_x = pb_x + boxes[:, :, 0] * pb_w * self.prior_box_var[0]\n+ out_y = pb_y + boxes[:, :, 1] * pb_h * self.prior_box_var[1]\n+ out_w = paddle.exp(boxes[:, :, 2] * self.prior_box_var[2]) * pb_w\n+ out_h = paddle.exp(boxes[:, :, 3] * self.prior_box_var[3]) * pb_h\n+ output_boxes = paddle.stack(\n[\n- out_x - out_w / 2., out_y - out_h / 2.,\n- out_x + out_w / 2. - 1., out_y + out_h / 2. - 1.\n+ out_x - out_w / 2., out_y - out_h / 2., out_x + out_w / 2.,\n+ out_y + out_h / 2.\n],\naxis=-1)\n- outputs.append(output)\n- boxes = paddle.concat(outputs, axis=1)\n- scores = F.softmax(paddle.concat(scores, axis=1))\n- scores = paddle.transpose(scores, [0, 2, 1])\n+ if self.is_normalized:\n+ h = (im_shape[:, 0] / scale_factor[:, 0]).unsqueeze(-1)\n+ w = (im_shape[:, 1] / scale_factor[:, 1]).unsqueeze(-1)\n+ im_shape = paddle.stack([w, h, w, h], axis=-1)\n+ output_boxes *= im_shape\n+ else:\n+ output_boxes[..., -2:] -= 1.0\n+ output_scores = F.softmax(paddle.concat(\n+ scores, axis=1)).transpose([0, 2, 1])\n- return boxes, scores\n+ return output_boxes, output_scores\n@register\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
[SSD] add use_fuse_decode to SSDBox (#5336)
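The manual branch kept above is the standard SSD center-size decode against prior boxes; `use_fuse_decode` simply swaps it for the fused `ops.box_coder` call. A minimal numpy restatement of that math (normalized coordinates, variances matching `prior_box_var`):

```python
import numpy as np

def decode_center_size(priors, deltas, variances=(0.1, 0.1, 0.2, 0.2)):
    # priors: [N, 4] x1y1x2y2; deltas: [N, 4] (dx, dy, dw, dh)
    pw = priors[:, 2] - priors[:, 0]
    ph = priors[:, 3] - priors[:, 1]
    px = priors[:, 0] + 0.5 * pw
    py = priors[:, 1] + 0.5 * ph
    cx = px + deltas[:, 0] * variances[0] * pw
    cy = py + deltas[:, 1] * variances[1] * ph
    w = np.exp(deltas[:, 2] * variances[2]) * pw
    h = np.exp(deltas[:, 3] * variances[3]) * ph
    return np.stack([cx - 0.5 * w, cy - 0.5 * h,
                     cx + 0.5 * w, cy + 0.5 * h], axis=-1)

priors = np.array([[0.1, 0.1, 0.3, 0.3]])
print(decode_center_size(priors, np.zeros((1, 4))))  # zero deltas -> the prior itself
```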
499,333
10.03.2022 15:16:15
-28,800
80b1789eb3406f4f0d26290d4be48cecf766af73
add attr in deploy
[ { "change_type": "ADD", "old_path": null, "new_path": "deploy/python/attr_infer.py", "diff": "+# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n+import os\n+import yaml\n+import glob\n+from functools import reduce\n+\n+import cv2\n+import numpy as np\n+import math\n+import paddle\n+from paddle.inference import Config\n+from paddle.inference import create_predictor\n+\n+import sys\n+# add deploy path of PadleDetection to sys.path\n+parent_path = os.path.abspath(os.path.join(__file__, *(['..'])))\n+sys.path.insert(0, parent_path)\n+\n+from benchmark_utils import PaddleInferBenchmark\n+from preprocess import preprocess, Resize, NormalizeImage, Permute, PadStride, LetterBoxResize, WarpAffine\n+from visualize import visualize_attr\n+from utils import argsparser, Timer, get_current_memory_mb\n+from infer import Detector, get_test_images, print_arguments, load_predictor\n+\n+from PIL import Image, ImageDraw, ImageFont\n+\n+\n+class AttrDetector(Detector):\n+ \"\"\"\n+ Args:\n+ pred_config (object): config of model, defined by `Config(model_dir)`\n+ model_dir (str): root path of model.pdiparams, model.pdmodel and infer_cfg.yml\n+ device (str): Choose the device you want to run, it can be: CPU/GPU/XPU, default is CPU\n+ run_mode (str): mode of running(paddle/trt_fp32/trt_fp16)\n+ batch_size (int): size of pre batch in inference\n+ trt_min_shape (int): min shape for dynamic shape in trt\n+ trt_max_shape (int): max shape for dynamic shape in trt\n+ trt_opt_shape (int): opt shape for dynamic shape in trt\n+ trt_calib_mode (bool): If the model is produced by TRT offline quantitative\n+ calibration, trt_calib_mode need to set True\n+ cpu_threads (int): cpu threads\n+ enable_mkldnn (bool): whether to open MKLDNN\n+ output_dir (str): The path of output\n+ threshold (float): The threshold of score for visualization\n+ \"\"\"\n+\n+ def __init__(\n+ self,\n+ model_dir,\n+ device='CPU',\n+ run_mode='paddle',\n+ batch_size=1,\n+ trt_min_shape=1,\n+ trt_max_shape=1280,\n+ trt_opt_shape=640,\n+ trt_calib_mode=False,\n+ cpu_threads=1,\n+ enable_mkldnn=False,\n+ output_dir='output',\n+ threshold=0.5, ):\n+ super(AttrDetector, self).__init__(\n+ model_dir=model_dir,\n+ device=device,\n+ run_mode=run_mode,\n+ batch_size=batch_size,\n+ trt_min_shape=trt_min_shape,\n+ trt_max_shape=trt_max_shape,\n+ trt_opt_shape=trt_opt_shape,\n+ trt_calib_mode=trt_calib_mode,\n+ cpu_threads=cpu_threads,\n+ enable_mkldnn=enable_mkldnn,\n+ output_dir=output_dir,\n+ threshold=threshold, )\n+\n+ def get_label(self):\n+ return self.pred_config.labels\n+\n+ def postprocess(self, inputs, result):\n+ # postprocess output of predictor\n+ im_results = result['output']\n+ im_results = np.where(im_results < self.threshold, 0, im_results)\n+ label_list = [['Head', ['Hat', 'Glasses']], [\n+ 'Upper', [\n+ 'ShortSleeve', 'LongSleeve', 'UpperStride', 'UpperLogo',\n+ 'UpperPlaid', 'UpperSplice'\n+ ]\n+ ], [\n+ 'Lower', [\n+ 'LowerStripe', 'LowerPattern', 'LongCoat', 
'Trousers', 'Shorts',\n+ 'Skirt&Dress'\n+ ]\n+ ], ['Shoes', ['boots']], [\n+ 'Accessory',\n+ ['HandBag', 'ShoulderBag', 'Backpack', 'HoldObjectsInFront']\n+ ], ['Age', ['AgeOver60', 'Age18-60', 'AgeLess18']],\n+ ['Gender', ['Female']],\n+ ['Direction', ['Front', 'Side', 'Back']]]\n+\n+ attr_type = [name[0] for name in label_list]\n+ labels = self.pred_config.labels\n+\n+ batch_res = []\n+ for res in im_results:\n+ label_res = {}\n+ label_res = {t: [] for t in attr_type}\n+ num = 0\n+ for i in range(len(label_list)):\n+ type_name_i = attr_type[i]\n+ attr_name_list = label_list[i][1]\n+ for attr_name in attr_name_list:\n+ attr_name = labels[num]\n+ output_prob = res[num]\n+ if output_prob != 0:\n+ label_res[type_name_i].append(attr_name)\n+ num += 1\n+\n+ if len(label_res['Shoes']) == 0:\n+ label_res['Shoes'] = ['no boots']\n+ if len(label_res['Gender']) == 0:\n+ label_res['Gender'] = ['Male']\n+ label_res['Age'] = [labels[19 + np.argmax(res[19:22])]]\n+ label_res['Direction'] = [labels[23 + np.argmax(res[23:])]]\n+ batch_res.append(label_res)\n+ result = {'output': batch_res}\n+ return result\n+\n+ def predict(self, repeats=1):\n+ '''\n+ Args:\n+ repeats (int): repeats number for prediction\n+ Returns:\n+ result (dict): include 'boxes': np.ndarray: shape:[N,6], N: number of box,\n+ matix element:[class, score, x_min, y_min, x_max, y_max]\n+ MaskRCNN's result include 'masks': np.ndarray:\n+ shape: [N, im_h, im_w]\n+ '''\n+ # model prediction\n+ for i in range(repeats):\n+ self.predictor.run()\n+ output_names = self.predictor.get_output_names()\n+ output_tensor = self.predictor.get_output_handle(output_names[0])\n+ np_output = output_tensor.copy_to_cpu()\n+ result = dict(output=np_output)\n+ return result\n+\n+ def predict_image(self,\n+ image_list,\n+ run_benchmark=False,\n+ repeats=1,\n+ visual=True):\n+ batch_loop_cnt = math.ceil(float(len(image_list)) / self.batch_size)\n+ results = []\n+ for i in range(batch_loop_cnt):\n+ start_index = i * self.batch_size\n+ end_index = min((i + 1) * self.batch_size, len(image_list))\n+ batch_image_list = image_list[start_index:end_index]\n+ if run_benchmark:\n+ # preprocess\n+ inputs = self.preprocess(batch_image_list) # warmup\n+ self.det_times.preprocess_time_s.start()\n+ inputs = self.preprocess(batch_image_list)\n+ self.det_times.preprocess_time_s.end()\n+\n+ # model prediction\n+ result = self.predict(repeats=repeats) # warmup\n+ self.det_times.inference_time_s.start()\n+ result = self.predict(repeats=repeats)\n+ self.det_times.inference_time_s.end(repeats=repeats)\n+\n+ # postprocess\n+ result_warmup = self.postprocess(inputs, result) # warmup\n+ self.det_times.postprocess_time_s.start()\n+ result = self.postprocess(inputs, result)\n+ self.det_times.postprocess_time_s.end()\n+ self.det_times.img_num += len(batch_image_list)\n+\n+ cm, gm, gu = get_current_memory_mb()\n+ self.cpu_mem += cm\n+ self.gpu_mem += gm\n+ self.gpu_util += gu\n+ else:\n+ # preprocess\n+ self.det_times.preprocess_time_s.start()\n+ inputs = self.preprocess(batch_image_list)\n+ self.det_times.preprocess_time_s.end()\n+\n+ # model prediction\n+ self.det_times.inference_time_s.start()\n+ result = self.predict()\n+ self.det_times.inference_time_s.end()\n+\n+ # postprocess\n+ self.det_times.postprocess_time_s.start()\n+ result = self.postprocess(inputs, result)\n+ self.det_times.postprocess_time_s.end()\n+ self.det_times.img_num += len(batch_image_list)\n+\n+ if visual:\n+ visualize(\n+ batch_image_list, result, output_dir=self.output_dir)\n+\n+ results.append(result)\n+ if 
visual:\n+ print('Test iter {}'.format(i))\n+\n+ results = self.merge_batch_result(results)\n+ return results\n+\n+ def merge_batch_result(self, batch_result):\n+ if len(batch_result) == 1:\n+ return batch_result[0]\n+ res_key = batch_result[0].keys()\n+ results = {k: [] for k in res_key}\n+ for res in batch_result:\n+ for k, v in res.items():\n+ results[k].extend(v)\n+ return results\n+\n+\n+def visualize(image_list, batch_res, output_dir='output'):\n+\n+ # visualize the predict result\n+ batch_res = batch_res['output']\n+ for image_file, res in zip(image_list, batch_res):\n+ im = visualize_attr(image_file, [res])\n+ if not os.path.exists(output_dir):\n+ os.makedirs(output_dir)\n+ img_name = os.path.split(image_file)[-1]\n+ out_path = os.path.join(output_dir, img_name)\n+ im.save(out_path, quality=95)\n+ print(\"save result to: \" + out_path)\n+\n+\n+def main():\n+ detector = AttrDetector(\n+ FLAGS.model_dir,\n+ device=FLAGS.device,\n+ run_mode=FLAGS.run_mode,\n+ batch_size=FLAGS.batch_size,\n+ trt_min_shape=FLAGS.trt_min_shape,\n+ trt_max_shape=FLAGS.trt_max_shape,\n+ trt_opt_shape=FLAGS.trt_opt_shape,\n+ trt_calib_mode=FLAGS.trt_calib_mode,\n+ cpu_threads=FLAGS.cpu_threads,\n+ enable_mkldnn=FLAGS.enable_mkldnn,\n+ threshold=FLAGS.threshold,\n+ output_dir=FLAGS.output_dir)\n+\n+ # predict from image\n+ if FLAGS.image_dir is None and FLAGS.image_file is not None:\n+ assert FLAGS.batch_size == 1, \"batch_size should be 1, when image_file is not None\"\n+ img_list = get_test_images(FLAGS.image_dir, FLAGS.image_file)\n+ detector.predict_image(img_list, FLAGS.run_benchmark, repeats=10)\n+ if not FLAGS.run_benchmark:\n+ detector.det_times.info(average=True)\n+ else:\n+ mems = {\n+ 'cpu_rss_mb': detector.cpu_mem / len(img_list),\n+ 'gpu_rss_mb': detector.gpu_mem / len(img_list),\n+ 'gpu_util': detector.gpu_util * 100 / len(img_list)\n+ }\n+\n+ perf_info = detector.det_times.report(average=True)\n+ model_dir = FLAGS.model_dir\n+ mode = FLAGS.run_mode\n+ model_info = {\n+ 'model_name': model_dir.strip('/').split('/')[-1],\n+ 'precision': mode.split('_')[-1]\n+ }\n+ data_info = {\n+ 'batch_size': FLAGS.batch_size,\n+ 'shape': \"dynamic_shape\",\n+ 'data_num': perf_info['img_num']\n+ }\n+ det_log = PaddleInferBenchmark(detector.config, model_info, data_info,\n+ perf_info, mems)\n+ det_log('Attr')\n+\n+\n+if __name__ == '__main__':\n+ paddle.enable_static()\n+ parser = argsparser()\n+ FLAGS = parser.parse_args()\n+ print_arguments(FLAGS)\n+ FLAGS.device = FLAGS.device.upper()\n+ assert FLAGS.device in ['CPU', 'GPU', 'XPU'\n+ ], \"device should be CPU, GPU or XPU\"\n+ assert not FLAGS.use_gpu, \"use_gpu has been deprecated, please use --device\"\n+\n+ main()\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
add attr in deploy (#5342)
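The `postprocess` above boils down to three steps: zero-out probabilities below `threshold`, bucket the surviving labels by attribute group, and apply defaults or an argmax for the exclusive groups (Gender, Age, Direction). A toy version with made-up indices and labels, not the model's real 26-attribute layout:

```python
import numpy as np

LABELS = ["Hat", "Glasses", "Female"]                 # illustrative only
GROUPS = {"Head": [0, 1], "Gender": [2]}              # label indices per group

def decode_attrs(probs, threshold=0.5):
    probs = np.where(probs < threshold, 0.0, probs)   # drop low-confidence attrs
    res = {g: [LABELS[i] for i in idx if probs[i] > 0]
           for g, idx in GROUPS.items()}
    if not res["Gender"]:                             # binary attribute default
        res["Gender"] = ["Male"]
    return res

print(decode_attrs(np.array([0.9, 0.2, 0.1])))
# {'Head': ['Hat'], 'Gender': ['Male']}
```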
499,304
11.03.2022 18:11:44
-28,800
f08c2ca7aff0eec9bb590d861c37a6700b7ce2ea
fix picoheadv2 use_align_head
[ { "change_type": "MODIFY", "old_path": "ppdet/modeling/heads/pico_head.py", "new_path": "ppdet/modeling/heads/pico_head.py", "diff": "@@ -466,9 +466,7 @@ class PicoHeadV2(GFLHead):\n), \"The size of fpn_feats is not equal to size of fpn_stride\"\ncls_score_list, reg_list, box_list = [], [], []\n- for i, fpn_feat, stride, align_cls in zip(\n- range(len(self.fpn_stride)), fpn_feats, self.fpn_stride,\n- self.cls_align):\n+ for i, (fpn_feat, stride) in enumerate(zip(fpn_feats, self.fpn_stride)):\nb, _, h, w = get_static_shape(fpn_feat)\n# task decomposition\nconv_cls_feat, se_feat = self.conv_feat(fpn_feat, i)\n@@ -477,7 +475,7 @@ class PicoHeadV2(GFLHead):\n# cls prediction and alignment\nif self.use_align_head:\n- cls_prob = F.sigmoid(align_cls(conv_cls_feat))\n+ cls_prob = F.sigmoid(self.cls_align[i](conv_cls_feat))\ncls_score = (F.sigmoid(cls_logit) * cls_prob + eps).sqrt()\nelse:\ncls_score = F.sigmoid(cls_logit)\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
fix picoheadv2 use_align_head (#5352)
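Besides the indexing fix (`self.cls_align[i]` instead of a stale loop variable), the surrounding code shows the alignment trick itself: the final class score is the geometric mean of the classification probability and the align-head probability. In plain numpy, that fusion is just:

```python
import numpy as np

def sigmoid(x):
    return 1.0 / (1.0 + np.exp(-x))

def aligned_score(cls_logit, align_logit, eps=1e-9):
    # sqrt(sigmoid(cls) * sigmoid(align)): geometric mean of both branches
    return np.sqrt(sigmoid(cls_logit) * sigmoid(align_logit) + eps)

print(aligned_score(np.array([2.0]), np.array([0.0])))  # ~0.66 = sqrt(0.88 * 0.5)
```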
499,373
15.03.2022 20:34:38
-28,800
1acb18522ac110fe38f308600c176bb814ee6d2f
[Fix] Update operators.py: fixed a possible error on line 2021: "TypeError: 'numpy.float64' object cannot be interpreted as an integer".
[ { "change_type": "MODIFY", "old_path": "ppdet/data/transform/operators.py", "new_path": "ppdet/data/transform/operators.py", "diff": "@@ -2032,8 +2032,8 @@ class Pad(BaseOperator):\nim_h < h and im_w < w\n), '(h, w) of target size should be greater than (im_h, im_w)'\nelse:\n- h = np.ceil(im_h / self.size_divisor) * self.size_divisor\n- w = np.ceil(im_w / self.size_divisor) * self.size_divisor\n+ h = int(np.ceil(im_h / self.size_divisor) * self.size_divisor)\n+ w = int(np.ceil(im_w / self.size_divisor) * self.size_divisor)\nif h == im_h and w == im_w:\nreturn sample\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
[Fix] Update operators.py (#5357) Fixed a possible error on line 2021: "TypeError: 'numpy.float64' object cannot be interpreted as an integer".
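The root cause is that `np.ceil` returns a `numpy.float64`, and the padded canvas shape downstream must be built from integers. Reproducing the failure and the fix with plain numpy:

```python
import numpy as np

im_h, size_divisor = 300, 32
h = np.ceil(im_h / size_divisor) * size_divisor        # numpy.float64 320.0
# np.zeros((h, h)) raises:
# TypeError: 'numpy.float64' object cannot be interpreted as an integer
h = int(np.ceil(im_h / size_divisor) * size_divisor)   # the fix: cast to int
print(np.zeros((h, h)).shape)                          # (320, 320)
```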
499,339
15.03.2022 21:00:45
-28,800
3d1dfea9eb39b09b149ee8d097aec732a76f2d25
[dev] add use_epoch in LinearWarmup
[ { "change_type": "MODIFY", "old_path": "ppdet/optimizer.py", "new_path": "ppdet/optimizer.py", "diff": "@@ -132,19 +132,24 @@ class LinearWarmup(object):\nArgs:\nsteps (int): warm up steps\nstart_factor (float): initial learning rate factor\n+ epochs (int|None): use epochs as warm up steps, the priority\n+ of `epochs` is higher than `steps`. Default: None.\n\"\"\"\n- def __init__(self, steps=500, start_factor=1. / 3):\n+ def __init__(self, steps=500, start_factor=1. / 3, epochs=None):\nsuper(LinearWarmup, self).__init__()\nself.steps = steps\nself.start_factor = start_factor\n+ self.epochs = epochs\ndef __call__(self, base_lr, step_per_epoch):\nboundary = []\nvalue = []\n- for i in range(self.steps + 1):\n- if self.steps > 0:\n- alpha = i / self.steps\n+ warmup_steps = self.epochs * step_per_epoch \\\n+ if self.epochs is not None else self.steps\n+ for i in range(warmup_steps + 1):\n+ if warmup_steps > 0:\n+ alpha = i / warmup_steps\nfactor = self.start_factor * (1 - alpha) + alpha\nlr = base_lr * factor\nvalue.append(lr)\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
[dev] add use_epoch in LinearWarmup (#5366)
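A standalone restatement of the schedule math after this change: when `epochs` is given, the warm-up length becomes `epochs * step_per_epoch`; otherwise `steps` is used (the boundary bookkeeping is omitted here):

```python
def linear_warmup_values(base_lr, steps=500, start_factor=1. / 3,
                         epochs=None, step_per_epoch=100):
    warmup_steps = epochs * step_per_epoch if epochs is not None else steps
    values = []
    for i in range(warmup_steps + 1):
        alpha = i / warmup_steps if warmup_steps > 0 else 1.0
        values.append(base_lr * (start_factor * (1 - alpha) + alpha))
    return values

vals = linear_warmup_values(0.01, epochs=2, step_per_epoch=5)  # 10 warm-up steps
print(vals[0], vals[-1])  # 0.00333... ramps linearly up to 0.01
```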
499,304
18.03.2022 11:16:37
-28,800
50da62fdf5530437e83dfb39bca8f278f64cecf2
fix picodet_pedestrian export
[ { "change_type": "MODIFY", "old_path": "configs/picodet/application/pedestrian_detection/picodet_s_192_pedestrian.yml", "new_path": "configs/picodet/application/pedestrian_detection/picodet_s_192_pedestrian.yml", "diff": "@@ -12,6 +12,11 @@ snapshot_epoch: 10\nepoch: 300\nmetric: COCO\nnum_classes: 1\n+# Exporting the model\n+export:\n+ post_process: False # Whether post-processing is included in the network when export model.\n+ nms: False # Whether NMS is included in the network when export model.\n+ benchmark: False # It is used to testing model performance, if set `True`, post-process and NMS will not be exported.\narchitecture: PicoDet\n" }, { "change_type": "MODIFY", "old_path": "configs/picodet/application/pedestrian_detection/picodet_s_320_pedestrian.yml", "new_path": "configs/picodet/application/pedestrian_detection/picodet_s_320_pedestrian.yml", "diff": "@@ -12,6 +12,11 @@ snapshot_epoch: 10\nepoch: 300\nmetric: COCO\nnum_classes: 1\n+# Exporting the model\n+export:\n+ post_process: False # Whether post-processing is included in the network when export model.\n+ nms: False # Whether NMS is included in the network when export model.\n+ benchmark: False # It is used to testing model performance, if set `True`, post-process and NMS will not be exported.\narchitecture: PicoDet\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
fix picodet_pedestrian export (#5395)
499,395
19.03.2022 20:38:52
-28,800
911112265a99b17f487f4902e1e979f0dc784d7f
fix register_buffer
[ { "change_type": "MODIFY", "old_path": "ppdet/modeling/bbox_utils.py", "new_path": "ppdet/modeling/bbox_utils.py", "diff": "@@ -763,8 +763,9 @@ def batch_distance2bbox(points, distance, max_shapes=None):\nTensor: Decoded bboxes, \"x1y1x2y2\" format.\n\"\"\"\nlt, rb = paddle.split(distance, 2, -1)\n- x1y1 = points - lt\n- x2y2 = points + rb\n+ # while tensor add parameters, parameters should be better placed on the second place\n+ x1y1 = -lt + points\n+ x2y2 = rb + points\nout_bbox = paddle.concat([x1y1, x2y2], -1)\nif max_shapes is not None:\nmax_shapes = max_shapes.flip(-1).tile([1, 2])\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
fix register_buffer (#5398)
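The reordering only matters for Paddle's operator dispatch when `points` is a registered buffer/parameter during export; numerically the two forms are identical, as a quick numpy check shows:

```python
import numpy as np

points = np.array([[50., 50.]])
lt, rb = np.array([[10., 20.]]), np.array([[30., 40.]])

x1y1 = -lt + points   # same values as points - lt,
x2y2 = rb + points    # but with `points` as the right-hand operand, as in the fix
print(np.concatenate([x1y1, x2y2], axis=-1))  # [[40. 30. 80. 90.]]
```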
499,312
21.03.2022 14:24:02
-28,800
6a17524f8377eed56ac1713877cf43ea18a02f94
adjust blazeface's search space
[ { "change_type": "MODIFY", "old_path": "static/slim/nas/search_space/blazefacespace_nas.py", "new_path": "static/slim/nas/search_space/blazefacespace_nas.py", "diff": "@@ -28,8 +28,8 @@ class BlazeFaceNasSpace(SearchSpaceBase):\ndef __init__(self, input_size, output_size, block_num, block_mask):\nsuper(BlazeFaceNasSpace, self).__init__(input_size, output_size,\nblock_num, block_mask)\n- self.blaze_filter_num1 = np.array([4, 8, 12, 16, 20, 24, 32])\n- self.blaze_filter_num2 = np.array([8, 12, 16, 20, 24, 32, 40, 48, 64])\n+ self.blaze_filter_num1 = np.array([4, 8, 12, 16, 24, 32])\n+ self.blaze_filter_num2 = np.array([8, 12, 16, 24, 32, 40, 48, 64])\nself.mid_filter_num = np.array([8, 12, 16, 20, 24, 32])\nself.double_filter_num = np.array(\n[8, 12, 16, 24, 32, 40, 48, 64, 72, 80, 88, 96])\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
adjust blazeface's search space (#5404)
499,304
21.03.2022 19:27:54
-28,800
c616c10c7b7745915892bb055c916c7179b2b50a
fix new PicoDet name
[ { "change_type": "MODIFY", "old_path": "configs/picodet/legacy_model/README.md", "new_path": "configs/picodet/legacy_model/README.md", "diff": "- PicoDet used 4 or 8 GPUs for training and all checkpoints are trained with default settings and hyperparameters.\n</details>\n+\n+## Cite PP-PicoDet\n+```\n+@misc{yu2021pppicodet,\n+ title={PP-PicoDet: A Better Real-Time Object Detector on Mobile Devices},\n+ author={Guanghua Yu and Qinyao Chang and Wenyu Lv and Chang Xu and Cheng Cui and Wei Ji and Qingqing Dang and Kaipeng Deng and Guanzhong Wang and Yuning Du and Baohua Lai and Qiwen Liu and Xiaoguang Hu and Dianhai Yu and Yanjun Ma},\n+ year={2021},\n+ eprint={2111.00902},\n+ archivePrefix={arXiv},\n+ primaryClass={cs.CV}\n+}\n+\n+```\n" }, { "change_type": "RENAME", "old_path": "configs/picodet/picodet_l_320_coco.yml", "new_path": "configs/picodet/picodet_l_320_coco_lcnet.yml", "diff": "" }, { "change_type": "RENAME", "old_path": "configs/picodet/picodet_l_416_coco.yml", "new_path": "configs/picodet/picodet_l_416_coco_lcnet.yml", "diff": "" }, { "change_type": "RENAME", "old_path": "configs/picodet/picodet_l_640_coco.yml", "new_path": "configs/picodet/picodet_l_640_coco_lcnet.yml", "diff": "" }, { "change_type": "RENAME", "old_path": "configs/picodet/picodet_m_320_coco.yml", "new_path": "configs/picodet/picodet_m_320_coco_lcnet.yml", "diff": "" }, { "change_type": "RENAME", "old_path": "configs/picodet/picodet_m_416_coco.yml", "new_path": "configs/picodet/picodet_m_416_coco_lcnet.yml", "diff": "" }, { "change_type": "RENAME", "old_path": "configs/picodet/picodet_s_320_coco.yml", "new_path": "configs/picodet/picodet_s_320_coco_lcnet.yml", "diff": "" }, { "change_type": "RENAME", "old_path": "configs/picodet/picodet_s_416_coco.yml", "new_path": "configs/picodet/picodet_s_416_coco_lcnet.yml", "diff": "" }, { "change_type": "RENAME", "old_path": "configs/picodet/picodet_xs_320_coco.yml", "new_path": "configs/picodet/picodet_xs_320_coco_lcnet.yml", "diff": "" }, { "change_type": "RENAME", "old_path": "configs/picodet/picodet_xs_416_coco.yml", "new_path": "configs/picodet/picodet_xs_416_coco_lcnet.yml", "diff": "" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
fix new PicoDet name (#5414)
499,333
21.03.2022 22:13:31
-28,800
8622966f6d09e1969ecf96534f8bf4bc1c2efd8f
update pphuman for pptracking
[ { "change_type": "MODIFY", "old_path": "deploy/pphuman/config/tracker_config.yml", "new_path": "deploy/pphuman/config/tracker_config.yml", "diff": "-# config of tracker for MOT SDE Detector, use ByteTracker as default.\n-# The tracker of MOT JDE Detector is exported together with the model.\n+# config of tracker for MOT SDE Detector, use 'JDETracker' as default.\n+# The tracker of MOT JDE Detector (such as FairMOT) is exported together with the model.\n# Here 'min_box_area' and 'vertical_ratio' are set for pedestrian, you can modify for other objects tracking.\n-tracker:\n- use_byte: true\n+\n+type: JDETracker # 'JDETracker' or 'DeepSORTTracker'\n+\n+# BYTETracker\n+JDETracker:\n+ use_byte: True\n+ det_thresh: 0.3\nconf_thres: 0.6\nlow_conf_thres: 0.1\nmatch_thres: 0.9\nmin_box_area: 100\n- vertical_ratio: 1.6\n+ vertical_ratio: 1.6 # for pedestrian\n+\n+DeepSORTTracker:\n+ input_size: [64, 192]\n+ min_box_area: 0\n+ vertical_ratio: -1\n+ budget: 100\n+ max_age: 70\n+ n_init: 3\n+ metric_type: cosine\n+ matching_threshold: 0.2\n+ max_iou_distance: 0.9\n" }, { "change_type": "MODIFY", "old_path": "deploy/pphuman/pipeline.py", "new_path": "deploy/pphuman/pipeline.py", "diff": "@@ -28,7 +28,6 @@ parent_path = os.path.abspath(os.path.join(__file__, *(['..'] * 2)))\nsys.path.insert(0, parent_path)\nfrom python.infer import Detector, DetectorPicoDet\n-from python.mot_sde_infer import SDE_Detector\nfrom python.attr_infer import AttrDetector\nfrom python.keypoint_infer import KeyPointDetector\nfrom python.keypoint_postprocess import translate_to_ori_images\n@@ -39,6 +38,8 @@ from pipe_utils import argsparser, print_arguments, merge_cfg, PipeTimer\nfrom pipe_utils import get_test_images, crop_image_with_det, crop_image_with_mot, parse_mot_res, parse_mot_keypoint\nfrom python.preprocess import decode_image\nfrom python.visualize import visualize_box_mask, visualize_attr, visualize_pose, visualize_action\n+\n+from pptracking.python.mot_sde_infer import SDE_Detector\nfrom pptracking.python.visualize import plot_tracking\n@@ -374,6 +375,8 @@ class PipePredictor(object):\n# det output format: class, score, xmin, ymin, xmax, ymax\ndet_res = self.det_predictor.predict_image(\nbatch_input, visual=False)\n+ det_res = self.det_predictor.filter_box(det_res,\n+ self.cfg['crop_thresh'])\nif i > self.warmup_frame:\nself.pipe_timer.module_time['det'].end()\nself.pipeline_res.update(det_res, 'det')\n@@ -563,6 +566,8 @@ class PipePredictor(object):\ndet_res_i,\nlabels=['person'],\nthreshold=self.cfg['crop_thresh'])\n+ im = np.ascontiguousarray(np.copy(im))\n+ im = cv2.cvtColor(im, cv2.COLOR_RGB2BGR)\nif attr_res is not None:\nattr_res_i = attr_res['output'][start_idx:start_idx +\nboxes_num_i]\n@@ -571,7 +576,7 @@ class PipePredictor(object):\nif not os.path.exists(self.output_dir):\nos.makedirs(self.output_dir)\nout_path = os.path.join(self.output_dir, img_name)\n- im.save(out_path, quality=95)\n+ cv2.imwrite(out_path, im)\nprint(\"save result to: \" + out_path)\nstart_idx += boxes_num_i\n" }, { "change_type": "MODIFY", "old_path": "deploy/pptracking/python/mot_sde_infer.py", "new_path": "deploy/pptracking/python/mot_sde_infer.py", "diff": "@@ -24,8 +24,8 @@ import paddle\nfrom benchmark_utils import PaddleInferBenchmark\nfrom preprocess import decode_image\n-from utils import argsparser, Timer, get_current_memory_mb, _is_valid_video, video2frames\n-from det_infer import Detector, get_test_images, print_arguments, bench_log, PredictConfig, load_predictor\n+from .utils import argsparser, Timer, 
get_current_memory_mb, _is_valid_video, video2frames\n+from .det_infer import Detector, get_test_images, print_arguments, bench_log, PredictConfig, load_predictor\n# add python path\nimport sys\n@@ -34,7 +34,7 @@ sys.path.insert(0, parent_path)\nfrom mot.tracker import JDETracker, DeepSORTTracker\nfrom mot.utils import MOTTimer, write_mot_results, flow_statistic, get_crops, clip_box\n-from visualize import plot_tracking, plot_tracking_dict\n+from .visualize import plot_tracking, plot_tracking_dict\nfrom mot.mtmct.utils import parse_bias\nfrom mot.mtmct.postprocess import trajectory_fusion, sub_cluster, gen_res, print_mtmct_result\n@@ -120,10 +120,12 @@ class SDE_Detector(Detector):\ncfg = tracker_cfg[tracker_cfg['type']]\n# tracker config\n- self.use_deepsort_tracker = True if tracker_cfg['type'] == 'DeepSORTTracker' else False\n+ self.use_deepsort_tracker = True if tracker_cfg[\n+ 'type'] == 'DeepSORTTracker' else False\nif self.use_deepsort_tracker:\n# use DeepSORTTracker\n- if self.reid_pred_config is not None and hasattr(self.reid_pred_config, 'tracker'):\n+ if self.reid_pred_config is not None and hasattr(\n+ self.reid_pred_config, 'tracker'):\ncfg = self.reid_pred_config.tracker\nbudget = cfg.get('budget', 100)\nmax_age = cfg.get('max_age', 30)\n@@ -138,8 +140,7 @@ class SDE_Detector(Detector):\nmax_iou_distance=max_iou_distance,\nmatching_threshold=matching_threshold,\nmin_box_area=min_box_area,\n- vertical_ratio=vertical_ratio,\n- )\n+ vertical_ratio=vertical_ratio, )\nelse:\n# use ByteTracker\nuse_byte = cfg.get('use_byte', False)\n@@ -158,8 +159,7 @@ class SDE_Detector(Detector):\nvertical_ratio=vertical_ratio,\nmatch_thres=match_thres,\nconf_thres=conf_thres,\n- low_conf_thres=low_conf_thres,\n- )\n+ low_conf_thres=low_conf_thres, )\nself.do_mtmct = False if mtmct_dir is None else True\nself.mtmct_dir = mtmct_dir\n@@ -206,7 +206,8 @@ class SDE_Detector(Detector):\nfor i in range(repeats):\nself.reid_predictor.run()\noutput_names = self.reid_predictor.get_output_names()\n- feature_tensor = self.reid_predictor.get_output_handle(output_names[0])\n+ feature_tensor = self.reid_predictor.get_output_handle(output_names[\n+ 0])\npred_embs = feature_tensor.copy_to_cpu()\ndet_results['embeddings'] = pred_embs\n@@ -249,7 +250,8 @@ class SDE_Detector(Detector):\nframe_id = det_results['frame_id']\ntracking_outs['feat_data'] = {}\n- for _tlbr, _id, _feat in zip(online_tlbrs, online_ids, online_feats):\n+ for _tlbr, _id, _feat in zip(online_tlbrs, online_ids,\n+ online_feats):\nfeat_data = {}\nfeat_data['bbox'] = _tlbr\nfeat_data['frame'] = f\"{frame_id:06d}\"\n@@ -265,7 +267,8 @@ class SDE_Detector(Detector):\nonline_scores = defaultdict(list)\nonline_ids = defaultdict(list)\nif self.do_mtmct:\n- online_tlbrs, online_feats = defaultdict(list), defaultdict(list)\n+ online_tlbrs, online_feats = defaultdict(list), defaultdict(\n+ list)\nonline_targets_dict = self.tracker.update(pred_dets, pred_embs)\nfor cls_id in range(self.num_classes):\nonline_targets = online_targets_dict[cls_id]\n@@ -295,7 +298,8 @@ class SDE_Detector(Detector):\nseq_name = det_results['seq_name']\nframe_id = det_results['frame_id']\ntracking_outs['feat_data'] = {}\n- for _tlbr, _id, _feat in zip(online_tlbrs[0], online_ids[0], online_feats[0]):\n+ for _tlbr, _id, _feat in zip(online_tlbrs[0], online_ids[0],\n+ online_feats[0]):\nfeat_data = {}\nfeat_data['bbox'] = _tlbr\nfeat_data['frame'] = f\"{frame_id:06d}\"\n@@ -469,14 +473,17 @@ class SDE_Detector(Detector):\ntimer.tic()\nseq_name = video_out_name.split('.')[0]\n- 
mot_results = self.predict_image([frame], visual=False, seq_name=seq_name)\n+ mot_results = self.predict_image(\n+ [frame], visual=False, seq_name=seq_name)\ntimer.toc()\n- online_tlwhs, online_scores, online_ids = mot_results[0] # bs=1 in MOT model\n+ online_tlwhs, online_scores, online_ids = mot_results[\n+ 0] # bs=1 in MOT model\nfps = 1. / timer.duration\nif num_classes == 1 and self.use_reid:\n# use DeepSORTTracker, only support singe class\n- results[0].append((frame_id + 1, online_tlwhs, online_scores, online_ids))\n+ results[0].append(\n+ (frame_id + 1, online_tlwhs, online_scores, online_ids))\nim = plot_tracking(\nframe,\nonline_tlwhs,\n@@ -488,8 +495,8 @@ class SDE_Detector(Detector):\n# use ByteTracker, support multiple class\nfor cls_id in range(num_classes):\nresults[cls_id].append(\n- (frame_id + 1, online_tlwhs[cls_id], online_scores[cls_id],\n- online_ids[cls_id]))\n+ (frame_id + 1, online_tlwhs[cls_id],\n+ online_scores[cls_id], online_ids[cls_id]))\nim = plot_tracking_dict(\nframe,\nnum_classes,\n@@ -549,13 +556,15 @@ class SDE_Detector(Detector):\ncontinue\nif os.path.exists(os.path.join(fpath, 'img1')):\nfpath = os.path.join(fpath, 'img1')\n- assert os.path.isdir(fpath), '{} should be a directory'.format(fpath)\n+ assert os.path.isdir(fpath), '{} should be a directory'.format(\n+ fpath)\nimage_list = glob.glob(os.path.join(fpath, '*.jpg'))\nimage_list.sort()\nassert len(image_list) > 0, '{} has no images.'.format(fpath)\nprint('start tracking seq: {}'.format(seq))\n- mot_features_dict = self.predict_image(image_list, visual=False, seq_name=seq)\n+ mot_features_dict = self.predict_image(\n+ image_list, visual=False, seq_name=seq)\ncid = int(re.sub('[a-z,A-Z]', \"\", seq))\ntid_data, mot_list_break = trajectory_fusion(\n@@ -627,8 +636,7 @@ def main():\nthreshold=FLAGS.threshold,\noutput_dir=FLAGS.output_dir,\nreid_model_dir=FLAGS.reid_model_dir,\n- mtmct_dir=FLAGS.mtmct_dir,\n- )\n+ mtmct_dir=FLAGS.mtmct_dir, )\n# predict from video file or camera video stream\nif FLAGS.video_file is not None or FLAGS.camera_id != -1:\n@@ -643,7 +651,8 @@ def main():\nassert FLAGS.batch_size == 1, \"--batch_size should be 1 in MOT models.\"\nimg_list = get_test_images(FLAGS.image_dir, FLAGS.image_file)\nseq_name = FLAGS.image_dir.split('/')[-1]\n- detector.predict_image(img_list, FLAGS.run_benchmark, repeats=10, seq_name=seq_name)\n+ detector.predict_image(\n+ img_list, FLAGS.run_benchmark, repeats=10, seq_name=seq_name)\nif not FLAGS.run_benchmark:\ndetector.det_times.info(average=True)\n" }, { "change_type": "MODIFY", "old_path": "deploy/python/infer.py", "new_path": "deploy/python/infer.py", "diff": "@@ -150,6 +150,25 @@ class Detector(object):\nresult = {k: v for k, v in result.items() if v is not None}\nreturn result\n+ def filter_box(self, result, threshold):\n+ np_boxes_num = result['boxes_num']\n+ boxes = result['boxes']\n+ start_idx = 0\n+ filter_boxes = []\n+ filter_num = []\n+ for i in range(len(np_boxes_num)):\n+ boxes_num = np_boxes_num[i]\n+ boxes_i = boxes[start_idx:start_idx + boxes_num, :]\n+ idx = boxes_i[:, 1] > threshold\n+ filter_boxes_i = boxes_i[idx, :]\n+ filter_boxes.append(filter_boxes_i)\n+ filter_num.append(filter_boxes_i.shape[0])\n+ start_idx += boxes_num\n+ boxes = np.concatenate(filter_boxes)\n+ filter_num = np.array(filter_num)\n+ filter_res = {'boxes': boxes, 'boxes_num': filter_num}\n+ return filter_res\n+\ndef predict(self, repeats=1):\n'''\nArgs:\n@@ -736,7 +755,8 @@ def main():\nelif arch == 'PicoDet':\ndetector_func = 'DetectorPicoDet'\n- detector = 
eval(detector_func)(FLAGS.model_dir,\n+ detector = eval(detector_func)(\n+ FLAGS.model_dir,\ndevice=FLAGS.device,\nrun_mode=FLAGS.run_mode,\nbatch_size=FLAGS.batch_size,\n@@ -781,6 +801,8 @@ if __name__ == '__main__':\n], \"device should be CPU, GPU or XPU\"\nassert not FLAGS.use_gpu, \"use_gpu has been deprecated, please use --device\"\n- assert not (FLAGS.enable_mkldnn==False and FLAGS.enable_mkldnn_bfloat16==True), 'To enable mkldnn bfloat, please turn on both enable_mkldnn and enable_mkldnn_bfloat16'\n+ assert not (\n+ FLAGS.enable_mkldnn == False and FLAGS.enable_mkldnn_bfloat16 == True\n+ ), 'To enable mkldnn bfloat, please turn on both enable_mkldnn and enable_mkldnn_bfloat16'\nmain()\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
update pphuman for pptracking (#5419)
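The new `Detector.filter_box` walks the flattened batch with `boxes_num` offsets and drops rows whose score (column 1 in the unified layout) falls below `crop_thresh`. A compact numpy equivalent:

```python
import numpy as np

def filter_box(boxes, boxes_num, threshold):
    # boxes: [sum(boxes_num), 6] rows of (cls_id, score, x0, y0, x1, y1)
    kept, kept_num, start = [], [], 0
    for n in boxes_num:
        chunk = boxes[start:start + n]
        chunk = chunk[chunk[:, 1] > threshold]       # column 1 holds the score
        kept.append(chunk)
        kept_num.append(len(chunk))
        start += n
    return np.concatenate(kept), np.array(kept_num)

boxes = np.array([[0, 0.9, 0, 0, 5, 5], [0, 0.2, 1, 1, 4, 4]])
print(filter_box(boxes, [2], threshold=0.5))  # keeps only the 0.9-score row
```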
499,333
22.03.2022 12:23:43
-28,800
227368762aa666c18dbb5ba06346490439e55188
refine pptracking import
[ { "change_type": "MODIFY", "old_path": "deploy/pphuman/pipeline.py", "new_path": "deploy/pphuman/pipeline.py", "diff": "@@ -40,7 +40,7 @@ from python.preprocess import decode_image\nfrom python.visualize import visualize_box_mask, visualize_attr, visualize_pose, visualize_action\nfrom pptracking.python.mot_sde_infer import SDE_Detector\n-from pptracking.python.visualize import plot_tracking\n+from pptracking.python.mot.visualize import plot_tracking\nclass Pipeline(object):\n" }, { "change_type": "MODIFY", "old_path": "deploy/pptracking/python/det_infer.py", "new_path": "deploy/pptracking/python/det_infer.py", "diff": "@@ -33,8 +33,8 @@ sys.path.insert(0, parent_path)\nfrom benchmark_utils import PaddleInferBenchmark\nfrom picodet_postprocess import PicoDetPostProcess\nfrom preprocess import preprocess, Resize, NormalizeImage, Permute, PadStride, LetterBoxResize, decode_image\n-from visualize import visualize_box_mask\n-from utils import argsparser, Timer, get_current_memory_mb\n+from mot.visualize import visualize_box_mask\n+from mot_utils import argsparser, Timer, get_current_memory_mb\n# Global dictionary\nSUPPORT_MODELS = {\n" }, { "change_type": "RENAME", "old_path": "deploy/pptracking/python/visualize.py", "new_path": "deploy/pptracking/python/mot/visualize.py", "diff": "" }, { "change_type": "MODIFY", "old_path": "deploy/pptracking/python/mot_jde_infer.py", "new_path": "deploy/pptracking/python/mot_jde_infer.py", "diff": "@@ -22,7 +22,7 @@ import paddle\nfrom benchmark_utils import PaddleInferBenchmark\nfrom preprocess import decode_image\n-from utils import argsparser, Timer, get_current_memory_mb\n+from mot_utils import argsparser, Timer, get_current_memory_mb\nfrom det_infer import Detector, get_test_images, print_arguments, bench_log, PredictConfig\n# add python path\n@@ -31,8 +31,8 @@ parent_path = os.path.abspath(os.path.join(__file__, *(['..'] * 2)))\nsys.path.insert(0, parent_path)\nfrom mot import JDETracker\n-from utils import MOTTimer, write_mot_results\n-from visualize import plot_tracking, plot_tracking_dict\n+from mot.utils import MOTTimer, write_mot_results\n+from mot.visualize import plot_tracking, plot_tracking_dict\n# Global dictionary\nMOT_JDE_SUPPORT_MODELS = {\n" }, { "change_type": "MODIFY", "old_path": "deploy/pptracking/python/mot_sde_infer.py", "new_path": "deploy/pptracking/python/mot_sde_infer.py", "diff": "@@ -24,17 +24,17 @@ import paddle\nfrom benchmark_utils import PaddleInferBenchmark\nfrom preprocess import decode_image\n-from .utils import argsparser, Timer, get_current_memory_mb, _is_valid_video, video2frames\n-from .det_infer import Detector, get_test_images, print_arguments, bench_log, PredictConfig, load_predictor\n# add python path\nimport sys\n-parent_path = os.path.abspath(os.path.join(__file__, *(['..'] * 2)))\n+parent_path = os.path.abspath(os.path.join(__file__, *(['..'])))\nsys.path.insert(0, parent_path)\n+from det_infer import Detector, get_test_images, print_arguments, bench_log, PredictConfig, load_predictor\n+from mot_utils import argsparser, Timer, get_current_memory_mb, video2frames, _is_valid_video\nfrom mot.tracker import JDETracker, DeepSORTTracker\nfrom mot.utils import MOTTimer, write_mot_results, flow_statistic, get_crops, clip_box\n-from .visualize import plot_tracking, plot_tracking_dict\n+from mot.visualize import plot_tracking, plot_tracking_dict\nfrom mot.mtmct.utils import parse_bias\nfrom mot.mtmct.postprocess import trajectory_fusion, sub_cluster, gen_res, print_mtmct_result\n" }, { "change_type": "RENAME", 
"old_path": "deploy/pptracking/python/utils.py", "new_path": "deploy/pptracking/python/mot_utils.py", "diff": "@@ -147,7 +147,6 @@ def argsparser():\nreturn parser\n-\nclass Times(object):\ndef __init__(self):\nself.time = 0.\n" }, { "change_type": "MODIFY", "old_path": "deploy/python/mot_sde_infer.py", "new_path": "deploy/python/mot_sde_infer.py", "diff": "@@ -32,7 +32,7 @@ sys.path.insert(0, parent_path)\nfrom pptracking.python.mot import JDETracker\nfrom pptracking.python.mot.utils import MOTTimer, write_mot_results\n-from pptracking.python.visualize import plot_tracking, plot_tracking_dict\n+from pptracking.python.mot.visualize import plot_tracking, plot_tracking_dict\nclass SDE_Detector(Detector):\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
refine pptracking import (#5421)
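The rename above consolidates the tracking helpers under the mot package; a comment-only sketch of the resulting import mapping, with paths taken straight from the diff (the live import works only inside the PaddleDetection deploy tree):

    # old: from pptracking.python.visualize import plot_tracking
    # new: from pptracking.python.mot.visualize import plot_tracking
    # old: from utils import argsparser, Timer, get_current_memory_mb
    # new: from mot_utils import argsparser, Timer, get_current_memory_mb
    from pptracking.python.mot.visualize import plot_tracking  # as pipeline.py now does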
499,333
22.03.2022 20:28:16
-28,800
e0a8d481843c5b900067664de3a6f0722f45f465
refine pphuman vis
[ { "change_type": "MODIFY", "old_path": "deploy/pphuman/pipeline.py", "new_path": "deploy/pphuman/pipeline.py", "diff": "@@ -521,13 +521,16 @@ class PipePredictor(object):\nmot_res = result.get('mot')\nif mot_res is not None:\nids = mot_res['boxes'][:, 0]\n+ scores = mot_res['boxes'][:, 2]\nboxes = mot_res['boxes'][:, 3:]\nboxes[:, 2] = boxes[:, 2] - boxes[:, 0]\nboxes[:, 3] = boxes[:, 3] - boxes[:, 1]\nelse:\nboxes = np.zeros([0, 4])\nids = np.zeros([0])\n- image = plot_tracking(image, boxes, ids, frame_id=frame_id, fps=fps)\n+ scores = np.zeros([0])\n+ image = plot_tracking(\n+ image, boxes, ids, scores, frame_id=frame_id, fps=fps)\nattr_res = result.get('attr')\nif attr_res is not None:\n" }, { "change_type": "MODIFY", "old_path": "deploy/pptracking/python/mot/visualize.py", "new_path": "deploy/pptracking/python/mot/visualize.py", "diff": "@@ -134,47 +134,45 @@ def plot_tracking(image,\nim = np.ascontiguousarray(np.copy(image))\nim_h, im_w = im.shape[:2]\n- text_scale = max(1, image.shape[1] / 1600.)\n+ text_scale = max(0.5, image.shape[1] / 3000.)\ntext_thickness = 2\nline_thickness = max(1, int(image.shape[1] / 500.))\ncv2.putText(\nim,\n'frame: %d fps: %.2f num: %d' % (frame_id, fps, len(tlwhs)),\n- (0, int(15 * text_scale)),\n- cv2.FONT_HERSHEY_PLAIN,\n+ (0, int(15 * text_scale) + 5),\n+ cv2.FONT_ITALIC,\ntext_scale, (0, 0, 255),\n- thickness=2)\n-\n+ thickness=text_thickness)\nfor i, tlwh in enumerate(tlwhs):\nx1, y1, w, h = tlwh\nintbox = tuple(map(int, (x1, y1, x1 + w, y1 + h)))\nobj_id = int(obj_ids[i])\n- id_text = '{}'.format(int(obj_id))\n+ id_text = 'ID: {}'.format(int(obj_id))\nif ids2names != []:\nassert len(\nids2names) == 1, \"plot_tracking only supports single classes.\"\n- id_text = '{}_'.format(ids2names[0]) + id_text\n+ id_text = 'ID: {}_'.format(ids2names[0]) + id_text\n_line_thickness = 1 if obj_id <= 0 else line_thickness\ncolor = get_color(abs(obj_id))\ncv2.rectangle(\nim, intbox[0:2], intbox[2:4], color=color, thickness=line_thickness)\ncv2.putText(\nim,\n- id_text, (intbox[0], intbox[1] - 10),\n- cv2.FONT_HERSHEY_PLAIN,\n- text_scale, (0, 0, 255),\n+ id_text, (intbox[0], intbox[1] - 25),\n+ cv2.FONT_ITALIC,\n+ text_scale, (0, 255, 255),\nthickness=text_thickness)\nif scores is not None:\n- text = '{:.2f}'.format(float(scores[i]))\n+ text = 'score: {:.2f}'.format(float(scores[i]))\ncv2.putText(\nim,\n- text, (intbox[0], intbox[1] + 10),\n- cv2.FONT_HERSHEY_PLAIN,\n- text_scale, (0, 255, 255),\n+ text, (intbox[0], intbox[1] - 6),\n+ cv2.FONT_ITALIC,\n+ text_scale, (0, 255, 0),\nthickness=text_thickness)\n-\nif do_entrance_counting:\nentrance_line = tuple(map(int, entrance))\ncv2.rectangle(\n@@ -201,7 +199,7 @@ def plot_tracking_dict(image,\nim = np.ascontiguousarray(np.copy(image))\nim_h, im_w = im.shape[:2]\n- text_scale = max(1, image.shape[1] / 1600.)\n+ text_scale = max(0.5, image.shape[1] / 3000.)\ntext_thickness = 2\nline_thickness = max(1, int(image.shape[1] / 500.))\n@@ -212,9 +210,9 @@ def plot_tracking_dict(image,\ncv2.putText(\nim,\nrecords[-1][start:end], (0, int(40 * text_scale)),\n- cv2.FONT_HERSHEY_PLAIN,\n+ cv2.FONT_ITALIC,\ntext_scale, (0, 0, 255),\n- thickness=2)\n+ thickness=text_thickness)\nif num_classes == 1 and do_entrance_counting:\nentrance_line = tuple(map(int, entrance))\n@@ -229,9 +227,9 @@ def plot_tracking_dict(image,\ncv2.putText(\nim,\nrecords[-1][start:-1], (0, int(60 * text_scale)),\n- cv2.FONT_HERSHEY_PLAIN,\n+ cv2.FONT_ITALIC,\ntext_scale, (0, 0, 255),\n- thickness=2)\n+ thickness=text_thickness)\nfor cls_id in 
range(num_classes):\ntlwhs = tlwhs_dict[cls_id]\n@@ -240,10 +238,10 @@ def plot_tracking_dict(image,\ncv2.putText(\nim,\n'frame: %d fps: %.2f num: %d' % (frame_id, fps, len(tlwhs)),\n- (0, int(15 * text_scale)),\n- cv2.FONT_HERSHEY_PLAIN,\n+ (0, int(15 * text_scale) + 5),\n+ cv2.FONT_ITALIC,\ntext_scale, (0, 0, 255),\n- thickness=2)\n+ thickness=text_thickness)\nrecord_id = set()\nfor i, tlwh in enumerate(tlwhs):\n@@ -273,18 +271,18 @@ def plot_tracking_dict(image,\nthickness=line_thickness)\ncv2.putText(\nim,\n- id_text, (intbox[0], intbox[1] - 10),\n- cv2.FONT_HERSHEY_PLAIN,\n- text_scale, (0, 0, 255),\n+ id_text, (intbox[0], intbox[1] - 25),\n+ cv2.FONT_ITALIC,\n+ text_scale, (0, 255, 255),\nthickness=text_thickness)\nif scores is not None:\n- text = '{:.2f}'.format(float(scores[i]))\n+ text = 'score: {:.2f}'.format(float(scores[i]))\ncv2.putText(\nim,\n- text, (intbox[0], intbox[1] + 10),\n- cv2.FONT_HERSHEY_PLAIN,\n- text_scale, (0, 255, 255),\n+ text, (intbox[0], intbox[1] - 6),\n+ cv2.FONT_ITALIC,\n+ text_scale, (0, 255, 0),\nthickness=text_thickness)\nif center_traj is not None:\nfor traj in center_traj:\n" }, { "change_type": "MODIFY", "old_path": "deploy/python/visualize.py", "new_path": "deploy/python/visualize.py", "diff": "@@ -338,17 +338,17 @@ def visualize_attr(im, results, boxes=None):\nim = np.ascontiguousarray(np.copy(im))\nim_h, im_w = im.shape[:2]\n- text_scale = max(1, int(im.shape[0] / 1600.))\n- text_thickness = 2\n+ text_scale = max(0.5, im.shape[0] / 3000.)\n+ text_thickness = 1\n- line_inter = im.shape[0] / 50.\n+ line_inter = im.shape[0] / 40.\nfor i, res in enumerate(results):\nif boxes is None:\n- text_w = 1\n+ text_w = 3\ntext_h = 1\nelse:\nbox = boxes[i]\n- text_w = int(box[2])\n+ text_w = int(box[2]) + 3\ntext_h = int(box[3])\nfor text in res:\ntext_h += int(line_inter)\n@@ -357,8 +357,8 @@ def visualize_attr(im, results, boxes=None):\nim,\ntext,\ntext_loc,\n- cv2.FONT_HERSHEY_PLAIN,\n- text_scale, (0, 0, 255),\n+ cv2.FONT_ITALIC,\n+ text_scale, (0, 255, 255),\nthickness=text_thickness)\nreturn im\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
refine pphuman vis (#5426)
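With the change above, plot_tracking takes scores positionally and the visualizer uses a finer text scale; a minimal sketch of the empty-frame branch, assuming plot_tracking is importable as in pipeline.py:

    import numpy as np

    def text_scale_for(image):
        # new scaling rule from the visualizer above
        return max(0.5, image.shape[1] / 3000.)

    image = np.zeros((720, 1280, 3), dtype=np.uint8)
    boxes, ids, scores = np.zeros([0, 4]), np.zeros([0]), np.zeros([0])
    # vis = plot_tracking(image, boxes, ids, scores, frame_id=0, fps=25.0)
    assert text_scale_for(image) == 0.5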
499,395
22.03.2022 21:09:30
-28,800
082b1ac9b23d5b21d45f77d707ef87d038e737cf
fix transform problem of RandomCrop
[ { "change_type": "MODIFY", "old_path": "ppdet/data/transform/operators.py", "new_path": "ppdet/data/transform/operators.py", "diff": "@@ -1504,6 +1504,11 @@ class RandomCrop(BaseOperator):\nif 'is_crowd' in sample:\nsample['is_crowd'] = np.take(\nsample['is_crowd'], valid_ids, axis=0)\n+\n+ if 'difficult' in sample:\n+ sample['difficult'] = np.take(\n+ sample['difficult'], valid_ids, axis=0)\n+\nreturn sample\nreturn sample\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
fix transform problem of RandomCrop (#5412)
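A runnable reproduction of the fix: after a crop, every per-box field must be filtered with the same valid_ids, or 'difficult' drifts out of sync with 'gt_bbox' (values below are made up):

    import numpy as np

    sample = {
        'gt_bbox': np.array([[0, 0, 10, 10], [5, 5, 20, 20], [8, 8, 30, 30]]),
        'difficult': np.array([0, 1, 0]),
    }
    valid_ids = np.array([0, 2])          # boxes that survive the crop
    sample['gt_bbox'] = np.take(sample['gt_bbox'], valid_ids, axis=0)
    sample['difficult'] = np.take(sample['difficult'], valid_ids, axis=0)
    assert len(sample['gt_bbox']) == len(sample['difficult'])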
499,398
23.03.2022 11:14:44
-28,800
14e722043da22c4a43595ee3b415b2fe0197893d
[NPU] fix fp16
[ { "change_type": "MODIFY", "old_path": "ppdet/engine/trainer.py", "new_path": "ppdet/engine/trainer.py", "diff": "@@ -373,7 +373,7 @@ class Trainer(object):\n# enabel auto mixed precision mode\nif self.cfg.get('amp', False):\nscaler = amp.GradScaler(\n- enable=self.cfg.use_gpu, init_loss_scaling=1024)\n+ enable=self.cfg.use_gpu or self.cfg.use_npu, init_loss_scaling=1024)\nself.status.update({\n'epoch_id': self.start_epoch,\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
[NPU] fix fp16 (#5417)
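A hedged sketch of the amended scaler setup; use_gpu and use_npu stand in for the trainer's config flags, and enabling on an NPU machine assumes a Paddle build with NPU support:

    import paddle
    import paddle.amp as amp

    use_gpu = paddle.is_compiled_with_cuda()   # stand-in for cfg.use_gpu
    use_npu = False                            # stand-in for cfg.use_npu
    scaler = amp.GradScaler(enable=use_gpu or use_npu, init_loss_scaling=1024)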
499,333
23.03.2022 15:31:40
-28,800
ef342535e0eb4da49e401b5c6f4a9ff1af42fa6b
fix mask head when mask num is 1
[ { "change_type": "MODIFY", "old_path": "ppdet/modeling/heads/mask_head.py", "new_path": "ppdet/modeling/heads/mask_head.py", "diff": "@@ -226,6 +226,10 @@ class MaskHead(nn.Layer):\nnum_masks = paddle.shape(mask_logit)[0]\nindex = paddle.arange(num_masks).cast('int32')\nmask_out = mask_logit[index, labels]\n+ mask_out_shape = paddle.shape(mask_out)\n+ mask_out = paddle.reshape(mask_out, [\n+ paddle.shape(index), mask_out_shape[-2], mask_out_shape[-1]\n+ ])\nmask_out = F.sigmoid(mask_out)\nreturn mask_out\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
fix mask head when mask num is 1 (#5436)
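A toy sketch of why the reshape helps: gathering a single mask can collapse the leading axis, so the head restores an explicit [N, H, W] layout. Shapes are illustrative, and the tensor indexing mirrors the head's own gather, assuming a Paddle version that supports it:

    import paddle

    mask_logit = paddle.rand([1, 80, 28, 28])     # one detection, 80 classes
    labels = paddle.to_tensor([3])
    index = paddle.arange(1).cast('int32')
    mask_out = mask_logit[index, labels]
    mask_out = paddle.reshape(mask_out, [1, 28, 28])  # keep [N, H, W] even for N == 1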
499,333
24.03.2022 14:12:32
-28,800
5815fbc9faab8e9eff1c7004954ed698f38c5050
add message for export_onnx
[ { "change_type": "MODIFY", "old_path": "deploy/python/infer.py", "new_path": "deploy/python/infer.py", "diff": "@@ -553,6 +553,10 @@ class PredictConfig():\nself.nms = yml_conf['NMS']\nif 'fpn_stride' in yml_conf:\nself.fpn_stride = yml_conf['fpn_stride']\n+ if self.arch == 'RCNN' and yml_conf.get('export_onnx', False):\n+ print(\n+ 'The RCNN export model is used for ONNX and it only supports batch_size = 1'\n+ )\nself.print_config()\ndef check_model(self, yml_conf):\n" }, { "change_type": "MODIFY", "old_path": "ppdet/engine/export_utils.py", "new_path": "ppdet/engine/export_utils.py", "diff": "@@ -126,7 +126,13 @@ def _dump_infer_config(config, path, image_shape, model):\n'metric': config['metric'],\n'use_dynamic_shape': use_dynamic_shape\n})\n+ export_onnx = config.get('export_onnx', False)\n+\ninfer_arch = config['architecture']\n+ if 'RCNN' in infer_arch and export_onnx:\n+ logger.warning(\n+ \"Exporting RCNN model to ONNX only support batch_size = 1\")\n+ infer_cfg['export_onnx'] = True\nif infer_arch in MOT_ARCH:\nif infer_arch == 'DeepSORT':\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
add message for export_onnx (#5444)
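A minimal sketch of the deploy-side check added above, assuming an infer_cfg.yml written by tools/export_model.py (the path is a placeholder):

    import yaml

    with open('output_inference/model/infer_cfg.yml') as f:   # placeholder path
        yml_conf = yaml.safe_load(f)
    if yml_conf.get('arch') == 'RCNN' and yml_conf.get('export_onnx', False):
        print('The RCNN export model is used for ONNX and it only '
              'supports batch_size = 1')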
499,339
24.03.2022 16:12:18
-28,800
d6ffa2b5e9f4d8725a8ecb168ea43431534268ae
[PPYOLOE] fix oom eval in train
[ { "change_type": "MODIFY", "old_path": "configs/ppyoloe/_base_/optimizer_300e.yml", "new_path": "configs/ppyoloe/_base_/optimizer_300e.yml", "diff": "epoch: 300\nLearningRate:\n- base_lr: 0.03\n+ base_lr: 0.025\nschedulers:\n- !CosineDecay\nmax_epochs: 360\n- !LinearWarmup\n- start_factor: 0.001\n- steps: 3000\n+ start_factor: 0.\n+ epochs: 5\nOptimizerBuilder:\noptimizer:\n" }, { "change_type": "MODIFY", "old_path": "configs/ppyoloe/_base_/ppyoloe_reader.yml", "new_path": "configs/ppyoloe/_base_/ppyoloe_reader.yml", "diff": "-worker_num: 8\n+worker_num: 4\nTrainReader:\nsample_transforms:\n- Decode: {}\n@@ -11,7 +11,7 @@ TrainReader:\n- NormalizeImage: {mean: [0.485, 0.456, 0.406], std: [0.229, 0.224, 0.225], is_scale: True}\n- Permute: {}\n- PadGT: {}\n- batch_size: 24\n+ batch_size: 20\nshuffle: true\ndrop_last: true\nuse_shared_memory: true\n@@ -23,7 +23,7 @@ EvalReader:\n- Resize: {target_size: [640, 640], keep_ratio: False, interp: 2}\n- NormalizeImage: {mean: [0.485, 0.456, 0.406], std: [0.229, 0.224, 0.225], is_scale: True}\n- Permute: {}\n- batch_size: 4\n+ batch_size: 2\nTestReader:\ninputs_def:\n" }, { "change_type": "MODIFY", "old_path": "configs/ppyoloe/ppyoloe_crn_l_300e_coco.yml", "new_path": "configs/ppyoloe/ppyoloe_crn_l_300e_coco.yml", "diff": "@@ -9,7 +9,6 @@ _BASE_: [\nlog_iter: 100\nsnapshot_epoch: 10\nweights: output/ppyoloe_crn_l_300e_coco/model_final\n-find_unused_parameters: True\npretrain_weights: https://paddledet.bj.bcebos.com/models/pretrained/CSPResNetb_l_pretrained.pdparams\ndepth_mult: 1.0\n" }, { "change_type": "MODIFY", "old_path": "configs/ppyoloe/ppyoloe_crn_m_300e_coco.yml", "new_path": "configs/ppyoloe/ppyoloe_crn_m_300e_coco.yml", "diff": "@@ -9,20 +9,13 @@ _BASE_: [\nlog_iter: 100\nsnapshot_epoch: 10\nweights: output/ppyoloe_crn_m_300e_coco/model_final\n-find_unused_parameters: True\npretrain_weights: https://paddledet.bj.bcebos.com/models/pretrained/CSPResNetb_m_pretrained.pdparams\ndepth_mult: 0.67\nwidth_mult: 0.75\nTrainReader:\n- batch_size: 32\n+ batch_size: 28\nLearningRate:\n- base_lr: 0.04\n- schedulers:\n- - !CosineDecay\n- max_epochs: 360\n- - !LinearWarmup\n- start_factor: 0.001\n- steps: 2300\n+ base_lr: 0.035\n" }, { "change_type": "MODIFY", "old_path": "configs/ppyoloe/ppyoloe_crn_s_300e_coco.yml", "new_path": "configs/ppyoloe/ppyoloe_crn_s_300e_coco.yml", "diff": "@@ -9,7 +9,6 @@ _BASE_: [\nlog_iter: 100\nsnapshot_epoch: 10\nweights: output/ppyoloe_crn_s_300e_coco/model_final\n-find_unused_parameters: True\npretrain_weights: https://paddledet.bj.bcebos.com/models/pretrained/CSPResNetb_s_pretrained.pdparams\ndepth_mult: 0.33\n@@ -20,9 +19,3 @@ TrainReader:\nLearningRate:\nbase_lr: 0.04\n- schedulers:\n- - !CosineDecay\n- max_epochs: 360\n- - !LinearWarmup\n- start_factor: 0.001\n- steps: 2300\n" }, { "change_type": "MODIFY", "old_path": "configs/ppyoloe/ppyoloe_crn_x_300e_coco.yml", "new_path": "configs/ppyoloe/ppyoloe_crn_x_300e_coco.yml", "diff": "@@ -9,7 +9,6 @@ _BASE_: [\nlog_iter: 100\nsnapshot_epoch: 10\nweights: output/ppyoloe_crn_x_300e_coco/model_final\n-find_unused_parameters: True\npretrain_weights: https://paddledet.bj.bcebos.com/models/pretrained/CSPResNetb_x_pretrained.pdparams\ndepth_mult: 1.33\n@@ -20,9 +19,3 @@ TrainReader:\nLearningRate:\nbase_lr: 0.02\n- schedulers:\n- - !CosineDecay\n- max_epochs: 360\n- - !LinearWarmup\n- start_factor: 0.001\n- steps: 4600\n" }, { "change_type": "MODIFY", "old_path": "ppdet/modeling/assigners/atss_assigner.py", "new_path": "ppdet/modeling/assigners/atss_assigner.py", "diff": "@@ 
-183,9 +183,6 @@ class ATSSAssigner(nn.Layer):\nmask_positive)\nmask_positive_sum = mask_positive.sum(axis=-2)\nassigned_gt_index = mask_positive.argmax(axis=-2)\n- assert mask_positive_sum.max() == 1, \\\n- (\"one anchor just assign one gt, but received not equals 1. \"\n- \"Received: %f\" % mask_positive_sum.max().item())\n# assigned target\nbatch_ind = paddle.arange(\n" }, { "change_type": "MODIFY", "old_path": "ppdet/modeling/assigners/task_aligned_assigner.py", "new_path": "ppdet/modeling/assigners/task_aligned_assigner.py", "diff": "@@ -129,9 +129,6 @@ class TaskAlignedAssigner(nn.Layer):\nmask_positive)\nmask_positive_sum = mask_positive.sum(axis=-2)\nassigned_gt_index = mask_positive.argmax(axis=-2)\n- assert mask_positive_sum.max() == 1, \\\n- (\"one anchor just assign one gt, but received not equals 1. \"\n- \"Received: %f\" % mask_positive_sum.max().item())\n# assigned target\nassigned_gt_index = assigned_gt_index + batch_ind * num_max_boxes\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
[PPYOLOE] fix oom eval in train (#5409)
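The LR change above swaps a fixed 3000-step warmup for a 5-epoch warmup starting at factor 0; a plain-Python sketch of the resulting schedule (step_per_epoch is a placeholder, and the cosine formula matches ppdet's CosineDecay):

    import math

    base_lr, warmup_epochs, max_epochs, step_per_epoch = 0.025, 5, 360, 1000
    warmup_iters = warmup_epochs * step_per_epoch
    max_iters = max_epochs * step_per_epoch

    def lr_at(i):
        if i < warmup_iters:              # linear warmup from 0 (start_factor 0.)
            return base_lr * i / warmup_iters
        return base_lr * 0.5 * (math.cos(
            (i - warmup_iters) * math.pi / (max_iters - warmup_iters)) + 1)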
499,348
25.03.2022 15:38:40
-28,800
87b9d564ad761fe7db7a8ca47a676ecac6f7c3df
fix datacollector error without reid;test=document_fix
[ { "change_type": "MODIFY", "old_path": "deploy/pphuman/datacollector.py", "new_path": "deploy/pphuman/datacollector.py", "diff": "@@ -75,7 +75,8 @@ class DataCollector(object):\naction_res = Result.get('action')\nreid_res = Result.get('reid')\n- for idx, mot_item in enumerate(reid_res['rects']):\n+ rects = reid_res['rects'] if reid_res is not None else mot_res['boxes']\n+ for idx, mot_item in enumerate(rects):\nids = int(mot_item[0])\nif ids not in self.collector:\nself.collector[ids] = copy.deepcopy(self.mots)\n" }, { "change_type": "MODIFY", "old_path": "deploy/pphuman/pipeline.py", "new_path": "deploy/pphuman/pipeline.py", "diff": "@@ -460,6 +460,7 @@ class PipePredictor(object):\n# nothing detected\nif len(mot_res['boxes']) == 0:\nframe_id += 1\n+ if frame_id > self.warmup_frame:\nself.pipe_timer.img_num += 1\nself.pipe_timer.total_time.end()\nif self.cfg['visual']:\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
fix datacollector error without reid;test=document_fix (#5459)
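A runnable sketch of the fallback: when no ReID output exists, the collector iterates the MOT boxes instead (the result layout mirrors the pipeline's dicts; values are made up):

    result = {'mot': {'boxes': [[1, 0, 0.9, 10, 10, 50, 80]]}, 'reid': None}
    reid_res = result.get('reid')
    mot_res = result.get('mot')
    rects = reid_res['rects'] if reid_res is not None else mot_res['boxes']
    assert len(rects) == 1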
499,348
25.03.2022 17:36:30
-28,800
abb60be9c9206990abafc5e2e3d46d2791a4c68a
add mtmct docs; test=document_fix
[ { "change_type": "MODIFY", "old_path": "deploy/pphuman/datacollector.py", "new_path": "deploy/pphuman/datacollector.py", "diff": "@@ -86,7 +86,7 @@ class DataCollector(object):\nif attr_res:\nself.collector[ids][\"attrs\"].append(attr_res['output'][idx])\nif kpt_res:\n- self.collector[ids][\"kpts\"].append(kpt_res['output'][idx])\n+ self.collector[ids][\"kpts\"].append(kpt_res['keypoint'][idx])\nif action_res:\nself.collector[ids][\"actions\"].append(action_res['output'][idx])\nelse:\n" }, { "change_type": "ADD", "old_path": "deploy/pphuman/docs/images/c1.gif", "new_path": "deploy/pphuman/docs/images/c1.gif", "diff": "Binary files /dev/null and b/deploy/pphuman/docs/images/c1.gif differ\n" }, { "change_type": "ADD", "old_path": "deploy/pphuman/docs/images/c2.gif", "new_path": "deploy/pphuman/docs/images/c2.gif", "diff": "Binary files /dev/null and b/deploy/pphuman/docs/images/c2.gif differ\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
add mtmct docs; (#5458) test=document_fix
499,348
26.03.2022 14:39:06
-28,800
49379331ad6dbb732c2942492bd66f8bd92728fb
update kpt and action data collector; test=document_fix
[ { "change_type": "MODIFY", "old_path": "deploy/pphuman/datacollector.py", "new_path": "deploy/pphuman/datacollector.py", "diff": "@@ -86,9 +86,10 @@ class DataCollector(object):\nif attr_res:\nself.collector[ids][\"attrs\"].append(attr_res['output'][idx])\nif kpt_res:\n- self.collector[ids][\"kpts\"].append(kpt_res['keypoint'][idx])\n+ self.collector[ids][\"kpts\"].append(\n+ [kpt_res['keypoint'][0][idx], kpt_res['keypoint'][1][idx]])\nif action_res:\n- self.collector[ids][\"actions\"].append(action_res['output'][idx])\n+ self.collector[ids][\"actions\"].append(action_res[idx + 1])\nelse:\n# action model generate result per X frames, Not available every frames\nself.collector[ids][\"actions\"].append(None)\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
update kpt and action data collector; (#5464) test=document_fix
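A hedged sketch of the corrected collector indexing, with hypothetical shapes: keypoint results hold parallel (coordinates, scores) lists, and per-frame action results are keyed by track index starting at 1:

    kpt_res = {'keypoint': [[[[100.0, 200.0, 0.9]]], [[0.9]]]}  # (coords, scores)
    action_res = {1: 'falling'}                                  # hypothetical
    idx = 0
    kpts_entry = [kpt_res['keypoint'][0][idx], kpt_res['keypoint'][1][idx]]
    action_entry = action_res[idx + 1]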
499,348
27.03.2022 18:07:09
-28,800
9631d37de4b627fbbf18b7d47def8473140444ce
fix print logs; test=document_fix
[ { "change_type": "MODIFY", "old_path": "deploy/pphuman/mtmct.py", "new_path": "deploy/pphuman/mtmct.py", "diff": "@@ -111,7 +111,8 @@ def save_mtmct_vis_results(camera_results, captures, output_dir):\nfor idx, video_file in enumerate(captures):\ncapture = cv2.VideoCapture(video_file)\ncid = camera_ids[idx]\n- video_out_name = \"mtmct_vis_c\" + str(cid) + \".mp4\"\n+ basename = os.path.basename(video_file)\n+ video_out_name = \"vis_\" + basename\nprint(\"Start visualizing output video: {}\".format(video_out_name))\nout_path = os.path.join(save_dir, video_out_name)\n@@ -182,7 +183,7 @@ def get_dist_mat(x, y, func_name=\"euclidean\"):\ndist_mat = get_cosine(x, y)\nelif func_name == \"euclidean\":\ndist_mat = get_euclidean(x, y)\n- print(\"Using {func_name} as distance function during evaluation\")\n+ print(\"Using {} as distance function during evaluation\".format(func_name))\nreturn dist_mat\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
fix print logs; (#5466) test=document_fix
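The second hunk fixes a plain string that was written like an f-string but never interpolated; a one-line demonstration of the corrected call:

    func_name = 'euclidean'
    print('Using {} as distance function during evaluation'.format(func_name))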
499,333
28.03.2022 15:23:48
-28,800
12fb0052564c5de546cd8071ba249cf2c4937e13
update data in pphuman, test=document_fix
[ { "change_type": "MODIFY", "old_path": "deploy/pptracking/python/mot/visualize.py", "new_path": "deploy/pptracking/python/mot/visualize.py", "diff": "@@ -209,7 +209,7 @@ def plot_tracking_dict(image,\nend = records[-1].find('In')\ncv2.putText(\nim,\n- records[-1][start:end], (0, int(40 * text_scale)),\n+ records[-1][start:end], (0, int(40 * text_scale) + 10),\ncv2.FONT_ITALIC,\ntext_scale, (0, 0, 255),\nthickness=text_thickness)\n@@ -226,7 +226,7 @@ def plot_tracking_dict(image,\nstart = records[-1].find('In')\ncv2.putText(\nim,\n- records[-1][start:-1], (0, int(60 * text_scale)),\n+ records[-1][start:-1], (0, int(60 * text_scale) + 10),\ncv2.FONT_ITALIC,\ntext_scale, (0, 0, 255),\nthickness=text_thickness)\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
update data in pphuman, test=document_fix (#5474)
499,301
28.03.2022 20:00:44
-28,800
2ef5f0cd218f7b29fc85b946a31db30ae0fcae72
update yoloe configs
[ { "change_type": "MODIFY", "old_path": "configs/ppyoloe/README.md", "new_path": "configs/ppyoloe/README.md", "diff": "@@ -75,6 +75,12 @@ CUDA_VISIBLE_DEVICES=0 python tools/infer.py -c configs/ppyoloe/ppyoloe_crn_l_30\n### 4. Deployment\n+- PaddleInference [Python](../../deploy/python) & [C++](../../deploy/cpp)\n+- [Paddle-TensorRT](../../deploy/TENSOR_RT.md)\n+- [Paddle2ONNX](https://github.com/PaddlePaddle/Paddle2ONNX)\n+- [PaddleServing](https://github.com/PaddlePaddle/Serving)\n+<!-- - [Paddle-Lite](https://github.com/PaddlePaddle/Paddle-Lite) -->\n+\nFor deployment on GPU or benchmarked, model should be first exported to inference model using `tools/export_model.py`.\nExporting PP-YOLOE for Paddle Inference **without TensorRT**, use following command.\n@@ -116,6 +122,20 @@ paddle2onnx --model_dir output_inference/ppyoloe_crn_l_300e_coco --model_filenam\n```\n+### 5. Other Datasets\n+\n+Model | AP | AP<sub>50</sub>\n+---|---|---\n+[YOLOX](https://github.com/Megvii-BaseDetection/YOLOX) | 22.6 | 37.5\n+[YOLOv5](https://github.com/ultralytics/yolov5) | 26.0 | 42.7\n+**PP-YOLOE** | **30.5** | **46.4**\n+\n+**Note**\n+- Here, we use [VisDrone](https://github.com/VisDrone/VisDrone-Dataset) dataset, and to detect 9 objects including `person, bicycles, car, van, truck, tricyle, awning-tricyle, bus, motor`.\n+- Above models trained using official default config, and load pretrained parameters on COCO dataset.\n+- *Due to the limited time, more verification results will be supplemented in the future. You are also welcome to contribute to PP-YOLOE*\n+\n+\n## Appendix\nAblation experiments of PP-YOLOE.\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
update yoloe configs (#5478)
499,339
28.03.2022 20:01:25
-28,800
839746dd865b5734461e90ce4d1aba2ea426a633
[TIPC] fix hrnet epoch num
[ { "change_type": "MODIFY", "old_path": "test_tipc/configs/keypoint/higherhrnet_hrnet_w32_512_train_infer_python.txt", "new_path": "test_tipc/configs/keypoint/higherhrnet_hrnet_w32_512_train_infer_python.txt", "diff": "@@ -52,7 +52,7 @@ inference:./deploy/python/keypoint_infer.py\n===========================train_benchmark_params==========================\nbatch_size:20|24\nfp_items:fp32|fp16\n-epoch:1\n+epoch:20\n--profiler_options:batch_range=[10,20];state=GPU;tracer_option=Default;profile_path=model.profile\nflags:null\n===========================infer_benchmark_params===========================\n" }, { "change_type": "MODIFY", "old_path": "test_tipc/configs/keypoint/hrnet_w32_256x192_train_infer_python.txt", "new_path": "test_tipc/configs/keypoint/hrnet_w32_256x192_train_infer_python.txt", "diff": "@@ -52,7 +52,7 @@ null:null\n===========================train_benchmark_params==========================\nbatch_size:64|160\nfp_items:fp32|fp16\n-epoch:1\n+epoch:40\n--profiler_options:batch_range=[10,20];state=GPU;tracer_option=Default;profile_path=model.profile\nflags:null\n===========================infer_benchmark_params===========================\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
[TIPC] fix hrnet epoch num (#5481)
499,395
28.03.2022 20:59:00
-28,800
f3f8dc9cbdf6b0b7ba3fc758b692233658e413a6
fix ema resume
[ { "change_type": "MODIFY", "old_path": "ppdet/optimizer.py", "new_path": "ppdet/optimizer.py", "diff": "@@ -339,6 +339,7 @@ class ModelEMA(object):\ndef resume(self, state_dict, step=0):\nfor k, v in state_dict.items():\n+ if k in self.state_dict:\nself.state_dict[k] = v\nself.step = step\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
fix ema resume (#5413)
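A runnable sketch of the guarded EMA resume: only checkpoint keys that still exist in the current EMA state are restored, so stale parameters are skipped instead of crashing (names are made up):

    ema_state = {'backbone.w': 0.0, 'head.w': 0.0}
    checkpoint = {'backbone.w': 1.0, 'removed.param': 2.0}
    for k, v in checkpoint.items():
        if k in ema_state:
            ema_state[k] = v
    assert 'removed.param' not in ema_state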
499,395
28.03.2022 20:59:12
-28,800
504e89ba580112f481ff9d8698ea4f7c234ba745
fix ppyoloe docs, test=document_fix
[ { "change_type": "MODIFY", "old_path": "configs/ppyoloe/README.md", "new_path": "configs/ppyoloe/README.md", "diff": "@@ -92,7 +92,7 @@ python tools/export_model.py configs/ppyoloe/ppyoloe_crn_l_300e_coco.yml -o weig\nExporting PP-YOLOE for Paddle Inference **with TensorRT** for better performance, use following command with extra `-o trt=True` setting.\n```bash\n-python tools/export_model.py configs/ppyoloe/ppyoloe_crn_l_300e_coco.yml -o weights=https://paddledet.bj.bcebos.com/models/ppyoloe_crn_l_300e_coco.pdparams -o trt=True\n+python tools/export_model.py configs/ppyoloe/ppyoloe_crn_l_300e_coco.yml -o weights=https://paddledet.bj.bcebos.com/models/ppyoloe_crn_l_300e_coco.pdparams trt=True\n```\n`deploy/python/infer.py` is used to load exported paddle inference model above for inference and benchmark through PaddleInference.\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
fix ppyoloe docs, test=document_fix (#5484)
499,339
28.03.2022 21:39:43
-28,800
98aebc4603640ab8f603bad696e17ad41e8e57e8
refine coslr, add last_plateau_epochs
[ { "change_type": "MODIFY", "old_path": "ppdet/optimizer.py", "new_path": "ppdet/optimizer.py", "diff": "@@ -41,12 +41,21 @@ class CosineDecay(object):\nmax_epochs (int): max epochs for the training process.\nif you commbine cosine decay with warmup, it is recommended that\nthe max_iters is much larger than the warmup iter\n+ use_warmup (bool): whether to use warmup. Default: True.\n+ min_lr_ratio (float): minimum learning rate ratio. Default: 0.\n+ last_plateau_epochs (int): use minimum learning rate in\n+ the last few epochs. Default: 0.\n\"\"\"\n- def __init__(self, max_epochs=1000, use_warmup=True, eta_min=0.):\n+ def __init__(self,\n+ max_epochs=1000,\n+ use_warmup=True,\n+ min_lr_ratio=0.,\n+ last_plateau_epochs=0):\nself.max_epochs = max_epochs\nself.use_warmup = use_warmup\n- self.eta_min = eta_min\n+ self.min_lr_ratio = min_lr_ratio\n+ self.last_plateau_epochs = last_plateau_epochs\ndef __call__(self,\nbase_lr=None,\n@@ -56,21 +65,38 @@ class CosineDecay(object):\nassert base_lr is not None, \"either base LR or values should be provided\"\nmax_iters = self.max_epochs * int(step_per_epoch)\n-\n+ last_plateau_iters = self.last_plateau_epochs * int(step_per_epoch)\n+ min_lr = base_lr * self.min_lr_ratio\nif boundary is not None and value is not None and self.use_warmup:\n+ # use warmup\nwarmup_iters = len(boundary)\nfor i in range(int(boundary[-1]), max_iters):\nboundary.append(i)\n-\n- decayed_lr = base_lr * 0.5 * (math.cos(\n+ if i < max_iters - last_plateau_iters:\n+ decayed_lr = min_lr + (base_lr - min_lr) * 0.5 * (math.cos(\n(i - warmup_iters) * math.pi /\n- (max_iters - warmup_iters)) + 1)\n- decayed_lr = decayed_lr if decayed_lr > self.eta_min else self.eta_min\n+ (max_iters - warmup_iters - last_plateau_iters)) + 1)\nvalue.append(decayed_lr)\n+ else:\n+ value.append(min_lr)\n+ return optimizer.lr.PiecewiseDecay(boundary, value)\n+ elif last_plateau_iters > 0:\n+ # not use warmup, but set `last_plateau_epochs` > 0\n+ boundary = []\n+ value = []\n+ for i in range(max_iters):\n+ if i < max_iters - last_plateau_iters:\n+ decayed_lr = min_lr + (base_lr - min_lr) * 0.5 * (math.cos(\n+ i * math.pi / (max_iters - last_plateau_iters)) + 1)\n+ value.append(decayed_lr)\n+ else:\n+ value.append(min_lr)\n+ if i > 0:\n+ boundary.append(i)\nreturn optimizer.lr.PiecewiseDecay(boundary, value)\nreturn optimizer.lr.CosineAnnealingDecay(\n- base_lr, T_max=max_iters, eta_min=self.eta_min)\n+ base_lr, T_max=max_iters, eta_min=min_lr)\n@serializable\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
refine coslr, add last_plateau_epochs (#5401)
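A plain-Python sketch of the no-warmup branch added above: cosine decay from base_lr down to min_lr, then a flat plateau at min_lr for the final iterations (constants are illustrative; the formula is the one in the diff):

    import math

    base_lr, min_lr_ratio = 0.01, 0.05
    max_iters, last_plateau_iters = 1000, 100
    min_lr = base_lr * min_lr_ratio

    def lr_at(i):
        if i < max_iters - last_plateau_iters:
            return min_lr + (base_lr - min_lr) * 0.5 * (math.cos(
                i * math.pi / (max_iters - last_plateau_iters)) + 1)
        return min_lr                      # minimum-LR plateau at the end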
499,348
28.03.2022 22:14:31
-28,800
cd08a2450f71a9d657e600bf4294e5196b24fdae
fix some problem in lite deploy
[ { "change_type": "MODIFY", "old_path": "deploy/lite/include/config_parser.h", "new_path": "deploy/lite/include/config_parser.h", "diff": "namespace PaddleDetection {\n-void load_jsonf(std::string jsonfile, Json::Value& jsondata);\n+void load_jsonf(std::string jsonfile, const Json::Value& jsondata);\n// Inference model configuration parser\nclass ConfigPaser {\n@@ -43,12 +43,13 @@ class ConfigPaser {\nJson::Value config;\nload_jsonf(model_dir + OS_PATH_SEP + cfg + \".json\", config);\n- // Get model arch : YOLO, SSD, RetinaNet, RCNN, Face\n+ // Get model arch : YOLO, SSD, RetinaNet, RCNN, Face, PicoDet, HRNet\nif (config.isMember(\"arch\")) {\narch_ = config[\"arch\"].as<std::string>();\n} else {\n- std::cerr << \"Please set model arch,\"\n- << \"support value : YOLO, SSD, RetinaNet, RCNN, Face.\"\n+ std::cerr\n+ << \"Please set model arch,\"\n+ << \"support value : YOLO, SSD, RetinaNet, RCNN, Face, PicoDet, HRNet.\"\n<< std::endl;\nreturn false;\n}\n" }, { "change_type": "MODIFY", "old_path": "deploy/lite/src/config_parser.cc", "new_path": "deploy/lite/src/config_parser.cc", "diff": "namespace PaddleDetection {\n-void load_jsonf(std::string jsonfile, Json::Value &jsondata) {\n+void load_jsonf(std::string jsonfile, const Json::Value &jsondata) {\nstd::ifstream ifs;\nifs.open(jsonfile);\n" }, { "change_type": "MODIFY", "old_path": "deploy/lite/src/main.cc", "new_path": "deploy/lite/src/main.cc", "diff": "@@ -43,10 +43,8 @@ void PrintBenchmarkLog(std::vector<double> det_time, int img_num) {\n<< std::endl;\nRT_Config[\"model_dir_det\"].as<std::string>().erase(\nRT_Config[\"model_dir_det\"].as<std::string>().find_last_not_of(\"/\") + 1);\n- std::cout\n- << \"detection model_name: \"\n- << RT_Config[\"model_dir_det\"].as<std::string>()\n- << std::endl;\n+ std::cout << \"detection model_name: \"\n+ << RT_Config[\"model_dir_det\"].as<std::string>() << std::endl;\nstd::cout << \"----------------------- Perf info ------------------------\"\n<< std::endl;\nstd::cout << \"Total number of predicted data: \" << img_num\n@@ -69,14 +67,14 @@ void PrintKptsBenchmarkLog(std::vector<double> det_time, int img_num){\nRT_Config[\"model_dir_keypoint\"].as<std::string>().erase(\nRT_Config[\"model_dir_keypoint\"].as<std::string>().find_last_not_of(\"/\") +\n1);\n- std::cout\n- << \"keypoint model_name: \"\n+ std::cout << \"keypoint model_name: \"\n<< RT_Config[\"model_dir_keypoint\"].as<std::string>() << std::endl;\nstd::cout << \"----------------------- Perf info ------------------------\"\n<< std::endl;\nstd::cout << \"Total number of predicted data: \" << img_num\n<< \" and total time spent(ms): \"\n- << std::accumulate(det_time.begin(), det_time.end(), 0.) 
<< std::endl;\n+ << std::accumulate(det_time.begin(), det_time.end(), 0.)\n+ << std::endl;\nimg_num = std::max(1, img_num);\nstd::cout << \"Average time cost per person:\" << std::endl\n<< \"preproce_time(ms): \" << det_time[0] / img_num\n@@ -136,7 +134,7 @@ void PredictImage(const std::vector<std::string> all_img_paths,\nPaddleDetection::KeyPointDetector* keypoint,\nconst std::string& output_dir = \"output\") {\nstd::vector<double> det_t = {0, 0, 0};\n- int steps = ceil(float(all_img_paths.size()) / batch_size_det);\n+ int steps = ceil(static_cast<float>(all_img_paths.size()) / batch_size_det);\nint kpts_imgs = 0;\nstd::vector<double> keypoint_t = {0, 0, 0};\ndouble midtimecost = 0;\n@@ -243,7 +241,7 @@ void PredictImage(const std::vector<std::string> all_img_paths,\nstd::chrono::duration<float> midtimediff =\nkeypoint_crop_time - keypoint_start_time;\n- midtimecost += double(midtimediff.count() * 1000);\n+ midtimecost += static_cast<double>(midtimediff.count() * 1000);\nif (imgs_kpts.size() == RT_Config[\"batch_size_keypoint\"].as<int>() ||\n((i == imsize - 1) && !imgs_kpts.empty())) {\n@@ -275,8 +273,8 @@ void PredictImage(const std::vector<std::string> all_img_paths,\nstd::string kpts_savepath =\noutput_path + \"keypoint_\" +\nimage_file_path.substr(image_file_path.find_last_of('/') + 1);\n- cv::Mat kpts_vis_img =\n- VisualizeKptsResult(im, result_kpts, colormap_kpts, keypoint->get_threshold());\n+ cv::Mat kpts_vis_img = VisualizeKptsResult(\n+ im, result_kpts, colormap_kpts, keypoint->get_threshold());\ncv::imwrite(kpts_savepath, kpts_vis_img, compression_params);\nprintf(\"Visualized output saved as %s\\n\", kpts_savepath.c_str());\n} else {\n@@ -298,16 +296,15 @@ void PredictImage(const std::vector<std::string> all_img_paths,\nPrintBenchmarkLog(det_t, all_img_paths.size());\nif (keypoint) {\nPrintKptsBenchmarkLog(keypoint_t, kpts_imgs);\n- PrintTotalIimeLog((det_t[0] + det_t[1] + det_t[2]) / all_img_paths.size(),\n+ PrintTotalIimeLog(\n+ (det_t[0] + det_t[1] + det_t[2]) / all_img_paths.size(),\n(keypoint_t[0] + keypoint_t[1] + keypoint_t[2]) / all_img_paths.size(),\nmidtimecost / all_img_paths.size());\n}\n-\n}\nint main(int argc, char** argv) {\n- std::cout << \"Usage: \" << argv[0]\n- << \" [config_path](option) [image_dir](option)\\n\";\n+ std::cout << \"Usage: \" << argv[0] << \" [config_path] [image_dir](option)\\n\";\nif (argc < 2) {\nstd::cout << \"Usage: ./main det_runtime_config.json\" << std::endl;\nreturn -1;\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
fix some problem in lite deploy (#5488)
499,385
29.03.2022 14:53:39
-28,800
f4b8dded1ef7555325997cedd056fdb8f728ac9d
Update docs for PP-YOLOE
[ { "change_type": "MODIFY", "old_path": "configs/ppyoloe/README.md", "new_path": "configs/ppyoloe/README.md", "diff": "@@ -86,13 +86,13 @@ For deployment on GPU or benchmarked, model should be first exported to inferenc\nExporting PP-YOLOE for Paddle Inference **without TensorRT**, use following command.\n```bash\n-python tools/export_model.py configs/ppyoloe/ppyoloe_crn_l_300e_coco.yml -o weights=https://paddledet.bj.bcebos.com/models/ppyoloe_crn_l_300e_coco.pdparams\n+python tools/export_model.py -c configs/ppyoloe/ppyoloe_crn_l_300e_coco.yml -o weights=https://paddledet.bj.bcebos.com/models/ppyoloe_crn_l_300e_coco.pdparams\n```\nExporting PP-YOLOE for Paddle Inference **with TensorRT** for better performance, use following command with extra `-o trt=True` setting.\n```bash\n-python tools/export_model.py configs/ppyoloe/ppyoloe_crn_l_300e_coco.yml -o weights=https://paddledet.bj.bcebos.com/models/ppyoloe_crn_l_300e_coco.pdparams trt=True\n+python tools/export_model.py -c configs/ppyoloe/ppyoloe_crn_l_300e_coco.yml -o weights=https://paddledet.bj.bcebos.com/models/ppyoloe_crn_l_300e_coco.pdparams trt=True\n```\n`deploy/python/infer.py` is used to load exported paddle inference model above for inference and benchmark through Paddle Inference.\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
Update docs for PP-YOLOE (#5494)
499,333
29.03.2022 21:11:25
-28,800
0743e380e47def3e45a2e6f59067903958e3d285
fix test arch
[ { "change_type": "MODIFY", "old_path": "ppdet/modeling/tests/test_architectures.py", "new_path": "ppdet/modeling/tests/test_architectures.py", "diff": "@@ -62,7 +62,7 @@ class TestGFL(TestFasterRCNN):\nclass TestPicoDet(TestFasterRCNN):\ndef set_config(self):\n- self.cfg_file = 'configs/picodet/picodet_s_320_coco.yml'\n+ self.cfg_file = 'configs/picodet/picodet_s_320_coco_lcnet.yml'\nif __name__ == '__main__':\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
fix test arch (#5503)
499,395
30.03.2022 15:41:32
-28,800
8d3b8b47838e7c73fab59117288c52233e70b812
modify fuse normalize
[ { "change_type": "MODIFY", "old_path": "ppdet/modeling/architectures/meta_arch.py", "new_path": "ppdet/modeling/architectures/meta_arch.py", "diff": "@@ -22,22 +22,23 @@ class BaseArch(nn.Layer):\nself.fuse_norm = False\ndef load_meanstd(self, cfg_transform):\n- self.scale = 1.\n- self.mean = paddle.to_tensor([0.485, 0.456, 0.406]).reshape(\n- (1, 3, 1, 1))\n- self.std = paddle.to_tensor([0.229, 0.224, 0.225]).reshape((1, 3, 1, 1))\n+ scale = 1.\n+ mean = np.array([0.485, 0.456, 0.406], dtype=np.float32)\n+ std = np.array([0.229, 0.224, 0.225], dtype=np.float32)\nfor item in cfg_transform:\nif 'NormalizeImage' in item:\n- self.mean = paddle.to_tensor(item['NormalizeImage'][\n- 'mean']).reshape((1, 3, 1, 1))\n- self.std = paddle.to_tensor(item['NormalizeImage'][\n- 'std']).reshape((1, 3, 1, 1))\n+ mean = np.array(\n+ item['NormalizeImage']['mean'], dtype=np.float32)\n+ std = np.array(item['NormalizeImage']['std'], dtype=np.float32)\nif item['NormalizeImage'].get('is_scale', True):\n- self.scale = 1. / 255.\n+ scale = 1. / 255.\nbreak\nif self.data_format == 'NHWC':\n- self.mean = self.mean.reshape(1, 1, 1, 3)\n- self.std = self.std.reshape(1, 1, 1, 3)\n+ self.scale = paddle.to_tensor(scale / std).reshape((1, 1, 1, 3))\n+ self.bias = paddle.to_tensor(-mean / std).reshape((1, 1, 1, 3))\n+ else:\n+ self.scale = paddle.to_tensor(scale / std).reshape((1, 3, 1, 1))\n+ self.bias = paddle.to_tensor(-mean / std).reshape((1, 3, 1, 1))\ndef forward(self, inputs):\nif self.data_format == 'NHWC':\n@@ -46,7 +47,7 @@ class BaseArch(nn.Layer):\nif self.fuse_norm:\nimage = inputs['image']\n- self.inputs['image'] = (image * self.scale - self.mean) / self.std\n+ self.inputs['image'] = image * self.scale + self.bias\nself.inputs['im_shape'] = inputs['im_shape']\nself.inputs['scale_factor'] = inputs['scale_factor']\nelse:\n@@ -66,8 +67,7 @@ class BaseArch(nn.Layer):\nouts = []\nfor inp in inputs_list:\nif self.fuse_norm:\n- self.inputs['image'] = (\n- inp['image'] * self.scale - self.mean) / self.std\n+ self.inputs['image'] = inp['image'] * self.scale + self.bias\nself.inputs['im_shape'] = inp['im_shape']\nself.inputs['scale_factor'] = inp['scale_factor']\nelse:\n@@ -92,7 +92,9 @@ class BaseArch(nn.Layer):\nkeep_top_k = self.bbox_post_process.nms.keep_top_k\nnms_threshold = self.bbox_post_process.nms.nms_threshold\nelse:\n- raise Exception(\"Multi scale test only supports CascadeRCNN, FasterRCNN and MaskRCNN for now\")\n+ raise Exception(\n+ \"Multi scale test only supports CascadeRCNN, FasterRCNN and MaskRCNN for now\"\n+ )\nfinal_boxes = []\nall_scale_outs = paddle.concat([o['bbox'] for o in outs]).numpy()\n@@ -101,9 +103,11 @@ class BaseArch(nn.Layer):\nif np.count_nonzero(idxs) == 0:\ncontinue\nr = nms(all_scale_outs[idxs, 1:], nms_threshold)\n- final_boxes.append(np.concatenate([np.full((r.shape[0], 1), c), r], 1))\n+ final_boxes.append(\n+ np.concatenate([np.full((r.shape[0], 1), c), r], 1))\nout = np.concatenate(final_boxes)\n- out = np.concatenate(sorted(out, key=lambda e: e[1])[-keep_top_k:]).reshape((-1, 6))\n+ out = np.concatenate(sorted(\n+ out, key=lambda e: e[1])[-keep_top_k:]).reshape((-1, 6))\nout = {\n'bbox': paddle.to_tensor(out),\n'bbox_num': paddle.to_tensor(np.array([out.shape[0], ]))\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
modify fuse normalize (#5513)
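The rewrite folds (x * scale - mean) / std into a single multiply-add, x * (scale / std) + (-mean / std); a runnable numpy check of the algebra:

    import numpy as np

    mean = np.array([0.485, 0.456, 0.406], dtype=np.float32).reshape(1, 3, 1, 1)
    std = np.array([0.229, 0.224, 0.225], dtype=np.float32).reshape(1, 3, 1, 1)
    scale, bias = (1. / 255.) / std, -mean / std

    x = (np.random.rand(1, 3, 4, 4) * 255.).astype(np.float32)
    fused = x * scale + bias
    ref = (x * (1. / 255.) - mean) / std
    assert np.allclose(fused, ref, atol=1e-5)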
499,299
30.03.2022 17:37:29
-28,800
35c9da7a61d18a0bd9892b4ac96fc86a0371ff5f
fix cpp inference trt error when bs>1
[ { "change_type": "MODIFY", "old_path": "deploy/cpp/src/object_detector.cc", "new_path": "deploy/cpp/src/object_detector.cc", "diff": "// for setprecision\n#include <chrono>\n#include <iomanip>\n-#include \"include/object_detector.h\"\n-using namespace paddle_infer;\n+#include \"include/object_detector.h\"\nnamespace PaddleDetection {\n@@ -42,27 +41,22 @@ void ObjectDetector::LoadModel(const std::string& model_dir,\n} else if (run_mode == \"trt_int8\") {\nprecision = paddle_infer::Config::Precision::kInt8;\n} else {\n- printf(\n- \"run_mode should be 'paddle', 'trt_fp32', 'trt_fp16' or \"\n+ printf(\"run_mode should be 'paddle', 'trt_fp32', 'trt_fp16' or \"\n\"'trt_int8'\");\n}\n// set tensorrt\n- config.EnableTensorRtEngine(1 << 30,\n- batch_size,\n- this->min_subgraph_size_,\n- precision,\n- false,\n- this->trt_calib_mode_);\n+ config.EnableTensorRtEngine(1 << 30, batch_size, this->min_subgraph_size_,\n+ precision, false, this->trt_calib_mode_);\n// set use dynamic shape\nif (this->use_dynamic_shape_) {\n- // set DynamicShsape for image tensor\n+ // set DynamicShape for image tensor\nconst std::vector<int> min_input_shape = {\n- 1, 3, this->trt_min_shape_, this->trt_min_shape_};\n+ batch_size, 3, this->trt_min_shape_, this->trt_min_shape_};\nconst std::vector<int> max_input_shape = {\n- 1, 3, this->trt_max_shape_, this->trt_max_shape_};\n+ batch_size, 3, this->trt_max_shape_, this->trt_max_shape_};\nconst std::vector<int> opt_input_shape = {\n- 1, 3, this->trt_opt_shape_, this->trt_opt_shape_};\n+ batch_size, 3, this->trt_opt_shape_, this->trt_opt_shape_};\nconst std::map<std::string, std::vector<int>> map_min_input_shape = {\n{\"image\", min_input_shape}};\nconst std::map<std::string, std::vector<int>> map_max_input_shape = {\n@@ -70,8 +64,8 @@ void ObjectDetector::LoadModel(const std::string& model_dir,\nconst std::map<std::string, std::vector<int>> map_opt_input_shape = {\n{\"image\", opt_input_shape}};\n- config.SetTRTDynamicShapeInfo(\n- map_min_input_shape, map_max_input_shape, map_opt_input_shape);\n+ config.SetTRTDynamicShapeInfo(map_min_input_shape, map_max_input_shape,\n+ map_opt_input_shape);\nstd::cout << \"TensorRT dynamic shape enabled\" << std::endl;\n}\n}\n@@ -96,12 +90,11 @@ void ObjectDetector::LoadModel(const std::string& model_dir,\n}\n// Visualiztion MaskDetector results\n-cv::Mat VisualizeResult(\n- const cv::Mat& img,\n+cv::Mat\n+VisualizeResult(const cv::Mat &img,\nconst std::vector<PaddleDetection::ObjectResult> &results,\nconst std::vector<std::string> &lables,\n- const std::vector<int>& colormap,\n- const bool is_rbox = false) {\n+ const std::vector<int> &colormap, const bool is_rbox = false) {\ncv::Mat vis_img = img.clone();\nfor (int i = 0; i < results.size(); ++i) {\n// Configure color and text size\n@@ -142,19 +135,13 @@ cv::Mat VisualizeResult(\norigin.y = results[i].rect[1];\n// Configure text background\n- cv::Rect text_back = cv::Rect(results[i].rect[0],\n- results[i].rect[1] - text_size.height,\n- text_size.width,\n- text_size.height);\n+ cv::Rect text_back =\n+ cv::Rect(results[i].rect[0], results[i].rect[1] - text_size.height,\n+ text_size.width, text_size.height);\n// Draw text, and background\ncv::rectangle(vis_img, text_back, roi_color, -1);\n- cv::putText(vis_img,\n- text,\n- origin,\n- font_face,\n- font_scale,\n- cv::Scalar(255, 255, 255),\n- thickness);\n+ cv::putText(vis_img, text, origin, font_face, font_scale,\n+ cv::Scalar(255, 255, 255), thickness);\n}\nreturn vis_img;\n}\n@@ -169,8 +156,7 @@ void ObjectDetector::Preprocess(const 
cv::Mat& ori_im) {\nvoid ObjectDetector::Postprocess(\nconst std::vector<cv::Mat> mats,\nstd::vector<PaddleDetection::ObjectResult> *result,\n- std::vector<int> bbox_num,\n- std::vector<float> output_data_,\n+ std::vector<int> bbox_num, std::vector<float> output_data_,\nbool is_rbox = false) {\nresult->clear();\nint start_idx = 0;\n@@ -226,8 +212,7 @@ void ObjectDetector::Postprocess(\n}\nvoid ObjectDetector::Predict(const std::vector<cv::Mat> imgs,\n- const double threshold,\n- const int warmup,\n+ const double threshold, const int warmup,\nconst int repeats,\nstd::vector<PaddleDetection::ObjectResult> *result,\nstd::vector<int> *bbox_num,\n@@ -255,9 +240,8 @@ void ObjectDetector::Predict(const std::vector<cv::Mat> imgs,\nscale_factor_all[bs_idx * 2] = inputs_.scale_factor_[0];\nscale_factor_all[bs_idx * 2 + 1] = inputs_.scale_factor_[1];\n- // TODO: reduce cost time\n- in_data_all.insert(\n- in_data_all.end(), inputs_.im_data_.begin(), inputs_.im_data_.end());\n+ in_data_all.insert(in_data_all.end(), inputs_.im_data_.begin(),\n+ inputs_.im_data_.end());\n// collect in_net img\nin_net_img_all[bs_idx] = inputs_.in_net_im_;\n@@ -278,8 +262,8 @@ void ObjectDetector::Predict(const std::vector<cv::Mat> imgs,\npad_data.resize(rc * rh * rw);\nfloat *base = pad_data.data();\nfor (int i = 0; i < rc; ++i) {\n- cv::extractChannel(\n- pad_img, cv::Mat(rh, rw, CV_32FC1, base + i * rh * rw), i);\n+ cv::extractChannel(pad_img,\n+ cv::Mat(rh, rw, CV_32FC1, base + i * rh * rw), i);\n}\nin_data_all.insert(in_data_all.end(), pad_data.begin(), pad_data.end());\n}\n@@ -320,8 +304,8 @@ void ObjectDetector::Predict(const std::vector<cv::Mat> imgs,\nfor (int j = 0; j < output_names.size(); j++) {\nauto output_tensor = predictor_->GetOutputHandle(output_names[j]);\nstd::vector<int> output_shape = output_tensor->shape();\n- int out_num = std::accumulate(\n- output_shape.begin(), output_shape.end(), 1, std::multiplies<int>());\n+ int out_num = std::accumulate(output_shape.begin(), output_shape.end(), 1,\n+ std::multiplies<int>());\nif (output_tensor->type() == paddle_infer::DataType::INT32) {\nout_bbox_num_data_.resize(out_num);\noutput_tensor->CopyToCpu(out_bbox_num_data_.data());\n@@ -344,8 +328,8 @@ void ObjectDetector::Predict(const std::vector<cv::Mat> imgs,\nfor (int j = 0; j < output_names.size(); j++) {\nauto output_tensor = predictor_->GetOutputHandle(output_names[j]);\nstd::vector<int> output_shape = output_tensor->shape();\n- int out_num = std::accumulate(\n- output_shape.begin(), output_shape.end(), 1, std::multiplies<int>());\n+ int out_num = std::accumulate(output_shape.begin(), output_shape.end(), 1,\n+ std::multiplies<int>());\noutput_shape_list.push_back(output_shape);\nif (output_tensor->type() == paddle_infer::DataType::INT32) {\nout_bbox_num_data_.resize(out_num);\n@@ -372,21 +356,14 @@ void ObjectDetector::Predict(const std::vector<cv::Mat> imgs,\nreg_max = output_shape_list[i][2] / 4 - 1;\n}\nfloat *buffer = new float[out_tensor_list[i].size()];\n- memcpy(buffer,\n- &out_tensor_list[i][0],\n+ memcpy(buffer, &out_tensor_list[i][0],\nout_tensor_list[i].size() * sizeof(float));\noutput_data_list_.push_back(buffer);\n}\nPaddleDetection::PicoDetPostProcess(\n- result,\n- output_data_list_,\n- config_.fpn_stride_,\n- inputs_.im_shape_,\n- inputs_.scale_factor_,\n- config_.nms_info_[\"score_threshold\"].as<float>(),\n- config_.nms_info_[\"nms_threshold\"].as<float>(),\n- num_class,\n- reg_max);\n+ result, output_data_list_, config_.fpn_stride_, inputs_.im_shape_,\n+ inputs_.scale_factor_, 
config_.nms_info_[\"score_threshold\"].as<float>(),\n+ config_.nms_info_[\"nms_threshold\"].as<float>(), num_class, reg_max);\nbbox_num->push_back(result->size());\n} else {\nis_rbox = output_shape_list[0][output_shape_list[0].size() - 1] % 10 == 0;\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
fix cpp inference trt error when bs>1 (#5518)
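A hedged Python analogue of the C++ fix, using the Paddle Inference API: the TensorRT dynamic-shape ranges must carry the real batch size rather than a hard-coded 1 (model paths and shape bounds are placeholders):

    from paddle.inference import Config

    batch_size = 4
    config = Config('model.pdmodel', 'model.pdiparams')   # placeholder paths
    config.enable_tensorrt_engine(workspace_size=1 << 30,
                                  max_batch_size=batch_size,
                                  min_subgraph_size=3)
    config.set_trt_dynamic_shape_info(
        {'image': [batch_size, 3, 320, 320]},     # min
        {'image': [batch_size, 3, 1280, 1280]},   # max
        {'image': [batch_size, 3, 640, 640]})     # opt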
499,339
30.03.2022 18:53:33
-28,800
eb8e1541822997bad13270430de2b9f9509e3fee
[deploy] fix benchmark repeats
[ { "change_type": "MODIFY", "old_path": "deploy/python/infer.py", "new_path": "deploy/python/infer.py", "diff": "@@ -231,7 +231,7 @@ class Detector(object):\nself.det_times.preprocess_time_s.end()\n# model prediction\n- result = self.predict(repeats=repeats) # warmup\n+ result = self.predict(repeats=50) # warmup\nself.det_times.inference_time_s.start()\nresult = self.predict(repeats=repeats)\nself.det_times.inference_time_s.end(repeats=repeats)\n@@ -790,7 +790,7 @@ def main():\nif FLAGS.image_dir is None and FLAGS.image_file is not None:\nassert FLAGS.batch_size == 1, \"batch_size should be 1, when image_file is not None\"\nimg_list = get_test_images(FLAGS.image_dir, FLAGS.image_file)\n- detector.predict_image(img_list, FLAGS.run_benchmark, repeats=10)\n+ detector.predict_image(img_list, FLAGS.run_benchmark, repeats=100)\nif not FLAGS.run_benchmark:\ndetector.det_times.info(average=True)\nelse:\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
[deploy] fix benchmark repeats (#5526)
499,339
30.03.2022 21:26:17
-28,800
2cd40bdd8236bca312bb4594c45a733a5ddaf735
fix one_hot error
[ { "change_type": "MODIFY", "old_path": "ppdet/modeling/assigners/atss_assigner.py", "new_path": "ppdet/modeling/assigners/atss_assigner.py", "diff": "@@ -199,7 +199,11 @@ class ATSSAssigner(nn.Layer):\ngt_bboxes.reshape([-1, 4]), assigned_gt_index.flatten(), axis=0)\nassigned_bboxes = assigned_bboxes.reshape([batch_size, num_anchors, 4])\n- assigned_scores = F.one_hot(assigned_labels, self.num_classes)\n+ assigned_scores = F.one_hot(assigned_labels, self.num_classes + 1)\n+ ind = list(range(self.num_classes + 1))\n+ ind.remove(bg_index)\n+ assigned_scores = paddle.index_select(\n+ assigned_scores, paddle.to_tensor(ind), axis=-1)\nif pred_bboxes is not None:\n# assigned iou\nious = batch_iou_similarity(gt_bboxes, pred_bboxes) * mask_positive\n" }, { "change_type": "MODIFY", "old_path": "ppdet/modeling/assigners/task_aligned_assigner.py", "new_path": "ppdet/modeling/assigners/task_aligned_assigner.py", "diff": "@@ -143,7 +143,11 @@ class TaskAlignedAssigner(nn.Layer):\ngt_bboxes.reshape([-1, 4]), assigned_gt_index.flatten(), axis=0)\nassigned_bboxes = assigned_bboxes.reshape([batch_size, num_anchors, 4])\n- assigned_scores = F.one_hot(assigned_labels, num_classes)\n+ assigned_scores = F.one_hot(assigned_labels, num_classes + 1)\n+ ind = list(range(num_classes + 1))\n+ ind.remove(bg_index)\n+ assigned_scores = paddle.index_select(\n+ assigned_scores, paddle.to_tensor(ind), axis=-1)\n# rescale alignment metrics\nalignment_metrics *= mask_positive\nmax_metrics_per_instance = alignment_metrics.max(axis=-1, keepdim=True)\n" }, { "change_type": "MODIFY", "old_path": "ppdet/modeling/heads/ppyoloe_head.py", "new_path": "ppdet/modeling/heads/ppyoloe_head.py", "diff": "@@ -331,7 +331,8 @@ class PPYOLOEHead(nn.Layer):\nassigned_bboxes /= stride_tensor\n# cls loss\nif self.use_varifocal_loss:\n- one_hot_label = F.one_hot(assigned_labels, self.num_classes)\n+ one_hot_label = F.one_hot(assigned_labels,\n+ self.num_classes + 1)[..., :-1]\nloss_cls = self._varifocal_loss(pred_scores, assigned_scores,\none_hot_label)\nelse:\n" }, { "change_type": "MODIFY", "old_path": "ppdet/modeling/losses/detr_loss.py", "new_path": "ppdet/modeling/losses/detr_loss.py", "diff": "@@ -80,7 +80,7 @@ class DETRLoss(nn.Layer):\ntarget_label = target_label.reshape([bs, num_query_objects])\nif self.use_focal_loss:\ntarget_label = F.one_hot(target_label,\n- self.num_classes + 1)[:, :, :-1]\n+ self.num_classes + 1)[..., :-1]\nreturn {\n'loss_class': self.loss_coeff['class'] * sigmoid_focal_loss(\nlogits, target_label, num_gts / num_query_objects)\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
fix one_hot error (#5532)
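A minimal sketch of the pattern: one-hot over num_classes + 1 keeps padded background labels in range, and the background column is dropped afterwards (here bg_index is the last class, so slicing off the final column matches the index_select in the diff):

    import paddle
    import paddle.nn.functional as F

    num_classes, bg_index = 3, 3
    labels = paddle.to_tensor([0, 2, bg_index])        # last one is background
    scores = F.one_hot(labels, num_classes + 1)[..., :-1]
    assert scores.shape == [3, num_classes]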
499,339
31.03.2022 13:33:20
-28,800
b7dd8133b382a5e4500bf3a077acf8215e3fd536
[PPYOLOE] update doc, params, flops
[ { "change_type": "MODIFY", "old_path": "ppdet/engine/trainer.py", "new_path": "ppdet/engine/trainer.py", "diff": "@@ -373,7 +373,8 @@ class Trainer(object):\n# enabel auto mixed precision mode\nif self.cfg.get('amp', False):\nscaler = amp.GradScaler(\n- enable=self.cfg.use_gpu or self.cfg.use_npu, init_loss_scaling=1024)\n+ enable=self.cfg.use_gpu or self.cfg.use_npu,\n+ init_loss_scaling=1024)\nself.status.update({\n'epoch_id': self.start_epoch,\n" }, { "change_type": "MODIFY", "old_path": "ppdet/modeling/backbones/cspresnet.py", "new_path": "ppdet/modeling/backbones/cspresnet.py", "diff": "@@ -96,6 +96,8 @@ class RepVggBlock(nn.Layer):\nkernel, bias = self.get_equivalent_kernel_bias()\nself.conv.weight.set_value(kernel)\nself.conv.bias.set_value(bias)\n+ self.__delattr__('conv1')\n+ self.__delattr__('conv2')\ndef get_equivalent_kernel_bias(self):\nkernel3x3, bias3x3 = self._fuse_bn_tensor(self.conv1)\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
[PPYOLOE] update doc, params, flops (#5543)
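The __delattr__ calls matter because deleting the folded branches also removes their parameters from the layer's state dict; a toy paddle.nn sketch (the block below is illustrative, not the real RepVggBlock):

    import paddle.nn as nn

    class Block(nn.Layer):
        def __init__(self):
            super().__init__()
            self.conv = nn.Conv2D(3, 3, 3, padding=1)    # fused conv kept
            self.conv1 = nn.Conv2D(3, 3, 3, padding=1)   # folded away
            self.conv2 = nn.Conv2D(3, 3, 1)              # folded away

    blk = Block()
    blk.__delattr__('conv1')
    blk.__delattr__('conv2')
    assert all(not k.startswith('conv1') for k in blk.state_dict())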
499,333
31.03.2022 19:12:19
-28,800
66e4f30a90eb98b777337177a6c35ee508297415
fix camera id in pphuman
[ { "change_type": "MODIFY", "old_path": "deploy/pphuman/pipeline.py", "new_path": "deploy/pphuman/pipeline.py", "diff": "@@ -400,7 +400,11 @@ class PipePredictor(object):\ncpu_threads, enable_mkldnn)\ndef set_file_name(self, path):\n+ if path is not None:\nself.file_name = os.path.split(path)[-1]\n+ else:\n+ # use camera id\n+ self.file_name = None\ndef get_result(self):\nreturn self.collector.get_res()\n@@ -533,6 +537,11 @@ class PipePredictor(object):\nim = self.visualize_video(frame, mot_res, frame_id,\nfps) # visualize\nwriter.write(im)\n+ if self.file_name is None: # use camera_id\n+ cv2.imshow('PPHuman', im)\n+ if cv2.waitKey(1) & 0xFF == ord('q'):\n+ break\n+\ncontinue\nself.pipeline_res.update(mot_res, 'mot')\n@@ -619,6 +628,10 @@ class PipePredictor(object):\nfps, entrance, records,\ncenter_traj) # visualize\nwriter.write(im)\n+ if self.file_name is None: # use camera_id\n+ cv2.imshow('PPHuman', im)\n+ if cv2.waitKey(1) & 0xFF == ord('q'):\n+ break\nwriter.release()\nprint('save result to {}'.format(out_path))\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
fix camera id in pphuman (#5553)
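A hedged sketch of the camera branch: when no input file name exists, the visualized frame is also shown live and 'q' quits (requires a display; the frame below is a stand-in):

    import cv2
    import numpy as np

    file_name = None                                 # None means camera input
    im = np.zeros((360, 640, 3), dtype=np.uint8)     # stand-in visualized frame
    if file_name is None:
        cv2.imshow('PPHuman', im)
        quit_requested = (cv2.waitKey(1) & 0xFF) == ord('q')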
499,394
02.04.2022 11:18:34
-28,800
18f8eaf8d9b71b0dfd7cf4713aec8843b0c1aed6
enable eval_mot.py to support use_xpu
[ { "change_type": "MODIFY", "old_path": "tools/eval_mot.py", "new_path": "tools/eval_mot.py", "diff": "@@ -108,8 +108,17 @@ def main():\ncheck_gpu(cfg.use_gpu)\ncheck_version()\n- place = 'gpu:{}'.format(ParallelEnv().dev_id) if cfg.use_gpu else 'cpu'\n- place = paddle.set_device(place)\n+ # disable xpu in config by default\n+ if 'use_xpu' not in cfg:\n+ cfg.use_xpu = False\n+\n+ if cfg.use_gpu:\n+ place = paddle.set_device('gpu')\n+ elif cfg.use_xpu:\n+ place = paddle.set_device('xpu')\n+ else:\n+ place = paddle.set_device('cpu')\n+\nrun(FLAGS, cfg)\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
enable eval_mot.py to support use_xpu (#5570)
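A runnable sketch of the device selection added above, with dict keys standing in for the YAML config flags:

    import paddle

    cfg = {'use_gpu': False, 'use_xpu': False}
    if cfg['use_gpu']:
        place = paddle.set_device('gpu')
    elif cfg['use_xpu']:
        place = paddle.set_device('xpu')
    else:
        place = paddle.set_device('cpu')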
499,339
02.04.2022 21:14:27
-28,800
d9b219a7ebf25ced8bb1bcd46bf11b6f287b3393
[PPYOLOE] alter eval_input_size
[ { "change_type": "MODIFY", "old_path": "configs/ppyoloe/_base_/ppyoloe_crn.yml", "new_path": "configs/ppyoloe/_base_/ppyoloe_crn.yml", "diff": "@@ -28,7 +28,6 @@ PPYOLOEHead:\ngrid_cell_offset: 0.5\nstatic_assigner_epoch: 100\nuse_varifocal_loss: True\n- eval_input_size: [640, 640]\nloss_weight: {class: 1.0, iou: 2.5, dfl: 0.5}\nstatic_assigner:\nname: ATSSAssigner\n" }, { "change_type": "MODIFY", "old_path": "configs/ppyoloe/_base_/ppyoloe_reader.yml", "new_path": "configs/ppyoloe/_base_/ppyoloe_reader.yml", "diff": "worker_num: 4\n+eval_height: &eval_height 640\n+eval_width: &eval_width 640\n+eval_size: &eval_size [*eval_height, *eval_width]\n+\nTrainReader:\nsample_transforms:\n- Decode: {}\n@@ -20,17 +24,17 @@ TrainReader:\nEvalReader:\nsample_transforms:\n- Decode: {}\n- - Resize: {target_size: [640, 640], keep_ratio: False, interp: 2}\n+ - Resize: {target_size: *eval_size, keep_ratio: False, interp: 2}\n- NormalizeImage: {mean: [0.485, 0.456, 0.406], std: [0.229, 0.224, 0.225], is_scale: True}\n- Permute: {}\nbatch_size: 2\nTestReader:\ninputs_def:\n- image_shape: [3, 640, 640]\n+ image_shape: [3, *eval_height, *eval_width]\nsample_transforms:\n- Decode: {}\n- - Resize: {target_size: [640, 640], keep_ratio: False, interp: 2}\n+ - Resize: {target_size: *eval_size, keep_ratio: False, interp: 2}\n- NormalizeImage: {mean: [0.485, 0.456, 0.406], std: [0.229, 0.224, 0.225], is_scale: True}\n- Permute: {}\nbatch_size: 1\n" }, { "change_type": "MODIFY", "old_path": "ppdet/modeling/heads/ppyoloe_head.py", "new_path": "ppdet/modeling/heads/ppyoloe_head.py", "diff": "@@ -45,7 +45,7 @@ class ESEAttn(nn.Layer):\n@register\nclass PPYOLOEHead(nn.Layer):\n- __shared__ = ['num_classes', 'trt', 'exclude_nms']\n+ __shared__ = ['num_classes', 'eval_size', 'trt', 'exclude_nms']\n__inject__ = ['static_assigner', 'assigner', 'nms']\ndef __init__(self,\n@@ -61,7 +61,7 @@ class PPYOLOEHead(nn.Layer):\nstatic_assigner='ATSSAssigner',\nassigner='TaskAlignedAssigner',\nnms='MultiClassNMS',\n- eval_input_size=[],\n+ eval_size=None,\nloss_weight={\n'class': 1.0,\n'iou': 2.5,\n@@ -80,7 +80,7 @@ class PPYOLOEHead(nn.Layer):\nself.iou_loss = GIoULoss()\nself.loss_weight = loss_weight\nself.use_varifocal_loss = use_varifocal_loss\n- self.eval_input_size = eval_input_size\n+ self.eval_size = eval_size\nself.static_assigner_epoch = static_assigner_epoch\nself.static_assigner = static_assigner\n@@ -127,7 +127,7 @@ class PPYOLOEHead(nn.Layer):\nself.proj.reshape([1, self.reg_max + 1, 1, 1]))\nself.proj_conv.weight.stop_gradient = True\n- if self.eval_input_size:\n+ if self.eval_size:\nanchor_points, stride_tensor = self._generate_anchors()\nself.register_buffer('anchor_points', anchor_points)\nself.register_buffer('stride_tensor', stride_tensor)\n@@ -164,8 +164,8 @@ class PPYOLOEHead(nn.Layer):\nif feats is not None:\n_, _, h, w = feats[i].shape\nelse:\n- h = int(self.eval_input_size[0] / stride)\n- w = int(self.eval_input_size[1] / stride)\n+ h = int(self.eval_size[0] / stride)\n+ w = int(self.eval_size[1] / stride)\nshift_x = paddle.arange(end=w) + self.grid_cell_offset\nshift_y = paddle.arange(end=h) + self.grid_cell_offset\nshift_y, shift_x = paddle.meshgrid(shift_y, shift_x)\n@@ -181,7 +181,7 @@ class PPYOLOEHead(nn.Layer):\nreturn anchor_points, stride_tensor\ndef forward_eval(self, feats):\n- if self.eval_input_size:\n+ if self.eval_size:\nanchor_points, stride_tensor = self.anchor_points, self.stride_tensor\nelse:\nanchor_points, stride_tensor = self._generate_anchors(feats)\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
[PPYOLOE] alter eval_input_size (#5569)
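Fixing eval_size lets the head build its anchor grid once at init instead of on every forward pass; a plain-Python sketch of the grid construction (sizes illustrative; mirrors _generate_anchors in the diff):

    eval_size, strides, offset = (640, 640), [8, 16, 32], 0.5
    anchor_points, stride_tensor = [], []
    for s in strides:
        h, w = eval_size[0] // s, eval_size[1] // s
        for y in range(h):
            for x in range(w):
                anchor_points.append((x + offset, y + offset))
                stride_tensor.append(s)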
499,319
02.04.2022 22:32:30
-28,800
28f791573ba12cb9954df26706973d2cabbbe141
[picodet]openvino benchmark
[ { "change_type": "MODIFY", "old_path": "configs/picodet/README_en.md", "new_path": "configs/picodet/README_en.md", "diff": "@@ -33,20 +33,20 @@ We developed a series of lightweight models, named `PP-PicoDet`. Because of the\n| Model | Input size | mAP<sup>val<br>0.5:0.95 | mAP<sup>val<br>0.5 | Params<br><sup>(M) | FLOPS<br><sup>(G) | Latency<sup><small>[CPU](#latency)</small><sup><br><sup>(ms) | Latency<sup><small>[Lite](#latency)</small><sup><br><sup>(ms) | Download | Config |\n| :-------- | :--------: | :---------------------: | :----------------: | :----------------: | :---------------: | :-----------------------------: | :-----------------------------: | :----------------------------------------: | :--------------------------------------- |\n-| PicoDet-XS | 320*320 | 23.5 | 36.1 | 0.70 | 0.67 | 10.9ms | 7.81ms | [model](https://paddledet.bj.bcebos.com/models/picodet_xs_320_coco_lcnet.pdparams) &#124; [log](https://paddledet.bj.bcebos.com/logs/train_picodet_xs_320_coco_lcnet.log) | [config](https://github.com/PaddlePaddle/PaddleDetection/tree/develop/configs/picodet/picodet_xs_320_coco_lcnet.yml) |\n-| PicoDet-XS | 416*416 | 26.2 | 39.3 | 0.70 | 1.13 | 15.4ms | 12.38ms | [model](https://paddledet.bj.bcebos.com/models/picodet_xs_416_coco_lcnet.pdparams) &#124; [log](https://paddledet.bj.bcebos.com/logs/train_picodet_xs_416_coco_lcnet.log) | [config](https://github.com/PaddlePaddle/PaddleDetection/tree/develop/configs/picodet/picodet_xs_416_coco_lcnet.yml) |\n-| PicoDet-S | 320*320 | 29.1 | 43.4 | 1.18 | 0.97 | 12.6ms | 9.56ms | [model](https://paddledet.bj.bcebos.com/models/picodet_s_320_coco_lcnet.pdparams) &#124; [log](https://paddledet.bj.bcebos.com/logs/train_picodet_s_320_coco_lcnet.log) | [config](https://github.com/PaddlePaddle/PaddleDetection/tree/develop/configs/picodet/picodet_s_320_coco_lcnet.yml) |\n-| PicoDet-S | 416*416 | 32.5 | 47.6 | 1.18 | 1.65 | 17.2ms | 15.20ms | [model](https://paddledet.bj.bcebos.com/models/picodet_s_416_coco_lcnet.pdparams) &#124; [log](https://paddledet.bj.bcebos.com/logs/train_picodet_s_416_coco_lcnet.log) | [config](https://github.com/PaddlePaddle/PaddleDetection/tree/develop/configs/picodet/picodet_s_416_coco_lcnet.yml) |\n-| PicoDet-M | 320*320 | 34.4 | 50.0 | 3.46 | 2.57 | 14.5ms | 17.68ms | [model](https://paddledet.bj.bcebos.com/models/picodet_m_320_coco_lcnet.pdparams) &#124; [log](https://paddledet.bj.bcebos.com/logs/train_picodet_m_320_coco_lcnet.log) | [config](https://github.com/PaddlePaddle/PaddleDetection/tree/develop/configs/picodet/picodet_m_320_coco_lcnet.yml) |\n-| PicoDet-M | 416*416 | 37.5 | 53.4 | 3.46 | 4.34 | 19.5ms | 28.39ms | [model](https://paddledet.bj.bcebos.com/models/picodet_m_416_coco_lcnet.pdparams) &#124; [log](https://paddledet.bj.bcebos.com/logs/train_picodet_m_416_coco_lcnet.log) | [config](https://github.com/PaddlePaddle/PaddleDetection/tree/develop/configs/picodet/picodet_m_416_coco_lcnet.yml) |\n-| PicoDet-L | 320*320 | 36.1 | 52.0 | 5.80 | 4.20 | 18.3ms | 25.21ms | [model](https://paddledet.bj.bcebos.com/models/picodet_l_320_coco_lcnet.pdparams) &#124; [log](https://paddledet.bj.bcebos.com/logs/train_picodet_l_320_coco_lcnet.log) | [config](https://github.com/PaddlePaddle/PaddleDetection/tree/develop/configs/picodet/picodet_l_320_coco_lcnet.yml) |\n-| PicoDet-L | 416*416 | 39.4 | 55.7 | 5.80 | 7.10 | 22.1ms | 42.23ms | [model](https://paddledet.bj.bcebos.com/models/picodet_l_416_coco_lcnet.pdparams) &#124; [log](https://paddledet.bj.bcebos.com/logs/train_picodet_l_416_coco_lcnet.log) | 
[config](https://github.com/PaddlePaddle/PaddleDetection/tree/develop/configs/picodet/picodet_l_416_coco_lcnet.yml) |\n-| PicoDet-L | 640*640 | 42.6 | 59.2 | 5.80 | 16.81 | 43.1ms | 108.1ms | [model](https://paddledet.bj.bcebos.com/models/picodet_l_640_coco_lcnet.pdparams) &#124; [log](https://paddledet.bj.bcebos.com/logs/train_picodet_l_640_coco_lcnet.log) | [config](https://github.com/PaddlePaddle/PaddleDetection/tree/develop/configs/picodet/picodet_l_640_coco_lcnet.yml) |\n+| PicoDet-XS | 320*320 | 23.5 | 36.1 | 0.70 | 0.67 | 3.9ms | 7.81ms | [model](https://paddledet.bj.bcebos.com/models/picodet_xs_320_coco_lcnet.pdparams) &#124; [log](https://paddledet.bj.bcebos.com/logs/train_picodet_xs_320_coco_lcnet.log) | [config](https://github.com/PaddlePaddle/PaddleDetection/tree/develop/configs/picodet/picodet_xs_320_coco_lcnet.yml) |\n+| PicoDet-XS | 416*416 | 26.2 | 39.3 | 0.70 | 1.13 | 6.1ms | 12.38ms | [model](https://paddledet.bj.bcebos.com/models/picodet_xs_416_coco_lcnet.pdparams) &#124; [log](https://paddledet.bj.bcebos.com/logs/train_picodet_xs_416_coco_lcnet.log) | [config](https://github.com/PaddlePaddle/PaddleDetection/tree/develop/configs/picodet/picodet_xs_416_coco_lcnet.yml) |\n+| PicoDet-S | 320*320 | 29.1 | 43.4 | 1.18 | 0.97 | 4.8ms | 9.56ms | [model](https://paddledet.bj.bcebos.com/models/picodet_s_320_coco_lcnet.pdparams) &#124; [log](https://paddledet.bj.bcebos.com/logs/train_picodet_s_320_coco_lcnet.log) | [config](https://github.com/PaddlePaddle/PaddleDetection/tree/develop/configs/picodet/picodet_s_320_coco_lcnet.yml) |\n+| PicoDet-S | 416*416 | 32.5 | 47.6 | 1.18 | 1.65 | 6.6ms | 15.20ms | [model](https://paddledet.bj.bcebos.com/models/picodet_s_416_coco_lcnet.pdparams) &#124; [log](https://paddledet.bj.bcebos.com/logs/train_picodet_s_416_coco_lcnet.log) | [config](https://github.com/PaddlePaddle/PaddleDetection/tree/develop/configs/picodet/picodet_s_416_coco_lcnet.yml) |\n+| PicoDet-M | 320*320 | 34.4 | 50.0 | 3.46 | 2.57 | 8.2ms | 17.68ms | [model](https://paddledet.bj.bcebos.com/models/picodet_m_320_coco_lcnet.pdparams) &#124; [log](https://paddledet.bj.bcebos.com/logs/train_picodet_m_320_coco_lcnet.log) | [config](https://github.com/PaddlePaddle/PaddleDetection/tree/develop/configs/picodet/picodet_m_320_coco_lcnet.yml) |\n+| PicoDet-M | 416*416 | 37.5 | 53.4 | 3.46 | 4.34 | 12.7ms | 28.39ms | [model](https://paddledet.bj.bcebos.com/models/picodet_m_416_coco_lcnet.pdparams) &#124; [log](https://paddledet.bj.bcebos.com/logs/train_picodet_m_416_coco_lcnet.log) | [config](https://github.com/PaddlePaddle/PaddleDetection/tree/develop/configs/picodet/picodet_m_416_coco_lcnet.yml) |\n+| PicoDet-L | 320*320 | 36.1 | 52.0 | 5.80 | 4.20 | 11.5ms | 25.21ms | [model](https://paddledet.bj.bcebos.com/models/picodet_l_320_coco_lcnet.pdparams) &#124; [log](https://paddledet.bj.bcebos.com/logs/train_picodet_l_320_coco_lcnet.log) | [config](https://github.com/PaddlePaddle/PaddleDetection/tree/develop/configs/picodet/picodet_l_320_coco_lcnet.yml) |\n+| PicoDet-L | 416*416 | 39.4 | 55.7 | 5.80 | 7.10 | 20.7ms | 42.23ms | [model](https://paddledet.bj.bcebos.com/models/picodet_l_416_coco_lcnet.pdparams) &#124; [log](https://paddledet.bj.bcebos.com/logs/train_picodet_l_416_coco_lcnet.log) | [config](https://github.com/PaddlePaddle/PaddleDetection/tree/develop/configs/picodet/picodet_l_416_coco_lcnet.yml) |\n+| PicoDet-L | 640*640 | 42.6 | 59.2 | 5.80 | 16.81 | 62.5ms | 108.1ms | [model](https://paddledet.bj.bcebos.com/models/picodet_l_640_coco_lcnet.pdparams) &#124; 
[log](https://paddledet.bj.bcebos.com/logs/train_picodet_l_640_coco_lcnet.log) | [config](https://github.com/PaddlePaddle/PaddleDetection/tree/develop/configs/picodet/picodet_l_640_coco_lcnet.yml) |\n<details open>\n<summary><b>Table Notes:</b></summary>\n-- <a name=\"latency\">Latency:</a> All our models test on `Intel-Xeon-Gold-6148` CPU with MKLDNN by 10 threads and `Qualcomm Snapdragon 865(4xA77+4xA55)` with 4 threads by arm8 and with FP16. In the above table, test CPU latency on Paddle-Inference and testing Mobile latency with `Lite`->[Paddle-Lite](https://github.com/PaddlePaddle/Paddle-Lite).\n+- <a name=\"latency\">Latency:</a> All our models test on `Intel core i7 10750H` CPU with MKLDNN by 12 threads and `Qualcomm Snapdragon 865(4xA77+4xA55)` with 4 threads by arm8 and with FP16. In the above table, test CPU latency on Paddle-Inference and testing Mobile latency with `Lite`->[Paddle-Lite](https://github.com/PaddlePaddle/Paddle-Lite).\n- PicoDet is trained on COCO train2017 dataset and evaluated on COCO val2017. And PicoDet used 4 GPUs for training and all checkpoints are trained with default settings and hyperparameters.\n- Benchmark test: When testing the speed benchmark, the post-processing is not included in the exported model, you need to set `-o export.benchmark=True` or manually modify [runtime.yml](https://github.com/PaddlePaddle/PaddleDetection/blob/develop/configs/runtime.yml#L12).\n@@ -138,7 +138,7 @@ Detail also can refer to [Quick start guide](https://github.com/PaddlePaddle/Pad\ncd PaddleDetection\npython tools/export_model.py -c configs/picodet/picodet_s_320_coco_lcnet.yml \\\n-o weights=https://paddledet.bj.bcebos.com/models/picodet_s_320_coco_lcnet.pdparams \\\n- --output_dir=inference_model\n+ --output_dir=output_inference\n```\n- If no post processing is required, please specify: `-o export.benchmark=True` (if -o has already appeared, delete -o here) or manually modify corresponding fields in [runtime.yml](https://github.com/PaddlePaddle/PaddleDetection/blob/develop/configs/runtime.yml).\n@@ -160,9 +160,9 @@ pip install paddlelite\n```shell\n# FP32\n-paddle_lite_opt --model_dir=inference_model/picodet_s_320_coco_lcnet --valid_targets=arm --optimize_out=picodet_s_320_coco_fp32\n+paddle_lite_opt --model_dir=output_inference/picodet_s_320_coco_lcnet --valid_targets=arm --optimize_out=picodet_s_320_coco_fp32\n# FP16\n-paddle_lite_opt --model_dir=inference_model/picodet_s_320_coco_lcnet --valid_targets=arm --optimize_out=picodet_s_320_coco_fp16 --enable_fp16=true\n+paddle_lite_opt --model_dir=output_inference/picodet_s_320_coco_lcnet --valid_targets=arm --optimize_out=picodet_s_320_coco_fp16 --enable_fp16=true\n```\n</details>\n@@ -202,19 +202,17 @@ paddle2onnx --model_dir output_inference/picodet_s_320_coco_lcnet/ \\\n- Deploy models\n-| Model | Input size | ONNX | Paddle Lite(fp32) | Paddle Lite(fp16) |\n+| Model | Input size | ONNX(w/o postprocess) | Paddle Lite(fp32) | Paddle Lite(fp16) |\n| :-------- | :--------: | :---------------------: | :----------------: | :----------------: |\n-| PicoDet-S | 320*320 | [model](https://paddledet.bj.bcebos.com/deploy/third_engine/picodet_s_320_coco.onnx) | [model](https://paddledet.bj.bcebos.com/deploy/paddlelite/picodet_s_320.tar) | [model](https://paddledet.bj.bcebos.com/deploy/paddlelite/picodet_s_320_fp16.tar) |\n-| PicoDet-S | 416*416 | [model](https://paddledet.bj.bcebos.com/deploy/third_engine/picodet_s_416_coco.onnx) | [model](https://paddledet.bj.bcebos.com/deploy/paddlelite/picodet_s_416.tar) | 
[model](https://paddledet.bj.bcebos.com/deploy/paddlelite/picodet_s_416_fp16.tar) |\n-| PicoDet-M | 320*320 | [model](https://paddledet.bj.bcebos.com/deploy/third_engine/picodet_m_320_coco.onnx) | [model](https://paddledet.bj.bcebos.com/deploy/paddlelite/picodet_m_320.tar) | [model](https://paddledet.bj.bcebos.com/deploy/paddlelite/picodet_m_320_fp16.tar) |\n-| PicoDet-M | 416*416 | [model](https://paddledet.bj.bcebos.com/deploy/third_engine/picodet_m_416_coco.onnx) | [model](https://paddledet.bj.bcebos.com/deploy/paddlelite/picodet_m_416.tar) | [model](https://paddledet.bj.bcebos.com/deploy/paddlelite/picodet_m_416_fp16.tar) |\n-| PicoDet-L | 320*320 | [model](https://paddledet.bj.bcebos.com/deploy/third_engine/picodet_l_320_coco.onnx) | [model](https://paddledet.bj.bcebos.com/deploy/paddlelite/picodet_l_320.tar) | [model](https://paddledet.bj.bcebos.com/deploy/paddlelite/picodet_l_320_fp16.tar) |\n-| PicoDet-L | 416*416 | [model](https://paddledet.bj.bcebos.com/deploy/third_engine/picodet_l_416_coco.onnx) | [model](https://paddledet.bj.bcebos.com/deploy/paddlelite/picodet_l_416.tar) | [model](https://paddledet.bj.bcebos.com/deploy/paddlelite/picodet_l_416_fp16.tar) |\n+| PicoDet-XS | 320*320 | [model](https://paddledet.bj.bcebos.com/deploy/third_engine/picodet_xs_320_coco_lcnet.onnx) | [model](https://paddledet.bj.bcebos.com/deploy/paddlelite/picodet_l_416.tar) | [model](https://paddledet.bj.bcebos.com/deploy/paddlelite/picodet_l_416_fp16.tar) |\n+| PicoDet-XS | 416*416 | [model](https://paddledet.bj.bcebos.com/deploy/third_engine/picodet_xs_416_coco_lcnet.onnx) | [model](https://paddledet.bj.bcebos.com/deploy/paddlelite/picodet_l_416.tar) | [model](https://paddledet.bj.bcebos.com/deploy/paddlelite/picodet_l_416_fp16.tar) |\n+| PicoDet-S | 320*320 | [model](https://paddledet.bj.bcebos.com/deploy/third_engine/picodet_s_320_coco_lcnet.onnx) | [model](https://paddledet.bj.bcebos.com/deploy/paddlelite/picodet_s_320.tar) | [model](https://paddledet.bj.bcebos.com/deploy/paddlelite/picodet_s_320_fp16.tar) |\n+| PicoDet-S | 416*416 | [model](https://paddledet.bj.bcebos.com/deploy/third_engine/picodet_s_416_coco_lcnet.onnx) | [model](https://paddledet.bj.bcebos.com/deploy/paddlelite/picodet_s_416.tar) | [model](https://paddledet.bj.bcebos.com/deploy/paddlelite/picodet_s_416_fp16.tar) |\n+| PicoDet-M | 320*320 | [model](https://paddledet.bj.bcebos.com/deploy/third_engine/picodet_m_320_coco_lcnet.onnx) | [model](https://paddledet.bj.bcebos.com/deploy/paddlelite/picodet_m_320.tar) | [model](https://paddledet.bj.bcebos.com/deploy/paddlelite/picodet_m_320_fp16.tar) |\n+| PicoDet-M | 416*416 | [model](https://paddledet.bj.bcebos.com/deploy/third_engine/picodet_m_416_coco_lcnet.onnx) | [model](https://paddledet.bj.bcebos.com/deploy/paddlelite/picodet_m_416.tar) | [model](https://paddledet.bj.bcebos.com/deploy/paddlelite/picodet_m_416_fp16.tar) |\n+| PicoDet-L | 320*320 | [model](https://paddledet.bj.bcebos.com/deploy/third_engine/picodet_l_320_coco_lcnet.onnx) | [model](https://paddledet.bj.bcebos.com/deploy/paddlelite/picodet_l_320.tar) | [model](https://paddledet.bj.bcebos.com/deploy/paddlelite/picodet_l_320_fp16.tar) |\n+| PicoDet-L | 416*416 | [model](https://paddledet.bj.bcebos.com/deploy/third_engine/picodet_l_416_coco_lcnet.onnx) | [model](https://paddledet.bj.bcebos.com/deploy/paddlelite/picodet_l_416.tar) | [model](https://paddledet.bj.bcebos.com/deploy/paddlelite/picodet_l_416_fp16.tar) |\n| PicoDet-L | 640*640 | 
[model](https://paddledet.bj.bcebos.com/deploy/third_engine/picodet_l_640_coco.onnx) | [model](https://paddledet.bj.bcebos.com/deploy/paddlelite/picodet_l_640.tar) | [model](https://paddledet.bj.bcebos.com/deploy/paddlelite/picodet_l_640_fp16.tar) |\n-| PicoDet-Shufflenetv2 1x | 416*416 | [model](https://paddledet.bj.bcebos.com/deploy/third_engine/picodet_shufflenetv2_1x_416_coco.onnx) | [model](https://paddledet.bj.bcebos.com/deploy/paddlelite/picodet_shufflenetv2_1x.tar) | [model](https://paddledet.bj.bcebos.com/deploy/paddlelite/picodet_shufflenetv2_1x_fp16.tar) |\n-| PicoDet-MobileNetv3-large 1x | 416*416 | [model](https://paddledet.bj.bcebos.com/deploy/third_engine/picodet_mobilenetv3_large_1x_416_coco.onnx) | [model](https://paddledet.bj.bcebos.com/deploy/paddlelite/picodet_mobilenetv3_large_1x.tar) | [model](https://paddledet.bj.bcebos.com/deploy/paddlelite/picodet_mobilenetv3_large_1x_fp16.tar) |\n-| PicoDet-LCNet 1.5x | 416*416 | [model](https://paddledet.bj.bcebos.com/deploy/third_engine/picodet_lcnet_1_5x_416_coco.onnx) | [model](https://paddledet.bj.bcebos.com/deploy/paddlelite/picodet_lcnet_1_5x.tar) | [model](https://paddledet.bj.bcebos.com/deploy/paddlelite/picodet_lcnet_1_5x_fp16.tar) |\n-\n### Deploy\n" }, { "change_type": "ADD", "old_path": null, "new_path": "deploy/third_engine/demo_openvino/python/openvino_benchmark.py", "diff": "+# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n+import cv2\n+import numpy as np\n+import time\n+import argparse\n+from openvino.runtime import Core\n+\n+\n+def image_preprocess_mobilenetv3(img_path, re_shape):\n+ img = cv2.imread(img_path)\n+ img = cv2.resize(\n+ img, (re_shape, re_shape), interpolation=cv2.INTER_LANCZOS4)\n+ img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n+ img = np.transpose(img, [2, 0, 1]) / 255\n+ img = np.expand_dims(img, 0)\n+ img_mean = np.array([0.485, 0.456, 0.406]).reshape((3, 1, 1))\n+ img_std = np.array([0.229, 0.224, 0.225]).reshape((3, 1, 1))\n+ img -= img_mean\n+ img /= img_std\n+ return img.astype(np.float32)\n+\n+\n+def benchmark(img_file, onnx_file, re_shape):\n+\n+ ie = Core()\n+ net = ie.read_model(onnx_file)\n+\n+ test_image = image_preprocess_mobilenetv3(img_file, re_shape)\n+\n+ compiled_model = ie.compile_model(net, 'CPU')\n+\n+ # benchmark\n+ loop_num = 100\n+ warm_up = 8\n+ timeall = 0\n+ time_min = float(\"inf\")\n+ time_max = float('-inf')\n+\n+ for i in range(loop_num + warm_up):\n+ time0 = time.time()\n+ #perform the inference step\n+\n+ output = compiled_model.infer_new_request({0: test_image})\n+ time1 = time.time()\n+ timed = time1 - time0\n+\n+ if i >= warm_up:\n+ timeall = timeall + timed\n+ time_min = min(time_min, timed)\n+ time_max = max(time_max, timed)\n+\n+ time_avg = timeall / loop_num\n+\n+ print(\n+ f'inference_time(ms): min={round(time_min*1000, 2)}, max = {round(time_max*1000, 1)}, avg = {round(time_avg*1000, 1)}'\n+ )\n+\n+\n+if __name__ == '__main__':\n+\n+ onnx_path = \"out_onnx\"\n+ onnx_file = 
onnx_path + \"/picodet_s_320_coco.onnx\"\n+\n+ parser = argparse.ArgumentParser()\n+ parser.add_argument(\n+ '--img_path',\n+ type=str,\n+ default='demo/000000570688.jpg',\n+ help=\"image path\")\n+ parser.add_argument(\n+ '--onnx_path',\n+ type=str,\n+ default='out_onnxsim/picodet_xs_320_coco_lcnet.onnx',\n+ help=\"onnx filepath\")\n+ parser.add_argument('--in_shape', type=int, default=320, help=\"input_size\")\n+\n+ args = parser.parse_args()\n+ benchmark(args.img_path, args.onnx_path, args.in_shape)\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
[picodet] openvino benchmark (#5578)
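The commit above refreshes the CPU latency numbers and adds an OpenVINO benchmark script. For quick reference, here is a minimal sketch of the warm-up-then-measure pattern that script follows, using the same `openvino.runtime` API; the model path and input shape are placeholders, not files shipped with the repo.

```python
# Minimal sketch of the benchmark pattern from this commit: warm-up
# iterations are discarded, then min/max/avg latency is reported.
# "model.onnx" and the 320x320 blob are placeholders (assumptions).
import time
import numpy as np
from openvino.runtime import Core

core = Core()
compiled = core.compile_model(core.read_model("model.onnx"), "CPU")
dummy = np.random.rand(1, 3, 320, 320).astype(np.float32)

warm_up, loops, times = 8, 100, []
for i in range(warm_up + loops):
    start = time.time()
    compiled.infer_new_request({0: dummy})
    if i >= warm_up:                      # discard warm-up runs
        times.append(time.time() - start)

print("inference_time(ms): min={:.2f}, max={:.1f}, avg={:.1f}".format(
    min(times) * 1e3, max(times) * 1e3, sum(times) / len(times) * 1e3))
```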
499,298
05.04.2022 21:08:51
-28,800
cf1b5ce991cd435fdcf8a84d117e119d5d2eb919
[MOT] support xpu npu MOT infer
[ { "change_type": "MODIFY", "old_path": "tools/eval_mot.py", "new_path": "tools/eval_mot.py", "diff": "@@ -28,11 +28,11 @@ import warnings\nwarnings.filterwarnings('ignore')\nimport paddle\n-from paddle.distributed import ParallelEnv\n+\nfrom ppdet.core.workspace import load_config, merge_config\n-from ppdet.engine import Tracker\n-from ppdet.utils.check import check_gpu, check_version, check_config\n+from ppdet.utils.check import check_gpu, check_npu, check_xpu, check_version, check_config\nfrom ppdet.utils.cli import ArgsParser\n+from ppdet.engine import Tracker\ndef parse_args():\n@@ -104,9 +104,9 @@ def main():\ncfg = load_config(FLAGS.config)\nmerge_config(FLAGS.opt)\n- check_config(cfg)\n- check_gpu(cfg.use_gpu)\n- check_version()\n+ # disable npu in config by default\n+ if 'use_npu' not in cfg:\n+ cfg.use_npu = False\n# disable xpu in config by default\nif 'use_xpu' not in cfg:\n@@ -114,11 +114,22 @@ def main():\nif cfg.use_gpu:\nplace = paddle.set_device('gpu')\n+ elif cfg.use_npu:\n+ place = paddle.set_device('npu')\nelif cfg.use_xpu:\nplace = paddle.set_device('xpu')\nelse:\nplace = paddle.set_device('cpu')\n+ if 'norm_type' in cfg and cfg['norm_type'] == 'sync_bn' and not cfg.use_gpu:\n+ cfg['norm_type'] = 'bn'\n+\n+ check_config(cfg)\n+ check_gpu(cfg.use_gpu)\n+ check_npu(cfg.use_npu)\n+ check_xpu(cfg.use_xpu)\n+ check_version()\n+\nrun(FLAGS, cfg)\n" }, { "change_type": "MODIFY", "old_path": "tools/infer_mot.py", "new_path": "tools/infer_mot.py", "diff": "@@ -28,10 +28,9 @@ import warnings\nwarnings.filterwarnings('ignore')\nimport paddle\n-from paddle.distributed import ParallelEnv\nfrom ppdet.core.workspace import load_config, merge_config\nfrom ppdet.engine import Tracker\n-from ppdet.utils.check import check_gpu, check_version, check_config\n+from ppdet.utils.check import check_gpu, check_npu, check_xpu, check_version, check_config\nfrom ppdet.utils.cli import ArgsParser\n@@ -117,12 +116,32 @@ def main():\ncfg = load_config(FLAGS.config)\nmerge_config(FLAGS.opt)\n+ # disable npu in config by default\n+ if 'use_npu' not in cfg:\n+ cfg.use_npu = False\n+\n+ # disable xpu in config by default\n+ if 'use_xpu' not in cfg:\n+ cfg.use_xpu = False\n+\n+ if cfg.use_gpu:\n+ place = paddle.set_device('gpu')\n+ elif cfg.use_npu:\n+ place = paddle.set_device('npu')\n+ elif cfg.use_xpu:\n+ place = paddle.set_device('xpu')\n+ else:\n+ place = paddle.set_device('cpu')\n+\n+ if 'norm_type' in cfg and cfg['norm_type'] == 'sync_bn' and not cfg.use_gpu:\n+ cfg['norm_type'] = 'bn'\n+\ncheck_config(cfg)\ncheck_gpu(cfg.use_gpu)\n+ check_npu(cfg.use_npu)\n+ check_xpu(cfg.use_xpu)\ncheck_version()\n- place = 'gpu:{}'.format(ParallelEnv().dev_id) if cfg.use_gpu else 'cpu'\n- place = paddle.set_device(place)\nrun(FLAGS, cfg)\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
[MOT] support xpu npu MOT infer (#5584)
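The device-selection logic that this commit duplicates into both MOT tools can be condensed as below. This is a hedged sketch rather than the repo's code: a plain dict stands in for the loaded config object, and the helper name `set_place` is invented for illustration.

```python
# Sketch of the shared device-selection logic added to eval_mot.py and
# infer_mot.py; cfg is a plain dict standing in for the ppdet config.
import paddle

def set_place(cfg):
    cfg.setdefault('use_npu', False)   # npu/xpu stay disabled unless the
    cfg.setdefault('use_xpu', False)   # config enables them explicitly
    if cfg.get('use_gpu'):
        device = 'gpu'
    elif cfg['use_npu']:
        device = 'npu'
    elif cfg['use_xpu']:
        device = 'xpu'
    else:
        device = 'cpu'
    # sync_bn needs GPU collectives, so fall back to plain bn elsewhere
    if cfg.get('norm_type') == 'sync_bn' and device != 'gpu':
        cfg['norm_type'] = 'bn'
    return paddle.set_device(device)
```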
499,304
06.04.2022 11:28:02
-28,800
2fe432558ef1b497c5ae3f73bddb1ed4e22df3ae
update picodet inference model
[ { "change_type": "MODIFY", "old_path": "configs/picodet/legacy_model/README.md", "new_path": "configs/picodet/legacy_model/README.md", "diff": "</details>\n+- Deploy models\n+\n+| Model | Input size | ONNX | Paddle Lite(fp32) | Paddle Lite(fp16) |\n+| :-------- | :--------: | :---------------------: | :----------------: | :----------------: |\n+| PicoDet-S | 320*320 | [model](https://paddledet.bj.bcebos.com/deploy/third_engine/picodet_s_320_coco.onnx) | [model](https://paddledet.bj.bcebos.com/deploy/paddlelite/picodet_s_320.tar) | [model](https://paddledet.bj.bcebos.com/deploy/paddlelite/picodet_s_320_fp16.tar) |\n+| PicoDet-S | 416*416 | [model](https://paddledet.bj.bcebos.com/deploy/third_engine/picodet_s_416_coco.onnx) | [model](https://paddledet.bj.bcebos.com/deploy/paddlelite/picodet_s_416.tar) | [model](https://paddledet.bj.bcebos.com/deploy/paddlelite/picodet_s_416_fp16.tar) |\n+| PicoDet-M | 320*320 | [model](https://paddledet.bj.bcebos.com/deploy/third_engine/picodet_m_320_coco.onnx) | [model](https://paddledet.bj.bcebos.com/deploy/paddlelite/picodet_m_320.tar) | [model](https://paddledet.bj.bcebos.com/deploy/paddlelite/picodet_m_320_fp16.tar) |\n+| PicoDet-M | 416*416 | [model](https://paddledet.bj.bcebos.com/deploy/third_engine/picodet_m_416_coco.onnx) | [model](https://paddledet.bj.bcebos.com/deploy/paddlelite/picodet_m_416.tar) | [model](https://paddledet.bj.bcebos.com/deploy/paddlelite/picodet_m_416_fp16.tar) |\n+| PicoDet-L | 320*320 | [model](https://paddledet.bj.bcebos.com/deploy/third_engine/picodet_l_320_coco.onnx) | [model](https://paddledet.bj.bcebos.com/deploy/paddlelite/picodet_l_320.tar) | [model](https://paddledet.bj.bcebos.com/deploy/paddlelite/picodet_l_320_fp16.tar) |\n+| PicoDet-L | 416*416 | [model](https://paddledet.bj.bcebos.com/deploy/third_engine/picodet_l_416_coco.onnx) | [model](https://paddledet.bj.bcebos.com/deploy/paddlelite/picodet_l_416.tar) | [model](https://paddledet.bj.bcebos.com/deploy/paddlelite/picodet_l_416_fp16.tar) |\n+| PicoDet-L | 640*640 | [model](https://paddledet.bj.bcebos.com/deploy/third_engine/picodet_l_640_coco.onnx) | [model](https://paddledet.bj.bcebos.com/deploy/paddlelite/picodet_l_640.tar) | [model](https://paddledet.bj.bcebos.com/deploy/paddlelite/picodet_l_640_fp16.tar) |\n+| PicoDet-Shufflenetv2 1x | 416*416 | [model](https://paddledet.bj.bcebos.com/deploy/third_engine/picodet_shufflenetv2_1x_416_coco.onnx) | [model](https://paddledet.bj.bcebos.com/deploy/paddlelite/picodet_shufflenetv2_1x.tar) | [model](https://paddledet.bj.bcebos.com/deploy/paddlelite/picodet_shufflenetv2_1x_fp16.tar) |\n+| PicoDet-MobileNetv3-large 1x | 416*416 | [model](https://paddledet.bj.bcebos.com/deploy/third_engine/picodet_mobilenetv3_large_1x_416_coco.onnx) | [model](https://paddledet.bj.bcebos.com/deploy/paddlelite/picodet_mobilenetv3_large_1x.tar) | [model](https://paddledet.bj.bcebos.com/deploy/paddlelite/picodet_mobilenetv3_large_1x_fp16.tar) |\n+| PicoDet-LCNet 1.5x | 416*416 | [model](https://paddledet.bj.bcebos.com/deploy/third_engine/picodet_lcnet_1_5x_416_coco.onnx) | [model](https://paddledet.bj.bcebos.com/deploy/paddlelite/picodet_lcnet_1_5x.tar) | [model](https://paddledet.bj.bcebos.com/deploy/paddlelite/picodet_lcnet_1_5x_fp16.tar) |\n+\n+\n+\n## Cite PP-PicoDet\n```\n@misc{yu2021pppicodet,\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
update picodet inference model (#5582)
499,304
06.04.2022 13:14:47
-28,800
e23ab8d8e31d7c6d261d19d99c0a74bbed29d94a
fix picodet openvino demo
[ { "change_type": "MODIFY", "old_path": "deploy/third_engine/demo_openvino/python/openvino_benchmark.py", "new_path": "deploy/third_engine/demo_openvino/python/openvino_benchmark.py", "diff": "@@ -19,7 +19,7 @@ import argparse\nfrom openvino.runtime import Core\n-def image_preprocess_mobilenetv3(img_path, re_shape):\n+def image_preprocess(img_path, re_shape):\nimg = cv2.imread(img_path)\nimg = cv2.resize(\nimg, (re_shape, re_shape), interpolation=cv2.INTER_LANCZOS4)\n@@ -38,7 +38,7 @@ def benchmark(img_file, onnx_file, re_shape):\nie = Core()\nnet = ie.read_model(onnx_file)\n- test_image = image_preprocess_mobilenetv3(img_file, re_shape)\n+ test_image = image_preprocess(img_file, re_shape)\ncompiled_model = ie.compile_model(net, 'CPU')\n@@ -64,9 +64,9 @@ def benchmark(img_file, onnx_file, re_shape):\ntime_avg = timeall / loop_num\n- print(\n- f'inference_time(ms): min={round(time_min*1000, 2)}, max = {round(time_max*1000, 1)}, avg = {round(time_avg*1000, 1)}'\n- )\n+ print('inference_time(ms): min={}, max={}, avg={}'.format(\n+ round(time_min * 1000, 2),\n+ round(time_max * 1000, 1), round(time_avg * 1000, 1)))\nif __name__ == '__main__':\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
fix picodet openvino demo (#5595)
499,333
06.04.2022 14:31:55
-28,800
ae54352f57f397df6f9da5c2c87ee54aa486a9be
fix rcnn pred
[ { "change_type": "MODIFY", "old_path": "ppdet/modeling/architectures/faster_rcnn.py", "new_path": "ppdet/modeling/architectures/faster_rcnn.py", "diff": "@@ -87,8 +87,8 @@ class FasterRCNN(BaseArch):\nim_shape, scale_factor)\n# rescale the prediction back to origin image\n- bbox_pred = self.bbox_post_process.get_pred(bbox, bbox_num,\n- im_shape, scale_factor)\n+ bboxes, bbox_pred, bbox_num = self.bbox_post_process.get_pred(\n+ bbox, bbox_num, im_shape, scale_factor)\nreturn bbox_pred, bbox_num\ndef get_loss(self, ):\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
fix rcnn pred (#5598)
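The one-line fix above reflects that `BBoxPostProcess.get_pred` returns three values, of which only the rescaled prediction and the per-image box counts are forwarded. Below is a stub-level sketch of that contract, runnable on its own; the fake post-process only mimics the return shape, and the scalar scale factor is an illustrative simplification.

```python
# Stub illustrating the (bboxes, bbox_pred, bbox_num) unpacking that the
# fixed caller now expects. Rows follow the [cls, score, x1, y1, x2, y2]
# layout; the stub itself is an assumption, not the real post-process.
import numpy as np

def get_pred_stub(bbox, bbox_num, im_shape, scale_factor):
    scaled = bbox.copy()
    scaled[:, 2:] /= scale_factor      # rescale only the coordinates
    return bbox, scaled, bbox_num

bbox = np.array([[0., 0.9, 30., 30., 90., 120.]], dtype=np.float32)
bboxes, bbox_pred, bbox_num = get_pred_stub(
    bbox, np.array([1]), np.array([640, 640]), 1.5)
print(bbox_pred)   # [[0. 0.9 20. 20. 60. 80.]]
```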
499,298
06.04.2022 18:52:02
-28,800
057ef8bd937913afacd0e27f3582d77bdcb47c80
add tqdm eval and infer
[ { "change_type": "MODIFY", "old_path": "ppdet/engine/callbacks.py", "new_path": "ppdet/engine/callbacks.py", "diff": "@@ -141,10 +141,6 @@ class LogPrinter(Callback):\ndtime=str(data_time),\nips=ips)\nlogger.info(fmt)\n- if mode == 'eval':\n- step_id = status['step_id']\n- if step_id % 100 == 0:\n- logger.info(\"Eval iter: {}\".format(step_id))\ndef on_epoch_end(self, status):\nif dist.get_world_size() < 2 or dist.get_rank() == 0:\n" }, { "change_type": "MODIFY", "old_path": "ppdet/engine/tracker.py", "new_path": "ppdet/engine/tracker.py", "diff": "@@ -17,12 +17,11 @@ from __future__ import division\nfrom __future__ import print_function\nimport os\n-import cv2\nimport glob\nimport re\nimport paddle\nimport numpy as np\n-import os.path as osp\n+from tqdm import tqdm\nfrom collections import defaultdict\nfrom ppdet.core.workspace import create\n@@ -31,8 +30,7 @@ from ppdet.modeling.mot.utils import Detection, get_crops, scale_coords, clip_bo\nfrom ppdet.modeling.mot.utils import MOTTimer, load_det_results, write_mot_results, save_vis_results\nfrom ppdet.modeling.mot.tracker import JDETracker, DeepSORTTracker\n-from ppdet.metrics import Metric, MOTMetric, KITTIMOTMetric\n-from ppdet.metrics import MCMOTMetric\n+from ppdet.metrics import Metric, MOTMetric, KITTIMOTMetric, MCMOTMetric\nimport ppdet.utils.stats as stats\nfrom .callbacks import Callback, ComposeCallback\n@@ -142,11 +140,8 @@ class Tracker(object):\nself.model.eval()\nresults = defaultdict(list) # support single class and multi classes\n- for step_id, data in enumerate(dataloader):\n+ for step_id, data in enumerate(tqdm(dataloader)):\nself.status['step_id'] = step_id\n- if frame_id % 40 == 0:\n- logger.info('Processing frame {} ({:.2f} fps)'.format(\n- frame_id, 1. / max(1e-5, timer.average_time)))\n# forward\ntimer.tic()\npred_dets, pred_embs = self.model(data)\n@@ -210,12 +205,8 @@ class Tracker(object):\ndet_file))\ntracker = self.model.tracker\n- for step_id, data in enumerate(dataloader):\n+ for step_id, data in enumerate(tqdm(dataloader)):\nself.status['step_id'] = step_id\n- if frame_id % 40 == 0:\n- logger.info('Processing frame {} ({:.2f} fps)'.format(\n- frame_id, 1. 
/ max(1e-5, timer.average_time)))\n-\nori_image = data['ori_image'] # [bs, H, W, 3]\nori_image_shape = data['ori_image'].shape[1:3]\n# ori_image_shape: [H, W]\n@@ -366,8 +357,8 @@ class Tracker(object):\nonline_scores[cls_id].append(tscore)\n# save results\nresults[cls_id].append(\n- (frame_id + 1, online_tlwhs[cls_id], online_scores[cls_id],\n- online_ids[cls_id]))\n+ (frame_id + 1, online_tlwhs[cls_id],\n+ online_scores[cls_id], online_ids[cls_id]))\ntimer.toc()\nsave_vis_results(data, frame_id, online_ids, online_tlwhs,\nonline_scores, timer.average_time, show_image,\n@@ -417,7 +408,7 @@ class Tracker(object):\nsave_dir = os.path.join(output_dir, 'mot_outputs',\nseq) if save_images or save_videos else None\n- logger.info('start seq: {}'.format(seq))\n+ logger.info('Evaluate seq: {}'.format(seq))\nself.dataset.set_images(self.get_infer_images(infer_dir))\ndataloader = create('EvalMOTReader')(self.dataset, 0)\n@@ -458,7 +449,6 @@ class Tracker(object):\nos.system(cmd_str)\nlogger.info('Save video in {}.'.format(output_video_path))\n- logger.info('Evaluate seq: {}'.format(seq))\n# update metrics\nfor metric in self._metrics:\nmetric.update(data_root, seq, data_type, result_root,\n@@ -582,6 +572,7 @@ class Tracker(object):\nwrite_mot_results(result_filename, results, data_type,\nself.cfg.num_classes)\n+\ndef get_trick_hyperparams(video_name, ori_buffer, ori_thresh):\nif video_name[:3] != 'MOT':\n# only used for MOTChallenge (MOT17, MOT20) Test-set\n" }, { "change_type": "MODIFY", "old_path": "ppdet/engine/trainer.py", "new_path": "ppdet/engine/trainer.py", "diff": "@@ -20,6 +20,7 @@ import os\nimport sys\nimport copy\nimport time\n+from tqdm import tqdm\nimport numpy as np\nimport typing\n@@ -500,7 +501,7 @@ class Trainer(object):\nflops_loader = create('{}Reader'.format(self.mode.capitalize()))(\nself.dataset, self.cfg.worker_num, self._eval_batch_sampler)\nself._flops(flops_loader)\n- for step_id, data in enumerate(loader):\n+ for step_id, data in enumerate(tqdm(loader)):\nself.status['step_id'] = step_id\nself._compose_callback.on_step_begin(self.status)\n# forward\n@@ -553,7 +554,7 @@ class Trainer(object):\nflops_loader = create('TestReader')(self.dataset, 0)\nself._flops(flops_loader)\nresults = []\n- for step_id, data in enumerate(loader):\n+ for step_id, data in enumerate(tqdm(loader)):\nself.status['step_id'] = step_id\n# forward\nouts = self.model(data)\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
add tqdm eval and infer (#5587)
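The pattern this commit adopts throughout `tracker.py` and `trainer.py` is simply wrapping the dataloader in `tqdm` and dropping the periodic step logs. A minimal stand-alone illustration, where `range()` stands in for a real dataloader and the loop body for the model forward pass:

```python
# A tqdm progress bar replaces the periodic "Eval iter: N" /
# "Processing frame" prints; no other loop logic changes.
from tqdm import tqdm

loader = range(500)                      # stand-in for the dataloader
for step_id, data in enumerate(tqdm(loader)):
    pass                                 # outs = model(data) would run here
```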
499,319
07.04.2022 09:42:00
-28,800
bbece39537a8180bd4d6aa2dbceedd3118b35ffa
[pico] openvino demo
[ { "change_type": "ADD", "old_path": null, "new_path": "deploy/third_engine/demo_openvino/python/coco_label.txt", "diff": "+person\n+bicycle\n+car\n+motorbike\n+aeroplane\n+bus\n+train\n+truck\n+boat\n+traffic light\n+fire hydrant\n+stop sign\n+parking meter\n+bench\n+bird\n+cat\n+dog\n+horse\n+sheep\n+cow\n+elephant\n+bear\n+zebra\n+giraffe\n+backpack\n+umbrella\n+handbag\n+tie\n+suitcase\n+frisbee\n+skis\n+snowboard\n+sports ball\n+kite\n+baseball bat\n+baseball glove\n+skateboard\n+surfboard\n+tennis racket\n+bottle\n+wine glass\n+cup\n+fork\n+knife\n+spoon\n+bowl\n+banana\n+apple\n+sandwich\n+orange\n+broccoli\n+carrot\n+hot dog\n+pizza\n+donut\n+cake\n+chair\n+sofa\n+pottedplant\n+bed\n+diningtable\n+toilet\n+tvmonitor\n+laptop\n+mouse\n+remote\n+keyboard\n+cell phone\n+microwave\n+oven\n+toaster\n+sink\n+refrigerator\n+book\n+clock\n+vase\n+scissors\n+teddy bear\n+hair drier\n+toothbrush\n" }, { "change_type": "MODIFY", "old_path": "deploy/third_engine/demo_openvino/python/openvino_benchmark.py", "new_path": "deploy/third_engine/demo_openvino/python/openvino_benchmark.py", "diff": "@@ -16,6 +16,7 @@ import cv2\nimport numpy as np\nimport time\nimport argparse\n+from scipy.special import softmax\nfrom openvino.runtime import Core\n@@ -33,14 +34,275 @@ def image_preprocess(img_path, re_shape):\nreturn img.astype(np.float32)\n-def benchmark(img_file, onnx_file, re_shape):\n+def draw_box(img, results, class_label, scale_x, scale_y):\n- ie = Core()\n- net = ie.read_model(onnx_file)\n+ label_list = list(\n+ map(lambda x: x.strip(), open(class_label, 'r').readlines()))\n- test_image = image_preprocess(img_file, re_shape)\n+ for i in range(len(results)):\n+ print(label_list[int(results[i][0])], ':', results[i][1])\n+ bbox = results[i, 2:]\n+ label_id = int(results[i, 0])\n+ score = results[i, 1]\n+ if (score > 0.20):\n+ xmin, ymin, xmax, ymax = [\n+ int(bbox[0] * scale_x), int(bbox[1] * scale_y),\n+ int(bbox[2] * scale_x), int(bbox[3] * scale_y)\n+ ]\n+ cv2.rectangle(img, (xmin, ymin), (xmax, ymax), (0, 255, 0), 3)\n+ font = cv2.FONT_HERSHEY_SIMPLEX\n+ label_text = label_list[label_id]\n+ cv2.rectangle(img, (xmin, ymin), (xmax, ymin - 60), (0, 255, 0), -1)\n+ cv2.putText(img, \"#\" + label_text, (xmin, ymin - 10), font, 1,\n+ (255, 255, 255), 2, cv2.LINE_AA)\n+ cv2.putText(img,\n+ str(round(score, 3)), (xmin, ymin - 40), font, 0.8,\n+ (255, 255, 255), 2, cv2.LINE_AA)\n+ return img\n- compiled_model = ie.compile_model(net, 'CPU')\n+\n+def hard_nms(box_scores, iou_threshold, top_k=-1, candidate_size=200):\n+ \"\"\"\n+ Args:\n+ box_scores (N, 5): boxes in corner-form and probabilities.\n+ iou_threshold: intersection over union threshold.\n+ top_k: keep top_k results. 
If k <= 0, keep all the results.\n+ candidate_size: only consider the candidates with the highest scores.\n+ Returns:\n+ picked: a list of indexes of the kept boxes\n+ \"\"\"\n+ scores = box_scores[:, -1]\n+ boxes = box_scores[:, :-1]\n+ picked = []\n+ indexes = np.argsort(scores)\n+ indexes = indexes[-candidate_size:]\n+ while len(indexes) > 0:\n+ current = indexes[-1]\n+ picked.append(current)\n+ if 0 < top_k == len(picked) or len(indexes) == 1:\n+ break\n+ current_box = boxes[current, :]\n+ indexes = indexes[:-1]\n+ rest_boxes = boxes[indexes, :]\n+ iou = iou_of(\n+ rest_boxes,\n+ np.expand_dims(\n+ current_box, axis=0), )\n+ indexes = indexes[iou <= iou_threshold]\n+\n+ return box_scores[picked, :]\n+\n+\n+def iou_of(boxes0, boxes1, eps=1e-5):\n+ \"\"\"Return intersection-over-union (Jaccard index) of boxes.\n+ Args:\n+ boxes0 (N, 4): ground truth boxes.\n+ boxes1 (N or 1, 4): predicted boxes.\n+ eps: a small number to avoid 0 as denominator.\n+ Returns:\n+ iou (N): IoU values.\n+ \"\"\"\n+ overlap_left_top = np.maximum(boxes0[..., :2], boxes1[..., :2])\n+ overlap_right_bottom = np.minimum(boxes0[..., 2:], boxes1[..., 2:])\n+\n+ overlap_area = area_of(overlap_left_top, overlap_right_bottom)\n+ area0 = area_of(boxes0[..., :2], boxes0[..., 2:])\n+ area1 = area_of(boxes1[..., :2], boxes1[..., 2:])\n+ return overlap_area / (area0 + area1 - overlap_area + eps)\n+\n+\n+def area_of(left_top, right_bottom):\n+ \"\"\"Compute the areas of rectangles given two corners.\n+ Args:\n+ left_top (N, 2): left top corner.\n+ right_bottom (N, 2): right bottom corner.\n+ Returns:\n+ area (N): return the area.\n+ \"\"\"\n+ hw = np.clip(right_bottom - left_top, 0.0, None)\n+ return hw[..., 0] * hw[..., 1]\n+\n+\n+class PicoDetPostProcess(object):\n+ \"\"\"\n+ Args:\n+ input_shape (int): network input image size\n+ ori_shape (int): ori image shape of before padding\n+ scale_factor (float): scale factor of ori image\n+ enable_mkldnn (bool): whether to open MKLDNN\n+ \"\"\"\n+\n+ def __init__(self,\n+ input_shape,\n+ ori_shape,\n+ scale_factor,\n+ strides=[8, 16, 32, 64],\n+ score_threshold=0.4,\n+ nms_threshold=0.5,\n+ nms_top_k=1000,\n+ keep_top_k=100):\n+ self.ori_shape = ori_shape\n+ self.input_shape = input_shape\n+ self.scale_factor = scale_factor\n+ self.strides = strides\n+ self.score_threshold = score_threshold\n+ self.nms_threshold = nms_threshold\n+ self.nms_top_k = nms_top_k\n+ self.keep_top_k = keep_top_k\n+\n+ def warp_boxes(self, boxes, ori_shape):\n+ \"\"\"Apply transform to boxes\n+ \"\"\"\n+ width, height = ori_shape[1], ori_shape[0]\n+ n = len(boxes)\n+ if n:\n+ # warp points\n+ xy = np.ones((n * 4, 3))\n+ xy[:, :2] = boxes[:, [0, 1, 2, 3, 0, 3, 2, 1]].reshape(\n+ n * 4, 2) # x1y1, x2y2, x1y2, x2y1\n+ # xy = xy @ M.T # transform\n+ xy = (xy[:, :2] / xy[:, 2:3]).reshape(n, 8) # rescale\n+ # create new boxes\n+ x = xy[:, [0, 2, 4, 6]]\n+ y = xy[:, [1, 3, 5, 7]]\n+ xy = np.concatenate(\n+ (x.min(1), y.min(1), x.max(1), y.max(1))).reshape(4, n).T\n+ # clip boxes\n+ xy[:, [0, 2]] = xy[:, [0, 2]].clip(0, width)\n+ xy[:, [1, 3]] = xy[:, [1, 3]].clip(0, height)\n+ return xy.astype(np.float32)\n+ else:\n+ return boxes\n+\n+ def __call__(self, scores, raw_boxes):\n+ batch_size = raw_boxes[0].shape[0]\n+ reg_max = int(raw_boxes[0].shape[-1] / 4 - 1)\n+ out_boxes_num = []\n+ out_boxes_list = []\n+ for batch_id in range(batch_size):\n+ # generate centers\n+ decode_boxes = []\n+ select_scores = []\n+ for stride, box_distribute, score in zip(self.strides, raw_boxes,\n+ scores):\n+ box_distribute = 
box_distribute[batch_id]\n+ score = score[batch_id]\n+ # centers\n+ fm_h = self.input_shape[0] / stride\n+ fm_w = self.input_shape[1] / stride\n+ h_range = np.arange(fm_h)\n+ w_range = np.arange(fm_w)\n+ ww, hh = np.meshgrid(w_range, h_range)\n+ ct_row = (hh.flatten() + 0.5) * stride\n+ ct_col = (ww.flatten() + 0.5) * stride\n+ center = np.stack((ct_col, ct_row, ct_col, ct_row), axis=1)\n+\n+ # box distribution to distance\n+ reg_range = np.arange(reg_max + 1)\n+ box_distance = box_distribute.reshape((-1, reg_max + 1))\n+ box_distance = softmax(box_distance, axis=1)\n+ box_distance = box_distance * np.expand_dims(reg_range, axis=0)\n+ box_distance = np.sum(box_distance, axis=1).reshape((-1, 4))\n+ box_distance = box_distance * stride\n+\n+ # top K candidate\n+ topk_idx = np.argsort(score.max(axis=1))[::-1]\n+ topk_idx = topk_idx[:self.nms_top_k]\n+ center = center[topk_idx]\n+ score = score[topk_idx]\n+ box_distance = box_distance[topk_idx]\n+\n+ # decode box\n+ decode_box = center + [-1, -1, 1, 1] * box_distance\n+\n+ select_scores.append(score)\n+ decode_boxes.append(decode_box)\n+\n+ # nms\n+ bboxes = np.concatenate(decode_boxes, axis=0)\n+ confidences = np.concatenate(select_scores, axis=0)\n+ picked_box_probs = []\n+ picked_labels = []\n+ for class_index in range(0, confidences.shape[1]):\n+ probs = confidences[:, class_index]\n+ mask = probs > self.score_threshold\n+ probs = probs[mask]\n+ if probs.shape[0] == 0:\n+ continue\n+ subset_boxes = bboxes[mask, :]\n+ box_probs = np.concatenate(\n+ [subset_boxes, probs.reshape(-1, 1)], axis=1)\n+ box_probs = hard_nms(\n+ box_probs,\n+ iou_threshold=self.nms_threshold,\n+ top_k=self.keep_top_k, )\n+ picked_box_probs.append(box_probs)\n+ picked_labels.extend([class_index] * box_probs.shape[0])\n+\n+ if len(picked_box_probs) == 0:\n+ out_boxes_list.append(np.empty((0, 4)))\n+ out_boxes_num.append(0)\n+\n+ else:\n+ picked_box_probs = np.concatenate(picked_box_probs)\n+\n+ # resize output boxes\n+ picked_box_probs[:, :4] = self.warp_boxes(\n+ picked_box_probs[:, :4], self.ori_shape[batch_id])\n+ im_scale = np.concatenate([\n+ self.scale_factor[batch_id][::-1],\n+ self.scale_factor[batch_id][::-1]\n+ ])\n+ picked_box_probs[:, :4] /= im_scale\n+ # clas score box\n+ out_boxes_list.append(\n+ np.concatenate(\n+ [\n+ np.expand_dims(\n+ np.array(picked_labels),\n+ axis=-1), np.expand_dims(\n+ picked_box_probs[:, 4], axis=-1),\n+ picked_box_probs[:, :4]\n+ ],\n+ axis=1))\n+ out_boxes_num.append(len(picked_labels))\n+\n+ out_boxes_list = np.concatenate(out_boxes_list, axis=0)\n+ out_boxes_num = np.asarray(out_boxes_num).astype(np.int32)\n+ return out_boxes_list, out_boxes_num\n+\n+\n+def detect(img_file, compiled_model, re_shape, class_label):\n+ output = compiled_model.infer_new_request({0: test_image})\n+ result_ie = list(output.values()) #[0]\n+\n+ test_im_shape = np.array([[re_shape, re_shape]]).astype('float32')\n+ test_scale_factor = np.array([[1, 1]]).astype('float32')\n+\n+ np_score_list = []\n+ np_boxes_list = []\n+\n+ num_outs = int(len(result_ie) / 2)\n+ for out_idx in range(num_outs):\n+ np_score_list.append(result_ie[out_idx])\n+ np_boxes_list.append(result_ie[out_idx + num_outs])\n+\n+ postprocess = PicoDetPostProcess(test_image.shape[2:], test_im_shape,\n+ test_scale_factor)\n+\n+ np_boxes, np_boxes_num = postprocess(np_score_list, np_boxes_list)\n+\n+ image = cv2.imread(img_file, 1)\n+ scale_x = image.shape[1] / test_image.shape[3]\n+ scale_y = image.shape[0] / test_image.shape[2]\n+ res_image = draw_box(image, np_boxes, class_label, 
scale_x, scale_y)\n+\n+ cv2.imwrite('res.jpg', res_image)\n+ cv2.imshow(\"res\", res_image)\n+ cv2.waitKey()\n+\n+\n+def benchmark(test_image, compiled_model):\n# benchmark\nloop_num = 100\n@@ -71,21 +333,33 @@ def benchmark(img_file, onnx_file, re_shape):\nif __name__ == '__main__':\n- onnx_path = \"out_onnx\"\n- onnx_file = onnx_path + \"/picodet_s_320_coco.onnx\"\n-\nparser = argparse.ArgumentParser()\n+ parser.add_argument(\n+ '--benchmark', type=int, default=1, help=\"0:detect; 1:benchmark\")\nparser.add_argument(\n'--img_path',\ntype=str,\n- default='demo/000000570688.jpg',\n+ default='demo/000000014439.jpg',\nhelp=\"image path\")\nparser.add_argument(\n'--onnx_path',\ntype=str,\n- default='out_onnxsim/picodet_xs_320_coco_lcnet.onnx',\n+ default='out_onnxsim/picodet_s_320_processed.onnx',\nhelp=\"onnx filepath\")\nparser.add_argument('--in_shape', type=int, default=320, help=\"input_size\")\n-\n+ parser.add_argument(\n+ '--class_label',\n+ type=str,\n+ default='coco_label.txt',\n+ help=\"class label file\")\nargs = parser.parse_args()\n- benchmark(args.img_path, args.onnx_path, args.in_shape)\n+\n+ ie = Core()\n+ net = ie.read_model(args.onnx_path)\n+ test_image = image_preprocess(args.img_path, args.in_shape)\n+ compiled_model = ie.compile_model(net, 'CPU')\n+\n+ if args.benchmark == 0:\n+ detect(args.img_path, compiled_model, args.in_shape, args.class_label)\n+ if args.benchmark == 1:\n+ benchmark(test_image, compiled_model)\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
[pico] openvino demo (#5605)
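The post-processing added above decodes the discretized box distributions (a softmax over `reg_max + 1` bins per side) and then filters candidates with a plain hard NMS. As a sanity check on the geometry helpers, here is the IoU computation restated in a self-contained form with hand-picked boxes; the expected values in the comments follow from the chosen coordinates, not from the repo.

```python
# Self-contained restatement of the iou_of/area_of helpers: two heavily
# overlapping boxes plus one distant box, so NMS at iou_threshold=0.5
# would suppress exactly one of the first two.
import numpy as np

def area_of(lt, rb):
    hw = np.clip(rb - lt, 0.0, None)
    return hw[..., 0] * hw[..., 1]

def iou_of(b0, b1, eps=1e-5):
    lt = np.maximum(b0[..., :2], b1[..., :2])
    rb = np.minimum(b0[..., 2:], b1[..., 2:])
    overlap = area_of(lt, rb)
    return overlap / (area_of(b0[..., :2], b0[..., 2:]) +
                      area_of(b1[..., :2], b1[..., 2:]) - overlap + eps)

boxes = np.array([[0, 0, 10, 10],
                  [1, 1, 11, 11],        # IoU ~0.68 with the first box
                  [50, 50, 60, 60]], dtype=np.float32)
print(iou_of(boxes[None, 0], boxes[1:]))  # ~[0.68, 0.0]
```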
499,333
07.04.2022 14:52:00
-28,800
2bce73cc6c27422a2fc4fcac7cce5fe6fe21034a
fix centernet deploy, test=develop
[ { "change_type": "MODIFY", "old_path": "configs/centernet/_base_/centernet_reader.yml", "new_path": "configs/centernet/_base_/centernet_reader.yml", "diff": "@@ -30,6 +30,6 @@ TestReader:\nsample_transforms:\n- Decode: {}\n- WarpAffine: {keep_res: True, input_h: 512, input_w: 512}\n- - NormalizeImage: {mean: [0.40789655, 0.44719303, 0.47026116], std: [0.2886383 , 0.27408165, 0.27809834]}\n+ - NormalizeImage: {mean: [0.40789655, 0.44719303, 0.47026116], std: [0.2886383 , 0.27408165, 0.27809834], is_scale: True}\n- Permute: {}\nbatch_size: 1\n" }, { "change_type": "MODIFY", "old_path": "deploy/cpp/include/preprocess_op.h", "new_path": "deploy/cpp/include/preprocess_op.h", "diff": "@@ -74,7 +74,7 @@ class NormalizeImage : public PreprocessOp {\n// CHW or HWC\nstd::vector<float> mean_;\nstd::vector<float> scale_;\n- bool is_scale_;\n+ bool is_scale_ = true;\n};\nclass Permute : public PreprocessOp {\n@@ -143,6 +143,24 @@ class TopDownEvalAffine : public PreprocessOp {\nstd::vector<int> trainsize_;\n};\n+class WarpAffine : public PreprocessOp {\n+ public:\n+ virtual void Init(const YAML::Node& item) {\n+ input_h_ = item[\"input_h\"].as<int>();\n+ input_w_ = item[\"input_w\"].as<int>();\n+ keep_res_ = item[\"keep_res\"].as<bool>();\n+ }\n+\n+ virtual void Run(cv::Mat* im, ImageBlob* data);\n+\n+ private:\n+ int input_h_;\n+ int input_w_;\n+ int interp_ = 1;\n+ bool keep_res_ = true;\n+ int pad_ = 31;\n+};\n+\nvoid CropImg(cv::Mat& img,\ncv::Mat& crop_img,\nstd::vector<int>& area,\n@@ -183,6 +201,8 @@ class Preprocessor {\nreturn std::make_shared<PadStride>();\n} else if (name == \"TopDownEvalAffine\") {\nreturn std::make_shared<TopDownEvalAffine>();\n+ } else if (name == \"WarpAffine\") {\n+ return std::make_shared<WarpAffine>();\n}\nstd::cerr << \"can not find function of OP: \" << name\n<< \" and return: nullptr\" << std::endl;\n" }, { "change_type": "MODIFY", "old_path": "deploy/cpp/src/preprocess_op.cc", "new_path": "deploy/cpp/src/preprocess_op.cc", "diff": "@@ -177,11 +177,64 @@ void TopDownEvalAffine::Run(cv::Mat* im, ImageBlob* data) {\n};\n}\n+void GetAffineTrans(const cv::Point2f center,\n+ const cv::Point2f input_size,\n+ const cv::Point2f output_size,\n+ cv::Mat* trans) {\n+ cv::Point2f srcTri[3];\n+ cv::Point2f dstTri[3];\n+ float src_w = input_size.x;\n+ float dst_w = output_size.x;\n+ float dst_h = output_size.y;\n+\n+ cv::Point2f src_dir(0, -0.5 * src_w);\n+ cv::Point2f dst_dir(0, -0.5 * dst_w);\n+\n+ srcTri[0] = center;\n+ srcTri[1] = center + src_dir;\n+ cv::Point2f src_d = srcTri[0] - srcTri[1];\n+ srcTri[2] = srcTri[1] + cv::Point2f(-src_d.y, src_d.x);\n+\n+ dstTri[0] = cv::Point2f(dst_w * 0.5, dst_h * 0.5);\n+ dstTri[1] = cv::Point2f(dst_w * 0.5, dst_h * 0.5) + dst_dir;\n+ cv::Point2f dst_d = dstTri[0] - dstTri[1];\n+ dstTri[2] = dstTri[1] + cv::Point2f(-dst_d.y, dst_d.x);\n+\n+ *trans = cv::getAffineTransform(srcTri, dstTri);\n+}\n+\n+void WarpAffine::Run(cv::Mat* im, ImageBlob* data) {\n+ cv::cvtColor(*im, *im, cv::COLOR_RGB2BGR);\n+ cv::Mat trans(2, 3, CV_32FC1);\n+ cv::Point2f center;\n+ cv::Point2f input_size;\n+ int h = im->rows;\n+ int w = im->cols;\n+ if (keep_res_) {\n+ input_h_ = (h | pad_) + 1;\n+ input_w_ = (w + pad_) + 1;\n+ input_size = cv::Point2f(input_w_, input_h_);\n+ center = cv::Point2f(w / 2, h / 2);\n+ } else {\n+ float s = std::max(h, w) * 1.0;\n+ input_size = cv::Point2f(s, s);\n+ center = cv::Point2f(w / 2., h / 2.);\n+ }\n+ cv::Point2f output_size(input_w_, input_h_);\n+\n+ GetAffineTrans(center, input_size, output_size, &trans);\n+ 
cv::warpAffine(*im, *im, trans, cv::Size(input_w_, input_h_));\n+ data->in_net_shape_ = {\n+ static_cast<float>(input_h_), static_cast<float>(input_w_),\n+ };\n+}\n+\n// Preprocessor op running order\nconst std::vector<std::string> Preprocessor::RUN_ORDER = {\"InitInfo\",\n\"TopDownEvalAffine\",\n\"Resize\",\n\"LetterBoxResize\",\n+ \"WarpAffine\",\n\"NormalizeImage\",\n\"PadStride\",\n\"Permute\"};\n" }, { "change_type": "MODIFY", "old_path": "ppdet/modeling/post_process.py", "new_path": "ppdet/modeling/post_process.py", "diff": "@@ -524,7 +524,7 @@ class CenterNetPostProcess(TTFBox):\nx2 = xs + wh[:, 0:1] / 2\ny2 = ys + wh[:, 1:2] / 2\n- n, c, feat_h, feat_w = hm.shape[:]\n+ n, c, feat_h, feat_w = paddle.shape(hm)\npadw = (feat_w * self.down_ratio - im_shape[0, 1]) / 2\npadh = (feat_h * self.down_ratio - im_shape[0, 0]) / 2\nx1 = x1 * self.down_ratio\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
fix centernet deploy, test=develop (#5616)
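Two details in this fix are easy to miss: `paddle.shape(hm)` keeps the post-process exportable with dynamic shapes, and the C++ `WarpAffine` reproduces CenterNet's keep_res sizing. That sizing relies on a bit trick, checked below in plain Python; note the width line in the C++ hunk reads `(w + pad_) + 1`, while the or-form shown here appears to be what the Python `WarpAffine` preprocessing uses.

```python
# Quick check of the keep_res size arithmetic: with pad = 31,
# (x | pad) + 1 snaps x up to a multiple of 32, bumping exact
# multiples by a full stride (e.g. 512 -> 544).
pad = 31
for size in (480, 500, 512, 1080):
    print(size, '->', (size | pad) + 1)
# 480 -> 512, 500 -> 512, 512 -> 544, 1080 -> 1088
```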
499,304
07.04.2022 18:48:00
-28,800
573d7314da78d6e279c4c28921bdc36da3dbed91
add picodet quant model
[ { "change_type": "ADD", "old_path": null, "new_path": "configs/slim/quant/picodet_s_416_lcnet_quant.yml", "diff": "+pretrain_weights: https://paddledet.bj.bcebos.com/models/picodet_s_416_coco_lcnet.pdparams\n+slim: QAT\n+\n+QAT:\n+ quant_config: {\n+ 'activation_preprocess_type': 'PACT',\n+ 'weight_quantize_type': 'channel_wise_abs_max', 'activation_quantize_type': 'moving_average_abs_max',\n+ 'weight_bits': 8, 'activation_bits': 8, 'dtype': 'int8', 'window_size': 10000, 'moving_rate': 0.9,\n+ 'quantizable_layer_type': ['Conv2D', 'Linear']}\n+ print_model: False\n+\n+TrainReader:\n+ batch_size: 48\n+\n+LearningRate:\n+ base_lr: 0.024\n+ schedulers:\n+ - !CosineDecay\n+ max_epochs: 300\n+ - !LinearWarmup\n+ start_factor: 0.1\n+ steps: 300\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
add picodet quant model (#5617)
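For context, PaddleDetection's slim pipeline hands a `quant_config` like the one above to PaddleSlim's dygraph QAT wrapper, which inserts fake-quant ops into the listed layer types. A hedged sketch of that hand-off follows; the final `quantize` call is commented out because it needs a real `paddle.nn.Layer`.

```python
# Sketch of consuming the YAML quant_config with PaddleSlim's QAT.
# PACT preprocessing learns clipping thresholds for activations;
# weights are quantized per-channel with abs-max.
from paddleslim import QAT

quant_config = {
    'activation_preprocess_type': 'PACT',
    'weight_quantize_type': 'channel_wise_abs_max',
    'activation_quantize_type': 'moving_average_abs_max',
    'weight_bits': 8, 'activation_bits': 8, 'dtype': 'int8',
    'window_size': 10000, 'moving_rate': 0.9,
    'quantizable_layer_type': ['Conv2D', 'Linear'],
}
quanter = QAT(config=quant_config)
# model = quanter.quantize(model)   # wrap an existing paddle.nn.Layer
```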
499,339
08.04.2022 17:40:40
-28,800
dfba7a5d81d240aed03be5c04afcff421d516276
[PPYOLOE] fix doc, test=document_fix
[ { "change_type": "MODIFY", "old_path": "configs/ppyoloe/README.md", "new_path": "configs/ppyoloe/README.md", "diff": "@@ -99,13 +99,13 @@ python tools/export_model.py -c configs/ppyoloe/ppyoloe_crn_l_300e_coco.yml -o w\n```bash\n# inference single image\n-CUDA_VISIBLE_DEVICES=0 python deploy/python/infer.py --model_dir=output_inference/ppyolo_r50vd_dcn_1x_coco --image_file=demo/000000014439_640x640.jpg --device=gpu\n+CUDA_VISIBLE_DEVICES=0 python deploy/python/infer.py --model_dir=output_inference/ppyoloe_crn_l_300e_coco --image_file=demo/000000014439_640x640.jpg --device=gpu\n# inference all images in the directory\n-CUDA_VISIBLE_DEVICES=0 python deploy/python/infer.py --model_dir=output_inference/ppyolo_r50vd_dcn_1x_coco --image_dir=demo/ --device=gpu\n+CUDA_VISIBLE_DEVICES=0 python deploy/python/infer.py --model_dir=output_inference/ppyoloe_crn_l_300e_coco --image_dir=demo/ --device=gpu\n# benchmark\n-CUDA_VISIBLE_DEVICES=0 python deploy/python/infer.py --model_dir=output_inference/ppyolo_r50vd_dcn_1x_coco --image_file=demo/000000014439_640x640.jpg --device=gpu --run_benchmark=True\n+CUDA_VISIBLE_DEVICES=0 python deploy/python/infer.py --model_dir=output_inference/ppyoloe_crn_l_300e_coco --image_file=demo/000000014439_640x640.jpg --device=gpu --run_benchmark=True\n```\nIf you want to export PP-YOLOE model to **ONNX format**, use following command refer to [PaddleDetection Model Export as ONNX Format Tutorial](../../deploy/EXPORT_ONNX_MODEL_en.md).\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
[PPYOLOE] fix doc, test=document_fix (#5633)
499,339
08.04.2022 20:10:49
-28,800
7fbea77a791a9528cb3e6d709c587e395a8de9b4
[TIPC] fix log path in shell
[ { "change_type": "MODIFY", "old_path": "test_tipc/test_train_inference_python.sh", "new_path": "test_tipc/test_train_inference_python.sh", "diff": "@@ -92,7 +92,7 @@ benchmark_value=$(func_parser_value \"${lines[49]}\")\ninfer_key1=$(func_parser_key \"${lines[50]}\")\ninfer_value1=$(func_parser_value \"${lines[50]}\")\n-LOG_PATH=\"./test_tipc/output\"\n+LOG_PATH=\"./test_tipc/output/${model_name}\"\nmkdir -p ${LOG_PATH}\nstatus_log=\"${LOG_PATH}/results_python.log\"\n@@ -305,7 +305,7 @@ else\nstatus_check $? \"${export_cmd}\" \"${status_log}\"\n#run inference\n- save_export_model_dir=\"${save_export_value}/${model_name}\"\n+ save_export_model_dir=\"${save_log}/${model_name}\"\neval $env\nfunc_inference \"${python}\" \"${inference_py}\" \"${save_export_model_dir}\" \"${LOG_PATH}\" \"${train_infer_img_dir}\" \"${flag_quant}\"\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
[TIPC] fix log path in shell (#5638)
499,319
09.04.2022 10:12:59
-28,800
ce2a78dd89c8fde1cab1d2d91115a1cca4d5bad2
[pico] add onnxruntime demo
[ { "change_type": "MODIFY", "old_path": "configs/picodet/README_en.md", "new_path": "configs/picodet/README_en.md", "diff": "@@ -204,15 +204,16 @@ paddle2onnx --model_dir output_inference/picodet_s_320_coco_lcnet/ \\\n| Model | Input size | ONNX(w/o postprocess) | Paddle Lite(fp32) | Paddle Lite(fp16) |\n| :-------- | :--------: | :---------------------: | :----------------: | :----------------: |\n-| PicoDet-XS | 320*320 | [model](https://paddledet.bj.bcebos.com/deploy/third_engine/picodet_xs_320_coco_lcnet.onnx) | [model](https://paddledet.bj.bcebos.com/deploy/paddlelite/picodet_l_416.tar) | [model](https://paddledet.bj.bcebos.com/deploy/paddlelite/picodet_l_416_fp16.tar) |\n-| PicoDet-XS | 416*416 | [model](https://paddledet.bj.bcebos.com/deploy/third_engine/picodet_xs_416_coco_lcnet.onnx) | [model](https://paddledet.bj.bcebos.com/deploy/paddlelite/picodet_l_416.tar) | [model](https://paddledet.bj.bcebos.com/deploy/paddlelite/picodet_l_416_fp16.tar) |\n-| PicoDet-S | 320*320 | [model](https://paddledet.bj.bcebos.com/deploy/third_engine/picodet_s_320_coco_lcnet.onnx) | [model](https://paddledet.bj.bcebos.com/deploy/paddlelite/picodet_s_320.tar) | [model](https://paddledet.bj.bcebos.com/deploy/paddlelite/picodet_s_320_fp16.tar) |\n-| PicoDet-S | 416*416 | [model](https://paddledet.bj.bcebos.com/deploy/third_engine/picodet_s_416_coco_lcnet.onnx) | [model](https://paddledet.bj.bcebos.com/deploy/paddlelite/picodet_s_416.tar) | [model](https://paddledet.bj.bcebos.com/deploy/paddlelite/picodet_s_416_fp16.tar) |\n-| PicoDet-M | 320*320 | [model](https://paddledet.bj.bcebos.com/deploy/third_engine/picodet_m_320_coco_lcnet.onnx) | [model](https://paddledet.bj.bcebos.com/deploy/paddlelite/picodet_m_320.tar) | [model](https://paddledet.bj.bcebos.com/deploy/paddlelite/picodet_m_320_fp16.tar) |\n-| PicoDet-M | 416*416 | [model](https://paddledet.bj.bcebos.com/deploy/third_engine/picodet_m_416_coco_lcnet.onnx) | [model](https://paddledet.bj.bcebos.com/deploy/paddlelite/picodet_m_416.tar) | [model](https://paddledet.bj.bcebos.com/deploy/paddlelite/picodet_m_416_fp16.tar) |\n-| PicoDet-L | 320*320 | [model](https://paddledet.bj.bcebos.com/deploy/third_engine/picodet_l_320_coco_lcnet.onnx) | [model](https://paddledet.bj.bcebos.com/deploy/paddlelite/picodet_l_320.tar) | [model](https://paddledet.bj.bcebos.com/deploy/paddlelite/picodet_l_320_fp16.tar) |\n-| PicoDet-L | 416*416 | [model](https://paddledet.bj.bcebos.com/deploy/third_engine/picodet_l_416_coco_lcnet.onnx) | [model](https://paddledet.bj.bcebos.com/deploy/paddlelite/picodet_l_416.tar) | [model](https://paddledet.bj.bcebos.com/deploy/paddlelite/picodet_l_416_fp16.tar) |\n-| PicoDet-L | 640*640 | [model](https://paddledet.bj.bcebos.com/deploy/third_engine/picodet_l_640_coco.onnx) | [model](https://paddledet.bj.bcebos.com/deploy/paddlelite/picodet_l_640.tar) | [model](https://paddledet.bj.bcebos.com/deploy/paddlelite/picodet_l_640_fp16.tar) |\n+| PicoDet-XS | 320*320 | [( w/ postprocess)](https://paddledet.bj.bcebos.com/deploy/third_engine/picodet_xs_320_lcnet_postprocessed.onnx) &#124; [( w/o postprocess)](https://paddledet.bj.bcebos.com/deploy/third_engine/picodet_xs_320_coco_lcnet.onnx) | [model](https://paddledet.bj.bcebos.com/deploy/paddlelite/picodet_xs_320.tar) | [model](https://paddledet.bj.bcebos.com/deploy/paddlelite/picodet_xs_320_fp16.tar) |\n+| PicoDet-XS | 416*416 | [( w/ postprocess)](https://paddledet.bj.bcebos.com/deploy/third_engine/picodet_xs_416_lcnet_postprocessed.onnx) &#124; [( w/o 
postprocess)](https://paddledet.bj.bcebos.com/deploy/third_engine/picodet_xs_416_coco_lcnet.onnx) | [model](https://paddledet.bj.bcebos.com/deploy/paddlelite/picodet_xs_416.tar) | [model](https://paddledet.bj.bcebos.com/deploy/paddlelite/picodet_xs_416_fp16.tar) |\n+| PicoDet-S | 320*320 | [( w/ postprocess)](https://paddledet.bj.bcebos.com/deploy/third_engine/picodet_s_320_lcnet_postprocessed.onnx) &#124; [( w/o postprocess)](https://paddledet.bj.bcebos.com/deploy/third_engine/picodet_s_320_coco_lcnet.onnx) | [model](https://paddledet.bj.bcebos.com/deploy/paddlelite/picodet_s_320.tar) | [model](https://paddledet.bj.bcebos.com/deploy/paddlelite/picodet_s_320_fp16.tar) |\n+| PicoDet-S | 416*416 | [( w/ postprocess)](https://paddledet.bj.bcebos.com/deploy/third_engine/picodet_s_416_lcnet_postprocessed.onnx) &#124; [( w/o postprocess)](https://paddledet.bj.bcebos.com/deploy/third_engine/picodet_s_416_coco_lcnet.onnx) | [model](https://paddledet.bj.bcebos.com/deploy/paddlelite/picodet_s_416.tar) | [model](https://paddledet.bj.bcebos.com/deploy/paddlelite/picodet_s_416_fp16.tar) |\n+| PicoDet-M | 320*320 | [( w/ postprocess)](https://paddledet.bj.bcebos.com/deploy/third_engine/picodet_m_320_lcnet_postprocessed.onnx) &#124; [( w/o postprocess)](https://paddledet.bj.bcebos.com/deploy/third_engine/picodet_m_320_coco_lcnet.onnx) | [model](https://paddledet.bj.bcebos.com/deploy/paddlelite/picodet_m_320.tar) | [model](https://paddledet.bj.bcebos.com/deploy/paddlelite/picodet_m_320_fp16.tar) |\n+| PicoDet-M | 416*416 | [( w/ postprocess)](https://paddledet.bj.bcebos.com/deploy/third_engine/picodet_m_416_lcnet_postprocessed.onnx) &#124; [( w/o postprocess)](https://paddledet.bj.bcebos.com/deploy/third_engine/picodet_m_416_coco_lcnet.onnx) | [model](https://paddledet.bj.bcebos.com/deploy/paddlelite/picodet_m_416.tar) | [model](https://paddledet.bj.bcebos.com/deploy/paddlelite/picodet_m_416_fp16.tar) |\n+| PicoDet-L | 320*320 | [( w/ postprocess)](https://paddledet.bj.bcebos.com/deploy/third_engine/picodet_l_320_lcnet_postprocessed.onnx) &#124; [( w/o postprocess)](https://paddledet.bj.bcebos.com/deploy/third_engine/picodet_l_320_coco_lcnet.onnx) | [model](https://paddledet.bj.bcebos.com/deploy/paddlelite/picodet_l_320.tar) | [model](https://paddledet.bj.bcebos.com/deploy/paddlelite/picodet_l_320_fp16.tar) |\n+| PicoDet-L | 416*416 | [( w/ postprocess)](https://paddledet.bj.bcebos.com/deploy/third_engine/picodet_l_416_lcnet_postprocessed.onnx) &#124; [( w/o postprocess)](https://paddledet.bj.bcebos.com/deploy/third_engine/picodet_l_416_coco_lcnet.onnx) | [model](https://paddledet.bj.bcebos.com/deploy/paddlelite/picodet_l_416.tar) | [model](https://paddledet.bj.bcebos.com/deploy/paddlelite/picodet_l_416_fp16.tar) |\n+| PicoDet-L | 640*640 | [( w/ postprocess)](https://paddledet.bj.bcebos.com/deploy/third_engine/picodet_l_640_lcnet_postprocessed.onnx) &#124; [( w/o postprocess)](https://paddledet.bj.bcebos.com/deploy/third_engine/picodet_l_640_coco_lcnet.onnx) | [model](https://paddledet.bj.bcebos.com/deploy/paddlelite/picodet_l_640.tar) | [model](https://paddledet.bj.bcebos.com/deploy/paddlelite/picodet_l_640_fp16.tar) |\n+\n### Deploy\n" }, { "change_type": "ADD", "old_path": null, "new_path": "deploy/third_engine/demo_onnxruntime/coco_label.txt", "diff": "+person\n+bicycle\n+car\n+motorbike\n+aeroplane\n+bus\n+train\n+truck\n+boat\n+traffic light\n+fire hydrant\n+stop sign\n+parking 
meter\n+bench\n+bird\n+cat\n+dog\n+horse\n+sheep\n+cow\n+elephant\n+bear\n+zebra\n+giraffe\n+backpack\n+umbrella\n+handbag\n+tie\n+suitcase\n+frisbee\n+skis\n+snowboard\n+sports ball\n+kite\n+baseball bat\n+baseball glove\n+skateboard\n+surfboard\n+tennis racket\n+bottle\n+wine glass\n+cup\n+fork\n+knife\n+spoon\n+bowl\n+banana\n+apple\n+sandwich\n+orange\n+broccoli\n+carrot\n+hot dog\n+pizza\n+donut\n+cake\n+chair\n+sofa\n+pottedplant\n+bed\n+diningtable\n+toilet\n+tvmonitor\n+laptop\n+mouse\n+remote\n+keyboard\n+cell phone\n+microwave\n+oven\n+toaster\n+sink\n+refrigerator\n+book\n+clock\n+vase\n+scissors\n+teddy bear\n+hair drier\n+toothbrush\n" }, { "change_type": "ADD", "old_path": "deploy/third_engine/demo_onnxruntime/imgs/bus.jpg", "new_path": "deploy/third_engine/demo_onnxruntime/imgs/bus.jpg", "diff": "Binary files /dev/null and b/deploy/third_engine/demo_onnxruntime/imgs/bus.jpg differ\n" }, { "change_type": "ADD", "old_path": "deploy/third_engine/demo_onnxruntime/imgs/dog.jpg", "new_path": "deploy/third_engine/demo_onnxruntime/imgs/dog.jpg", "diff": "Binary files /dev/null and b/deploy/third_engine/demo_onnxruntime/imgs/dog.jpg differ\n" }, { "change_type": "ADD", "old_path": null, "new_path": "deploy/third_engine/demo_onnxruntime/infer_demo.py", "diff": "+# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n+import cv2\n+import numpy as np\n+import argparse\n+import onnxruntime as ort\n+from pathlib import Path\n+from tqdm import tqdm\n+\n+\n+class PicoDet():\n+ def __init__(self,\n+ model_pb_path,\n+ label_path,\n+ prob_threshold=0.4,\n+ iou_threshold=0.3):\n+ self.classes = list(\n+ map(lambda x: x.strip(), open(label_path, 'r').readlines()))\n+ self.num_classes = len(self.classes)\n+ self.prob_threshold = prob_threshold\n+ self.iou_threshold = iou_threshold\n+ self.mean = np.array(\n+ [103.53, 116.28, 123.675], dtype=np.float32).reshape(1, 1, 3)\n+ self.std = np.array(\n+ [57.375, 57.12, 58.395], dtype=np.float32).reshape(1, 1, 3)\n+ so = ort.SessionOptions()\n+ so.log_severity_level = 3\n+ self.net = ort.InferenceSession(model_pb_path, so)\n+ self.input_shape = (self.net.get_inputs()[0].shape[2],\n+ self.net.get_inputs()[0].shape[3])\n+\n+ def _normalize(self, img):\n+ img = img.astype(np.float32)\n+ img = (img / 255.0 - self.mean / 255.0) / (self.std / 255.0)\n+ return img\n+\n+ def resize_image(self, srcimg, keep_ratio=False):\n+ top, left, newh, neww = 0, 0, self.input_shape[0], self.input_shape[1]\n+ origin_shape = srcimg.shape[:2]\n+ im_scale_y = newh / float(origin_shape[0])\n+ im_scale_x = neww / float(origin_shape[1])\n+ scale_factor = np.array([[im_scale_y, im_scale_x]]).astype('float32')\n+\n+ if keep_ratio and srcimg.shape[0] != srcimg.shape[1]:\n+ hw_scale = srcimg.shape[0] / srcimg.shape[1]\n+ if hw_scale > 1:\n+ newh, neww = self.input_shape[0], int(self.input_shape[1] /\n+ hw_scale)\n+ img = cv2.resize(\n+ srcimg, (neww, newh), interpolation=cv2.INTER_AREA)\n+ left = 
int((self.input_shape[1] - neww) * 0.5)\n+ img = cv2.copyMakeBorder(\n+ img,\n+ 0,\n+ 0,\n+ left,\n+ self.input_shape[1] - neww - left,\n+ cv2.BORDER_CONSTANT,\n+ value=0) # add border\n+ else:\n+ newh, neww = int(self.input_shape[0] *\n+ hw_scale), self.input_shape[1]\n+ img = cv2.resize(\n+ srcimg, (neww, newh), interpolation=cv2.INTER_AREA)\n+ top = int((self.input_shape[0] - newh) * 0.5)\n+ img = cv2.copyMakeBorder(\n+ img,\n+ top,\n+ self.input_shape[0] - newh - top,\n+ 0,\n+ 0,\n+ cv2.BORDER_CONSTANT,\n+ value=0)\n+ else:\n+ img = cv2.resize(\n+ srcimg, self.input_shape, interpolation=cv2.INTER_AREA)\n+\n+ return img, scale_factor\n+\n+ def get_color_map_list(self, num_classes):\n+ color_map = num_classes * [0, 0, 0]\n+ for i in range(0, num_classes):\n+ j = 0\n+ lab = i\n+ while lab:\n+ color_map[i * 3] |= (((lab >> 0) & 1) << (7 - j))\n+ color_map[i * 3 + 1] |= (((lab >> 1) & 1) << (7 - j))\n+ color_map[i * 3 + 2] |= (((lab >> 2) & 1) << (7 - j))\n+ j += 1\n+ lab >>= 3\n+ color_map = [color_map[i:i + 3] for i in range(0, len(color_map), 3)]\n+ return color_map\n+\n+ def detect(self, srcimg):\n+ img, scale_factor = self.resize_image(srcimg)\n+ img = self._normalize(img)\n+\n+ blob = np.expand_dims(np.transpose(img, (2, 0, 1)), axis=0)\n+\n+ outs = self.net.run(None, {\n+ self.net.get_inputs()[0].name: blob,\n+ self.net.get_inputs()[1].name: scale_factor\n+ })\n+\n+ outs = np.array(outs[0])\n+ expect_boxes = (outs[:, 1] > 0.5) & (outs[:, 0] > -1)\n+ np_boxes = outs[expect_boxes, :]\n+\n+ color_list = self.get_color_map_list(self.num_classes)\n+ clsid2color = {}\n+\n+ for i in range(np_boxes.shape[0]):\n+ classid, conf = int(np_boxes[i, 0]), np_boxes[i, 1]\n+ xmin, ymin, xmax, ymax = int(np_boxes[i, 2]), int(np_boxes[\n+ i, 3]), int(np_boxes[i, 4]), int(np_boxes[i, 5])\n+\n+ if classid not in clsid2color:\n+ clsid2color[classid] = color_list[classid]\n+ color = tuple(clsid2color[classid])\n+\n+ cv2.rectangle(\n+ srcimg, (xmin, ymin), (xmax, ymax), color, thickness=2)\n+ print(self.classes[classid] + ': ' + str(round(conf, 3)))\n+ cv2.putText(\n+ srcimg,\n+ self.classes[classid] + ':' + str(round(conf, 3)), (xmin,\n+ ymin - 10),\n+ cv2.FONT_HERSHEY_SIMPLEX,\n+ 0.8, (0, 255, 0),\n+ thickness=2)\n+\n+ return srcimg\n+\n+ def detect_folder(self, img_fold, result_path):\n+ img_fold = Path(img_fold)\n+ result_path = Path(result_path)\n+ result_path.mkdir(parents=True, exist_ok=True)\n+\n+ img_name_list = filter(\n+ lambda x: str(x).endswith(\".png\") or str(x).endswith(\".jpg\"),\n+ img_fold.iterdir(), )\n+ img_name_list = list(img_name_list)\n+ print(f\"find {len(img_name_list)} images\")\n+\n+ for img_path in tqdm(img_name_list):\n+ img = cv2.imread(str(img_path))\n+\n+ srcimg = net.detect(img)\n+ save_path = str(result_path / img_path.name.replace(\".png\", \".jpg\"))\n+ cv2.imwrite(save_path, srcimg)\n+\n+\n+if __name__ == '__main__':\n+ parser = argparse.ArgumentParser()\n+ parser.add_argument(\n+ '--modelpath',\n+ type=str,\n+ default='onnx_file/picodet_s_320_lcnet_postprocessed.onnx',\n+ help=\"onnx filepath\")\n+ parser.add_argument(\n+ '--classfile',\n+ type=str,\n+ default='coco_label.txt',\n+ help=\"classname filepath\")\n+ parser.add_argument(\n+ '--confThreshold', default=0.5, type=float, help='class confidence')\n+ parser.add_argument(\n+ '--nmsThreshold', default=0.6, type=float, help='nms iou thresh')\n+ parser.add_argument(\n+ \"--img_fold\", dest=\"img_fold\", type=str, default=\"./imgs\")\n+ parser.add_argument(\n+ \"--result_fold\", dest=\"result_fold\", type=str, 
default=\"./results\")\n+ args = parser.parse_args()\n+\n+ net = PicoDet(\n+ args.modelpath,\n+ args.classfile,\n+ prob_threshold=args.confThreshold,\n+ iou_threshold=args.nmsThreshold)\n+\n+ net.detect_folder(args.img_fold, args.result_fold)\n" }, { "change_type": "ADD", "old_path": "docs/images/bus.jpg", "new_path": "docs/images/bus.jpg", "diff": "Binary files /dev/null and b/docs/images/bus.jpg differ\n" }, { "change_type": "ADD", "old_path": "docs/images/dog.jpg", "new_path": "docs/images/dog.jpg", "diff": "Binary files /dev/null and b/docs/images/dog.jpg differ\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
[pico] add onnxruntime demo (#5627)
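The `infer_demo.py` added above wraps PicoDet ONNX inference in a `PicoDet` class. A minimal usage sketch, assuming `onnxruntime` is installed and the script's argparse-default model/label paths exist locally:

```python
import cv2
from infer_demo import PicoDet  # assumes the script dir is on PYTHONPATH

# Paths below are the argparse defaults from the script; adjust locally.
net = PicoDet(
    'onnx_file/picodet_s_320_lcnet_postprocessed.onnx',
    'coco_label.txt',
    prob_threshold=0.5,
    iou_threshold=0.6)
img = cv2.imread('imgs/bus.jpg')
out = net.detect(img)             # draws boxes in place, returns the image
cv2.imwrite('bus_result.jpg', out)
```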
499,395
11.04.2022 19:52:58
-28,800
bc5d5245bf991be48b3e60e46301affdce33b21f
remove register_buffer
[ { "change_type": "MODIFY", "old_path": "ppdet/modeling/heads/ppyoloe_head.py", "new_path": "ppdet/modeling/heads/ppyoloe_head.py", "diff": "@@ -132,8 +132,8 @@ class PPYOLOEHead(nn.Layer):\nif self.eval_size:\nanchor_points, stride_tensor = self._generate_anchors()\n- self.register_buffer('anchor_points', anchor_points)\n- self.register_buffer('stride_tensor', stride_tensor)\n+ self.anchor_points = anchor_points\n+ self.stride_tensor = stride_tensor\ndef forward_train(self, feats, targets):\nanchors, anchor_points, num_anchors_list, stride_tensor = \\\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
remove register_buffer (#5631)
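The diff above drops `register_buffer` in favor of plain attribute assignment. A minimal sketch of the observable difference (assumes paddlepaddle is installed): persistable buffers are serialized into `state_dict`, while plain tensor attributes are not, so precomputed anchors no longer produce checkpoint keys.

```python
import paddle
import paddle.nn as nn

class WithBuffer(nn.Layer):
    def __init__(self):
        super().__init__()
        # persistable buffer: serialized into state_dict
        self.register_buffer('anchor_points', paddle.zeros([4, 2]))

class PlainAttr(nn.Layer):
    def __init__(self):
        super().__init__()
        # plain tensor attribute: kept out of state_dict, so anchors are
        # rebuilt from config at load time instead of deserialized
        self.anchor_points = paddle.zeros([4, 2])

print(list(WithBuffer().state_dict().keys()))  # ['anchor_points']
print(list(PlainAttr().state_dict().keys()))   # []
```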
499,314
12.04.2022 10:29:47
-28,800
df4a27c6db2e237c47c63b888905d2a1f4803fba
change core.ops to _C_ops
[ { "change_type": "MODIFY", "old_path": "ppdet/modeling/ops.py", "new_path": "ppdet/modeling/ops.py", "diff": "@@ -17,6 +17,7 @@ import paddle.nn.functional as F\nimport paddle.nn as nn\nfrom paddle import ParamAttr\nfrom paddle.regularizer import L2Decay\n+from paddle import _C_ops\nfrom paddle.fluid.framework import Variable, in_dygraph_mode\nfrom paddle.fluid import core\n@@ -174,7 +175,7 @@ def roi_pool(input,\npooled_height, pooled_width = output_size\nif in_dygraph_mode():\nassert rois_num is not None, \"rois_num should not be None in dygraph mode.\"\n- pool_out, argmaxes = core.ops.roi_pool(\n+ pool_out, argmaxes = _C_ops.roi_pool(\ninput, rois, rois_num, \"pooled_height\", pooled_height,\n\"pooled_width\", pooled_width, \"spatial_scale\", spatial_scale)\nreturn pool_out, argmaxes\n@@ -281,7 +282,7 @@ def roi_align(input,\nif in_dygraph_mode():\nassert rois_num is not None, \"rois_num should not be None in dygraph mode.\"\n- align_out = core.ops.roi_align(\n+ align_out = _C_ops.roi_align(\ninput, rois, rois_num, \"pooled_height\", pooled_height,\n\"pooled_width\", pooled_width, \"spatial_scale\", spatial_scale,\n\"sampling_ratio\", sampling_ratio, \"aligned\", aligned)\n@@ -364,7 +365,7 @@ def iou_similarity(x, y, box_normalized=True, name=None):\n\"\"\"\nif in_dygraph_mode():\n- out = core.ops.iou_similarity(x, y, 'box_normalized', box_normalized)\n+ out = _C_ops.iou_similarity(x, y, 'box_normalized', box_normalized)\nreturn out\nelse:\nhelper = LayerHelper(\"iou_similarity\", **locals())\n@@ -460,7 +461,7 @@ def collect_fpn_proposals(multi_rois,\nif in_dygraph_mode():\nassert rois_num_per_level is not None, \"rois_num_per_level should not be None in dygraph mode.\"\nattrs = ('post_nms_topN', post_nms_top_n)\n- output_rois, rois_num = core.ops.collect_fpn_proposals(\n+ output_rois, rois_num = _C_ops.collect_fpn_proposals(\ninput_rois, input_scores, rois_num_per_level, *attrs)\nreturn output_rois, rois_num\n@@ -575,7 +576,7 @@ def distribute_fpn_proposals(fpn_rois,\nattrs = ('min_level', min_level, 'max_level', max_level, 'refer_level',\nrefer_level, 'refer_scale', refer_scale, 'pixel_offset',\npixel_offset)\n- multi_rois, restore_ind, rois_num_per_level = core.ops.distribute_fpn_proposals(\n+ multi_rois, restore_ind, rois_num_per_level = _C_ops.distribute_fpn_proposals(\nfpn_rois, rois_num, num_lvl, num_lvl, *attrs)\nreturn multi_rois, restore_ind, rois_num_per_level\n@@ -729,7 +730,7 @@ def yolo_box(\nattrs = ('anchors', anchors, 'class_num', class_num, 'conf_thresh',\nconf_thresh, 'downsample_ratio', downsample_ratio, 'clip_bbox',\nclip_bbox, 'scale_x_y', scale_x_y)\n- boxes, scores = core.ops.yolo_box(x, origin_shape, *attrs)\n+ boxes, scores = _C_ops.yolo_box(x, origin_shape, *attrs)\nreturn boxes, scores\nelse:\nboxes = helper.create_variable_for_type_inference(dtype=x.dtype)\n@@ -867,7 +868,7 @@ def prior_box(input,\n'min_max_aspect_ratios_order', min_max_aspect_ratios_order)\nif cur_max_sizes is not None:\nattrs += ('max_sizes', cur_max_sizes)\n- box, var = core.ops.prior_box(input, image, *attrs)\n+ box, var = _C_ops.prior_box(input, image, *attrs)\nreturn box, var\nelse:\nattrs = {\n@@ -1010,7 +1011,7 @@ def multiclass_nms(bboxes,\nscore_threshold, 'nms_top_k', nms_top_k, 'nms_threshold',\nnms_threshold, 'keep_top_k', keep_top_k, 'nms_eta', nms_eta,\n'normalized', normalized)\n- output, index, nms_rois_num = core.ops.multiclass_nms3(bboxes, scores,\n+ output, index, nms_rois_num = _C_ops.multiclass_nms3(bboxes, scores,\nrois_num, *attrs)\nif not return_index:\nindex = 
None\n@@ -1152,7 +1153,7 @@ def matrix_nms(bboxes,\nnms_top_k, 'gaussian_sigma', gaussian_sigma, 'use_gaussian',\nuse_gaussian, 'keep_top_k', keep_top_k, 'normalized',\nnormalized)\n- out, index, rois_num = core.ops.matrix_nms(bboxes, scores, *attrs)\n+ out, index, rois_num = _C_ops.matrix_nms(bboxes, scores, *attrs)\nif not return_index:\nindex = None\nif not return_rois_num:\n@@ -1273,7 +1274,7 @@ def bipartite_match(dist_matrix,\n['float32', 'float64'], 'bipartite_match')\nif in_dygraph_mode():\n- match_indices, match_distance = core.ops.bipartite_match(\n+ match_indices, match_distance = _C_ops.bipartite_match(\ndist_matrix, \"match_type\", match_type, \"dist_threshold\",\ndist_threshold)\nreturn match_indices, match_distance\n@@ -1410,12 +1411,12 @@ def box_coder(prior_box,\nif in_dygraph_mode():\nif isinstance(prior_box_var, Variable):\n- output_box = core.ops.box_coder(\n+ output_box = _C_ops.box_coder(\nprior_box, prior_box_var, target_box, \"code_type\", code_type,\n\"box_normalized\", box_normalized, \"axis\", axis)\nelif isinstance(prior_box_var, list):\n- output_box = core.ops.box_coder(\n+ output_box = _C_ops.box_coder(\nprior_box, None, target_box, \"code_type\", code_type,\n\"box_normalized\", box_normalized, \"axis\", axis, \"variance\",\nprior_box_var)\n@@ -1538,7 +1539,7 @@ def generate_proposals(scores,\nattrs = ('pre_nms_topN', pre_nms_top_n, 'post_nms_topN', post_nms_top_n,\n'nms_thresh', nms_thresh, 'min_size', min_size, 'eta', eta,\n'pixel_offset', pixel_offset)\n- rpn_rois, rpn_roi_probs, rpn_rois_num = core.ops.generate_proposals_v2(\n+ rpn_rois, rpn_roi_probs, rpn_rois_num = _C_ops.generate_proposals_v2(\nscores, bbox_deltas, im_shape, anchors, variances, *attrs)\nif not return_rois_num:\nrpn_rois_num = None\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
change core.ops to _C_ops (#5663)
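The rename above is mechanical: every `core.ops.*` call becomes `_C_ops.*` from Paddle's public C-ops module. A hypothetical compatibility shim (not part of the commit) for code that must still import on older Paddle releases where `paddle._C_ops` is unavailable:

```python
# Hypothetical shim: fall back to the legacy alias on older Paddle versions.
try:
    from paddle import _C_ops
except ImportError:
    from paddle.fluid import core
    _C_ops = core.ops
```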
499,304
12.04.2022 11:58:14
-28,800
c612935d8d7431f3a730cf5e213159f6b20938d1
Simplify picodet postprocess
[ { "change_type": "MODIFY", "old_path": "configs/picodet/_base_/picodet_320_reader.yml", "new_path": "configs/picodet/_base_/picodet_320_reader.yml", "diff": "worker_num: 6\n+eval_height: &eval_height 320\n+eval_width: &eval_width 320\n+eval_size: &eval_size [*eval_height, *eval_width]\n+\nTrainReader:\nsample_transforms:\n- Decode: {}\n@@ -18,7 +22,7 @@ TrainReader:\nEvalReader:\nsample_transforms:\n- Decode: {}\n- - Resize: {interp: 2, target_size: [320, 320], keep_ratio: False}\n+ - Resize: {interp: 2, target_size: *eval_size, keep_ratio: False}\n- NormalizeImage: {is_scale: true, mean: [0.485,0.456,0.406], std: [0.229, 0.224,0.225]}\n- Permute: {}\nbatch_transforms:\n@@ -29,13 +33,10 @@ EvalReader:\nTestReader:\ninputs_def:\n- image_shape: [1, 3, 320, 320]\n+ image_shape: [1, 3, *eval_height, *eval_width]\nsample_transforms:\n- Decode: {}\n- - Resize: {interp: 2, target_size: [320, 320], keep_ratio: False}\n+ - Resize: {interp: 2, target_size: *eval_size, keep_ratio: False}\n- NormalizeImage: {is_scale: true, mean: [0.485,0.456,0.406], std: [0.229, 0.224,0.225]}\n- Permute: {}\n- batch_transforms:\n- - PadBatch: {pad_to_stride: 32}\nbatch_size: 1\n- shuffle: false\n" }, { "change_type": "MODIFY", "old_path": "configs/picodet/_base_/picodet_416_reader.yml", "new_path": "configs/picodet/_base_/picodet_416_reader.yml", "diff": "worker_num: 6\n+eval_height: &eval_height 416\n+eval_width: &eval_width 416\n+eval_size: &eval_size [*eval_height, *eval_width]\n+\nTrainReader:\nsample_transforms:\n- Decode: {}\n@@ -18,7 +22,7 @@ TrainReader:\nEvalReader:\nsample_transforms:\n- Decode: {}\n- - Resize: {interp: 2, target_size: [416, 416], keep_ratio: False}\n+ - Resize: {interp: 2, target_size: *eval_size, keep_ratio: False}\n- NormalizeImage: {is_scale: true, mean: [0.485,0.456,0.406], std: [0.229, 0.224,0.225]}\n- Permute: {}\nbatch_transforms:\n@@ -29,13 +33,10 @@ EvalReader:\nTestReader:\ninputs_def:\n- image_shape: [1, 3, 416, 416]\n+ image_shape: [1, 3, *eval_height, *eval_width]\nsample_transforms:\n- Decode: {}\n- - Resize: {interp: 2, target_size: [416, 416], keep_ratio: False}\n+ - Resize: {interp: 2, target_size: *eval_size, keep_ratio: False}\n- NormalizeImage: {is_scale: true, mean: [0.485,0.456,0.406], std: [0.229, 0.224,0.225]}\n- Permute: {}\n- batch_transforms:\n- - PadBatch: {pad_to_stride: 32}\nbatch_size: 1\n- shuffle: false\n" }, { "change_type": "MODIFY", "old_path": "configs/picodet/_base_/picodet_640_reader.yml", "new_path": "configs/picodet/_base_/picodet_640_reader.yml", "diff": "worker_num: 6\n+eval_height: &eval_height 640\n+eval_width: &eval_width 640\n+eval_size: &eval_size [*eval_height, *eval_width]\n+\nTrainReader:\nsample_transforms:\n- Decode: {}\n@@ -18,7 +22,7 @@ TrainReader:\nEvalReader:\nsample_transforms:\n- Decode: {}\n- - Resize: {interp: 2, target_size: [640, 640], keep_ratio: False}\n+ - Resize: {interp: 2, target_size: *eval_size, keep_ratio: False}\n- NormalizeImage: {is_scale: true, mean: [0.485,0.456,0.406], std: [0.229, 0.224,0.225]}\n- Permute: {}\nbatch_transforms:\n@@ -29,13 +33,10 @@ EvalReader:\nTestReader:\ninputs_def:\n- image_shape: [1, 3, 640, 640]\n+ image_shape: [1, 3, *eval_height, *eval_width]\nsample_transforms:\n- Decode: {}\n- - Resize: {interp: 2, target_size: [640, 640], keep_ratio: False}\n+ - Resize: {interp: 2, target_size: *eval_size, keep_ratio: False}\n- NormalizeImage: {is_scale: true, mean: [0.485,0.456,0.406], std: [0.229, 0.224,0.225]}\n- Permute: {}\n- batch_transforms:\n- - PadBatch: {pad_to_stride: 32}\nbatch_size: 
1\n- shuffle: false\n" }, { "change_type": "MODIFY", "old_path": "configs/picodet/legacy_model/_base_/picodet_320_reader.yml", "new_path": "configs/picodet/legacy_model/_base_/picodet_320_reader.yml", "diff": "worker_num: 6\n+eval_height: &eval_height 320\n+eval_width: &eval_width 320\n+eval_size: &eval_size [*eval_height, *eval_width]\n+\nTrainReader:\nsample_transforms:\n- Decode: {}\n@@ -18,7 +22,7 @@ TrainReader:\nEvalReader:\nsample_transforms:\n- Decode: {}\n- - Resize: {interp: 2, target_size: [320, 320], keep_ratio: False}\n+ - Resize: {interp: 2, target_size: *eval_size, keep_ratio: False}\n- NormalizeImage: {is_scale: true, mean: [0.485,0.456,0.406], std: [0.229, 0.224,0.225]}\n- Permute: {}\nbatch_transforms:\n@@ -29,13 +33,10 @@ EvalReader:\nTestReader:\ninputs_def:\n- image_shape: [1, 3, 320, 320]\n+ image_shape: [1, 3, *eval_height, *eval_width]\nsample_transforms:\n- Decode: {}\n- - Resize: {interp: 2, target_size: [320, 320], keep_ratio: False}\n+ - Resize: {interp: 2, target_size: *eval_size, keep_ratio: False}\n- NormalizeImage: {is_scale: true, mean: [0.485,0.456,0.406], std: [0.229, 0.224,0.225]}\n- Permute: {}\n- batch_transforms:\n- - PadBatch: {pad_to_stride: 32}\nbatch_size: 1\n- shuffle: false\n" }, { "change_type": "MODIFY", "old_path": "configs/picodet/legacy_model/_base_/picodet_416_reader.yml", "new_path": "configs/picodet/legacy_model/_base_/picodet_416_reader.yml", "diff": "worker_num: 6\n+eval_height: &eval_height 416\n+eval_width: &eval_width 416\n+eval_size: &eval_size [*eval_height, *eval_width]\n+\nTrainReader:\nsample_transforms:\n- Decode: {}\n@@ -18,7 +22,7 @@ TrainReader:\nEvalReader:\nsample_transforms:\n- Decode: {}\n- - Resize: {interp: 2, target_size: [416, 416], keep_ratio: False}\n+ - Resize: {interp: 2, target_size: *eval_size, keep_ratio: False}\n- NormalizeImage: {is_scale: true, mean: [0.485,0.456,0.406], std: [0.229, 0.224,0.225]}\n- Permute: {}\nbatch_transforms:\n@@ -29,13 +33,10 @@ EvalReader:\nTestReader:\ninputs_def:\n- image_shape: [1, 3, 416, 416]\n+ image_shape: [1, 3, *eval_height, *eval_width]\nsample_transforms:\n- Decode: {}\n- - Resize: {interp: 2, target_size: [416, 416], keep_ratio: False}\n+ - Resize: {interp: 2, target_size: *eval_size, keep_ratio: False}\n- NormalizeImage: {is_scale: true, mean: [0.485,0.456,0.406], std: [0.229, 0.224,0.225]}\n- Permute: {}\n- batch_transforms:\n- - PadBatch: {pad_to_stride: 32}\nbatch_size: 1\n- shuffle: false\n" }, { "change_type": "MODIFY", "old_path": "configs/picodet/legacy_model/_base_/picodet_640_reader.yml", "new_path": "configs/picodet/legacy_model/_base_/picodet_640_reader.yml", "diff": "worker_num: 6\n+eval_height: &eval_height 640\n+eval_width: &eval_width 640\n+eval_size: &eval_size [*eval_height, *eval_width]\n+\nTrainReader:\nsample_transforms:\n- Decode: {}\n@@ -18,7 +22,7 @@ TrainReader:\nEvalReader:\nsample_transforms:\n- Decode: {}\n- - Resize: {interp: 2, target_size: [640, 640], keep_ratio: False}\n+ - Resize: {interp: 2, target_size: *eval_size, keep_ratio: False}\n- NormalizeImage: {is_scale: true, mean: [0.485,0.456,0.406], std: [0.229, 0.224,0.225]}\n- Permute: {}\nbatch_transforms:\n@@ -29,13 +33,10 @@ EvalReader:\nTestReader:\ninputs_def:\n- image_shape: [1, 3, 640, 640]\n+ image_shape: [1, 3, *eval_height, *eval_width]\nsample_transforms:\n- Decode: {}\n- - Resize: {interp: 2, target_size: [640, 640], keep_ratio: False}\n+ - Resize: {interp: 2, target_size: *eval_size, keep_ratio: False}\n- NormalizeImage: {is_scale: true, mean: [0.485,0.456,0.406], std: 
[0.229, 0.224,0.225]}\n- Permute: {}\n- batch_transforms:\n- - PadBatch: {pad_to_stride: 32}\nbatch_size: 1\n- shuffle: false\n" }, { "change_type": "MODIFY", "old_path": "ppdet/modeling/architectures/picodet.py", "new_path": "ppdet/modeling/architectures/picodet.py", "diff": "@@ -67,10 +67,9 @@ class PicoDet(BaseArch):\nif self.training or not self.export_post_process:\nreturn head_outs, None\nelse:\n- im_shape = self.inputs['im_shape']\nscale_factor = self.inputs['scale_factor']\nbboxes, bbox_num = self.head.post_process(\n- head_outs, im_shape, scale_factor, export_nms=self.export_nms)\n+ head_outs, scale_factor, export_nms=self.export_nms)\nreturn bboxes, bbox_num\ndef get_loss(self, ):\n" }, { "change_type": "MODIFY", "old_path": "ppdet/modeling/heads/gfl_head.py", "new_path": "ppdet/modeling/heads/gfl_head.py", "diff": "@@ -79,7 +79,9 @@ class Integral(nn.Layer):\noffsets from the box center in four directions, shape (N, 4).\n\"\"\"\nx = F.softmax(x.reshape([-1, self.reg_max + 1]), axis=1)\n- x = F.linear(x, self.project).reshape([-1, 4])\n+ x = F.linear(x, self.project)\n+ if self.training:\n+ x = x.reshape([-1, 4])\nreturn x\n" }, { "change_type": "MODIFY", "old_path": "ppdet/modeling/heads/pico_head.py", "new_path": "ppdet/modeling/heads/pico_head.py", "diff": "@@ -194,7 +194,7 @@ class PicoHead(OTAVFLHead):\n'conv_feat', 'dgqp_module', 'loss_class', 'loss_dfl', 'loss_bbox',\n'assigner', 'nms'\n]\n- __shared__ = ['num_classes']\n+ __shared__ = ['num_classes', 'eval_size']\ndef __init__(self,\nconv_feat='PicoFeat',\n@@ -210,7 +210,8 @@ class PicoHead(OTAVFLHead):\nfeat_in_chan=96,\nnms=None,\nnms_pre=1000,\n- cell_offset=0):\n+ cell_offset=0,\n+ eval_size=None):\nsuper(PicoHead, self).__init__(\nconv_feat=conv_feat,\ndgqp_module=dgqp_module,\n@@ -239,6 +240,7 @@ class PicoHead(OTAVFLHead):\nself.nms = nms\nself.nms_pre = nms_pre\nself.cell_offset = cell_offset\n+ self.eval_size = eval_size\nself.use_sigmoid = self.loss_vfl.use_sigmoid\nif self.use_sigmoid:\n@@ -282,12 +284,50 @@ class PicoHead(OTAVFLHead):\nbias_attr=ParamAttr(initializer=Constant(value=0))))\nself.head_reg_list.append(head_reg)\n+ # initialize the anchor points\n+ if self.eval_size:\n+ self.anchor_points, self.stride_tensor = self._generate_anchors()\n+\ndef forward(self, fpn_feats, export_post_process=True):\nassert len(fpn_feats) == len(\nself.fpn_stride\n), \"The size of fpn_feats is not equal to size of fpn_stride\"\n- cls_logits_list = []\n- bboxes_reg_list = []\n+\n+ if self.training:\n+ return self.forward_train(fpn_feats)\n+ else:\n+ return self.forward_eval(\n+ fpn_feats, export_post_process=export_post_process)\n+\n+ def forward_train(self, fpn_feats):\n+ cls_logits_list, bboxes_reg_list = [], []\n+ for i, fpn_feat in enumerate(fpn_feats):\n+ conv_cls_feat, conv_reg_feat = self.conv_feat(fpn_feat, i)\n+ if self.conv_feat.share_cls_reg:\n+ cls_logits = self.head_cls_list[i](conv_cls_feat)\n+ cls_score, bbox_pred = paddle.split(\n+ cls_logits,\n+ [self.cls_out_channels, 4 * (self.reg_max + 1)],\n+ axis=1)\n+ else:\n+ cls_score = self.head_cls_list[i](conv_cls_feat)\n+ bbox_pred = self.head_reg_list[i](conv_reg_feat)\n+\n+ if self.dgqp_module:\n+ quality_score = self.dgqp_module(bbox_pred)\n+ cls_score = F.sigmoid(cls_score) * quality_score\n+\n+ cls_logits_list.append(cls_score)\n+ bboxes_reg_list.append(bbox_pred)\n+\n+ return (cls_logits_list, bboxes_reg_list)\n+\n+ def forward_eval(self, fpn_feats, export_post_process=True):\n+ if self.eval_size:\n+ anchor_points, stride_tensor = self.anchor_points, 
self.stride_tensor\n+ else:\n+ anchor_points, stride_tensor = self._generate_anchors(fpn_feats)\n+ cls_logits_list, bboxes_reg_list = [], []\nfor i, fpn_feat in enumerate(fpn_feats):\nconv_cls_feat, conv_reg_feat = self.conv_feat(fpn_feat, i)\nif self.conv_feat.share_cls_reg:\n@@ -307,50 +347,68 @@ class PicoHead(OTAVFLHead):\nif not export_post_process:\n# Now only supports batch size = 1 in deploy\n# TODO(ygh): support batch size > 1\n- cls_score = F.sigmoid(cls_score).reshape(\n+ cls_score_out = F.sigmoid(cls_score).reshape(\n[1, self.cls_out_channels, -1]).transpose([0, 2, 1])\nbbox_pred = bbox_pred.reshape([1, (self.reg_max + 1) * 4,\n-1]).transpose([0, 2, 1])\n- elif not self.training:\n- cls_score = F.sigmoid(cls_score.transpose([0, 2, 3, 1]))\n+ else:\n+ b, _, h, w = fpn_feat.shape\n+ l = h * w\n+ cls_score_out = F.sigmoid(\n+ cls_score.reshape([b, self.cls_out_channels, l]))\nbbox_pred = bbox_pred.transpose([0, 2, 3, 1])\n- stride = self.fpn_stride[i]\n- b, cell_h, cell_w, _ = paddle.shape(cls_score)\n- y, x = self.get_single_level_center_point(\n- [cell_h, cell_w], stride, cell_offset=self.cell_offset)\n- center_points = paddle.stack([x, y], axis=-1)\n- cls_score = cls_score.reshape([b, -1, self.cls_out_channels])\n- bbox_pred = self.distribution_project(bbox_pred) * stride\n- bbox_pred = bbox_pred.reshape([b, cell_h * cell_w, 4])\n-\n- # NOTE: If keep_ratio=False and image shape value that\n- # multiples of 32, distance2bbox not set max_shapes parameter\n- # to speed up model prediction. If need to set max_shapes,\n- # please use inputs['im_shape'].\n- bbox_pred = batch_distance2bbox(\n- center_points, bbox_pred, max_shapes=None)\n+ bbox_pred = self.distribution_project(bbox_pred)\n+ bbox_pred = bbox_pred.reshape([b, l, 4])\n- cls_logits_list.append(cls_score)\n+ cls_logits_list.append(cls_score_out)\nbboxes_reg_list.append(bbox_pred)\n+ if export_post_process:\n+ cls_logits_list = paddle.concat(cls_logits_list, axis=-1)\n+ bboxes_reg_list = paddle.concat(bboxes_reg_list, axis=1)\n+ bboxes_reg_list = batch_distance2bbox(anchor_points,\n+ bboxes_reg_list)\n+ bboxes_reg_list *= stride_tensor\n+\nreturn (cls_logits_list, bboxes_reg_list)\n- def post_process(self,\n- gfl_head_outs,\n- im_shape,\n- scale_factor,\n- export_nms=True):\n- cls_scores, bboxes_reg = gfl_head_outs\n- bboxes = paddle.concat(bboxes_reg, axis=1)\n- mlvl_scores = paddle.concat(cls_scores, axis=1)\n- mlvl_scores = mlvl_scores.transpose([0, 2, 1])\n+ def _generate_anchors(self, feats=None):\n+ # just use in eval time\n+ anchor_points = []\n+ stride_tensor = []\n+ for i, stride in enumerate(self.fpn_stride):\n+ if feats is not None:\n+ _, _, h, w = feats[i].shape\n+ else:\n+ h = math.ceil(self.eval_size[0] / stride)\n+ w = math.ceil(self.eval_size[1] / stride)\n+ shift_x = paddle.arange(end=w) + self.cell_offset\n+ shift_y = paddle.arange(end=h) + self.cell_offset\n+ shift_y, shift_x = paddle.meshgrid(shift_y, shift_x)\n+ anchor_point = paddle.cast(\n+ paddle.stack(\n+ [shift_x, shift_y], axis=-1), dtype='float32')\n+ anchor_points.append(anchor_point.reshape([-1, 2]))\n+ stride_tensor.append(\n+ paddle.full(\n+ [h * w, 1], stride, dtype='float32'))\n+ anchor_points = paddle.concat(anchor_points)\n+ stride_tensor = paddle.concat(stride_tensor)\n+ return anchor_points, stride_tensor\n+\n+ def post_process(self, head_outs, scale_factor, export_nms=True):\n+ pred_scores, pred_bboxes = head_outs\nif not export_nms:\n- return bboxes, mlvl_scores\n+ return pred_bboxes, pred_scores\nelse:\n# rescale: [h_scale, w_scale] 
-> [w_scale, h_scale, w_scale, h_scale]\n- im_scale = scale_factor.flip([1]).tile([1, 2]).unsqueeze(1)\n- bboxes /= im_scale\n- bbox_pred, bbox_num, _ = self.nms(bboxes, mlvl_scores)\n+ scale_y, scale_x = paddle.split(scale_factor, 2, axis=-1)\n+ scale_factor = paddle.concat(\n+ [scale_x, scale_y, scale_x, scale_y],\n+ axis=-1).reshape([-1, 1, 4])\n+ # scale bbox to origin image size.\n+ pred_bboxes /= scale_factor\n+ bbox_pred, bbox_num, _ = self.nms(pred_bboxes, pred_scores)\nreturn bbox_pred, bbox_num\n@@ -374,10 +432,9 @@ class PicoHeadV2(GFLHead):\n'conv_feat', 'dgqp_module', 'loss_class', 'loss_dfl', 'loss_bbox',\n'static_assigner', 'assigner', 'nms'\n]\n- __shared__ = ['num_classes']\n+ __shared__ = ['num_classes', 'eval_size']\n- def __init__(\n- self,\n+ def __init__(self,\nconv_feat='PicoFeatV2',\ndgqp_module=None,\nnum_classes=80,\n@@ -396,7 +453,8 @@ class PicoHeadV2(GFLHead):\nnms_pre=1000,\ncell_offset=0,\nact='hard_swish',\n- grid_cell_scale=5.0, ):\n+ grid_cell_scale=5.0,\n+ eval_size=None):\nsuper(PicoHeadV2, self).__init__(\nconv_feat=conv_feat,\ndgqp_module=dgqp_module,\n@@ -432,6 +490,7 @@ class PicoHeadV2(GFLHead):\nself.grid_cell_scale = grid_cell_scale\nself.use_align_head = use_align_head\nself.cls_out_channels = self.num_classes\n+ self.eval_size = eval_size\nbias_init_value = -math.log((1 - self.prior_prob) / self.prior_prob)\n# Clear the super class initialization\n@@ -478,11 +537,22 @@ class PicoHeadV2(GFLHead):\nact=self.act,\nuse_act_in_out=False))\n+ # initialize the anchor points\n+ if self.eval_size:\n+ self.anchor_points, self.stride_tensor = self._generate_anchors()\n+\ndef forward(self, fpn_feats, export_post_process=True):\nassert len(fpn_feats) == len(\nself.fpn_stride\n), \"The size of fpn_feats is not equal to size of fpn_stride\"\n+ if self.training:\n+ return self.forward_train(fpn_feats)\n+ else:\n+ return self.forward_eval(\n+ fpn_feats, export_post_process=export_post_process)\n+\n+ def forward_train(self, fpn_feats):\ncls_score_list, reg_list, box_list = [], [], []\nfor i, (fpn_feat, stride) in enumerate(zip(fpn_feats, self.fpn_stride)):\nb, _, h, w = get_static_shape(fpn_feat)\n@@ -498,15 +568,6 @@ class PicoHeadV2(GFLHead):\nelse:\ncls_score = F.sigmoid(cls_logit)\n- if not export_post_process and not self.training:\n- # Now only supports batch size = 1 in deploy\n- cls_score_list.append(\n- cls_score.reshape([1, self.cls_out_channels, -1]).transpose(\n- [0, 2, 1]))\n- box_list.append(\n- reg_pred.reshape([1, (self.reg_max + 1) * 4, -1]).transpose(\n- [0, 2, 1]))\n- else:\ncls_score_out = cls_score.transpose([0, 2, 3, 1])\nbbox_pred = reg_pred.transpose([0, 2, 3, 1])\nb, cell_h, cell_w, _ = paddle.shape(cls_score_out)\n@@ -519,23 +580,60 @@ class PicoHeadV2(GFLHead):\nbbox_pred = bbox_pred.reshape([b, cell_h * cell_w, 4])\nbbox_pred = batch_distance2bbox(\ncenter_points, bbox_pred, max_shapes=None)\n- if not self.training:\n- cls_score_list.append(cls_score_out)\n- box_list.append(bbox_pred)\n- else:\n- cls_score_list.append(\n- cls_score.flatten(2).transpose([0, 2, 1]))\n+ cls_score_list.append(cls_score.flatten(2).transpose([0, 2, 1]))\nreg_list.append(reg_pred.flatten(2).transpose([0, 2, 1]))\nbox_list.append(bbox_pred / stride)\n- if not self.training:\n- return cls_score_list, box_list\n- else:\ncls_score_list = paddle.concat(cls_score_list, axis=1)\nbox_list = paddle.concat(box_list, axis=1)\nreg_list = paddle.concat(reg_list, axis=1)\nreturn cls_score_list, reg_list, box_list, fpn_feats\n+ def forward_eval(self, fpn_feats, 
export_post_process=True):\n+ if self.eval_size:\n+ anchor_points, stride_tensor = self.anchor_points, self.stride_tensor\n+ else:\n+ anchor_points, stride_tensor = self._generate_anchors(fpn_feats)\n+ cls_score_list, box_list = [], []\n+ for i, (fpn_feat, stride) in enumerate(zip(fpn_feats, self.fpn_stride)):\n+ b, _, h, w = fpn_feat.shape\n+ # task decomposition\n+ conv_cls_feat, se_feat = self.conv_feat(fpn_feat, i)\n+ cls_logit = self.head_cls_list[i](se_feat)\n+ reg_pred = self.head_reg_list[i](se_feat)\n+\n+ # cls prediction and alignment\n+ if self.use_align_head:\n+ cls_prob = F.sigmoid(self.cls_align[i](conv_cls_feat))\n+ cls_score = (F.sigmoid(cls_logit) * cls_prob + eps).sqrt()\n+ else:\n+ cls_score = F.sigmoid(cls_logit)\n+\n+ if not export_post_process:\n+ # Now only supports batch size = 1 in deploy\n+ cls_score_list.append(\n+ cls_score.reshape([1, self.cls_out_channels, -1]).transpose(\n+ [0, 2, 1]))\n+ box_list.append(\n+ reg_pred.reshape([1, (self.reg_max + 1) * 4, -1]).transpose(\n+ [0, 2, 1]))\n+ else:\n+ l = h * w\n+ cls_score_out = cls_score.reshape([b, self.cls_out_channels, l])\n+ bbox_pred = reg_pred.transpose([0, 2, 3, 1])\n+ bbox_pred = self.distribution_project(bbox_pred)\n+ bbox_pred = bbox_pred.reshape([b, l, 4])\n+ cls_score_list.append(cls_score_out)\n+ box_list.append(bbox_pred)\n+\n+ if export_post_process:\n+ cls_score_list = paddle.concat(cls_score_list, axis=-1)\n+ box_list = paddle.concat(box_list, axis=1)\n+ box_list = batch_distance2bbox(anchor_points, box_list)\n+ box_list *= stride_tensor\n+\n+ return cls_score_list, box_list\n+\ndef get_loss(self, head_outs, gt_meta):\npred_scores, pred_regs, pred_bboxes, fpn_feats = head_outs\ngt_labels = gt_meta['gt_class']\n@@ -644,20 +742,41 @@ class PicoHeadV2(GFLHead):\nreturn loss_states\n- def post_process(self,\n- gfl_head_outs,\n- im_shape,\n- scale_factor,\n- export_nms=True):\n- cls_scores, bboxes_reg = gfl_head_outs\n- bboxes = paddle.concat(bboxes_reg, axis=1)\n- mlvl_scores = paddle.concat(cls_scores, axis=1)\n- mlvl_scores = mlvl_scores.transpose([0, 2, 1])\n+ def _generate_anchors(self, feats=None):\n+ # just use in eval time\n+ anchor_points = []\n+ stride_tensor = []\n+ for i, stride in enumerate(self.fpn_stride):\n+ if feats is not None:\n+ _, _, h, w = feats[i].shape\n+ else:\n+ h = math.ceil(self.eval_size[0] / stride)\n+ w = math.ceil(self.eval_size[1] / stride)\n+ shift_x = paddle.arange(end=w) + self.cell_offset\n+ shift_y = paddle.arange(end=h) + self.cell_offset\n+ shift_y, shift_x = paddle.meshgrid(shift_y, shift_x)\n+ anchor_point = paddle.cast(\n+ paddle.stack(\n+ [shift_x, shift_y], axis=-1), dtype='float32')\n+ anchor_points.append(anchor_point.reshape([-1, 2]))\n+ stride_tensor.append(\n+ paddle.full(\n+ [h * w, 1], stride, dtype='float32'))\n+ anchor_points = paddle.concat(anchor_points)\n+ stride_tensor = paddle.concat(stride_tensor)\n+ return anchor_points, stride_tensor\n+\n+ def post_process(self, head_outs, scale_factor, export_nms=True):\n+ pred_scores, pred_bboxes = head_outs\nif not export_nms:\n- return bboxes, mlvl_scores\n+ return pred_bboxes, pred_scores\nelse:\n# rescale: [h_scale, w_scale] -> [w_scale, h_scale, w_scale, h_scale]\n- im_scale = scale_factor.flip([1]).tile([1, 2]).unsqueeze(1)\n- bboxes /= im_scale\n- bbox_pred, bbox_num, _ = self.nms(bboxes, mlvl_scores)\n+ scale_y, scale_x = paddle.split(scale_factor, 2, axis=-1)\n+ scale_factor = paddle.concat(\n+ [scale_x, scale_y, scale_x, scale_y],\n+ axis=-1).reshape([-1, 1, 4])\n+ # scale bbox to origin image 
size.\n+ pred_bboxes /= scale_factor\n+ bbox_pred, bbox_num, _ = self.nms(pred_bboxes, pred_scores)\nreturn bbox_pred, bbox_num\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
Simplify picodet postprocess (#5650)
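The `_generate_anchors` method introduced above precomputes anchor centers and per-point strides once for a fixed eval size. A standalone numpy mirror of the same math (hypothetical helper, not the shipped code):

```python
import math
import numpy as np

def generate_anchors(eval_size, fpn_strides=(8, 16, 32, 64), cell_offset=0.0):
    anchor_points, stride_tensor = [], []
    for stride in fpn_strides:
        h = math.ceil(eval_size[0] / stride)
        w = math.ceil(eval_size[1] / stride)
        sx, sy = np.meshgrid(np.arange(w) + cell_offset,
                             np.arange(h) + cell_offset)
        # (h*w, 2) grid of cell centers, x first, matching the paddle code
        anchor_points.append(
            np.stack([sx, sy], axis=-1).reshape(-1, 2).astype('float32'))
        stride_tensor.append(np.full((h * w, 1), stride, dtype='float32'))
    return np.concatenate(anchor_points), np.concatenate(stride_tensor)

points, strides = generate_anchors((320, 320))
print(points.shape, strides.shape)  # (2125, 2) (2125, 1): 1600+400+100+25
```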
499,339
12.04.2022 16:05:18
-28,800
0c4166af9e5a0f5adf1069d503cd0d666cad6174
fix assigner dtype error
[ { "change_type": "MODIFY", "old_path": "ppdet/modeling/assigners/atss_assigner.py", "new_path": "ppdet/modeling/assigners/atss_assigner.py", "diff": "@@ -124,7 +124,8 @@ class ATSSAssigner(nn.Layer):\n# negative batch\nif num_max_boxes == 0:\n- assigned_labels = paddle.full([batch_size, num_anchors], bg_index)\n+ assigned_labels = paddle.full(\n+ [batch_size, num_anchors], bg_index, dtype=gt_labels.dtype)\nassigned_bboxes = paddle.zeros([batch_size, num_anchors, 4])\nassigned_scores = paddle.zeros(\n[batch_size, num_anchors, self.num_classes])\n" }, { "change_type": "MODIFY", "old_path": "ppdet/modeling/assigners/task_aligned_assigner.py", "new_path": "ppdet/modeling/assigners/task_aligned_assigner.py", "diff": "@@ -85,7 +85,8 @@ class TaskAlignedAssigner(nn.Layer):\n# negative batch\nif num_max_boxes == 0:\n- assigned_labels = paddle.full([batch_size, num_anchors], bg_index)\n+ assigned_labels = paddle.full(\n+ [batch_size, num_anchors], bg_index, dtype=gt_labels.dtype)\nassigned_bboxes = paddle.zeros([batch_size, num_anchors, 4])\nassigned_scores = paddle.zeros(\n[batch_size, num_anchors, num_classes])\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
fix assigner dtype error (#5665)
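The one-line fix above is needed because `paddle.full` defaults to float32 regardless of the fill value, while `gt_labels` is integer typed. A tiny sketch of the pitfall:

```python
import paddle

gt_labels = paddle.to_tensor([[1], [3]], dtype='int32')
bad = paddle.full([2, 4], 80)                          # float32 by default
good = paddle.full([2, 4], 80, dtype=gt_labels.dtype)  # int32, matches labels
print(bad.dtype, good.dtype)  # paddle.float32 paddle.int32
```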
499,299
13.04.2022 15:46:26
-28,800
8dfe9bc7d71923879720bfb6b3c201fbc99c8e93
fix visualize error in keypoint infer
[ { "change_type": "MODIFY", "old_path": "deploy/python/keypoint_infer.py", "new_path": "deploy/python/keypoint_infer.py", "diff": "@@ -267,8 +267,10 @@ class KeyPointDetector(Detector):\nprint('detect frame: %d' % (index))\nindex += 1\nresults = self.predict_image([frame], visual=False)\n+ im_results = {}\n+ im_results['keypoint'] = [results['keypoint'], results['score']]\nim = visualize_pose(\n- frame, results, visual_thresh=self.threshold, returnimg=True)\n+ frame, im_results, visual_thresh=self.threshold, returnimg=True)\nwriter.write(im)\nif camera_id != -1:\ncv2.imshow('Mask Detection', im)\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
fix visualize error in keypoint infer (#5676)
499,319
14.04.2022 12:10:09
-28,800
cfc020341c3209534e542dbdf161d0099117e396
[pico] openvino infer
[ { "change_type": "MODIFY", "old_path": "deploy/third_engine/demo_openvino/python/openvino_benchmark.py", "new_path": "deploy/third_engine/demo_openvino/python/openvino_benchmark.py", "diff": "@@ -339,7 +339,7 @@ if __name__ == '__main__':\nparser.add_argument(\n'--img_path',\ntype=str,\n- default='demo/000000014439.jpg',\n+ default='../../../../demo/000000014439.jpg',\nhelp=\"image path\")\nparser.add_argument(\n'--onnx_path',\n" }, { "change_type": "ADD", "old_path": null, "new_path": "deploy/third_engine/demo_openvino/python/openvino_infer.py", "diff": "+# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n+import cv2\n+import numpy as np\n+import argparse\n+from scipy.special import softmax\n+from openvino.runtime import Core\n+\n+\n+def image_preprocess(img_path, re_shape):\n+ img = cv2.imread(img_path)\n+ img = cv2.resize(\n+ img, (re_shape, re_shape), interpolation=cv2.INTER_LANCZOS4)\n+ img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n+ img = np.transpose(img, [2, 0, 1]) / 255\n+ img = np.expand_dims(img, 0)\n+ img_mean = np.array([0.485, 0.456, 0.406]).reshape((3, 1, 1))\n+ img_std = np.array([0.229, 0.224, 0.225]).reshape((3, 1, 1))\n+ img -= img_mean\n+ img /= img_std\n+ return img.astype(np.float32)\n+\n+\n+def get_color_map_list(num_classes):\n+ color_map = num_classes * [0, 0, 0]\n+ for i in range(0, num_classes):\n+ j = 0\n+ lab = i\n+ while lab:\n+ color_map[i * 3] |= (((lab >> 0) & 1) << (7 - j))\n+ color_map[i * 3 + 1] |= (((lab >> 1) & 1) << (7 - j))\n+ color_map[i * 3 + 2] |= (((lab >> 2) & 1) << (7 - j))\n+ j += 1\n+ lab >>= 3\n+ color_map = [color_map[i:i + 3] for i in range(0, len(color_map), 3)]\n+ return color_map\n+\n+\n+def draw_box(srcimg, results, class_label):\n+ label_list = list(\n+ map(lambda x: x.strip(), open(class_label, 'r').readlines()))\n+ for i in range(len(results)):\n+ color_list = get_color_map_list(len(label_list))\n+ clsid2color = {}\n+ classid, conf = int(results[i, 0]), results[i, 1]\n+ xmin, ymin, xmax, ymax = int(results[i, 2]), int(results[i, 3]), int(\n+ results[i, 4]), int(results[i, 5])\n+\n+ if classid not in clsid2color:\n+ clsid2color[classid] = color_list[classid]\n+ color = tuple(clsid2color[classid])\n+\n+ cv2.rectangle(srcimg, (xmin, ymin), (xmax, ymax), color, thickness=2)\n+ print(label_list[classid] + ': ' + str(round(conf, 3)))\n+ cv2.putText(\n+ srcimg,\n+ label_list[classid] + ':' + str(round(conf, 3)), (xmin, ymin - 10),\n+ cv2.FONT_HERSHEY_SIMPLEX,\n+ 0.8, (0, 255, 0),\n+ thickness=2)\n+ return srcimg\n+\n+\n+def hard_nms(box_scores, iou_threshold, top_k=-1, candidate_size=200):\n+ \"\"\"\n+ Args:\n+ box_scores (N, 5): boxes in corner-form and probabilities.\n+ iou_threshold: intersection over union threshold.\n+ top_k: keep top_k results. 
If k <= 0, keep all the results.\n+ candidate_size: only consider the candidates with the highest scores.\n+ Returns:\n+ picked: a list of indexes of the kept boxes\n+ \"\"\"\n+ scores = box_scores[:, -1]\n+ boxes = box_scores[:, :-1]\n+ picked = []\n+ indexes = np.argsort(scores)\n+ indexes = indexes[-candidate_size:]\n+ while len(indexes) > 0:\n+ current = indexes[-1]\n+ picked.append(current)\n+ if 0 < top_k == len(picked) or len(indexes) == 1:\n+ break\n+ current_box = boxes[current, :]\n+ indexes = indexes[:-1]\n+ rest_boxes = boxes[indexes, :]\n+ iou = iou_of(\n+ rest_boxes,\n+ np.expand_dims(\n+ current_box, axis=0), )\n+ indexes = indexes[iou <= iou_threshold]\n+\n+ return box_scores[picked, :]\n+\n+\n+def iou_of(boxes0, boxes1, eps=1e-5):\n+ \"\"\"Return intersection-over-union (Jaccard index) of boxes.\n+ Args:\n+ boxes0 (N, 4): ground truth boxes.\n+ boxes1 (N or 1, 4): predicted boxes.\n+ eps: a small number to avoid 0 as denominator.\n+ Returns:\n+ iou (N): IoU values.\n+ \"\"\"\n+ overlap_left_top = np.maximum(boxes0[..., :2], boxes1[..., :2])\n+ overlap_right_bottom = np.minimum(boxes0[..., 2:], boxes1[..., 2:])\n+\n+ overlap_area = area_of(overlap_left_top, overlap_right_bottom)\n+ area0 = area_of(boxes0[..., :2], boxes0[..., 2:])\n+ area1 = area_of(boxes1[..., :2], boxes1[..., 2:])\n+ return overlap_area / (area0 + area1 - overlap_area + eps)\n+\n+\n+def area_of(left_top, right_bottom):\n+ \"\"\"Compute the areas of rectangles given two corners.\n+ Args:\n+ left_top (N, 2): left top corner.\n+ right_bottom (N, 2): right bottom corner.\n+ Returns:\n+ area (N): return the area.\n+ \"\"\"\n+ hw = np.clip(right_bottom - left_top, 0.0, None)\n+ return hw[..., 0] * hw[..., 1]\n+\n+\n+class PicoDetNMS(object):\n+ \"\"\"\n+ Args:\n+ input_shape (int): network input image size\n+ scale_factor (float): scale factor of ori image\n+ \"\"\"\n+\n+ def __init__(self,\n+ input_shape,\n+ scale_x,\n+ scale_y,\n+ strides=[8, 16, 32, 64],\n+ score_threshold=0.4,\n+ nms_threshold=0.5,\n+ nms_top_k=1000,\n+ keep_top_k=100):\n+ self.input_shape = input_shape\n+ self.scale_x = scale_x\n+ self.scale_y = scale_y\n+ self.strides = strides\n+ self.score_threshold = score_threshold\n+ self.nms_threshold = nms_threshold\n+ self.nms_top_k = nms_top_k\n+ self.keep_top_k = keep_top_k\n+\n+ def __call__(self, decode_boxes, select_scores):\n+ batch_size = 1\n+ out_boxes_list = []\n+ for batch_id in range(batch_size):\n+ # nms\n+ bboxes = np.concatenate(decode_boxes, axis=0)\n+ confidences = np.concatenate(select_scores, axis=0)\n+ picked_box_probs = []\n+ picked_labels = []\n+ for class_index in range(0, confidences.shape[1]):\n+ probs = confidences[:, class_index]\n+ mask = probs > self.score_threshold\n+ probs = probs[mask]\n+ if probs.shape[0] == 0:\n+ continue\n+ subset_boxes = bboxes[mask, :]\n+ box_probs = np.concatenate(\n+ [subset_boxes, probs.reshape(-1, 1)], axis=1)\n+ box_probs = hard_nms(\n+ box_probs,\n+ iou_threshold=self.nms_threshold,\n+ top_k=self.keep_top_k, )\n+ picked_box_probs.append(box_probs)\n+ picked_labels.extend([class_index] * box_probs.shape[0])\n+\n+ if len(picked_box_probs) == 0:\n+ out_boxes_list.append(np.empty((0, 4)))\n+\n+ else:\n+ picked_box_probs = np.concatenate(picked_box_probs)\n+\n+ # resize output boxes\n+ picked_box_probs[:, 0] *= self.scale_x\n+ picked_box_probs[:, 2] *= self.scale_x\n+ picked_box_probs[:, 1] *= self.scale_y\n+ picked_box_probs[:, 3] *= self.scale_y\n+\n+ # clas score box\n+ out_boxes_list.append(\n+ np.concatenate(\n+ [\n+ np.expand_dims(\n+ 
np.array(picked_labels),\n+ axis=-1), np.expand_dims(\n+ picked_box_probs[:, 4], axis=-1),\n+ picked_box_probs[:, :4]\n+ ],\n+ axis=1))\n+\n+ out_boxes_list = np.concatenate(out_boxes_list, axis=0)\n+ return out_boxes_list\n+\n+\n+def detect(img_file, compiled_model, class_label):\n+ output = compiled_model.infer_new_request({0: test_image})\n+ result_ie = list(output.values())\n+\n+ decode_boxes = []\n+ select_scores = []\n+ num_outs = int(len(result_ie) / 2)\n+ for out_idx in range(num_outs):\n+ decode_boxes.append(result_ie[out_idx])\n+ select_scores.append(result_ie[out_idx + num_outs])\n+\n+ image = cv2.imread(img_file, 1)\n+ scale_x = image.shape[1] / test_image.shape[3]\n+ scale_y = image.shape[0] / test_image.shape[2]\n+\n+ nms = PicoDetNMS(test_image.shape[2:], scale_x, scale_y)\n+ np_boxes = nms(decode_boxes, select_scores)\n+\n+ res_image = draw_box(image, np_boxes, class_label)\n+\n+ cv2.imwrite('res.jpg', res_image)\n+ cv2.imshow(\"res\", res_image)\n+ cv2.waitKey()\n+\n+\n+if __name__ == '__main__':\n+\n+ parser = argparse.ArgumentParser()\n+ parser.add_argument(\n+ '--img_path',\n+ type=str,\n+ default='../../demo_onnxruntime/imgs/bus.jpg',\n+ help=\"image path\")\n+ parser.add_argument(\n+ '--onnx_path',\n+ type=str,\n+ default='out_onnxsim_infer/picodet_s_320_postproccesed_woNMS.onnx',\n+ help=\"onnx filepath\")\n+ parser.add_argument('--in_shape', type=int, default=320, help=\"input_size\")\n+ parser.add_argument(\n+ '--class_label',\n+ type=str,\n+ default='coco_label.txt',\n+ help=\"class label file\")\n+ args = parser.parse_args()\n+\n+ ie = Core()\n+ net = ie.read_model(args.onnx_path)\n+ test_image = image_preprocess(args.img_path, args.in_shape)\n+ compiled_model = ie.compile_model(net, 'CPU')\n+\n+ detect(args.img_path, compiled_model, args.class_label)\n" }, { "change_type": "ADD", "old_path": "docs/images/res.jpg", "new_path": "docs/images/res.jpg", "diff": "Binary files /dev/null and b/docs/images/res.jpg differ\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
[pico] openvino infer (#5687)
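A usage sketch for the `hard_nms` helper added in `openvino_infer.py` above; the import path is an assumption (the script must be importable from the working directory):

```python
import numpy as np
from openvino_infer import hard_nms  # assumes the script is importable

# Boxes in [x1, y1, x2, y2, score] form; the middle box overlaps the first
# with IoU ~0.82 and is suppressed at iou_threshold=0.5.
box_scores = np.array([
    [10., 10., 50., 50., 0.9],
    [12., 12., 52., 52., 0.8],
    [100., 100., 150., 150., 0.7],
])
kept = hard_nms(box_scores, iou_threshold=0.5)
print(kept[:, -1])  # [0.9 0.7]
```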
499,304
14.04.2022 12:23:42
-28,800
12865d90fc22c4ea65921ea8a4607a1b454ed573
fix Mask-RCNN QAT bug
[ { "change_type": "MODIFY", "old_path": "ppdet/modeling/post_process.py", "new_path": "ppdet/modeling/post_process.py", "diff": "@@ -33,7 +33,7 @@ __all__ = [\n@register\n-class BBoxPostProcess(nn.Layer):\n+class BBoxPostProcess(object):\n__shared__ = ['num_classes', 'export_onnx']\n__inject__ = ['decode', 'nms']\n@@ -45,7 +45,7 @@ class BBoxPostProcess(nn.Layer):\nself.nms = nms\nself.export_onnx = export_onnx\n- def forward(self, head_out, rois, im_shape, scale_factor):\n+ def __call__(self, head_out, rois, im_shape, scale_factor):\n\"\"\"\nDecode the bbox and do NMS if needed.\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
fix Mask-RCNN QAT bug (#5689)
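One plausible reading of why turning `BBoxPostProcess` into a plain object helps QAT (an interpretation, not stated in the commit): `nn.Layer` attributes are swept up by sublayer traversal and thus by quantization passes, while a plain callable is invisible to that traversal yet behaviorally identical. A sketch:

```python
import paddle.nn as nn

class PostProcess:                 # plain object, like BBoxPostProcess above
    def __call__(self, x):
        return x

class Model(nn.Layer):
    def __init__(self):
        super().__init__()
        self.backbone = nn.Conv2D(3, 8, 3)
        self.post = PostProcess()  # not registered as a sublayer

print(len(Model().sublayers()))    # 1 -- only the conv is traversed
```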
499,301
15.04.2022 13:44:00
-28,800
376c25c0955799cbbe10628cf0fc85010dabb41a
update readme, add yoloe
[ { "change_type": "MODIFY", "old_path": "README_en.md", "new_path": "README_en.md", "diff": "@@ -243,7 +243,7 @@ The relationship between COCO mAP and FPS on Tesla V100 of representative models\n- `PP-YOLO` achieves mAP of 45.9% on COCO and 72.9FPS on Tesla V100. Both precision and speed surpass [YOLOv4](https://arxiv.org/abs/2004.10934)\n- `PP-YOLO v2` is optimized version of `PP-YOLO` which has mAP of 49.5% and 68.9FPS on Tesla V100\n-\n+- `PP-YOLOE` is optimized version of `PP-YOLO v2` which has mAP of 51.4% and 78.1FPS on Tesla V100\n- All these models can be get in [Model Zoo](#ModelZoo)\nThe relationship between COCO mAP and FPS on Qualcomm Snapdragon 865 of representative mobile side models.\n" }, { "change_type": "MODIFY", "old_path": "docs/images/fps_map.png", "new_path": "docs/images/fps_map.png", "diff": "Binary files a/docs/images/fps_map.png and b/docs/images/fps_map.png differ\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
update readme, add yoloe (#5703)
499,339
20.04.2022 18:59:39
-28,800
c0cb9b02be7ff7bea26b17a17a5a359c4a22090a
[PPYOLOE] fix loss bug in negative batch
[ { "change_type": "MODIFY", "old_path": "ppdet/modeling/heads/ppyoloe_head.py", "new_path": "ppdet/modeling/heads/ppyoloe_head.py", "diff": "@@ -293,7 +293,7 @@ class PPYOLOEHead(nn.Layer):\nelse:\nloss_l1 = paddle.zeros([1])\nloss_iou = paddle.zeros([1])\n- loss_dfl = paddle.zeros([1])\n+ loss_dfl = pred_dist.sum() * 0.\nreturn loss_l1, loss_iou, loss_dfl\ndef get_loss(self, head_outs, gt_meta):\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
[PPYOLOE] fix loss bug in negative batch (#5774)
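The fix above replaces `paddle.zeros([1])` with `pred_dist.sum() * 0.` so that the zero DFL loss stays connected to the autograd graph on negative batches — commonly done so every parameter still receives a (zero) gradient, e.g. for multi-GPU gradient sync. A sketch of the difference:

```python
import paddle

pred_dist = paddle.rand([4, 17])
pred_dist.stop_gradient = False       # stand-in for a head output
detached = paddle.zeros([1])          # constant: no path back to the model
connected = pred_dist.sum() * 0.      # same zero value, live gradient path
print(detached.stop_gradient, connected.stop_gradient)  # True False
connected.backward()
print(pred_dist.grad is not None)     # True: zero gradients still flow
```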
499,313
21.04.2022 13:57:03
-28,800
5652dca372899acfbacf46d9d8427bd1ce215250
fix build wheel script, run unittest separately
[ { "change_type": "MODIFY", "old_path": "scripts/build_wheel.sh", "new_path": "scripts/build_wheel.sh", "diff": "@@ -91,9 +91,8 @@ function unittest() {\nif [ $? != 0 ]; then\nexit 1\nfi\n- find \"../ppdet\" -name 'tests' -type d -print0 | \\\n- xargs -0 -I{} -n1 bash -c \\\n- 'python -m unittest discover -v -s {}'\n+ find \"../ppdet\" -wholename '*tests/test_*' -type f -print0 | \\\n+ xargs -0 -I{} -n1 -t bash -c 'python -u -s {}'\n# clean TEST_DIR\ncd ..\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
fix build wheel script, run unittest separately (#5776)
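A pure-Python sketch equivalent to the per-file `find`/`xargs` loop above, for environments without those tools (a convenience, not part of the commit; it only picks up `.py` files and exits nonzero if any test script fails):

```python
import pathlib
import subprocess
import sys

failed = []
for test_file in sorted(pathlib.Path('../ppdet').glob('**/tests/test_*.py')):
    print('running', test_file)
    if subprocess.run([sys.executable, '-u', str(test_file)]).returncode != 0:
        failed.append(str(test_file))
sys.exit(1 if failed else 0)
```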
499,298
22.04.2022 10:40:35
-28,800
96959b21641544f2f334946e707cdf78738c03af
refine RetinaNet codes
[ { "change_type": "MODIFY", "old_path": "configs/retinanet/README.md", "new_path": "configs/retinanet/README.md", "diff": "-# Focal Loss for Dense Object Detection\n-\n-## Introduction\n-\n-We reproduce RetinaNet proposed in paper Focal Loss for Dense Object Detection.\n+# RetinaNet (Focal Loss for Dense Object Detection)\n## Model Zoo\n-| Backbone | Model | mstrain | imgs/GPU | lr schedule | FPS | Box AP | download | config |\n-| ------------ | --------- | ------- | -------- | ----------- | --- | ------ | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------- |\n-| ResNet50-FPN | RetinaNet | Yes | 4 | 1x | --- | 37.5 | [model](https://bj.bcebos.com/v1/paddledet/models/retinanet_r50_fpn_mstrain_1x_coco.pdparams)\\|[log](https://bj.bcebos.com/v1/paddledet/logs/retinanet_r50_fpn_mstrain_1x_coco.log) | retinanet_r50_fpn_mstrain_1x_coco.yml |\n-\n+| Backbone | Model | imgs/GPU | lr schedule | FPS | Box AP | download | config |\n+| ------------ | --------- | -------- | ----------- | --- | ------ | ---------- | ----------- |\n+| ResNet50-FPN | RetinaNet | 2 | 1x | --- | 37.5 | [model](https://bj.bcebos.com/v1/paddledet/models/retinanet_r50_fpn_1x_coco.pdparams) | [config](./retinanet_r50_fpn_1x_coco.yml) |\n**Notes:**\n-- All above models are trained on COCO train2017 with 4 GPUs and evaludated on val2017. Box AP=`mAP(IoU=0.5:0.95)`.\n+- All above models are trained on COCO train2017 with 8 GPUs and evaludated on val2017. Box AP=`mAP(IoU=0.5:0.95)`.\n-- Config `configs/retinanet/retinanet_r50_fpn_1x_coco.yml` is for 8 GPUs and `configs/retinanet/retinanet_r50_fpn_mstrain_1x_coco.yml` is for 4 GPUs (mind the difference of train batch size).\n## Citation\n" }, { "change_type": "MODIFY", "old_path": "configs/retinanet/_base_/retinanet_r50_fpn.yml", "new_path": "configs/retinanet/_base_/retinanet_r50_fpn.yml", "diff": "@@ -22,10 +22,6 @@ FPN:\nuse_c5: false\nRetinaHead:\n- num_classes: 80\n- prior_prob: 0.01\n- nms_pre: 1000\n- decode_reg_out: false\nconv_feat:\nname: RetinaFeat\nfeat_in: 256\n@@ -44,10 +40,6 @@ RetinaHead:\npositive_overlap: 0.5\nnegative_overlap: 0.4\nallow_low_quality: true\n- bbox_coder:\n- name: DeltaBBoxCoder\n- norm_mean: [0.0, 0.0, 0.0, 0.0]\n- norm_std: [1.0, 1.0, 1.0, 1.0]\nloss_class:\nname: FocalLoss\ngamma: 2.0\n" }, { "change_type": "MODIFY", "old_path": "configs/retinanet/_base_/retinanet_reader.yml", "new_path": "configs/retinanet/_base_/retinanet_reader.yml", "diff": "@@ -2,38 +2,35 @@ worker_num: 2\nTrainReader:\nsample_transforms:\n- Decode: {}\n- - RandomFlip: {prob: 0.5}\n- - Resize: {target_size: [800, 1333], keep_ratio: true, interp: 1}\n- - NormalizeImage: {is_scale: true, mean: [0.485, 0.456, 0.406], std: [0.229, 0.224, 0.225]}\n+ - RandomResize: {target_size: [[640, 1333], [672, 1333], [704, 1333], [736, 1333], [768, 1333], [800, 1333]], keep_ratio: True, interp: 1}\n+ - RandomFlip: {}\n+ - NormalizeImage: {is_scale: True, mean: [0.485, 0.456, 0.406], std: [0.229, 0.224, 0.225]}\n- Permute: {}\nbatch_transforms:\n- PadBatch: {pad_to_stride: 32}\nbatch_size: 2\n- shuffle: true\n- drop_last: true\n- use_process: true\n- collate_batch: false\n+ shuffle: True\n+ drop_last: True\n+ collate_batch: False\nEvalReader:\nsample_transforms:\n- Decode: {}\n- - Resize: {target_size: [800, 1333], keep_ratio: true, interp: 1}\n- - NormalizeImage: {is_scale: true, mean: [0.485, 0.456, 0.406], std: [0.229, 
0.224, 0.225]}\n+ - Resize: {target_size: [800, 1333], keep_ratio: True, interp: 1}\n+ - NormalizeImage: {is_scale: True, mean: [0.485, 0.456, 0.406], std: [0.229, 0.224, 0.225]}\n- Permute: {}\nbatch_transforms:\n- PadBatch: {pad_to_stride: 32}\n- batch_size: 2\n- shuffle: false\n+ batch_size: 8\nTestReader:\nsample_transforms:\n- Decode: {}\n- - Resize: {target_size: [800, 1333], keep_ratio: true, interp: 1}\n- - NormalizeImage: {is_scale: true, mean: [0.485, 0.456, 0.406], std: [0.229, 0.224, 0.225]}\n+ - Resize: {target_size: [800, 1333], keep_ratio: True, interp: 1}\n+ - NormalizeImage: {is_scale: True, mean: [0.485, 0.456, 0.406], std: [0.229, 0.224, 0.225]}\n- Permute: {}\nbatch_transforms:\n- PadBatch: {pad_to_stride: 32}\nbatch_size: 1\n- shuffle: false\n" }, { "change_type": "MODIFY", "old_path": "configs/retinanet/retinanet_r50_fpn_1x_coco.yml", "new_path": "configs/retinanet/retinanet_r50_fpn_1x_coco.yml", "diff": "@@ -7,4 +7,3 @@ _BASE_: [\n]\nweights: output/retinanet_r50_fpn_1x_coco/model_final\n-find_unused_parameters: true\n\\ No newline at end of file\n" }, { "change_type": "DELETE", "old_path": "configs/retinanet/retinanet_r50_fpn_mstrain_1x_coco.yml", "new_path": null, "diff": "-_BASE_: [\n- '../datasets/coco_detection.yml',\n- '../runtime.yml',\n- '_base_/retinanet_r50_fpn.yml',\n- '_base_/optimizer_1x.yml',\n- '_base_/retinanet_reader.yml'\n-]\n-\n-worker_num: 4\n-TrainReader:\n- batch_size: 4\n- sample_transforms:\n- - Decode: {}\n- - RandomFlip: {prob: 0.5}\n- - RandomResize: {target_size: [[640, 1333], [672, 1333], [704, 1333], [736, 1333], [768, 1333], [800, 1333]], keep_ratio: true, interp: 1}\n- - NormalizeImage: {is_scale: true, mean: [0.485, 0.456, 0.406], std: [0.229, 0.224, 0.225]}\n- - Permute: {}\n-\n-weights: output/retinanet_r50_fpn_mstrain_1x_coco/model_final\n-find_unused_parameters: true\n\\ No newline at end of file\n" }, { "change_type": "MODIFY", "old_path": "ppdet/modeling/__init__.py", "new_path": "ppdet/modeling/__init__.py", "diff": "@@ -29,7 +29,6 @@ from . import reid\nfrom . import mot\nfrom . import transformers\nfrom . import assigners\n-from . 
import coders\nfrom .ops import *\nfrom .backbones import *\n@@ -44,4 +43,3 @@ from .reid import *\nfrom .mot import *\nfrom .transformers import *\nfrom .assigners import *\n-from .coders import *\n" }, { "change_type": "MODIFY", "old_path": "ppdet/modeling/architectures/retinanet.py", "new_path": "ppdet/modeling/architectures/retinanet.py", "diff": "@@ -22,14 +22,12 @@ import paddle\n__all__ = ['RetinaNet']\n+\n@register\nclass RetinaNet(BaseArch):\n__category__ = 'architecture'\n- def __init__(self,\n- backbone,\n- neck,\n- head):\n+ def __init__(self, backbone, neck, head):\nsuper(RetinaNet, self).__init__()\nself.backbone = backbone\nself.neck = neck\n@@ -38,35 +36,33 @@ class RetinaNet(BaseArch):\n@classmethod\ndef from_config(cls, cfg, *args, **kwargs):\nbackbone = create(cfg['backbone'])\n+\nkwargs = {'input_shape': backbone.out_shape}\nneck = create(cfg['neck'], **kwargs)\n- head = create(cfg['head'])\n+\n+ kwargs = {'input_shape': neck.out_shape}\n+ head = create(cfg['head'], **kwargs)\n+\nreturn {\n'backbone': backbone,\n'neck': neck,\n- 'head': head}\n+ 'head': head,\n+ }\ndef _forward(self):\nbody_feats = self.backbone(self.inputs)\nneck_feats = self.neck(body_feats)\n+\n+ if self.training:\n+ return self.head(neck_feats, self.inputs)\n+ else:\nhead_outs = self.head(neck_feats)\n- if not self.training:\n- im_shape = self.inputs['im_shape']\n- scale_factor = self.inputs['scale_factor']\n- bboxes, bbox_num = self.head.post_process(head_outs, im_shape, scale_factor)\n- return bboxes, bbox_num\n- return head_outs\n+ bbox, bbox_num = self.head.post_process(\n+ head_outs, self.inputs['im_shape'], self.inputs['scale_factor'])\n+ return {'bbox': bbox, 'bbox_num': bbox_num}\ndef get_loss(self):\n- loss = dict()\n- head_outs = self._forward()\n- loss_retina = self.head.get_loss(head_outs, self.inputs)\n- loss.update(loss_retina)\n- total_loss = paddle.add_n(list(loss.values()))\n- loss.update(loss=total_loss)\n- return loss\n+ return self._forward()\ndef get_pred(self):\n- bbox_pred, bbox_num = self._forward()\n- output = dict(bbox=bbox_pred, bbox_num=bbox_num)\n- return output\n+ return self._forward()\n" }, { "change_type": "DELETE", "old_path": "ppdet/modeling/coders/__init__.py", "new_path": null, "diff": "-from .delta_bbox_coder import DeltaBBoxCoder\n" }, { "change_type": "DELETE", "old_path": "ppdet/modeling/coders/delta_bbox_coder.py", "new_path": null, "diff": "-import paddle\n-import numpy as np\n-from ppdet.core.workspace import register\n-from ppdet.modeling.bbox_utils import delta2bbox_v2, bbox2delta_v2\n-\n-__all__ = ['DeltaBBoxCoder']\n-\n-\n-@register\n-class DeltaBBoxCoder:\n- \"\"\"Encode bboxes in terms of delta/offset of a reference bbox.\n- Args:\n- norm_mean (list[float]): the mean to normalize delta\n- norm_std (list[float]): the std to normalize delta\n- wh_ratio_clip (float): to clip delta wh of decoded bboxes\n- ctr_clip (float or None): whether to clip delta xy of decoded bboxes\n- \"\"\"\n- def __init__(self,\n- norm_mean=[0.0, 0.0, 0.0, 0.0],\n- norm_std=[1., 1., 1., 1.],\n- wh_ratio_clip=16/1000.0,\n- ctr_clip=None):\n- self.norm_mean = norm_mean\n- self.norm_std = norm_std\n- self.wh_ratio_clip = wh_ratio_clip\n- self.ctr_clip = ctr_clip\n-\n- def encode(self, bboxes, tar_bboxes):\n- return bbox2delta_v2(\n- bboxes, tar_bboxes, means=self.norm_mean, stds=self.norm_std)\n-\n- def decode(self, bboxes, deltas, max_shape=None):\n- return delta2bbox_v2(\n- bboxes,\n- deltas,\n- max_shape=max_shape,\n- wh_ratio_clip=self.wh_ratio_clip,\n- 
ctr_clip=self.ctr_clip,\n- means=self.norm_mean,\n- stds=self.norm_std)\n" }, { "change_type": "MODIFY", "old_path": "ppdet/modeling/heads/retina_head.py", "new_path": "ppdet/modeling/heads/retina_head.py", "diff": "-# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.\n+# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n@@ -16,17 +16,20 @@ from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n-import math, paddle\n+import math\n+import paddle\nimport paddle.nn as nn\nimport paddle.nn.functional as F\nfrom paddle import ParamAttr\nfrom paddle.nn.initializer import Normal, Constant\n-from ppdet.modeling.proposal_generator import AnchorGenerator\n-from ppdet.core.workspace import register\n+from ppdet.modeling.bbox_utils import bbox2delta, delta2bbox\nfrom ppdet.modeling.heads.fcos_head import FCOSFeat\n+from ppdet.core.workspace import register\n+\n__all__ = ['RetinaHead']\n+\n@register\nclass RetinaFeat(FCOSFeat):\n\"\"\"We use FCOSFeat to construct conv layers in RetinaNet.\n@@ -34,72 +37,49 @@ class RetinaFeat(FCOSFeat):\n\"\"\"\npass\n-@register\n-class RetinaAnchorGenerator(AnchorGenerator):\n- def __init__(self,\n- octave_base_scale=4,\n- scales_per_octave=3,\n- aspect_ratios=[0.5, 1.0, 2.0],\n- strides=[8.0, 16.0, 32.0, 64.0, 128.0],\n- variance=[1.0, 1.0, 1.0, 1.0],\n- offset=0.0):\n- anchor_sizes = []\n- for s in strides:\n- anchor_sizes.append([\n- s * octave_base_scale * 2**(i/scales_per_octave) \\\n- for i in range(scales_per_octave)])\n- super(RetinaAnchorGenerator, self).__init__(\n- anchor_sizes=anchor_sizes,\n- aspect_ratios=aspect_ratios,\n- strides=strides,\n- variance=variance,\n- offset=offset)\n@register\nclass RetinaHead(nn.Layer):\n\"\"\"Used in RetinaNet proposed in paper https://arxiv.org/pdf/1708.02002.pdf\n\"\"\"\n+ __shared__ = ['num_classes']\n__inject__ = [\n- 'conv_feat', 'anchor_generator', 'bbox_assigner',\n- 'bbox_coder', 'loss_class', 'loss_bbox', 'nms']\n+ 'conv_feat', 'anchor_generator', 'bbox_assigner', 'loss_class',\n+ 'loss_bbox', 'nms'\n+ ]\n+\ndef __init__(self,\nnum_classes=80,\n+ conv_feat='RetinaFeat',\n+ anchor_generator='RetinaAnchorGenerator',\n+ bbox_assigner='MaxIoUAssigner',\n+ loss_class='FocalLoss',\n+ loss_bbox='SmoothL1Loss',\n+ nms='MultiClassNMS',\nprior_prob=0.01,\n- decode_reg_out=False,\n- conv_feat=None,\n- anchor_generator=None,\n- bbox_assigner=None,\n- bbox_coder=None,\n- loss_class=None,\n- loss_bbox=None,\nnms_pre=1000,\n- nms=None):\n+ weights=[1., 1., 1., 1.]):\nsuper(RetinaHead, self).__init__()\nself.num_classes = num_classes\n- self.prior_prob = prior_prob\n- # allow RetinaNet to use IoU based losses.\n- self.decode_reg_out = decode_reg_out\nself.conv_feat = conv_feat\nself.anchor_generator = anchor_generator\nself.bbox_assigner = bbox_assigner\n- self.bbox_coder = bbox_coder\nself.loss_class = loss_class\nself.loss_bbox = loss_bbox\n- self.nms_pre = nms_pre\nself.nms = nms\n- self.cls_out_channels = num_classes\n- self.init_layers()\n+ self.nms_pre = nms_pre\n+ self.weights = weights\n- def init_layers(self):\n- bias_init_value = -math.log((1 - self.prior_prob) / self.prior_prob)\n+ bias_init_value = -math.log((1 - prior_prob) / prior_prob)\nnum_anchors = self.anchor_generator.num_anchors\nself.retina_cls = nn.Conv2D(\nin_channels=self.conv_feat.feat_out,\n- out_channels=self.cls_out_channels * 
num_anchors,\n+ out_channels=self.num_classes * num_anchors,\nkernel_size=3,\nstride=1,\npadding=1,\n- weight_attr=ParamAttr(initializer=Normal(mean=0.0, std=0.01)),\n+ weight_attr=ParamAttr(initializer=Normal(\n+ mean=0.0, std=0.01)),\nbias_attr=ParamAttr(initializer=Constant(value=bias_init_value)))\nself.retina_reg = nn.Conv2D(\nin_channels=self.conv_feat.feat_out,\n@@ -107,10 +87,11 @@ class RetinaHead(nn.Layer):\nkernel_size=3,\nstride=1,\npadding=1,\n- weight_attr=ParamAttr(initializer=Normal(mean=0.0, std=0.01)),\n+ weight_attr=ParamAttr(initializer=Normal(\n+ mean=0.0, std=0.01)),\nbias_attr=ParamAttr(initializer=Constant(value=0)))\n- def forward(self, neck_feats):\n+ def forward(self, neck_feats, targets=None):\ncls_logits_list = []\nbboxes_reg_list = []\nfor neck_feat in neck_feats:\n@@ -119,33 +100,40 @@ class RetinaHead(nn.Layer):\nbbox_reg = self.retina_reg(conv_reg_feat)\ncls_logits_list.append(cls_logits)\nbboxes_reg_list.append(bbox_reg)\n- return (cls_logits_list, bboxes_reg_list)\n- def get_loss(self, head_outputs, meta):\n+ if self.training:\n+ return self.get_loss([cls_logits_list, bboxes_reg_list], targets)\n+ else:\n+ return [cls_logits_list, bboxes_reg_list]\n+\n+ def get_loss(self, head_outputs, targets):\n\"\"\"Here we calculate loss for a batch of images.\nWe assign anchors to gts in each image and gather all the assigned\npostive and negative samples. Then loss is calculated on the gathered\nsamples.\n\"\"\"\n- cls_logits, bboxes_reg = head_outputs\n- # we use the same anchor for all images\n- anchors = self.anchor_generator(cls_logits)\n+ cls_logits_list, bboxes_reg_list = head_outputs\n+ anchors = self.anchor_generator(cls_logits_list)\nanchors = paddle.concat(anchors)\n# matches: contain gt_inds\n# match_labels: -1(ignore), 0(neg) or 1(pos)\nmatches_list, match_labels_list = [], []\n# assign anchors to gts, no sampling is involved\n- for gt_bbox in meta['gt_bbox']:\n+ for gt_bbox in targets['gt_bbox']:\nmatches, match_labels = self.bbox_assigner(anchors, gt_bbox)\nmatches_list.append(matches)\nmatch_labels_list.append(match_labels)\n+\n# reshape network outputs\n- cls_logits = [_.transpose([0, 2, 3, 1]) for _ in cls_logits]\n- cls_logits = [_.reshape([0, -1, self.cls_out_channels]) \\\n- for _ in cls_logits]\n- bboxes_reg = [_.transpose([0, 2, 3, 1]) for _ in bboxes_reg]\n- bboxes_reg = [_.reshape([0, -1, 4]) for _ in bboxes_reg]\n+ cls_logits = [\n+ _.transpose([0, 2, 3, 1]).reshape([0, -1, self.num_classes])\n+ for _ in cls_logits_list\n+ ]\n+ bboxes_reg = [\n+ _.transpose([0, 2, 3, 1]).reshape([0, -1, 4])\n+ for _ in bboxes_reg_list\n+ ]\ncls_logits = paddle.concat(cls_logits, axis=1)\nbboxes_reg = paddle.concat(bboxes_reg, axis=1)\n@@ -154,7 +142,7 @@ class RetinaHead(nn.Layer):\n# find and gather preds and targets in each image\nfor matches, match_labels, cls_logit, bbox_reg, gt_bbox, gt_class in \\\nzip(matches_list, match_labels_list, cls_logits, bboxes_reg,\n- meta['gt_bbox'], meta['gt_class']):\n+ targets['gt_bbox'], targets['gt_class']):\npos_mask = (match_labels == 1)\nneg_mask = (match_labels == 0)\nchosen_mask = paddle.logical_or(pos_mask, neg_mask)\n@@ -163,19 +151,16 @@ class RetinaHead(nn.Layer):\nbg_class = paddle.to_tensor(\n[self.num_classes], dtype=gt_class.dtype)\n# a trick to assign num_classes to negative targets\n- gt_class = paddle.concat([gt_class, bg_class])\n- matches = paddle.where(\n- neg_mask, paddle.full_like(matches, gt_class.size-1), matches)\n+ gt_class = paddle.concat([gt_class, bg_class], axis=-1)\n+ matches = 
paddle.where(neg_mask,\n+ paddle.full_like(matches, gt_class.size - 1),\n+ matches)\ncls_pred = cls_logit[chosen_mask]\ncls_tar = gt_class[matches[chosen_mask]]\nreg_pred = bbox_reg[pos_mask].reshape([-1, 4])\nreg_tar = gt_bbox[matches[pos_mask]].reshape([-1, 4])\n- if self.decode_reg_out:\n- reg_pred = self.bbox_coder.decode(\n- anchors[pos_mask], reg_pred)\n- else:\n- reg_tar = self.bbox_coder.encode(anchors[pos_mask], reg_tar)\n+ reg_tar = bbox2delta(anchors[pos_mask], reg_tar, self.weights)\ncls_pred_list.append(cls_pred)\ncls_tar_list.append(cls_tar)\nreg_pred_list.append(reg_pred)\n@@ -184,27 +169,38 @@ class RetinaHead(nn.Layer):\ncls_tar = paddle.concat(cls_tar_list)\nreg_pred = paddle.concat(reg_pred_list)\nreg_tar = paddle.concat(reg_tar_list)\n+\navg_factor = max(1.0, reg_pred.shape[0])\ncls_loss = self.loss_class(\ncls_pred, cls_tar, reduction='sum') / avg_factor\n- if reg_pred.size == 0:\n- reg_loss = bboxes_reg[0][0].sum() * 0\n+\n+ if reg_pred.shape[0] == 0:\n+ reg_loss = paddle.zeros([1])\n+ reg_loss.stop_gradient = False\nelse:\nreg_loss = self.loss_bbox(\nreg_pred, reg_tar, reduction='sum') / avg_factor\n- return dict(loss_cls=cls_loss, loss_reg=reg_loss)\n+\n+ loss = cls_loss + reg_loss\n+ out_dict = {\n+ 'loss_cls': cls_loss,\n+ 'loss_reg': reg_loss,\n+ 'loss': loss,\n+ }\n+ return out_dict\ndef get_bboxes_single(self,\nanchors,\n- cls_scores,\n- bbox_preds,\n+ cls_scores_list,\n+ bbox_preds_list,\nim_shape,\nscale_factor,\nrescale=True):\n- assert len(cls_scores) == len(bbox_preds)\n+ assert len(cls_scores_list) == len(bbox_preds_list)\nmlvl_bboxes = []\nmlvl_scores = []\n- for anchor, cls_score, bbox_pred in zip(anchors, cls_scores, bbox_preds):\n+ for anchor, cls_score, bbox_pred in zip(anchors, cls_scores_list,\n+ bbox_preds_list):\ncls_score = cls_score.reshape([-1, self.num_classes])\nbbox_pred = bbox_pred.reshape([-1, 4])\nif self.nms_pre is not None and cls_score.shape[0] > self.nms_pre:\n@@ -213,9 +209,7 @@ class RetinaHead(nn.Layer):\nbbox_pred = bbox_pred.gather(topk_inds)\nanchor = anchor.gather(topk_inds)\ncls_score = cls_score.gather(topk_inds)\n- bbox_pred = self.bbox_coder.decode(\n- anchor, bbox_pred, max_shape=im_shape)\n- bbox_pred = bbox_pred.squeeze()\n+ bbox_pred = delta2bbox(bbox_pred, anchor, self.weights).squeeze()\nmlvl_bboxes.append(bbox_pred)\nmlvl_scores.append(F.sigmoid(cls_score))\nmlvl_bboxes = paddle.concat(mlvl_bboxes)\n@@ -227,18 +221,15 @@ class RetinaHead(nn.Layer):\nmlvl_scores = mlvl_scores.transpose([1, 0])\nreturn mlvl_bboxes, mlvl_scores\n- def decode(self, anchors, cls_scores, bbox_preds, im_shape, scale_factor):\n+ def decode(self, anchors, cls_logits, bboxes_reg, im_shape, scale_factor):\nbatch_bboxes = []\nbatch_scores = []\n- for img_id in range(cls_scores[0].shape[0]):\n- num_lvls = len(cls_scores)\n- cls_score_list = [cls_scores[i][img_id] for i in range(num_lvls)]\n- bbox_pred_list = [bbox_preds[i][img_id] for i in range(num_lvls)]\n+ for img_id in range(cls_logits[0].shape[0]):\n+ num_lvls = len(cls_logits)\n+ cls_scores_list = [cls_logits[i][img_id] for i in range(num_lvls)]\n+ bbox_preds_list = [bboxes_reg[i][img_id] for i in range(num_lvls)]\nbboxes, scores = self.get_bboxes_single(\n- anchors,\n- cls_score_list,\n- bbox_pred_list,\n- im_shape[img_id],\n+ anchors, cls_scores_list, bbox_preds_list, im_shape[img_id],\nscale_factor[img_id])\nbatch_bboxes.append(bboxes)\nbatch_scores.append(scores)\n@@ -247,11 +238,12 @@ class RetinaHead(nn.Layer):\nreturn batch_bboxes, batch_scores\ndef post_process(self, head_outputs, 
im_shape, scale_factor):\n- cls_scores, bbox_preds = head_outputs\n- anchors = self.anchor_generator(cls_scores)\n- cls_scores = [_.transpose([0, 2, 3, 1]) for _ in cls_scores]\n- bbox_preds = [_.transpose([0, 2, 3, 1]) for _ in bbox_preds]\n- bboxes, scores = self.decode(\n- anchors, cls_scores, bbox_preds, im_shape, scale_factor)\n+ cls_logits_list, bboxes_reg_list = head_outputs\n+ anchors = self.anchor_generator(cls_logits_list)\n+ cls_logits = [_.transpose([0, 2, 3, 1]) for _ in cls_logits_list]\n+ bboxes_reg = [_.transpose([0, 2, 3, 1]) for _ in bboxes_reg_list]\n+ bboxes, scores = self.decode(anchors, cls_logits, bboxes_reg, im_shape,\n+ scale_factor)\n+\nbbox_pred, bbox_num, _ = self.nms(bboxes, scores)\nreturn bbox_pred, bbox_num\n" }, { "change_type": "MODIFY", "old_path": "ppdet/modeling/proposal_generator/anchor_generator.py", "new_path": "ppdet/modeling/proposal_generator/anchor_generator.py", "diff": "@@ -22,6 +22,8 @@ import paddle.nn as nn\nfrom ppdet.core.workspace import register\n+__all__ = ['AnchorGenerator', 'RetinaAnchorGenerator']\n+\n@register\nclass AnchorGenerator(nn.Layer):\n@@ -129,3 +131,25 @@ class AnchorGenerator(nn.Layer):\nFor FPN models, `num_anchors` on every feature map is the same.\n\"\"\"\nreturn len(self.cell_anchors[0])\n+\n+\n+@register\n+class RetinaAnchorGenerator(AnchorGenerator):\n+ def __init__(self,\n+ octave_base_scale=4,\n+ scales_per_octave=3,\n+ aspect_ratios=[0.5, 1.0, 2.0],\n+ strides=[8.0, 16.0, 32.0, 64.0, 128.0],\n+ variance=[1.0, 1.0, 1.0, 1.0],\n+ offset=0.0):\n+ anchor_sizes = []\n+ for s in strides:\n+ anchor_sizes.append([\n+ s * octave_base_scale * 2**(i/scales_per_octave) \\\n+ for i in range(scales_per_octave)])\n+ super(RetinaAnchorGenerator, self).__init__(\n+ anchor_sizes=anchor_sizes,\n+ aspect_ratios=aspect_ratios,\n+ strides=strides,\n+ variance=variance,\n+ offset=offset)\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
refine RetinaNet codes (#5796)
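The refactor above folds the standalone DeltaBBoxCoder into the head and relies on the bbox2delta/delta2bbox helpers instead. A minimal numpy sketch of the standard R-CNN delta parameterization those helpers implement (an illustrative stand-in, not the ppdet code; `weights` mirrors the head's weights=[1., 1., 1., 1.]):

import numpy as np

def bbox2delta(anchors, gt, weights=(1., 1., 1., 1.)):
    # boxes are (x1, y1, x2, y2); encode gt relative to anchors
    aw, ah = anchors[:, 2] - anchors[:, 0], anchors[:, 3] - anchors[:, 1]
    ax, ay = anchors[:, 0] + 0.5 * aw, anchors[:, 1] + 0.5 * ah
    gw, gh = gt[:, 2] - gt[:, 0], gt[:, 3] - gt[:, 1]
    gx, gy = gt[:, 0] + 0.5 * gw, gt[:, 1] + 0.5 * gh
    wx, wy, ww, wh = weights
    return np.stack([wx * (gx - ax) / aw, wy * (gy - ay) / ah,
                     ww * np.log(gw / aw), wh * np.log(gh / ah)], axis=1)

def delta2bbox(deltas, anchors, weights=(1., 1., 1., 1.)):
    # inverse transform: deltas plus anchors back to (x1, y1, x2, y2)
    aw, ah = anchors[:, 2] - anchors[:, 0], anchors[:, 3] - anchors[:, 1]
    ax, ay = anchors[:, 0] + 0.5 * aw, anchors[:, 1] + 0.5 * ah
    wx, wy, ww, wh = weights
    cx = deltas[:, 0] / wx * aw + ax
    cy = deltas[:, 1] / wy * ah + ay
    w = np.exp(deltas[:, 2] / ww) * aw
    h = np.exp(deltas[:, 3] / wh) * ah
    return np.stack([cx - 0.5 * w, cy - 0.5 * h,
                     cx + 0.5 * w, cy + 0.5 * h], axis=1)

Round-tripping delta2bbox(bbox2delta(anchors, gt), anchors) recovers gt up to float error, which is what lets the head drop the separate coder object.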
499,304
22.04.2022 11:43:04
-28,800
ad0a436a12d45ec3cd13ae16bac79611908a872b
fix AugmentHSV output type
[ { "change_type": "MODIFY", "old_path": "ppdet/data/transform/operators.py", "new_path": "ppdet/data/transform/operators.py", "diff": "@@ -2202,7 +2202,7 @@ class AugmentHSV(BaseOperator):\nelse:\ncv2.cvtColor(img_hsv, cv2.COLOR_HSV2RGB, dst=img)\n- sample['image'] = img\n+ sample['image'] = img.astype(np.float32)\nreturn sample\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
fix AugmentHSV output type (#5800)
499,298
22.04.2022 15:23:31
-28,800
7c5736b2d46c940caf4b6a35c6abfa533bfb9ff6
fix yolox cpp infer
[ { "change_type": "MODIFY", "old_path": "configs/yolox/_base_/yolox_reader.yml", "new_path": "configs/yolox/_base_/yolox_reader.yml", "diff": "@@ -27,8 +27,8 @@ TrainReader:\nEvalReader:\nsample_transforms:\n- Decode: {}\n- - Resize: {target_size: 640, keep_ratio: True}\n- - Pad: {size: 640, fill_value: [114., 114., 114.]}\n+ - Resize: {target_size: [640, 640], keep_ratio: True, interp: 1}\n+ - Pad: {size: [640, 640], fill_value: [114., 114., 114.]}\n- Permute: {}\nbatch_size: 4\n@@ -38,7 +38,7 @@ TestReader:\nimage_shape: [3, 640, 640]\nsample_transforms:\n- Decode: {}\n- - Resize: {target_size: 640, keep_ratio: True}\n- - Pad: {size: 640, fill_value: [114., 114., 114.]}\n+ - Resize: {target_size: [640, 640], keep_ratio: True, interp: 1}\n+ - Pad: {size: [640, 640], fill_value: [114., 114., 114.]}\n- Permute: {}\nbatch_size: 1\n" }, { "change_type": "MODIFY", "old_path": "configs/yolox/yolox_nano_300e_coco.yml", "new_path": "configs/yolox/yolox_nano_300e_coco.yml", "diff": "@@ -64,8 +64,8 @@ TrainReader:\nEvalReader:\nsample_transforms:\n- Decode: {}\n- - Resize: {target_size: 416, keep_ratio: True}\n- - Pad: {size: 416, fill_value: [114., 114., 114.]}\n+ - Resize: {target_size: [416, 416], keep_ratio: True, interp: 1}\n+ - Pad: {size: [416, 416], fill_value: [114., 114., 114.]}\n- Permute: {}\nbatch_size: 8\n@@ -75,7 +75,7 @@ TestReader:\nimage_shape: [3, 416, 416]\nsample_transforms:\n- Decode: {}\n- - Resize: {target_size: 416, keep_ratio: True}\n- - Pad: {size: 416, fill_value: [114., 114., 114.]}\n+ - Resize: {target_size: [416, 416], keep_ratio: True, interp: 1}\n+ - Pad: {size: [416, 416], fill_value: [114., 114., 114.]}\n- Permute: {}\nbatch_size: 1\n" }, { "change_type": "MODIFY", "old_path": "configs/yolox/yolox_tiny_300e_coco.yml", "new_path": "configs/yolox/yolox_tiny_300e_coco.yml", "diff": "@@ -52,8 +52,8 @@ TrainReader:\nEvalReader:\nsample_transforms:\n- Decode: {}\n- - Resize: {target_size: 416, keep_ratio: True}\n- - Pad: {size: 416, fill_value: [114., 114., 114.]}\n+ - Resize: {target_size: [416, 416], keep_ratio: True, interp: 1}\n+ - Pad: {size: [416, 416], fill_value: [114., 114., 114.]}\n- Permute: {}\nbatch_size: 8\n@@ -63,7 +63,7 @@ TestReader:\nimage_shape: [3, 416, 416]\nsample_transforms:\n- Decode: {}\n- - Resize: {target_size: 416, keep_ratio: True}\n- - Pad: {size: 416, fill_value: [114., 114., 114.]}\n+ - Resize: {target_size: [416, 416], keep_ratio: True, interp: 1}\n+ - Pad: {size: [416, 416], fill_value: [114., 114., 114.]}\n- Permute: {}\nbatch_size: 1\n" }, { "change_type": "MODIFY", "old_path": "deploy/cpp/include/preprocess_op.h", "new_path": "deploy/cpp/include/preprocess_op.h", "diff": "@@ -161,6 +161,20 @@ class WarpAffine : public PreprocessOp {\nint pad_ = 31;\n};\n+class Pad : public PreprocessOp {\n+ public:\n+ virtual void Init(const YAML::Node& item) {\n+ size_ = item[\"size\"].as<std::vector<int>>();\n+ fill_value_ = item[\"fill_value\"].as<std::vector<float>>();\n+ }\n+\n+ virtual void Run(cv::Mat* im, ImageBlob* data);\n+\n+ private:\n+ std::vector<int> size_;\n+ std::vector<float> fill_value_;\n+};\n+\nvoid CropImg(cv::Mat& img,\ncv::Mat& crop_img,\nstd::vector<int>& area,\n@@ -203,6 +217,8 @@ class Preprocessor {\nreturn std::make_shared<TopDownEvalAffine>();\n} else if (name == \"WarpAffine\") {\nreturn std::make_shared<WarpAffine>();\n+ }else if (name == \"Pad\") {\n+ return std::make_shared<Pad>();\n}\nstd::cerr << \"can not find function of OP: \" << name\n<< \" and return: nullptr\" << std::endl;\n" }, { "change_type": "MODIFY", 
"old_path": "deploy/cpp/src/preprocess_op.cc", "new_path": "deploy/cpp/src/preprocess_op.cc", "diff": "@@ -229,6 +229,23 @@ void WarpAffine::Run(cv::Mat* im, ImageBlob* data) {\n};\n}\n+void Pad::Run(cv::Mat* im, ImageBlob* data) {\n+ int h = size_[0];\n+ int w = size_[1];\n+ int rh = im->rows;\n+ int rw = im->cols;\n+ if (h == rh && w == rw){\n+ data->in_net_im_ = im->clone();\n+ return;\n+ }\n+ cv::copyMakeBorder(\n+ *im, *im, 0, h - rh, 0, w - rw, cv::BORDER_CONSTANT, cv::Scalar(114));\n+ data->in_net_im_ = im->clone();\n+ data->in_net_shape_ = {\n+ static_cast<float>(im->rows), static_cast<float>(im->cols),\n+ };\n+}\n+\n// Preprocessor op running order\nconst std::vector<std::string> Preprocessor::RUN_ORDER = {\"InitInfo\",\n\"TopDownEvalAffine\",\n@@ -237,7 +254,8 @@ const std::vector<std::string> Preprocessor::RUN_ORDER = {\"InitInfo\",\n\"WarpAffine\",\n\"NormalizeImage\",\n\"PadStride\",\n- \"Permute\"};\n+ \"Permute\",\n+ \"Pad\",};\nvoid Preprocessor::Run(cv::Mat* im, ImageBlob* data) {\nfor (const auto& name : RUN_ORDER) {\n" }, { "change_type": "MODIFY", "old_path": "deploy/python/preprocess.py", "new_path": "deploy/python/preprocess.py", "diff": "@@ -247,77 +247,30 @@ class LetterBoxResize(object):\nclass Pad(object):\n- def __init__(self,\n- size=None,\n- size_divisor=32,\n- pad_mode=0,\n- offsets=None,\n- fill_value=(127.5, 127.5, 127.5)):\n+ def __init__(self, size, fill_value=[114.0, 114.0, 114.0]):\n\"\"\"\n- Pad image to a specified size or multiple of size_divisor.\n+ Pad image to a specified size.\nArgs:\n- size (int, Sequence): image target size, if None, pad to multiple of size_divisor, default None\n- size_divisor (int): size divisor, default 32\n- pad_mode (int): pad mode, currently only supports four modes [-1, 0, 1, 2]. if -1, use specified offsets\n- if 0, only pad to right and bottom. if 1, pad according to center. 
if 2, only pad left and top\n- offsets (list): [offset_x, offset_y], specify offset while padding, only supported pad_mode=-1\n- fill_value (bool): rgb value of pad area, default (127.5, 127.5, 127.5)\n+ size (list[int]): image target size\n+ fill_value (list[float]): rgb value of pad area, default (114.0, 114.0, 114.0)\n\"\"\"\nsuper(Pad, self).__init__()\nif isinstance(size, int):\nsize = [size, size]\n-\n- assert pad_mode in [\n- -1, 0, 1, 2\n- ], 'currently only supports four modes [-1, 0, 1, 2]'\n- if pad_mode == -1:\n- assert offsets, 'if pad_mode is -1, offsets should not be None'\n-\nself.size = size\n- self.size_divisor = size_divisor\n- self.pad_mode = pad_mode\nself.fill_value = fill_value\n- self.offsets = offsets\n-\n- def apply_image(self, image, offsets, im_size, size):\n- x, y = offsets\n- im_h, im_w = im_size\n- h, w = size\n- canvas = np.ones((h, w, 3), dtype=np.float32)\n- canvas *= np.array(self.fill_value, dtype=np.float32)\n- canvas[y:y + im_h, x:x + im_w, :] = image.astype(np.float32)\n- return canvas\ndef __call__(self, im, im_info):\nim_h, im_w = im.shape[:2]\n- if self.size:\nh, w = self.size\n- assert (\n- im_h <= h and im_w <= w\n- ), '(h, w) of target size should be greater than (im_h, im_w)'\n- else:\n- h = int(np.ceil(im_h / self.size_divisor) * self.size_divisor)\n- w = int(np.ceil(im_w / self.size_divisor) * self.size_divisor)\n-\nif h == im_h and w == im_w:\nim = im.astype(np.float32)\nreturn im, im_info\n- if self.pad_mode == -1:\n- offset_x, offset_y = self.offsets\n- elif self.pad_mode == 0:\n- offset_y, offset_x = 0, 0\n- elif self.pad_mode == 1:\n- offset_y, offset_x = (h - im_h) // 2, (w - im_w) // 2\n- else:\n- offset_y, offset_x = h - im_h, w - im_w\n-\n- offsets, im_size, size = [offset_x, offset_y], [im_h, im_w], [h, w]\n- im = self.apply_image(im, offsets, im_size, size)\n-\n- if self.pad_mode == 0:\n- return im, im_info\n-\n+ canvas = np.ones((h, w, 3), dtype=np.float32)\n+ canvas *= np.array(self.fill_value, dtype=np.float32)\n+ canvas[0:im_h, 0:im_w, :] = im.astype(np.float32)\n+ im = canvas\nreturn im, im_info\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
fix yolox cpp infer (#5804)
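The simplified Python Pad above reduces to one paste onto a fill-valued canvas, anchored top-left, which matches the new C++ Pad op. A numpy equivalent of just the image path (a sketch; the im_info bookkeeping is omitted):

import numpy as np

def pad_to_size(im, size, fill_value=(114.0, 114.0, 114.0)):
    h, w = size
    im_h, im_w = im.shape[:2]
    if (im_h, im_w) == (h, w):          # nothing to pad
        return im.astype(np.float32)
    canvas = np.full((h, w, 3), fill_value, dtype=np.float32)
    canvas[:im_h, :im_w, :] = im.astype(np.float32)  # paste at top-left
    return canvas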
499,304
22.04.2022 19:40:36
-28,800
97e9e75bd1c746084d8f047cfd7abf8cc64c9263
fix transform resize type to float32
[ { "change_type": "MODIFY", "old_path": "ppdet/data/transform/operators.py", "new_path": "ppdet/data/transform/operators.py", "diff": "@@ -824,7 +824,7 @@ class Resize(BaseOperator):\nim_scale_x = resize_w / im_shape[1]\nim = self.apply_image(sample['image'], [im_scale_x, im_scale_y])\n- sample['image'] = im\n+ sample['image'] = im.astype(np.float32)\nsample['im_shape'] = np.asarray([resize_h, resize_w], dtype=np.float32)\nif 'scale_factor' in sample:\nscale_factor = sample['scale_factor']\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
fix transform resize type to float32 (#5802)
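This cast, like the AugmentHSV fix two commits earlier, pins transform outputs to float32 so later stages neither stay uint8 nor silently promote to float64. A toy numpy illustration of the dtype drift being avoided (not ppdet code):

import numpy as np

img = np.zeros((4, 4, 3), dtype=np.uint8)       # what cv2 ops typically return
mean = np.array([0.485, 0.456, 0.406])          # float64 by default

bad = img / 255.0 - mean                        # uint8 / float -> float64
good = img.astype(np.float32) / np.float32(255) - mean.astype(np.float32)
print(bad.dtype, good.dtype)                    # float64 float32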
499,386
24.04.2022 07:54:05
18,000
8bdcd15723d735de8a4f36816d4a94fc27003673
fix dark postprocess in cpp infer deploy
[ { "change_type": "MODIFY", "old_path": "deploy/cpp/src/keypoint_postprocess.cc", "new_path": "deploy/cpp/src/keypoint_postprocess.cc", "diff": "@@ -193,7 +193,7 @@ void get_final_preds(std::vector<float>& heatmap,\nint px = int(coords[j * 2] + 0.5);\nint py = int(coords[j * 2 + 1] + 0.5);\n- if(DARK && px > 1 && px < heatmap_width - 2){\n+ if(DARK && px > 1 && px < heatmap_width - 2 && py > 1 && py < heatmap_height - 2){\ndark_parse(heatmap, dim, coords, px, py, index, j);\n}\nelse{\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
fix dark postprocess in cpp infer deploy( #5751) (#5815)
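The C++ fix above adds the missing vertical bound: DARK refinement samples a small neighborhood around the heatmap peak, so both coordinates must stay well inside the heatmap, not just the horizontal one. The guard, restated as a small Python predicate (names are illustrative):

def can_apply_dark(px, py, heatmap_w, heatmap_h):
    # dark_parse reads up to two pixels around (px, py), so the peak must
    # sit at least two pixels away from every border of the heatmap
    return 1 < px < heatmap_w - 2 and 1 < py < heatmap_h - 2

Without the py check, a peak on the top or bottom edge would read outside the heatmap buffer.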
499,333
28.04.2022 11:55:54
-28,800
9535ca879fc01b38ab39659370909caf5583ebcf
yaml support no constructor
[ { "change_type": "MODIFY", "old_path": "configs/datasets/roadsign_voc.yml", "new_path": "configs/datasets/roadsign_voc.yml", "diff": "@@ -3,19 +3,19 @@ map_type: integral\nnum_classes: 4\nTrainDataset:\n- !VOCDataSet\n+ name: VOCDataSet\ndataset_dir: dataset/roadsign_voc\nanno_path: train.txt\nlabel_list: label_list.txt\ndata_fields: ['image', 'gt_bbox', 'gt_class', 'difficult']\nEvalDataset:\n- !VOCDataSet\n+ name: VOCDataSet\ndataset_dir: dataset/roadsign_voc\nanno_path: valid.txt\nlabel_list: label_list.txt\ndata_fields: ['image', 'gt_bbox', 'gt_class', 'difficult']\nTestDataset:\n- !ImageFolder\n+ name: ImageFolder\nanno_path: dataset/roadsign_voc/label_list.txt\n" }, { "change_type": "MODIFY", "old_path": "configs/yolov3/_base_/optimizer_40e.yml", "new_path": "configs/yolov3/_base_/optimizer_40e.yml", "diff": "@@ -3,12 +3,12 @@ epoch: 40\nLearningRate:\nbase_lr: 0.0001\nschedulers:\n- - !PiecewiseDecay\n+ - name: PiecewiseDecay\ngamma: 0.1\nmilestones:\n- 32\n- 36\n- - !LinearWarmup\n+ - name: LinearWarmup\nstart_factor: 0.3333333333333333\nsteps: 100\n" }, { "change_type": "MODIFY", "old_path": "ppdet/core/workspace.py", "new_path": "ppdet/core/workspace.py", "diff": "@@ -210,9 +210,17 @@ def create(cls_or_name, **kwargs):\nassert type(cls_or_name) in [type, str\n], \"should be a class or name of a class\"\nname = type(cls_or_name) == str and cls_or_name or cls_or_name.__name__\n- assert name in global_config and \\\n- isinstance(global_config[name], SchemaDict), \\\n- \"the module {} is not registered\".format(name)\n+ if name in global_config:\n+ if isinstance(global_config[name], SchemaDict):\n+ pass\n+ elif hasattr(global_config[name], \"__dict__\"):\n+ # support instance return directly\n+ return global_config[name]\n+ else:\n+ raise ValueError(\"The module {} is not registered\".format(name))\n+ else:\n+ raise ValueError(\"The module {} is not registered\".format(name))\n+\nconfig = global_config[name]\ncls = getattr(config.pymodule, name)\ncls_kwargs = {}\n" }, { "change_type": "MODIFY", "old_path": "ppdet/data/source/dataset.py", "new_path": "ppdet/data/source/dataset.py", "diff": "@@ -23,6 +23,7 @@ from paddle.io import Dataset\nfrom ppdet.core.workspace import register, serializable\nfrom ppdet.utils.download import get_dataset_path\nimport copy\n+import ppdet.data.source as source\n@serializable\n@@ -60,6 +61,9 @@ class DetDataset(Dataset):\ndef __len__(self, ):\nreturn len(self.roidbs)\n+ def __call__(self, *args, **kwargs):\n+ return self\n+\ndef __getitem__(self, idx):\n# data batch\nroidb = copy.deepcopy(self.roidbs[idx])\n@@ -198,3 +202,40 @@ class ImageFolder(DetDataset):\ndef set_images(self, images):\nself.image_dir = images\nself.roidbs = self._load_images()\n+\n+\n+@register\n+class CommonDataset(object):\n+ def __init__(self, **dataset_args):\n+ super(CommonDataset, self).__init__()\n+ dataset_args = copy.deepcopy(dataset_args)\n+ type = dataset_args.pop(\"name\")\n+ self.dataset = getattr(source, type)(**dataset_args)\n+\n+ def __call__(self):\n+ return self.dataset\n+\n+\n+@register\n+class TrainDataset(CommonDataset):\n+ pass\n+\n+\n+@register\n+class EvalMOTDataset(CommonDataset):\n+ pass\n+\n+\n+@register\n+class TestMOTDataset(CommonDataset):\n+ pass\n+\n+\n+@register\n+class EvalDataset(CommonDataset):\n+ pass\n+\n+\n+@register\n+class TestDataset(CommonDataset):\n+ pass\n" }, { "change_type": "MODIFY", "old_path": "ppdet/engine/trainer.py", "new_path": "ppdet/engine/trainer.py", "diff": "@@ -68,9 +68,10 @@ class Trainer(object):\n# build data 
loader\nif cfg.architecture in MOT_ARCH and self.mode in ['eval', 'test']:\n- self.dataset = cfg['{}MOTDataset'.format(self.mode.capitalize())]\n+ self.dataset = create('{}MOTDataset'.format(self.mode.capitalize(\n+ )))()\nelse:\n- self.dataset = cfg['{}Dataset'.format(self.mode.capitalize())]\n+ self.dataset = create('{}Dataset'.format(self.mode.capitalize()))()\nif cfg.architecture == 'DeepSORT' and self.mode == 'train':\nlogger.error('DeepSORT has no need of training on mot dataset.')\n" }, { "change_type": "MODIFY", "old_path": "ppdet/optimizer.py", "new_path": "ppdet/optimizer.py", "diff": "@@ -16,6 +16,7 @@ from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n+import sys\nimport math\nimport weakref\nimport paddle\n@@ -25,6 +26,7 @@ import paddle.optimizer as optimizer\nimport paddle.regularizer as regularizer\nfrom ppdet.core.workspace import register, serializable\n+import copy\n__all__ = ['LearningRate', 'OptimizerBuilder']\n@@ -252,7 +254,18 @@ class LearningRate(object):\nschedulers=[PiecewiseDecay(), LinearWarmup()]):\nsuper(LearningRate, self).__init__()\nself.base_lr = base_lr\n- self.schedulers = schedulers\n+ self.schedulers = []\n+\n+ schedulers = copy.deepcopy(schedulers)\n+ for sched in schedulers:\n+ if isinstance(sched, dict):\n+ # support dict sched instantiate\n+ module = sys.modules[__name__]\n+ type = sched.pop(\"name\")\n+ scheduler = getattr(module, type)(**sched)\n+ self.schedulers.append(scheduler)\n+ else:\n+ self.schedulers.append(sched)\ndef __call__(self, step_per_epoch):\nassert len(self.schedulers) >= 1\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
yaml support no constructor (#5847)
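The LearningRate change above lets a scheduler be given as a plain dict with a name key instead of a YAML-constructed object. Stripped to its core, the dispatch looks like this (registry is a hypothetical stand-in for the sys.modules lookup in the diff):

import copy

def build_schedulers(scheduler_cfgs, registry):
    # registry maps names to classes, e.g. {'PiecewiseDecay': ..., 'LinearWarmup': ...}
    schedulers = []
    for cfg in copy.deepcopy(scheduler_cfgs):   # deepcopy so pop() cannot mutate the config
        if isinstance(cfg, dict):               # dict form: {'name': ..., **kwargs}
            cls = registry[cfg.pop('name')]
            schedulers.append(cls(**cfg))
        else:                                   # already an instance (old !Tag form)
            schedulers.append(cfg)
    return schedulers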
499,319
29.04.2022 11:28:49
-28,800
81d28246b487580ceacaed4f16b98109e46731eb
[test_tipc] add paddle2onnx; fix onnxruntime infer_demo
[ { "change_type": "MODIFY", "old_path": "deploy/third_engine/demo_onnxruntime/infer_demo.py", "new_path": "deploy/third_engine/demo_onnxruntime/infer_demo.py", "diff": "@@ -38,8 +38,12 @@ class PicoDet():\nso = ort.SessionOptions()\nso.log_severity_level = 3\nself.net = ort.InferenceSession(model_pb_path, so)\n- self.input_shape = (self.net.get_inputs()[0].shape[2],\n- self.net.get_inputs()[0].shape[3])\n+ inputs_name = [a.name for a in self.net.get_inputs()]\n+ inputs_shape = {\n+ k: v.shape\n+ for k, v in zip(inputs_name, self.net.get_inputs())\n+ }\n+ self.input_shape = inputs_shape['image'][2:]\ndef _normalize(self, img):\nimg = img.astype(np.float32)\n@@ -51,6 +55,8 @@ class PicoDet():\norigin_shape = srcimg.shape[:2]\nim_scale_y = newh / float(origin_shape[0])\nim_scale_x = neww / float(origin_shape[1])\n+ img_shape = np.array([[float(origin_shape[0]), float(origin_shape[1])]\n+ ]).astype('float32')\nscale_factor = np.array([[im_scale_y, im_scale_x]]).astype('float32')\nif keep_ratio and srcimg.shape[0] != srcimg.shape[1]:\n@@ -87,7 +93,7 @@ class PicoDet():\nimg = cv2.resize(\nsrcimg, self.input_shape, interpolation=cv2.INTER_AREA)\n- return img, scale_factor\n+ return img, img_shape, scale_factor\ndef get_color_map_list(self, num_classes):\ncolor_map = num_classes * [0, 0, 0]\n@@ -104,15 +110,20 @@ class PicoDet():\nreturn color_map\ndef detect(self, srcimg):\n- img, scale_factor = self.resize_image(srcimg)\n+ img, im_shape, scale_factor = self.resize_image(srcimg)\nimg = self._normalize(img)\nblob = np.expand_dims(np.transpose(img, (2, 0, 1)), axis=0)\n- outs = self.net.run(None, {\n- self.net.get_inputs()[0].name: blob,\n- self.net.get_inputs()[1].name: scale_factor\n- })\n+ inputs_dict = {\n+ 'im_shape': im_shape,\n+ 'image': blob,\n+ 'scale_factor': scale_factor\n+ }\n+ inputs_name = [a.name for a in self.net.get_inputs()]\n+ net_inputs = {k: inputs_dict[k] for k in inputs_name}\n+\n+ outs = self.net.run(None, net_inputs)\nouts = np.array(outs[0])\nexpect_boxes = (outs[:, 1] > 0.5) & (outs[:, 0] > -1)\n@@ -181,7 +192,7 @@ if __name__ == '__main__':\nparser.add_argument(\n\"--img_fold\", dest=\"img_fold\", type=str, default=\"./imgs\")\nparser.add_argument(\n- \"--result_fold\", dest=\"result_fold\", type=str, default=\"./results\")\n+ \"--result_fold\", dest=\"result_fold\", type=str, default=\"results\")\nargs = parser.parse_args()\nnet = PicoDet(\n@@ -191,3 +202,6 @@ if __name__ == '__main__':\niou_threshold=args.nmsThreshold)\nnet.detect_folder(args.img_fold, args.result_fold)\n+ print(\n+ f'infer results in ./deploy/third_engine/demo_onnxruntime/{args.result_fold}'\n+ )\n" }, { "change_type": "ADD", "old_path": null, "new_path": "test_tipc/configs/picodet/picodet_s_320_coco_lcnet_linux_gpu_normal_normal_paddle2onnx_python_linux_gpu_cpu.txt", "diff": "+===========================paddle2onnx_params===========================\n+model_name:picodet_s_320_coco_lcnet\n+python:python3.7\n+filename:null\n+##\n+--output_dir:./output_inference\n+weights:https://paddledet.bj.bcebos.com/models/picodet_s_320_coco_lcnet.pdparams\n+norm_export:tools/export_model.py -c configs/picodet/picodet_s_320_coco_lcnet.yml -o\n+quant_export:tools/export_model.py -c configs/picodet/picodet_s_320_coco_lcnet.yml --slim_config configs/picodet/picodet_s_320_coco_lcnet.yml -o\n+fpgm_export:tools/export_model.py -c configs/picodet/picodet_s_320_coco_lcnet.yml --slim_config configs/picodet/picodet_s_320_coco_lcnet.yml 
-o\n+distill_export:null\n+export1:null\n+export2:null\n+kl_quant_export:tools/post_quant.py -c configs/picodet/picodet_s_320_coco_lcnet.yml --slim_config configs/picodet/picodet_s_320_coco_lcnet.yml -o\n+##\n+2onnx: paddle2onnx\n+--model_dir:./output_inference/picodet_s_320_coco_lcnet/\n+--model_filename:model.pdmodel\n+--params_filename:model.pdiparams\n+--save_file:./deploy/third_engine/demo_onnxruntime/onnx_file/picodet_s_320_coco.onnx\n+--opset_version:11\n+##\n+inference:infer_demo.py\n+--modelpath:./onnx_file/picodet_s_320_coco.onnx\n+--img_fold:./imgs\n+--result_fold:results\n+infer_mode:norm\n+null:null\n\\ No newline at end of file\n" }, { "change_type": "MODIFY", "old_path": "test_tipc/prepare.sh", "new_path": "test_tipc/prepare.sh", "diff": "@@ -4,7 +4,7 @@ source test_tipc/utils_func.sh\nFILENAME=$1\n# MODE be one of ['lite_train_lite_infer' 'lite_train_whole_infer'\n# 'whole_train_whole_infer', 'whole_infer', 'klquant_whole_infer',\n-# 'cpp_infer', 'serving_infer', 'lite_infer']\n+# 'cpp_infer', 'serving_infer', 'lite_infer', 'paddle2onnx_infer']\nMODE=$2\n# parse params\n@@ -67,6 +67,10 @@ elif [ ${MODE} = \"benchmark_train\" ];then\nwget -nc -P ./dataset/mot/ https://paddledet.bj.bcebos.com/data/mot_benchmark.tar\ncd ./dataset/mot/ && tar -xvf mot_benchmark.tar && mv -u mot_benchmark/* .\nrm -rf mot_benchmark/ && cd ../../\n+elif [ ${MODE} = \"paddle2onnx_infer\" ];then\n+ # set paddle2onnx_infer enve\n+ ${python} -m pip install install paddle2onnx\n+ ${python} -m pip install onnxruntime==1.10.0\nelse\n# download coco lite data\nwget -nc -P ./dataset/coco/ https://paddledet.bj.bcebos.com/data/tipc/coco_tipc.tar\n" }, { "change_type": "ADD", "old_path": null, "new_path": "test_tipc/test_paddle2onnx.sh", "diff": "+#!/bin/bash\n+source test_tipc/utils_func.sh\n+\n+FILENAME=$1\n+\n+# parser model_name\n+dataline=$(cat ${FILENAME})\n+IFS=$'\\n'\n+lines=(${dataline})\n+model_name=$(func_parser_value \"${lines[1]}\")\n+echo \"ppdet onnx_infer: ${model_name}\"\n+python=$(func_parser_value \"${lines[2]}\")\n+filename_key=$(func_parser_key \"${lines[3]}\")\n+filename_value=$(func_parser_value \"${lines[3]}\")\n+\n+# export params\n+save_export_key=$(func_parser_key \"${lines[5]}\")\n+save_export_value=$(func_parser_value \"${lines[5]}\")\n+export_weight_key=$(func_parser_key \"${lines[6]}\")\n+export_weight_value=$(func_parser_value \"${lines[6]}\")\n+norm_export=$(func_parser_value \"${lines[7]}\")\n+pact_export=$(func_parser_value \"${lines[8]}\")\n+fpgm_export=$(func_parser_value \"${lines[9]}\")\n+distill_export=$(func_parser_value \"${lines[10]}\")\n+export_key1=$(func_parser_key \"${lines[11]}\")\n+export_value1=$(func_parser_value \"${lines[11]}\")\n+export_key2=$(func_parser_key \"${lines[12]}\")\n+export_value2=$(func_parser_value \"${lines[12]}\")\n+kl_quant_export=$(func_parser_value \"${lines[13]}\")\n+\n+# parser paddle2onnx\n+padlle2onnx_cmd=$(func_parser_value \"${lines[15]}\")\n+infer_model_dir_key=$(func_parser_key \"${lines[16]}\")\n+infer_model_dir_value=$(func_parser_value \"${lines[16]}\")\n+model_filename_key=$(func_parser_key \"${lines[17]}\")\n+model_filename_value=$(func_parser_value \"${lines[17]}\")\n+params_filename_key=$(func_parser_key \"${lines[18]}\")\n+params_filename_value=$(func_parser_value \"${lines[18]}\")\n+save_file_key=$(func_parser_key \"${lines[19]}\")\n+save_file_value=$(func_parser_value \"${lines[19]}\")\n+opset_version_key=$(func_parser_key \"${lines[20]}\")\n+opset_version_value=$(func_parser_value \"${lines[20]}\")\n+\n+# parser onnx 
inference\n+inference_py=$(func_parser_value \"${lines[22]}\")\n+model_file_key=$(func_parser_key \"${lines[23]}\")\n+model_file_value=$(func_parser_value \"${lines[23]}\")\n+img_fold_key=$(func_parser_key \"${lines[24]}\")\n+img_fold_value=$(func_parser_value \"${lines[24]}\")\n+results_fold_key=$(func_parser_key \"${lines[25]}\")\n+results_fold_value=$(func_parser_value \"${lines[25]}\")\n+onnx_infer_mode_list=$(func_parser_value \"${lines[26]}\")\n+\n+LOG_PATH=\"./test_tipc/output\"\n+mkdir -p ${LOG_PATH}\n+status_log=\"${LOG_PATH}/results_paddle2onnx.log\"\n+\n+function func_paddle2onnx(){\n+ IFS='|'\n+ _script=$1\n+\n+ # paddle2onnx\n+ echo \"################### run onnx export ###################\"\n+ _save_log_path=\"${LOG_PATH}/paddle2onnx_infer_cpu.log\"\n+ set_dirname=$(func_set_params \"${infer_model_dir_key}\" \"${infer_model_dir_value}\")\n+ set_model_filename=$(func_set_params \"${model_filename_key}\" \"${model_filename_value}\")\n+ set_params_filename=$(func_set_params \"${params_filename_key}\" \"${params_filename_value}\")\n+ set_save_model=$(func_set_params \"${save_file_key}\" \"${save_file_value}\")\n+ set_opset_version=$(func_set_params \"${opset_version_key}\" \"${opset_version_value}\")\n+ trans_model_cmd=\"${padlle2onnx_cmd} ${set_dirname} ${set_model_filename} ${set_params_filename} ${set_save_model} ${set_opset_version}\"\n+ eval $trans_model_cmd\n+ last_status=${PIPESTATUS[0]}\n+ status_check $last_status \"${trans_model_cmd}\" \"${status_log}\"\n+ # python inference\n+ echo \"################### run infer ###################\"\n+ cd ./deploy/third_engine/demo_onnxruntime/\n+ model_file=$(func_set_params \"${model_file_key}\" \"${model_file_value}\")\n+ img_fold=$(func_set_params \"${img_fold_key}\" \"${img_fold_value}\")\n+ results_fold=$(func_set_params \"${results_fold_key}\" \"${results_fold_value}\")\n+ infer_model_cmd=\"${python} ${inference_py} ${model_file} ${img_fold} ${results_fold}\"\n+ eval $infer_model_cmd\n+ last_status=${PIPESTATUS[0]}\n+ status_check $last_status \"${infer_model_cmd}\" \"${status_log}\"\n+}\n+\n+export Count=0\n+IFS=\"|\"\n+echo \"################### run paddle export ###################\"\n+for infer_mode in ${onnx_infer_mode_list[*]}; do\n+\n+ # run export\n+ case ${infer_mode} in\n+ norm) run_export=${norm_export} ;;\n+ quant) run_export=${pact_export} ;;\n+ fpgm) run_export=${fpgm_export} ;;\n+ distill) run_export=${distill_export} ;;\n+ kl_quant) run_export=${kl_quant_export} ;;\n+ *) echo \"Undefined infer_mode!\"; exit 1;\n+ esac\n+ if [ ${run_export} = \"null\" ]; then\n+ continue\n+ fi\n+ set_export_weight=$(func_set_params \"${export_weight_key}\" \"${export_weight_value}\")\n+ set_save_export_dir=$(func_set_params \"${save_export_key}\" \"${save_export_value}\")\n+ set_filename=$(func_set_params \"${filename_key}\" \"${model_name}\")\n+ export_cmd=\"${python} ${run_export} ${set_export_weight} ${set_filename} ${set_save_export_dir} \"\n+ echo $export_cmd\n+ eval $export_cmd\n+ status_export=$?\n+ status_check $status_export \"${export_cmd}\" \"${status_log}\"\n+done\n+func_paddle2onnx\n\\ No newline at end of file\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
[test_tipc] add paddle2onnx; fix onnxruntime infer_demo (#5857) Co-authored-by: lyuwenyu <wenyu.lyu@gmail.com>
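The infer_demo fix stops assuming a fixed input order and feeds the session by input name, which also tolerates exported graphs that declare fewer inputs. The same pattern in isolation (model path and shapes are placeholders):

import numpy as np
import onnxruntime as ort

sess = ort.InferenceSession('picodet_s_320_coco.onnx')   # placeholder path
candidates = {
    'image': np.zeros((1, 3, 320, 320), np.float32),
    'im_shape': np.array([[320., 320.]], np.float32),
    'scale_factor': np.array([[1., 1.]], np.float32),
}
# feed only the inputs the graph actually declares, keyed by name
feed = {i.name: candidates[i.name] for i in sess.get_inputs()}
outs = sess.run(None, feed)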
499,333
29.04.2022 13:50:23
-28,800
e22f2d6b5ab83a6baaae8ced11dfdefe0b01795e
fix mask rcnn target
[ { "change_type": "MODIFY", "old_path": "ppdet/modeling/proposal_generator/target.py", "new_path": "ppdet/modeling/proposal_generator/target.py", "diff": "@@ -356,7 +356,7 @@ def generate_mask_target(gt_segms, rois, labels_int32, sampled_gt_inds,\nfg_inds_new = fg_inds.reshape([-1]).numpy()\nresults = []\nif len(gt_segms_per_im) > 0:\n- for j in fg_inds_new:\n+ for j in range(fg_inds_new.shape[0]):\nresults.append(\nrasterize_polygons_within_box(new_segm[j], boxes[j],\nresolution))\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
fix mask rcnn target (#5861)
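The one-line fix above matters because fg_inds_new holds indices into the full proposal set, while new_segm and boxes appear to be already aligned to the foreground subset; after a gather, positions, not the original indices, address the result. A toy numpy illustration:

import numpy as np

boxes = np.arange(40, dtype=np.float32).reshape(10, 4)
fg_inds = np.array([2, 7])        # indices into the *full* roi set
gathered = boxes[fg_inds]         # shape (2, 4): rows already selected

# buggy: for j in fg_inds: gathered[j] -> wrong row, or IndexError once j >= 2
for j in range(fg_inds.shape[0]):          # iterate positions instead
    roi = gathered[j]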
499,299
29.04.2022 14:51:30
-28,800
bb319a8a598cdaa3fdf431988f28cafbdd9b2a11
add pdparams links
[ { "change_type": "MODIFY", "old_path": "configs/keypoint/README_en.md", "new_path": "configs/keypoint/README_en.md", "diff": "@@ -89,6 +89,13 @@ MPII Dataset\n| :---- | -------- | :--------: | :------------: | :----------------------------------------------------------: | -------------------------------------------- |\n| HRNet-w32 | 256x256 | 90.6 | 38.5 | [hrnet_w32_256x256_mpii.pdparams](https://paddledet.bj.bcebos.com/models/keypoint/hrnet_w32_256x256_mpii.pdparams) | [config](./hrnet/hrnet_w32_256x256_mpii.yml) |\n+\n+Model for Scenes\n+| Model | Strategy | Input Size | Precision | Inference Speed |Model Weights | Model Inference and Deployment | description|\n+| :---- | ---|----- | :--------: | :-------: |:------------: |:------------: |:-------------------: |\n+| HRNet-w32 + DarkPose | Top-Down|256x192 | AP: 87.1 (on internal dataset)| 2.9ms per person |[Link](https://bj.bcebos.com/v1/paddledet/models/pipeline/dark_hrnet_w32_256x192.pdparams) |[Link](https://bj.bcebos.com/v1/paddledet/models/pipeline/dark_hrnet_w32_256x192.zip) | Especially optimized for fall scenarios, the model is applied to [PP-Human](../../deploy/pphuman/README_en.md) |\n+\n+\nWe also release [PP-TinyPose](./tiny_pose/README_en.md), a real-time keypoint detection model optimized for mobile devices. Welcome to experience.\n## Getting Start\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
add pdparams links (#5864)
499,299
29.04.2022 18:27:39
-28,800
55ae0d1b83f937c316c6788d834b9a35f1cd99ae
fix kpt doc error
[ { "change_type": "MODIFY", "old_path": "configs/keypoint/README_en.md", "new_path": "configs/keypoint/README_en.md", "diff": "@@ -161,7 +161,6 @@ CUDA_VISIBLE_DEVICES=0 python3 tools/infer.py -c configs/keypoint/higherhrnet/hi\n```shell\n#Export Detection Model\n-\npython tools/export_model.py -c configs/ppyolo/ppyolov2_r50vd_dcn_365e_coco.yml -o weights=https://paddledet.bj.bcebos.com/models/ppyolov2_r50vd_dcn_365e_coco.pdparams\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
fix kpt doc error (#5867)
499,333
01.05.2022 15:17:21
-28,800
3d1e2320bf5b6ae30eb5c0fd296e83b8bd88afb4
[test_tipc] add serving tipc
[ { "change_type": "ADD", "old_path": null, "new_path": "test_tipc/configs/yolov3/yolov3_darknet53_270e_coco_model_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt", "diff": "+===========================cpp_infer_params===========================\n+model_name:yolov3_darknet53_270e_coco\n+python:python\n+filename:null\n+##\n+--output_dir:./output_inference\n+weights:https://paddledet.bj.bcebos.com/models/yolov3_darknet53_270e_coco.pdparams\n+norm_export:tools/export_model.py -c configs/yolov3/yolov3_darknet53_270e_coco.yml -o\n+quant_export:tools/export_model.py -c configs/yolov3/yolov3_darknet53_270e_coco.yml --slim_config configs/slim/quant/yolov3_darknet_qat.yml -o\n+fpgm_export:tools/export_model.py -c configs/yolov3/yolov3_darknet53_270e_coco.yml --slim_config configs/slim/prune/yolov3_darknet_prune_fpgm.yml -o\n+distill_export:null\n+export1:null\n+export2:null\n+kl_quant_export:tools/post_quant.py -c configs/yolov3/yolov3_darknet53_270e_coco.yml --slim_config configs/slim/post_quant/yolov3_darknet53_ptq.yml -o\n+--export_serving_model:True\n+##\n+start_serving:-m paddle_serving_server.serve --model serving_server\n+--port:9393\n+--gpu_ids:0\n+##\n" }, { "change_type": "MODIFY", "old_path": "test_tipc/prepare.sh", "new_path": "test_tipc/prepare.sh", "diff": "@@ -71,6 +71,16 @@ elif [ ${MODE} = \"paddle2onnx_infer\" ];then\n# set paddle2onnx_infer enve\n${python} -m pip install install paddle2onnx\n${python} -m pip install onnxruntime==1.10.0\n+elif [ ${MODE} = \"serving_infer\" ];then\n+ git clone https://github.com/PaddlePaddle/Serving\n+ bash Serving/tools/paddle_env_install.sh\n+ cd Serving\n+ pip install -r python/requirements.txt\n+ cd ..\n+ pip install paddle-serving-client==0.8.3 -i https://pypi.tuna.tsinghua.edu.cn/simple\n+ pip install paddle-serving-app==0.8.3 -i https://pypi.tuna.tsinghua.edu.cn/simple\n+ pip install paddle-serving-server-gpu==0.8.3.post101 -i https://pypi.tuna.tsinghua.edu.cn/simple\n+ python -m pip install paddlepaddle-gpu==2.2.2.post101 -f https://www.paddlepaddle.org.cn/whl/linux/mkl/avx/stable.html\nelse\n# download coco lite data\nwget -nc -P ./dataset/coco/ https://paddledet.bj.bcebos.com/data/tipc/coco_tipc.tar\n" }, { "change_type": "MODIFY", "old_path": "test_tipc/test_serving.sh", "new_path": "test_tipc/test_serving.sh", "diff": "+#!/bin/bash\n+source test_tipc/utils_func.sh\n+\n+FILENAME=$1\n+\n+# parser model_name\n+dataline=$(cat ${FILENAME})\n+IFS=$'\\n'\n+lines=(${dataline})\n+model_name=$(func_parser_value \"${lines[1]}\")\n+echo \"ppdet serving: ${model_name}\"\n+python=$(func_parser_value \"${lines[2]}\")\n+filename_key=$(func_parser_key \"${lines[3]}\")\n+filename_value=$(func_parser_value \"${lines[3]}\")\n+\n+# export params\n+save_export_key=$(func_parser_key \"${lines[5]}\")\n+save_export_value=$(func_parser_value \"${lines[5]}\")\n+export_weight_key=$(func_parser_key \"${lines[6]}\")\n+export_weight_value=$(func_parser_value \"${lines[6]}\")\n+norm_export=$(func_parser_value \"${lines[7]}\")\n+pact_export=$(func_parser_value \"${lines[8]}\")\n+fpgm_export=$(func_parser_value \"${lines[9]}\")\n+distill_export=$(func_parser_value \"${lines[10]}\")\n+export_key1=$(func_parser_key \"${lines[11]}\")\n+export_value1=$(func_parser_value \"${lines[11]}\")\n+export_key2=$(func_parser_key \"${lines[12]}\")\n+export_value2=$(func_parser_value \"${lines[12]}\")\n+kl_quant_export=$(func_parser_value \"${lines[13]}\")\n+export_serving_model_key=$(func_parser_key \"${lines[14]}\")\n+export_serving_model_value=$(func_parser_value 
\"${lines[14]}\")\n+# parser serving\n+start_serving=$(func_parser_value \"${lines[16]}\")\n+port_key=$(func_parser_key \"${lines[17]}\")\n+port_value=$(func_parser_value \"${lines[17]}\")\n+gpu_id_key=$(func_parser_key \"${lines[18]}\")\n+gpu_id_value=$(func_parser_value \"${lines[18]}\")\n+\n+LOG_PATH=\"./test_tipc/output\"\n+mkdir -p ${LOG_PATH}\n+status_log=\"${LOG_PATH}/results_serving.log\"\n+\n+function func_serving(){\n+ IFS='|'\n+ if [ ${gpu_id_key} = \"null\" ]; then\n+ start_serving_command=\"nohup ${python} ${start_serving} ${port_key} ${port_value} > serving.log 2>&1 &\"\n+ else\n+ start_serving_command=\"nohup ${python} ${start_serving} ${port_key} ${port_value} ${gpu_id_key} ${gpu_id_value} > serving.log 2>&1 &\"\n+ fi\n+ echo $start_serving_command\n+ eval $start_serving_command\n+ last_status=${PIPESTATUS[0]}\n+ status_check $last_status \"${start_serving_command}\" \"${status_log}\"\n+}\n+cd output_inference/${model_name}\n+echo $PWD\n+func_serving\n+test_command=\"${python} ../../deploy/serving/test_client.py ../../deploy/serving/label_list.txt ../../demo/000000014439.jpg\"\n+echo $test_command\n+eval $test_command\n+last_status=${PIPESTATUS[0]}\n+status_check $last_status\"${test_command}\" \"${status_log}\"\n+\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
[test_tipc] add serving tipc (#5865)
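test_serving.sh above leans on the TIPC convention that every config line is a single key:value pair split on the first colon (e.g. --port:9393). For readers unfamiliar with the func_parser_* shell helpers, roughly the same split in Python (a hypothetical helper, not part of the repo):

def parse_kv(line):
    # '--port:9393' -> ('--port', '9393'); split only on the first ':'
    key, _, value = line.partition(':')
    return key.strip(), value.strip()

assert parse_kv('--port:9393') == ('--port', '9393')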
499,298
04.05.2022 23:25:00
-28,800
2b88308f9b97bba1443688e6a6907cb02cb50fb8
fix pphuman vis when nothing detected
[ { "change_type": "MODIFY", "old_path": "deploy/pphuman/pipeline.py", "new_path": "deploy/pphuman/pipeline.py", "diff": "@@ -537,8 +537,9 @@ class PipePredictor(object):\nself.pipe_timer.total_time.end()\nif self.cfg['visual']:\n_, _, fps = self.pipe_timer.get_total_time()\n- im = self.visualize_video(frame, mot_res, frame_id,\n- fps) # visualize\n+ im = self.visualize_video(frame, mot_res, frame_id, fps,\n+ entrance, records,\n+ center_traj) # visualize\nwriter.write(im)\nif self.file_name is None: # use camera_id\ncv2.imshow('PPHuman', im)\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
fix pphuman vis when nothing detected (#5858)
499,299
07.05.2022 10:06:57
-28,800
756e3dca26e5c4f853203fcc8f5b5af1da234eeb
fix cpp infer error in blazeface
[ { "change_type": "MODIFY", "old_path": "deploy/cpp/src/object_detector.cc", "new_path": "deploy/cpp/src/object_detector.cc", "diff": "@@ -213,10 +213,6 @@ void ObjectDetector::Postprocess(\ncv::Mat raw_mat = mats[im_id];\nint rh = 1;\nint rw = 1;\n- if (config_.arch_ == \"Face\") {\n- rh = raw_mat.rows;\n- rw = raw_mat.cols;\n- }\nfor (int j = start_idx; j < start_idx + bbox_num[im_id]; j++) {\nif (is_rbox) {\n// Class id\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
fix cpp infer error in blazeface (#5892)
499,304
07.05.2022 13:37:59
-28,800
acaf10684d8e071e48098c660355d54981d93b7c
add pp-yoloe quantization model
[ { "change_type": "MODIFY", "old_path": "configs/slim/README_en.md", "new_path": "configs/slim/README_en.md", "diff": "@@ -124,6 +124,8 @@ Description:\n| Model | Compression Strategy | Input Size | Model Volume(MB) | Prediction Delay(V100) | Prediction Delay(SD855) | Box AP | Download | Download of Inference Model | Model Configuration File | Compression Algorithm Configuration File |\n| ------------------------- | -------------------------- | ----------- | :--------------: | :--------------------: | :---------------------: | :-------------------: | :-----------------------------------------------------------------------------------------: | :-----------------------------------------------------------------------------------------: | :----------------------------------------------------------------------------------------------------------------------------------------: | :--------------------------------------------------------------------------------------------------------------------------------------: |\n+| PP-YOLOE-l | baseline | 640 | - | 11.2ms(trt_fp32) &#124; 7.7ms(trt_fp16) | -- | 50.9 | [link](https://paddledet.bj.bcebos.com/models/ppyoloe_crn_l_300e_coco.pdparams) | - | [Configuration File](https://github.com/PaddlePaddle/PaddleDetection/tree/develop/configs/ppyoloe/ppyoloe_crn_l_300e_coco.yml) | - |\n+| PP-YOLOE-l | Common Online quantitative | 640 | - | 6.7ms(trt_int8) | -- | 48.8 | [link](https://paddledet.bj.bcebos.com/models/slim/ppyoloe_l_coco_qat.pdparams) | - | [Configuration File](https://github.com/PaddlePaddle/PaddleDetection/tree/develop/configs/ppyoloe/ppyoloe_crn_l_300e_coco.yml) | [Configuration File](https://github.com/PaddlePaddle/PaddleDetection/tree/develop/configs/slim/quant/ppyoloe_l_qat.yml) |\n| PP-YOLOv2_R50vd | baseline | 640 | 208.6 | 19.1ms | -- | 49.1 | [link](https://paddledet.bj.bcebos.com/models/ppyolov2_r50vd_dcn_365e_coco.pdparams) | [link](https://paddledet.bj.bcebos.com/models/slim/ppyolov2_r50vd_dcn_365e_coco.tar) | [Configuration File](https://github.com/PaddlePaddle/PaddleDetection/tree/develop/configs/ppyolo/ppyolo_r50vd_dcn_1x_coco.yml) | - |\n| PP-YOLOv2_R50vd | PACT Online quantitative | 640 | -- | 17.3ms | -- | 48.1 | [link](https://paddledet.bj.bcebos.com/models/slim/ppyolov2_r50vd_dcn_qat.pdparams) | [link](https://paddledet.bj.bcebos.com/models/slim/ppyolov2_r50vd_dcn_qat.tar) | [Configuration File ](https://github.com/PaddlePaddle/PaddleDetection/tree/develop/configs/ppyolo/ppyolo_r50vd_dcn_1x_coco.yml) | [Configuration File ](https://github.com/PaddlePaddle/PaddleDetection/tree/develop/configs/slim/quant/ppyolov2_r50vd_dcn_qat.yml) |\n| PP-YOLO_R50vd | baseline | 608 | 183.3 | 17.4ms | -- | 44.8 | [link](https://paddledet.bj.bcebos.com/models/ppyolo_r50vd_dcn_1x_coco.pdparams) | [link](https://paddledet.bj.bcebos.com/models/slim/ppyolo_r50vd_dcn_1x_coco.tar) | [Configuration File ](https://github.com/PaddlePaddle/PaddleDetection/tree/develop/configs/ppyolo/ppyolo_r50vd_dcn_1x_coco.yml) | - |\n" }, { "change_type": "ADD", "old_path": null, "new_path": "configs/slim/quant/ppyoloe_l_qat.yml", "diff": "+pretrain_weights: https://paddledet.bj.bcebos.com/models/ppyoloe_crn_l_300e_coco.pdparams\n+slim: QAT\n+\n+QAT:\n+ quant_config: {\n+ 'weight_quantize_type': 'channel_wise_abs_max', 'activation_quantize_type': 'moving_average_abs_max',\n+ 'weight_bits': 8, 'activation_bits': 8, 'dtype': 'int8', 'window_size': 10000, 'moving_rate': 0.9,\n+ 'quantizable_layer_type': ['Conv2D', 'Linear']}\n+ print_model: True\n+\n+epoch: 
30\n+snapshot_epoch: 5\n+LearningRate:\n+ base_lr: 0.001\n+ schedulers:\n+ - !PiecewiseDecay\n+ gamma: 0.1\n+ milestones:\n+ - 10\n+ - 20\n+ - !LinearWarmup\n+ start_factor: 0.\n+ steps: 100\n+\n+TrainReader:\n+ batch_size: 8\n" }, { "change_type": "MODIFY", "old_path": "ppdet/engine/trainer.py", "new_path": "ppdet/engine/trainer.py", "diff": "@@ -701,6 +701,7 @@ class Trainer(object):\nif hasattr(self.model, 'deploy'):\nself.model.deploy = True\n+ if 'slim' not in self.cfg:\nfor layer in self.model.sublayers():\nif hasattr(layer, 'convert_to_deploy'):\nlayer.convert_to_deploy()\n" }, { "change_type": "MODIFY", "old_path": "ppdet/modeling/heads/ppyoloe_head.py", "new_path": "ppdet/modeling/heads/ppyoloe_head.py", "diff": "@@ -111,6 +111,7 @@ class PPYOLOEHead(nn.Layer):\nin_c, 4 * (self.reg_max + 1), 3, padding=1))\n# projection conv\nself.proj_conv = nn.Conv2D(self.reg_max + 1, 1, 1, bias_attr=False)\n+ self.proj_conv.skip_quant = True\nself._init_weights()\n@classmethod\n" }, { "change_type": "MODIFY", "old_path": "ppdet/slim/quant.py", "new_path": "ppdet/slim/quant.py", "diff": "@@ -38,6 +38,11 @@ class QAT(object):\nlogger.info(\"Model before quant:\")\nlogger.info(model)\n+ # For PP-YOLOE, convert model to deploy firstly.\n+ for layer in model.sublayers():\n+ if hasattr(layer, 'convert_to_deploy'):\n+ layer.convert_to_deploy()\n+\nself.quanter.quantize(model)\nif self.print_model:\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
add pp-yoloe quantization model (#5894)
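The QAT recipe above is driven entirely by the quant_config dict; as plain Python it reads (values copied from the added yml; the skip_quant flag set on proj_conv keeps that layer out of fake-quant insertion):

quant_config = {
    'weight_quantize_type': 'channel_wise_abs_max',        # per-channel weights
    'activation_quantize_type': 'moving_average_abs_max',  # EMA-tracked ranges
    'weight_bits': 8,
    'activation_bits': 8,
    'dtype': 'int8',
    'window_size': 10000,
    'moving_rate': 0.9,
    'quantizable_layer_type': ['Conv2D', 'Linear'],
}

Note the ordering constraint the slim/quant.py change encodes: convert_to_deploy runs before quanter.quantize(model), so the re-parameterized deploy graph is what gets quantized.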
499,304
10.05.2022 09:44:03
-28,800
2ecf726e6a44fbbf16094f6b8c8a76410d0f29e8
fix picofeat assert error
[ { "change_type": "MODIFY", "old_path": "ppdet/modeling/heads/pico_head.py", "new_path": "ppdet/modeling/heads/pico_head.py", "diff": "@@ -91,7 +91,7 @@ class PicoFeat(nn.Layer):\nself.reg_convs = []\nif use_se:\nassert share_cls_reg == True, \\\n- 'In the case of using se, share_cls_reg is not supported'\n+ 'In the case of using se, share_cls_reg must be set to True'\nself.se = nn.LayerList()\nfor stage_idx in range(num_fpn_stride):\ncls_subnet_convs = []\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
fix picofeat assert error (#5911)
499,301
10.05.2022 17:23:06
-28,800
81f84e237b71fb152d749a06860d34d34938dde1
add slim link
[ { "change_type": "MODIFY", "old_path": "configs/ppyoloe/README.md", "new_path": "configs/ppyoloe/README.md", "diff": "@@ -136,6 +136,7 @@ PP-YOLOE can be deployed by following approches:\n- Paddle Inference [Python](../../deploy/python) & [C++](../../deploy/cpp)\n- [Paddle-TensorRT](../../deploy/TENSOR_RT.md)\n- [PaddleServing](https://github.com/PaddlePaddle/Serving)\n+ - [PaddleSlim](../configs/slim)\nNext, we will introduce how to use Paddle Inference to deploy PP-YOLOE models in TensorRT FP16 mode.\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
add slim link (#5908)
499,333
10.05.2022 19:12:33
-28,800
a853016850a3a0a173544d7072bd8cc115dd279e
refine device when assign on cpu
[ { "change_type": "MODIFY", "old_path": "ppdet/modeling/post_process.py", "new_path": "ppdet/modeling/post_process.py", "diff": "@@ -194,8 +194,7 @@ class MaskPostProcess(object):\nsuper(MaskPostProcess, self).__init__()\nself.binary_thresh = binary_thresh\nself.export_onnx = export_onnx\n- self.assign_on_cpu = assign_on_cpu and paddle.device.is_compiled_with_cuda(\n- )\n+ self.assign_on_cpu = assign_on_cpu\ndef paste_mask(self, masks, boxes, im_h, im_w):\n\"\"\"\n@@ -240,6 +239,7 @@ class MaskPostProcess(object):\n\"\"\"\nnum_mask = mask_out.shape[0]\norigin_shape = paddle.cast(origin_shape, 'int32')\n+ device = paddle.device.get_device()\nif self.export_onnx:\nh, w = origin_shape[0][0], origin_shape[0][1]\n@@ -269,7 +269,7 @@ class MaskPostProcess(object):\nim_w] = pred_mask\nid_start += bbox_num[i]\nif self.assign_on_cpu:\n- paddle.set_device('gpu')\n+ paddle.set_device(device)\nreturn pred_result\n" }, { "change_type": "MODIFY", "old_path": "ppdet/modeling/proposal_generator/target.py", "new_path": "ppdet/modeling/proposal_generator/target.py", "diff": "@@ -74,9 +74,11 @@ def label_box(anchors,\nis_crowd=None,\nassign_on_cpu=False):\nif assign_on_cpu:\n+ device = paddle.device.get_device()\npaddle.set_device(\"cpu\")\niou = bbox_overlaps(gt_boxes, anchors)\n- paddle.set_device(\"gpu\")\n+ paddle.set_device(device)\n+\nelse:\niou = bbox_overlaps(gt_boxes, anchors)\nn_gt = gt_boxes.shape[0]\n" }, { "change_type": "MODIFY", "old_path": "ppdet/modeling/proposal_generator/target_layer.py", "new_path": "ppdet/modeling/proposal_generator/target_layer.py", "diff": "@@ -68,8 +68,7 @@ class RPNTargetAssign(object):\nself.negative_overlap = negative_overlap\nself.ignore_thresh = ignore_thresh\nself.use_random = use_random\n- self.assign_on_cpu = assign_on_cpu and paddle.device.is_compiled_with_cuda(\n- )\n+ self.assign_on_cpu = assign_on_cpu\ndef __call__(self, inputs, anchors):\n\"\"\"\n@@ -150,8 +149,7 @@ class BBoxAssigner(object):\nself.use_random = use_random\nself.cascade_iou = cascade_iou\nself.num_classes = num_classes\n- self.assign_on_cpu = assign_on_cpu and paddle.device.is_compiled_with_cuda(\n- )\n+ self.assign_on_cpu = assign_on_cpu\ndef __call__(self,\nrpn_rois,\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
refine device when assign on cpu (#5924)
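The pattern above replaces a hard-coded paddle.set_device('gpu') with save-and-restore of whatever device was active, so CPU-only installs no longer break after a temporary CPU assignment. Condensed into a helper (a sketch, not a ppdet API):

import paddle

def run_on_cpu(fn, *args, **kwargs):
    device = paddle.device.get_device()   # e.g. 'gpu:0' or 'cpu'
    paddle.set_device('cpu')
    try:
        return fn(*args, **kwargs)
    finally:
        paddle.set_device(device)         # restore, whatever it was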
499,301
12.05.2022 10:13:54
-28,800
2629454d0c87fc0d2295b11ffa529829e5fb4c5f
update e quant link
[ { "change_type": "MODIFY", "old_path": "configs/ppyoloe/README.md", "new_path": "configs/ppyoloe/README.md", "diff": "@@ -136,7 +136,7 @@ PP-YOLOE can be deployed by following approches:\n- Paddle Inference [Python](../../deploy/python) & [C++](../../deploy/cpp)\n- [Paddle-TensorRT](../../deploy/TENSOR_RT.md)\n- [PaddleServing](https://github.com/PaddlePaddle/Serving)\n- - [PaddleSlim](../configs/slim)\n+ - [PaddleSlim](../slim)\nNext, we will introduce how to use Paddle Inference to deploy PP-YOLOE models in TensorRT FP16 mode.\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
update e quant link (#5938)