column            dtype          observed range / values
------            -----          -----------------------
author            int64          658 to 755k
date              stringlengths  19 to 19
timezone          int64          -46,800 to 43.2k
hash              stringlengths  40 to 40
message           stringlengths  5 to 490
mods              list
language          stringclasses  20 values
license           stringclasses  3 values
repo              stringlengths  5 to 68
original_message  stringlengths  12 to 491

author: 499,333
date: 13.05.2022 20:57:53
timezone: -28,800
hash: 67742521f191b423f277f004a71c2d2b41545019
message: fix evaldataset
mods:
[ { "change_type": "MODIFY", "old_path": "ppdet/engine/trainer.py", "new_path": "ppdet/engine/trainer.py", "diff": "@@ -376,7 +376,8 @@ class Trainer(object):\nassert self.mode == 'train', \"Model not in 'train' mode\"\nInit_mark = False\nif validate:\n- self.cfg.EvalDataset = create(\"EvalDataset\")()\n+ self.cfg['EvalDataset'] = self.cfg.EvalDataset = create(\n+ \"EvalDataset\")()\nsync_bn = (getattr(self.cfg, 'norm_type', None) == 'sync_bn' and\nself.cfg.use_gpu and self._nranks > 1)\n" } ]
language: Python
license: Apache License 2.0
repo: paddlepaddle/paddledetection
original_message: fix evaldataset (#5978)
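The change above writes the freshly built eval dataset through both the dict key and the attribute. A minimal sketch of why that matters, assuming a config type whose attribute reads fall back to dict entries but whose attribute writes land on the instance only (the class below is illustrative, not PaddleDetection's actual config implementation):

```python
# Illustrative config: attribute reads fall back to dict keys,
# but attribute writes go to the instance and leave the dict untouched.
class Config(dict):
    def __getattr__(self, name):
        try:
            return self[name]
        except KeyError:
            raise AttributeError(name)

cfg = Config(EvalDataset="placeholder")

cfg.EvalDataset = "built dataset"   # attribute write only: dict view goes stale
print(cfg.EvalDataset)              # 'built dataset'
print(cfg["EvalDataset"])           # still 'placeholder'

# the pattern used by the fix: write through both views so they agree
cfg["EvalDataset"] = cfg.EvalDataset = "built dataset"
print(cfg["EvalDataset"])           # 'built dataset'
```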

author: 499,304
date: 13.05.2022 20:58:22
timezone: -28,800
hash: a972c39e26a40c90c2ef4c1c4a06f8e9fba15509
message: support fuse conv bn when export model
mods:
[ { "change_type": "MODIFY", "old_path": "configs/runtime.yml", "new_path": "configs/runtime.yml", "diff": "@@ -10,3 +10,4 @@ export:\npost_process: True # Whether post-processing is included in the network when export model.\nnms: True # Whether NMS is included in the network when export model.\nbenchmark: False # It is used to testing model performance, if set `True`, post-process and NMS will not be exported.\n+ fuse_conv_bn: False\n" }, { "change_type": "MODIFY", "old_path": "ppdet/engine/trainer.py", "new_path": "ppdet/engine/trainer.py", "diff": "@@ -44,6 +44,7 @@ from ppdet.metrics import RBoxMetric, JDEDetMetric, SNIPERCOCOMetric\nfrom ppdet.data.source.sniper_coco import SniperCOCODataSet\nfrom ppdet.data.source.category import get_categories\nimport ppdet.utils.stats as stats\n+from ppdet.utils.fuse_utils import fuse_conv_bn\nfrom ppdet.utils import profiler\nfrom .callbacks import Callback, ComposeCallback, LogPrinter, Checkpointer, WiferFaceEval, VisualDLWriter, SniperProposalsGenerator, WandbCallback\n@@ -770,6 +771,11 @@ class Trainer(object):\ndef export(self, output_dir='output_inference'):\nself.model.eval()\n+\n+ if hasattr(self.cfg, 'export') and 'fuse_conv_bn' in self.cfg[\n+ 'export'] and self.cfg['export']['fuse_conv_bn']:\n+ self.model = fuse_conv_bn(self.model)\n+\nmodel_name = os.path.splitext(os.path.split(self.cfg.filename)[-1])[0]\nsave_dir = os.path.join(output_dir, model_name)\nif not os.path.exists(save_dir):\n" }, { "change_type": "ADD", "old_path": null, "new_path": "ppdet/utils/fuse_utils.py", "diff": "+# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n+import copy\n+import paddle\n+import paddle.nn as nn\n+\n+__all__ = ['fuse_conv_bn']\n+\n+\n+def fuse_conv_bn(model):\n+ is_train = False\n+ if model.training:\n+ model.eval()\n+ is_train = True\n+ fuse_list = []\n+ tmp_pair = [None, None]\n+ for name, layer in model.named_sublayers():\n+ if isinstance(layer, nn.Conv2D):\n+ tmp_pair[0] = name\n+ if isinstance(layer, nn.BatchNorm2D):\n+ tmp_pair[1] = name\n+\n+ if tmp_pair[0] and tmp_pair[1] and len(tmp_pair) == 2:\n+ fuse_list.append(tmp_pair)\n+ tmp_pair = [None, None]\n+ model = fuse_layers(model, fuse_list)\n+ if is_train:\n+ model.train()\n+ return model\n+\n+\n+def find_parent_layer_and_sub_name(model, name):\n+ \"\"\"\n+ Given the model and the name of a layer, find the parent layer and\n+ the sub_name of the layer.\n+ For example, if name is 'block_1/convbn_1/conv_1', the parent layer is\n+ 'block_1/convbn_1' and the sub_name is `conv_1`.\n+ Args:\n+ model(paddle.nn.Layer): the model to be quantized.\n+ name(string): the name of a layer\n+\n+ Returns:\n+ parent_layer, subname\n+ \"\"\"\n+ assert isinstance(model, nn.Layer), \\\n+ \"The model must be the instance of paddle.nn.Layer.\"\n+ assert len(name) > 0, \"The input (name) should not be empty.\"\n+\n+ last_idx = 0\n+ idx = 0\n+ parent_layer = model\n+ while idx < len(name):\n+ if name[idx] == '.':\n+ sub_name = 
name[last_idx:idx]\n+ if hasattr(parent_layer, sub_name):\n+ parent_layer = getattr(parent_layer, sub_name)\n+ last_idx = idx + 1\n+ idx += 1\n+ sub_name = name[last_idx:idx]\n+ return parent_layer, sub_name\n+\n+\n+class Identity(nn.Layer):\n+ '''a layer to replace bn or relu layers'''\n+\n+ def __init__(self, *args, **kwargs):\n+ super(Identity, self).__init__()\n+\n+ def forward(self, input):\n+ return input\n+\n+\n+def fuse_layers(model, layers_to_fuse, inplace=False):\n+ '''\n+ fuse layers in layers_to_fuse\n+\n+ Args:\n+ model(nn.Layer): The model to be fused.\n+ layers_to_fuse(list): The layers' names to be fused. For\n+ example,\"fuse_list = [[\"conv1\", \"bn1\"], [\"conv2\", \"bn2\"]]\".\n+ A TypeError would be raised if \"fuse\" was set as\n+ True but \"fuse_list\" was None.\n+ Default: None.\n+ inplace(bool): Whether apply fusing to the input model.\n+ Default: False.\n+\n+ Return\n+ fused_model(paddle.nn.Layer): The fused model.\n+ '''\n+ if not inplace:\n+ model = copy.deepcopy(model)\n+ for layers_list in layers_to_fuse:\n+ layer_list = []\n+ for layer_name in layers_list:\n+ parent_layer, sub_name = find_parent_layer_and_sub_name(model,\n+ layer_name)\n+ layer_list.append(getattr(parent_layer, sub_name))\n+ new_layers = _fuse_func(layer_list)\n+ for i, item in enumerate(layers_list):\n+ parent_layer, sub_name = find_parent_layer_and_sub_name(model, item)\n+ setattr(parent_layer, sub_name, new_layers[i])\n+ return model\n+\n+\n+def _fuse_func(layer_list):\n+ '''choose the fuser method and fuse layers'''\n+ types = tuple(type(m) for m in layer_list)\n+ fusion_method = types_to_fusion_method.get(types, None)\n+ new_layers = [None] * len(layer_list)\n+ fused_layer = fusion_method(*layer_list)\n+ for handle_id, pre_hook_fn in layer_list[0]._forward_pre_hooks.items():\n+ fused_layer.register_forward_pre_hook(pre_hook_fn)\n+ del layer_list[0]._forward_pre_hooks[handle_id]\n+ for handle_id, hook_fn in layer_list[-1]._forward_post_hooks.items():\n+ fused_layer.register_forward_post_hook(hook_fn)\n+ del layer_list[-1]._forward_post_hooks[handle_id]\n+ new_layers[0] = fused_layer\n+ for i in range(1, len(layer_list)):\n+ identity = Identity()\n+ identity.training = layer_list[0].training\n+ new_layers[i] = identity\n+ return new_layers\n+\n+\n+def _fuse_conv_bn(conv, bn):\n+ '''fuse conv and bn for train or eval'''\n+ assert(conv.training == bn.training),\\\n+ \"Conv and BN both must be in the same mode (train or eval).\"\n+ if conv.training:\n+ assert bn._num_features == conv._out_channels, 'Output channel of Conv2d must match num_features of BatchNorm2d'\n+ raise NotImplementedError\n+ else:\n+ return _fuse_conv_bn_eval(conv, bn)\n+\n+\n+def _fuse_conv_bn_eval(conv, bn):\n+ '''fuse conv and bn for eval'''\n+ assert (not (conv.training or bn.training)), \"Fusion only for eval!\"\n+ fused_conv = copy.deepcopy(conv)\n+\n+ fused_weight, fused_bias = _fuse_conv_bn_weights(\n+ fused_conv.weight, fused_conv.bias, bn._mean, bn._variance, bn._epsilon,\n+ bn.weight, bn.bias)\n+ fused_conv.weight.set_value(fused_weight)\n+ if fused_conv.bias is None:\n+ fused_conv.bias = paddle.create_parameter(\n+ shape=[fused_conv._out_channels], is_bias=True, dtype=bn.bias.dtype)\n+ fused_conv.bias.set_value(fused_bias)\n+ return fused_conv\n+\n+\n+def _fuse_conv_bn_weights(conv_w, conv_b, bn_rm, bn_rv, bn_eps, bn_w, bn_b):\n+ '''fuse weights and bias of conv and bn'''\n+ if conv_b is None:\n+ conv_b = paddle.zeros_like(bn_rm)\n+ if bn_w is None:\n+ bn_w = paddle.ones_like(bn_rm)\n+ if bn_b is None:\n+ bn_b 
= paddle.zeros_like(bn_rm)\n+ bn_var_rsqrt = paddle.rsqrt(bn_rv + bn_eps)\n+ conv_w = conv_w * \\\n+ (bn_w * bn_var_rsqrt).reshape([-1] + [1] * (len(conv_w.shape) - 1))\n+ conv_b = (conv_b - bn_rm) * bn_var_rsqrt * bn_w + bn_b\n+ return conv_w, conv_b\n+\n+\n+types_to_fusion_method = {(nn.Conv2D, nn.BatchNorm2D): _fuse_conv_bn, }\n" } ]
language: Python
license: Apache License 2.0
repo: paddlepaddle/paddledetection
original_message: support fuse conv bn when export model (#5977)
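The new `_fuse_conv_bn_weights` folds an eval-mode BatchNorm into the preceding convolution by rescaling the weights and shifting the bias. A NumPy sketch of the same algebra, using a 1x1 convolution (a plain matmul) and made-up shapes purely to check the identity:

```python
# Conv+BN folding check: BN(conv(x)) == fused_conv(x) at inference time.
import numpy as np

rng = np.random.default_rng(0)
cin, cout, eps = 4, 3, 1e-5

w = rng.normal(size=(cout, cin))          # 1x1 conv weight, [out, in]
b = rng.normal(size=cout)                 # conv bias
gamma = rng.normal(size=cout)             # BN scale
beta = rng.normal(size=cout)              # BN shift
mean = rng.normal(size=cout)              # BN running mean
var = rng.uniform(0.5, 2.0, size=cout)    # BN running variance

x = rng.normal(size=cin)                  # one pixel's input features

# reference: conv followed by batch norm in eval mode
y_ref = gamma * ((w @ x + b) - mean) / np.sqrt(var + eps) + beta

# folded parameters, same formula as _fuse_conv_bn_weights
scale = gamma / np.sqrt(var + eps)
w_fused = w * scale[:, None]
b_fused = (b - mean) * scale + beta

y_fused = w_fused @ x + b_fused
assert np.allclose(y_ref, y_fused)
```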

author: 499,301
date: 23.05.2022 11:15:25
timezone: -28,800
hash: ea5f339ac6178360cea1ed0c30fa72dd40cc3802
message: reorg optimizer
mods:
[ { "change_type": "ADD", "old_path": null, "new_path": "ppdet/optimizer/__init__.py", "diff": "+# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n+from .optimizer import *\n+from .ema import ModelEMA\n" }, { "change_type": "ADD", "old_path": null, "new_path": "ppdet/optimizer/ema.py", "diff": "+# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n+from __future__ import absolute_import\n+from __future__ import division\n+from __future__ import print_function\n+\n+import math\n+import paddle\n+import weakref\n+\n+\n+class ModelEMA(object):\n+ \"\"\"\n+ Exponential Weighted Average for Deep Neutal Networks\n+ Args:\n+ model (nn.Layer): Detector of model.\n+ decay (int): The decay used for updating ema parameter.\n+ Ema's parameter are updated with the formula:\n+ `ema_param = decay * ema_param + (1 - decay) * cur_param`.\n+ Defaults is 0.9998.\n+ ema_decay_type (str): type in ['threshold', 'normal', 'exponential'],\n+ 'threshold' as default.\n+ cycle_epoch (int): The epoch of interval to reset ema_param and\n+ step. Defaults is -1, which means not reset. 
Its function is to\n+ add a regular effect to ema, which is set according to experience\n+ and is effective when the total training epoch is large.\n+ \"\"\"\n+\n+ def __init__(self,\n+ model,\n+ decay=0.9998,\n+ ema_decay_type='threshold',\n+ cycle_epoch=-1):\n+ self.step = 0\n+ self.epoch = 0\n+ self.decay = decay\n+ self.state_dict = dict()\n+ for k, v in model.state_dict().items():\n+ self.state_dict[k] = paddle.zeros_like(v)\n+ self.ema_decay_type = ema_decay_type\n+ self.cycle_epoch = cycle_epoch\n+\n+ self._model_state = {\n+ k: weakref.ref(p)\n+ for k, p in model.state_dict().items()\n+ }\n+\n+ def reset(self):\n+ self.step = 0\n+ self.epoch = 0\n+ for k, v in self.state_dict.items():\n+ self.state_dict[k] = paddle.zeros_like(v)\n+\n+ def resume(self, state_dict, step=0):\n+ for k, v in state_dict.items():\n+ if k in self.state_dict:\n+ self.state_dict[k] = v\n+ self.step = step\n+\n+ def update(self, model=None):\n+ if self.ema_decay_type == 'threshold':\n+ decay = min(self.decay, (1 + self.step) / (10 + self.step))\n+ elif self.ema_decay_type == 'exponential':\n+ decay = self.decay * (1 - math.exp(-(self.step + 1) / 2000))\n+ else:\n+ decay = self.decay\n+ self._decay = decay\n+\n+ if model is not None:\n+ model_dict = model.state_dict()\n+ else:\n+ model_dict = {k: p() for k, p in self._model_state.items()}\n+ assert all(\n+ [v is not None for _, v in model_dict.items()]), 'python gc.'\n+\n+ for k, v in self.state_dict.items():\n+ v = decay * v + (1 - decay) * model_dict[k]\n+ v.stop_gradient = True\n+ self.state_dict[k] = v\n+ self.step += 1\n+\n+ def apply(self):\n+ if self.step == 0:\n+ return self.state_dict\n+ state_dict = dict()\n+ for k, v in self.state_dict.items():\n+ if self.ema_decay_type != 'exponential':\n+ v = v / (1 - self._decay**self.step)\n+ v.stop_gradient = True\n+ state_dict[k] = v\n+ self.epoch += 1\n+ if self.cycle_epoch > 0 and self.epoch == self.cycle_epoch:\n+ self.reset()\n+\n+ return state_dict\n" }, { "change_type": "RENAME", "old_path": "ppdet/optimizer.py", "new_path": "ppdet/optimizer/optimizer.py", "diff": "@@ -18,7 +18,6 @@ from __future__ import print_function\nimport sys\nimport math\n-import weakref\nimport paddle\nimport paddle.nn as nn\n@@ -360,89 +359,3 @@ class OptimizerBuilder():\nparameters=params,\ngrad_clip=grad_clip,\n**optim_args)\n-\n-\n-class ModelEMA(object):\n- \"\"\"\n- Exponential Weighted Average for Deep Neutal Networks\n- Args:\n- model (nn.Layer): Detector of model.\n- decay (int): The decay used for updating ema parameter.\n- Ema's parameter are updated with the formula:\n- `ema_param = decay * ema_param + (1 - decay) * cur_param`.\n- Defaults is 0.9998.\n- ema_decay_type (str): type in ['threshold', 'normal', 'exponential'],\n- 'threshold' as default.\n- cycle_epoch (int): The epoch of interval to reset ema_param and\n- step. Defaults is -1, which means not reset. 
Its function is to\n- add a regular effect to ema, which is set according to experience\n- and is effective when the total training epoch is large.\n- \"\"\"\n-\n- def __init__(self,\n- model,\n- decay=0.9998,\n- ema_decay_type='threshold',\n- cycle_epoch=-1):\n- self.step = 0\n- self.epoch = 0\n- self.decay = decay\n- self.state_dict = dict()\n- for k, v in model.state_dict().items():\n- self.state_dict[k] = paddle.zeros_like(v)\n- self.ema_decay_type = ema_decay_type\n- self.cycle_epoch = cycle_epoch\n-\n- self._model_state = {\n- k: weakref.ref(p)\n- for k, p in model.state_dict().items()\n- }\n-\n- def reset(self):\n- self.step = 0\n- self.epoch = 0\n- for k, v in self.state_dict.items():\n- self.state_dict[k] = paddle.zeros_like(v)\n-\n- def resume(self, state_dict, step=0):\n- for k, v in state_dict.items():\n- if k in self.state_dict:\n- self.state_dict[k] = v\n- self.step = step\n-\n- def update(self, model=None):\n- if self.ema_decay_type == 'threshold':\n- decay = min(self.decay, (1 + self.step) / (10 + self.step))\n- elif self.ema_decay_type == 'exponential':\n- decay = self.decay * (1 - math.exp(-(self.step + 1) / 2000))\n- else:\n- decay = self.decay\n- self._decay = decay\n-\n- if model is not None:\n- model_dict = model.state_dict()\n- else:\n- model_dict = {k: p() for k, p in self._model_state.items()}\n- assert all(\n- [v is not None for _, v in model_dict.items()]), 'python gc.'\n-\n- for k, v in self.state_dict.items():\n- v = decay * v + (1 - decay) * model_dict[k]\n- v.stop_gradient = True\n- self.state_dict[k] = v\n- self.step += 1\n-\n- def apply(self):\n- if self.step == 0:\n- return self.state_dict\n- state_dict = dict()\n- for k, v in self.state_dict.items():\n- if self.ema_decay_type != 'exponential':\n- v = v / (1 - self._decay**self.step)\n- v.stop_gradient = True\n- state_dict[k] = v\n- self.epoch += 1\n- if self.cycle_epoch > 0 and self.epoch == self.cycle_epoch:\n- self.reset()\n-\n- return state_dict\n" } ]
language: Python
license: Apache License 2.0
repo: paddlepaddle/paddledetection
original_message: reorg optimizer (#6016)
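The relocated `ModelEMA` keeps an exponential moving average of the model weights, `ema_param = decay * ema_param + (1 - decay) * cur_param`, with a warm-up 'threshold' decay schedule and a bias correction in `apply()`. A toy, plain-Python sketch of that schedule (values are illustrative, one scalar stands in for a parameter tensor):

```python
decay = 0.9998
ema, step = 0.0, 0
for cur_param in [1.0, 1.2, 0.8, 1.1]:           # pretend parameter values
    d = min(decay, (1 + step) / (10 + step))     # 'threshold' decay schedule
    ema = d * ema + (1 - d) * cur_param
    step += 1

# bias correction used by apply() for the non-exponential decay types
corrected = ema / (1 - d ** step)
print(ema, corrected)
```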

author: 499,299
date: 23.05.2022 12:22:17
timezone: -28,800
hash: b185733495f0600c8e178e331f04f98d5d4ae25b
message: fix solov2 not support multi-images
mods:
[ { "change_type": "MODIFY", "old_path": "deploy/python/infer.py", "new_path": "deploy/python/infer.py", "diff": "@@ -224,7 +224,7 @@ class Detector(object):\nfor k, v in res.items():\nresults[k].append(v)\nfor k, v in results.items():\n- if k != 'masks':\n+ if k not in ['masks', 'segm']:\nresults[k] = np.concatenate(v)\nreturn results\n" } ]
language: Python
license: Apache License 2.0
repo: paddlepaddle/paddledetection
original_message: fix solov2 not support multi-images (#6019)
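The deploy-side fix keeps per-image `segm` outputs as lists (like `masks`) instead of concatenating them, since their shapes differ between images. A small NumPy sketch of the merge loop with made-up shapes:

```python
import numpy as np

per_image_results = [
    {'boxes': np.zeros((2, 6)), 'segm': np.zeros((2, 8, 8))},
    {'boxes': np.zeros((3, 6)), 'segm': np.zeros((3, 16, 16))},
]

merged = {}
for res in per_image_results:
    for k, v in res.items():
        merged.setdefault(k, []).append(v)

for k, v in merged.items():
    if k not in ['masks', 'segm']:       # the fix: also skip 'segm'
        merged[k] = np.concatenate(v)    # uniform shapes, safe to concatenate

print(merged['boxes'].shape)             # (5, 6)
print(len(merged['segm']))               # 2, kept per image
```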

author: 499,333
date: 24.05.2022 10:36:08
timezone: -28,800
hash: 1d8c3a7edffc6c899f3b7ab3eb95b81ef300be0b
message: enhance shm utils
mods:
[ { "change_type": "MODIFY", "old_path": "ppdet/data/shm_utils.py", "new_path": "ppdet/data/shm_utils.py", "diff": "@@ -34,6 +34,9 @@ SHM_DEFAULT_MOUNT = '/dev/shm'\ndef _parse_size_in_M(size_str):\n+ if size_str[-1] == 'B':\n+ num, unit = size_str[:-2], size_str[-2]\n+ else:\nnum, unit = size_str[:-1], size_str[-1]\nassert unit in SIZE_UNIT, \\\n\"unknown shm size unit {}\".format(unit)\n" } ]
language: Python
license: Apache License 2.0
repo: paddlepaddle/paddledetection
original_message: enhance shm utils (#6042)
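The `_parse_size_in_M` change accepts both '64M' and '64MB' style size strings by peeling off a trailing 'B' before reading the unit. A standalone re-implementation for illustration (the unit list and megabyte conversion of the real helper may differ):

```python
SIZE_UNIT = ['K', 'M', 'G', 'T']

def parse_unit(size_str):
    # '64MB' -> number '64', unit 'M'; '64M' -> number '64', unit 'M'
    if size_str[-1] == 'B':
        num, unit = size_str[:-2], size_str[-2]
    else:
        num, unit = size_str[:-1], size_str[-1]
    assert unit in SIZE_UNIT, "unknown shm size unit {}".format(unit)
    return float(num), unit

print(parse_unit('64M'))    # (64.0, 'M')
print(parse_unit('64MB'))   # (64.0, 'M')
```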

author: 499,299
date: 25.05.2022 10:49:48
timezone: -28,800
hash: 84faecbca8748f1da572d0da12e3c0214f3d84bf
message: add cpp infer support for solov2
mods:
[ { "change_type": "MODIFY", "old_path": "deploy/cpp/include/object_detector.h", "new_path": "deploy/cpp/include/object_detector.h", "diff": "#include \"include/utils.h\"\nusing namespace paddle_infer;\n-\nnamespace PaddleDetection {\n// Generate visualization colormap for each class\nstd::vector<int> GenerateColorMap(int num_class);\n// Visualiztion Detection Result\n-cv::Mat VisualizeResult(\n- const cv::Mat& img,\n+cv::Mat\n+VisualizeResult(const cv::Mat &img,\nconst std::vector<PaddleDetection::ObjectResult> &results,\nconst std::vector<std::string> &lables,\n- const std::vector<int>& colormap,\n- const bool is_rbox);\n+ const std::vector<int> &colormap, const bool is_rbox);\nclass ObjectDetector {\npublic:\nexplicit ObjectDetector(const std::string &model_dir,\nconst std::string &device = \"CPU\",\n- bool use_mkldnn = false,\n- int cpu_threads = 1,\n+ bool use_mkldnn = false, int cpu_threads = 1,\nconst std::string &run_mode = \"paddle\",\n- const int batch_size = 1,\n- const int gpu_id = 0,\n+ const int batch_size = 1, const int gpu_id = 0,\nconst int trt_min_shape = 1,\nconst int trt_max_shape = 1280,\nconst int trt_opt_shape = 640,\n@@ -78,15 +74,12 @@ class ObjectDetector {\n}\n// Load Paddle inference model\n- void LoadModel(const std::string& model_dir,\n- const int batch_size = 1,\n+ void LoadModel(const std::string &model_dir, const int batch_size = 1,\nconst std::string &run_mode = \"paddle\");\n// Run predictor\n- void Predict(const std::vector<cv::Mat> imgs,\n- const double threshold = 0.5,\n- const int warmup = 0,\n- const int repeats = 1,\n+ void Predict(const std::vector<cv::Mat> imgs, const double threshold = 0.5,\n+ const int warmup = 0, const int repeats = 1,\nstd::vector<PaddleDetection::ObjectResult> *result = nullptr,\nstd::vector<int> *bbox_num = nullptr,\nstd::vector<double> *times = nullptr);\n@@ -112,10 +105,14 @@ class ObjectDetector {\n// Postprocess result\nvoid Postprocess(const std::vector<cv::Mat> mats,\nstd::vector<PaddleDetection::ObjectResult> *result,\n- std::vector<int> bbox_num,\n- std::vector<float> output_data_,\n- std::vector<int> output_mask_data_,\n- bool is_rbox);\n+ std::vector<int> bbox_num, std::vector<float> output_data_,\n+ std::vector<int> output_mask_data_, bool is_rbox);\n+\n+ void SOLOv2Postprocess(\n+ const std::vector<cv::Mat> mats, std::vector<ObjectResult> *result,\n+ std::vector<int> *bbox_num, std::vector<int> out_bbox_num_data_,\n+ std::vector<int64_t> out_label_data_, std::vector<float> out_score_data_,\n+ std::vector<uint8_t> out_global_mask_data_, float threshold = 0.5);\nstd::shared_ptr<Predictor> predictor_;\nPreprocessor preprocessor_;\n" }, { "change_type": "MODIFY", "old_path": "deploy/cpp/src/object_detector.cc", "new_path": "deploy/cpp/src/object_detector.cc", "diff": "@@ -41,17 +41,12 @@ void ObjectDetector::LoadModel(const std::string &model_dir,\n} else if (run_mode == \"trt_int8\") {\nprecision = paddle_infer::Config::Precision::kInt8;\n} else {\n- printf(\n- \"run_mode should be 'paddle', 'trt_fp32', 'trt_fp16' or \"\n+ printf(\"run_mode should be 'paddle', 'trt_fp32', 'trt_fp16' or \"\n\"'trt_int8'\");\n}\n// set tensorrt\n- config.EnableTensorRtEngine(1 << 30,\n- batch_size,\n- this->min_subgraph_size_,\n- precision,\n- false,\n- this->trt_calib_mode_);\n+ config.EnableTensorRtEngine(1 << 30, batch_size, this->min_subgraph_size_,\n+ precision, false, this->trt_calib_mode_);\n// set use dynamic shape\nif (this->use_dynamic_shape_) {\n@@ -69,8 +64,8 @@ void ObjectDetector::LoadModel(const std::string 
&model_dir,\nconst std::map<std::string, std::vector<int>> map_opt_input_shape = {\n{\"image\", opt_input_shape}};\n- config.SetTRTDynamicShapeInfo(\n- map_min_input_shape, map_max_input_shape, map_opt_input_shape);\n+ config.SetTRTDynamicShapeInfo(map_min_input_shape, map_max_input_shape,\n+ map_opt_input_shape);\nstd::cout << \"TensorRT dynamic shape enabled\" << std::endl;\n}\n}\n@@ -95,12 +90,11 @@ void ObjectDetector::LoadModel(const std::string &model_dir,\n}\n// Visualiztion MaskDetector results\n-cv::Mat VisualizeResult(\n- const cv::Mat &img,\n+cv::Mat\n+VisualizeResult(const cv::Mat &img,\nconst std::vector<PaddleDetection::ObjectResult> &results,\nconst std::vector<std::string> &lables,\n- const std::vector<int> &colormap,\n- const bool is_rbox = false) {\n+ const std::vector<int> &colormap, const bool is_rbox = false) {\ncv::Mat vis_img = img.clone();\nint img_h = vis_img.rows;\nint img_w = vis_img.cols;\n@@ -149,16 +143,10 @@ cv::Mat VisualizeResult(\nstd::vector<cv::Mat> contours;\ncv::Mat hierarchy;\nmask.convertTo(mask, CV_8U);\n- cv::findContours(\n- mask, contours, hierarchy, cv::RETR_CCOMP, cv::CHAIN_APPROX_SIMPLE);\n- cv::drawContours(colored_img,\n- contours,\n- -1,\n- roi_color,\n- -1,\n- cv::LINE_8,\n- hierarchy,\n- 100);\n+ cv::findContours(mask, contours, hierarchy, cv::RETR_CCOMP,\n+ cv::CHAIN_APPROX_SIMPLE);\n+ cv::drawContours(colored_img, contours, -1, roi_color, -1, cv::LINE_8,\n+ hierarchy, 100);\ncv::Mat debug_roi = vis_img;\ncolored_img = 0.4 * colored_img + 0.6 * vis_img;\n@@ -170,19 +158,13 @@ cv::Mat VisualizeResult(\norigin.y = results[i].rect[1];\n// Configure text background\n- cv::Rect text_back = cv::Rect(results[i].rect[0],\n- results[i].rect[1] - text_size.height,\n- text_size.width,\n- text_size.height);\n+ cv::Rect text_back =\n+ cv::Rect(results[i].rect[0], results[i].rect[1] - text_size.height,\n+ text_size.width, text_size.height);\n// Draw text, and background\ncv::rectangle(vis_img, text_back, roi_color, -1);\n- cv::putText(vis_img,\n- text,\n- origin,\n- font_face,\n- font_scale,\n- cv::Scalar(255, 255, 255),\n- thickness);\n+ cv::putText(vis_img, text, origin, font_face, font_scale,\n+ cv::Scalar(255, 255, 255), thickness);\n}\nreturn vis_img;\n}\n@@ -197,10 +179,8 @@ void ObjectDetector::Preprocess(const cv::Mat &ori_im) {\nvoid ObjectDetector::Postprocess(\nconst std::vector<cv::Mat> mats,\nstd::vector<PaddleDetection::ObjectResult> *result,\n- std::vector<int> bbox_num,\n- std::vector<float> output_data_,\n- std::vector<int> output_mask_data_,\n- bool is_rbox = false) {\n+ std::vector<int> bbox_num, std::vector<float> output_data_,\n+ std::vector<int> output_mask_data_, bool is_rbox = false) {\nresult->clear();\nint start_idx = 0;\nint total_num = std::accumulate(bbox_num.begin(), bbox_num.end(), 0);\n@@ -267,9 +247,81 @@ void ObjectDetector::Postprocess(\n}\n}\n+// This function is to convert output result from SOLOv2 to class ObjectResult\n+void ObjectDetector::SOLOv2Postprocess(\n+ const std::vector<cv::Mat> mats, std::vector<ObjectResult> *result,\n+ std::vector<int> *bbox_num, std::vector<int> out_bbox_num_data_,\n+ std::vector<int64_t> out_label_data_, std::vector<float> out_score_data_,\n+ std::vector<uint8_t> out_global_mask_data_, float threshold) {\n+\n+ for (int im_id = 0; im_id < mats.size(); im_id++) {\n+ cv::Mat mat = mats[im_id];\n+\n+ int valid_bbox_count = 0;\n+ for (int bbox_id = 0; bbox_id < out_bbox_num_data_[im_id]; ++bbox_id) {\n+ if (out_score_data_[bbox_id] >= threshold) {\n+ ObjectResult result_item;\n+ 
result_item.class_id = out_label_data_[bbox_id];\n+ result_item.confidence = out_score_data_[bbox_id];\n+ std::vector<int> global_mask;\n+\n+ for (int k = 0; k < mat.rows * mat.cols; ++k) {\n+ global_mask.push_back(static_cast<int>(\n+ out_global_mask_data_[k + bbox_id * mat.rows * mat.cols]));\n+ }\n+\n+ // find minimize bounding box from mask\n+ cv::Mat mask(mat.rows, mat.cols, CV_32SC1);\n+ std::memcpy(mask.data, global_mask.data(),\n+ global_mask.size() * sizeof(int));\n+\n+ cv::Mat mask_fp;\n+ cv::Mat rowSum;\n+ cv::Mat colSum;\n+ std::vector<float> sum_of_row(mat.rows);\n+ std::vector<float> sum_of_col(mat.cols);\n+\n+ mask.convertTo(mask_fp, CV_32FC1);\n+ cv::reduce(mask_fp, colSum, 0, CV_REDUCE_SUM, CV_32FC1);\n+ cv::reduce(mask_fp, rowSum, 1, CV_REDUCE_SUM, CV_32FC1);\n+\n+ for (int row_id = 0; row_id < mat.rows; ++row_id) {\n+ sum_of_row[row_id] = rowSum.at<float>(row_id, 0);\n+ }\n+\n+ for (int col_id = 0; col_id < mat.cols; ++col_id) {\n+ sum_of_col[col_id] = colSum.at<float>(0, col_id);\n+ }\n+\n+ auto it = std::find_if(sum_of_row.begin(), sum_of_row.end(),\n+ [](int x) { return x > 0.5; });\n+ int y1 = std::distance(sum_of_row.begin(), it);\n+\n+ auto it2 = std::find_if(sum_of_col.begin(), sum_of_col.end(),\n+ [](int x) { return x > 0.5; });\n+ int x1 = std::distance(sum_of_col.begin(), it2);\n+\n+ auto rit = std::find_if(sum_of_row.rbegin(), sum_of_row.rend(),\n+ [](int x) { return x > 0.5; });\n+ int y2 = std::distance(rit, sum_of_row.rend());\n+\n+ auto rit2 = std::find_if(sum_of_col.rbegin(), sum_of_col.rend(),\n+ [](int x) { return x > 0.5; });\n+ int x2 = std::distance(rit2, sum_of_col.rend());\n+\n+ result_item.rect = {x1, y1, x2, y2};\n+ result_item.mask = global_mask;\n+\n+ result->push_back(result_item);\n+ valid_bbox_count++;\n+ }\n+ }\n+ bbox_num->push_back(valid_bbox_count);\n+ }\n+}\n+\nvoid ObjectDetector::Predict(const std::vector<cv::Mat> imgs,\n- const double threshold,\n- const int warmup,\n+ const double threshold, const int warmup,\nconst int repeats,\nstd::vector<PaddleDetection::ObjectResult> *result,\nstd::vector<int> *bbox_num,\n@@ -285,6 +337,11 @@ void ObjectDetector::Predict(const std::vector<cv::Mat> imgs,\nstd::vector<int> out_bbox_num_data_;\nstd::vector<int> out_mask_data_;\n+ // these parameters are for SOLOv2 output\n+ std::vector<float> out_score_data_;\n+ std::vector<uint8_t> out_global_mask_data_;\n+ std::vector<int64_t> out_label_data_;\n+\n// in_net img for each batch\nstd::vector<cv::Mat> in_net_img_all(batch_size);\n@@ -298,8 +355,8 @@ void ObjectDetector::Predict(const std::vector<cv::Mat> imgs,\nscale_factor_all[bs_idx * 2] = inputs_.scale_factor_[0];\nscale_factor_all[bs_idx * 2 + 1] = inputs_.scale_factor_[1];\n- in_data_all.insert(\n- in_data_all.end(), inputs_.im_data_.begin(), inputs_.im_data_.end());\n+ in_data_all.insert(in_data_all.end(), inputs_.im_data_.begin(),\n+ inputs_.im_data_.end());\n// collect in_net img\nin_net_img_all[bs_idx] = inputs_.in_net_im_;\n@@ -320,8 +377,8 @@ void ObjectDetector::Predict(const std::vector<cv::Mat> imgs,\npad_data.resize(rc * rh * rw);\nfloat *base = pad_data.data();\nfor (int i = 0; i < rc; ++i) {\n- cv::extractChannel(\n- pad_img, cv::Mat(rh, rw, CV_32FC1, base + i * rh * rw), i);\n+ cv::extractChannel(pad_img,\n+ cv::Mat(rh, rw, CV_32FC1, base + i * rh * rw), i);\n}\nin_data_all.insert(in_data_all.end(), pad_data.begin(), pad_data.end());\n}\n@@ -354,6 +411,64 @@ void ObjectDetector::Predict(const std::vector<cv::Mat> imgs,\nbool is_rbox = false;\nint reg_max = 7;\nint num_class = 
80;\n+\n+ auto inference_start = std::chrono::steady_clock::now();\n+ if (config_.arch_ == \"SOLOv2\") {\n+ // warmup\n+ for (int i = 0; i < warmup; i++) {\n+ predictor_->Run();\n+ // Get output tensor\n+ auto output_names = predictor_->GetOutputNames();\n+ for (int j = 0; j < output_names.size(); j++) {\n+ auto output_tensor = predictor_->GetOutputHandle(output_names[j]);\n+ std::vector<int> output_shape = output_tensor->shape();\n+ int out_num = std::accumulate(output_shape.begin(), output_shape.end(),\n+ 1, std::multiplies<int>());\n+ if (j == 0) {\n+ out_bbox_num_data_.resize(out_num);\n+ output_tensor->CopyToCpu(out_bbox_num_data_.data());\n+ } else if (j == 1) {\n+ out_label_data_.resize(out_num);\n+ output_tensor->CopyToCpu(out_label_data_.data());\n+ } else if (j == 2) {\n+ out_score_data_.resize(out_num);\n+ output_tensor->CopyToCpu(out_score_data_.data());\n+ } else if (config_.mask_ && (j == 3)) {\n+ out_global_mask_data_.resize(out_num);\n+ output_tensor->CopyToCpu(out_global_mask_data_.data());\n+ }\n+ }\n+ }\n+\n+ inference_start = std::chrono::steady_clock::now();\n+ for (int i = 0; i < repeats; i++) {\n+ predictor_->Run();\n+ // Get output tensor\n+ out_tensor_list.clear();\n+ output_shape_list.clear();\n+ auto output_names = predictor_->GetOutputNames();\n+ for (int j = 0; j < output_names.size(); j++) {\n+ auto output_tensor = predictor_->GetOutputHandle(output_names[j]);\n+ std::vector<int> output_shape = output_tensor->shape();\n+ int out_num = std::accumulate(output_shape.begin(), output_shape.end(),\n+ 1, std::multiplies<int>());\n+ output_shape_list.push_back(output_shape);\n+ if (j == 0) {\n+ out_bbox_num_data_.resize(out_num);\n+ output_tensor->CopyToCpu(out_bbox_num_data_.data());\n+ } else if (j == 1) {\n+ out_label_data_.resize(out_num);\n+ output_tensor->CopyToCpu(out_label_data_.data());\n+ } else if (j == 2) {\n+ out_score_data_.resize(out_num);\n+ output_tensor->CopyToCpu(out_score_data_.data());\n+ } else if (config_.mask_ && (j == 3)) {\n+ out_global_mask_data_.resize(out_num);\n+ output_tensor->CopyToCpu(out_global_mask_data_.data());\n+ }\n+ }\n+ }\n+ } else {\n// warmup\nfor (int i = 0; i < warmup; i++) {\npredictor_->Run();\n@@ -362,8 +477,8 @@ void ObjectDetector::Predict(const std::vector<cv::Mat> imgs,\nfor (int j = 0; j < output_names.size(); j++) {\nauto output_tensor = predictor_->GetOutputHandle(output_names[j]);\nstd::vector<int> output_shape = output_tensor->shape();\n- int out_num = std::accumulate(\n- output_shape.begin(), output_shape.end(), 1, std::multiplies<int>());\n+ int out_num = std::accumulate(output_shape.begin(), output_shape.end(),\n+ 1, std::multiplies<int>());\nif (config_.mask_ && (j == 2)) {\nout_mask_data_.resize(out_num);\noutput_tensor->CopyToCpu(out_mask_data_.data());\n@@ -379,7 +494,7 @@ void ObjectDetector::Predict(const std::vector<cv::Mat> imgs,\n}\n}\n- auto inference_start = std::chrono::steady_clock::now();\n+ inference_start = std::chrono::steady_clock::now();\nfor (int i = 0; i < repeats; i++) {\npredictor_->Run();\n// Get output tensor\n@@ -389,8 +504,8 @@ void ObjectDetector::Predict(const std::vector<cv::Mat> imgs,\nfor (int j = 0; j < output_names.size(); j++) {\nauto output_tensor = predictor_->GetOutputHandle(output_names[j]);\nstd::vector<int> output_shape = output_tensor->shape();\n- int out_num = std::accumulate(\n- output_shape.begin(), output_shape.end(), 1, std::multiplies<int>());\n+ int out_num = std::accumulate(output_shape.begin(), output_shape.end(),\n+ 1, 
std::multiplies<int>());\noutput_shape_list.push_back(output_shape);\nif (config_.mask_ && (j == 2)) {\nout_mask_data_.resize(out_num);\n@@ -406,6 +521,8 @@ void ObjectDetector::Predict(const std::vector<cv::Mat> imgs,\n}\n}\n}\n+ }\n+\nauto inference_end = std::chrono::steady_clock::now();\nauto postprocess_start = std::chrono::steady_clock::now();\n// Postprocessing result\n@@ -420,30 +537,23 @@ void ObjectDetector::Predict(const std::vector<cv::Mat> imgs,\nreg_max = output_shape_list[i][2] / 4 - 1;\n}\nfloat *buffer = new float[out_tensor_list[i].size()];\n- memcpy(buffer,\n- &out_tensor_list[i][0],\n+ memcpy(buffer, &out_tensor_list[i][0],\nout_tensor_list[i].size() * sizeof(float));\noutput_data_list_.push_back(buffer);\n}\nPaddleDetection::PicoDetPostProcess(\n- result,\n- output_data_list_,\n- config_.fpn_stride_,\n- inputs_.im_shape_,\n- inputs_.scale_factor_,\n- config_.nms_info_[\"score_threshold\"].as<float>(),\n- config_.nms_info_[\"nms_threshold\"].as<float>(),\n- num_class,\n- reg_max);\n+ result, output_data_list_, config_.fpn_stride_, inputs_.im_shape_,\n+ inputs_.scale_factor_, config_.nms_info_[\"score_threshold\"].as<float>(),\n+ config_.nms_info_[\"nms_threshold\"].as<float>(), num_class, reg_max);\nbbox_num->push_back(result->size());\n+ } else if (config_.arch_ == \"SOLOv2\") {\n+ SOLOv2Postprocess(imgs, result, bbox_num, out_bbox_num_data_,\n+ out_label_data_, out_score_data_, out_global_mask_data_,\n+ threshold);\n} else {\nis_rbox = output_shape_list[0][output_shape_list[0].size() - 1] % 10 == 0;\n- Postprocess(imgs,\n- result,\n- out_bbox_num_data_,\n- out_tensor_list[0],\n- out_mask_data_,\n- is_rbox);\n+ Postprocess(imgs, result, out_bbox_num_data_, out_tensor_list[0],\n+ out_mask_data_, is_rbox);\nfor (int k = 0; k < out_bbox_num_data_.size(); k++) {\nint tmp = out_bbox_num_data_[k];\nbbox_num->push_back(tmp);\n" } ]
language: Python
license: Apache License 2.0
repo: paddlepaddle/paddledetection
original_message: add cpp infer support for solov2 (#6050)
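The new `SOLOv2Postprocess` recovers a bounding box from each instance mask by reducing the mask to row and column sums and locating the first and last non-empty positions. The same idea in NumPy on a toy mask, mirroring the C++ loop:

```python
import numpy as np

mask = np.zeros((6, 8), dtype=np.int32)
mask[2:5, 3:6] = 1                        # a 3x3 blob of foreground pixels

row_sum = mask.sum(axis=1)                # one value per row
col_sum = mask.sum(axis=0)                # one value per column

y1 = int(np.argmax(row_sum > 0))                          # first non-empty row
y2 = int(len(row_sum) - np.argmax(row_sum[::-1] > 0))     # one past the last row
x1 = int(np.argmax(col_sum > 0))
x2 = int(len(col_sum) - np.argmax(col_sum[::-1] > 0))

print(x1, y1, x2, y2)                     # 3 2 6 5
```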

author: 499,339
date: 25.05.2022 12:26:32
timezone: -28,800
hash: 2dc058ad75021e2ad07aaaf16d9183a404fbc674
message: [dev] update amp, add amp_level
mods:
[ { "change_type": "MODIFY", "old_path": "ppdet/engine/trainer.py", "new_path": "ppdet/engine/trainer.py", "diff": "@@ -32,7 +32,6 @@ import paddle\nimport paddle.nn as nn\nimport paddle.distributed as dist\nfrom paddle.distributed import fleet\n-from paddle import amp\nfrom paddle.static import InputSpec\nfrom ppdet.optimizer import ModelEMA\n@@ -380,13 +379,21 @@ class Trainer(object):\nself.cfg['EvalDataset'] = self.cfg.EvalDataset = create(\n\"EvalDataset\")()\n+ model = self.model\nsync_bn = (getattr(self.cfg, 'norm_type', None) == 'sync_bn' and\nself.cfg.use_gpu and self._nranks > 1)\nif sync_bn:\n- self.model = paddle.nn.SyncBatchNorm.convert_sync_batchnorm(\n- self.model)\n+ model = paddle.nn.SyncBatchNorm.convert_sync_batchnorm(model)\n- model = self.model\n+ # enabel auto mixed precision mode\n+ use_amp = self.cfg.get('amp', False)\n+ amp_level = self.cfg.get('amp_level', 'O1')\n+ if use_amp:\n+ scaler = paddle.amp.GradScaler(\n+ enable=self.cfg.use_gpu or self.cfg.use_npu,\n+ init_loss_scaling=self.cfg.get('init_loss_scaling', 1024))\n+ model = paddle.amp.decorate(models=model, level=amp_level)\n+ # get distributed model\nif self.cfg.get('fleet', False):\nmodel = fleet.distributed_model(model)\nself.optimizer = fleet.distributed_optimizer(self.optimizer)\n@@ -394,13 +401,7 @@ class Trainer(object):\nfind_unused_parameters = self.cfg[\n'find_unused_parameters'] if 'find_unused_parameters' in self.cfg else False\nmodel = paddle.DataParallel(\n- self.model, find_unused_parameters=find_unused_parameters)\n-\n- # enabel auto mixed precision mode\n- if self.cfg.get('amp', False):\n- scaler = amp.GradScaler(\n- enable=self.cfg.use_gpu or self.cfg.use_npu,\n- init_loss_scaling=1024)\n+ model, find_unused_parameters=find_unused_parameters)\nself.status.update({\n'epoch_id': self.start_epoch,\n@@ -436,12 +437,12 @@ class Trainer(object):\nself._compose_callback.on_step_begin(self.status)\ndata['epoch_id'] = epoch_id\n- if self.cfg.get('amp', False):\n- with amp.auto_cast(enable=self.cfg.use_gpu):\n+ if use_amp:\n+ with paddle.amp.auto_cast(\n+ enable=self.cfg.use_gpu, level=amp_level):\n# model forward\noutputs = model(data)\nloss = outputs['loss']\n-\n# model backward\nscaled_loss = scaler.scale(loss)\nscaled_loss.backward()\n" } ]
language: Python
license: Apache License 2.0
repo: paddlepaddle/paddledetection
original_message: [dev] update amp, add amp_level (#6054)
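The trainer change moves to `paddle.amp.decorate` plus a configurable `amp_level`, with the forward pass under `paddle.amp.auto_cast` and the loss scaled by a `GradScaler`. A minimal sketch of that training step with a toy layer; shapes and hyperparameters are illustrative, and it mirrors the calls in the diff rather than the full trainer (which additionally ties `enable=` to `use_gpu`):

```python
import paddle

model = paddle.nn.Linear(4, 2)
opt = paddle.optimizer.SGD(learning_rate=0.1, parameters=model.parameters())

amp_level = 'O1'
scaler = paddle.amp.GradScaler(init_loss_scaling=1024)
model = paddle.amp.decorate(models=model, level=amp_level)

x = paddle.randn([8, 4])
with paddle.amp.auto_cast(level=amp_level):
    loss = model(x).mean()

scaled = scaler.scale(loss)       # scale the loss so low-precision gradients stay representable
scaled.backward()
scaler.minimize(opt, scaled)      # unscale gradients and step; skipped on inf/nan
opt.clear_grad()
```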

author: 499,333
date: 25.05.2022 14:06:12
timezone: -28,800
hash: 51b7d4cf5e97b9d876ccf34b0a3503cee7b527eb
message: fit py36
mods:
[ { "change_type": "MODIFY", "old_path": "ppdet/data/source/dataset.py", "new_path": "ppdet/data/source/dataset.py", "diff": "@@ -23,7 +23,7 @@ from paddle.io import Dataset\nfrom ppdet.core.workspace import register, serializable\nfrom ppdet.utils.download import get_dataset_path\nimport copy\n-import ppdet.data.source as source\n+from ppdet.data import source\n@serializable\n" } ]
language: Python
license: Apache License 2.0
repo: paddlepaddle/paddledetection
original_message: fit py36 (#6056)

author: 499,299
date: 25.05.2022 21:21:58
timezone: -28,800
hash: d2f86f6eac798988b646efeb64bc94e3da545033
message: fix image infer error in pphuman
mods:
[ { "change_type": "MODIFY", "old_path": "deploy/pphuman/pipe_utils.py", "new_path": "deploy/pphuman/pipe_utils.py", "diff": "@@ -297,6 +297,8 @@ def crop_image_with_det(batch_input, det_res, thresh=0.3):\ncrop_res = []\nfor b_id, input in enumerate(batch_input):\nboxes_num_i = boxes_num[b_id]\n+ if boxes_num_i == 0:\n+ continue\nboxes_i = boxes[start_idx:start_idx + boxes_num_i, :]\nscore_i = score[start_idx:start_idx + boxes_num_i]\nres = []\n" } ]
language: Python
license: Apache License 2.0
repo: paddlepaddle/paddledetection
original_message: fix image infer error in pphuman (#6060)
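The pipeline fix guards the per-image box slicing against images with zero detections. A toy NumPy sketch of that slicing with the added guard (values are made up):

```python
import numpy as np

boxes = np.array([[0, 0.9, 10, 10, 50, 50],
                  [0, 0.8, 20, 20, 60, 60],
                  [0, 0.7,  5,  5, 30, 30]], dtype=np.float32)
boxes_num = [2, 0, 1]                 # the middle image has no detections

start_idx = 0
for b_id, num in enumerate(boxes_num):
    if num == 0:                      # the added guard: skip empty images
        continue
    boxes_i = boxes[start_idx:start_idx + num]
    start_idx += num
    print(b_id, boxes_i.shape)        # 0 (2, 6) then 2 (1, 6)
```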

author: 499,339
date: 26.05.2022 12:19:19
timezone: -28,800
hash: 2768e1ae40444f24e2ab1ff50e0931a50e9e67fd
message: [PPYOLOE] fix assigner bug
mods:
[ { "change_type": "MODIFY", "old_path": "ppdet/modeling/assigners/atss_assigner.py", "new_path": "ppdet/modeling/assigners/atss_assigner.py", "diff": "@@ -51,7 +51,6 @@ class ATSSAssigner(nn.Layer):\ndef _gather_topk_pyramid(self, gt2anchor_distances, num_anchors_list,\npad_gt_mask):\n- pad_gt_mask = pad_gt_mask.tile([1, 1, self.topk]).astype(paddle.bool)\ngt2anchor_distances_list = paddle.split(\ngt2anchor_distances, num_anchors_list, axis=-1)\nnum_anchors_index = np.cumsum(num_anchors_list).tolist()\n@@ -61,15 +60,12 @@ class ATSSAssigner(nn.Layer):\nfor distances, anchors_index in zip(gt2anchor_distances_list,\nnum_anchors_index):\nnum_anchors = distances.shape[-1]\n- topk_metrics, topk_idxs = paddle.topk(\n+ _, topk_idxs = paddle.topk(\ndistances, self.topk, axis=-1, largest=False)\ntopk_idxs_list.append(topk_idxs + anchors_index)\n- topk_idxs = paddle.where(pad_gt_mask, topk_idxs,\n- paddle.zeros_like(topk_idxs))\n- is_in_topk = F.one_hot(topk_idxs, num_anchors).sum(axis=-2)\n- is_in_topk = paddle.where(is_in_topk > 1,\n- paddle.zeros_like(is_in_topk), is_in_topk)\n- is_in_topk_list.append(is_in_topk.astype(gt2anchor_distances.dtype))\n+ is_in_topk = F.one_hot(topk_idxs, num_anchors).sum(\n+ axis=-2).astype(gt2anchor_distances.dtype)\n+ is_in_topk_list.append(is_in_topk * pad_gt_mask)\nis_in_topk_list = paddle.concat(is_in_topk_list, axis=-1)\ntopk_idxs_list = paddle.concat(topk_idxs_list, axis=-1)\nreturn is_in_topk_list, topk_idxs_list\n@@ -155,9 +151,8 @@ class ATSSAssigner(nn.Layer):\niou_threshold = iou_threshold.reshape([batch_size, num_max_boxes, -1])\niou_threshold = iou_threshold.mean(axis=-1, keepdim=True) + \\\niou_threshold.std(axis=-1, keepdim=True)\n- is_in_topk = paddle.where(\n- iou_candidates > iou_threshold.tile([1, 1, num_anchors]),\n- is_in_topk, paddle.zeros_like(is_in_topk))\n+ is_in_topk = paddle.where(iou_candidates > iou_threshold, is_in_topk,\n+ paddle.zeros_like(is_in_topk))\n# 6. check the positive sample's center in gt, [B, n, L]\nis_in_gts = check_points_inside_bboxes(anchor_centers, gt_bboxes)\n" }, { "change_type": "MODIFY", "old_path": "ppdet/modeling/assigners/task_aligned_assigner.py", "new_path": "ppdet/modeling/assigners/task_aligned_assigner.py", "diff": "@@ -112,9 +112,7 @@ class TaskAlignedAssigner(nn.Layer):\n# select topk largest alignment metrics pred bbox as candidates\n# for each gt, [B, n, L]\nis_in_topk = gather_topk_anchors(\n- alignment_metrics * is_in_gts,\n- self.topk,\n- topk_mask=pad_gt_mask.tile([1, 1, self.topk]).astype(paddle.bool))\n+ alignment_metrics * is_in_gts, self.topk, topk_mask=pad_gt_mask)\n# select positive sample, [B, n, L]\nmask_positive = is_in_topk * is_in_gts * pad_gt_mask\n" }, { "change_type": "MODIFY", "old_path": "ppdet/modeling/assigners/utils.py", "new_path": "ppdet/modeling/assigners/utils.py", "diff": "@@ -88,7 +88,7 @@ def gather_topk_anchors(metrics, topk, largest=True, topk_mask=None, eps=1e-9):\nlargest (bool) : largest is a flag, if set to true,\nalgorithm will sort by descending order, otherwise sort by\nascending order. 
Default: True\n- topk_mask (Tensor, bool|None): shape[B, n, topk], ignore bbox mask,\n+ topk_mask (Tensor, float32): shape[B, n, 1], ignore bbox mask,\nDefault: None\neps (float): Default: 1e-9\nReturns:\n@@ -98,13 +98,11 @@ def gather_topk_anchors(metrics, topk, largest=True, topk_mask=None, eps=1e-9):\ntopk_metrics, topk_idxs = paddle.topk(\nmetrics, topk, axis=-1, largest=largest)\nif topk_mask is None:\n- topk_mask = (topk_metrics.max(axis=-1, keepdim=True) > eps).tile(\n- [1, 1, topk])\n- topk_idxs = paddle.where(topk_mask, topk_idxs, paddle.zeros_like(topk_idxs))\n- is_in_topk = F.one_hot(topk_idxs, num_anchors).sum(axis=-2)\n- is_in_topk = paddle.where(is_in_topk > 1,\n- paddle.zeros_like(is_in_topk), is_in_topk)\n- return is_in_topk.astype(metrics.dtype)\n+ topk_mask = (\n+ topk_metrics.max(axis=-1, keepdim=True) > eps).astype(metrics.dtype)\n+ is_in_topk = F.one_hot(topk_idxs, num_anchors).sum(\n+ axis=-2).astype(metrics.dtype)\n+ return is_in_topk * topk_mask\ndef check_points_inside_bboxes(points,\n" } ]
language: Python
license: Apache License 2.0
repo: paddlepaddle/paddledetection
original_message: [PPYOLOE] fix assigner bug (#6066)
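After the fix, `gather_topk_anchors` builds the top-k membership map by one-hot encoding the top-k indices, summing over the k axis, and multiplying by the float padding mask, instead of tiling a boolean mask and patching duplicates afterwards. A NumPy stand-in for the paddle ops, with toy sizes:

```python
import numpy as np

def one_hot(idx, depth):
    return np.eye(depth)[idx]

metrics = np.array([[[0.1, 0.9, 0.3, 0.7],       # one batch, two gts, four anchors
                     [0.4, 0.2, 0.8, 0.6]]])
pad_gt_mask = np.array([[[1.0], [0.0]]])         # second gt is padding

topk = 2
topk_idxs = np.argsort(-metrics, axis=-1)[..., :topk]             # [B, n, k]
is_in_topk = one_hot(topk_idxs, metrics.shape[-1]).sum(axis=-2)   # [B, n, L] membership
is_in_topk = is_in_topk * pad_gt_mask                             # zero out padded gts

print(is_in_topk)   # [[[0. 1. 0. 1.]
                    #   [0. 0. 0. 0.]]]
```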

author: 499,333
date: 31.05.2022 10:03:15
timezone: -28,800
hash: 271347a39e009f60675e16256b6639625b7bfbab
message: fix aa order in rcnn enhance
mods:
[ { "change_type": "MODIFY", "old_path": "configs/rcnn_enhance/_base_/faster_rcnn_enhance_reader.yml", "new_path": "configs/rcnn_enhance/_base_/faster_rcnn_enhance_reader.yml", "diff": "@@ -2,9 +2,9 @@ worker_num: 2\nTrainReader:\nsample_transforms:\n- Decode: {}\n+ - AutoAugment: {autoaug_type: v1}\n- RandomResize: {target_size: [[384,1000], [416,1000], [448,1000], [480,1000], [512,1000], [544,1000], [576,1000], [608,1000], [640,1000], [672,1000]], interp: 2, keep_ratio: True}\n- RandomFlip: {prob: 0.5}\n- - AutoAugment: {autoaug_type: v1}\n- NormalizeImage: {is_scale: true, mean: [0.485,0.456,0.406], std: [0.229, 0.224,0.225]}\n- Permute: {}\nbatch_transforms:\n" } ]
language: Python
license: Apache License 2.0
repo: paddlepaddle/paddledetection
original_message: fix aa order in rcnn enhance (#6095)

author: 499,301
date: 01.06.2022 11:08:24
timezone: -28,800
hash: ae27dcd95fb2e4fb4b39a2fe6ddbfdfa1b55af36
message: fix train dataset get_anno
mods:
[ { "change_type": "MODIFY", "old_path": "ppdet/engine/trainer.py", "new_path": "ppdet/engine/trainer.py", "diff": "@@ -218,13 +218,14 @@ class Trainer(object):\n# when do validation in train, annotation file should be get from\n# EvalReader instead of self.dataset(which is TrainReader)\n- anno_file = self.dataset.get_anno()\n- dataset = self.dataset\nif self.mode == 'train' and validate:\neval_dataset = self.cfg['EvalDataset']\neval_dataset.check_or_download_dataset()\nanno_file = eval_dataset.get_anno()\ndataset = eval_dataset\n+ else:\n+ dataset = self.dataset\n+ anno_file = dataset.get_anno()\nIouType = self.cfg['IouType'] if 'IouType' in self.cfg else 'bbox'\nif self.cfg.metric == \"COCO\":\n" } ]
language: Python
license: Apache License 2.0
repo: paddlepaddle/paddledetection
original_message: fix train dataset get_anno (#6076)

author: 499,304
date: 02.06.2022 11:52:31
timezone: -28,800
hash: 40d58469f42a253d995581fdcbd39e847cfa852f
message: fix picodet output name in openvino demo
mods:
[ { "change_type": "MODIFY", "old_path": "deploy/third_engine/demo_openvino/picodet_openvino.h", "new_path": "deploy/third_engine/demo_openvino/picodet_openvino.h", "diff": "// limitations under the License.\n// reference from https://github.com/RangiLyu/nanodet/tree/main/demo_openvino\n-\n#ifndef _PICODET_OPENVINO_H_\n#define _PICODET_OPENVINO_H_\n-#include <string>\n-#include <opencv2/core.hpp>\n#include <inference_engine.hpp>\n+#include <opencv2/core.hpp>\n+#include <string>\n#define image_size 416\n-\n-typedef struct HeadInfo\n-{\n+typedef struct HeadInfo {\nstd::string cls_layer;\nstd::string dis_layer;\nint stride;\n} HeadInfo;\n-typedef struct BoxInfo\n-{\n+typedef struct BoxInfo {\nfloat x1;\nfloat y1;\nfloat x2;\n@@ -41,8 +37,7 @@ typedef struct BoxInfo\nint label;\n} BoxInfo;\n-class PicoDet\n-{\n+class PicoDet {\npublic:\nPicoDet(const char *param);\n@@ -54,25 +49,27 @@ public:\nstd::vector<HeadInfo> heads_info_{\n// cls_pred|dis_pred|stride\n- {\"save_infer_model/scale_0.tmp_1\", \"save_infer_model/scale_4.tmp_1\", 8},\n- {\"save_infer_model/scale_1.tmp_1\", \"save_infer_model/scale_5.tmp_1\", 16},\n- {\"save_infer_model/scale_2.tmp_1\", \"save_infer_model/scale_6.tmp_1\", 32},\n- {\"save_infer_model/scale_3.tmp_1\", \"save_infer_model/scale_7.tmp_1\", 64},\n+ {\"transpose_0.tmp_0\", \"transpose_1.tmp_0\", 8},\n+ {\"transpose_2.tmp_0\", \"transpose_3.tmp_0\", 16},\n+ {\"transpose_4.tmp_0\", \"transpose_5.tmp_0\", 32},\n+ {\"transpose_6.tmp_0\", \"transpose_7.tmp_0\", 64},\n};\n- std::vector<BoxInfo> detect(cv::Mat image, float score_threshold, float nms_threshold);\n+ std::vector<BoxInfo> detect(cv::Mat image, float score_threshold,\n+ float nms_threshold);\nprivate:\nvoid preprocess(cv::Mat &image, InferenceEngine::Blob::Ptr &blob);\n- void decode_infer(const float*& cls_pred, const float*& dis_pred, int stride, float threshold, std::vector<std::vector<BoxInfo>>& results);\n- BoxInfo disPred2Bbox(const float*& dfl_det, int label, float score, int x, int y, int stride);\n+ void decode_infer(const float *&cls_pred, const float *&dis_pred, int stride,\n+ float threshold,\n+ std::vector<std::vector<BoxInfo>> &results);\n+ BoxInfo disPred2Bbox(const float *&dfl_det, int label, float score, int x,\n+ int y, int stride);\nstatic void nms(std::vector<BoxInfo> &result, float nms_threshold);\nstd::string input_name_;\nint input_size_ = image_size;\nint num_class_ = 80;\nint reg_max_ = 7;\n-\n};\n-\n#endif\n" }, { "change_type": "MODIFY", "old_path": "deploy/third_engine/demo_openvino_kpts/picodet_openvino.h", "new_path": "deploy/third_engine/demo_openvino_kpts/picodet_openvino.h", "diff": "@@ -48,25 +48,22 @@ class PicoDet {\nstd::vector<HeadInfo> heads_info_{\n// cls_pred|dis_pred|stride\n- {\"save_infer_model/scale_0.tmp_1\", \"save_infer_model/scale_4.tmp_1\", 8},\n- {\"save_infer_model/scale_1.tmp_1\", \"save_infer_model/scale_5.tmp_1\", 16},\n- {\"save_infer_model/scale_2.tmp_1\", \"save_infer_model/scale_6.tmp_1\", 32},\n- {\"save_infer_model/scale_3.tmp_1\", \"save_infer_model/scale_7.tmp_1\", 64},\n+ {\"transpose_0.tmp_0\", \"transpose_1.tmp_0\", 8},\n+ {\"transpose_2.tmp_0\", \"transpose_3.tmp_0\", 16},\n+ {\"transpose_4.tmp_0\", \"transpose_5.tmp_0\", 32},\n+ {\"transpose_6.tmp_0\", \"transpose_7.tmp_0\", 64},\n};\n- std::vector<BoxInfo> detect(cv::Mat image,\n- float score_threshold,\n+ std::vector<BoxInfo> detect(cv::Mat image, float score_threshold,\nfloat nms_threshold);\nprivate:\nvoid preprocess(cv::Mat &image, InferenceEngine::Blob::Ptr &blob);\n- void 
decode_infer(const float*& cls_pred,\n- const float*& dis_pred,\n- int stride,\n+ void decode_infer(const float *&cls_pred, const float *&dis_pred, int stride,\nfloat threshold,\nstd::vector<std::vector<BoxInfo>> &results);\n- BoxInfo disPred2Bbox(\n- const float*& dfl_det, int label, float score, int x, int y, int stride);\n+ BoxInfo disPred2Bbox(const float *&dfl_det, int label, float score, int x,\n+ int y, int stride);\nstatic void nms(std::vector<BoxInfo> &result, float nms_threshold);\nstd::string input_name_;\nint input_size_ = image_size;\n" } ]
language: Python
license: Apache License 2.0
repo: paddlepaddle/paddledetection
original_message: fix picodet output name in openvino demo (#6114)

author: 499,339
date: 02.06.2022 16:37:27
timezone: -28,800
hash: bf7b674cfea28f6c10d718f137d73e2fe0d325ce
message: [TIPC] add onnx infer
mods:
[ { "change_type": "MODIFY", "old_path": "deploy/serving/python/preprocess_ops.py", "new_path": "deploy/serving/python/preprocess_ops.py", "diff": "@@ -3,10 +3,14 @@ import cv2\nimport copy\n-def decode_image(im, img_info):\n+def decode_image(im):\nim = np.array(im)\n- img_info['im_shape'] = np.array(im.shape[:2], dtype=np.float32)\n- img_info['scale_factor'] = np.array([1., 1.], dtype=np.float32)\n+ img_info = {\n+ \"im_shape\": np.array(\n+ im.shape[:2], dtype=np.float32),\n+ \"scale_factor\": np.array(\n+ [1., 1.], dtype=np.float32)\n+ }\nreturn im, img_info\n@@ -399,16 +403,10 @@ class Compose:\nop_type = new_op_info.pop('type')\nself.transforms.append(eval(op_type)(**new_op_info))\n- self.im_info = {\n- 'scale_factor': np.array(\n- [1., 1.], dtype=np.float32),\n- 'im_shape': None\n- }\n-\ndef __call__(self, img):\n- img, self.im_info = decode_image(img, self.im_info)\n+ img, im_info = decode_image(img)\nfor t in self.transforms:\n- img, self.im_info = t(img, self.im_info)\n- inputs = copy.deepcopy(self.im_info)\n+ img, im_info = t(img, im_info)\n+ inputs = copy.deepcopy(im_info)\ninputs['image'] = img\nreturn inputs\n" }, { "change_type": "MODIFY", "old_path": "deploy/serving/python/web_service.py", "new_path": "deploy/serving/python/web_service.py", "diff": "@@ -132,7 +132,7 @@ class PredictConfig(object):\nself.arch = yml_conf['arch']\nself.preprocess_infos = yml_conf['Preprocess']\nself.min_subgraph_size = yml_conf['min_subgraph_size']\n- self.labels = yml_conf['label_list']\n+ self.label_list = yml_conf['label_list']\nself.use_dynamic_shape = yml_conf['use_dynamic_shape']\nself.draw_threshold = yml_conf.get(\"draw_threshold\", 0.5)\nself.mask = yml_conf.get(\"mask\", False)\n@@ -189,8 +189,8 @@ class DetectorOp(Op):\nresult = {}\nfor k, num in zip(input_dict.keys(), bboxes_num):\nbbox = bboxes[idx:idx + num]\n- result[k] = self.parse_det_result(bbox, draw_threshold,\n- GLOBAL_VAR['model_config'].labels)\n+ result[k] = self.parse_det_result(\n+ bbox, draw_threshold, GLOBAL_VAR['model_config'].label_list)\nreturn result, None, \"\"\ndef collate_inputs(self, inputs):\n@@ -206,7 +206,7 @@ class DetectorOp(Op):\ndef parse_det_result(self, bbox, draw_threshold, label_list):\nresult = []\nfor line in bbox:\n- if line[1] > draw_threshold:\n+ if line[0] > -1 and line[1] > draw_threshold:\nresult.append(f\"{label_list[int(line[0])]} {line[1]} \"\nf\"{line[2]} {line[3]} {line[4]} {line[5]}\")\nreturn result\n" }, { "change_type": "MODIFY", "old_path": "deploy/third_engine/demo_onnxruntime/infer_demo.py", "new_path": "deploy/third_engine/demo_onnxruntime/infer_demo.py", "diff": "@@ -55,7 +55,8 @@ class PicoDet():\norigin_shape = srcimg.shape[:2]\nim_scale_y = newh / float(origin_shape[0])\nim_scale_x = neww / float(origin_shape[1])\n- img_shape = np.array([[float(origin_shape[0]), float(origin_shape[1])]\n+ img_shape = np.array([\n+ [float(self.input_shape[0]), float(self.input_shape[1])]\n]).astype('float32')\nscale_factor = np.array([[im_scale_y, im_scale_x]]).astype('float32')\n" }, { "change_type": "ADD", "old_path": null, "new_path": "deploy/third_engine/onnx/infer.py", "diff": "+# Copyright (c) 2022 PaddlePaddle Authors. 
All Rights Reserved.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n+import os\n+import yaml\n+import argparse\n+import numpy as np\n+import glob\n+from onnxruntime import InferenceSession\n+\n+from preprocess import Compose\n+\n+# Global dictionary\n+SUPPORT_MODELS = {\n+ 'YOLO',\n+ 'RCNN',\n+ 'SSD',\n+ 'Face',\n+ 'FCOS',\n+ 'SOLOv2',\n+ 'TTFNet',\n+ 'S2ANet',\n+ 'JDE',\n+ 'FairMOT',\n+ 'DeepSORT',\n+ 'GFL',\n+ 'PicoDet',\n+ 'CenterNet',\n+ 'TOOD',\n+ 'RetinaNet',\n+ 'StrongBaseline',\n+ 'STGCN',\n+ 'YOLOX',\n+}\n+\n+parser = argparse.ArgumentParser(description=__doc__)\n+parser.add_argument(\"-c\", \"--config\", type=str, help=\"infer_cfg.yml\")\n+parser.add_argument(\n+ '--onnx_file', type=str, default=\"model.onnx\", help=\"onnx model file path\")\n+parser.add_argument(\"--image_dir\", type=str)\n+parser.add_argument(\"--image_file\", type=str)\n+\n+\n+def get_test_images(infer_dir, infer_img):\n+ \"\"\"\n+ Get image path list in TEST mode\n+ \"\"\"\n+ assert infer_img is not None or infer_dir is not None, \\\n+ \"--image_file or --image_dir should be set\"\n+ assert infer_img is None or os.path.isfile(infer_img), \\\n+ \"{} is not a file\".format(infer_img)\n+ assert infer_dir is None or os.path.isdir(infer_dir), \\\n+ \"{} is not a directory\".format(infer_dir)\n+\n+ # infer_img has a higher priority\n+ if infer_img and os.path.isfile(infer_img):\n+ return [infer_img]\n+\n+ images = set()\n+ infer_dir = os.path.abspath(infer_dir)\n+ assert os.path.isdir(infer_dir), \\\n+ \"infer_dir {} is not a directory\".format(infer_dir)\n+ exts = ['jpg', 'jpeg', 'png', 'bmp']\n+ exts += [ext.upper() for ext in exts]\n+ for ext in exts:\n+ images.update(glob.glob('{}/*.{}'.format(infer_dir, ext)))\n+ images = list(images)\n+\n+ assert len(images) > 0, \"no image found in {}\".format(infer_dir)\n+ print(\"Found {} inference images in total.\".format(len(images)))\n+\n+ return images\n+\n+\n+class PredictConfig(object):\n+ \"\"\"set config of preprocess, postprocess and visualize\n+ Args:\n+ model_dir (str): root path of infer_cfg.yml\n+ \"\"\"\n+\n+ def __init__(self, infer_config):\n+ # parsing Yaml config for Preprocess\n+ with open(infer_config) as f:\n+ yml_conf = yaml.safe_load(f)\n+ self.check_model(yml_conf)\n+ self.arch = yml_conf['arch']\n+ self.preprocess_infos = yml_conf['Preprocess']\n+ self.min_subgraph_size = yml_conf['min_subgraph_size']\n+ self.label_list = yml_conf['label_list']\n+ self.use_dynamic_shape = yml_conf['use_dynamic_shape']\n+ self.draw_threshold = yml_conf.get(\"draw_threshold\", 0.5)\n+ self.mask = yml_conf.get(\"mask\", False)\n+ self.tracker = yml_conf.get(\"tracker\", None)\n+ self.nms = yml_conf.get(\"NMS\", None)\n+ self.fpn_stride = yml_conf.get(\"fpn_stride\", None)\n+ if self.arch == 'RCNN' and yml_conf.get('export_onnx', False):\n+ print(\n+ 'The RCNN export model is used for ONNX and it only supports batch_size = 1'\n+ )\n+ self.print_config()\n+\n+ def check_model(self, yml_conf):\n+ \"\"\"\n+ Raises:\n+ ValueError: loaded model not in 
supported model type\n+ \"\"\"\n+ for support_model in SUPPORT_MODELS:\n+ if support_model in yml_conf['arch']:\n+ return True\n+ raise ValueError(\"Unsupported arch: {}, expect {}\".format(yml_conf[\n+ 'arch'], SUPPORT_MODELS))\n+\n+ def print_config(self):\n+ print('----------- Model Configuration -----------')\n+ print('%s: %s' % ('Model Arch', self.arch))\n+ print('%s: ' % ('Transform Order'))\n+ for op_info in self.preprocess_infos:\n+ print('--%s: %s' % ('transform op', op_info['type']))\n+ print('--------------------------------------------')\n+\n+\n+def predict_image(infer_config, predictor, img_list):\n+ # load preprocess transforms\n+ transforms = Compose(infer_config.preprocess_infos)\n+ # predict image\n+ for img_path in img_list:\n+ inputs = transforms(img_path)\n+ inputs_name = [var.name for var in predictor.get_inputs()]\n+ inputs = {k: inputs[k][None, ] for k in inputs_name}\n+\n+ outputs = predictor.run(output_names=None, input_feed=inputs)\n+\n+ print(\"ONNXRuntime predict: \")\n+ bboxes = np.array(outputs[0])\n+ for bbox in bboxes:\n+ if bbox[0] > -1 and bbox[1] > infer_config.draw_threshold:\n+ print(f\"{infer_config.label_list[int(bbox[0])]} {bbox[1]} \"\n+ f\"{bbox[2]} {bbox[3]} {bbox[4]} {bbox[5]}\")\n+\n+\n+if __name__ == '__main__':\n+ FLAGS = parser.parse_args()\n+ # load image list\n+ img_list = get_test_images(FLAGS.image_dir, FLAGS.image_file)\n+ # load predictor\n+ predictor = InferenceSession(FLAGS.onnx_file)\n+ # load infer config\n+ infer_config = PredictConfig(FLAGS.config)\n+\n+ predict_image(infer_config, predictor, img_list)\n" }, { "change_type": "ADD", "old_path": null, "new_path": "deploy/third_engine/onnx/preprocess.py", "diff": "+import numpy as np\n+import cv2\n+import copy\n+\n+\n+def decode_image(img_path):\n+ with open(img_path, 'rb') as f:\n+ im_read = f.read()\n+ data = np.frombuffer(im_read, dtype='uint8')\n+ im = cv2.imdecode(data, 1) # BGR mode, but need RGB mode\n+ im = cv2.cvtColor(im, cv2.COLOR_BGR2RGB)\n+ img_info = {\n+ \"im_shape\": np.array(\n+ im.shape[:2], dtype=np.float32),\n+ \"scale_factor\": np.array(\n+ [1., 1.], dtype=np.float32)\n+ }\n+ return im, img_info\n+\n+\n+class Resize(object):\n+ \"\"\"resize image by target_size and max_size\n+ Args:\n+ target_size (int): the target size of image\n+ keep_ratio (bool): whether keep_ratio or not, default true\n+ interp (int): method of resize\n+ \"\"\"\n+\n+ def __init__(self, target_size, keep_ratio=True, interp=cv2.INTER_LINEAR):\n+ if isinstance(target_size, int):\n+ target_size = [target_size, target_size]\n+ self.target_size = target_size\n+ self.keep_ratio = keep_ratio\n+ self.interp = interp\n+\n+ def __call__(self, im, im_info):\n+ \"\"\"\n+ Args:\n+ im (np.ndarray): image (np.ndarray)\n+ im_info (dict): info of image\n+ Returns:\n+ im (np.ndarray): processed image (np.ndarray)\n+ im_info (dict): info of processed image\n+ \"\"\"\n+ assert len(self.target_size) == 2\n+ assert self.target_size[0] > 0 and self.target_size[1] > 0\n+ im_channel = im.shape[2]\n+ im_scale_y, im_scale_x = self.generate_scale(im)\n+ im = cv2.resize(\n+ im,\n+ None,\n+ None,\n+ fx=im_scale_x,\n+ fy=im_scale_y,\n+ interpolation=self.interp)\n+ im_info['im_shape'] = np.array(im.shape[:2]).astype('float32')\n+ im_info['scale_factor'] = np.array(\n+ [im_scale_y, im_scale_x]).astype('float32')\n+ return im, im_info\n+\n+ def generate_scale(self, im):\n+ \"\"\"\n+ Args:\n+ im (np.ndarray): image (np.ndarray)\n+ Returns:\n+ im_scale_x: the resize ratio of X\n+ im_scale_y: the resize ratio of Y\n+ \"\"\"\n+ 
origin_shape = im.shape[:2]\n+ im_c = im.shape[2]\n+ if self.keep_ratio:\n+ im_size_min = np.min(origin_shape)\n+ im_size_max = np.max(origin_shape)\n+ target_size_min = np.min(self.target_size)\n+ target_size_max = np.max(self.target_size)\n+ im_scale = float(target_size_min) / float(im_size_min)\n+ if np.round(im_scale * im_size_max) > target_size_max:\n+ im_scale = float(target_size_max) / float(im_size_max)\n+ im_scale_x = im_scale\n+ im_scale_y = im_scale\n+ else:\n+ resize_h, resize_w = self.target_size\n+ im_scale_y = resize_h / float(origin_shape[0])\n+ im_scale_x = resize_w / float(origin_shape[1])\n+ return im_scale_y, im_scale_x\n+\n+\n+class NormalizeImage(object):\n+ \"\"\"normalize image\n+ Args:\n+ mean (list): im - mean\n+ std (list): im / std\n+ is_scale (bool): whether need im / 255\n+ is_channel_first (bool): if True: image shape is CHW, else: HWC\n+ \"\"\"\n+\n+ def __init__(self, mean, std, is_scale=True):\n+ self.mean = mean\n+ self.std = std\n+ self.is_scale = is_scale\n+\n+ def __call__(self, im, im_info):\n+ \"\"\"\n+ Args:\n+ im (np.ndarray): image (np.ndarray)\n+ im_info (dict): info of image\n+ Returns:\n+ im (np.ndarray): processed image (np.ndarray)\n+ im_info (dict): info of processed image\n+ \"\"\"\n+ im = im.astype(np.float32, copy=False)\n+ mean = np.array(self.mean)[np.newaxis, np.newaxis, :]\n+ std = np.array(self.std)[np.newaxis, np.newaxis, :]\n+\n+ if self.is_scale:\n+ im = im / 255.0\n+ im -= mean\n+ im /= std\n+ return im, im_info\n+\n+\n+class Permute(object):\n+ \"\"\"permute image\n+ Args:\n+ to_bgr (bool): whether convert RGB to BGR\n+ channel_first (bool): whether convert HWC to CHW\n+ \"\"\"\n+\n+ def __init__(self, ):\n+ super(Permute, self).__init__()\n+\n+ def __call__(self, im, im_info):\n+ \"\"\"\n+ Args:\n+ im (np.ndarray): image (np.ndarray)\n+ im_info (dict): info of image\n+ Returns:\n+ im (np.ndarray): processed image (np.ndarray)\n+ im_info (dict): info of processed image\n+ \"\"\"\n+ im = im.transpose((2, 0, 1)).copy()\n+ return im, im_info\n+\n+\n+class PadStride(object):\n+ \"\"\" padding image for model with FPN, instead PadBatch(pad_to_stride) in original config\n+ Args:\n+ stride (bool): model with FPN need image shape % stride == 0\n+ \"\"\"\n+\n+ def __init__(self, stride=0):\n+ self.coarsest_stride = stride\n+\n+ def __call__(self, im, im_info):\n+ \"\"\"\n+ Args:\n+ im (np.ndarray): image (np.ndarray)\n+ im_info (dict): info of image\n+ Returns:\n+ im (np.ndarray): processed image (np.ndarray)\n+ im_info (dict): info of processed image\n+ \"\"\"\n+ coarsest_stride = self.coarsest_stride\n+ if coarsest_stride <= 0:\n+ return im, im_info\n+ im_c, im_h, im_w = im.shape\n+ pad_h = int(np.ceil(float(im_h) / coarsest_stride) * coarsest_stride)\n+ pad_w = int(np.ceil(float(im_w) / coarsest_stride) * coarsest_stride)\n+ padding_im = np.zeros((im_c, pad_h, pad_w), dtype=np.float32)\n+ padding_im[:, :im_h, :im_w] = im\n+ return padding_im, im_info\n+\n+\n+class LetterBoxResize(object):\n+ def __init__(self, target_size):\n+ \"\"\"\n+ Resize image to target size, convert normalized xywh to pixel xyxy\n+ format ([x_center, y_center, width, height] -> [x0, y0, x1, y1]).\n+ Args:\n+ target_size (int|list): image target size.\n+ \"\"\"\n+ super(LetterBoxResize, self).__init__()\n+ if isinstance(target_size, int):\n+ target_size = [target_size, target_size]\n+ self.target_size = target_size\n+\n+ def letterbox(self, img, height, width, color=(127.5, 127.5, 127.5)):\n+ # letterbox: resize a rectangular image to a padded rectangular\n+ 
shape = img.shape[:2] # [height, width]\n+ ratio_h = float(height) / shape[0]\n+ ratio_w = float(width) / shape[1]\n+ ratio = min(ratio_h, ratio_w)\n+ new_shape = (round(shape[1] * ratio),\n+ round(shape[0] * ratio)) # [width, height]\n+ padw = (width - new_shape[0]) / 2\n+ padh = (height - new_shape[1]) / 2\n+ top, bottom = round(padh - 0.1), round(padh + 0.1)\n+ left, right = round(padw - 0.1), round(padw + 0.1)\n+\n+ img = cv2.resize(\n+ img, new_shape, interpolation=cv2.INTER_AREA) # resized, no border\n+ img = cv2.copyMakeBorder(\n+ img, top, bottom, left, right, cv2.BORDER_CONSTANT,\n+ value=color) # padded rectangular\n+ return img, ratio, padw, padh\n+\n+ def __call__(self, im, im_info):\n+ \"\"\"\n+ Args:\n+ im (np.ndarray): image (np.ndarray)\n+ im_info (dict): info of image\n+ Returns:\n+ im (np.ndarray): processed image (np.ndarray)\n+ im_info (dict): info of processed image\n+ \"\"\"\n+ assert len(self.target_size) == 2\n+ assert self.target_size[0] > 0 and self.target_size[1] > 0\n+ height, width = self.target_size\n+ h, w = im.shape[:2]\n+ im, ratio, padw, padh = self.letterbox(im, height=height, width=width)\n+\n+ new_shape = [round(h * ratio), round(w * ratio)]\n+ im_info['im_shape'] = np.array(new_shape, dtype=np.float32)\n+ im_info['scale_factor'] = np.array([ratio, ratio], dtype=np.float32)\n+ return im, im_info\n+\n+\n+class Pad(object):\n+ def __init__(self, size, fill_value=[114.0, 114.0, 114.0]):\n+ \"\"\"\n+ Pad image to a specified size.\n+ Args:\n+ size (list[int]): image target size\n+ fill_value (list[float]): rgb value of pad area, default (114.0, 114.0, 114.0)\n+ \"\"\"\n+ super(Pad, self).__init__()\n+ if isinstance(size, int):\n+ size = [size, size]\n+ self.size = size\n+ self.fill_value = fill_value\n+\n+ def __call__(self, im, im_info):\n+ im_h, im_w = im.shape[:2]\n+ h, w = self.size\n+ if h == im_h and w == im_w:\n+ im = im.astype(np.float32)\n+ return im, im_info\n+\n+ canvas = np.ones((h, w, 3), dtype=np.float32)\n+ canvas *= np.array(self.fill_value, dtype=np.float32)\n+ canvas[0:im_h, 0:im_w, :] = im.astype(np.float32)\n+ im = canvas\n+ return im, im_info\n+\n+\n+def rotate_point(pt, angle_rad):\n+ \"\"\"Rotate a point by an angle.\n+\n+ Args:\n+ pt (list[float]): 2 dimensional point to be rotated\n+ angle_rad (float): rotation angle by radian\n+\n+ Returns:\n+ list[float]: Rotated point.\n+ \"\"\"\n+ assert len(pt) == 2\n+ sn, cs = np.sin(angle_rad), np.cos(angle_rad)\n+ new_x = pt[0] * cs - pt[1] * sn\n+ new_y = pt[0] * sn + pt[1] * cs\n+ rotated_pt = [new_x, new_y]\n+\n+ return rotated_pt\n+\n+\n+def _get_3rd_point(a, b):\n+ \"\"\"To calculate the affine matrix, three pairs of points are required. 
This\n+ function is used to get the 3rd point, given 2D points a & b.\n+\n+ The 3rd point is defined by rotating vector `a - b` by 90 degrees\n+ anticlockwise, using b as the rotation center.\n+\n+ Args:\n+ a (np.ndarray): point(x,y)\n+ b (np.ndarray): point(x,y)\n+\n+ Returns:\n+ np.ndarray: The 3rd point.\n+ \"\"\"\n+ assert len(a) == 2\n+ assert len(b) == 2\n+ direction = a - b\n+ third_pt = b + np.array([-direction[1], direction[0]], dtype=np.float32)\n+\n+ return third_pt\n+\n+\n+def get_affine_transform(center,\n+ input_size,\n+ rot,\n+ output_size,\n+ shift=(0., 0.),\n+ inv=False):\n+ \"\"\"Get the affine transform matrix, given the center/scale/rot/output_size.\n+\n+ Args:\n+ center (np.ndarray[2, ]): Center of the bounding box (x, y).\n+ scale (np.ndarray[2, ]): Scale of the bounding box\n+ wrt [width, height].\n+ rot (float): Rotation angle (degree).\n+ output_size (np.ndarray[2, ]): Size of the destination heatmaps.\n+ shift (0-100%): Shift translation ratio wrt the width/height.\n+ Default (0., 0.).\n+ inv (bool): Option to inverse the affine transform direction.\n+ (inv=False: src->dst or inv=True: dst->src)\n+\n+ Returns:\n+ np.ndarray: The transform matrix.\n+ \"\"\"\n+ assert len(center) == 2\n+ assert len(output_size) == 2\n+ assert len(shift) == 2\n+ if not isinstance(input_size, (np.ndarray, list)):\n+ input_size = np.array([input_size, input_size], dtype=np.float32)\n+ scale_tmp = input_size\n+\n+ shift = np.array(shift)\n+ src_w = scale_tmp[0]\n+ dst_w = output_size[0]\n+ dst_h = output_size[1]\n+\n+ rot_rad = np.pi * rot / 180\n+ src_dir = rotate_point([0., src_w * -0.5], rot_rad)\n+ dst_dir = np.array([0., dst_w * -0.5])\n+\n+ src = np.zeros((3, 2), dtype=np.float32)\n+ src[0, :] = center + scale_tmp * shift\n+ src[1, :] = center + src_dir + scale_tmp * shift\n+ src[2, :] = _get_3rd_point(src[0, :], src[1, :])\n+\n+ dst = np.zeros((3, 2), dtype=np.float32)\n+ dst[0, :] = [dst_w * 0.5, dst_h * 0.5]\n+ dst[1, :] = np.array([dst_w * 0.5, dst_h * 0.5]) + dst_dir\n+ dst[2, :] = _get_3rd_point(dst[0, :], dst[1, :])\n+\n+ if inv:\n+ trans = cv2.getAffineTransform(np.float32(dst), np.float32(src))\n+ else:\n+ trans = cv2.getAffineTransform(np.float32(src), np.float32(dst))\n+\n+ return trans\n+\n+\n+class WarpAffine(object):\n+ \"\"\"Warp affine the image\n+ \"\"\"\n+\n+ def __init__(self,\n+ keep_res=False,\n+ pad=31,\n+ input_h=512,\n+ input_w=512,\n+ scale=0.4,\n+ shift=0.1):\n+ self.keep_res = keep_res\n+ self.pad = pad\n+ self.input_h = input_h\n+ self.input_w = input_w\n+ self.scale = scale\n+ self.shift = shift\n+\n+ def __call__(self, im, im_info):\n+ \"\"\"\n+ Args:\n+ im (np.ndarray): image (np.ndarray)\n+ im_info (dict): info of image\n+ Returns:\n+ im (np.ndarray): processed image (np.ndarray)\n+ im_info (dict): info of processed image\n+ \"\"\"\n+ img = cv2.cvtColor(im, cv2.COLOR_RGB2BGR)\n+\n+ h, w = img.shape[:2]\n+\n+ if self.keep_res:\n+ input_h = (h | self.pad) + 1\n+ input_w = (w | self.pad) + 1\n+ s = np.array([input_w, input_h], dtype=np.float32)\n+ c = np.array([w // 2, h // 2], dtype=np.float32)\n+\n+ else:\n+ s = max(h, w) * 1.0\n+ input_h, input_w = self.input_h, self.input_w\n+ c = np.array([w / 2., h / 2.], dtype=np.float32)\n+\n+ trans_input = get_affine_transform(c, s, 0, [input_w, input_h])\n+ img = cv2.resize(img, (w, h))\n+ inp = cv2.warpAffine(\n+ img, trans_input, (input_w, input_h), flags=cv2.INTER_LINEAR)\n+ return inp, im_info\n+\n+\n+class Compose:\n+ def __init__(self, transforms):\n+ self.transforms = []\n+ for op_info in 
transforms:\n+ new_op_info = op_info.copy()\n+ op_type = new_op_info.pop('type')\n+ self.transforms.append(eval(op_type)(**new_op_info))\n+\n+ def __call__(self, img_path):\n+ img, im_info = decode_image(img_path)\n+ for t in self.transforms:\n+ img, im_info = t(img, im_info)\n+ inputs = copy.deepcopy(im_info)\n+ inputs['image'] = img\n+ return inputs\n" }, { "change_type": "MODIFY", "old_path": "ppdet/modeling/assigners/task_aligned_assigner.py", "new_path": "ppdet/modeling/assigners/task_aligned_assigner.py", "diff": "@@ -93,7 +93,7 @@ class TaskAlignedAssigner(nn.Layer):\nreturn assigned_labels, assigned_bboxes, assigned_scores\n# compute iou between gt and pred bbox, [B, n, L]\n- ious = iou_similarity(gt_bboxes, pred_bboxes)\n+ ious = batch_iou_similarity(gt_bboxes, pred_bboxes)\n# gather pred bboxes class score\npred_scores = pred_scores.transpose([0, 2, 1])\nbatch_ind = paddle.arange(\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
[TIPC] add onnx infer (#6119)
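The predict_image helper in the commit above reduces to: run the Compose of preprocess ops, keep only the feeds the ONNX graph declares, then call InferenceSession.run. A minimal sketch of that call pattern, assuming a hypothetical model.onnx and zero-filled float32 feeds (real feeds come from the preprocess ops):

import numpy as np
from onnxruntime import InferenceSession

sess = InferenceSession("model.onnx")   # hypothetical path
feed = {}
for inp in sess.get_inputs():
    # resolve dynamic/None dims to 1 just to build a dummy feed
    shape = [d if isinstance(d, int) else 1 for d in inp.shape]
    feed[inp.name] = np.zeros(shape, dtype=np.float32)   # dtype is an assumption
outputs = sess.run(None, feed)          # same output_names=None pattern as predict_image
print([np.array(o).shape for o in outputs])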
499,339
07.06.2022 14:44:29
-28,800
5c72d5a12b295d9242faaf37421e1b1257a00bc0
[TIPC] add dist train infer
[ { "change_type": "ADD", "old_path": null, "new_path": "test_tipc/configs/ppyoloe/ppyoloe_crn_s_300e_coco_train_fleet_infer_python.txt", "diff": "+===========================train_params===========================\n+model_name:ppyoloe_crn_s_300e_coco\n+python:python3.7\n+gpu_list:192.168.0.1,192.168.0.2;0,1\n+use_gpu:True\n+auto_cast:null\n+epoch:lite_train_lite_infer=1|lite_train_whole_infer=1|whole_train_whole_infer=300\n+save_dir:null\n+TrainReader.batch_size:lite_train_lite_infer=2|lite_train_whole_infer=2|whole_train_whole_infer=2\n+pretrain_weights:https://paddledet.bj.bcebos.com/models/ppyoloe_crn_s_300e_coco.pdparams\n+trained_model_name:model_final.pdparams\n+train_infer_img_dir:./dataset/coco/test2017/\n+filename:null\n+##\n+trainer:norm_train\n+norm_train:tools/train.py -c configs/ppyoloe/ppyoloe_crn_s_300e_coco.yml -o\n+pact_train:tools/train.py -c configs/ppyoloe/ppyoloe_crn_s_300e_coco.yml --slim_config _template_pact -o\n+fpgm_train:tools/train.py -c configs/ppyoloe/ppyoloe_crn_s_300e_coco.yml --slim_config _template_fpgm -o\n+distill_train:null\n+null:null\n+null:null\n+##\n+===========================eval_params===========================\n+eval:tools/eval.py -c configs/ppyoloe/ppyoloe_crn_s_300e_coco.yml -o\n+null:null\n+##\n+===========================infer_params===========================\n+--output_dir:./output_inference\n+weights:https://paddledet.bj.bcebos.com/models/ppyoloe_crn_s_300e_coco.pdparams\n+norm_export:tools/export_model.py -c configs/ppyoloe/ppyoloe_crn_s_300e_coco.yml -o\n+pact_export:tools/export_model.py -c configs/ppyoloe/ppyoloe_crn_s_300e_coco.yml --slim_config _template_pact -o\n+fpgm_export:tools/export_model.py -c configs/ppyoloe/ppyoloe_crn_s_300e_coco.yml --slim_config _template_fpgm -o\n+distill_export:null\n+export1:null\n+export2:null\n+kl_quant_export:tools/post_quant.py -c configs/ppyoloe/ppyoloe_crn_s_300e_coco.yml --slim_config _template_kl_quant -o\n+##\n+infer_mode:norm\n+infer_quant:False\n+inference:./deploy/python/infer.py\n+--device:gpu|cpu\n+--enable_mkldnn:False\n+--cpu_threads:4\n+--batch_size:1|2\n+--use_tensorrt:null\n+--run_mode:paddle\n+--model_dir:\n+--image_dir:./dataset/coco/test2017/\n+--save_log_path:null\n+--run_benchmark:False\n+--trt_max_shape:1600\n+===========================infer_benchmark_params===========================\n+numpy_infer_input:3x640x640.npy\n\\ No newline at end of file\n" }, { "change_type": "MODIFY", "old_path": "test_tipc/prepare.sh", "new_path": "test_tipc/prepare.sh", "diff": "@@ -22,15 +22,15 @@ if [ ${MODE} = \"whole_train_whole_infer\" ];then\neval \"${python} ./dataset/coco/download_coco.py\"\nelif [ ${MODE} = \"cpp_infer\" ];then\n# download coco lite data\n- wget -nc -P ./dataset/coco/ https://paddledet.bj.bcebos.com/data/tipc/coco_tipc.tar\n+ wget -nc -P ./dataset/coco/ https://paddledet.bj.bcebos.com/data/tipc/coco_tipc.tar --no-check-certificate\ncd ./dataset/coco/ && tar -xvf coco_tipc.tar && mv -n coco_tipc/* .\nrm -rf coco_tipc/ && cd ../../\n# download wider_face lite data\n- wget -nc -P ./dataset/wider_face/ https://paddledet.bj.bcebos.com/data/tipc/wider_tipc.tar\n+ wget -nc -P ./dataset/wider_face/ https://paddledet.bj.bcebos.com/data/tipc/wider_tipc.tar --no-check-certificate\ncd ./dataset/wider_face/ && tar -xvf wider_tipc.tar && mv -n wider_tipc/* .\nrm -rf wider_tipc/ && cd ../../\n# download spine lite data\n- wget -nc -P ./dataset/spine_coco/ https://paddledet.bj.bcebos.com/data/tipc/spine_tipc.tar\n+ wget -nc -P ./dataset/spine_coco/ 
https://paddledet.bj.bcebos.com/data/tipc/spine_tipc.tar --no-check-certificate\ncd ./dataset/spine_coco/ && tar -xvf spine_tipc.tar && mv -n spine_tipc/* .\nrm -rf spine_tipc/ && cd ../../\nif [[ ${model_name} =~ \"s2anet\" ]]; then\n@@ -38,7 +38,7 @@ elif [ ${MODE} = \"cpp_infer\" ];then\ncd ../../\nfi\n# download mot lite data\n- wget -nc -P ./dataset/mot/ https://paddledet.bj.bcebos.com/data/tipc/mot_tipc.tar\n+ wget -nc -P ./dataset/mot/ https://paddledet.bj.bcebos.com/data/tipc/mot_tipc.tar --no-check-certificate\ncd ./dataset/mot/ && tar -xvf mot_tipc.tar && mv -n mot_tipc/* .\nrm -rf mot_tipc/ && cd ../../\n@@ -50,7 +50,7 @@ elif [ ${MODE} = \"cpp_infer\" ];then\necho \"################### Opencv already exists, skip downloading. ###################\"\nelse\nmkdir -p $(pwd)/deps && cd $(pwd)/deps\n- wget -c https://paddledet.bj.bcebos.com/data/opencv-3.4.16_gcc8.2_ffmpeg.tar.gz\n+ wget -c https://paddledet.bj.bcebos.com/data/opencv-3.4.16_gcc8.2_ffmpeg.tar.gz --no-check-certificate\ntar -xvf opencv-3.4.16_gcc8.2_ffmpeg.tar.gz && cd ../\necho \"################### Finish downloading opencv. ###################\"\nfi\n@@ -60,13 +60,13 @@ elif [ ${MODE} = \"benchmark_train\" ];then\npip install -U pip Cython\npip install -r requirements.txt\n# prepare lite benchmark coco data\n- wget -nc -P ./dataset/coco/ https://paddledet.bj.bcebos.com/data/coco_benchmark.tar\n+ wget -nc -P ./dataset/coco/ https://paddledet.bj.bcebos.com/data/coco_benchmark.tar --no-check-certificate\ncd ./dataset/coco/ && tar -xvf coco_benchmark.tar\nmv -u coco_benchmark/* ./\nls ./\ncd ../../\n# prepare lite benchmark mot data\n- wget -nc -P ./dataset/mot/ https://paddledet.bj.bcebos.com/data/mot_benchmark.tar\n+ wget -nc -P ./dataset/mot/ https://paddledet.bj.bcebos.com/data/mot_benchmark.tar --no-check-certificate\ncd ./dataset/mot/ && tar -xvf mot_benchmark.tar\nmv -u mot_benchmark/* ./\nls ./\n@@ -87,15 +87,15 @@ elif [ ${MODE} = \"serving_infer\" ];then\npython -m pip install paddlepaddle-gpu==2.2.2.post101 -f https://www.paddlepaddle.org.cn/whl/linux/mkl/avx/stable.html\nelse\n# download coco lite data\n- wget -nc -P ./dataset/coco/ https://paddledet.bj.bcebos.com/data/tipc/coco_tipc.tar\n+ wget -nc -P ./dataset/coco/ https://paddledet.bj.bcebos.com/data/tipc/coco_tipc.tar --no-check-certificate\ncd ./dataset/coco/ && tar -xvf coco_tipc.tar && mv -n coco_tipc/* .\nrm -rf coco_tipc/ && cd ../../\n# download wider_face lite data\n- wget -nc -P ./dataset/wider_face/ https://paddledet.bj.bcebos.com/data/tipc/wider_tipc.tar\n+ wget -nc -P ./dataset/wider_face/ https://paddledet.bj.bcebos.com/data/tipc/wider_tipc.tar --no-check-certificate\ncd ./dataset/wider_face/ && tar -xvf wider_tipc.tar && mv -n wider_tipc/* .\nrm -rf wider_tipc/ && cd ../../\n# download spine_coco lite data\n- wget -nc -P ./dataset/spine_coco/ https://paddledet.bj.bcebos.com/data/tipc/spine_tipc.tar\n+ wget -nc -P ./dataset/spine_coco/ https://paddledet.bj.bcebos.com/data/tipc/spine_tipc.tar --no-check-certificate\ncd ./dataset/spine_coco/ && tar -xvf spine_tipc.tar && mv -n spine_tipc/* .\nrm -rf spine_tipc/ && cd ../../\nif [[ ${model_name} =~ \"s2anet\" ]]; then\n@@ -103,7 +103,7 @@ else\ncd ../../\nfi\n# download mot lite data\n- wget -nc -P ./dataset/mot/ https://paddledet.bj.bcebos.com/data/tipc/mot_tipc.tar\n+ wget -nc -P ./dataset/mot/ https://paddledet.bj.bcebos.com/data/tipc/mot_tipc.tar --no-check-certificate\ncd ./dataset/mot/ && tar -xvf mot_tipc.tar && mv -n mot_tipc/* .\nrm -rf mot_tipc/ && cd ../../\nfi\n" }, { "change_type": 
"MODIFY", "old_path": "test_tipc/test_train_inference_python.sh", "new_path": "test_tipc/test_train_inference_python.sh", "diff": "@@ -278,10 +278,16 @@ else\nset_save_model=$(func_set_params \"${save_model_key}\" \"${save_log}\")\nif [ ${#gpu} -le 2 ];then # train with cpu or single gpu\ncmd=\"${python} ${run_train} LearningRate.base_lr=0.0001 log_iter=1 ${set_use_gpu} ${set_save_model} ${set_epoch} ${set_pretrain} ${set_batchsize} ${set_filename} ${set_train_params1} ${set_autocast}\"\n- elif [ ${#ips} -le 26 ];then # train with multi-gpu\n+ elif [ ${#ips} -le 15 ];then # train with multi-gpu\ncmd=\"${python} -m paddle.distributed.launch --gpus=${gpu} ${run_train} log_iter=1 ${set_use_gpu} ${set_save_model} ${set_epoch} ${set_pretrain} ${set_batchsize} ${set_filename} ${set_train_params1} ${set_autocast}\"\nelse # train with multi-machine\n- cmd=\"${python} -m paddle.distributed.launch --ips=${ips} --gpus=${gpu} ${set_use_gpu} ${run_train} log_iter=1 ${set_save_model} ${set_epoch} ${set_pretrain} ${set_batchsize} ${set_filename} ${set_train_params1} ${set_autocast}\"\n+ IFS=\",\"\n+ ips_array=(${ips})\n+ nodes=${#ips_array[@]}\n+ save_log=\"${LOG_PATH}/${trainer}_gpus_${gpu}_autocast_${autocast}_nodes_${nodes}\"\n+ IFS=\"|\"\n+ set_save_model=$(func_set_params \"${save_model_key}\" \"${save_log}\")\n+ cmd=\"${python} -m paddle.distributed.launch --ips=${ips} --gpus=${gpu} ${run_train} log_iter=1 ${set_use_gpu} ${set_save_model} ${set_epoch} ${set_pretrain} ${set_batchsize} ${set_filename} ${set_train_params1} ${set_autocast}\"\nfi\n# run train\neval $cmd\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
[TIPC] add dist train infer (#6141)
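The multi-machine branch added above only needs the node count, which it derives by splitting the comma-separated ips string before rebuilding the log path. The same bookkeeping in Python, with illustrative values taken from the TIPC config:

ips = "192.168.0.1,192.168.0.2"   # host part of gpu_list in the config above
gpus = "0,1"
nodes = len(ips.split(","))
save_log = "log/norm_train_gpus_{}_autocast_null_nodes_{}".format(gpus, nodes)
print(nodes, save_log)            # 2 log/norm_train_gpus_0,1_autocast_null_nodes_2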
499,339
07.06.2022 20:43:18
-28,800
18c2099aa7fb80c4cad3f30d9fec02154c0e2e5e
[TIPC] fix cpp infer bug, test=document_fix
[ { "change_type": "MODIFY", "old_path": "test_tipc/test_inference_cpp.sh", "new_path": "test_tipc/test_inference_cpp.sh", "diff": "@@ -129,11 +129,21 @@ else\nfi\n# build program\n-# TODO: set PADDLE_DIR and TENSORRT_ROOT\n-if [ -z $PADDLE_DIR ]; then\n- wget -nc https://paddle-inference-lib.bj.bcebos.com/2.2.2/cxx_c/Linux/GPU/x86-64_gcc8.2_avx_mkl_cuda11.1_cudnn8.1.1_trt7.2.3.4/paddle_inference.tgz --no-check-certificate\n+# TODO: set PADDLE_INFER_DIR and TENSORRT_ROOT\n+if [ -z $PADDLE_INFER_DIR ]; then\n+ Paddle_Infer_Link=$2\n+ if [ \"\" = \"$Paddle_Infer_Link\" ];then\n+ wget -nc https://paddle-inference-lib.bj.bcebos.com/2.2.2/cxx_c/Linux/GPU/x86-64_gcc8.2_avx_mkl_cuda10.1_cudnn7.6.5_trt6.0.1.5/paddle_inference.tgz --no-check-certificate\ntar zxf paddle_inference.tgz\n- PADDLE_DIR=$(pwd)/paddle_inference\n+ PADDLE_INFER_DIR=$(pwd)/paddle_inference\n+ else\n+ wget -nc $Paddle_Infer_Link --no-check-certificate\n+ tar zxf paddle_inference.tgz\n+ PADDLE_INFER_DIR=$(pwd)/paddle_inference\n+ if [ ! -d \"paddle_inference\" ]; then\n+ PADDLE_INFER_DIR=$(pwd)/paddle_inference_install_dir\n+ fi\n+ fi\nfi\nif [ -z $TENSORRT_ROOT ]; then\nTENSORRT_ROOT=/usr/local/TensorRT6-cuda10.1-cudnn7\n@@ -148,10 +158,10 @@ mkdir -p build\ncd ./build\ncmake .. \\\n-DWITH_GPU=ON \\\n- -DWITH_MKL=OFF \\\n+ -DWITH_MKL=ON \\\n-DWITH_TENSORRT=OFF \\\n-DPADDLE_LIB_NAME=libpaddle_inference \\\n- -DPADDLE_DIR=${PADDLE_DIR} \\\n+ -DPADDLE_DIR=${PADDLE_INFER_DIR} \\\n-DCUDA_LIB=${CUDA_LIB} \\\n-DCUDNN_LIB=${CUDNN_LIB} \\\n-DTENSORRT_LIB_DIR=${TENSORRT_LIB_DIR} \\\n@@ -160,13 +170,13 @@ cmake .. \\\n-DWITH_KEYPOINT=ON \\\n-DWITH_MOT=ON\n-make -j4\n+make -j8\ncd ../../../\necho \"################### build finished! ###################\"\n# set cuda device\n-GPUID=$2\n+GPUID=$3\nif [ ${#GPUID} -le 0 ];then\nenv=\" \"\nelse\n@@ -178,7 +188,6 @@ Count=0\nIFS=\"|\"\ninfer_quant_flag=(${cpp_infer_is_quant_list})\nfor infer_mode in ${cpp_infer_mode_list[*]}; do\n-\n# run export\ncase ${infer_mode} in\nnorm) run_export=${norm_export} ;;\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
[TIPC] fix cpp infer bug, test=document_fix (#6149)
499,304
07.06.2022 21:57:29
-28,800
9c680e5b42d89d158ad17bd92a5956141c41dd62
fix is_crowd and difficult in Mosaic
[ { "change_type": "MODIFY", "old_path": "ppdet/data/transform/operators.py", "new_path": "ppdet/data/transform/operators.py", "diff": "@@ -3184,7 +3184,7 @@ class Mosaic(BaseOperator):\nif np.random.uniform(0., 1.) > self.prob:\nreturn sample[0]\n- mosaic_gt_bbox, mosaic_gt_class, mosaic_is_crowd = [], [], []\n+ mosaic_gt_bbox, mosaic_gt_class, mosaic_is_crowd, mosaic_difficult = [], [], [], []\ninput_h, input_w = self.input_dim\nyc = int(random.uniform(0.5 * input_h, 1.5 * input_h))\nxc = int(random.uniform(0.5 * input_w, 1.5 * input_w))\n@@ -3217,21 +3217,35 @@ class Mosaic(BaseOperator):\n_gt_bbox[:, 2] = scale * gt_bbox[:, 2] + padw\n_gt_bbox[:, 3] = scale * gt_bbox[:, 3] + padh\n- is_crowd = sp['is_crowd'] if 'is_crowd' in sp else np.zeros(\n- (len(_gt_bbox), 1), dtype=np.int32)\nmosaic_gt_bbox.append(_gt_bbox)\nmosaic_gt_class.append(sp['gt_class'])\n- mosaic_is_crowd.append(is_crowd)\n+ if 'is_crowd' in sp:\n+ mosaic_is_crowd.append(sp['is_crowd'])\n+ if 'difficult' in sp:\n+ mosaic_difficult.append(sp['difficult'])\n# 2. clip bbox and get mosaic_labels([gt_bbox, gt_class, is_crowd])\nif len(mosaic_gt_bbox):\nmosaic_gt_bbox = np.concatenate(mosaic_gt_bbox, 0)\nmosaic_gt_class = np.concatenate(mosaic_gt_class, 0)\n+ if mosaic_is_crowd:\nmosaic_is_crowd = np.concatenate(mosaic_is_crowd, 0)\nmosaic_labels = np.concatenate([\n- mosaic_gt_bbox, mosaic_gt_class.astype(mosaic_gt_bbox.dtype),\n+ mosaic_gt_bbox,\n+ mosaic_gt_class.astype(mosaic_gt_bbox.dtype),\nmosaic_is_crowd.astype(mosaic_gt_bbox.dtype)\n], 1)\n+ elif mosaic_difficult:\n+ mosaic_difficult = np.concatenate(mosaic_difficult, 0)\n+ mosaic_labels = np.concatenate([\n+ mosaic_gt_bbox,\n+ mosaic_gt_class.astype(mosaic_gt_bbox.dtype),\n+ mosaic_difficult.astype(mosaic_gt_bbox.dtype)\n+ ], 1)\n+ else:\n+ mosaic_labels = np.concatenate([\n+ mosaic_gt_bbox, mosaic_gt_class.astype(mosaic_gt_bbox.dtype)\n+ ], 1)\nif self.remove_outside_box:\n# for MOT dataset\nflag1 = mosaic_gt_bbox[:, 0] < 2 * input_w\n@@ -3268,11 +3282,23 @@ class Mosaic(BaseOperator):\nrandom.random() < self.mixup_prob):\nsample_mixup = sample[4]\nmixup_img = sample_mixup['image']\n+ if 'is_crowd' in sample_mixup:\ncp_labels = np.concatenate([\nsample_mixup['gt_bbox'],\nsample_mixup['gt_class'].astype(mosaic_labels.dtype),\nsample_mixup['is_crowd'].astype(mosaic_labels.dtype)\n], 1)\n+ elif 'difficult' in sample_mixup:\n+ cp_labels = np.concatenate([\n+ sample_mixup['gt_bbox'],\n+ sample_mixup['gt_class'].astype(mosaic_labels.dtype),\n+ sample_mixup['difficult'].astype(mosaic_labels.dtype)\n+ ], 1)\n+ else:\n+ cp_labels = np.concatenate([\n+ sample_mixup['gt_bbox'],\n+ sample_mixup['gt_class'].astype(mosaic_labels.dtype)\n+ ], 1)\nmosaic_img, mosaic_labels = self.mixup_augment(\nmosaic_img, mosaic_labels, self.input_dim, cp_labels, mixup_img)\n@@ -3284,7 +3310,10 @@ class Mosaic(BaseOperator):\nsample0['im_shape'][1] = sample0['w']\nsample0['gt_bbox'] = mosaic_labels[:, :4].astype(np.float32)\nsample0['gt_class'] = mosaic_labels[:, 4:5].astype(np.float32)\n+ if 'is_crowd' in sample[0]:\nsample0['is_crowd'] = mosaic_labels[:, 5:6].astype(np.float32)\n+ if 'difficult' in sample[0]:\n+ sample0['difficult'] = mosaic_labels[:, 5:6].astype(np.float32)\nreturn sample0\ndef mixup_augment(self, origin_img, origin_labels, input_dim, cp_labels,\n@@ -3351,9 +3380,12 @@ class Mosaic(BaseOperator):\ncp_bboxes_transformed_np[:, 1::2] - y_offset, 0, target_h)\ncls_labels = cp_labels[:, 4:5].copy()\n- crd_labels = cp_labels[:, 5:6].copy()\nbox_labels = cp_bboxes_transformed_np\n+ 
if cp_labels.shape[-1] == 6:\n+ crd_labels = cp_labels[:, 5:6].copy()\nlabels = np.hstack((box_labels, cls_labels, crd_labels))\n+ else:\n+ labels = np.hstack((box_labels, cls_labels))\nif self.remove_outside_box:\nlabels = labels[labels[:, 0] < target_w]\nlabels = labels[labels[:, 2] > 0]\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
fix is_crowd and difficult in Mosaic (#6150)
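The Mosaic fix above keeps gt_bbox and gt_class mandatory and turns is_crowd / difficult into optional extra label columns. A standalone numpy sketch with made-up values shows the resulting widths:

import numpy as np

gt_bbox = np.array([[10., 20., 50., 80.]], dtype=np.float32)
gt_class = np.array([[3]], dtype=np.int32)
is_crowd = None                               # e.g. a VOC sample carries 'difficult' instead
difficult = np.array([[0]], dtype=np.int32)

cols = [gt_bbox, gt_class.astype(gt_bbox.dtype)]
if is_crowd is not None:
    cols.append(is_crowd.astype(gt_bbox.dtype))
elif difficult is not None:
    cols.append(difficult.astype(gt_bbox.dtype))
labels = np.concatenate(cols, 1)
print(labels.shape)                           # (1, 6) here, (1, 5) without the flag column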
499,298
08.06.2022 18:42:41
-28,800
170fa7d20d4a77dd9193a5c8ff160328431e98a0
update yolox cfg and doc
[ { "change_type": "ADD", "old_path": null, "new_path": "configs/yolox/yoloxv2_tiny_300e_coco.yml", "diff": "+_BASE_: [\n+ 'yolox_tiny_300e_coco.yml'\n+]\n+weights: output/yoloxv2_tiny_300e_coco/model_final\n+\n+CSPDarkNet:\n+ arch: \"P5\" # using the same backbone of YOLOv5 releases v6.0 and later version\n+ return_idx: [2, 3, 4]\n+ depthwise: False\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
update yolox cfg and doc (#6151)
499,298
08.06.2022 20:36:08
-28,800
ed331ba25c3d8157f20209806c472d07e59b126a
remove ppdet ops roi pool align, add vision roi pool align
[ { "change_type": "MODIFY", "old_path": "ppdet/modeling/heads/roi_extractor.py", "new_path": "ppdet/modeling/heads/roi_extractor.py", "diff": "@@ -29,7 +29,7 @@ class RoIAlign(object):\nRoI Align module\nFor more details, please refer to the document of roi_align in\n- in ppdet/modeing/ops.py\n+ in https://github.com/PaddlePaddle/Paddle/blob/develop/python/paddle/vision/ops.py\nArgs:\nresolution (int): The output size, default 14\n@@ -76,12 +76,12 @@ class RoIAlign(object):\ndef __call__(self, feats, roi, rois_num):\nroi = paddle.concat(roi) if len(roi) > 1 else roi[0]\nif len(feats) == 1:\n- rois_feat = ops.roi_align(\n- feats[self.start_level],\n- roi,\n- self.resolution,\n- self.spatial_scale[0],\n- rois_num=rois_num,\n+ rois_feat = paddle.vision.ops.roi_align(\n+ x=feats[self.start_level],\n+ boxes=roi,\n+ boxes_num=rois_num,\n+ output_size=self.resolution,\n+ spatial_scale=self.spatial_scale[0],\naligned=self.aligned)\nelse:\noffset = 2\n@@ -96,13 +96,13 @@ class RoIAlign(object):\nrois_num=rois_num)\nrois_feat_list = []\nfor lvl in range(self.start_level, self.end_level + 1):\n- roi_feat = ops.roi_align(\n- feats[lvl],\n- rois_dist[lvl],\n- self.resolution,\n- self.spatial_scale[lvl],\n+ roi_feat = paddle.vision.ops.roi_align(\n+ x=feats[lvl],\n+ boxes=rois_dist[lvl],\n+ boxes_num=rois_num_dist[lvl],\n+ output_size=self.resolution,\n+ spatial_scale=self.spatial_scale[lvl],\nsampling_ratio=self.sampling_ratio,\n- rois_num=rois_num_dist[lvl],\naligned=self.aligned)\nrois_feat_list.append(roi_feat)\nrois_feat_shuffle = paddle.concat(rois_feat_list)\n" }, { "change_type": "MODIFY", "old_path": "ppdet/modeling/ops.py", "new_path": "ppdet/modeling/ops.py", "diff": "@@ -23,8 +23,6 @@ from paddle import in_dynamic_mode\nfrom paddle.common_ops_import import Variable, LayerHelper, check_variable_and_dtype, check_type, check_dtype\n__all__ = [\n- 'roi_pool',\n- 'roi_align',\n'prior_box',\n'generate_proposals',\n'box_coder',\n@@ -117,215 +115,6 @@ def batch_norm(ch,\nreturn norm_layer\n-@paddle.jit.not_to_static\n-def roi_pool(input,\n- rois,\n- output_size,\n- spatial_scale=1.0,\n- rois_num=None,\n- name=None):\n- \"\"\"\n-\n- This operator implements the roi_pooling layer.\n- Region of interest pooling (also known as RoI pooling) is to perform max pooling on inputs of nonuniform sizes to obtain fixed-size feature maps (e.g. 7*7).\n-\n- The operator has three steps:\n-\n- 1. Dividing each region proposal into equal-sized sections with output_size(h, w);\n- 2. Finding the largest value in each section;\n- 3. Copying these max values to the output buffer.\n-\n- For more information, please refer to https://stackoverflow.com/questions/43430056/what-is-roi-layer-in-fast-rcnn\n-\n- Args:\n- input (Tensor): Input feature, 4D-Tensor with the shape of [N,C,H,W],\n- where N is the batch size, C is the input channel, H is Height, W is weight.\n- The data type is float32 or float64.\n- rois (Tensor): ROIs (Regions of Interest) to pool over.\n- 2D-Tensor or 2D-LoDTensor with the shape of [num_rois,4], the lod level is 1.\n- Given as [[x1, y1, x2, y2], ...], (x1, y1) is the top left coordinates,\n- and (x2, y2) is the bottom right coordinates.\n- output_size (int or tuple[int, int]): The pooled output size(h, w), data type is int32. If int, h and w are both equal to output_size.\n- spatial_scale (float, optional): Multiplicative spatial scale factor to translate ROI coords from their input scale to the scale used when pooling. Default: 1.0\n- rois_num (Tensor): The number of RoIs in each image. 
Default: None\n- name(str, optional): For detailed information, please refer\n- to :ref:`api_guide_Name`. Usually name is no need to set and\n- None by default.\n-\n-\n- Returns:\n- Tensor: The pooled feature, 4D-Tensor with the shape of [num_rois, C, output_size[0], output_size[1]].\n-\n-\n- Examples:\n-\n- .. code-block:: python\n-\n- import paddle\n- from ppdet.modeling import ops\n- paddle.enable_static()\n-\n- x = paddle.static.data(\n- name='data', shape=[None, 256, 32, 32], dtype='float32')\n- rois = paddle.static.data(\n- name='rois', shape=[None, 4], dtype='float32')\n- rois_num = paddle.static.data(name='rois_num', shape=[None], dtype='int32')\n-\n- pool_out = ops.roi_pool(\n- input=x,\n- rois=rois,\n- output_size=(1, 1),\n- spatial_scale=1.0,\n- rois_num=rois_num)\n- \"\"\"\n- check_type(output_size, 'output_size', (int, tuple), 'roi_pool')\n- if isinstance(output_size, int):\n- output_size = (output_size, output_size)\n-\n- pooled_height, pooled_width = output_size\n- if in_dynamic_mode():\n- assert rois_num is not None, \"rois_num should not be None in dygraph mode.\"\n- pool_out, argmaxes = _C_ops.roi_pool(\n- input, rois, rois_num, \"pooled_height\", pooled_height,\n- \"pooled_width\", pooled_width, \"spatial_scale\", spatial_scale)\n- return pool_out, argmaxes\n-\n- else:\n- check_variable_and_dtype(input, 'input', ['float32'], 'roi_pool')\n- check_variable_and_dtype(rois, 'rois', ['float32'], 'roi_pool')\n- helper = LayerHelper('roi_pool', **locals())\n- dtype = helper.input_dtype()\n- pool_out = helper.create_variable_for_type_inference(dtype)\n- argmaxes = helper.create_variable_for_type_inference(dtype='int32')\n-\n- inputs = {\n- \"X\": input,\n- \"ROIs\": rois,\n- }\n- if rois_num is not None:\n- inputs['RoisNum'] = rois_num\n- helper.append_op(\n- type=\"roi_pool\",\n- inputs=inputs,\n- outputs={\"Out\": pool_out,\n- \"Argmax\": argmaxes},\n- attrs={\n- \"pooled_height\": pooled_height,\n- \"pooled_width\": pooled_width,\n- \"spatial_scale\": spatial_scale\n- })\n- return pool_out, argmaxes\n-\n-\n-@paddle.jit.not_to_static\n-def roi_align(input,\n- rois,\n- output_size,\n- spatial_scale=1.0,\n- sampling_ratio=-1,\n- rois_num=None,\n- aligned=True,\n- name=None):\n- \"\"\"\n-\n- Region of interest align (also known as RoI align) is to perform\n- bilinear interpolation on inputs of nonuniform sizes to obtain\n- fixed-size feature maps (e.g. 7*7)\n-\n- Dividing each region proposal into equal-sized sections with\n- the pooled_width and pooled_height. Location remains the origin\n- result.\n-\n- In each ROI bin, the value of the four regularly sampled locations\n- are computed directly through bilinear interpolation. The output is\n- the mean of four locations.\n- Thus avoid the misaligned problem.\n-\n- Args:\n- input (Tensor): Input feature, 4D-Tensor with the shape of [N,C,H,W],\n- where N is the batch size, C is the input channel, H is Height, W is weight.\n- The data type is float32 or float64.\n- rois (Tensor): ROIs (Regions of Interest) to pool over.It should be\n- a 2-D Tensor or 2-D LoDTensor of shape (num_rois, 4), the lod level is 1.\n- The data type is float32 or float64. Given as [[x1, y1, x2, y2], ...],\n- (x1, y1) is the top left coordinates, and (x2, y2) is the bottom right coordinates.\n- output_size (int or tuple[int, int]): The pooled output size(h, w), data type is int32. 
If int, h and w are both equal to output_size.\n- spatial_scale (float32, optional): Multiplicative spatial scale factor to translate ROI coords\n- from their input scale to the scale used when pooling. Default: 1.0\n- sampling_ratio(int32, optional): number of sampling points in the interpolation grid.\n- If <=0, then grid points are adaptive to roi_width and pooled_w, likewise for height. Default: -1\n- rois_num (Tensor): The number of RoIs in each image. Default: None\n- name(str, optional): For detailed information, please refer\n- to :ref:`api_guide_Name`. Usually name is no need to set and\n- None by default.\n-\n- Returns:\n- Tensor:\n-\n- Output: The output of ROIAlignOp is a 4-D tensor with shape (num_rois, channels, pooled_h, pooled_w). The data type is float32 or float64.\n-\n-\n- Examples:\n- .. code-block:: python\n-\n- import paddle\n- from ppdet.modeling import ops\n- paddle.enable_static()\n-\n- x = paddle.static.data(\n- name='data', shape=[None, 256, 32, 32], dtype='float32')\n- rois = paddle.static.data(\n- name='rois', shape=[None, 4], dtype='float32')\n- rois_num = paddle.static.data(name='rois_num', shape=[None], dtype='int32')\n- align_out = ops.roi_align(input=x,\n- rois=rois,\n- output_size=(7, 7),\n- spatial_scale=0.5,\n- sampling_ratio=-1,\n- rois_num=rois_num)\n- \"\"\"\n- check_type(output_size, 'output_size', (int, tuple), 'roi_align')\n- if isinstance(output_size, int):\n- output_size = (output_size, output_size)\n-\n- pooled_height, pooled_width = output_size\n-\n- if in_dynamic_mode():\n- assert rois_num is not None, \"rois_num should not be None in dygraph mode.\"\n- align_out = _C_ops.roi_align(\n- input, rois, rois_num, \"pooled_height\", pooled_height,\n- \"pooled_width\", pooled_width, \"spatial_scale\", spatial_scale,\n- \"sampling_ratio\", sampling_ratio, \"aligned\", aligned)\n- return align_out\n-\n- else:\n- check_variable_and_dtype(input, 'input', ['float32', 'float64'],\n- 'roi_align')\n- check_variable_and_dtype(rois, 'rois', ['float32', 'float64'],\n- 'roi_align')\n- helper = LayerHelper('roi_align', **locals())\n- dtype = helper.input_dtype()\n- align_out = helper.create_variable_for_type_inference(dtype)\n- inputs = {\n- \"X\": input,\n- \"ROIs\": rois,\n- }\n- if rois_num is not None:\n- inputs['RoisNum'] = rois_num\n- helper.append_op(\n- type=\"roi_align\",\n- inputs=inputs,\n- outputs={\"Out\": align_out},\n- attrs={\n- \"pooled_height\": pooled_height,\n- \"pooled_width\": pooled_width,\n- \"spatial_scale\": spatial_scale,\n- \"sampling_ratio\": sampling_ratio,\n- \"aligned\": aligned,\n- })\n- return align_out\n-\n-\n@paddle.jit.not_to_static\ndef distribute_fpn_proposals(fpn_rois,\nmin_level,\n" }, { "change_type": "MODIFY", "old_path": "ppdet/modeling/tests/test_ops.py", "new_path": "ppdet/modeling/tests/test_ops.py", "diff": "@@ -128,11 +128,11 @@ class TestROIAlign(LayerTest):\nrois_num = paddle.static.data(\nname='rois_num', shape=[None], dtype='int32')\n- output = ops.roi_align(\n- input=inputs,\n- rois=rois,\n- output_size=output_size,\n- rois_num=rois_num)\n+ output = paddle.vision.ops.roi_align(\n+ x=inputs,\n+ boxes=rois,\n+ boxes_num=rois_num,\n+ output_size=output_size)\noutput_np, = self.get_static_graph_result(\nfeed={\n'inputs': inputs_np,\n@@ -147,11 +147,11 @@ class TestROIAlign(LayerTest):\nrois_dy = paddle.to_tensor(rois_np)\nrois_num_dy = paddle.to_tensor(rois_num_np)\n- output_dy = ops.roi_align(\n- input=inputs_dy,\n- rois=rois_dy,\n- output_size=output_size,\n- rois_num=rois_num_dy)\n+ output_dy = 
paddle.vision.ops.roi_align(\n+ x=inputs_dy,\n+ boxes=rois_dy,\n+ boxes_num=rois_num_dy,\n+ output_size=output_size)\noutput_dy_np = output_dy.numpy()\nself.assertTrue(np.array_equal(output_np, output_dy_np))\n@@ -164,7 +164,7 @@ class TestROIAlign(LayerTest):\nname='data_error', shape=[10, 4], dtype='int32', lod_level=1)\nself.assertRaises(\nTypeError,\n- ops.roi_align,\n+ paddle.vision.ops.roi_align,\ninput=inputs,\nrois=rois,\noutput_size=(7, 7))\n@@ -188,11 +188,11 @@ class TestROIPool(LayerTest):\nrois_num = paddle.static.data(\nname='rois_num', shape=[None], dtype='int32')\n- output, _ = ops.roi_pool(\n- input=inputs,\n- rois=rois,\n- output_size=output_size,\n- rois_num=rois_num)\n+ output = paddle.vision.ops.roi_pool(\n+ x=inputs,\n+ boxes=rois,\n+ boxes_num=rois_num,\n+ output_size=output_size)\noutput_np, = self.get_static_graph_result(\nfeed={\n'inputs': inputs_np,\n@@ -207,11 +207,11 @@ class TestROIPool(LayerTest):\nrois_dy = paddle.to_tensor(rois_np)\nrois_num_dy = paddle.to_tensor(rois_num_np)\n- output_dy, _ = ops.roi_pool(\n- input=inputs_dy,\n- rois=rois_dy,\n- output_size=output_size,\n- rois_num=rois_num_dy)\n+ output_dy = paddle.vision.ops.roi_pool(\n+ x=inputs_dy,\n+ boxes=rois_dy,\n+ boxes_num=rois_num_dy,\n+ output_size=output_size)\noutput_dy_np = output_dy.numpy()\nself.assertTrue(np.array_equal(output_np, output_dy_np))\n@@ -224,7 +224,7 @@ class TestROIPool(LayerTest):\nname='data_error', shape=[10, 4], dtype='int32', lod_level=1)\nself.assertRaises(\nTypeError,\n- ops.roi_pool,\n+ paddle.vision.ops.roi_pool,\ninput=inputs,\nrois=rois,\noutput_size=(7, 7))\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
remove ppdet ops roi pool align, add vision roi pool align (#6154)
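The migration above swaps ppdet's own op wrappers for paddle.vision.ops.roi_align / roi_pool with keyword names x, boxes, boxes_num. A self-contained shape check of that call pattern (dummy feature map, one box, illustrative sizes):

import paddle

feat = paddle.rand([1, 256, 32, 32])                        # [N, C, H, W]
boxes = paddle.to_tensor([[4., 4., 28., 28.]], 'float32')   # [num_rois, 4], x1 y1 x2 y2
boxes_num = paddle.to_tensor([1], 'int32')                  # rois per image
out = paddle.vision.ops.roi_align(
    x=feat, boxes=boxes, boxes_num=boxes_num,
    output_size=7, spatial_scale=1.0, aligned=True)
print(out.shape)                                            # [1, 256, 7, 7]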
499,333
09.06.2022 17:28:16
-28,800
f18e57984b2953320c5317eabcffca03080b36ed
keep device in export
[ { "change_type": "MODIFY", "old_path": "ppdet/engine/export_utils.py", "new_path": "ppdet/engine/export_utils.py", "diff": "@@ -58,7 +58,9 @@ MOT_ARCH = ['DeepSORT', 'JDE', 'FairMOT', 'ByteTrack']\ndef _prune_input_spec(input_spec, program, targets):\n# try to prune static program to figure out pruned input spec\n# so we perform following operations in static mode\n+ device = paddle.get_device()\npaddle.enable_static()\n+ paddle.set_device(device)\npruned_input_spec = [{}]\nprogram = program.clone()\nprogram = program._prune(targets=targets)\n@@ -69,7 +71,7 @@ def _prune_input_spec(input_spec, program, targets):\npruned_input_spec[0][name] = spec\nexcept Exception:\npass\n- paddle.disable_static()\n+ paddle.disable_static(place=device)\nreturn pruned_input_spec\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
keep device in export (#6157)
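The export fix above amounts to reading the device once and restoring it on both sides of the static-mode round trip. The same pattern in isolation, mirroring the diff with no extra APIs assumed:

import paddle

device = paddle.get_device()          # e.g. 'gpu:0' or 'cpu'
paddle.enable_static()
paddle.set_device(device)             # static-graph work stays on the training device
# ... clone / prune the program here ...
paddle.disable_static(place=device)   # back to dygraph on the same device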
499,333
09.06.2022 18:06:56
-28,800
636b8c4794202470e310c8af07c9a632b7e90671
fix mask rcnn in eval when num_classes is 1
[ { "change_type": "MODIFY", "old_path": "ppdet/modeling/heads/mask_head.py", "new_path": "ppdet/modeling/heads/mask_head.py", "diff": "@@ -222,7 +222,7 @@ class MaskHead(nn.Layer):\nmask_feat = self.head(rois_feat)\nmask_logit = self.mask_fcn_logits(mask_feat)\nif self.num_classes == 1:\n- mask_out = F.sigmoid(mask_logit)\n+ mask_out = F.sigmoid(mask_logit)[:, 0, :, :]\nelse:\nnum_masks = paddle.shape(mask_logit)[0]\nindex = paddle.arange(num_masks).cast('int32')\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
fix mask rcnn in eval when num_classes is 1 (#6168)
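The mask head fix drops the singleton class axis in the num_classes == 1 branch so both branches return [num_rois, H, W]. A quick shape check with dummy logits:

import paddle
import paddle.nn.functional as F

mask_logit = paddle.rand([8, 1, 28, 28])        # [num_rois, num_classes=1, H, W]
mask_out = F.sigmoid(mask_logit)[:, 0, :, :]    # indexing as in the fix
print(mask_out.shape)                           # [8, 28, 28]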
499,339
10.06.2022 14:04:42
-28,800
ff62e6ff4abbbdcff02834c5d058dce6addb34d2
[TIPC] fix fleet train shell name, test=document_fix
[ { "change_type": "MODIFY", "old_path": "test_tipc/configs/keypoint/tinypose_128x96_model_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt", "new_path": "test_tipc/configs/keypoint/tinypose_128x96_model_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt", "diff": "@@ -20,7 +20,7 @@ inference:./deploy/cpp/build/main\n--device:gpu|cpu\n--use_mkldnn:False\n--cpu_threads:4\n---batch_size:1|2\n+--batch_size:1\n--use_tensorrt:null\n--run_mode:paddle\n--model_dir_keypoint:\n" }, { "change_type": "RENAME", "old_path": "test_tipc/configs/ppyoloe/ppyoloe_crn_s_300e_coco_train_fleet_infer_python.txt", "new_path": "test_tipc/configs/ppyoloe/ppyoloe_crn_s_300e_coco_train_linux_gpu_fleet_normal_infer_python_linux_gpu_cpu.txt", "diff": "" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
[TIPC] fix fleet train shell name, test=document_fix (#6176)
499,363
14.06.2022 14:07:37
-28,800
1c4da10b6c836f7f0b74c0847afcbca6d0c3ef30
Scale frames before fight action recognition * Scale frames before fight action recognition * put short_size = self.cfg["VIDEO_ACTION"]["short_size"] scale = Scale(short_size) out of while * change class name from Scale to ShortSizeScale
[ { "change_type": "MODIFY", "old_path": "deploy/pphuman/pipeline.py", "new_path": "deploy/pphuman/pipeline.py", "diff": "@@ -42,7 +42,7 @@ from python.action_utils import KeyPointBuff, SkeletonActionVisualHelper\nfrom pipe_utils import argsparser, print_arguments, merge_cfg, PipeTimer\nfrom pipe_utils import get_test_images, crop_image_with_det, crop_image_with_mot, parse_mot_res, parse_mot_keypoint\n-from python.preprocess import decode_image\n+from python.preprocess import decode_image, ShortSizeScale\nfrom python.visualize import visualize_box_mask, visualize_attr, visualize_pose, visualize_action\nfrom pptracking.python.mot_sde_infer import SDE_Detector\n@@ -554,6 +554,10 @@ class PipePredictor(object):\nvideo_action_imgs = []\n+ if self.with_video_action:\n+ short_size = self.cfg[\"VIDEO_ACTION\"][\"short_size\"]\n+ scale = ShortSizeScale(short_size)\n+\nwhile (1):\nif frame_id % 10 == 0:\nprint('frame id: ', frame_id)\n@@ -705,7 +709,9 @@ class PipePredictor(object):\n# collect frames\nif frame_id % sample_freq == 0:\n- video_action_imgs.append(frame)\n+ # Scale image\n+ scaled_img = scale(frame)\n+ video_action_imgs.append(scaled_img)\n# the number of collected frames is enough to predict video action\nif len(video_action_imgs) == frame_len:\n" }, { "change_type": "MODIFY", "old_path": "deploy/python/preprocess.py", "new_path": "deploy/python/preprocess.py", "diff": "import cv2\nimport numpy as np\nfrom keypoint_preprocess import get_affine_transform\n+from PIL import Image\ndef decode_image(im_file, im_info):\n@@ -106,6 +107,95 @@ class Resize(object):\nreturn im_scale_y, im_scale_x\n+class ShortSizeScale(object):\n+ \"\"\"\n+ Scale images by short size.\n+ Args:\n+ short_size(float | int): Short size of an image will be scaled to the short_size.\n+ fixed_ratio(bool): Set whether to zoom according to a fixed ratio. default: True\n+ do_round(bool): Whether to round up when calculating the zoom ratio. default: False\n+ backend(str): Choose pillow or cv2 as the graphics processing backend. 
default: 'pillow'\n+ \"\"\"\n+\n+ def __init__(self,\n+ short_size,\n+ fixed_ratio=True,\n+ keep_ratio=None,\n+ do_round=False,\n+ backend='pillow'):\n+ self.short_size = short_size\n+ assert (fixed_ratio and not keep_ratio) or (\n+ not fixed_ratio\n+ ), \"fixed_ratio and keep_ratio cannot be true at the same time\"\n+ self.fixed_ratio = fixed_ratio\n+ self.keep_ratio = keep_ratio\n+ self.do_round = do_round\n+\n+ assert backend in [\n+ 'pillow', 'cv2'\n+ ], \"Scale's backend must be pillow or cv2, but get {backend}\"\n+\n+ self.backend = backend\n+\n+ def __call__(self, img):\n+ \"\"\"\n+ Performs resize operations.\n+ Args:\n+ img (PIL.Image): a PIL.Image.\n+ return:\n+ resized_img: a PIL.Image after scaling.\n+ \"\"\"\n+\n+ result_img = None\n+\n+ if isinstance(img, np.ndarray):\n+ h, w, _ = img.shape\n+ elif isinstance(img, Image.Image):\n+ w, h = img.size\n+ else:\n+ raise NotImplementedError\n+\n+ if w <= h:\n+ ow = self.short_size\n+ if self.fixed_ratio: # default is True\n+ oh = int(self.short_size * 4.0 / 3.0)\n+ elif not self.keep_ratio: # no\n+ oh = self.short_size\n+ else:\n+ scale_factor = self.short_size / w\n+ oh = int(h * float(scale_factor) +\n+ 0.5) if self.do_round else int(h * self.short_size / w)\n+ ow = int(w * float(scale_factor) +\n+ 0.5) if self.do_round else int(w * self.short_size / h)\n+ else:\n+ oh = self.short_size\n+ if self.fixed_ratio:\n+ ow = int(self.short_size * 4.0 / 3.0)\n+ elif not self.keep_ratio: # no\n+ ow = self.short_size\n+ else:\n+ scale_factor = self.short_size / h\n+ oh = int(h * float(scale_factor) +\n+ 0.5) if self.do_round else int(h * self.short_size / w)\n+ ow = int(w * float(scale_factor) +\n+ 0.5) if self.do_round else int(w * self.short_size / h)\n+\n+ if type(img) == np.ndarray:\n+ img = Image.fromarray(img, mode='RGB')\n+\n+ if self.backend == 'pillow':\n+ result_img = img.resize((ow, oh), Image.BILINEAR)\n+ elif self.backend == 'cv2' and (self.keep_ratio is not None):\n+ result_img = cv2.resize(\n+ img, (ow, oh), interpolation=cv2.INTER_LINEAR)\n+ else:\n+ result_img = Image.fromarray(\n+ cv2.resize(\n+ np.asarray(img), (ow, oh), interpolation=cv2.INTER_LINEAR))\n+\n+ return result_img\n+\n+\nclass NormalizeImage(object):\n\"\"\"normalize image\nArgs:\n" }, { "change_type": "MODIFY", "old_path": "deploy/python/video_action_infer.py", "new_path": "deploy/python/video_action_infer.py", "diff": "@@ -197,7 +197,7 @@ class VideoActionRecognizer(object):\nimg_mean = [0.485, 0.456, 0.406]\nimg_std = [0.229, 0.224, 0.225]\nops = [\n- Scale(self.short_size), CenterCrop(self.target_size), Image2Array(),\n+ CenterCrop(self.target_size), Image2Array(),\nNormalization(img_mean, img_std)\n]\nfor op in ops:\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
Scale frames before fight action recognition (#6170) * Scale frames before fight action recognition * put short_size = self.cfg["VIDEO_ACTION"]["short_size"] scale = Scale(short_size) out of while * change class name from Scale to ShortSizeScale
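With the default fixed_ratio=True, ShortSizeScale above maps the frame's short side to short_size and the long side to short_size * 4 / 3 before the video-action crop. A rough standalone version of that sizing rule (random frame, pillow backend, short_size value is illustrative):

import numpy as np
from PIL import Image

short_size = 340
frame = (np.random.rand(720, 1280, 3) * 255).astype('uint8')
h, w = frame.shape[:2]
if w <= h:
    ow, oh = short_size, int(short_size * 4.0 / 3.0)
else:
    oh, ow = short_size, int(short_size * 4.0 / 3.0)
scaled = Image.fromarray(frame, mode='RGB').resize((ow, oh), Image.BILINEAR)
print(scaled.size)   # (ow, oh)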
499,299
14.06.2022 16:10:45
-28,800
51c2ae6e0394e1fc641f5f4c953371dc9c9a3e22
fix doc error, test=document_fix
[ { "change_type": "MODIFY", "old_path": "docs/tutorials/GETTING_STARTED.md", "new_path": "docs/tutorials/GETTING_STARTED.md", "diff": "@@ -128,7 +128,7 @@ list below can be viewed by `--help`\n--output_dir=infer_output/ \\\n--draw_threshold=0.5 \\\n-o weights=output/faster_rcnn_r50_fpn_1x_coco/model_final \\\n- --use_vdl=Ture\n+ --use_vdl=True\n```\n`--draw_threshold` is an optional argument. Default is 0.5.\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
fix doc error, test=document_fix (#6192)
499,304
15.06.2022 20:47:14
-28,800
941bbf7cb3829e390bc8e1eec3cd76d056b329ac
Improve the usability of voc metric
[ { "change_type": "MODIFY", "old_path": "ppdet/metrics/metrics.py", "new_path": "ppdet/metrics/metrics.py", "diff": "@@ -248,11 +248,13 @@ class VOCMetric(Metric):\nself.detection_map.reset()\ndef update(self, inputs, outputs):\n- bbox_np = outputs['bbox'].numpy()\n+ bbox_np = outputs['bbox'].numpy() if isinstance(\n+ outputs['bbox'], paddle.Tensor) else outputs['bbox']\nbboxes = bbox_np[:, 2:]\nscores = bbox_np[:, 1]\nlabels = bbox_np[:, 0]\n- bbox_lengths = outputs['bbox_num'].numpy()\n+ bbox_lengths = outputs['bbox_num'].numpy() if isinstance(\n+ outputs['bbox_num'], paddle.Tensor) else outputs['bbox_num']\nif bboxes.shape == (1, 1) or bboxes is None:\nreturn\n@@ -261,18 +263,26 @@ class VOCMetric(Metric):\ndifficults = inputs['difficult'] if not self.evaluate_difficult \\\nelse None\n- scale_factor = inputs['scale_factor'].numpy(\n- ) if 'scale_factor' in inputs else np.ones(\n- (gt_boxes.shape[0], 2)).astype('float32')\n+ if 'scale_factor' in inputs:\n+ scale_factor = inputs['scale_factor'].numpy() if isinstance(\n+ inputs['scale_factor'],\n+ paddle.Tensor) else inputs['scale_factor']\n+ else:\n+ scale_factor = np.ones((gt_boxes.shape[0], 2)).astype('float32')\nbbox_idx = 0\nfor i in range(len(gt_boxes)):\n- gt_box = gt_boxes[i].numpy()\n+ gt_box = gt_boxes[i].numpy() if isinstance(\n+ gt_boxes[i], paddle.Tensor) else gt_boxes[i]\nh, w = scale_factor[i]\ngt_box = gt_box / np.array([w, h, w, h])\n- gt_label = gt_labels[i].numpy()\n- difficult = None if difficults is None \\\n- else difficults[i].numpy()\n+ gt_label = gt_labels[i].numpy() if isinstance(\n+ gt_labels[i], paddle.Tensor) else gt_labels[i]\n+ if difficults is not None:\n+ difficult = difficults[i].numpy() if isinstance(\n+ difficults[i], paddle.Tensor) else difficults[i]\n+ else:\n+ difficult = None\nbbox_num = bbox_lengths[i]\nbbox = bboxes[bbox_idx:bbox_idx + bbox_num]\nscore = scores[bbox_idx:bbox_idx + bbox_num]\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
Improve the usability of voc metric (#6197)
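Every change in the VOC metric above is the same guard: call .numpy() only while the value is still a paddle.Tensor, otherwise pass the ndarray through. A tiny helper in that spirit:

import numpy as np
import paddle

def to_numpy(x):
    # tolerate inputs that were already converted upstream
    return x.numpy() if isinstance(x, paddle.Tensor) else x

print(to_numpy(paddle.to_tensor([1., 2.])), to_numpy(np.array([1., 2.])))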
499,301
16.06.2022 13:52:20
-28,800
145d155623abde6ad9dcc9357c05dfa437ef42a6
add layer_norm for convnext outputs
[ { "change_type": "MODIFY", "old_path": "ppdet/modeling/backbones/convnext.py", "new_path": "ppdet/modeling/backbones/convnext.py", "diff": "@@ -141,6 +141,7 @@ class ConvNeXt(nn.Layer):\nlayer_scale_init_value=1e-6,\nhead_init_scale=1.,\nreturn_idx=[1, 2, 3],\n+ norm_output=True,\npretrained=None, ):\nsuper().__init__()\n@@ -178,6 +179,14 @@ class ConvNeXt(nn.Layer):\nself.return_idx = return_idx\nself.dims = [dims[i] for i in return_idx] # [::-1]\n+ self.norm_output = norm_output\n+ if norm_output:\n+ self.norms = nn.LayerList([\n+ LayerNorm(\n+ c, eps=1e-6, data_format=\"channels_first\")\n+ for c in self.dims\n+ ])\n+\nself.apply(self._init_weights)\n# self.head.weight.set_value(self.head.weight.numpy() * head_init_scale)\n# self.head.bias.set_value(self.head.weight.numpy() * head_init_scale)\n@@ -202,9 +211,11 @@ class ConvNeXt(nn.Layer):\nx = self.stages[i](x)\noutput.append(x)\n- output = [output[i] for i in self.return_idx]\n+ outputs = [output[i] for i in self.return_idx]\n+ if self.norm_output:\n+ outputs = [self.norms[i](out) for i, out in enumerate(outputs)]\n- return output\n+ return outputs\ndef forward(self, x):\nx = self.forward_features(x['image'])\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
add layer_norm for convnext outputs (#6201)
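The ConvNeXt commit adds one norm per returned stage using the repo's channels_first LayerNorm. A plain-paddle sketch of the same idea (channel counts are illustrative) transposes around nn.LayerNorm instead of relying on that custom class:

import paddle
import paddle.nn as nn

dims = [192, 384, 768]
norms = nn.LayerList([nn.LayerNorm(c, epsilon=1e-6) for c in dims])
feats = [paddle.rand([2, c, 20, 20]) for c in dims]
outs = []
for norm, f in zip(norms, feats):
    f = norm(f.transpose([0, 2, 3, 1]))     # normalize the channel dim
    outs.append(f.transpose([0, 3, 1, 2]))  # back to [N, C, H, W]
print([o.shape for o in outs])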
499,339
16.06.2022 20:47:21
-28,800
db1b265492311c8c8cf73fe6473486e4c489fd4e
[TIPC] fix serving infer shell
[ { "change_type": "MODIFY", "old_path": "test_tipc/prepare.sh", "new_path": "test_tipc/prepare.sh", "diff": "@@ -80,14 +80,6 @@ elif [ ${MODE} = \"paddle2onnx_infer\" ];then\n${python} -m pip install install paddle2onnx\n${python} -m pip install onnxruntime==1.10.0\nelif [ ${MODE} = \"serving_infer\" ];then\n- git clone https://github.com/PaddlePaddle/Serving\n- cd Serving\n- bash tools/paddle_env_install.sh\n- ${python} -m pip install -r python/requirements.txt\n- cd ..\n- ${python} -m pip install paddle-serving-client -i https://pypi.tuna.tsinghua.edu.cn/simple\n- ${python} -m pip install paddle-serving-app -i https://pypi.tuna.tsinghua.edu.cn/simple\n- ${python} -m pip install paddle-serving-server-gpu -i https://pypi.tuna.tsinghua.edu.cn/simple\nunset https_proxy http_proxy\nelse\n# download coco lite data\n" }, { "change_type": "MODIFY", "old_path": "test_tipc/test_serving_infer_cpp.sh", "new_path": "test_tipc/test_serving_infer_cpp.sh", "diff": "@@ -89,10 +89,6 @@ function func_serving_inference(){\ndone\n}\n-# build paddle_serving_server\n-bash deploy/serving/cpp/build_server.sh\n-echo \"################### build finished! ###################\"\n-\n# run serving infer\nCount=0\nIFS=\"|\"\n" }, { "change_type": "MODIFY", "old_path": "test_tipc/test_serving_infer_python.sh", "new_path": "test_tipc/test_serving_infer_python.sh", "diff": "@@ -81,9 +81,9 @@ function func_serving_inference(){\n}\n# set cuda device\n-GPUID=$2\n+GPUID=$3\nif [ ${#GPUID} -le 0 ];then\n- env=\" \"\n+ env=\"export CUDA_VISIBLE_DEVICES=0\"\nelse\nenv=\"export CUDA_VISIBLE_DEVICES=${GPUID}\"\nfi\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
[TIPC] fix serving infer shell (#6206)
499,301
20.06.2022 17:36:36
-28,800
29a5c2fa3e30c07610a41392f1963d2b3d0c0fe5
use bias
[ { "change_type": "MODIFY", "old_path": "ppdet/modeling/necks/hrfpn.py", "new_path": "ppdet/modeling/necks/hrfpn.py", "diff": "@@ -37,7 +37,8 @@ class HRFPN(nn.Layer):\nout_channel=256,\nshare_conv=False,\nextra_stage=1,\n- spatial_scales=[1. / 4, 1. / 8, 1. / 16, 1. / 32]):\n+ spatial_scales=[1. / 4, 1. / 8, 1. / 16, 1. / 32],\n+ use_bias=False):\nsuper(HRFPN, self).__init__()\nin_channel = sum(in_channels)\nself.in_channel = in_channel\n@@ -47,12 +48,14 @@ class HRFPN(nn.Layer):\nspatial_scales = spatial_scales + [spatial_scales[-1] / 2.]\nself.spatial_scales = spatial_scales\nself.num_out = len(self.spatial_scales)\n+ self.use_bias = use_bias\n+ bias_attr = False if use_bias is False else None\nself.reduction = nn.Conv2D(\nin_channels=in_channel,\nout_channels=out_channel,\nkernel_size=1,\n- bias_attr=False)\n+ bias_attr=bias_attr)\nif share_conv:\nself.fpn_conv = nn.Conv2D(\n@@ -60,7 +63,7 @@ class HRFPN(nn.Layer):\nout_channels=out_channel,\nkernel_size=3,\npadding=1,\n- bias_attr=False)\n+ bias_attr=bias_attr)\nelse:\nself.fpn_conv = []\nfor i in range(self.num_out):\n@@ -72,7 +75,7 @@ class HRFPN(nn.Layer):\nout_channels=out_channel,\nkernel_size=3,\npadding=1,\n- bias_attr=False))\n+ bias_attr=bias_attr))\nself.fpn_conv.append(conv)\ndef forward(self, body_feats):\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
use bias (#6234)
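The use_bias switch above only toggles bias_attr between False (no bias parameter at all) and None (Paddle's default bias). The difference is visible directly on the layer:

import paddle.nn as nn

conv_no_bias = nn.Conv2D(64, 64, kernel_size=1, bias_attr=False)
conv_with_bias = nn.Conv2D(64, 64, kernel_size=1, bias_attr=None)
print(conv_no_bias.bias is None, conv_with_bias.bias is not None)   # True True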
499,392
20.06.2022 19:59:30
-28,800
3d45bee1013a3b1b4a7048d67e6842f4c116210c
Fix keypoint metric
[ { "change_type": "MODIFY", "old_path": "ppdet/metrics/keypoint_metrics.py", "new_path": "ppdet/metrics/keypoint_metrics.py", "diff": "@@ -16,6 +16,7 @@ import os\nimport json\nfrom collections import defaultdict, OrderedDict\nimport numpy as np\n+import paddle\nfrom pycocotools.coco import COCO\nfrom pycocotools.cocoeval import COCOeval\nfrom ..modeling.keypoint_utils import oks_nms\n@@ -70,15 +71,23 @@ class KeyPointTopDownCOCOEval(object):\nself.results['all_preds'][self.idx:self.idx + num_images, :, 0:\n3] = kpts[:, :, 0:3]\nself.results['all_boxes'][self.idx:self.idx + num_images, 0:2] = inputs[\n- 'center'].numpy()[:, 0:2]\n+ 'center'].numpy()[:, 0:2] if isinstance(\n+ inputs['center'], paddle.Tensor) else inputs['center'][:, 0:2]\nself.results['all_boxes'][self.idx:self.idx + num_images, 2:4] = inputs[\n- 'scale'].numpy()[:, 0:2]\n+ 'scale'].numpy()[:, 0:2] if isinstance(\n+ inputs['scale'], paddle.Tensor) else inputs['scale'][:, 0:2]\nself.results['all_boxes'][self.idx:self.idx + num_images, 4] = np.prod(\n- inputs['scale'].numpy() * 200, 1)\n- self.results['all_boxes'][self.idx:self.idx + num_images,\n- 5] = np.squeeze(inputs['score'].numpy())\n+ inputs['scale'].numpy() * 200,\n+ 1) if isinstance(inputs['scale'], paddle.Tensor) else np.prod(\n+ inputs['scale'] * 200, 1)\n+ self.results['all_boxes'][\n+ self.idx:self.idx + num_images,\n+ 5] = np.squeeze(inputs['score'].numpy()) if isinstance(\n+ inputs['score'], paddle.Tensor) else np.squeeze(inputs['score'])\n+ if isinstance(inputs['im_id'], paddle.Tensor):\nself.results['image_path'].extend(inputs['im_id'].numpy())\n-\n+ else:\n+ self.results['image_path'].extend(inputs['im_id'])\nself.idx += num_images\ndef _write_coco_keypoint_results(self, keypoints):\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
Fix keypoint metric (#6222)
499,301
22.06.2022 17:16:48
-28,800
b0fb44b0b97505184140cbc829691e0d7578af3b
fix convnext init
[ { "change_type": "MODIFY", "old_path": "ppdet/modeling/backbones/convnext.py", "new_path": "ppdet/modeling/backbones/convnext.py", "diff": "@@ -29,7 +29,7 @@ import numpy as np\nfrom ppdet.core.workspace import register, serializable\nfrom ..shape_spec import ShapeSpec\n-from .transformer_utils import DropPath, trunc_normal_\n+from .transformer_utils import DropPath, trunc_normal_, zeros_\n__all__ = ['ConvNeXt']\n@@ -129,7 +129,6 @@ class ConvNeXt(nn.Layer):\ndims (int): Feature dimension at each stage. Default: [96, 192, 384, 768]\ndrop_path_rate (float): Stochastic depth rate. Default: 0.\nlayer_scale_init_value (float): Init value for Layer Scale. Default: 1e-6.\n- head_init_scale (float): Init scaling value for classifier weights and biases. Default: 1.\n\"\"\"\ndef __init__(\n@@ -139,7 +138,6 @@ class ConvNeXt(nn.Layer):\ndims=[96, 192, 384, 768],\ndrop_path_rate=0.,\nlayer_scale_init_value=1e-6,\n- head_init_scale=1.,\nreturn_idx=[1, 2, 3],\nnorm_output=True,\npretrained=None, ):\n@@ -188,8 +186,6 @@ class ConvNeXt(nn.Layer):\n])\nself.apply(self._init_weights)\n- # self.head.weight.set_value(self.head.weight.numpy() * head_init_scale)\n- # self.head.bias.set_value(self.head.weight.numpy() * head_init_scale)\nif pretrained is not None:\nif 'http' in pretrained: #URL\n@@ -201,8 +197,8 @@ class ConvNeXt(nn.Layer):\ndef _init_weights(self, m):\nif isinstance(m, (nn.Conv2D, nn.Linear)):\n- trunc_normal_(m.weight, std=.02)\n- nn.init.constant_(m.bias, 0)\n+ trunc_normal_(m.weight)\n+ zeros_(m.bias)\ndef forward_features(self, x):\noutput = []\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
fix convnext init (#6248)
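Note: a rough sketch of the initialization this fix switches to, assuming the trunc_normal_ / zeros_ helpers behave like paddle initializer instances; the helper construction below is an assumption, not the repository's transformer_utils code:

import paddle.nn as nn
from paddle.nn.initializer import TruncatedNormal, Constant

trunc_normal_ = TruncatedNormal(std=0.02)   # assumed default std
zeros_ = Constant(value=0.0)

def _init_weights(m):
    # Truncated-normal weights and zero biases for conv / linear layers.
    if isinstance(m, (nn.Conv2D, nn.Linear)):
        trunc_normal_(m.weight)
        zeros_(m.bias)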
499,298
23.06.2022 12:37:59
-28,800
8d21f781c672889896f0684f22a41d5108825471
fix deepsort bytetrack doc
[ { "change_type": "MODIFY", "old_path": "configs/mot/bytetrack/_base_/mix_det.yml", "new_path": "configs/mot/bytetrack/_base_/mix_det.yml", "diff": "@@ -11,7 +11,7 @@ TrainDataset:\nEvalDataset:\n!COCODataSet\n- image_dir: train\n+ image_dir: images/train\nanno_path: annotations/val_half.json\ndataset_dir: dataset/mot/MOT17\n" }, { "change_type": "MODIFY", "old_path": "configs/mot/bytetrack/detector/README_cn.md", "new_path": "configs/mot/bytetrack/detector/README_cn.md", "diff": "@@ -30,7 +30,7 @@ job_name=ppyoloe_crn_l_36e_640x640_mot17half\nconfig=configs/mot/bytetrack/detector/${job_name}.yml\nlog_dir=log_dir/${job_name}\n# 1. training\n-python -m paddle.distributed.launch --log_dir=${log_dir} --gpus 0,1,2,3,4,5,6,7 tools/train.py -c ${config} --eval --amp --fleet\n+python -m paddle.distributed.launch --log_dir=${log_dir} --gpus 0,1,2,3,4,5,6,7 tools/train.py -c ${config} --eval --amp\n# 2. evaluation\nCUDA_VISIBLE_DEVICES=0 python tools/eval.py -c ${config} -o weights=https://paddledet.bj.bcebos.com/models/mot/${job_name}.pdparams\n```\n" }, { "change_type": "MODIFY", "old_path": "configs/mot/deepsort/deepsort_ppyoloe_pplcnet.yml", "new_path": "configs/mot/deepsort/deepsort_ppyoloe_pplcnet.yml", "diff": "@@ -92,7 +92,6 @@ PPYOLOEHead:\ngrid_cell_offset: 0.5\nstatic_assigner_epoch: -1 # 100\nuse_varifocal_loss: True\n- eval_input_size: [640, 640]\nloss_weight: {class: 1.0, iou: 2.5, dfl: 0.5}\nstatic_assigner:\nname: ATSSAssigner\n" }, { "change_type": "MODIFY", "old_path": "configs/mot/deepsort/deepsort_ppyoloe_resnet.yml", "new_path": "configs/mot/deepsort/deepsort_ppyoloe_resnet.yml", "diff": "@@ -91,7 +91,6 @@ PPYOLOEHead:\ngrid_cell_offset: 0.5\nstatic_assigner_epoch: -1 # 100\nuse_varifocal_loss: True\n- eval_input_size: [640, 640]\nloss_weight: {class: 1.0, iou: 2.5, dfl: 0.5}\nstatic_assigner:\nname: ATSSAssigner\n" }, { "change_type": "MODIFY", "old_path": "configs/mot/deepsort/detector/ppyoloe_crn_l_36e_640x640_mot17half.yml", "new_path": "configs/mot/deepsort/detector/ppyoloe_crn_l_36e_640x640_mot17half.yml", "diff": "@@ -6,6 +6,7 @@ weights: output/ppyoloe_crn_l_36e_640x640_mot17half/model_final\nlog_iter: 20\nsnapshot_epoch: 2\n+\n# schedule configuration for fine-tuning\nepoch: 36\nLearningRate:\n@@ -15,7 +16,7 @@ LearningRate:\nmax_epochs: 43\n- !LinearWarmup\nstart_factor: 0.001\n- steps: 100\n+ epochs: 1\nOptimizerBuilder:\noptimizer:\n@@ -25,9 +26,11 @@ OptimizerBuilder:\nfactor: 0.0005\ntype: L2\n+\nTrainReader:\nbatch_size: 8\n+\n# detector configuration\narchitecture: YOLOv3\nnorm_type: sync_bn\n@@ -62,7 +65,6 @@ PPYOLOEHead:\ngrid_cell_offset: 0.5\nstatic_assigner_epoch: -1 # 100\nuse_varifocal_loss: True\n- eval_input_size: [640, 640]\nloss_weight: {class: 1.0, iou: 2.5, dfl: 0.5}\nstatic_assigner:\nname: ATSSAssigner\n" }, { "change_type": "MODIFY", "old_path": "deploy/pptracking/python/det_infer.py", "new_path": "deploy/pptracking/python/det_infer.py", "diff": "@@ -32,7 +32,7 @@ sys.path.insert(0, parent_path)\nfrom benchmark_utils import PaddleInferBenchmark\nfrom picodet_postprocess import PicoDetPostProcess\n-from preprocess import preprocess, Resize, NormalizeImage, Permute, PadStride, LetterBoxResize, decode_image\n+from preprocess import preprocess, Resize, NormalizeImage, Permute, PadStride, LetterBoxResize, Pad, decode_image\nfrom mot.visualize import visualize_box_mask\nfrom mot_utils import argsparser, Timer, get_current_memory_mb\n" }, { "change_type": "MODIFY", "old_path": "deploy/pptracking/python/mot_sde_infer.py", "new_path": 
"deploy/pptracking/python/mot_sde_infer.py", "diff": "@@ -186,7 +186,9 @@ class SDE_Detector(Detector):\ndef postprocess(self, inputs, result):\n# postprocess output of predictor\n- np_boxes_num = result['boxes_num']\n+ keep_idx = result['boxes'][:, 1] > self.threshold\n+ result['boxes'] = result['boxes'][keep_idx]\n+ np_boxes_num = [len(result['boxes'])]\nif np_boxes_num[0] <= 0:\nprint('[WARNNING] No object detected.')\nresult = {'boxes': np.zeros([0, 6]), 'boxes_num': [0]}\n@@ -520,8 +522,8 @@ class SDE_Detector(Detector):\n# bs=1 in MOT model\nonline_tlwhs, online_scores, online_ids = mot_results[0]\n- # NOTE: just implement flow statistic for one class\n- if num_classes == 1:\n+ # flow statistic for one class, and only for bytetracker\n+ if num_classes == 1 and not self.use_deepsort_tracker:\nresult = (frame_id + 1, online_tlwhs[0], online_scores[0],\nonline_ids[0])\nstatistic = flow_statistic(\n" }, { "change_type": "MODIFY", "old_path": "deploy/pptracking/python/preprocess.py", "new_path": "deploy/pptracking/python/preprocess.py", "diff": "@@ -245,6 +245,34 @@ class LetterBoxResize(object):\nreturn im, im_info\n+class Pad(object):\n+ def __init__(self, size, fill_value=[114.0, 114.0, 114.0]):\n+ \"\"\"\n+ Pad image to a specified size.\n+ Args:\n+ size (list[int]): image target size\n+ fill_value (list[float]): rgb value of pad area, default (114.0, 114.0, 114.0)\n+ \"\"\"\n+ super(Pad, self).__init__()\n+ if isinstance(size, int):\n+ size = [size, size]\n+ self.size = size\n+ self.fill_value = fill_value\n+\n+ def __call__(self, im, im_info):\n+ im_h, im_w = im.shape[:2]\n+ h, w = self.size\n+ if h == im_h and w == im_w:\n+ im = im.astype(np.float32)\n+ return im, im_info\n+\n+ canvas = np.ones((h, w, 3), dtype=np.float32)\n+ canvas *= np.array(self.fill_value, dtype=np.float32)\n+ canvas[0:im_h, 0:im_w, :] = im.astype(np.float32)\n+ im = canvas\n+ return im, im_info\n+\n+\ndef preprocess(im, preprocess_ops):\n# process image by preprocess_ops\nim_info = {\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
fix deepsort bytetrack doc (#6255)
499,348
28.06.2022 18:43:51
-28,800
3428d97fce7e006f247e9a6671edc471da96fdc9
rename imshow name; test=document_fix
[ { "change_type": "MODIFY", "old_path": "deploy/pphuman/pipeline.py", "new_path": "deploy/pphuman/pipeline.py", "diff": "@@ -671,7 +671,7 @@ class PipePredictor(object):\ncenter_traj) # visualize\nwriter.write(im)\nif self.file_name is None: # use camera_id\n- cv2.imshow('PPHuman&&PPVehicle', im)\n+ cv2.imshow('Paddle-Pipeline', im)\nif cv2.waitKey(1) & 0xFF == ord('q'):\nbreak\ncontinue\n@@ -833,7 +833,7 @@ class PipePredictor(object):\ncenter_traj) # visualize\nwriter.write(im)\nif self.file_name is None: # use camera_id\n- cv2.imshow('PPHuman', im)\n+ cv2.imshow('Paddle-Pipeline', im)\nif cv2.waitKey(1) & 0xFF == ord('q'):\nbreak\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
rename imshow name; (#6295) test=document_fix
499,313
30.06.2022 14:33:13
-28,800
d2e7bd38c30b23fea75f67662ee8e4a5eb415bd0
fix spine_coco.tar md5sum
[ { "change_type": "MODIFY", "old_path": "ppdet/utils/download.py", "new_path": "ppdet/utils/download.py", "diff": "@@ -97,7 +97,7 @@ DATASETS = {\n'49ce5a9b5ad0d6266163cd01de4b018e', ), ], ['annotations', 'images']),\n'spine_coco': ([(\n'https://paddledet.bj.bcebos.com/data/spine_coco.tar',\n- '7ed69ae73f842cd2a8cf4f58dc3c5535', ), ], ['annotations', 'images']),\n+ '03030f42d9b6202a6e425d4becefda0d', ), ], ['annotations', 'images']),\n'mot': (),\n'objects365': (),\n'coco_ce': ([(\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
fix spine_coco.tar md5sum (#6309)
499,298
30.06.2022 18:05:12
-28,800
00b656f2f9d84e2cbe5689cd9ce29273b8466c6c
update ppyoloe test AP in all docs
[ { "change_type": "MODIFY", "old_path": "configs/ppyoloe/_base_/ppyoloe_crn.yml", "new_path": "configs/ppyoloe/_base_/ppyoloe_crn.yml", "diff": "@@ -39,7 +39,7 @@ PPYOLOEHead:\nbeta: 6.0\nnms:\nname: MultiClassNMS\n- nms_top_k: 1000\n- keep_top_k: 100\n+ nms_top_k: 10000\n+ keep_top_k: 300\nscore_threshold: 0.01\n- nms_threshold: 0.6\n+ nms_threshold: 0.7\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
update ppyoloe test AP in all docs (#6315)
499,339
01.07.2022 11:28:15
-28,800
bccee30e34c4e8f86672b4e4482ece03daa96ecc
[TIPC] fix ptq txt, test=document_fix
[ { "change_type": "MODIFY", "old_path": "test_tipc/configs/keypoint/tinypose_128x96_train_ptq_infer_python.txt", "new_path": "test_tipc/configs/keypoint/tinypose_128x96_train_ptq_infer_python.txt", "diff": "@@ -5,7 +5,7 @@ filename:\n##\n--output_dir:./output_inference\nweights:https://paddledet.bj.bcebos.com/models/keypoint/tinypose_128x96.pdparams\n-kl_quant_export:tools/post_quant.py -c test_tipc/configs/keypoint/tinypose_128x96.yml --slim_config configs/slim/post_quant/yolov3_darknet53_ptq.yml -o\n+kl_quant_export:tools/post_quant.py -c test_tipc/configs/keypoint/tinypose_128x96.yml --slim_config configs/slim/post_quant/tinypose_128x96_ptq.yml -o\nexport_param1:null\n##\ninference:./deploy/python/keypoint_infer.py\n" }, { "change_type": "MODIFY", "old_path": "test_tipc/configs/mask_rcnn/mask_rcnn_r50_fpn_1x_coco_train_pact_infer_python.txt", "new_path": "test_tipc/configs/mask_rcnn/mask_rcnn_r50_fpn_1x_coco_train_pact_infer_python.txt", "diff": "@@ -14,7 +14,7 @@ filename:null\n##\ntrainer:pact_train\nnorm_train:tools/train.py -c configs/mask_rcnn/mask_rcnn_r50_fpn_1x_coco.yml -o\n-pact_train:tools/train.py -c configs/mask_rcnn/mask_rcnn_r50_fpn_1x_coco.yml --slim_config configs/slim/quant/yolov3_mobilenet_v3_qat.yml -o\n+pact_train:tools/train.py -c configs/mask_rcnn/mask_rcnn_r50_fpn_1x_coco.yml --slim_config configs/slim/quant/mask_rcnn_r50_fpn_1x_qat.yml -o\nfpgm_train:tools/train.py -c configs/mask_rcnn/mask_rcnn_r50_fpn_1x_coco.yml --slim_config _template_fpgm -o\ndistill_train:null\nnull:null\n@@ -41,7 +41,7 @@ inference:./deploy/python/infer.py\n--device:gpu|cpu\n--enable_mkldnn:False\n--cpu_threads:4\n---batch_size:1|2\n+--batch_size:1\n--use_tensorrt:null\n--run_mode:paddle\n--model_dir:\n" }, { "change_type": "MODIFY", "old_path": "test_tipc/configs/mask_rcnn/mask_rcnn_r50_fpn_1x_coco_train_ptq_infer_python.txt", "new_path": "test_tipc/configs/mask_rcnn/mask_rcnn_r50_fpn_1x_coco_train_ptq_infer_python.txt", "diff": "@@ -5,14 +5,14 @@ filename:\n##\n--output_dir:./output_inference\nweights:https://paddledet.bj.bcebos.com/models/mask_rcnn_r50_fpn_1x_coco.pdparams\n-kl_quant_export:tools/post_quant.py -c configs/mask_rcnn/mask_rcnn_r50_fpn_1x_coco.yml --slim_config configs/slim/post_quant/yolov3_darknet53_ptq.yml -o\n+kl_quant_export:tools/post_quant.py -c configs/mask_rcnn/mask_rcnn_r50_fpn_1x_coco.yml --slim_config configs/slim/post_quant/mask_rcnn_r50_fpn_1x_coco_ptq.yml -o\nexport_param1:null\n##\ninference:./deploy/python/infer.py\n--device:gpu|cpu\n--enable_mkldnn:False\n--cpu_threads:4\n---batch_size:1|2\n+--batch_size:1\n--run_mode:paddle\n--model_dir:\n--image_dir:./dataset/coco/test2017/\n" }, { "change_type": "MODIFY", "old_path": "test_tipc/configs/ppyolo/ppyolo_mbv3_large_coco_train_pact_infer_python.txt", "new_path": "test_tipc/configs/ppyolo/ppyolo_mbv3_large_coco_train_pact_infer_python.txt", "diff": "@@ -14,7 +14,7 @@ filename:null\n##\ntrainer:pact_train\nnorm_train:tools/train.py -c configs/ppyolo/ppyolo_mbv3_large_coco.yml -o\n-pact_train:tools/train.py -c configs/ppyolo/ppyolo_mbv3_large_coco.yml --slim_config configs/slim/quant/yolov3_mobilenet_v3_qat.yml -o\n+pact_train:tools/train.py -c configs/ppyolo/ppyolo_mbv3_large_coco.yml --slim_config configs/slim/quant/ppyolo_mbv3_large_qat.yml -o\nfpgm_train:tools/train.py -c configs/ppyolo/ppyolo_mbv3_large_coco.yml --slim_config configs/slim/prune/ppyolo_mbv3_large_prune_fpgm.yml -o\ndistill_train:null\nnull:null\n" }, { "change_type": "MODIFY", "old_path": 
"test_tipc/configs/ppyolo/ppyolo_mbv3_large_coco_train_ptq_infer_python.txt", "new_path": "test_tipc/configs/ppyolo/ppyolo_mbv3_large_coco_train_ptq_infer_python.txt", "diff": "@@ -5,7 +5,7 @@ filename:\n##\n--output_dir:./output_inference\nweights:https://paddledet.bj.bcebos.com/models/ppyolo_mbv3_large_coco.pdparams\n-kl_quant_export:tools/post_quant.py -c configs/ppyolo/ppyolo_mbv3_large_coco.yml --slim_config configs/slim/post_quant/yolov3_darknet53_ptq.yml -o\n+kl_quant_export:tools/post_quant.py -c configs/ppyolo/ppyolo_mbv3_large_coco.yml --slim_config configs/slim/post_quant/ppyolo_mbv3_large_ptq.yml -o\nexport_param1:null\n##\ninference:./deploy/python/infer.py\n" }, { "change_type": "MODIFY", "old_path": "test_tipc/configs/ppyoloe/ppyoloe_crn_s_300e_coco_train_ptq_infer_python.txt", "new_path": "test_tipc/configs/ppyoloe/ppyoloe_crn_s_300e_coco_train_ptq_infer_python.txt", "diff": "@@ -5,7 +5,7 @@ filename:\n##\n--output_dir:./output_inference\nweights:https://paddledet.bj.bcebos.com/models/ppyoloe_crn_s_300e_coco.pdparams\n-kl_quant_export:tools/post_quant.py -c configs/ppyoloe/ppyoloe_crn_s_300e_coco.yml --slim_config configs/slim/post_quant/yolov3_darknet53_ptq.yml -o\n+kl_quant_export:tools/post_quant.py -c configs/ppyoloe/ppyoloe_crn_s_300e_coco.yml --slim_config configs/slim/post_quant/ppyoloe_crn_s_300e_coco_ptq.yml -o\nexport_param1:null\n##\ninference:./deploy/python/infer.py\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
[TIPC] fix ptq txt, test=document_fix (#6320)
499,354
04.07.2022 18:32:27
-28,800
1f13295326a8d7584976c69c92d7c68f1493c208
mobileone block k>1 bugfix
[ { "change_type": "MODIFY", "old_path": "ppdet/modeling/backbones/mobileone.py", "new_path": "ppdet/modeling/backbones/mobileone.py", "diff": "@@ -22,7 +22,7 @@ import paddle\nimport paddle.nn as nn\nfrom paddle import ParamAttr\nfrom paddle.regularizer import L2Decay\n-from paddle.nn.initializer import Normal\n+from paddle.nn.initializer import Normal, Constant\nfrom ppdet.modeling.ops import get_act_fn\nfrom ppdet.modeling.layers import ConvNormLayer\n@@ -57,9 +57,7 @@ class MobileOneBlock(nn.Layer):\nself.depth_conv = nn.LayerList()\nself.point_conv = nn.LayerList()\n- for i in range(self.k):\n- if i > 0:\n- stride = 1\n+ for _ in range(self.k):\nself.depth_conv.append(\nConvNormLayer(\nch_in,\n@@ -112,7 +110,8 @@ class MobileOneBlock(nn.Layer):\nself.rbr_identity_st2 = nn.BatchNorm2D(\nnum_features=ch_out,\nweight_attr=ParamAttr(regularizer=L2Decay(0.0)),\n- bias_attr=ParamAttr(regularizer=L2Decay(0.0)))\n+ bias_attr=ParamAttr(regularizer=L2Decay(\n+ 0.0))) if ch_in == ch_out and self.stride == 1 else None\nself.act = get_act_fn(act) if act is None or isinstance(act, (\nstr, dict)) else act\n@@ -125,9 +124,10 @@ class MobileOneBlock(nn.Layer):\nelse:\nid_out_st1 = self.rbr_identity_st1(x)\n- x1_1 = x.clone()\n+ x1_1 = 0\nfor i in range(self.k):\n- x1_1 = self.depth_conv[i](x1_1)\n+ x1_1 += self.depth_conv[i](x)\n+\nx1_2 = self.rbr_1x1(x)\nx1 = self.act(x1_1 + x1_2 + id_out_st1)\n@@ -136,9 +136,9 @@ class MobileOneBlock(nn.Layer):\nelse:\nid_out_st2 = self.rbr_identity_st2(x1)\n- x2_1 = x1.clone()\n+ x2_1 = 0\nfor i in range(self.k):\n- x2_1 = self.point_conv[i](x2_1)\n+ x2_1 += self.point_conv[i](x1)\ny = self.act(x2_1 + id_out_st2)\nreturn y\n@@ -151,7 +151,9 @@ class MobileOneBlock(nn.Layer):\nkernel_size=self.kernel_size,\nstride=self.stride,\npadding=self.padding,\n- groups=self.ch_in)\n+ groups=self.ch_in,\n+ bias_attr=ParamAttr(\n+ initializer=Constant(value=0.), learning_rate=1.))\nif not hasattr(self, 'conv2'):\nself.conv2 = nn.Conv2D(\nin_channels=self.ch_in,\n@@ -159,7 +161,9 @@ class MobileOneBlock(nn.Layer):\nkernel_size=1,\nstride=1,\npadding='SAME',\n- groups=1)\n+ groups=1,\n+ bias_attr=ParamAttr(\n+ initializer=Constant(value=0.), learning_rate=1.))\nconv1_kernel, conv1_bias, conv2_kernel, conv2_bias = self.get_equivalent_kernel_bias(\n)\n@@ -211,26 +215,24 @@ class MobileOneBlock(nn.Layer):\nreturn 0, 0\nif isinstance(branch, nn.LayerList):\n- kernel = 0\n- running_mean = 0\n- running_var = 0\n- gamma = 0\n- beta = 0\n- eps = 0\n+ fused_kernels = []\n+ fused_bias = []\nfor block in branch:\n- kernel += block.conv.weight\n- running_mean += block.norm._mean\n- running_var += block.norm._variance\n- gamma += block.norm.weight\n- beta += block.norm.bias\n- eps += block.norm._epsilon\n+ kernel = block.conv.weight\n+ running_mean = block.norm._mean\n+ running_var = block.norm._variance\n+ gamma = block.norm.weight\n+ beta = block.norm.bias\n+ eps = block.norm._epsilon\n+\n+ std = (running_var + eps).sqrt()\n+ t = (gamma / std).reshape((-1, 1, 1, 1))\n+\n+ fused_kernels.append(kernel * t)\n+ fused_bias.append(beta - running_mean * gamma / std)\n+\n+ return sum(fused_kernels), sum(fused_bias)\n- kernel /= len(branch)\n- running_mean /= len(branch)\n- running_var /= len(branch)\n- gamma /= len(branch)\n- beta /= len(branch)\n- eps /= len(branch)\nelif isinstance(branch, ConvNormLayer):\nkernel = branch.conv.weight\nrunning_mean = branch.norm._mean\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
mobileone block k>1 bugfix (#6342)
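Note: the core of this fix is how each conv+BN branch is folded before the k branches are summed. A self-contained sketch of that fusion arithmetic (variable names are illustrative, this is not the MobileOne code itself):

import paddle

def fuse_branch(kernel, running_mean, running_var, gamma, beta, eps):
    # Fold BatchNorm statistics into the preceding conv kernel and bias.
    std = paddle.sqrt(running_var + eps)
    t = (gamma / std).reshape((-1, 1, 1, 1))
    return kernel * t, beta - running_mean * gamma / std

# The k depthwise branches are then combined by summing the fused kernels
# and biases, rather than averaging the raw BN statistics.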
499,348
04.07.2022 19:38:44
-28,800
fe9e983daeeb49be9a4af38b80cc4967afc1690f
add vehicle times
[ { "change_type": "MODIFY", "old_path": "deploy/pipeline/pipe_utils.py", "new_path": "deploy/pipeline/pipe_utils.py", "diff": "@@ -157,7 +157,8 @@ class PipeTimer(Times):\n'reid': Times(),\n'det_action': Times(),\n'cls_action': Times(),\n- 'vehicle_attr': Times()\n+ 'vehicle_attr': Times(),\n+ 'vehicleplate': Times()\n}\nself.img_num = 0\n" }, { "change_type": "MODIFY", "old_path": "deploy/pipeline/pipeline.py", "new_path": "deploy/pipeline/pipeline.py", "diff": "@@ -679,8 +679,12 @@ class PipePredictor(object):\nframe_rgb, mot_res)\nif self.with_vehicleplate:\n+ if frame_id > self.warmup_frame:\n+ self.pipe_timer.module_time['vehicleplate'].start()\nplatelicense = self.vehicleplate_detector.get_platelicense(\ncrop_input)\n+ if frame_id > self.warmup_frame:\n+ self.pipe_timer.module_time['vehicleplate'].end()\nself.pipeline_res.update(platelicense, 'vehicleplate')\nif self.with_human_attr:\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
add vehicle times (#6327)
499,363
04.07.2022 19:47:17
-28,800
95e07d186bf940f087d7672e45de973f16da1048
Develop: get fight recognition model from model dir * Update action.md delete the step of change model name of fight recognition * Update video_action_infer.py get model path from model_dir without model name * optimize action vis
[ { "change_type": "MODIFY", "old_path": "deploy/pipeline/pipeline.py", "new_path": "deploy/pipeline/pipeline.py", "diff": "@@ -919,9 +919,12 @@ class PipePredictor(object):\nvideo_action_score = None\nif video_action_res and video_action_res[\"class\"] == 1:\nvideo_action_score = video_action_res[\"score\"]\n+ mot_boxes = None\n+ if mot_res:\n+ mot_boxes = mot_res['boxes']\nimage = visualize_action(\nimage,\n- mot_res['boxes'],\n+ mot_boxes,\naction_visual_collector=None,\naction_text=\"SkeletonAction\",\nvideo_action_score=video_action_score,\n" }, { "change_type": "MODIFY", "old_path": "deploy/pipeline/pphuman/video_action_infer.py", "new_path": "deploy/pipeline/pphuman/video_action_infer.py", "diff": "@@ -96,8 +96,8 @@ class VideoActionRecognizer(object):\nself.recognize_times = Timer()\n- model_file_path = os.path.join(model_dir, \"ppTSM.pdmodel\")\n- params_file_path = os.path.join(model_dir, \"ppTSM.pdiparams\")\n+ model_file_path = glob.glob(os.path.join(model_dir, \"*.pdmodel\"))[0]\n+ params_file_path = glob.glob(os.path.join(model_dir, \"*.pdiparams\"))[0]\nself.config = Config(model_file_path, params_file_path)\nif device == \"GPU\" or device == \"gpu\":\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
Develop: get fight recognition model from model dir (#6324) * Update action.md delete the step of change model name of fight recognition * Update video_action_infer.py get model path from model_dir without model name * optimize action vis
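Note: a small hedged sketch of the path discovery this change introduces — look up the exported *.pdmodel / *.pdiparams pair inside model_dir instead of hard-coding a file name (the helper name is made up):

import glob
import os

def find_inference_files(model_dir):
    # Pick the first exported model/params pair found in the directory.
    model_file = glob.glob(os.path.join(model_dir, "*.pdmodel"))[0]
    params_file = glob.glob(os.path.join(model_dir, "*.pdiparams"))[0]
    return model_file, params_file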
499,354
05.07.2022 10:58:32
-28,800
dcadfc3e8637d272113811fbf6026633eac17e5f
mobileone block bugfix
[ { "change_type": "MODIFY", "old_path": "ppdet/modeling/backbones/mobileone.py", "new_path": "ppdet/modeling/backbones/mobileone.py", "diff": "@@ -178,8 +178,6 @@ class MobileOneBlock(nn.Layer):\nself.__delattr__('rbr_identity_st1')\nif hasattr(self, 'rbr_identity_st2'):\nself.__delattr__('rbr_identity_st2')\n- if hasattr(self, 'id_tensor'):\n- self.__delattr__('id_tensor')\ndef get_equivalent_kernel_bias(self):\nst1_kernel3x3, st1_bias3x3 = self._fuse_bn_tensor(self.depth_conv)\n@@ -248,7 +246,8 @@ class MobileOneBlock(nn.Layer):\ndtype='float32')\nif kernel_size > 1:\nfor i in range(self.ch_in):\n- kernel_value[i, i % input_dim, 1, 1] = 1\n+ kernel_value[i, i % input_dim, (kernel_size - 1) // 2, (\n+ kernel_size - 1) // 2] = 1\nelif kernel_size == 1:\nfor i in range(self.ch_in):\nkernel_value[i, i % input_dim, 0, 0] = 1\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
mobileone block bugfix (#6358)
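Note: an illustrative sketch of the centred identity kernel this fix builds for re-parameterisation; the index (kernel_size - 1) // 2 replaces the previously hard-coded position (1, 1), and all names below are illustrative:

import numpy as np

def identity_kernel(channels, groups, kernel_size):
    input_dim = channels // groups
    k = np.zeros((channels, input_dim, kernel_size, kernel_size), dtype="float32")
    c = (kernel_size - 1) // 2          # kernel centre, valid for any odd size
    for i in range(channels):
        k[i, i % input_dim, c, c] = 1.0
    return k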
499,299
08.07.2022 10:43:51
-28,800
23e9cb95f95c9e42aa4f40c68789da417ddf6d72
add frame-skip to boost inference
[ { "change_type": "MODIFY", "old_path": "deploy/pipeline/config/infer_cfg_pphuman.yml", "new_path": "deploy/pipeline/config/infer_cfg_pphuman.yml", "diff": "@@ -50,6 +50,7 @@ ID_BASED_DETACTION:\nbasemode: \"idbased\"\nthreshold: 0.6\ndisplay_frames: 80\n+ skip_frame_num: 2\nenable: False\nID_BASED_CLSACTION:\n@@ -58,6 +59,7 @@ ID_BASED_CLSACTION:\nbasemode: \"idbased\"\nthreshold: 0.8\ndisplay_frames: 80\n+ skip_frame_num: 2\nenable: False\nREID:\n" }, { "change_type": "MODIFY", "old_path": "deploy/pipeline/pipeline.py", "new_path": "deploy/pipeline/pipeline.py", "diff": "@@ -342,7 +342,9 @@ class PipePredictor(object):\nbasemode = idbased_detaction_cfg['basemode']\nthreshold = idbased_detaction_cfg['threshold']\ndisplay_frames = idbased_detaction_cfg['display_frames']\n+ skip_frame_num = idbased_detaction_cfg['skip_frame_num']\nself.modebase[basemode] = True\n+\nself.det_action_predictor = DetActionRecognizer(\nmodel_dir,\ndevice,\n@@ -355,7 +357,8 @@ class PipePredictor(object):\ncpu_threads,\nenable_mkldnn,\nthreshold=threshold,\n- display_frames=display_frames)\n+ display_frames=display_frames,\n+ skip_frame_num=skip_frame_num)\nself.det_action_visual_helper = ActionVisualHelper(1)\nif self.with_idbased_clsaction:\n@@ -366,6 +369,8 @@ class PipePredictor(object):\nthreshold = idbased_clsaction_cfg['threshold']\nself.modebase[basemode] = True\ndisplay_frames = idbased_clsaction_cfg['display_frames']\n+ skip_frame_num = idbased_clsaction_cfg['skip_frame_num']\n+\nself.cls_action_predictor = ClsActionRecognizer(\nmodel_dir,\ndevice,\n@@ -378,7 +383,8 @@ class PipePredictor(object):\ncpu_threads,\nenable_mkldnn,\nthreshold=threshold,\n- display_frames=display_frames)\n+ display_frames=display_frames,\n+ skip_frame_num=skip_frame_num)\nself.cls_action_visual_helper = ActionVisualHelper(1)\nif self.with_skeleton_action:\n" }, { "change_type": "MODIFY", "old_path": "deploy/pipeline/pphuman/action_infer.py", "new_path": "deploy/pipeline/pphuman/action_infer.py", "diff": "@@ -280,6 +280,10 @@ class DetActionRecognizer(object):\nenable_mkldnn (bool): whether to open MKLDNN\nthreshold (float): The threshold of score for action feature object detection.\ndisplay_frames (int): The duration for corresponding detected action.\n+ skip_frame_num (int): The number of frames for interval prediction. A skipped frame will\n+ reuse the result of its last frame. If it is set to 0, no frame will be skipped. 
Default\n+ is 0.\n+\n\"\"\"\ndef __init__(self,\n@@ -295,7 +299,8 @@ class DetActionRecognizer(object):\nenable_mkldnn=False,\noutput_dir='output',\nthreshold=0.5,\n- display_frames=20):\n+ display_frames=20,\n+ skip_frame_num=0):\nsuper(DetActionRecognizer, self).__init__()\nself.detector = Detector(\nmodel_dir=model_dir,\n@@ -313,10 +318,21 @@ class DetActionRecognizer(object):\nself.threshold = threshold\nself.frame_life = display_frames\nself.result_history = {}\n+ self.skip_frame_num = skip_frame_num\n+ self.skip_frame_cnt = 0\n+ self.id_in_last_frame = []\ndef predict(self, images, mot_result):\n+ if self.skip_frame_cnt == 0 or (not self.check_id_is_same(mot_result)):\ndet_result = self.detector.predict_image(images, visual=False)\nresult = self.postprocess(det_result, mot_result)\n+ else:\n+ result = self.reuse_result(mot_result)\n+\n+ self.skip_frame_cnt += 1\n+ if self.skip_frame_cnt >= self.skip_frame_num:\n+ self.skip_frame_cnt = 0\n+\nreturn result\ndef postprocess(self, det_result, mot_result):\n@@ -343,10 +359,11 @@ class DetActionRecognizer(object):\nif valid_boxes.shape[0] >= 1:\naction_ret['class'] = valid_boxes[0, 0]\naction_ret['score'] = valid_boxes[0, 1]\n- self.result_history[tracker_id] = [0, self.frame_life]\n+ self.result_history[\n+ tracker_id] = [0, self.frame_life, valid_boxes[0, 1]]\nelse:\n- history_det, life_remain = self.result_history.get(tracker_id,\n- [1, 0])\n+ history_det, life_remain, history_score = self.result_history.get(\n+ tracker_id, [1, self.frame_life, -1.0])\naction_ret['class'] = history_det\naction_ret['score'] = -1.0\nlife_remain -= 1\n@@ -354,10 +371,48 @@ class DetActionRecognizer(object):\ndel (self.result_history[tracker_id])\nelif tracker_id in self.result_history:\nself.result_history[tracker_id][1] = life_remain\n+ else:\n+ self.result_history[tracker_id] = [\n+ history_det, life_remain, history_score\n+ ]\nmot_id.append(tracker_id)\nact_res.append(action_ret)\nresult = list(zip(mot_id, act_res))\n+ self.id_in_last_frame = mot_id\n+\n+ return result\n+\n+ def check_id_is_same(self, mot_result):\n+ mot_bboxes = mot_result.get('boxes')\n+ for idx in range(len(mot_bboxes)):\n+ tracker_id = mot_bboxes[idx, 0]\n+ if tracker_id not in self.id_in_last_frame:\n+ return False\n+ return True\n+\n+ def reuse_result(self, mot_result):\n+ # This function reusing previous results of the same ID directly.\n+ mot_bboxes = mot_result.get('boxes')\n+\n+ mot_id = []\n+ act_res = []\n+\n+ for idx in range(len(mot_bboxes)):\n+ tracker_id = mot_bboxes[idx, 0]\n+ history_cls, life_remain, history_score = self.result_history.get(\n+ tracker_id, [1, 0, -1.0])\n+\n+ life_remain -= 1\n+ if tracker_id in self.result_history:\n+ self.result_history[tracker_id][1] = life_remain\n+\n+ action_ret = {'class': history_cls, 'score': history_score}\n+ mot_id.append(tracker_id)\n+ act_res.append(action_ret)\n+\n+ result = list(zip(mot_id, act_res))\n+ self.id_in_last_frame = mot_id\nreturn result\n@@ -378,6 +433,9 @@ class ClsActionRecognizer(AttrDetector):\nenable_mkldnn (bool): whether to open MKLDNN\nthreshold (float): The threshold of score for action feature object detection.\ndisplay_frames (int): The duration for corresponding detected action.\n+ skip_frame_num (int): The number of frames for interval prediction. A skipped frame will\n+ reuse the result of its last frame. If it is set to 0, no frame will be skipped. 
Default\n+ is 0.\n\"\"\"\ndef __init__(self,\n@@ -393,7 +451,8 @@ class ClsActionRecognizer(AttrDetector):\nenable_mkldnn=False,\noutput_dir='output',\nthreshold=0.5,\n- display_frames=80):\n+ display_frames=80,\n+ skip_frame_num=0):\nsuper(ClsActionRecognizer, self).__init__(\nmodel_dir=model_dir,\ndevice=device,\n@@ -410,11 +469,22 @@ class ClsActionRecognizer(AttrDetector):\nself.threshold = threshold\nself.frame_life = display_frames\nself.result_history = {}\n+ self.skip_frame_num = skip_frame_num\n+ self.skip_frame_cnt = 0\n+ self.id_in_last_frame = []\ndef predict_with_mot(self, images, mot_result):\n+ if self.skip_frame_cnt == 0 or (not self.check_id_is_same(mot_result)):\nimages = self.crop_half_body(images)\ncls_result = self.predict_image(images, visual=False)[\"output\"]\nresult = self.match_action_with_id(cls_result, mot_result)\n+ else:\n+ result = self.reuse_result(mot_result)\n+\n+ self.skip_frame_cnt += 1\n+ if self.skip_frame_cnt >= self.skip_frame_num:\n+ self.skip_frame_cnt = 0\n+\nreturn result\ndef crop_half_body(self, images):\n@@ -456,8 +526,8 @@ class ClsActionRecognizer(AttrDetector):\n# Current now, class 0 is positive, class 1 is negative.\nif cls_id_res == 1 or (cls_id_res == 0 and\ncls_score_res < self.threshold):\n- history_cls, life_remain = self.result_history.get(tracker_id,\n- [1, 0])\n+ history_cls, life_remain, history_score = self.result_history.get(\n+ tracker_id, [1, self.frame_life, -1.0])\ncls_id_res = history_cls\ncls_score_res = 1 - cls_score_res\nlife_remain -= 1\n@@ -466,12 +536,50 @@ class ClsActionRecognizer(AttrDetector):\nelif tracker_id in self.result_history:\nself.result_history[tracker_id][1] = life_remain\nelse:\n- self.result_history[tracker_id] = [cls_id_res, self.frame_life]\n+ self.result_history[\n+ tracker_id] = [cls_id_res, life_remain, cls_score_res]\n+ else:\n+ self.result_history[\n+ tracker_id] = [cls_id_res, self.frame_life, cls_score_res]\naction_ret = {'class': cls_id_res, 'score': cls_score_res}\nmot_id.append(tracker_id)\nact_res.append(action_ret)\nresult = list(zip(mot_id, act_res))\n+ self.id_in_last_frame = mot_id\n+\n+ return result\n+\n+ def check_id_is_same(self, mot_result):\n+ mot_bboxes = mot_result.get('boxes')\n+ for idx in range(len(mot_bboxes)):\n+ tracker_id = mot_bboxes[idx, 0]\n+ if tracker_id not in self.id_in_last_frame:\n+ return False\n+ return True\n+\n+ def reuse_result(self, mot_result):\n+ # This function reusing previous results of the same ID directly.\n+ mot_bboxes = mot_result.get('boxes')\n+\n+ mot_id = []\n+ act_res = []\n+\n+ for idx in range(len(mot_bboxes)):\n+ tracker_id = mot_bboxes[idx, 0]\n+ history_cls, life_remain, history_score = self.result_history.get(\n+ tracker_id, [1, 0, -1.0])\n+\n+ life_remain -= 1\n+ if tracker_id in self.result_history:\n+ self.result_history[tracker_id][1] = life_remain\n+\n+ action_ret = {'class': history_cls, 'score': history_score}\n+ mot_id.append(tracker_id)\n+ act_res.append(action_ret)\n+\n+ result = list(zip(mot_id, act_res))\n+ self.id_in_last_frame = mot_id\nreturn result\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
add frame-skip to boost inference (#6383)
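Note: a hedged, simplified sketch of the skip-frame logic added here (class and method names are assumptions): the recognizer runs only every skip_frame_num frames and reuses cached per-ID results in between, provided all current track IDs were already seen in the last processed frame.

class SkipFrameCache:
    def __init__(self, skip_frame_num=2):
        self.skip_frame_num = skip_frame_num
        self.skip_frame_cnt = 0
        self.results = {}               # track_id -> last result
        self.ids_last_frame = set()

    def predict(self, track_ids, run_model):
        ids = list(track_ids)
        # Re-run the model if the skip counter rolled over or a new ID appeared.
        must_run = self.skip_frame_cnt == 0 or not set(ids) <= self.ids_last_frame
        if must_run:
            outputs = run_model(ids)                        # expensive model call
            self.results.update(zip(ids, outputs))
        else:
            outputs = [self.results.get(i) for i in ids]    # reuse cached results
        self.ids_last_frame = set(ids)
        self.skip_frame_cnt += 1
        if self.skip_frame_cnt >= self.skip_frame_num:      # 0 means never skip
            self.skip_frame_cnt = 0
        return list(zip(ids, outputs))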
499,339
08.07.2022 10:58:01
-28,800
f0b524486725d98bb3ba18b2c7ae34c3254d516a
[TIPC] add tinypose_KL txt, test=document_fix
[ { "change_type": "ADD", "old_path": null, "new_path": "test_tipc/configs/keypoint/tinypose_128x96_KL_model_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt", "diff": "+===========================cpp_infer_params===========================\n+model_name:tinypose_128x96_KL\n+python:python3.7\n+filename:null\n+##\n+--output_dir:./output_inference\n+weights:https://paddledet.bj.bcebos.com/models/keypoint/tinypose_128x96.pdparams\n+norm_export:tools/export_model.py -c test_tipc/configs/keypoint/tinypose_128x96.yml -o\n+quant_export:tools/export_model.py -c test_tipc/configs/keypoint/tinypose_128x96.yml --slim_config _template_pact -o\n+fpgm_export:tools/export_model.py -c test_tipc/configs/keypoint/tinypose_128x96.yml --slim_config _template_fpgm -o\n+distill_export:null\n+export1:null\n+export2:null\n+kl_quant_export:tools/post_quant.py -c test_tipc/configs/keypoint/tinypose_128x96.yml --slim_config _template_kl_quant -o\n+##\n+opencv_dir:default\n+infer_mode:null\n+infer_quant:True\n+inference:./deploy/cpp/build/main\n+--device:gpu|cpu\n+--use_mkldnn:False\n+--cpu_threads:4\n+--batch_size:1\n+--use_tensorrt:null\n+--run_mode:paddle\n+--model_dir_keypoint:\n+--image_dir:./dataset/coco/test2017/\n+--run_benchmark:False\n+--model_dir:./output_inference/picodet_s_320_pedestrian\n\\ No newline at end of file\n" }, { "change_type": "ADD", "old_path": null, "new_path": "test_tipc/configs/keypoint/tinypose_128x96_KL_model_linux_gpu_normal_normal_serving_cpp_linux_gpu_cpu.txt", "diff": "+===========================serving_infer_cpp_params===========================\n+model_name:tinypose_128x96_KL\n+python:python3.7\n+filename:null\n+##\n+--output_dir:./output_inference\n+weights:https://paddledet.bj.bcebos.com/models/keypoint/tinypose_128x96.pdparams\n+norm_export:tools/export_model.py -c test_tipc/configs/keypoint/tinypose_128x96.yml --export_serving_model True -o\n+quant_export:tools/export_model.py -c test_tipc/configs/keypoint/tinypose_128x96.yml --slim_config _template_pact --export_serving_model True -o\n+fpgm_export:tools/export_model.py -c test_tipc/configs/keypoint/tinypose_128x96.yml --slim_config _template_fpgm --export_serving_model True -o\n+distill_export:null\n+export1:null\n+export2:null\n+kl_quant_export:tools/post_quant.py -c test_tipc/configs/keypoint/tinypose_128x96.yml --slim_config configs/slim/post_quant/tinypose_128x96_ptq.yml --export_serving_model True -o\n+##\n+infer_mode:null\n+infer_quant:True\n+--model:null\n+--op:tinypose_128x96\n+--port:9997\n+--gpu_ids:null|0\n+null:null\n+http_client:deploy/serving/cpp/serving_client.py\n+--serving_client:null\n+--image_file:./demo/hrnet_demo.jpg\n+null:null\n\\ No newline at end of file\n" }, { "change_type": "ADD", "old_path": null, "new_path": "test_tipc/configs/keypoint/tinypose_128x96_KL_model_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt", "diff": "+===========================serving_infer_python_params===========================\n+model_name:tinypose_128x96_KL\n+python:python3.7\n+filename:null\n+##\n+--output_dir:./output_inference\n+weights:https://paddledet.bj.bcebos.com/models/keypoint/tinypose_128x96.pdparams\n+norm_export:tools/export_model.py -c test_tipc/configs/keypoint/tinypose_128x96.yml --export_serving_model True -o\n+quant_export:tools/export_model.py -c test_tipc/configs/keypoint/tinypose_128x96.yml --slim_config _template_pact --export_serving_model True -o\n+fpgm_export:tools/export_model.py -c test_tipc/configs/keypoint/tinypose_128x96.yml --slim_config _template_fpgm --export_serving_model True 
-o\n+distill_export:null\n+export1:null\n+export2:null\n+kl_quant_export:tools/post_quant.py -c test_tipc/configs/keypoint/tinypose_128x96.yml --slim_config configs/slim/post_quant/tinypose_128x96_ptq.yml --export_serving_model True -o\n+##\n+infer_mode:null\n+infer_quant:True\n+web_service:deploy/serving/python/web_service.py --config=deploy/serving/python/config.yml\n+--model_dir:null\n+--opt:cpu:op.ppdet.local_service_conf.device_type=0|gpu:op.ppdet.local_service_conf.device_type=1\n+null:null\n+http_client:deploy/serving/python/pipeline_http_client.py\n+--image_file:./demo/hrnet_demo.jpg\n+null:null\n\\ No newline at end of file\n" }, { "change_type": "MODIFY", "old_path": "test_tipc/prepare.sh", "new_path": "test_tipc/prepare.sh", "diff": "@@ -58,6 +58,10 @@ elif [ ${MODE} = \"cpp_infer\" ];then\nwget -nc -P ./output_inference/mask_rcnn_r50_fpn_1x_coco_KL/ https://bj.bcebos.com/v1/paddledet/data/tipc/models/mask_rcnn_r50_fpn_1x_coco_ptq.tar --no-check-certificate\ncd ./output_inference/mask_rcnn_r50_fpn_1x_coco_KL/ && tar -xvf mask_rcnn_r50_fpn_1x_coco_ptq.tar && mv -n mask_rcnn_r50_fpn_1x_coco_ptq/* .\ncd ../../\n+ elif [[ ${model_name} = \"tinypose_128x96_KL\" ]]; then\n+ wget -nc -P ./output_inference/tinypose_128x96_KL/ https://bj.bcebos.com/v1/paddledet/data/tipc/models/tinypose_128x96_ptq.tar --no-check-certificate\n+ cd ./output_inference/tinypose_128x96_KL/ && tar -xvf tinypose_128x96_ptq.tar && mv -n tinypose_128x96_ptq/* .\n+ cd ../../\nfi\n# download mot lite data\nwget -nc -P ./dataset/mot/ https://paddledet.bj.bcebos.com/data/tipc/mot_tipc.tar --no-check-certificate\n@@ -124,6 +128,11 @@ elif [ ${MODE} = \"serving_infer\" ];then\ncd ./output_inference/mask_rcnn_r50_fpn_1x_coco_KL/ && tar -xvf mask_rcnn_r50_fpn_1x_coco_ptq.tar && mv -n mask_rcnn_r50_fpn_1x_coco_ptq/* .\ncd ../../\neval \"${python} -m paddle_serving_client.convert --dirname output_inference/mask_rcnn_r50_fpn_1x_coco_KL/ --model_filename model.pdmodel --params_filename model.pdiparams --serving_server output_inference/mask_rcnn_r50_fpn_1x_coco_KL/serving_server --serving_client output_inference/mask_rcnn_r50_fpn_1x_coco_KL/serving_client\"\n+ elif [[ ${model_name} = \"tinypose_128x96_KL\" ]]; then\n+ wget -nc -P ./output_inference/tinypose_128x96_KL/ https://bj.bcebos.com/v1/paddledet/data/tipc/models/tinypose_128x96_ptq.tar --no-check-certificate\n+ cd ./output_inference/tinypose_128x96_KL/ && tar -xvf tinypose_128x96_ptq.tar && mv -n tinypose_128x96_ptq/* .\n+ cd ../../\n+ eval \"${python} -m paddle_serving_client.convert --dirname output_inference/tinypose_128x96_KL/ --model_filename model.pdmodel --params_filename model.pdiparams --serving_server output_inference/tinypose_128x96_KL/serving_server --serving_client output_inference/tinypose_128x96_KL/serving_client\"\nfi\nelse\n# download coco lite data\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
[TIPC] add tinypose_KL txt, test=document_fix (#6384)
499,301
11.07.2022 16:42:48
-28,800
e6d4d2bc7ba5eb4aa543e3439fa4e24cdd68d028
fix export_model for swin
[ { "change_type": "MODIFY", "old_path": "configs/faster_rcnn/_base_/faster_rcnn_swin_reader.yml", "new_path": "configs/faster_rcnn/_base_/faster_rcnn_swin_reader.yml", "diff": "@@ -30,14 +30,12 @@ EvalReader:\nTestReader:\ninputs_def:\n- image_shape: [1, 3, 640, 640]\n+ image_shape: [-1, 3, 640, 640]\nsample_transforms:\n- Decode: {}\n- - Resize: {interp: 2, target_size: [640, 640], keep_ratio: True}\n+ - LetterBoxResize: {target_size: 640}\n- NormalizeImage: {is_scale: true, mean: [0.485,0.456,0.406], std: [0.229, 0.224,0.225]}\n- Permute: {}\n- batch_transforms:\n- - PadBatch: {pad_to_stride: 32}\nbatch_size: 1\nshuffle: false\ndrop_last: false\n" }, { "change_type": "MODIFY", "old_path": "ppdet/modeling/backbones/swin_transformer.py", "new_path": "ppdet/modeling/backbones/swin_transformer.py", "diff": "@@ -20,7 +20,6 @@ MIT License [see LICENSE for details]\nimport paddle\nimport paddle.nn as nn\nimport paddle.nn.functional as F\n-from paddle.nn.initializer import TruncatedNormal, Constant, Assign\nfrom ppdet.modeling.shape_spec import ShapeSpec\nfrom ppdet.core.workspace import register, serializable\nimport numpy as np\n@@ -64,7 +63,7 @@ def window_partition(x, window_size):\n\"\"\"\nB, H, W, C = x.shape\nx = x.reshape(\n- [B, H // window_size, window_size, W // window_size, window_size, C])\n+ [-1, H // window_size, window_size, W // window_size, window_size, C])\nwindows = x.transpose([0, 1, 3, 2, 4, 5]).reshape(\n[-1, window_size, window_size, C])\nreturn windows\n@@ -80,10 +79,11 @@ def window_reverse(windows, window_size, H, W):\nReturns:\nx: (B, H, W, C)\n\"\"\"\n+ _, _, _, C = windows.shape\nB = int(windows.shape[0] / (H * W / window_size / window_size))\nx = windows.reshape(\n- [B, H // window_size, W // window_size, window_size, window_size, -1])\n- x = x.transpose([0, 1, 3, 2, 4, 5]).reshape([B, H, W, -1])\n+ [-1, H // window_size, W // window_size, window_size, window_size, C])\n+ x = x.transpose([0, 1, 3, 2, 4, 5]).reshape([-1, H, W, C])\nreturn x\n@@ -158,14 +158,14 @@ class WindowAttention(nn.Layer):\n\"\"\"\nB_, N, C = x.shape\nqkv = self.qkv(x).reshape(\n- [B_, N, 3, self.num_heads, C // self.num_heads]).transpose(\n+ [-1, N, 3, self.num_heads, C // self.num_heads]).transpose(\n[2, 0, 3, 1, 4])\nq, k, v = qkv[0], qkv[1], qkv[2]\nq = q * self.scale\nattn = paddle.mm(q, k.transpose([0, 1, 3, 2]))\n- index = self.relative_position_index.reshape([-1])\n+ index = self.relative_position_index.flatten()\nrelative_position_bias = paddle.index_select(\nself.relative_position_bias_table, index)\n@@ -179,7 +179,7 @@ class WindowAttention(nn.Layer):\nif mask is not None:\nnW = mask.shape[0]\n- attn = attn.reshape([B_ // nW, nW, self.num_heads, N, N\n+ attn = attn.reshape([-1, nW, self.num_heads, N, N\n]) + mask.unsqueeze(1).unsqueeze(0)\nattn = attn.reshape([-1, self.num_heads, N, N])\nattn = self.softmax(attn)\n@@ -189,7 +189,7 @@ class WindowAttention(nn.Layer):\nattn = self.attn_drop(attn)\n# x = (attn @ v).transpose(1, 2).reshape([B_, N, C])\n- x = paddle.mm(attn, v).transpose([0, 2, 1, 3]).reshape([B_, N, C])\n+ x = paddle.mm(attn, v).transpose([0, 2, 1, 3]).reshape([-1, N, C])\nx = self.proj(x)\nx = self.proj_drop(x)\nreturn x\n@@ -267,7 +267,7 @@ class SwinTransformerBlock(nn.Layer):\nshortcut = x\nx = self.norm1(x)\n- x = x.reshape([B, H, W, C])\n+ x = x.reshape([-1, H, W, C])\n# pad feature maps to multiples of window size\npad_l = pad_t = 0\n@@ -289,7 +289,7 @@ class SwinTransformerBlock(nn.Layer):\nx_windows = window_partition(\nshifted_x, self.window_size) # nW*B, 
window_size, window_size, C\nx_windows = x_windows.reshape(\n- [-1, self.window_size * self.window_size,\n+ [x_windows.shape[0], self.window_size * self.window_size,\nC]) # nW*B, window_size*window_size, C\n# W-MSA/SW-MSA\n@@ -298,7 +298,7 @@ class SwinTransformerBlock(nn.Layer):\n# merge windows\nattn_windows = attn_windows.reshape(\n- [-1, self.window_size, self.window_size, C])\n+ [x_windows.shape[0], self.window_size, self.window_size, C])\nshifted_x = window_reverse(attn_windows, self.window_size, Hp,\nWp) # B H' W' C\n@@ -314,7 +314,7 @@ class SwinTransformerBlock(nn.Layer):\nif pad_r > 0 or pad_b > 0:\nx = x[:, :H, :W, :]\n- x = x.reshape([B, H * W, C])\n+ x = x.reshape([-1, H * W, C])\n# FFN\nx = shortcut + self.drop_path(x)\n@@ -345,7 +345,7 @@ class PatchMerging(nn.Layer):\nB, L, C = x.shape\nassert L == H * W, \"input feature has wrong size\"\n- x = x.reshape([B, H, W, C])\n+ x = x.reshape([-1, H, W, C])\n# padding\npad_input = (H % 2 == 1) or (W % 2 == 1)\n@@ -357,7 +357,7 @@ class PatchMerging(nn.Layer):\nx2 = x[:, 0::2, 1::2, :] # B H/2 W/2 C\nx3 = x[:, 1::2, 1::2, :] # B H/2 W/2 C\nx = paddle.concat([x0, x1, x2, x3], -1) # B H/2 W/2 4*C\n- x = x.reshape([B, H * W // 4, 4 * C]) # B H/2*W/2 4*C\n+ x = x.reshape([-1, H * W // 4, 4 * C]) # B H/2*W/2 4*C\nx = self.norm(x)\nx = self.reduction(x)\n@@ -664,7 +664,7 @@ class SwinTransformer(nn.Layer):\ndef forward(self, x):\n\"\"\"Forward function.\"\"\"\nx = self.patch_embed(x['image'])\n- _, _, Wh, Ww = x.shape\n+ B, _, Wh, Ww = x.shape\nif self.ape:\n# interpolate the position embedding to the corresponding size\nabsolute_pos_embed = F.interpolate(\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
fix export_model for swin (#6399)
499,339
13.07.2022 12:40:43
-28,800
6c59641e92cb5754dcc21e09e1a956fb81e01678
[dev] add amp eval cast dtype in load_pretrain_weight
[ { "change_type": "MODIFY", "old_path": "ppdet/engine/trainer.py", "new_path": "ppdet/engine/trainer.py", "diff": "@@ -65,6 +65,8 @@ class Trainer(object):\nself.mode = mode.lower()\nself.optimizer = None\nself.is_loaded_weights = False\n+ self.use_amp = self.cfg.get('amp', False)\n+ self.amp_level = self.cfg.get('amp_level', 'O1')\n# build data loader\ncapital_mode = self.mode.capitalize()\n@@ -124,17 +126,6 @@ class Trainer(object):\nelse:\nself.model.load_meanstd(cfg['TestReader']['sample_transforms'])\n- self.use_ema = ('use_ema' in cfg and cfg['use_ema'])\n- if self.use_ema:\n- ema_decay = self.cfg.get('ema_decay', 0.9998)\n- cycle_epoch = self.cfg.get('cycle_epoch', -1)\n- ema_decay_type = self.cfg.get('ema_decay_type', 'threshold')\n- self.ema = ModelEMA(\n- self.model,\n- decay=ema_decay,\n- ema_decay_type=ema_decay_type,\n- cycle_epoch=cycle_epoch)\n-\n# EvalDataset build with BatchSampler to evaluate in single device\n# TODO: multi-device evaluate\nif self.mode == 'eval':\n@@ -162,6 +153,20 @@ class Trainer(object):\nself.pruner = create('UnstructuredPruner')(self.model,\nsteps_per_epoch)\n+ if self.use_amp and self.amp_level == 'O2':\n+ self.model = paddle.amp.decorate(\n+ models=self.model, level=self.amp_level)\n+ self.use_ema = ('use_ema' in cfg and cfg['use_ema'])\n+ if self.use_ema:\n+ ema_decay = self.cfg.get('ema_decay', 0.9998)\n+ cycle_epoch = self.cfg.get('cycle_epoch', -1)\n+ ema_decay_type = self.cfg.get('ema_decay_type', 'threshold')\n+ self.ema = ModelEMA(\n+ self.model,\n+ decay=ema_decay,\n+ ema_decay_type=ema_decay_type,\n+ cycle_epoch=cycle_epoch)\n+\nself._nranks = dist.get_world_size()\nself._local_rank = dist.get_rank()\n@@ -387,13 +392,10 @@ class Trainer(object):\nmodel = paddle.nn.SyncBatchNorm.convert_sync_batchnorm(model)\n# enabel auto mixed precision mode\n- use_amp = self.cfg.get('amp', False)\n- amp_level = self.cfg.get('amp_level', 'O1')\n- if use_amp:\n+ if self.use_amp:\nscaler = paddle.amp.GradScaler(\nenable=self.cfg.use_gpu or self.cfg.use_npu,\ninit_loss_scaling=self.cfg.get('init_loss_scaling', 1024))\n- model = paddle.amp.decorate(models=model, level=amp_level)\n# get distributed model\nif self.cfg.get('fleet', False):\nmodel = fleet.distributed_model(model)\n@@ -438,9 +440,9 @@ class Trainer(object):\nself._compose_callback.on_step_begin(self.status)\ndata['epoch_id'] = epoch_id\n- if use_amp:\n+ if self.use_amp:\nwith paddle.amp.auto_cast(\n- enable=self.cfg.use_gpu, level=amp_level):\n+ enable=self.cfg.use_gpu, level=self.amp_level):\n# model forward\noutputs = model(data)\nloss = outputs['loss']\n@@ -532,6 +534,11 @@ class Trainer(object):\nself.status['step_id'] = step_id\nself._compose_callback.on_step_begin(self.status)\n# forward\n+ if self.use_amp:\n+ with paddle.amp.auto_cast(\n+ enable=self.cfg.use_gpu, level=self.amp_level):\n+ outs = self.model(data)\n+ else:\nouts = self.model(data)\n# update metrics\n" }, { "change_type": "MODIFY", "old_path": "ppdet/modeling/assigners/utils.py", "new_path": "ppdet/modeling/assigners/utils.py", "diff": "@@ -176,7 +176,8 @@ def compute_max_iou_gt(ious):\ndef generate_anchors_for_grid_cell(feats,\nfpn_strides,\ngrid_cell_size=5.0,\n- grid_cell_offset=0.5):\n+ grid_cell_offset=0.5,\n+ dtype='float32'):\nr\"\"\"\nLike ATSS, generate anchors based on grid size.\nArgs:\n@@ -206,16 +207,15 @@ def generate_anchors_for_grid_cell(feats,\nshift_x - cell_half_size, shift_y - cell_half_size,\nshift_x + cell_half_size, shift_y + cell_half_size\n],\n- axis=-1).astype(feat.dtype)\n- anchor_point = 
paddle.stack(\n- [shift_x, shift_y], axis=-1).astype(feat.dtype)\n+ axis=-1).astype(dtype)\n+ anchor_point = paddle.stack([shift_x, shift_y], axis=-1).astype(dtype)\nanchors.append(anchor.reshape([-1, 4]))\nanchor_points.append(anchor_point.reshape([-1, 2]))\nnum_anchors_list.append(len(anchors[-1]))\nstride_tensor.append(\npaddle.full(\n- [num_anchors_list[-1], 1], stride, dtype=feat.dtype))\n+ [num_anchors_list[-1], 1], stride, dtype=dtype))\nanchors = paddle.concat(anchors)\nanchors.stop_gradient = True\nanchor_points = paddle.concat(anchor_points)\n" }, { "change_type": "MODIFY", "old_path": "ppdet/modeling/heads/ppyoloe_head.py", "new_path": "ppdet/modeling/heads/ppyoloe_head.py", "diff": "@@ -160,7 +160,7 @@ class PPYOLOEHead(nn.Layer):\nnum_anchors_list, stride_tensor\n], targets)\n- def _generate_anchors(self, feats=None):\n+ def _generate_anchors(self, feats=None, dtype='float32'):\n# just use in eval time\nanchor_points = []\nstride_tensor = []\n@@ -175,11 +175,9 @@ class PPYOLOEHead(nn.Layer):\nshift_y, shift_x = paddle.meshgrid(shift_y, shift_x)\nanchor_point = paddle.cast(\npaddle.stack(\n- [shift_x, shift_y], axis=-1), dtype='float32')\n+ [shift_x, shift_y], axis=-1), dtype=dtype)\nanchor_points.append(anchor_point.reshape([-1, 2]))\n- stride_tensor.append(\n- paddle.full(\n- [h * w, 1], stride, dtype='float32'))\n+ stride_tensor.append(paddle.full([h * w, 1], stride, dtype=dtype))\nanchor_points = paddle.concat(anchor_points)\nstride_tensor = paddle.concat(stride_tensor)\nreturn anchor_points, stride_tensor\n" }, { "change_type": "MODIFY", "old_path": "tools/eval.py", "new_path": "tools/eval.py", "diff": "@@ -77,6 +77,12 @@ def parse_args():\ndefault=False,\nhelp='Whether to save the evaluation results only')\n+ parser.add_argument(\n+ \"--amp\",\n+ action='store_true',\n+ default=False,\n+ help=\"Enable auto mixed precision eval.\")\n+\nargs = parser.parse_args()\nreturn args\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
[dev] add amp eval (#6400) cast dtype in load_pretrain_weight
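Note: a minimal sketch of the mixed-precision evaluation path this commit enables, assuming the usual paddle.amp.auto_cast API; the surrounding function and argument names are illustrative:

import paddle

def eval_step(model, data, use_amp=False, amp_level="O1", use_gpu=True):
    model.eval()
    with paddle.no_grad():
        if use_amp:
            # Cast the forward pass under AMP only when it is enabled.
            with paddle.amp.auto_cast(enable=use_gpu, level=amp_level):
                outs = model(data)
        else:
            outs = model(data)
    return outs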
499,339
13.07.2022 16:51:15
-28,800
70d00f6fa713383b1b17e249f55575ed57eaa6c0
[dev] fix bug in _download_dist
[ { "change_type": "MODIFY", "old_path": "ppdet/utils/download.py", "new_path": "ppdet/utils/download.py", "diff": "@@ -393,7 +393,12 @@ def _download(url, path, md5sum=None):\ndef _download_dist(url, path, md5sum=None):\nenv = os.environ\nif 'PADDLE_TRAINERS_NUM' in env and 'PADDLE_TRAINER_ID' in env:\n- trainer_id = int(env['PADDLE_TRAINER_ID'])\n+ # Mainly used to solve the problem of downloading data from\n+ # different machines in the case of multiple machines.\n+ # Different nodes will download data, and the same node\n+ # will only download data once.\n+ # Reference https://github.com/PaddlePaddle/PaddleClas/blob/develop/ppcls/utils/download.py#L108\n+ rank_id_curr_node = int(os.environ.get(\"PADDLE_RANK_IN_NODE\", 0))\nnum_trainers = int(env['PADDLE_TRAINERS_NUM'])\nif num_trainers <= 1:\nreturn _download(url, path, md5sum)\n@@ -406,12 +411,9 @@ def _download_dist(url, path, md5sum=None):\nos.makedirs(path)\nif not osp.exists(fullname):\n- from paddle.distributed import ParallelEnv\n- unique_endpoints = _get_unique_endpoints(ParallelEnv()\n- .trainer_endpoints[:])\nwith open(lock_path, 'w'): # touch\nos.utime(lock_path, None)\n- if ParallelEnv().current_endpoint in unique_endpoints:\n+ if rank_id_curr_node == 0:\n_download(url, path, md5sum)\nos.remove(lock_path)\nelse:\n" }, { "change_type": "MODIFY", "old_path": "test_tipc/test_train_inference_python.sh", "new_path": "test_tipc/test_train_inference_python.sh", "diff": "@@ -262,7 +262,7 @@ else\ncontinue\nfi\n- if [ ${autocast} = \"amp\" ]; then\n+ if [ ${autocast} = \"amp\" ] || [ ${autocast} = \"fp16\" ]; then\nset_autocast=\"--amp\"\nelse\nset_autocast=\" \"\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
[dev] fix bug in _download_dist (#6419)
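Note: a simplified sketch of the per-node download guard described in the diff — only the first rank on each node (identified by the PADDLE_RANK_IN_NODE environment variable) fetches the file, and the other ranks wait for it to appear; the polling scheme below is an assumption, not the repository's lock-file implementation:

import os
import time

def download_once_per_node(url, fullname, download_fn):
    rank_in_node = int(os.environ.get("PADDLE_RANK_IN_NODE", 0))
    if rank_in_node == 0:
        download_fn(url, fullname)               # assumed to write fullname atomically
    else:
        while not os.path.exists(fullname):      # remaining ranks on the node wait
            time.sleep(0.5)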
499,405
13.07.2022 21:44:19
-28,800
5e02a81af77a9a4ecd1e394430c4396b48bc76fd
remove one line redundant code
[ { "change_type": "MODIFY", "old_path": "ppdet/modeling/layers.py", "new_path": "ppdet/modeling/layers.py", "diff": "@@ -749,7 +749,6 @@ class TTFBox(object):\n# batch size is 1\nscores_r = paddle.reshape(scores, [cat, -1])\ntopk_scores, topk_inds = paddle.topk(scores_r, k)\n- topk_scores, topk_inds = paddle.topk(scores_r, k)\ntopk_ys = topk_inds // width\ntopk_xs = topk_inds % width\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
remove one line redundant code (#6424)
499,339
14.07.2022 14:10:27
-28,800
4a8fe37080d928bf8cc3c9f49946b7d52b2f3974
[dev] fix some deadlink in tipc and deploy, test=document_fix
[ { "change_type": "MODIFY", "old_path": "deploy/README_en.md", "new_path": "deploy/README_en.md", "diff": "@@ -21,7 +21,7 @@ Use the `tools/export_model.py` script to export the model and the configuration\n# The YOLOv3 model is derived\npython tools/export_model.py -c configs/yolov3/yolov3_mobilenet_v1_roadsign.yml -o weights=output/yolov3_mobilenet_v1_roadsign/best_model.pdparams\n```\n-The prediction model will be exported to the `output_inference/yolov3_mobilenet_v1_roadsign` directory `infer_cfg.yml`, `model.pdiparams`, `model.pdiparams.info`, `model.pdmodel`. For details on model export, please refer to the documentation [Tutorial on Paddle Detection MODEL EXPORT](EXPORT_MODEL_sh.md).\n+The prediction model will be exported to the `output_inference/yolov3_mobilenet_v1_roadsign` directory `infer_cfg.yml`, `model.pdiparams`, `model.pdiparams.info`, `model.pdmodel`. For details on model export, please refer to the documentation [Tutorial on Paddle Detection MODEL EXPORT](./EXPORT_MODEL_en.md).\n### 1.2 Use Paddle Inference to Make Predictions\n* Python deployment supports `CPU`, `GPU` and `XPU` environments, Windows, Linux, and NV Jetson embedded devices. Reference Documentation [Python Deployment](python/README.md)\n@@ -39,7 +39,7 @@ python tools/export_model.py -c configs/yolov3/yolov3_mobilenet_v1_roadsign.yml\n```\nThe prediction model will be exported to the `output_inference/yolov3_darknet53_270e_coco` directory `infer_cfg.yml`, `model.pdiparams`, `model.pdiparams.info`, `model.pdmodel`, `serving_client/` and `serving_server/` folder.\n-For details on model export, please refer to the documentation [Tutorial on Paddle Detection MODEL EXPORT](EXPORT_MODEL_en.md).\n+For details on model export, please refer to the documentation [Tutorial on Paddle Detection MODEL EXPORT](./EXPORT_MODEL_en.md).\n### 2.2 Predictions are made using Paddle Serving\n* [Install PaddleServing](https://github.com/PaddlePaddle/Serving/blob/develop/README.md#installation)\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
[dev] fix some deadlink in tipc and deploy, test=document_fix (#6431)
499,319
14.07.2022 14:52:26
-28,800
99f891bebc3113bf6720fc7dbcac2a0e2144b774
[doc] deadlinks fix
[ { "change_type": "MODIFY", "old_path": "configs/mot/README_en.md", "new_path": "configs/mot/README_en.md", "diff": "@@ -79,7 +79,7 @@ PaddleDetection implement [JDE](https://github.com/Zhongdao/Towards-Realtime-MOT\n**Notes:**\n- Multi-Object Tracking(MOT) datasets are always used for single category tracking. DeepSORT, JDE and FairMOT are single category MOT models. 'MIX' dataset and it's sub datasets are also single category pedestrian tracking datasets. It can be considered that there are additional IDs ground truth for detection datasets.\n-- In order to train the feature models of more scenes, more datasets are also processed into the same format as the MIX dataset. PaddleDetection Team also provides feature datasets and models of [vehicle tracking](vehicle/readme.md), [head tracking](headtracking21/readme.md) and more general [pedestrian tracking](pedestrian/readme.md). User defined datasets can also be prepared by referring to data preparation [doc](../../docs/tutorials/PrepareMOTDataSet.md).\n+- In order to train the feature models of more scenes, more datasets are also processed into the same format as the MIX dataset. PaddleDetection Team also provides feature datasets and models of [vehicle tracking](vehicle/README.md), [head tracking](headtracking21/README.md) and more general [pedestrian tracking](pedestrian/README.md). User defined datasets can also be prepared by referring to data preparation [doc](../../docs/tutorials/data/PrepareMOTDataSet.md).\n- The multipe category MOT model is [MCFairMOT] (mcfairmot/readme_cn.md), and the multi category dataset is the integrated version of VisDrone dataset. Please refer to the doc of [MCFairMOT](mcfairmot/README.md).\n- The Multi-Target Multi-Camera Tracking (MTMCT) model is [AIC21 MTMCT](https://www.aicitychallenge.org)(CityFlow) Multi-Camera Vehicle Tracking dataset. The dataset and model can refer to the doc of [MTMCT](mtmct/README.md)\n" }, { "change_type": "MODIFY", "old_path": "docs/tutorials/GETTING_STARTED.md", "new_path": "docs/tutorials/GETTING_STARTED.md", "diff": "@@ -11,7 +11,7 @@ instructions](INSTALL_cn.md).\n## Data preparation\n-- Please refer to [PrepareDetDataSet](PrepareDetDataSet_en.md) for data preparation\n+- Please refer to [PrepareDetDataSet](./data/PrepareDetDataSet_en.md) for data preparation\n- Please set the data path for data configuration file in ```configs/datasets```\n## Training & Evaluation & Inference\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
[doc] deadlinks fix (#6434)
499,339
14.07.2022 19:38:35
-28,800
a2b3d3c0a1ca427db8296d6ece40e116b085a696
[TIPC] add dist train txt, test=document_fix
[ { "change_type": "ADD", "old_path": null, "new_path": "test_tipc/configs/keypoint/tinypose_128x96_train_linux_gpu_fleet_normal_infer_python_linux_gpu_cpu.txt", "diff": "+===========================train_params===========================\n+model_name:tinypose_128x96\n+python:python3.7\n+gpu_list:192.168.0.1,192.168.0.2;0,1\n+use_gpu:True\n+auto_cast:null\n+epoch:lite_train_lite_infer=1|lite_train_whole_infer=1|whole_train_whole_infer=420\n+save_dir:null\n+TrainReader.batch_size:lite_train_lite_infer=2|lite_train_whole_infer=2|whole_train_whole_infer=512\n+pretrain_weights:https://paddledet.bj.bcebos.com/models/keypoint/tinypose_128x96.pdparams\n+trained_model_name:model_final.pdparams\n+train_infer_img_dir:./dataset/coco/test2017/\n+filename:null\n+##\n+trainer:norm_train\n+norm_train:tools/train.py -c test_tipc/configs/keypoint/tinypose_128x96.yml -o\n+pact_train:tools/train.py -c test_tipc/configs/keypoint/tinypose_128x96.yml --slim_config _template_pact -o\n+fpgm_train:tools/train.py -c test_tipc/configs/keypoint/tinypose_128x96.yml --slim_config _template_fpgm -o\n+distill_train:null\n+null:null\n+null:null\n+##\n+===========================eval_params===========================\n+eval:tools/eval.py -c test_tipc/configs/keypoint/tinypose_128x96.yml -o\n+null:null\n+##\n+===========================infer_params===========================\n+--output_dir:./output_inference\n+weights:https://paddledet.bj.bcebos.com/models/keypoint/tinypose_128x96.pdparams\n+norm_export:tools/export_model.py -c test_tipc/configs/keypoint/tinypose_128x96.yml -o\n+pact_export:tools/export_model.py -c test_tipc/configs/keypoint/tinypose_128x96.yml --slim_config _template_pact -o\n+fpgm_export:tools/export_model.py -c test_tipc/configs/keypoint/tinypose_128x96.yml --slim_config _template_fpgm -o\n+distill_export:null\n+export1:null\n+export2:null\n+kl_quant_export:tools/post_quant.py -c test_tipc/configs/keypoint/tinypose_128x96.yml --slim_config configs/slim/post_quant/tinypose_128x96_ptq.yml -o\n+##\n+infer_mode:norm\n+infer_quant:False\n+inference:./deploy/python/keypoint_infer.py\n+--device:cpu\n+--enable_mkldnn:False\n+--cpu_threads:4\n+--batch_size:1|2\n+--use_tensorrt:null\n+--run_mode:paddle\n+--model_dir:\n+--image_dir:./dataset/coco/test2017/\n+--save_log_path:null\n+--run_benchmark:False\n+null:null\n\\ No newline at end of file\n" }, { "change_type": "ADD", "old_path": null, "new_path": "test_tipc/configs/mask_rcnn/mask_rcnn_r50_fpn_1x_coco_train_linux_gpu_fleet_normal_infer_python_linux_gpu_cpu.txt", "diff": "+===========================train_params===========================\n+model_name:mask_rcnn_r50_fpn_1x_coco\n+python:python3.7\n+gpu_list:192.168.0.1,192.168.0.2;0,1\n+use_gpu:True\n+auto_cast:null\n+epoch:lite_train_lite_infer=1|lite_train_whole_infer=1|whole_train_whole_infer=12\n+save_dir:null\n+TrainReader.batch_size:lite_train_lite_infer=2|lite_train_whole_infer=2|whole_train_whole_infer=1\n+pretrain_weights:https://paddledet.bj.bcebos.com/models/mask_rcnn_r50_fpn_1x_coco.pdparams\n+trained_model_name:model_final.pdparams\n+train_infer_img_dir:./dataset/coco/test2017/\n+filename:null\n+##\n+trainer:norm_train\n+norm_train:tools/train.py -c configs/mask_rcnn/mask_rcnn_r50_fpn_1x_coco.yml -o\n+pact_train:tools/train.py -c configs/mask_rcnn/mask_rcnn_r50_fpn_1x_coco.yml --slim_config _template_pact -o\n+fpgm_train:tools/train.py -c configs/mask_rcnn/mask_rcnn_r50_fpn_1x_coco.yml --slim_config _template_fpgm 
-o\n+distill_train:null\n+null:null\n+null:null\n+##\n+===========================eval_params===========================\n+eval:tools/eval.py -c configs/mask_rcnn/mask_rcnn_r50_fpn_1x_coco.yml -o\n+null:null\n+##\n+===========================infer_params===========================\n+--output_dir:./output_inference\n+weights:https://paddledet.bj.bcebos.com/models/mask_rcnn_r50_fpn_1x_coco.pdparams\n+norm_export:tools/export_model.py -c configs/mask_rcnn/mask_rcnn_r50_fpn_1x_coco.yml -o\n+pact_export:tools/export_model.py -c configs/mask_rcnn/mask_rcnn_r50_fpn_1x_coco.yml --slim_config _template_pact -o\n+fpgm_export:tools/export_model.py -c configs/mask_rcnn/mask_rcnn_r50_fpn_1x_coco.yml --slim_config _template_fpgm -o\n+distill_export:null\n+export1:null\n+export_onnx:null\n+kl_quant_export:tools/post_quant.py -c configs/mask_rcnn/mask_rcnn_r50_fpn_1x_coco.yml --slim_config configs/slim/post_quant/yolov3_darknet53_ptq.yml -o\n+##\n+infer_mode:norm\n+infer_quant:False\n+inference:./deploy/python/infer.py\n+--device:cpu\n+--enable_mkldnn:False\n+--cpu_threads:4\n+--batch_size:1\n+--use_tensorrt:null\n+--run_mode:paddle\n+--model_dir:\n+--image_dir:./dataset/coco/test2017/\n+--save_log_path:null\n+--run_benchmark:False\n+--trt_max_shape:1600\n\\ No newline at end of file\n" }, { "change_type": "ADD", "old_path": null, "new_path": "test_tipc/configs/picodet/picodet_lcnet_1_5x_416_coco_train_linux_gpu_fleet_normal_infer_python_linux_gpu_cpu.txt", "diff": "+===========================train_params===========================\n+model_name:picodet_lcnet_1_5x_416_coco\n+python:python3.7\n+gpu_list:192.168.0.1,192.168.0.2;0,1\n+use_gpu:True\n+auto_cast:null\n+epoch:lite_train_lite_infer=1|lite_train_whole_infer=1|whole_train_whole_infer=300\n+save_dir:null\n+TrainReader.batch_size:lite_train_lite_infer=2|lite_train_whole_infer=2|whole_train_whole_infer=80\n+pretrain_weights:https://paddledet.bj.bcebos.com/models/picodet_lcnet_1_5x_416_coco.pdparams\n+trained_model_name:model_final.pdparams\n+train_infer_img_dir:./dataset/coco/test2017/\n+filename:null\n+##\n+trainer:norm_train\n+norm_train:tools/train.py -c configs/picodet/legacy_model/more_config/picodet_lcnet_1_5x_416_coco.yml -o\n+pact_train:tools/train.py -c configs/picodet/legacy_model/more_config/picodet_lcnet_1_5x_416_coco.yml --slim_config _template_pact -o\n+fpgm_train:tools/train.py -c configs/picodet/legacy_model/more_config/picodet_lcnet_1_5x_416_coco.yml --slim_config _template_fpgm -o\n+distill_train:null\n+null:null\n+null:null\n+##\n+===========================eval_params===========================\n+eval:tools/eval.py -c configs/picodet/legacy_model/more_config/picodet_lcnet_1_5x_416_coco.yml -o\n+null:null\n+##\n+===========================infer_params===========================\n+--output_dir:./output_inference\n+weights:https://paddledet.bj.bcebos.com/models/picodet_lcnet_1_5x_416_coco.pdparams\n+norm_export:tools/export_model.py -c configs/picodet/legacy_model/more_config/picodet_lcnet_1_5x_416_coco.yml -o\n+pact_export:tools/export_model.py -c configs/picodet/legacy_model/more_config/picodet_lcnet_1_5x_416_coco.yml --slim_config _template_pact -o\n+fpgm_export:tools/export_model.py -c configs/picodet/legacy_model/more_config/picodet_lcnet_1_5x_416_coco.yml --slim_config _template_fpgm -o\n+distill_export:null\n+export1:null\n+export2:null\n+kl_quant_export:tools/post_quant.py -c configs/picodet/legacy_model/more_config/picodet_lcnet_1_5x_416_coco.yml --slim_config configs/slim/post_quant/yolov3_darknet53_ptq.yml 
-o\n+##\n+infer_mode:norm\n+infer_quant:False\n+inference:./deploy/python/infer.py\n+--device:cpu\n+--enable_mkldnn:False\n+--cpu_threads:4\n+--batch_size:1\n+--use_tensorrt:null\n+--run_mode:paddle\n+--model_dir:\n+--image_dir:./dataset/coco/test2017/\n+--save_log_path:null\n+--run_benchmark:False\n+null:null\n\\ No newline at end of file\n" }, { "change_type": "ADD", "old_path": null, "new_path": "test_tipc/configs/picodet/picodet_s_320_coco_lcnet_train_linux_gpu_fleet_normal_infer_python_linux_gpu_cpu.txt", "diff": "+===========================train_params===========================\n+model_name:picodet_s_320_coco_lcnet\n+python:python3.7\n+gpu_list:192.168.0.1,192.168.0.2;0,1\n+use_gpu:True\n+auto_cast:null\n+epoch:lite_train_lite_infer=1|lite_train_whole_infer=1|whole_train_whole_infer=300\n+save_dir:null\n+TrainReader.batch_size:lite_train_lite_infer=2|lite_train_whole_infer=2|whole_train_whole_infer=128\n+pretrain_weights:https://paddledet.bj.bcebos.com/models/picodet_s_320_coco_lcnet.pdparams\n+trained_model_name:model_final.pdparams\n+train_infer_img_dir:./dataset/coco/test2017/\n+filename:null\n+##\n+trainer:norm_train\n+norm_train:tools/train.py -c configs/picodet/picodet_s_320_coco_lcnet.yml -o\n+pact_train:tools/train.py -c configs/picodet/picodet_s_320_coco_lcnet.yml --slim_config _template_pact -o\n+fpgm_train:tools/train.py -c configs/picodet/picodet_s_320_coco_lcnet.yml --slim_config _template_fpgm -o\n+distill_train:null\n+null:null\n+null:null\n+##\n+===========================eval_params===========================\n+eval:tools/eval.py -c configs/picodet/picodet_s_320_coco_lcnet.yml -o\n+null:null\n+##\n+===========================infer_params===========================\n+--output_dir:./output_inference\n+weights:https://paddledet.bj.bcebos.com/models/picodet_s_320_coco_lcnet.pdparams\n+norm_export:tools/export_model.py -c configs/picodet/picodet_s_320_coco_lcnet.yml -o\n+pact_export:tools/export_model.py -c configs/picodet/picodet_s_320_coco_lcnet.yml --slim_config _template_pact -o\n+fpgm_export:tools/export_model.py -c configs/picodet/picodet_s_320_coco_lcnet.yml --slim_config _template_fpgm -o\n+distill_export:null\n+export1:null\n+export2:null\n+kl_quant_export:tools/post_quant.py -c configs/picodet/picodet_s_320_coco_lcnet.yml --slim_config _template_kl_quant -o\n+##\n+infer_mode:norm\n+infer_quant:False\n+inference:./deploy/python/infer.py\n+--device:cpu\n+--enable_mkldnn:False\n+--cpu_threads:4\n+--batch_size:1\n+--use_tensorrt:null\n+--run_mode:paddle\n+--model_dir:\n+--image_dir:./dataset/coco/test2017/\n+--save_log_path:null\n+--run_benchmark:False\n+null:null\n\\ No newline at end of file\n" }, { "change_type": "ADD", "old_path": null, "new_path": "test_tipc/configs/picodet/picodet_s_320_coco_train_linux_gpu_fleet_normal_infer_python_linux_gpu_cpu.txt", "diff": "+===========================train_params===========================\n+model_name:picodet_s_320_coco\n+python:python3.7\n+gpu_list:192.168.0.1,192.168.0.2;0,1\n+use_gpu:True\n+auto_cast:null\n+epoch:lite_train_lite_infer=1|lite_train_whole_infer=1|whole_train_whole_infer=300\n+save_dir:null\n+TrainReader.batch_size:lite_train_lite_infer=2|lite_train_whole_infer=2|whole_train_whole_infer=128\n+pretrain_weights:https://paddledet.bj.bcebos.com/models/picodet_s_320_coco.pdparams\n+trained_model_name:model_final.pdparams\n+train_infer_img_dir:./dataset/coco/test2017/\n+filename:null\n+##\n+trainer:norm_train\n+norm_train:tools/train.py -c configs/picodet/legacy_model/picodet_s_320_coco.yml 
-o\n+pact_train:tools/train.py -c configs/picodet/legacy_model/picodet_s_320_coco.yml --slim_config _template_pact -o\n+fpgm_train:tools/train.py -c configs/picodet/legacy_model/picodet_s_320_coco.yml --slim_config _template_fpgm -o\n+distill_train:null\n+null:null\n+null:null\n+##\n+===========================eval_params===========================\n+eval:tools/eval.py -c configs/picodet/legacy_model/picodet_s_320_coco.yml -o\n+null:null\n+##\n+===========================infer_params===========================\n+--output_dir:./output_inference\n+weights:https://paddledet.bj.bcebos.com/models/picodet_s_320_coco.pdparams\n+norm_export:tools/export_model.py -c configs/picodet/legacy_model/picodet_s_320_coco.yml -o\n+pact_export:tools/export_model.py -c configs/picodet/legacy_model/picodet_s_320_coco.yml --slim_config _template_pact -o\n+fpgm_export:tools/export_model.py -c configs/picodet/legacy_model/picodet_s_320_coco.yml --slim_config _template_fpgm -o\n+distill_export:null\n+export1:null\n+export2:null\n+kl_quant_export:tools/post_quant.py -c configs/picodet/legacy_model/picodet_s_320_coco.yml --slim_config configs/slim/post_quant/yolov3_darknet53_ptq.yml -o\n+##\n+infer_mode:norm\n+infer_quant:False\n+inference:./deploy/python/infer.py\n+--device:cpu\n+--enable_mkldnn:False\n+--cpu_threads:4\n+--batch_size:1\n+--use_tensorrt:null\n+--run_mode:paddle\n+--model_dir:\n+--image_dir:./dataset/coco/test2017/\n+--save_log_path:null\n+--run_benchmark:False\n+null:null\n\\ No newline at end of file\n" }, { "change_type": "ADD", "old_path": null, "new_path": "test_tipc/configs/ppyolo/ppyolo_mbv3_large_coco_train_linux_gpu_fleet_normal_infer_python_linux_gpu_cpu.txt", "diff": "+===========================train_params===========================\n+model_name:ppyolo_mbv3_large_coco\n+python:python3.7\n+gpu_list:192.168.0.1,192.168.0.2;0,1\n+use_gpu:True\n+auto_cast:null\n+epoch:lite_train_lite_infer=1|lite_train_whole_infer=1|whole_train_whole_infer=405\n+save_dir:null\n+TrainReader.batch_size:lite_train_lite_infer=2|lite_train_whole_infer=2|whole_train_whole_infer=24\n+pretrain_weights:https://paddledet.bj.bcebos.com/models/ppyolo_mbv3_large_coco.pdparams\n+trained_model_name:model_final.pdparams\n+train_infer_img_dir:./dataset/coco/test2017/\n+filename:null\n+##\n+trainer:norm_train\n+norm_train:tools/train.py -c configs/ppyolo/ppyolo_mbv3_large_coco.yml -o\n+pact_train:tools/train.py -c configs/ppyolo/ppyolo_mbv3_large_coco.yml --slim_config configs/slim/quant/ppyolo_mbv3_large_qat.yml -o\n+fpgm_train:tools/train.py -c configs/ppyolo/ppyolo_mbv3_large_coco.yml --slim_config configs/slim/prune/ppyolo_mbv3_large_prune_fpgm.yml -o\n+distill_train:null\n+null:null\n+null:null\n+##\n+===========================eval_params===========================\n+eval:tools/eval.py -c configs/ppyolo/ppyolo_mbv3_large_coco.yml -o\n+null:null\n+##\n+===========================infer_params===========================\n+--output_dir:./output_inference\n+weights:https://paddledet.bj.bcebos.com/models/ppyolo_mbv3_large_coco.pdparams\n+norm_export:tools/export_model.py -c configs/ppyolo/ppyolo_mbv3_large_coco.yml -o\n+pact_export:tools/export_model.py -c configs/ppyolo/ppyolo_mbv3_large_coco.yml --slim_config configs/slim/quant/ppyolo_mbv3_large_qat.yml -o\n+fpgm_export:tools/export_model.py -c configs/ppyolo/ppyolo_mbv3_large_coco.yml --slim_config configs/slim/prune/ppyolo_mbv3_large_prune_fpgm.yml -o\n+distill_export:null\n+export1:null\n+export2:null\n+kl_quant_export:tools/post_quant.py -c 
configs/ppyolo/ppyolo_mbv3_large_coco.yml --slim_config configs/slim/post_quant/ppyolo_mbv3_large_ptq.yml -o\n+##\n+infer_mode:norm\n+infer_quant:False\n+inference:./deploy/python/infer.py\n+--device:cpu\n+--enable_mkldnn:False\n+--cpu_threads:4\n+--batch_size:1|2\n+--use_tensorrt:null\n+--run_mode:paddle\n+--model_dir:\n+--image_dir:./dataset/coco/test2017/\n+--save_log_path:null\n+--run_benchmark:False\n+null:null\n+===========================infer_benchmark_params===========================\n+numpy_infer_input:3x320x320.npy\n\\ No newline at end of file\n" }, { "change_type": "ADD", "old_path": null, "new_path": "test_tipc/configs/ppyolo/ppyolo_r50vd_dcn_1x_coco_train_linux_gpu_fleet_normal_infer_python_linux_gpu_cpu.txt", "diff": "+===========================train_params===========================\n+model_name:ppyolo_r50vd_dcn_1x_coco\n+python:python3.7\n+gpu_list:192.168.0.1,192.168.0.2;0,1\n+use_gpu:True\n+auto_cast:null\n+epoch:lite_train_lite_infer=1|lite_train_whole_infer=1|whole_train_whole_infer=405\n+save_dir:null\n+TrainReader.batch_size:lite_train_lite_infer=2|lite_train_whole_infer=2|whole_train_whole_infer=24\n+pretrain_weights:https://paddledet.bj.bcebos.com/models/ppyolo_r50vd_dcn_1x_coco.pdparams\n+trained_model_name:model_final.pdparams\n+train_infer_img_dir:./dataset/coco/test2017/\n+filename:null\n+##\n+trainer:norm_train\n+norm_train:tools/train.py -c configs/ppyolo/ppyolo_r50vd_dcn_1x_coco.yml -o\n+pact_train:tools/train.py -c configs/ppyolo/ppyolo_r50vd_dcn_1x_coco.yml --slim_config configs/slim/quant/ppyolo_r50vd_qat_pact.yml -o\n+fpgm_train:tools/train.py -c configs/ppyolo/ppyolo_r50vd_dcn_1x_coco.yml --slim_config configs/slim/prune/ppyolo_r50vd_prune_fpgm.yml -o\n+distill_train:null\n+null:null\n+null:null\n+##\n+===========================eval_params===========================\n+eval:tools/eval.py -c configs/ppyolo/ppyolo_r50vd_dcn_1x_coco.yml -o\n+null:null\n+##\n+===========================infer_params===========================\n+--output_dir:./output_inference\n+weights:https://paddledet.bj.bcebos.com/models/ppyolo_r50vd_dcn_1x_coco.pdparams\n+norm_export:tools/export_model.py -c configs/ppyolo/ppyolo_r50vd_dcn_1x_coco.yml -o\n+pact_export:tools/export_model.py -c configs/ppyolo/ppyolo_r50vd_dcn_1x_coco.yml --slim_config configs/slim/quant/ppyolo_r50vd_qat_pact.yml -o\n+fpgm_export:tools/export_model.py -c configs/ppyolo/ppyolo_r50vd_dcn_1x_coco.yml --slim_config configs/slim/prune/ppyolo_r50vd_prune_fpgm.yml -o\n+distill_export:null\n+export1:null\n+export2:null\n+kl_quant_export:tools/post_quant.py -c configs/ppyolo/ppyolo_r50vd_dcn_1x_coco.yml --slim_config configs/slim/post_quant/ppyolo_r50vd_dcn_ptq.yml -o\n+##\n+infer_mode:norm\n+infer_quant:False\n+inference:./deploy/python/infer.py\n+--device:cpu\n+--enable_mkldnn:False\n+--cpu_threads:4\n+--batch_size:1|2\n+--use_tensorrt:null\n+--run_mode:paddle\n+--model_dir:\n+--image_dir:./dataset/coco/test2017/\n+--save_log_path:null\n+--run_benchmark:False\n+null:null\n+===========================infer_benchmark_params===========================\n+numpy_infer_input:3x608x608.npy\n\\ No newline at end of file\n" }, { "change_type": "ADD", "old_path": null, "new_path": "test_tipc/configs/ppyolo/ppyolo_tiny_650e_coco_train_linux_gpu_fleet_normal_infer_python_linux_gpu_cpu.txt", "diff": 
"+===========================train_params===========================\n+model_name:ppyolo_tiny_650e_coco\n+python:python3.7\n+gpu_list:192.168.0.1,192.168.0.2;0,1\n+use_gpu:True\n+auto_cast:null\n+epoch:lite_train_lite_infer=1|lite_train_whole_infer=1|whole_train_whole_infer=650\n+save_dir:null\n+TrainReader.batch_size:lite_train_lite_infer=2|lite_train_whole_infer=2|whole_train_whole_infer=32\n+pretrain_weights:https://paddledet.bj.bcebos.com/models/ppyolo_tiny_650e_coco.pdparams\n+trained_model_name:model_final.pdparams\n+train_infer_img_dir:./dataset/coco/test2017/\n+filename:null\n+##\n+trainer:norm_train\n+norm_train:tools/train.py -c configs/ppyolo/ppyolo_tiny_650e_coco.yml -o\n+pact_train:tools/train.py -c configs/ppyolo/ppyolo_tiny_650e_coco.yml --slim_config _template_pact -o\n+fpgm_train:tools/train.py -c configs/ppyolo/ppyolo_tiny_650e_coco.yml --slim_config _template_fpgm -o\n+distill_train:null\n+null:null\n+null:null\n+##\n+===========================eval_params===========================\n+eval:tools/eval.py -c configs/ppyolo/ppyolo_tiny_650e_coco.yml -o\n+null:null\n+##\n+===========================infer_params===========================\n+--output_dir:./output_inference\n+weights:https://paddledet.bj.bcebos.com/models/ppyolo_tiny_650e_coco.pdparams\n+norm_export:tools/export_model.py -c configs/ppyolo/ppyolo_tiny_650e_coco.yml -o\n+pact_export:tools/export_model.py -c configs/ppyolo/ppyolo_tiny_650e_coco.yml --slim_config _template_pact -o\n+fpgm_export:tools/export_model.py -c configs/ppyolo/ppyolo_tiny_650e_coco.yml --slim_config _template_fpgm -o\n+distill_export:null\n+export1:null\n+export2:null\n+kl_quant_export:tools/post_quant.py -c configs/ppyolo/ppyolo_tiny_650e_coco.yml --slim_config configs/slim/post_quant/yolov3_darknet53_ptq.yml -o\n+##\n+infer_mode:norm\n+infer_quant:False\n+inference:./deploy/python/infer.py\n+--device:cpu\n+--enable_mkldnn:False\n+--cpu_threads:4\n+--batch_size:1|2\n+--use_tensorrt:null\n+--run_mode:paddle\n+--model_dir:\n+--image_dir:./dataset/coco/test2017/\n+--save_log_path:null\n+--run_benchmark:False\n+null:null\n+===========================infer_benchmark_params===========================\n+numpy_infer_input:3x320x320.npy\n\\ No newline at end of file\n" }, { "change_type": "ADD", "old_path": null, "new_path": "test_tipc/configs/ppyolo/ppyolov2_r50vd_dcn_365e_coco_train_linux_gpu_fleet_normal_infer_python_linux_gpu_cpu.txt", "diff": "+===========================train_params===========================\n+model_name:ppyolov2_r50vd_dcn_365e_coco\n+python:python3.7\n+gpu_list:192.168.0.1,192.168.0.2;0,1\n+use_gpu:True\n+auto_cast:null\n+epoch:lite_train_lite_infer=1|lite_train_whole_infer=1|whole_train_whole_infer=365\n+save_dir:null\n+TrainReader.batch_size:lite_train_lite_infer=2|lite_train_whole_infer=2|whole_train_whole_infer=12\n+pretrain_weights:https://paddledet.bj.bcebos.com/models/ppyolov2_r50vd_dcn_365e_coco.pdparams\n+trained_model_name:model_final.pdparams\n+train_infer_img_dir:./dataset/coco/test2017/\n+filename:null\n+##\n+trainer:norm_train\n+norm_train:tools/train.py -c configs/ppyolo/ppyolov2_r50vd_dcn_365e_coco.yml -o\n+pact_train:tools/train.py -c configs/ppyolo/ppyolov2_r50vd_dcn_365e_coco.yml --slim_config _template_pact -o\n+fpgm_train:tools/train.py -c configs/ppyolo/ppyolov2_r50vd_dcn_365e_coco.yml --slim_config _template_fpgm -o\n+distill_train:null\n+null:null\n+null:null\n+##\n+===========================eval_params===========================\n+eval:tools/eval.py -c configs/ppyolo/ppyolov2_r50vd_dcn_365e_coco.yml 
-o\n+null:null\n+##\n+===========================infer_params===========================\n+--output_dir:./output_inference\n+weights:https://paddledet.bj.bcebos.com/models/ppyolov2_r50vd_dcn_365e_coco.pdparams\n+norm_export:tools/export_model.py -c configs/ppyolo/ppyolov2_r50vd_dcn_365e_coco.yml -o\n+pact_export:tools/export_model.py -c configs/ppyolo/ppyolov2_r50vd_dcn_365e_coco.yml --slim_config _template_pact -o\n+fpgm_export:tools/export_model.py -c configs/ppyolo/ppyolov2_r50vd_dcn_365e_coco.yml --slim_config _template_fpgm -o\n+distill_export:null\n+export1:null\n+export2:null\n+kl_quant_export:tools/post_quant.py -c configs/ppyolo/ppyolov2_r50vd_dcn_365e_coco.yml --slim_config configs/slim/post_quant/yolov3_darknet53_ptq.yml -o\n+##\n+infer_mode:norm\n+infer_quant:False\n+inference:./deploy/python/infer.py\n+--device:cpu\n+--enable_mkldnn:False\n+--cpu_threads:4\n+--batch_size:1|2\n+--use_tensorrt:null\n+--run_mode:paddle\n+--model_dir:\n+--image_dir:./dataset/coco/test2017/\n+--save_log_path:null\n+--run_benchmark:False\n+null:null\n+===========================infer_benchmark_params===========================\n+numpy_infer_input:3x640x640.npy\n\\ No newline at end of file\n" }, { "change_type": "ADD", "old_path": null, "new_path": "test_tipc/configs/yolov3/yolov3_darknet53_270e_coco_train_linux_gpu_fleet_normal_infer_python_linux_gpu_cpu.txt", "diff": "+===========================train_params===========================\n+model_name:yolov3_darknet53_270e_coco\n+python:python3.7\n+gpu_list:192.168.0.1,192.168.0.2;0,1\n+use_gpu:True\n+auto_cast:null\n+epoch:lite_train_lite_infer=1|lite_train_whole_infer=1|whole_train_whole_infer=270\n+save_dir:null\n+TrainReader.batch_size:lite_train_lite_infer=2|lite_train_whole_infer=2|whole_train_whole_infer=8\n+pretrain_weights:https://paddledet.bj.bcebos.com/models/yolov3_darknet53_270e_coco.pdparams\n+trained_model_name:model_final.pdparams\n+train_infer_img_dir:./dataset/coco/test2017/\n+filename:null\n+##\n+trainer:norm_train\n+norm_train:tools/train.py -c configs/yolov3/yolov3_darknet53_270e_coco.yml -o\n+pact_train:tools/train.py -c configs/yolov3/yolov3_darknet53_270e_coco.yml --slim_config configs/slim/quant/yolov3_darknet_qat.yml -o\n+fpgm_train:tools/train.py -c configs/yolov3/yolov3_darknet53_270e_coco.yml --slim_config configs/slim/prune/yolov3_darknet_prune_fpgm.yml -o\n+distill_train:null\n+null:null\n+null:null\n+##\n+===========================eval_params===========================\n+eval:tools/eval.py -c configs/yolov3/yolov3_darknet53_270e_coco.yml -o\n+null:null\n+##\n+===========================infer_params===========================\n+--output_dir:./output_inference\n+weights:https://paddledet.bj.bcebos.com/models/yolov3_darknet53_270e_coco.pdparams\n+norm_export:tools/export_model.py -c configs/yolov3/yolov3_darknet53_270e_coco.yml -o\n+pact_export:tools/export_model.py -c configs/yolov3/yolov3_darknet53_270e_coco.yml --slim_config configs/slim/quant/yolov3_darknet_qat.yml -o\n+fpgm_export:tools/export_model.py -c configs/yolov3/yolov3_darknet53_270e_coco.yml --slim_config configs/slim/prune/yolov3_darknet_prune_fpgm.yml -o\n+distill_export:null\n+export1:null\n+export2:null\n+kl_quant_export:tools/post_quant.py -c configs/yolov3/yolov3_darknet53_270e_coco.yml --slim_config configs/slim/post_quant/yolov3_darknet53_ptq.yml 
-o\n+##\n+infer_mode:norm\n+infer_quant:False\n+inference:./deploy/python/infer.py\n+--device:cpu\n+--enable_mkldnn:False\n+--cpu_threads:4\n+--batch_size:1|2\n+--use_tensorrt:null\n+--run_mode:paddle\n+--model_dir:\n+--image_dir:./dataset/coco/test2017/\n+--save_log_path:null\n+--run_benchmark:False\n+null:null\n\\ No newline at end of file\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
[TIPC] add dist train txt, test=document_fix (#6440)
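The TIPC configs added in the commit above are plain `key:value` text files: `===...===` banners separate the train/eval/infer sections, `|`-separated values carry one setting per TIPC mode (e.g. `lite_train_lite_infer=1|whole_train_whole_infer=420`), and the distributed flavour lives in `gpu_list` as `node_ip1,node_ip2;card_ids`. The real harness parses these files from the shell scripts under `test_tipc/`; the snippet below is only a minimal Python sketch of that layout, with a made-up `parse_tipc_config` helper, to make the format explicit.

```python
# Illustrative only: a minimal parser for the key:value layout used by the
# TIPC *.txt configs added above. The actual TIPC harness parses these files
# in shell; this sketch just makes the structure visible.
def parse_tipc_config(path):
    params = {}
    with open(path) as f:
        for line in f:
            line = line.strip()
            # skip blank lines, '##' separators and '===...===' section banners
            if not line or line == '##' or line.startswith('==='):
                continue
            key, _, value = line.partition(':')
            # values such as "lite_train_lite_infer=1|whole_train_whole_infer=420"
            # carry one setting per TIPC mode, separated by '|'
            params[key] = value.split('|') if '|' in value else value
    return params


if __name__ == '__main__':
    cfg = parse_tipc_config(
        'test_tipc/configs/keypoint/'
        'tinypose_128x96_train_linux_gpu_fleet_normal_infer_python_linux_gpu_cpu.txt')
    print(cfg['gpu_list'])  # '192.168.0.1,192.168.0.2;0,1' -> two-node fleet run on cards 0,1
```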
499,348
14.07.2022 21:56:56
-28,800
63dc4c4afe129c59c80ac94122ff3f0ba75404e6
update pphumandocs&annodocs; test=document_fix
[ { "change_type": "ADD", "old_path": "docs/images/pphumanv2.png", "new_path": "docs/images/pphumanv2.png", "diff": "Binary files /dev/null and b/docs/images/pphumanv2.png differ\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
update pphumandocs&annodocs; test=document_fix (#6442)
499,319
15.07.2022 20:27:17
-28,800
0910e9882985d8100dcdc6b8055dec0fe554f496
add use_checkpoint and use_alpha for cspresnet
[ { "change_type": "ADD", "old_path": null, "new_path": "configs/visdrone/ppyoloe_crn_s_80e_visdrone_use_checkpoint.yml", "diff": "+_BASE_: [\n+ '../datasets/visdrone_detection.yml',\n+ '../runtime.yml',\n+ '../ppyoloe/_base_/optimizer_300e.yml',\n+ '../ppyoloe/_base_/ppyoloe_crn.yml',\n+ '../ppyoloe/_base_/ppyoloe_reader.yml',\n+]\n+log_iter: 100\n+snapshot_epoch: 10\n+weights: output/ppyoloe_crn_s_80e_visdrone_use_checkpoint/model_final\n+\n+pretrain_weights: https://paddledet.bj.bcebos.com/models/ppyoloe_crn_s_300e_coco.pdparams\n+depth_mult: 0.33\n+width_mult: 0.50\n+\n+TrainReader:\n+ batch_size: 8\n+\n+epoch: 80\n+LearningRate:\n+ base_lr: 0.01\n+ schedulers:\n+ - !CosineDecay\n+ max_epochs: 96\n+ - !LinearWarmup\n+ start_factor: 0.\n+ epochs: 1\n+\n+CSPResNet:\n+ use_checkpoint: True\n+ use_alpha: True\n+\n+# when use_checkpoint\n+use_fused_allreduce_gradients: True\n+\n+PPYOLOEHead:\n+ static_assigner_epoch: -1\n+ nms:\n+ name: MultiClassNMS\n+ nms_top_k: 10000\n+ keep_top_k: 500\n+ score_threshold: 0.01\n+ nms_threshold: 0.6\n" }, { "change_type": "MODIFY", "old_path": "ppdet/engine/trainer.py", "new_path": "ppdet/engine/trainer.py", "diff": "@@ -49,6 +49,8 @@ from ppdet.utils import profiler\nfrom .callbacks import Callback, ComposeCallback, LogPrinter, Checkpointer, WiferFaceEval, VisualDLWriter, SniperProposalsGenerator, WandbCallback\nfrom .export_utils import _dump_infer_config, _prune_input_spec\n+from paddle.distributed.fleet.utils.hybrid_parallel_util import fused_allreduce_gradients\n+\nfrom ppdet.utils.logger import setup_logger\nlogger = setup_logger('ppdet.engine')\n@@ -152,7 +154,6 @@ class Trainer(object):\nif self.cfg.get('unstructured_prune'):\nself.pruner = create('UnstructuredPruner')(self.model,\nsteps_per_epoch)\n-\nif self.use_amp and self.amp_level == 'O2':\nself.model = paddle.amp.decorate(\nmodels=self.model, level=self.amp_level)\n@@ -426,6 +427,9 @@ class Trainer(object):\nself._compose_callback.on_train_begin(self.status)\n+ use_fused_allreduce_gradients = self.cfg[\n+ 'use_fused_allreduce_gradients'] if 'use_fused_allreduce_gradients' in self.cfg else False\n+\nfor epoch_id in range(self.start_epoch, self.cfg.epoch):\nself.status['mode'] = 'train'\nself.status['epoch_id'] = epoch_id\n@@ -441,7 +445,23 @@ class Trainer(object):\ndata['epoch_id'] = epoch_id\nif self.use_amp:\n- with paddle.amp.auto_cast(\n+ if isinstance(\n+ model, paddle.\n+ DataParallel) and use_fused_allreduce_gradients:\n+ with model.no_sync():\n+ with amp.auto_cast(\n+ enable=self.cfg.use_gpus,\n+ level=self.amp_level):\n+ # model forward\n+ outputs = model(data)\n+ loss = outputs['loss']\n+ # model backward\n+ scaled_loss = scaler.scale(loss)\n+ scaled_loss.backward()\n+ fused_allreduce_gradients(\n+ list(model.parameters()), None)\n+ else:\n+ with amp.auto_cast(\nenable=self.cfg.use_gpu, level=self.amp_level):\n# model forward\noutputs = model(data)\n@@ -451,6 +471,19 @@ class Trainer(object):\nscaled_loss.backward()\n# in dygraph mode, optimizer.minimize is equal to optimizer.step\nscaler.minimize(self.optimizer, scaled_loss)\n+\n+ else:\n+ if isinstance(\n+ model, paddle.\n+ DataParallel) and use_fused_allreduce_gradients:\n+ with model.no_sync():\n+ # model forward\n+ outputs = model(data)\n+ loss = outputs['loss']\n+ # model backward\n+ loss.backward()\n+ fused_allreduce_gradients(\n+ list(model.parameters()), None)\nelse:\n# model forward\noutputs = model(data)\n" }, { "change_type": "MODIFY", "old_path": "ppdet/modeling/backbones/cspresnet.py", "new_path": 
"ppdet/modeling/backbones/cspresnet.py", "diff": "@@ -21,6 +21,7 @@ import paddle.nn as nn\nimport paddle.nn.functional as F\nfrom paddle import ParamAttr\nfrom paddle.regularizer import L2Decay\n+from paddle.nn.initializer import Constant\nfrom ppdet.modeling.ops import get_act_fn\nfrom ppdet.core.workspace import register, serializable\n@@ -65,7 +66,7 @@ class ConvBNLayer(nn.Layer):\nclass RepVggBlock(nn.Layer):\n- def __init__(self, ch_in, ch_out, act='relu'):\n+ def __init__(self, ch_in, ch_out, act='relu', alpha=False):\nsuper(RepVggBlock, self).__init__()\nself.ch_in = ch_in\nself.ch_out = ch_out\n@@ -75,10 +76,20 @@ class RepVggBlock(nn.Layer):\nch_in, ch_out, 1, stride=1, padding=0, act=None)\nself.act = get_act_fn(act) if act is None or isinstance(act, (\nstr, dict)) else act\n+ if alpha:\n+ self.alpha = self.create_parameter(\n+ shape=[1],\n+ attr=ParamAttr(initializer=Constant(value=1.)),\n+ dtype=\"float32\")\n+ else:\n+ self.alpha = None\ndef forward(self, x):\nif hasattr(self, 'conv'):\ny = self.conv(x)\n+ else:\n+ if self.alpha:\n+ y = self.conv1(x) + self.alpha * self.conv2(x)\nelse:\ny = self.conv1(x) + self.conv2(x)\ny = self.act(y)\n@@ -102,6 +113,10 @@ class RepVggBlock(nn.Layer):\ndef get_equivalent_kernel_bias(self):\nkernel3x3, bias3x3 = self._fuse_bn_tensor(self.conv1)\nkernel1x1, bias1x1 = self._fuse_bn_tensor(self.conv2)\n+ if self.alpha:\n+ return kernel3x3 + self.alpha * self._pad_1x1_to_3x3_tensor(\n+ kernel1x1), bias3x3 + self.alpha * bias1x1\n+ else:\nreturn kernel3x3 + self._pad_1x1_to_3x3_tensor(\nkernel1x1), bias3x3 + bias1x1\n@@ -126,11 +141,16 @@ class RepVggBlock(nn.Layer):\nclass BasicBlock(nn.Layer):\n- def __init__(self, ch_in, ch_out, act='relu', shortcut=True):\n+ def __init__(self,\n+ ch_in,\n+ ch_out,\n+ act='relu',\n+ shortcut=True,\n+ use_alpha=False):\nsuper(BasicBlock, self).__init__()\nassert ch_in == ch_out\nself.conv1 = ConvBNLayer(ch_in, ch_out, 3, stride=1, padding=1, act=act)\n- self.conv2 = RepVggBlock(ch_out, ch_out, act=act)\n+ self.conv2 = RepVggBlock(ch_out, ch_out, act=act, alpha=use_alpha)\nself.shortcut = shortcut\ndef forward(self, x):\n@@ -167,7 +187,8 @@ class CSPResStage(nn.Layer):\nn,\nstride,\nact='relu',\n- attn='eca'):\n+ attn='eca',\n+ use_alpha=False):\nsuper(CSPResStage, self).__init__()\nch_mid = (ch_in + ch_out) // 2\n@@ -180,8 +201,11 @@ class CSPResStage(nn.Layer):\nself.conv2 = ConvBNLayer(ch_mid, ch_mid // 2, 1, act=act)\nself.blocks = nn.Sequential(*[\nblock_fn(\n- ch_mid // 2, ch_mid // 2, act=act, shortcut=True)\n- for i in range(n)\n+ ch_mid // 2,\n+ ch_mid // 2,\n+ act=act,\n+ shortcut=True,\n+ use_alpha=use_alpha) for i in range(n)\n])\nif attn:\nself.attn = EffectiveSELayer(ch_mid, act='hardsigmoid')\n@@ -216,8 +240,12 @@ class CSPResNet(nn.Layer):\nuse_large_stem=False,\nwidth_mult=1.0,\ndepth_mult=1.0,\n- trt=False):\n+ trt=False,\n+ use_checkpoint=False,\n+ use_alpha=False,\n+ **args):\nsuper(CSPResNet, self).__init__()\n+ self.use_checkpoint = use_checkpoint\nchannels = [max(round(c * width_mult), 1) for c in channels]\nlayers = [max(round(l * depth_mult), 1) for l in layers]\nact = get_act_fn(\n@@ -255,18 +283,29 @@ class CSPResNet(nn.Layer):\nn = len(channels) - 1\nself.stages = nn.Sequential(*[(str(i), CSPResStage(\n- BasicBlock, channels[i], channels[i + 1], layers[i], 2, act=act))\n- for i in range(n)])\n+ BasicBlock,\n+ channels[i],\n+ channels[i + 1],\n+ layers[i],\n+ 2,\n+ act=act,\n+ use_alpha=use_alpha)) for i in range(n)])\nself._out_channels = channels[1:]\n- self._out_strides = [4, 8, 16, 
32]\n+ self._out_strides = [4 * 2**i for i in range(n)]\nself.return_idx = return_idx\n+ if use_checkpoint:\n+ paddle.seed(0)\ndef forward(self, inputs):\nx = inputs['image']\nx = self.stem(x)\nouts = []\nfor idx, stage in enumerate(self.stages):\n+ if self.use_checkpoint and self.training:\n+ x = paddle.distributed.fleet.utils.recompute(\n+ stage, x, **{\"preserve_rng_state\": True})\n+ else:\nx = stage(x)\nif idx in self.return_idx:\nouts.append(x)\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
add use_checkpoint and use_alpha for cspresnet (#6428)
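Two training-side pieces of the commit above are easy to miss in the long diff: CSPResNet stages are wrapped in `paddle.distributed.fleet.utils.recompute` when `use_checkpoint: True`, and the trainer gains a `use_fused_allreduce_gradients` switch that runs forward/backward under `model.no_sync()` and then issues a single fused all-reduce (the new VisDrone YAML turns it on together with `use_checkpoint`). The sketch below isolates that step pattern with placeholder model/optimizer/data; it illustrates the pattern, it is not the trainer code itself.

```python
# Sketch of the training step enabled by use_fused_allreduce_gradients:
# gradients are accumulated with DataParallel's per-parameter hooks disabled
# (no_sync) and all-reduced once per step afterwards. Model/optimizer/data
# are placeholders.
import paddle
from paddle.distributed.fleet.utils.hybrid_parallel_util import (
    fused_allreduce_gradients)


def train_step(model, optimizer, data, use_fused_allreduce_gradients=True):
    if isinstance(model, paddle.DataParallel) and use_fused_allreduce_gradients:
        with model.no_sync():              # suppress per-gradient allreduce hooks
            loss = model(data)['loss']
            loss.backward()
        # one fused allreduce over all parameter gradients
        fused_allreduce_gradients(list(model.parameters()), None)
    else:
        loss = model(data)['loss']
        loss.backward()
    optimizer.step()
    optimizer.clear_grad()
    return loss
```

Deferring the all-reduce like this avoids depending on DataParallel's backward hooks, which appears to be why the recompute config also sets this flag: with checkpointing, parts of the forward pass are re-executed during backward.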
499,298
18.07.2022 10:36:26
-28,800
b9a2d36d656285ce510fab368138257800cb66a2
fix amp training
[ { "change_type": "MODIFY", "old_path": "ppdet/engine/trainer.py", "new_path": "ppdet/engine/trainer.py", "diff": "@@ -449,7 +449,7 @@ class Trainer(object):\nmodel, paddle.\nDataParallel) and use_fused_allreduce_gradients:\nwith model.no_sync():\n- with amp.auto_cast(\n+ with paddle.amp.auto_cast(\nenable=self.cfg.use_gpus,\nlevel=self.amp_level):\n# model forward\n@@ -461,7 +461,7 @@ class Trainer(object):\nfused_allreduce_gradients(\nlist(model.parameters()), None)\nelse:\n- with amp.auto_cast(\n+ with paddle.amp.auto_cast(\nenable=self.cfg.use_gpu, level=self.amp_level):\n# model forward\noutputs = model(data)\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
fix amp training (#6456)
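The one-line fix above matters because `amp.auto_cast` was an unqualified name in that scope; the working call is `paddle.amp.auto_cast`, composed with the `GradScaler` calls already present in the trainer. A minimal sketch of that AMP step, with placeholder model/optimizer/data and a scaler assumed to come from `paddle.amp.GradScaler`:

```python
# Minimal sketch of the AMP step the fixed code path composes
# (paddle.amp.auto_cast + GradScaler); model/optimizer/data are placeholders.
import paddle


def amp_step(model, optimizer, data, scaler, amp_level='O1', use_gpu=True):
    with paddle.amp.auto_cast(enable=use_gpu, level=amp_level):
        loss = model(data)['loss']        # forward runs in reduced precision where safe
    scaled = scaler.scale(loss)           # scale the loss to avoid fp16 underflow
    scaled.backward()
    scaler.minimize(optimizer, scaled)    # unscale, step the optimizer, update the scale
    optimizer.clear_grad()


# the scaler is created once before the loop, e.g.:
# scaler = paddle.amp.GradScaler(init_loss_scaling=1024)
```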
499,298
18.07.2022 17:08:08
-28,800
e6ad10e5cf86676c898fe27af19dc991fc4d98f4
fix ids2names in plot_tracking_dict
[ { "change_type": "MODIFY", "old_path": "deploy/pipeline/pipeline.py", "new_path": "deploy/pipeline/pipeline.py", "diff": "@@ -966,6 +966,7 @@ class PipePredictor(object):\nonline_scores,\nframe_id=frame_id,\nfps=fps,\n+ ids2names=self.mot_predictor.pred_config.labels,\ndo_entrance_counting=self.do_entrance_counting,\ndo_break_in_counting=self.do_break_in_counting,\nentrance=entrance,\n" }, { "change_type": "MODIFY", "old_path": "deploy/pptracking/python/mot/visualize.py", "new_path": "deploy/pptracking/python/mot/visualize.py", "diff": "@@ -191,7 +191,7 @@ def plot_tracking_dict(image,\nscores_dict,\nframe_id=0,\nfps=0.,\n- ids2names=['pedestrian'],\n+ ids2names=[],\ndo_entrance_counting=False,\ndo_break_in_counting=False,\nentrance=None,\n" }, { "change_type": "MODIFY", "old_path": "deploy/pptracking/python/mot_sde_infer.py", "new_path": "deploy/pptracking/python/mot_sde_infer.py", "diff": "@@ -512,14 +512,15 @@ class SDE_Detector(Detector):\nonline_ids,\nonline_scores,\nframe_id=frame_id,\n- ids2names=[])\n+ ids2names=ids2names)\nelse:\nim = plot_tracking(\nframe,\nonline_tlwhs,\nonline_ids,\nonline_scores,\n- frame_id=frame_id)\n+ frame_id=frame_id,\n+ ids2names=ids2names)\nsave_dir = os.path.join(self.output_dir, seq_name)\nif not os.path.exists(save_dir):\nos.makedirs(save_dir)\n@@ -632,6 +633,7 @@ class SDE_Detector(Detector):\nonline_scores,\nframe_id=frame_id,\nfps=fps,\n+ ids2names=ids2names,\ndo_entrance_counting=self.do_entrance_counting,\nentrance=entrance)\nelse:\n" }, { "change_type": "MODIFY", "old_path": "deploy/python/mot_sde_infer.py", "new_path": "deploy/python/mot_sde_infer.py", "diff": "@@ -359,14 +359,15 @@ class SDE_Detector(Detector):\nonline_ids,\nonline_scores,\nframe_id=frame_id,\n- ids2names=[])\n+ ids2names=ids2names)\nelse:\nim = plot_tracking(\nframe,\nonline_tlwhs,\nonline_ids,\nonline_scores,\n- frame_id=frame_id)\n+ frame_id=frame_id,\n+ ids2names=ids2names)\nsave_dir = os.path.join(self.output_dir, seq_name)\nif not os.path.exists(save_dir):\nos.makedirs(save_dir)\n@@ -431,7 +432,8 @@ class SDE_Detector(Detector):\nonline_ids,\nonline_scores,\nframe_id=frame_id,\n- fps=fps)\n+ fps=fps,\n+ ids2names=ids2names)\nelse:\n# use ByteTracker, support multiple class\nfor cls_id in range(num_classes):\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
fix ids2names in plot_tracking_dict (#6466)
499,339
25.07.2022 11:11:34
-28,800
3645208e752ae8cce1bac934d3453915bb99434b
[dev] alter ppyoloe nms params
[ { "change_type": "MODIFY", "old_path": "configs/ppyoloe/_base_/ppyoloe_crn.yml", "new_path": "configs/ppyoloe/_base_/ppyoloe_crn.yml", "diff": "@@ -39,7 +39,7 @@ PPYOLOEHead:\nbeta: 6.0\nnms:\nname: MultiClassNMS\n- nms_top_k: 10000\n+ nms_top_k: 1000\nkeep_top_k: 300\nscore_threshold: 0.01\nnms_threshold: 0.7\n" }, { "change_type": "MODIFY", "old_path": "configs/ppyoloe/ppyoloe_crn_l_36e_coco_xpu.yml", "new_path": "configs/ppyoloe/ppyoloe_crn_l_36e_coco_xpu.yml", "diff": "@@ -64,6 +64,6 @@ PPYOLOEHead:\nnms:\nname: MultiClassNMS\nnms_top_k: 1000\n- keep_top_k: 100\n+ keep_top_k: 300\nscore_threshold: 0.01\n- nms_threshold: 0.6\n+ nms_threshold: 0.7\n" }, { "change_type": "MODIFY", "old_path": "configs/ppyoloe/ppyoloe_crn_s_400e_coco.yml", "new_path": "configs/ppyoloe/ppyoloe_crn_s_400e_coco.yml", "diff": "@@ -17,13 +17,9 @@ width_mult: 0.50\nTrainReader:\nbatch_size: 32\n-LearningRate:\n- base_lr: 0.04\n-\n-\nepoch: 400\nLearningRate:\n- base_lr: 0.025\n+ base_lr: 0.04\nschedulers:\n- !CosineDecay\nmax_epochs: 480\n@@ -44,7 +40,7 @@ PPYOLOEHead:\nstatic_assigner_epoch: 133\nnms:\nname: MultiClassNMS\n- nms_top_k: 10000\n+ nms_top_k: 1000\nkeep_top_k: 300\nscore_threshold: 0.01\nnms_threshold: 0.7\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
[dev] alter ppyoloe nms params (#6491)
499,298
25.07.2022 17:03:55
-28,800
324b0b9961d19679192431ade74f6ce956e67f7f
fix cpp infer of jdetracker
[ { "change_type": "MODIFY", "old_path": "deploy/cpp/src/tracker.cc", "new_path": "deploy/cpp/src/tracker.cc", "diff": "@@ -58,8 +58,8 @@ bool JDETracker::update(const cv::Mat &dets, const cv::Mat &emb, std::vector<Tra\nTrajectoryPool candidates(dets.rows);\nfor (int i = 0; i < dets.rows; ++i)\n{\n- float score = *dets.ptr<float>(i, 4);\n- const cv::Mat &ltrb_ = dets(cv::Rect(0, i, 4, 1));\n+ float score = *dets.ptr<float>(i, 1);\n+ const cv::Mat &ltrb_ = dets(cv::Rect(2, i, 4, 1));\ncv::Vec4f ltrb = mat2vec4f(ltrb_);\nconst cv::Mat &embedding = emb(cv::Rect(0, i, emb.cols, 1));\ncandidates[i] = Trajectory(ltrb, score, embedding);\n" }, { "change_type": "MODIFY", "old_path": "deploy/pptracking/cpp/src/tracker.cc", "new_path": "deploy/pptracking/cpp/src/tracker.cc", "diff": "@@ -56,8 +56,8 @@ bool JDETracker::update(const cv::Mat &dets,\n++timestamp;\nTrajectoryPool candidates(dets.rows);\nfor (int i = 0; i < dets.rows; ++i) {\n- float score = *dets.ptr<float>(i, 4);\n- const cv::Mat &ltrb_ = dets(cv::Rect(0, i, 4, 1));\n+ float score = *dets.ptr<float>(i, 1);\n+ const cv::Mat &ltrb_ = dets(cv::Rect(2, i, 4, 1));\ncv::Vec4f ltrb = mat2vec4f(ltrb_);\nconst cv::Mat &embedding = emb(cv::Rect(0, i, emb.cols, 1));\ncandidates[i] = Trajectory(ltrb, score, embedding);\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
fix cpp infer of jdetracker (#6500)
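The C++ change above reflects a different per-row detection layout fed to the tracker: the score is now read from column 1 and the ltrb box from columns 2..5, i.e. rows are ordered `[class_id, score, x1, y1, x2, y2]`, which matches the bbox layout PaddleDetection's exported models produce. A small numpy illustration of the indexing, with made-up values:

```python
# Illustration (in numpy) of the per-row layout the fixed C++ code assumes:
# [class_id, score, x1, y1, x2, y2]. The column offsets mirror
# dets.ptr<float>(i, 1) for the score and dets(cv::Rect(2, i, 4, 1)) for the box.
import numpy as np

dets = np.array(
    [[0., 0.92, 10., 20., 110., 220.],   # one detection row (made-up values)
     [0., 0.75, 50., 60., 150., 260.]],
    dtype=np.float32)

for row in dets:
    score = row[1]    # was row[4] before the fix
    ltrb = row[2:6]   # was row[0:4] before the fix
    print(score, ltrb)
```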
499,299
25.07.2022 23:20:52
-28,800
190e237b2114aef65a26045f78faadb6a6744471
fix box filter when box_num > 0 but with no target class
[ { "change_type": "MODIFY", "old_path": "deploy/python/det_keypoint_unite_infer.py", "new_path": "deploy/python/det_keypoint_unite_infer.py", "diff": "@@ -37,8 +37,15 @@ KEYPOINT_SUPPORT_MODELS = {\ndef predict_with_given_det(image, det_res, keypoint_detector,\nkeypoint_batch_size, run_benchmark):\n+ keypoint_res = {}\n+\nrec_images, records, det_rects = keypoint_detector.get_person_from_rect(\nimage, det_res)\n+\n+ if len(det_rects) == 0:\n+ keypoint_res['keypoint'] = [[], []]\n+ return keypoint_res\n+\nkeypoint_vector = []\nscore_vector = []\n@@ -47,7 +54,6 @@ def predict_with_given_det(image, det_res, keypoint_detector,\nrec_images, run_benchmark, repeats=10, visual=False)\nkeypoint_vector, score_vector = translate_to_ori_images(keypoint_results,\nnp.array(records))\n- keypoint_res = {}\nkeypoint_res['keypoint'] = [\nkeypoint_vector.tolist(), score_vector.tolist()\n] if len(keypoint_vector) > 0 else [[], []]\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
fix box filter when box_num > 0 but with no target class (#6506)
499,333
01.08.2022 10:25:44
-28,800
06c8cf7e5a75be43c51323a6c21e21af291e5728
fix voc save_result in infer
[ { "change_type": "MODIFY", "old_path": "ppdet/data/source/dataset.py", "new_path": "ppdet/data/source/dataset.py", "diff": "@@ -208,6 +208,10 @@ class ImageFolder(DetDataset):\nself.image_dir = images\nself.roidbs = self._load_images()\n+ def get_label_list(self):\n+ # Only VOC dataset needs label list in ImageFold\n+ return self.anno_path\n+\n@register\nclass CommonDataset(object):\n" }, { "change_type": "MODIFY", "old_path": "ppdet/engine/trainer.py", "new_path": "ppdet/engine/trainer.py", "diff": "@@ -287,12 +287,18 @@ class Trainer(object):\nsave_prediction_only=save_prediction_only)\n]\nelif self.cfg.metric == 'VOC':\n+ output_eval = self.cfg['output_eval'] \\\n+ if 'output_eval' in self.cfg else None\n+ save_prediction_only = self.cfg.get('save_prediction_only', False)\n+\nself._metrics = [\nVOCMetric(\nlabel_list=self.dataset.get_label_list(),\nclass_num=self.cfg.num_classes,\nmap_type=self.cfg.map_type,\n- classwise=classwise)\n+ classwise=classwise,\n+ output_eval=output_eval,\n+ save_prediction_only=save_prediction_only)\n]\nelif self.cfg.metric == 'WiderFace':\nmulti_scale = self.cfg.multi_scale_eval if 'multi_scale_eval' in self.cfg else True\n" }, { "change_type": "MODIFY", "old_path": "ppdet/metrics/metrics.py", "new_path": "ppdet/metrics/metrics.py", "diff": "@@ -225,7 +225,9 @@ class VOCMetric(Metric):\nmap_type='11point',\nis_bbox_normalized=False,\nevaluate_difficult=False,\n- classwise=False):\n+ classwise=False,\n+ output_eval=None,\n+ save_prediction_only=False):\nassert os.path.isfile(label_list), \\\n\"label_list {} not a file\".format(label_list)\nself.clsid2catid, self.catid2name = get_categories('VOC', label_list)\n@@ -233,6 +235,8 @@ class VOCMetric(Metric):\nself.overlap_thresh = overlap_thresh\nself.map_type = map_type\nself.evaluate_difficult = evaluate_difficult\n+ self.output_eval = output_eval\n+ self.save_prediction_only = save_prediction_only\nself.detection_map = DetectionMAP(\nclass_num=class_num,\noverlap_thresh=overlap_thresh,\n@@ -245,6 +249,7 @@ class VOCMetric(Metric):\nself.reset()\ndef reset(self):\n+ self.results = {'bbox': [], 'score': [], 'label': []}\nself.detection_map.reset()\ndef update(self, inputs, outputs):\n@@ -256,8 +261,15 @@ class VOCMetric(Metric):\nbbox_lengths = outputs['bbox_num'].numpy() if isinstance(\noutputs['bbox_num'], paddle.Tensor) else outputs['bbox_num']\n+ self.results['bbox'].append(bboxes.tolist())\n+ self.results['score'].append(scores.tolist())\n+ self.results['label'].append(labels.tolist())\n+\nif bboxes.shape == (1, 1) or bboxes is None:\nreturn\n+ if self.save_prediction_only:\n+ return\n+\ngt_boxes = inputs['gt_bbox']\ngt_labels = inputs['gt_class']\ndifficults = inputs['difficult'] if not self.evaluate_difficult \\\n@@ -294,6 +306,15 @@ class VOCMetric(Metric):\nbbox_idx += bbox_num\ndef accumulate(self):\n+ output = \"bbox.json\"\n+ if self.output_eval:\n+ output = os.path.join(self.output_eval, output)\n+ with open(output, 'w') as f:\n+ json.dump(self.results, f)\n+ logger.info('The bbox result is saved to bbox.json.')\n+ if self.save_prediction_only:\n+ return\n+\nlogger.info(\"Accumulating evaluatation results...\")\nself.detection_map.accumulate()\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
fix voc save_result in infer (#6547)
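With `save_prediction_only`, VOCMetric now dumps its accumulated predictions to `bbox.json` (placed under `output_eval` when that is set). Judging from `update()`, the file holds three parallel lists keyed `bbox`, `score` and `label`, with one sub-list per evaluated batch; the reader below is a rough sketch under that assumption and is not part of the repo.

```python
# Rough sketch of reading back the bbox.json written by VOCMetric.accumulate().
# The nesting (one sub-list per evaluated batch) is inferred from the update()
# code in the diff above.
import json

with open('bbox.json') as f:
    results = json.load(f)   # keys: 'bbox', 'score', 'label'

for batch_boxes, batch_scores, batch_labels in zip(
        results['bbox'], results['score'], results['label']):
    for box, score, label in zip(batch_boxes, batch_scores, batch_labels):
        print(label, score, box)
```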
499,333
02.08.2022 17:46:12
-28,800
34166cd41551d6b9395e63075321ded209bbf5ad
update example, test=document_fix
[ { "change_type": "MODIFY", "old_path": "README_en.md", "new_path": "README_en.md", "diff": "@@ -432,19 +432,15 @@ The comparison between COCO mAP and FPS on Qualcomm Snapdragon 865 processor of\n### [Industrial tutorial examples](./industrial_tutorial/README.md)\n+- [Intelligent fitness recognition based on PP-TinyPose Plus](https://aistudio.baidu.com/aistudio/projectdetail/4385813)\n+\n- [Road litter detection based on PP-PicoDet Plus](https://aistudio.baidu.com/aistudio/projectdetail/3561097)\n- [Communication tower detection based on PP-PicoDet and deployment on Android](https://aistudio.baidu.com/aistudio/projectdetail/3561097)\n-- [Tile surface defect detection based on Faster-RCNN](https://aistudio.baidu.com/aistudio/projectdetail/2571419)\n-\n-- [PCB defect detection based on PaddleDetection](https://aistudio.baidu.com/aistudio/projectdetail/2367089)\n-\n- [Visitor flow statistics based on FairMOT](https://aistudio.baidu.com/aistudio/projectdetail/2421822)\n-- [Falling detection based on YOLOv3](https://aistudio.baidu.com/aistudio/projectdetail/2500639)\n-\n-- [Compliance detection based on human key point detection](https://aistudio.baidu.com/aistudio/projectdetail/4061642?contributionType=1)\n+- [More examples](./industrial_tutorial/README.md)\n## <img title=\"\" src=\"https://user-images.githubusercontent.com/48054808/157836473-1cf451fa-f01f-4148-ba68-b6d06d5da2f9.png\" alt=\"\" width=\"20\"> Applications\n" }, { "change_type": "MODIFY", "old_path": "requirements.txt", "new_path": "requirements.txt", "diff": "tqdm\n-typeguard ; python_version >= '3.4'\n-visualdl>=2.1.0 ; python_version <= '3.7'\n+typeguard\n+visualdl>=2.2.0\nopencv-python\nPyYAML\nshapely\n@@ -8,8 +8,7 @@ scipy\nterminaltables\nCython\npycocotools\n-#xtcocotools==1.6 #only for crowdpose\n-setuptools>=42.0.0\n+setuptools\n# for vehicleplate\npyclipper\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
update example, test=document_fix (#6567)
499,299
02.08.2022 17:48:02
-28,800
14987f65ec199d5916702020a8451ecff8ff9a8c
fix training error for picodet_s_192_lcnet_pedestrian
[ { "change_type": "MODIFY", "old_path": "configs/picodet/application/pedestrian_detection/picodet_s_192_lcnet_pedestrian.yml", "new_path": "configs/picodet/application/pedestrian_detection/picodet_s_192_lcnet_pedestrian.yml", "diff": "@@ -56,7 +56,7 @@ PicoHeadV2:\nuse_align_head: True\nstatic_assigner:\nname: ATSSAssigner\n- topk: 9\n+ topk: 4\nforce_gt_matching: False\nassigner:\nname: TaskAlignedAssigner\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
fix training error for picodet_s_192_lcnet_pedestrian (#6562)
499,319
03.08.2022 14:34:53
-28,800
b276610c3da6d2c46532549b7a08600733db9909
PPYOLOE fix out_shape, reset return_idx
[ { "change_type": "MODIFY", "old_path": "ppdet/modeling/backbones/cspresnet.py", "new_path": "ppdet/modeling/backbones/cspresnet.py", "diff": "@@ -235,7 +235,7 @@ class CSPResNet(nn.Layer):\nlayers=[3, 6, 6, 3],\nchannels=[64, 128, 256, 512, 1024],\nact='swish',\n- return_idx=[0, 1, 2, 3, 4],\n+ return_idx=[1, 2, 3],\ndepth_wise=False,\nuse_large_stem=False,\nwidth_mult=1.0,\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
PPYOLOE fix out_shape, reset return_idx (#6561)
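Together with the earlier change that computes `_out_strides` as `[4 * 2**i for i in range(n)]`, resetting `return_idx` to `[1, 2, 3]` means CSPResNet exposes only its three deepest feature maps, the levels a PP-YOLOE-style neck typically consumes. A quick plain-Python check of what that yields for the default `channels` with `width_mult=1.0`:

```python
# What the reset defaults expose: with channels=[64, 128, 256, 512, 1024]
# there are n=4 stages, strides [4, 8, 16, 32], and return_idx=[1, 2, 3]
# keeps the last three feature maps.
channels = [64, 128, 256, 512, 1024]
n = len(channels) - 1                     # number of CSPRes stages
out_channels = channels[1:]               # per-stage output channels
out_strides = [4 * 2 ** i for i in range(n)]
return_idx = [1, 2, 3]
print([(out_channels[i], out_strides[i]) for i in return_idx])
# [(256, 8), (512, 16), (1024, 32)]
```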
499,304
04.08.2022 14:34:51
-28,800
3e4d5697d9946db21ef10b933658e59c9e97ae1b
add PP-YOLOE Auto Compression demo
[ { "change_type": "ADD", "old_path": null, "new_path": "deploy/auto_compression/configs/ppyoloe_l_qat_dis.yaml", "diff": "+\n+Global:\n+ reader_config: configs/yolo_reader.yml\n+ input_list: ['image', 'scale_factor']\n+ arch: YOLO\n+ Evaluation: True\n+ model_dir: ./ppyoloe_crn_l_300e_coco\n+ model_filename: model.pdmodel\n+ params_filename: model.pdiparams\n+\n+Distillation:\n+ alpha: 1.0\n+ loss: soft_label\n+\n+Quantization:\n+ use_pact: true\n+ activation_quantize_type: 'moving_average_abs_max'\n+ quantize_op_types:\n+ - conv2d\n+ - depthwise_conv2d\n+\n+TrainConfig:\n+ train_iter: 5000\n+ eval_iter: 1000\n+ learning_rate:\n+ type: CosineAnnealingDecay\n+ learning_rate: 0.00003\n+ T_max: 6000\n+ optimizer_builder:\n+ optimizer:\n+ type: SGD\n+ weight_decay: 4.0e-05\n+\n" }, { "change_type": "ADD", "old_path": null, "new_path": "deploy/auto_compression/configs/yolo_reader.yml", "diff": "+metric: COCO\n+num_classes: 80\n+\n+# Datset configuration\n+TrainDataset:\n+ !COCODataSet\n+ image_dir: train2017\n+ anno_path: annotations/instances_train2017.json\n+ dataset_dir: dataset/coco/\n+\n+EvalDataset:\n+ !COCODataSet\n+ image_dir: val2017\n+ anno_path: annotations/instances_val2017.json\n+ dataset_dir: dataset/coco/\n+\n+worker_num: 0\n+\n+# preprocess reader in test\n+EvalReader:\n+ sample_transforms:\n+ - Decode: {}\n+ - Resize: {target_size: [640, 640], keep_ratio: False, interp: 2}\n+ - NormalizeImage: {mean: [0.485, 0.456, 0.406], std: [0.229, 0.224, 0.225], is_scale: True}\n+ - Permute: {}\n+ batch_size: 4\n" }, { "change_type": "ADD", "old_path": null, "new_path": "deploy/auto_compression/eval.py", "diff": "+# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n+import os\n+import sys\n+import numpy as np\n+import argparse\n+import paddle\n+from ppdet.core.workspace import load_config, merge_config\n+from ppdet.core.workspace import create\n+from ppdet.metrics import COCOMetric, VOCMetric, KeyPointTopDownCOCOEval\n+from paddleslim.auto_compression.config_helpers import load_config as load_slim_config\n+from post_process import PPYOLOEPostProcess\n+\n+\n+def argsparser():\n+ parser = argparse.ArgumentParser(description=__doc__)\n+ parser.add_argument(\n+ '--config_path',\n+ type=str,\n+ default=None,\n+ help=\"path of compression strategy config.\",\n+ required=True)\n+ parser.add_argument(\n+ '--devices',\n+ type=str,\n+ default='gpu',\n+ help=\"which device used to compress.\")\n+\n+ return parser\n+\n+\n+def reader_wrapper(reader, input_list):\n+ def gen():\n+ for data in reader:\n+ in_dict = {}\n+ if isinstance(input_list, list):\n+ for input_name in input_list:\n+ in_dict[input_name] = data[input_name]\n+ elif isinstance(input_list, dict):\n+ for input_name in input_list.keys():\n+ in_dict[input_list[input_name]] = data[input_name]\n+ yield in_dict\n+\n+ return gen\n+\n+\n+def convert_numpy_data(data, metric):\n+ data_all = {}\n+ data_all = {k: np.array(v) for k, v in data.items()}\n+ if isinstance(metric, 
VOCMetric):\n+ for k, v in data_all.items():\n+ if not isinstance(v[0], np.ndarray):\n+ tmp_list = []\n+ for t in v:\n+ tmp_list.append(np.array(t))\n+ data_all[k] = np.array(tmp_list)\n+ else:\n+ data_all = {k: np.array(v) for k, v in data.items()}\n+ return data_all\n+\n+\n+def eval():\n+\n+ place = paddle.CUDAPlace(0) if FLAGS.devices == 'gpu' else paddle.CPUPlace()\n+ exe = paddle.static.Executor(place)\n+\n+ val_program, feed_target_names, fetch_targets = paddle.static.load_inference_model(\n+ global_config[\"model_dir\"].rstrip('/'),\n+ exe,\n+ model_filename=global_config[\"model_filename\"],\n+ params_filename=global_config[\"params_filename\"])\n+ print('Loaded model from: {}'.format(global_config[\"model_dir\"]))\n+\n+ metric = global_config['metric']\n+ for batch_id, data in enumerate(val_loader):\n+ data_all = convert_numpy_data(data, metric)\n+ data_input = {}\n+ for k, v in data.items():\n+ if isinstance(global_config['input_list'], list):\n+ if k in global_config['input_list']:\n+ data_input[k] = np.array(v)\n+ elif isinstance(global_config['input_list'], dict):\n+ if k in global_config['input_list'].keys():\n+ data_input[global_config['input_list'][k]] = np.array(v)\n+\n+ outs = exe.run(val_program,\n+ feed=data_input,\n+ fetch_list=fetch_targets,\n+ return_numpy=False)\n+ res = {}\n+ if 'arch' in global_config and global_config['arch'] == 'PPYOLOE':\n+ postprocess = PPYOLOEPostProcess(\n+ score_threshold=0.01, nms_threshold=0.6)\n+ res = postprocess(np.array(outs[0]), data_all['scale_factor'])\n+ else:\n+ for out in outs:\n+ v = np.array(out)\n+ if len(v.shape) > 1:\n+ res['bbox'] = v\n+ else:\n+ res['bbox_num'] = v\n+ metric.update(data_all, res)\n+ if batch_id % 100 == 0:\n+ print('Eval iter:', batch_id)\n+ metric.accumulate()\n+ metric.log()\n+ metric.reset()\n+\n+\n+def main():\n+ global global_config\n+ all_config = load_slim_config(FLAGS.config_path)\n+ assert \"Global\" in all_config, \"Key 'Global' not found in config file.\"\n+ global_config = all_config[\"Global\"]\n+ reader_cfg = load_config(global_config['reader_config'])\n+\n+ dataset = reader_cfg['EvalDataset']\n+ global val_loader\n+ val_loader = create('EvalReader')(reader_cfg['EvalDataset'],\n+ reader_cfg['worker_num'],\n+ return_list=True)\n+ metric = None\n+ if reader_cfg['metric'] == 'COCO':\n+ clsid2catid = {v: k for k, v in dataset.catid2clsid.items()}\n+ anno_file = dataset.get_anno()\n+ metric = COCOMetric(\n+ anno_file=anno_file, clsid2catid=clsid2catid, IouType='bbox')\n+ elif reader_cfg['metric'] == 'VOC':\n+ metric = VOCMetric(\n+ label_list=dataset.get_label_list(),\n+ class_num=reader_cfg['num_classes'],\n+ map_type=reader_cfg['map_type'])\n+ elif reader_cfg['metric'] == 'KeyPointTopDownCOCOEval':\n+ anno_file = dataset.get_anno()\n+ metric = KeyPointTopDownCOCOEval(anno_file,\n+ len(dataset), 17, 'output_eval')\n+ else:\n+ raise ValueError(\"metric currently only supports COCO and VOC.\")\n+ global_config['metric'] = metric\n+\n+ eval()\n+\n+\n+if __name__ == '__main__':\n+ paddle.enable_static()\n+ parser = argsparser()\n+ FLAGS = parser.parse_args()\n+ assert FLAGS.devices in ['cpu', 'gpu', 'xpu', 'npu']\n+ paddle.set_device(FLAGS.devices)\n+\n+ main()\n" }, { "change_type": "ADD", "old_path": null, "new_path": "deploy/auto_compression/post_process.py", "diff": "+# Copyright (c) 2022 PaddlePaddle Authors. 
All Rights Reserved.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n+import numpy as np\n+import cv2\n+\n+\n+def hard_nms(box_scores, iou_threshold, top_k=-1, candidate_size=200):\n+ \"\"\"\n+ Args:\n+ box_scores (N, 5): boxes in corner-form and probabilities.\n+ iou_threshold: intersection over union threshold.\n+ top_k: keep top_k results. If k <= 0, keep all the results.\n+ candidate_size: only consider the candidates with the highest scores.\n+ Returns:\n+ picked: a list of indexes of the kept boxes\n+ \"\"\"\n+ scores = box_scores[:, -1]\n+ boxes = box_scores[:, :-1]\n+ picked = []\n+ indexes = np.argsort(scores)\n+ indexes = indexes[-candidate_size:]\n+ while len(indexes) > 0:\n+ current = indexes[-1]\n+ picked.append(current)\n+ if 0 < top_k == len(picked) or len(indexes) == 1:\n+ break\n+ current_box = boxes[current, :]\n+ indexes = indexes[:-1]\n+ rest_boxes = boxes[indexes, :]\n+ iou = iou_of(\n+ rest_boxes,\n+ np.expand_dims(\n+ current_box, axis=0), )\n+ indexes = indexes[iou <= iou_threshold]\n+\n+ return box_scores[picked, :]\n+\n+\n+def iou_of(boxes0, boxes1, eps=1e-5):\n+ \"\"\"Return intersection-over-union (Jaccard index) of boxes.\n+ Args:\n+ boxes0 (N, 4): ground truth boxes.\n+ boxes1 (N or 1, 4): predicted boxes.\n+ eps: a small number to avoid 0 as denominator.\n+ Returns:\n+ iou (N): IoU values.\n+ \"\"\"\n+ overlap_left_top = np.maximum(boxes0[..., :2], boxes1[..., :2])\n+ overlap_right_bottom = np.minimum(boxes0[..., 2:], boxes1[..., 2:])\n+\n+ overlap_area = area_of(overlap_left_top, overlap_right_bottom)\n+ area0 = area_of(boxes0[..., :2], boxes0[..., 2:])\n+ area1 = area_of(boxes1[..., :2], boxes1[..., 2:])\n+ return overlap_area / (area0 + area1 - overlap_area + eps)\n+\n+\n+def area_of(left_top, right_bottom):\n+ \"\"\"Compute the areas of rectangles given two corners.\n+ Args:\n+ left_top (N, 2): left top corner.\n+ right_bottom (N, 2): right bottom corner.\n+ Returns:\n+ area (N): return the area.\n+ \"\"\"\n+ hw = np.clip(right_bottom - left_top, 0.0, None)\n+ return hw[..., 0] * hw[..., 1]\n+\n+\n+class PPYOLOEPostProcess(object):\n+ \"\"\"\n+ Args:\n+ input_shape (int): network input image size\n+ scale_factor (float): scale factor of ori image\n+ \"\"\"\n+\n+ def __init__(self,\n+ score_threshold=0.4,\n+ nms_threshold=0.5,\n+ nms_top_k=10000,\n+ keep_top_k=300):\n+ self.score_threshold = score_threshold\n+ self.nms_threshold = nms_threshold\n+ self.nms_top_k = nms_top_k\n+ self.keep_top_k = keep_top_k\n+\n+ def _non_max_suppression(self, prediction, scale_factor):\n+ batch_size = prediction.shape[0]\n+ out_boxes_list = []\n+ box_num_list = []\n+ for batch_id in range(batch_size):\n+ bboxes, confidences = prediction[batch_id][..., :4], prediction[\n+ batch_id][..., 4:]\n+ # nms\n+ picked_box_probs = []\n+ picked_labels = []\n+ for class_index in range(0, confidences.shape[1]):\n+ probs = confidences[:, class_index]\n+ mask = probs > self.score_threshold\n+ probs = probs[mask]\n+ if probs.shape[0] == 0:\n+ continue\n+ subset_boxes = 
bboxes[mask, :]\n+ box_probs = np.concatenate(\n+ [subset_boxes, probs.reshape(-1, 1)], axis=1)\n+ box_probs = hard_nms(\n+ box_probs,\n+ iou_threshold=self.nms_threshold,\n+ top_k=self.nms_top_k)\n+ picked_box_probs.append(box_probs)\n+ picked_labels.extend([class_index] * box_probs.shape[0])\n+\n+ if len(picked_box_probs) == 0:\n+ out_boxes_list.append(np.empty((0, 4)))\n+\n+ else:\n+ picked_box_probs = np.concatenate(picked_box_probs)\n+ # resize output boxes\n+ picked_box_probs[:, 0] /= scale_factor[batch_id][1]\n+ picked_box_probs[:, 2] /= scale_factor[batch_id][1]\n+ picked_box_probs[:, 1] /= scale_factor[batch_id][0]\n+ picked_box_probs[:, 3] /= scale_factor[batch_id][0]\n+\n+ # clas score box\n+ out_box = np.concatenate(\n+ [\n+ np.expand_dims(\n+ np.array(picked_labels), axis=-1), np.expand_dims(\n+ picked_box_probs[:, 4], axis=-1),\n+ picked_box_probs[:, :4]\n+ ],\n+ axis=1)\n+ if out_box.shape[0] > self.keep_top_k:\n+ out_box = out_box[out_box[:, 1].argsort()[::-1]\n+ [:self.keep_top_k]]\n+ out_boxes_list.append(out_box)\n+ box_num_list.append(out_box.shape[0])\n+\n+ out_boxes_list = np.concatenate(out_boxes_list, axis=0)\n+ box_num_list = np.array(box_num_list)\n+ return out_boxes_list, box_num_list\n+\n+ def __call__(self, outs, scale_factor):\n+ out_boxes_list, box_num_list = self._non_max_suppression(outs,\n+ scale_factor)\n+ return {'bbox': out_boxes_list, 'bbox_num': box_num_list}\n" }, { "change_type": "ADD", "old_path": null, "new_path": "deploy/auto_compression/run.py", "diff": "+# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n+import os\n+import sys\n+import numpy as np\n+import argparse\n+import paddle\n+from ppdet.core.workspace import load_config, merge_config\n+from ppdet.core.workspace import create\n+from ppdet.metrics import COCOMetric, VOCMetric, KeyPointTopDownCOCOEval\n+from paddleslim.auto_compression.config_helpers import load_config as load_slim_config\n+from paddleslim.auto_compression import AutoCompression\n+from post_process import PPYOLOEPostProcess\n+\n+\n+def argsparser():\n+ parser = argparse.ArgumentParser(description=__doc__)\n+ parser.add_argument(\n+ '--config_path',\n+ type=str,\n+ default=None,\n+ help=\"path of compression strategy config.\",\n+ required=True)\n+ parser.add_argument(\n+ '--save_dir',\n+ type=str,\n+ default='output',\n+ help=\"directory to save compressed model.\")\n+ parser.add_argument(\n+ '--devices',\n+ type=str,\n+ default='gpu',\n+ help=\"which device used to compress.\")\n+\n+ return parser\n+\n+\n+def reader_wrapper(reader, input_list):\n+ def gen():\n+ for data in reader:\n+ in_dict = {}\n+ if isinstance(input_list, list):\n+ for input_name in input_list:\n+ in_dict[input_name] = data[input_name]\n+ elif isinstance(input_list, dict):\n+ for input_name in input_list.keys():\n+ in_dict[input_list[input_name]] = data[input_name]\n+ yield in_dict\n+\n+ return gen\n+\n+\n+def convert_numpy_data(data, metric):\n+ data_all = {}\n+ data_all = 
{k: np.array(v) for k, v in data.items()}\n+ if isinstance(metric, VOCMetric):\n+ for k, v in data_all.items():\n+ if not isinstance(v[0], np.ndarray):\n+ tmp_list = []\n+ for t in v:\n+ tmp_list.append(np.array(t))\n+ data_all[k] = np.array(tmp_list)\n+ else:\n+ data_all = {k: np.array(v) for k, v in data.items()}\n+ return data_all\n+\n+\n+def eval_function(exe, compiled_test_program, test_feed_names, test_fetch_list):\n+ metric = global_config['metric']\n+ for batch_id, data in enumerate(val_loader):\n+ data_all = convert_numpy_data(data, metric)\n+ data_input = {}\n+ for k, v in data.items():\n+ if isinstance(global_config['input_list'], list):\n+ if k in test_feed_names:\n+ data_input[k] = np.array(v)\n+ elif isinstance(global_config['input_list'], dict):\n+ if k in global_config['input_list'].keys():\n+ data_input[global_config['input_list'][k]] = np.array(v)\n+ outs = exe.run(compiled_test_program,\n+ feed=data_input,\n+ fetch_list=test_fetch_list,\n+ return_numpy=False)\n+ res = {}\n+ if 'arch' in global_config and global_config['arch'] == 'PPYOLOE':\n+ postprocess = PPYOLOEPostProcess(\n+ score_threshold=0.01, nms_threshold=0.6)\n+ res = postprocess(np.array(outs[0]), data_all['scale_factor'])\n+ else:\n+ for out in outs:\n+ v = np.array(out)\n+ if len(v.shape) > 1:\n+ res['bbox'] = v\n+ else:\n+ res['bbox_num'] = v\n+\n+ metric.update(data_all, res)\n+ if batch_id % 100 == 0:\n+ print('Eval iter:', batch_id)\n+ metric.accumulate()\n+ metric.log()\n+ map_res = metric.get_results()\n+ metric.reset()\n+ map_key = 'keypoint' if 'arch' in global_config and global_config[\n+ 'arch'] == 'keypoint' else 'bbox'\n+ return map_res[map_key][0]\n+\n+\n+def main():\n+ global global_config\n+ all_config = load_slim_config(FLAGS.config_path)\n+ assert \"Global\" in all_config, \"Key 'Global' not found in config file.\"\n+ global_config = all_config[\"Global\"]\n+ reader_cfg = load_config(global_config['reader_config'])\n+\n+ train_loader = create('EvalReader')(reader_cfg['TrainDataset'],\n+ reader_cfg['worker_num'],\n+ return_list=True)\n+ train_loader = reader_wrapper(train_loader, global_config['input_list'])\n+\n+ if 'Evaluation' in global_config.keys() and global_config[\n+ 'Evaluation'] and paddle.distributed.get_rank() == 0:\n+ eval_func = eval_function\n+ dataset = reader_cfg['EvalDataset']\n+ global val_loader\n+ _eval_batch_sampler = paddle.io.BatchSampler(\n+ dataset, batch_size=reader_cfg['EvalReader']['batch_size'])\n+ val_loader = create('EvalReader')(dataset,\n+ reader_cfg['worker_num'],\n+ batch_sampler=_eval_batch_sampler,\n+ return_list=True)\n+ metric = None\n+ if reader_cfg['metric'] == 'COCO':\n+ clsid2catid = {v: k for k, v in dataset.catid2clsid.items()}\n+ anno_file = dataset.get_anno()\n+ metric = COCOMetric(\n+ anno_file=anno_file, clsid2catid=clsid2catid, IouType='bbox')\n+ elif reader_cfg['metric'] == 'VOC':\n+ metric = VOCMetric(\n+ label_list=dataset.get_label_list(),\n+ class_num=reader_cfg['num_classes'],\n+ map_type=reader_cfg['map_type'])\n+ elif reader_cfg['metric'] == 'KeyPointTopDownCOCOEval':\n+ anno_file = dataset.get_anno()\n+ metric = KeyPointTopDownCOCOEval(anno_file,\n+ len(dataset), 17, 'output_eval')\n+ else:\n+ raise ValueError(\"metric currently only supports COCO and VOC.\")\n+ global_config['metric'] = metric\n+ else:\n+ eval_func = None\n+\n+ ac = AutoCompression(\n+ model_dir=global_config[\"model_dir\"],\n+ model_filename=global_config[\"model_filename\"],\n+ params_filename=global_config[\"params_filename\"],\n+ save_dir=FLAGS.save_dir,\n+ 
config=all_config,\n+ train_dataloader=train_loader,\n+ eval_callback=eval_func)\n+ ac.compress()\n+\n+\n+if __name__ == '__main__':\n+ paddle.enable_static()\n+ parser = argsparser()\n+ FLAGS = parser.parse_args()\n+ assert FLAGS.devices in ['cpu', 'gpu', 'xpu', 'npu']\n+ paddle.set_device(FLAGS.devices)\n+\n+ main()\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
add PP-YOLOE Auto Compression demo (#6568)
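Note on the commit above: the post-processing module it adds (imported by run.py as "from post_process import PPYOLOEPostProcess", so presumably deploy/auto_compression/post_process.py) performs greedy class-wise NMS via hard_nms over rows laid out as [x1, y1, x2, y2, score]. A minimal sketch, with made-up boxes, of how hard_nms behaves; the import assumes the module is on the path next to run.py:

import numpy as np
from post_process import hard_nms  # assumed location: deploy/auto_compression/post_process.py

# three candidate boxes as [x1, y1, x2, y2, score]; the second heavily overlaps the first
box_scores = np.array([
    [10, 10, 50, 50, 0.90],
    [12, 12, 52, 52, 0.80],      # IoU with the first box is ~0.82, above the 0.5 threshold
    [100, 100, 150, 150, 0.70],
], dtype=np.float32)

kept = hard_nms(box_scores, iou_threshold=0.5)
# kept holds the 0.90 and 0.70 rows; the overlapping 0.80 row is suppressed
print(kept)

PPYOLOEPostProcess then applies this per class, rescales survivors by scale_factor and keeps at most keep_top_k, which is what eval_function in run.py relies on when the exported PP-YOLOE model carries no built-in NMS.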
499,339
04.08.2022 17:31:10
-28,800
befeaeb5424fcadaa70a2ff646a6a4b9c2ebf848
[dev] add white and black list for amp train
[ { "change_type": "MODIFY", "old_path": "ppdet/engine/trainer.py", "new_path": "ppdet/engine/trainer.py", "diff": "@@ -69,6 +69,8 @@ class Trainer(object):\nself.is_loaded_weights = False\nself.use_amp = self.cfg.get('amp', False)\nself.amp_level = self.cfg.get('amp_level', 'O1')\n+ self.custom_white_list = self.cfg.get('custom_white_list', None)\n+ self.custom_black_list = self.cfg.get('custom_black_list', None)\n# build data loader\ncapital_mode = self.mode.capitalize()\n@@ -155,8 +157,10 @@ class Trainer(object):\nself.pruner = create('UnstructuredPruner')(self.model,\nsteps_per_epoch)\nif self.use_amp and self.amp_level == 'O2':\n- self.model = paddle.amp.decorate(\n- models=self.model, level=self.amp_level)\n+ self.model, self.optimizer = paddle.amp.decorate(\n+ models=self.model,\n+ optimizers=self.optimizer,\n+ level=self.amp_level)\nself.use_ema = ('use_ema' in cfg and cfg['use_ema'])\nif self.use_ema:\nema_decay = self.cfg.get('ema_decay', 0.9998)\n@@ -456,7 +460,9 @@ class Trainer(object):\nDataParallel) and use_fused_allreduce_gradients:\nwith model.no_sync():\nwith paddle.amp.auto_cast(\n- enable=self.cfg.use_gpus,\n+ enable=self.cfg.use_gpu,\n+ custom_white_list=self.custom_white_list,\n+ custom_black_list=self.custom_black_list,\nlevel=self.amp_level):\n# model forward\noutputs = model(data)\n@@ -468,7 +474,10 @@ class Trainer(object):\nlist(model.parameters()), None)\nelse:\nwith paddle.amp.auto_cast(\n- enable=self.cfg.use_gpu, level=self.amp_level):\n+ enable=self.cfg.use_gpu,\n+ custom_white_list=self.custom_white_list,\n+ custom_black_list=self.custom_black_list,\n+ level=self.amp_level):\n# model forward\noutputs = model(data)\nloss = outputs['loss']\n@@ -477,7 +486,6 @@ class Trainer(object):\nscaled_loss.backward()\n# in dygraph mode, optimizer.minimize is equal to optimizer.step\nscaler.minimize(self.optimizer, scaled_loss)\n-\nelse:\nif isinstance(\nmodel, paddle.\n@@ -575,7 +583,10 @@ class Trainer(object):\n# forward\nif self.use_amp:\nwith paddle.amp.auto_cast(\n- enable=self.cfg.use_gpu, level=self.amp_level):\n+ enable=self.cfg.use_gpu,\n+ custom_white_list=self.custom_white_list,\n+ custom_black_list=self.custom_black_list,\n+ level=self.amp_level):\nouts = self.model(data)\nelse:\nouts = self.model(data)\n" }, { "change_type": "MODIFY", "old_path": "ppdet/optimizer/ema.py", "new_path": "ppdet/optimizer/ema.py", "diff": "@@ -66,7 +66,10 @@ class ModelEMA(object):\ndef resume(self, state_dict, step=0):\nfor k, v in state_dict.items():\nif k in self.state_dict:\n+ if self.state_dict[k].dtype == v.dtype:\nself.state_dict[k] = v\n+ else:\n+ self.state_dict[k] = v.astype(self.state_dict[k].dtype)\nself.step = step\ndef update(self, model=None):\n" }, { "change_type": "MODIFY", "old_path": "ppdet/utils/checkpoint.py", "new_path": "ppdet/utils/checkpoint.py", "diff": "@@ -84,9 +84,14 @@ def load_weight(model, weight, optimizer=None, ema=None):\nmodel_weight = {}\nincorrect_keys = 0\n- for key in model_dict.keys():\n+ for key, value in model_dict.items():\nif key in param_state_dict.keys():\n+ if isinstance(param_state_dict[key], np.ndarray):\n+ param_state_dict[key] = paddle.to_tensor(param_state_dict[key])\n+ if value.dtype == param_state_dict[key].dtype:\nmodel_weight[key] = param_state_dict[key]\n+ else:\n+ model_weight[key] = param_state_dict[key].astype(value.dtype)\nelse:\nlogger.info('Unmatched key: {}'.format(key))\nincorrect_keys += 1\n@@ -209,6 +214,12 @@ def load_pretrain_weight(model, pretrain_weight):\nparam_state_dict = 
paddle.load(weights_path)\nparam_state_dict = match_state_dict(model_dict, param_state_dict)\n+ for k, v in param_state_dict.items():\n+ if isinstance(v, np.ndarray):\n+ v = paddle.to_tensor(v)\n+ if model_dict[k].dtype != v.dtype:\n+ param_state_dict[k] = v.astype(model_dict[k].dtype)\n+\nmodel.set_dict(param_state_dict)\nlogger.info('Finish loading model weights: {}'.format(weights_path))\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
[dev] add white and black list for amp train (#6576)
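Note on the commit above: the trainer now threads two config keys, custom_white_list and custom_black_list, into paddle.amp.auto_cast, and for O2 it decorates the optimizer together with the model; the checkpoint/EMA changes just cast loaded tensors to the parameter dtype so fp16 weights resume cleanly. A rough standalone sketch of the AMP pattern the trainer follows (toy model, illustrative op names, assumes a GPU build of Paddle):

import paddle
import paddle.nn as nn

model = nn.Linear(16, 4)
opt = paddle.optimizer.SGD(learning_rate=0.01, parameters=model.parameters())

# O2 casts the parameters up front, so the optimizer must be decorated as well
model, opt = paddle.amp.decorate(models=model, optimizers=opt, level='O2')
scaler = paddle.amp.GradScaler(init_loss_scaling=1024)

x = paddle.randn([8, 16])
with paddle.amp.auto_cast(
        custom_white_list={'elementwise_add'},   # example op names only
        custom_black_list={'reduce_sum'},        # ops forced to stay in fp32
        level='O2'):
    loss = model(x).mean()

scaled = scaler.scale(loss)
scaled.backward()
scaler.minimize(opt, scaled)   # equivalent to an optimizer step here, as in the trainer
opt.clear_grad()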
499,299
08.08.2022 11:07:26
-28,800
09e7665d08920f4387396c65c4f1a7be2584574b
add attr doc for ppvehicle
[ { "change_type": "ADD", "old_path": "deploy/pipeline/docs/images/vehicle_attribute.gif", "new_path": "deploy/pipeline/docs/images/vehicle_attribute.gif", "diff": "Binary files /dev/null and b/deploy/pipeline/docs/images/vehicle_attribute.gif differ\n" }, { "change_type": "MODIFY", "old_path": "deploy/python/infer.py", "new_path": "deploy/python/infer.py", "diff": "@@ -42,7 +42,7 @@ from utils import argsparser, Timer, get_current_memory_mb\nSUPPORT_MODELS = {\n'YOLO', 'RCNN', 'SSD', 'Face', 'FCOS', 'SOLOv2', 'TTFNet', 'S2ANet', 'JDE',\n'FairMOT', 'DeepSORT', 'GFL', 'PicoDet', 'CenterNet', 'TOOD', 'RetinaNet',\n- 'StrongBaseline', 'STGCN', 'YOLOX', 'PPHGNet'\n+ 'StrongBaseline', 'STGCN', 'YOLOX', 'PPHGNet', 'PPLCNet'\n}\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
add attr doc for ppvehicle (#6593)
499,339
08.08.2022 16:24:26
-28,800
494f381f213b3ca863e74048d193e246916b767b
[TIPC] fix random seed in train benchmark
[ { "change_type": "MODIFY", "old_path": "test_tipc/configs/keypoint/tinypose_128x96_train_linux_gpu_normal_amp_infer_python_linux_gpu_cpu.txt", "new_path": "test_tipc/configs/keypoint/tinypose_128x96_train_linux_gpu_normal_amp_infer_python_linux_gpu_cpu.txt", "diff": "@@ -10,7 +10,7 @@ TrainReader.batch_size:lite_train_lite_infer=2|lite_train_whole_infer=2|whole_tr\npretrain_weights:https://paddledet.bj.bcebos.com/models/keypoint/tinypose_128x96.pdparams\ntrained_model_name:model_final.pdparams\ntrain_infer_img_dir:./dataset/coco/test2017/\n-amp_level:O2\n+null:null\n##\ntrainer:norm_train\nnorm_train:tools/train.py -c test_tipc/configs/keypoint/tinypose_128x96.yml -o\n" }, { "change_type": "MODIFY", "old_path": "test_tipc/configs/mask_rcnn/mask_rcnn_r50_fpn_1x_coco_train_linux_gpu_normal_amp_infer_python_linux_gpu_cpu.txt", "new_path": "test_tipc/configs/mask_rcnn/mask_rcnn_r50_fpn_1x_coco_train_linux_gpu_normal_amp_infer_python_linux_gpu_cpu.txt", "diff": "@@ -10,7 +10,7 @@ TrainReader.batch_size:lite_train_lite_infer=2|lite_train_whole_infer=2|whole_tr\npretrain_weights:https://paddledet.bj.bcebos.com/models/mask_rcnn_r50_fpn_1x_coco.pdparams\ntrained_model_name:model_final.pdparams\ntrain_infer_img_dir:./dataset/coco/test2017/\n-amp_level:O2\n+null:null\n##\ntrainer:norm_train\nnorm_train:tools/train.py -c configs/mask_rcnn/mask_rcnn_r50_fpn_1x_coco.yml -o\n" }, { "change_type": "MODIFY", "old_path": "test_tipc/configs/picodet/picodet_lcnet_1_5x_416_coco_train_linux_gpu_normal_amp_infer_python_linux_gpu_cpu.txt", "new_path": "test_tipc/configs/picodet/picodet_lcnet_1_5x_416_coco_train_linux_gpu_normal_amp_infer_python_linux_gpu_cpu.txt", "diff": "@@ -10,7 +10,7 @@ TrainReader.batch_size:lite_train_lite_infer=2|lite_train_whole_infer=2|whole_tr\npretrain_weights:https://paddledet.bj.bcebos.com/models/picodet_lcnet_1_5x_416_coco.pdparams\ntrained_model_name:model_final.pdparams\ntrain_infer_img_dir:./dataset/coco/test2017/\n-amp_level:O2\n+null:null\n##\ntrainer:norm_train\nnorm_train:tools/train.py -c configs/picodet/legacy_model/more_config/picodet_lcnet_1_5x_416_coco.yml -o\n" }, { "change_type": "MODIFY", "old_path": "test_tipc/configs/picodet/picodet_s_320_coco_lcnet_train_linux_gpu_normal_amp_infer_python_linux_gpu_cpu.txt", "new_path": "test_tipc/configs/picodet/picodet_s_320_coco_lcnet_train_linux_gpu_normal_amp_infer_python_linux_gpu_cpu.txt", "diff": "@@ -10,7 +10,7 @@ TrainReader.batch_size:lite_train_lite_infer=2|lite_train_whole_infer=2|whole_tr\npretrain_weights:https://paddledet.bj.bcebos.com/models/picodet_s_320_coco_lcnet.pdparams\ntrained_model_name:model_final.pdparams\ntrain_infer_img_dir:./dataset/coco/test2017/\n-amp_level:O2\n+null:null\n##\ntrainer:norm_train\nnorm_train:tools/train.py -c configs/picodet/picodet_s_320_coco_lcnet.yml -o\n" }, { "change_type": "MODIFY", "old_path": "test_tipc/configs/picodet/picodet_s_320_coco_train_linux_gpu_normal_amp_infer_python_linux_gpu_cpu.txt", "new_path": "test_tipc/configs/picodet/picodet_s_320_coco_train_linux_gpu_normal_amp_infer_python_linux_gpu_cpu.txt", "diff": "@@ -10,7 +10,7 @@ TrainReader.batch_size:lite_train_lite_infer=2|lite_train_whole_infer=2|whole_tr\npretrain_weights:https://paddledet.bj.bcebos.com/models/picodet_s_320_coco.pdparams\ntrained_model_name:model_final.pdparams\ntrain_infer_img_dir:./dataset/coco/test2017/\n-amp_level:O2\n+null:null\n##\ntrainer:norm_train\nnorm_train:tools/train.py -c configs/picodet/legacy_model/picodet_s_320_coco.yml -o\n" }, { "change_type": "MODIFY", "old_path": 
"test_tipc/configs/ppyolo/ppyolo_mbv3_large_coco_train_linux_gpu_normal_amp_infer_python_linux_gpu_cpu.txt", "new_path": "test_tipc/configs/ppyolo/ppyolo_mbv3_large_coco_train_linux_gpu_normal_amp_infer_python_linux_gpu_cpu.txt", "diff": "@@ -10,7 +10,7 @@ TrainReader.batch_size:lite_train_lite_infer=2|lite_train_whole_infer=2|whole_tr\npretrain_weights:https://paddledet.bj.bcebos.com/models/ppyolo_mbv3_large_coco.pdparams\ntrained_model_name:model_final.pdparams\ntrain_infer_img_dir:./dataset/coco/test2017/\n-amp_level:O2\n+null:null\n##\ntrainer:norm_train\nnorm_train:tools/train.py -c configs/ppyolo/ppyolo_mbv3_large_coco.yml -o\n" }, { "change_type": "MODIFY", "old_path": "test_tipc/configs/ppyolo/ppyolo_r50vd_dcn_1x_coco_train_linux_gpu_normal_amp_infer_python_linux_gpu_cpu.txt", "new_path": "test_tipc/configs/ppyolo/ppyolo_r50vd_dcn_1x_coco_train_linux_gpu_normal_amp_infer_python_linux_gpu_cpu.txt", "diff": "@@ -10,7 +10,7 @@ TrainReader.batch_size:lite_train_lite_infer=2|lite_train_whole_infer=2|whole_tr\npretrain_weights:https://paddledet.bj.bcebos.com/models/ppyolo_r50vd_dcn_1x_coco.pdparams\ntrained_model_name:model_final.pdparams\ntrain_infer_img_dir:./dataset/coco/test2017/\n-amp_level:O2\n+null:null\n##\ntrainer:norm_train\nnorm_train:tools/train.py -c configs/ppyolo/ppyolo_r50vd_dcn_1x_coco.yml -o\n" }, { "change_type": "MODIFY", "old_path": "test_tipc/configs/ppyolo/ppyolo_tiny_650e_coco_train_linux_gpu_normal_amp_infer_python_linux_gpu_cpu.txt", "new_path": "test_tipc/configs/ppyolo/ppyolo_tiny_650e_coco_train_linux_gpu_normal_amp_infer_python_linux_gpu_cpu.txt", "diff": "@@ -10,7 +10,7 @@ TrainReader.batch_size:lite_train_lite_infer=2|lite_train_whole_infer=2|whole_tr\npretrain_weights:https://paddledet.bj.bcebos.com/models/ppyolo_tiny_650e_coco.pdparams\ntrained_model_name:model_final.pdparams\ntrain_infer_img_dir:./dataset/coco/test2017/\n-amp_level:O2\n+null:null\n##\ntrainer:norm_train\nnorm_train:tools/train.py -c configs/ppyolo/ppyolo_tiny_650e_coco.yml -o\n" }, { "change_type": "MODIFY", "old_path": "test_tipc/configs/ppyolo/ppyolov2_r50vd_dcn_365e_coco_train_linux_gpu_normal_amp_infer_python_linux_gpu_cpu.txt", "new_path": "test_tipc/configs/ppyolo/ppyolov2_r50vd_dcn_365e_coco_train_linux_gpu_normal_amp_infer_python_linux_gpu_cpu.txt", "diff": "@@ -10,7 +10,7 @@ TrainReader.batch_size:lite_train_lite_infer=2|lite_train_whole_infer=2|whole_tr\npretrain_weights:https://paddledet.bj.bcebos.com/models/ppyolov2_r50vd_dcn_365e_coco.pdparams\ntrained_model_name:model_final.pdparams\ntrain_infer_img_dir:./dataset/coco/test2017/\n-amp_level:O2\n+null:null\n##\ntrainer:norm_train\nnorm_train:tools/train.py -c configs/ppyolo/ppyolov2_r50vd_dcn_365e_coco.yml -o\n" }, { "change_type": "MODIFY", "old_path": "test_tipc/configs/ppyoloe/ppyoloe_crn_s_300e_coco_train_linux_gpu_normal_amp_infer_python_linux_gpu_cpu.txt", "new_path": "test_tipc/configs/ppyoloe/ppyoloe_crn_s_300e_coco_train_linux_gpu_normal_amp_infer_python_linux_gpu_cpu.txt", "diff": "@@ -10,7 +10,7 @@ TrainReader.batch_size:lite_train_lite_infer=2|lite_train_whole_infer=2|whole_tr\npretrain_weights:https://paddledet.bj.bcebos.com/models/ppyoloe_crn_s_300e_coco.pdparams\ntrained_model_name:model_final.pdparams\ntrain_infer_img_dir:./dataset/coco/test2017/\n-amp_level:O2\n+null:null\n##\ntrainer:norm_train\nnorm_train:tools/train.py -c configs/ppyoloe/ppyoloe_crn_s_300e_coco.yml -o\n" }, { "change_type": "MODIFY", "old_path": 
"test_tipc/configs/yolov3/yolov3_darknet53_270e_coco_train_linux_gpu_normal_amp_infer_python_linux_gpu_cpu.txt", "new_path": "test_tipc/configs/yolov3/yolov3_darknet53_270e_coco_train_linux_gpu_normal_amp_infer_python_linux_gpu_cpu.txt", "diff": "@@ -10,7 +10,7 @@ TrainReader.batch_size:lite_train_lite_infer=2|lite_train_whole_infer=2|whole_tr\npretrain_weights:https://paddledet.bj.bcebos.com/models/yolov3_darknet53_270e_coco.pdparams\ntrained_model_name:model_final.pdparams\ntrain_infer_img_dir:./dataset/coco/test2017/\n-amp_level:O2\n+null:null\n##\ntrainer:norm_train\nnorm_train:tools/train.py -c configs/yolov3/yolov3_darknet53_270e_coco.yml -o\n" }, { "change_type": "MODIFY", "old_path": "test_tipc/test_train_inference_python.sh", "new_path": "test_tipc/test_train_inference_python.sh", "diff": "@@ -271,17 +271,25 @@ else\nsave_log=\"${LOG_PATH}/${trainer}_gpus_${gpu}_autocast_${autocast}\"\nif [ ${autocast} = \"amp\" ] || [ ${autocast} = \"fp16\" ]; then\nset_autocast=\"--amp\"\n- set_train_params1=\"amp_level=O2\"\n+ set_amp_level=\"amp_level=O2\"\nelse\nset_autocast=\" \"\n+ set_amp_level=\" \"\n+ fi\n+ if [ ${MODE} = \"benchmark_train\" ]; then\n+ set_shuffle=\"TrainReader.shuffle=False\"\n+ set_enable_ce=\"--enable_ce=True\"\n+ else\n+ set_shuffle=\" \"\n+ set_enable_ce=\" \"\nfi\nset_save_model=$(func_set_params \"${save_model_key}\" \"${save_log}\")\nnodes=\"1\"\nif [ ${#gpu} -le 2 ];then # train with cpu or single gpu\n- cmd=\"${python} ${run_train} LearningRate.base_lr=0.0001 log_iter=1 ${set_use_gpu} ${set_save_model} ${set_epoch} ${set_pretrain} ${set_batchsize} ${set_filename} ${set_train_params1} ${set_autocast}\"\n+ cmd=\"${python} ${run_train} LearningRate.base_lr=0.0001 log_iter=1 ${set_use_gpu} ${set_save_model} ${set_epoch} ${set_pretrain} ${set_batchsize} ${set_filename} ${set_shuffle} ${set_amp_level} ${set_enable_ce} ${set_autocast} ${set_train_params1}\"\nelif [ ${#ips} -le 15 ];then # train with multi-gpu\n- cmd=\"${python} -m paddle.distributed.launch --gpus=${gpu} ${run_train} log_iter=1 ${set_use_gpu} ${set_save_model} ${set_epoch} ${set_pretrain} ${set_batchsize} ${set_filename} ${set_train_params1} ${set_autocast}\"\n+ cmd=\"${python} -m paddle.distributed.launch --gpus=${gpu} ${run_train} log_iter=1 ${set_use_gpu} ${set_save_model} ${set_epoch} ${set_pretrain} ${set_batchsize} ${set_filename} ${set_shuffle} ${set_amp_level} ${set_enable_ce} ${set_autocast} ${set_train_params1}\"\nelse # train with multi-machine\nIFS=\",\"\nips_array=(${ips})\n@@ -289,7 +297,7 @@ else\nsave_log=\"${LOG_PATH}/${trainer}_gpus_${gpu}_autocast_${autocast}_nodes_${nodes}\"\nIFS=\"|\"\nset_save_model=$(func_set_params \"${save_model_key}\" \"${save_log}\")\n- cmd=\"${python} -m paddle.distributed.launch --ips=${ips} --gpus=${gpu} ${run_train} log_iter=1 ${set_use_gpu} ${set_save_model} ${set_epoch} ${set_pretrain} ${set_batchsize} ${set_filename} ${set_train_params1} ${set_autocast}\"\n+ cmd=\"${python} -m paddle.distributed.launch --ips=${ips} --gpus=${gpu} ${run_train} log_iter=1 ${set_use_gpu} ${set_save_model} ${set_epoch} ${set_pretrain} ${set_batchsize} ${set_filename} ${set_shuffle} ${set_amp_level} ${set_enable_ce} ${set_autocast} ${set_train_params1}\"\nfi\n# run train\ntrain_log_path=\"${LOG_PATH}/${trainer}_gpus_${gpu}_autocast_${autocast}_nodes_${nodes}.log\"\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
[TIPC] fix random seed in train benchmark (#6603)
499,339
10.08.2022 11:12:44
-28,800
cb89c8d0567073aa3a1506fc679617c8ac13a1b2
[dev] fix trt nms error output in ppyoloe
[ { "change_type": "MODIFY", "old_path": "ppdet/modeling/layers.py", "new_path": "ppdet/modeling/layers.py", "diff": "@@ -481,8 +481,9 @@ class MultiClassNMS(object):\n# TODO(wangxinxin08): tricky switch to run nms on tensorrt\nkwargs.update({'nms_eta': 1.1})\nbbox, bbox_num, _ = ops.multiclass_nms(bboxes, score, **kwargs)\n- mask = paddle.slice(bbox, [-1], [0], [1]) != -1\n- bbox = paddle.masked_select(bbox, mask).reshape((-1, 6))\n+ bbox = bbox.reshape([1, -1, 6])\n+ idx = paddle.nonzero(bbox[..., 0] != -1)\n+ bbox = paddle.gather_nd(bbox, idx)\nreturn bbox, bbox_num, None\nelse:\nreturn ops.multiclass_nms(bboxes, score, **kwargs)\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
[dev] fix trt nms error output in ppyoloe (#6607)
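Note on the commit above: the replaced lines swap paddle.masked_select (plus a reshape) for nonzero + gather_nd when dropping the padded rows that multiclass_nms emits with class id -1; presumably the gather form converts to TensorRT more cleanly, though the commit itself does not state the reason. A small sketch of the row filtering on fake NMS output:

import paddle

# multiclass_nms pads its output with rows whose class id is -1; drop them
bbox = paddle.to_tensor([[ 0.0, 0.9, 10.0, 10.0, 50.0, 50.0],
                         [-1.0, 0.0,  0.0,  0.0,  0.0,  0.0],   # padding row
                         [ 2.0, 0.7, 30.0, 30.0, 80.0, 80.0]])

bbox = bbox.reshape([1, -1, 6])
idx = paddle.nonzero(bbox[..., 0] != -1)   # (batch, row) coordinates of real detections
bbox = paddle.gather_nd(bbox, idx)         # back to shape [num_real, 6]
print(bbox.numpy())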
499,333
10.08.2022 21:16:04
-28,800
42a4d70710f019ab3219308baf17937e5cf5ec34
update qq qr-code, test=document_fix
[ { "change_type": "MODIFY", "old_path": "README_en.md", "new_path": "README_en.md", "diff": "Welcome to join PaddleDetection user groups on QQ, WeChat (scan the QR code, add and reply \"D\" to the assistant)\n<div align=\"center\">\n- <img src=\"https://user-images.githubusercontent.com/48054808/157800129-2f9a0b72-6bb8-4b10-8310-93ab1639253f.jpg\" width = \"200\" />\n- <img src=\"https://user-images.githubusercontent.com/48054808/160531099-9811bbe6-cfbb-47d5-8bdb-c2b40684d7dd.png\" width = \"200\" />\n+ <img src=\"https://user-images.githubusercontent.com/22989727/183843004-baebf75f-af7c-4a7c-8130-1497b9a3ec7e.png\" width = \"200\" />\n+ <img src=\"https://user-images.githubusercontent.com/34162360/177678712-4655747d-4290-4ad9-b7a1-4564a5418ac6.jpg\" width = \"200\" />\n</div>\n## <img src=\"https://user-images.githubusercontent.com/48054808/157827140-03ffaff7-7d14-48b4-9440-c38986ea378c.png\" width=\"20\"/> Kit Structure\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
update qq qr-code, test=document_fix (#6626)
499,301
11.08.2022 17:05:50
-28,800
2607dbca4b643b79c66b245152d491e4afee0327
recompute flag
[ { "change_type": "MODIFY", "old_path": "configs/vitdet/cascade_rcnn_vit_large_hrfpn_cae_1x_coco.yml", "new_path": "configs/vitdet/cascade_rcnn_vit_large_hrfpn_cae_1x_coco.yml", "diff": "@@ -7,6 +7,7 @@ weights: output/cascade_rcnn_vit_large_hrfpn_cae_1x_coco/model_final\ndepth: &depth 24\ndim: &dim 1024\n+use_fused_allreduce_gradients: &use_checkpoint True\nVisionTransformer:\nimg_size: [800, 1344]\n@@ -15,6 +16,7 @@ VisionTransformer:\nnum_heads: 16\ndrop_path_rate: 0.25\nout_indices: [7, 11, 15, 23]\n+ use_checkpoint: *use_checkpoint\npretrained: https://bj.bcebos.com/v1/paddledet/models/pretrained/vit_large_cae_pretrained.pdparams\nHRFPN:\n" }, { "change_type": "MODIFY", "old_path": "ppdet/modeling/backbones/vision_transformer.py", "new_path": "ppdet/modeling/backbones/vision_transformer.py", "diff": "@@ -596,7 +596,7 @@ class VisionTransformer(nn.Layer):\nfeats = []\nfor idx, blk in enumerate(self.blocks):\n- if self.use_checkpoint:\n+ if self.use_checkpoint and self.training:\nx = paddle.distributed.fleet.utils.recompute(\nblk, x, rel_pos_bias, **{\"preserve_rng_state\": True})\nelse:\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
recompute flag (#6628)
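Note on the commit above: the backbone now routes blocks through recompute only while the model is in training mode, so evaluation and export skip the rewind-and-replay machinery; the YAML change anchors a single True (written under the use_fused_allreduce_gradients key) and reuses it for VisionTransformer.use_checkpoint. A rough dygraph sketch of that guard around Paddle's recompute utility (toy blocks, not the ViT itself):

import paddle
import paddle.nn as nn
from paddle.distributed.fleet.utils import recompute

class Block(nn.Layer):
    def __init__(self):
        super().__init__()
        self.fc = nn.Linear(32, 32)

    def forward(self, x):
        return paddle.nn.functional.relu(self.fc(x))

class Net(nn.Layer):
    def __init__(self, use_checkpoint=True):
        super().__init__()
        self.blocks = nn.LayerList([Block() for _ in range(4)])
        self.use_checkpoint = use_checkpoint

    def forward(self, x):
        for blk in self.blocks:
            if self.use_checkpoint and self.training:   # the guard added by this commit
                x = recompute(blk, x, preserve_rng_state=True)
            else:
                x = blk(x)
        return x

net = Net()
net(paddle.randn([2, 32])).mean().backward()   # training path: activations recomputed in backward
net.eval()
_ = net(paddle.randn([2, 32]))                 # eval/export path: plain forward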
499,298
11.08.2022 17:22:26
-28,800
936ec224e9cb64413e962c37dfa5fb5e7f37d31e
add Resize export default interp
[ { "change_type": "MODIFY", "old_path": "ppdet/engine/export_utils.py", "new_path": "ppdet/engine/export_utils.py", "diff": "@@ -92,6 +92,7 @@ def _parse_reader(reader_cfg, dataset_cfg, metric, arch, image_shape):\nif key == 'Resize':\nif int(image_shape[1]) != -1:\nvalue['target_size'] = image_shape[1:]\n+ value['interp'] = value.get('interp', 1) # cv2.INTER_LINEAR\nif fuse_normalize and key == 'NormalizeImage':\ncontinue\np.update(value)\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
add Resize export default interp (#6632)
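Note on the commit above: the one-line change pins a default interpolation method for Resize when the reader config omits one, so the exported preprocess config records interp explicitly. The default value 1 corresponds to cv2.INTER_LINEAR:

import cv2

resize_cfg = {'target_size': [640, 640], 'keep_ratio': False}   # no 'interp' given, as in many reader configs
resize_cfg['interp'] = resize_cfg.get('interp', 1)
assert resize_cfg['interp'] == cv2.INTER_LINEAR   # 1 == bilinear in OpenCV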
499,299
15.08.2022 10:40:43
-28,800
ff8a7b1d090a2f57048d3e87892706a8407dcfe6
move initialize part into class
[ { "change_type": "MODIFY", "old_path": "deploy/pipeline/pipeline.py", "new_path": "deploy/pipeline/pipeline.py", "diff": "@@ -60,29 +60,8 @@ class Pipeline(object):\nPipeline\nArgs:\n+ args (argparse.Namespace): arguments in pipeline, which contains environment and runtime settings\ncfg (dict): config of models in pipeline\n- image_file (string|None): the path of image file, default as None\n- image_dir (string|None): the path of image directory, if not None,\n- then all the images in directory will be predicted, default as None\n- video_file (string|None): the path of video file, default as None\n- camera_id (int): the device id of camera to predict, default as -1\n- device (string): the device to predict, options are: CPU/GPU/XPU,\n- default as CPU\n- run_mode (string): the mode of prediction, options are:\n- paddle/trt_fp32/trt_fp16, default as paddle\n- trt_min_shape (int): min shape for dynamic shape in trt, default as 1\n- trt_max_shape (int): max shape for dynamic shape in trt, default as 1280\n- trt_opt_shape (int): opt shape for dynamic shape in trt, default as 640\n- trt_calib_mode (bool): If the model is produced by TRT offline quantitative\n- calibration, trt_calib_mode need to set True. default as False\n- cpu_threads (int): cpu threads, default as 1\n- enable_mkldnn (bool): whether to open MKLDNN, default as False\n- output_dir (string): The path of output, default as 'output'\n- draw_center_traj (bool): Whether drawing the trajectory of center, default as False\n- secs_interval (int): The seconds interval to count after tracking, default as 10\n- do_entrance_counting(bool): Whether counting the numbers of identifiers entering\n- or getting out from the entrance, default as False, only support single class\n- counting in MOT.\n\"\"\"\ndef __init__(self, args, cfg):\n@@ -108,18 +87,6 @@ class Pipeline(object):\nif self.is_video:\nself.predictor.set_file_name(args.video_file)\n- self.output_dir = args.output_dir\n- self.draw_center_traj = args.draw_center_traj\n- self.secs_interval = args.secs_interval\n- self.do_entrance_counting = args.do_entrance_counting\n- self.do_break_in_counting = args.do_break_in_counting\n- self.region_type = args.region_type\n- self.region_polygon = args.region_polygon\n- if self.region_type == 'custom':\n- assert len(\n- self.region_polygon\n- ) > 6, 'region_type is custom, region_polygon should be at least 3 pairs of point coords.'\n-\ndef _parse_input(self, image_file, image_dir, video_file, video_dir,\ncamera_id):\n@@ -179,8 +146,10 @@ class Pipeline(object):\ndef get_model_dir(cfg):\n- # auto download inference model\n- model_dir_dict = {}\n+ \"\"\"\n+ Auto download inference model if the model_path is a url link.\n+ Otherwise it will use the model_path directly.\n+ \"\"\"\nfor key in cfg.keys():\nif type(cfg[key]) == dict and \\\n(\"enable\" in cfg[key].keys() and cfg[key]['enable']\n@@ -191,30 +160,30 @@ def get_model_dir(cfg):\ndownloaded_model_dir = auto_download_model(model_dir)\nif downloaded_model_dir:\nmodel_dir = downloaded_model_dir\n- model_dir_dict[key] = model_dir\n+ cfg[key][\"model_dir\"] = model_dir\nprint(key, \" model dir: \", model_dir)\nelif key == \"VEHICLE_PLATE\":\ndet_model_dir = cfg[key][\"det_model_dir\"]\ndownloaded_det_model_dir = auto_download_model(det_model_dir)\nif downloaded_det_model_dir:\ndet_model_dir = downloaded_det_model_dir\n- model_dir_dict[\"det_model_dir\"] = det_model_dir\n+ cfg[key][\"det_model_dir\"] = det_model_dir\nprint(\"det_model_dir model dir: \", det_model_dir)\nrec_model_dir = 
cfg[key][\"rec_model_dir\"]\ndownloaded_rec_model_dir = auto_download_model(rec_model_dir)\nif downloaded_rec_model_dir:\nrec_model_dir = downloaded_rec_model_dir\n- model_dir_dict[\"rec_model_dir\"] = rec_model_dir\n+ cfg[key][\"rec_model_dir\"] = rec_model_dir\nprint(\"rec_model_dir model dir: \", rec_model_dir)\n+\nelif key == \"MOT\": # for idbased and skeletonbased actions\nmodel_dir = cfg[key][\"model_dir\"]\ndownloaded_model_dir = auto_download_model(model_dir)\nif downloaded_model_dir:\nmodel_dir = downloaded_model_dir\n- model_dir_dict[key] = model_dir\n-\n- return model_dir_dict\n+ cfg[key][\"model_dir\"] = model_dir\n+ print(\"mot_model_dir model_dir: \", model_dir)\nclass PipePredictor(object):\n@@ -234,47 +203,14 @@ class PipePredictor(object):\n4. VideoAction Recognition\nArgs:\n+ args (argparse.Namespace): arguments in pipeline, which contains environment and runtime settings\ncfg (dict): config of models in pipeline\nis_video (bool): whether the input is video, default as False\nmulti_camera (bool): whether to use multi camera in pipeline,\ndefault as False\n- camera_id (int): the device id of camera to predict, default as -1\n- device (string): the device to predict, options are: CPU/GPU/XPU,\n- default as CPU\n- run_mode (string): the mode of prediction, options are:\n- paddle/trt_fp32/trt_fp16, default as paddle\n- trt_min_shape (int): min shape for dynamic shape in trt, default as 1\n- trt_max_shape (int): max shape for dynamic shape in trt, default as 1280\n- trt_opt_shape (int): opt shape for dynamic shape in trt, default as 640\n- trt_calib_mode (bool): If the model is produced by TRT offline quantitative\n- calibration, trt_calib_mode need to set True. default as False\n- cpu_threads (int): cpu threads, default as 1\n- enable_mkldnn (bool): whether to open MKLDNN, default as False\n- output_dir (string): The path of output, default as 'output'\n- draw_center_traj (bool): Whether drawing the trajectory of center, default as False\n- secs_interval (int): The seconds interval to count after tracking, default as 10\n- do_entrance_counting(bool): Whether counting the numbers of identifiers entering\n- or getting out from the entrance, default as False, only support single class\n- counting in MOT.\n\"\"\"\ndef __init__(self, args, cfg, is_video=True, multi_camera=False):\n- device = args.device\n- run_mode = args.run_mode\n- trt_min_shape = args.trt_min_shape\n- trt_max_shape = args.trt_max_shape\n- trt_opt_shape = args.trt_opt_shape\n- trt_calib_mode = args.trt_calib_mode\n- cpu_threads = args.cpu_threads\n- enable_mkldnn = args.enable_mkldnn\n- output_dir = args.output_dir\n- draw_center_traj = args.draw_center_traj\n- secs_interval = args.secs_interval\n- do_entrance_counting = args.do_entrance_counting\n- do_break_in_counting = args.do_break_in_counting\n- region_type = args.region_type\n- region_polygon = args.region_polygon\n-\n# general module for pphuman and ppvehicle\nself.with_mot = cfg.get('MOT', False)['enable'] if cfg.get(\n'MOT', False) else False\n@@ -347,13 +283,13 @@ class PipePredictor(object):\nself.is_video = is_video\nself.multi_camera = multi_camera\nself.cfg = cfg\n- self.output_dir = output_dir\n- self.draw_center_traj = draw_center_traj\n- self.secs_interval = secs_interval\n- self.do_entrance_counting = do_entrance_counting\n- self.do_break_in_counting = do_break_in_counting\n- self.region_type = region_type\n- self.region_polygon = region_polygon\n+ self.output_dir = args.output_dir\n+ self.draw_center_traj = args.draw_center_traj\n+ 
self.secs_interval = args.secs_interval\n+ self.do_entrance_counting = args.do_entrance_counting\n+ self.do_break_in_counting = args.do_break_in_counting\n+ self.region_type = args.region_type\n+ self.region_polygon = args.region_polygon\nself.warmup_frame = self.cfg['warmup_frame']\nself.pipeline_res = Result()\n@@ -362,7 +298,7 @@ class PipePredictor(object):\nself.collector = DataCollector()\n# auto download inference model\n- model_dir_dict = get_model_dir(self.cfg)\n+ get_model_dir(self.cfg)\nif self.with_vehicleplate:\nvehicleplate_cfg = self.cfg['VEHICLE_PLATE']\n@@ -372,148 +308,84 @@ class PipePredictor(object):\nif self.with_human_attr:\nattr_cfg = self.cfg['ATTR']\n- model_dir = model_dir_dict['ATTR']\n- batch_size = attr_cfg['batch_size']\nbasemode = self.basemode['ATTR']\nself.modebase[basemode] = True\n- self.attr_predictor = AttrDetector(\n- model_dir, device, run_mode, batch_size, trt_min_shape,\n- trt_max_shape, trt_opt_shape, trt_calib_mode, cpu_threads,\n- enable_mkldnn)\n+ self.attr_predictor = AttrDetector.init_with_cfg(args, attr_cfg)\nif self.with_vehicle_attr:\nvehicleattr_cfg = self.cfg['VEHICLE_ATTR']\n- model_dir = model_dir_dict['VEHICLE_ATTR']\n- batch_size = vehicleattr_cfg['batch_size']\n- color_threshold = vehicleattr_cfg['color_threshold']\n- type_threshold = vehicleattr_cfg['type_threshold']\nbasemode = self.basemode['VEHICLE_ATTR']\nself.modebase[basemode] = True\n- self.vehicle_attr_predictor = VehicleAttr(\n- model_dir, device, run_mode, batch_size, trt_min_shape,\n- trt_max_shape, trt_opt_shape, trt_calib_mode, cpu_threads,\n- enable_mkldnn, color_threshold, type_threshold)\n+ self.vehicle_attr_predictor = VehicleAttr.init_with_cfg(\n+ args, vehicleattr_cfg)\nif not is_video:\ndet_cfg = self.cfg['DET']\n- model_dir = model_dir_dict['DET']\n+ model_dir = det_cfg['model_dir']\nbatch_size = det_cfg['batch_size']\nself.det_predictor = Detector(\n- model_dir, device, run_mode, batch_size, trt_min_shape,\n- trt_max_shape, trt_opt_shape, trt_calib_mode, cpu_threads,\n- enable_mkldnn)\n-\n+ model_dir, args.device, args.run_mode, batch_size,\n+ args.trt_min_shape, args.trt_max_shape, args.trt_opt_shape,\n+ args.trt_calib_mode, args.cpu_threads, args.enable_mkldnn)\nelse:\nif self.with_idbased_detaction:\nidbased_detaction_cfg = self.cfg['ID_BASED_DETACTION']\n- model_dir = model_dir_dict['ID_BASED_DETACTION']\n- batch_size = idbased_detaction_cfg['batch_size']\nbasemode = self.basemode['ID_BASED_DETACTION']\n- threshold = idbased_detaction_cfg['threshold']\n- display_frames = idbased_detaction_cfg['display_frames']\n- skip_frame_num = idbased_detaction_cfg['skip_frame_num']\nself.modebase[basemode] = True\n- self.det_action_predictor = DetActionRecognizer(\n- model_dir,\n- device,\n- run_mode,\n- batch_size,\n- trt_min_shape,\n- trt_max_shape,\n- trt_opt_shape,\n- trt_calib_mode,\n- cpu_threads,\n- enable_mkldnn,\n- threshold=threshold,\n- display_frames=display_frames,\n- skip_frame_num=skip_frame_num)\n+ self.det_action_predictor = DetActionRecognizer.init_with_cfg(\n+ args, idbased_detaction_cfg)\nself.det_action_visual_helper = ActionVisualHelper(1)\nif self.with_idbased_clsaction:\nidbased_clsaction_cfg = self.cfg['ID_BASED_CLSACTION']\n- model_dir = model_dir_dict['ID_BASED_CLSACTION']\n- batch_size = idbased_clsaction_cfg['batch_size']\nbasemode = self.basemode['ID_BASED_CLSACTION']\n- threshold = idbased_clsaction_cfg['threshold']\nself.modebase[basemode] = True\n- display_frames = idbased_clsaction_cfg['display_frames']\n- skip_frame_num = 
idbased_clsaction_cfg['skip_frame_num']\n- self.cls_action_predictor = ClsActionRecognizer(\n- model_dir,\n- device,\n- run_mode,\n- batch_size,\n- trt_min_shape,\n- trt_max_shape,\n- trt_opt_shape,\n- trt_calib_mode,\n- cpu_threads,\n- enable_mkldnn,\n- threshold=threshold,\n- display_frames=display_frames,\n- skip_frame_num=skip_frame_num)\n+ self.cls_action_predictor = ClsActionRecognizer.init_with_cfg(\n+ args, idbased_clsaction_cfg)\nself.cls_action_visual_helper = ActionVisualHelper(1)\nif self.with_skeleton_action:\nskeleton_action_cfg = self.cfg['SKELETON_ACTION']\n- skeleton_action_model_dir = model_dir_dict['SKELETON_ACTION']\n- skeleton_action_batch_size = skeleton_action_cfg['batch_size']\n- skeleton_action_frames = skeleton_action_cfg['max_frames']\ndisplay_frames = skeleton_action_cfg['display_frames']\nself.coord_size = skeleton_action_cfg['coord_size']\nbasemode = self.basemode['SKELETON_ACTION']\nself.modebase[basemode] = True\n+ skeleton_action_frames = skeleton_action_cfg['max_frames']\n- self.skeleton_action_predictor = SkeletonActionRecognizer(\n- skeleton_action_model_dir,\n- device,\n- run_mode,\n- skeleton_action_batch_size,\n- trt_min_shape,\n- trt_max_shape,\n- trt_opt_shape,\n- trt_calib_mode,\n- cpu_threads,\n- enable_mkldnn,\n- window_size=skeleton_action_frames)\n+ self.skeleton_action_predictor = SkeletonActionRecognizer.init_with_cfg(\n+ args, skeleton_action_cfg)\nself.skeleton_action_visual_helper = ActionVisualHelper(\ndisplay_frames)\n- if self.modebase[\"skeletonbased\"]:\nkpt_cfg = self.cfg['KPT']\n- kpt_model_dir = model_dir_dict['KPT']\n+ kpt_model_dir = kpt_cfg['model_dir']\nkpt_batch_size = kpt_cfg['batch_size']\nself.kpt_predictor = KeyPointDetector(\nkpt_model_dir,\n- device,\n- run_mode,\n+ args.device,\n+ args.run_mode,\nkpt_batch_size,\n- trt_min_shape,\n- trt_max_shape,\n- trt_opt_shape,\n- trt_calib_mode,\n- cpu_threads,\n- enable_mkldnn,\n+ args.trt_min_shape,\n+ args.trt_max_shape,\n+ args.trt_opt_shape,\n+ args.trt_calib_mode,\n+ args.cpu_threads,\n+ args.enable_mkldnn,\nuse_dark=False)\nself.kpt_buff = KeyPointBuff(skeleton_action_frames)\nif self.with_mtmct:\nreid_cfg = self.cfg['REID']\n- model_dir = model_dir_dict['REID']\n- batch_size = reid_cfg['batch_size']\nbasemode = self.basemode['REID']\nself.modebase[basemode] = True\n- self.reid_predictor = ReID(\n- model_dir, device, run_mode, batch_size, trt_min_shape,\n- trt_max_shape, trt_opt_shape, trt_calib_mode, cpu_threads,\n- enable_mkldnn)\n+ self.reid_predictor = ReID.init_with_cfg(args, reid_cfg)\nif self.with_mot or self.modebase[\"idbased\"] or self.modebase[\n\"skeletonbased\"]:\nmot_cfg = self.cfg['MOT']\n- model_dir = model_dir_dict['MOT']\n+ model_dir = mot_cfg['model_dir']\ntracker_config = mot_cfg['tracker_config']\nbatch_size = mot_cfg['batch_size']\nbasemode = self.basemode['MOT']\n@@ -521,46 +393,28 @@ class PipePredictor(object):\nself.mot_predictor = SDE_Detector(\nmodel_dir,\ntracker_config,\n- device,\n- run_mode,\n+ args.device,\n+ args.run_mode,\nbatch_size,\n- trt_min_shape,\n- trt_max_shape,\n- trt_opt_shape,\n- trt_calib_mode,\n- cpu_threads,\n- enable_mkldnn,\n- draw_center_traj=draw_center_traj,\n- secs_interval=secs_interval,\n- do_entrance_counting=do_entrance_counting,\n- do_break_in_counting=do_break_in_counting,\n- region_type=region_type,\n- region_polygon=region_polygon)\n+ args.trt_min_shape,\n+ args.trt_max_shape,\n+ args.trt_opt_shape,\n+ args.trt_calib_mode,\n+ args.cpu_threads,\n+ args.enable_mkldnn,\n+ draw_center_traj=self.draw_center_traj,\n+ 
secs_interval=self.secs_interval,\n+ do_entrance_counting=self.do_entrance_counting,\n+ do_break_in_counting=self.do_break_in_counting,\n+ region_type=self.region_type,\n+ region_polygon=self.region_polygon)\nif self.with_video_action:\nvideo_action_cfg = self.cfg['VIDEO_ACTION']\n-\nbasemode = self.basemode['VIDEO_ACTION']\nself.modebase[basemode] = True\n-\n- video_action_model_dir = model_dir_dict['VIDEO_ACTION']\n- video_action_batch_size = video_action_cfg['batch_size']\n- short_size = video_action_cfg[\"short_size\"]\n- target_size = video_action_cfg[\"target_size\"]\n-\n- self.video_action_predictor = VideoActionRecognizer(\n- model_dir=video_action_model_dir,\n- short_size=short_size,\n- target_size=target_size,\n- device=device,\n- run_mode=run_mode,\n- batch_size=video_action_batch_size,\n- trt_min_shape=trt_min_shape,\n- trt_max_shape=trt_max_shape,\n- trt_opt_shape=trt_opt_shape,\n- trt_calib_mode=trt_calib_mode,\n- cpu_threads=cpu_threads,\n- enable_mkldnn=enable_mkldnn)\n+ self.video_action_predictor = VideoActionRecognizer.init_with_cfg(\n+ args, video_action_cfg)\ndef set_file_name(self, path):\nif path is not None:\n@@ -701,6 +555,10 @@ class PipePredictor(object):\nassert len(\nself.region_polygon\n) % 2 == 0, \"region_polygon should be pairs of coords points when do break_in counting.\"\n+ assert len(\n+ self.region_polygon\n+ ) > 6, 'region_type is custom, region_polygon should be at least 3 pairs of point coords.'\n+\nfor i in range(0, len(self.region_polygon), 2):\nentrance.append(\n[self.region_polygon[i], self.region_polygon[i + 1]])\n" }, { "change_type": "MODIFY", "old_path": "deploy/pipeline/pphuman/action_infer.py", "new_path": "deploy/pipeline/pphuman/action_infer.py", "diff": "@@ -84,6 +84,20 @@ class SkeletonActionRecognizer(Detector):\nthreshold=threshold,\ndelete_shuffle_pass=True)\n+ @classmethod\n+ def init_with_cfg(cls, args, cfg):\n+ return cls(model_dir=cfg['model_dir'],\n+ batch_size=cfg['batch_size'],\n+ window_size=cfg['max_frames'],\n+ device=args.device,\n+ run_mode=args.run_mode,\n+ trt_min_shape=args.trt_min_shape,\n+ trt_max_shape=args.trt_max_shape,\n+ trt_opt_shape=args.trt_opt_shape,\n+ trt_calib_mode=args.trt_calib_mode,\n+ cpu_threads=args.cpu_threads,\n+ enable_mkldnn=args.enable_mkldnn)\n+\ndef predict(self, repeats=1):\n'''\nArgs:\n@@ -322,6 +336,22 @@ class DetActionRecognizer(object):\nself.skip_frame_cnt = 0\nself.id_in_last_frame = []\n+ @classmethod\n+ def init_with_cfg(cls, args, cfg):\n+ return cls(model_dir=cfg['model_dir'],\n+ batch_size=cfg['batch_size'],\n+ threshold=cfg['threshold'],\n+ display_frames=cfg['display_frames'],\n+ skip_frame_num=cfg['skip_frame_num'],\n+ device=args.device,\n+ run_mode=args.run_mode,\n+ trt_min_shape=args.trt_min_shape,\n+ trt_max_shape=args.trt_max_shape,\n+ trt_opt_shape=args.trt_opt_shape,\n+ trt_calib_mode=args.trt_calib_mode,\n+ cpu_threads=args.cpu_threads,\n+ enable_mkldnn=args.enable_mkldnn)\n+\ndef predict(self, images, mot_result):\nif self.skip_frame_cnt == 0 or (not self.check_id_is_same(mot_result)):\ndet_result = self.detector.predict_image(images, visual=False)\n@@ -473,6 +503,22 @@ class ClsActionRecognizer(AttrDetector):\nself.skip_frame_cnt = 0\nself.id_in_last_frame = []\n+ @classmethod\n+ def init_with_cfg(cls, args, cfg):\n+ return cls(model_dir=cfg['model_dir'],\n+ batch_size=cfg['batch_size'],\n+ threshold=cfg['threshold'],\n+ display_frames=cfg['display_frames'],\n+ skip_frame_num=cfg['skip_frame_num'],\n+ device=args.device,\n+ run_mode=args.run_mode,\n+ 
trt_min_shape=args.trt_min_shape,\n+ trt_max_shape=args.trt_max_shape,\n+ trt_opt_shape=args.trt_opt_shape,\n+ trt_calib_mode=args.trt_calib_mode,\n+ cpu_threads=args.cpu_threads,\n+ enable_mkldnn=args.enable_mkldnn)\n+\ndef predict_with_mot(self, images, mot_result):\nif self.skip_frame_cnt == 0 or (not self.check_id_is_same(mot_result)):\nimages = self.crop_half_body(images)\n" }, { "change_type": "MODIFY", "old_path": "deploy/pipeline/pphuman/attr_infer.py", "new_path": "deploy/pipeline/pphuman/attr_infer.py", "diff": "@@ -84,6 +84,19 @@ class AttrDetector(Detector):\noutput_dir=output_dir,\nthreshold=threshold, )\n+ @classmethod\n+ def init_with_cfg(cls, args, cfg):\n+ return cls(model_dir=cfg['model_dir'],\n+ batch_size=cfg['batch_size'],\n+ device=args.device,\n+ run_mode=args.run_mode,\n+ trt_min_shape=args.trt_min_shape,\n+ trt_max_shape=args.trt_max_shape,\n+ trt_opt_shape=args.trt_opt_shape,\n+ trt_calib_mode=args.trt_calib_mode,\n+ cpu_threads=args.cpu_threads,\n+ enable_mkldnn=args.enable_mkldnn)\n+\ndef get_label(self):\nreturn self.pred_config.labels\n" }, { "change_type": "MODIFY", "old_path": "deploy/pipeline/pphuman/reid.py", "new_path": "deploy/pipeline/pphuman/reid.py", "diff": "@@ -75,6 +75,19 @@ class ReID(object):\nself.batch_size = batch_size\nself.input_wh = (128, 256)\n+ @classmethod\n+ def init_with_cfg(cls, args, cfg):\n+ return cls(model_dir=cfg['model_dir'],\n+ batch_size=cfg['batch_size'],\n+ device=args.device,\n+ run_mode=args.run_mode,\n+ trt_min_shape=args.trt_min_shape,\n+ trt_max_shape=args.trt_max_shape,\n+ trt_opt_shape=args.trt_opt_shape,\n+ trt_calib_mode=args.trt_calib_mode,\n+ cpu_threads=args.cpu_threads,\n+ enable_mkldnn=args.enable_mkldnn)\n+\ndef set_config(self, model_dir):\nreturn PredictConfig(model_dir)\n" }, { "change_type": "MODIFY", "old_path": "deploy/pipeline/pphuman/video_action_infer.py", "new_path": "deploy/pipeline/pphuman/video_action_infer.py", "diff": "@@ -126,6 +126,21 @@ class VideoActionRecognizer(object):\nself.predictor = create_predictor(self.config)\n+ @classmethod\n+ def init_with_cfg(cls, args, cfg):\n+ return cls(model_dir=cfg['model_dir'],\n+ short_size=cfg['short_size'],\n+ target_size=cfg['target_size'],\n+ batch_size=cfg['batch_size'],\n+ device=args.device,\n+ run_mode=args.run_mode,\n+ trt_min_shape=args.trt_min_shape,\n+ trt_max_shape=args.trt_max_shape,\n+ trt_opt_shape=args.trt_opt_shape,\n+ trt_calib_mode=args.trt_calib_mode,\n+ cpu_threads=args.cpu_threads,\n+ enable_mkldnn=args.enable_mkldnn)\n+\ndef preprocess_batch(self, file_list):\nbatched_inputs = []\nfor file in file_list:\n" }, { "change_type": "MODIFY", "old_path": "deploy/pipeline/ppvehicle/vehicle_attr.py", "new_path": "deploy/pipeline/ppvehicle/vehicle_attr.py", "diff": "@@ -90,6 +90,21 @@ class VehicleAttr(AttrDetector):\n\"estate\"\n]\n+ @classmethod\n+ def init_with_cfg(cls, args, cfg):\n+ return cls(model_dir=cfg['model_dir'],\n+ batch_size=cfg['batch_size'],\n+ color_threshold=cfg['color_threshold'],\n+ type_threshold=cfg['type_threshold'],\n+ device=args.device,\n+ run_mode=args.run_mode,\n+ trt_min_shape=args.trt_min_shape,\n+ trt_max_shape=args.trt_max_shape,\n+ trt_opt_shape=args.trt_opt_shape,\n+ trt_calib_mode=args.trt_calib_mode,\n+ cpu_threads=args.cpu_threads,\n+ enable_mkldnn=args.enable_mkldnn)\n+\ndef postprocess(self, inputs, result):\n# postprocess output of predictor\nim_results = result['output']\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
move initialize part into class (#6621)
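Note on the commit above: the refactor replaces the long per-module constructor calls in PipePredictor with init_with_cfg classmethods, where runtime knobs (device, run_mode, TensorRT shapes, ...) come from the argparse namespace and model-specific knobs from the YAML section. A toy sketch of the same factory pattern, with made-up class and config names:

import argparse

class ToyPredictor:
    """Stand-in for AttrDetector / ReID / VideoActionRecognizer-style classes."""

    def __init__(self, model_dir, batch_size, device='CPU', run_mode='paddle'):
        self.model_dir = model_dir
        self.batch_size = batch_size
        self.device = device
        self.run_mode = run_mode

    @classmethod
    def init_with_cfg(cls, args, cfg):
        # one place that knows how to map (args, cfg) onto the constructor,
        # so the pipeline only has to forward the two objects
        return cls(model_dir=cfg['model_dir'],
                   batch_size=cfg['batch_size'],
                   device=args.device,
                   run_mode=args.run_mode)

args = argparse.Namespace(device='GPU', run_mode='paddle')
cfg = {'model_dir': 'output_inference/attr_model/', 'batch_size': 8}
predictor = ToyPredictor.init_with_cfg(args, cfg)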
499,299
15.08.2022 20:09:34
-28,800
7f884da6fafdf4033b8039c7fc782e88375e6467
use link for vehicle model
[ { "change_type": "MODIFY", "old_path": "deploy/pipeline/config/examples/infer_cfg_human_attr.yml", "new_path": "deploy/pipeline/config/examples/infer_cfg_human_attr.yml", "diff": "@@ -3,6 +3,10 @@ attr_thresh: 0.5\nvisual: True\nwarmup_frame: 50\n+DET:\n+ model_dir: https://bj.bcebos.com/v1/paddledet/models/pipeline/mot_ppyoloe_l_36e_pipeline.zip\n+ batch_size: 1\n+\nMOT:\nmodel_dir: https://bj.bcebos.com/v1/paddledet/models/pipeline/mot_ppyoloe_l_36e_pipeline.zip\ntracker_config: deploy/pipeline/config/tracker_config.yml\n" }, { "change_type": "ADD", "old_path": null, "new_path": "deploy/pipeline/config/examples/infer_cfg_vehicle_attr.yml", "diff": "+crop_thresh: 0.5\n+visual: True\n+warmup_frame: 50\n+\n+DET:\n+ model_dir: https://bj.bcebos.com/v1/paddledet/models/pipeline/mot_ppyoloe_l_36e_ppvehicle.zip\n+ batch_size: 1\n+\n+MOT:\n+ model_dir: https://bj.bcebos.com/v1/paddledet/models/pipeline/mot_ppyoloe_l_36e_ppvehicle.zip\n+ tracker_config: deploy/pipeline/config/tracker_config.yml\n+ batch_size: 1\n+ enable: True\n+\n+VEHICLE_ATTR:\n+ model_dir: https://bj.bcebos.com/v1/paddledet/models/pipeline/vehicle_attribute_model.zip\n+ batch_size: 8\n+ color_threshold: 0.5\n+ type_threshold: 0.5\n+ enable: True\n" }, { "change_type": "ADD", "old_path": null, "new_path": "deploy/pipeline/config/examples/infer_cfg_vehicle_plate.yml", "diff": "+crop_thresh: 0.5\n+visual: True\n+warmup_frame: 50\n+\n+DET:\n+ model_dir: https://bj.bcebos.com/v1/paddledet/models/pipeline/mot_ppyoloe_l_36e_ppvehicle.zip\n+ batch_size: 1\n+\n+MOT:\n+ model_dir: https://bj.bcebos.com/v1/paddledet/models/pipeline/mot_ppyoloe_l_36e_ppvehicle.zip\n+ tracker_config: deploy/pipeline/config/tracker_config.yml\n+ batch_size: 1\n+ enable: True\n+\n+VEHICLE_PLATE:\n+ det_model_dir: https://bj.bcebos.com/v1/paddledet/models/pipeline/ch_PP-OCRv3_det_infer.tar.gz\n+ det_limit_side_len: 736\n+ det_limit_type: \"min\"\n+ rec_model_dir: https://bj.bcebos.com/v1/paddledet/models/pipeline/ch_PP-OCRv3_rec_infer.tar.gz\n+ rec_image_shape: [3, 48, 320]\n+ rec_batch_num: 6\n+ word_dict_path: deploy/pipeline/ppvehicle/rec_word_dict.txt\n+ enable: True\n" }, { "change_type": "MODIFY", "old_path": "deploy/pipeline/config/infer_cfg_ppvehicle.yml", "new_path": "deploy/pipeline/config/infer_cfg_ppvehicle.yml", "diff": "@@ -3,33 +3,28 @@ visual: True\nwarmup_frame: 50\nDET:\n- model_dir: output_inference/mot_ppyoloe_l_36e_ppvehicle/\n+ model_dir: https://bj.bcebos.com/v1/paddledet/models/pipeline/mot_ppyoloe_l_36e_ppvehicle.zip\nbatch_size: 1\nMOT:\n- model_dir: output_inference/mot_ppyoloe_l_36e_ppvehicle/\n+ model_dir: https://bj.bcebos.com/v1/paddledet/models/pipeline/mot_ppyoloe_l_36e_ppvehicle.zip\ntracker_config: deploy/pipeline/config/tracker_config.yml\nbatch_size: 1\nenable: False\nVEHICLE_PLATE:\n- det_model_dir: output_inference/ch_PP-OCRv3_det_infer/\n+ det_model_dir: https://bj.bcebos.com/v1/paddledet/models/pipeline/ch_PP-OCRv3_det_infer.tar.gz\ndet_limit_side_len: 736\ndet_limit_type: \"min\"\n- rec_model_dir: output_inference/ch_PP-OCRv3_rec_infer/\n+ rec_model_dir: https://bj.bcebos.com/v1/paddledet/models/pipeline/ch_PP-OCRv3_rec_infer.tar.gz\nrec_image_shape: [3, 48, 320]\nrec_batch_num: 6\nword_dict_path: deploy/pipeline/ppvehicle/rec_word_dict.txt\nenable: False\nVEHICLE_ATTR:\n- model_dir: output_inference/vehicle_attribute_infer/\n+ model_dir: https://bj.bcebos.com/v1/paddledet/models/pipeline/vehicle_attribute_model.zip\nbatch_size: 8\ncolor_threshold: 0.5\ntype_threshold: 0.5\nenable: False\n-\n-REID:\n- model_dir: 
output_inference/vehicle_reid_model/\n- batch_size: 16\n- enable: False\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
use link for vehicle model (#6645)
499,348
17.08.2022 00:04:47
-28,800
28199de73e51ff08f54d941c83ee24056ffd16dd
add per trackid time info
[ { "change_type": "MODIFY", "old_path": "deploy/pipeline/pipe_utils.py", "new_path": "deploy/pipeline/pipe_utils.py", "diff": "@@ -68,6 +68,7 @@ class PipeTimer(Times):\n'vehicleplate': Times()\n}\nself.img_num = 0\n+ self.track_num = 0\ndef get_total_time(self):\ntotal_time = self.total_time.value()\n@@ -86,8 +87,11 @@ class PipeTimer(Times):\nfor k, v in self.module_time.items():\nv_time = round(v.value(), 4)\n- if v_time > 0:\n+ if v_time > 0 and k in ['det', 'mot', 'video_action']:\nprint(\"{} time(ms): {}\".format(k, v_time * 1000))\n+ elif v_time > 0:\n+ print(\"{} time(ms): {}; per trackid average time(ms): {}\".\n+ format(k, v_time * 1000, v_time * 1000 / self.track_num))\nprint(\"average latency time(ms): {:.2f}, QPS: {:2f}\".format(\naverage_latency * 1000, qps))\n" }, { "change_type": "MODIFY", "old_path": "deploy/pipeline/pipeline.py", "new_path": "deploy/pipeline/pipeline.py", "diff": "@@ -598,10 +598,11 @@ class PipePredictor(object):\nif not ret:\nbreak\nframe_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\n+ if frame_id > self.warmup_frame:\n+ self.pipe_timer.total_time.start()\nif self.modebase[\"idbased\"] or self.modebase[\"skeletonbased\"]:\nif frame_id > self.warmup_frame:\n- self.pipe_timer.total_time.start()\nself.pipe_timer.module_time['mot'].start()\nmot_skip_frame_num = self.mot_predictor.skip_frame_num\n@@ -612,11 +613,12 @@ class PipePredictor(object):\n[copy.deepcopy(frame_rgb)],\nvisual=False,\nreuse_det_result=reuse_det_result)\n- if frame_id > self.warmup_frame:\n- self.pipe_timer.module_time['mot'].end()\n# mot output format: id, class, score, xmin, ymin, xmax, ymax\nmot_res = parse_mot_res(res)\n+ if frame_id > self.warmup_frame:\n+ self.pipe_timer.module_time['mot'].end()\n+ self.pipe_timer.track_num += len(mot_res['boxes'])\n# flow_statistic only support single class MOT\nboxes, scores, ids = res[0] # batch size = 1 in MOT\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
add per trackid time info (#6664)
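Note on the commit above: PipeTimer now accumulates the number of tracked boxes (track_num) and, for per-object modules such as attribute or action recognition, reports a per-trackid average next to the module total, while det/mot/video_action stay as plain totals. The reported figure is simply total module time divided by the accumulated box count; illustrative numbers:

module_time_s = {'mot': 1.20, 'attr': 0.48}   # made-up totals in seconds
track_num = 96                                # boxes accumulated over the run, as in pipe_timer.track_num

for name, t in module_time_s.items():
    if name in ('det', 'mot', 'video_action'):
        print(f"{name} time(ms): {t * 1000:.1f}")
    else:
        print(f"{name} time(ms): {t * 1000:.1f}; per trackid average time(ms): {t * 1000 / track_num:.2f}")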
499,301
18.08.2022 18:32:10
-28,800
8fbdf1cb6bedd8a884d2f3d0482b4ef39a36f142
add ppyoloe plus cfgs
[ { "change_type": "ADD", "old_path": null, "new_path": "deploy/auto_compression/configs/ppyoloe_plus_l_qat_dis.yaml", "diff": "+\n+Global:\n+ reader_config: configs/ppyoloe_plus_reader.yml\n+ input_list: ['image', 'scale_factor']\n+ arch: YOLO\n+ Evaluation: True\n+ model_dir: ./ppyoloe_plus_crn_l_80e_coco\n+ model_filename: model.pdmodel\n+ params_filename: model.pdiparams\n+\n+Distillation:\n+ alpha: 1.0\n+ loss: soft_label\n+\n+Quantization:\n+ use_pact: true\n+ activation_quantize_type: 'moving_average_abs_max'\n+ quantize_op_types:\n+ - conv2d\n+ - depthwise_conv2d\n+\n+TrainConfig:\n+ train_iter: 5000\n+ eval_iter: 1000\n+ learning_rate:\n+ type: CosineAnnealingDecay\n+ learning_rate: 0.00003\n+ T_max: 6000\n+ optimizer_builder:\n+ optimizer:\n+ type: SGD\n+ weight_decay: 4.0e-05\n+\n" }, { "change_type": "ADD", "old_path": null, "new_path": "deploy/auto_compression/configs/ppyoloe_plus_m_qat_dis.yaml", "diff": "+\n+Global:\n+ reader_config: configs/ppyoloe_plus_reader.yml\n+ input_list: ['image', 'scale_factor']\n+ arch: YOLO\n+ Evaluation: True\n+ model_dir: ./ppyoloe_plus_crn_m_80e_coco\n+ model_filename: model.pdmodel\n+ params_filename: model.pdiparams\n+\n+Distillation:\n+ alpha: 1.0\n+ loss: soft_label\n+\n+Quantization:\n+ use_pact: true\n+ activation_quantize_type: 'moving_average_abs_max'\n+ quantize_op_types:\n+ - conv2d\n+ - depthwise_conv2d\n+\n+TrainConfig:\n+ train_iter: 5000\n+ eval_iter: 1000\n+ learning_rate:\n+ type: CosineAnnealingDecay\n+ learning_rate: 0.00003\n+ T_max: 6000\n+ optimizer_builder:\n+ optimizer:\n+ type: SGD\n+ weight_decay: 4.0e-05\n+\n" }, { "change_type": "ADD", "old_path": null, "new_path": "deploy/auto_compression/configs/ppyoloe_plus_reader.yml", "diff": "+\n+\n+metric: COCO\n+num_classes: 80\n+\n+# Datset configuration\n+TrainDataset:\n+ !COCODataSet\n+ image_dir: train2017\n+ anno_path: annotations/instances_train2017.json\n+ dataset_dir: dataset/coco/\n+\n+EvalDataset:\n+ !COCODataSet\n+ image_dir: val2017\n+ anno_path: annotations/instances_val2017.json\n+ dataset_dir: dataset/coco/\n+\n+worker_num: 0\n+\n+# preprocess reader in test\n+EvalReader:\n+ sample_transforms:\n+ - Decode: {}\n+ - Resize: {target_size: [640, 640], keep_ratio: False, interp: 2}\n+ - NormalizeImage: {mean: [0., 0., 0.], std: [1., 1., 1.], is_scale: True}\n+ - Permute: {}\n+ batch_size: 4\n" }, { "change_type": "ADD", "old_path": null, "new_path": "deploy/auto_compression/configs/ppyoloe_plus_s_qat_dis.yaml", "diff": "+\n+Global:\n+ reader_config: configs/ppyoloe_plus_reader.yml\n+ input_list: ['image', 'scale_factor']\n+ arch: YOLO\n+ Evaluation: True\n+ model_dir: ./ppyoloe_plus_crn_s_80e_coco\n+ model_filename: model.pdmodel\n+ params_filename: model.pdiparams\n+\n+Distillation:\n+ alpha: 1.0\n+ loss: soft_label\n+\n+Quantization:\n+ use_pact: true\n+ activation_quantize_type: 'moving_average_abs_max'\n+ quantize_op_types:\n+ - conv2d\n+ - depthwise_conv2d\n+\n+TrainConfig:\n+ train_iter: 5000\n+ eval_iter: 1000\n+ learning_rate:\n+ type: CosineAnnealingDecay\n+ learning_rate: 0.00003\n+ T_max: 6000\n+ optimizer_builder:\n+ optimizer:\n+ type: SGD\n+ weight_decay: 4.0e-05\n+\n" }, { "change_type": "ADD", "old_path": null, "new_path": "deploy/auto_compression/configs/ppyoloe_plus_x_qat_dis.yaml", "diff": "+\n+Global:\n+ reader_config: configs/ppyoloe_plus_reader.yml\n+ input_list: ['image', 'scale_factor']\n+ arch: YOLO\n+ Evaluation: True\n+ model_dir: ./ppyoloe_plus_crn_x_80e_coco\n+ model_filename: model.pdmodel\n+ params_filename: 
model.pdiparams\n+\n+Distillation:\n+ alpha: 1.0\n+ loss: soft_label\n+\n+Quantization:\n+ use_pact: true\n+ activation_quantize_type: 'moving_average_abs_max'\n+ quantize_op_types:\n+ - conv2d\n+ - depthwise_conv2d\n+\n+TrainConfig:\n+ train_iter: 5000\n+ eval_iter: 1000\n+ learning_rate:\n+ type: CosineAnnealingDecay\n+ learning_rate: 0.00003\n+ T_max: 6000\n+ optimizer_builder:\n+ optimizer:\n+ type: SGD\n+ weight_decay: 4.0e-05\n+\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
add ppyoloe plus cfgs (#6686)
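Note on the commit above: the new YAMLs follow the layout run.py (added earlier in this log) expects, a Global section naming the reader config and exported model plus Distillation / Quantization / TrainConfig strategy sections consumed by PaddleSlim's AutoCompression. A quick sketch of reading one of them with the same helper run.py imports (paths and the import location assume the deploy/auto_compression working directory and the PaddleSlim version run.py targets):

from paddleslim.auto_compression.config_helpers import load_config as load_slim_config

all_config = load_slim_config('configs/ppyoloe_plus_l_qat_dis.yaml')
global_cfg = all_config['Global']
print(global_cfg['model_dir'])                    # ./ppyoloe_plus_crn_l_80e_coco
print(all_config['Quantization']['use_pact'])     # True
print(all_config['TrainConfig']['train_iter'])    # 5000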
499,339
22.08.2022 14:36:01
-28,800
10e7fe232c83dacee0f517d78644b705e5d24a57
[deploy] alter save coco format json in deploy/python/infer.py
[ { "change_type": "MODIFY", "old_path": "deploy/python/infer.py", "new_path": "deploy/python/infer.py", "diff": "@@ -36,7 +36,7 @@ from picodet_postprocess import PicoDetPostProcess\nfrom preprocess import preprocess, Resize, NormalizeImage, Permute, PadStride, LetterBoxResize, WarpAffine, Pad, decode_image\nfrom keypoint_preprocess import EvalAffine, TopDownEvalAffine, expand_crop\nfrom visualize import visualize_box_mask\n-from utils import argsparser, Timer, get_current_memory_mb, multiclass_nms\n+from utils import argsparser, Timer, get_current_memory_mb, multiclass_nms, coco_clsid2catid\n# Global dictionary\nSUPPORT_MODELS = {\n@@ -226,7 +226,7 @@ class Detector(object):\nmatch_threshold=0.6,\nmatch_metric='iou',\nvisual=True,\n- save_file=None):\n+ save_results=False):\n# slice infer only support bs=1\nresults = []\ntry:\n@@ -295,14 +295,13 @@ class Detector(object):\nthreshold=self.threshold)\nresults.append(merged_results)\n- if visual:\nprint('Test iter {}'.format(i))\n- if save_file is not None:\n- Path(self.output_dir).mkdir(exist_ok=True)\n- self.format_coco_results(image_list, results, save_file=save_file)\n-\nresults = self.merge_batch_result(results)\n+ if save_results:\n+ Path(self.output_dir).mkdir(exist_ok=True)\n+ self.save_coco_results(\n+ img_list, results, use_coco_category=FLAGS.use_coco_category)\nreturn results\ndef predict_image(self,\n@@ -310,7 +309,7 @@ class Detector(object):\nrun_benchmark=False,\nrepeats=1,\nvisual=True,\n- save_file=None):\n+ save_results=False):\nbatch_loop_cnt = math.ceil(float(len(image_list)) / self.batch_size)\nresults = []\nfor i in range(batch_loop_cnt):\n@@ -367,14 +366,13 @@ class Detector(object):\nthreshold=self.threshold)\nresults.append(result)\n- if visual:\nprint('Test iter {}'.format(i))\n- if save_file is not None:\n- Path(self.output_dir).mkdir(exist_ok=True)\n- self.format_coco_results(image_list, results, save_file=save_file)\n-\nresults = self.merge_batch_result(results)\n+ if save_results:\n+ Path(self.output_dir).mkdir(exist_ok=True)\n+ self.save_coco_results(\n+ image_list, results, use_coco_category=FLAGS.use_coco_category)\nreturn results\ndef predict_video(self, video_file, camera_id):\n@@ -418,67 +416,62 @@ class Detector(object):\nbreak\nwriter.release()\n- @staticmethod\n- def format_coco_results(image_list, results, save_file=None):\n- coco_results = []\n- image_id = 0\n-\n- for result in results:\n- start_idx = 0\n- for box_num in result['boxes_num']:\n- idx_slice = slice(start_idx, start_idx + box_num)\n- start_idx += box_num\n-\n- image_file = image_list[image_id]\n- image_id += 1\n-\n- if 'boxes' in result:\n- boxes = result['boxes'][idx_slice, :]\n- per_result = [\n- {\n- 'image_file': image_file,\n- 'bbox':\n- [box[2], box[3], box[4] - box[2],\n+ def save_coco_results(self, image_list, results, use_coco_category=False):\n+ bbox_results = []\n+ mask_results = []\n+ idx = 0\n+ print(\"Start saving coco json files...\")\n+ for i, box_num in enumerate(results['boxes_num']):\n+ file_name = os.path.split(image_list[i])[-1]\n+ if use_coco_category:\n+ img_id = int(os.path.splitext(file_name)[0])\n+ else:\n+ img_id = i\n+\n+ if 'boxes' in results:\n+ boxes = results['boxes'][idx:idx + box_num].tolist()\n+ bbox_results.extend([{\n+ 'image_id': img_id,\n+ 'category_id': coco_clsid2catid[int(box[0])] \\\n+ if use_coco_category else int(box[0]),\n+ 'file_name': file_name,\n+ 'bbox': [box[2], box[3], box[4] - box[2],\nbox[5] - box[3]], # xyxy -> xywh\n- 'score': box[1],\n- 'category_id': int(box[0]),\n- } for k, box 
in enumerate(boxes.tolist())\n- ]\n+ 'score': box[1]} for box in boxes])\n- elif 'segm' in result:\n+ if 'masks' in results:\nimport pycocotools.mask as mask_util\n- scores = result['score'][idx_slice].tolist()\n- category_ids = result['label'][idx_slice].tolist()\n- segms = result['segm'][idx_slice, :]\n- rles = [\n- mask_util.encode(\n+ boxes = results['boxes'][idx:idx + box_num].tolist()\n+ masks = results['masks'][i][:box_num].astype(np.uint8)\n+ seg_res = []\n+ for box, mask in zip(boxes, masks):\n+ rle = mask_util.encode(\nnp.array(\n- mask[:, :, np.newaxis],\n- dtype=np.uint8,\n- order='F'))[0] for mask in segms\n- ]\n- for rle in rles:\n- rle['counts'] = rle['counts'].decode('utf-8')\n-\n- per_result = [{\n- 'image_file': image_file,\n+ mask[:, :, None], dtype=np.uint8, order=\"F\"))[0]\n+ if 'counts' in rle:\n+ rle['counts'] = rle['counts'].decode(\"utf8\")\n+ seg_res.append({\n+ 'image_id': img_id,\n+ 'category_id': coco_clsid2catid[int(box[0])] \\\n+ if use_coco_category else int(box[0]),\n+ 'file_name': file_name,\n'segmentation': rle,\n- 'score': scores[k],\n- 'category_id': category_ids[k],\n- } for k, rle in enumerate(rles)]\n+ 'score': box[1]})\n+ mask_results.extend(seg_res)\n- else:\n- raise RuntimeError('')\n-\n- # per_result = [item for item in per_result if item['score'] > threshold]\n- coco_results.extend(per_result)\n+ idx += box_num\n- if save_file:\n- with open(os.path.join(save_file), 'w') as f:\n- json.dump(coco_results, f)\n-\n- return coco_results\n+ if bbox_results:\n+ bbox_file = os.path.join(self.output_dir, \"bbox.json\")\n+ with open(bbox_file, 'w') as f:\n+ json.dump(bbox_results, f)\n+ print(f\"The bbox result is saved to {bbox_file}\")\n+ if mask_results:\n+ mask_file = os.path.join(self.output_dir, \"mask.json\")\n+ with open(mask_file, 'w') as f:\n+ json.dump(mask_results, f)\n+ print(f\"The mask result is saved to {mask_file}\")\nclass DetectorSOLOv2(Detector):\n@@ -956,8 +949,6 @@ def main():\nif FLAGS.image_dir is None and FLAGS.image_file is not None:\nassert FLAGS.batch_size == 1, \"batch_size should be 1, when image_file is not None\"\nimg_list = get_test_images(FLAGS.image_dir, FLAGS.image_file)\n- save_file = os.path.join(FLAGS.output_dir,\n- 'results.json') if FLAGS.save_results else None\nif FLAGS.slice_infer:\ndetector.predict_image_slice(\nimg_list,\n@@ -966,10 +957,15 @@ def main():\nFLAGS.combine_method,\nFLAGS.match_threshold,\nFLAGS.match_metric,\n- save_file=save_file)\n+ visual=FLAGS.save_images,\n+ save_results=FLAGS.save_results)\nelse:\ndetector.predict_image(\n- img_list, FLAGS.run_benchmark, repeats=100, save_file=save_file)\n+ img_list,\n+ FLAGS.run_benchmark,\n+ repeats=100,\n+ visual=FLAGS.save_images,\n+ save_results=FLAGS.save_results)\nif not FLAGS.run_benchmark:\ndetector.det_times.info(average=True)\nelse:\n" }, { "change_type": "MODIFY", "old_path": "deploy/python/utils.py", "new_path": "deploy/python/utils.py", "diff": "@@ -109,6 +109,7 @@ def argsparser():\nparser.add_argument(\n'--save_images',\naction='store_true',\n+ default=False,\nhelp='Save visualization image results.')\nparser.add_argument(\n'--save_mot_txts',\n@@ -159,9 +160,14 @@ def argsparser():\nhelp=\"Whether do random padding for action recognition.\")\nparser.add_argument(\n\"--save_results\",\n- type=bool,\n+ action='store_true',\ndefault=False,\nhelp=\"Whether save detection result to file using coco format\")\n+ parser.add_argument(\n+ '--use_coco_category',\n+ action='store_true',\n+ default=False,\n+ help='Whether to use the coco format dictionary 
`clsid2catid`')\nparser.add_argument(\n\"--slice_infer\",\naction='store_true',\n@@ -386,3 +392,87 @@ def nms(dets, match_threshold=0.6, match_metric='iou'):\nkeep = np.where(suppressed == 0)[0]\ndets = dets[keep, :]\nreturn dets\n+\n+\n+coco_clsid2catid = {\n+ 0: 1,\n+ 1: 2,\n+ 2: 3,\n+ 3: 4,\n+ 4: 5,\n+ 5: 6,\n+ 6: 7,\n+ 7: 8,\n+ 8: 9,\n+ 9: 10,\n+ 10: 11,\n+ 11: 13,\n+ 12: 14,\n+ 13: 15,\n+ 14: 16,\n+ 15: 17,\n+ 16: 18,\n+ 17: 19,\n+ 18: 20,\n+ 19: 21,\n+ 20: 22,\n+ 21: 23,\n+ 22: 24,\n+ 23: 25,\n+ 24: 27,\n+ 25: 28,\n+ 26: 31,\n+ 27: 32,\n+ 28: 33,\n+ 29: 34,\n+ 30: 35,\n+ 31: 36,\n+ 32: 37,\n+ 33: 38,\n+ 34: 39,\n+ 35: 40,\n+ 36: 41,\n+ 37: 42,\n+ 38: 43,\n+ 39: 44,\n+ 40: 46,\n+ 41: 47,\n+ 42: 48,\n+ 43: 49,\n+ 44: 50,\n+ 45: 51,\n+ 46: 52,\n+ 47: 53,\n+ 48: 54,\n+ 49: 55,\n+ 50: 56,\n+ 51: 57,\n+ 52: 58,\n+ 53: 59,\n+ 54: 60,\n+ 55: 61,\n+ 56: 62,\n+ 57: 63,\n+ 58: 64,\n+ 59: 65,\n+ 60: 67,\n+ 61: 70,\n+ 62: 72,\n+ 63: 73,\n+ 64: 74,\n+ 65: 75,\n+ 66: 76,\n+ 67: 77,\n+ 68: 78,\n+ 69: 79,\n+ 70: 80,\n+ 71: 81,\n+ 72: 82,\n+ 73: 84,\n+ 74: 85,\n+ 75: 86,\n+ 76: 87,\n+ 77: 88,\n+ 78: 89,\n+ 79: 90\n+}\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
[deploy] alter save coco format json in deploy/python/infer.py (#6705)
499,339
22.08.2022 17:28:45
-28,800
b84d8fd65dfc8ee608bc6bde5bdee1cff4d11d4f
[ppyoloe-plus] update ppyoloe legacy configs
[ { "change_type": "RENAME", "old_path": "configs/ppyoloe/legacy_model/README.md", "new_path": "configs/ppyoloe/README_legacy.md", "diff": "" }, { "change_type": "RENAME", "old_path": "configs/ppyoloe/legacy_model/_base_/optimizer_300e.yml", "new_path": "configs/ppyoloe/_base_/optimizer_300e.yml", "diff": "" }, { "change_type": "RENAME", "old_path": "configs/ppyoloe/legacy_model/_base_/optimizer_36e_xpu.yml", "new_path": "configs/ppyoloe/_base_/optimizer_36e_xpu.yml", "diff": "" }, { "change_type": "RENAME", "old_path": "configs/ppyoloe/legacy_model/_base_/optimizer_400e.yml", "new_path": "configs/ppyoloe/_base_/optimizer_400e.yml", "diff": "" }, { "change_type": "RENAME", "old_path": "configs/ppyoloe/legacy_model/_base_/ppyoloe_crn.yml", "new_path": "configs/ppyoloe/_base_/ppyoloe_crn.yml", "diff": "" }, { "change_type": "RENAME", "old_path": "configs/ppyoloe/legacy_model/_base_/ppyoloe_reader.yml", "new_path": "configs/ppyoloe/_base_/ppyoloe_reader.yml", "diff": "" }, { "change_type": "RENAME", "old_path": "configs/ppyoloe/legacy_model/ppyoloe_crn_l_300e_coco.yml", "new_path": "configs/ppyoloe/ppyoloe_crn_l_300e_coco.yml", "diff": "_BASE_: [\n- '../../datasets/coco_detection.yml',\n- '../../runtime.yml',\n+ '../datasets/coco_detection.yml',\n+ '../runtime.yml',\n'./_base_/optimizer_300e.yml',\n'./_base_/ppyoloe_crn.yml',\n'./_base_/ppyoloe_reader.yml',\n" }, { "change_type": "RENAME", "old_path": "configs/ppyoloe/legacy_model/ppyoloe_crn_l_36e_coco_xpu.yml", "new_path": "configs/ppyoloe/ppyoloe_crn_l_36e_coco_xpu.yml", "diff": "_BASE_: [\n- '../../datasets/coco_detection.yml',\n- '../../runtime.yml',\n+ '../datasets/coco_detection.yml',\n+ '../runtime.yml',\n'./_base_/optimizer_36e_xpu.yml',\n'./_base_/ppyoloe_reader.yml',\n]\n" }, { "change_type": "RENAME", "old_path": "configs/ppyoloe/legacy_model/ppyoloe_crn_m_300e_coco.yml", "new_path": "configs/ppyoloe/ppyoloe_crn_m_300e_coco.yml", "diff": "_BASE_: [\n- '../../datasets/coco_detection.yml',\n- '../../runtime.yml',\n+ '../datasets/coco_detection.yml',\n+ '../runtime.yml',\n'./_base_/optimizer_300e.yml',\n'./_base_/ppyoloe_crn.yml',\n'./_base_/ppyoloe_reader.yml',\n" }, { "change_type": "RENAME", "old_path": "configs/ppyoloe/legacy_model/ppyoloe_crn_s_300e_coco.yml", "new_path": "configs/ppyoloe/ppyoloe_crn_s_300e_coco.yml", "diff": "_BASE_: [\n- '../../datasets/coco_detection.yml',\n- '../../runtime.yml',\n+ '../datasets/coco_detection.yml',\n+ '../runtime.yml',\n'./_base_/optimizer_300e.yml',\n'./_base_/ppyoloe_crn.yml',\n'./_base_/ppyoloe_reader.yml',\n" }, { "change_type": "RENAME", "old_path": "configs/ppyoloe/legacy_model/ppyoloe_crn_s_400e_coco.yml", "new_path": "configs/ppyoloe/ppyoloe_crn_s_400e_coco.yml", "diff": "_BASE_: [\n- '../../datasets/coco_detection.yml',\n- '../../runtime.yml',\n+ '../datasets/coco_detection.yml',\n+ '../runtime.yml',\n'./_base_/optimizer_400e.yml',\n'./_base_/ppyoloe_crn.yml',\n'./_base_/ppyoloe_reader.yml',\n" }, { "change_type": "RENAME", "old_path": "configs/ppyoloe/legacy_model/ppyoloe_crn_x_300e_coco.yml", "new_path": "configs/ppyoloe/ppyoloe_crn_x_300e_coco.yml", "diff": "_BASE_: [\n- '../../datasets/coco_detection.yml',\n- '../../runtime.yml',\n+ '../datasets/coco_detection.yml',\n+ '../runtime.yml',\n'./_base_/optimizer_300e.yml',\n'./_base_/ppyoloe_crn.yml',\n'./_base_/ppyoloe_reader.yml',\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
[ppyoloe-plus] update ppyoloe legacy configs (#6718)
499,298
23.08.2022 22:27:32
-28,800
4708b0811ee99bf9d4cf23f309817675191a434a
fix iters less than batchsize in warmup
[ { "change_type": "MODIFY", "old_path": "configs/mot/fairmot/_base_/optimizer_30e_momentum.yml", "new_path": "configs/mot/fairmot/_base_/optimizer_30e_momentum.yml", "diff": "@@ -7,8 +7,9 @@ LearningRate:\ngamma: 0.1\nmilestones: [15, 22]\nuse_warmup: True\n- - !BurninWarmup\n+ - !ExpWarmup\nsteps: 1000\n+ power: 4\nOptimizerBuilder:\noptimizer:\n" }, { "change_type": "MODIFY", "old_path": "configs/mot/jde/_base_/optimizer_30e.yml", "new_path": "configs/mot/jde/_base_/optimizer_30e.yml", "diff": "@@ -7,8 +7,9 @@ LearningRate:\ngamma: 0.1\nmilestones: [15, 22]\nuse_warmup: True\n- - !BurninWarmup\n+ - !ExpWarmup\nsteps: 1000\n+ power: 4\nOptimizerBuilder:\noptimizer:\n" }, { "change_type": "MODIFY", "old_path": "configs/mot/jde/_base_/optimizer_60e.yml", "new_path": "configs/mot/jde/_base_/optimizer_60e.yml", "diff": "@@ -7,8 +7,9 @@ LearningRate:\ngamma: 0.1\nmilestones: [30, 44]\nuse_warmup: True\n- - !BurninWarmup\n+ - !ExpWarmup\nsteps: 1000\n+ power: 4\nOptimizerBuilder:\noptimizer:\n" }, { "change_type": "MODIFY", "old_path": "configs/mot/mcfairmot/mcfairmot_hrnetv2_w18_dlafpn_30e_1088x608_visdrone_vehicle_bytetracker.yml", "new_path": "configs/mot/mcfairmot/mcfairmot_hrnetv2_w18_dlafpn_30e_1088x608_visdrone_vehicle_bytetracker.yml", "diff": "@@ -63,8 +63,9 @@ LearningRate:\ngamma: 0.1\nmilestones: [15, 22]\nuse_warmup: True\n- - !BurninWarmup\n+ - !ExpWarmup\nsteps: 1000\n+ power: 4\nOptimizerBuilder:\noptimizer:\n" }, { "change_type": "MODIFY", "old_path": "ppdet/engine/trainer.py", "new_path": "ppdet/engine/trainer.py", "diff": "@@ -150,6 +150,10 @@ class Trainer(object):\n# build optimizer in train mode\nif self.mode == 'train':\nsteps_per_epoch = len(self.loader)\n+ if steps_per_epoch < 1:\n+ logger.warning(\n+ \"Samples in dataset are less than batch_size, please set smaller batch_size in TrainReader.\"\n+ )\nself.lr = create('LearningRate')(steps_per_epoch)\nself.optimizer = create('OptimizerBuilder')(self.lr, self.model)\n" }, { "change_type": "MODIFY", "old_path": "ppdet/optimizer/optimizer.py", "new_path": "ppdet/optimizer/optimizer.py", "diff": "@@ -176,6 +176,7 @@ class LinearWarmup(object):\nvalue = []\nwarmup_steps = self.epochs * step_per_epoch \\\nif self.epochs is not None else self.steps\n+ warmup_steps = max(warmup_steps, 1)\nfor i in range(warmup_steps + 1):\nif warmup_steps > 0:\nalpha = i / warmup_steps\n@@ -187,31 +188,6 @@ class LinearWarmup(object):\nreturn boundary, value\n-@serializable\n-class BurninWarmup(object):\n- \"\"\"\n- Warm up learning rate in burnin mode\n- Args:\n- steps (int): warm up steps\n- \"\"\"\n-\n- def __init__(self, steps=1000):\n- super(BurninWarmup, self).__init__()\n- self.steps = steps\n-\n- def __call__(self, base_lr, step_per_epoch):\n- boundary = []\n- value = []\n- burnin = min(self.steps, step_per_epoch)\n- for i in range(burnin + 1):\n- factor = (i * 1.0 / burnin)**4\n- lr = base_lr * factor\n- value.append(lr)\n- if i > 0:\n- boundary.append(i)\n- return boundary, value\n-\n-\n@serializable\nclass ExpWarmup(object):\n\"\"\"\n@@ -220,19 +196,22 @@ class ExpWarmup(object):\nsteps (int): warm up steps.\nepochs (int|None): use epochs as warm up steps, the priority\nof `epochs` is higher than `steps`. Default: None.\n+ power (int): Exponential coefficient. 
Default: 2.\n\"\"\"\n- def __init__(self, steps=5, epochs=None):\n+ def __init__(self, steps=1000, epochs=None, power=2):\nsuper(ExpWarmup, self).__init__()\nself.steps = steps\nself.epochs = epochs\n+ self.power = power\ndef __call__(self, base_lr, step_per_epoch):\nboundary = []\nvalue = []\nwarmup_steps = self.epochs * step_per_epoch if self.epochs is not None else self.steps\n+ warmup_steps = max(warmup_steps, 1)\nfor i in range(warmup_steps + 1):\n- factor = (i / float(warmup_steps))**2\n+ factor = (i / float(warmup_steps))**self.power\nvalue.append(base_lr * factor)\nif i > 0:\nboundary.append(i)\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
fix iters less than batchsize in warmup (#6724)
499,364
24.08.2022 10:17:08
-28,800
da3a1fc995d5e0f35a0d86c222ca9845f62c9d82
fix distill bug
[ { "change_type": "MODIFY", "old_path": "ppdet/slim/distill.py", "new_path": "ppdet/slim/distill.py", "diff": "@@ -436,7 +436,7 @@ class FGDFeatureLoss(nn.Layer):\nMask_bg = paddle.ones_like(tea_spatial_att)\none_tmp = paddle.ones([*tea_spatial_att.shape[1:]])\nzero_tmp = paddle.zeros([*tea_spatial_att.shape[1:]])\n- mask_fg.stop_gradient = True\n+ Mask_fg.stop_gradient = True\nMask_bg.stop_gradient = True\none_tmp.stop_gradient = True\nzero_tmp.stop_gradient = True\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
fix distill bug (#6733)
499,339
24.08.2022 12:15:33
-28,800
8c790b977e241ad595dc2864a2d12ae10d7efbd3
[docs] update ppyoloe_plus docs, test=document_fix
[ { "change_type": "DELETE", "old_path": "docs/images/ppyoloe_map_fps.png", "new_path": "docs/images/ppyoloe_map_fps.png", "diff": "Binary files a/docs/images/ppyoloe_map_fps.png and /dev/null differ\n" }, { "change_type": "ADD", "old_path": "docs/images/ppyoloe_plus_map_fps.png", "new_path": "docs/images/ppyoloe_plus_map_fps.png", "diff": "Binary files /dev/null and b/docs/images/ppyoloe_plus_map_fps.png differ\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
[docs] update ppyoloe_plus docs, test=document_fix (#6729)
499,298
29.08.2022 14:05:46
-28,800
73ef70f030955d2bfb88a0a3844668228643eba2
[cherry-pick] fix illegal parking doc
[ { "change_type": "MODIFY", "old_path": "deploy/pipeline/pipeline.py", "new_path": "deploy/pipeline/pipeline.py", "diff": "@@ -626,10 +626,20 @@ class PipePredictor(object):\nmot_result = (frame_id + 1, boxes[0], scores[0],\nids[0]) # single class\nstatistic = flow_statistic(\n- mot_result, self.secs_interval, self.do_entrance_counting,\n- self.do_break_in_counting, self.region_type, video_fps,\n- entrance, id_set, interval_id_set, in_id_list, out_id_list,\n- prev_center, records)\n+ mot_result,\n+ self.secs_interval,\n+ self.do_entrance_counting,\n+ self.do_break_in_counting,\n+ self.region_type,\n+ video_fps,\n+ entrance,\n+ id_set,\n+ interval_id_set,\n+ in_id_list,\n+ out_id_list,\n+ prev_center,\n+ records,\n+ ids2names=self.mot_predictor.pred_config.labels)\nrecords = statistic['records']\nif self.illegal_parking_time != -1:\n" }, { "change_type": "MODIFY", "old_path": "deploy/pptracking/python/mot/utils.py", "new_path": "deploy/pptracking/python/mot/utils.py", "diff": "@@ -224,7 +224,7 @@ def flow_statistic(result,\nprev_center,\nrecords,\ndata_type='mot',\n- num_classes=1):\n+ ids2names=['pedestrian']):\n# Count in/out number:\n# Note that 'region_type' should be one of ['horizontal', 'vertical', 'custom'],\n# 'horizontal' and 'vertical' means entrance is the center line as the entrance when do_entrance_counting,\n@@ -282,25 +282,27 @@ def flow_statistic(result,\nframe_id -= 1\nx1, y1, w, h = tlwh\ncenter_x = min(x1 + w / 2., im_w - 1)\n- center_down_y = min(y1 + h, im_h - 1)\n+ if ids2names[0] == 'pedestrian':\n+ center_y = min(y1 + h, im_h - 1)\n+ else:\n+ center_y = min(y1 + h / 2, im_h - 1)\n# counting objects in region of the first frame\nif frame_id == 1:\n- if in_quadrangle([center_x, center_down_y], entrance, im_h,\n- im_w):\n+ if in_quadrangle([center_x, center_y], entrance, im_h, im_w):\nin_id_list.append(-1)\nelse:\n- prev_center[track_id] = [center_x, center_down_y]\n+ prev_center[track_id] = [center_x, center_y]\nelse:\nif track_id in prev_center:\nif not in_quadrangle(prev_center[track_id], entrance, im_h,\nim_w) and in_quadrangle(\n- [center_x, center_down_y],\n- entrance, im_h, im_w):\n+ [center_x, center_y], entrance,\n+ im_h, im_w):\nin_id_list.append(track_id)\n- prev_center[track_id] = [center_x, center_down_y]\n+ prev_center[track_id] = [center_x, center_y]\nelse:\n- prev_center[track_id] = [center_x, center_down_y]\n+ prev_center[track_id] = [center_x, center_y]\n# Count totol number, number at a manual-setting interval\nframe_id, tlwhs, tscores, track_ids = result\n" }, { "change_type": "MODIFY", "old_path": "deploy/pptracking/python/mot_jde_infer.py", "new_path": "deploy/pptracking/python/mot_jde_infer.py", "diff": "@@ -393,10 +393,21 @@ class JDE_Detector(Detector):\nresult = (frame_id + 1, online_tlwhs[0], online_scores[0],\nonline_ids[0])\nstatistic = flow_statistic(\n- result, self.secs_interval, self.do_entrance_counting,\n- self.do_break_in_counting, self.region_type, video_fps,\n- entrance, id_set, interval_id_set, in_id_list, out_id_list,\n- prev_center, records, data_type, num_classes)\n+ result,\n+ self.secs_interval,\n+ self.do_entrance_counting,\n+ self.do_break_in_counting,\n+ self.region_type,\n+ video_fps,\n+ entrance,\n+ id_set,\n+ interval_id_set,\n+ in_id_list,\n+ out_id_list,\n+ prev_center,\n+ records,\n+ data_type,\n+ ids2names=self.pred_config.labels)\nrecords = statistic['records']\nfps = 1. 
/ timer.duration\n" }, { "change_type": "MODIFY", "old_path": "deploy/pptracking/python/mot_sde_infer.py", "new_path": "deploy/pptracking/python/mot_sde_infer.py", "diff": "@@ -634,10 +634,21 @@ class SDE_Detector(Detector):\nresult = (frame_id + 1, online_tlwhs[0], online_scores[0],\nonline_ids[0])\nstatistic = flow_statistic(\n- result, self.secs_interval, self.do_entrance_counting,\n- self.do_break_in_counting, self.region_type, video_fps,\n- entrance, id_set, interval_id_set, in_id_list, out_id_list,\n- prev_center, records, data_type, num_classes)\n+ result,\n+ self.secs_interval,\n+ self.do_entrance_counting,\n+ self.do_break_in_counting,\n+ self.region_type,\n+ video_fps,\n+ entrance,\n+ id_set,\n+ interval_id_set,\n+ in_id_list,\n+ out_id_list,\n+ prev_center,\n+ records,\n+ data_type,\n+ ids2names=self.pred_config.labels)\nrecords = statistic['records']\nfps = 1. / timer.duration\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
[cherry-pick] fix illegal parking doc (#6764)
499,339
29.08.2022 14:18:33
-28,800
f52c63bf9f4c5dcbc9352e3f1bcf527ffd1c61ef
[cherry-pick] fix params save_images
[ { "change_type": "MODIFY", "old_path": "deploy/python/utils.py", "new_path": "deploy/python/utils.py", "diff": "@@ -108,8 +108,8 @@ def argsparser():\n\"calibration, trt_calib_mode need to set True.\")\nparser.add_argument(\n'--save_images',\n- action='store_true',\n- default=False,\n+ type=bool,\n+ default=True,\nhelp='Save visualization image results.')\nparser.add_argument(\n'--save_mot_txts',\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
[cherry-pick] fix params save_images (#6779)
499,299
29.08.2022 15:36:48
-28,800
2382374e7d2161aa95a33c28be4ad1c75b17ae58
fix bug in pipeline doc and update kpt training config
[ { "change_type": "RENAME", "old_path": "configs/pphuman/hrnet_w32_256x192.yml", "new_path": "configs/pphuman/dark_hrnet_w32_256x192.yml", "diff": "@@ -101,7 +101,7 @@ TrainReader:\nflip_pairs: *flip_perm\n- TopDownAffine:\ntrainsize: *trainsize\n- - ToHeatmapsTopDown:\n+ - ToHeatmapsTopDown_DARK:\nhmsize: *hmsize\nsigma: 2\nbatch_transforms:\n@@ -125,6 +125,7 @@ EvalReader:\nis_scale: true\n- Permute: {}\nbatch_size: 16\n+ drop_empty: false\nTestReader:\ninputs_def:\n@@ -139,4 +140,3 @@ TestReader:\nis_scale: true\n- Permute: {}\nbatch_size: 1\n- fuse_normalize: false #whether to fuse nomalize layer into model while export model\n" }, { "change_type": "MODIFY", "old_path": "deploy/pipeline/docs/tutorials/ppvehicle_attribute_en.md", "new_path": "deploy/pipeline/docs/tutorials/ppvehicle_attribute_en.md", "diff": "@@ -6,12 +6,12 @@ Vehicle attribute recognition is widely used in smart cities, smart transportati\n| Task | Algorithm | Precision | Inference Speed | Download |\n|-----------|------|-----------|----------|---------------------|\n-| Vehicle Detection/Tracking | PP-YOLOE | - | - | [Inference and Deployment Model](https://bj.bcebos.com/v1/paddledet/models/pipeline/mot_ppyoloe_l_36e_ppvehicle.zip) |\n-| Vehicle Attribute Recognition | PPLCNet | 90.81 | 2.36 ms | [Inference and Deployment Model](https://bj.bcebos.com/v1/paddledet/models/pipeline/vehicle_attribute_model.zip) |\n+| Vehicle Detection/Tracking | PP-YOLOE | mAP 63.9 | 38.67ms | [Inference and Deployment Model](https://bj.bcebos.com/v1/paddledet/models/pipeline/mot_ppyoloe_l_36e_ppvehicle.zip) |\n+| Vehicle Attribute Recognition | PPLCNet | 90.81 | 7.31 ms | [Inference and Deployment Model](https://bj.bcebos.com/v1/paddledet/models/pipeline/vehicle_attribute_model.zip) |\nNote:\n-1. The inference speed of the attribute model is obtained from the test on Intel(R) Xeon(R) Gold 6148 CPU @ 2.40GHz, with the MKLDNN acceleration strategy enabled, and 10 threads.\n+1. The inference speed of the attribute model is obtained from the test on NVIDIA T4, with TensorRT FP16. The time includes data pre-process, model inference and post-process.\n2. For introductions, please refer to [PP-LCNet Series](https://github.com/PaddlePaddle/PaddleClas/blob/release/2.4/docs/en/models/PP-LCNet_en.md). Related paper is available on PP-LCNet paper\n3. The training and test phase of vehicle attribute recognition model are both obtained from [VeRi dataset](https://www.v7labs.com/open-datasets/veri-dataset).\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
fix bug in pipeline doc and update kpt training config (#6769) (#6783)
499,374
29.08.2022 18:01:57
-28,800
b10ef7d9290562c90962032ae9df55849bbb22b2
[cherry-pick] fix recursive call of DLA
[ { "change_type": "MODIFY", "old_path": "ppdet/modeling/necks/centernet_fpn.py", "new_path": "ppdet/modeling/necks/centernet_fpn.py", "diff": "@@ -164,11 +164,11 @@ class IDAUp(nn.Layer):\nfor i in range(start_level + 1, end_level):\nupsample = getattr(self, 'up_' + str(i - start_level))\nproject = getattr(self, 'proj_' + str(i - start_level))\n-\ninputs[i] = project(inputs[i])\ninputs[i] = upsample(inputs[i])\nnode = getattr(self, 'node_' + str(i - start_level))\ninputs[i] = node(paddle.add(inputs[i], inputs[i - 1]))\n+ return inputs\nclass DLAUp(nn.Layer):\n@@ -197,8 +197,8 @@ class DLAUp(nn.Layer):\nout = [inputs[-1]] # start with 32\nfor i in range(len(inputs) - self.start_level - 1):\nida = getattr(self, 'ida_{}'.format(i))\n- ida(inputs, len(inputs) - i - 2, len(inputs))\n- out.insert(0, inputs[-1])\n+ outputs = ida(inputs, len(inputs) - i - 2, len(inputs))\n+ out.insert(0, outputs[-1])\nreturn out\n@@ -259,7 +259,9 @@ class CenterNetDLAFPN(nn.Layer):\ndef forward(self, body_feats):\n- dla_up_feats = self.dla_up(body_feats)\n+ inputs = [body_feats[i] for i in range(len(body_feats))]\n+\n+ dla_up_feats = self.dla_up(inputs)\nida_up_feats = []\nfor i in range(self.last_level - self.first_level):\n@@ -271,7 +273,11 @@ class CenterNetDLAFPN(nn.Layer):\nif self.with_sge:\nfeat = self.sge_attention(feat)\nif self.down_ratio != 4:\n- feat = F.interpolate(feat, scale_factor=self.down_ratio // 4, mode=\"bilinear\", align_corners=True)\n+ feat = F.interpolate(\n+ feat,\n+ scale_factor=self.down_ratio // 4,\n+ mode=\"bilinear\",\n+ align_corners=True)\nreturn feat\n@property\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
[cherry-pick] fix recursive call of DLA (#6771) (#6786)
499,339
30.08.2022 13:46:51
-28,800
d02bcd932c2f76bc1dd52e7c7c593743a7f0e8b7
[cherry-pick] fix ppyoloe amp bug, add reduce_mean to custom_black_list
[ { "change_type": "MODIFY", "old_path": "configs/ppyoloe/README.md", "new_path": "configs/ppyoloe/README.md", "diff": "@@ -78,12 +78,12 @@ The PaddleDetection team provides configs and weights of various feature detecti\nTraining PP-YOLOE+ on 8 GPUs with following command\n```bash\n-python -m paddle.distributed.launch --gpus 0,1,2,3,4,5,6,7 tools/train.py -c configs/ppyoloe/ppyoloe_plus_crn_l_80e_coco.yml\n+python -m paddle.distributed.launch --gpus 0,1,2,3,4,5,6,7 tools/train.py -c configs/ppyoloe/ppyoloe_plus_crn_l_80e_coco.yml --eval --amp\n```\n**Notes:**\n-- use `--amp` to train with default config to avoid out of memeory.\n- If you need to evaluate while training, please add `--eval`.\n+- PP-YOLOE+ supports mixed precision training, please add `--amp`.\n- PaddleDetection supports multi-machine distribued training, you can refer to [DistributedTraining tutorial](../../docs/DistributedTraining_en.md).\n" }, { "change_type": "MODIFY", "old_path": "configs/ppyoloe/_base_/ppyoloe_crn.yml", "new_path": "configs/ppyoloe/_base_/ppyoloe_crn.yml", "diff": "@@ -2,6 +2,7 @@ architecture: YOLOv3\nnorm_type: sync_bn\nuse_ema: true\nema_decay: 0.9998\n+custom_black_list: ['reduce_mean']\nYOLOv3:\nbackbone: CSPResNet\n" }, { "change_type": "MODIFY", "old_path": "configs/ppyoloe/_base_/ppyoloe_plus_crn.yml", "new_path": "configs/ppyoloe/_base_/ppyoloe_plus_crn.yml", "diff": "@@ -2,6 +2,7 @@ architecture: YOLOv3\nnorm_type: sync_bn\nuse_ema: true\nema_decay: 0.9998\n+custom_black_list: ['reduce_mean']\nYOLOv3:\nbackbone: CSPResNet\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
[cherry-pick] fix ppyoloe amp bug, add reduce_mean to custom_black_list (#6797)
499,327
30.08.2022 16:39:04
-28,800
42c7468b5f0d59d8da5830d793a4cb07312000d0
[cherry-pick] support for edgeboard
[ { "change_type": "MODIFY", "old_path": "ppdet/engine/export_utils.py", "new_path": "ppdet/engine/export_utils.py", "diff": "@@ -131,12 +131,15 @@ def _dump_infer_config(config, path, image_shape, model):\n'use_dynamic_shape': use_dynamic_shape\n})\nexport_onnx = config.get('export_onnx', False)\n+ export_eb = config.get('export_eb', False)\ninfer_arch = config['architecture']\nif 'RCNN' in infer_arch and export_onnx:\nlogger.warning(\n\"Exporting RCNN model to ONNX only support batch_size = 1\")\ninfer_cfg['export_onnx'] = True\n+ infer_cfg['export_eb'] = export_eb\n+\nif infer_arch in MOT_ARCH:\nif infer_arch == 'DeepSORT':\n" }, { "change_type": "MODIFY", "old_path": "ppdet/modeling/post_process.py", "new_path": "ppdet/modeling/post_process.py", "diff": "@@ -34,16 +34,17 @@ __all__ = [\n@register\nclass BBoxPostProcess(object):\n- __shared__ = ['num_classes', 'export_onnx']\n+ __shared__ = ['num_classes', 'export_onnx', 'export_eb']\n__inject__ = ['decode', 'nms']\ndef __init__(self, num_classes=80, decode=None, nms=None,\n- export_onnx=False):\n+ export_onnx=False, export_eb=False):\nsuper(BBoxPostProcess, self).__init__()\nself.num_classes = num_classes\nself.decode = decode\nself.nms = nms\nself.export_onnx = export_onnx\n+ self.export_eb = export_eb\ndef __call__(self, head_out, rois, im_shape, scale_factor):\n\"\"\"\n@@ -100,6 +101,10 @@ class BBoxPostProcess(object):\npred_result (Tensor): The final prediction results with shape [N, 6]\nincluding labels, scores and bboxes.\n\"\"\"\n+ if self.export_eb:\n+ # enable rcnn models for edgeboard hw to skip the following postprocess.\n+ return bboxes, bboxes, bbox_num\n+\nif not self.export_onnx:\nbboxes_list = []\nbbox_num_list = []\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
[cherry-pick] support for edgeboard #6719 (#6798)
499,348
30.08.2022 17:15:05
-28,800
ede22043927a944bb4cbea0e9455dd9c91b295f0
update demo&fix mtmct vis; fix=document_fix
[ { "change_type": "MODIFY", "old_path": "demo/car.jpg", "new_path": "demo/car.jpg", "diff": "Binary files a/demo/car.jpg and b/demo/car.jpg differ\n" }, { "change_type": "MODIFY", "old_path": "deploy/pipeline/config/examples/infer_cfg_illegal_parking.yml", "new_path": "deploy/pipeline/config/examples/infer_cfg_illegal_parking.yml", "diff": "@@ -11,7 +11,7 @@ MOT:\nVEHICLE_PLATE:\ndet_model_dir: https://bj.bcebos.com/v1/paddledet/models/pipeline/ch_PP-OCRv3_det_infer.tar.gz\ndet_limit_side_len: 736\n- det_limit_type: \"max\"\n+ det_limit_type: \"min\"\nrec_model_dir: https://bj.bcebos.com/v1/paddledet/models/pipeline/ch_PP-OCRv3_rec_infer.tar.gz\nrec_image_shape: [3, 48, 320]\nrec_batch_num: 6\n" }, { "change_type": "MODIFY", "old_path": "deploy/pipeline/config/examples/infer_cfg_vehicle_plate.yml", "new_path": "deploy/pipeline/config/examples/infer_cfg_vehicle_plate.yml", "diff": "@@ -15,7 +15,7 @@ MOT:\nVEHICLE_PLATE:\ndet_model_dir: https://bj.bcebos.com/v1/paddledet/models/pipeline/ch_PP-OCRv3_det_infer.tar.gz\ndet_limit_side_len: 736\n- det_limit_type: \"max\"\n+ det_limit_type: \"min\"\nrec_model_dir: https://bj.bcebos.com/v1/paddledet/models/pipeline/ch_PP-OCRv3_rec_infer.tar.gz\nrec_image_shape: [3, 48, 320]\nrec_batch_num: 6\n" }, { "change_type": "MODIFY", "old_path": "deploy/pipeline/config/infer_cfg_ppvehicle.yml", "new_path": "deploy/pipeline/config/infer_cfg_ppvehicle.yml", "diff": "@@ -16,7 +16,7 @@ MOT:\nVEHICLE_PLATE:\ndet_model_dir: https://bj.bcebos.com/v1/paddledet/models/pipeline/ch_PP-OCRv3_det_infer.tar.gz\ndet_limit_side_len: 736\n- det_limit_type: \"max\"\n+ det_limit_type: \"min\"\nrec_model_dir: https://bj.bcebos.com/v1/paddledet/models/pipeline/ch_PP-OCRv3_rec_infer.tar.gz\nrec_image_shape: [3, 48, 320]\nrec_batch_num: 6\n" }, { "change_type": "MODIFY", "old_path": "deploy/pipeline/pphuman/mtmct.py", "new_path": "deploy/pipeline/pphuman/mtmct.py", "diff": "@@ -148,10 +148,7 @@ def save_mtmct_vis_results(camera_results, captures, output_dir,\n# add attr vis\nif multi_res:\n- tid_list = [\n- 'c' + str(idx) + '_' + 't' + str(int(j))\n- for j in range(1, len(ids) + 1)\n- ] # c0_t1, c0_t2...\n+ tid_list = multi_res.keys() # c0_t1, c0_t2...\nall_attr_result = [multi_res[i][\"attrs\"]\nfor i in tid_list] # all cid_tid result\nif any(\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
update demo&fix mtmct vis; fix=document_fix (#6806)
499,363
30.08.2022 18:26:49
-28,800
1b1f5909d013ffabb008eafa636a4d6cb689207d
optimize doc for illegal parking
[ { "change_type": "MODIFY", "old_path": "deploy/pptracking/python/mot/visualize.py", "new_path": "deploy/pptracking/python/mot/visualize.py", "diff": "@@ -267,6 +267,8 @@ def plot_tracking_dict(image,\nfor key, value in illegal_parking_dict.items():\nx1, y1, w, h = value['bbox']\nplate = value['plate']\n+ if plate is None:\n+ plate = \"\"\n# red box\ncv2.rectangle(im, (int(x1), int(y1)),\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
optimize doc for illegal parking (#6811)
499,333
31.08.2022 13:51:56
-28,800
747c02a7a67a35ffba8a252c82951da11fffeae1
fix deadlink, test=document_fix
[ { "change_type": "MODIFY", "old_path": "README_en.md", "new_path": "README_en.md", "diff": "@@ -407,7 +407,6 @@ Please refer to [docs](deploy/pipeline/README_en.md) for details.\n- [Quick start](docs/tutorials/QUICK_STARTED_cn.md)\n- [Data preparation](docs/tutorials/data/README.md)\n- [Geting Started on PaddleDetection](docs/tutorials/GETTING_STARTED_cn.md)\n-- [Customize data training]((docs/tutorials/CustomizeDataTraining.md)\n- [FAQ]((docs/tutorials/FAQ)\n### Advanced tutorials\n@@ -446,7 +445,7 @@ Please refer to [docs](deploy/pipeline/README_en.md) for details.\n- [Object detection](docs/advanced_tutorials/customization/detection.md)\n- [Keypoint detection](docs/advanced_tutorials/customization/keypoint_detection.md)\n- [Multiple object tracking](docs/advanced_tutorials/customization/pphuman_mot.md)\n- - [Action recognition](docs/advanced_tutorials/customization/pphuman_action.md)\n+ - [Action recognition](docs/advanced_tutorials/customization/action_recognotion/)\n- [Attribute recognition](docs/advanced_tutorials/customization/pphuman_attribute.md)\n### Courses\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
fix deadlink, test=document_fix (#6816)
499,333
31.08.2022 17:46:16
-28,800
32be7960c23d10bf797173059f0002de6d30ae44
fix unittest & whl
[ { "change_type": "MODIFY", "old_path": "ppdet/modeling/tests/test_mstest.py", "new_path": "ppdet/modeling/tests/test_mstest.py", "diff": "@@ -21,6 +21,7 @@ import unittest\nfrom ppdet.core.workspace import load_config\nfrom ppdet.engine import Trainer\n+\nclass TestMultiScaleInference(unittest.TestCase):\ndef setUp(self):\nself.set_config()\n@@ -48,12 +49,13 @@ class TestMultiScaleInference(unittest.TestCase):\ntests_img_root = os.path.join(os.path.dirname(__file__), 'imgs')\n# input images to predict\n- imgs = ['coco2017_val2017_000000000139.jpg', 'coco2017_val2017_000000000724.jpg']\n+ imgs = [\n+ 'coco2017_val2017_000000000139.jpg',\n+ 'coco2017_val2017_000000000724.jpg'\n+ ]\nimgs = [os.path.join(tests_img_root, img) for img in imgs]\n- trainer.predict(imgs,\n- draw_threshold=0.5,\n- output_dir='output',\n- save_txt=True)\n+ trainer.predict(\n+ imgs, draw_threshold=0.5, output_dir='output', save_results=False)\nif __name__ == '__main__':\n" }, { "change_type": "MODIFY", "old_path": "scripts/build_wheel.sh", "new_path": "scripts/build_wheel.sh", "diff": "@@ -26,6 +26,7 @@ EGG_DIR=\"paddledet.egg-info\"\nCFG_DIR=\"configs\"\nTEST_DIR=\".tests\"\n+DATA_DIR=\"dataset\"\n# command line log config\nRED='\\033[0;31m'\n@@ -86,6 +87,7 @@ function unittest() {\n# make sure installed paddledet is used\nmkdir $TEST_DIR\ncp -r $CFG_DIR $TEST_DIR\n+ cp -r $DATA_DIR $TEST_DIR\ncd $TEST_DIR\nif [ $? != 0 ]; then\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
fix unittest & whl (#6820)
499,304
01.09.2022 17:04:44
-28,800
da0177b404ca9f864651c3476a8aeb232d82df94
fix avh demo
[ { "change_type": "ADD", "old_path": null, "new_path": "deploy/third_engine/demo_avh/.gitignore", "diff": "+include/inputs.h\n+include/outputs.h\n+\n+__pycache__/\n+build/\n\\ No newline at end of file\n" }, { "change_type": "MODIFY", "old_path": "deploy/third_engine/demo_avh/Makefile", "new_path": "deploy/third_engine/demo_avh/Makefile", "diff": "@@ -81,13 +81,27 @@ ${BUILD_DIR}/libcmsis_startup.a: $(CMSIS_STARTUP_SRCS)\n$(QUIET)$(AR) -cr $(abspath $(BUILD_DIR)/libcmsis_startup.a) $(abspath $(BUILD_DIR))/libcmsis_startup/*.o\n$(QUIET)$(RANLIB) $(abspath $(BUILD_DIR)/libcmsis_startup.a)\n+CMSIS_SHA_FILE=${CMSIS_PATH}/977abe9849781a2e788b02282986480ff4e25ea6.sha\n+ifneq (\"$(wildcard $(CMSIS_SHA_FILE))\",\"\")\n+${BUILD_DIR}/cmsis_nn/Source/libcmsis-nn.a:\n+ $(QUIET)mkdir -p $(@D)\n+ $(QUIET)cd $(CMSIS_PATH)/CMSIS/NN && $(CMAKE) -B $(abspath $(BUILD_DIR)/cmsis_nn) $(CMSIS_NN_CMAKE_FLAGS)\n+ $(QUIET)cd $(abspath $(BUILD_DIR)/cmsis_nn) && $(MAKE) all\n+else\n# Build CMSIS-NN\n${BUILD_DIR}/cmsis_nn/Source/SoftmaxFunctions/libCMSISNNSoftmax.a:\n$(QUIET)mkdir -p $(@D)\n$(QUIET)cd $(CMSIS_PATH)/CMSIS/NN && $(CMAKE) -B $(abspath $(BUILD_DIR)/cmsis_nn) $(CMSIS_NN_CMAKE_FLAGS)\n$(QUIET)cd $(abspath $(BUILD_DIR)/cmsis_nn) && $(MAKE) all\n+endif\n# Build demo application\n+ifneq (\"$(wildcard $(CMSIS_SHA_FILE))\",\"\")\n+$(BUILD_DIR)/demo: $(DEMO_MAIN) $(UART_SRCS) $(BUILD_DIR)/stack_allocator.o $(BUILD_DIR)/crt_backend_api.o \\\n+ ${BUILD_DIR}/libcodegen.a ${BUILD_DIR}/libcmsis_startup.a ${BUILD_DIR}/cmsis_nn/Source/libcmsis-nn.a\n+ $(QUIET)mkdir -p $(@D)\n+ $(QUIET)$(CC) $(PKG_CFLAGS) $(FREERTOS_FLAGS) -o $@ -Wl,--whole-archive $^ -Wl,--no-whole-archive $(PKG_LDFLAGS)\n+else\n$(BUILD_DIR)/demo: $(DEMO_MAIN) $(UART_SRCS) $(BUILD_DIR)/stack_allocator.o $(BUILD_DIR)/crt_backend_api.o \\\n${BUILD_DIR}/libcodegen.a ${BUILD_DIR}/libcmsis_startup.a \\\n${BUILD_DIR}/cmsis_nn/Source/SoftmaxFunctions/libCMSISNNSoftmax.a \\\n@@ -102,6 +116,7 @@ $(BUILD_DIR)/demo: $(DEMO_MAIN) $(UART_SRCS) $(BUILD_DIR)/stack_allocator.o $(BU\n${BUILD_DIR}/cmsis_nn/Source/PoolingFunctions/libCMSISNNPooling.a\n$(QUIET)mkdir -p $(@D)\n$(QUIET)$(CC) $(PKG_CFLAGS) $(FREERTOS_FLAGS) -o $@ -Wl,--whole-archive $^ -Wl,--no-whole-archive $(PKG_LDFLAGS)\n+endif\nclean:\n$(QUIET)rm -rf $(BUILD_DIR)/codegen\n" }, { "change_type": "MODIFY", "old_path": "deploy/third_engine/demo_avh/README.md", "new_path": "deploy/third_engine/demo_avh/README.md", "diff": "<!--- to you under the Apache License, Version 2.0 (the -->\n<!--- \"License\"); you may not use this file except in compliance -->\n<!--- with the License. You may obtain a copy of the License at -->\n+\n<!--- http://www.apache.org/licenses/LICENSE-2.0 -->\n+\n<!--- Unless required by applicable law or agreed to in writing, -->\n<!--- software distributed under the License is distributed on an -->\n<!--- \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -->\n<!--- KIND, either express or implied. See the License for the -->\n<!--- specific language governing permissions and limitations -->\n<!--- under the License. 
-->\n-Running PP-PicoDet via TVM on bare metal Arm(R) Cortex(R)-M55 CPU and CMSIS-NN\n-===============================================================\n-This folder contains an example of how to use TVM to run a PP-PicoDet model\n-on bare metal Cortex(R)-M55 CPU and CMSIS-NN.\n+Running PP-PicoDet object detection model on bare metal Arm(R) Cortex(R)-M55 CPU using Arm Virtual Hardware\n+======================================================================\n+\n+This folder contains an example of how to run a PP-PicoDet model on bare metal [Cortex(R)-M55 CPU](https://www.arm.com/products/silicon-ip-cpu/cortex-m/cortex-m55) using [Arm Virtual Hardware](https://www.arm.com/products/development-tools/simulation/virtual-hardware).\n+\n-Prerequisites\n+Running environment and prerequisites\n-------------\n-If the demo is run in the ci_cpu Docker container provided with TVM, then the following\n-software will already be installed.\n+Case 1: If the demo is run in Arm Virtual Hardware Amazon Machine Image(AMI) instance hosted by [AWS](https://aws.amazon.com/marketplace/pp/prodview-urbpq7yo5va7g?sr=0-1&ref_=beagle&applicationId=AWSMPContessa)/[AWS China](https://awsmarketplace.amazonaws.cn/marketplace/pp/prodview-2y7nefntbmybu), the following software will be installed through [configure_avh.sh](./configure_avh.sh) script. It will install automatically when you run the application through [run_demo.sh](./run_demo.sh) script.\n+You can refer to this [guide](https://arm-software.github.io/AVH/main/examples/html/MicroSpeech.html#amilaunch) to launch an Arm Virtual Hardware AMI instance.\n-If the demo is not run in the ci_cpu Docker container, then you will need the following:\n+Case 2: If the demo is run in the [ci_cpu Docker container](https://github.com/apache/tvm/blob/main/docker/Dockerfile.ci_cpu) provided with [TVM](https://github.com/apache/tvm), then the following software will already be installed.\n+\n+Case 3: If the demo is not run in the ci_cpu Docker container, then you will need the following:\n- Software required to build and run the demo (These can all be installed by running\ntvm/docker/install/ubuntu_install_ethosu_driver_stack.sh.)\n- [Fixed Virtual Platform (FVP) based on Arm(R) Corstone(TM)-300 software](https://release/2.5er.arm.com/tools-and-software/open-source-software/arm-platforms-software/arm-ecosystem-fvps)\n@@ -37,26 +42,37 @@ If the demo is not run in the ci_cpu Docker container, then you will need the fo\npip install -r ./requirements.txt\n```\n+In case2 and case3:\n+\n+You will need to update your PATH environment variable to include the path to cmake 3.19.5 and the FVP.\n+For example if you've installed these in ```/opt/arm``` , then you would do the following:\n+```bash\n+export PATH=/opt/arm/FVP_Corstone_SSE-300/models/Linux64_GCC-6.4:/opt/arm/cmake/bin:$PATH\n+```\n+\nYou will also need TVM which can either be:\n+ - Installed from TLCPack(see [TLCPack](https://tlcpack.ai/))\n- Built from source (see [Install from Source](https://tvm.apache.org/docs/install/from_source.html))\n- When building from source, the following need to be set in config.cmake:\n- set(USE_CMSISNN ON)\n- set(USE_MICRO ON)\n- set(USE_LLVM ON)\n- - Installed from TLCPack(see [TLCPack](https://tlcpack.ai/))\n-You will need to update your PATH environment variable to include the path to cmake 3.19.5 and the FVP.\n-For example if you've installed these in ```/opt/arm``` , then you would do the following:\n-```bash\n-export 
PATH=/opt/arm/FVP_Corstone_SSE-300/models/Linux64_GCC-6.4:/opt/arm/cmake/bin:$PATH\n-```\nRunning the demo application\n----------------------------\nType the following command to run the bare metal text recognition application ([src/demo_bare_metal.c](./src/demo_bare_metal.c)):\n+\n```bash\n./run_demo.sh\n```\n+\n+If you are not able to use Arm Virtual Hardware Amazon Machine Image(AMI) instance hosted by AWS/AWS China, specify argument --enable_FVP to 1 to make the application run on local Fixed Virtual Platforms (FVPs) executables.\n+\n+```bash\n+./run_demo.sh --enable_FVP 1\n+```\n+\nIf the Ethos(TM)-U platform and/or CMSIS have not been installed in /opt/arm/ethosu then\nthe locations for these can be specified as arguments to run_demo.sh, for example:\n@@ -65,13 +81,14 @@ the locations for these can be specified as arguments to run_demo.sh, for exampl\n--ethosu_platform_path /home/tvm-user/ethosu/core_platform\n```\n-This will:\n-- Download a PP-PicoDet text recognition model\n+With [run_demo.sh](./run_demo.sh) to run the demo application, it will:\n+- Set up running environment by installing the required prerequisites automatically if running in Arm Virtual Hardware Amazon AMI instance(not specify --enable_FVP to 1)\n+- Download a PP-PicoDet model\n- Use tvmc to compile the text recognition model for Cortex(R)-M55 CPU and CMSIS-NN\n- Create a C header file inputs.c containing the image data as a C array\n- Create a C header file outputs.c containing a C array where the output of inference will be stored\n- Build the demo application\n-- Run the demo application on a Fixed Virtual Platform (FVP) based on Arm(R) Corstone(TM)-300 software\n+- Run the demo application on a Arm Virtual Hardware based on Arm(R) Corstone(TM)-300 software\n- The application will report the text on the image and the corresponding score.\nUsing your own image\n@@ -82,9 +99,9 @@ image to be converted into an array of bytes for consumption by the model.\nThe demo can be modified to use an image of your choice by changing the following line in run_demo.sh\n```bash\n-python3 ./convert_image.py ../../demo/000000014439_640x640.jpg\n+python3 ./convert_image.py path/to/image\n```\nModel description\n-----------------\n-In this demo, the model we used is based on [PP-PicoDet](https://github.com/PaddlePaddle/PaddleDetection/tree/release/2.4/configs/picodet). Because of the excellent performance, PP-PicoDet are very suitable for deployment on mobile or CPU. And it is released by [PaddleDetection](https://github.com/PaddlePaddle/PaddleDetection).\n+In this demo, the model we used is based on [PP-PicoDet](https://github.com/PaddlePaddle/PaddleDetection/tree/release/2.5/configs/picodet). Because of the excellent performance, PP-PicoDet are very suitable for deployment on mobile or CPU. And it is released by [PaddleDetection](https://github.com/PaddlePaddle/PaddleDetection).\n" }, { "change_type": "ADD", "old_path": null, "new_path": "deploy/third_engine/demo_avh/README.md.bak", "diff": "+<!--- Licensed to the Apache Software Foundation (ASF) under one -->\n+<!--- or more contributor license agreements. See the NOTICE file -->\n+<!--- distributed with this work for additional information -->\n+<!--- regarding copyright ownership. The ASF licenses this file -->\n+<!--- to you under the Apache License, Version 2.0 (the -->\n+<!--- \"License\"); you may not use this file except in compliance -->\n+<!--- with the License. 
You may obtain a copy of the License at -->\n+<!--- http://www.apache.org/licenses/LICENSE-2.0 -->\n+<!--- Unless required by applicable law or agreed to in writing, -->\n+<!--- software distributed under the License is distributed on an -->\n+<!--- \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -->\n+<!--- KIND, either express or implied. See the License for the -->\n+<!--- specific language governing permissions and limitations -->\n+<!--- under the License. -->\n+Running PP-PicoDet via TVM on bare metal Arm(R) Cortex(R)-M55 CPU and CMSIS-NN\n+===============================================================\n+\n+This folder contains an example of how to use TVM to run a PP-PicoDet model\n+on bare metal Cortex(R)-M55 CPU and CMSIS-NN.\n+\n+Prerequisites\n+-------------\n+If the demo is run in the ci_cpu Docker container provided with TVM, then the following\n+software will already be installed.\n+\n+If the demo is not run in the ci_cpu Docker container, then you will need the following:\n+- Software required to build and run the demo (These can all be installed by running\n+ tvm/docker/install/ubuntu_install_ethosu_driver_stack.sh.)\n+ - [Fixed Virtual Platform (FVP) based on Arm(R) Corstone(TM)-300 software](https://developer.arm.com/tools-and-software/open-source-software/arm-platforms-software/arm-ecosystem-fvps)\n+ - [cmake 3.19.5](https://github.com/Kitware/CMake/releases/)\n+ - [GCC toolchain from Arm(R)](https://developer.arm.com/-/media/Files/downloads/gnu-rm/10-2020q4/gcc-arm-none-eabi-10-2020-q4-major-x86_64-linux.tar.bz2)\n+ - [Arm(R) Ethos(TM)-U NPU driver stack](https://review.mlplatform.org)\n+ - [CMSIS](https://github.com/ARM-software/CMSIS_5)\n+- The python libraries listed in the requirements.txt of this directory\n+ - These can be installed by running the following from the current directory:\n+ ```bash\n+ pip install -r ./requirements.txt\n+ ```\n+\n+You will also need TVM which can either be:\n+ - Built from source (see [Install from Source](https://tvm.apache.org/docs/install/from_source.html))\n+ - When building from source, the following need to be set in config.cmake:\n+ - set(USE_CMSISNN ON)\n+ - set(USE_MICRO ON)\n+ - set(USE_LLVM ON)\n+ - Installed from TLCPack(see [TLCPack](https://tlcpack.ai/))\n+\n+You will need to update your PATH environment variable to include the path to cmake 3.19.5 and the FVP.\n+For example if you've installed these in ```/opt/arm``` , then you would do the following:\n+```bash\n+export PATH=/opt/arm/FVP_Corstone_SSE-300/models/Linux64_GCC-6.4:/opt/arm/cmake/bin:$PATH\n+```\n+\n+Running the demo application\n+----------------------------\n+Type the following command to run the bare metal text recognition application ([src/demo_bare_metal.c](./src/demo_bare_metal.c)):\n+```bash\n+./run_demo.sh\n+```\n+If the Ethos(TM)-U platform and/or CMSIS have not been installed in /opt/arm/ethosu then\n+the locations for these can be specified as arguments to run_demo.sh, for example:\n+\n+```bash\n+./run_demo.sh --cmsis_path /home/tvm-user/cmsis \\\n+--ethosu_platform_path /home/tvm-user/ethosu/core_platform\n+```\n+\n+This will:\n+- Download a PP-PicoDet text recognition model\n+- Use tvmc to compile the text recognition model for Cortex(R)-M55 CPU and CMSIS-NN\n+- Create a C header file inputs.c containing the image data as a C array\n+- Create a C header file outputs.c containing a C array where the output of inference will be stored\n+- Build the demo application\n+- Run the demo application on a Fixed Virtual Platform (FVP) based on 
Arm(R) Corstone(TM)-300 software\n+- The application will report the text on the image and the corresponding score.\n+\n+Using your own image\n+--------------------\n+The create_image.py script takes a single argument on the command line which is the path of the\n+image to be converted into an array of bytes for consumption by the model.\n+\n+The demo can be modified to use an image of your choice by changing the following line in run_demo.sh\n+\n+```bash\n+python3 ./convert_image.py ../../demo/000000014439_640x640.jpg\n+```\n+\n+Model description\n+-----------------\n" }, { "change_type": "ADD", "old_path": null, "new_path": "deploy/third_engine/demo_avh/configure_avh.sh", "diff": "+#!/bin/bash\n+# Copyright (c) 2022 Arm Limited and Contributors. All rights reserved.\n+# Licensed to the Apache Software Foundation (ASF) under one\n+# or more contributor license agreements. See the NOTICE file\n+# distributed with this work for additional information\n+# regarding copyright ownership. The ASF licenses this file\n+# to you under the Apache License, Version 2.0 (the\n+# \"License\"); you may not use this file except in compliance\n+# with the License. You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing,\n+# software distributed under the License is distributed on an\n+# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+# KIND, either express or implied. See the License for the\n+# specific language governing permissions and limitations\n+# under the License.\n+\n+set -e\n+set -u\n+set -o pipefail\n+\n+# Show usage\n+function show_usage() {\n+ cat <<EOF\n+Usage: Set up running environment by installing the required prerequisites.\n+-h, --help\n+ Display this help message.\n+EOF\n+}\n+\n+if [ \"$#\" -eq 1 ] && [ \"$1\" == \"--help\" -o \"$1\" == \"-h\" ]; then\n+ show_usage\n+ exit 0\n+elif [ \"$#\" -ge 1 ]; then\n+ show_usage\n+ exit 1\n+fi\n+\n+echo -e \"\\e[36mStart setting up running environment\\e[0m\"\n+\n+# Install CMSIS\n+echo -e \"\\e[36mStart installing CMSIS\\e[0m\"\n+CMSIS_PATH=\"/opt/arm/ethosu/cmsis\"\n+mkdir -p \"${CMSIS_PATH}\"\n+\n+CMSIS_SHA=\"977abe9849781a2e788b02282986480ff4e25ea6\"\n+CMSIS_SHASUM=\"86c88d9341439fbb78664f11f3f25bc9fda3cd7de89359324019a4d87d169939eea85b7fdbfa6ad03aa428c6b515ef2f8cd52299ce1959a5444d4ac305f934cc\"\n+CMSIS_URL=\"http://github.com/ARM-software/CMSIS_5/archive/${CMSIS_SHA}.tar.gz\"\n+DOWNLOAD_PATH=\"/tmp/${CMSIS_SHA}.tar.gz\"\n+\n+wget ${CMSIS_URL} -O \"${DOWNLOAD_PATH}\"\n+echo \"$CMSIS_SHASUM\" ${DOWNLOAD_PATH} | sha512sum -c\n+tar -xf \"${DOWNLOAD_PATH}\" -C \"${CMSIS_PATH}\" --strip-components=1\n+touch \"${CMSIS_PATH}\"/\"${CMSIS_SHA}\".sha\n+echo -e \"\\e[36mCMSIS Installation SUCCESS\\e[0m\"\n+\n+# Install Arm(R) Ethos(TM)-U NPU driver stack\n+echo -e \"\\e[36mStart installing Arm(R) Ethos(TM)-U NPU driver stack\\e[0m\"\n+git clone \"https://review.mlplatform.org/ml/ethos-u/ethos-u-core-platform\" /opt/arm/ethosu/core_platform\n+cd /opt/arm/ethosu/core_platform\n+git checkout tags/\"21.11\"\n+echo -e \"\\e[36mArm(R) Ethos(TM)-U Core Platform Installation SUCCESS\\e[0m\"\n+\n+# Install Arm(R) GNU Toolchain\n+echo -e \"\\e[36mStart installing Arm(R) GNU Toolchain\\e[0m\"\n+mkdir -p /opt/arm/gcc-arm-none-eabi\n+export 
gcc_arm_url='https://developer.arm.com/-/media/Files/downloads/gnu-rm/10-2020q4/gcc-arm-none-eabi-10-2020-q4-major-x86_64-linux.tar.bz2?revision=ca0cbf9c-9de2-491c-ac48-898b5bbc0443&la=en&hash=68760A8AE66026BCF99F05AC017A6A50C6FD832A'\n+curl --retry 64 -sSL ${gcc_arm_url} | tar -C /opt/arm/gcc-arm-none-eabi --strip-components=1 -jx\n+export PATH=/opt/arm/gcc-arm-none-eabi/bin:$PATH\n+arm-none-eabi-gcc --version\n+arm-none-eabi-g++ --version\n+echo -e \"\\e[36mArm(R) Arm(R) GNU Toolchain Installation SUCCESS\\e[0m\"\n+\n+# Install TVM from TLCPack\n+echo -e \"\\e[36mStart installing TVM\\e[0m\"\n+pip install tlcpack-nightly -f https://tlcpack.ai/wheels\n+echo -e \"\\e[36mTVM Installation SUCCESS\\e[0m\"\n\\ No newline at end of file\n" }, { "change_type": "MODIFY", "old_path": "deploy/third_engine/demo_avh/convert_image.py", "new_path": "deploy/third_engine/demo_avh/convert_image.py", "diff": "@@ -24,10 +24,10 @@ import math\nfrom PIL import Image\nimport numpy as np\n+\ndef resize_norm_img(img, image_shape, padding=True):\nimgC, imgH, imgW = image_shape\n- img = cv2.resize(\n- img, (imgW, imgH), interpolation=cv2.INTER_LINEAR)\n+ img = cv2.resize(img, (imgW, imgH), interpolation=cv2.INTER_LINEAR)\nimg = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\nimg = np.transpose(img, [2, 0, 1]) / 255\nimg = np.expand_dims(img, 0)\n@@ -47,9 +47,8 @@ def create_header_file(name, tensor_name, tensor_data, output_path):\nraw_path = file_path.with_suffix(\".h\").resolve()\nwith open(raw_path, \"a\") as header_file:\nheader_file.write(\n- \"\\n\"\n- + f\"const size_t {tensor_name}_len = {tensor_data.size};\\n\"\n- + f'__attribute__((section(\".data.tvm\"), aligned(16))) float {tensor_name}[] = '\n+ \"\\n\" + f\"const size_t {tensor_name}_len = {tensor_data.size};\\n\" +\n+ f'__attribute__((section(\".data.tvm\"), aligned(16))) float {tensor_name}[] = '\n)\nheader_file.write(\"{\")\n@@ -72,7 +71,9 @@ def create_headers(image_name):\n# # Add the batch dimension, as we are expecting 4-dimensional input: NCHW.\nimg_data = np.expand_dims(img_data, axis=0)\n+ if os.path.exists(\"./include/inputs.h\"):\nos.remove(\"./include/inputs.h\")\n+ if os.path.exists(\"./include/outputs.h\"):\nos.remove(\"./include/outputs.h\")\n# Create input header file\ncreate_header_file(\"inputs\", \"input\", img_data, \"./include\")\n@@ -82,15 +83,13 @@ def create_headers(image_name):\n\"outputs\",\n\"output0\",\noutput_data,\n- \"./include\",\n- )\n+ \"./include\", )\noutput_data = np.zeros([170000], np.float)\ncreate_header_file(\n\"outputs\",\n\"output1\",\noutput_data,\n- \"./include\",\n- )\n+ \"./include\", )\nif __name__ == \"__main__\":\n" }, { "change_type": "MODIFY", "old_path": "deploy/third_engine/demo_avh/corstone300.ld", "new_path": "deploy/third_engine/demo_avh/corstone300.ld", "diff": "@@ -247,10 +247,10 @@ SECTIONS\n} > DTCM\n- .bss.NoInit :\n+ .bss.noinit (NOLOAD):\n{\n. = ALIGN(16);\n- *(.bss.NoInit)\n+ *(.bss.noinit.*)\n. = ALIGN(16);\n} > DDR AT > DDR\n" }, { "change_type": "ADD", "old_path": "deploy/third_engine/demo_avh/image/000000014439_640x640.jpg", "new_path": "deploy/third_engine/demo_avh/image/000000014439_640x640.jpg", "diff": "Binary files /dev/null and b/deploy/third_engine/demo_avh/image/000000014439_640x640.jpg differ\n" }, { "change_type": "MODIFY", "old_path": "deploy/third_engine/demo_avh/run_demo.sh", "new_path": "deploy/third_engine/demo_avh/run_demo.sh", "diff": "# KIND, either express or implied. 
See the License for the\n# specific language governing permissions and limitations\n# under the License.\n-export PATH=/opt/arm/FVP_Corstone_SSE-300/models/Linux64_GCC-6.4:/opt/arm/cmake/bin:$PATH\nset -e\nset -u\nset -o pipefail\n@@ -34,9 +33,19 @@ Usage: run_demo.sh\nSet path to FVP.\n--cmake_path\nSet path to cmake.\n+--enable_FVP\n+ Set 1 to run application on local Fixed Virtual Platforms (FVPs) executables.\nEOF\n}\n+# Configure environment variables\n+FVP_enable=0\n+export PATH=/opt/arm/gcc-arm-none-eabi/bin:$PATH\n+\n+# Install python libraries\n+echo -e \"\\e[36mInstall python libraries\\e[0m\"\n+sudo pip install -r ./requirements.txt\n+\n# Parse arguments\nwhile (( $# )); do\ncase \"$1\" in\n@@ -93,6 +102,18 @@ while (( $# )); do\nfi\n;;\n+ --enable_FVP)\n+ if [ $# -gt 1 ] && [ \"$2\" == \"1\" -o \"$2\" == \"0\" ];\n+ then\n+ FVP_enable=\"$2\"\n+ shift 2\n+ else\n+ echo 'ERROR: --enable_FVP requires a right argument 1 or 0' >&2\n+ show_usage >&2\n+ exit 1\n+ fi\n+ ;;\n+\n-*|--*)\necho \"Error: Unknown flag: $1\" >&2\nshow_usage >&2\n@@ -101,6 +122,16 @@ while (( $# )); do\nesac\ndone\n+# Choose running environment: cloud(default) or local environment\n+Platform=\"VHT_Corstone_SSE-300_Ethos-U55\"\n+if [ $FVP_enable == \"1\" ]; then\n+ Platform=\"FVP_Corstone_SSE-300_Ethos-U55\"\n+ echo -e \"\\e[36mRun application on local Fixed Virtual Platforms (FVPs)\\e[0m\"\n+else\n+ if [ ! -d \"/opt/arm/\" ]; then\n+ sudo ./configure_avh.sh\n+ fi\n+fi\n# Directories\nscript_dir=\"$( cd \"$( dirname \"${BASH_SOURCE[0]}\" )\" &> /dev/null && pwd )\"\n@@ -110,6 +141,11 @@ make cleanall\nmkdir -p build\ncd build\n+# Get PaddlePaddle inference model\n+echo -e \"\\e[36mDownload PaddlePaddle inference model\\e[0m\"\n+wget https://bj.bcebos.com/v1/paddledet/deploy/Inference/picodet_s_320_coco_lcnet_no_nms.tar\n+tar -xf picodet_s_320_coco_lcnet_no_nms.tar\n+\n# Compile model for Arm(R) Cortex(R)-M55 CPU and CMSIS-NN\n# An alternative to using \"python3 -m tvm.driver.tvmc\" is to call\n# \"tvmc\" directly once TVM has been pip installed.\n@@ -123,7 +159,7 @@ python3 -m tvm.driver.tvmc compile --target=cmsis-nn,c \\\n--pass-config tir.usmp.enable=1 \\\n--pass-config tir.usmp.algorithm=hill_climb \\\n--pass-config tir.disable_storage_rewrite=1 \\\n- --pass-config tir.disable_vectorize=1 ../models/picodet_s_320_coco_lcnet_no_nms/model \\\n+ --pass-config tir.disable_vectorize=1 picodet_s_320_coco_lcnet_no_nms/model.pdmodel \\\n--output-format=mlf \\\n--model-format=paddle \\\n--module-name=picodet \\\n@@ -131,21 +167,18 @@ python3 -m tvm.driver.tvmc compile --target=cmsis-nn,c \\\n--output=picodet.tar\ntar -xf picodet.tar\n-\n# Create C header files\ncd ..\n-python3 ./convert_image.py ../../demo/000000014439_640x640.jpg\n+python3 ./convert_image.py ./image/000000014439_640x640.jpg\n# Build demo executable\n-echo \"Build demo executable...\"\ncd ${script_dir}\necho ${script_dir}\nmake\n-echo \"End build demo executable...\"\n-# Run demo executable on the FVP\n-FVP_Corstone_SSE-300_Ethos-U55 -C cpu0.CFGDTCMSZ=15 \\\n+# Run demo executable on the AVH\n+$Platform -C cpu0.CFGDTCMSZ=15 \\\n-C cpu0.CFGITCMSZ=15 -C mps3_board.uart0.out_file=\\\"-\\\" -C mps3_board.uart0.shutdown_tag=\\\"EXITTHESIM\\\" \\\n-C mps3_board.visualisation.disable-visualisation=1 -C mps3_board.telnetterminal0.start_telnet=0 \\\n-C mps3_board.telnetterminal1.start_telnet=0 -C mps3_board.telnetterminal2.start_telnet=0 -C mps3_board.telnetterminal5.start_telnet=0 \\\n-./build/demo\n+./build/demo --stat\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
fix avh demo (#6836)
499,298
05.09.2022 17:42:32
-28,800
b127979f34289dd6b47303a9d06fd253b09a092a
[cherry-pick] add fuse_normalize for ppyoloe smalldet
[ { "change_type": "MODIFY", "old_path": "configs/smalldet/ppyoloe_crn_l_80e_sliced_visdrone_640_025.yml", "new_path": "configs/smalldet/ppyoloe_crn_l_80e_sliced_visdrone_640_025.yml", "diff": "@@ -20,6 +20,10 @@ TrainReader:\nEvalReader:\nbatch_size: 1\n+TestReader:\n+ batch_size: 1\n+ fuse_normalize: True\n+\nepoch: 80\nLearningRate:\n" }, { "change_type": "MODIFY", "old_path": "configs/smalldet/ppyoloe_crn_l_80e_sliced_visdrone_640_025_slice_infer.yml", "new_path": "configs/smalldet/ppyoloe_crn_l_80e_sliced_visdrone_640_025_slice_infer.yml", "diff": "@@ -20,6 +20,10 @@ TrainReader:\nEvalReader:\nbatch_size: 1 # only support bs=1 when slice infer\n+TestReader:\n+ batch_size: 1\n+ fuse_normalize: True\n+\nepoch: 80\nLearningRate:\n" }, { "change_type": "MODIFY", "old_path": "configs/smalldet/ppyoloe_p2_crn_l_80e_sliced_DOTA_500_025.yml", "new_path": "configs/smalldet/ppyoloe_p2_crn_l_80e_sliced_DOTA_500_025.yml", "diff": "@@ -28,6 +28,10 @@ TrainReader:\nEvalReader:\nbatch_size: 1\n+TestReader:\n+ batch_size: 1\n+ fuse_normalize: True\n+\nepoch: 80\nLearningRate:\n" }, { "change_type": "MODIFY", "old_path": "configs/smalldet/ppyoloe_p2_crn_l_80e_sliced_xview_400_025.yml", "new_path": "configs/smalldet/ppyoloe_p2_crn_l_80e_sliced_xview_400_025.yml", "diff": "@@ -28,6 +28,10 @@ TrainReader:\nEvalReader:\nbatch_size: 1\n+TestReader:\n+ batch_size: 1\n+ fuse_normalize: True\n+\nepoch: 80\nLearningRate:\n" }, { "change_type": "MODIFY", "old_path": "configs/visdrone/ppyoloe_crn_l_80e_visdrone.yml", "new_path": "configs/visdrone/ppyoloe_crn_l_80e_visdrone.yml", "diff": "@@ -20,6 +20,10 @@ TrainReader:\nEvalReader:\nbatch_size: 1\n+TestReader:\n+ batch_size: 1\n+ fuse_normalize: True\n+\nepoch: 80\nLearningRate:\n" }, { "change_type": "MODIFY", "old_path": "configs/visdrone/ppyoloe_crn_l_alpha_largesize_80e_visdrone.yml", "new_path": "configs/visdrone/ppyoloe_crn_l_alpha_largesize_80e_visdrone.yml", "diff": "@@ -55,3 +55,4 @@ TestReader:\n- NormalizeImage: {mean: [0.485, 0.456, 0.406], std: [0.229, 0.224, 0.225], is_scale: True}\n- Permute: {}\nbatch_size: 1\n+ fuse_normalize: True\n" }, { "change_type": "MODIFY", "old_path": "configs/visdrone/ppyoloe_crn_l_p2_alpha_80e_visdrone.yml", "new_path": "configs/visdrone/ppyoloe_crn_l_p2_alpha_80e_visdrone.yml", "diff": "@@ -13,6 +13,10 @@ TrainReader:\nEvalReader:\nbatch_size: 1\n+TestReader:\n+ batch_size: 1\n+ fuse_normalize: True\n+\nLearningRate:\nbase_lr: 0.005\n" }, { "change_type": "MODIFY", "old_path": "configs/visdrone/ppyoloe_crn_l_p2_alpha_largesize_80e_visdrone.yml", "new_path": "configs/visdrone/ppyoloe_crn_l_p2_alpha_largesize_80e_visdrone.yml", "diff": "@@ -62,3 +62,4 @@ TestReader:\n- NormalizeImage: {mean: [0.485, 0.456, 0.406], std: [0.229, 0.224, 0.225], is_scale: True}\n- Permute: {}\nbatch_size: 1\n+ fuse_normalize: True\n" }, { "change_type": "MODIFY", "old_path": "configs/visdrone/ppyoloe_crn_s_80e_visdrone.yml", "new_path": "configs/visdrone/ppyoloe_crn_s_80e_visdrone.yml", "diff": "@@ -20,6 +20,10 @@ TrainReader:\nEvalReader:\nbatch_size: 1\n+TestReader:\n+ batch_size: 1\n+ fuse_normalize: True\n+\nepoch: 80\nLearningRate:\n" }, { "change_type": "MODIFY", "old_path": "configs/visdrone/ppyoloe_crn_s_p2_alpha_80e_visdrone.yml", "new_path": "configs/visdrone/ppyoloe_crn_s_p2_alpha_80e_visdrone.yml", "diff": "@@ -13,6 +13,10 @@ TrainReader:\nEvalReader:\nbatch_size: 1\n+TestReader:\n+ batch_size: 1\n+ fuse_normalize: True\n+\nLearningRate:\nbase_lr: 0.005\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
[cherry-pick] add fuse_normalize for ppyoloe smalldet (#6861)
499,298
05.09.2022 20:24:28
-28,800
50d9662c20dae25b1faebd1ccfaccddd26c563d5
add ppyoloeplus visdrone
[ { "change_type": "ADD", "old_path": null, "new_path": "configs/visdrone/ppyoloe_plus_crn_l_largesize_80e_visdrone.yml", "diff": "+_BASE_: [\n+ 'ppyoloe_crn_l_80e_visdrone.yml',\n+]\n+log_iter: 100\n+snapshot_epoch: 10\n+weights: output/ppyoloe_plus_crn_l_largesize_80e_visdrone/model_final\n+pretrain_weights: https://paddledet.bj.bcebos.com/models/ppyoloe_plus_crn_l_80e_coco.pdparams\n+\n+\n+CSPResNet:\n+ use_alpha: True\n+\n+\n+LearningRate:\n+ base_lr: 0.0025\n+\n+\n+worker_num: 2\n+eval_height: &eval_height 1920\n+eval_width: &eval_width 1920\n+eval_size: &eval_size [*eval_height, *eval_width]\n+\n+TrainReader:\n+ sample_transforms:\n+ - Decode: {}\n+ - RandomDistort: {}\n+ - RandomExpand: {fill_value: [123.675, 116.28, 103.53]}\n+ - RandomCrop: {}\n+ - RandomFlip: {}\n+ batch_transforms:\n+ - BatchRandomResize: {target_size: [1024, 1088, 1152, 1216, 1280, 1344, 1408, 1472, 1536, 1600, 1664, 1728, 1792, 1856, 1920], random_size: True, random_interp: True, keep_ratio: False}\n+ - NormalizeImage: {mean: [0., 0., 0.], std: [1., 1., 1.], norm_type: none}\n+ - Permute: {}\n+ - PadGT: {}\n+ batch_size: 2\n+ shuffle: true\n+ drop_last: true\n+ use_shared_memory: true\n+ collate_batch: true\n+\n+EvalReader:\n+ sample_transforms:\n+ - Decode: {}\n+ - Resize: {target_size: *eval_size, keep_ratio: False, interp: 2}\n+ - NormalizeImage: {mean: [0., 0., 0.], std: [1., 1., 1.], norm_type: none}\n+ - Permute: {}\n+ batch_size: 1\n+\n+TestReader:\n+ inputs_def:\n+ image_shape: [3, *eval_height, *eval_width]\n+ sample_transforms:\n+ - Decode: {}\n+ - Resize: {target_size: *eval_size, keep_ratio: False, interp: 2}\n+ - NormalizeImage: {mean: [0., 0., 0.], std: [1., 1., 1.], norm_type: none}\n+ - Permute: {}\n+ batch_size: 1\n+ fuse_normalize: True\n" }, { "change_type": "MODIFY", "old_path": "deploy/python/infer.py", "new_path": "deploy/python/infer.py", "diff": "@@ -235,7 +235,7 @@ class Detector(object):\nimport sahi\nfrom sahi.slicing import slice_image\nexcept Exception as e:\n- logger.error(\n+ print(\n'sahi not found, plaese install sahi. '\n'for example: `pip install sahi`, see https://github.com/obss/sahi.'\n)\n@@ -251,6 +251,7 @@ class Detector(object):\noverlap_width_ratio=overlap_ratio[1])\nsub_img_num = len(slice_image_result)\nmerged_bboxs = []\n+ print('sub_img_num', sub_img_num)\nbatch_image_list = [\nslice_image_result.images[_ind] for _ind in range(sub_img_num)\n" }, { "change_type": "MODIFY", "old_path": "deploy/python/visualize.py", "new_path": "deploy/python/visualize.py", "diff": "@@ -237,7 +237,7 @@ def visualize_pose(imgfile,\nimport matplotlib\nplt.switch_backend('agg')\nexcept Exception as e:\n- logger.error('Matplotlib not found, please install matplotlib.'\n+ print('Matplotlib not found, please install matplotlib.'\n'for example: `pip install matplotlib`.')\nraise e\nskeletons, scores = results['keypoint']\n" }, { "change_type": "MODIFY", "old_path": "ppdet/data/source/dataset.py", "new_path": "ppdet/data/source/dataset.py", "diff": "# limitations under the License.\nimport os\n+import copy\nimport numpy as np\n-\ntry:\nfrom collections.abc import Sequence\nexcept Exception:\n@@ -22,9 +22,11 @@ except Exception:\nfrom paddle.io import Dataset\nfrom ppdet.core.workspace import register, serializable\nfrom ppdet.utils.download import get_dataset_path\n-import copy\nfrom ppdet.data import source\n+from ppdet.utils.logger import setup_logger\n+logger = setup_logger(__name__)\n+\n@serializable\nclass DetDataset(Dataset):\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
add ppyoloeplus visdrone (#6863)
499,333
07.09.2022 12:12:31
-28,800
06a719830cd39619f771f0ab1007bd86cbad63b6
update en doc and version check
[ { "change_type": "MODIFY", "old_path": "README.md", "new_path": "README.md", "diff": "-README_cn.md\n\\ No newline at end of file\n+README_en.md\n\\ No newline at end of file\n" }, { "change_type": "MODIFY", "old_path": "docs/tutorials/INSTALL.md", "new_path": "docs/tutorials/INSTALL.md", "diff": "@@ -22,7 +22,7 @@ Dependency of PaddleDetection and PaddlePaddle:\n| PaddleDetection version | PaddlePaddle version | tips |\n| :----------------: | :---------------: | :-------: |\n-| develop | >= 2.2.2 | Dygraph mode is set as default |\n+| develop | develop | Dygraph mode is set as default |\n| release/2.5 | >= 2.2.2 | Dygraph mode is set as default |\n| release/2.4 | >= 2.2.2 | Dygraph mode is set as default |\n| release/2.3 | >= 2.2.0rc | Dygraph mode is set as default |\n" }, { "change_type": "MODIFY", "old_path": "ppdet/utils/check.py", "new_path": "ppdet/utils/check.py", "diff": "@@ -87,7 +87,7 @@ def check_gpu(use_gpu):\npass\n-def check_version(version='2.0'):\n+def check_version(version='2.2'):\n\"\"\"\nLog error and exit when the installed version of paddlepaddle is\nnot satisfied.\n@@ -100,8 +100,19 @@ def check_version(version='2.0'):\npaddle_version.major, paddle_version.minor, paddle_version.patch,\npaddle_version.rc\n]\n+\n+ # Paddledet develop version is only used on Paddle develop\n+ if version_installed == ['0', '0', '0', '0'] and version != 'develop':\n+ raise Exception(\n+ \"PaddlePaddle version {} or higher is required, and develop version is only used for PaddleDetection develop version!\".\n+ format(version))\n+\nif version_installed == ['0', '0', '0', '0']:\nreturn\n+\n+ if version == 'develop':\n+ raise Exception(\"PaddlePaddle develop version is required!\")\n+\nversion_split = version.split('.')\nlength = min(len(version_installed), len(version_split))\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
update en doc and version check (#6876)
499,304
08.09.2022 21:16:40
-28,800
3b36a65514c7b55461db98b02f95bd3216e4a637
fix distill error
[ { "change_type": "MODIFY", "old_path": "ppdet/slim/distill.py", "new_path": "ppdet/slim/distill.py", "diff": "@@ -262,7 +262,7 @@ class FGDFeatureLoss(nn.Layer):\nzeros_init = parameter_init(\"constant\", 0.0)\nif student_channels != teacher_channels:\n- self.align = nn.Conv2d(\n+ self.align = nn.Conv2D(\nstudent_channels,\nteacher_channels,\nkernel_size=1,\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
fix distill error (#6907)
499,339
08.09.2022 21:36:31
-28,800
805420551109750bb68aeadbefb66928cb6a4f4a
[PPYOLOE] fix proj_conv in ptq bug
[ { "change_type": "MODIFY", "old_path": "configs/ppyoloe/_base_/ppyoloe_crn.yml", "new_path": "configs/ppyoloe/_base_/ppyoloe_crn.yml", "diff": "@@ -2,6 +2,7 @@ architecture: YOLOv3\nnorm_type: sync_bn\nuse_ema: true\nema_decay: 0.9998\n+ema_black_list: ['proj_conv.weight']\ncustom_black_list: ['reduce_mean']\nYOLOv3:\n" }, { "change_type": "MODIFY", "old_path": "configs/ppyoloe/_base_/ppyoloe_plus_crn.yml", "new_path": "configs/ppyoloe/_base_/ppyoloe_plus_crn.yml", "diff": "@@ -2,6 +2,7 @@ architecture: YOLOv3\nnorm_type: sync_bn\nuse_ema: true\nema_decay: 0.9998\n+ema_black_list: ['proj_conv.weight']\ncustom_black_list: ['reduce_mean']\nYOLOv3:\n" }, { "change_type": "MODIFY", "old_path": "configs/ppyoloe/ppyoloe_crn_l_36e_coco_xpu.yml", "new_path": "configs/ppyoloe/ppyoloe_crn_l_36e_coco_xpu.yml", "diff": "@@ -26,6 +26,8 @@ architecture: YOLOv3\nnorm_type: sync_bn\nuse_ema: true\nema_decay: 0.9998\n+ema_black_list: ['proj_conv.weight']\n+custom_black_list: ['reduce_mean']\nYOLOv3:\nbackbone: CSPResNet\n" }, { "change_type": "MODIFY", "old_path": "deploy/auto_compression/configs/ppyoloe_plus_m_qat_dis.yaml", "new_path": "deploy/auto_compression/configs/ppyoloe_plus_m_qat_dis.yaml", "diff": "@@ -14,6 +14,7 @@ Distillation:\nQuantization:\nuse_pact: true\n+ onnx_format: True\nactivation_quantize_type: 'moving_average_abs_max'\nquantize_op_types:\n- conv2d\n" }, { "change_type": "MODIFY", "old_path": "deploy/auto_compression/configs/ppyoloe_plus_reader.yml", "new_path": "deploy/auto_compression/configs/ppyoloe_plus_reader.yml", "diff": "-\n-\nmetric: COCO\nnum_classes: 80\n@@ -23,6 +21,6 @@ EvalReader:\nsample_transforms:\n- Decode: {}\n- Resize: {target_size: [640, 640], keep_ratio: False, interp: 2}\n- - NormalizeImage: {mean: [0., 0., 0.], std: [1., 1., 1.], is_scale: True}\n+ - NormalizeImage: {mean: [0., 0., 0.], std: [1., 1., 1.], norm_type: none}\n- Permute: {}\nbatch_size: 4\n" }, { "change_type": "MODIFY", "old_path": "deploy/auto_compression/configs/ppyoloe_plus_x_qat_dis.yaml", "new_path": "deploy/auto_compression/configs/ppyoloe_plus_x_qat_dis.yaml", "diff": "@@ -14,6 +14,7 @@ Distillation:\nQuantization:\nuse_pact: true\n+ onnx_format: True\nactivation_quantize_type: 'moving_average_abs_max'\nquantize_op_types:\n- conv2d\n" }, { "change_type": "MODIFY", "old_path": "deploy/python/utils.py", "new_path": "deploy/python/utils.py", "diff": "@@ -108,7 +108,7 @@ def argsparser():\n\"calibration, trt_calib_mode need to set True.\")\nparser.add_argument(\n'--save_images',\n- type=bool,\n+ type=ast.literal_eval,\ndefault=True,\nhelp='Save visualization image results.')\nparser.add_argument(\n" }, { "change_type": "MODIFY", "old_path": "ppdet/engine/trainer.py", "new_path": "ppdet/engine/trainer.py", "diff": "@@ -169,13 +169,15 @@ class Trainer(object):\nself.use_ema = ('use_ema' in cfg and cfg['use_ema'])\nif self.use_ema:\nema_decay = self.cfg.get('ema_decay', 0.9998)\n- cycle_epoch = self.cfg.get('cycle_epoch', -1)\nema_decay_type = self.cfg.get('ema_decay_type', 'threshold')\n+ cycle_epoch = self.cfg.get('cycle_epoch', -1)\n+ ema_black_list = self.cfg.get('ema_black_list', None)\nself.ema = ModelEMA(\nself.model,\ndecay=ema_decay,\nema_decay_type=ema_decay_type,\n- cycle_epoch=cycle_epoch)\n+ cycle_epoch=cycle_epoch,\n+ ema_black_list=ema_black_list)\nself._nranks = dist.get_world_size()\nself._local_rank = dist.get_rank()\n" }, { "change_type": "MODIFY", "old_path": "ppdet/modeling/assigners/atss_assigner.py", "new_path": "ppdet/modeling/assigners/atss_assigner.py", "diff": "@@ 
-120,7 +120,7 @@ class ATSSAssigner(nn.Layer):\n# negative batch\nif num_max_boxes == 0:\nassigned_labels = paddle.full(\n- [batch_size, num_anchors], bg_index, dtype=gt_labels.dtype)\n+ [batch_size, num_anchors], bg_index, dtype='int32')\nassigned_bboxes = paddle.zeros([batch_size, num_anchors, 4])\nassigned_scores = paddle.zeros(\n[batch_size, num_anchors, self.num_classes])\n" }, { "change_type": "MODIFY", "old_path": "ppdet/modeling/assigners/task_aligned_assigner.py", "new_path": "ppdet/modeling/assigners/task_aligned_assigner.py", "diff": "@@ -86,7 +86,7 @@ class TaskAlignedAssigner(nn.Layer):\n# negative batch\nif num_max_boxes == 0:\nassigned_labels = paddle.full(\n- [batch_size, num_anchors], bg_index, dtype=gt_labels.dtype)\n+ [batch_size, num_anchors], bg_index, dtype='int32')\nassigned_bboxes = paddle.zeros([batch_size, num_anchors, 4])\nassigned_scores = paddle.zeros(\n[batch_size, num_anchors, num_classes])\n" }, { "change_type": "MODIFY", "old_path": "ppdet/modeling/heads/ppyoloe_head.py", "new_path": "ppdet/modeling/heads/ppyoloe_head.py", "diff": "@@ -130,11 +130,10 @@ class PPYOLOEHead(nn.Layer):\nconstant_(reg_.weight)\nconstant_(reg_.bias, 1.0)\n- self.proj = paddle.linspace(0, self.reg_max, self.reg_max + 1)\n- self.proj_conv.weight.set_value(\n- self.proj.reshape([1, self.reg_max + 1, 1, 1]))\n+ proj = paddle.linspace(0, self.reg_max, self.reg_max + 1).reshape(\n+ [1, self.reg_max + 1, 1, 1])\n+ self.proj_conv.weight.set_value(proj)\nself.proj_conv.weight.stop_gradient = True\n-\nif self.eval_size:\nanchor_points, stride_tensor = self._generate_anchors()\nself.anchor_points = anchor_points\n@@ -200,15 +199,15 @@ class PPYOLOEHead(nn.Layer):\nfeat)\nreg_dist = self.pred_reg[i](self.stem_reg[i](feat, avg_feat))\nreg_dist = reg_dist.reshape([-1, 4, self.reg_max + 1, l]).transpose(\n- [0, 2, 1, 3])\n- reg_dist = self.proj_conv(F.softmax(reg_dist, axis=1))\n+ [0, 2, 3, 1])\n+ reg_dist = self.proj_conv(F.softmax(reg_dist, axis=1)).squeeze(1)\n# cls and reg\ncls_score = F.sigmoid(cls_logit)\ncls_score_list.append(cls_score.reshape([b, self.num_classes, l]))\n- reg_dist_list.append(reg_dist.reshape([b, 4, l]))\n+ reg_dist_list.append(reg_dist)\ncls_score_list = paddle.concat(cls_score_list, axis=-1)\n- reg_dist_list = paddle.concat(reg_dist_list, axis=-1)\n+ reg_dist_list = paddle.concat(reg_dist_list, axis=1)\nreturn cls_score_list, reg_dist_list, anchor_points, stride_tensor\n@@ -240,8 +239,8 @@ class PPYOLOEHead(nn.Layer):\ndef _bbox_decode(self, anchor_points, pred_dist):\nb, l, _ = get_static_shape(pred_dist)\n- pred_dist = F.softmax(pred_dist.reshape([b, l, 4, self.reg_max + 1\n- ])).matmul(self.proj)\n+ pred_dist = F.softmax(pred_dist.reshape([b, l, 4, self.reg_max + 1]))\n+ pred_dist = self.proj_conv(pred_dist.transpose([0, 3, 1, 2])).squeeze(1)\nreturn batch_distance2bbox(anchor_points, pred_dist)\ndef _bbox2distance(self, points, bbox):\n@@ -347,9 +346,8 @@ class PPYOLOEHead(nn.Layer):\nassigned_scores_sum = assigned_scores.sum()\nif paddle.distributed.get_world_size() > 1:\npaddle.distributed.all_reduce(assigned_scores_sum)\n- assigned_scores_sum = paddle.clip(\n- assigned_scores_sum / paddle.distributed.get_world_size(),\n- min=1)\n+ assigned_scores_sum /= paddle.distributed.get_world_size()\n+ assigned_scores_sum = paddle.clip(assigned_scores_sum, min=1.)\nloss_cls /= assigned_scores_sum\nloss_l1, loss_iou, loss_dfl = \\\n@@ -370,8 +368,7 @@ class PPYOLOEHead(nn.Layer):\ndef post_process(self, head_outs, scale_factor):\npred_scores, pred_dist, anchor_points, 
stride_tensor = head_outs\n- pred_bboxes = batch_distance2bbox(anchor_points,\n- pred_dist.transpose([0, 2, 1]))\n+ pred_bboxes = batch_distance2bbox(anchor_points, pred_dist)\npred_bboxes *= stride_tensor\nif self.exclude_post_process:\nreturn paddle.concat(\n" }, { "change_type": "MODIFY", "old_path": "ppdet/optimizer/ema.py", "new_path": "ppdet/optimizer/ema.py", "diff": "@@ -36,21 +36,30 @@ class ModelEMA(object):\nstep. Defaults is -1, which means not reset. Its function is to\nadd a regular effect to ema, which is set according to experience\nand is effective when the total training epoch is large.\n+ ema_black_list (set|list|tuple, optional): The custom EMA black_list.\n+ Blacklist of weight names that will not participate in EMA\n+ calculation. Default: None.\n\"\"\"\ndef __init__(self,\nmodel,\ndecay=0.9998,\nema_decay_type='threshold',\n- cycle_epoch=-1):\n+ cycle_epoch=-1,\n+ ema_black_list=None):\nself.step = 0\nself.epoch = 0\nself.decay = decay\n+ self.ema_decay_type = ema_decay_type\n+ self.cycle_epoch = cycle_epoch\n+ self.ema_black_list = self._match_ema_black_list(\n+ model.state_dict().keys(), ema_black_list)\nself.state_dict = dict()\nfor k, v in model.state_dict().items():\n+ if k in self.ema_black_list:\n+ self.state_dict[k] = v\n+ else:\nself.state_dict[k] = paddle.zeros_like(v)\n- self.ema_decay_type = ema_decay_type\n- self.cycle_epoch = cycle_epoch\nself._model_state = {\nk: weakref.ref(p)\n@@ -61,6 +70,9 @@ class ModelEMA(object):\nself.step = 0\nself.epoch = 0\nfor k, v in self.state_dict.items():\n+ if k in self.ema_black_list:\n+ self.state_dict[k] = v\n+ else:\nself.state_dict[k] = paddle.zeros_like(v)\ndef resume(self, state_dict, step=0):\n@@ -89,6 +101,7 @@ class ModelEMA(object):\n[v is not None for _, v in model_dict.items()]), 'python gc.'\nfor k, v in self.state_dict.items():\n+ if k not in self.ema_black_list:\nv = decay * v + (1 - decay) * model_dict[k]\nv.stop_gradient = True\nself.state_dict[k] = v\n@@ -99,6 +112,10 @@ class ModelEMA(object):\nreturn self.state_dict\nstate_dict = dict()\nfor k, v in self.state_dict.items():\n+ if k in self.ema_black_list:\n+ v.stop_gradient = True\n+ state_dict[k] = v\n+ else:\nif self.ema_decay_type != 'exponential':\nv = v / (1 - self._decay**self.step)\nv.stop_gradient = True\n@@ -108,3 +125,12 @@ class ModelEMA(object):\nself.reset()\nreturn state_dict\n+\n+ def _match_ema_black_list(self, weight_name, ema_black_list=None):\n+ out_list = set()\n+ if ema_black_list:\n+ for name in weight_name:\n+ for key in ema_black_list:\n+ if key in name:\n+ out_list.add(name)\n+ return out_list\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
[PPYOLOE] fix proj_conv in ptq bug (#6908)
499,301
14.09.2022 11:30:22
-28,800
7bd1ef4a473444048ab9b3a5568971f6fb76e5b6
replace letterbox to resize_pad
[ { "change_type": "MODIFY", "old_path": "configs/faster_rcnn/_base_/faster_rcnn_swin_reader.yml", "new_path": "configs/faster_rcnn/_base_/faster_rcnn_swin_reader.yml", "diff": "@@ -33,7 +33,8 @@ TestReader:\nimage_shape: [-1, 3, 640, 640]\nsample_transforms:\n- Decode: {}\n- - LetterBoxResize: {target_size: 640}\n+ - Resize: {interp: 2, target_size: 640, keep_ratio: True}\n+ - Pad: {size: 640}\n- NormalizeImage: {is_scale: true, mean: [0.485,0.456,0.406], std: [0.229, 0.224,0.225]}\n- Permute: {}\nbatch_size: 1\n" }, { "change_type": "MODIFY", "old_path": "configs/vitdet/_base_/reader.yml", "new_path": "configs/vitdet/_base_/reader.yml", "diff": "@@ -33,7 +33,8 @@ TestReader:\nimage_shape: [-1, 3, 640, 640]\nsample_transforms:\n- Decode: {}\n- - LetterBoxResize: {target_size: 640}\n+ - Resize: {interp: 2, target_size: 640, keep_ratio: True}\n+ - Pad: {size: 640}\n- NormalizeImage: {is_scale: true, mean: [0.485,0.456,0.406], std: [0.229, 0.224,0.225]}\n- Permute: {}\nbatch_size: 1\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
repleace letterbox to resize_pad (#6934)
499,333
14.09.2022 20:08:00
-28,800
6bc4f9437a459348b829cced7b5fe2e8001aa052
fix target when fg is empty
[ { "change_type": "MODIFY", "old_path": "ppdet/modeling/proposal_generator/target.py", "new_path": "ppdet/modeling/proposal_generator/target.py", "diff": "@@ -340,7 +340,7 @@ def generate_mask_target(gt_segms, rois, labels_int32, sampled_gt_inds,\n# generate fake roi if foreground is empty\nif fg_inds.numel() == 0:\nhas_fg = False\n- fg_inds = paddle.ones([1], dtype='int32')\n+ fg_inds = paddle.ones([1, 1], dtype='int64')\ninds_per_im = sampled_gt_inds[k]\ninds_per_im = paddle.gather(inds_per_im, fg_inds)\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
fix target when fg is empty (#6937)
499,363
15.09.2022 13:45:26
-28,800
92313771e21253ba345704ef5f71e06b3e41b857
add industry tutorial examples
[ { "change_type": "MODIFY", "old_path": "README_en.md", "new_path": "README_en.md", "diff": "@@ -464,6 +464,10 @@ Please refer to [docs](deploy/pipeline/README_en.md) for details.\n- [Visitor flow statistics based on FairMOT](https://aistudio.baidu.com/aistudio/projectdetail/2421822)\n+- [Guest analysis based on PP-Human](https://aistudio.baidu.com/aistudio/projectdetail/4537344)\n+\n+- [Fight recognition based on video classification](https://aistudio.baidu.com/aistudio/projectdetail/4512242)\n+\n- [More examples](./industrial_tutorial/README.md)\n## <img title=\"\" src=\"https://user-images.githubusercontent.com/48054808/157836473-1cf451fa-f01f-4148-ba68-b6d06d5da2f9.png\" alt=\"\" width=\"20\"> Applications\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
add industry tutorial examples (#6945)
499,333
15.09.2022 15:41:57
-28,800
92fb8b2e89db282cc425bc04ce20a942d1038062
fix deploy when bs=2
[ { "change_type": "MODIFY", "old_path": "deploy/python/infer.py", "new_path": "deploy/python/infer.py", "diff": "@@ -154,7 +154,7 @@ class Detector(object):\nnp_boxes_num = result['boxes_num']\nif sum(np_boxes_num) <= 0:\nprint('[WARNNING] No object detected.')\n- result = {'boxes': np.zeros([0, 6]), 'boxes_num': [0]}\n+ result = {'boxes': np.zeros([0, 6]), 'boxes_num': np_boxes_num}\nresult = {k: v for k, v in result.items() if v is not None}\nreturn result\n@@ -402,10 +402,8 @@ class Detector(object):\nself.pred_config.labels,\noutput_dir=self.output_dir,\nthreshold=self.threshold)\n-\nresults.append(result)\nprint('Test iter {}'.format(i))\n-\nresults = self.merge_batch_result(results)\nif save_results:\nPath(self.output_dir).mkdir(exist_ok=True)\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
fix deploy when bs=2 (#6915)
499,298
23.09.2022 10:42:58
-28,800
f8e57aebe7a7cec94a697445a9a425c0a4300957
[cherry-pick] update yoloseries doc
[ { "change_type": "MODIFY", "old_path": "docs/MODEL_ZOO_en.md", "new_path": "docs/MODEL_ZOO_en.md", "diff": "@@ -92,7 +92,19 @@ Please refer to[PP-YOLOE](https://github.com/PaddlePaddle/PaddleDetection/tree/r\n### YOLOX\n-Please refer to[YOLOX](https://github.com/PaddlePaddle/PaddleDetection/tree/release/2.5/configs/yolox)\n+Please refer to[YOLOX](https://github.com/PaddlePaddle/PaddleDetection/tree/develop/configs/yolox)\n+\n+### YOLOv5\n+\n+Please refer to[YOLOv5](https://github.com/nemonameless/PaddleDetection_YOLOSeries/tree/develop/configs/yolov5)\n+\n+### YOLOv6\n+\n+Please refer to[YOLOv6](https://github.com/nemonameless/PaddleDetection_YOLOSeries/tree/develop/configs/yolov6)\n+\n+### YOLOv7\n+\n+Please refer to[YOLOv7](https://github.com/nemonameless/PaddleDetection_YOLOSeries/tree/develop/configs/yolov7)\n## Rotating frame detection\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
[cherry-pick] update yoloseries doc (#7006)
499,298
23.09.2022 10:58:36
-28,800
54a5f0925f3e15c9d59f127f93be847c3cd48894
[cherry-pick] fix mot dataset link
[ { "change_type": "MODIFY", "old_path": "configs/mot/README_en.md", "new_path": "configs/mot/README_en.md", "diff": "@@ -86,18 +86,18 @@ PaddleDetection implement [JDE](https://github.com/Zhongdao/Towards-Realtime-MOT\n### Dataset Directory\nFirst, download the image_lists.zip using the following command, and unzip them into `PaddleDetection/dataset/mot`:\n```\n-wget https://dataset.bj.bcebos.com/mot/image_lists.zip\n+wget https://bj.bcebos.com/v1/paddledet/data/mot/image_lists.zip\n```\nThen, download the MIX dataset using the following command, and unzip them into `PaddleDetection/dataset/mot`:\n```\n-wget https://dataset.bj.bcebos.com/mot/MOT17.zip\n-wget https://dataset.bj.bcebos.com/mot/Caltech.zip\n-wget https://dataset.bj.bcebos.com/mot/CUHKSYSU.zip\n-wget https://dataset.bj.bcebos.com/mot/PRW.zip\n-wget https://dataset.bj.bcebos.com/mot/Cityscapes.zip\n-wget https://dataset.bj.bcebos.com/mot/ETHZ.zip\n-wget https://dataset.bj.bcebos.com/mot/MOT16.zip\n+wget https://bj.bcebos.com/v1/paddledet/data/mot/MOT17.zip\n+wget https://bj.bcebos.com/v1/paddledet/data/mot/Caltech.zip\n+wget https://bj.bcebos.com/v1/paddledet/data/mot/CUHKSYSU.zip\n+wget https://bj.bcebos.com/v1/paddledet/data/mot/PRW.zip\n+wget https://bj.bcebos.com/v1/paddledet/data/mot/Cityscapes.zip\n+wget https://bj.bcebos.com/v1/paddledet/data/mot/ETHZ.zip\n+wget https://bj.bcebos.com/v1/paddledet/data/mot/MOT16.zip\n```\nThe final directory is:\n" }, { "change_type": "MODIFY", "old_path": "docs/tutorials/data/PrepareMOTDataSet_en.md", "new_path": "docs/tutorials/data/PrepareMOTDataSet_en.md", "diff": "@@ -20,18 +20,18 @@ PaddleDetection implement [JDE](https://github.com/Zhongdao/Towards-Realtime-MOT\n### Dataset Directory\nFirst, download the image_lists.zip using the following command, and unzip them into `PaddleDetection/dataset/mot`:\n```\n-wget https://dataset.bj.bcebos.com/mot/image_lists.zip\n+wget https://bj.bcebos.com/v1/paddledet/data/mot/image_lists.zip\n```\nThen, download the MIX dataset using the following command, and unzip them into `PaddleDetection/dataset/mot`:\n```\n-wget https://dataset.bj.bcebos.com/mot/MOT17.zip\n-wget https://dataset.bj.bcebos.com/mot/Caltech.zip\n-wget https://dataset.bj.bcebos.com/mot/CUHKSYSU.zip\n-wget https://dataset.bj.bcebos.com/mot/PRW.zip\n-wget https://dataset.bj.bcebos.com/mot/Cityscapes.zip\n-wget https://dataset.bj.bcebos.com/mot/ETHZ.zip\n-wget https://dataset.bj.bcebos.com/mot/MOT16.zip\n+wget https://bj.bcebos.com/v1/paddledet/data/mot/MOT17.zip\n+wget https://bj.bcebos.com/v1/paddledet/data/mot/Caltech.zip\n+wget https://bj.bcebos.com/v1/paddledet/data/mot/CUHKSYSU.zip\n+wget https://bj.bcebos.com/v1/paddledet/data/mot/PRW.zip\n+wget https://bj.bcebos.com/v1/paddledet/data/mot/Cityscapes.zip\n+wget https://bj.bcebos.com/v1/paddledet/data/mot/ETHZ.zip\n+wget https://bj.bcebos.com/v1/paddledet/data/mot/MOT16.zip\n```\nThe final directory is:\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
[cherry-pick] fix mot dataset link (#7003)
499,301
23.09.2022 11:19:39
-28,800
7e19ac76273e30b64f5e34ce3a2f98e54abe4c8d
fix simota candidate_topk
[ { "change_type": "MODIFY", "old_path": "ppdet/modeling/assigners/simota_assigner.py", "new_path": "ppdet/modeling/assigners/simota_assigner.py", "diff": "@@ -115,7 +115,10 @@ class SimOTAAssigner(object):\ndef dynamic_k_matching(self, cost_matrix, pairwise_ious, num_gt):\nmatch_matrix = np.zeros_like(cost_matrix.numpy())\n# select candidate topk ious for dynamic-k calculation\n- topk_ious, _ = paddle.topk(pairwise_ious, self.candidate_topk, axis=0)\n+ topk_ious, _ = paddle.topk(\n+ pairwise_ious,\n+ min(self.candidate_topk, pairwise_ious.shape[0]),\n+ axis=0)\n# calculate dynamic k for each gt\ndynamic_ks = paddle.clip(topk_ious.sum(0).cast('int'), min=1)\nfor gt_idx in range(num_gt):\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
fix simota candidate_topk (#6979) (#7001)
499,339
27.09.2022 16:34:00
-28,800
94a6077b70700799f2db998a99cd06cff23375db
[dev] fix postprocess bug in deploy benchmark bug
[ { "change_type": "MODIFY", "old_path": "deploy/python/infer.py", "new_path": "deploy/python/infer.py", "diff": "@@ -152,7 +152,10 @@ class Detector(object):\ndef postprocess(self, inputs, result):\n# postprocess output of predictor\nnp_boxes_num = result['boxes_num']\n- if sum(np_boxes_num) <= 0:\n+ assert isinstance(np_boxes_num, np.ndarray), \\\n+ '`np_boxes_num` should be a `numpy.ndarray`'\n+\n+ if np_boxes_num.sum() <= 0:\nprint('[WARNNING] No object detected.')\nresult = {'boxes': np.zeros([0, 6]), 'boxes_num': np_boxes_num}\nresult = {k: v for k, v in result.items() if v is not None}\n@@ -188,7 +191,7 @@ class Detector(object):\nshape: [N, im_h, im_w]\n'''\n# model prediction\n- np_boxes, np_masks = None, None\n+ np_boxes_num, np_boxes, np_masks = np.array([0]), None, None\nfor i in range(repeats):\nself.predictor.run()\noutput_names = self.predictor.get_output_names()\n" } ]
Python
Apache License 2.0
paddlepaddle/paddledetection
[dev] fix postprocess bug in deploy benchmark bug (#7028) (#7031)