author (int64, 658 to 755k) | date (string, length 19) | timezone (int64, -46,800 to 43.2k) | hash (string, length 40) | message (string, 5 to 490 chars) | mods (list) | language (string, 20 classes) | license (string, 3 classes) | repo (string, 5 to 68 chars) | original_message (string, 12 to 491 chars)
---|---|---|---|---|---|---|---|---|---|
499,301 | 15.09.2021 15:39:22 | -28,800 | efcca773cbc0c7a3d532bdd9d748e78f2f005b4d | add faster rcnn swin models |
[
{
"change_type": "RENAME",
"old_path": "configs/faster_rcnn/_base_/faster_rcnn_swin_transformer_reader.yml",
"new_path": "configs/faster_rcnn/_base_/faster_rcnn_swin_reader.yml",
"diff": ""
},
{
"change_type": "RENAME",
"old_path": "configs/faster_rcnn/_base_/faster_rcnn_swin_transformer.yml",
"new_path": "configs/faster_rcnn/_base_/faster_rcnn_swin_tiny.yml",
"diff": ""
},
{
"change_type": "RENAME",
"old_path": "configs/faster_rcnn/_base_/optimizer_swin_transformer_1x.yml",
"new_path": "configs/faster_rcnn/_base_/optimizer_swin_1x.yml",
"diff": ""
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "configs/faster_rcnn/faster_rcnn_swin_tiny_1x_coco.yml",
"diff": "+_BASE_: [\n+ '../datasets/coco_detection.yml',\n+ '../runtime.yml',\n+ '_base_/optimizer_swin_1x.yml',\n+ '_base_/faster_rcnn_swin_tiny.yml',\n+ '_base_/faster_rcnn_swin_reader.yml',\n+]\n+weights: output/faster_rcnn_swin_tiny_1x_coco/model_final\n"
},
{
"change_type": "RENAME",
"old_path": "configs/faster_rcnn/faster_rcnn_swin_transformer_tiny_2x_coco.yml",
"new_path": "configs/faster_rcnn/faster_rcnn_swin_tiny_2x_coco.yml",
"diff": "_BASE_: [\n- 'faster_rcnn_swin_transformer_tiny_1x_coco.yml',\n+ 'faster_rcnn_swin_tiny_1x_coco.yml',\n]\n-weights: output/faster_swin_transformer_tiny_2x/model_final\n+weights: output/faster_rcnn_swin_tiny_2x_coco/model_final\nepoch: 24\n"
},
{
"change_type": "RENAME",
"old_path": "configs/faster_rcnn/faster_rcnn_swin_transformer_tiny_3x_coco.yml",
"new_path": "configs/faster_rcnn/faster_rcnn_swin_tiny_3x_coco.yml",
"diff": "_BASE_: [\n- 'faster_rcnn_swin_transformer_tiny_1x_coco.yml',\n+ 'faster_rcnn_swin_tiny_1x_coco.yml',\n]\n-weights: output/faster_swin_transformer_tiny_3x/model_final\n+weights: output/faster_rcnn_swin_tiny_3x_coco/model_final\nepoch: 36\n"
},
{
"change_type": "DELETE",
"old_path": "configs/faster_rcnn/faster_rcnn_swin_transformer_tiny_1x_coco.yml",
"new_path": null,
"diff": "-_BASE_: [\n- '../datasets/coco_detection.yml',\n- '../runtime.yml',\n- '_base_/optimizer_swin_transformer_1x.yml',\n- '_base_/faster_rcnn_swin_transformer.yml',\n- '_base_/faster_rcnn_swin_transformer_reader.yml',\n-]\n-weights: output/faster_swin_transformer_tiny_1x/model_final\n"
}
] |
Python | Apache License 2.0 | paddlepaddle/paddledetection | add faster rcnn swin models (#4187) |
499,339 | 15.09.2021 22:03:52 | -28,800 | 59c954630a58cd62e3394eb41851ad92c25e0963 | fix device key |
[
{
"change_type": "MODIFY",
"old_path": "tests/test.sh",
"new_path": "tests/test.sh",
"diff": "@@ -124,8 +124,8 @@ infer_export_list=$(func_parser_value \"${lines[37]}\")\ninfer_is_quant=$(func_parser_value \"${lines[38]}\")\n# parser inference\ninference_py=$(func_parser_value \"${lines[39]}\")\n-use_gpu_key=$(func_parser_key \"${lines[40]}\")\n-use_gpu_list=$(func_parser_value \"${lines[40]}\")\n+device_key=$(func_parser_key \"${lines[40]}\")\n+device_list=$(func_parser_value \"${lines[40]}\")\nuse_mkldnn_key=$(func_parser_key \"${lines[41]}\")\nuse_mkldnn_list=$(func_parser_value \"${lines[41]}\")\ncpu_threads_key=$(func_parser_key \"${lines[42]}\")\n@@ -159,8 +159,8 @@ function func_inference(){\n_img_dir=$5\n_flag_quant=$6\n# inference\n- for use_gpu in ${use_gpu_list[*]}; do\n- if [ ${use_gpu} = \"False\" ] || [ ${use_gpu} = \"cpu\" ]; then\n+ for device in ${device_list[*]}; do\n+ if [ ${device} = \"False\" ] || [ ${device} = \"cpu\" ]; then\nfor use_mkldnn in ${use_mkldnn_list[*]}; do\nif [ ${use_mkldnn} = \"False\" ] && [ ${_flag_quant} = \"True\" ]; then\ncontinue\n@@ -174,7 +174,7 @@ function func_inference(){\nset_cpu_threads=$(func_set_params \"${cpu_threads_key}\" \"${threads}\")\nset_model_dir=$(func_set_params \"${infer_model_key}\" \"${_model_dir}\")\nset_infer_params1=$(func_set_params \"${infer_key1}\" \"${infer_value1}\")\n- command=\"${_python} ${_script} ${use_gpu_key}=${use_gpu} ${use_mkldnn_key}=${use_mkldnn} ${set_cpu_threads} ${set_model_dir} ${set_batchsize} ${set_infer_data} ${set_benchmark} ${set_infer_params1} > ${_save_log_path} 2>&1 \"\n+ command=\"${_python} ${_script} ${device_key}=${device} ${use_mkldnn_key}=${use_mkldnn} ${set_cpu_threads} ${set_model_dir} ${set_batchsize} ${set_infer_data} ${set_benchmark} ${set_infer_params1} > ${_save_log_path} 2>&1 \"\neval $command\nlast_status=${PIPESTATUS[0]}\neval \"cat ${_save_log_path}\"\n@@ -182,7 +182,7 @@ function func_inference(){\ndone\ndone\ndone\n- elif [ ${use_gpu} = \"True\" ] || [ ${use_gpu} = \"gpu\" ]; then\n+ elif [ ${device} = \"True\" ] || [ ${device} = \"gpu\" ]; then\nfor use_trt in ${use_trt_list[*]}; do\nfor precision in ${precision_list[*]}; do\nif [[ ${_flag_quant} = \"False\" ]] && [[ ${precision} =~ \"int8\" ]]; then\n@@ -203,7 +203,7 @@ function func_inference(){\nset_precision=$(func_set_params \"${precision_key}\" \"${precision}\")\nset_model_dir=$(func_set_params \"${infer_model_key}\" \"${_model_dir}\")\nset_infer_params1=$(func_set_params \"${infer_key1}\" \"${infer_value1}\")\n- command=\"${_python} ${_script} ${use_gpu_key}=${use_gpu} ${set_tensorrt} ${set_precision} ${set_model_dir} ${set_batchsize} ${set_infer_data} ${set_benchmark} ${set_infer_params1} > ${_save_log_path} 2>&1 \"\n+ command=\"${_python} ${_script} ${device_key}=${device} ${set_tensorrt} ${set_precision} ${set_model_dir} ${set_batchsize} ${set_infer_data} ${set_benchmark} ${set_infer_params1} > ${_save_log_path} 2>&1 \"\neval $command\nlast_status=${PIPESTATUS[0]}\neval \"cat ${_save_log_path}\"\n@@ -330,7 +330,7 @@ else\ncmd=\"${python} -m paddle.distributed.launch --ips=${ips} --gpus=${gpu} ${run_train} ${set_save_model} ${set_pretrain} ${set_epoch} ${set_batchsize} ${set_train_params1} ${set_autocast}\"\nfi\n# run train\n- eval \"unset CUDA_VISIBLE_DEVICES\"\n+ # eval \"unset CUDA_VISIBLE_DEVICES\"\neval $cmd\nstatus_check $? 
\"${cmd}\" \"${status_log}\"\n@@ -356,7 +356,7 @@ else\neval $env\nsave_infer_path=\"${save_log}/${train_param_value1}\"\nfunc_inference \"${python}\" \"${inference_py}\" \"${save_infer_path}\" \"${LOG_PATH}\" \"${train_infer_img_dir}\" \"${flag_quant}\"\n- eval \"unset CUDA_VISIBLE_DEVICES\"\n+ # eval \"unset CUDA_VISIBLE_DEVICES\"\nfi\ndone # done with: for trainer in ${trainer_list[*]}; do\ndone # done with: for autocast in ${autocast_list[*]}; do\n"
}
] |
Python | Apache License 2.0 | paddlepaddle/paddledetection | fix device key (#4188) |
499,339 | 17.09.2021 20:25:32 | -28,800 | 3a9d38b5820551be58949fc2581394b9094414be | [ce test] fix amp bug in ce test |
[
{
"change_type": "MODIFY",
"old_path": "ppdet/modeling/necks/yolo_fpn.py",
"new_path": "ppdet/modeling/necks/yolo_fpn.py",
"diff": "@@ -30,8 +30,8 @@ def add_coord(x, data_format):\nelse:\nh, w = x.shape[1], x.shape[2]\n- gx = paddle.arange(w, dtype=x.dtype) / ((w - 1.) * 2.0) - 1.\n- gy = paddle.arange(h, dtype=x.dtype) / ((h - 1.) * 2.0) - 1.\n+ gx = paddle.cast(paddle.arange(w) / ((w - 1.) * 2.0) - 1., x.dtype)\n+ gy = paddle.cast(paddle.arange(h) / ((h - 1.) * 2.0) - 1., x.dtype)\nif data_format == 'NCHW':\ngx = gx.reshape([1, 1, 1, w]).expand([b, 1, h, w])\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/test.sh",
"new_path": "tests/test.sh",
"diff": "@@ -283,6 +283,9 @@ else\nrun_train=${pact_trainer}\nrun_export=${pact_export}\nflag_quant=True\n+ if [ ${autocast} = \"amp\" ]; then\n+ continue\n+ fi\nelif [ ${trainer} = \"${fpgm_key}\" ]; then\nrun_train=${fpgm_trainer}\nrun_export=${fpgm_export}\n"
}
] |
Python | Apache License 2.0 | paddlepaddle/paddledetection | [ce test] fix amp bug in ce test (#4196) |
499,298 | 22.09.2021 10:42:06 | -28,800 | 155190c2b643466113e1a9974046d79964df6e18 | [MOT] fix fairmot centernet_head bias init |
[
{
"change_type": "MODIFY",
"old_path": "ppdet/modeling/heads/centernet_head.py",
"new_path": "ppdet/modeling/heads/centernet_head.py",
"diff": "@@ -98,6 +98,7 @@ class CenterNetHead(nn.Layer):\nstride=1,\npadding=0,\nbias=True))\n+ with paddle.no_grad():\nself.heatmap[2].conv.bias[:] = -2.19\nself.size = nn.Sequential(\nConvLayer(\n"
}
] |
Python | Apache License 2.0 | paddlepaddle/paddledetection | [MOT] fix fairmot centernet_head bias init (#4206) |
499,301 | 23.09.2021 10:52:34 | -28,800 | e86d59ff5e0255690217c33c38c38b1213148bfd | update bias init |
[
{
"change_type": "MODIFY",
"old_path": "ppdet/modeling/heads/detr_head.py",
"new_path": "ppdet/modeling/heads/detr_head.py",
"diff": "@@ -307,6 +307,8 @@ class DeformableDETRHead(nn.Layer):\nlinear_init_(self.score_head)\nconstant_(self.score_head.bias, -4.595)\nconstant_(self.bbox_head.layers[-1].weight)\n+\n+ with paddle.no_grad():\nbias = paddle.zeros_like(self.bbox_head.layers[-1].bias)\nbias[2:] = -2.0\nself.bbox_head.layers[-1].bias.set_value(bias)\n"
}
] |
Python | Apache License 2.0 | paddlepaddle/paddledetection | update bias init (#4212) |
499,298 | 23.09.2021 19:33:27 | -28,800 | 3adb10630917c20266d798e525a9508fcc32e93c | fix kitti metric deploy |
[
{
"change_type": "MODIFY",
"old_path": "ppdet/data/source/category.py",
"new_path": "ppdet/data/source/category.py",
"diff": "@@ -90,16 +90,19 @@ def get_categories(metric_type, anno_file=None, arch=None):\nelif metric_type.lower() in ['mot', 'motdet', 'reid']:\nreturn _mot_category()\n+ elif metric_type.lower() in ['kitti', 'bdd100k']:\n+ return _mot_category(category='car')\n+\nelse:\nraise ValueError(\"unknown metric type {}\".format(metric_type))\n-def _mot_category():\n+def _mot_category(category='person'):\n\"\"\"\nGet class id to category id map and category id\nto category name map of mot dataset\n\"\"\"\n- label_map = {'person': 0}\n+ label_map = {category: 0}\nlabel_map = sorted(label_map.items(), key=lambda x: x[1])\ncats = [l[0] for l in label_map]\n"
}
] |
Python | Apache License 2.0 | paddlepaddle/paddledetection | fix kitti metric deploy (#4227) |
499,348 | 28.09.2021 10:22:49 | -28,800 | 19bc329408c41475b34d0ca483ab9c3fdebb407e | add darkpose support |
[
{
"change_type": "MODIFY",
"old_path": "deploy/lite/include/keypoint_detector.h",
"new_path": "deploy/lite/include/keypoint_detector.h",
"diff": "@@ -49,9 +49,11 @@ class KeyPointDetector {\npublic:\nexplicit KeyPointDetector(const std::string& model_dir,\nint cpu_threads = 1,\n- const int batch_size = 1) {\n+ const int batch_size = 1,\n+ bool use_dark = true) {\nconfig_.load_config(model_dir);\nthreshold_ = config_.draw_threshold_;\n+ use_dark_ = use_dark;\npreprocessor_.Init(config_.preprocess_info_);\nprintf(\"before keypoint detector\\n\");\nLoadModel(model_dir, cpu_threads);\n@@ -76,14 +78,16 @@ class KeyPointDetector {\nreturn config_.label_list_;\n}\n+ bool use_dark(){return this->use_dark_;}\n+\nprivate:\n// Preprocess image and copy data to input buffer\nvoid Preprocess(const cv::Mat& image_mat);\n// Postprocess result\n- void Postprocess(const std::vector<float> output,\n- const std::vector<int64_t> output_shape,\n- const std::vector<int64_t> idxout,\n- const std::vector<int64_t> idx_shape,\n+ void Postprocess(std::vector<float>& output,\n+ std::vector<int64_t>& output_shape,\n+ std::vector<int64_t>& idxout,\n+ std::vector<int64_t>& idx_shape,\nstd::vector<KeyPointResult>* result,\nstd::vector<std::vector<float>>& center,\nstd::vector<std::vector<float>>& scale);\n@@ -95,6 +99,7 @@ class KeyPointDetector {\nstd::vector<int64_t> idx_data_;\nfloat threshold_;\nConfigPaser config_;\n+ bool use_dark_;\n};\n} // namespace PaddleDetection\n"
},
{
"change_type": "MODIFY",
"old_path": "deploy/lite/include/keypoint_postprocess.h",
"new_path": "deploy/lite/include/keypoint_postprocess.h",
"diff": "std::vector<float> get_3rd_point(std::vector<float>& a, std::vector<float>& b);\nstd::vector<float> get_dir(float src_point_x, float src_point_y, float rot_rad);\nvoid affine_tranform(\n- float pt_x, float pt_y, cv::Mat& trans, float* x, int p, int num);\n+ float pt_x, float pt_y, cv::Mat& trans, std::vector<float>& x, int p, int num);\ncv::Mat get_affine_transform(std::vector<float>& center,\nstd::vector<float>& scale,\nfloat rot,\nstd::vector<int>& output_size,\nint inv);\n-void transform_preds(float* coords,\n+void transform_preds(std::vector<float>& coords,\nstd::vector<float>& center,\nstd::vector<float>& scale,\nstd::vector<int>& output_size,\nstd::vector<int>& dim,\n- float* target_coords);\n+ std::vector<float>& target_coords);\nvoid box_to_center_scale(std::vector<int>& box,\nint width,\nint height,\nstd::vector<float>& center,\nstd::vector<float>& scale);\n-void get_max_preds(float* heatmap,\n+void get_max_preds(std::vector<float>& heatmap,\nstd::vector<int64_t>& dim,\n- float* preds,\n- float* maxvals,\n+ std::vector<float>& preds,\n+ std::vector<float>& maxvals,\nint batchid,\nint joint_idx);\n-void get_final_preds(float* heatmap,\n+void get_final_preds(std::vector<float>& heatmap,\nstd::vector<int64_t>& dim,\n- int64_t* idxout,\n+ std::vector<int64_t>& idxout,\nstd::vector<int64_t>& idxdim,\nstd::vector<float>& center,\nstd::vector<float> scale,\n- float* preds,\n- int batchid);\n+ std::vector<float>& preds,\n+ int batchid,\n+ bool DARK = true);\n"
},
{
"change_type": "MODIFY",
"old_path": "deploy/lite/runtime_config.json",
"new_path": "deploy/lite/runtime_config.json",
"diff": "\"model_dir_keypoint\": \"./model_keypoint/\",\n\"batch_size_keypoint\": 8,\n\"threshold_keypoint\": 0.5,\n- \"image_file\": \"\",\n+ \"image_file\": \"./demo.jpg\",\n\"image_dir\": \"\",\n\"run_benchmark\": false,\n- \"cpu_threads\": 1\n+ \"cpu_threads\": 4,\n+ \"use_dark_decode\": true\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "deploy/lite/src/keypoint_detector.cc",
"new_path": "deploy/lite/src/keypoint_detector.cc",
"diff": "@@ -29,6 +29,10 @@ void KeyPointDetector::LoadModel(std::string model_file, int num_theads) {\npredictor_ = std::move(CreatePaddlePredictor<MobileConfig>(config));\n}\n+// Visualiztion MaskDetector results\n+cv::Mat VisualizeKptsResult(const cv::Mat& img,\n+ const std::vector<KeyPointResult>& results,\n+ const std::vector<int>& colormap) {\nconst int edge[][2] = {{0, 1},\n{0, 2},\n{1, 3},\n@@ -46,10 +50,6 @@ const int edge[][2] = {{0, 1},\n{13, 15},\n{14, 16},\n{11, 12}};\n-// Visualiztion MaskDetector results\n-cv::Mat VisualizeKptsResult(const cv::Mat& img,\n- const std::vector<KeyPointResult>& results,\n- const std::vector<int>& colormap) {\ncv::Mat vis_img = img.clone();\nfor (int batchid = 0; batchid < results.size(); batchid++) {\nfor (int i = 0; i < results[batchid].num_joints; i++) {\n@@ -85,24 +85,25 @@ void KeyPointDetector::Preprocess(const cv::Mat& ori_im) {\npreprocessor_.Run(&im, &inputs_);\n}\n-void KeyPointDetector::Postprocess(std::vector<float> output,\n- std::vector<int64_t> output_shape,\n- std::vector<int64_t> idxout,\n- std::vector<int64_t> idx_shape,\n+void KeyPointDetector::Postprocess(std::vector<float>& output,\n+ std::vector<int64_t>& output_shape,\n+ std::vector<int64_t>& idxout,\n+ std::vector<int64_t>& idx_shape,\nstd::vector<KeyPointResult>* result,\nstd::vector<std::vector<float>>& center_bs,\nstd::vector<std::vector<float>>& scale_bs) {\n- float* preds = new float[output_shape[1] * 3]{0};\n+ std::vector<float> preds(output_shape[1] * 3, 0);\nfor (int batchid = 0; batchid < output_shape[0]; batchid++) {\n- get_final_preds(const_cast<float*>(output.data()),\n+ get_final_preds(output,\noutput_shape,\n- idxout.data(),\n+ idxout,\nidx_shape,\ncenter_bs[batchid],\nscale_bs[batchid],\npreds,\n- batchid);\n+ batchid,\n+ this->use_dark());\nKeyPointResult result_item;\nresult_item.num_joints = output_shape[1];\nresult_item.keypoints.clear();\n@@ -113,7 +114,6 @@ void KeyPointDetector::Postprocess(std::vector<float> output,\n}\nresult->push_back(result_item);\n}\n- delete[] preds;\n}\nvoid KeyPointDetector::Predict(const std::vector<cv::Mat> imgs,\n"
},
{
"change_type": "MODIFY",
"old_path": "deploy/lite/src/keypoint_postprocess.cc",
"new_path": "deploy/lite/src/keypoint_postprocess.cc",
"diff": "// limitations under the License.\n#include \"include/keypoint_postprocess.h\"\n+#define PI 3.1415926535\n+#define HALF_CIRCLE_DEGREE 180\ncv::Point2f get_3rd_point(cv::Point2f& a, cv::Point2f& b) {\ncv::Point2f direct{a.x - b.x, a.y - b.y};\n@@ -31,7 +33,7 @@ std::vector<float> get_dir(float src_point_x,\n}\nvoid affine_tranform(\n- float pt_x, float pt_y, cv::Mat& trans, float* preds, int p) {\n+ float pt_x, float pt_y, cv::Mat& trans, std::vector<float>& preds, int p) {\ndouble new1[3] = {pt_x, pt_y, 1.0};\ncv::Mat new_pt(3, 1, trans.type(), new1);\ncv::Mat w = trans * new_pt;\n@@ -48,7 +50,7 @@ void get_affine_transform(std::vector<float>& center,\nfloat src_w = scale[0];\nfloat dst_w = static_cast<float>(output_size[0]);\nfloat dst_h = static_cast<float>(output_size[1]);\n- float rot_rad = rot * 3.1415926535 / 180;\n+ float rot_rad = rot * PI / HALF_CIRCLE_DEGREE;\nstd::vector<float> src_dir = get_dir(-0.5 * src_w, 0, rot_rad);\nstd::vector<float> dst_dir{-0.5 * dst_w, 0.0};\ncv::Point2f srcPoint2f[3], dstPoint2f[3];\n@@ -67,12 +69,12 @@ void get_affine_transform(std::vector<float>& center,\n}\n}\n-void transform_preds(float* coords,\n+void transform_preds(std::vector<float>& coords,\nstd::vector<float>& center,\nstd::vector<float>& scale,\nstd::vector<int>& output_size,\nstd::vector<int64_t>& dim,\n- float* target_coords) {\n+ std::vector<float>& target_coords) {\ncv::Mat trans(2, 3, CV_64FC1);\nget_affine_transform(center, scale, 0, output_size, trans, 1);\nfor (int p = 0; p < dim[1]; ++p) {\n@@ -81,10 +83,10 @@ void transform_preds(float* coords,\n}\n// only for batchsize == 1\n-void get_max_preds(float* heatmap,\n+void get_max_preds(std::vector<float>& heatmap,\nstd::vector<int>& dim,\n- float* preds,\n- float* maxvals,\n+ std::vector<float>& preds,\n+ std::vector<float>& maxvals,\nint batchid,\nint joint_idx) {\nint num_joints = dim[1];\n@@ -106,14 +108,75 @@ void get_max_preds(float* heatmap,\n}\n}\n-void get_final_preds(float* heatmap,\n+\n+void dark_parse(std::vector<float>& heatmap,\n+ std::vector<int64_t>& dim,\n+ std::vector<float>& coords,\n+ int px,\n+ int py,\n+ int index,\n+ int ch){\n+ /*DARK postpocessing, Zhang et al. 
Distribution-Aware Coordinate\n+ Representation for Human Pose Estimation (CVPR 2020).\n+ 1) offset = - hassian.inv() * derivative\n+ 2) dx = (heatmap[x+1] - heatmap[x-1])/2.\n+ 3) dxx = (dx[x+1] - dx[x-1])/2.\n+ 4) derivative = Mat([dx, dy])\n+ 5) hassian = Mat([[dxx, dxy], [dxy, dyy]])\n+ */\n+ std::vector<float>::const_iterator first1 = heatmap.begin() + index;\n+ std::vector<float>::const_iterator last1 = heatmap.begin() + index + dim[2]*dim[3];\n+ std::vector<float> heatmap_ch(first1, last1);\n+ cv::Mat heatmap_mat{heatmap_ch};\n+ heatmap_mat.resize(dim[2],dim[3]);\n+ cv::GaussianBlur(heatmap_mat, heatmap_mat, cv::Size(3,3), 0, 0);\n+ heatmap_ch.assign(heatmap_mat.datastart, heatmap_mat.dataend);\n+\n+ float epsilon = 1e-10;\n+ //sample heatmap to get values in around target location\n+ float xy = log(fmax(heatmap_ch[py * dim[3] + px], epsilon));\n+ float xr = log(fmax(heatmap_ch[py * dim[3] + px + 1], epsilon));\n+ float xl = log(fmax(heatmap_ch[py * dim[3] + px - 1], epsilon));\n+\n+ float xr2 = log(fmax(heatmap_ch[py * dim[3] + px + 2], epsilon));\n+ float xl2 = log(fmax(heatmap_ch[py * dim[3] + px - 2], epsilon));\n+ float yu = log(fmax(heatmap_ch[(py + 1) * dim[3] + px], epsilon));\n+ float yd = log(fmax(heatmap_ch[(py - 1) * dim[3] + px], epsilon));\n+ float yu2 = log(fmax(heatmap_ch[(py + 2) * dim[3] + px], epsilon));\n+ float yd2 = log(fmax(heatmap_ch[(py - 2) * dim[3] + px], epsilon));\n+ float xryu = log(fmax(heatmap_ch[(py + 1) * dim[3] + px + 1], epsilon));\n+ float xryd = log(fmax(heatmap_ch[(py - 1) * dim[3] + px + 1], epsilon));\n+ float xlyu = log(fmax(heatmap_ch[(py + 1) * dim[3] + px - 1], epsilon));\n+ float xlyd = log(fmax(heatmap_ch[(py - 1) * dim[3] + px - 1], epsilon));\n+\n+ //compute dx/dy and dxx/dyy with sampled values\n+ float dx = 0.5 * (xr - xl);\n+ float dy = 0.5 * (yu - yd);\n+ float dxx = 0.25 * (xr2 - 2*xy + xl2);\n+ float dxy = 0.25 * (xryu - xryd - xlyu + xlyd);\n+ float dyy = 0.25 * (yu2 - 2*xy + yd2);\n+\n+ //finally get offset by derivative and hassian, which combined by dx/dy and dxx/dyy\n+ if(dxx * dyy - dxy*dxy != 0){\n+ float M[2][2] = {dxx, dxy, dxy, dyy};\n+ float D[2] = {dx, dy};\n+ cv::Mat hassian(2,2,CV_32F,M);\n+ cv::Mat derivative(2,1,CV_32F,D);\n+ cv::Mat offset = - hassian.inv() * derivative;\n+ coords[ch * 2] += offset.at<float>(0,0);\n+ coords[ch * 2 + 1] += offset.at<float>(1,0);\n+ }\n+}\n+\n+void get_final_preds(std::vector<float>& heatmap,\nstd::vector<int64_t>& dim,\n- int64_t* idxout,\n+ std::vector<int64_t>& idxout,\nstd::vector<int64_t>& idxdim,\nstd::vector<float>& center,\nstd::vector<float> scale,\n- float* preds,\n- int batchid) {\n+ std::vector<float>& preds,\n+ int batchid,\n+ bool DARK) {\nstd::vector<float> coords;\ncoords.resize(dim[1] * 2);\nint heatmap_height = dim[2];\n@@ -130,18 +193,23 @@ void get_final_preds(float* heatmap,\nint px = int(coords[j * 2] + 0.5);\nint py = int(coords[j * 2 + 1] + 0.5);\n- if (px > 1 && px < heatmap_width - 1) {\n+ if(DARK && px > 1 && px < heatmap_width - 2){\n+ dark_parse(heatmap, dim, coords, px, py, index, j);\n+ }\n+ else{\n+ if (px > 0 && px < heatmap_width - 1) {\nfloat diff_x = heatmap[index + py * dim[3] + px + 1] -\nheatmap[index + py * dim[3] + px - 1];\ncoords[j * 2] += diff_x > 0 ? 1 : -1 * 0.25;\n}\n- if (py > 1 && py < heatmap_height - 1) {\n+ if (py > 0 && py < heatmap_height - 1) {\nfloat diff_y = heatmap[index + (py + 1) * dim[3] + px] -\nheatmap[index + (py - 1) * dim[3] + px];\ncoords[j * 2 + 1] += diff_y > 0 ? 
1 : -1 * 0.25;\n}\n}\n+ }\nstd::vector<int> img_size{heatmap_width, heatmap_height};\n- transform_preds(coords.data(), center, scale, img_size, dim, preds);\n+ transform_preds(coords, center, scale, img_size, dim, preds);\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "deploy/lite/src/main.cc",
"new_path": "deploy/lite/src/main.cc",
"diff": "@@ -308,7 +308,8 @@ int main(int argc, char** argv) {\nkeypoint = new PaddleDetection::KeyPointDetector(\nRT_Config[\"model_dir_keypoint\"].as<std::string>(),\nRT_Config[\"cpu_threads\"].as<int>(),\n- RT_Config[\"batch_size_keypoint\"].as<int>());\n+ RT_Config[\"batch_size_keypoint\"].as<int>(),\n+ RT_Config[\"use_dark_decode\"].as<bool>());\nRT_Config[\"batch_size_det\"] = 1;\nprintf(\n\"batchsize of detection forced to be 1 while keypoint model is not \"\n"
},
{
"change_type": "MODIFY",
"old_path": "deploy/lite/src/preprocess_op.cc",
"new_path": "deploy/lite/src/preprocess_op.cc",
"diff": "@@ -31,7 +31,7 @@ void InitInfo::Run(cv::Mat* im, ImageBlob* data) {\nvoid NormalizeImage::Run(cv::Mat* im, ImageBlob* data) {\ndouble e = 1.0;\nif (is_scale_) {\n- e /= 255.0;\n+ e *= 1./255.0;\n}\n(*im).convertTo(*im, CV_32FC3, e);\nfor (int h = 0; h < im->rows; h++) {\n@@ -151,15 +151,18 @@ void CropImg(cv::Mat& img,\nint crop_y1 = std::max(0, area[1]);\nint crop_x2 = std::min(img.cols - 1, area[2]);\nint crop_y2 = std::min(img.rows - 1, area[3]);\n+\nint center_x = (crop_x1 + crop_x2) / 2.;\nint center_y = (crop_y1 + crop_y2) / 2.;\nint half_h = (crop_y2 - crop_y1) / 2.;\nint half_w = (crop_x2 - crop_x1) / 2.;\n+\nif (half_h * 3 > half_w * 4) {\nhalf_w = static_cast<int>(half_h * 0.75);\n} else {\nhalf_h = static_cast<int>(half_w * 4 / 3);\n}\n+\ncrop_x1 =\nstd::max(0, center_x - static_cast<int>(half_w * (1 + expandratio)));\ncrop_y1 =\n@@ -170,6 +173,7 @@ void CropImg(cv::Mat& img,\nstatic_cast<int>(center_y + half_h * (1 + expandratio)));\ncrop_img =\nimg(cv::Range(crop_y1, crop_y2 + 1), cv::Range(crop_x1, crop_x2 + 1));\n+\ncenter.clear();\ncenter.emplace_back((crop_x1 + crop_x2) / 2);\ncenter.emplace_back((crop_y1 + crop_y2) / 2);\n"
}
] |
Python | Apache License 2.0 | paddlepaddle/paddledetection | add darkpose support (#4232) |
499,301 | 12.10.2021 10:38:00 | -28,800 | 16e3d7408161713c765886cfb952f98d9f68713c | add conv mixer |
[
{
"change_type": "MODIFY",
"old_path": "ppdet/modeling/layers.py",
"new_path": "ppdet/modeling/layers.py",
"diff": "@@ -1388,3 +1388,37 @@ class MultiHeadAttention(nn.Layer):\nif self.need_weights:\nouts.append(weights)\nreturn out if len(outs) == 1 else tuple(outs)\n+\n+\n+@register\n+class ConvMixer(nn.Layer):\n+ def __init__(\n+ self,\n+ dim,\n+ depth,\n+ kernel_size=3, ):\n+ super().__init__()\n+ self.dim = dim\n+ self.depth = depth\n+ self.kernel_size = kernel_size\n+\n+ self.mixer = self.conv_mixer(dim, depth, kernel_size)\n+\n+ def forward(self, x):\n+ return self.mixer(x)\n+\n+ @staticmethod\n+ def conv_mixer(\n+ dim,\n+ depth,\n+ kernel_size, ):\n+ Seq, ActBn = nn.Sequential, lambda x: Seq(x, nn.GELU(), nn.BatchNorm2D(dim))\n+ Residual = type('Residual', (Seq, ),\n+ {'forward': lambda self, x: self[0](x) + x})\n+ return Seq(*[\n+ Seq(Residual(\n+ ActBn(\n+ nn.Conv2D(\n+ dim, dim, kernel_size, groups=dim, padding=\"same\"))),\n+ ActBn(nn.Conv2D(dim, dim, 1))) for i in range(depth)\n+ ])\n"
}
] |
Python | Apache License 2.0 | paddlepaddle/paddledetection | add conv mixer (#4280) |
499,339 | 12.10.2021 10:50:42 | -28,800 | bceb6e4ef3139e04558fac8f16f6bbfc6e6d3af0 | [benchmark] add detection train benchmark scripts |
[
{
"change_type": "ADD",
"old_path": null,
"new_path": "benchmark/prepare.sh",
"diff": "+#!/usr/bin/env bash\n+\n+pip3.7 install -U pip Cython\n+pip3.7 install -r requirements.txt\n+mv ./dataset/coco/download_coco.py . && rm -rf ./dataset/coco/* && mv ./download_coco.py ./dataset/coco/\n+# prepare lite train data\n+wget -nc -P ./dataset/coco/ https://paddledet.bj.bcebos.com/data/coco_benchmark.tar\n+cd ./dataset/coco/ && tar -xvf coco_benchmark.tar && mv -u coco_benchmark/* .\n+rm -rf coco_benchmark/\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "benchmark/run_all.sh",
"diff": "+# Use docker: paddlepaddle/paddle:latest-gpu-cuda10.1-cudnn7 paddle=2.1.2 python3.7\n+#\n+# Usage:\n+# git clone https://github.com/PaddlePaddle/PaddleDetection.git\n+# cd PaddleDetection\n+# bash benchmark/run_all.sh\n+\n+# run prepare.sh\n+bash benchmark/prepare.sh\n+\n+model_name_list=(faster_rcnn fcos deformable_detr gfl)\n+fp_item_list=(fp32)\n+max_epoch=1\n+\n+for model_name in ${model_name_list[@]}; do\n+ for fp_item in ${fp_item_list[@]}; do\n+ case ${model_name} in\n+ faster_rcnn) bs_list=(1 8) ;;\n+ fcos) bs_list=(2 8) ;;\n+ deformable_detr) bs_list=(2) ;;\n+ gfl) bs_list=(2 8) ;;\n+ *) echo \"wrong model_name\"; exit 1;\n+ esac\n+ for bs_item in ${bs_list[@]}\n+ do\n+ echo \"index is speed, 1gpus, begin, ${model_name}\"\n+ run_mode=sp\n+ CUDA_VISIBLE_DEVICES=0 bash benchmark/run_benchmark.sh ${run_mode} ${bs_item} \\\n+ ${fp_item} ${max_epoch} ${model_name} # (5min)\n+ sleep 60\n+\n+ echo \"index is speed, 8gpus, run_mode is multi_process, begin, ${model_name}\"\n+ run_mode=mp\n+ CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 bash benchmark/run_benchmark.sh ${run_mode} \\\n+ ${bs_item} ${fp_item} ${max_epoch} ${model_name}\n+ sleep 60\n+ done\n+ done\n+done\n"
},
{
"change_type": "MODIFY",
"old_path": "ppdet/engine/trainer.py",
"new_path": "ppdet/engine/trainer.py",
"diff": "@@ -38,6 +38,7 @@ from ppdet.metrics import Metric, COCOMetric, VOCMetric, WiderFaceMetric, get_in\nfrom ppdet.metrics import RBoxMetric, JDEDetMetric\nfrom ppdet.data.source.category import get_categories\nimport ppdet.utils.stats as stats\n+from ppdet.utils import profiler\nfrom .callbacks import Callback, ComposeCallback, LogPrinter, Checkpointer, WiferFaceEval, VisualDLWriter\nfrom .export_utils import _dump_infer_config\n@@ -340,6 +341,7 @@ class Trainer(object):\nif self.cfg.get('print_flops', False):\nself._flops(self.loader)\n+ profiler_options = self.cfg.get('profiler_options', None)\nfor epoch_id in range(self.start_epoch, self.cfg.epoch):\nself.status['mode'] = 'train'\n@@ -351,6 +353,7 @@ class Trainer(object):\nfor step_id, data in enumerate(self.loader):\nself.status['data_time'].update(time.time() - iter_tic)\nself.status['step_id'] = step_id\n+ profiler.add_profiler_step(profiler_options)\nself._compose_callback.on_step_begin(self.status)\ndata['epoch_id'] = epoch_id\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "ppdet/utils/profiler.py",
"diff": "+# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n+import sys\n+import paddle\n+\n+# A global variable to record the number of calling times for profiler\n+# functions. It is used to specify the tracing range of training steps.\n+_profiler_step_id = 0\n+\n+# A global variable to avoid parsing from string every time.\n+_profiler_options = None\n+\n+\n+class ProfilerOptions(object):\n+ '''\n+ Use a string to initialize a ProfilerOptions.\n+ The string should be in the format: \"key1=value1;key2=value;key3=value3\".\n+ For example:\n+ \"profile_path=model.profile\"\n+ \"batch_range=[50, 60]; profile_path=model.profile\"\n+ \"batch_range=[50, 60]; tracer_option=OpDetail; profile_path=model.profile\"\n+\n+ ProfilerOptions supports following key-value pair:\n+ batch_range - a integer list, e.g. [100, 110].\n+ state - a string, the optional values are 'CPU', 'GPU' or 'All'.\n+ sorted_key - a string, the optional values are 'calls', 'total',\n+ 'max', 'min' or 'ave.\n+ tracer_option - a string, the optional values are 'Default', 'OpDetail',\n+ 'AllOpDetail'.\n+ profile_path - a string, the path to save the serialized profile data,\n+ which can be used to generate a timeline.\n+ exit_on_finished - a boolean.\n+ '''\n+\n+ def __init__(self, options_str):\n+ assert isinstance(options_str, str)\n+\n+ self._options = {\n+ 'batch_range': [10, 20],\n+ 'state': 'All',\n+ 'sorted_key': 'total',\n+ 'tracer_option': 'Default',\n+ 'profile_path': '/tmp/profile',\n+ 'exit_on_finished': True\n+ }\n+ self._parse_from_string(options_str)\n+\n+ def _parse_from_string(self, options_str):\n+ for kv in options_str.replace(' ', '').split(';'):\n+ key, value = kv.split('=')\n+ if key == 'batch_range':\n+ value_list = value.replace('[', '').replace(']', '').split(',')\n+ value_list = list(map(int, value_list))\n+ if len(value_list) >= 2 and value_list[0] >= 0 and value_list[\n+ 1] > value_list[0]:\n+ self._options[key] = value_list\n+ elif key == 'exit_on_finished':\n+ self._options[key] = value.lower() in (\"yes\", \"true\", \"t\", \"1\")\n+ elif key in [\n+ 'state', 'sorted_key', 'tracer_option', 'profile_path'\n+ ]:\n+ self._options[key] = value\n+\n+ def __getitem__(self, name):\n+ if self._options.get(name, None) is None:\n+ raise ValueError(\n+ \"ProfilerOptions does not have an option named %s.\" % name)\n+ return self._options[name]\n+\n+\n+def add_profiler_step(options_str=None):\n+ '''\n+ Enable the operator-level timing using PaddlePaddle's profiler.\n+ The profiler uses a independent variable to count the profiler steps.\n+ One call of this function is treated as a profiler step.\n+\n+ Args:\n+ profiler_options - a string to initialize the ProfilerOptions.\n+ Default is None, and the profiler is disabled.\n+ '''\n+ if options_str is None:\n+ return\n+\n+ global _profiler_step_id\n+ global _profiler_options\n+\n+ if _profiler_options is None:\n+ _profiler_options = ProfilerOptions(options_str)\n+\n+ if _profiler_step_id == 
_profiler_options['batch_range'][0]:\n+ paddle.utils.profiler.start_profiler(_profiler_options['state'],\n+ _profiler_options['tracer_option'])\n+ elif _profiler_step_id == _profiler_options['batch_range'][1]:\n+ paddle.utils.profiler.stop_profiler(_profiler_options['sorted_key'],\n+ _profiler_options['profile_path'])\n+ if _profiler_options['exit_on_finished']:\n+ sys.exit(0)\n+\n+ _profiler_step_id += 1\n"
},
{
"change_type": "MODIFY",
"old_path": "tools/train.py",
"new_path": "tools/train.py",
"diff": "@@ -81,6 +81,13 @@ def parse_args():\naction='store_true',\ndefault=False,\nhelp='Whether to save the evaluation results only')\n+ parser.add_argument(\n+ '--profiler_options',\n+ type=str,\n+ default=None,\n+ help=\"The option of profiler, which should be in \"\n+ \"format \\\"key1=value1;key2=value2;key3=value3\\\".\"\n+ \"please see ppdet/utils/profiler.py for detail.\")\nargs = parser.parse_args()\nreturn args\n@@ -117,6 +124,7 @@ def main():\ncfg['use_vdl'] = FLAGS.use_vdl\ncfg['vdl_log_dir'] = FLAGS.vdl_log_dir\ncfg['save_prediction_only'] = FLAGS.save_prediction_only\n+ cfg['profiler_options'] = FLAGS.profiler_options\nmerge_config(FLAGS.opt)\nplace = paddle.set_device('gpu' if cfg.use_gpu else 'cpu')\n"
}
] |
Python | Apache License 2.0 | paddlepaddle/paddledetection | [benchmark] add detection train benchmark scripts (#4250) |
499,301 | 13.10.2021 17:12:33 | -28,800 | 1abb4b8bf6498be3bb418fd3f0cdfd2c6180eba9 | pop str im_file |
[
{
"change_type": "MODIFY",
"old_path": "ppdet/data/transform/operators.py",
"new_path": "ppdet/data/transform/operators.py",
"diff": "@@ -204,6 +204,8 @@ class DecodeCache(BaseOperator):\nsample['im_shape'] = np.array(im.shape[:2], dtype=np.float32)\nsample['scale_factor'] = np.array([1., 1.], dtype=np.float32)\n+ sample.pop('im_file')\n+\nreturn sample\n@staticmethod\n"
}
] |
Python | Apache License 2.0 | paddlepaddle/paddledetection | pop str im_file (#4299) |
499,304 | 14.10.2021 14:56:36 | -28,800 | 846f6d5c60defa1a04cc5798311951ea774c9fcc | add post quant |
[
{
"change_type": "ADD",
"old_path": null,
"new_path": "configs/slim/post_quant/ppyolo_mbv3_large_ptq.yml",
"diff": "+weights: https://paddledet.bj.bcebos.com/models/ppyolo_mbv3_large_coco.pdparams\n+slim: PTQ\n+\n+PTQ:\n+ ptq_config: {\n+ 'activation_quantizer': 'HistQuantizer',\n+ 'upsample_bins': 127,\n+ 'hist_percent': 0.999}\n+ quant_batch_num: 10\n+ fuse: True\n"
},
{
"change_type": "MODIFY",
"old_path": "ppdet/engine/export_utils.py",
"new_path": "ppdet/engine/export_utils.py",
"diff": "@@ -20,6 +20,7 @@ import os\nimport yaml\nfrom collections import OrderedDict\n+import paddle\nfrom ppdet.data.source.category import get_categories\nfrom ppdet.utils.logger import setup_logger\n@@ -50,6 +51,24 @@ KEYPOINT_ARCH = ['HigherHRNet', 'TopDownHRNet']\nMOT_ARCH = ['DeepSORT', 'JDE', 'FairMOT']\n+def _prune_input_spec(input_spec, program, targets):\n+ # try to prune static program to figure out pruned input spec\n+ # so we perform following operations in static mode\n+ paddle.enable_static()\n+ pruned_input_spec = [{}]\n+ program = program.clone()\n+ program = program._prune(targets=targets)\n+ global_block = program.global_block()\n+ for name, spec in input_spec[0].items():\n+ try:\n+ v = global_block.var(name)\n+ pruned_input_spec[0][name] = spec\n+ except Exception:\n+ pass\n+ paddle.disable_static()\n+ return pruned_input_spec\n+\n+\ndef _parse_reader(reader_cfg, dataset_cfg, metric, arch, image_shape):\npreprocess_list = []\n@@ -97,7 +116,7 @@ def _dump_infer_config(config, path, image_shape, model):\narch_state = False\nfrom ppdet.core.config.yaml_helpers import setup_orderdict\nsetup_orderdict()\n- use_dynamic_shape = True if image_shape[1] == -1 else False\n+ use_dynamic_shape = True if image_shape[2] == -1 else False\ninfer_cfg = OrderedDict({\n'mode': 'fluid',\n'draw_threshold': 0.5,\n@@ -141,7 +160,7 @@ def _dump_infer_config(config, path, image_shape, model):\ndataset_cfg = config['TestDataset']\ninfer_cfg['Preprocess'], infer_cfg['label_list'] = _parse_reader(\n- reader_cfg, dataset_cfg, config['metric'], label_arch, image_shape)\n+ reader_cfg, dataset_cfg, config['metric'], label_arch, image_shape[1:])\nyaml.dump(infer_cfg, open(path, 'w'))\nlogger.info(\"Export inference config file to {}\".format(os.path.join(path)))\n"
},
{
"change_type": "MODIFY",
"old_path": "ppdet/engine/trainer.py",
"new_path": "ppdet/engine/trainer.py",
"diff": "@@ -41,7 +41,7 @@ import ppdet.utils.stats as stats\nfrom ppdet.utils import profiler\nfrom .callbacks import Callback, ComposeCallback, LogPrinter, Checkpointer, WiferFaceEval, VisualDLWriter\n-from .export_utils import _dump_infer_config\n+from .export_utils import _dump_infer_config, _prune_input_spec\nfrom ppdet.utils.logger import setup_logger\nlogger = setup_logger('ppdet.engine')\n@@ -541,12 +541,7 @@ class Trainer(object):\nname, ext = os.path.splitext(image_name)\nreturn os.path.join(output_dir, \"{}\".format(name)) + ext\n- def export(self, output_dir='output_inference'):\n- self.model.eval()\n- model_name = os.path.splitext(os.path.split(self.cfg.filename)[-1])[0]\n- save_dir = os.path.join(output_dir, model_name)\n- if not os.path.exists(save_dir):\n- os.makedirs(save_dir)\n+ def _get_infer_cfg_and_input_spec(self, save_dir, prune_input=True):\nimage_shape = None\nif self.cfg.architecture in MOT_ARCH:\ntest_reader_name = 'TestMOTReader'\n@@ -555,9 +550,11 @@ class Trainer(object):\nif 'inputs_def' in self.cfg[test_reader_name]:\ninputs_def = self.cfg[test_reader_name]['inputs_def']\nimage_shape = inputs_def.get('image_shape', None)\n- # set image_shape=[3, -1, -1] as default\n+ # set image_shape=[None, 3, -1, -1] as default\nif image_shape is None:\n- image_shape = [3, -1, -1]\n+ image_shape = [None, 3, -1, -1]\n+ if len(image_shape) == 3:\n+ image_shape = [None] + image_shape\nif hasattr(self.model, 'deploy'):\nself.model.deploy = True\n@@ -574,7 +571,7 @@ class Trainer(object):\ninput_spec = [{\n\"image\": InputSpec(\n- shape=[None] + image_shape, name='image'),\n+ shape=image_shape, name='image'),\n\"im_shape\": InputSpec(\nshape=[None, 2], name='im_shape'),\n\"scale_factor\": InputSpec(\n@@ -585,13 +582,29 @@ class Trainer(object):\n\"crops\": InputSpec(\nshape=[None, 3, 192, 64], name='crops')\n})\n-\n- static_model = paddle.jit.to_static(self.model, input_spec=input_spec)\n+ if prune_input:\n+ static_model = paddle.jit.to_static(\n+ self.model, input_spec=input_spec)\n# NOTE: dy2st do not pruned program, but jit.save will prune program\n# input spec, prune input spec here and save with pruned input spec\n- pruned_input_spec = self._prune_input_spec(\n+ pruned_input_spec = _prune_input_spec(\ninput_spec, static_model.forward.main_program,\nstatic_model.forward.outputs)\n+ else:\n+ static_model = None\n+ pruned_input_spec = input_spec\n+\n+ return static_model, pruned_input_spec\n+\n+ def export(self, output_dir='output_inference'):\n+ self.model.eval()\n+ model_name = os.path.splitext(os.path.split(self.cfg.filename)[-1])[0]\n+ save_dir = os.path.join(output_dir, model_name)\n+ if not os.path.exists(save_dir):\n+ os.makedirs(save_dir)\n+\n+ static_model, pruned_input_spec = self._get_infer_cfg_and_input_spec(\n+ save_dir)\n# dy2st and save model\nif 'slim' not in self.cfg or self.cfg['slim_type'] != 'QAT':\n@@ -606,22 +619,26 @@ class Trainer(object):\ninput_spec=pruned_input_spec)\nlogger.info(\"Export model and saved in {}\".format(save_dir))\n- def _prune_input_spec(self, input_spec, program, targets):\n- # try to prune static program to figure out pruned input spec\n- # so we perform following operations in static mode\n- paddle.enable_static()\n- pruned_input_spec = [{}]\n- program = program.clone()\n- program = program._prune(targets=targets)\n- global_block = program.global_block()\n- for name, spec in input_spec[0].items():\n- try:\n- v = global_block.var(name)\n- pruned_input_spec[0][name] = spec\n- except Exception:\n- pass\n- paddle.disable_static()\n- 
return pruned_input_spec\n+ def post_quant(self, output_dir='output_inference'):\n+ model_name = os.path.splitext(os.path.split(self.cfg.filename)[-1])[0]\n+ save_dir = os.path.join(output_dir, model_name)\n+ if not os.path.exists(save_dir):\n+ os.makedirs(save_dir)\n+\n+ for idx, data in enumerate(self.loader):\n+ self.model(data)\n+ if idx == int(self.cfg.get('quant_batch_num', 10)):\n+ break\n+\n+ # TODO: support prune input_spec\n+ _, pruned_input_spec = self._get_infer_cfg_and_input_spec(\n+ save_dir, prune_input=False)\n+\n+ self.cfg.slim.save_quantized_model(\n+ self.model,\n+ os.path.join(save_dir, 'model'),\n+ input_spec=pruned_input_spec)\n+ logger.info(\"Export Post-Quant model and saved in {}\".format(save_dir))\ndef _flops(self, loader):\nself.model.eval()\n"
},
{
"change_type": "MODIFY",
"old_path": "ppdet/slim/__init__.py",
"new_path": "ppdet/slim/__init__.py",
"diff": "@@ -48,6 +48,14 @@ def build_slim_model(cfg, slim_cfg, mode='train'):\nload_pretrain_weight(model, weights)\ncfg['model'] = model\ncfg['slim_type'] = cfg.slim\n+ elif slim_load_cfg['slim'] == 'PTQ':\n+ model = create(cfg.architecture)\n+ load_config(slim_cfg)\n+ load_pretrain_weight(model, cfg.weights)\n+ slim = create(cfg.slim)\n+ cfg['slim_type'] = cfg.slim\n+ cfg['model'] = slim(model)\n+ cfg['slim'] = slim\nelse:\nload_config(slim_cfg)\nmodel = create(cfg.architecture)\n"
},
{
"change_type": "MODIFY",
"old_path": "ppdet/slim/quant.py",
"new_path": "ppdet/slim/quant.py",
"diff": "@@ -49,3 +49,36 @@ class QAT(object):\ndef save_quantized_model(self, layer, path, input_spec=None, **config):\nself.quanter.save_quantized_model(\nmodel=layer, path=path, input_spec=input_spec, **config)\n+\n+\n+@register\n+@serializable\n+class PTQ(object):\n+ def __init__(self,\n+ ptq_config,\n+ quant_batch_num=10,\n+ output_dir='output_inference',\n+ fuse=True,\n+ fuse_list=None):\n+ super(PTQ, self).__init__()\n+ self.ptq_config = ptq_config\n+ self.quant_batch_num = quant_batch_num\n+ self.output_dir = output_dir\n+ self.fuse = fuse\n+ self.fuse_list = fuse_list\n+\n+ def __call__(self, model):\n+ paddleslim = try_import('paddleslim')\n+ self.ptq = paddleslim.PTQ(**self.ptq_config)\n+ model.eval()\n+ quant_model = self.ptq.quantize(\n+ model, fuse=self.fuse, fuse_list=self.fuse_list)\n+\n+ return quant_model\n+\n+ def save_quantized_model(self,\n+ quant_model,\n+ quantize_model_path,\n+ input_spec=None):\n+ self.ptq.save_quantized_model(quant_model, quantize_model_path,\n+ input_spec)\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "tools/post_quant.py",
"diff": "+# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n+from __future__ import absolute_import\n+from __future__ import division\n+from __future__ import print_function\n+\n+import os\n+import sys\n+\n+# add python path of PadleDetection to sys.path\n+parent_path = os.path.abspath(os.path.join(__file__, *(['..'] * 2)))\n+sys.path.insert(0, parent_path)\n+\n+# ignore warning log\n+import warnings\n+warnings.filterwarnings('ignore')\n+\n+import paddle\n+\n+from ppdet.core.workspace import load_config, merge_config\n+from ppdet.utils.check import check_gpu, check_version, check_config\n+from ppdet.utils.cli import ArgsParser\n+from ppdet.engine import Trainer\n+from ppdet.slim import build_slim_model\n+\n+from ppdet.utils.logger import setup_logger\n+logger = setup_logger('post_quant')\n+\n+\n+def parse_args():\n+ parser = ArgsParser()\n+ parser.add_argument(\n+ \"--output_dir\",\n+ type=str,\n+ default=\"output_inference\",\n+ help=\"Directory for storing the output model files.\")\n+ parser.add_argument(\n+ \"--slim_config\",\n+ default=None,\n+ type=str,\n+ help=\"Configuration file of slim method.\")\n+ args = parser.parse_args()\n+ return args\n+\n+\n+def run(FLAGS, cfg):\n+ # build detector\n+ trainer = Trainer(cfg, mode='eval')\n+\n+ # load weights\n+ if cfg.architecture in ['DeepSORT']:\n+ if cfg.det_weights != 'None':\n+ trainer.load_weights_sde(cfg.det_weights, cfg.reid_weights)\n+ else:\n+ trainer.load_weights_sde(None, cfg.reid_weights)\n+ else:\n+ trainer.load_weights(cfg.weights)\n+\n+ # post quant model\n+ trainer.post_quant(FLAGS.output_dir)\n+\n+\n+def main():\n+ FLAGS = parse_args()\n+ cfg = load_config(FLAGS.config)\n+ # TODO: to be refined in the future\n+ if 'norm_type' in cfg and cfg['norm_type'] == 'sync_bn':\n+ FLAGS.opt['norm_type'] = 'bn'\n+ merge_config(FLAGS.opt)\n+\n+ if FLAGS.slim_config:\n+ cfg = build_slim_model(cfg, FLAGS.slim_config, mode='test')\n+\n+ # FIXME: Temporarily solve the priority problem of FLAGS.opt\n+ merge_config(FLAGS.opt)\n+ check_config(cfg)\n+ check_gpu(cfg.use_gpu)\n+ check_version()\n+\n+ run(FLAGS, cfg)\n+\n+\n+if __name__ == '__main__':\n+ main()\n"
}
] |
Python | Apache License 2.0 | paddlepaddle/paddledetection | add post quant (#4255) |
499,299 | 15.10.2021 10:15:00 | -28,800 | 7da4cf719a2757c35040addedeaa6fff1c999aea | add pose/solov2 model training benchmark |
[
{
"change_type": "MODIFY",
"old_path": "benchmark/run_all.sh",
"new_path": "benchmark/run_all.sh",
"diff": "@@ -19,6 +19,9 @@ for model_name in ${model_name_list[@]}; do\nfcos) bs_list=(2 8) ;;\ndeformable_detr) bs_list=(2) ;;\ngfl) bs_list=(2 8) ;;\n+ hrnet) bs_list=(64 160) ;;\n+ higherhrnet) bs_list=(20 24) ;;\n+ solov2) bs_list=(2 4) ;;\n*) echo \"wrong model_name\"; exit 1;\nesac\nfor bs_item in ${bs_list[@]}\n"
},
{
"change_type": "MODIFY",
"old_path": "benchmark/run_benchmark.sh",
"new_path": "benchmark/run_benchmark.sh",
"diff": "@@ -26,12 +26,15 @@ function _train(){\nfcos) model_yml=\"configs/fcos/fcos_r50_fpn_1x_coco.yml\" ;;\ndeformable_detr) model_yml=\"configs/deformable_detr/deformable_detr_r50_1x_coco.yml\" ;;\ngfl) model_yml=\"configs/gfl/gfl_r50_fpn_1x_coco.yml\" ;;\n+ hrnet) model_yml=\"configs/keypoint/hrnet/hrnet_w32_256x192.yml\" ;;\n+ higherhrnet) model_yml=\"configs/keypoint/higherhrnet/higherhrnet_hrnet_w32_512.yml\" ;;\n+ solov2) model_yml=\"configs/solov2/solov2_r50_fpn_1x_coco.yml\" ;;\n*) echo \"Undefined model_name\"; exit 1;\nesac\nset_batch_size=\"TrainReader.batch_size=${batch_size}\"\nset_max_epoch=\"epoch=${max_epoch}\"\n- set_log_iter=\"log_iter=10\"\n+ set_log_iter=\"log_iter=1\"\nif [ ${fp_item} = \"fp16\" ]; then\nset_fp_item=\"--fp16\"\nelse\n"
}
] |
Python | Apache License 2.0 | paddlepaddle/paddledetection | add pose/solov2 model training benchmark (#4303) |
499,301 | 15.10.2021 13:08:37 | -28,800 | e14387f81e93c3da7295a70c7b34ca9c8d8bcdc1 | swin config update |
[
{
"change_type": "MODIFY",
"old_path": "configs/faster_rcnn/_base_/faster_rcnn_swin_tiny_fpn.yml",
"new_path": "configs/faster_rcnn/_base_/faster_rcnn_swin_tiny_fpn.yml",
"diff": "@@ -13,7 +13,7 @@ SwinTransformer:\nnum_heads: [3, 6, 12, 24]\nwindow_size: 7\nape: false\n- drop_path_rate: 0.2\n+ drop_path_rate: 0.1\npatch_norm: true\nout_indices: [0,1,2,3]\npretrained: https://paddledet.bj.bcebos.com/models/pretrained/swin_tiny_patch4_window7_224.pdparams\n"
}
] |
Python | Apache License 2.0 | paddlepaddle/paddledetection | swin config update (#4308) |
499,298 | 15.10.2021 14:18:00 | -28,800 | 6bf1b44338c5b3ee835d382ed01a92e671bba2f9 | [benchmark] add jde/fairmot model training benchmark |
[
{
"change_type": "MODIFY",
"old_path": "benchmark/prepare.sh",
"new_path": "benchmark/prepare.sh",
"diff": "pip3.7 install -U pip Cython\npip3.7 install -r requirements.txt\n+\nmv ./dataset/coco/download_coco.py . && rm -rf ./dataset/coco/* && mv ./download_coco.py ./dataset/coco/\n# prepare lite train data\nwget -nc -P ./dataset/coco/ https://paddledet.bj.bcebos.com/data/coco_benchmark.tar\ncd ./dataset/coco/ && tar -xvf coco_benchmark.tar && mv -u coco_benchmark/* .\nrm -rf coco_benchmark/\n+\n+rm -rf ./dataset/mot/*\n+# prepare mot mini train data\n+wget -nc -P ./dataset/mot/ https://paddledet.bj.bcebos.com/data/mot_benchmark.tar\n+cd ./dataset/mot/ && tar -xvf mot_benchmark.tar && mv -u mot_benchmark/* .\n+rm -rf mot_benchmark/\n"
},
{
"change_type": "MODIFY",
"old_path": "benchmark/run_all.sh",
"new_path": "benchmark/run_all.sh",
"diff": "# run prepare.sh\nbash benchmark/prepare.sh\n-model_name_list=(faster_rcnn fcos deformable_detr gfl)\n+model_name_list=(faster_rcnn fcos deformable_detr gfl hrnet higherhrnet solov2 jde fairmot)\nfp_item_list=(fp32)\nmax_epoch=1\n@@ -22,6 +22,8 @@ for model_name in ${model_name_list[@]}; do\nhrnet) bs_list=(64 160) ;;\nhigherhrnet) bs_list=(20 24) ;;\nsolov2) bs_list=(2 4) ;;\n+ jde) bs_list=(4 14) ;;\n+ fairmot) bs_list=(6 22) ;;\n*) echo \"wrong model_name\"; exit 1;\nesac\nfor bs_item in ${bs_list[@]}\n@@ -29,7 +31,7 @@ for model_name in ${model_name_list[@]}; do\necho \"index is speed, 1gpus, begin, ${model_name}\"\nrun_mode=sp\nCUDA_VISIBLE_DEVICES=0 bash benchmark/run_benchmark.sh ${run_mode} ${bs_item} \\\n- ${fp_item} ${max_epoch} ${model_name} # (5min)\n+ ${fp_item} ${max_epoch} ${model_name}\nsleep 60\necho \"index is speed, 8gpus, run_mode is multi_process, begin, ${model_name}\"\n"
}
] |
Python | Apache License 2.0 | paddlepaddle/paddledetection | [benchmark] add jde/fairmot model training benchmark (#4307) |
499,348 | 15.10.2021 21:00:46 | -28,800 | 54fdd88764ed592c20840ac5ad2ca5fd14b27b6b | add Pipeline Total time |
[
{
"change_type": "MODIFY",
"old_path": "deploy/lite/src/main.cc",
"new_path": "deploy/lite/src/main.cc",
"diff": "@@ -71,6 +71,18 @@ void PrintBenchmarkLog(std::vector<double> det_time, int img_num) {\n<< \", postprocess_time(ms): \" << det_time[2] / img_num << std::endl;\n}\n+void PrintTotalIimeLog(double det_time,\n+ double keypoint_time,\n+ double crop_time) {\n+ std::cout << \"----------------------- Time info ------------------------\"\n+ << std::endl;\n+ std::cout << \"Total Pipeline time(ms): \"\n+ << det_time + keypoint_time + crop_time << std::endl;\n+ std::cout << \"average det time(ms): \" << det_time\n+ << \", average keypoint time(ms): \" << keypoint_time\n+ << \", average crop time(ms): \" << crop_time << std::endl;\n+}\n+\nstatic std::string DirName(const std::string& filepath) {\nauto pos = filepath.rfind(OS_PATH_SEP);\nif (pos == std::string::npos) {\n@@ -114,6 +126,7 @@ void PredictImage(const std::vector<std::string> all_img_paths,\nint steps = ceil(float(all_img_paths.size()) / batch_size_det);\nint kpts_imgs = 0;\nstd::vector<double> keypoint_t = {0, 0, 0};\n+ double midtimecost = 0;\nfor (int idx = 0; idx < steps; idx++) {\nstd::vector<cv::Mat> batch_imgs;\nint left_image_cnt = all_img_paths.size() - idx * batch_size_det;\n@@ -143,6 +156,7 @@ void PredictImage(const std::vector<std::string> all_img_paths,\n} else {\ndet->Predict(batch_imgs, 0.5, 0, 1, &result, &bbox_num, &det_times);\n}\n+\n// get labels and colormap\nauto labels = det->GetLabelList();\nauto colormap = PaddleDetection::GenerateColorMap(labels.size());\n@@ -196,6 +210,7 @@ void PredictImage(const std::vector<std::string> all_img_paths,\nif (keypoint) {\nint imsize = im_result.size();\nfor (int i = 0; i < imsize; i++) {\n+ auto keypoint_start_time = std::chrono::steady_clock::now();\nauto item = im_result[i];\ncv::Mat crop_img;\nstd::vector<double> keypoint_times;\n@@ -210,6 +225,11 @@ void PredictImage(const std::vector<std::string> all_img_paths,\nimgs_kpts.emplace_back(crop_img);\nkpts_imgs += 1;\n}\n+ auto keypoint_crop_time = std::chrono::steady_clock::now();\n+\n+ std::chrono::duration<float> midtimediff =\n+ keypoint_crop_time - keypoint_start_time;\n+ midtimecost += double(midtimediff.count() * 1000);\nif (imgs_kpts.size() == RT_Config[\"batch_size_keypoint\"].as<int>() ||\n((i == imsize - 1) && !imgs_kpts.empty())) {\n@@ -265,6 +285,9 @@ void PredictImage(const std::vector<std::string> all_img_paths,\n}\nPrintBenchmarkLog(det_t, all_img_paths.size());\nPrintBenchmarkLog(keypoint_t, kpts_imgs);\n+ PrintTotalIimeLog((det_t[0] + det_t[1] + det_t[2]) / all_img_paths.size(),\n+ (keypoint_t[0] + keypoint_t[1] + keypoint_t[2]) / kpts_imgs,\n+ midtimecost / all_img_paths.size());\n}\nint main(int argc, char** argv) {\n"
}
] |
Python | Apache License 2.0 | paddlepaddle/paddledetection | add Pipeline Total time (#4318) |
499,339 | 18.10.2021 19:45:00 | -28,800 | 48db9a8c5b01b36492798037f6cad9e71758f905 | [ce tests] add kl to ce |
[
{
"change_type": "ADD",
"old_path": null,
"new_path": "configs/slim/post_quant/ppyolo_r50vd_dcn_ptq.yml",
"diff": "+weights: https://paddledet.bj.bcebos.com/models/ppyolo_r50vd_dcn_1x_coco.pdparams\n+slim: PTQ\n+\n+PTQ:\n+ ptq_config: {\n+ 'activation_quantizer': 'HistQuantizer',\n+ 'upsample_bins': 127,\n+ 'hist_percent': 0.999}\n+ quant_batch_num: 10\n+ fuse: True\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "configs/slim/post_quant/yolov3_darknet53_ptq.yml",
"diff": "+weights: https://paddledet.bj.bcebos.com/models/yolov3_darknet53_270e_coco.pdparams\n+slim: PTQ\n+\n+PTQ:\n+ ptq_config: {\n+ 'activation_quantizer': 'HistQuantizer',\n+ 'upsample_bins': 127,\n+ 'hist_percent': 0.999}\n+ quant_batch_num: 10\n+ fuse: True\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/ppdet_params/ppyolo_mbv3_large_coco_params.txt",
"new_path": "tests/ppdet_params/ppyolo_mbv3_large_coco_params.txt",
"diff": "@@ -31,11 +31,11 @@ norm_export:tools/export_model.py -c configs/ppyolo/ppyolo_mbv3_large_coco.yml -\nquant_export:tools/export_model.py -c configs/ppyolo/ppyolo_mbv3_large_coco.yml --slim_config configs/slim/quant/ppyolo_mbv3_large_qat.yml -o\nfpgm_export:tools/export_model.py -c configs/ppyolo/ppyolo_mbv3_large_coco.yml --slim_config configs/slim/prune/ppyolo_mbv3_large_prune_fpgm.yml -o\ndistill_export:null\n-export1:null\n+kl_quant:tools/post_quant.py -c configs/ppyolo/ppyolo_mbv3_large_coco.yml --slim_config configs/slim/post_quant/ppyolo_mbv3_large_ptq.yml -o\nexport2:null\n##\ninfer_model:ppyolo_mbv3_large_coco.pdparams|ppyolo_mbv3_large_qat.pdparams|ppyolo_mbv3_large_prune_fpgm.pdparams\n-infer_export:./tests/weights|./tests/weights|./tests/weights\n+infer_model_dir:./tests/weights\ninfer_quant:False|True|False\ninference:deploy/python/infer.py\n--device:gpu|cpu\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/ppdet_params/ppyolo_r50vd_dcn_1x_coco_params.txt",
"new_path": "tests/ppdet_params/ppyolo_r50vd_dcn_1x_coco_params.txt",
"diff": "@@ -31,11 +31,11 @@ norm_export:tools/export_model.py -c configs/ppyolo/ppyolo_r50vd_dcn_1x_coco.yml\nquant_export:tools/export_model.py -c configs/ppyolo/ppyolo_r50vd_dcn_1x_coco.yml --slim_config configs/slim/quant/ppyolo_r50vd_qat_pact.yml -o\nfpgm_export:tools/export_model.py -c configs/ppyolo/ppyolo_r50vd_dcn_1x_coco.yml --slim_config configs/slim/prune/ppyolo_r50vd_prune_fpgm.yml -o\ndistill_export:null\n-export1:null\n+kl_quant:tools/post_quant.py -c configs/ppyolo/ppyolo_r50vd_dcn_1x_coco.yml --slim_config configs/slim/post_quant/ppyolo_r50vd_dcn_ptq.yml -o\nexport2:null\n##\ninfer_model:ppyolo_r50vd_dcn_1x_coco.pdparams|ppyolo_r50vd_qat_pact.pdparams|ppyolo_r50vd_prune_fpgm.pdparams\n-infer_export:./tests/weights|./tests/weights|./tests/weights\n+infer_model_dir:./tests/weights\ninfer_quant:False|True|False\ninference:deploy/python/infer.py\n--device:gpu|cpu\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/ppdet_params/yolov3_darknet53_270e_coco_params.txt",
"new_path": "tests/ppdet_params/yolov3_darknet53_270e_coco_params.txt",
"diff": "@@ -31,11 +31,11 @@ norm_export:tools/export_model.py -c configs/yolov3/yolov3_darknet53_270e_coco.y\nquant_export:tools/export_model.py -c configs/yolov3/yolov3_darknet53_270e_coco.yml --slim_config configs/slim/quant/yolov3_darknet_qat.yml -o\nfpgm_export:tools/export_model.py -c configs/yolov3/yolov3_darknet53_270e_coco.yml --slim_config configs/slim/prune/yolov3_darknet_prune_fpgm.yml -o\ndistill_export:null\n-export1:null\n+kl_quant:tools/post_quant.py -c configs/yolov3/yolov3_darknet53_270e_coco.yml --slim_config configs/slim/post_quant/yolov3_darknet53_ptq.yml -o\nexport2:null\n##\ninfer_model:yolov3_darknet53_270e_coco.pdparams|yolov3_darknet_coco_qat.pdparams|yolov3_darknet_prune_fpgm.pdparams\n-infer_export:./tests/weights|./tests/weights|./tests/weights\n+infer_model_dir:./tests/weights\ninfer_quant:False|True|False\ninference:deploy/python/infer.py\n--device:gpu|cpu\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/prepare.sh",
"new_path": "tests/prepare.sh",
"diff": "@@ -60,6 +60,7 @@ elif [ ${MODE} = \"whole_infer\" ];then\ncd ../../ && mkdir -p ./tests/demo/\ncp -u dataset/coco/val2017/* ./tests/demo/\nelse\n+ mv ./dataset/coco/download_coco.py . && rm -rf ./dataset/coco/* && mv ./download_coco.py ./dataset/coco/\n# prepare infer data\nwget -nc -P ./dataset/coco/ https://paddledet.bj.bcebos.com/data/coco_ce.tar\ncd ./dataset/coco/ && tar -xvf coco_ce.tar && mv -u coco_ce/* .\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/test.sh",
"new_path": "tests/test.sh",
"diff": "@@ -119,8 +119,8 @@ export_key2=$(func_parser_key \"${lines[34]}\")\nexport_value2=$(func_parser_value \"${lines[34]}\")\n# parser inference model\n-infer_model_dir_list=$(func_parser_value \"${lines[36]}\")\n-infer_export_list=$(func_parser_value \"${lines[37]}\")\n+infer_model_name_list=$(func_parser_value \"${lines[36]}\")\n+infer_model_dir=$(func_parser_value \"${lines[37]}\")\ninfer_is_quant=$(func_parser_value \"${lines[38]}\")\n# parser inference\ninference_py=$(func_parser_value \"${lines[39]}\")\n@@ -185,14 +185,13 @@ function func_inference(){\nelif [ ${device} = \"True\" ] || [ ${device} = \"gpu\" ]; then\nfor use_trt in ${use_trt_list[*]}; do\nfor precision in ${precision_list[*]}; do\n- if [[ ${_flag_quant} = \"False\" ]] && [[ ${precision} =~ \"int8\" ]]; then\n+ if [[ ${precision} != \"fluid\" ]]; then\n+ if [[ ${_flag_quant} = \"False\" ]] && [[ ${precision} = \"trt_int8\" ]]; then\ncontinue\nfi\n- if [[ ${precision} =~ \"fp16\" || ${precision} =~ \"int8\" ]] && [ ${use_trt} = \"False\" ]; then\n+ if [[ ${_flag_quant} = \"True\" ]] && [[ ${precision} != \"trt_int8\" ]]; then\ncontinue\nfi\n- if [[ ${use_trt} = \"False\" || ${precision} =~ \"int8\" ]] && [ ${_flag_quant} = \"True\" ]; then\n- continue\nfi\nfor batch_size in ${batch_size_list[*]}; do\n_save_log_path=\"${_log_path}/infer_gpu_usetrt_${use_trt}_precision_${precision}_batchsize_${batch_size}.log\"\n@@ -229,28 +228,45 @@ if [ ${MODE} = \"infer\" ]; then\neval $env\nexport Count=0\nIFS=\"|\"\n- infer_run_exports=(${infer_export_list})\ninfer_quant_flag=(${infer_is_quant})\nset_train_params1=$(func_set_params \"${train_param_key1}\" \"${train_param_value1}\")\n- for infer_model in ${infer_model_dir_list[*]}; do\n+ set_save_infer_key=$(func_set_params \"${save_infer_key}\" \"${infer_model_dir}\")\n+ infer_model=\"${infer_model_dir}/${train_param_value1}\"\n+ for infer_model_name in ${infer_model_name_list[*]}; do\n# run export\n- if [ ${infer_run_exports[Count]} != \"null\" ];then\n- set_export_weight=$(func_set_params \"${export_weight}\" \"${infer_run_exports[Count]}/${infer_model}\")\n- set_save_infer_key=$(func_set_params \"${save_infer_key}\" \"${infer_run_exports[Count]}\")\n- export_cmd=\"${python} ${norm_export} ${set_export_weight} ${set_train_params1} ${set_save_infer_key}\"\n+ case ${Count} in\n+ 0) run_export=${norm_export} ;;\n+ 1) run_export=${pact_export} ;;\n+ 2) run_export=${fpgm_export} ;;\n+ *) echo \"Undefined run_export\"; exit 1;\n+ esac\n+ set_export_weight=$(func_set_params \"${export_weight}\" \"${infer_model_dir}/${infer_model_name}\")\n+ export_cmd=\"${python} ${run_export} ${set_export_weight} ${set_train_params1} ${set_save_infer_key}\"\neval $export_cmd\nstatus_export=$?\nif [ ${status_export} = 0 ];then\nstatus_check $status_export \"${export_cmd}\" \"${status_log}\"\nfi\n- fi\n#run inference\nis_quant=${infer_quant_flag[Count]}\n- infer_model=\"${infer_run_exports[Count]}/${train_param_value1}\"\nfunc_inference \"${python}\" \"${inference_py}\" \"${infer_model}\" \"${LOG_PATH}\" \"${infer_img_dir}\" ${is_quant}\nCount=$(($Count + 1))\ndone\n+ # kl quant\n+ if [ ${export_key1} = \"kl_quant\" ]; then\n+ # run kl quant\n+ kl_cmd=\"${python} ${export_value1} ${set_train_params1} ${set_save_infer_key}\"\n+ eval $kl_cmd\n+ status_export=$?\n+ if [ ${status_export} = 0 ];then\n+ status_check $status_export \"${kl_cmd}\" \"${status_log}\"\n+ fi\n+ # run inference\n+ is_quant=True\n+ func_inference \"${python}\" \"${inference_py}\" \"${infer_model}\" \"${LOG_PATH}\" 
\"${infer_img_dir}\" ${is_quant}\n+ fi\n+\nelse\nIFS=\"|\"\nexport Count=0\n"
}
] |
Python
|
Apache License 2.0
|
paddlepaddle/paddledetection
|
[ce tests] add kl to ce (#4311)
|
499,301 |
19.10.2021 10:57:25
| -28,800 |
98c6934e899e4c42cb1a1ac85a33098e5d7f3c6f
|
support export
|
[
{
"change_type": "MODIFY",
"old_path": "ppdet/modeling/backbones/swin_transformer.py",
"new_path": "ppdet/modeling/backbones/swin_transformer.py",
"diff": "@@ -537,7 +537,7 @@ class PatchEmbed(nn.Layer):\nB, C, H, W = x.shape\n# assert [H, W] == self.img_size[:2], \"Input image size ({H}*{W}) doesn't match model ({}*{}).\".format(H, W, self.img_size[0], self.img_size[1])\nif W % self.patch_size[1] != 0:\n- x = F.pad(x, [0, self.patch_size[1] - W % self.patch_size[1]])\n+ x = F.pad(x, [0, self.patch_size[1] - W % self.patch_size[1], 0, 0])\nif H % self.patch_size[0] != 0:\nx = F.pad(x, [0, 0, 0, self.patch_size[0] - H % self.patch_size[0]])\n"
}
] |
Python
|
Apache License 2.0
|
paddlepaddle/paddledetection
|
support export (#4326)
|
499,348 |
19.10.2021 17:58:29
| -28,800 |
488a683815847ea65a4f38779c9f7076f1d0edc2
|
update keypoint config
|
[
{
"change_type": "MODIFY",
"old_path": "deploy/lite/keypoint_runtime_config.json",
"new_path": "deploy/lite/keypoint_runtime_config.json",
"diff": "{\n+ \"model_dir_det\": \"./model_det/\",\n+ \"batch_size_det\": 1,\n+ \"threshold_det\": 0.5,\n\"model_dir_keypoint\": \"./model_keypoint/\",\n\"batch_size_keypoint\": 8,\n\"threshold_keypoint\": 0.5,\n"
}
] |
Python
|
Apache License 2.0
|
paddlepaddle/paddledetection
|
update keypoint config (#4334)
|
499,348 |
20.10.2021 16:48:54
| -28,800 |
bc437a49d39f85cea58b37863341bd11e3bef368
|
infer deploy:fix trt keypointbatchsize; lite deploy: update total pipeline time from per person to per image
|
[
{
"change_type": "MODIFY",
"old_path": "deploy/cpp/src/main_keypoint.cc",
"new_path": "deploy/cpp/src/main_keypoint.cc",
"diff": "@@ -445,7 +445,7 @@ int main(int argc, char** argv) {\nif (!FLAGS_model_dir_keypoint.empty())\n{\nkeypoint = new PaddleDetection::KeyPointDetector(FLAGS_model_dir_keypoint, FLAGS_device, FLAGS_use_mkldnn,\n- FLAGS_cpu_threads, FLAGS_run_mode, FLAGS_batch_size,FLAGS_gpu_id,\n+ FLAGS_cpu_threads, FLAGS_run_mode, FLAGS_batch_size_keypoint, FLAGS_gpu_id,\nFLAGS_trt_min_shape, FLAGS_trt_max_shape, FLAGS_trt_opt_shape,\nFLAGS_trt_calib_mode, FLAGS_use_dark);\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "deploy/lite/src/main.cc",
"new_path": "deploy/lite/src/main.cc",
"diff": "@@ -76,11 +76,11 @@ void PrintTotalIimeLog(double det_time,\ndouble crop_time) {\nstd::cout << \"----------------------- Time info ------------------------\"\n<< std::endl;\n- std::cout << \"Total Pipeline time(ms): \"\n+ std::cout << \"Total Pipeline time(ms) per image: \"\n<< det_time + keypoint_time + crop_time << std::endl;\n- std::cout << \"average det time(ms): \" << det_time\n- << \", average keypoint time(ms): \" << keypoint_time\n- << \", average crop time(ms): \" << crop_time << std::endl;\n+ std::cout << \"Average det time(ms) per image: \" << det_time\n+ << \", average keypoint time(ms) per image: \" << keypoint_time\n+ << \", average crop time(ms) per image: \" << crop_time << std::endl;\n}\nstatic std::string DirName(const std::string& filepath) {\n@@ -287,12 +287,13 @@ void PredictImage(const std::vector<std::string> all_img_paths,\nPrintBenchmarkLog(det_t, all_img_paths.size());\nif (keypoint) {\nPrintBenchmarkLog(keypoint_t, kpts_imgs);\n- }\nPrintTotalIimeLog((det_t[0] + det_t[1] + det_t[2]) / all_img_paths.size(),\n- (keypoint_t[0] + keypoint_t[1] + keypoint_t[2]) / kpts_imgs,\n+ (keypoint_t[0] + keypoint_t[1] + keypoint_t[2]) / all_img_paths.size(),\nmidtimecost / all_img_paths.size());\n}\n+}\n+\nint main(int argc, char** argv) {\nstd::cout << \"Usage: \" << argv[0]\n<< \" [config_path](option) [image_dir](option)\\n\";\n"
}
] |
Python
|
Apache License 2.0
|
paddlepaddle/paddledetection
|
infer deploy:fix trt keypointbatchsize; lite deploy: update total pipeline time from per person to per image (#4339)
|
499,301 |
21.10.2021 10:21:44
| -28,800 |
1755a2b215217fa3293e475a9c581ac33d1ffd05
|
add w/o weight decay params groups
|
[
{
"change_type": "MODIFY",
"old_path": "configs/faster_rcnn/_base_/optimizer_swin_1x.yml",
"new_path": "configs/faster_rcnn/_base_/optimizer_swin_1x.yml",
"diff": "@@ -15,3 +15,4 @@ OptimizerBuilder:\noptimizer:\ntype: AdamW\nweight_decay: 0.05\n+ without_weight_decay_params: ['absolute_pos_embed', 'relative_position_bias_table', 'norm']\n"
},
{
"change_type": "MODIFY",
"old_path": "ppdet/engine/trainer.py",
"new_path": "ppdet/engine/trainer.py",
"diff": "@@ -115,8 +115,7 @@ class Trainer(object):\nif self.mode == 'train':\nsteps_per_epoch = len(self.loader)\nself.lr = create('LearningRate')(steps_per_epoch)\n- self.optimizer = create('OptimizerBuilder')(self.lr,\n- self.model.parameters())\n+ self.optimizer = create('OptimizerBuilder')(self.lr, self.model)\nself._nranks = dist.get_world_size()\nself._local_rank = dist.get_rank()\n"
},
{
"change_type": "MODIFY",
"old_path": "ppdet/optimizer.py",
"new_path": "ppdet/optimizer.py",
"diff": "@@ -225,7 +225,7 @@ class OptimizerBuilder():\nself.regularizer = regularizer\nself.optimizer = optimizer\n- def __call__(self, learning_rate, params=None):\n+ def __call__(self, learning_rate, model=None):\nif self.clip_grad_by_norm is not None:\ngrad_clip = nn.ClipGradByGlobalNorm(\nclip_norm=self.clip_grad_by_norm)\n@@ -244,6 +244,25 @@ class OptimizerBuilder():\nif optim_type != 'AdamW':\noptim_args['weight_decay'] = regularization\nop = getattr(optimizer, optim_type)\n+\n+ if 'without_weight_decay_params' in optim_args:\n+ keys = optim_args['without_weight_decay_params']\n+ params = [{\n+ 'params': [\n+ p for n, p in model.named_parameters()\n+ if any([k in n for k in keys])\n+ ],\n+ 'weight_decay': 0.\n+ }, {\n+ 'params': [\n+ p for n, p in model.named_parameters()\n+ if all([k not in n for k in keys])\n+ ]\n+ }]\n+ del optim_args['without_weight_decay_params']\n+ else:\n+ params = model.parameters()\n+\nreturn op(learning_rate=learning_rate,\nparameters=params,\ngrad_clip=grad_clip,\n"
}
] |
Python
|
Apache License 2.0
|
paddlepaddle/paddledetection
|
add w/o weight decay params groups (#4337)
|
499,302 |
21.10.2021 10:27:12
| -28,800 |
69d21d887e4db3f3d38bff3bb9cb8a29c448e0fb
|
[NPU] add npu support for yolov3, test=develop
|
[
{
"change_type": "MODIFY",
"old_path": ".gitignore",
"new_path": ".gitignore",
"diff": "@@ -79,3 +79,6 @@ dataset/wider_face/WIDER_val\ndataset/wider_face/wider_face_split\nppdet/version.py\n+\n+# NPU meta folder\n+kernel_meta/\n"
},
{
"change_type": "MODIFY",
"old_path": "ppdet/utils/check.py",
"new_path": "ppdet/utils/check.py",
"diff": "@@ -25,7 +25,26 @@ import paddle.version as fluid_version\nfrom .logger import setup_logger\nlogger = setup_logger(__name__)\n-__all__ = ['check_gpu', 'check_version', 'check_config']\n+__all__ = ['check_gpu', 'check_npu', 'check_version', 'check_config']\n+\n+\n+def check_npu(use_npu):\n+ \"\"\"\n+ Log error and exit when set use_npu=true in paddlepaddle\n+ cpu/gpu/xpu version.\n+ \"\"\"\n+ err = \"Config use_npu cannot be set as true while you are \" \\\n+ \"using paddlepaddle cpu/gpu/xpu version ! \\nPlease try: \\n\" \\\n+ \"\\t1. Install paddlepaddle-npu to run model on NPU \\n\" \\\n+ \"\\t2. Set use_npu as false in config file to run \" \\\n+ \"model on CPU/GPU/XPU\"\n+\n+ try:\n+ if use_npu and not paddle.is_compiled_with_npu():\n+ logger.error(err)\n+ sys.exit(1)\n+ except Exception as e:\n+ pass\ndef check_gpu(use_gpu):\n"
},
{
"change_type": "MODIFY",
"old_path": "static/ppdet/modeling/anchor_heads/yolo_head.py",
"new_path": "static/ppdet/modeling/anchor_heads/yolo_head.py",
"diff": "@@ -303,8 +303,14 @@ class YOLOv3Head(object):\nreturn route, tip\ndef _upsample(self, input, scale=2, name=None):\n+ align_corners = True\n+ if fluid.core.is_compiled_with_npu():\n+ align_corners = False\nout = fluid.layers.resize_nearest(\n- input=input, scale=float(scale), name=name)\n+ input=input,\n+ scale=float(scale),\n+ name=name,\n+ align_corners=align_corners)\nreturn out\ndef _parse_anchors(self, anchors):\n@@ -520,8 +526,14 @@ class YOLOv4Head(YOLOv3Head):\nself.spp_stage = spp_stage\ndef _upsample(self, input, scale=2, name=None):\n+ align_corners = True\n+ if fluid.core.is_compiled_with_npu():\n+ align_corners = False\nout = fluid.layers.resize_nearest(\n- input=input, scale=float(scale), name=name)\n+ input=input,\n+ scale=float(scale),\n+ name=name,\n+ align_corners=align_corners)\nreturn out\ndef max_pool(self, input, size):\n"
},
{
"change_type": "MODIFY",
"old_path": "static/ppdet/utils/check.py",
"new_path": "static/ppdet/utils/check.py",
"diff": "@@ -29,6 +29,7 @@ logger = logging.getLogger(__name__)\n__all__ = [\n'check_gpu',\n'check_xpu',\n+ 'check_npu',\n'check_version',\n'check_config',\n'check_py_func',\n@@ -54,6 +55,25 @@ def check_xpu(use_xpu):\npass\n+def check_npu(use_npu):\n+ \"\"\"\n+ Log error and exit when set use_npu=true in paddlepaddle\n+ cpu/gpu/xpu version.\n+ \"\"\"\n+ err = \"Config use_npu cannot be set as true while you are \" \\\n+ \"using paddlepaddle cpu/gpu/xpu version ! \\nPlease try: \\n\" \\\n+ \"\\t1. Install paddlepaddle-npu to run model on NPU \\n\" \\\n+ \"\\t2. Set use_npu as false in config file to run \" \\\n+ \"model on CPU/GPU/XPU\"\n+\n+ try:\n+ if use_npu and not fluid.is_compiled_with_npu():\n+ logger.error(err)\n+ sys.exit(1)\n+ except Exception as e:\n+ pass\n+\n+\ndef check_gpu(use_gpu):\n\"\"\"\nLog error and exit when set use_gpu=true in paddlepaddle\n"
},
{
"change_type": "MODIFY",
"old_path": "static/ppdet/utils/dist_utils.py",
"new_path": "static/ppdet/utils/dist_utils.py",
"diff": "@@ -31,6 +31,19 @@ def nccl2_prepare(trainer_id, startup_prog, main_prog):\nprogram=main_prog)\n+def collective_prepare(trainer_id, startup_prog, main_prog):\n+ config = fluid.DistributeTranspilerConfig()\n+ config.mode = \"collective\"\n+ config.collective_mode = \"grad_allreduce\"\n+ t = fluid.DistributeTranspiler(config=config)\n+ t.transpile(\n+ trainer_id,\n+ trainers=os.environ.get('PADDLE_TRAINER_ENDPOINTS'),\n+ current_endpoint=os.environ.get('PADDLE_CURRENT_ENDPOINT'),\n+ startup_program=startup_prog,\n+ program=main_prog)\n+\n+\ndef prepare_for_multi_process(exe, build_strategy, startup_prog, main_prog):\ntrainer_id = int(os.environ.get('PADDLE_TRAINER_ID', 0))\nnum_trainers = int(os.environ.get('PADDLE_TRAINERS_NUM', 1))\n@@ -38,4 +51,7 @@ def prepare_for_multi_process(exe, build_strategy, startup_prog, main_prog):\nreturn\nbuild_strategy.num_trainers = num_trainers\nbuild_strategy.trainer_id = trainer_id\n+ if fluid.core.is_compiled_with_npu():\n+ collective_prepare(trainer_id, startup_prog, main_prog)\n+ else:\nnccl2_prepare(trainer_id, startup_prog, main_prog)\n"
},
{
"change_type": "MODIFY",
"old_path": "static/tools/eval.py",
"new_path": "static/tools/eval.py",
"diff": "@@ -34,7 +34,7 @@ logger = logging.getLogger(__name__)\ntry:\nfrom ppdet.utils.eval_utils import parse_fetches, eval_run, eval_results, json_eval_results\nimport ppdet.utils.checkpoint as checkpoint\n- from ppdet.utils.check import check_gpu, check_xpu, check_version, check_config, enable_static_mode\n+ from ppdet.utils.check import check_gpu, check_xpu, check_npu, check_version, check_config, enable_static_mode\nfrom ppdet.data.reader import create_reader\n@@ -63,6 +63,10 @@ def main():\ncheck_config(cfg)\n# check if set use_gpu=True in paddlepaddle cpu version\ncheck_gpu(cfg.use_gpu)\n+ # disable npu in config by default and check use_npu\n+ if 'use_npu' not in cfg:\n+ cfg.use_npu = False\n+ check_npu(cfg.use_npu)\nuse_xpu = False\nif hasattr(cfg, 'use_xpu'):\ncheck_xpu(cfg.use_xpu)\n@@ -73,6 +77,9 @@ def main():\nassert not (use_xpu and cfg.use_gpu), \\\n'Can not run on both XPU and GPU'\n+ assert not (cfg.use_npu and cfg.use_gpu), \\\n+ 'Can not run on both NPU and GPU'\n+\nmain_arch = cfg.architecture\nmulti_scale_test = getattr(cfg, 'MultiScaleTEST', None)\n@@ -80,6 +87,8 @@ def main():\n# define executor\nif cfg.use_gpu:\nplace = fluid.CUDAPlace(0)\n+ elif cfg.use_npu:\n+ place = fluid.NPUPlace(0)\nelif use_xpu:\nplace = fluid.XPUPlace(0)\nelse:\n@@ -117,7 +126,7 @@ def main():\nreturn\ncompile_program = fluid.CompiledProgram(eval_prog).with_data_parallel()\n- if use_xpu:\n+ if use_xpu or cfg.use_npu:\ncompile_program = eval_prog\nassert cfg.metric != 'OID', \"eval process of OID dataset \\\n"
},
{
"change_type": "MODIFY",
"old_path": "static/tools/infer.py",
"new_path": "static/tools/infer.py",
"diff": "@@ -41,7 +41,7 @@ try:\nfrom ppdet.utils.eval_utils import parse_fetches\nfrom ppdet.utils.cli import ArgsParser\n- from ppdet.utils.check import check_gpu, check_version, check_config, enable_static_mode\n+ from ppdet.utils.check import check_gpu, check_npu, check_version, check_config, enable_static_mode\nfrom ppdet.utils.visualizer import visualize_results\nimport ppdet.utils.checkpoint as checkpoint\n@@ -109,6 +109,10 @@ def main():\ncheck_config(cfg)\n# check if set use_gpu=True in paddlepaddle cpu version\ncheck_gpu(cfg.use_gpu)\n+ # disable npu in config by default and check use_npu\n+ if 'use_npu' not in cfg:\n+ cfg.use_npu = False\n+ check_npu(cfg.use_npu)\n# check if paddlepaddle version is satisfied\ncheck_version()\n@@ -119,7 +123,12 @@ def main():\ntest_images = get_test_images(FLAGS.infer_dir, FLAGS.infer_img)\ndataset.set_images(test_images)\n- place = fluid.CUDAPlace(0) if cfg.use_gpu else fluid.CPUPlace()\n+ if cfg.use_gpu:\n+ place = fluid.CUDAPlace(0)\n+ elif cfg.use_npu:\n+ place = fluid.NPUPlace(0)\n+ else:\n+ place = fluid.CPUPlace()\nexe = fluid.Executor(place)\nmodel = create(main_arch)\n"
},
{
"change_type": "MODIFY",
"old_path": "static/tools/train.py",
"new_path": "static/tools/train.py",
"diff": "@@ -50,7 +50,7 @@ try:\nfrom ppdet.utils.eval_utils import parse_fetches, eval_run, eval_results\nfrom ppdet.utils.stats import TrainingStats\nfrom ppdet.utils.cli import ArgsParser\n- from ppdet.utils.check import check_gpu, check_xpu, check_version, check_config, enable_static_mode\n+ from ppdet.utils.check import check_gpu, check_xpu, check_npu, check_version, check_config, enable_static_mode\nimport ppdet.utils.checkpoint as checkpoint\nexcept ImportError as e:\nif sys.argv[0].find('static') >= 0:\n@@ -87,6 +87,10 @@ def main():\ncheck_config(cfg)\n# check if set use_gpu=True in paddlepaddle cpu version\ncheck_gpu(cfg.use_gpu)\n+ # disable npu in config by default and check use_npu\n+ if 'use_npu' not in cfg:\n+ cfg.use_npu = False\n+ check_npu(cfg.use_npu)\nuse_xpu = False\nif hasattr(cfg, 'use_xpu'):\ncheck_xpu(cfg.use_xpu)\n@@ -97,6 +101,9 @@ def main():\nassert not (use_xpu and cfg.use_gpu), \\\n'Can not run on both XPU and GPU'\n+ assert not (cfg.use_npu and cfg.use_gpu), \\\n+ 'Can not run on both NPU and GPU'\n+\nsave_only = getattr(cfg, 'save_prediction_only', False)\nif save_only:\nraise NotImplementedError('The config file only support prediction,'\n@@ -105,6 +112,8 @@ def main():\nif cfg.use_gpu:\ndevices_num = fluid.core.get_cuda_device_count()\n+ if cfg.use_npu:\n+ devices_num = fluid.core.get_npu_device_count()\nelif use_xpu:\n# ToDo(qingshu): XPU only support single card now\ndevices_num = 1\n@@ -113,6 +122,8 @@ def main():\nif cfg.use_gpu and 'FLAGS_selected_gpus' in env:\ndevice_id = int(env['FLAGS_selected_gpus'])\n+ elif cfg.use_npu and 'FLAGS_selected_npus' in env:\n+ device_id = int(env['FLAGS_selected_npus'])\nelif use_xpu and 'FLAGS_selected_xpus' in env:\ndevice_id = int(env['FLAGS_selected_xpus'])\nelse:\n@@ -120,6 +131,8 @@ def main():\nif cfg.use_gpu:\nplace = fluid.CUDAPlace(device_id)\n+ elif cfg.use_npu:\n+ place = fluid.NPUPlace(device_id)\nelif use_xpu:\nplace = fluid.XPUPlace(device_id)\nelse:\n@@ -216,12 +229,12 @@ def main():\nloss_name=loss.name,\nbuild_strategy=build_strategy,\nexec_strategy=exec_strategy)\n- if use_xpu:\n+ if use_xpu or cfg.use_npu:\ncompiled_train_prog = train_prog\nif FLAGS.eval:\ncompiled_eval_prog = fluid.CompiledProgram(eval_prog)\n- if use_xpu:\n+ if use_xpu or cfg.use_npu:\ncompiled_eval_prog = eval_prog\nfuse_bn = getattr(model.backbone, 'norm_type', None) == 'affine_channel'\n"
},
{
"change_type": "MODIFY",
"old_path": "tools/eval.py",
"new_path": "tools/eval.py",
"diff": "@@ -30,7 +30,7 @@ warnings.filterwarnings('ignore')\nimport paddle\nfrom ppdet.core.workspace import load_config, merge_config\n-from ppdet.utils.check import check_gpu, check_version, check_config\n+from ppdet.utils.check import check_gpu, check_npu, check_version, check_config\nfrom ppdet.utils.cli import ArgsParser\nfrom ppdet.engine import Trainer, init_parallel_env\nfrom ppdet.metrics.coco_utils import json_eval_results\n@@ -116,7 +116,16 @@ def main():\ncfg['save_prediction_only'] = FLAGS.save_prediction_only\nmerge_config(FLAGS.opt)\n- place = paddle.set_device('gpu' if cfg.use_gpu else 'cpu')\n+ # disable npu in config by default\n+ if 'use_npu' not in cfg:\n+ cfg.use_npu = False\n+\n+ if cfg.use_gpu:\n+ place = paddle.set_device('gpu')\n+ elif cfg.use_npu:\n+ place = paddle.set_device('npu')\n+ else:\n+ place = paddle.set_device('cpu')\nif 'norm_type' in cfg and cfg['norm_type'] == 'sync_bn' and not cfg.use_gpu:\ncfg['norm_type'] = 'bn'\n@@ -126,6 +135,7 @@ def main():\ncheck_config(cfg)\ncheck_gpu(cfg.use_gpu)\n+ check_npu(cfg.use_npu)\ncheck_version()\nrun(FLAGS, cfg)\n"
},
{
"change_type": "MODIFY",
"old_path": "tools/infer.py",
"new_path": "tools/infer.py",
"diff": "@@ -31,7 +31,7 @@ import glob\nimport paddle\nfrom ppdet.core.workspace import load_config, merge_config\nfrom ppdet.engine import Trainer\n-from ppdet.utils.check import check_gpu, check_version, check_config\n+from ppdet.utils.check import check_gpu, check_npu, check_version, check_config\nfrom ppdet.utils.cli import ArgsParser\nfrom ppdet.slim import build_slim_model\n@@ -141,7 +141,16 @@ def main():\ncfg['vdl_log_dir'] = FLAGS.vdl_log_dir\nmerge_config(FLAGS.opt)\n- place = paddle.set_device('gpu' if cfg.use_gpu else 'cpu')\n+ # disable npu in config by default\n+ if 'use_npu' not in cfg:\n+ cfg.use_npu = False\n+\n+ if cfg.use_gpu:\n+ place = paddle.set_device('gpu')\n+ elif cfg.use_npu:\n+ place = paddle.set_device('npu')\n+ else:\n+ place = paddle.set_device('cpu')\nif 'norm_type' in cfg and cfg['norm_type'] == 'sync_bn' and not cfg.use_gpu:\ncfg['norm_type'] = 'bn'\n@@ -151,6 +160,7 @@ def main():\ncheck_config(cfg)\ncheck_gpu(cfg.use_gpu)\n+ check_npu(cfg.use_npu)\ncheck_version()\nrun(FLAGS, cfg)\n"
},
{
"change_type": "MODIFY",
"old_path": "tools/train.py",
"new_path": "tools/train.py",
"diff": "@@ -127,7 +127,16 @@ def main():\ncfg['profiler_options'] = FLAGS.profiler_options\nmerge_config(FLAGS.opt)\n- place = paddle.set_device('gpu' if cfg.use_gpu else 'cpu')\n+ # disable npu in config by default\n+ if 'use_npu' not in cfg:\n+ cfg.use_npu = False\n+\n+ if cfg.use_gpu:\n+ place = paddle.set_device('gpu')\n+ elif cfg.use_npu:\n+ place = paddle.set_device('npu')\n+ else:\n+ place = paddle.set_device('cpu')\nif 'norm_type' in cfg and cfg['norm_type'] == 'sync_bn' and not cfg.use_gpu:\ncfg['norm_type'] = 'bn'\n@@ -139,6 +148,7 @@ def main():\nmerge_config(FLAGS.opt)\ncheck.check_config(cfg)\ncheck.check_gpu(cfg.use_gpu)\n+ check.check_npu(cfg.use_npu)\ncheck.check_version()\nrun(FLAGS, cfg)\n"
}
] |
Python
|
Apache License 2.0
|
paddlepaddle/paddledetection
|
[NPU] add npu support for yolov3, test=develop (#4344)
|
499,339 |
21.10.2021 10:32:58
| -28,800 |
8474ac990c391d5c14b45ba7f38251b834d67915
|
[deploy] fix no object in Detector.predict
|
[
{
"change_type": "MODIFY",
"old_path": "deploy/python/infer.py",
"new_path": "deploy/python/infer.py",
"diff": "@@ -170,7 +170,7 @@ class Detector(object):\nresults = []\nif reduce(lambda x, y: x * y, np_boxes.shape) < 6:\nprint('[WARNNING] No object detected.')\n- results = {'boxes': np.array([[]]), 'boxes_num': [0]}\n+ results = {'boxes': np.zeros([0, 6]), 'boxes_num': [0]}\nelse:\nresults = self.postprocess(\nnp_boxes, np_masks, inputs, np_boxes_num, threshold=threshold)\n"
}
] |
Python
|
Apache License 2.0
|
paddlepaddle/paddledetection
|
[deploy] fix no object in Detector.predict (#4325)
|
499,339 |
21.10.2021 10:41:38
| -28,800 |
4f0aa7effa44fc91f26548e262668aad8b51c3da
|
[benchmark] fix nan in training
|
[
{
"change_type": "MODIFY",
"old_path": "benchmark/run_benchmark.sh",
"new_path": "benchmark/run_benchmark.sh",
"diff": "@@ -20,12 +20,18 @@ function _train(){\necho \"Train on ${num_gpu_devices} GPUs\"\necho \"current CUDA_VISIBLE_DEVICES=$CUDA_VISIBLE_DEVICES, gpus=$num_gpu_devices, batch_size=$batch_size\"\n+ # set runtime params\n+ set_optimizer_lr_sp=\" \"\n+ set_optimizer_lr_mp=\" \"\n# parse model_name\ncase ${model_name} in\n- faster_rcnn) model_yml=\"configs/faster_rcnn/faster_rcnn_r50_fpn_1x_coco.yml\" ;;\n- fcos) model_yml=\"configs/fcos/fcos_r50_fpn_1x_coco.yml\" ;;\n+ faster_rcnn) model_yml=\"configs/faster_rcnn/faster_rcnn_r50_fpn_1x_coco.yml\"\n+ set_optimizer_lr_sp=\"LearningRate.base_lr=0.001\" ;;\n+ fcos) model_yml=\"configs/fcos/fcos_r50_fpn_1x_coco.yml\"\n+ set_optimizer_lr_sp=\"LearningRate.base_lr=0.001\" ;;\ndeformable_detr) model_yml=\"configs/deformable_detr/deformable_detr_r50_1x_coco.yml\" ;;\n- gfl) model_yml=\"configs/gfl/gfl_r50_fpn_1x_coco.yml\" ;;\n+ gfl) model_yml=\"configs/gfl/gfl_r50_fpn_1x_coco.yml\"\n+ set_optimizer_lr_sp=\"LearningRate.base_lr=0.001\" ;;\nhrnet) model_yml=\"configs/keypoint/hrnet/hrnet_w32_256x192.yml\" ;;\nhigherhrnet) model_yml=\"configs/keypoint/higherhrnet/higherhrnet_hrnet_w32_512.yml\" ;;\nsolov2) model_yml=\"configs/solov2/solov2_r50_fpn_1x_coco.yml\" ;;\n@@ -45,10 +51,10 @@ function _train(){\ncase ${run_mode} in\nsp) train_cmd=\"${python} -u tools/train.py -c ${model_yml} ${set_fp_item} \\\n- -o ${set_batch_size} ${set_max_epoch} ${set_log_iter}\" ;;\n+ -o ${set_batch_size} ${set_max_epoch} ${set_log_iter} ${set_optimizer_lr_sp}\" ;;\nmp) train_cmd=\"${python} -m paddle.distributed.launch --log_dir=./mylog \\\n--gpus=${CUDA_VISIBLE_DEVICES} tools/train.py -c ${model_yml} ${set_fp_item} \\\n- -o ${set_batch_size} ${set_max_epoch} ${set_log_iter}\"\n+ -o ${set_batch_size} ${set_max_epoch} ${set_log_iter} ${set_optimizer_lr_mp}\"\nlog_parse_file=\"mylog/workerlog.0\" ;;\n*) echo \"choose run_mode(sp or mp)\"; exit 1;\nesac\n"
}
] |
Python
|
Apache License 2.0
|
paddlepaddle/paddledetection
|
[benchmark] fix nan in training (#4345)
|
499,348 |
21.10.2021 19:34:54
| -28,800 |
85218f9a64943478dfcd557dd9c44023513c7922
|
lite deploy: fix pose visualize
|
[
{
"change_type": "MODIFY",
"old_path": "deploy/lite/include/keypoint_detector.h",
"new_path": "deploy/lite/include/keypoint_detector.h",
"diff": "@@ -43,7 +43,8 @@ struct KeyPointResult {\n// Visualiztion KeyPoint Result\ncv::Mat VisualizeKptsResult(const cv::Mat& img,\nconst std::vector<KeyPointResult>& results,\n- const std::vector<int>& colormap);\n+ const std::vector<int>& colormap,\n+ float threshold = 0.2);\nclass KeyPointDetector {\npublic:\n@@ -67,7 +68,6 @@ class KeyPointDetector {\nvoid Predict(const std::vector<cv::Mat> imgs,\nstd::vector<std::vector<float>>& center,\nstd::vector<std::vector<float>>& scale,\n- const double threshold = 0.5,\nconst int warmup = 0,\nconst int repeats = 1,\nstd::vector<KeyPointResult>* result = nullptr,\n@@ -80,6 +80,8 @@ class KeyPointDetector {\nbool use_dark(){return this->use_dark_;}\n+ inline float get_threshold() {return threshold_;};\n+\nprivate:\n// Preprocess image and copy data to input buffer\nvoid Preprocess(const cv::Mat& image_mat);\n"
},
{
"change_type": "MODIFY",
"old_path": "deploy/lite/src/keypoint_detector.cc",
"new_path": "deploy/lite/src/keypoint_detector.cc",
"diff": "@@ -32,7 +32,8 @@ void KeyPointDetector::LoadModel(std::string model_file, int num_theads) {\n// Visualiztion MaskDetector results\ncv::Mat VisualizeKptsResult(const cv::Mat& img,\nconst std::vector<KeyPointResult>& results,\n- const std::vector<int>& colormap) {\n+ const std::vector<int>& colormap,\n+ float threshold) {\nconst int edge[][2] = {{0, 1},\n{0, 2},\n{1, 3},\n@@ -53,7 +54,7 @@ cv::Mat VisualizeKptsResult(const cv::Mat& img,\ncv::Mat vis_img = img.clone();\nfor (int batchid = 0; batchid < results.size(); batchid++) {\nfor (int i = 0; i < results[batchid].num_joints; i++) {\n- if (results[batchid].keypoints[i * 3] > 0.5) {\n+ if (results[batchid].keypoints[i * 3] > threshold) {\nint x_coord = int(results[batchid].keypoints[i * 3 + 1]);\nint y_coord = int(results[batchid].keypoints[i * 3 + 2]);\ncv::circle(vis_img,\n@@ -64,6 +65,8 @@ cv::Mat VisualizeKptsResult(const cv::Mat& img,\n}\n}\nfor (int i = 0; i < results[batchid].num_joints; i++) {\n+ if (results[batchid].keypoints[edge[i][0] * 3] > threshold &&\n+ results[batchid].keypoints[edge[i][1] * 3] > threshold) {\nint x_start = int(results[batchid].keypoints[edge[i][0] * 3 + 1]);\nint y_start = int(results[batchid].keypoints[edge[i][0] * 3 + 2]);\nint x_end = int(results[batchid].keypoints[edge[i][1] * 3 + 1]);\n@@ -75,6 +78,7 @@ cv::Mat VisualizeKptsResult(const cv::Mat& img,\n1);\n}\n}\n+ }\nreturn vis_img;\n}\n@@ -119,7 +123,6 @@ void KeyPointDetector::Postprocess(std::vector<float>& output,\nvoid KeyPointDetector::Predict(const std::vector<cv::Mat> imgs,\nstd::vector<std::vector<float>>& center_bs,\nstd::vector<std::vector<float>>& scale_bs,\n- const double threshold,\nconst int warmup,\nconst int repeats,\nstd::vector<KeyPointResult>* result,\n"
},
{
"change_type": "MODIFY",
"old_path": "deploy/lite/src/main.cc",
"new_path": "deploy/lite/src/main.cc",
"diff": "@@ -238,7 +238,6 @@ void PredictImage(const std::vector<std::string> all_img_paths,\nkeypoint->Predict(imgs_kpts,\ncenter_bs,\nscale_bs,\n- 0.5,\n10,\n10,\n&result_kpts,\n@@ -247,7 +246,6 @@ void PredictImage(const std::vector<std::string> all_img_paths,\nkeypoint->Predict(imgs_kpts,\ncenter_bs,\nscale_bs,\n- 0.5,\n0,\n1,\n&result_kpts,\n@@ -265,7 +263,7 @@ void PredictImage(const std::vector<std::string> all_img_paths,\noutput_path + \"keypoint_\" +\nimage_file_path.substr(image_file_path.find_last_of('/') + 1);\ncv::Mat kpts_vis_img =\n- VisualizeKptsResult(im, result_kpts, colormap_kpts);\n+ VisualizeKptsResult(im, result_kpts, colormap_kpts, keypoint->get_threshold());\ncv::imwrite(kpts_savepath, kpts_vis_img, compression_params);\nprintf(\"Visualized output saved as %s\\n\", kpts_savepath.c_str());\n} else {\n"
}
] |
Python
|
Apache License 2.0
|
paddlepaddle/paddledetection
|
lite deploy: fix pose visualize (#4349)
|
499,299 |
22.10.2021 10:59:42
| -28,800 |
8a3376b8f20829926a0d3dde6f88f97e2ea90646
|
fix trt inference error in lite_hrnet
|
[
{
"change_type": "MODIFY",
"old_path": "ppdet/modeling/backbones/lite_hrnet.py",
"new_path": "ppdet/modeling/backbones/lite_hrnet.py",
"diff": "@@ -651,7 +651,9 @@ class LiteHRNetModule(nn.Layer):\nfor i in range(len(self.fuse_layers)):\ny = out[0] if i == 0 else self.fuse_layers[i][0](out[0])\nfor j in range(self.num_branches):\n- if i == j:\n+ if j == 0:\n+ y += y\n+ elif i == j:\ny += out[j]\nelse:\ny += self.fuse_layers[i][j](out[j])\n"
}
] |
Python
|
Apache License 2.0
|
paddlepaddle/paddledetection
|
fix trt inference error in lite_hrnet (#4355)
|
499,395 |
22.10.2021 11:00:56
| -28,800 |
6596f23cbcfbf3d28afc832da634a305d70696ef
|
modify doc of ppyolo, test=document_fix
|
[
{
"change_type": "MODIFY",
"old_path": "configs/ppyolo/README.md",
"new_path": "configs/ppyolo/README.md",
"diff": "@@ -19,7 +19,7 @@ PP-YOLO reached mmAP(IoU=0.5:0.95) as 45.9% on COCO test-dev2017 dataset, and in\n<img src=\"../../docs/images/ppyolo_map_fps.png\" width=500 />\n</div>\n-PP-YOLO improved performance and speed of YOLOv3 with following methods:\n+PP-YOLO and PP-YOLOv2 improved performance and speed of YOLOv3 with following methods:\n- Better backbone: ResNet50vd-DCN\n- Larger training batch size: 8 GPUs and mini-batch size as 24 on each GPU\n@@ -31,6 +31,9 @@ PP-YOLO improved performance and speed of YOLOv3 with following methods:\n- [CoordConv](https://arxiv.org/abs/1807.03247)\n- [Spatial Pyramid Pooling](https://arxiv.org/abs/1406.4729)\n- Better ImageNet pretrain weights\n+- [PAN](https://arxiv.org/abs/1803.01534)\n+- Iou aware Loss\n+- larger input size\n## Model Zoo\n@@ -219,6 +222,12 @@ Optimizing method and ablation experiments of PP-YOLO compared with YOLOv3.\n## Citation\n```\n+@article{huang2021pp,\n+ title={PP-YOLOv2: A Practical Object Detector},\n+ author={Huang, Xin and Wang, Xinxin and Lv, Wenyu and Bai, Xiaying and Long, Xiang and Deng, Kaipeng and Dang, Qingqing and Han, Shumin and Liu, Qiwen and Hu, Xiaoguang and others},\n+ journal={arXiv preprint arXiv:2104.10419},\n+ year={2021}\n+}\n@misc{long2020ppyolo,\ntitle={PP-YOLO: An Effective and Efficient Implementation of Object Detector},\nauthor={Xiang Long and Kaipeng Deng and Guanzhong Wang and Yang Zhang and Qingqing Dang and Yuan Gao and Hui Shen and Jianguo Ren and Shumin Han and Errui Ding and Shilei Wen},\n"
},
{
"change_type": "MODIFY",
"old_path": "docs/images/ppyolo_map_fps.png",
"new_path": "docs/images/ppyolo_map_fps.png",
"diff": "Binary files a/docs/images/ppyolo_map_fps.png and b/docs/images/ppyolo_map_fps.png differ\n"
}
] |
Python
|
Apache License 2.0
|
paddlepaddle/paddledetection
|
modify doc of ppyolo, test=document_fix (#3748)
|
499,301 |
23.10.2021 00:02:04
| -28,800 |
9e55e67195ae6872c3b1742a7281765eddeef4ce
|
update export config
|
[
{
"change_type": "MODIFY",
"old_path": "configs/faster_rcnn/_base_/faster_rcnn_swin_reader.yml",
"new_path": "configs/faster_rcnn/_base_/faster_rcnn_swin_reader.yml",
"diff": "@@ -29,9 +29,11 @@ EvalReader:\nTestReader:\n+ inputs_def:\n+ image_shape: [1, 3, 800, 1344]\nsample_transforms:\n- Decode: {}\n- - Resize: {interp: 2, target_size: [800, 1333], keep_ratio: True}\n+ - Resize: {interp: 2, target_size: [800, 1344], keep_ratio: True}\n- NormalizeImage: {is_scale: true, mean: [0.485,0.456,0.406], std: [0.229, 0.224,0.225]}\n- Permute: {}\nbatch_transforms:\n"
}
] |
Python
|
Apache License 2.0
|
paddlepaddle/paddledetection
|
update export config (#4362)
|
499,304 |
25.10.2021 17:14:02
| -28,800 |
1fa998be68f98cb5be46f8d6c5559b6239fffc54
|
update PicoDet readme
|
[
{
"change_type": "RENAME",
"old_path": "configs/picodet/more_config/picodet_lcnet_416_coco.yml",
"new_path": "configs/picodet/more_config/picodet_lcnet_1_5x_416_coco.yml",
"diff": "@@ -6,8 +6,8 @@ _BASE_: [\n'../_base_/picodet_416_reader.yml',\n]\n-pretrain_weights: https://paddledet.bj.bcebos.com/models/pretrained/LCNet_x1_0_pretrained.pdparams\n-weights: output/picodet_lcnet_416_coco/model_final\n+pretrain_weights: https://paddledet.bj.bcebos.com/models/pretrained/LCNet_x1_5_pretrained.pdparams\n+weights: output/picodet_lcnet_1_5x_416_coco/model_final\nfind_unused_parameters: True\nuse_ema: true\ncycle_epoch: 40\n@@ -19,19 +19,5 @@ PicoDet:\nhead: PicoHead\nLCNet:\n- scale: 1.0\n+ scale: 1.5\nfeature_maps: [3, 4, 5]\n-\n-CSPPAN:\n- out_channels: 96\n-\n-PicoHead:\n- conv_feat:\n- name: PicoFeat\n- feat_in: 96\n- feat_out: 96\n- num_convs: 2\n- num_fpn_stride: 4\n- norm_type: bn\n- share_cls_reg: True\n- feat_in_chan: 96\n"
},
{
"change_type": "RENAME",
"old_path": "configs/picodet/more_config/picodet_mobilenetv3_416_coco.yml",
"new_path": "configs/picodet/more_config/picodet_mobilenetv3_large_1x_416_coco.yml",
"diff": "@@ -7,11 +7,12 @@ _BASE_: [\n]\npretrain_weights: https://paddledet.bj.bcebos.com/models/pretrained/MobileNetV3_large_x1_0_ssld_pretrained.pdparams\n-weights: output/picodet_mobilenetv3_416_coco/model_final\n+weights: output/picodet_mobilenetv3_large_1x_416_coco/model_final\nfind_unused_parameters: True\nuse_ema: true\ncycle_epoch: 40\nsnapshot_epoch: 10\n+epoch: 180\nPicoDet:\nbackbone: MobileNetV3\n"
},
{
"change_type": "RENAME",
"old_path": "configs/picodet/more_config/picodet_shufflenetv2_416_coco.yml",
"new_path": "configs/picodet/more_config/picodet_shufflenetv2_1x_416_coco.yml",
"diff": "@@ -7,7 +7,7 @@ _BASE_: [\n]\npretrain_weights: https://paddledet.bj.bcebos.com/models/pretrained/ShuffleNetV2_x1_0_pretrained.pdparams\n-weights: output/picodet_shufflenetv2_416_coco/model_final\n+weights: output/picodet_shufflenetv2_1x_416_coco/model_final\nfind_unused_parameters: True\nuse_ema: true\ncycle_epoch: 40\n"
},
{
"change_type": "MODIFY",
"old_path": "configs/picodet/picodet_l_320_coco.yml",
"new_path": "configs/picodet/picodet_l_320_coco.yml",
"diff": "@@ -12,6 +12,7 @@ find_unused_parameters: True\nuse_ema: true\ncycle_epoch: 40\nsnapshot_epoch: 10\n+epoch: 250\nESNet:\nscale: 1.25\n"
},
{
"change_type": "MODIFY",
"old_path": "configs/picodet/picodet_l_416_coco.yml",
"new_path": "configs/picodet/picodet_l_416_coco.yml",
"diff": "@@ -12,6 +12,7 @@ find_unused_parameters: True\nuse_ema: true\ncycle_epoch: 40\nsnapshot_epoch: 10\n+epoch: 250\nESNet:\nscale: 1.25\n"
},
{
"change_type": "MODIFY",
"old_path": "configs/picodet/picodet_l_640_coco.yml",
"new_path": "configs/picodet/picodet_l_640_coco.yml",
"diff": "@@ -12,6 +12,7 @@ find_unused_parameters: True\nuse_ema: true\ncycle_epoch: 40\nsnapshot_epoch: 10\n+epoch: 250\nESNet:\nscale: 1.25\n"
}
] |
Python
|
Apache License 2.0
|
paddlepaddle/paddledetection
|
update PicoDet readme (#4369)
|
499,339 |
26.10.2021 16:14:32
| -28,800 |
26ae0baca0ddbfd8ee4810b3a89cebe603e4f424
|
[dev] fix tood negative training
|
[
{
"change_type": "MODIFY",
"old_path": "ppdet/modeling/assigners/atss_assigner.py",
"new_path": "ppdet/modeling/assigners/atss_assigner.py",
"diff": "@@ -114,6 +114,14 @@ class ATSSAssigner(nn.Layer):\nnum_anchors, _ = anchor_bboxes.shape\nbatch_size, num_max_boxes, _ = gt_bboxes.shape\n+ # negative batch\n+ if num_max_boxes == 0:\n+ assigned_labels = paddle.full([batch_size, num_anchors], bg_index)\n+ assigned_bboxes = paddle.zeros([batch_size, num_anchors, 4])\n+ assigned_scores = paddle.zeros(\n+ [batch_size, num_anchors, self.num_classes])\n+ return assigned_labels, assigned_bboxes, assigned_scores\n+\n# 1. compute iou between gt and anchor bbox, [B, n, L]\nious = iou_similarity(gt_bboxes.reshape([-1, 4]), anchor_bboxes)\nious = ious.reshape([batch_size, -1, num_anchors])\n"
},
{
"change_type": "MODIFY",
"old_path": "ppdet/modeling/assigners/task_aligned_assigner.py",
"new_path": "ppdet/modeling/assigners/task_aligned_assigner.py",
"diff": "@@ -78,6 +78,14 @@ class TaskAlignedAssigner(nn.Layer):\nbatch_size, num_anchors, num_classes = pred_scores.shape\n_, num_max_boxes, _ = gt_bboxes.shape\n+ # negative batch\n+ if num_max_boxes == 0:\n+ assigned_labels = paddle.full([batch_size, num_anchors], bg_index)\n+ assigned_bboxes = paddle.zeros([batch_size, num_anchors, 4])\n+ assigned_scores = paddle.zeros(\n+ [batch_size, num_anchors, num_classes])\n+ return assigned_labels, assigned_bboxes, assigned_scores\n+\n# compute iou between gt and pred bbox, [B, n, L]\nious = iou_similarity(gt_bboxes, pred_bboxes)\n# gather pred bboxes class score\n"
}
] |
Python
|
Apache License 2.0
|
paddlepaddle/paddledetection
|
[dev] fix tood negative training (#4371)
|
499,304 |
26.10.2021 17:13:46
| -28,800 |
35a93b11cfc09136a9e8b630bfe514f5a59d5f4d
|
fix quant & ptq export
|
[
{
"change_type": "MODIFY",
"old_path": "configs/picodet/README.md",
"new_path": "configs/picodet/README.md",
"diff": "@@ -23,13 +23,13 @@ We developed a series of lightweight models, which named `PicoDet`. Because of i\n| Model | Input size | mAP<sup>val<br>0.5:0.95 | mAP<sup>val<br>0.5 | Params<br><sup>(M) | FLOPS<br><sup>(G) | Latency<br><sup>(ms) | download | config |\n| :------------------------ | :-------: | :------: | :---: | :---: | :---: | :------------: | :-------------------------------------------------: | :-----: |\n-| PicoDet-S | 320*320 | 27.1 | 41.4 | 0.99 | 0.73 | -- | [model](https://paddledet.bj.bcebos.com/models/picodet_s_320_coco.pdparams) | [log](https://paddledet.bj.bcebos.com/logs/train_picodet_s_320_coco.log) | [config](https://github.com/PaddlePaddle/PaddleDetection/tree/develop/configs/picodet/picodet_s_320_coco.yml) |\n-| PicoDet-S | 416*416 | 30.6 | 45.5 | 0.99 | 1.24 | -- | [model](https://paddledet.bj.bcebos.com/models/picodet_s_416_coco.pdparams) | [log](https://paddledet.bj.bcebos.com/logs/train_picodet_s_416_coco.log) | [config](https://github.com/PaddlePaddle/PaddleDetection/tree/develop/configs/picodet/picodet_s_416_coco.yml) |\n-| PicoDet-M | 320*320 | 30.9 | 45.7 | 2.15 | 1.48 | -- | [model](https://paddledet.bj.bcebos.com/models/picodet_m_320_coco.pdparams) | [log](https://paddledet.bj.bcebos.com/logs/train_picodet_m_320_coco.log) | [config](https://github.com/PaddlePaddle/PaddleDetection/tree/develop/configs/picodet/picodet_m_320_coco.yml) |\n-| PicoDet-M | 416*416 | 34.3 | 49.8 | 2.15 | 2.50 | -- | [model](https://paddledet.bj.bcebos.com/models/picodet_m_416_coco.pdparams) | [log](https://paddledet.bj.bcebos.com/logs/train_picodet_m_416_coco.log) | [config](https://github.com/PaddlePaddle/PaddleDetection/tree/develop/configs/picodet/picodet_m_416_coco.yml) |\n-| PicoDet-L | 320*320 | 32.6 | 47.9 | 3.24 | 2.18 | -- | [model](https://paddledet.bj.bcebos.com/models/picodet_l_320_coco.pdparams) | [log](https://paddledet.bj.bcebos.com/logs/train_picodet_l_320_coco.log) | [config](https://github.com/PaddlePaddle/PaddleDetection/tree/develop/configs/picodet/picodet_l_320_coco.yml) |\n-| PicoDet-L | 416*416 | 35.9 | 51.7 | 3.24 | 3.69 | -- | [model](https://paddledet.bj.bcebos.com/models/picodet_l_416_coco.pdparams) | [log](https://paddledet.bj.bcebos.com/logs/train_picodet_l_416_coco.log) | [config](https://github.com/PaddlePaddle/PaddleDetection/tree/develop/configs/picodet/picodet_l_416_coco.yml) |\n-| PicoDet-L | 640*640 | 40.3 | 57.1 | 3.24 | 8.74 | -- | [model](https://paddledet.bj.bcebos.com/models/picodet_l_640_coco.pdparams) | [log](https://paddledet.bj.bcebos.com/logs/train_picodet_l_640_coco.log) | [config](https://github.com/PaddlePaddle/PaddleDetection/tree/develop/configs/picodet/picodet_l_640_coco.yml) |\n+| PicoDet-S | 320*320 | 27.1 | 41.4 | 0.99 | 0.73 | 7.78 | [model](https://paddledet.bj.bcebos.com/models/picodet_s_320_coco.pdparams) | [log](https://paddledet.bj.bcebos.com/logs/train_picodet_s_320_coco.log) | [config](https://github.com/PaddlePaddle/PaddleDetection/tree/develop/configs/picodet/picodet_s_320_coco.yml) |\n+| PicoDet-S | 416*416 | 30.6 | 45.5 | 0.99 | 1.24 | 11.84 | [model](https://paddledet.bj.bcebos.com/models/picodet_s_416_coco.pdparams) | [log](https://paddledet.bj.bcebos.com/logs/train_picodet_s_416_coco.log) | [config](https://github.com/PaddlePaddle/PaddleDetection/tree/develop/configs/picodet/picodet_s_416_coco.yml) |\n+| PicoDet-M | 320*320 | 30.9 | 45.7 | 2.15 | 1.48 | 10.56 | [model](https://paddledet.bj.bcebos.com/models/picodet_m_320_coco.pdparams) | 
[log](https://paddledet.bj.bcebos.com/logs/train_picodet_m_320_coco.log) | [config](https://github.com/PaddlePaddle/PaddleDetection/tree/develop/configs/picodet/picodet_m_320_coco.yml) |\n+| PicoDet-M | 416*416 | 34.3 | 49.8 | 2.15 | 2.50 | 15.87 | [model](https://paddledet.bj.bcebos.com/models/picodet_m_416_coco.pdparams) | [log](https://paddledet.bj.bcebos.com/logs/train_picodet_m_416_coco.log) | [config](https://github.com/PaddlePaddle/PaddleDetection/tree/develop/configs/picodet/picodet_m_416_coco.yml) |\n+| PicoDet-L | 320*320 | 32.6 | 47.9 | 3.24 | 2.18 | 12.82 | [model](https://paddledet.bj.bcebos.com/models/picodet_l_320_coco.pdparams) | [log](https://paddledet.bj.bcebos.com/logs/train_picodet_l_320_coco.log) | [config](https://github.com/PaddlePaddle/PaddleDetection/tree/develop/configs/picodet/picodet_l_320_coco.yml) |\n+| PicoDet-L | 416*416 | 35.9 | 51.7 | 3.24 | 3.69 | 19.42 | [model](https://paddledet.bj.bcebos.com/models/picodet_l_416_coco.pdparams) | [log](https://paddledet.bj.bcebos.com/logs/train_picodet_l_416_coco.log) | [config](https://github.com/PaddlePaddle/PaddleDetection/tree/develop/configs/picodet/picodet_l_416_coco.yml) |\n+| PicoDet-L | 640*640 | 40.3 | 57.1 | 3.24 | 8.74 | 41.52 | [model](https://paddledet.bj.bcebos.com/models/picodet_l_640_coco.pdparams) | [log](https://paddledet.bj.bcebos.com/logs/train_picodet_l_640_coco.log) | [config](https://github.com/PaddlePaddle/PaddleDetection/tree/develop/configs/picodet/picodet_l_640_coco.yml) |\n<details>\n@@ -45,9 +45,9 @@ We developed a series of lightweight models, which named `PicoDet`. Because of i\n| Model | Input size | mAP<sup>val<br>0.5:0.95 | mAP<sup>val<br>0.5 | Params<br><sup>(M) | FLOPS<br><sup>(G) | Latency<br><sup>(ms) | download | config |\n| :------------------------ | :-------: | :------: | :---: | :---: | :---: | :------------: | :-------------------------------------------------: | :-----: |\n-| PicoDet-Shufflenetv2 1x | 416*416 | 30.0 | 44.6 | 1.17 | 1.53 | -- | [model](https://paddledet.bj.bcebos.com/models/picodet_shufflenetv2_1x_416_coco.pdparams) | [log](https://paddledet.bj.bcebos.com/logs/train_picodet_shufflenetv2_1x_416_coco.log) | [config](https://github.com/PaddlePaddle/PaddleDetection/tree/develop/configs/picodet/more_config/picodet_shufflenetv2_1x_416_coco.yml) |\n-| PicoDet-MobileNetv3-large 1x | 416*416 | 35.6 | 52.0 | 3.55 | 2.80 | -- | [model](https://paddledet.bj.bcebos.com/models/picodet_mobilenetv3_large_1x_416_coco.pdparams) | [log](https://paddledet.bj.bcebos.com/logs/train_picodet_mobilenetv3_large_1x_416_coco.log) | [config](https://github.com/PaddlePaddle/PaddleDetection/tree/develop/configs/picodet/more_config/picodet_mobilenetv3_large_1x_416_coco.yml) |\n-| PicoDet-LCNet 1.5x | 416*416 | 36.3 | 52.2 | 3.10 | 3.85 | -- | [model](https://paddledet.bj.bcebos.com/models/picodet_lcnet_1_5x_416_coco.pdparams) | [log](https://paddledet.bj.bcebos.com/logs/train_picodet_lcnet_1_5x_416_coco.log) | [config](https://github.com/PaddlePaddle/PaddleDetection/tree/develop/configs/picodet/more_config/picodet_lcnet_1_5x_416_coco.yml) |\n+| PicoDet-Shufflenetv2 1x | 416*416 | 30.0 | 44.6 | 1.17 | 1.53 | 14.76 | [model](https://paddledet.bj.bcebos.com/models/picodet_shufflenetv2_1x_416_coco.pdparams) | [log](https://paddledet.bj.bcebos.com/logs/train_picodet_shufflenetv2_1x_416_coco.log) | [config](https://github.com/PaddlePaddle/PaddleDetection/tree/develop/configs/picodet/more_config/picodet_shufflenetv2_1x_416_coco.yml) |\n+| PicoDet-MobileNetv3-large 1x | 416*416 | 35.6 | 52.0 | 3.55 
| 2.80 | 18.87 | [model](https://paddledet.bj.bcebos.com/models/picodet_mobilenetv3_large_1x_416_coco.pdparams) | [log](https://paddledet.bj.bcebos.com/logs/train_picodet_mobilenetv3_large_1x_416_coco.log) | [config](https://github.com/PaddlePaddle/PaddleDetection/tree/develop/configs/picodet/more_config/picodet_mobilenetv3_large_1x_416_coco.yml) |\n+| PicoDet-LCNet 1.5x | 416*416 | 36.3 | 52.2 | 3.10 | 3.85 | 19.75 | [model](https://paddledet.bj.bcebos.com/models/picodet_lcnet_1_5x_416_coco.pdparams) | [log](https://paddledet.bj.bcebos.com/logs/train_picodet_lcnet_1_5x_416_coco.log) | [config](https://github.com/PaddlePaddle/PaddleDetection/tree/develop/configs/picodet/more_config/picodet_lcnet_1_5x_416_coco.yml) |\n## Deployment\n"
},
{
"change_type": "MODIFY",
"old_path": "ppdet/engine/trainer.py",
"new_path": "ppdet/engine/trainer.py",
"diff": "@@ -597,6 +597,13 @@ class Trainer(object):\nstatic_model = None\npruned_input_spec = input_spec\n+ # TODO: Hard code, delete it when support prune input_spec.\n+ if self.cfg.architecture == 'PicoDet':\n+ pruned_input_spec = [{\n+ \"image\": InputSpec(\n+ shape=image_shape, name='image')\n+ }]\n+\nreturn static_model, pruned_input_spec\ndef export(self, output_dir='output_inference'):\n"
},
{
"change_type": "MODIFY",
"old_path": "ppdet/slim/__init__.py",
"new_path": "ppdet/slim/__init__.py",
"diff": "@@ -63,6 +63,9 @@ def build_slim_model(cfg, slim_cfg, mode='train'):\nload_pretrain_weight(model, cfg.pretrain_weights)\nslim = create(cfg.slim)\ncfg['slim_type'] = cfg.slim\n+ # TODO: fix quant export model in framework.\n+ if mode == 'test' and slim_load_cfg['slim'] == 'QAT':\n+ slim.quant_config['activation_preprocess_type'] = None\ncfg['model'] = slim(model)\ncfg['slim'] = slim\nif mode != 'train':\n"
}
] |
Python
|
Apache License 2.0
|
paddlepaddle/paddledetection
|
fix quant & ptq export (#4370)
|
499,339 |
28.10.2021 20:23:26
| -28,800 |
9737390102600d2e9fd440553d857ebdc6d1698f
|
[depoly] fix inconsistency between cpp and python
|
[
{
"change_type": "MODIFY",
"old_path": "deploy/cpp/src/main.cc",
"new_path": "deploy/cpp/src/main.cc",
"diff": "@@ -128,27 +128,36 @@ static void MkDirs(const std::string& path) {\n}\nvoid PredictVideo(const std::string& video_path,\n- PaddleDetection::ObjectDetector* det) {\n+ PaddleDetection::ObjectDetector* det,\n+ const std::string& output_dir = \"output\") {\n// Open video\ncv::VideoCapture capture;\n+ std::string video_out_name = \"output.mp4\";\nif (FLAGS_camera_id != -1){\ncapture.open(FLAGS_camera_id);\n}else{\ncapture.open(video_path.c_str());\n+ video_out_name = video_path.substr(video_path.find_last_of(OS_PATH_SEP) + 1);\n}\nif (!capture.isOpened()) {\nprintf(\"can not open video : %s\\n\", video_path.c_str());\nreturn;\n}\n- // Get Video info : resolution, fps\n+ // Get Video info : resolution, fps, frame count\nint video_width = static_cast<int>(capture.get(CV_CAP_PROP_FRAME_WIDTH));\nint video_height = static_cast<int>(capture.get(CV_CAP_PROP_FRAME_HEIGHT));\nint video_fps = static_cast<int>(capture.get(CV_CAP_PROP_FPS));\n+ int video_frame_count = static_cast<int>(capture.get(CV_CAP_PROP_FRAME_COUNT));\n+ printf(\"fps: %d, frame_count: %d\\n\", video_fps, video_frame_count);\n// Create VideoWriter for output\ncv::VideoWriter video_out;\n- std::string video_out_path = \"output.mp4\";\n+ std::string video_out_path(output_dir);\n+ if (output_dir.rfind(OS_PATH_SEP) != output_dir.size() - 1) {\n+ video_out_path += OS_PATH_SEP;\n+ }\n+ video_out_path += video_out_name;\nvideo_out.open(video_out_path.c_str(),\n0x00000021,\nvideo_fps,\n@@ -166,7 +175,7 @@ void PredictVideo(const std::string& video_path,\nauto colormap = PaddleDetection::GenerateColorMap(labels.size());\n// Capture all frames and do inference\ncv::Mat frame;\n- int frame_id = 0;\n+ int frame_id = 1;\nbool is_rbox = false;\nwhile (capture.read(frame)) {\nif (frame.empty()) {\n@@ -174,8 +183,14 @@ void PredictVideo(const std::string& video_path,\n}\nstd::vector<cv::Mat> imgs;\nimgs.push_back(frame);\n- det->Predict(imgs, 0.5, 0, 1, &result, &bbox_num, &det_times);\n+ printf(\"detect frame: %d\\n\", frame_id);\n+ det->Predict(imgs, FLAGS_threshold, 0, 1, &result, &bbox_num, &det_times);\n+ std::vector<PaddleDetection::ObjectResult> out_result;\nfor (const auto& item : result) {\n+ if (item.confidence < FLAGS_threshold || item.class_id == -1) {\n+ continue;\n+ }\n+ out_result.push_back(item);\nif (item.rect.size() > 6){\nis_rbox = true;\nprintf(\"class=%d confidence=%.4f rect=[%d %d %d %d %d %d %d %d]\\n\",\n@@ -202,7 +217,7 @@ void PredictVideo(const std::string& video_path,\n}\ncv::Mat out_im = PaddleDetection::VisualizeResult(\n- frame, result, labels, colormap, is_rbox);\n+ frame, out_result, labels, colormap, is_rbox);\nvideo_out.write(out_im);\nframe_id += 1;\n@@ -337,12 +352,12 @@ int main(int argc, char** argv) {\nFLAGS_trt_min_shape, FLAGS_trt_max_shape, FLAGS_trt_opt_shape,\nFLAGS_trt_calib_mode);\n// Do inference on input video or image\n- if (!FLAGS_video_file.empty() || FLAGS_camera_id != -1) {\n- PredictVideo(FLAGS_video_file, &det);\n- } else if (!FLAGS_image_file.empty() || !FLAGS_image_dir.empty()) {\nif (!PathExists(FLAGS_output_dir)) {\nMkDirs(FLAGS_output_dir);\n}\n+ if (!FLAGS_video_file.empty() || FLAGS_camera_id != -1) {\n+ PredictVideo(FLAGS_video_file, &det, FLAGS_output_dir);\n+ } else if (!FLAGS_image_file.empty() || !FLAGS_image_dir.empty()) {\nstd::vector<std::string> all_img_paths;\nstd::vector<cv::String> cv_all_img_paths;\nif (!FLAGS_image_file.empty()) {\n"
},
{
"change_type": "MODIFY",
"old_path": "deploy/cpp/src/main_jde.cc",
"new_path": "deploy/cpp/src/main_jde.cc",
"diff": "@@ -128,27 +128,36 @@ static void MkDirs(const std::string& path) {\n}\nvoid PredictVideo(const std::string& video_path,\n- PaddleDetection::JDEDetector* mot) {\n+ PaddleDetection::JDEDetector* mot,\n+ const std::string& output_dir = \"output\") {\n// Open video\ncv::VideoCapture capture;\n+ std::string video_out_name = \"output.mp4\";\nif (FLAGS_camera_id != -1){\ncapture.open(FLAGS_camera_id);\n}else{\ncapture.open(video_path.c_str());\n+ video_out_name = video_path.substr(video_path.find_last_of(OS_PATH_SEP) + 1);\n}\nif (!capture.isOpened()) {\nprintf(\"can not open video : %s\\n\", video_path.c_str());\nreturn;\n}\n- // Get Video info : resolution, fps\n+ // Get Video info : resolution, fps, frame count\nint video_width = static_cast<int>(capture.get(CV_CAP_PROP_FRAME_WIDTH));\nint video_height = static_cast<int>(capture.get(CV_CAP_PROP_FRAME_HEIGHT));\nint video_fps = static_cast<int>(capture.get(CV_CAP_PROP_FPS));\n+ int video_frame_count = static_cast<int>(capture.get(CV_CAP_PROP_FRAME_COUNT));\n+ printf(\"fps: %d, frame_count: %d\\n\", video_fps, video_frame_count);\n// Create VideoWriter for output\ncv::VideoWriter video_out;\n- std::string video_out_path = \"mot_output.mp4\";\n+ std::string video_out_path(output_dir);\n+ if (output_dir.rfind(OS_PATH_SEP) != output_dir.size() - 1) {\n+ video_out_path += OS_PATH_SEP;\n+ }\n+ video_out_path += video_out_name;\nvideo_out.open(video_out_path.c_str(),\n0x00000021,\nvideo_fps,\n@@ -164,14 +173,15 @@ void PredictVideo(const std::string& video_path,\ndouble times;\n// Capture all frames and do inference\ncv::Mat frame;\n- int frame_id = 0;\n+ int frame_id = 1;\nwhile (capture.read(frame)) {\nif (frame.empty()) {\nbreak;\n}\nstd::vector<cv::Mat> imgs;\nimgs.push_back(frame);\n- mot->Predict(imgs, 0.5, 0, 1, &result, &det_times);\n+ printf(\"detect frame: %d\\n\", frame_id);\n+ mot->Predict(imgs, FLAGS_threshold, 0, 1, &result, &det_times);\nframe_id += 1;\ntimes = std::accumulate(det_times.begin(), det_times.end(), 0) / frame_id;\n@@ -215,7 +225,9 @@ int main(int argc, char** argv) {\nFLAGS_cpu_threads, FLAGS_run_mode, FLAGS_batch_size,FLAGS_gpu_id,\nFLAGS_trt_min_shape, FLAGS_trt_max_shape, FLAGS_trt_opt_shape,\nFLAGS_trt_calib_mode);\n-\n- PredictVideo(FLAGS_video_file, &mot);\n+ if (!PathExists(FLAGS_output_dir)) {\n+ MkDirs(FLAGS_output_dir);\n+ }\n+ PredictVideo(FLAGS_video_file, &mot, FLAGS_output_dir);\nreturn 0;\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "deploy/cpp/src/main_keypoint.cc",
"new_path": "deploy/cpp/src/main_keypoint.cc",
"diff": "@@ -138,27 +138,36 @@ static void MkDirs(const std::string& path) {\nvoid PredictVideo(const std::string& video_path,\nPaddleDetection::ObjectDetector* det,\n- PaddleDetection::KeyPointDetector* keypoint) {\n+ PaddleDetection::KeyPointDetector* keypoint,\n+ const std::string& output_dir = \"output\") {\n// Open video\ncv::VideoCapture capture;\n+ std::string video_out_name = \"output.mp4\";\nif (FLAGS_camera_id != -1){\ncapture.open(FLAGS_camera_id);\n}else{\ncapture.open(video_path.c_str());\n+ video_out_name = video_path.substr(video_path.find_last_of(OS_PATH_SEP) + 1);\n}\nif (!capture.isOpened()) {\nprintf(\"can not open video : %s\\n\", video_path.c_str());\nreturn;\n}\n- // Get Video info : resolution, fps\n+ // Get Video info : resolution, fps, frame count\nint video_width = static_cast<int>(capture.get(CV_CAP_PROP_FRAME_WIDTH));\nint video_height = static_cast<int>(capture.get(CV_CAP_PROP_FRAME_HEIGHT));\nint video_fps = static_cast<int>(capture.get(CV_CAP_PROP_FPS));\n+ int video_frame_count = static_cast<int>(capture.get(CV_CAP_PROP_FRAME_COUNT));\n+ printf(\"fps: %d, frame_count: %d\\n\", video_fps, video_frame_count);\n// Create VideoWriter for output\ncv::VideoWriter video_out;\n- std::string video_out_path = \"output.mp4\";\n+ std::string video_out_path(output_dir);\n+ if (output_dir.rfind(OS_PATH_SEP) != output_dir.size() - 1) {\n+ video_out_path += OS_PATH_SEP;\n+ }\n+ video_out_path += video_out_name;\nvideo_out.open(video_out_path.c_str(),\n0x00000021,\nvideo_fps,\n@@ -184,7 +193,7 @@ void PredictVideo(const std::string& video_path,\nstd::vector<int> colormap_kpts = PaddleDetection::GenerateColorMap(20);\n// Capture all frames and do inference\ncv::Mat frame;\n- int frame_id = 0;\n+ int frame_id = 1;\nbool is_rbox = false;\nwhile (capture.read(frame)) {\nif (frame.empty()) {\n@@ -192,8 +201,14 @@ void PredictVideo(const std::string& video_path,\n}\nstd::vector<cv::Mat> imgs;\nimgs.push_back(frame);\n- det->Predict(imgs, 0.5, 0, 1, &result, &bbox_num, &det_times);\n+ printf(\"detect frame: %d\\n\", frame_id);\n+ det->Predict(imgs, FLAGS_threshold, 0, 1, &result, &bbox_num, &det_times);\n+ std::vector<PaddleDetection::ObjectResult> out_result;\nfor (const auto& item : result) {\n+ if (item.confidence < FLAGS_threshold || item.class_id == -1) {\n+ continue;\n+ }\n+ out_result.push_back(item);\nif (item.rect.size() > 6){\nis_rbox = true;\nprintf(\"class=%d confidence=%.4f rect=[%d %d %d %d %d %d %d %d]\\n\",\n@@ -221,9 +236,9 @@ void PredictVideo(const std::string& video_path,\nif(keypoint)\n{\n- int imsize = result.size();\n+ int imsize = out_result.size();\nfor (int i=0; i<imsize; i++){\n- auto item = result[i];\n+ auto item = out_result[i];\ncv::Mat crop_img;\nstd::vector<double> keypoint_times;\nstd::vector<int> rect = {item.rect[0], item.rect[1], item.rect[2], item.rect[3]};\n@@ -239,7 +254,7 @@ void PredictVideo(const std::string& video_path,\nif (imgs_kpts.size()==FLAGS_batch_size_keypoint || ((i==imsize-1)&&!imgs_kpts.empty()))\n{\n- keypoint->Predict(imgs_kpts, center_bs, scale_bs, 0.5, 0, 1, &result_kpts, &keypoint_times);\n+ keypoint->Predict(imgs_kpts, center_bs, scale_bs, FLAGS_threshold, 0, 1, &result_kpts, &keypoint_times);\nimgs_kpts.clear();\ncenter_bs.clear();\nscale_bs.clear();\n@@ -251,7 +266,7 @@ void PredictVideo(const std::string& video_path,\nelse{\n// Visualization result\ncv::Mat out_im = PaddleDetection::VisualizeResult(\n- frame, result, labels, colormap, is_rbox);\n+ frame, out_result, labels, colormap, 
is_rbox);\nvideo_out.write(out_im);\n}\n@@ -450,12 +465,12 @@ int main(int argc, char** argv) {\nFLAGS_trt_calib_mode, FLAGS_use_dark);\n}\n// Do inference on input video or image\n- if (!FLAGS_video_file.empty() || FLAGS_camera_id != -1) {\n- PredictVideo(FLAGS_video_file, &det, keypoint);\n- } else if (!FLAGS_image_file.empty() || !FLAGS_image_dir.empty()) {\nif (!PathExists(FLAGS_output_dir)) {\nMkDirs(FLAGS_output_dir);\n}\n+ if (!FLAGS_video_file.empty() || FLAGS_camera_id != -1) {\n+ PredictVideo(FLAGS_video_file, &det, keypoint, FLAGS_output_dir);\n+ } else if (!FLAGS_image_file.empty() || !FLAGS_image_dir.empty()) {\nstd::vector<std::string> all_img_paths;\nstd::vector<cv::String> cv_all_img_paths;\nif (!FLAGS_image_file.empty()) {\n"
},
{
"change_type": "MODIFY",
"old_path": "deploy/python/det_keypoint_unite_infer.py",
"new_path": "deploy/python/det_keypoint_unite_infer.py",
"diff": "@@ -133,22 +133,24 @@ def topdown_unite_predict_video(detector,\ntopdown_keypoint_detector,\ncamera_id,\nkeypoint_batch_size=1):\n+ video_name = 'output.mp4'\nif camera_id != -1:\ncapture = cv2.VideoCapture(camera_id)\n- video_name = 'output.mp4'\nelse:\ncapture = cv2.VideoCapture(FLAGS.video_file)\nvideo_name = os.path.splitext(os.path.basename(FLAGS.video_file))[\n0] + '.mp4'\n- fps = 30\n+ # Get Video info : resolution, fps, frame count\nwidth = int(capture.get(cv2.CAP_PROP_FRAME_WIDTH))\nheight = int(capture.get(cv2.CAP_PROP_FRAME_HEIGHT))\n- # yapf: disable\n- fourcc = cv2.VideoWriter_fourcc(*'mp4v')\n- # yapf: enable\n+ fps = int(capture.get(cv2.CAP_PROP_FPS))\n+ frame_count = int(capture.get(cv2.CAP_PROP_FRAME_COUNT))\n+ print(\"fps: %d, frame_count: %d\" % (fps, frame_count))\n+\nif not os.path.exists(FLAGS.output_dir):\nos.makedirs(FLAGS.output_dir)\nout_path = os.path.join(FLAGS.output_dir, video_name)\n+ fourcc = cv2.VideoWriter_fourcc(*'mp4v')\nwriter = cv2.VideoWriter(out_path, fourcc, fps, (width, height))\nindex = 0\nwhile (1):\n"
},
{
"change_type": "MODIFY",
"old_path": "deploy/python/infer.py",
"new_path": "deploy/python/infer.py",
"diff": "@@ -664,23 +664,23 @@ def predict_image(detector, image_list, batch_size=1):\ndef predict_video(detector, camera_id):\n+ video_out_name = 'output.mp4'\nif camera_id != -1:\ncapture = cv2.VideoCapture(camera_id)\n- video_name = 'output.mp4'\nelse:\ncapture = cv2.VideoCapture(FLAGS.video_file)\n- video_name = os.path.split(FLAGS.video_file)[-1]\n- fps = 30\n- frame_count = int(capture.get(cv2.CAP_PROP_FRAME_COUNT))\n- print('frame_count', frame_count)\n+ video_out_name = os.path.split(FLAGS.video_file)[-1]\n+ # Get Video info : resolution, fps, frame count\nwidth = int(capture.get(cv2.CAP_PROP_FRAME_WIDTH))\nheight = int(capture.get(cv2.CAP_PROP_FRAME_HEIGHT))\n- # yapf: disable\n- fourcc = cv2.VideoWriter_fourcc(*'mp4v')\n- # yapf: enable\n+ fps = int(capture.get(cv2.CAP_PROP_FPS))\n+ frame_count = int(capture.get(cv2.CAP_PROP_FRAME_COUNT))\n+ print(\"fps: %d, frame_count: %d\" % (fps, frame_count))\n+\nif not os.path.exists(FLAGS.output_dir):\nos.makedirs(FLAGS.output_dir)\n- out_path = os.path.join(FLAGS.output_dir, video_name)\n+ out_path = os.path.join(FLAGS.output_dir, video_out_name)\n+ fourcc = cv2.VideoWriter_fourcc(*'mp4v')\nwriter = cv2.VideoWriter(out_path, fourcc, fps, (width, height))\nindex = 1\nwhile (1):\n"
},
{
"change_type": "MODIFY",
"old_path": "deploy/python/keypoint_infer.py",
"new_path": "deploy/python/keypoint_infer.py",
"diff": "@@ -284,21 +284,23 @@ def predict_image(detector, image_list):\ndef predict_video(detector, camera_id):\n+ video_name = 'output.mp4'\nif camera_id != -1:\ncapture = cv2.VideoCapture(camera_id)\n- video_name = 'output.mp4'\nelse:\ncapture = cv2.VideoCapture(FLAGS.video_file)\nvideo_name = os.path.split(FLAGS.video_file)[-1]\n- fps = 30\n+ # Get Video info : resolution, fps, frame count\nwidth = int(capture.get(cv2.CAP_PROP_FRAME_WIDTH))\nheight = int(capture.get(cv2.CAP_PROP_FRAME_HEIGHT))\n- # yapf: disable\n- fourcc = cv2.VideoWriter_fourcc(*'mp4v')\n- # yapf: enable\n+ fps = int(capture.get(cv2.CAP_PROP_FPS))\n+ frame_count = int(capture.get(cv2.CAP_PROP_FRAME_COUNT))\n+ print(\"fps: %d, frame_count: %d\" % (fps, frame_count))\n+\nif not os.path.exists(FLAGS.output_dir):\nos.makedirs(FLAGS.output_dir)\nout_path = os.path.join(FLAGS.output_dir, video_name + '.mp4')\n+ fourcc = cv2.VideoWriter_fourcc(*'mp4v')\nwriter = cv2.VideoWriter(out_path, fourcc, fps, (width, height))\nindex = 1\nwhile (1):\n"
},
{
"change_type": "MODIFY",
"old_path": "deploy/python/mot_jde_infer.py",
"new_path": "deploy/python/mot_jde_infer.py",
"diff": "@@ -212,24 +212,24 @@ def predict_image(detector, image_list):\ndef predict_video(detector, camera_id):\n+ video_name = 'mot_output.mp4'\nif camera_id != -1:\ncapture = cv2.VideoCapture(camera_id)\n- video_name = 'mot_output.mp4'\nelse:\ncapture = cv2.VideoCapture(FLAGS.video_file)\nvideo_name = os.path.split(FLAGS.video_file)[-1]\n- fps = 30\n- frame_count = int(capture.get(cv2.CAP_PROP_FRAME_COUNT))\n- print('frame_count', frame_count)\n+ # Get Video info : resolution, fps, frame count\nwidth = int(capture.get(cv2.CAP_PROP_FRAME_WIDTH))\nheight = int(capture.get(cv2.CAP_PROP_FRAME_HEIGHT))\n- # yapf: disable\n- fourcc = cv2.VideoWriter_fourcc(*'mp4v')\n- # yapf: enable\n+ fps = int(capture.get(cv2.CAP_PROP_FPS))\n+ frame_count = int(capture.get(cv2.CAP_PROP_FRAME_COUNT))\n+ print(\"fps: %d, frame_count: %d\" % (fps, frame_count))\n+\nif not os.path.exists(FLAGS.output_dir):\nos.makedirs(FLAGS.output_dir)\nout_path = os.path.join(FLAGS.output_dir, video_name)\nif not FLAGS.save_images:\n+ fourcc = cv2.VideoWriter_fourcc(*'mp4v')\nwriter = cv2.VideoWriter(out_path, fourcc, fps, (width, height))\nframe_id = 0\ntimer = MOTTimer()\n"
},
{
"change_type": "MODIFY",
"old_path": "deploy/python/mot_keypoint_unite_infer.py",
"new_path": "deploy/python/mot_keypoint_unite_infer.py",
"diff": "@@ -126,18 +126,18 @@ def mot_keypoint_unite_predict_video(mot_model,\nelse:\ncapture = cv2.VideoCapture(FLAGS.video_file)\nvideo_name = os.path.split(FLAGS.video_file)[-1]\n- fps = 30\n- frame_count = int(capture.get(cv2.CAP_PROP_FRAME_COUNT))\n- print('frame_count', frame_count)\n+ # Get Video info : resolution, fps, frame count\nwidth = int(capture.get(cv2.CAP_PROP_FRAME_WIDTH))\nheight = int(capture.get(cv2.CAP_PROP_FRAME_HEIGHT))\n- # yapf: disable\n- fourcc = cv2.VideoWriter_fourcc(*'mp4v')\n- # yapf: enable\n+ fps = int(capture.get(cv2.CAP_PROP_FPS))\n+ frame_count = int(capture.get(cv2.CAP_PROP_FRAME_COUNT))\n+ print(\"fps: %d, frame_count: %d\" % (fps, frame_count))\n+\nif not os.path.exists(FLAGS.output_dir):\nos.makedirs(FLAGS.output_dir)\nout_path = os.path.join(FLAGS.output_dir, video_name)\nif not FLAGS.save_images:\n+ fourcc = cv2.VideoWriter_fourcc(*'mp4v')\nwriter = cv2.VideoWriter(out_path, fourcc, fps, (width, height))\nframe_id = 0\ntimer_mot = FPSTimer()\n"
},
{
"change_type": "MODIFY",
"old_path": "deploy/python/mot_sde_infer.py",
"new_path": "deploy/python/mot_sde_infer.py",
"diff": "@@ -355,18 +355,18 @@ def predict_video(detector, reid_model, camera_id):\nelse:\ncapture = cv2.VideoCapture(FLAGS.video_file)\nvideo_name = os.path.split(FLAGS.video_file)[-1]\n- fps = 30\n- frame_count = int(capture.get(cv2.CAP_PROP_FRAME_COUNT))\n- print('frame_count', frame_count)\n+ # Get Video info : resolution, fps, frame count\nwidth = int(capture.get(cv2.CAP_PROP_FRAME_WIDTH))\nheight = int(capture.get(cv2.CAP_PROP_FRAME_HEIGHT))\n- # yapf: disable\n- fourcc = cv2.VideoWriter_fourcc(*'mp4v')\n- # yapf: enable\n+ fps = int(capture.get(cv2.CAP_PROP_FPS))\n+ frame_count = int(capture.get(cv2.CAP_PROP_FRAME_COUNT))\n+ print(\"fps: %d, frame_count: %d\" % (fps, frame_count))\n+\nif not os.path.exists(FLAGS.output_dir):\nos.makedirs(FLAGS.output_dir)\nout_path = os.path.join(FLAGS.output_dir, video_name)\nif not FLAGS.save_images:\n+ fourcc = cv2.VideoWriter_fourcc(*'mp4v')\nwriter = cv2.VideoWriter(out_path, fourcc, fps, (width, height))\nframe_id = 0\ntimer = MOTTimer()\n"
}
] |
Python
|
Apache License 2.0
|
paddlepaddle/paddledetection
|
[deploy] fix inconsistency between cpp and python (#4351)
|
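The diffs in the record above bring the C++ and Python deploy pipelines into line: both now read fps and frame count from the capture instead of hard-coding fps=30, filter detections by the threshold flag before keypoint inference, and derive the output file name from the input video. Below is a minimal Python sketch of that shared pattern using only OpenCV; the function name, the default output directory, and the omitted detector call (left as a comment) are illustrative, not part of the commit.

```python
import os
import cv2

def predict_video(video_path, output_dir="output"):
    # Derive the output name from the input video, as the deploy scripts now do.
    video_out_name = os.path.split(video_path)[-1]
    capture = cv2.VideoCapture(video_path)

    # Read resolution, fps and frame count from the stream instead of assuming fps=30.
    width = int(capture.get(cv2.CAP_PROP_FRAME_WIDTH))
    height = int(capture.get(cv2.CAP_PROP_FRAME_HEIGHT))
    fps = int(capture.get(cv2.CAP_PROP_FPS))
    frame_count = int(capture.get(cv2.CAP_PROP_FRAME_COUNT))
    print("fps: %d, frame_count: %d" % (fps, frame_count))

    os.makedirs(output_dir, exist_ok=True)
    out_path = os.path.join(output_dir, video_out_name)
    fourcc = cv2.VideoWriter_fourcc(*"mp4v")
    writer = cv2.VideoWriter(out_path, fourcc, fps, (width, height))

    while True:
        ret, frame = capture.read()
        if not ret:
            break
        # Detection / keypoint inference and visualization would run here;
        # this sketch just writes the frame back unchanged.
        writer.write(frame)
    writer.release()
```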
499,348 |
28.10.2021 20:59:15
| -28,800 |
df98ac6150e021d0e3cca796b3ee49ad8f1982ea
|
update BenchmarkLog and fix some bug
|
[
{
"change_type": "MODIFY",
"old_path": "deploy/cpp/src/main_keypoint.cc",
"new_path": "deploy/cpp/src/main_keypoint.cc",
"diff": "@@ -79,17 +79,32 @@ void PrintBenchmarkLog(std::vector<double> det_time, int img_num){\nLOG(INFO) << \"cpu_math_library_num_threads: \" << FLAGS_cpu_threads;\nLOG(INFO) << \"----------------------- Data info -----------------------\";\nLOG(INFO) << \"batch_size: \" << FLAGS_batch_size;\n- LOG(INFO) << \"batch_size_keypoint: \" << FLAGS_batch_size_keypoint;\nLOG(INFO) << \"input_shape: \" << \"dynamic shape\";\nLOG(INFO) << \"----------------------- Model info -----------------------\";\nFLAGS_model_dir.erase(FLAGS_model_dir.find_last_not_of(\"/\") + 1);\n- LOG(INFO) << \"model_name: \" << FLAGS_model_dir.substr(FLAGS_model_dir.find_last_of('/') + 1);\n+ LOG(INFO) << \"model_name: \" << FLAGS_model_dir;\n+ LOG(INFO) << \"----------------------- Perf info ------------------------\";\n+ LOG(INFO) << \"Total number of predicted data: \" << img_num\n+ << \" and total time spent(ms): \"\n+ << std::accumulate(det_time.begin(), det_time.end(), 0.);\n+ img_num = std::max(1, img_num);\n+ LOG(INFO) << \"preproce_time(ms): \" << det_time[0] / img_num\n+ << \", inference_time(ms): \" << det_time[1] / img_num\n+ << \", postprocess_time(ms): \" << det_time[2] / img_num;\n+}\n+\n+void PrintKptsBenchmarkLog(std::vector<double> det_time, int img_num){\n+ LOG(INFO) << \"----------------------- Data info -----------------------\";\n+ LOG(INFO) << \"batch_size_keypoint: \" << FLAGS_batch_size_keypoint;\n+ LOG(INFO) << \"----------------------- Model info -----------------------\";\nFLAGS_model_dir_keypoint.erase(FLAGS_model_dir_keypoint.find_last_not_of(\"/\") + 1);\n- LOG(INFO) << \"model_name: \" << FLAGS_model_dir_keypoint.substr(FLAGS_model_dir_keypoint.find_last_of('/') + 1);\n+ LOG(INFO) << \"keypoint_model_name: \" << FLAGS_model_dir_keypoint;\nLOG(INFO) << \"----------------------- Perf info ------------------------\";\nLOG(INFO) << \"Total number of predicted data: \" << img_num\n<< \" and total time spent(ms): \"\n- << std::accumulate(det_time.begin(), det_time.end(), 0);\n+ << std::accumulate(det_time.begin(), det_time.end(), 0.);\n+ img_num = std::max(1, img_num);\n+ LOG(INFO) << \"Average time cost per person:\";\nLOG(INFO) << \"preproce_time(ms): \" << det_time[0] / img_num\n<< \", inference_time(ms): \" << det_time[1] / img_num\n<< \", postprocess_time(ms): \" << det_time[2] / img_num;\n@@ -424,7 +439,9 @@ void PredictImage(const std::vector<std::string> all_img_paths,\ndet_t[2] += det_times[2];\n}\nPrintBenchmarkLog(det_t, all_img_paths.size());\n- PrintBenchmarkLog(keypoint_t, kpts_imgs);\n+ if (keypoint) {\n+ PrintKptsBenchmarkLog(keypoint_t, kpts_imgs);\n+ }\n}\nint main(int argc, char** argv) {\n"
},
{
"change_type": "MODIFY",
"old_path": "deploy/lite/src/main.cc",
"new_path": "deploy/lite/src/main.cc",
"diff": "@@ -39,34 +39,47 @@ void PrintBenchmarkLog(std::vector<double> det_time, int img_num) {\n<< std::endl;\nstd::cout << \"batch_size_det: \" << RT_Config[\"batch_size_det\"].as<int>()\n<< std::endl;\n- std::cout << \"batch_size_keypoint: \"\n- << RT_Config[\"batch_size_keypoint\"].as<int>() << std::endl;\nstd::cout << \"----------------------- Model info -----------------------\"\n<< std::endl;\nRT_Config[\"model_dir_det\"].as<std::string>().erase(\nRT_Config[\"model_dir_det\"].as<std::string>().find_last_not_of(\"/\") + 1);\nstd::cout\n<< \"detection model_name: \"\n- << RT_Config[\"model_dir_det\"].as<std::string>().substr(\n- RT_Config[\"model_dir_det\"].as<std::string>().find_last_of('/') + 1)\n+ << RT_Config[\"model_dir_det\"].as<std::string>()\n+ << std::endl;\n+ std::cout << \"----------------------- Perf info ------------------------\"\n+ << std::endl;\n+ std::cout << \"Total number of predicted data: \" << img_num\n+ << \" and total time spent(ms): \"\n+ << std::accumulate(det_time.begin(), det_time.end(), 0.)\n+ << std::endl;\n+ img_num = std::max(1, img_num);\n+ std::cout << \"preproce_time(ms): \" << det_time[0] / img_num\n+ << \", inference_time(ms): \" << det_time[1] / img_num\n+ << \", postprocess_time(ms): \" << det_time[2] / img_num << std::endl;\n+}\n+\n+void PrintKptsBenchmarkLog(std::vector<double> det_time, int img_num){\n+ std::cout << \"----------------------- Data info -----------------------\"\n+ << std::endl;\n+ std::cout << \"batch_size_keypoint: \"\n+ << RT_Config[\"batch_size_keypoint\"].as<int>() << std::endl;\n+ std::cout << \"----------------------- Model info -----------------------\"\n<< std::endl;\nRT_Config[\"model_dir_keypoint\"].as<std::string>().erase(\nRT_Config[\"model_dir_keypoint\"].as<std::string>().find_last_not_of(\"/\") +\n1);\nstd::cout\n<< \"keypoint model_name: \"\n- << RT_Config[\"model_dir_keypoint\"].as<std::string>().substr(\n- RT_Config[\"model_dir_keypoint\"].as<std::string>().find_last_of(\n- '/') +\n- 1)\n- << std::endl;\n+ << RT_Config[\"model_dir_keypoint\"].as<std::string>() << std::endl;\nstd::cout << \"----------------------- Perf info ------------------------\"\n<< std::endl;\nstd::cout << \"Total number of predicted data: \" << img_num\n<< \" and total time spent(ms): \"\n- << std::accumulate(det_time.begin(), det_time.end(), 0)\n- << std::endl;\n- std::cout << \"preproce_time(ms): \" << det_time[0] / img_num\n+ << std::accumulate(det_time.begin(), det_time.end(), 0.) << std::endl;\n+ img_num = std::max(1, img_num);\n+ std::cout << \"Average time cost per person:\" << std::endl\n+ << \"preproce_time(ms): \" << det_time[0] / img_num\n<< \", inference_time(ms): \" << det_time[1] / img_num\n<< \", postprocess_time(ms): \" << det_time[2] / img_num << std::endl;\n}\n@@ -284,7 +297,7 @@ void PredictImage(const std::vector<std::string> all_img_paths,\n}\nPrintBenchmarkLog(det_t, all_img_paths.size());\nif (keypoint) {\n- PrintBenchmarkLog(keypoint_t, kpts_imgs);\n+ PrintKptsBenchmarkLog(keypoint_t, kpts_imgs);\nPrintTotalIimeLog((det_t[0] + det_t[1] + det_t[2]) / all_img_paths.size(),\n(keypoint_t[0] + keypoint_t[1] + keypoint_t[2]) / all_img_paths.size(),\nmidtimecost / all_img_paths.size());\n"
}
] |
Python
|
Apache License 2.0
|
paddlepaddle/paddledetection
|
update BenchmarkLog and fix some bug (#4383)
|
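Both benchmark-log fixes above switch the time accumulation to doubles (`0.` instead of `0`) and clamp the image count before dividing, so an empty run no longer divides by zero. A rough Python equivalent of that guard, written only to make the averaging logic explicit (the real code is C++):

```python
def print_benchmark_log(det_time, img_num):
    # det_time holds accumulated [preprocess, inference, postprocess] times in ms.
    total = sum(det_time)
    print("Total number of predicted data: %d and total time spent(ms): %.2f"
          % (img_num, total))
    # Clamp to 1 so an empty run does not divide by zero.
    img_num = max(1, img_num)
    print("preprocess_time(ms): %.2f, inference_time(ms): %.2f, postprocess_time(ms): %.2f"
          % (det_time[0] / img_num, det_time[1] / img_num, det_time[2] / img_num))
```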
499,299 |
29.10.2021 11:40:22
| -28,800 |
87b1fccdd6c74439953422d3fa30845be3e0bfeb
|
add tinypose models
|
[
{
"change_type": "ADD",
"old_path": null,
"new_path": "configs/keypoint/tiny_pose/keypoint/tinypose_128x96.yml",
"diff": "+use_gpu: true\n+log_iter: 5\n+save_dir: output\n+snapshot_epoch: 10\n+weights: output/tinypose_128x96/model_final\n+epoch: 420\n+num_joints: &num_joints 17\n+pixel_std: &pixel_std 200\n+metric: KeyPointTopDownCOCOEval\n+num_classes: 1\n+train_height: &train_height 128\n+train_width: &train_width 96\n+trainsize: &trainsize [*train_width, *train_height]\n+hmsize: &hmsize [24, 32]\n+flip_perm: &flip_perm [[1, 2], [3, 4], [5, 6], [7, 8], [9, 10], [11, 12], [13, 14], [15, 16]]\n+\n+\n+#####model\n+architecture: TopDownHRNet\n+\n+TopDownHRNet:\n+ backbone: LiteHRNet\n+ post_process: HRNetPostProcess\n+ flip_perm: *flip_perm\n+ num_joints: *num_joints\n+ width: &width 40\n+ loss: KeyPointMSELoss\n+ use_dark: true\n+\n+LiteHRNet:\n+ network_type: wider_naive\n+ freeze_at: -1\n+ freeze_norm: false\n+ return_idx: [0]\n+\n+KeyPointMSELoss:\n+ use_target_weight: true\n+ loss_scale: 1.0\n+\n+#####optimizer\n+LearningRate:\n+ base_lr: 0.008\n+ schedulers:\n+ - !PiecewiseDecay\n+ milestones: [380, 410]\n+ gamma: 0.1\n+ - !LinearWarmup\n+ start_factor: 0.001\n+ steps: 500\n+\n+OptimizerBuilder:\n+ optimizer:\n+ type: Adam\n+ regularizer:\n+ factor: 0.0\n+ type: L2\n+\n+\n+#####data\n+TrainDataset:\n+ !KeypointTopDownCocoDataset\n+ image_dir: \"\"\n+ anno_path: aic_coco_train_cocoformat.json\n+ dataset_dir: dataset\n+ num_joints: *num_joints\n+ trainsize: *trainsize\n+ pixel_std: *pixel_std\n+ use_gt_bbox: True\n+\n+\n+EvalDataset:\n+ !KeypointTopDownCocoDataset\n+ image_dir: val2017\n+ anno_path: annotations/person_keypoints_val2017.json\n+ dataset_dir: dataset/coco\n+ num_joints: *num_joints\n+ trainsize: *trainsize\n+ pixel_std: *pixel_std\n+ use_gt_bbox: True\n+ image_thre: 0.0\n+\n+TestDataset:\n+ !ImageFolder\n+ anno_path: dataset/coco/keypoint_imagelist.txt\n+\n+worker_num: 2\n+global_mean: &global_mean [0.485, 0.456, 0.406]\n+global_std: &global_std [0.229, 0.224, 0.225]\n+TrainReader:\n+ sample_transforms:\n+ - RandomFlipHalfBodyTransform:\n+ scale: 0.25\n+ rot: 30\n+ num_joints_half_body: 8\n+ prob_half_body: 0.3\n+ pixel_std: *pixel_std\n+ trainsize: *trainsize\n+ upper_body_ids: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]\n+ flip_pairs: *flip_perm\n+ - AugmentationbyInformantionDropping:\n+ prob_cutout: 0.5\n+ offset_factor: 0.05\n+ num_patch: 1\n+ trainsize: *trainsize\n+ - TopDownAffine:\n+ trainsize: *trainsize\n+ use_udp: true\n+ - ToHeatmapsTopDown_DARK:\n+ hmsize: *hmsize\n+ sigma: 1\n+ batch_transforms:\n+ - NormalizeImage:\n+ mean: *global_mean\n+ std: *global_std\n+ is_scale: true\n+ - Permute: {}\n+ batch_size: 512\n+ shuffle: true\n+ drop_last: false\n+\n+EvalReader:\n+ sample_transforms:\n+ - TopDownAffine:\n+ trainsize: *trainsize\n+ use_udp: true\n+ batch_transforms:\n+ - NormalizeImage:\n+ mean: *global_mean\n+ std: *global_std\n+ is_scale: true\n+ - Permute: {}\n+ batch_size: 16\n+\n+TestReader:\n+ inputs_def:\n+ image_shape: [3, *train_height, *train_width]\n+ sample_transforms:\n+ - Decode: {}\n+ - TopDownEvalAffine:\n+ trainsize: *trainsize\n+ - NormalizeImage:\n+ mean: *global_mean\n+ std: *global_std\n+ is_scale: true\n+ - Permute: {}\n+ batch_size: 1\n+ fuse_normalize: true\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "configs/keypoint/tiny_pose/keypoint/tinypose_256x192.yml",
"diff": "+use_gpu: true\n+log_iter: 5\n+save_dir: output\n+snapshot_epoch: 10\n+weights: output/tinypose_256x192/model_final\n+epoch: 420\n+num_joints: &num_joints 17\n+pixel_std: &pixel_std 200\n+metric: KeyPointTopDownCOCOEval\n+num_classes: 1\n+train_height: &train_height 256\n+train_width: &train_width 192\n+trainsize: &trainsize [*train_width, *train_height]\n+hmsize: &hmsize [48, 64]\n+flip_perm: &flip_perm [[1, 2], [3, 4], [5, 6], [7, 8], [9, 10], [11, 12], [13, 14], [15, 16]]\n+\n+\n+#####model\n+architecture: TopDownHRNet\n+\n+TopDownHRNet:\n+ backbone: LiteHRNet\n+ post_process: HRNetPostProcess\n+ flip_perm: *flip_perm\n+ num_joints: *num_joints\n+ width: &width 40\n+ loss: KeyPointMSELoss\n+ use_dark: true\n+\n+LiteHRNet:\n+ network_type: wider_naive\n+ freeze_at: -1\n+ freeze_norm: false\n+ return_idx: [0]\n+\n+KeyPointMSELoss:\n+ use_target_weight: true\n+ loss_scale: 1.0\n+\n+#####optimizer\n+LearningRate:\n+ base_lr: 0.002\n+ schedulers:\n+ - !PiecewiseDecay\n+ milestones: [380, 410]\n+ gamma: 0.1\n+ - !LinearWarmup\n+ start_factor: 0.001\n+ steps: 500\n+\n+OptimizerBuilder:\n+ optimizer:\n+ type: Adam\n+ regularizer:\n+ factor: 0.0\n+ type: L2\n+\n+\n+#####data\n+TrainDataset:\n+ !KeypointTopDownCocoDataset\n+ image_dir: \"\"\n+ anno_path: aic_coco_train_cocoformat.json\n+ dataset_dir: dataset\n+ num_joints: *num_joints\n+ trainsize: *trainsize\n+ pixel_std: *pixel_std\n+ use_gt_bbox: True\n+\n+\n+EvalDataset:\n+ !KeypointTopDownCocoDataset\n+ image_dir: val2017\n+ anno_path: annotations/person_keypoints_val2017.json\n+ dataset_dir: dataset/coco\n+ num_joints: *num_joints\n+ trainsize: *trainsize\n+ pixel_std: *pixel_std\n+ use_gt_bbox: True\n+ image_thre: 0.0\n+\n+TestDataset:\n+ !ImageFolder\n+ anno_path: dataset/coco/keypoint_imagelist.txt\n+\n+worker_num: 2\n+global_mean: &global_mean [0.485, 0.456, 0.406]\n+global_std: &global_std [0.229, 0.224, 0.225]\n+TrainReader:\n+ sample_transforms:\n+ - RandomFlipHalfBodyTransform:\n+ scale: 0.25\n+ rot: 30\n+ num_joints_half_body: 8\n+ prob_half_body: 0.3\n+ pixel_std: *pixel_std\n+ trainsize: *trainsize\n+ upper_body_ids: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]\n+ flip_pairs: *flip_perm\n+ - AugmentationbyInformantionDropping:\n+ prob_cutout: 0.5\n+ offset_factor: 0.05\n+ num_patch: 1\n+ trainsize: *trainsize\n+ - TopDownAffine:\n+ trainsize: *trainsize\n+ use_udp: true\n+ - ToHeatmapsTopDown_DARK:\n+ hmsize: *hmsize\n+ sigma: 2\n+ batch_transforms:\n+ - NormalizeImage:\n+ mean: *global_mean\n+ std: *global_std\n+ is_scale: true\n+ - Permute: {}\n+ batch_size: 128\n+ shuffle: true\n+ drop_last: false\n+\n+EvalReader:\n+ sample_transforms:\n+ - TopDownAffine:\n+ trainsize: *trainsize\n+ use_udp: true\n+ batch_transforms:\n+ - NormalizeImage:\n+ mean: *global_mean\n+ std: *global_std\n+ is_scale: true\n+ - Permute: {}\n+ batch_size: 16\n+\n+TestReader:\n+ inputs_def:\n+ image_shape: [3, *train_height, *train_width]\n+ sample_transforms:\n+ - Decode: {}\n+ - TopDownEvalAffine:\n+ trainsize: *trainsize\n+ - NormalizeImage:\n+ mean: *global_mean\n+ std: *global_std\n+ is_scale: true\n+ - Permute: {}\n+ batch_size: 1\n+ fuse_normalize: true\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "configs/keypoint/tiny_pose/pedestrian_detection/picodet_s_320_pedestrian.yml",
"diff": "+use_gpu: true\n+log_iter: 20\n+save_dir: output\n+snapshot_epoch: 1\n+print_flops: false\n+pretrain_weights: https://paddledet.bj.bcebos.com/models/pretrained/ESNet_x0_75_pretrained.pdparams\n+weights: output/picodet_s_320_pedestrian/model_final\n+find_unused_parameters: True\n+use_ema: true\n+cycle_epoch: 40\n+snapshot_epoch: 10\n+epoch: 300\n+metric: COCO\n+num_classes: 1\n+\n+architecture: PicoDet\n+\n+PicoDet:\n+ backbone: ESNet\n+ neck: CSPPAN\n+ head: PicoHead\n+\n+ESNet:\n+ scale: 0.75\n+ feature_maps: [4, 11, 14]\n+ act: hard_swish\n+ channel_ratio: [0.875, 0.5, 0.5, 0.5, 0.625, 0.5, 0.625, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5]\n+\n+CSPPAN:\n+ out_channels: 96\n+ use_depthwise: True\n+ num_csp_blocks: 1\n+ num_features: 4\n+\n+PicoHead:\n+ conv_feat:\n+ name: PicoFeat\n+ feat_in: 96\n+ feat_out: 96\n+ num_convs: 2\n+ num_fpn_stride: 4\n+ norm_type: bn\n+ share_cls_reg: True\n+ fpn_stride: [8, 16, 32, 64]\n+ feat_in_chan: 96\n+ prior_prob: 0.01\n+ reg_max: 7\n+ cell_offset: 0.5\n+ loss_class:\n+ name: VarifocalLoss\n+ use_sigmoid: True\n+ iou_weighted: True\n+ loss_weight: 1.0\n+ loss_dfl:\n+ name: DistributionFocalLoss\n+ loss_weight: 0.25\n+ loss_bbox:\n+ name: GIoULoss\n+ loss_weight: 2.0\n+ assigner:\n+ name: SimOTAAssigner\n+ candidate_topk: 10\n+ iou_weight: 6\n+ nms:\n+ name: MultiClassNMS\n+ nms_top_k: 1000\n+ keep_top_k: 100\n+ score_threshold: 0.025\n+ nms_threshold: 0.6\n+\n+LearningRate:\n+ base_lr: 0.4\n+ schedulers:\n+ - !CosineDecay\n+ max_epochs: 300\n+ - !LinearWarmup\n+ start_factor: 0.1\n+ steps: 300\n+\n+OptimizerBuilder:\n+ optimizer:\n+ momentum: 0.9\n+ type: Momentum\n+ regularizer:\n+ factor: 0.00004\n+ type: L2\n+\n+TrainDataset:\n+ !COCODataSet\n+ image_dir: \"\"\n+ anno_path: aic_coco_train_cocoformat.json\n+ dataset_dir: dataset\n+ data_fields: ['image', 'gt_bbox', 'gt_class', 'is_crowd']\n+\n+EvalDataset:\n+ !COCODataSet\n+ image_dir: val2017\n+ anno_path: annotations/instances_val2017.json\n+ dataset_dir: dataset/coco\n+\n+TestDataset:\n+ !ImageFolder\n+ anno_path: annotations/instances_val2017.json\n+\n+worker_num: 8\n+TrainReader:\n+ sample_transforms:\n+ - Decode: {}\n+ - RandomCrop: {}\n+ - RandomFlip: {prob: 0.5}\n+ - RandomDistort: {}\n+ batch_transforms:\n+ - BatchRandomResize: {target_size: [256, 288, 320, 352, 384], random_size: True, random_interp: True, keep_ratio: False}\n+ - NormalizeImage: {is_scale: true, mean: [0.485,0.456,0.406], std: [0.229, 0.224,0.225]}\n+ - Permute: {}\n+ batch_size: 128\n+ shuffle: true\n+ drop_last: true\n+ collate_batch: false\n+\n+EvalReader:\n+ sample_transforms:\n+ - Decode: {}\n+ - Resize: {interp: 2, target_size: [320, 320], keep_ratio: False}\n+ - NormalizeImage: {is_scale: true, mean: [0.485,0.456,0.406], std: [0.229, 0.224,0.225]}\n+ - Permute: {}\n+ batch_transforms:\n+ - PadBatch: {pad_to_stride: 32}\n+ batch_size: 8\n+ shuffle: false\n+\n+TestReader:\n+ inputs_def:\n+ image_shape: [1, 3, 320, 320]\n+ sample_transforms:\n+ - Decode: {}\n+ - Resize: {interp: 2, target_size: [320, 320], keep_ratio: False}\n+ - NormalizeImage: {is_scale: true, mean: [0.485,0.456,0.406], std: [0.229, 0.224,0.225]}\n+ - Permute: {}\n+ batch_transforms:\n+ - PadBatch: {pad_to_stride: 32}\n+ batch_size: 1\n+ shuffle: false\n"
},
{
"change_type": "MODIFY",
"old_path": "deploy/python/keypoint_preprocess.py",
"new_path": "deploy/python/keypoint_preprocess.py",
"diff": "@@ -108,6 +108,37 @@ def get_affine_transform(center,\nreturn trans\n+def get_warp_matrix(theta, size_input, size_dst, size_target):\n+ \"\"\"Calculate the transformation matrix under the constraint of unbiased.\n+ Paper ref: Huang et al. The Devil is in the Details: Delving into Unbiased\n+ Data Processing for Human Pose Estimation (CVPR 2020).\n+\n+ Args:\n+ theta (float): Rotation angle in degrees.\n+ size_input (np.ndarray): Size of input image [w, h].\n+ size_dst (np.ndarray): Size of output image [w, h].\n+ size_target (np.ndarray): Size of ROI in input plane [w, h].\n+\n+ Returns:\n+ matrix (np.ndarray): A matrix for transformation.\n+ \"\"\"\n+ theta = np.deg2rad(theta)\n+ matrix = np.zeros((2, 3), dtype=np.float32)\n+ scale_x = size_dst[0] / size_target[0]\n+ scale_y = size_dst[1] / size_target[1]\n+ matrix[0, 0] = np.cos(theta) * scale_x\n+ matrix[0, 1] = -np.sin(theta) * scale_x\n+ matrix[0, 2] = scale_x * (\n+ -0.5 * size_input[0] * np.cos(theta) + 0.5 * size_input[1] *\n+ np.sin(theta) + 0.5 * size_target[0])\n+ matrix[1, 0] = np.sin(theta) * scale_y\n+ matrix[1, 1] = np.cos(theta) * scale_y\n+ matrix[1, 2] = scale_y * (\n+ -0.5 * size_input[0] * np.sin(theta) - 0.5 * size_input[1] *\n+ np.cos(theta) + 0.5 * size_target[1])\n+ return matrix\n+\n+\ndef rotate_point(pt, angle_rad):\n\"\"\"Rotate a point by an angle.\n@@ -154,6 +185,7 @@ class TopDownEvalAffine(object):\nArgs:\ntrainsize (list): [w, h], the standard size used to train\n+ use_udp (bool): whether to use Unbiased Data Processing.\nrecords(dict): the dict contained the image and coords\nReturns:\n@@ -161,14 +193,24 @@ class TopDownEvalAffine(object):\n\"\"\"\n- def __init__(self, trainsize):\n+ def __init__(self, trainsize, use_udp=False):\nself.trainsize = trainsize\n+ self.use_udp = use_udp\ndef __call__(self, image, im_info):\nrot = 0\nimshape = im_info['im_shape'][::-1]\ncenter = im_info['center'] if 'center' in im_info else imshape / 2.\nscale = im_info['scale'] if 'scale' in im_info else imshape\n+ if self.use_udp:\n+ trans = get_warp_matrix(\n+ rot, center * 2.0,\n+ [self.trainsize[0] - 1.0, self.trainsize[1] - 1.0], scale)\n+ image = cv2.warpAffine(\n+ image,\n+ trans, (int(self.trainsize[0]), int(self.trainsize[1])),\n+ flags=cv2.INTER_LINEAR)\n+ else:\ntrans = get_affine_transform(center, scale, rot, self.trainsize)\nimage = cv2.warpAffine(\nimage,\n"
},
{
"change_type": "MODIFY",
"old_path": "ppdet/data/transform/keypoint_operators.py",
"new_path": "ppdet/data/transform/keypoint_operators.py",
"diff": "@@ -28,7 +28,7 @@ import numpy as np\nimport math\nimport copy\n-from ...modeling.keypoint_utils import get_affine_mat_kernel, warp_affine_joints, get_affine_transform, affine_transform\n+from ...modeling.keypoint_utils import get_affine_mat_kernel, warp_affine_joints, get_affine_transform, affine_transform, get_warp_matrix\nfrom ppdet.core.workspace import serializable\nfrom ppdet.utils.logger import setup_logger\nlogger = setup_logger(__name__)\n@@ -36,10 +36,19 @@ logger = setup_logger(__name__)\nregistered_ops = []\n__all__ = [\n- 'RandomAffine', 'KeyPointFlip', 'TagGenerate', 'ToHeatmaps',\n- 'NormalizePermute', 'EvalAffine', 'RandomFlipHalfBodyTransform',\n- 'TopDownAffine', 'ToHeatmapsTopDown', 'ToHeatmapsTopDown_DARK',\n- 'TopDownEvalAffine'\n+ 'RandomAffine',\n+ 'KeyPointFlip',\n+ 'TagGenerate',\n+ 'ToHeatmaps',\n+ 'NormalizePermute',\n+ 'EvalAffine',\n+ 'RandomFlipHalfBodyTransform',\n+ 'TopDownAffine',\n+ 'ToHeatmapsTopDown',\n+ 'ToHeatmapsTopDown_DARK',\n+ 'ToHeatmapsTopDown_UDP',\n+ 'TopDownEvalAffine',\n+ 'AugmentationbyInformantionDropping',\n]\n@@ -96,37 +105,6 @@ class KeyPointFlip(object):\nreturn records\n-def get_warp_matrix(theta, size_input, size_dst, size_target):\n- \"\"\"Calculate the transformation matrix under the constraint of unbiased.\n- Paper ref: Huang et al. The Devil is in the Details: Delving into Unbiased\n- Data Processing for Human Pose Estimation (CVPR 2020).\n-\n- Args:\n- theta (float): Rotation angle in degrees.\n- size_input (np.ndarray): Size of input image [w, h].\n- size_dst (np.ndarray): Size of output image [w, h].\n- size_target (np.ndarray): Size of ROI in input plane [w, h].\n-\n- Returns:\n- matrix (np.ndarray): A matrix for transformation.\n- \"\"\"\n- theta = np.deg2rad(theta)\n- matrix = np.zeros((2, 3), dtype=np.float32)\n- scale_x = size_dst[0] / size_target[0]\n- scale_y = size_dst[1] / size_target[1]\n- matrix[0, 0] = math.cos(theta) * scale_x\n- matrix[0, 1] = -math.sin(theta) * scale_x\n- matrix[0, 2] = scale_x * (\n- -0.5 * size_input[0] * math.cos(theta) + 0.5 * size_input[1] *\n- math.sin(theta) + 0.5 * size_target[0])\n- matrix[1, 0] = math.sin(theta) * scale_y\n- matrix[1, 1] = math.cos(theta) * scale_y\n- matrix[1, 2] = scale_y * (\n- -0.5 * size_input[0] * math.sin(theta) - 0.5 * size_input[1] *\n- math.cos(theta) + 0.5 * size_target[1])\n- return matrix\n-\n-\n@register_keypointop\nclass RandomAffine(object):\n\"\"\"apply affine transform to image, mask and coords\n@@ -531,12 +509,72 @@ class RandomFlipHalfBodyTransform(object):\nreturn records\n+@register_keypointop\n+class AugmentationbyInformantionDropping(object):\n+ \"\"\"AID: Augmentation by Informantion Dropping. 
Please refer\n+ to https://arxiv.org/abs/2008.07139\n+\n+ Args:\n+ prob_cutout (float): The probability of the Cutout augmentation.\n+ offset_factor (float): Offset factor of cutout center.\n+ num_patch (int): Number of patches to be cutout.\n+ records(dict): the dict contained the image and coords\n+\n+ Returns:\n+ records (dict): contain the image and coords after tranformed\n+\n+ \"\"\"\n+\n+ def __init__(self,\n+ trainsize,\n+ prob_cutout=0.0,\n+ offset_factor=0.2,\n+ num_patch=1):\n+ self.prob_cutout = prob_cutout\n+ self.offset_factor = offset_factor\n+ self.num_patch = num_patch\n+ self.trainsize = trainsize\n+\n+ def _cutout(self, img, joints, joints_vis):\n+ height, width, _ = img.shape\n+ img = img.reshape((height * width, -1))\n+ feat_x_int = np.arange(0, width)\n+ feat_y_int = np.arange(0, height)\n+ feat_x_int, feat_y_int = np.meshgrid(feat_x_int, feat_y_int)\n+ feat_x_int = feat_x_int.reshape((-1, ))\n+ feat_y_int = feat_y_int.reshape((-1, ))\n+ for _ in range(self.num_patch):\n+ vis_idx, _ = np.where(joints_vis > 0)\n+ occlusion_joint_id = np.random.choice(vis_idx)\n+ center = joints[occlusion_joint_id, 0:2]\n+ offset = np.random.randn(2) * self.trainsize[0] * self.offset_factor\n+ center = center + offset\n+ radius = np.random.uniform(0.1, 0.2) * self.trainsize[0]\n+ x_offset = (center[0] - feat_x_int) / radius\n+ y_offset = (center[1] - feat_y_int) / radius\n+ dis = x_offset**2 + y_offset**2\n+ keep_pos = np.where((dis <= 1) & (dis >= 0))[0]\n+ img[keep_pos, :] = 0\n+ img = img.reshape((height, width, -1))\n+ return img\n+\n+ def __call__(self, records):\n+ img = records['image']\n+ joints = records['joints']\n+ joints_vis = records['joints_vis']\n+ if np.random.rand() < self.prob_cutout:\n+ img = self._cutout(img, joints, joints_vis)\n+ records['image'] = img\n+ return records\n+\n+\n@register_keypointop\nclass TopDownAffine(object):\n\"\"\"apply affine transform to image and coords\nArgs:\ntrainsize (list): [w, h], the standard size used to train\n+ use_udp (bool): whether to use Unbiased Data Processing.\nrecords(dict): the dict contained the image and coords\nReturns:\n@@ -544,26 +582,36 @@ class TopDownAffine(object):\n\"\"\"\n- def __init__(self, trainsize):\n+ def __init__(self, trainsize, use_udp=False):\nself.trainsize = trainsize\n+ self.use_udp = use_udp\ndef __call__(self, records):\nimage = records['image']\njoints = records['joints']\njoints_vis = records['joints_vis']\nrot = records['rotate'] if \"rotate\" in records else 0\n- trans = get_affine_transform(records['center'], records['scale'] * 200,\n- rot, self.trainsize)\n- trans_joint = get_affine_transform(\n- records['center'], records['scale'] * 200, rot,\n- [self.trainsize[0] / 4, self.trainsize[1] / 4])\n+ if self.use_udp:\n+ trans = get_warp_matrix(\n+ rot, records['center'] * 2.0,\n+ [self.trainsize[0] - 1.0, self.trainsize[1] - 1.0],\n+ records['scale'] * 200.0)\n+ image = cv2.warpAffine(\n+ image,\n+ trans, (int(self.trainsize[0]), int(self.trainsize[1])),\n+ flags=cv2.INTER_LINEAR)\n+ joints[:, 0:2] = warp_affine_joints(joints[:, 0:2].copy(), trans)\n+ else:\n+ trans = get_affine_transform(records['center'], records['scale'] *\n+ 200, rot, self.trainsize)\nimage = cv2.warpAffine(\nimage,\ntrans, (int(self.trainsize[0]), int(self.trainsize[1])),\nflags=cv2.INTER_LINEAR)\nfor i in range(joints.shape[0]):\nif joints_vis[i, 0] > 0.0:\n- joints[i, 0:2] = affine_transform(joints[i, 0:2], trans_joint)\n+ joints[i, 0:2] = affine_transform(joints[i, 0:2], trans)\n+\nrecords['image'] = 
image\nrecords['joints'] = joints\n@@ -576,6 +624,7 @@ class TopDownEvalAffine(object):\nArgs:\ntrainsize (list): [w, h], the standard size used to train\n+ use_udp (bool): whether to use Unbiased Data Processing.\nrecords(dict): the dict contained the image and coords\nReturns:\n@@ -583,8 +632,9 @@ class TopDownEvalAffine(object):\n\"\"\"\n- def __init__(self, trainsize):\n+ def __init__(self, trainsize, use_udp=False):\nself.trainsize = trainsize\n+ self.use_udp = use_udp\ndef __call__(self, records):\nimage = records['image']\n@@ -592,6 +642,16 @@ class TopDownEvalAffine(object):\nimshape = records['im_shape'][::-1]\ncenter = imshape / 2.\nscale = imshape\n+\n+ if self.use_udp:\n+ trans = get_warp_matrix(\n+ rot, center * 2.0,\n+ [self.trainsize[0] - 1.0, self.trainsize[1] - 1.0], scale)\n+ image = cv2.warpAffine(\n+ image,\n+ trans, (int(self.trainsize[0]), int(self.trainsize[1])),\n+ flags=cv2.INTER_LINEAR)\n+ else:\ntrans = get_affine_transform(center, scale, rot, self.trainsize)\nimage = cv2.warpAffine(\nimage,\n@@ -632,10 +692,10 @@ class ToHeatmapsTopDown(object):\ntarget = np.zeros(\n(num_joints, self.hmsize[1], self.hmsize[0]), dtype=np.float32)\ntmp_size = self.sigma * 3\n- for joint_id in range(num_joints):\nfeat_stride = image_size / self.hmsize\n- mu_x = int(joints[joint_id][0] + 0.5)\n- mu_y = int(joints[joint_id][1] + 0.5)\n+ for joint_id in range(num_joints):\n+ mu_x = int(joints[joint_id][0] + 0.5) / feat_stride[0]\n+ mu_y = int(joints[joint_id][1] + 0.5) / feat_stride[1]\n# Check that any part of the gaussian is in-bounds\nul = [int(mu_x - tmp_size), int(mu_y - tmp_size)]\nbr = [int(mu_x + tmp_size + 1), int(mu_y + tmp_size + 1)]\n@@ -693,14 +753,17 @@ class ToHeatmapsTopDown_DARK(object):\njoints = records['joints']\njoints_vis = records['joints_vis']\nnum_joints = joints.shape[0]\n+ image_size = np.array(\n+ [records['image'].shape[1], records['image'].shape[0]])\ntarget_weight = np.ones((num_joints, 1), dtype=np.float32)\ntarget_weight[:, 0] = joints_vis[:, 0]\ntarget = np.zeros(\n(num_joints, self.hmsize[1], self.hmsize[0]), dtype=np.float32)\ntmp_size = self.sigma * 3\n+ feat_stride = image_size / self.hmsize\nfor joint_id in range(num_joints):\n- mu_x = joints[joint_id][0]\n- mu_y = joints[joint_id][1]\n+ mu_x = joints[joint_id][0] / feat_stride[0]\n+ mu_y = joints[joint_id][1] / feat_stride[1]\n# Check that any part of the gaussian is in-bounds\nul = [int(mu_x - tmp_size), int(mu_y - tmp_size)]\nbr = [int(mu_x + tmp_size + 1), int(mu_y + tmp_size + 1)]\n@@ -723,3 +786,74 @@ class ToHeatmapsTopDown_DARK(object):\ndel records['joints'], records['joints_vis']\nreturn records\n+\n+\n+@register_keypointop\n+class ToHeatmapsTopDown_UDP(object):\n+ \"\"\"to generate the gaussian heatmaps of keypoint for heatmap loss.\n+ ref: Huang et al. 
The Devil is in the Details: Delving into Unbiased Data Processing\n+ for Human Pose Estimation (CVPR 2020).\n+\n+ Args:\n+ hmsize (list): [w, h] output heatmap's size\n+ sigma (float): the std of gaussin kernel genereted\n+ records(dict): the dict contained the image and coords\n+\n+ Returns:\n+ records (dict): contain the heatmaps used to heatmaploss\n+ \"\"\"\n+\n+ def __init__(self, hmsize, sigma):\n+ super(ToHeatmapsTopDown_UDP, self).__init__()\n+ self.hmsize = np.array(hmsize)\n+ self.sigma = sigma\n+\n+ def __call__(self, records):\n+ joints = records['joints']\n+ joints_vis = records['joints_vis']\n+ num_joints = joints.shape[0]\n+ image_size = np.array(\n+ [records['image'].shape[1], records['image'].shape[0]])\n+ target_weight = np.ones((num_joints, 1), dtype=np.float32)\n+ target_weight[:, 0] = joints_vis[:, 0]\n+ target = np.zeros(\n+ (num_joints, self.hmsize[1], self.hmsize[0]), dtype=np.float32)\n+ tmp_size = self.sigma * 3\n+ size = 2 * tmp_size + 1\n+ x = np.arange(0, size, 1, np.float32)\n+ y = x[:, None]\n+ feat_stride = (image_size - 1.0) / (self.hmsize - 1.0)\n+ for joint_id in range(num_joints):\n+ mu_x = int(joints[joint_id][0] / feat_stride[0] + 0.5)\n+ mu_y = int(joints[joint_id][1] / feat_stride[1] + 0.5)\n+ # Check that any part of the gaussian is in-bounds\n+ ul = [int(mu_x - tmp_size), int(mu_y - tmp_size)]\n+ br = [int(mu_x + tmp_size + 1), int(mu_y + tmp_size + 1)]\n+ if ul[0] >= self.hmsize[0] or ul[1] >= self.hmsize[1] or br[\n+ 0] < 0 or br[1] < 0:\n+ # If not, just return the image as is\n+ target_weight[joint_id] = 0\n+ continue\n+\n+ mu_x_ac = joints[joint_id][0] / feat_stride[0]\n+ mu_y_ac = joints[joint_id][1] / feat_stride[1]\n+ x0 = y0 = size // 2\n+ x0 += mu_x_ac - mu_x\n+ y0 += mu_y_ac - mu_y\n+ g = np.exp(-((x - x0)**2 + (y - y0)**2) / (2 * self.sigma**2))\n+ # Usable gaussian range\n+ g_x = max(0, -ul[0]), min(br[0], self.hmsize[0]) - ul[0]\n+ g_y = max(0, -ul[1]), min(br[1], self.hmsize[1]) - ul[1]\n+ # Image range\n+ img_x = max(0, ul[0]), min(br[0], self.hmsize[0])\n+ img_y = max(0, ul[1]), min(br[1], self.hmsize[1])\n+\n+ v = target_weight[joint_id]\n+ if v > 0.5:\n+ target[joint_id][img_y[0]:img_y[1], img_x[0]:img_x[1]] = g[g_y[\n+ 0]:g_y[1], g_x[0]:g_x[1]]\n+ records['target'] = target\n+ records['target_weight'] = target_weight\n+ del records['joints'], records['joints_vis']\n+\n+ return records\n"
},
{
"change_type": "MODIFY",
"old_path": "ppdet/modeling/keypoint_utils.py",
"new_path": "ppdet/modeling/keypoint_utils.py",
"diff": "@@ -95,6 +95,37 @@ def get_affine_transform(center,\nreturn trans\n+def get_warp_matrix(theta, size_input, size_dst, size_target):\n+ \"\"\"Calculate the transformation matrix under the constraint of unbiased.\n+ Paper ref: Huang et al. The Devil is in the Details: Delving into Unbiased\n+ Data Processing for Human Pose Estimation (CVPR 2020).\n+\n+ Args:\n+ theta (float): Rotation angle in degrees.\n+ size_input (np.ndarray): Size of input image [w, h].\n+ size_dst (np.ndarray): Size of output image [w, h].\n+ size_target (np.ndarray): Size of ROI in input plane [w, h].\n+\n+ Returns:\n+ matrix (np.ndarray): A matrix for transformation.\n+ \"\"\"\n+ theta = np.deg2rad(theta)\n+ matrix = np.zeros((2, 3), dtype=np.float32)\n+ scale_x = size_dst[0] / size_target[0]\n+ scale_y = size_dst[1] / size_target[1]\n+ matrix[0, 0] = np.cos(theta) * scale_x\n+ matrix[0, 1] = -np.sin(theta) * scale_x\n+ matrix[0, 2] = scale_x * (\n+ -0.5 * size_input[0] * np.cos(theta) + 0.5 * size_input[1] *\n+ np.sin(theta) + 0.5 * size_target[0])\n+ matrix[1, 0] = np.sin(theta) * scale_y\n+ matrix[1, 1] = np.cos(theta) * scale_y\n+ matrix[1, 2] = scale_y * (\n+ -0.5 * size_input[0] * np.sin(theta) - 0.5 * size_input[1] *\n+ np.cos(theta) + 0.5 * size_target[1])\n+ return matrix\n+\n+\ndef _get_3rd_point(a, b):\n\"\"\"To calculate the affine matrix, three pairs of points are required. This\nfunction is used to get the 3rd point, given 2D points a & b.\n"
}
] |
Python
|
Apache License 2.0
|
paddlepaddle/paddledetection
|
add tinypose models (#4388)
|
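The core of the UDP (Unbiased Data Processing) transforms added in this commit is `get_warp_matrix`, which builds a 2x3 affine matrix for a rotation plus anisotropic scaling of the ROI onto the training size. A short sketch of how such a matrix is applied to an image, assuming the `get_warp_matrix` added to `ppdet/modeling/keypoint_utils.py` is importable; the center, scale, and input image below are made-up example values, and the 96x128 size only mirrors the tinypose_128x96 config:

```python
import cv2
import numpy as np
from ppdet.modeling.keypoint_utils import get_warp_matrix  # added in this commit

# hypothetical values: person center and ROI size from a detector, no rotation
center = np.array([320.0, 240.0])
scale = np.array([200.0, 266.0])   # ROI size in the input plane, [w, h]
trainsize = (96, 128)              # [w, h] used by tinypose_128x96

trans = get_warp_matrix(0.0, center * 2.0,
                        [trainsize[0] - 1.0, trainsize[1] - 1.0], scale)
image = np.zeros((480, 640, 3), dtype=np.uint8)  # stand-in for a real frame
crop = cv2.warpAffine(image, trans, trainsize, flags=cv2.INTER_LINEAR)
```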
499,301 |
02.11.2021 00:51:16
| -28,800 |
2348dcded813412879dcfedac202d2b1210840a8
|
update sniper docs
|
[
{
"change_type": "MODIFY",
"old_path": "configs/sniper/faster_rcnn_r50_fpn_1x_sniper_visdrone.yml",
"new_path": "configs/sniper/faster_rcnn_r50_fpn_1x_sniper_visdrone.yml",
"diff": "@@ -5,7 +5,27 @@ _BASE_: [\n'../faster_rcnn/_base_/optimizer_1x.yml',\n'_base_/faster_fpn_reader.yml',\n]\n-weights: output/faster_rcnn_r50_1x_visdrone_coco/model_final\n+weights: output/faster_rcnn_r50_fpn_1x_sniper_visdrone/model_final\nfind_unused_parameters: true\n+\n+metric: COCO\n+num_classes: 9\n+\n+TrainDataset:\n+ !COCODataSet\n+ image_dir: train\n+ anno_path: annotations/train.json\n+ dataset_dir: dataset/VisDrone2019_coco\n+ data_fields: ['image', 'gt_bbox', 'gt_class', 'is_crowd']\n+\n+EvalDataset:\n+ !COCODataSet\n+ image_dir: val\n+ anno_path: annotations/val.json\n+ dataset_dir: dataset/VisDrone2019_coco\n+\n+TestDataset:\n+ !ImageFolder\n+ anno_path: annotations/val.json\n"
},
{
"change_type": "RENAME",
"old_path": "configs/faster_rcnn/faster_rcnn_r50_fpn_1x_visdrone.yml",
"new_path": "configs/sniper/faster_rcnn_r50_fpn_1x_visdrone.yml",
"diff": "@@ -5,7 +5,7 @@ _BASE_: [\n'_base_/faster_rcnn_r50_fpn.yml',\n'_base_/faster_fpn_reader.yml',\n]\n-weights: output/faster_rcnn_r50_fpn_1x_coco_visdrone/model_final\n+weights: output/faster_rcnn_r50_fpn_1x_visdrone/model_final\nmetric: COCO\n"
},
{
"change_type": "DELETE",
"old_path": "configs/sniper/faster_rcnn_r50_fpn_2x_sniper_coco.yml",
"new_path": null,
"diff": "-_BASE_: [\n- 'faster_rcnn_r50_fpn_1x_sniper_coco.yml',\n-]\n-weights: output/faster_rcnn_r50_fpn_2x_sniper_coco/model_final\n-\n-epoch: 24\n-LearningRate:\n- base_lr: 0.01\n- schedulers:\n- - !PiecewiseDecay\n- gamma: 0.1\n- milestones: [16, 22]\n- - !LinearWarmup\n- start_factor: 0.1\n- steps: 1000\n"
},
{
"change_type": "DELETE",
"old_path": "configs/sniper/faster_rcnn_r50_vd_1x_coco_sniper.yml",
"new_path": null,
"diff": "-_BASE_: [\n- '../datasets/sniper_coco_detection.yml',\n- '../runtime.yml',\n- '../faster_rcnn/_base_/optimizer_1x.yml',\n- '../faster_rcnn/_base_/faster_rcnn_r50.yml',\n- '_base_/faster_reader.yml',\n-]\n-\n-pretrain_weights: https://paddledet.bj.bcebos.com/models/faster_rcnn_r50_vd_1x_coco.pdparams\n-weights: output/faster_rcnn_r50_vd_1x_coco/model_final\n-\n-ResNet:\n- # index 0 stands for res2\n- depth: 50\n- variant: d\n- norm_type: bn\n- freeze_at: 0\n- return_idx: [2]\n- num_stages: 3\n"
},
{
"change_type": "DELETE",
"old_path": "configs/sniper/faster_rcnn_r50_vd_fpn_2x_sniper_coco.yml",
"new_path": null,
"diff": "-_BASE_: [\n- 'faster_rcnn_r50_fpn_1x_sniper_coco.yml',\n-]\n-pretrain_weights: https://paddledet.bj.bcebos.com/models/pretrained/ResNet50_vd_pretrained.pdparams\n-weights: output/faster_rcnn_r50_vd_fpn_2x_sniper_coco/model_final\n-\n-ResNet:\n- # index 0 stands for res2\n- depth: 50\n- variant: d\n- norm_type: bn\n- freeze_at: 0\n- return_idx: [0,1,2,3]\n- num_stages: 4\n-\n-epoch: 24\n-LearningRate:\n- base_lr: 0.01\n- schedulers:\n- - !PiecewiseDecay\n- gamma: 0.1\n- milestones: [16, 22]\n- - !LinearWarmup\n- start_factor: 0.1\n- steps: 1000\n"
},
{
"change_type": "DELETE",
"old_path": "configs/sniper/ppyolo_r50vd_dcn_1x_sniper.yml",
"new_path": null,
"diff": "-_BASE_: [\n- '../datasets/sniper_coco_detection.yml',\n- '../runtime.yml',\n- '../ppyolo/_base_/ppyolo_r50vd_dcn.yml',\n- '../ppyolo/_base_/optimizer_1x.yml',\n- './_base_/ppyolo_reader.yml',\n-]\n-\n-snapshot_epoch: 8\n-use_ema: true\n-weights: output/ppyolo_r50vd_dcn_1x_minicoco/model_final\n-\n-\n-LearningRate:\n- base_lr: 0.005\n- schedulers:\n- - !PiecewiseDecay\n- gamma: 0.1\n- milestones:\n- - 153\n- - 173\n- - !LinearWarmup\n- start_factor: 0.\n- steps: 4000\n-\n-OptimizerBuilder:\n- optimizer:\n- momentum: 0.9\n- type: Momentum\n- regularizer:\n- factor: 0.0005\n- type: L2\n"
},
{
"change_type": "MODIFY",
"old_path": "configs/sniper/ppyolo_r50vd_dcn_1x_sniper_visdrone.yml",
"new_path": "configs/sniper/ppyolo_r50vd_dcn_1x_sniper_visdrone.yml",
"diff": "@@ -8,7 +8,7 @@ _BASE_: [\nsnapshot_epoch: 8\nuse_ema: true\n-weights: output/ppyolo_r50vd_dcn_1x_visdrone/model_final\n+weights: output/ppyolo_r50vd_dcn_1x_sniper_visdrone/model_final\n@@ -31,3 +31,26 @@ OptimizerBuilder:\nregularizer:\nfactor: 0.0005\ntype: L2\n+\n+\n+\n+\n+metric: COCO\n+num_classes: 9\n+\n+TrainDataset:\n+ !COCODataSet\n+ image_dir: train\n+ anno_path: annotations/train.json\n+ dataset_dir: dataset/VisDrone2019_coco\n+ data_fields: ['image', 'gt_bbox', 'gt_class', 'is_crowd']\n+\n+EvalDataset:\n+ !COCODataSet\n+ image_dir: val\n+ anno_path: annotations/val.json\n+ dataset_dir: dataset/VisDrone2019_coco\n+\n+TestDataset:\n+ !ImageFolder\n+ anno_path: annotations/val.json\n"
},
{
"change_type": "RENAME",
"old_path": "configs/ppyolo/ppyolo_r50vd_dcn_1x_visdrone.yml",
"new_path": "configs/sniper/ppyolo_r50vd_dcn_1x_visdrone.yml",
"diff": "@@ -8,7 +8,7 @@ _BASE_: [\nsnapshot_epoch: 8\nuse_ema: true\n-weights: output/ppyolo_r50vd_dcn_1x_visdrone_coco/model_final\n+weights: output/ppyolo_r50vd_dcn_1x_visdrone/model_final\nepoch: 192\n"
}
] |
Python
|
Apache License 2.0
|
paddlepaddle/paddledetection
|
update sniper docs (#4411)
|
499,304 |
02.11.2021 11:46:29
| -28,800 |
a2f165adfcb35732b5f968a54e855f35711222df
|
update PicoDet cite
|
[
{
"change_type": "ADD",
"old_path": "docs/images/picodet_android_demo1.jpg",
"new_path": "docs/images/picodet_android_demo1.jpg",
"diff": "Binary files /dev/null and b/docs/images/picodet_android_demo1.jpg differ\n"
},
{
"change_type": "ADD",
"old_path": "docs/images/picodet_android_demo2.jpg",
"new_path": "docs/images/picodet_android_demo2.jpg",
"diff": "Binary files /dev/null and b/docs/images/picodet_android_demo2.jpg differ\n"
},
{
"change_type": "ADD",
"old_path": "docs/images/picodet_android_demo3.jpg",
"new_path": "docs/images/picodet_android_demo3.jpg",
"diff": "Binary files /dev/null and b/docs/images/picodet_android_demo3.jpg differ\n"
},
{
"change_type": "ADD",
"old_path": "docs/images/picodet_android_demo4.jpg",
"new_path": "docs/images/picodet_android_demo4.jpg",
"diff": "Binary files /dev/null and b/docs/images/picodet_android_demo4.jpg differ\n"
},
{
"change_type": "ADD",
"old_path": "docs/images/picodet_map.png",
"new_path": "docs/images/picodet_map.png",
"diff": "Binary files /dev/null and b/docs/images/picodet_map.png differ\n"
},
{
"change_type": "MODIFY",
"old_path": "ppdet/modeling/tests/test_architectures.py",
"new_path": "ppdet/modeling/tests/test_architectures.py",
"diff": "@@ -62,7 +62,7 @@ class TestGFL(TestFasterRCNN):\nclass TestPicoDet(TestFasterRCNN):\ndef set_config(self):\n- self.cfg_file = 'configs/picodet/picodet_s_shufflenetv2_320_coco.yml'\n+ self.cfg_file = 'configs/picodet/picodet_s_320_coco.yml'\nif __name__ == '__main__':\n"
}
] |
Python
|
Apache License 2.0
|
paddlepaddle/paddledetection
|
update PicoDet cite (#4415)
|
499,301 |
02.11.2021 13:23:42
| -28,800 |
54631803755e7b61bc329e910d9a63ae45d1cb72
|
update swin export config
|
[
{
"change_type": "MODIFY",
"old_path": "configs/faster_rcnn/_base_/faster_rcnn_swin_reader.yml",
"new_path": "configs/faster_rcnn/_base_/faster_rcnn_swin_reader.yml",
"diff": "@@ -30,10 +30,10 @@ EvalReader:\nTestReader:\ninputs_def:\n- image_shape: [1, 3, 800, 1344]\n+ image_shape: [1, 3, 640, 640]\nsample_transforms:\n- Decode: {}\n- - Resize: {interp: 2, target_size: [800, 1344], keep_ratio: True}\n+ - Resize: {interp: 2, target_size: [640, 640], keep_ratio: False}\n- NormalizeImage: {is_scale: true, mean: [0.485,0.456,0.406], std: [0.229, 0.224,0.225]}\n- Permute: {}\nbatch_transforms:\n"
}
] |
Python
|
Apache License 2.0
|
paddlepaddle/paddledetection
|
update swin export config (#4418)
|
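The TestReader change above swaps the aspect-preserving 800x1344 export shape for a plain 640x640 stretch (`keep_ratio: False`), which gives the exported Swin model a fixed input. A small sketch of the difference using OpenCV directly; the input shape is arbitrary and the `keep_ratio: True` branch is only an approximation of ppdet's Resize operator:

```python
import cv2
import numpy as np

img = np.zeros((720, 1280, 3), dtype=np.uint8)  # stand-in input frame

# keep_ratio: False -- stretch straight to the target size (new export setting)
stretched = cv2.resize(img, (640, 640), interpolation=cv2.INTER_CUBIC)

# keep_ratio: True -- scale by the limiting side only (old 800x1344 behaviour, roughly)
scale = min(800 / img.shape[0], 1344 / img.shape[1])
kept = cv2.resize(img, None, fx=scale, fy=scale, interpolation=cv2.INTER_CUBIC)
print(stretched.shape, kept.shape)
```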
499,348 |
02.11.2021 13:26:21
| -28,800 |
e56212b546e997da7aea8f35883c773e23c5b910
|
fix qps nan in python/utils.py L192
|
[
{
"change_type": "MODIFY",
"old_path": "deploy/python/utils.py",
"new_path": "deploy/python/utils.py",
"diff": "@@ -190,8 +190,11 @@ class Timer(Times):\n4) if average else self.inference_time_s.value()\naverage_latency = total_time / max(1, self.img_num)\n+ qps = 0\n+ if total_time > 0:\n+ qps = 1 / average_latency\nprint(\"average latency time(ms): {:.2f}, QPS: {:2f}\".format(\n- average_latency * 1000, 1 / average_latency))\n+ average_latency * 1000, qps))\nprint(\n\"preprocess_time(ms): {:.2f}, inference_time(ms): {:.2f}, postprocess_time(ms): {:.2f}\".\nformat(preprocess_time * 1000, inference_time * 1000,\n"
}
] |
Python
|
Apache License 2.0
|
paddlepaddle/paddledetection
|
fix qps nan in python/utils.py L192 (#4414)
|
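The one-line fix above keeps QPS finite when no images were timed. A trimmed sketch of the guarded computation mirroring the change in deploy/python/utils.py; the function name is illustrative:

```python
def report(total_time_s, img_num):
    # Average latency per image; img_num is clamped so an empty run stays finite.
    average_latency = total_time_s / max(1, img_num)
    qps = 0
    if total_time_s > 0:
        qps = 1 / average_latency
    print("average latency time(ms): {:.2f}, QPS: {:2f}".format(
        average_latency * 1000, qps))
```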
499,304 |
02.11.2021 22:23:52
| -28,800 |
60296806335b538743119d73a9faad3cfe6e1640
|
fix PicoDet export and simOTA
|
[
{
"change_type": "MODIFY",
"old_path": "ppdet/modeling/architectures/picodet.py",
"new_path": "ppdet/modeling/architectures/picodet.py",
"diff": "@@ -64,7 +64,7 @@ class PicoDet(BaseArch):\nfpn_feats = self.neck(body_feats)\nhead_outs = self.head(fpn_feats, self.deploy)\nif self.training or self.deploy:\n- return head_outs\n+ return head_outs, None\nelse:\nim_shape = self.inputs['im_shape']\nscale_factor = self.inputs['scale_factor']\n"
},
{
"change_type": "MODIFY",
"old_path": "ppdet/modeling/assigners/simota_assigner.py",
"new_path": "ppdet/modeling/assigners/simota_assigner.py",
"diff": "@@ -172,16 +172,11 @@ class SimOTAAssigner(object):\n(num_bboxes, ), 0, dtype=paddle.int64).numpy()\nif num_gt == 0 or num_bboxes == 0:\n# No ground truth or boxes, return empty assignment\n- max_overlaps = decoded_bboxes.new_zeros((num_bboxes, ))\n- if num_gt == 0:\n- # No truth, assign everything to background\n- assigned_gt_inds[:] = 0\n- if gt_labels is None:\n- assigned_labels = None\n- else:\n- assigned_labels = paddle.full(\n- (num_bboxes, ), -1, dtype=paddle.int64)\n- return\n+ priors = priors.numpy()\n+ labels = np.ones([num_bboxes], dtype=np.int64) * self.num_classes\n+ label_weights = np.ones([num_bboxes], dtype=np.float32)\n+ bbox_targets = np.zeros_like(priors)\n+ return priors, labels, label_weights, bbox_targets, 0\nvalid_mask, is_in_boxes_and_center = self.get_in_gt_and_in_center_info(\npriors, gt_bboxes)\n@@ -238,8 +233,6 @@ class SimOTAAssigner(object):\ngt_bboxes = gt_bboxes.numpy()\nassigned_gt_inds[valid_mask] = matched_gt_inds + 1\n- assigned_labels = np.full((num_bboxes, ), self.num_classes)\n- assigned_labels[valid_mask] = gt_labels.squeeze(-1)[matched_gt_inds]\npos_inds, neg_inds, pos_gt_bboxes, pos_assigned_gt_inds \\\n= self.get_sample(assigned_gt_inds, gt_bboxes)\n"
},
{
"change_type": "MODIFY",
"old_path": "ppdet/modeling/heads/simota_head.py",
"new_path": "ppdet/modeling/heads/simota_head.py",
"diff": "@@ -127,11 +127,6 @@ class OTAHead(GFLHead):\ngt_labels):\n\"\"\"Compute targets for priors in a single image.\n\"\"\"\n- num_gts = gt_labels.shape[0]\n- # No target\n- if num_gts == 0:\n- pass\n-\ncentors, labels, label_weights, bbox_targets, pos_num = self.assigner(\nF.sigmoid(cls_preds), centors, decoded_bboxes, gt_bboxes, gt_labels)\n"
}
] |
Python
|
Apache License 2.0
|
paddlepaddle/paddledetection
|
fix PicoDet export and simOTA (#4431)
|
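The SimOTA assigner fix above replaces a dead-end early return with a concrete empty assignment: when an image has no ground-truth boxes, every prior is labelled as background and zero positives are reported, so the head can still compute a loss. A compact numpy paraphrase of that branch (a standalone sketch, not the class method itself), using the convention that label `num_classes` means background:

```python
import numpy as np

def assign_when_no_gt(priors, num_classes):
    """Fallback assignment for images with no ground-truth boxes."""
    num_priors = priors.shape[0]
    labels = np.full([num_priors], num_classes, dtype=np.int64)  # all background
    label_weights = np.ones([num_priors], dtype=np.float32)
    bbox_targets = np.zeros_like(priors)
    pos_num = 0
    return priors, labels, label_weights, bbox_targets, pos_num
```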
499,301 |
02.11.2021 23:40:29
| -28,800 |
73a94c1ea979d3bfb301a77bd81cb24ee4239fc1
|
update sniper config
|
[
{
"change_type": "MODIFY",
"old_path": "configs/sniper/faster_rcnn_r50_fpn_1x_visdrone.yml",
"new_path": "configs/sniper/faster_rcnn_r50_fpn_1x_visdrone.yml",
"diff": "_BASE_: [\n'../datasets/coco_detection.yml',\n'../runtime.yml',\n- '_base_/optimizer_1x.yml',\n- '_base_/faster_rcnn_r50_fpn.yml',\n- '_base_/faster_fpn_reader.yml',\n+ '../faster_rcnn/_base_/optimizer_1x.yml',\n+ '../faster_rcnn/_base_/faster_rcnn_r50_fpn.yml',\n+ '../faster_rcnn/_base_/faster_fpn_reader.yml',\n]\nweights: output/faster_rcnn_r50_fpn_1x_visdrone/model_final\n"
},
{
"change_type": "MODIFY",
"old_path": "configs/sniper/ppyolo_r50vd_dcn_1x_visdrone.yml",
"new_path": "configs/sniper/ppyolo_r50vd_dcn_1x_visdrone.yml",
"diff": "_BASE_: [\n'../datasets/coco_detection.yml',\n'../runtime.yml',\n- './_base_/ppyolo_r50vd_dcn.yml',\n- './_base_/optimizer_1x.yml',\n- './_base_/ppyolo_reader.yml',\n+ '../ppyolo/_base_/ppyolo_r50vd_dcn.yml',\n+ '../ppyolo/_base_/optimizer_1x.yml',\n+ '../ppyolo/_base_/ppyolo_reader.yml',\n]\nsnapshot_epoch: 8\n"
}
] |
Python
|
Apache License 2.0
|
paddlepaddle/paddledetection
|
update sniper config (#4429)
|
499,301 |
03.11.2021 10:52:24
| -28,800 |
69c12a2a317934fa4f7b94e85f277b71c545ef19
|
fix sniper db
|
[
{
"change_type": "MODIFY",
"old_path": "configs/sniper/faster_rcnn_r50_fpn_1x_sniper_visdrone.yml",
"new_path": "configs/sniper/faster_rcnn_r50_fpn_1x_sniper_visdrone.yml",
"diff": "@@ -7,25 +7,3 @@ _BASE_: [\n]\nweights: output/faster_rcnn_r50_fpn_1x_sniper_visdrone/model_final\nfind_unused_parameters: true\n-\n-\n-\n-metric: COCO\n-num_classes: 9\n-\n-TrainDataset:\n- !COCODataSet\n- image_dir: train\n- anno_path: annotations/train.json\n- dataset_dir: dataset/VisDrone2019_coco\n- data_fields: ['image', 'gt_bbox', 'gt_class', 'is_crowd']\n-\n-EvalDataset:\n- !COCODataSet\n- image_dir: val\n- anno_path: annotations/val.json\n- dataset_dir: dataset/VisDrone2019_coco\n-\n-TestDataset:\n- !ImageFolder\n- anno_path: annotations/val.json\n"
},
{
"change_type": "MODIFY",
"old_path": "configs/sniper/ppyolo_r50vd_dcn_1x_sniper_visdrone.yml",
"new_path": "configs/sniper/ppyolo_r50vd_dcn_1x_sniper_visdrone.yml",
"diff": "@@ -31,26 +31,3 @@ OptimizerBuilder:\nregularizer:\nfactor: 0.0005\ntype: L2\n-\n-\n-\n-\n-metric: COCO\n-num_classes: 9\n-\n-TrainDataset:\n- !COCODataSet\n- image_dir: train\n- anno_path: annotations/train.json\n- dataset_dir: dataset/VisDrone2019_coco\n- data_fields: ['image', 'gt_bbox', 'gt_class', 'is_crowd']\n-\n-EvalDataset:\n- !COCODataSet\n- image_dir: val\n- anno_path: annotations/val.json\n- dataset_dir: dataset/VisDrone2019_coco\n-\n-TestDataset:\n- !ImageFolder\n- anno_path: annotations/val.json\n"
}
] |
Python
|
Apache License 2.0
|
paddlepaddle/paddledetection
|
fix sniper db (#4433)
|
499,304 |
03.11.2021 14:20:55
| -28,800 |
2513f35ac77dae81a5e008897050b94a442ee593
|
fix PicoDet train bug
|
[
{
"change_type": "MODIFY",
"old_path": "configs/picodet/README.md",
"new_path": "configs/picodet/README.md",
"diff": "@@ -195,6 +195,15 @@ python tools/post_quant.py -c configs/picodet/picodet_s_320_coco.yml \\\n</details>\n+## FAQ\n+\n+<details>\n+<summary>Out of memory error.</summary>\n+\n+Please reduce the `batch_size` of `TrainReader` in config.\n+\n+</details>\n+\n## Cite PP-PiocDet\nIf you use PiocDet in your research, please cite our work by using the following BibTeX entry:\n```\n"
},
{
"change_type": "MODIFY",
"old_path": "ppdet/modeling/architectures/picodet.py",
"new_path": "ppdet/modeling/architectures/picodet.py",
"diff": "@@ -75,7 +75,7 @@ class PicoDet(BaseArch):\ndef get_loss(self, ):\nloss = {}\n- head_outs = self._forward()\n+ head_outs, _ = self._forward()\nloss_gfl = self.head.get_loss(head_outs, self.inputs)\nloss.update(loss_gfl)\ntotal_loss = paddle.add_n(list(loss.values()))\n"
}
] |
Python
|
Apache License 2.0
|
paddlepaddle/paddledetection
|
fix PicoDet train bug (#4438)
|
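This record follows from the earlier PicoDet export fix that made `_forward()` always return a two-element tuple; `get_loss` had not been updated to unpack it. A toy illustration of the corrected call site, with placeholder head outputs:

```python
def _forward():
    head_outs = {"cls_scores": [], "bbox_preds": []}  # placeholder head outputs
    return head_outs, None   # two-element tuple since the export fix

head_outs, _ = _forward()    # get_loss now unpacks both elements
```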
499,298 |
03.11.2021 20:42:34
| -28,800 |
7a7a414f2da0419f554ff5c0618940215e91c582
|
update visdrone cite
|
[
{
"change_type": "MODIFY",
"old_path": "configs/mot/pedestrian/README_cn.md",
"new_path": "configs/mot/pedestrian/README_cn.md",
"diff": "@@ -118,16 +118,14 @@ doi={10.1109/ICCV.2017.40},\nISSN={2380-7504},\nmonth={Oct},}\n-@article{zhu2018vision,\n- title={Vision meets drones: A challenge},\n- author={Zhu, Pengfei and Wen, Longyin and Bian, Xiao and Ling, Haibin and Hu, Qinghua},\n- journal={arXiv preprint arXiv:1804.07437},\n- year={2018}\n-}\n-@article{zhu2020vision,\n- title={Vision Meets Drones: Past, Present and Future},\n- author={Zhu, Pengfei and Wen, Longyin and Du, Dawei and Bian, Xiao and Hu, Qinghua and Ling, Haibin},\n- journal={arXiv preprint arXiv:2001.06303},\n- year={2020}\n+@ARTICLE{9573394,\n+ author={Zhu, Pengfei and Wen, Longyin and Du, Dawei and Bian, Xiao and Fan, Heng and Hu, Qinghua and Ling, Haibin},\n+ journal={IEEE Transactions on Pattern Analysis and Machine Intelligence},\n+ title={Detection and Tracking Meet Drones Challenge},\n+ year={2021},\n+ volume={},\n+ number={},\n+ pages={1-1},\n+ doi={10.1109/TPAMI.2021.3119563}\n}\n```\n"
},
{
"change_type": "MODIFY",
"old_path": "configs/mot/vehicle/README_cn.md",
"new_path": "configs/mot/vehicle/README_cn.md",
"diff": "@@ -153,16 +153,14 @@ python deploy/python/mot_jde_infer.py --model_dir=output_inference/fairmot_dla34\nyear = {2012}\n}\n-@article{zhu2018vision,\n- title={Vision meets drones: A challenge},\n- author={Zhu, Pengfei and Wen, Longyin and Bian, Xiao and Ling, Haibin and Hu, Qinghua},\n- journal={arXiv preprint arXiv:1804.07437},\n- year={2018}\n-}\n-@article{zhu2020vision,\n- title={Vision Meets Drones: Past, Present and Future},\n- author={Zhu, Pengfei and Wen, Longyin and Du, Dawei and Bian, Xiao and Hu, Qinghua and Ling, Haibin},\n- journal={arXiv preprint arXiv:2001.06303},\n- year={2020}\n+@ARTICLE{9573394,\n+ author={Zhu, Pengfei and Wen, Longyin and Du, Dawei and Bian, Xiao and Fan, Heng and Hu, Qinghua and Ling, Haibin},\n+ journal={IEEE Transactions on Pattern Analysis and Machine Intelligence},\n+ title={Detection and Tracking Meet Drones Challenge},\n+ year={2021},\n+ volume={},\n+ number={},\n+ pages={1-1},\n+ doi={10.1109/TPAMI.2021.3119563}\n}\n```\n"
}
] |
Python
|
Apache License 2.0
|
paddlepaddle/paddledetection
|
update visdrone cite (#4448)
|
499,304 |
03.11.2021 21:18:49
| -28,800 |
4e7ad69adf7c0d090549cb9999cbcabb7b6d7eed
|
add PicoDet voc config
|
[
{
"change_type": "MODIFY",
"old_path": "configs/picodet/README.md",
"new_path": "configs/picodet/README.md",
"diff": "@@ -20,9 +20,6 @@ We developed a series of lightweight models, named `PP-PicoDet`. Because of the\n- [ ] Pretrained models for more scenarios.\n- [ ] More features in need.\n-## Requirements\n-- PaddlePaddle >= 2.1.2\n-\n## Benchmark\n| Model | Input size | mAP<sup>val<br>0.5:0.95 | mAP<sup>val<br>0.5 | Params<br><sup>(M) | FLOPS<br><sup>(G) | Latency<sup><small>[NCNN](#latency)</small><sup><br><sup>(ms) | Latency<sup><small>[Lite](#latency)</small><sup><br><sup>(ms) | download | config |\n@@ -69,6 +66,62 @@ We developed a series of lightweight models, named `PP-PicoDet`. Because of the\n| YOLOv5s | 640*640 | 37.2 | 56.0 | 7.2 | 16.5 | 78.05 |\n+## Quick Start\n+\n+<details open>\n+<summary>Requirements:</summary>\n+\n+- PaddlePaddle >= 2.1.2\n+\n+</details>\n+\n+<details>\n+<summary>Installation</summary>\n+\n+- [Installation guide](https://github.com/PaddlePaddle/PaddleDetection/blob/develop/docs/tutorials/INSTALL.md)\n+- [Prepare dataset](https://github.com/PaddlePaddle/PaddleDetection/blob/develop/docs/tutorials/PrepareDataSet_en.md)\n+\n+</details>\n+\n+<details>\n+<summary>Training and Evaluation</summary>\n+\n+- Training model on single-GPU:\n+\n+```shell\n+# training on single-GPU\n+export CUDA_VISIBLE_DEVICES=0\n+python tools/train.py -c configs/picodet/picodet_s_320_coco.yml --eval\n+```\n+\n+- Training model on multi-GPU:\n+\n+\n+```shell\n+# training on single-GPU\n+export CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7\n+python -m paddle.distributed.launch --gpus 0,1,2,3,4,5,6,7 tools/train.py -c configs/picodet/picodet_s_320_coco.yml --eval\n+```\n+\n+- Evaluation:\n+\n+```shell\n+python tools/eval.py -c configs/picodet/picodet_s_320_coco.yml \\\n+ -o weights=https://paddledet.bj.bcebos.com/models/picodet_s_320_coco.pdparams\n+```\n+\n+- Infer:\n+\n+```shell\n+python tools/infer.py -c configs/picodet/picodet_s_320_coco.yml \\\n+ -o weights=https://paddledet.bj.bcebos.com/models/picodet_s_320_coco.pdparams\n+```\n+\n+Detail also can refer to [Quick start guide](https://github.com/PaddlePaddle/PaddleDetection/blob/develop/docs/tutorials/GETTING_STARTED.md).\n+\n+</details>\n+\n+\n## Deployment\n### Export and Convert Model\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "configs/picodet/picodet_s_320_voc.yml",
"diff": "+_BASE_: [\n+ '../datasets/voc.yml',\n+ '../runtime.yml',\n+ '_base_/picodet_esnet.yml',\n+ '_base_/optimizer_300e.yml',\n+ '_base_/picodet_320_reader.yml',\n+]\n+\n+pretrain_weights: https://paddledet.bj.bcebos.com/models/pretrained/ESNet_x0_75_pretrained.pdparams\n+weights: output/picodet_s_320_coco/model_final\n+find_unused_parameters: True\n+use_ema: true\n+cycle_epoch: 40\n+snapshot_epoch: 10\n+\n+ESNet:\n+ scale: 0.75\n+ feature_maps: [4, 11, 14]\n+ act: hard_swish\n+ channel_ratio: [0.875, 0.5, 0.5, 0.5, 0.625, 0.5, 0.625, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5]\n+\n+CSPPAN:\n+ out_channels: 96\n+\n+PicoHead:\n+ conv_feat:\n+ name: PicoFeat\n+ feat_in: 96\n+ feat_out: 96\n+ num_convs: 2\n+ num_fpn_stride: 4\n+ norm_type: bn\n+ share_cls_reg: True\n+ feat_in_chan: 96\n+\n+EvalReader:\n+ collate_batch: false\n"
}
] |
Python
|
Apache License 2.0
|
paddlepaddle/paddledetection
|
add PicoDet voc config (#4442)
|
499,313 |
03.11.2021 23:03:31
| -28,800 |
25f36578fc9271f41c790ad6111520589fa2213f
|
remove Mosaic & RandomPerspective
|
[
{
"change_type": "MODIFY",
"old_path": "ppdet/data/transform/operators.py",
"new_path": "ppdet/data/transform/operators.py",
"diff": "@@ -253,8 +253,8 @@ class SniperDecodeCrop(BaseOperator):\nchip = sample['chip']\nx1, y1, x2, y2 = [int(xi) for xi in chip]\n- im = im[max(y1, 0):min(y2, im.shape[0]),\n- max(x1, 0):min(x2, im.shape[1]), :]\n+ im = im[max(y1, 0):min(y2, im.shape[0]), max(x1, 0):min(x2, im.shape[\n+ 1]), :]\nsample['image'] = im\nh = im.shape[0]\n@@ -2471,184 +2471,6 @@ class RandomResizeCrop(BaseOperator):\nreturn sample\n-class RandomPerspective(BaseOperator):\n- \"\"\"\n- Rotate, tranlate, scale, shear and perspect image and bboxes randomly,\n- refer to https://github.com/ultralytics/yolov5/blob/develop/utils/datasets.py\n-\n- Args:\n- degree (int): rotation degree, uniformly sampled in [-degree, degree]\n- translate (float): translate fraction, translate_x and translate_y are uniformly sampled\n- in [0.5 - translate, 0.5 + translate]\n- scale (float): scale factor, uniformly sampled in [1 - scale, 1 + scale]\n- shear (int): shear degree, shear_x and shear_y are uniformly sampled in [-shear, shear]\n- perspective (float): perspective_x and perspective_y are uniformly sampled in [-perspective, perspective]\n- area_thr (float): the area threshold of bbox to be kept after transformation, default 0.25\n- fill_value (tuple): value used in case of a constant border, default (114, 114, 114)\n- \"\"\"\n-\n- def __init__(self,\n- degree=10,\n- translate=0.1,\n- scale=0.1,\n- shear=10,\n- perspective=0.0,\n- border=[0, 0],\n- area_thr=0.25,\n- fill_value=(114, 114, 114)):\n- super(RandomPerspective, self).__init__()\n- self.degree = degree\n- self.translate = translate\n- self.scale = scale\n- self.shear = shear\n- self.perspective = perspective\n- self.border = border\n- self.area_thr = area_thr\n- self.fill_value = fill_value\n-\n- def apply(self, sample, context=None):\n- im = sample['image']\n- height = im.shape[0] + self.border[0] * 2\n- width = im.shape[1] + self.border[1] * 2\n-\n- # center\n- C = np.eye(3)\n- C[0, 2] = -im.shape[1] / 2\n- C[1, 2] = -im.shape[0] / 2\n-\n- # perspective\n- P = np.eye(3)\n- P[2, 0] = random.uniform(-self.perspective, self.perspective)\n- P[2, 1] = random.uniform(-self.perspective, self.perspective)\n-\n- # Rotation and scale\n- R = np.eye(3)\n- a = random.uniform(-self.degree, self.degree)\n- s = random.uniform(1 - self.scale, 1 + self.scale)\n- R[:2] = cv2.getRotationMatrix2D(angle=a, center=(0, 0), scale=s)\n-\n- # Shear\n- S = np.eye(3)\n- # shear x (deg)\n- S[0, 1] = math.tan(\n- random.uniform(-self.shear, self.shear) * math.pi / 180)\n- # shear y (deg)\n- S[1, 0] = math.tan(\n- random.uniform(-self.shear, self.shear) * math.pi / 180)\n-\n- # Translation\n- T = np.eye(3)\n- T[0, 2] = random.uniform(0.5 - self.translate,\n- 0.5 + self.translate) * width\n- T[1, 2] = random.uniform(0.5 - self.translate,\n- 0.5 + self.translate) * height\n-\n- # matmul\n- # M = T @ S @ R @ P @ C\n- M = np.eye(3)\n- for cM in [T, S, R, P, C]:\n- M = np.matmul(M, cM)\n-\n- if (self.border[0] != 0) or (self.border[1] != 0) or (\n- M != np.eye(3)).any():\n- if self.perspective:\n- im = cv2.warpPerspective(\n- im, M, dsize=(width, height), borderValue=self.fill_value)\n- else:\n- im = cv2.warpAffine(\n- im,\n- M[:2],\n- dsize=(width, height),\n- borderValue=self.fill_value)\n-\n- sample['image'] = im\n- if sample['gt_bbox'].shape[0] > 0:\n- sample = transform_bbox(\n- sample,\n- M,\n- width,\n- height,\n- area_thr=self.area_thr,\n- perspective=self.perspective)\n-\n- return sample\n-\n-\n-@register_op\n-class Mosaic(BaseOperator):\n- \"\"\"\n- Mosaic Data Augmentation, refer to 
https://github.com/ultralytics/yolov5/blob/develop/utils/datasets.py\n-\n- \"\"\"\n-\n- def __init__(self,\n- target_size,\n- mosaic_border=None,\n- fill_value=(114, 114, 114)):\n- super(Mosaic, self).__init__()\n- self.target_size = target_size\n- if mosaic_border is None:\n- mosaic_border = (-target_size // 2, -target_size // 2)\n- self.mosaic_border = mosaic_border\n- self.fill_value = fill_value\n-\n- def __call__(self, sample, context=None):\n- if not isinstance(sample, Sequence):\n- return sample\n-\n- s = self.target_size\n- yc, xc = [\n- int(random.uniform(-x, 2 * s + x)) for x in self.mosaic_border\n- ]\n- boxes = [x['gt_bbox'] for x in sample]\n- labels = [x['gt_class'] for x in sample]\n- for i in range(len(sample)):\n- im = sample[i]['image']\n- h, w, c = im.shape\n-\n- if i == 0: # top left\n- image = np.ones(\n- (s * 2, s * 2, c), dtype=np.uint8) * self.fill_value\n- # xmin, ymin, xmax, ymax (dst image)\n- x1a, y1a, x2a, y2a = max(xc - w, 0), max(yc - h, 0), xc, yc\n- # xmin, ymin, xmax, ymax (src image)\n- x1b, y1b, x2b, y2b = w - (x2a - x1a), h - (y2a - y1a), w, h\n- elif i == 1: # top right\n- x1a, y1a, x2a, y2a = xc, max(yc - h, 0), min(xc + w, s * 2), yc\n- x1b, y1b, x2b, y2b = 0, h - (y2a - y1a), min(w, x2a - x1a), h\n- elif i == 2: # bottom left\n- x1a, y1a, x2a, y2a = max(xc - w, 0), yc, xc, min(s * 2, yc + h)\n- x1b, y1b, x2b, y2b = w - (x2a - x1a), 0, max(xc, w), min(\n- y2a - y1a, h)\n- elif i == 3: # bottom right\n- x1a, y1a, x2a, y2a = xc, yc, min(xc + w,\n- s * 2), min(s * 2, yc + h)\n- x1b, y1b, x2b, y2b = 0, 0, min(w, x2a - x1a), min(y2a - y1a, h)\n-\n- image[y1a:y2a, x1a:x2a] = im[y1b:y2b, x1b:x2b]\n- padw = x1a - x1b\n- padh = y1a - y1b\n- boxes[i] = boxes[i] + (padw, padh, padw, padh)\n-\n- boxes = np.concatenate(boxes, axis=0)\n- boxes = np.clip(boxes, 0, s * 2)\n- labels = np.concatenate(labels, axis=0)\n- if 'is_crowd' in sample[0]:\n- is_crowd = np.concatenate([x['is_crowd'] for x in sample], axis=0)\n- if 'difficult' in sample[0]:\n- difficult = np.concatenate([x['difficult'] for x in sample], axis=0)\n- sample = sample[0]\n- sample['image'] = image.astype(np.uint8)\n- sample['gt_bbox'] = boxes\n- sample['gt_class'] = labels\n- if 'is_crowd' in sample:\n- sample['is_crowd'] = is_crowd\n- if 'difficult' in sample:\n- sample['difficult'] = difficult\n-\n- return sample\n-\n-\n@register_op\nclass RandomSelect(BaseOperator):\n\"\"\"\n"
}
] |
Python
|
Apache License 2.0
|
paddlepaddle/paddledetection
|
remove Mosaic & RandomPerspective (#4450)
|
499,339 |
04.11.2021 10:42:36
| -28,800 |
397a1d57d2092b59400ab2388506bb03ae46f407
|
[benchmark] fix faster rcnn dataloader inconsistency
|
[
{
"change_type": "ADD",
"old_path": null,
"new_path": "benchmark/configs/faster_rcnn_r50_fpn_1x_coco.yml",
"diff": "+_BASE_: [\n+ '../../configs/datasets/coco_detection.yml',\n+ '../../configs/runtime.yml',\n+ '../../configs/faster_rcnn/_base_/optimizer_1x.yml',\n+ '../../configs/faster_rcnn/_base_/faster_rcnn_r50_fpn.yml',\n+]\n+weights: output/faster_rcnn_r50_fpn_1x_coco/model_final\n+\n+worker_num: 2\n+TrainReader:\n+ sample_transforms:\n+ - Decode: {}\n+ - Resize: {interp: 2, target_size: [800, 1333], keep_ratio: True}\n+ - RandomFlip: {prob: 0.5}\n+ - NormalizeImage: {is_scale: true, mean: [0.485,0.456,0.406], std: [0.229, 0.224,0.225]}\n+ - Permute: {}\n+ batch_transforms:\n+ - PadBatch: {pad_to_stride: 32}\n+ batch_size: 1\n+ shuffle: true\n+ drop_last: true\n+ collate_batch: false\n+\n+\n+EvalReader:\n+ sample_transforms:\n+ - Decode: {}\n+ - Resize: {interp: 2, target_size: [800, 1333], keep_ratio: True}\n+ - NormalizeImage: {is_scale: true, mean: [0.485,0.456,0.406], std: [0.229, 0.224,0.225]}\n+ - Permute: {}\n+ batch_transforms:\n+ - PadBatch: {pad_to_stride: 32}\n+ batch_size: 1\n+ shuffle: false\n+ drop_last: false\n+\n+\n+TestReader:\n+ sample_transforms:\n+ - Decode: {}\n+ - Resize: {interp: 2, target_size: [800, 1333], keep_ratio: True}\n+ - NormalizeImage: {is_scale: true, mean: [0.485,0.456,0.406], std: [0.229, 0.224,0.225]}\n+ - Permute: {}\n+ batch_transforms:\n+ - PadBatch: {pad_to_stride: 32}\n+ batch_size: 1\n+ shuffle: false\n+ drop_last: false\n"
},
{
"change_type": "MODIFY",
"old_path": "benchmark/run_benchmark.sh",
"new_path": "benchmark/run_benchmark.sh",
"diff": "@@ -25,7 +25,7 @@ function _train(){\nset_optimizer_lr_mp=\" \"\n# parse model_name\ncase ${model_name} in\n- faster_rcnn) model_yml=\"configs/faster_rcnn/faster_rcnn_r50_fpn_1x_coco.yml\"\n+ faster_rcnn) model_yml=\"benchmark/configs/faster_rcnn_r50_fpn_1x_coco.yml\"\nset_optimizer_lr_sp=\"LearningRate.base_lr=0.001\" ;;\nfcos) model_yml=\"configs/fcos/fcos_r50_fpn_1x_coco.yml\"\nset_optimizer_lr_sp=\"LearningRate.base_lr=0.001\" ;;\n"
}
] |
Python
|
Apache License 2.0
|
paddlepaddle/paddledetection
|
[benchmark] fix faster rcnn dataloader inconsistence (#4451)
|
499,301 |
05.11.2021 01:08:43
| -28,800 |
d8cb6707a2291f0f493f0b5a2e155760483ab574
|
fix picodet postprocess for none-det case
|
[
{
"change_type": "MODIFY",
"old_path": "deploy/python/picodet_postprocess.py",
"new_path": "deploy/python/picodet_postprocess.py",
"diff": "@@ -193,13 +193,17 @@ class PicoDetPostProcess(object):\ntop_k=self.keep_top_k, )\npicked_box_probs.append(box_probs)\npicked_labels.extend([class_index] * box_probs.shape[0])\n- if not picked_box_probs:\n- return np.array([]), np.array([]), np.array([])\n+\n+ if len(picked_box_probs) == 0:\n+ out_boxes_list.append(np.empty((0, 4)))\n+ out_boxes_num.append(0)\n+\n+ else:\npicked_box_probs = np.concatenate(picked_box_probs)\n# resize output boxes\n- picked_box_probs[:, :4] = self.warp_boxes(picked_box_probs[:, :4],\n- self.ori_shape[batch_id])\n+ picked_box_probs[:, :4] = self.warp_boxes(\n+ picked_box_probs[:, :4], self.ori_shape[batch_id])\nim_scale = np.concatenate([\nself.scale_factor[batch_id][::-1],\nself.scale_factor[batch_id][::-1]\n@@ -210,7 +214,8 @@ class PicoDetPostProcess(object):\nnp.concatenate(\n[\nnp.expand_dims(\n- np.array(picked_labels), axis=-1), np.expand_dims(\n+ np.array(picked_labels),\n+ axis=-1), np.expand_dims(\npicked_box_probs[:, 4], axis=-1),\npicked_box_probs[:, :4]\n],\n"
}
] |
Python
|
Apache License 2.0
|
paddlepaddle/paddledetection
|
fix picodet postprocess for none-det case (#4462)
|
499,299 |
05.11.2021 09:53:28
| -28,800 |
8a685c4b67807907ce0012aba497bab1f76b31cf
|
update code reference information for keypoint
|
[
{
"change_type": "MODIFY",
"old_path": "deploy/python/keypoint_preprocess.py",
"new_path": "deploy/python/keypoint_preprocess.py",
"diff": "@@ -109,7 +109,10 @@ def get_affine_transform(center,\ndef get_warp_matrix(theta, size_input, size_dst, size_target):\n- \"\"\"Calculate the transformation matrix under the constraint of unbiased.\n+ \"\"\"This code is based on\n+ https://github.com/open-mmlab/mmpose/blob/master/mmpose/core/post_processing/post_transforms.py\n+\n+ Calculate the transformation matrix under the constraint of unbiased.\nPaper ref: Huang et al. The Devil is in the Details: Delving into Unbiased\nData Processing for Human Pose Estimation (CVPR 2020).\n"
},
{
"change_type": "MODIFY",
"old_path": "ppdet/data/transform/keypoint_operators.py",
"new_path": "ppdet/data/transform/keypoint_operators.py",
"diff": "@@ -790,7 +790,10 @@ class ToHeatmapsTopDown_DARK(object):\n@register_keypointop\nclass ToHeatmapsTopDown_UDP(object):\n- \"\"\"to generate the gaussian heatmaps of keypoint for heatmap loss.\n+ \"\"\"This code is based on:\n+ https://github.com/HuangJunJie2017/UDP-Pose/blob/master/deep-high-resolution-net.pytorch/lib/dataset/JointsDataset.py\n+\n+ to generate the gaussian heatmaps of keypoint for heatmap loss.\nref: Huang et al. The Devil is in the Details: Delving into Unbiased Data Processing\nfor Human Pose Estimation (CVPR 2020).\n"
},
{
"change_type": "MODIFY",
"old_path": "ppdet/modeling/backbones/lite_hrnet.py",
"new_path": "ppdet/modeling/backbones/lite_hrnet.py",
"diff": "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n+\"\"\"\n+This code is based on\n+https://github.com/HRNet/Lite-HRNet/blob/hrnet/models/backbones/litehrnet.py\n+\"\"\"\nimport paddle\nimport paddle.nn as nn\n"
},
{
"change_type": "MODIFY",
"old_path": "ppdet/modeling/keypoint_utils.py",
"new_path": "ppdet/modeling/keypoint_utils.py",
"diff": "@@ -96,7 +96,10 @@ def get_affine_transform(center,\ndef get_warp_matrix(theta, size_input, size_dst, size_target):\n- \"\"\"Calculate the transformation matrix under the constraint of unbiased.\n+ \"\"\"This code is based on\n+ https://github.com/open-mmlab/mmpose/blob/master/mmpose/core/post_processing/post_transforms.py\n+\n+ Calculate the transformation matrix under the constraint of unbiased.\nPaper ref: Huang et al. The Devil is in the Details: Delving into Unbiased\nData Processing for Human Pose Estimation (CVPR 2020).\n"
}
] |
Python
|
Apache License 2.0
|
paddlepaddle/paddledetection
|
update code reference information for keypoint (#4458)
|
499,348 |
05.11.2021 11:36:26
| -28,800 |
8ebccc9f1ef248ea1885d4263cfb1816184aeab1
|
update keypoint code citation
|
[
{
"change_type": "MODIFY",
"old_path": "deploy/python/keypoint_postprocess.py",
"new_path": "deploy/python/keypoint_postprocess.py",
"diff": "@@ -23,7 +23,7 @@ from keypoint_preprocess import get_affine_mat_kernel, get_affine_transform\nclass HrHRNetPostProcess(object):\n- '''\n+ \"\"\"\nHrHRNet postprocess contain:\n1) get topk keypoints in the output heatmap\n2) sample the tagmap's value corresponding to each of the topk coordinate\n@@ -37,7 +37,7 @@ class HrHRNetPostProcess(object):\ninputs(list[heatmap]): the output list of modle, [heatmap, heatmap_maxpool, tagmap], heatmap_maxpool used to get topk\noriginal_height, original_width (float): the original image size\n- '''\n+ \"\"\"\ndef __init__(self, max_num_people=30, heat_thresh=0.2, tag_thresh=1.):\nself.max_num_people = max_num_people\n@@ -212,7 +212,7 @@ class HRNetPostProcess(object):\nreturn output_flipped\ndef get_max_preds(self, heatmaps):\n- '''get predictions from score maps\n+ \"\"\"get predictions from score maps\nArgs:\nheatmaps: numpy.ndarray([batch_size, num_joints, height, width])\n@@ -220,7 +220,7 @@ class HRNetPostProcess(object):\nReturns:\npreds: numpy.ndarray([batch_size, num_joints, 2]), keypoints coords\nmaxvals: numpy.ndarray([batch_size, num_joints, 2]), the maximum confidence of the keypoints\n- '''\n+ \"\"\"\nassert isinstance(heatmaps,\nnp.ndarray), 'heatmaps should be numpy.ndarray'\nassert heatmaps.ndim == 4, 'batch_images should be 4-ndim'\n@@ -286,6 +286,10 @@ class HRNetPostProcess(object):\nreturn coord\ndef dark_postprocess(self, hm, coords, kernelsize):\n+ \"\"\"\n+ refer to https://github.com/ilovepose/DarkPose/lib/core/inference.py\n+\n+ \"\"\"\nhm = self.gaussian_blur(hm, kernelsize)\nhm = np.maximum(hm, 1e-10)\nhm = np.log(hm)\n"
},
{
"change_type": "MODIFY",
"old_path": "deploy/python/keypoint_preprocess.py",
"new_path": "deploy/python/keypoint_preprocess.py",
"diff": "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n-\n+\"\"\"\n+this code is based on https://github.com/open-mmlab/mmpose/mmpose/core/post_processing/post_transforms.py\n+\"\"\"\nimport cv2\nimport numpy as np\n"
},
{
"change_type": "MODIFY",
"old_path": "deploy/python/preprocess.py",
"new_path": "deploy/python/preprocess.py",
"diff": "import cv2\nimport numpy as np\n+from keypoint_preprocess import get_affine_transform\ndef decode_image(im_file, im_info):\n@@ -263,90 +264,6 @@ class WarpAffine(object):\nself.scale = scale\nself.shift = shift\n- def _get_3rd_point(self, a, b):\n- assert len(\n- a) == 2, 'input of _get_3rd_point should be point with length of 2'\n- assert len(\n- b) == 2, 'input of _get_3rd_point should be point with length of 2'\n- direction = a - b\n- third_pt = b + np.array([-direction[1], direction[0]], dtype=np.float32)\n- return third_pt\n-\n- def rotate_point(self, pt, angle_rad):\n- \"\"\"Rotate a point by an angle.\n-\n- Args:\n- pt (list[float]): 2 dimensional point to be rotated\n- angle_rad (float): rotation angle by radian\n-\n- Returns:\n- list[float]: Rotated point.\n- \"\"\"\n- assert len(pt) == 2\n- sn, cs = np.sin(angle_rad), np.cos(angle_rad)\n- new_x = pt[0] * cs - pt[1] * sn\n- new_y = pt[0] * sn + pt[1] * cs\n- rotated_pt = [new_x, new_y]\n-\n- return rotated_pt\n-\n- def get_affine_transform(self,\n- center,\n- input_size,\n- rot,\n- output_size,\n- shift=(0., 0.),\n- inv=False):\n- \"\"\"Get the affine transform matrix, given the center/scale/rot/output_size.\n-\n- Args:\n- center (np.ndarray[2, ]): Center of the bounding box (x, y).\n- input_size (np.ndarray[2, ]): Size of input feature (width, height).\n- rot (float): Rotation angle (degree).\n- output_size (np.ndarray[2, ]): Size of the destination heatmaps.\n- shift (0-100%): Shift translation ratio wrt the width/height.\n- Default (0., 0.).\n- inv (bool): Option to inverse the affine transform direction.\n- (inv=False: src->dst or inv=True: dst->src)\n-\n- Returns:\n- np.ndarray: The transform matrix.\n- \"\"\"\n- assert len(center) == 2\n- assert len(output_size) == 2\n- assert len(shift) == 2\n-\n- if not isinstance(input_size, (np.ndarray, list)):\n- input_size = np.array([input_size, input_size], dtype=np.float32)\n- scale_tmp = input_size\n-\n- shift = np.array(shift)\n- src_w = scale_tmp[0]\n- dst_w = output_size[0]\n- dst_h = output_size[1]\n-\n- rot_rad = np.pi * rot / 180\n- src_dir = self.rotate_point([0., src_w * -0.5], rot_rad)\n- dst_dir = np.array([0., dst_w * -0.5])\n-\n- src = np.zeros((3, 2), dtype=np.float32)\n-\n- src[0, :] = center + scale_tmp * shift\n- src[1, :] = center + src_dir + scale_tmp * shift\n- src[2, :] = self._get_3rd_point(src[0, :], src[1, :])\n-\n- dst = np.zeros((3, 2), dtype=np.float32)\n- dst[0, :] = [dst_w * 0.5, dst_h * 0.5]\n- dst[1, :] = np.array([dst_w * 0.5, dst_h * 0.5]) + dst_dir\n- dst[2, :] = self._get_3rd_point(dst[0, :], dst[1, :])\n-\n- if inv:\n- trans = cv2.getAffineTransform(np.float32(dst), np.float32(src))\n- else:\n- trans = cv2.getAffineTransform(np.float32(src), np.float32(dst))\n-\n- return trans\n-\ndef __call__(self, im, im_info):\n\"\"\"\nArgs:\n@@ -371,7 +288,7 @@ class WarpAffine(object):\ninput_h, input_w = self.input_h, self.input_w\nc = np.array([w / 2., h / 2.], dtype=np.float32)\n- trans_input = self.get_affine_transform(c, s, 0, [input_w, input_h])\n+ trans_input = get_affine_transform(c, s, 0, [input_w, input_h])\nimg = cv2.resize(img, (w, h))\ninp = cv2.warpAffine(\nimg, trans_input, (input_w, input_h), flags=cv2.INTER_LINEAR)\n"
},
{
"change_type": "MODIFY",
"old_path": "ppdet/data/source/keypoint_coco.py",
"new_path": "ppdet/data/source/keypoint_coco.py",
"diff": "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n-\n+\"\"\"\n+this code is base on https://github.com/open-mmlab/mmpose\n+\"\"\"\nimport os\nimport cv2\nimport numpy as np\n@@ -25,8 +27,7 @@ from ppdet.core.workspace import register, serializable\n@serializable\nclass KeypointBottomUpBaseDataset(DetDataset):\n- \"\"\"Base class for bottom-up datasets. Adapted from\n- https://github.com/open-mmlab/mmpose\n+ \"\"\"Base class for bottom-up datasets.\nAll datasets should subclass it.\nAll subclasses should overwrite:\n@@ -90,8 +91,7 @@ class KeypointBottomUpBaseDataset(DetDataset):\n@register\n@serializable\nclass KeypointBottomUpCocoDataset(KeypointBottomUpBaseDataset):\n- \"\"\"COCO dataset for bottom-up pose estimation. Adapted from\n- https://github.com/open-mmlab/mmpose\n+ \"\"\"COCO dataset for bottom-up pose estimation.\nThe dataset loads raw features and apply specified transforms\nto return a dict containing the image tensors and other information.\n@@ -262,8 +262,7 @@ class KeypointBottomUpCocoDataset(KeypointBottomUpBaseDataset):\n@register\n@serializable\nclass KeypointBottomUpCrowdPoseDataset(KeypointBottomUpCocoDataset):\n- \"\"\"CrowdPose dataset for bottom-up pose estimation. Adapted from\n- https://github.com/open-mmlab/mmpose\n+ \"\"\"CrowdPose dataset for bottom-up pose estimation.\nThe dataset loads raw features and apply specified transforms\nto return a dict containing the image tensors and other information.\n@@ -387,9 +386,7 @@ class KeypointTopDownBaseDataset(DetDataset):\n@register\n@serializable\nclass KeypointTopDownCocoDataset(KeypointTopDownBaseDataset):\n- \"\"\"COCO dataset for top-down pose estimation. Adapted from\n- https://github.com/leoxiaobin/deep-high-resolution-net.pytorch\n- Copyright (c) Microsoft, under the MIT License.\n+ \"\"\"COCO dataset for top-down pose estimation.\nThe dataset loads raw features and apply specified transforms\nto return a dict containing the image tensors and other information.\n@@ -582,9 +579,7 @@ class KeypointTopDownCocoDataset(KeypointTopDownBaseDataset):\n@register\n@serializable\nclass KeypointTopDownMPIIDataset(KeypointTopDownBaseDataset):\n- \"\"\"MPII dataset for topdown pose estimation. Adapted from\n- https://github.com/leoxiaobin/deep-high-resolution-net.pytorch\n- Copyright (c) Microsoft, under the MIT License.\n+ \"\"\"MPII dataset for topdown pose estimation.\nThe dataset loads raw features and apply specified transforms\nto return a dict containing the image tensors and other information.\n"
},
{
"change_type": "MODIFY",
"old_path": "ppdet/data/transform/keypoint_operators.py",
"new_path": "ppdet/data/transform/keypoint_operators.py",
"diff": "@@ -682,6 +682,10 @@ class ToHeatmapsTopDown(object):\nself.sigma = sigma\ndef __call__(self, records):\n+ \"\"\"refer to\n+ https://github.com/leoxiaobin/deep-high-resolution-net.pytorch\n+ Copyright (c) Microsoft, under the MIT License.\n+ \"\"\"\njoints = records['joints']\njoints_vis = records['joints_vis']\nnum_joints = joints.shape[0]\n"
},
{
"change_type": "MODIFY",
"old_path": "ppdet/metrics/keypoint_metrics.py",
"new_path": "ppdet/metrics/keypoint_metrics.py",
"diff": "@@ -27,11 +27,10 @@ __all__ = ['KeyPointTopDownCOCOEval', 'KeyPointTopDownMPIIEval']\nclass KeyPointTopDownCOCOEval(object):\n- '''\n- Adapted from\n+ \"\"\"refer to\nhttps://github.com/leoxiaobin/deep-high-resolution-net.pytorch\nCopyright (c) Microsoft, under the MIT License.\n- '''\n+ \"\"\"\ndef __init__(self,\nanno_file,\n@@ -286,7 +285,7 @@ class KeyPointTopDownMPIIEval(object):\nreturn self.eval_results\ndef evaluate(self, outputs, savepath=None):\n- \"\"\"Evaluate PCKh for MPII dataset. Adapted from\n+ \"\"\"Evaluate PCKh for MPII dataset. refer to\nhttps://github.com/leoxiaobin/deep-high-resolution-net.pytorch\nCopyright (c) Microsoft, under the MIT License.\n"
},
{
"change_type": "MODIFY",
"old_path": "ppdet/modeling/keypoint_utils.py",
"new_path": "ppdet/modeling/keypoint_utils.py",
"diff": "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n+\"\"\"\n+this code is based on https://github.com/open-mmlab/mmpose\n+\"\"\"\nimport cv2\nimport numpy as np\n"
}
] |
Python
|
Apache License 2.0
|
paddlepaddle/paddledetection
|
update keypoint code citation (#4456)
|
499,395 |
05.11.2021 13:05:46
| -28,800 |
69cc99f9d17ff7ca34670fb770915d653ce26b30
|
add references for some code and remove some code
|
[
{
"change_type": "MODIFY",
"old_path": "ppdet/data/transform/op_helper.py",
"new_path": "ppdet/data/transform/op_helper.py",
"diff": "@@ -464,65 +464,6 @@ def gaussian2D(shape, sigma_x=1, sigma_y=1):\nreturn h\n-def transform_bbox(sample,\n- M,\n- w,\n- h,\n- area_thr=0.25,\n- wh_thr=2,\n- ar_thr=20,\n- perspective=False):\n- \"\"\"\n- transfrom bbox according to tranformation matrix M,\n- refer to https://github.com/ultralytics/yolov5/blob/develop/utils/datasets.py\n- \"\"\"\n- bbox = sample['gt_bbox']\n- label = sample['gt_class']\n- # rotate bbox\n- n = len(bbox)\n- xy = np.ones((n * 4, 3), dtype=np.float32)\n- xy[:, :2] = bbox[:, [0, 1, 2, 3, 0, 3, 2, 1]].reshape(n * 4, 2)\n- # xy = xy @ M.T\n- xy = np.matmul(xy, M.T)\n- if perspective:\n- xy = (xy[:, :2] / xy[:, 2:3]).reshape(n, 8)\n- else:\n- xy = xy[:, :2].reshape(n, 8)\n- # get new bboxes\n- x = xy[:, [0, 2, 4, 6]]\n- y = xy[:, [1, 3, 5, 7]]\n- bbox = np.concatenate(\n- (x.min(1), y.min(1), x.max(1), y.max(1))).reshape(4, n).T\n- # clip boxes\n- mask = filter_bbox(bbox, w, h, area_thr)\n- sample['gt_bbox'] = bbox[mask]\n- sample['gt_class'] = sample['gt_class'][mask]\n- if 'is_crowd' in sample:\n- sample['is_crowd'] = sample['is_crowd'][mask]\n- if 'difficult' in sample:\n- sample['difficult'] = sample['difficult'][mask]\n- return sample\n-\n-\n-def filter_bbox(bbox, w, h, area_thr=0.25, wh_thr=2, ar_thr=20):\n- \"\"\"\n- filter bbox, refer to https://github.com/ultralytics/yolov5/blob/develop/utils/datasets.py\n- \"\"\"\n- # clip boxes\n- area1 = (bbox[:, 2:4] - bbox[:, 0:2]).prod(1)\n- bbox[:, [0, 2]] = bbox[:, [0, 2]].clip(0, w)\n- bbox[:, [1, 3]] = bbox[:, [1, 3]].clip(0, h)\n- # compute\n- area2 = (bbox[:, 2:4] - bbox[:, 0:2]).prod(1)\n- area_ratio = area2 / (area1 + 1e-16)\n- wh = bbox[:, 2:4] - bbox[:, 0:2]\n- ar_ratio = np.maximum(wh[:, 1] / (wh[:, 0] + 1e-16),\n- wh[:, 0] / (wh[:, 1] + 1e-16))\n- mask = (area_ratio > area_thr) & (\n- (wh > wh_thr).all(1)) & (ar_ratio < ar_thr)\n- return mask\n-\n-\ndef draw_umich_gaussian(heatmap, center, radius, k=1):\n\"\"\"\ndraw_umich_gaussian, refer to https://github.com/xingyizhou/CenterNet/blob/master/src/lib/utils/image.py#L126\n"
},
{
"change_type": "MODIFY",
"old_path": "ppdet/data/transform/operators.py",
"new_path": "ppdet/data/transform/operators.py",
"diff": "@@ -48,7 +48,7 @@ from .op_helper import (satisfy_sample_constraint, filter_and_process,\ngenerate_sample_bbox, clip_bbox, data_anchor_sampling,\nsatisfy_sample_constraint_coverage, crop_image_sampling,\ngenerate_sample_bbox_square, bbox_area_sampling,\n- is_poly, transform_bbox, get_border)\n+ is_poly, get_border)\nfrom ppdet.utils.logger import setup_logger\nfrom ppdet.modeling.keypoint_utils import get_affine_transform, affine_transform\n@@ -2476,6 +2476,9 @@ class RandomSelect(BaseOperator):\n\"\"\"\nRandomly choose a transformation between transforms1 and transforms2,\nand the probability of choosing transforms1 is p.\n+\n+ The code is based on https://github.com/facebookresearch/detr/blob/main/datasets/transforms.py\n+\n\"\"\"\ndef __init__(self, transforms1, transforms2, p=0.5):\n@@ -2833,6 +2836,10 @@ class WarpAffine(BaseOperator):\nshift=0.1):\n\"\"\"WarpAffine\nWarp affine the image\n+\n+ The code is based on https://github.com/xingyizhou/CenterNet/blob/master/src/lib/datasets/sample/ctdet.py\n+\n+\n\"\"\"\nsuper(WarpAffine, self).__init__()\nself.keep_res = keep_res\n"
},
{
"change_type": "MODIFY",
"old_path": "ppdet/ext_op/rbox_iou_op.cc",
"new_path": "ppdet/ext_op/rbox_iou_op.cc",
"diff": "-/* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.\n-Licensed under the Apache License, Version 2.0 (the \"License\");\n-you may not use this file except in compliance with the License.\n-You may obtain a copy of the License at\n-\n- http://www.apache.org/licenses/LICENSE-2.0\n-\n-Unless required by applicable law or agreed to in writing, software\n-distributed under the License is distributed on an \"AS IS\" BASIS,\n-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n-See the License for the specific language governing permissions and\n-limitations under the License. */\n+// Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.\n+//\n+// Licensed under the Apache License, Version 2.0 (the \"License\");\n+// you may not use this file except in compliance with the License.\n+// You may obtain a copy of the License at\n+//\n+// http://www.apache.org/licenses/LICENSE-2.0\n+//\n+// Unless required by applicable law or agreed to in writing, software\n+// distributed under the License is distributed on an \"AS IS\" BASIS,\n+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+// See the License for the specific language governing permissions and\n+// limitations under the License.\n+//\n+// The code is based on https://github.com/csuhan/s2anet/blob/master/mmdet/ops/box_iou_rotated\n#include \"rbox_iou_op.h\"\n#include \"paddle/extension.h\"\n"
},
{
"change_type": "MODIFY",
"old_path": "ppdet/ext_op/rbox_iou_op.cu",
"new_path": "ppdet/ext_op/rbox_iou_op.cu",
"diff": "-/* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.\n-Licensed under the Apache License, Version 2.0 (the \"License\");\n-you may not use this file except in compliance with the License.\n-You may obtain a copy of the License at\n-\n- http://www.apache.org/licenses/LICENSE-2.0\n-\n-Unless required by applicable law or agreed to in writing, software\n-distributed under the License is distributed on an \"AS IS\" BASIS,\n-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n-See the License for the specific language governing permissions and\n-limitations under the License. */\n+// Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.\n+//\n+// Licensed under the Apache License, Version 2.0 (the \"License\");\n+// you may not use this file except in compliance with the License.\n+// You may obtain a copy of the License at\n+//\n+// http://www.apache.org/licenses/LICENSE-2.0\n+//\n+// Unless required by applicable law or agreed to in writing, software\n+// distributed under the License is distributed on an \"AS IS\" BASIS,\n+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+// See the License for the specific language governing permissions and\n+// limitations under the License.\n+//\n+// The code is based on https://github.com/csuhan/s2anet/blob/master/mmdet/ops/box_iou_rotated\n#include \"rbox_iou_op.h\"\n#include \"paddle/extension.h\"\n"
},
{
"change_type": "MODIFY",
"old_path": "ppdet/ext_op/rbox_iou_op.h",
"new_path": "ppdet/ext_op/rbox_iou_op.h",
"diff": "-/* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.\n-Licensed under the Apache License, Version 2.0 (the \"License\");\n-you may not use this file except in compliance with the License.\n-You may obtain a copy of the License at\n-\n- http://www.apache.org/licenses/LICENSE-2.0\n-\n-Unless required by applicable law or agreed to in writing, software\n-distributed under the License is distributed on an \"AS IS\" BASIS,\n-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n-See the License for the specific language governing permissions and\n-limitations under the License. */\n+// Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.\n+//\n+// Licensed under the Apache License, Version 2.0 (the \"License\");\n+// you may not use this file except in compliance with the License.\n+// You may obtain a copy of the License at\n+//\n+// http://www.apache.org/licenses/LICENSE-2.0\n+//\n+// Unless required by applicable law or agreed to in writing, software\n+// distributed under the License is distributed on an \"AS IS\" BASIS,\n+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+// See the License for the specific language governing permissions and\n+// limitations under the License.\n+//\n+// The code is based on https://github.com/csuhan/s2anet/blob/master/mmdet/ops/box_iou_rotated\n#pragma once\n"
},
{
"change_type": "MODIFY",
"old_path": "ppdet/modeling/heads/s2anet_head.py",
"new_path": "ppdet/modeling/heads/s2anet_head.py",
"diff": "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n+#\n+# The code is based on https://github.com/csuhan/s2anet/blob/master/mmdet/models/anchor_heads_rotated/s2anet_head.py\n+\nimport paddle\nfrom paddle import ParamAttr\nimport paddle.nn as nn\n@@ -625,7 +628,8 @@ class S2ANetHead(nn.Layer):\nfam_bbox_total = self.gwd_loss(fam_bbox_decode,\nbbox_gt_bboxes_level)\nfam_bbox_total = fam_bbox_total * feat_bbox_weights\n- fam_bbox_total = paddle.sum(fam_bbox_total) / num_total_samples\n+ fam_bbox_total = paddle.sum(\n+ fam_bbox_total) / num_total_samples\nfam_bbox_losses.append(fam_bbox_total)\nst_idx += feat_anchor_num\n@@ -739,7 +743,8 @@ class S2ANetHead(nn.Layer):\nodm_bbox_total = self.gwd_loss(odm_bbox_decode,\nbbox_gt_bboxes_level)\nodm_bbox_total = odm_bbox_total * feat_bbox_weights\n- odm_bbox_total = paddle.sum(odm_bbox_total) / num_total_samples\n+ odm_bbox_total = paddle.sum(\n+ odm_bbox_total) / num_total_samples\nodm_bbox_losses.append(odm_bbox_total)\nst_idx += feat_anchor_num\n"
},
{
"change_type": "MODIFY",
"old_path": "ppdet/modeling/necks/yolo_fpn.py",
"new_path": "ppdet/modeling/necks/yolo_fpn.py",
"diff": "@@ -180,7 +180,7 @@ class CoordConv(nn.Layer):\nname='',\ndata_format='NCHW'):\n\"\"\"\n- CoordConv layer\n+ CoordConv layer, see https://arxiv.org/abs/1807.03247\nArgs:\nch_in (int): input channel\n"
},
{
"change_type": "MODIFY",
"old_path": "static/tools/anchor_cluster.py",
"new_path": "static/tools/anchor_cluster.py",
"diff": "@@ -126,8 +126,7 @@ class YOLOv2AnchorCluster(BaseAnchorCluster):\n\"\"\"\nYOLOv2 Anchor Cluster\n- Reference:\n- https://github.com/AlexeyAB/darknet/blob/master/scripts/gen_anchors.py\n+ The code is based on https://github.com/AlexeyAB/darknet/blob/master/scripts/gen_anchors.py\nArgs:\nn (int): number of clusters\n@@ -196,103 +195,6 @@ class YOLOv2AnchorCluster(BaseAnchorCluster):\nreturn centers\n-class YOLOv5AnchorCluster(BaseAnchorCluster):\n- def __init__(self,\n- n,\n- dataset,\n- size,\n- cache_path,\n- cache,\n- iters=300,\n- gen_iters=1000,\n- thresh=0.25,\n- verbose=True):\n- super(YOLOv5AnchorCluster, self).__init__(\n- n, cache_path, cache, verbose=verbose)\n- \"\"\"\n- YOLOv5 Anchor Cluster\n-\n- Reference:\n- https://github.com/ultralytics/yolov5/blob/master/utils/general.py\n-\n- Args:\n- n (int): number of clusters\n- dataset (DataSet): DataSet instance, VOC or COCO\n- size (list): [w, h]\n- cache_path (str): cache directory path\n- cache (bool): whether using cache\n- iters (int): iters of kmeans algorithm\n- gen_iters (int): iters of genetic algorithm\n- threshold (float): anchor scale threshold\n- verbose (bool): whether print results\n- \"\"\"\n- self.dataset = dataset\n- self.size = size\n- self.iters = iters\n- self.gen_iters = gen_iters\n- self.thresh = thresh\n-\n- def print_result(self, centers):\n- whs = self.whs\n- centers = centers[np.argsort(centers.prod(1))]\n- x, best = self.metric(whs, centers)\n- bpr, aat = (\n- best > self.thresh).mean(), (x > self.thresh).mean() * self.n\n- logger.info(\n- 'thresh=%.2f: %.4f best possible recall, %.2f anchors past thr' %\n- (self.thresh, bpr, aat))\n- logger.info(\n- 'n=%g, img_size=%s, metric_all=%.3f/%.3f-mean/best, past_thresh=%.3f-mean: '\n- % (self.n, self.size, x.mean(), best.mean(),\n- x[x > self.thresh].mean()))\n- logger.info('%d anchor cluster result: [w, h]' % self.n)\n- for w, h in centers:\n- logger.info('[%d, %d]' % (round(w), round(h)))\n-\n- def metric(self, whs, centers):\n- r = whs[:, None] / centers[None]\n- x = np.minimum(r, 1. / r).min(2)\n- return x, x.max(1)\n-\n- def fitness(self, whs, centers):\n- _, best = self.metric(whs, centers)\n- return (best * (best > self.thresh)).mean()\n-\n- def calc_anchors(self):\n- self.whs = self.whs * self.shapes / self.shapes.max(\n- 1, keepdims=True) * np.array([self.size])\n- wh0 = self.whs\n- i = (wh0 < 3.0).any(1).sum()\n- if i:\n- logger.warning('Extremely small objects found. %d of %d'\n- 'labels are < 3 pixels in width or height' %\n- (i, len(wh0)))\n-\n- wh = wh0[(wh0 >= 2.0).any(1)]\n- logger.info('Running kmeans for %g anchors on %g points...' 
%\n- (self.n, len(wh)))\n- s = wh.std(0)\n- centers, dist = kmeans(wh / s, self.n, iter=self.iters)\n- centers *= s\n-\n- f, sh, mp, s = self.fitness(wh, centers), centers.shape, 0.9, 0.1\n- pbar = tqdm(\n- range(self.gen_iters),\n- desc='Evolving anchors with Genetic Algorithm')\n- for _ in pbar:\n- v = np.ones(sh)\n- while (v == 1).all():\n- v = ((np.random.random(sh) < mp) * np.random.random() *\n- np.random.randn(*sh) * s + 1).clip(0.3, 3.0)\n- new_centers = (centers.copy() * v).clip(min=2.0)\n- new_f = self.fitness(wh, new_centers)\n- if new_f > f:\n- f, centers = new_f, new_centers.copy()\n- pbar.desc = 'Evolving anchors with Genetic Algorithm: fitness = %.4f' % f\n-\n- return centers\n-\n-\ndef main():\nparser = ArgsParser()\nparser.add_argument(\n@@ -303,18 +205,6 @@ def main():\ndefault=1000,\ntype=int,\nhelp='num of iterations for kmeans')\n- parser.add_argument(\n- '--gen_iters',\n- '-gi',\n- default=1000,\n- type=int,\n- help='num of iterations for genetic algorithm')\n- parser.add_argument(\n- '--thresh',\n- '-t',\n- default=0.25,\n- type=float,\n- help='anchor scale threshold')\nparser.add_argument(\n'--verbose', '-v', default=True, type=bool, help='whether print result')\nparser.add_argument(\n@@ -328,7 +218,7 @@ def main():\n'-m',\ndefault='v2',\ntype=str,\n- help='cluster method, [v2, v5] are supported now')\n+ help='cluster method, v2 is only supported now')\nparser.add_argument(\n'--cache_path', default='cache', type=str, help='cache path')\nparser.add_argument(\n@@ -353,18 +243,14 @@ def main():\nsize = int(FLAGS.size)\nsize = [size, size]\n- elif 'image_shape' in cfg['TrainReader']['inputs_def']:\n- size = cfg['TrainReader']['inputs_def']['image_shape'][1:]\n+ elif 'image_shape' in cfg['TestReader']['inputs_def']:\n+ size = cfg['TestReader']['inputs_def']['image_shape'][1:]\nelse:\nraise ValueError('size is not specified')\nif FLAGS.method == 'v2':\ncluster = YOLOv2AnchorCluster(FLAGS.n, dataset, size, FLAGS.cache_path,\nFLAGS.cache, FLAGS.iters, FLAGS.verbose)\n- elif FLAGS.method == 'v5':\n- cluster = YOLOv5AnchorCluster(FLAGS.n, dataset, size, FLAGS.cache_path,\n- FLAGS.cache, FLAGS.iters, FLAGS.gen_iters,\n- FLAGS.thresh, FLAGS.verbose)\nelse:\nraise ValueError('cluster method: %s is not supported' % FLAGS.method)\n"
},
{
"change_type": "MODIFY",
"old_path": "tools/anchor_cluster.py",
"new_path": "tools/anchor_cluster.py",
"diff": "@@ -111,8 +111,7 @@ class YOLOv2AnchorCluster(BaseAnchorCluster):\n\"\"\"\nYOLOv2 Anchor Cluster\n- Reference:\n- https://github.com/AlexeyAB/darknet/blob/master/scripts/gen_anchors.py\n+ The code is based on https://github.com/AlexeyAB/darknet/blob/master/scripts/gen_anchors.py\nArgs:\nn (int): number of clusters\n@@ -182,103 +181,6 @@ class YOLOv2AnchorCluster(BaseAnchorCluster):\nreturn centers\n-class YOLOv5AnchorCluster(BaseAnchorCluster):\n- def __init__(self,\n- n,\n- dataset,\n- size,\n- cache_path,\n- cache,\n- iters=300,\n- gen_iters=1000,\n- thresh=0.25,\n- verbose=True):\n- super(YOLOv5AnchorCluster, self).__init__(\n- n, cache_path, cache, verbose=verbose)\n- \"\"\"\n- YOLOv5 Anchor Cluster\n-\n- Reference:\n- https://github.com/ultralytics/yolov5/blob/master/utils/general.py\n-\n- Args:\n- n (int): number of clusters\n- dataset (DataSet): DataSet instance, VOC or COCO\n- size (list): [w, h]\n- cache_path (str): cache directory path\n- cache (bool): whether using cache\n- iters (int): iters of kmeans algorithm\n- gen_iters (int): iters of genetic algorithm\n- threshold (float): anchor scale threshold\n- verbose (bool): whether print results\n- \"\"\"\n- self.dataset = dataset\n- self.size = size\n- self.iters = iters\n- self.gen_iters = gen_iters\n- self.thresh = thresh\n-\n- def print_result(self, centers):\n- whs = self.whs\n- centers = centers[np.argsort(centers.prod(1))]\n- x, best = self.metric(whs, centers)\n- bpr, aat = (\n- best > self.thresh).mean(), (x > self.thresh).mean() * self.n\n- logger.info(\n- 'thresh=%.2f: %.4f best possible recall, %.2f anchors past thr' %\n- (self.thresh, bpr, aat))\n- logger.info(\n- 'n=%g, img_size=%s, metric_all=%.3f/%.3f-mean/best, past_thresh=%.3f-mean: '\n- % (self.n, self.size, x.mean(), best.mean(),\n- x[x > self.thresh].mean()))\n- logger.info('%d anchor cluster result: [w, h]' % self.n)\n- for w, h in centers:\n- logger.info('[%d, %d]' % (round(w), round(h)))\n-\n- def metric(self, whs, centers):\n- r = whs[:, None] / centers[None]\n- x = np.minimum(r, 1. / r).min(2)\n- return x, x.max(1)\n-\n- def fitness(self, whs, centers):\n- _, best = self.metric(whs, centers)\n- return (best * (best > self.thresh)).mean()\n-\n- def calc_anchors(self):\n- self.whs = self.whs * self.shapes / self.shapes.max(\n- 1, keepdims=True) * np.array([self.size])\n- wh0 = self.whs\n- i = (wh0 < 3.0).any(1).sum()\n- if i:\n- logger.warning('Extremely small objects found. %d of %d'\n- 'labels are < 3 pixels in width or height' %\n- (i, len(wh0)))\n-\n- wh = wh0[(wh0 >= 2.0).any(1)]\n- logger.info('Running kmeans for %g anchors on %g points...' 
%\n- (self.n, len(wh)))\n- s = wh.std(0)\n- centers, dist = kmeans(wh / s, self.n, iter=self.iters)\n- centers *= s\n-\n- f, sh, mp, s = self.fitness(wh, centers), centers.shape, 0.9, 0.1\n- pbar = tqdm(\n- range(self.gen_iters),\n- desc='Evolving anchors with Genetic Algorithm')\n- for _ in pbar:\n- v = np.ones(sh)\n- while (v == 1).all():\n- v = ((np.random.random(sh) < mp) * np.random.random() *\n- np.random.randn(*sh) * s + 1).clip(0.3, 3.0)\n- new_centers = (centers.copy() * v).clip(min=2.0)\n- new_f = self.fitness(wh, new_centers)\n- if new_f > f:\n- f, centers = new_f, new_centers.copy()\n- pbar.desc = 'Evolving anchors with Genetic Algorithm: fitness = %.4f' % f\n-\n- return centers\n-\n-\ndef main():\nparser = ArgsParser()\nparser.add_argument(\n@@ -289,18 +191,6 @@ def main():\ndefault=1000,\ntype=int,\nhelp='num of iterations for kmeans')\n- parser.add_argument(\n- '--gen_iters',\n- '-gi',\n- default=1000,\n- type=int,\n- help='num of iterations for genetic algorithm')\n- parser.add_argument(\n- '--thresh',\n- '-t',\n- default=0.25,\n- type=float,\n- help='anchor scale threshold')\nparser.add_argument(\n'--verbose', '-v', default=True, type=bool, help='whether print result')\nparser.add_argument(\n@@ -314,7 +204,7 @@ def main():\n'-m',\ndefault='v2',\ntype=str,\n- help='cluster method, [v2, v5] are supported now')\n+ help='cluster method, v2 is only supported now')\nparser.add_argument(\n'--cache_path', default='cache', type=str, help='cache path')\nparser.add_argument(\n@@ -338,19 +228,15 @@ def main():\nelse:\nsize = int(FLAGS.size)\nsize = [size, size]\n- elif 'inputs_def' in cfg['TrainReader'] and 'image_shape' in cfg[\n- 'TrainReader']['inputs_def']:\n- size = cfg['TrainReader']['inputs_def']['image_shape'][1:]\n+ elif 'inputs_def' in cfg['TestReader'] and 'image_shape' in cfg[\n+ 'TestReader']['inputs_def']:\n+ size = cfg['TestReader']['inputs_def']['image_shape'][1:]\nelse:\nraise ValueError('size is not specified')\nif FLAGS.method == 'v2':\ncluster = YOLOv2AnchorCluster(FLAGS.n, dataset, size, FLAGS.cache_path,\nFLAGS.cache, FLAGS.iters, FLAGS.verbose)\n- elif FLAGS.method == 'v5':\n- cluster = YOLOv5AnchorCluster(FLAGS.n, dataset, size, FLAGS.cache_path,\n- FLAGS.cache, FLAGS.iters, FLAGS.gen_iters,\n- FLAGS.thresh, FLAGS.verbose)\nelse:\nraise ValueError('cluster method: %s is not supported' % FLAGS.method)\n"
}
] |
Python
|
Apache License 2.0
|
paddlepaddle/paddledetection
|
add reference of some code and remove some code (#4467)
|
499,339 |
05.11.2021 16:16:31
| -28,800 |
56705e1edd07b31e8a25fdc375a1e76944c62f0d
|
fix code reference and licence
|
[
{
"change_type": "MODIFY",
"old_path": "ppdet/modeling/assigners/atss_assigner.py",
"new_path": "ppdet/modeling/assigners/atss_assigner.py",
"diff": "@@ -79,7 +79,10 @@ class ATSSAssigner(nn.Layer):\ngt_bboxes,\nbg_index,\ngt_scores=None):\n- r\"\"\"The assignment is done in following steps\n+ r\"\"\"This code is based on\n+ https://github.com/fcjian/TOOD/blob/master/mmdet/core/bbox/assigners/atss_assigner.py\n+\n+ The assignment is done in following steps\n1. compute iou between all bbox (bbox of all pyramid levels) and gt\n2. compute center distance between all bbox and gt\n3. on each pyramid level, for each gt, select k bbox whose center\n"
},
{
"change_type": "MODIFY",
"old_path": "ppdet/modeling/assigners/task_aligned_assigner.py",
"new_path": "ppdet/modeling/assigners/task_aligned_assigner.py",
"diff": "@@ -47,7 +47,10 @@ class TaskAlignedAssigner(nn.Layer):\ngt_bboxes,\nbg_index,\ngt_scores=None):\n- r\"\"\"The assignment is done in following steps\n+ r\"\"\"This code is based on\n+ https://github.com/fcjian/TOOD/blob/master/mmdet/core/bbox/assigners/task_aligned_assigner.py\n+\n+ The assignment is done in following steps\n1. compute alignment metric between all bbox (bbox of all pyramid levels) and gt\n2. select top-k bbox as candidates for each gt\n3. limit the positive sample's center in gt (because the anchor-free detector\n"
},
{
"change_type": "MODIFY",
"old_path": "ppdet/modeling/heads/detr_head.py",
"new_path": "ppdet/modeling/heads/detr_head.py",
"diff": "@@ -28,6 +28,10 @@ __all__ = ['DETRHead', 'DeformableDETRHead']\nclass MLP(nn.Layer):\n+ \"\"\"This code is based on\n+ https://github.com/facebookresearch/detr/blob/main/models/detr.py\n+ \"\"\"\n+\ndef __init__(self, input_dim, hidden_dim, output_dim, num_layers):\nsuper().__init__()\nself.num_layers = num_layers\n@@ -48,7 +52,11 @@ class MLP(nn.Layer):\nclass MultiHeadAttentionMap(nn.Layer):\n- \"\"\"This is a 2D attention module, which only returns the attention softmax (no multiplication by value)\"\"\"\n+ \"\"\"This code is based on\n+ https://github.com/facebookresearch/detr/blob/main/models/segmentation.py\n+\n+ This is a 2D attention module, which only returns the attention softmax (no multiplication by value)\n+ \"\"\"\ndef __init__(self, query_dim, hidden_dim, num_heads, dropout=0.0,\nbias=True):\n@@ -94,7 +102,9 @@ class MultiHeadAttentionMap(nn.Layer):\nclass MaskHeadFPNConv(nn.Layer):\n- \"\"\"\n+ \"\"\"This code is based on\n+ https://github.com/facebookresearch/detr/blob/main/models/segmentation.py\n+\nSimple convolutional head, using group norm.\nUpsampling is done using a FPN approach\n\"\"\"\n"
},
{
"change_type": "MODIFY",
"old_path": "ppdet/modeling/heads/tood_head.py",
"new_path": "ppdet/modeling/heads/tood_head.py",
"diff": "@@ -48,6 +48,10 @@ class ScaleReg(nn.Layer):\nclass TaskDecomposition(nn.Layer):\n+ \"\"\"This code is based on\n+ https://github.com/fcjian/TOOD/blob/master/mmdet/models/dense_heads/tood_head.py\n+ \"\"\"\n+\ndef __init__(\nself,\nfeat_channels,\n@@ -105,6 +109,9 @@ class TaskDecomposition(nn.Layer):\n@register\nclass TOODHead(nn.Layer):\n+ \"\"\"This code is based on\n+ https://github.com/fcjian/TOOD/blob/master/mmdet/models/dense_heads/tood_head.py\n+ \"\"\"\n__inject__ = ['nms', 'static_assigner', 'assigner']\n__shared__ = ['num_classes']\n"
},
{
"change_type": "MODIFY",
"old_path": "ppdet/modeling/transformers/deformable_transformer.py",
"new_path": "ppdet/modeling/transformers/deformable_transformer.py",
"diff": "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n+#\n+# Modified from Deformable-DETR (https://github.com/fundamentalvision/Deformable-DETR)\n+# Copyright (c) 2020 SenseTime. All Rights Reserved.\nfrom __future__ import absolute_import\nfrom __future__ import division\n"
},
{
"change_type": "MODIFY",
"old_path": "ppdet/modeling/transformers/detr_transformer.py",
"new_path": "ppdet/modeling/transformers/detr_transformer.py",
"diff": "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n+#\n+# Modified from DETR (https://github.com/facebookresearch/detr)\n+# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved\nfrom __future__ import absolute_import\nfrom __future__ import division\n"
},
{
"change_type": "MODIFY",
"old_path": "ppdet/modeling/transformers/matchers.py",
"new_path": "ppdet/modeling/transformers/matchers.py",
"diff": "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n+#\n+# Modified from DETR (https://github.com/facebookresearch/detr)\n+# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved\nfrom __future__ import absolute_import\nfrom __future__ import division\n"
},
{
"change_type": "MODIFY",
"old_path": "ppdet/modeling/transformers/position_encoding.py",
"new_path": "ppdet/modeling/transformers/position_encoding.py",
"diff": "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n+#\n+# Modified from DETR (https://github.com/facebookresearch/detr)\n+# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved\nfrom __future__ import absolute_import\nfrom __future__ import division\n"
},
{
"change_type": "MODIFY",
"old_path": "ppdet/modeling/transformers/utils.py",
"new_path": "ppdet/modeling/transformers/utils.py",
"diff": "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n+#\n+# Modified from DETR (https://github.com/facebookresearch/detr)\n+# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved\nfrom __future__ import absolute_import\nfrom __future__ import division\n"
}
] |
Python
|
Apache License 2.0
|
paddlepaddle/paddledetection
|
fix code reference and licence (#4477)
|
499,348 |
05.11.2021 19:23:38
| -28,800 |
a229530bc3a88269ba750b37f9dbafc666e8e219
|
add saving of results for deploy keypoint inference
|
[
{
"change_type": "MODIFY",
"old_path": "deploy/python/det_keypoint_unite_infer.py",
"new_path": "deploy/python/det_keypoint_unite_infer.py",
"diff": "# limitations under the License.\nimport os\n-\n+import json\nimport cv2\nimport math\nimport numpy as np\n@@ -80,7 +80,7 @@ def predict_with_given_det(image, det_res, keypoint_detector,\nkeypoint_res = {}\nkeypoint_res['keypoint'] = [\n- np.vstack(keypoint_vector), np.vstack(score_vector)\n+ np.vstack(keypoint_vector).tolist(), np.vstack(score_vector).tolist()\n] if len(keypoint_vector) > 0 else [[], []]\nkeypoint_res['bbox'] = rect_vector\nreturn keypoint_res\n@@ -89,8 +89,10 @@ def predict_with_given_det(image, det_res, keypoint_detector,\ndef topdown_unite_predict(detector,\ntopdown_keypoint_detector,\nimage_list,\n- keypoint_batch_size=1):\n+ keypoint_batch_size=1,\n+ save_res=False):\ndet_timer = detector.get_timer()\n+ store_res = []\nfor i, img_file in enumerate(image_list):\n# Decode image in advance in det + pose prediction\ndet_timer.preprocess_time_s.start()\n@@ -114,6 +116,11 @@ def topdown_unite_predict(detector,\nimage, results, topdown_keypoint_detector, keypoint_batch_size,\nFLAGS.det_threshold, FLAGS.keypoint_threshold, FLAGS.run_benchmark)\n+ if save_res:\n+ store_res.append([\n+ i, keypoint_res['bbox'],\n+ [keypoint_res['keypoint'][0], keypoint_res['keypoint'][1]]\n+ ])\nif FLAGS.run_benchmark:\ncm, gm, gu = get_current_memory_mb()\ntopdown_keypoint_detector.cpu_mem += cm\n@@ -127,12 +134,23 @@ def topdown_unite_predict(detector,\nkeypoint_res,\nvisual_thread=FLAGS.keypoint_threshold,\nsave_dir=FLAGS.output_dir)\n+ if save_res:\n+ \"\"\"\n+ 1) store_res: a list of image_data\n+ 2) image_data: [imageid, rects, [keypoints, scores]]\n+ 3) rects: list of rect [xmin, ymin, xmax, ymax]\n+ 4) keypoints: 17(joint numbers)*[x, y, conf], total 51 data in list\n+ 5) scores: mean of all joint conf\n+ \"\"\"\n+ with open(\"det_keypoint_unite_image_results.json\", 'w') as wf:\n+ json.dump(store_res, wf, indent=4)\ndef topdown_unite_predict_video(detector,\ntopdown_keypoint_detector,\ncamera_id,\n- keypoint_batch_size=1):\n+ keypoint_batch_size=1,\n+ save_res=False):\nvideo_name = 'output.mp4'\nif camera_id != -1:\ncapture = cv2.VideoCapture(camera_id)\n@@ -153,6 +171,7 @@ def topdown_unite_predict_video(detector,\nfourcc = cv2.VideoWriter_fourcc(* 'mp4v')\nwriter = cv2.VideoWriter(out_path, fourcc, fps, (width, height))\nindex = 0\n+ store_res = []\nwhile (1):\nret, frame = capture.read()\nif not ret:\n@@ -172,6 +191,11 @@ def topdown_unite_predict_video(detector,\nkeypoint_res,\nvisual_thread=FLAGS.keypoint_threshold,\nreturnimg=True)\n+ if save_res:\n+ store_res.append([\n+ index, keypoint_res['bbox'],\n+ [keypoint_res['keypoint'][0], keypoint_res['keypoint'][1]]\n+ ])\nwriter.write(im)\nif camera_id != -1:\n@@ -179,6 +203,16 @@ def topdown_unite_predict_video(detector,\nif cv2.waitKey(1) & 0xFF == ord('q'):\nbreak\nwriter.release()\n+ if save_res:\n+ \"\"\"\n+ 1) store_res: a list of frame_data\n+ 2) frame_data: [frameid, rects, [keypoints, scores]]\n+ 3) rects: list of rect [xmin, ymin, xmax, ymax]\n+ 4) keypoints: 17(joint numbers)*[x, y, conf], total 51 data in list\n+ 5) scores: mean of all joint conf\n+ \"\"\"\n+ with open(\"det_keypoint_unite_video_results.json\", 'w') as wf:\n+ json.dump(store_res, wf, indent=4)\ndef main():\n@@ -216,12 +250,13 @@ def main():\n# predict from video file or camera video stream\nif FLAGS.video_file is not None or FLAGS.camera_id != -1:\ntopdown_unite_predict_video(detector, topdown_keypoint_detector,\n- FLAGS.camera_id, FLAGS.keypoint_batch_size)\n+ FLAGS.camera_id, FLAGS.keypoint_batch_size,\n+ FLAGS.save_res)\nelse:\n# predict from 
image\nimg_list = get_test_images(FLAGS.image_dir, FLAGS.image_file)\ntopdown_unite_predict(detector, topdown_keypoint_detector, img_list,\n- FLAGS.keypoint_batch_size)\n+ FLAGS.keypoint_batch_size, FLAGS.save_res)\nif not FLAGS.run_benchmark:\ndetector.det_times.info(average=True)\ntopdown_keypoint_detector.det_times.info(average=True)\n"
},
{
"change_type": "MODIFY",
"old_path": "deploy/python/det_keypoint_unite_utils.py",
"new_path": "deploy/python/det_keypoint_unite_utils.py",
"diff": "@@ -115,5 +115,15 @@ def argsparser():\ntype=bool,\ndefault=True,\nhelp='whether to use darkpose to get better keypoint position predict ')\n-\n+ parser.add_argument(\n+ '--save_res',\n+ type=bool,\n+ default=False,\n+ help=(\n+ \"whether to save predict results to json file\"\n+ \"1) store_res: a list of image_data\"\n+ \"2) image_data: [imageid, rects, [keypoints, scores]]\"\n+ \"3) rects: list of rect [xmin, ymin, xmax, ymax]\"\n+ \"4) keypoints: 17(joint numbers)*[x, y, conf], total 51 data in list\"\n+ \"5) scores: mean of all joint conf\"))\nreturn parser\n"
},
{
"change_type": "MODIFY",
"old_path": "deploy/python/visualize.py",
"new_path": "deploy/python/visualize.py",
"diff": "@@ -240,6 +240,7 @@ def draw_pose(imgfile,\nraise e\nskeletons, scores = results['keypoint']\n+ skeletons = np.array(skeletons)\nkpt_nums = 17\nif len(skeletons) > 0:\nkpt_nums = skeletons.shape[1]\n"
}
] |
Python
|
Apache License 2.0
|
paddlepaddle/paddledetection
|
add deploy keypoint infer save results (#4480)
|
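Note: the docstring added in the diff above documents the layout of the JSON written by `--save_res`. A minimal sketch (not part of the commit) of reading that file back, assuming the documented layout `[imageid, rects, [keypoints, scores]]`:

```python
import json

# Load the file written by det_keypoint_unite_infer.py when --save_res is set.
with open("det_keypoint_unite_image_results.json") as f:
    store_res = json.load(f)

for image_id, rects, (keypoints, scores) in store_res:
    # rects: one [xmin, ymin, xmax, ymax] box per detected person
    # keypoints: per person, 17 joints * [x, y, conf] (51 values)
    # scores: mean joint confidence per person
    print(image_id, len(rects), "person(s) detected")
```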
499,298 |
06.11.2021 17:48:20
| -28,800 |
b2f3ad7cc0a91d589a3910a49e2fd5c5cce0e83c
|
[MOT] refine deepsort, fix jde
|
[
{
"change_type": "DELETE",
"old_path": "deploy/python/tracker/__init__.py",
"new_path": null,
"diff": "-# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.\n-#\n-# Licensed under the Apache License, Version 2.0 (the \"License\");\n-# you may not use this file except in compliance with the License.\n-# You may obtain a copy of the License at\n-#\n-# http://www.apache.org/licenses/LICENSE-2.0\n-#\n-# Unless required by applicable law or agreed to in writing, software\n-# distributed under the License is distributed on an \"AS IS\" BASIS,\n-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n-# See the License for the specific language governing permissions and\n-# limitations under the License.\n-\n-from . import deepsort_tracker\n-\n-from .deepsort_tracker import *\n"
},
{
"change_type": "DELETE",
"old_path": "deploy/python/tracker/deepsort_tracker.py",
"new_path": null,
"diff": "-# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.\n-#\n-# Licensed under the Apache License, Version 2.0 (the \"License\");\n-# you may not use this file except in compliance with the License.\n-# You may obtain a copy of the License at\n-#\n-# http://www.apache.org/licenses/LICENSE-2.0\n-#\n-# Unless required by applicable law or agreed to in writing, software\n-# distributed under the License is distributed on an \"AS IS\" BASIS,\n-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n-# See the License for the specific language governing permissions and\n-# limitations under the License.\n-\"\"\"\n-This code is borrow from https://github.com/nwojke/deep_sort/blob/master/deep_sort/tracker.py\n-\"\"\"\n-\n-import numpy as np\n-from ppdet.modeling.mot.motion import KalmanFilter\n-from ppdet.modeling.mot.matching.deepsort_matching import NearestNeighborDistanceMetric\n-from ppdet.modeling.mot.matching.deepsort_matching import iou_cost, min_cost_matching, matching_cascade, gate_cost_matrix\n-from ppdet.modeling.mot.tracker.base_sde_tracker import Track\n-from ppdet.modeling.mot.utils import Detection\n-\n-__all__ = ['DeepSORTTracker']\n-\n-\n-class DeepSORTTracker(object):\n- \"\"\"\n- DeepSORT tracker\n-\n- Args:\n- input_size (list): input feature map size to reid model, [h, w] format,\n- [64, 192] as default.\n- min_box_area (int): min box area to filter out low quality boxes\n- vertical_ratio (float): w/h, the vertical ratio of the bbox to filter\n- bad results, set 1.6 default for pedestrian tracking. If set <=0\n- means no need to filter bboxes.\n- budget (int): If not None, fix samples per class to at most this number.\n- Removes the oldest samples when the budget is reached.\n- max_age (int): maximum number of missed misses before a track is deleted\n- n_init (float): Number of frames that a track remains in initialization\n- phase. 
Number of consecutive detections before the track is confirmed.\n- The track state is set to `Deleted` if a miss occurs within the first\n- `n_init` frames.\n- metric_type (str): either \"euclidean\" or \"cosine\", the distance metric\n- used for measurement to track association.\n- matching_threshold (float): samples with larger distance are\n- considered an invalid match.\n- max_iou_distance (float): max iou distance threshold\n- motion (object): KalmanFilter instance\n- \"\"\"\n-\n- def __init__(self,\n- input_size=[64, 192],\n- min_box_area=0,\n- vertical_ratio=-1,\n- budget=100,\n- max_age=70,\n- n_init=3,\n- metric_type='cosine',\n- matching_threshold=0.2,\n- max_iou_distance=0.9,\n- motion='KalmanFilter'):\n- self.input_size = input_size\n- self.min_box_area = min_box_area\n- self.vertical_ratio = vertical_ratio\n- self.max_age = max_age\n- self.n_init = n_init\n- self.metric = NearestNeighborDistanceMetric(metric_type,\n- matching_threshold, budget)\n- self.max_iou_distance = max_iou_distance\n- self.motion = KalmanFilter()\n-\n- self.tracks = []\n- self._next_id = 1\n-\n- def predict(self):\n- \"\"\"\n- Propagate track state distributions one time step forward.\n- This function should be called once every time step, before `update`.\n- \"\"\"\n- for track in self.tracks:\n- track.predict(self.motion)\n-\n- def update(self, pred_dets, pred_embs):\n- \"\"\"\n- pred_dets (Tensor): Detection results of the image, shape is [N, 6].\n- pred_embs (Tensor): Embedding results of the image, shape is [N, 128],\n- usually pred_embs.shape[1] can be a multiple of 128, in PCB\n- Pyramidal model is 128*21.\n- \"\"\"\n- pred_tlwhs = pred_dets[:, :4]\n- pred_scores = pred_dets[:, 4:5]\n- pred_cls_ids = pred_dets[:, 5:]\n-\n- detections = [\n- Detection(tlwh, score, feat, cls_id)\n- for tlwh, score, feat, cls_id in zip(pred_tlwhs, pred_scores,\n- pred_embs, pred_cls_ids)\n- ]\n-\n- # Run matching cascade.\n- matches, unmatched_tracks, unmatched_detections = \\\n- self._match(detections)\n-\n- # Update track set.\n- for track_idx, detection_idx in matches:\n- self.tracks[track_idx].update(self.motion,\n- detections[detection_idx])\n- for track_idx in unmatched_tracks:\n- self.tracks[track_idx].mark_missed()\n- for detection_idx in unmatched_detections:\n- self._initiate_track(detections[detection_idx])\n- self.tracks = [t for t in self.tracks if not t.is_deleted()]\n-\n- # Update distance metric.\n- active_targets = [t.track_id for t in self.tracks if t.is_confirmed()]\n- features, targets = [], []\n- for track in self.tracks:\n- if not track.is_confirmed():\n- continue\n- features += track.features\n- targets += [track.track_id for _ in track.features]\n- track.features = []\n- self.metric.partial_fit(\n- np.asarray(features), np.asarray(targets), active_targets)\n- output_stracks = self.tracks\n- return output_stracks\n-\n- def _match(self, detections):\n- def gated_metric(tracks, dets, track_indices, detection_indices):\n- features = np.array([dets[i].feature for i in detection_indices])\n- targets = np.array([tracks[i].track_id for i in track_indices])\n- cost_matrix = self.metric.distance(features, targets)\n- cost_matrix = gate_cost_matrix(self.motion, cost_matrix, tracks,\n- dets, track_indices,\n- detection_indices)\n- return cost_matrix\n-\n- # Split track set into confirmed and unconfirmed tracks.\n- confirmed_tracks = [\n- i for i, t in enumerate(self.tracks) if t.is_confirmed()\n- ]\n- unconfirmed_tracks = [\n- i for i, t in enumerate(self.tracks) if not t.is_confirmed()\n- ]\n-\n- # 
Associate confirmed tracks using appearance features.\n- matches_a, unmatched_tracks_a, unmatched_detections = \\\n- matching_cascade(\n- gated_metric, self.metric.matching_threshold, self.max_age,\n- self.tracks, detections, confirmed_tracks)\n-\n- # Associate remaining tracks together with unconfirmed tracks using IOU.\n- iou_track_candidates = unconfirmed_tracks + [\n- k for k in unmatched_tracks_a\n- if self.tracks[k].time_since_update == 1\n- ]\n- unmatched_tracks_a = [\n- k for k in unmatched_tracks_a\n- if self.tracks[k].time_since_update != 1\n- ]\n- matches_b, unmatched_tracks_b, unmatched_detections = \\\n- min_cost_matching(\n- iou_cost, self.max_iou_distance, self.tracks,\n- detections, iou_track_candidates, unmatched_detections)\n-\n- matches = matches_a + matches_b\n- unmatched_tracks = list(set(unmatched_tracks_a + unmatched_tracks_b))\n- return matches, unmatched_tracks, unmatched_detections\n-\n- def _initiate_track(self, detection):\n- mean, covariance = self.motion.initiate(detection.to_xyah())\n- self.tracks.append(\n- Track(mean, covariance, self._next_id, self.n_init, self.max_age,\n- detection.cls_id, detection.score, detection.feature))\n- self._next_id += 1\n"
},
{
"change_type": "MODIFY",
"old_path": "ppdet/engine/tracker.py",
"new_path": "ppdet/engine/tracker.py",
"diff": "@@ -184,7 +184,7 @@ class Tracker(object):\nuse_detector = False if not self.model.detector else True\ntimer = MOTTimer()\n- results = []\n+ results = defaultdict(list)\nframe_id = 0\nself.status['mode'] = 'track'\nself.model.eval()\n@@ -269,6 +269,7 @@ class Tracker(object):\ndata.update({'crops': crops})\npred_embs = self.model(data)\n+ pred_dets, pred_embs = pred_dets.numpy(), pred_embs.numpy()\ntracker.predict()\nonline_targets = tracker.update(pred_dets, pred_embs)\n@@ -291,7 +292,7 @@ class Tracker(object):\ntimer.toc()\n# save results\n- results.append(\n+ results[0].append(\n(frame_id + 1, online_tlwhs, online_scores, online_ids))\nsave_vis_results(data, frame_id, online_ids, online_tlwhs,\nonline_scores, timer.average_time, show_image,\n"
},
{
"change_type": "MODIFY",
"old_path": "ppdet/modeling/architectures/jde.py",
"new_path": "ppdet/modeling/architectures/jde.py",
"diff": "@@ -105,7 +105,7 @@ class JDE(BaseArch):\nnms_keep_idx = det_outs['nms_keep_idx']\n- pred_dets = paddle.concat((bbox[:, 2:], bbox[:, 1:2]), axis=1)\n+ pred_dets = paddle.concat((bbox[:, 2:], bbox[:, 1:2], bbox[:, 0:1]), axis=1)\nemb_valid = paddle.gather_nd(emb_outs, boxes_idx)\npred_embs = paddle.gather_nd(emb_valid, nms_keep_idx)\n"
},
{
"change_type": "MODIFY",
"old_path": "ppdet/modeling/mot/tracker/deepsort_tracker.py",
"new_path": "ppdet/modeling/mot/tracker/deepsort_tracker.py",
"diff": "@@ -17,6 +17,7 @@ This code is borrow from https://github.com/nwojke/deep_sort/blob/master/deep_so\nimport numpy as np\n+from ..motion import KalmanFilter\nfrom ..matching.deepsort_matching import NearestNeighborDistanceMetric\nfrom ..matching.deepsort_matching import iou_cost, min_cost_matching, matching_cascade, gate_cost_matrix\nfrom .base_sde_tracker import Track\n@@ -32,7 +33,6 @@ __all__ = ['DeepSORTTracker']\n@register\n@serializable\nclass DeepSORTTracker(object):\n- __inject__ = ['motion']\n\"\"\"\nDeepSORT tracker\n@@ -77,7 +77,8 @@ class DeepSORTTracker(object):\nself.metric = NearestNeighborDistanceMetric(metric_type,\nmatching_threshold, budget)\nself.max_iou_distance = max_iou_distance\n- self.motion = motion\n+ if motion == 'KalmanFilter':\n+ self.motion = KalmanFilter()\nself.tracks = []\nself._next_id = 1\n@@ -94,14 +95,14 @@ class DeepSORTTracker(object):\n\"\"\"\nPerform measurement update and track management.\nArgs:\n- pred_dets (Tensor): Detection results of the image, shape is [N, 6].\n- pred_embs (Tensor): Embedding results of the image, shape is [N, 128],\n- usually pred_embs.shape[1] can be a multiple of 128, in PCB\n- Pyramidal model is 128*21.\n+ pred_dets (np.array): Detection results of the image, the shape is\n+ [N, 6], means 'x0, y0, x1, y1, score, cls_id'.\n+ pred_embs (np.array): Embedding results of the image, the shape is\n+ [N, 128], usually pred_embs.shape[1] is a multiple of 128.\n\"\"\"\npred_tlwhs = pred_dets[:, :4]\n- pred_scores = pred_dets[:, 4:5].squeeze(1)\n- pred_cls_ids = pred_dets[:, 5:].squeeze(1)\n+ pred_scores = pred_dets[:, 4:5]\n+ pred_cls_ids = pred_dets[:, 5:]\ndetections = [\nDetection(tlwh, score, feat, cls_id)\n"
},
{
"change_type": "MODIFY",
"old_path": "ppdet/modeling/reid/pplcnet_embedding.py",
"new_path": "ppdet/modeling/reid/pplcnet_embedding.py",
"diff": "@@ -21,9 +21,9 @@ import paddle.nn as nn\nimport paddle.nn.functional as F\nfrom paddle.nn.initializer import Normal, Constant\nfrom paddle import ParamAttr\n-from paddle.nn import AdaptiveAvgPool2D, BatchNorm, Conv2D, Dropout, Linear\n+from paddle.nn import AdaptiveAvgPool2D, BatchNorm, Conv2D, Linear\nfrom paddle.regularizer import L2Decay\n-from paddle.nn.initializer import KaimingNormal\n+from paddle.nn.initializer import KaimingNormal, XavierNormal\nfrom ppdet.core.workspace import register\n__all__ = ['PPLCNetEmbedding']\n@@ -250,6 +250,17 @@ class PPLCNet(nn.Layer):\nreturn x\n+class FC(nn.Layer):\n+ def __init__(self, input_ch, output_ch):\n+ super(FC, self).__init__()\n+ weight_attr = ParamAttr(initializer=XavierNormal())\n+ self.fc = paddle.nn.Linear(input_ch, output_ch, weight_attr=weight_attr)\n+\n+ def forward(self, x):\n+ out = self.fc(x)\n+ return out\n+\n+\n@register\nclass PPLCNetEmbedding(nn.Layer):\n\"\"\"\n@@ -262,7 +273,7 @@ class PPLCNetEmbedding(nn.Layer):\ndef __init__(self, scale=2.5, input_ch=1280, output_ch=512):\nsuper(PPLCNetEmbedding, self).__init__()\nself.backbone = PPLCNet(scale=scale)\n- self.neck = nn.Linear(input_ch, output_ch)\n+ self.neck = FC(input_ch, output_ch)\ndef forward(self, x):\nfeat = self.backbone(x)\n"
}
] |
Python
|
Apache License 2.0
|
paddlepaddle/paddledetection
|
[MOT] refine deepsort, fix jde (#4490)
|
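Context for the refactor above: the tracker now builds its own `KalmanFilter` and `update()` takes plain NumPy arrays. A hedged usage sketch, assuming the registered default constructor arguments and the array layout from the updated docstring:

```python
import numpy as np
from ppdet.modeling.mot.tracker.deepsort_tracker import DeepSORTTracker

tracker = DeepSORTTracker()                  # motion='KalmanFilter' builds the filter internally

# One dummy detection: [x0, y0, x1, y1, score, cls_id] plus its ReID embedding.
pred_dets = np.array([[10., 20., 110., 220., 0.9, 0.]], dtype=np.float32)
pred_embs = np.random.rand(1, 128).astype(np.float32)

tracker.predict()                            # propagate track states one step forward
tracks = tracker.update(pred_dets, pred_embs)
```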
499,333 |
07.11.2021 20:02:03
| -28,800 |
305928b882a28574efe5ece8eeaf410f1e8f9e4d
|
[MOT] fix stream predict
|
[
{
"change_type": "MODIFY",
"old_path": "deploy/pptracking/include/pipeline.h",
"new_path": "deploy/pptracking/include/pipeline.h",
"diff": "@@ -79,7 +79,7 @@ class Pipeline {\nvoid PredictMTMCT(const std::vector<std::string> video_inputs);\n// Run pipeline in stream\n- void RunMOTStream(const cv::Mat img, const int frame_id, cv::Mat out_img, std::vector<std::string>& records, std::vector<int>& count_list, std::vector<int>& in_count_list, std::vector<int>& out_count_list);\n+ void RunMOTStream(const cv::Mat img, const int frame_id, cv::Mat& out_img, std::vector<std::string>& records, std::vector<int>& count_list, std::vector<int>& in_count_list, std::vector<int>& out_count_list);\nvoid RunMTMCTStream(const std::vector<cv::Mat> imgs, std::vector<std::string>& records);\nvoid PrintBenchmarkLog(std::vector<double> det_time, int img_num);\n"
},
{
"change_type": "MODIFY",
"old_path": "deploy/pptracking/src/pipeline.cc",
"new_path": "deploy/pptracking/src/pipeline.cc",
"diff": "@@ -214,7 +214,7 @@ void Pipeline::PredictMTMCT(const std::vector<std::string> video_path) {\nthrow \"Not Implement!\";\n}\n-void Pipeline::RunMOTStream(const cv::Mat img, const int frame_id, cv::Mat out_img, std::vector<std::string>& records, std::vector<int>& count_list, std::vector<int>& in_count_list, std::vector<int>& out_count_list) {\n+void Pipeline::RunMOTStream(const cv::Mat img, const int frame_id, cv::Mat& out_img, std::vector<std::string>& records, std::vector<int>& count_list, std::vector<int>& in_count_list, std::vector<int>& out_count_list) {\nPaddleDetection::MOTResult result;\nstd::vector<double> det_times(3);\ndouble times;\n"
}
] |
Python
|
Apache License 2.0
|
paddlepaddle/paddledetection
|
[MOT] fix stream predict (#4497)
|
499,298 |
09.11.2021 11:17:28
| -28,800 |
9c0b62a7977d88c939c2a60c8f84e5678f903fe4
|
add pptracking light weight modelzoo
|
[
{
"change_type": "MODIFY",
"old_path": "configs/mot/mcfairmot/README.md",
"new_path": "configs/mot/mcfairmot/README.md",
"diff": "@@ -18,6 +18,8 @@ MCFairMOT is the Multi-class extended version of [FairMOT](https://arxiv.org/abs\n| :--------------| :------- | :----: | :----: | :---: | :------: | :----: |:----: |\n| DLA-34 | 1088x608 | 24.3 | 41.6 | 2314 | - |[model](https://paddledet.bj.bcebos.com/models/mot/mcfairmot_dla34_30e_1088x608_visdrone.pdparams) | [config](./mcfairmot_dla34_30e_1088x608_visdrone.yml) |\n| HRNetV2-W18 | 1088x608 | 20.4 | 39.9 | 2603 | - |[model](https://paddledet.bj.bcebos.com/models/mot/mcfairmot_hrnetv2_w18_dlafpn_30e_1088x608_visdrone.pdparams) | [config](./mcfairmot_hrnetv2_w18_dlafpn_30e_1088x608_visdrone.yml) |\n+| HRNetV2-W18 | 864x480 | 18.2 | 38.7 | 2416 | - |[model](https://paddledet.bj.bcebos.com/models/mot/mcfairmot_hrnetv2_w18_dlafpn_30e_864x480_visdrone.pdparams) | [config](./mcfairmot_hrnetv2_w18_dlafpn_30e_864x480_visdrone.yml) |\n+| HRNetV2-W18 | 576x320 | 12.0 | 33.8 | 2178 | - |[model](https://paddledet.bj.bcebos.com/models/mot/mcfairmot_hrnetv2_w18_dlafpn_30e_576x320_visdrone.pdparams) | [config](./mcfairmot_hrnetv2_w18_dlafpn_30e_576x320_visdrone.yml) |\n**Notes:**\nMOTA is the average MOTA of 10 catecories in the VisDrone2019 MOT dataset, and its value is also equal to the average MOTA of all the evaluated video sequences.\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "configs/mot/mcfairmot/mcfairmot_hrnetv2_w18_dlafpn_30e_576x320_visdrone.yml",
"diff": "+_BASE_: [\n+ '../fairmot/fairmot_hrnetv2_w18_dlafpn_30e_576x320.yml',\n+ '../../datasets/mcmot.yml'\n+]\n+\n+architecture: FairMOT\n+pretrain_weights: https://paddledet.bj.bcebos.com/models/pretrained/HRNet_W18_C_pretrained.pdparams\n+for_mot: True\n+\n+FairMOT:\n+ detector: CenterNet\n+ reid: FairMOTEmbeddingHead\n+ loss: FairMOTLoss\n+ tracker: JDETracker # multi-class tracker\n+\n+CenterNetHead:\n+ regress_ltrb: False\n+\n+CenterNetPostProcess:\n+ regress_ltrb: False\n+ max_per_img: 200\n+\n+JDETracker:\n+ min_box_area: 0\n+ vertical_ratio: 0 # no need to filter bboxes according to w/h\n+ conf_thres: 0.4\n+ tracked_thresh: 0.4\n+ metric_type: cosine\n+\n+weights: output/mcfairmot_hrnetv2_w18_dlafpn_30e_576x320_visdrone/model_final\n+\n+epoch: 30\n+LearningRate:\n+ base_lr: 0.0005\n+ schedulers:\n+ - !PiecewiseDecay\n+ gamma: 0.1\n+ milestones: [10, 20]\n+ use_warmup: False\n+\n+OptimizerBuilder:\n+ optimizer:\n+ type: Adam\n+ regularizer: NULL\n+\n+TrainReader:\n+ batch_size: 8\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "configs/mot/mcfairmot/mcfairmot_hrnetv2_w18_dlafpn_30e_864x480_visdrone.yml",
"diff": "+_BASE_: [\n+ '../fairmot/fairmot_hrnetv2_w18_dlafpn_30e_864x480.yml',\n+ '../../datasets/mcmot.yml'\n+]\n+\n+architecture: FairMOT\n+pretrain_weights: https://paddledet.bj.bcebos.com/models/pretrained/HRNet_W18_C_pretrained.pdparams\n+for_mot: True\n+\n+FairMOT:\n+ detector: CenterNet\n+ reid: FairMOTEmbeddingHead\n+ loss: FairMOTLoss\n+ tracker: JDETracker # multi-class tracker\n+\n+CenterNetHead:\n+ regress_ltrb: False\n+\n+CenterNetPostProcess:\n+ regress_ltrb: False\n+ max_per_img: 200\n+\n+JDETracker:\n+ min_box_area: 0\n+ vertical_ratio: 0 # no need to filter bboxes according to w/h\n+ conf_thres: 0.4\n+ tracked_thresh: 0.4\n+ metric_type: cosine\n+\n+weights: output/mcfairmot_hrnetv2_w18_dlafpn_30e_864x480_visdrone/model_final\n+\n+epoch: 30\n+LearningRate:\n+ base_lr: 0.0005\n+ schedulers:\n+ - !PiecewiseDecay\n+ gamma: 0.1\n+ milestones: [10, 20]\n+ use_warmup: False\n+\n+OptimizerBuilder:\n+ optimizer:\n+ type: Adam\n+ regularizer: NULL\n+\n+TrainReader:\n+ batch_size: 8\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "configs/mot/pedestrian/fairmot_hrnetv2_w18_dlafpn_30e_1088x608_visdrone_pedestrian.yml",
"diff": "+_BASE_: [\n+ '../fairmot/fairmot_hrnetv2_w18_dlafpn_30e_1088x608.yml'\n+]\n+\n+weights: output/fairmot_hrnetv2_w18_dlafpn_30e_1088x608_visdrone_pedestrian/model_final\n+\n+# for MOT training\n+TrainDataset:\n+ !MOTDataSet\n+ dataset_dir: dataset/mot\n+ image_lists: ['visdrone_pedestrian.train']\n+ data_fields: ['image', 'gt_bbox', 'gt_class', 'gt_ide']\n+\n+# for MOT evaluation\n+# If you want to change the MOT evaluation dataset, please modify 'data_root'\n+EvalMOTDataset:\n+ !MOTImageFolder\n+ dataset_dir: dataset/mot\n+ data_root: visdrone_pedestrian/images/val\n+ keep_ori_im: False # set True if save visualization images or video, or used in DeepSORT\n+\n+# for MOT video inference\n+TestMOTDataset:\n+ !MOTImageFolder\n+ dataset_dir: dataset/mot\n+ keep_ori_im: True # set True if save visualization images or video\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "configs/mot/pedestrian/fairmot_hrnetv2_w18_dlafpn_30e_576x320_visdrone_pedestrian.yml",
"diff": "+_BASE_: [\n+ '../fairmot/fairmot_hrnetv2_w18_dlafpn_30e_576x320.yml'\n+]\n+\n+weights: output/fairmot_hrnetv2_w18_dlafpn_30e_576x320_visdrone_pedestrian/model_final\n+\n+# for MOT training\n+TrainDataset:\n+ !MOTDataSet\n+ dataset_dir: dataset/mot\n+ image_lists: ['visdrone_pedestrian.train']\n+ data_fields: ['image', 'gt_bbox', 'gt_class', 'gt_ide']\n+\n+# for MOT evaluation\n+# If you want to change the MOT evaluation dataset, please modify 'data_root'\n+EvalMOTDataset:\n+ !MOTImageFolder\n+ dataset_dir: dataset/mot\n+ data_root: visdrone_pedestrian/images/val\n+ keep_ori_im: False # set True if save visualization images or video, or used in DeepSORT\n+\n+# for MOT video inference\n+TestMOTDataset:\n+ !MOTImageFolder\n+ dataset_dir: dataset/mot\n+ keep_ori_im: True # set True if save visualization images or video\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "configs/mot/pedestrian/fairmot_hrnetv2_w18_dlafpn_30e_864x480_visdrone_pedestrian.yml",
"diff": "+_BASE_: [\n+ '../fairmot/fairmot_hrnetv2_w18_dlafpn_30e_864x480.yml'\n+]\n+\n+weights: output/fairmot_hrnetv2_w18_dlafpn_30e_864x480_visdrone_pedestrian/model_final\n+\n+# for MOT training\n+TrainDataset:\n+ !MOTDataSet\n+ dataset_dir: dataset/mot\n+ image_lists: ['visdrone_pedestrian.train']\n+ data_fields: ['image', 'gt_bbox', 'gt_class', 'gt_ide']\n+\n+# for MOT evaluation\n+# If you want to change the MOT evaluation dataset, please modify 'data_root'\n+EvalMOTDataset:\n+ !MOTImageFolder\n+ dataset_dir: dataset/mot\n+ data_root: visdrone_pedestrian/images/val\n+ keep_ori_im: False # set True if save visualization images or video, or used in DeepSORT\n+\n+# for MOT video inference\n+TestMOTDataset:\n+ !MOTImageFolder\n+ dataset_dir: dataset/mot\n+ keep_ori_im: True # set True if save visualization images or video\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "configs/mot/vehicle/fairmot_hrnetv2_w18_dlafpn_30e_1088x608_visdrone_vehicle.yml",
"diff": "+_BASE_: [\n+ '../fairmot/fairmot_hrnetv2_w18_dlafpn_30e_1088x608.yml'\n+]\n+\n+weights: output/fairmot_hrnetv2_w18_dlafpn_30e_1088x608_visdrone_vehicle/model_final\n+\n+# for MOT training\n+TrainDataset:\n+ !MOTDataSet\n+ dataset_dir: dataset/mot\n+ image_lists: ['visdrone_vehicle.train']\n+ data_fields: ['image', 'gt_bbox', 'gt_class', 'gt_ide']\n+\n+# for MOT evaluation\n+# If you want to change the MOT evaluation dataset, please modify 'data_root'\n+EvalMOTDataset:\n+ !MOTImageFolder\n+ dataset_dir: dataset/mot\n+ data_root: visdrone_vehicle/images/val\n+ keep_ori_im: False # set True if save visualization images or video, or used in DeepSORT\n+\n+# for MOT video inference\n+TestMOTDataset:\n+ !MOTImageFolder\n+ dataset_dir: dataset/mot\n+ keep_ori_im: True # set True if save visualization images or video\n+\n+# model config\n+FairMOT:\n+ detector: CenterNet\n+ reid: FairMOTEmbeddingHead\n+ loss: FairMOTLoss\n+ tracker: JDETracker\n+\n+JDETracker:\n+ min_box_area: 0\n+ vertical_ratio: 0 # no need to filter bboxes according to w/h\n+ conf_thres: 0.4\n+ tracked_thresh: 0.4\n+ metric_type: cosine\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "configs/mot/vehicle/fairmot_hrnetv2_w18_dlafpn_30e_576x320_bdd100kmot_vehicle.yml",
"diff": "+_BASE_: [\n+ '../fairmot/fairmot_hrnetv2_w18_dlafpn_30e_576x320.yml'\n+]\n+\n+weights: output/fairmot_hrnetv2_w18_dlafpn_30e_576x320_bdd100kmot_vehicle/model_final\n+\n+# for MOT training\n+TrainDataset:\n+ !MOTDataSet\n+ dataset_dir: dataset/mot\n+ image_lists: ['bdd100kmot_vehicle.train']\n+ data_fields: ['image', 'gt_bbox', 'gt_class', 'gt_ide']\n+\n+# for MOT evaluation\n+# If you want to change the MOT evaluation dataset, please modify 'data_root'\n+EvalMOTDataset:\n+ !MOTImageFolder\n+ dataset_dir: dataset/mot\n+ data_root: bdd100kmot_vehicle/images/val\n+ keep_ori_im: False # set True if save visualization images or video, or used in DeepSORT\n+\n+# for MOT video inference\n+TestMOTDataset:\n+ !MOTImageFolder\n+ dataset_dir: dataset/mot\n+ keep_ori_im: True # set True if save visualization images or video\n+\n+# model config\n+FairMOT:\n+ detector: CenterNet\n+ reid: FairMOTEmbeddingHead\n+ loss: FairMOTLoss\n+ tracker: JDETracker\n+\n+JDETracker:\n+ min_box_area: 0\n+ vertical_ratio: 0 # no need to filter bboxes according to w/h\n+ conf_thres: 0.4\n+ tracked_thresh: 0.4\n+ metric_type: cosine\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "configs/mot/vehicle/fairmot_hrnetv2_w18_dlafpn_30e_576x320_visdrone_vehicle.yml",
"diff": "+_BASE_: [\n+ '../fairmot/fairmot_hrnetv2_w18_dlafpn_30e_576x320.yml'\n+]\n+\n+weights: output/fairmot_hrnetv2_w18_dlafpn_30e_576x320_visdrone_vehicle/model_final\n+\n+# for MOT training\n+TrainDataset:\n+ !MOTDataSet\n+ dataset_dir: dataset/mot\n+ image_lists: ['visdrone_vehicle.train']\n+ data_fields: ['image', 'gt_bbox', 'gt_class', 'gt_ide']\n+\n+# for MOT evaluation\n+# If you want to change the MOT evaluation dataset, please modify 'data_root'\n+EvalMOTDataset:\n+ !MOTImageFolder\n+ dataset_dir: dataset/mot\n+ data_root: visdrone_vehicle/images/val\n+ keep_ori_im: False # set True if save visualization images or video, or used in DeepSORT\n+\n+# for MOT video inference\n+TestMOTDataset:\n+ !MOTImageFolder\n+ dataset_dir: dataset/mot\n+ keep_ori_im: True # set True if save visualization images or video\n+\n+# model config\n+FairMOT:\n+ detector: CenterNet\n+ reid: FairMOTEmbeddingHead\n+ loss: FairMOTLoss\n+ tracker: JDETracker\n+\n+JDETracker:\n+ min_box_area: 0\n+ vertical_ratio: 0 # no need to filter bboxes according to w/h\n+ conf_thres: 0.4\n+ tracked_thresh: 0.4\n+ metric_type: cosine\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "configs/mot/vehicle/fairmot_hrnetv2_w18_dlafpn_30e_864x480_visdrone_vehicle.yml",
"diff": "+_BASE_: [\n+ '../fairmot/fairmot_hrnetv2_w18_dlafpn_30e_864x480.yml'\n+]\n+\n+weights: output/fairmot_hrnetv2_w18_dlafpn_30e_864x480_visdrone_vehicle/model_final\n+\n+# for MOT training\n+TrainDataset:\n+ !MOTDataSet\n+ dataset_dir: dataset/mot\n+ image_lists: ['visdrone_vehicle.train']\n+ data_fields: ['image', 'gt_bbox', 'gt_class', 'gt_ide']\n+\n+# for MOT evaluation\n+# If you want to change the MOT evaluation dataset, please modify 'data_root'\n+EvalMOTDataset:\n+ !MOTImageFolder\n+ dataset_dir: dataset/mot\n+ data_root: visdrone_vehicle/images/val\n+ keep_ori_im: False # set True if save visualization images or video, or used in DeepSORT\n+\n+# for MOT video inference\n+TestMOTDataset:\n+ !MOTImageFolder\n+ dataset_dir: dataset/mot\n+ keep_ori_im: True # set True if save visualization images or video\n+\n+# model config\n+FairMOT:\n+ detector: CenterNet\n+ reid: FairMOTEmbeddingHead\n+ loss: FairMOTLoss\n+ tracker: JDETracker\n+\n+JDETracker:\n+ min_box_area: 0\n+ vertical_ratio: 0 # no need to filter bboxes according to w/h\n+ conf_thres: 0.4\n+ tracked_thresh: 0.4\n+ metric_type: cosine\n"
}
] |
Python
|
Apache License 2.0
|
paddlepaddle/paddledetection
|
add pptracking light weight modelzoo (#4512)
|
499,298 |
09.11.2021 15:56:00
| -28,800 |
7441fba7580a5c017cd30d8187498c48db356ed2
|
[MOT] fix picodet deepsort deploy, add cls_name visualization
|
[
{
"change_type": "MODIFY",
"old_path": "deploy/python/mot_jde_infer.py",
"new_path": "deploy/python/mot_jde_infer.py",
"diff": "@@ -23,7 +23,6 @@ import paddle\nfrom paddle.inference import Config\nfrom paddle.inference import create_predictor\n-from preprocess import preprocess\nfrom utils import argsparser, Timer, get_current_memory_mb\nfrom infer import Detector, get_test_images, print_arguments, PredictConfig\nfrom benchmark_utils import PaddleInferBenchmark\n@@ -167,6 +166,8 @@ def predict_image(detector, image_list):\nresults = []\nnum_classes = detector.num_classes\ndata_type = 'mcmot' if num_classes > 1 else 'mot'\n+ ids2names = detector.pred_config.labels\n+\nimage_list.sort()\nfor frame_id, img_file in enumerate(image_list):\nframe = cv2.imread(img_file)\n@@ -181,7 +182,8 @@ def predict_image(detector, image_list):\nonline_tlwhs, online_scores, online_ids = detector.predict(\n[frame], FLAGS.threshold)\nonline_im = plot_tracking_dict(frame, num_classes, online_tlwhs,\n- online_ids, online_scores, frame_id)\n+ online_ids, online_scores, frame_id,\n+ ids2names)\nif FLAGS.save_images:\nif not os.path.exists(FLAGS.output_dir):\nos.makedirs(FLAGS.output_dir)\n@@ -216,6 +218,8 @@ def predict_video(detector, camera_id):\nresults = defaultdict(list) # support single class and multi classes\nnum_classes = detector.num_classes\ndata_type = 'mcmot' if num_classes > 1 else 'mot'\n+ ids2names = detector.pred_config.labels\n+\nwhile (1):\nret, frame = capture.read()\nif not ret:\n@@ -237,7 +241,8 @@ def predict_video(detector, camera_id):\nonline_ids,\nonline_scores,\nframe_id=frame_id,\n- fps=fps)\n+ fps=fps,\n+ ids2names=ids2names)\nif FLAGS.save_images:\nsave_dir = os.path.join(FLAGS.output_dir, video_name.split('.')[-2])\nif not os.path.exists(save_dir):\n"
},
{
"change_type": "MODIFY",
"old_path": "deploy/python/mot_sde_infer.py",
"new_path": "deploy/python/mot_sde_infer.py",
"diff": "@@ -23,9 +23,9 @@ import paddle\nfrom paddle.inference import Config\nfrom paddle.inference import create_predictor\n-from preprocess import preprocess\n+from picodet_postprocess import PicoDetPostProcess\nfrom utils import argsparser, Timer, get_current_memory_mb\n-from infer import Detector, get_test_images, print_arguments, PredictConfig\n+from infer import Detector, DetectorPicoDet, get_test_images, print_arguments, PredictConfig\nfrom infer import load_predictor\nfrom benchmark_utils import PaddleInferBenchmark\n@@ -139,6 +139,7 @@ class SDE_Detector(Detector):\ncpu_threads=cpu_threads,\nenable_mkldnn=enable_mkldnn)\nassert batch_size == 1, \"The JDE Detector only supports batch size=1 now\"\n+ self.pred_config = pred_config\ndef postprocess(self, boxes, input_shape, im_shape, scale_factor, threshold,\nscaled):\n@@ -147,6 +148,8 @@ class SDE_Detector(Detector):\npred_dets = np.zeros((1, 6), dtype=np.float32)\npred_xyxys = np.zeros((1, 4), dtype=np.float32)\nreturn pred_dets, pred_xyxys\n+ else:\n+ boxes = boxes[over_thres_idx]\nif not scaled:\n# scaled means whether the coords after detector outputs\n@@ -159,6 +162,11 @@ class SDE_Detector(Detector):\npred_xyxys, keep_idx = clip_box(pred_bboxes, input_shape, im_shape,\nscale_factor)\n+ if len(keep_idx[0]) == 0:\n+ pred_dets = np.zeros((1, 6), dtype=np.float32)\n+ pred_xyxys = np.zeros((1, 4), dtype=np.float32)\n+ return pred_dets, pred_xyxys\n+\npred_scores = boxes[:, 1:2][keep_idx[0]]\npred_cls_ids = boxes[:, 0:1][keep_idx[0]]\npred_tlwhs = np.concatenate(\n@@ -168,7 +176,7 @@ class SDE_Detector(Detector):\npred_dets = np.concatenate(\n(pred_tlwhs, pred_scores, pred_cls_ids), axis=1)\n- return pred_dets[over_thres_idx], pred_xyxys[over_thres_idx]\n+ return pred_dets, pred_xyxys\ndef predict(self, image, scaled, threshold=0.5, warmup=0, repeats=1):\n'''\n@@ -220,6 +228,142 @@ class SDE_Detector(Detector):\nreturn pred_dets, pred_xyxys\n+class SDE_DetectorPicoDet(DetectorPicoDet):\n+ \"\"\"\n+ Args:\n+ pred_config (object): config of model, defined by `Config(model_dir)`\n+ model_dir (str): root path of model.pdiparams, model.pdmodel and infer_cfg.yml\n+ device (str): Choose the device you want to run, it can be: CPU/GPU/XPU, default is CPU\n+ run_mode (str): mode of running(fluid/trt_fp32/trt_fp16)\n+ trt_min_shape (int): min shape for dynamic shape in trt\n+ trt_max_shape (int): max shape for dynamic shape in trt\n+ trt_opt_shape (int): opt shape for dynamic shape in trt\n+ trt_calib_mode (bool): If the model is produced by TRT offline quantitative\n+ calibration, trt_calib_mode need to set True\n+ cpu_threads (int): cpu threads\n+ enable_mkldnn (bool): whether to open MKLDNN\n+ \"\"\"\n+\n+ def __init__(self,\n+ pred_config,\n+ model_dir,\n+ device='CPU',\n+ run_mode='fluid',\n+ batch_size=1,\n+ trt_min_shape=1,\n+ trt_max_shape=1088,\n+ trt_opt_shape=608,\n+ trt_calib_mode=False,\n+ cpu_threads=1,\n+ enable_mkldnn=False):\n+ super(SDE_DetectorPicoDet, self).__init__(\n+ pred_config=pred_config,\n+ model_dir=model_dir,\n+ device=device,\n+ run_mode=run_mode,\n+ batch_size=batch_size,\n+ trt_min_shape=trt_min_shape,\n+ trt_max_shape=trt_max_shape,\n+ trt_opt_shape=trt_opt_shape,\n+ trt_calib_mode=trt_calib_mode,\n+ cpu_threads=cpu_threads,\n+ enable_mkldnn=enable_mkldnn)\n+ assert batch_size == 1, \"The JDE Detector only supports batch size=1 now\"\n+ self.pred_config = pred_config\n+\n+ def postprocess_bboxes(self, boxes, input_shape, im_shape, scale_factor, threshold):\n+ over_thres_idx = np.nonzero(boxes[:, 1:2] >= 
threshold)[0]\n+ if len(over_thres_idx) == 0:\n+ pred_dets = np.zeros((1, 6), dtype=np.float32)\n+ pred_xyxys = np.zeros((1, 4), dtype=np.float32)\n+ return pred_dets, pred_xyxys\n+ else:\n+ boxes = boxes[over_thres_idx]\n+\n+ pred_bboxes = boxes[:, 2:]\n+\n+ pred_xyxys, keep_idx = clip_box(pred_bboxes, input_shape, im_shape,\n+ scale_factor)\n+ if len(keep_idx[0]) == 0:\n+ pred_dets = np.zeros((1, 6), dtype=np.float32)\n+ pred_xyxys = np.zeros((1, 4), dtype=np.float32)\n+ return pred_dets, pred_xyxys\n+\n+ pred_scores = boxes[:, 1:2][keep_idx[0]]\n+ pred_cls_ids = boxes[:, 0:1][keep_idx[0]]\n+ pred_tlwhs = np.concatenate(\n+ (pred_xyxys[:, 0:2], pred_xyxys[:, 2:4] - pred_xyxys[:, 0:2] + 1),\n+ axis=1)\n+\n+ pred_dets = np.concatenate(\n+ (pred_tlwhs, pred_scores, pred_cls_ids), axis=1)\n+ return pred_dets, pred_xyxys\n+\n+ def predict(self, image, scaled, threshold=0.5, warmup=0, repeats=1):\n+ '''\n+ Args:\n+ image (np.ndarray): image numpy data\n+ threshold (float): threshold of predicted box' score\n+ scaled (bool): whether the coords after detector outputs are scaled,\n+ default False in jde yolov3, set True in general detector.\n+ Returns:\n+ pred_dets (np.ndarray, [N, 6])\n+ '''\n+ self.det_times.preprocess_time_s.start()\n+ inputs = self.preprocess(image)\n+ self.det_times.preprocess_time_s.end()\n+\n+ input_names = self.predictor.get_input_names()\n+ for i in range(len(input_names)):\n+ input_tensor = self.predictor.get_input_handle(input_names[i])\n+ input_tensor.copy_from_cpu(inputs[input_names[i]])\n+\n+ np_score_list, np_boxes_list = [], []\n+ for i in range(warmup):\n+ self.predictor.run()\n+ output_names = self.predictor.get_output_names()\n+ boxes_tensor = self.predictor.get_output_handle(output_names[0])\n+ boxes = boxes_tensor.copy_to_cpu()\n+\n+ self.det_times.inference_time_s.start()\n+ for i in range(repeats):\n+ self.predictor.run()\n+ np_score_list.clear()\n+ np_boxes_list.clear()\n+ output_names = self.predictor.get_output_names()\n+ num_outs = int(len(output_names) / 2)\n+ for out_idx in range(num_outs):\n+ np_score_list.append(\n+ self.predictor.get_output_handle(output_names[out_idx])\n+ .copy_to_cpu())\n+ np_boxes_list.append(\n+ self.predictor.get_output_handle(output_names[\n+ out_idx + num_outs]).copy_to_cpu())\n+\n+ self.det_times.inference_time_s.end(repeats=repeats)\n+ self.det_times.img_num += 1\n+ self.det_times.postprocess_time_s.start()\n+ self.postprocess = PicoDetPostProcess(\n+ inputs['image'].shape[2:],\n+ inputs['im_shape'],\n+ inputs['scale_factor'],\n+ strides=self.pred_config.fpn_stride,\n+ nms_threshold=self.pred_config.nms['nms_threshold'])\n+ boxes, boxes_num = self.postprocess(np_score_list, np_boxes_list)\n+\n+ if len(boxes) == 0:\n+ pred_dets = np.zeros((1, 6), dtype=np.float32)\n+ pred_xyxys = np.zeros((1, 4), dtype=np.float32)\n+ else:\n+ input_shape = inputs['image'].shape[2:]\n+ im_shape = inputs['im_shape']\n+ scale_factor = inputs['scale_factor']\n+ pred_dets, pred_xyxys = self.postprocess_bboxes(\n+ boxes, input_shape, im_shape, scale_factor, threshold)\n+\n+ return pred_dets, pred_xyxys\n+\n+\nclass SDE_ReID(object):\ndef __init__(self,\npred_config,\n@@ -350,7 +494,7 @@ def predict_image(detector, reid_model, image_list):\npred_dets, pred_xyxys = detector.predict([frame], FLAGS.scaled,\nFLAGS.threshold)\n- if len(pred_dets) == 1 and sum(pred_dets) == 0:\n+ if len(pred_dets) == 1 and np.sum(pred_dets) == 0:\nprint('Frame {} has no object, try to modify score threshold.'.\nformat(i))\nonline_im = frame\n@@ -407,7 +551,7 @@ def 
predict_video(detector, reid_model, camera_id):\npred_dets, pred_xyxys = detector.predict([frame], FLAGS.scaled,\nFLAGS.threshold)\n- if len(pred_dets) == 1 and sum(pred_dets) == 0:\n+ if len(pred_dets) == 1 and np.sum(pred_dets) == 0:\nprint('Frame {} has no object, try to modify score threshold.'.\nformat(frame_id))\ntimer.toc()\n@@ -464,11 +608,15 @@ def predict_video(detector, reid_model, camera_id):\ndef main():\npred_config = PredictConfig(FLAGS.model_dir)\n- detector = SDE_Detector(\n- pred_config,\n+ detector_func = 'SDE_Detector'\n+ if pred_config.arch == 'PicoDet':\n+ detector_func = 'SDE_DetectorPicoDet'\n+\n+ detector = eval(detector_func)(pred_config,\nFLAGS.model_dir,\ndevice=FLAGS.device,\nrun_mode=FLAGS.run_mode,\n+ batch_size=FLAGS.batch_size,\ntrt_min_shape=FLAGS.trt_min_shape,\ntrt_max_shape=FLAGS.trt_max_shape,\ntrt_opt_shape=FLAGS.trt_opt_shape,\n"
},
{
"change_type": "MODIFY",
"old_path": "ppdet/modeling/mot/visualization.py",
"new_path": "ppdet/modeling/mot/visualization.py",
"diff": "@@ -28,7 +28,7 @@ def plot_tracking(image,\nscores=None,\nframe_id=0,\nfps=0.,\n- ids2=None):\n+ ids2names=[]):\nim = np.ascontiguousarray(np.copy(image))\nim_h, im_w = im.shape[:2]\n@@ -52,15 +52,16 @@ def plot_tracking(image,\nintbox = tuple(map(int, (x1, y1, x1 + w, y1 + h)))\nobj_id = int(obj_ids[i])\nid_text = '{}'.format(int(obj_id))\n- if ids2 is not None:\n- id_text = id_text + ', {}'.format(int(ids2[i]))\n+ if ids2names != []:\n+ assert len(ids2names) == 1, \"plot_tracking only supports single classes.\"\n+ id_text = '{}_'.format(ids2names[0]) + id_text\n_line_thickness = 1 if obj_id <= 0 else line_thickness\ncolor = get_color(abs(obj_id))\ncv2.rectangle(\nim, intbox[0:2], intbox[2:4], color=color, thickness=line_thickness)\ncv2.putText(\nim,\n- id_text, (intbox[0], intbox[1] + 10),\n+ id_text, (intbox[0], intbox[1] - 10),\ncv2.FONT_HERSHEY_PLAIN,\ntext_scale, (0, 0, 255),\nthickness=text_thickness)\n@@ -69,7 +70,7 @@ def plot_tracking(image,\ntext = '{:.2f}'.format(float(scores[i]))\ncv2.putText(\nim,\n- text, (intbox[0], intbox[1] - 10),\n+ text, (intbox[0], intbox[1] + 10),\ncv2.FONT_HERSHEY_PLAIN,\ntext_scale, (0, 255, 255),\nthickness=text_thickness)\n@@ -83,7 +84,7 @@ def plot_tracking_dict(image,\nscores_dict,\nframe_id=0,\nfps=0.,\n- ids2=None):\n+ ids2names=[]):\nim = np.ascontiguousarray(np.copy(image))\nim_h, im_w = im.shape[:2]\n@@ -111,10 +112,12 @@ def plot_tracking_dict(image,\nx1, y1, w, h = tlwh\nintbox = tuple(map(int, (x1, y1, x1 + w, y1 + h)))\nobj_id = int(obj_ids[i])\n- if num_classes == 1:\n+\nid_text = '{}'.format(int(obj_id))\n+ if ids2names != []:\n+ id_text = '{}_{}'.format(ids2names[cls_id], id_text)\nelse:\n- id_text = 'class{}_id{}'.format(cls_id, int(obj_id))\n+ id_text = 'class{}_{}'.format(cls_id, id_text)\n_line_thickness = 1 if obj_id <= 0 else line_thickness\ncolor = get_color(abs(obj_id))\n@@ -126,7 +129,7 @@ def plot_tracking_dict(image,\nthickness=line_thickness)\ncv2.putText(\nim,\n- id_text, (intbox[0], intbox[1] + 10),\n+ id_text, (intbox[0], intbox[1] - 10),\ncv2.FONT_HERSHEY_PLAIN,\ntext_scale, (0, 0, 255),\nthickness=text_thickness)\n@@ -135,7 +138,7 @@ def plot_tracking_dict(image,\ntext = '{:.2f}'.format(float(scores[i]))\ncv2.putText(\nim,\n- text, (intbox[0], intbox[1] - 10),\n+ text, (intbox[0], intbox[1] + 10),\ncv2.FONT_HERSHEY_PLAIN,\ntext_scale, (0, 255, 255),\nthickness=text_thickness)\n"
}
] |
Python
|
Apache License 2.0
|
paddlepaddle/paddledetection
|
[MOT] fix picodet deepsort deploy, add cls_name visualization (#4513)
|
499,395 |
10.11.2021 14:28:15
| -28,800 |
91f55ece6c86b0ea9a2d3e8510ee347152d8d592
|
modify s2anet config
|
[
{
"change_type": "MODIFY",
"old_path": "configs/dota/s2anet_conv_2x_dota.yml",
"new_path": "configs/dota/s2anet_conv_2x_dota.yml",
"diff": "@@ -7,6 +7,13 @@ _BASE_: [\n]\nweights: output/s2anet_conv_1x_dota/model_final\n+ResNet:\n+ depth: 50\n+ variant: b\n+ norm_type: bn\n+ return_idx: [1,2,3]\n+ num_stages: 4\n+\nS2ANetHead:\nanchor_strides: [8, 16, 32, 64, 128]\nanchor_scales: [4]\n"
}
] |
Python
|
Apache License 2.0
|
paddlepaddle/paddledetection
|
modify s2anet config (#4524)
|
499,298 |
11.11.2021 10:28:26
| -28,800 |
b78d756e9b07b5770586b7f79c6ba3f001404b6e
|
[MOT] fix kitti results, fix mcmot ids2names
|
[
{
"change_type": "MODIFY",
"old_path": "deploy/python/mot_jde_infer.py",
"new_path": "deploy/python/mot_jde_infer.py",
"diff": "@@ -183,7 +183,7 @@ def predict_image(detector, image_list):\n[frame], FLAGS.threshold)\nonline_im = plot_tracking_dict(frame, num_classes, online_tlwhs,\nonline_ids, online_scores, frame_id,\n- ids2names)\n+ ids2names=ids2names)\nif FLAGS.save_images:\nif not os.path.exists(FLAGS.output_dir):\nos.makedirs(FLAGS.output_dir)\n"
},
{
"change_type": "MODIFY",
"old_path": "ppdet/data/source/mot.py",
"new_path": "ppdet/data/source/mot.py",
"diff": "@@ -367,10 +367,10 @@ class MCMOTDataSet(DetDataset):\nlogger.info('Image start index: {}'.format(self.img_start_index))\nlogger.info('Total identities of each category: ')\n- self.num_identities_dict = sorted(\n+ num_identities_dict = sorted(\nself.num_identities_dict.items(), key=lambda x: x[0])\ntotal_IDs_all_cats = 0\n- for (k, v) in self.num_identities_dict:\n+ for (k, v) in num_identities_dict:\nlogger.info('Category {} [{}] has {} IDs.'.format(k, cid2cname[k],\nv))\ntotal_IDs_all_cats += v\n"
},
{
"change_type": "MODIFY",
"old_path": "ppdet/modeling/mot/utils.py",
"new_path": "ppdet/modeling/mot/utils.py",
"diff": "@@ -121,21 +121,22 @@ def write_mot_results(filename, results, data_type='mot', num_classes=1):\nf = open(filename, 'w')\nfor cls_id in range(num_classes):\nfor frame_id, tlwhs, tscores, track_ids in results[cls_id]:\n- for tlwh, score, track_id in zip(tlwhs, tscores, track_ids):\n- if track_id < 0: continue\nif data_type == 'kitti':\nframe_id -= 1\n- elif data_type == 'mot':\n+ for tlwh, score, track_id in zip(tlwhs, tscores, track_ids):\n+ if track_id < 0: continue\n+ if data_type == 'mot':\ncls_id = -1\n- elif data_type == 'mcmot':\n- cls_id = cls_id\nx1, y1, w, h = tlwh\n+ x2, y2 = x1 + w, y1 + h\nline = save_format.format(\nframe=frame_id,\nid=track_id,\nx1=x1,\ny1=y1,\n+ x2=x2,\n+ y2=y2,\nw=w,\nh=h,\nscore=score,\n"
}
] |
Python
|
Apache License 2.0
|
paddlepaddle/paddledetection
|
[MOT] fix kitti results, fix mcmot ids2names (#4536)
|
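Background for the `write_mot_results` change above: KITTI tracking labels store a box by its corners while the MOT Challenge format stores left/top/width/height, so the formatter needs `x2`/`y2` as well. The repo's exact `save_format` strings are not shown in the diff; the lines below only illustrate the two common conventions:

```python
x1, y1, w, h = 10.0, 20.0, 50.0, 80.0
x2, y2 = x1 + w, y1 + h

mot_line = f"1,3,{x1},{y1},{w},{h},0.9,-1,-1,-1"   # MOT Challenge: frame,id,left,top,w,h,score,...
kitti_box = (x1, y1, x2, y2)                       # KITTI: left, top, right, bottom corners
print(mot_line, kitti_box)
```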
499,299 |
11.11.2021 13:58:02
| -28,800 |
d3927dde28e892a0c81108b51b02b09057b7bdb5
|
default fusenorm to False in export model
|
[
{
"change_type": "MODIFY",
"old_path": "configs/keypoint/tiny_pose/keypoint/tinypose_128x96.yml",
"new_path": "configs/keypoint/tiny_pose/keypoint/tinypose_128x96.yml",
"diff": "@@ -144,4 +144,4 @@ TestReader:\nis_scale: true\n- Permute: {}\nbatch_size: 1\n- fuse_normalize: true\n+ fuse_normalize: false\n"
},
{
"change_type": "MODIFY",
"old_path": "configs/keypoint/tiny_pose/keypoint/tinypose_256x192.yml",
"new_path": "configs/keypoint/tiny_pose/keypoint/tinypose_256x192.yml",
"diff": "@@ -144,4 +144,4 @@ TestReader:\nis_scale: true\n- Permute: {}\nbatch_size: 1\n- fuse_normalize: true\n+ fuse_normalize: false\n"
}
] |
Python
|
Apache License 2.0
|
paddlepaddle/paddledetection
|
default fusenorm to False in export model (#4547)
|
499,313 |
12.11.2021 10:41:03
| -28,800 |
70753f5d62ac5730dfe78be0ef8d3148d51b0a2b
|
add citation for ppdet/utils/colormap.py
|
[
{
"change_type": "MODIFY",
"old_path": "ppdet/utils/colormap.py",
"new_path": "ppdet/utils/colormap.py",
"diff": "@@ -23,6 +23,8 @@ import numpy as np\ndef colormap(rgb=False):\n\"\"\"\nGet colormap\n+\n+ The code of this function is copied from https://github.com/facebookresearch/Detectron/blob/main/detectron/utils/colormap.py\n\"\"\"\ncolor_list = np.array([\n0.000, 0.447, 0.741, 0.850, 0.325, 0.098, 0.929, 0.694, 0.125, 0.494,\n"
},
{
"change_type": "MODIFY",
"old_path": "static/ppdet/utils/colormap.py",
"new_path": "static/ppdet/utils/colormap.py",
"diff": "@@ -23,6 +23,8 @@ import numpy as np\ndef colormap(rgb=False):\n\"\"\"\nGet colormap\n+\n+ The code of this function is copied from https://github.com/facebookresearch/Detectron/blob/main/detectron/utils/colormap.py\n\"\"\"\ncolor_list = np.array([\n0.000, 0.447, 0.741, 0.850, 0.325, 0.098, 0.929, 0.694, 0.125, 0.494,\n"
}
] |
Python
|
Apache License 2.0
|
paddlepaddle/paddledetection
|
add citation for ppdet/utils/colormap.py (#4466)
|
499,313 |
12.11.2021 22:19:30
| -28,800 |
7826f247cca66f2e9d0d0d113d6eaf4e46eae215
|
fix PIL may read image with rotation
|
[
{
"change_type": "MODIFY",
"old_path": "ppdet/engine/trainer.py",
"new_path": "ppdet/engine/trainer.py",
"diff": "@@ -22,7 +22,7 @@ import copy\nimport time\nimport numpy as np\n-from PIL import Image\n+from PIL import Image, ImageOps\nimport paddle\nimport paddle.distributed as dist\n@@ -527,6 +527,7 @@ class Trainer(object):\nfor i, im_id in enumerate(outs['im_id']):\nimage_path = imid2path[int(im_id)]\nimage = Image.open(image_path).convert('RGB')\n+ image = ImageOps.exif_transpose(image)\nself.status['original_image'] = np.array(image.copy())\nend = start + bbox_num[i]\n"
}
] |
Python
|
Apache License 2.0
|
paddlepaddle/paddledetection
|
fix PIL may read image with rotation (#4563)
|
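A minimal sketch of the fix above: PIL keeps the raw pixel data and stores any camera rotation in the EXIF Orientation tag, so the image must be transposed before visualization (`demo.jpg` is a placeholder path):

```python
from PIL import Image, ImageOps

image = Image.open("demo.jpg").convert("RGB")
image = ImageOps.exif_transpose(image)   # apply the EXIF rotation/flip, if any
```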
499,333 |
14.11.2021 22:54:49
| -28,800 |
93787dca80dc3e65377cbb61e986c1a712008fc1
|
[MOT] update pptracking doc
|
[
{
"change_type": "ADD",
"old_path": "docs/images/pptracking-demo.gif",
"new_path": "docs/images/pptracking-demo.gif",
"diff": "Binary files /dev/null and b/docs/images/pptracking-demo.gif differ\n"
},
{
"change_type": "ADD",
"old_path": "docs/images/pptracking.png",
"new_path": "docs/images/pptracking.png",
"diff": "Binary files /dev/null and b/docs/images/pptracking.png differ\n"
}
] |
Python
|
Apache License 2.0
|
paddlepaddle/paddledetection
|
[MOT] update pptracking doc (#4581)
|
499,333 |
15.11.2021 16:31:37
| -28,800 |
b999092e8f54e83da2faab4c4c9b7b8a153ffbaf
|
[MOT] add demo citation
|
[
{
"change_type": "MODIFY",
"old_path": "configs/mot/README.md",
"new_path": "configs/mot/README.md",
"diff": "@@ -28,6 +28,8 @@ Paddledetection implements three MOT algorithms of these two series.\n<div align=\"center\">\n<img src=\"../../docs/images/mot16_jde.gif\" width=500 />\n+ <br>\n+ demo resource: MOT17 dataset</div>\n</div>\n"
}
] |
Python
|
Apache License 2.0
|
paddlepaddle/paddledetection
|
[MOT] add demo citation (#4584)
|
499,340 |
17.11.2021 17:52:16
| -28,800 |
bba88f6791aa8a4f2e1d3683541bb48f00eb4e6e
|
fix warmup+decay LR
|
[
{
"change_type": "MODIFY",
"old_path": "ppdet/optimizer.py",
"new_path": "ppdet/optimizer.py",
"diff": "@@ -56,11 +56,13 @@ class CosineDecay(object):\nmax_iters = self.max_epochs * int(step_per_epoch)\nif boundary is not None and value is not None and self.use_warmup:\n+ warmup_iters = len(boundary)\nfor i in range(int(boundary[-1]), max_iters):\nboundary.append(i)\n- decayed_lr = base_lr * 0.5 * (\n- math.cos(i * math.pi / max_iters) + 1)\n+ decayed_lr = base_lr * 0.5 * (math.cos(\n+ (i - warmup_iters) * math.pi /\n+ (max_iters - warmup_iters)) + 1)\nvalue.append(decayed_lr)\nreturn optimizer.lr.PiecewiseDecay(boundary, value)\n"
}
] |
Python
|
Apache License 2.0
|
paddlepaddle/paddledetection
|
fix warmup+decay LR (#4611)
|
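The fix above shifts the cosine phase so it starts at the end of warmup: lr(i) = base_lr * 0.5 * (1 + cos(pi * (i - warmup_iters) / (max_iters - warmup_iters))). A small numeric check with illustrative values (not the repo's defaults):

```python
import math

base_lr, warmup_iters, max_iters = 0.01, 100, 1000

def cosine_lr(i):
    return base_lr * 0.5 * (math.cos((i - warmup_iters) * math.pi /
                                     (max_iters - warmup_iters)) + 1)

print(cosine_lr(100))    # 0.01 -> full base_lr right after warmup
print(cosine_lr(1000))   # 0.0  -> fully decayed at the last iteration
```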
499,304 |
17.11.2021 18:48:39
| -28,800 |
d6dff40e7b68c54c76e3dc03a0ffe1c5064276cf
|
fix picodet docs
|
[
{
"change_type": "MODIFY",
"old_path": "configs/picodet/README.md",
"new_path": "configs/picodet/README.md",
"diff": "@@ -212,7 +212,7 @@ paddle2onnx --model_dir output_inference/picodet_s_320_coco/ \\\n- [PaddleLite C++ demo](../../deploy/lite)\n- [NCNN C++/Python demo](../../deploy/third_engine/demo_ncnn)\n- [MNN C++/Python demo](../../deploy/third_engine/demo_mnn)\n-- [OpenVINO C++/Python demo](../../deploy/third_engine/demo_openvino)\n+- [OpenVINO C++ demo](../../deploy/third_engine/demo_openvino)\n- [Android demo](https://github.com/JiweiMaster/PP-PicoDet-Android-Demo)\n@@ -222,11 +222,6 @@ Android demo visualization:\n</div>\n-## Application\n-\n-- **Pedestrian detection:** model zoo of `PicoDet-S-Pedestrian` please refer to [PP-TinyPose](https://github.com/PaddlePaddle/PaddleDetection/tree/develop/configs/keypoint/tiny_pose#%E8%A1%8C%E4%BA%BA%E6%A3%80%E6%B5%8B%E6%A8%A1%E5%9E%8B)\n-\n-\n## Quantization\n<details open>\n@@ -271,6 +266,10 @@ python tools/post_quant.py -c configs/picodet/picodet_s_320_coco.yml \\\n</details>\n+## Application\n+\n+- **Pedestrian detection:** model zoo of `PicoDet-S-Pedestrian` please refer to [PP-TinyPose](https://github.com/PaddlePaddle/PaddleDetection/tree/develop/configs/keypoint/tiny_pose#%E8%A1%8C%E4%BA%BA%E6%A3%80%E6%B5%8B%E6%A8%A1%E5%9E%8B)\n+\n## FAQ\n<details>\n@@ -298,8 +297,8 @@ Please use `PicoDet-LCNet` model, which has fewer `transpose` operators.\n</details>\n-## Cite PP-PiocDet\n-If you use PiocDet in your research, please cite our work by using the following BibTeX entry:\n+## Cite PP-PicoDet\n+If you use PicoDet in your research, please cite our work by using the following BibTeX entry:\n```\n@misc{yu2021pppicodet,\ntitle={PP-PicoDet: A Better Real-Time Object Detector on Mobile Devices},\n"
}
] |
Python
|
Apache License 2.0
|
paddlepaddle/paddledetection
|
fix picodet docs (#4608)
|
499,333 |
17.11.2021 18:59:37
| -28,800 |
4e51747527f996466b9510bd2f9a88dbc5350bda
|
support draw traj in mct
|
[
{
"change_type": "MODIFY",
"old_path": "deploy/pptracking/python/mot_jde_infer.py",
"new_path": "deploy/pptracking/python/mot_jde_infer.py",
"diff": "@@ -224,7 +224,7 @@ def predict_video(detector, camera_id):\nentrance = None\nrecords = None\nif FLAGS.draw_center_traj:\n- center_traj = {}\n+ center_traj = [{} for i in range(num_classes)]\nif num_classes == 1:\nid_set = set()\n"
},
{
"change_type": "MODIFY",
"old_path": "deploy/pptracking/python/utils.py",
"new_path": "deploy/pptracking/python/utils.py",
"diff": "@@ -129,7 +129,7 @@ def argsparser():\nparser.add_argument(\n\"--secs_interval\",\ntype=int,\n- default=10,\n+ default=2,\nhelp=\"The seconds interval to count after tracking\")\nparser.add_argument(\n\"--draw_center_traj\",\n"
},
{
"change_type": "MODIFY",
"old_path": "deploy/pptracking/python/visualize.py",
"new_path": "deploy/pptracking/python/visualize.py",
"diff": "@@ -213,6 +213,33 @@ def plot_tracking_dict(image,\nradius = max(5, int(im_w / 140.))\n+ if num_classes == 1:\n+ start = records[-1].find('Total')\n+ end = records[-1].find('In')\n+ cv2.putText(\n+ im,\n+ records[-1][start:end - 2], (0, int(40 * text_scale)),\n+ cv2.FONT_HERSHEY_PLAIN,\n+ text_scale, (0, 0, 255),\n+ thickness=2)\n+\n+ if num_classes == 1 and do_entrance_counting:\n+ entrance_line = tuple(map(int, entrance))\n+ cv2.rectangle(\n+ im,\n+ entrance_line[0:2],\n+ entrance_line[2:4],\n+ color=(0, 255, 255),\n+ thickness=line_thickness)\n+ # find start location for entrance counting data\n+ start = records[-1].find('In')\n+ cv2.putText(\n+ im,\n+ records[-1][start:-1], (0, int(60 * text_scale)),\n+ cv2.FONT_HERSHEY_PLAIN,\n+ text_scale, (0, 0, 255),\n+ thickness=2)\n+\nfor cls_id in range(num_classes):\ntlwhs = tlwhs_dict[cls_id]\nobj_ids = obj_ids_dict[cls_id]\n@@ -233,9 +260,9 @@ def plot_tracking_dict(image,\nobj_id = int(obj_ids[i])\nif center_traj is not None:\nrecord_id.add(obj_id)\n- if obj_id not in center_traj:\n- center_traj[obj_id] = deque(maxlen=30)\n- center_traj[obj_id].append(center)\n+ if obj_id not in center_traj[cls_id]:\n+ center_traj[cls_id][obj_id] = deque(maxlen=30)\n+ center_traj[cls_id][obj_id].append(center)\nid_text = '{}'.format(int(obj_id))\nif ids2names != []:\n@@ -266,19 +293,11 @@ def plot_tracking_dict(image,\ncv2.FONT_HERSHEY_PLAIN,\ntext_scale, (0, 255, 255),\nthickness=text_thickness)\n- if num_classes == 1 and do_entrance_counting:\n- entrance_line = tuple(map(int, entrance))\n- cv2.rectangle(\n- im,\n- entrance_line[0:2],\n- entrance_line[2:4],\n- color=(0, 255, 255),\n- thickness=line_thickness)\n-\nif center_traj is not None:\n- for i in center_traj.keys():\n+ for traj in center_traj:\n+ for i in traj.keys():\nif i not in record_id:\ncontinue\n- for point in center_traj[i]:\n+ for point in traj[i]:\ncv2.circle(im, point, 3, (0, 0, 255), -1)\nreturn im\n"
}
] |
Python
|
Apache License 2.0
|
paddlepaddle/paddledetection
|
support draw traj in mct (#4604)
|
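The change above keeps one trajectory buffer per class instead of a single shared dict. A minimal sketch of that structure, with made-up ids and points:

```python
from collections import deque

num_classes = 2
center_traj = [{} for _ in range(num_classes)]   # one {track_id: deque} per class

def add_point(cls_id, obj_id, center):
    if obj_id not in center_traj[cls_id]:
        center_traj[cls_id][obj_id] = deque(maxlen=30)   # keep only the last 30 centers
    center_traj[cls_id][obj_id].append(center)

add_point(0, 7, (120, 240))
add_point(1, 7, (300, 180))   # same id in another class uses a separate buffer
```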
499,299 |
18.11.2021 10:39:32
| -28,800 |
af56c3e5a7943b58103f5b1af8e956a17fe4cc90
|
update tinypose readme
|
[
{
"change_type": "ADD",
"old_path": "docs/images/tinypose_demo.png",
"new_path": "docs/images/tinypose_demo.png",
"diff": "Binary files /dev/null and b/docs/images/tinypose_demo.png differ\n"
},
{
"change_type": "ADD",
"old_path": "docs/images/tinypose_pipeline.png",
"new_path": "docs/images/tinypose_pipeline.png",
"diff": "Binary files /dev/null and b/docs/images/tinypose_pipeline.png differ\n"
}
] |
Python
|
Apache License 2.0
|
paddlepaddle/paddledetection
|
update tinypose readme (#4618)
|
499,333 |
18.11.2021 16:15:53
| -28,800 |
817f7cdba33f8d283245ad2cc87d67d68a3aa433
|
fix cmake in pptracking
|
[
{
"change_type": "MODIFY",
"old_path": "deploy/pptracking/cpp/CMakeLists.txt",
"new_path": "deploy/pptracking/cpp/CMakeLists.txt",
"diff": "@@ -19,7 +19,7 @@ include_directories(\"${CMAKE_SOURCE_DIR}/\")\ninclude_directories(\"${CMAKE_CURRENT_BINARY_DIR}/ext/yaml-cpp/src/ext-yaml-cpp/include\")\nlink_directories(\"${CMAKE_CURRENT_BINARY_DIR}/ext/yaml-cpp/lib\")\n-set(SRCS src/main.cc src/preprocess_op.cc src/pipeline.cc src/jde_predictor.cc src/sde_predictor.cc src/tracker.cc src/trajectory.cc src/lapjv.cpp src/base_predictor.cc src/postprocess.cc)\n+set(SRCS src/main.cc src/preprocess_op.cc src/pipeline.cc src/jde_predictor.cc src/sde_predictor.cc src/tracker.cc src/trajectory.cc src/lapjv.cpp src/postprocess.cc)\nmacro(safe_set_static_flag)\nforeach(flag_var\n"
}
] |
Python
|
Apache License 2.0
|
paddlepaddle/paddledetection
|
fix cmake in pptracking (#4622)
|
499,298 |
19.11.2021 10:12:16
| -28,800 |
e34ef6b61168f1feb9e12305c544d73b04f585cb
|
fix centernet_head size_target
|
[
{
"change_type": "MODIFY",
"old_path": "ppdet/modeling/heads/centernet_head.py",
"new_path": "ppdet/modeling/heads/centernet_head.py",
"diff": "@@ -201,9 +201,14 @@ class CenterNetHead(nn.Layer):\nsize_target = inputs['size']\n# shape: [bs, max_per_img, 4]\nelse:\n- size_target = inputs['size'][:, :, 0:2] + inputs['size'][:, :,\n- 2:]\n+ if inputs['size'].shape[-1] == 2:\n+ # inputs['size'] is wh, and regress as wh\n# shape: [bs, max_per_img, 2]\n+ size_target = inputs['size']\n+ else:\n+ # inputs['size'] is ltrb, but regress as wh\n+ # shape: [bs, max_per_img, 4]\n+ size_target = inputs['size'][:, :, 0:2] + inputs['size'][:, :, 2:]\nsize_target.stop_gradient = True\nsize_loss = F.l1_loss(\n"
}
] |
Python
|
Apache License 2.0
|
paddlepaddle/paddledetection
|
fix centernet_head size_target (#4626)
|
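A quick numeric check of the branch added above: when the stored size target is ltrb offsets from the center point, width/height come back as (l + r, t + b); a two-channel target is already (w, h):

```python
import numpy as np

size_ltrb = np.array([[[3., 4., 5., 6.]]])            # [bs, max_per_img, 4] -> l, t, r, b
size_wh = size_ltrb[:, :, 0:2] + size_ltrb[:, :, 2:]
print(size_wh)                                        # [[[ 8. 10.]]] -> w = l + r, h = t + b
```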
499,333 |
20.11.2021 13:50:56
| -28,800 |
7325d4ab511fa58330396e5436f78709a0446b0b
|
[MOT] update doc & fix output
|
[
{
"change_type": "MODIFY",
"old_path": "deploy/pptracking/python/visualize.py",
"new_path": "deploy/pptracking/python/visualize.py",
"diff": "@@ -209,7 +209,7 @@ def plot_tracking_dict(image,\nend = records[-1].find('In')\ncv2.putText(\nim,\n- records[-1][start:end - 2], (0, int(40 * text_scale)),\n+ records[-1][start:end], (0, int(40 * text_scale)),\ncv2.FONT_HERSHEY_PLAIN,\ntext_scale, (0, 0, 255),\nthickness=2)\n"
}
] |
Python
|
Apache License 2.0
|
paddlepaddle/paddledetection
|
[MOT] update doc & fix output (#4635)
|
499,395 |
20.11.2021 13:51:55
| -28,800 |
b1c36d9c9c63b3f3745beacf2f7e130ec30e88bd
|
increase trt workspace size to 32M while using dcn trt plugin
|
[
{
"change_type": "MODIFY",
"old_path": "deploy/python/infer.py",
"new_path": "deploy/python/infer.py",
"diff": "@@ -537,7 +537,7 @@ def load_predictor(model_dir,\n}\nif run_mode in precision_map.keys():\nconfig.enable_tensorrt_engine(\n- workspace_size=1 << 10,\n+ workspace_size=1 << 25,\nmax_batch_size=batch_size,\nmin_subgraph_size=min_subgraph_size,\nprecision_mode=precision_map[run_mode],\n"
}
] |
Python
|
Apache License 2.0
|
paddlepaddle/paddledetection
|
increase trt workspace size to 32M while using dcn trt plugin (#4629)
|
499,395 |
22.11.2021 13:39:52
| -28,800 |
e07f9b9cceaae9f554b9e522d6213fd56776c823
|
polish and fix some code
|
[
{
"change_type": "MODIFY",
"old_path": "configs/dota/s2anet_1x_spine.yml",
"new_path": "configs/dota/s2anet_1x_spine.yml",
"diff": "@@ -27,4 +27,3 @@ S2ANetHead:\nreg_loss_weight: [1.0, 1.0, 1.0, 1.0, 1.05]\ncls_loss_weight: [1.05, 1.0]\nreg_loss_type: 'l1'\n- is_training: True\n"
},
{
"change_type": "MODIFY",
"old_path": "configs/dota/s2anet_conv_2x_dota.yml",
"new_path": "configs/dota/s2anet_conv_2x_dota.yml",
"diff": "@@ -6,6 +6,7 @@ _BASE_: [\n'_base_/s2anet_reader.yml',\n]\nweights: output/s2anet_conv_1x_dota/model_final\n+pretrain_weights: https://paddledet.bj.bcebos.com/models/pretrained/ResNet50_cos_pretrained.pdparams\nResNet:\ndepth: 50\n"
},
{
"change_type": "MODIFY",
"old_path": "ppdet/ext_op/rbox_iou_op.cc",
"new_path": "ppdet/ext_op/rbox_iou_op.cc",
"diff": "@@ -45,8 +45,7 @@ std::vector<paddle::Tensor> RboxIouCPUForward(const paddle::Tensor& rbox1, const\nauto rbox1_num = rbox1.shape()[0];\nauto rbox2_num = rbox2.shape()[0];\n- auto output = paddle::Tensor(paddle::PlaceType::kCPU);\n- output.reshape({rbox1_num, rbox2_num});\n+ auto output = paddle::Tensor(paddle::PlaceType::kCPU, {rbox1_num, rbox2_num});\nPD_DISPATCH_FLOATING_TYPES(\nrbox1.type(),\n"
},
{
"change_type": "MODIFY",
"old_path": "ppdet/ext_op/rbox_iou_op.cu",
"new_path": "ppdet/ext_op/rbox_iou_op.cu",
"diff": "@@ -94,8 +94,7 @@ std::vector<paddle::Tensor> RboxIouCUDAForward(const paddle::Tensor& rbox1, cons\nauto rbox1_num = rbox1.shape()[0];\nauto rbox2_num = rbox2.shape()[0];\n- auto output = paddle::Tensor(paddle::PlaceType::kGPU);\n- output.reshape({rbox1_num, rbox2_num});\n+ auto output = paddle::Tensor(paddle::PlaceType::kGPU, {rbox1_num, rbox2_num});\nconst int blocks_x = CeilDiv(rbox1_num, BLOCK_DIM_X);\nconst int blocks_y = CeilDiv(rbox2_num, BLOCK_DIM_Y);\n"
},
{
"change_type": "MODIFY",
"old_path": "ppdet/modeling/bbox_utils.py",
"new_path": "ppdet/modeling/bbox_utils.py",
"diff": "@@ -529,18 +529,18 @@ def poly2rbox(polys):\nrbox_angle = 0\nif edge1 > edge2:\nrbox_angle = np.arctan2(\n- np.float(pt2[1] - pt1[1]), np.float(pt2[0] - pt1[0]))\n+ float(pt2[1] - pt1[1]), float(pt2[0] - pt1[0]))\nelif edge2 >= edge1:\nrbox_angle = np.arctan2(\n- np.float(pt4[1] - pt1[1]), np.float(pt4[0] - pt1[0]))\n+ float(pt4[1] - pt1[1]), float(pt4[0] - pt1[0]))\ndef norm_angle(angle, range=[-np.pi / 4, np.pi]):\nreturn (angle - range[0]) % range[1] + range[0]\nrbox_angle = norm_angle(rbox_angle)\n- x_ctr = np.float(pt1[0] + pt3[0]) / 2\n- y_ctr = np.float(pt1[1] + pt3[1]) / 2\n+ x_ctr = float(pt1[0] + pt3[0]) / 2\n+ y_ctr = float(pt1[1] + pt3[1]) / 2\nrotated_box = np.array([x_ctr, y_ctr, width, height, rbox_angle])\nrotated_boxes.append(rotated_box)\nret_rotated_boxes = np.array(rotated_boxes)\n"
}
] |
Python
|
Apache License 2.0
|
paddlepaddle/paddledetection
|
polish and fix some code (#4643)
|
499,298 |
22.11.2021 18:54:38
| -28,800 |
1ba91c0e6dc73be9b8b7ccce9d73bc693a0af02d
|
[MOT] fix jde head
|
[
{
"change_type": "MODIFY",
"old_path": "ppdet/modeling/architectures/jde.py",
"new_path": "ppdet/modeling/architectures/jde.py",
"diff": "@@ -16,8 +16,6 @@ from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n-import paddle\n-from ppdet.modeling.mot.utils import scale_coords\nfrom ppdet.core.workspace import register, create\nfrom .meta_arch import BaseArch\n@@ -73,8 +71,11 @@ class JDE(BaseArch):\nemb_feats = det_outs['emb_feats']\nloss_confs = det_outs['det_losses']['loss_confs']\nloss_boxes = det_outs['det_losses']['loss_boxes']\n- jde_losses = self.reid(emb_feats, self.inputs, loss_confs,\n- loss_boxes)\n+ jde_losses = self.reid(\n+ emb_feats,\n+ self.inputs,\n+ loss_confs=loss_confs,\n+ loss_boxes=loss_boxes)\nreturn jde_losses\nelse:\nif self.metric == 'MOTDet':\n@@ -84,32 +85,18 @@ class JDE(BaseArch):\n}\nreturn det_results\n- elif self.metric == 'ReID':\n- emb_feats = det_outs['emb_feats']\n- embs_and_gts = self.reid(emb_feats, self.inputs, test_emb=True)\n- return embs_and_gts\n-\nelif self.metric == 'MOT':\nemb_feats = det_outs['emb_feats']\n- emb_outs = self.reid(emb_feats, self.inputs)\n-\n+ bboxes = det_outs['bbox']\nboxes_idx = det_outs['boxes_idx']\n- bbox = det_outs['bbox']\n-\n- input_shape = self.inputs['image'].shape[2:]\n- im_shape = self.inputs['im_shape']\n- scale_factor = self.inputs['scale_factor']\n-\n- bbox[:, 2:] = scale_coords(bbox[:, 2:], input_shape, im_shape,\n- scale_factor)\n-\nnms_keep_idx = det_outs['nms_keep_idx']\n- pred_dets = paddle.concat((bbox[:, 2:], bbox[:, 1:2], bbox[:, 0:1]), axis=1)\n-\n- emb_valid = paddle.gather_nd(emb_outs, boxes_idx)\n- pred_embs = paddle.gather_nd(emb_valid, nms_keep_idx)\n-\n+ pred_dets, pred_embs = self.reid(\n+ emb_feats,\n+ self.inputs,\n+ bboxes=bboxes,\n+ boxes_idx=boxes_idx,\n+ nms_keep_idx=nms_keep_idx)\nreturn pred_dets, pred_embs\nelse:\n"
},
{
"change_type": "MODIFY",
"old_path": "ppdet/modeling/reid/fairmot_embedding_head.py",
"new_path": "ppdet/modeling/reid/fairmot_embedding_head.py",
"diff": "@@ -61,13 +61,9 @@ class FairMOTEmbeddingHead(nn.Layer):\nif num_classes == 1:\nnID = self.num_identities_dict[0] # single class\nself.classifier = nn.Linear(\n- ch_emb,\n- nID,\n- weight_attr=param_attr,\n- bias_attr=bias_attr)\n+ ch_emb, nID, weight_attr=param_attr, bias_attr=bias_attr)\n# When num_identities(nID) is 1, emb_scale is set as 1\n- self.emb_scale = math.sqrt(2) * math.log(\n- nID - 1) if nID > 1 else 1\n+ self.emb_scale = math.sqrt(2) * math.log(nID - 1) if nID > 1 else 1\nelse:\nself.classifiers = dict()\nself.emb_scale_dict = dict()\n@@ -84,7 +80,7 @@ class FairMOTEmbeddingHead(nn.Layer):\ninput_shape = input_shape[0]\nreturn {'in_channels': input_shape.channels}\n- def process_by_class(self, det_outs, embedding, bbox_inds, topk_clses):\n+ def process_by_class(self, bboxes, embedding, bbox_inds, topk_clses):\npred_dets, pred_embs = [], []\nfor cls_id in range(self.num_classes):\ninds_masks = topk_clses == cls_id\n@@ -97,8 +93,8 @@ class FairMOTEmbeddingHead(nn.Layer):\ncls_inds_mask = inds_masks > 0\nbbox_mask = paddle.nonzero(cls_inds_mask)\n- cls_det_outs = paddle.gather_nd(det_outs, bbox_mask)\n- pred_dets.append(cls_det_outs)\n+ cls_bboxes = paddle.gather_nd(bboxes, bbox_mask)\n+ pred_dets.append(cls_bboxes)\ncls_inds = paddle.masked_select(bbox_inds, cls_inds_mask)\ncls_inds = cls_inds.unsqueeze(-1)\n@@ -108,12 +104,12 @@ class FairMOTEmbeddingHead(nn.Layer):\nreturn paddle.concat(pred_dets), paddle.concat(pred_embs)\ndef forward(self,\n- feat,\n+ neck_feat,\ninputs,\n- det_outs=None,\n+ bboxes=None,\nbbox_inds=None,\ntopk_clses=None):\n- reid_feat = self.reid(feat)\n+ reid_feat = self.reid(neck_feat)\nif self.training:\nif self.num_classes == 1:\nloss = self.get_loss(reid_feat, inputs)\n@@ -121,18 +117,18 @@ class FairMOTEmbeddingHead(nn.Layer):\nloss = self.get_mc_loss(reid_feat, inputs)\nreturn loss\nelse:\n- assert det_outs is not None and bbox_inds is not None\n+ assert bboxes is not None and bbox_inds is not None\nreid_feat = F.normalize(reid_feat)\nembedding = paddle.transpose(reid_feat, [0, 2, 3, 1])\nembedding = paddle.reshape(embedding, [-1, self.ch_emb])\n# embedding shape: [bs * h * w, ch_emb]\nif self.num_classes == 1:\n- pred_dets = det_outs\n+ pred_dets = bboxes\npred_embs = paddle.gather(embedding, bbox_inds)\nelse:\npred_dets, pred_embs = self.process_by_class(\n- det_outs, embedding, bbox_inds, topk_clses)\n+ bboxes, embedding, bbox_inds, topk_clses)\nreturn pred_dets, pred_embs\ndef get_loss(self, feat, inputs):\n"
},
{
"change_type": "MODIFY",
"old_path": "ppdet/modeling/reid/jde_embedding_head.py",
"new_path": "ppdet/modeling/reid/jde_embedding_head.py",
"diff": "@@ -17,6 +17,7 @@ from __future__ import division\nfrom __future__ import print_function\nimport math\n+import numpy as np\nimport paddle\nimport paddle.nn as nn\nimport paddle.nn.functional as F\n@@ -115,31 +116,58 @@ class JDEEmbeddingHead(nn.Layer):\ndef forward(self,\nidentify_feats,\n- targets=None,\n+ targets,\nloss_confs=None,\nloss_boxes=None,\n- test_emb=False):\n+ bboxes=None,\n+ boxes_idx=None,\n+ nms_keep_idx=None):\n+ assert self.num_classes == 1, 'JDE only support sindle class MOT.'\nassert len(identify_feats) == self.anchor_levels\nide_outs = []\nfor feat, ide_head in zip(identify_feats, self.identify_outputs):\nide_outs.append(ide_head(feat))\nif self.training:\n- assert targets != None\nassert len(loss_confs) == len(loss_boxes) == self.anchor_levels\nloss_ides = self.emb_loss(ide_outs, targets, self.emb_scale,\nself.classifier)\n- return self.jde_loss(loss_confs, loss_boxes, loss_ides,\n- self.loss_params_cls, self.loss_params_reg,\n- self.loss_params_ide, targets)\n- else:\n- if test_emb:\n- assert targets != None\n- embs_and_gts = self.get_emb_and_gt_outs(ide_outs, targets)\n- return embs_and_gts\n+ jde_losses = self.jde_loss(\n+ loss_confs, loss_boxes, loss_ides, self.loss_params_cls,\n+ self.loss_params_reg, self.loss_params_ide, targets)\n+ return jde_losses\nelse:\n+ assert bboxes is not None\n+ assert boxes_idx is not None\n+ assert nms_keep_idx is not None\n+\nemb_outs = self.get_emb_outs(ide_outs)\n- return emb_outs\n+ emb_valid = paddle.gather_nd(emb_outs, boxes_idx)\n+ pred_embs = paddle.gather_nd(emb_valid, nms_keep_idx)\n+\n+ input_shape = targets['image'].shape[2:]\n+ # input_shape: [h, w], before data transforms, set in model config\n+ im_shape = targets['im_shape'][0].numpy()\n+ # im_shape: [new_h, new_w], after data transforms\n+ scale_factor = targets['scale_factor'][0].numpy()\n+ bboxes[:, 2:] = self.scale_coords(bboxes[:, 2:], input_shape,\n+ im_shape, scale_factor)\n+ # tlwhs, scores, cls_ids\n+ pred_dets = paddle.concat(\n+ (bboxes[:, 2:], bboxes[:, 1:2], bboxes[:, 0:1]), axis=1)\n+ return pred_dets, pred_embs\n+\n+ def scale_coords(self, coords, input_shape, im_shape, scale_factor):\n+ ratio = scale_factor[0]\n+ pad_w = (input_shape[1] - int(im_shape[1])) / 2\n+ pad_h = (input_shape[0] - int(im_shape[0])) / 2\n+ coords = paddle.cast(coords, 'float32')\n+ coords[:, 0::2] -= pad_w\n+ coords[:, 1::2] -= pad_h\n+ coords[:, 0:4] /= ratio\n+ coords[:, :4] = paddle.clip(\n+ coords[:, :4], min=0, max=coords[:, :4].max())\n+ return coords.round()\ndef get_emb_and_gt_outs(self, ide_outs, targets):\nemb_and_gts = []\n"
}
] |
Python
|
Apache License 2.0
|
paddlepaddle/paddledetection
|
[MOT] fix jde head (#4652)
|
499,298 |
23.11.2021 11:00:42
| -28,800 |
d015403e62f2587210c2d28d79d095417d60d554
|
[MOT] fix deploy infer of pptracking
|
[
{
"change_type": "MODIFY",
"old_path": "deploy/pptracking/python/mot_jde_infer.py",
"new_path": "deploy/pptracking/python/mot_jde_infer.py",
"diff": "@@ -45,7 +45,7 @@ class JDE_Detector(Detector):\nmodel_dir (str): root path of model.pdiparams, model.pdmodel and infer_cfg.yml\ndevice (str): Choose the device you want to run, it can be: CPU/GPU/XPU, default is CPU\nrun_mode (str): mode of running(fluid/trt_fp32/trt_fp16)\n- batch_size (int): size of pre batch in inference\n+ batch_size (int): size of per batch in inference, default is 1 in tracking models\ntrt_min_shape (int): min shape for dynamic shape in trt\ntrt_max_shape (int): max shape for dynamic shape in trt\ntrt_opt_shape (int): opt shape for dynamic shape in trt\n@@ -111,7 +111,8 @@ class JDE_Detector(Detector):\ntid = t.track_id\ntscore = t.score\nif tscore < threshold: continue\n- if tlwh[2] * tlwh[3] <= self.tracker.min_box_area: continue\n+ if tlwh[2] * tlwh[3] <= self.tracker.min_box_area:\n+ continue\nif self.tracker.vertical_ratio > 0 and tlwh[2] / tlwh[\n3] > self.tracker.vertical_ratio:\ncontinue\n@@ -123,7 +124,8 @@ class JDE_Detector(Detector):\ndef predict(self, image_list, threshold=0.5, warmup=0, repeats=1):\n'''\nArgs:\n- image_list (list): list of image\n+ image_list (list[str]): path of images, only support one image path\n+ (batch_size=1) in tracking model\nthreshold (float): threshold of predicted box' score\nReturns:\nonline_tlwhs, online_scores, online_ids (dict[np.array])\n@@ -159,6 +161,7 @@ class JDE_Detector(Detector):\npred_dets, pred_embs, threshold)\nself.det_times.postprocess_time_s.end()\nself.det_times.img_num += 1\n+\nreturn online_tlwhs, online_scores, online_ids\n@@ -172,7 +175,7 @@ def predict_image(detector, image_list):\nfor frame_id, img_file in enumerate(image_list):\nframe = cv2.imread(img_file)\nif FLAGS.run_benchmark:\n- detector.predict([frame], FLAGS.threshold, warmup=10, repeats=10)\n+ detector.predict([img_file], FLAGS.threshold, warmup=10, repeats=10)\ncm, gm, gu = get_current_memory_mb()\ndetector.cpu_mem += cm\ndetector.gpu_mem += gm\n@@ -180,10 +183,15 @@ def predict_image(detector, image_list):\nprint('Test iter {}, file name:{}'.format(frame_id, img_file))\nelse:\nonline_tlwhs, online_scores, online_ids = detector.predict(\n- [frame], FLAGS.threshold)\n- online_im = plot_tracking_dict(frame, num_classes, online_tlwhs,\n- online_ids, online_scores, frame_id,\n- ids2names)\n+ [img_file], FLAGS.threshold)\n+ online_im = plot_tracking_dict(\n+ frame,\n+ num_classes,\n+ online_tlwhs,\n+ online_ids,\n+ online_scores,\n+ frame_id=frame_id,\n+ ids2names=ids2names)\nif FLAGS.save_images:\nif not os.path.exists(FLAGS.output_dir):\nos.makedirs(FLAGS.output_dir)\n"
},
{
"change_type": "MODIFY",
"old_path": "deploy/pptracking/python/mot_sde_infer.py",
"new_path": "deploy/pptracking/python/mot_sde_infer.py",
"diff": "@@ -61,11 +61,14 @@ def bench_log(detector, img_list, model_info, batch_size=1, name=None):\nclass SDE_Detector(Detector):\n\"\"\"\n+ Detector of SDE methods\n+\nArgs:\npred_config (object): config of model, defined by `Config(model_dir)`\nmodel_dir (str): root path of model.pdiparams, model.pdmodel and infer_cfg.yml\ndevice (str): Choose the device you want to run, it can be: CPU/GPU/XPU, default is CPU\nrun_mode (str): mode of running(fluid/trt_fp32/trt_fp16)\n+ batch_size (int): size of per batch in inference, default is 1 in tracking models\ntrt_min_shape (int): min shape for dynamic shape in trt\ntrt_max_shape (int): max shape for dynamic shape in trt\ntrt_opt_shape (int): opt shape for dynamic shape in trt\n@@ -99,10 +102,15 @@ class SDE_Detector(Detector):\ntrt_calib_mode=trt_calib_mode,\ncpu_threads=cpu_threads,\nenable_mkldnn=enable_mkldnn)\n- assert batch_size == 1, \"The JDE Detector only supports batch size=1 now\"\n+ assert batch_size == 1, \"The detector of tracking models only supports batch_size=1 now\"\nself.pred_config = pred_config\n- def postprocess(self, boxes, ori_image_shape, threshold, scaled):\n+ def postprocess(self,\n+ boxes,\n+ ori_image_shape,\n+ threshold,\n+ inputs,\n+ scaled=False):\nover_thres_idx = np.nonzero(boxes[:, 1:2] >= threshold)[0]\nif len(over_thres_idx) == 0:\npred_dets = np.zeros((1, 6), dtype=np.float32)\n@@ -115,6 +123,9 @@ class SDE_Detector(Detector):\n# scaled means whether the coords after detector outputs\n# have been scaled back to the original image, set True\n# in general detector, set False in JDE YOLOv3.\n+ input_shape = inputs['image'].shape[2:]\n+ im_shape = inputs['im_shape'][0]\n+ scale_factor = inputs['scale_factor'][0]\npred_bboxes = scale_coords(boxes[:, 2:], input_shape, im_shape,\nscale_factor)\nelse:\n@@ -138,7 +149,13 @@ class SDE_Detector(Detector):\nreturn pred_dets, pred_xyxys\n- def predict(self, image_path, ori_image_shape, scaled, threshold=0.5, warmup=0, repeats=1):\n+ def predict(self,\n+ image_path,\n+ ori_image_shape,\n+ threshold=0.5,\n+ scaled=False,\n+ warmup=0,\n+ repeats=1):\n'''\nArgs:\nimage_path (list[str]): path of images, only support one image path\n@@ -148,7 +165,8 @@ class SDE_Detector(Detector):\nscaled (bool): whether the coords after detector outputs are scaled,\ndefault False in jde yolov3, set True in general detector.\nReturns:\n- pred_dets (np.ndarray, [N, 6])\n+ pred_dets (np.ndarray, [N, 6]): 'x,y,w,h,score,cls_id'\n+ pred_xyxys (np.ndarray, [N, 4]): 'x1,y1,x2,y2'\n'''\nself.det_times.preprocess_time_s.start()\ninputs = self.preprocess(image_path)\n@@ -179,20 +197,24 @@ class SDE_Detector(Detector):\npred_xyxys = np.zeros((1, 4), dtype=np.float32)\nelse:\npred_dets, pred_xyxys = self.postprocess(\n- boxes, ori_image_shape, threshold, scaled)\n-\n+ boxes, ori_image_shape, threshold, inputs, scaled=scaled)\nself.det_times.postprocess_time_s.end()\nself.det_times.img_num += 1\n+\nreturn pred_dets, pred_xyxys\nclass SDE_DetectorPicoDet(DetectorPicoDet):\n\"\"\"\n+ PicoDet of SDE methods, the postprocess of PicoDet has not been exported as\n+ other detectors, so do postprocess here.\n+\nArgs:\npred_config (object): config of model, defined by `Config(model_dir)`\nmodel_dir (str): root path of model.pdiparams, model.pdmodel and infer_cfg.yml\ndevice (str): Choose the device you want to run, it can be: CPU/GPU/XPU, default is CPU\nrun_mode (str): mode of running(fluid/trt_fp32/trt_fp16)\n+ batch_size (int): size of per batch in inference, default is 1 in tracking models\ntrt_min_shape 
(int): min shape for dynamic shape in trt\ntrt_max_shape (int): max shape for dynamic shape in trt\ntrt_opt_shape (int): opt shape for dynamic shape in trt\n@@ -226,11 +248,10 @@ class SDE_DetectorPicoDet(DetectorPicoDet):\ntrt_calib_mode=trt_calib_mode,\ncpu_threads=cpu_threads,\nenable_mkldnn=enable_mkldnn)\n- assert batch_size == 1, \"The JDE Detector only supports batch size=1 now\"\n+ assert batch_size == 1, \"The detector of tracking models only supports batch_size=1 now\"\nself.pred_config = pred_config\n- def postprocess_bboxes(self, boxes, input_shape, im_shape, scale_factor,\n- threshold):\n+ def postprocess(self, boxes, ori_image_shape, threshold):\nover_thres_idx = np.nonzero(boxes[:, 1:2] >= threshold)[0]\nif len(over_thres_idx) == 0:\npred_dets = np.zeros((1, 6), dtype=np.float32)\n@@ -241,8 +262,7 @@ class SDE_DetectorPicoDet(DetectorPicoDet):\npred_bboxes = boxes[:, 2:]\n- pred_xyxys, keep_idx = clip_box(pred_bboxes, input_shape, im_shape,\n- scale_factor)\n+ pred_xyxys, keep_idx = clip_box(pred_bboxes, ori_image_shape)\nif len(keep_idx[0]) == 0:\npred_dets = np.zeros((1, 6), dtype=np.float32)\npred_xyxys = np.zeros((1, 4), dtype=np.float32)\n@@ -256,20 +276,30 @@ class SDE_DetectorPicoDet(DetectorPicoDet):\npred_dets = np.concatenate(\n(pred_tlwhs, pred_scores, pred_cls_ids), axis=1)\n+\nreturn pred_dets, pred_xyxys\n- def predict(self, image, scaled, threshold=0.5, warmup=0, repeats=1):\n+ def predict(self,\n+ image_path,\n+ ori_image_shape,\n+ threshold=0.5,\n+ scaled=False,\n+ warmup=0,\n+ repeats=1):\n'''\nArgs:\n- image (np.ndarray): image numpy data\n+ image_path (list[str]): path of images, only support one image path\n+ (batch_size=1) in tracking model\n+ ori_image_shape (list[int]: original image shape\nthreshold (float): threshold of predicted box' score\nscaled (bool): whether the coords after detector outputs are scaled,\ndefault False in jde yolov3, set True in general detector.\nReturns:\n- pred_dets (np.ndarray, [N, 6])\n+ pred_dets (np.ndarray, [N, 6]): 'x,y,w,h,score,cls_id'\n+ pred_xyxys (np.ndarray, [N, 4]): 'x1,y1,x2,y2'\n'''\nself.det_times.preprocess_time_s.start()\n- inputs = self.preprocess(image)\n+ inputs = self.preprocess(image_path)\nself.det_times.preprocess_time_s.end()\ninput_names = self.predictor.get_input_names()\n@@ -298,32 +328,50 @@ class SDE_DetectorPicoDet(DetectorPicoDet):\nnp_boxes_list.append(\nself.predictor.get_output_handle(output_names[\nout_idx + num_outs]).copy_to_cpu())\n-\nself.det_times.inference_time_s.end(repeats=repeats)\n- self.det_times.img_num += 1\n+\nself.det_times.postprocess_time_s.start()\n- self.postprocess = PicoDetPostProcess(\n+ self.picodet_postprocess = PicoDetPostProcess(\ninputs['image'].shape[2:],\ninputs['im_shape'],\ninputs['scale_factor'],\nstrides=self.pred_config.fpn_stride,\nnms_threshold=self.pred_config.nms['nms_threshold'])\n- boxes, boxes_num = self.postprocess(np_score_list, np_boxes_list)\n+ boxes, boxes_num = self.picodet_postprocess(np_score_list,\n+ np_boxes_list)\nif len(boxes) == 0:\npred_dets = np.zeros((1, 6), dtype=np.float32)\npred_xyxys = np.zeros((1, 4), dtype=np.float32)\nelse:\n- input_shape = inputs['image'].shape[2:]\n- im_shape = inputs['im_shape']\n- scale_factor = inputs['scale_factor']\n- pred_dets, pred_xyxys = self.postprocess_bboxes(\n- boxes, input_shape, im_shape, scale_factor, threshold)\n+ pred_dets, pred_xyxys = self.postprocess(boxes, ori_image_shape,\n+ threshold)\n+ self.det_times.postprocess_time_s.end()\n+ self.det_times.img_num += 1\nreturn pred_dets, 
pred_xyxys\nclass SDE_ReID(object):\n+ \"\"\"\n+ ReID of SDE methods\n+\n+ Args:\n+ pred_config (object): config of model, defined by `Config(model_dir)`\n+ model_dir (str): root path of model.pdiparams, model.pdmodel and infer_cfg.yml\n+ device (str): Choose the device you want to run, it can be: CPU/GPU/XPU, default is CPU\n+ run_mode (str): mode of running(fluid/trt_fp32/trt_fp16)\n+ batch_size (int): size of per batch in inference, default 50 means at most\n+ 50 sub images can be made a batch and send into ReID model\n+ trt_min_shape (int): min shape for dynamic shape in trt\n+ trt_max_shape (int): max shape for dynamic shape in trt\n+ trt_opt_shape (int): opt shape for dynamic shape in trt\n+ trt_calib_mode (bool): If the model is produced by TRT offline quantitative\n+ calibration, trt_calib_mode need to set True\n+ cpu_threads (int): cpu threads\n+ enable_mkldnn (bool): whether to open MKLDNN\n+ \"\"\"\n+\ndef __init__(self,\npred_config,\nmodel_dir,\n@@ -394,7 +442,8 @@ class SDE_ReID(object):\ntlwh = t.to_tlwh()\ntscore = t.score\ntid = t.track_id\n- if tlwh[2] * tlwh[3] <= tracker.min_box_area: continue\n+ if tlwh[2] * tlwh[3] <= tracker.min_box_area:\n+ continue\nif tracker.vertical_ratio > 0 and tlwh[2] / tlwh[\n3] > tracker.vertical_ratio:\ncontinue\n@@ -422,7 +471,8 @@ class SDE_ReID(object):\ntlwh = t.to_tlwh()\ntscore = t.score\ntid = t.track_id\n- if tlwh[2] * tlwh[3] <= tracker.min_box_area: continue\n+ if tlwh[2] * tlwh[3] <= tracker.min_box_area:\n+ continue\nif tracker.vertical_ratio > 0 and tlwh[2] / tlwh[\n3] > tracker.vertical_ratio:\ncontinue\n@@ -497,17 +547,23 @@ def predict_image(detector, reid_model, image_list):\nimage_list.sort()\nfor i, img_file in enumerate(image_list):\nframe = cv2.imread(img_file)\n+ ori_image_shape = list(frame.shape[:2])\nif FLAGS.run_benchmark:\npred_dets, pred_xyxys = detector.predict(\n- [frame], FLAGS.scaled, FLAGS.threshold, warmup=10, repeats=10)\n+ [img_file],\n+ ori_image_shape,\n+ FLAGS.threshold,\n+ FLAGS.scaled,\n+ warmup=10,\n+ repeats=10)\ncm, gm, gu = get_current_memory_mb()\ndetector.cpu_mem += cm\ndetector.gpu_mem += gm\ndetector.gpu_util += gu\nprint('Test iter {}, file name:{}'.format(i, img_file))\nelse:\n- pred_dets, pred_xyxys = detector.predict([frame], FLAGS.scaled,\n- FLAGS.threshold)\n+ pred_dets, pred_xyxys = detector.predict(\n+ [img_file], ori_image_shape, FLAGS.threshold, FLAGS.scaled)\nif len(pred_dets) == 1 and np.sum(pred_dets) == 0:\nprint('Frame {} has no object, try to modify score threshold.'.\n@@ -577,8 +633,9 @@ def predict_video(detector, reid_model, camera_id):\nif not ret:\nbreak\ntimer.tic()\n- pred_dets, pred_xyxys = detector.predict([frame], FLAGS.scaled,\n- FLAGS.threshold)\n+ ori_image_shape = list(frame.shape[:2])\n+ pred_dets, pred_xyxys = detector.predict([frame], ori_image_shape,\n+ FLAGS.threshold, FLAGS.scaled)\nif len(pred_dets) == 1 and np.sum(pred_dets) == 0:\nprint('Frame {} has no object, try to modify score threshold.'.\n@@ -674,7 +731,8 @@ def predict_mtmct_seq(detector, reid_model, seq_name, output_dir):\nresults = defaultdict(list)\nmot_features_dict = {} # cid_tid_fid feats\n- print('Totally {} frames found in seq {}.'.format(len(image_list), seq_name))\n+ print('Totally {} frames found in seq {}.'.format(\n+ len(image_list), seq_name))\nfor frame_id, img_file in enumerate(image_list):\nif frame_id % 40 == 0:\n@@ -682,8 +740,8 @@ def predict_mtmct_seq(detector, reid_model, seq_name, output_dir):\nframe = cv2.imread(os.path.join(fpath, img_file))\nori_image_shape = 
list(frame.shape[:2])\nframe_path = os.path.join(fpath, img_file)\n- pred_dets, pred_xyxys = detector.predict([frame_path], ori_image_shape, FLAGS.scaled,\n- FLAGS.threshold)\n+ pred_dets, pred_xyxys = detector.predict([frame_path], ori_image_shape,\n+ FLAGS.threshold, FLAGS.scaled)\nif len(pred_dets) == 1 and np.sum(pred_dets) == 0:\nprint('Frame {} has no object, try to modify score threshold.'.\n@@ -765,15 +823,16 @@ def predict_mtmct(detector, reid_model, mtmct_dir, mtmct_cfg):\next = seq.split('.')[-1]\nseq = seq.split('.')[-2]\nprint('ffmpeg processing of video {}'.format(fpath))\n- frames_path = video2frames(video_path=fpath, outpath=mtmct_dir, frame_rate=25)\n+ frames_path = video2frames(\n+ video_path=fpath, outpath=mtmct_dir, frame_rate=25)\nfpath = os.path.join(mtmct_dir, seq)\nif os.path.isdir(fpath) == False:\nprint('{} is not a image folder.'.format(fpath))\ncontinue\n- mot_features_dict = predict_mtmct_seq(detector, reid_model,\n- seq, output_dir)\n+ mot_features_dict = predict_mtmct_seq(detector, reid_model, seq,\n+ output_dir)\ncid = int(re.sub('[a-z,A-Z]', \"\", seq))\ntid_data, mot_list_break = trajectory_fusion(\n"
}
] |
Python
|
Apache License 2.0
|
paddlepaddle/paddledetection
|
[MOT] fix deploy infer of pptracking (#4659)
|
499,348 |
23.11.2021 18:33:57
| -28,800 |
62e759d69839205e6c9c5b216eef6f963d2aec1b
|
fix cpp infer deploy visualize
|
[
{
"change_type": "MODIFY",
"old_path": "deploy/cpp/src/main_keypoint.cc",
"new_path": "deploy/cpp/src/main_keypoint.cc",
"diff": "#include <glog/logging.h>\n+#include <math.h>\n+#include <sys/stat.h>\n+#include <sys/types.h>\n+#include <algorithm>\n#include <iostream>\n+#include <numeric>\n#include <string>\n#include <vector>\n-#include <numeric>\n-#include <sys/types.h>\n-#include <sys/stat.h>\n-#include <math.h>\n-#include <algorithm>\n#ifdef _WIN32\n#include <direct.h>\n#include <io.h>\n#elif LINUX\n#include <stdarg.h>\n-#include <sys/stat.h>\n#endif\n-#include \"include/object_detector.h\"\n+#include <gflags/gflags.h>\n#include \"include/keypoint_detector.h\"\n+#include \"include/object_detector.h\"\n#include \"include/preprocess_op.h\"\n-#include <gflags/gflags.h>\n-\nDEFINE_string(model_dir, \"\", \"Path of object detector inference model\");\n-DEFINE_string(model_dir_keypoint, \"\", \"Path of keypoint detector inference model\");\n+DEFINE_string(model_dir_keypoint,\n+ \"\",\n+ \"Path of keypoint detector inference model\");\nDEFINE_string(image_file, \"\", \"Path of input image\");\n-DEFINE_string(image_dir, \"\", \"Dir of input image, `image_file` has a higher priority.\");\n+DEFINE_string(image_dir,\n+ \"\",\n+ \"Dir of input image, `image_file` has a higher priority.\");\nDEFINE_int32(batch_size, 1, \"batch_size of object detector\");\nDEFINE_int32(batch_size_keypoint, 8, \"batch_size of keypoint detector\");\n-DEFINE_string(video_file, \"\", \"Path of input video, `video_file` or `camera_id` has a highest priority.\");\n+DEFINE_string(\n+ video_file,\n+ \"\",\n+ \"Path of input video, `video_file` or `camera_id` has a highest priority.\");\nDEFINE_int32(camera_id, -1, \"Device id of camera to predict\");\n-DEFINE_bool(use_gpu, false, \"Deprecated, please use `--device` to set the device you want to run.\");\n-DEFINE_string(device, \"CPU\", \"Choose the device you want to run, it can be: CPU/GPU/XPU, default is CPU.\");\n+DEFINE_bool(\n+ use_gpu,\n+ false,\n+ \"Deprecated, please use `--device` to set the device you want to run.\");\n+DEFINE_string(device,\n+ \"CPU\",\n+ \"Choose the device you want to run, it can be: CPU/GPU/XPU, \"\n+ \"default is CPU.\");\nDEFINE_double(threshold, 0.5, \"Threshold of score.\");\nDEFINE_double(threshold_keypoint, 0.5, \"Threshold of score.\");\nDEFINE_string(output_dir, \"output\", \"Directory of output visualization files.\");\n-DEFINE_string(run_mode, \"fluid\", \"Mode of running(fluid/trt_fp32/trt_fp16/trt_int8)\");\n+DEFINE_string(run_mode,\n+ \"fluid\",\n+ \"Mode of running(fluid/trt_fp32/trt_fp16/trt_int8)\");\nDEFINE_int32(gpu_id, 0, \"Device id of GPU to execute\");\n-DEFINE_bool(run_benchmark, false, \"Whether to predict a image_file repeatedly for benchmark\");\n+DEFINE_bool(run_benchmark,\n+ false,\n+ \"Whether to predict a image_file repeatedly for benchmark\");\nDEFINE_bool(use_mkldnn, false, \"Whether use mkldnn with CPU\");\nDEFINE_int32(cpu_threads, 1, \"Num of threads with CPU\");\nDEFINE_int32(trt_min_shape, 1, \"Min shape of TRT DynamicShapeI\");\nDEFINE_int32(trt_max_shape, 1280, \"Max shape of TRT DynamicShapeI\");\nDEFINE_int32(trt_opt_shape, 640, \"Opt shape of TRT DynamicShapeI\");\n-DEFINE_bool(trt_calib_mode, false, \"If the model is produced by TRT offline quantitative calibration, trt_calib_mode need to set True\");\n+DEFINE_bool(trt_calib_mode,\n+ false,\n+ \"If the model is produced by TRT offline quantitative calibration, \"\n+ \"trt_calib_mode need to set True\");\nDEFINE_bool(use_dark, true, \"Whether use dark decode in keypoint postprocess\");\nvoid PrintBenchmarkLog(std::vector<double> det_time, int img_num) {\nLOG(INFO) << 
\"----------------------- Config info -----------------------\";\nLOG(INFO) << \"runtime_device: \" << FLAGS_device;\n- LOG(INFO) << \"ir_optim: \" << \"True\";\n- LOG(INFO) << \"enable_memory_optim: \" << \"True\";\n+ LOG(INFO) << \"ir_optim: \"\n+ << \"True\";\n+ LOG(INFO) << \"enable_memory_optim: \"\n+ << \"True\";\nint has_trt = FLAGS_run_mode.find(\"trt\");\nif (has_trt >= 0) {\n- LOG(INFO) << \"enable_tensorrt: \" << \"True\";\n+ LOG(INFO) << \"enable_tensorrt: \"\n+ << \"True\";\nstd::string precision = FLAGS_run_mode.substr(4, 8);\nLOG(INFO) << \"precision: \" << precision;\n} else {\n- LOG(INFO) << \"enable_tensorrt: \" << \"False\";\n- LOG(INFO) << \"precision: \" << \"fp32\";\n+ LOG(INFO) << \"enable_tensorrt: \"\n+ << \"False\";\n+ LOG(INFO) << \"precision: \"\n+ << \"fp32\";\n}\nLOG(INFO) << \"enable_mkldnn: \" << (FLAGS_use_mkldnn ? \"True\" : \"False\");\nLOG(INFO) << \"cpu_math_library_num_threads: \" << FLAGS_cpu_threads;\nLOG(INFO) << \"----------------------- Data info -----------------------\";\nLOG(INFO) << \"batch_size: \" << FLAGS_batch_size;\n- LOG(INFO) << \"input_shape: \" << \"dynamic shape\";\n+ LOG(INFO) << \"input_shape: \"\n+ << \"dynamic shape\";\nLOG(INFO) << \"----------------------- Model info -----------------------\";\nFLAGS_model_dir.erase(FLAGS_model_dir.find_last_not_of(\"/\") + 1);\nLOG(INFO) << \"model_name: \" << FLAGS_model_dir;\n@@ -97,7 +121,8 @@ void PrintKptsBenchmarkLog(std::vector<double> det_time, int img_num){\nLOG(INFO) << \"----------------------- Data info -----------------------\";\nLOG(INFO) << \"batch_size_keypoint: \" << FLAGS_batch_size_keypoint;\nLOG(INFO) << \"----------------------- Model info -----------------------\";\n- FLAGS_model_dir_keypoint.erase(FLAGS_model_dir_keypoint.find_last_not_of(\"/\") + 1);\n+ FLAGS_model_dir_keypoint.erase(\n+ FLAGS_model_dir_keypoint.find_last_not_of(\"/\") + 1);\nLOG(INFO) << \"keypoint_model_name: \" << FLAGS_model_dir_keypoint;\nLOG(INFO) << \"----------------------- Perf info ------------------------\";\nLOG(INFO) << \"Total number of predicted data: \" << img_num\n@@ -162,7 +187,8 @@ void PredictVideo(const std::string& video_path,\ncapture.open(FLAGS_camera_id);\n} else {\ncapture.open(video_path.c_str());\n- video_out_name = video_path.substr(video_path.find_last_of(OS_PATH_SEP) + 1);\n+ video_out_name =\n+ video_path.substr(video_path.find_last_of(OS_PATH_SEP) + 1);\n}\nif (!capture.isOpened()) {\nprintf(\"can not open video : %s\\n\", video_path.c_str());\n@@ -173,7 +199,8 @@ void PredictVideo(const std::string& video_path,\nint video_width = static_cast<int>(capture.get(CV_CAP_PROP_FRAME_WIDTH));\nint video_height = static_cast<int>(capture.get(CV_CAP_PROP_FRAME_HEIGHT));\nint video_fps = static_cast<int>(capture.get(CV_CAP_PROP_FPS));\n- int video_frame_count = static_cast<int>(capture.get(CV_CAP_PROP_FRAME_COUNT));\n+ int video_frame_count =\n+ static_cast<int>(capture.get(CV_CAP_PROP_FRAME_COUNT));\nprintf(\"fps: %d, frame_count: %d\\n\", video_fps, video_frame_count);\n// Create VideoWriter for output\n@@ -199,7 +226,6 @@ void PredictVideo(const std::string& video_path,\nauto labels = det->GetLabelList();\nauto colormap = PaddleDetection::GenerateColorMap(labels.size());\n-\n// Store keypoint results\nstd::vector<PaddleDetection::KeyPointResult> result_kpts;\nstd::vector<cv::Mat> imgs_kpts;\n@@ -237,8 +263,7 @@ void PredictVideo(const std::string& video_path,\nitem.rect[5],\nitem.rect[6],\nitem.rect[7]);\n- }\n- else{\n+ } else {\nprintf(\"class=%d confidence=%.4f rect=[%d %d 
%d %d]\\n\",\nitem.class_id,\nitem.confidence,\n@@ -249,27 +274,34 @@ void PredictVideo(const std::string& video_path,\n}\n}\n- if(keypoint)\n- {\n+ if (keypoint) {\n+ result_kpts.clear();\nint imsize = out_result.size();\nfor (int i = 0; i < imsize; i++) {\nauto item = out_result[i];\ncv::Mat crop_img;\nstd::vector<double> keypoint_times;\n- std::vector<int> rect = {item.rect[0], item.rect[1], item.rect[2], item.rect[3]};\n+ std::vector<int> rect = {\n+ item.rect[0], item.rect[1], item.rect[2], item.rect[3]};\nstd::vector<float> center;\nstd::vector<float> scale;\n- if(item.class_id == 0)\n- {\n+ if (item.class_id == 0) {\nPaddleDetection::CropImg(frame, crop_img, rect, center, scale);\ncenter_bs.emplace_back(center);\nscale_bs.emplace_back(scale);\nimgs_kpts.emplace_back(crop_img);\n}\n- if (imgs_kpts.size()==FLAGS_batch_size_keypoint || ((i==imsize-1)&&!imgs_kpts.empty()))\n- {\n- keypoint->Predict(imgs_kpts, center_bs, scale_bs, FLAGS_threshold, 0, 1, &result_kpts, &keypoint_times);\n+ if (imgs_kpts.size() == FLAGS_batch_size_keypoint ||\n+ ((i == imsize - 1) && !imgs_kpts.empty())) {\n+ keypoint->Predict(imgs_kpts,\n+ center_bs,\n+ scale_bs,\n+ FLAGS_threshold,\n+ 0,\n+ 1,\n+ &result_kpts,\n+ &keypoint_times);\nimgs_kpts.clear();\ncenter_bs.clear();\nscale_bs.clear();\n@@ -277,8 +309,7 @@ void PredictVideo(const std::string& video_path,\n}\ncv::Mat out_im = VisualizeKptsResult(frame, result_kpts, colormap_kpts);\nvideo_out.write(out_im);\n- }\n- else{\n+ } else {\n// Visualization result\ncv::Mat out_im = PaddleDetection::VisualizeResult(\nframe, out_result, labels, colormap, is_rbox);\n@@ -299,11 +330,13 @@ void PredictImage(const std::vector<std::string> all_img_paths,\nPaddleDetection::KeyPointDetector* keypoint,\nconst std::string& output_dir = \"output\") {\nstd::vector<double> det_t = {0, 0, 0};\n- int steps = ceil(float(all_img_paths.size()) / batch_size);\n+ int steps = ceil(static_cast<float>(all_img_paths.size()) / batch_size);\nint kpts_imgs = 0;\nstd::vector<double> keypoint_t = {0, 0, 0};\nprintf(\"total images = %d, batch_size = %d, total steps = %d\\n\",\n- all_img_paths.size(), batch_size, steps);\n+ all_img_paths.size(),\n+ batch_size,\n+ steps);\nfor (int idx = 0; idx < steps; idx++) {\nstd::vector<cv::Mat> batch_imgs;\nint left_image_cnt = all_img_paths.size() - idx * batch_size;\n@@ -330,7 +363,8 @@ void PredictImage(const std::vector<std::string> all_img_paths,\nbool is_rbox = false;\nif (run_benchmark) {\n- det->Predict(batch_imgs, threshold, 10, 10, &result, &bbox_num, &det_times);\n+ det->Predict(\n+ batch_imgs, threshold, 10, 10, &result, &bbox_num, &det_times);\n} else {\ndet->Predict(batch_imgs, threshold, 0, 1, &result, &bbox_num, &det_times);\n}\n@@ -362,8 +396,7 @@ void PredictImage(const std::vector<std::string> all_img_paths,\nitem.rect[5],\nitem.rect[6],\nitem.rect[7]);\n- }\n- else{\n+ } else {\nprintf(\"class=%d confidence=%.4f rect=[%d %d %d %d]\\n\",\nitem.class_id,\nitem.confidence,\n@@ -373,7 +406,8 @@ void PredictImage(const std::vector<std::string> all_img_paths,\nitem.rect[3]);\n}\n}\n- std::cout << all_img_paths.at(idx * batch_size + i) << \" The number of detected box: \" << detect_num << std::endl;\n+ std::cout << all_img_paths.at(idx * batch_size + i)\n+ << \" The number of detected box: \" << detect_num << std::endl;\nitem_start_idx = item_start_idx + bbox_num[i];\nstd::vector<int> compression_params;\n@@ -384,18 +418,17 @@ void PredictImage(const std::vector<std::string> all_img_paths,\noutput_path += OS_PATH_SEP;\n}\nstd::string 
image_file_path = all_img_paths.at(idx * batch_size + i);\n- if(keypoint)\n- {\n+ if (keypoint) {\nint imsize = im_result.size();\nfor (int i = 0; i < imsize; i++) {\nauto item = im_result[i];\ncv::Mat crop_img;\nstd::vector<double> keypoint_times;\n- std::vector<int> rect = {item.rect[0], item.rect[1], item.rect[2], item.rect[3]};\n+ std::vector<int> rect = {\n+ item.rect[0], item.rect[1], item.rect[2], item.rect[3]};\nstd::vector<float> center;\nstd::vector<float> scale;\n- if(item.class_id == 0)\n- {\n+ if (item.class_id == 0) {\nPaddleDetection::CropImg(im, crop_img, rect, center, scale);\ncenter_bs.emplace_back(center);\nscale_bs.emplace_back(scale);\n@@ -403,13 +436,26 @@ void PredictImage(const std::vector<std::string> all_img_paths,\nkpts_imgs += 1;\n}\n- if (imgs_kpts.size()==FLAGS_batch_size_keypoint || ((i==imsize-1)&&!imgs_kpts.empty()))\n- {\n+ if (imgs_kpts.size() == FLAGS_batch_size_keypoint ||\n+ ((i == imsize - 1) && !imgs_kpts.empty())) {\nif (run_benchmark) {\n- keypoint->Predict(imgs_kpts, center_bs, scale_bs, 0.5, 10, 10, &result_kpts, &keypoint_times);\n- }\n- else{\n- keypoint->Predict(imgs_kpts, center_bs, scale_bs, 0.5, 0, 1, &result_kpts, &keypoint_times);\n+ keypoint->Predict(imgs_kpts,\n+ center_bs,\n+ scale_bs,\n+ 0.5,\n+ 10,\n+ 10,\n+ &result_kpts,\n+ &keypoint_times);\n+ } else {\n+ keypoint->Predict(imgs_kpts,\n+ center_bs,\n+ scale_bs,\n+ 0.5,\n+ 0,\n+ 1,\n+ &result_kpts,\n+ &keypoint_times);\n}\nimgs_kpts.clear();\ncenter_bs.clear();\n@@ -419,16 +465,20 @@ void PredictImage(const std::vector<std::string> all_img_paths,\nkeypoint_t[2] += keypoint_times[2];\n}\n}\n- std::string kpts_savepath = output_path + \"keypoint_\" + image_file_path.substr(image_file_path.find_last_of('/') + 1);\n- cv::Mat kpts_vis_img = VisualizeKptsResult(im, result_kpts, colormap_kpts);\n+ std::string kpts_savepath =\n+ output_path + \"keypoint_\" +\n+ image_file_path.substr(image_file_path.find_last_of('/') + 1);\n+ cv::Mat kpts_vis_img =\n+ VisualizeKptsResult(im, result_kpts, colormap_kpts);\ncv::imwrite(kpts_savepath, kpts_vis_img, compression_params);\nprintf(\"Visualized output saved as %s\\n\", kpts_savepath.c_str());\n- }\n- else{\n+ } else {\n// Visualization result\ncv::Mat vis_img = PaddleDetection::VisualizeResult(\nim, im_result, labels, colormap, is_rbox);\n- std::string det_savepath = output_path + image_file_path.substr(image_file_path.find_last_of('/') + 1);\n+ std::string det_savepath =\n+ output_path +\n+ image_file_path.substr(image_file_path.find_last_of('/') + 1);\ncv::imwrite(det_savepath, vis_img, compression_params);\nprintf(\"Visualized output saved as %s\\n\", det_savepath.c_str());\n}\n@@ -447,39 +497,61 @@ void PredictImage(const std::vector<std::string> all_img_paths,\nint main(int argc, char** argv) {\n// Parsing command-line\ngoogle::ParseCommandLineFlags(&argc, &argv, true);\n- if (FLAGS_model_dir.empty()\n- || (FLAGS_image_file.empty() && FLAGS_image_dir.empty() && FLAGS_video_file.empty())) {\n- std::cout << \"Usage: ./main --model_dir=/PATH/TO/INFERENCE_MODEL/ (--model_dir_keypoint=/PATH/TO/INFERENCE_MODEL/)\"\n+ if (FLAGS_model_dir.empty() ||\n+ (FLAGS_image_file.empty() && FLAGS_image_dir.empty() &&\n+ FLAGS_video_file.empty())) {\n+ std::cout << \"Usage: ./main --model_dir=/PATH/TO/INFERENCE_MODEL/ \"\n+ \"(--model_dir_keypoint=/PATH/TO/INFERENCE_MODEL/)\"\n<< \"--image_file=/PATH/TO/INPUT/IMAGE/\" << std::endl;\nreturn -1;\n}\n- if (!(FLAGS_run_mode == \"fluid\" || FLAGS_run_mode == \"trt_fp32\"\n- || FLAGS_run_mode == \"trt_fp16\" || 
FLAGS_run_mode == \"trt_int8\")) {\n- std::cout << \"run_mode should be 'fluid', 'trt_fp32', 'trt_fp16' or 'trt_int8'.\";\n+ if (!(FLAGS_run_mode == \"fluid\" || FLAGS_run_mode == \"trt_fp32\" ||\n+ FLAGS_run_mode == \"trt_fp16\" || FLAGS_run_mode == \"trt_int8\")) {\n+ std::cout\n+ << \"run_mode should be 'fluid', 'trt_fp32', 'trt_fp16' or 'trt_int8'.\";\nreturn -1;\n}\n- transform(FLAGS_device.begin(),FLAGS_device.end(),FLAGS_device.begin(),::toupper);\n- if (!(FLAGS_device == \"CPU\" || FLAGS_device == \"GPU\" || FLAGS_device == \"XPU\")) {\n+ transform(FLAGS_device.begin(),\n+ FLAGS_device.end(),\n+ FLAGS_device.begin(),\n+ ::toupper);\n+ if (!(FLAGS_device == \"CPU\" || FLAGS_device == \"GPU\" ||\n+ FLAGS_device == \"XPU\")) {\nstd::cout << \"device should be 'CPU', 'GPU' or 'XPU'.\";\nreturn -1;\n}\nif (FLAGS_use_gpu) {\n- std::cout << \"Deprecated, please use `--device` to set the device you want to run.\";\n+ std::cout << \"Deprecated, please use `--device` to set the device you want \"\n+ \"to run.\";\nreturn -1;\n}\n// Load model and create a object detector\n- PaddleDetection::ObjectDetector det(FLAGS_model_dir, FLAGS_device, FLAGS_use_mkldnn,\n- FLAGS_cpu_threads, FLAGS_run_mode, FLAGS_batch_size,FLAGS_gpu_id,\n- FLAGS_trt_min_shape, FLAGS_trt_max_shape, FLAGS_trt_opt_shape,\n+ PaddleDetection::ObjectDetector det(FLAGS_model_dir,\n+ FLAGS_device,\n+ FLAGS_use_mkldnn,\n+ FLAGS_cpu_threads,\n+ FLAGS_run_mode,\n+ FLAGS_batch_size,\n+ FLAGS_gpu_id,\n+ FLAGS_trt_min_shape,\n+ FLAGS_trt_max_shape,\n+ FLAGS_trt_opt_shape,\nFLAGS_trt_calib_mode);\nPaddleDetection::KeyPointDetector* keypoint = nullptr;\n- if (!FLAGS_model_dir_keypoint.empty())\n- {\n- keypoint = new PaddleDetection::KeyPointDetector(FLAGS_model_dir_keypoint, FLAGS_device, FLAGS_use_mkldnn,\n- FLAGS_cpu_threads, FLAGS_run_mode, FLAGS_batch_size_keypoint, FLAGS_gpu_id,\n- FLAGS_trt_min_shape, FLAGS_trt_max_shape, FLAGS_trt_opt_shape,\n- FLAGS_trt_calib_mode, FLAGS_use_dark);\n+ if (!FLAGS_model_dir_keypoint.empty()) {\n+ keypoint = new PaddleDetection::KeyPointDetector(FLAGS_model_dir_keypoint,\n+ FLAGS_device,\n+ FLAGS_use_mkldnn,\n+ FLAGS_cpu_threads,\n+ FLAGS_run_mode,\n+ FLAGS_batch_size_keypoint,\n+ FLAGS_gpu_id,\n+ FLAGS_trt_min_shape,\n+ FLAGS_trt_max_shape,\n+ FLAGS_trt_opt_shape,\n+ FLAGS_trt_calib_mode,\n+ FLAGS_use_dark);\n}\n// Do inference on input video or image\nif (!PathExists(FLAGS_output_dir)) {\n@@ -493,7 +565,8 @@ int main(int argc, char** argv) {\nif (!FLAGS_image_file.empty()) {\nall_img_paths.push_back(FLAGS_image_file);\nif (FLAGS_batch_size > 1) {\n- std::cout << \"batch_size should be 1, when set `image_file`.\" << std::endl;\n+ std::cout << \"batch_size should be 1, when set `image_file`.\"\n+ << std::endl;\nreturn -1;\n}\n} else {\n@@ -502,8 +575,13 @@ int main(int argc, char** argv) {\nall_img_paths.push_back(img_path);\n}\n}\n- PredictImage(all_img_paths, FLAGS_batch_size, FLAGS_threshold,\n- FLAGS_run_benchmark, &det, keypoint, FLAGS_output_dir);\n+ PredictImage(all_img_paths,\n+ FLAGS_batch_size,\n+ FLAGS_threshold,\n+ FLAGS_run_benchmark,\n+ &det,\n+ keypoint,\n+ FLAGS_output_dir);\n}\ndelete keypoint;\nkeypoint = nullptr;\n"
}
] |
Python
|
Apache License 2.0
|
paddlepaddle/paddledetection
|
fix cpp infer deploy visualize (#4670)
|
499,301 |
24.11.2021 14:32:20
| -28,800 |
cc70a3df4e9aaa7267289e5476dbdb20935a0e69
|
model params FAQ
|
[
{
"change_type": "MODIFY",
"old_path": "configs/picodet/README.md",
"new_path": "configs/picodet/README.md",
"diff": "@@ -308,6 +308,21 @@ Please use `PicoDet-LCNet` model, which has fewer `transpose` operators.\n</details>\n+<details>\n+<summary>How to count model parameters.</summary>\n+\n+You can insert below code at [here](https://github.com/PaddlePaddle/PaddleDetection/blob/develop/ppdet/engine/trainer.py#L141) to count learnable parameters.\n+\n+```python\n+params = sum([\n+ p.numel() for n, p in self.model. named_parameters()\n+ if all([x not in n for x in ['_mean', '_variance']])\n+]) # exclude BatchNorm running status\n+print('params: ', params)\n+```\n+\n+</details>\n+\n## Cite PP-PicoDet\nIf you use PicoDet in your research, please cite our work by using the following BibTeX entry:\n```\n"
}
] |
Python
|
Apache License 2.0
|
paddlepaddle/paddledetection
|
model params FAQ (#4690)
|
499,298 |
24.11.2021 18:38:22
| -28,800 |
24e9605685dad069d583c7bb3961c4b23b47e2a5
|
[MOT] fix plot_tracking_dict
|
[
{
"change_type": "MODIFY",
"old_path": "deploy/pptracking/python/visualize.py",
"new_path": "deploy/pptracking/python/visualize.py",
"diff": "@@ -205,6 +205,7 @@ def plot_tracking_dict(image,\nline_thickness = max(1, int(image.shape[1] / 500.))\nif num_classes == 1:\n+ if records is not None:\nstart = records[-1].find('Total')\nend = records[-1].find('In')\ncv2.putText(\n"
}
] |
Python
|
Apache License 2.0
|
paddlepaddle/paddledetection
|
[MOT] fix plot_tracking_dict (#4694)
|
499,339 |
26.11.2021 15:07:01
| -28,800 |
0c3975b9945ac3e90fee1deaa4337b7d891f6e8b
|
[TIPC] fix cascade rcnn configs, test=document_fix
|
[
{
"change_type": "MODIFY",
"old_path": "test_tipc/configs/cascade_rcnn/cascade_mask_rcnn_r50_fpn_1x_coco_train_infer_python.txt",
"new_path": "test_tipc/configs/cascade_rcnn/cascade_mask_rcnn_r50_fpn_1x_coco_train_infer_python.txt",
"diff": "@@ -41,11 +41,11 @@ inference:./deploy/python/infer.py\n--device:gpu|cpu\n--enable_mkldnn:True|False\n--cpu_threads:1|6\n---batch_size:1|8\n+--batch_size:1\n--use_tensorrt:null\n--run_mode:fluid|trt_fp32|trt_fp16\n--model_dir:\n--image_dir:./dataset/coco/test2017/\n--save_log_path:null\n--run_benchmark:True\n-null:null\n\\ No newline at end of file\n+--trt_max_shape:1600\n\\ No newline at end of file\n"
},
{
"change_type": "MODIFY",
"old_path": "test_tipc/configs/cascade_rcnn/cascade_mask_rcnn_r50_vd_fpn_ssld_1x_coco_model_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt",
"new_path": "test_tipc/configs/cascade_rcnn/cascade_mask_rcnn_r50_vd_fpn_ssld_1x_coco_model_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt",
"diff": "@@ -20,10 +20,10 @@ inference:./deploy/cpp/build/main\n--device:gpu|cpu\n--use_mkldnn:True|False\n--cpu_threads:1|6\n---batch_size:1|8\n+--batch_size:1\n--use_tensorrt:null\n--run_mode:fluid|trt_fp32|trt_fp16\n--model_dir:\n--image_dir:./dataset/coco/test2017/\n--run_benchmark:True\n-null:null\n\\ No newline at end of file\n+--trt_max_shape:1600\n\\ No newline at end of file\n"
},
{
"change_type": "MODIFY",
"old_path": "test_tipc/configs/cascade_rcnn/cascade_mask_rcnn_r50_vd_fpn_ssld_1x_coco_train_infer_python.txt",
"new_path": "test_tipc/configs/cascade_rcnn/cascade_mask_rcnn_r50_vd_fpn_ssld_1x_coco_train_infer_python.txt",
"diff": "@@ -41,11 +41,11 @@ inference:./deploy/python/infer.py\n--device:gpu|cpu\n--enable_mkldnn:True|False\n--cpu_threads:1|6\n---batch_size:1|8\n+--batch_size:1\n--use_tensorrt:null\n--run_mode:fluid|trt_fp32|trt_fp16\n--model_dir:\n--image_dir:./dataset/coco/test2017/\n--save_log_path:null\n--run_benchmark:True\n-null:null\n\\ No newline at end of file\n+--trt_max_shape:1600\n\\ No newline at end of file\n"
},
{
"change_type": "MODIFY",
"old_path": "test_tipc/configs/cascade_rcnn/cascade_rcnn_r50_fpn_1x_coco_train_infer_python.txt",
"new_path": "test_tipc/configs/cascade_rcnn/cascade_rcnn_r50_fpn_1x_coco_train_infer_python.txt",
"diff": "@@ -41,11 +41,11 @@ inference:./deploy/python/infer.py\n--device:gpu|cpu\n--enable_mkldnn:True|False\n--cpu_threads:1|6\n---batch_size:1|8\n+--batch_size:1\n--use_tensorrt:null\n--run_mode:fluid|trt_fp32|trt_fp16\n--model_dir:\n--image_dir:./dataset/coco/test2017/\n--save_log_path:null\n--run_benchmark:True\n-null:null\n\\ No newline at end of file\n+--trt_max_shape:1600\n\\ No newline at end of file\n"
},
{
"change_type": "MODIFY",
"old_path": "test_tipc/configs/cascade_rcnn/cascade_rcnn_r50_vd_fpn_ssld_1x_coco_train_infer_python.txt",
"new_path": "test_tipc/configs/cascade_rcnn/cascade_rcnn_r50_vd_fpn_ssld_1x_coco_train_infer_python.txt",
"diff": "@@ -41,11 +41,11 @@ inference:./deploy/python/infer.py\n--device:gpu|cpu\n--enable_mkldnn:True|False\n--cpu_threads:1|6\n---batch_size:1|8\n+--batch_size:1\n--use_tensorrt:null\n--run_mode:fluid|trt_fp32|trt_fp16\n--model_dir:\n--image_dir:./dataset/coco/test2017/\n--save_log_path:null\n--run_benchmark:True\n-null:null\n\\ No newline at end of file\n+--trt_max_shape:1600\n\\ No newline at end of file\n"
}
] |
Python
|
Apache License 2.0
|
paddlepaddle/paddledetection
|
[TIPC] fix cascade rcnn configs, test=document_fix (#4727)
|
499,395 |
29.11.2021 11:23:36
| -28,800 |
f53a950d812e962715804028d807d3c15e93d6a9
|
modify docs of ppyolo and s2anet, test=document_fix
|
[
{
"change_type": "MODIFY",
"old_path": "configs/dota/README_en.md",
"new_path": "configs/dota/README_en.md",
"diff": "@@ -164,7 +164,6 @@ The inputs of the `multiclass_nms` operator in Paddle support quadrilateral inpu\nPlease refer to the deployment tutorial[Predict deployment](../../deploy/README_en.md)\n-**Attention:** The `is_training` parameter was added to the configuration file because the `paddle.Detach` function would cause the size error of the exported model when it went quiet, and the exported model would need to be set to `False` to predict deployment\n## Citations\n```\n"
}
] |
Python
|
Apache License 2.0
|
paddlepaddle/paddledetection
|
modify docs of ppyolo and s2anet, test=document_fix (#4738)
|
499,339 |
30.11.2021 12:30:51
| -28,800 |
dfb8ea1e8ea2b04dab47f05a86f6f2ce3b6e4fd8
|
[TOOD] fix dy2st
|
[
{
"change_type": "MODIFY",
"old_path": "configs/tood/README.md",
"new_path": "configs/tood/README.md",
"diff": "@@ -11,7 +11,7 @@ TOOD is an object detection model. We reproduced the model of the paper.\n| Backbone | Model | Images/GPU | Inf time (fps) | Box AP | Config | Download |\n|:------:|:--------:|:--------:|:--------------:|:------:|:------:|:--------:|\n-| R-50 | TOOD | 4 | --- | 42.8 | [config](https://github.com/PaddlePaddle/PaddleDetection/blob/develop/configs/tood/tood_r50_fpn_1x_coco.yml) | [model](https://paddledet.bj.bcebos.com/models/tood_r50_fpn_1x_coco.pdparams) |\n+| R-50 | TOOD | 4 | --- | 42.5 | [config](https://github.com/PaddlePaddle/PaddleDetection/blob/develop/configs/tood/tood_r50_fpn_1x_coco.yml) | [model](https://paddledet.bj.bcebos.com/models/tood_r50_fpn_1x_coco.pdparams) |\n**Notes:**\n"
},
{
"change_type": "MODIFY",
"old_path": "configs/tood/_base_/tood_reader.yml",
"new_path": "configs/tood/_base_/tood_reader.yml",
"diff": "@@ -3,7 +3,7 @@ TrainReader:\nsample_transforms:\n- Decode: {}\n- RandomFlip: {prob: 0.5}\n- - Resize: {target_size: [800, 1333], keep_ratio: true, interp: 1}\n+ - Resize: {target_size: [800, 1333], keep_ratio: true}\n- NormalizeImage: {is_scale: true, mean: [0.485, 0.456, 0.406], std: [0.229, 0.224, 0.225]}\n- Permute: {}\nbatch_transforms:\n@@ -18,7 +18,7 @@ TrainReader:\nEvalReader:\nsample_transforms:\n- Decode: {}\n- - Resize: {interp: 1, target_size: [800, 1333], keep_ratio: True}\n+ - Resize: {target_size: [800, 1333], keep_ratio: True}\n- NormalizeImage: {is_scale: true, mean: [0.485, 0.456, 0.406], std: [0.229, 0.224, 0.225]}\n- Permute: {}\nbatch_transforms:\n@@ -30,7 +30,7 @@ EvalReader:\nTestReader:\nsample_transforms:\n- Decode: {}\n- - Resize: {interp: 1, target_size: [800, 1333], keep_ratio: True}\n+ - Resize: {target_size: [800, 1333], keep_ratio: True}\n- NormalizeImage: {is_scale: true, mean: [0.485, 0.456, 0.406], std: [0.229, 0.224, 0.225]}\n- Permute: {}\nbatch_transforms:\n"
},
{
"change_type": "MODIFY",
"old_path": "deploy/python/infer.py",
"new_path": "deploy/python/infer.py",
"diff": "@@ -46,6 +46,7 @@ SUPPORT_MODELS = {\n'GFL',\n'PicoDet',\n'CenterNet',\n+ 'TOOD',\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "ppdet/engine/export_utils.py",
"new_path": "ppdet/engine/export_utils.py",
"diff": "@@ -46,6 +46,7 @@ TRT_MIN_SUBGRAPH = {\n'GFL': 16,\n'PicoDet': 3,\n'CenterNet': 5,\n+ 'TOOD': 5,\n}\nKEYPOINT_ARCH = ['HigherHRNet', 'TopDownHRNet']\n"
},
{
"change_type": "MODIFY",
"old_path": "ppdet/modeling/assigners/utils.py",
"new_path": "ppdet/modeling/assigners/utils.py",
"diff": "@@ -19,6 +19,12 @@ from __future__ import print_function\nimport paddle\nimport paddle.nn.functional as F\n+__all__ = [\n+ 'pad_gt', 'gather_topk_anchors', 'check_points_inside_bboxes',\n+ 'compute_max_iou_anchor', 'compute_max_iou_gt',\n+ 'generate_anchors_for_grid_cell'\n+]\n+\ndef pad_gt(gt_labels, gt_bboxes, gt_scores=None):\nr\"\"\" Pad 0 in gt_labels and gt_bboxes.\n@@ -147,3 +153,42 @@ def compute_max_iou_gt(ious):\nmax_iou_index = ious.argmax(axis=-1)\nis_max_iou = F.one_hot(max_iou_index, num_anchors)\nreturn is_max_iou.astype(ious.dtype)\n+\n+\n+def generate_anchors_for_grid_cell(feats,\n+ fpn_strides,\n+ grid_cell_size=5.0,\n+ grid_cell_offset=0.5):\n+ r\"\"\"\n+ Like ATSS, generate anchors based on grid size.\n+ Args:\n+ feats (List[Tensor]): shape[s, (b, c, h, w)]\n+ fpn_strides (tuple|list): shape[s], stride for each scale feature\n+ grid_cell_size (float): anchor size\n+ grid_cell_offset (float): The range is between 0 and 1.\n+ Returns:\n+ anchors (List[Tensor]): shape[s, (l, 4)]\n+ num_anchors_list (List[int]): shape[s]\n+ stride_tensor_list (List[Tensor]): shape[s, (l, 1)]\n+ \"\"\"\n+ assert len(feats) == len(fpn_strides)\n+ anchors = []\n+ num_anchors_list = []\n+ stride_tensor_list = []\n+ for feat, stride in zip(feats, fpn_strides):\n+ _, _, h, w = feat.shape\n+ cell_half_size = grid_cell_size * stride * 0.5\n+ shift_x = (paddle.arange(end=w) + grid_cell_offset) * stride\n+ shift_y = (paddle.arange(end=h) + grid_cell_offset) * stride\n+ shift_y, shift_x = paddle.meshgrid(shift_y, shift_x)\n+ anchor = paddle.stack(\n+ [\n+ shift_x - cell_half_size, shift_y - cell_half_size,\n+ shift_x + cell_half_size, shift_y + cell_half_size\n+ ],\n+ axis=-1).astype(feat.dtype)\n+ anchors.append(anchor.reshape([-1, 4]))\n+ num_anchors_list.append(len(anchors[-1]))\n+ stride_tensor_list.append(\n+ paddle.full([num_anchors_list[-1], 1], stride))\n+ return anchors, num_anchors_list, stride_tensor_list\n"
},
{
"change_type": "MODIFY",
"old_path": "ppdet/modeling/bbox_utils.py",
"new_path": "ppdet/modeling/bbox_utils.py",
"diff": "@@ -748,6 +748,28 @@ def bbox_center(boxes):\nReturns:\nTensor: boxes centers with shape (N, 2), \"cx, cy\" format.\n\"\"\"\n- boxes_cx = (boxes[:, 0] + boxes[:, 2]) / 2\n- boxes_cy = (boxes[:, 1] + boxes[:, 3]) / 2\n+ boxes_cx = (boxes[..., 0] + boxes[..., 2]) / 2\n+ boxes_cy = (boxes[..., 1] + boxes[..., 3]) / 2\nreturn paddle.stack([boxes_cx, boxes_cy], axis=-1)\n+\n+\n+def batch_distance2bbox(points, distance, max_shapes=None):\n+ \"\"\"Decode distance prediction to bounding box for batch.\n+ Args:\n+ points (Tensor): [B, ..., 2]\n+ distance (Tensor): [B, ..., 4]\n+ max_shapes (tuple): [B, 2], \"h,w\" format, Shape of the image.\n+ Returns:\n+ Tensor: Decoded bboxes.\n+ \"\"\"\n+ x1 = points[..., 0] - distance[..., 0]\n+ y1 = points[..., 1] - distance[..., 1]\n+ x2 = points[..., 0] + distance[..., 2]\n+ y2 = points[..., 1] + distance[..., 3]\n+ if max_shapes is not None:\n+ for i, max_shape in enumerate(max_shapes):\n+ x1[i] = x1[i].clip(min=0, max=max_shape[1])\n+ y1[i] = y1[i].clip(min=0, max=max_shape[0])\n+ x2[i] = x2[i].clip(min=0, max=max_shape[1])\n+ y2[i] = y2[i].clip(min=0, max=max_shape[0])\n+ return paddle.stack([x1, y1, x2, y2], -1)\n"
},
{
"change_type": "MODIFY",
"old_path": "ppdet/modeling/heads/tood_head.py",
"new_path": "ppdet/modeling/heads/tood_head.py",
"diff": "@@ -24,10 +24,11 @@ from paddle.nn.initializer import Constant\nfrom ppdet.core.workspace import register\nfrom ..initializer import normal_, constant_, bias_init_with_prob\n-from ppdet.modeling.bbox_utils import bbox_center\n+from ppdet.modeling.bbox_utils import bbox_center, batch_distance2bbox\nfrom ..losses import GIoULoss\n-from paddle.vision.ops import deform_conv2d\nfrom ppdet.modeling.layers import ConvNormLayer\n+from ppdet.modeling.ops import get_static_shape\n+from ppdet.modeling.assigners.utils import generate_anchors_for_grid_cell\nclass ScaleReg(nn.Layer):\n@@ -84,25 +85,13 @@ class TaskDecomposition(nn.Layer):\nnormal_(self.la_conv1.weight, std=0.001)\nnormal_(self.la_conv2.weight, std=0.001)\n- def forward(self, feat, avg_feat=None):\n- b, _, h, w = feat.shape\n- if avg_feat is None:\n- avg_feat = F.adaptive_avg_pool2d(feat, (1, 1))\n+ def forward(self, feat, avg_feat):\n+ b, _, h, w = get_static_shape(feat)\nweight = F.relu(self.la_conv1(avg_feat))\n- weight = F.sigmoid(self.la_conv2(weight))\n-\n- # here new_conv_weight = layer_attention_weight * conv_weight\n- # in order to save memory and FLOPs.\n- conv_weight = weight.reshape([b, 1, self.stacked_convs, 1]) * \\\n- self.reduction_conv.conv.weight.reshape(\n- [1, self.feat_channels, self.stacked_convs, self.feat_channels])\n- conv_weight = conv_weight.reshape(\n- [b, self.feat_channels, self.in_channels])\n- feat = feat.reshape([b, self.in_channels, h * w])\n- feat = paddle.bmm(conv_weight, feat).reshape(\n- [b, self.feat_channels, h, w])\n- if self.norm_type is not None:\n- feat = self.reduction_conv.norm(feat)\n+ weight = F.sigmoid(self.la_conv2(weight)).unsqueeze(-1)\n+ feat = paddle.reshape(\n+ feat, [b, self.stacked_convs, self.feat_channels, h, w]) * weight\n+ feat = self.reduction_conv(feat.flatten(1, 2))\nfeat = F.relu(feat)\nreturn feat\n@@ -211,81 +200,32 @@ class TOODHead(nn.Layer):\nnormal_(self.cls_prob_conv2.weight, std=0.01)\nconstant_(self.cls_prob_conv2.bias, bias_cls)\nnormal_(self.reg_offset_conv1.weight, std=0.001)\n- normal_(self.reg_offset_conv2.weight, std=0.001)\n+ constant_(self.reg_offset_conv2.weight)\nconstant_(self.reg_offset_conv2.bias)\n- def _generate_anchors(self, feats):\n- anchors, num_anchors_list = [], []\n- stride_tensor_list = []\n- for feat, stride in zip(feats, self.fpn_strides):\n- _, _, h, w = feat.shape\n- cell_half_size = self.grid_cell_scale * stride * 0.5\n- shift_x = (paddle.arange(end=w) + self.grid_cell_offset) * stride\n- shift_y = (paddle.arange(end=h) + self.grid_cell_offset) * stride\n- shift_y, shift_x = paddle.meshgrid(shift_y, shift_x)\n- anchor = paddle.stack(\n- [\n- shift_x - cell_half_size, shift_y - cell_half_size,\n- shift_x + cell_half_size, shift_y + cell_half_size\n- ],\n- axis=-1)\n- anchors.append(anchor.reshape([-1, 4]))\n- num_anchors_list.append(len(anchors[-1]))\n- stride_tensor_list.append(\n- paddle.full([num_anchors_list[-1], 1], stride))\n- return anchors, num_anchors_list, stride_tensor_list\n-\n- @staticmethod\n- def _batch_distance2bbox(points, distance, max_shapes=None):\n- \"\"\"Decode distance prediction to bounding box.\n- Args:\n- points (Tensor): [B, l, 2]\n- distance (Tensor): [B, l, 4]\n- max_shapes (tuple): [B, 2], \"h w\" format, Shape of the image.\n- Returns:\n- Tensor: Decoded bboxes.\n- \"\"\"\n- x1 = points[:, :, 0] - distance[:, :, 0]\n- y1 = points[:, :, 1] - distance[:, :, 1]\n- x2 = points[:, :, 0] + distance[:, :, 2]\n- y2 = points[:, :, 1] + distance[:, :, 3]\n- bboxes = paddle.stack([x1, y1, x2, y2], -1)\n- if 
max_shapes is not None:\n- out_bboxes = []\n- for bbox, max_shape in zip(bboxes, max_shapes):\n- bbox[:, 0] = bbox[:, 0].clip(min=0, max=max_shape[1])\n- bbox[:, 1] = bbox[:, 1].clip(min=0, max=max_shape[0])\n- bbox[:, 2] = bbox[:, 2].clip(min=0, max=max_shape[1])\n- bbox[:, 3] = bbox[:, 3].clip(min=0, max=max_shape[0])\n- out_bboxes.append(bbox)\n- out_bboxes = paddle.stack(out_bboxes)\n- return out_bboxes\n- return bboxes\n-\n- @staticmethod\n- def _deform_sampling(feat, offset):\n- \"\"\" Sampling the feature according to offset.\n- Args:\n- feat (Tensor): Feature\n- offset (Tensor): Spatial offset for for feature sampliing\n- \"\"\"\n- # it is an equivalent implementation of bilinear interpolation\n- # you can also use F.grid_sample instead\n- c = feat.shape[1]\n- weight = paddle.ones([c, 1, 1, 1])\n- y = deform_conv2d(feat, offset, weight, deformable_groups=c, groups=c)\n- return y\n+ def _reg_grid_sample(self, feat, offset, anchor_points):\n+ b, _, h, w = get_static_shape(feat)\n+ feat = paddle.reshape(feat, [-1, 1, h, w])\n+ offset = paddle.reshape(offset, [-1, 2, h, w]).transpose([0, 2, 3, 1])\n+ grid_shape = paddle.concat([w, h]).astype('float32')\n+ grid = (offset + anchor_points) / grid_shape\n+ grid = 2 * grid.clip(0., 1.) - 1\n+ feat = F.grid_sample(feat, grid)\n+ feat = paddle.reshape(feat, [b, -1, h, w])\n+ return feat\ndef forward(self, feats):\nassert len(feats) == len(self.fpn_strides), \\\n\"The size of feats is not equal to size of fpn_strides\"\n- anchors, num_anchors_list, stride_tensor_list = self._generate_anchors(\n- feats)\n+ anchors, num_anchors_list, stride_tensor_list = generate_anchors_for_grid_cell(\n+ feats, self.fpn_strides, self.grid_cell_scale,\n+ self.grid_cell_offset)\n+\ncls_score_list, bbox_pred_list = [], []\nfor feat, scale_reg, anchor, stride in zip(feats, self.scales_regs,\nanchors, self.fpn_strides):\n- b, _, h, w = feat.shape\n+ b, _, h, w = get_static_shape(feat)\ninter_feats = []\nfor inter_conv in self.inter_convs:\nfeat = F.relu(inter_conv(feat))\n@@ -309,16 +249,16 @@ class TOODHead(nn.Layer):\n# reg prediction and alignment\nreg_dist = scale_reg(self.tood_reg(reg_feat).exp())\n- reg_dist = reg_dist.transpose([0, 2, 3, 1]).reshape([b, -1, 4])\n+ reg_dist = reg_dist.flatten(2).transpose([0, 2, 1])\nanchor_centers = bbox_center(anchor).unsqueeze(0) / stride\n- reg_bbox = self._batch_distance2bbox(\n- anchor_centers.tile([b, 1, 1]), reg_dist)\n+ reg_bbox = batch_distance2bbox(anchor_centers, reg_dist)\nif self.use_align_head:\n- reg_bbox = reg_bbox.reshape([b, h, w, 4]).transpose(\n- [0, 3, 1, 2])\nreg_offset = F.relu(self.reg_offset_conv1(feat))\nreg_offset = self.reg_offset_conv2(reg_offset)\n- bbox_pred = self._deform_sampling(reg_bbox, reg_offset)\n+ reg_bbox = reg_bbox.transpose([0, 2, 1]).reshape([b, 4, h, w])\n+ anchor_centers = anchor_centers.reshape([1, h, w, 2])\n+ bbox_pred = self._reg_grid_sample(reg_bbox, reg_offset,\n+ anchor_centers)\nbbox_pred = bbox_pred.flatten(2).transpose([0, 2, 1])\nelse:\nbbox_pred = reg_bbox\n"
},
{
"change_type": "MODIFY",
"old_path": "ppdet/modeling/ops.py",
"new_path": "ppdet/modeling/ops.py",
"diff": "@@ -1600,3 +1600,9 @@ def channel_shuffle(x, groups):\nx = paddle.transpose(x=x, perm=[0, 2, 1, 3, 4])\nx = paddle.reshape(x=x, shape=[batch_size, num_channels, height, width])\nreturn x\n+\n+\n+def get_static_shape(tensor):\n+ shape = paddle.shape(tensor)\n+ shape.stop_gradient = True\n+ return shape\n"
}
] |
Python
|
Apache License 2.0
|
paddlepaddle/paddledetection
|
[TOOD] fix dy2st (#4751)
|
499,304 |
30.11.2021 19:17:14
| -28,800 |
1cb2d6c3888f627a6d6126d89ea5600b228e2d49
|
add Paddle Liet android demo
|
[
{
"change_type": "MODIFY",
"old_path": "configs/picodet/README.md",
"new_path": "configs/picodet/README.md",
"diff": "@@ -213,7 +213,8 @@ paddle2onnx --model_dir output_inference/picodet_s_320_coco/ \\\n- [NCNN C++/Python demo](../../deploy/third_engine/demo_ncnn)\n- [MNN C++/Python demo](../../deploy/third_engine/demo_mnn)\n- [OpenVINO C++ demo](../../deploy/third_engine/demo_openvino)\n-- [Android demo](https://github.com/JiweiMaster/PP-PicoDet-Android-Demo)\n+- [Android demo(NCNN)](https://github.com/JiweiMaster/PP-PicoDet-Android-Demo)\n+- [Android demo(Paddle Lite)](https://github.com/marsplus-wjh/Picodet-PaddleLite-AndroidDemo)\nAndroid demo visualization:\n@@ -271,7 +272,7 @@ python tools/post_quant.py -c configs/picodet/picodet_s_320_coco.yml \\\n<details open>\n<summary>Toturial:</summary>\n-Please refer this [documentation](https://github.com/PaddlePaddle/PaddleDetection/tree/develop/configs/picodet/README_PRUNER.md) for details such as requirements, training and deployment.\n+Please refer this [documentation](https://github.com/PaddlePaddle/PaddleDetection/tree/develop/configs/picodet/pruner/README.md) for details such as requirements, training and deployment.\n</details>\n"
},
{
"change_type": "RENAME",
"old_path": "configs/picodet/README_PRUNER.md",
"new_path": "configs/picodet/pruner/README.md",
"diff": ""
}
] |
Python
|
Apache License 2.0
|
paddlepaddle/paddledetection
|
add Paddle Liet android demo (#4736)
|
499,298 |
02.12.2021 18:55:11
| -28,800 |
d41b085d66946219351d8d99c2b3d95756489624
|
fix static ssd cfgs
|
[
{
"change_type": "MODIFY",
"old_path": "static/configs/ssd/ssd_vgg16_300.yml",
"new_path": "static/configs/ssd/ssd_vgg16_300.yml",
"diff": "@@ -62,8 +62,8 @@ TrainReader:\nfields: ['image', 'gt_bbox', 'gt_class']\ndataset:\n!COCODataSet\n- image_dir: val2017\n- anno_path: annotations/instances_val2017.json\n+ image_dir: train2017\n+ anno_path: annotations/instances_train2017.json\ndataset_dir: dataset/coco\nsample_transforms:\n- !DecodeImage\n"
},
{
"change_type": "MODIFY",
"old_path": "static/configs/ssd/ssd_vgg16_512.yml",
"new_path": "static/configs/ssd/ssd_vgg16_512.yml",
"diff": "@@ -64,8 +64,8 @@ TrainReader:\nfields: ['image', 'gt_bbox', 'gt_class']\ndataset:\n!COCODataSet\n- image_dir: val2017\n- anno_path: annotations/instances_val2017.json\n+ image_dir: train2017\n+ anno_path: annotations/instances_train2017.json\ndataset_dir: dataset/coco\nsample_transforms:\n- !DecodeImage\n"
}
] |
Python
|
Apache License 2.0
|
paddlepaddle/paddledetection
|
fix static ssd cfgs (#4784)
|
499,333 |
02.12.2021 21:18:48
| -28,800 |
320ce6c7cb8f633021b96be30559587211bb36bc
|
support batch_size=2 in RCNN
|
[
{
"change_type": "MODIFY",
"old_path": "ppdet/modeling/post_process.py",
"new_path": "ppdet/modeling/post_process.py",
"diff": "@@ -91,9 +91,23 @@ class BBoxPostProcess(nn.Layer):\nincluding labels, scores and bboxes.\n\"\"\"\n- if bboxes.shape[0] == 0:\n- bboxes = self.fake_bboxes\n- bbox_num = self.fake_bbox_num\n+ bboxes_list = []\n+ bbox_num_list = []\n+ id_start = 0\n+ # add fake bbox when output is empty for each batch\n+ for i in range(bbox_num.shape[0]):\n+ if bbox_num[i] == 0:\n+ bboxes_i = self.fake_bboxes\n+ bbox_num_i = self.fake_bbox_num\n+ id_start += 1\n+ else:\n+ bboxes_i = bboxes[id_start:id_start + bbox_num[i], :]\n+ bbox_num_i = bbox_num[i]\n+ id_start += bbox_num[i]\n+ bboxes_list.append(bboxes_i)\n+ bbox_num_list.append(bbox_num_i)\n+ bboxes = paddle.concat(bboxes_list)\n+ bbox_num = paddle.concat(bbox_num_list)\norigin_shape = paddle.floor(im_shape / scale_factor + 0.5)\n@@ -156,6 +170,7 @@ class MaskPostProcess(object):\n\"\"\"\nPaste the mask prediction to the original image.\n\"\"\"\n+\nx0, y0, x1, y1 = paddle.split(boxes, 4, axis=1)\nmasks = paddle.unsqueeze(masks, [0, 1])\nimg_y = paddle.arange(0, im_h, dtype='float32') + 0.5\n"
}
] |
Python
|
Apache License 2.0
|
paddlepaddle/paddledetection
|
support batch_size=2 in RCNN (#4787)
|
499,333 |
06.12.2021 22:55:03
| -28,800 |
383bde0b2e3fb74b6344f7926a93cc7e1ac9938f
|
fix multi-gpu training in static
|
[
{
"change_type": "MODIFY",
"old_path": "static/tools/train.py",
"new_path": "static/tools/train.py",
"diff": "@@ -112,7 +112,7 @@ def main():\nif cfg.use_gpu:\ndevices_num = fluid.core.get_cuda_device_count()\n- if cfg.use_npu:\n+ elif cfg.use_npu:\ndevices_num = fluid.core.get_npu_device_count()\nelif use_xpu:\n# ToDo(qingshu): XPU only support single card now\n"
}
] |
Python
|
Apache License 2.0
|
paddlepaddle/paddledetection
|
fix multi-gpu training in static (#4835)
|
499,396 |
07.12.2021 12:06:08
| -28,800 |
02428cfd991d80272ed36a5b4fdc95b97829b4d7
|
avoid 'invalid escape sequence' warning
|
[
{
"change_type": "MODIFY",
"old_path": "ppdet/modeling/ops.py",
"new_path": "ppdet/modeling/ops.py",
"diff": "@@ -473,7 +473,7 @@ def distribute_fpn_proposals(fpn_rois,\npixel_offset=False,\nrois_num=None,\nname=None):\n- \"\"\"\n+ r\"\"\"\n**This op only takes LoDTensor as input.** In Feature Pyramid Networks\n(FPN) models, it is needed to distribute all proposals into different FPN\n@@ -1274,7 +1274,7 @@ def box_coder(prior_box,\nbox_normalized=True,\naxis=0,\nname=None):\n- \"\"\"\n+ r\"\"\"\n**Box Coder Layer**\nEncode/Decode the target bounding box with the priorbox information.\n"
}
] |
Python
|
Apache License 2.0
|
paddlepaddle/paddledetection
|
avoid 'invalid escape sequence' warning (#4832)
|
499,301 |
08.12.2021 16:31:06
| -28,800 |
8f98cd7becf8fcf3f7fb9d70d1e5cc5cd218e12f
|
add arg eta_min
|
[
{
"change_type": "MODIFY",
"old_path": "ppdet/optimizer.py",
"new_path": "ppdet/optimizer.py",
"diff": "@@ -42,9 +42,10 @@ class CosineDecay(object):\nthe max_iters is much larger than the warmup iter\n\"\"\"\n- def __init__(self, max_epochs=1000, use_warmup=True):\n+ def __init__(self, max_epochs=1000, use_warmup=True, eta_min=0):\nself.max_epochs = max_epochs\nself.use_warmup = use_warmup\n+ self.eta_min = eta_min\ndef __call__(self,\nbase_lr=None,\n@@ -66,7 +67,8 @@ class CosineDecay(object):\nvalue.append(decayed_lr)\nreturn optimizer.lr.PiecewiseDecay(boundary, value)\n- return optimizer.lr.CosineAnnealingDecay(base_lr, T_max=max_iters)\n+ return optimizer.lr.CosineAnnealingDecay(\n+ base_lr, T_max=max_iters, eta_min=self.eta_min)\n@serializable\n"
}
] |
Python
|
Apache License 2.0
|
paddlepaddle/paddledetection
|
add arg eta_min (#4815)
|
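Note: the diff above makes `eta_min` a serializable field of `CosineDecay`, so it can be set straight from a YAML config. The block below is only an illustrative sketch of how that would look — the base_lr, epoch count and eta_min values are made up for the example, not taken from any shipped config:

```yaml
LearningRate:
  base_lr: 0.01
  schedulers:
  - !CosineDecay
    max_epochs: 300
    eta_min: 0.00001   # new field: floor of the cosine schedule instead of decaying to 0
  - !LinearWarmup
    start_factor: 0.1
    steps: 300
```
|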
499,298 |
09.12.2021 09:44:33
| -28,800 |
f5f778344a4b8d3cc858b191bde506e028f5e7a1
|
fix xpu deploy infer enable_lite_engine
|
[
{
"change_type": "MODIFY",
"old_path": "deploy/pptracking/python/det_infer.py",
"new_path": "deploy/pptracking/python/det_infer.py",
"diff": "@@ -410,6 +410,7 @@ def load_predictor(model_dir,\n# optimize graph and fuse op\nconfig.switch_ir_optim(True)\nelif device == 'XPU':\n+ config.enable_lite_engine()\nconfig.enable_xpu(10 * 1024 * 1024)\nelse:\nconfig.disable_gpu()\n"
},
{
"change_type": "MODIFY",
"old_path": "deploy/python/infer.py",
"new_path": "deploy/python/infer.py",
"diff": "@@ -511,6 +511,7 @@ def load_predictor(model_dir,\n# optimize graph and fuse op\nconfig.switch_ir_optim(True)\nelif device == 'XPU':\n+ config.enable_lite_engine()\nconfig.enable_xpu(10 * 1024 * 1024)\nelse:\nconfig.disable_gpu()\n"
},
{
"change_type": "MODIFY",
"old_path": "static/deploy/python/infer.py",
"new_path": "static/deploy/python/infer.py",
"diff": "@@ -415,6 +415,7 @@ def load_predictor(model_dir,\n# optimize graph and fuse op\nconfig.switch_ir_optim(True)\nelif device == 'XPU':\n+ config.enable_lite_engine()\nconfig.enable_xpu(10 * 1024 * 1024)\nelse:\nconfig.disable_gpu()\n"
}
] |
Python
|
Apache License 2.0
|
paddlepaddle/paddledetection
|
fix xpu deploy infer enable_lite_engine (#4843)
|
499,333 |
16.12.2021 22:47:03
| -28,800 |
b3e0bd3a00cfe0040b9b43b7d0f2f189117ed0f0
|
fix rcnn export
|
[
{
"change_type": "MODIFY",
"old_path": "ppdet/modeling/post_process.py",
"new_path": "ppdet/modeling/post_process.py",
"diff": "@@ -42,10 +42,6 @@ class BBoxPostProcess(nn.Layer):\nself.num_classes = num_classes\nself.decode = decode\nself.nms = nms\n- self.fake_bboxes = paddle.to_tensor(\n- np.array(\n- [[-1, 0.0, 0.0, 0.0, 0.0, 0.0]], dtype='float32'))\n- self.fake_bbox_num = paddle.to_tensor(np.array([1], dtype='int32'))\ndef forward(self, head_out, rois, im_shape, scale_factor):\n\"\"\"\n@@ -94,11 +90,16 @@ class BBoxPostProcess(nn.Layer):\nbboxes_list = []\nbbox_num_list = []\nid_start = 0\n+ fake_bboxes = paddle.to_tensor(\n+ np.array(\n+ [[-1, 0.0, 0.0, 0.0, 0.0, 0.0]], dtype='float32'))\n+ fake_bbox_num = paddle.to_tensor(np.array([1], dtype='int32'))\n+\n# add fake bbox when output is empty for each batch\nfor i in range(bbox_num.shape[0]):\nif bbox_num[i] == 0:\n- bboxes_i = self.fake_bboxes\n- bbox_num_i = self.fake_bbox_num\n+ bboxes_i = fake_bboxes\n+ bbox_num_i = fake_bbox_num\nid_start += 1\nelse:\nbboxes_i = bboxes[id_start:id_start + bbox_num[i], :]\n"
}
] |
Python
|
Apache License 2.0
|
paddlepaddle/paddledetection
|
fix rcnn export (#4920)
|
499,298 |
17.12.2021 16:15:38
| -28,800 |
08370fccb661e33b09704add02131013da2e894d
|
[MOT] add bytetracker
|
[
{
"change_type": "MODIFY",
"old_path": "configs/mot/fairmot/README.md",
"new_path": "configs/mot/fairmot/README.md",
"diff": "@@ -81,6 +81,24 @@ PP-tracking provides an AI studio public project tutorial. Please refer to this\n**Notes:**\n- FairMOT HRNetV2-W18 used 8 GPUs for training and mini-batch size as 4 on each GPU, and trained for 30 epoches. Only ImageNet pre-train model is used, and the optimizer adopts Momentum. The crowdhuman dataset is added to the train-set during training.\n+### FairMOT + BYTETracker\n+\n+### Results on MOT-17 Half Set\n+| backbone | input shape | MOTA | IDF1 | IDS | FP | FN | FPS | download | config |\n+| :--------------| :------- | :----: | :----: | :----: | :----: | :----: | :------: | :----: |:-----: |\n+| DLA-34 | 1088x608 | 69.1 | 72.8 | 299 | 1957 | 14412 | - |[model](https://paddledet.bj.bcebos.com/models/mot/fairmot_dla34_30e_1088x608_bytetracker.pdparams) | [config](./fairmot_dla34_30e_1088x608.yml) |\n+| DLA-34 + BYTETracker| 1088x608 | 70.3 | 73.2 | 234 | 2176 | 13598 | - |[model](https://paddledet.bj.bcebos.com/models/mot/fairmot_dla34_30e_1088x608_bytetracker.pdparams) | [config](./fairmot_dla34_30e_1088x608_bytetracker.yml) |\n+\n+**Notes:**\n+ - FairMOT here is for ablation study, the training dataset is the 5 datasets of MIX(Caltech,CUHKSYSU,PRW,Cityscapes,ETHZ) and the first half of MOT17 Train, and the pretrain weights is CenterNet COCO model, the evaluation is on the second half of MOT17 Train.\n+ - BYTETracker adapt to other FairMOT models of PaddleDetection, you can modify the tracker of the config like this:\n+ ```\n+ JDETracker:\n+ use_byte: True\n+ match_thres: 0.8\n+ conf_thres: 0.4\n+ low_conf_thres: 0.2\n+ ```\n## Getting Start\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "configs/mot/fairmot/fairmot_dla34_30e_1088x608_bytetracker.yml",
"diff": "+_BASE_: [\n+ '../../datasets/mot.yml',\n+ '../../runtime.yml',\n+ '_base_/optimizer_30e.yml',\n+ '_base_/fairmot_dla34.yml',\n+ '_base_/fairmot_reader_1088x608.yml',\n+]\n+weights: output/fairmot_dla34_30e_1088x608_bytetracker/model_final\n+\n+# for ablation study, MIX + MOT17-half\n+TrainDataset:\n+ !MOTDataSet\n+ dataset_dir: dataset/mot\n+ image_lists: ['mot17.half', 'caltech.all', 'cuhksysu.train', 'prw.train', 'citypersons.train', 'eth.train']\n+ data_fields: ['image', 'gt_bbox', 'gt_class', 'gt_ide']\n+\n+JDETracker:\n+ use_byte: True\n+ match_thres: 0.8\n+ conf_thres: 0.4\n+ low_conf_thres: 0.2\n"
},
{
"change_type": "MODIFY",
"old_path": "deploy/pptracking/python/mot/tracker/base_jde_tracker.py",
"new_path": "deploy/pptracking/python/mot/tracker/base_jde_tracker.py",
"diff": "@@ -98,28 +98,28 @@ class STrack(BaseTrack):\ndef __init__(self,\ntlwh,\nscore,\n- temp_feat,\n- num_classes,\ncls_id,\n- buff_size=30):\n- # object class id\n- self.cls_id = cls_id\n+ buff_size=30,\n+ temp_feat=None):\n# wait activate\nself._tlwh = np.asarray(tlwh, dtype=np.float)\n+ self.score = score\n+ self.cls_id = cls_id\n+ self.track_len = 0\n+\nself.kalman_filter = None\nself.mean, self.covariance = None, None\nself.is_activated = False\n- self.score = score\n- self.track_len = 0\n-\n+ self.use_reid = True if temp_feat is not None else False\n+ if self.use_reid:\nself.smooth_feat = None\nself.update_features(temp_feat)\nself.features = deque([], maxlen=buff_size)\nself.alpha = 0.9\ndef update_features(self, feat):\n- # L2 normalizing\n+ # L2 normalizing, this function has no use for BYTETracker\nfeat /= np.linalg.norm(feat)\nself.curr_feat = feat\nif self.smooth_feat is None:\n@@ -175,6 +175,7 @@ class STrack(BaseTrack):\ndef re_activate(self, new_track, frame_id, new_id=False):\nself.mean, self.covariance = self.kalman_filter.update(\nself.mean, self.covariance, self.tlwh_to_xyah(new_track.tlwh))\n+ if self.use_reid:\nself.update_features(new_track.curr_feat)\nself.track_len = 0\nself.state = TrackState.Tracked\n@@ -194,7 +195,7 @@ class STrack(BaseTrack):\nself.is_activated = True # set flag 'activated'\nself.score = new_track.score\n- if update_feature:\n+ if update_feature and self.use_reid:\nself.update_features(new_track.curr_feat)\n@property\n"
},
{
"change_type": "MODIFY",
"old_path": "deploy/pptracking/python/mot/tracker/jde_tracker.py",
"new_path": "deploy/pptracking/python/mot/tracker/jde_tracker.py",
"diff": "@@ -52,6 +52,7 @@ class JDETracker(object):\n\"\"\"\ndef __init__(self,\n+ use_byte=False,\nnum_classes=1,\ndet_thresh=0.3,\ntrack_buffer=30,\n@@ -60,11 +61,14 @@ class JDETracker(object):\ntracked_thresh=0.7,\nr_tracked_thresh=0.5,\nunconfirmed_thresh=0.7,\n- motion='KalmanFilter',\nconf_thres=0,\n+ match_thres=0.8,\n+ low_conf_thres=0.2,\n+ motion='KalmanFilter',\nmetric_type='euclidean'):\n+ self.use_byte = use_byte\nself.num_classes = num_classes\n- self.det_thresh = det_thresh\n+ self.det_thresh = det_thresh if not use_byte else conf_thres + 0.1\nself.track_buffer = track_buffer\nself.min_box_area = min_box_area\nself.vertical_ratio = vertical_ratio\n@@ -72,9 +76,12 @@ class JDETracker(object):\nself.tracked_thresh = tracked_thresh\nself.r_tracked_thresh = r_tracked_thresh\nself.unconfirmed_thresh = unconfirmed_thresh\n+ self.conf_thres = conf_thres\n+ self.match_thres = match_thres\n+ self.low_conf_thres = low_conf_thres\n+\nif motion == 'KalmanFilter':\nself.motion = KalmanFilter()\n- self.conf_thres = conf_thres\nself.metric_type = metric_type\nself.frame_id = 0\n@@ -85,7 +92,7 @@ class JDETracker(object):\nself.max_time_lost = 0\n# max_time_lost will be calculated: int(frame_rate / 30.0 * track_buffer)\n- def update(self, pred_dets, pred_embs):\n+ def update(self, pred_dets, pred_embs=None):\n\"\"\"\nProcesses the image frame and finds bounding box(detections).\nAssociates the detection with corresponding tracklets and also handles\n@@ -117,7 +124,10 @@ class JDETracker(object):\nfor cls_id in range(self.num_classes):\ncls_idx = (pred_dets[:, 5:] == cls_id).squeeze(-1)\npred_dets_dict[cls_id] = pred_dets[cls_idx]\n+ if pred_embs is not None:\npred_embs_dict[cls_id] = pred_embs[cls_idx]\n+ else:\n+ pred_embs_dict[cls_id] = None\nfor cls_id in range(self.num_classes):\n\"\"\" Step 1: Get detections by class\"\"\"\n@@ -126,12 +136,18 @@ class JDETracker(object):\nremain_inds = (pred_dets_cls[:, 4:5] > self.conf_thres).squeeze(-1)\nif remain_inds.sum() > 0:\npred_dets_cls = pred_dets_cls[remain_inds]\n+ if self.use_byte:\n+ detections = [\n+ STrack(\n+ STrack.tlbr_to_tlwh(tlbrs[:4]), tlbrs[4], cls_id, 30, temp_feat=None)\n+ for tlbrs in pred_dets_cls\n+ ]\n+ else:\npred_embs_cls = pred_embs_cls[remain_inds]\ndetections = [\nSTrack(\n- STrack.tlbr_to_tlwh(tlbrs[:4]), tlbrs[4], f,\n- self.num_classes, cls_id, 30)\n- for (tlbrs, f) in zip(pred_dets_cls, pred_embs_cls)\n+ STrack.tlbr_to_tlwh(tlbrs[:4]), tlbrs[4], cls_id, 30, temp_feat)\n+ for (tlbrs, temp_feat) in zip(pred_dets_cls, pred_embs_cls)\n]\nelse:\ndetections = []\n@@ -154,6 +170,11 @@ class JDETracker(object):\n# Predict the current location with KalmanFilter\nSTrack.multi_predict(track_pool_dict[cls_id], self.motion)\n+ if self.use_byte:\n+ dists = matching.iou_distance(track_pool_dict[cls_id], detections)\n+ matches, u_track, u_detection = matching.linear_assignment(\n+ dists, thresh=self.match_thres) # not self.tracked_thresh\n+ else:\ndists = matching.embedding_distance(\ntrack_pool_dict[cls_id], detections, metric=self.metric_type)\ndists = matching.fuse_motion(self.motion, dists,\n@@ -177,19 +198,41 @@ class JDETracker(object):\n# None of the steps below happen if there are no undetected tracks.\n\"\"\" Step 3: Second association, with IOU\"\"\"\n+ if self.use_byte:\n+ inds_low = pred_dets_dict[cls_id][:, 4:5] > self.low_conf_thres\n+ inds_high = pred_dets_dict[cls_id][:, 4:5] < self.conf_thres\n+ inds_second = np.logical_and(inds_low, inds_high).squeeze(-1)\n+ pred_dets_cls_second = 
pred_dets_dict[cls_id][inds_second]\n+\n+ # association the untrack to the low score detections\n+ if len(pred_dets_cls_second) > 0:\n+ detections_second = [\n+ STrack(STrack.tlbr_to_tlwh(tlbrs[:4]), tlbrs[4], cls_id, 30, temp_feat=None)\n+ for tlbrs in pred_dets_cls_second[:, :5]\n+ ]\n+ else:\n+ detections_second = []\n+ r_tracked_stracks = [\n+ track_pool_dict[cls_id][i] for i in u_track\n+ if track_pool_dict[cls_id][i].state == TrackState.Tracked\n+ ]\n+ dists = matching.iou_distance(r_tracked_stracks, detections_second)\n+ matches, u_track, u_detection_second = matching.linear_assignment(\n+ dists, thresh=0.4) # not r_tracked_thresh\n+ else:\ndetections = [detections[i] for i in u_detection]\nr_tracked_stracks = []\nfor i in u_track:\nif track_pool_dict[cls_id][i].state == TrackState.Tracked:\nr_tracked_stracks.append(track_pool_dict[cls_id][i])\n-\ndists = matching.iou_distance(r_tracked_stracks, detections)\n+\nmatches, u_track, u_detection = matching.linear_assignment(\ndists, thresh=self.r_tracked_thresh)\nfor i_tracked, idet in matches:\ntrack = r_tracked_stracks[i_tracked]\n- det = detections[idet]\n+ det = detections[idet] if not self.use_byte else detections_second[idet]\nif track.state == TrackState.Tracked:\ntrack.update(det, self.frame_id)\nactivated_tracks_dict[cls_id].append(track)\n"
},
{
"change_type": "MODIFY",
"old_path": "ppdet/modeling/mot/tracker/base_jde_tracker.py",
"new_path": "ppdet/modeling/mot/tracker/base_jde_tracker.py",
"diff": "@@ -105,28 +105,28 @@ class STrack(BaseTrack):\ndef __init__(self,\ntlwh,\nscore,\n- temp_feat,\n- num_classes,\ncls_id,\n- buff_size=30):\n- # object class id\n- self.cls_id = cls_id\n+ buff_size=30,\n+ temp_feat=None):\n# wait activate\nself._tlwh = np.asarray(tlwh, dtype=np.float)\n+ self.score = score\n+ self.cls_id = cls_id\n+ self.track_len = 0\n+\nself.kalman_filter = None\nself.mean, self.covariance = None, None\nself.is_activated = False\n- self.score = score\n- self.track_len = 0\n-\n+ self.use_reid = True if temp_feat is not None else False\n+ if self.use_reid:\nself.smooth_feat = None\nself.update_features(temp_feat)\nself.features = deque([], maxlen=buff_size)\nself.alpha = 0.9\ndef update_features(self, feat):\n- # L2 normalizing\n+ # L2 normalizing, this function has no use for BYTETracker\nfeat /= np.linalg.norm(feat)\nself.curr_feat = feat\nif self.smooth_feat is None:\n@@ -182,6 +182,7 @@ class STrack(BaseTrack):\ndef re_activate(self, new_track, frame_id, new_id=False):\nself.mean, self.covariance = self.kalman_filter.update(\nself.mean, self.covariance, self.tlwh_to_xyah(new_track.tlwh))\n+ if self.use_reid:\nself.update_features(new_track.curr_feat)\nself.track_len = 0\nself.state = TrackState.Tracked\n@@ -201,7 +202,7 @@ class STrack(BaseTrack):\nself.is_activated = True # set flag 'activated'\nself.score = new_track.score\n- if update_feature:\n+ if update_feature and self.use_reid:\nself.update_features(new_track.curr_feat)\n@property\n"
},
{
"change_type": "MODIFY",
"old_path": "ppdet/modeling/mot/tracker/jde_tracker.py",
"new_path": "ppdet/modeling/mot/tracker/jde_tracker.py",
"diff": "@@ -58,6 +58,7 @@ class JDETracker(object):\n\"\"\"\ndef __init__(self,\n+ use_byte=False,\nnum_classes=1,\ndet_thresh=0.3,\ntrack_buffer=30,\n@@ -66,11 +67,14 @@ class JDETracker(object):\ntracked_thresh=0.7,\nr_tracked_thresh=0.5,\nunconfirmed_thresh=0.7,\n- motion='KalmanFilter',\nconf_thres=0,\n+ match_thres=0.8,\n+ low_conf_thres=0.2,\n+ motion='KalmanFilter',\nmetric_type='euclidean'):\n+ self.use_byte = use_byte\nself.num_classes = num_classes\n- self.det_thresh = det_thresh\n+ self.det_thresh = det_thresh if not use_byte else conf_thres + 0.1\nself.track_buffer = track_buffer\nself.min_box_area = min_box_area\nself.vertical_ratio = vertical_ratio\n@@ -78,9 +82,12 @@ class JDETracker(object):\nself.tracked_thresh = tracked_thresh\nself.r_tracked_thresh = r_tracked_thresh\nself.unconfirmed_thresh = unconfirmed_thresh\n+ self.conf_thres = conf_thres\n+ self.match_thres = match_thres\n+ self.low_conf_thres = low_conf_thres\n+\nif motion == 'KalmanFilter':\nself.motion = KalmanFilter()\n- self.conf_thres = conf_thres\nself.metric_type = metric_type\nself.frame_id = 0\n@@ -91,7 +98,7 @@ class JDETracker(object):\nself.max_time_lost = 0\n# max_time_lost will be calculated: int(frame_rate / 30.0 * track_buffer)\n- def update(self, pred_dets, pred_embs):\n+ def update(self, pred_dets, pred_embs=None):\n\"\"\"\nProcesses the image frame and finds bounding box(detections).\nAssociates the detection with corresponding tracklets and also handles\n@@ -123,7 +130,10 @@ class JDETracker(object):\nfor cls_id in range(self.num_classes):\ncls_idx = (pred_dets[:, 5:] == cls_id).squeeze(-1)\npred_dets_dict[cls_id] = pred_dets[cls_idx]\n+ if pred_embs is not None:\npred_embs_dict[cls_id] = pred_embs[cls_idx]\n+ else:\n+ pred_embs_dict[cls_id] = None\nfor cls_id in range(self.num_classes):\n\"\"\" Step 1: Get detections by class\"\"\"\n@@ -132,12 +142,18 @@ class JDETracker(object):\nremain_inds = (pred_dets_cls[:, 4:5] > self.conf_thres).squeeze(-1)\nif remain_inds.sum() > 0:\npred_dets_cls = pred_dets_cls[remain_inds]\n+ if self.use_byte:\n+ detections = [\n+ STrack(\n+ STrack.tlbr_to_tlwh(tlbrs[:4]), tlbrs[4], cls_id, 30, temp_feat=None)\n+ for tlbrs in pred_dets_cls\n+ ]\n+ else:\npred_embs_cls = pred_embs_cls[remain_inds]\ndetections = [\nSTrack(\n- STrack.tlbr_to_tlwh(tlbrs[:4]), tlbrs[4], f,\n- self.num_classes, cls_id, 30)\n- for (tlbrs, f) in zip(pred_dets_cls, pred_embs_cls)\n+ STrack.tlbr_to_tlwh(tlbrs[:4]), tlbrs[4], cls_id, 30, temp_feat)\n+ for (tlbrs, temp_feat) in zip(pred_dets_cls, pred_embs_cls)\n]\nelse:\ndetections = []\n@@ -160,6 +176,11 @@ class JDETracker(object):\n# Predict the current location with KalmanFilter\nSTrack.multi_predict(track_pool_dict[cls_id], self.motion)\n+ if self.use_byte:\n+ dists = matching.iou_distance(track_pool_dict[cls_id], detections)\n+ matches, u_track, u_detection = matching.linear_assignment(\n+ dists, thresh=self.match_thres) #\n+ else:\ndists = matching.embedding_distance(\ntrack_pool_dict[cls_id], detections, metric=self.metric_type)\ndists = matching.fuse_motion(self.motion, dists,\n@@ -183,19 +204,41 @@ class JDETracker(object):\n# None of the steps below happen if there are no undetected tracks.\n\"\"\" Step 3: Second association, with IOU\"\"\"\n+ if self.use_byte:\n+ inds_low = pred_dets_dict[cls_id][:, 4:5] > self.low_conf_thres\n+ inds_high = pred_dets_dict[cls_id][:, 4:5] < self.conf_thres\n+ inds_second = np.logical_and(inds_low, inds_high).squeeze(-1)\n+ pred_dets_cls_second = pred_dets_dict[cls_id][inds_second]\n+\n+ # 
association the untrack to the low score detections\n+ if len(pred_dets_cls_second) > 0:\n+ detections_second = [\n+ STrack(STrack.tlbr_to_tlwh(tlbrs[:4]), tlbrs[4], cls_id, 30, temp_feat=None)\n+ for tlbrs in pred_dets_cls_second[:, :5]\n+ ]\n+ else:\n+ detections_second = []\n+ r_tracked_stracks = [\n+ track_pool_dict[cls_id][i] for i in u_track\n+ if track_pool_dict[cls_id][i].state == TrackState.Tracked\n+ ]\n+ dists = matching.iou_distance(r_tracked_stracks, detections_second)\n+ matches, u_track, u_detection_second = matching.linear_assignment(\n+ dists, thresh=0.4) # not r_tracked_thresh\n+ else:\ndetections = [detections[i] for i in u_detection]\nr_tracked_stracks = []\nfor i in u_track:\nif track_pool_dict[cls_id][i].state == TrackState.Tracked:\nr_tracked_stracks.append(track_pool_dict[cls_id][i])\n-\ndists = matching.iou_distance(r_tracked_stracks, detections)\n+\nmatches, u_track, u_detection = matching.linear_assignment(\ndists, thresh=self.r_tracked_thresh)\nfor i_tracked, idet in matches:\ntrack = r_tracked_stracks[i_tracked]\n- det = detections[idet]\n+ det = detections[idet] if not self.use_byte else detections_second[idet]\nif track.state == TrackState.Tracked:\ntrack.update(det, self.frame_id)\nactivated_tracks_dict[cls_id].append(track)\n"
}
] |
Python
|
Apache License 2.0
|
paddlepaddle/paddledetection
|
[MOT] add bytetracker (#4910)
|
499,339 |
20.12.2021 17:20:57
| -28,800 |
3cf6e926b4070fe064b4a24f5385d5c74efc7c05
|
update serving to 0.7.0
|
[
{
"change_type": "ADD",
"old_path": null,
"new_path": "deploy/serving/label_list.txt",
"diff": "+person\n+bicycle\n+car\n+motorcycle\n+airplane\n+bus\n+train\n+truck\n+boat\n+traffic light\n+fire hydrant\n+stop sign\n+parking meter\n+bench\n+bird\n+cat\n+dog\n+horse\n+sheep\n+cow\n+elephant\n+bear\n+zebra\n+giraffe\n+backpack\n+umbrella\n+handbag\n+tie\n+suitcase\n+frisbee\n+skis\n+snowboard\n+sports ball\n+kite\n+baseball bat\n+baseball glove\n+skateboard\n+surfboard\n+tennis racket\n+bottle\n+wine glass\n+cup\n+fork\n+knife\n+spoon\n+bowl\n+banana\n+apple\n+sandwich\n+orange\n+broccoli\n+carrot\n+hot dog\n+pizza\n+donut\n+cake\n+chair\n+couch\n+potted plant\n+bed\n+dining table\n+toilet\n+tv\n+laptop\n+mouse\n+remote\n+keyboard\n+cell phone\n+microwave\n+oven\n+toaster\n+sink\n+refrigerator\n+book\n+clock\n+vase\n+scissors\n+teddy bear\n+hair drier\n+toothbrush\n\\ No newline at end of file\n"
},
{
"change_type": "MODIFY",
"old_path": "deploy/serving/test_client.py",
"new_path": "deploy/serving/test_client.py",
"diff": "@@ -23,21 +23,21 @@ preprocess = Sequential([\n(2, 0, 1))\n])\n-postprocess = RCNNPostprocess(\"label_list.txt\", \"output\", [608, 608])\n+postprocess = RCNNPostprocess(sys.argv[1], \"output\", [608, 608])\nclient = Client()\nclient.load_client_config(\"serving_client/serving_client_conf.prototxt\")\nclient.connect(['127.0.0.1:9393'])\n-im = preprocess(sys.argv[1])\n+im = preprocess(sys.argv[2])\nfetch_map = client.predict(\nfeed={\n\"image\": im,\n\"im_shape\": np.array(list(im.shape[1:])).reshape(-1),\n\"scale_factor\": np.array([1.0, 1.0]).reshape(-1),\n},\n- fetch=[\"save_infer_model/scale_0.tmp_1\"],\n+ fetch=[\"multiclass_nms3_0.tmp_0\"],\nbatch=False)\nprint(fetch_map)\n-fetch_map[\"image\"] = sys.argv[1]\n+fetch_map[\"image\"] = sys.argv[2]\npostprocess(fetch_map)\n"
}
] |
Python
|
Apache License 2.0
|
paddlepaddle/paddledetection
|
update serving to 0.7.0 (#4956)
|
499,304 |
20.12.2021 19:25:16
| -28,800 |
1f8f85397d728958e98e4922309be5954e050071
|
fix reader of print_flops
|
[
{
"change_type": "MODIFY",
"old_path": "ppdet/engine/trainer.py",
"new_path": "ppdet/engine/trainer.py",
"diff": "@@ -370,7 +370,9 @@ class Trainer(object):\nself.status['training_staus'] = stats.TrainingStats(self.cfg.log_iter)\nif self.cfg.get('print_flops', False):\n- self._flops(self.loader)\n+ flops_loader = create('{}Reader'.format(self.mode.capitalize()))(\n+ self.dataset, self.cfg.worker_num)\n+ self._flops(flops_loader)\nprofiler_options = self.cfg.get('profiler_options', None)\nself._compose_callback.on_train_begin(self.status)\n@@ -469,7 +471,9 @@ class Trainer(object):\nself.status['mode'] = 'eval'\nself.model.eval()\nif self.cfg.get('print_flops', False):\n- self._flops(loader)\n+ flops_loader = create('{}Reader'.format(self.mode.capitalize()))(\n+ self.dataset, self.cfg.worker_num, self._eval_batch_sampler)\n+ self._flops(flops_loader)\nfor step_id, data in enumerate(loader):\nself.status['step_id'] = step_id\nself._compose_callback.on_step_begin(self.status)\n@@ -520,7 +524,8 @@ class Trainer(object):\nself.status['mode'] = 'test'\nself.model.eval()\nif self.cfg.get('print_flops', False):\n- self._flops(loader)\n+ flops_loader = create('TestReader')(self.dataset, 0)\n+ self._flops(flops_loader)\nresults = []\nfor step_id, data in enumerate(loader):\nself.status['step_id'] = step_id\n"
}
] |
Python
|
Apache License 2.0
|
paddlepaddle/paddledetection
|
fix reader of print_flops (#4959)
|
499,304 |
21.12.2021 10:35:48
| -28,800 |
a2f247621f52150cb036dec8054ac63bc5b2f84a
|
fix voc dataset collact_batch
|
[
{
"change_type": "MODIFY",
"old_path": "ppdet/engine/trainer.py",
"new_path": "ppdet/engine/trainer.py",
"diff": "@@ -111,8 +111,12 @@ class Trainer(object):\nif self.mode == 'eval':\nself._eval_batch_sampler = paddle.io.BatchSampler(\nself.dataset, batch_size=self.cfg.EvalReader['batch_size'])\n- self.loader = create('{}Reader'.format(self.mode.capitalize()))(\n- self.dataset, cfg.worker_num, self._eval_batch_sampler)\n+ reader_name = '{}Reader'.format(self.mode.capitalize())\n+ # If metric is VOC, need to be set collate_batch=False.\n+ if cfg.metric == 'VOC':\n+ cfg[reader_name]['collate_batch'] = False\n+ self.loader = create(reader_name)(self.dataset, cfg.worker_num,\n+ self._eval_batch_sampler)\n# TestDataset build after user set images, skip loader creation here\n# build optimizer in train mode\n@@ -444,6 +448,9 @@ class Trainer(object):\npaddle.io.BatchSampler(\nself._eval_dataset,\nbatch_size=self.cfg.EvalReader['batch_size'])\n+ # If metric is VOC, need to be set collate_batch=False.\n+ if self.cfg.metric == 'VOC':\n+ self.cfg['EvalReader']['collate_batch'] = False\nself._eval_loader = create('EvalReader')(\nself._eval_dataset,\nself.cfg.worker_num,\n"
},
{
"change_type": "MODIFY",
"old_path": "ppdet/modeling/backbones/lcnet.py",
"new_path": "ppdet/modeling/backbones/lcnet.py",
"diff": "@@ -19,7 +19,7 @@ from __future__ import print_function\nimport paddle\nimport paddle.nn as nn\nfrom paddle import ParamAttr\n-from paddle.nn import AdaptiveAvgPool2D, BatchNorm, Conv2D, Dropout, Linear\n+from paddle.nn import AdaptiveAvgPool2D, Conv2D\nfrom paddle.regularizer import L2Decay\nfrom paddle.nn.initializer import KaimingNormal\n@@ -81,7 +81,7 @@ class ConvBNLayer(nn.Layer):\nweight_attr=ParamAttr(initializer=KaimingNormal()),\nbias_attr=False)\n- self.bn = BatchNorm2D(\n+ self.bn = nn.BatchNorm2D(\nnum_filters,\nweight_attr=ParamAttr(regularizer=L2Decay(0.0)),\nbias_attr=ParamAttr(regularizer=L2Decay(0.0)))\n"
},
{
"change_type": "MODIFY",
"old_path": "ppdet/modeling/backbones/shufflenet_v2.py",
"new_path": "ppdet/modeling/backbones/shufflenet_v2.py",
"diff": "@@ -19,7 +19,8 @@ from __future__ import print_function\nimport paddle\nimport paddle.nn as nn\nfrom paddle import ParamAttr\n-from paddle.nn import Conv2D, MaxPool2D, AdaptiveAvgPool2D, BatchNorm\n+import paddle.nn.functional as F\n+from paddle.nn import Conv2D, MaxPool2D, AdaptiveAvgPool2D, BatchNorm2D\nfrom paddle.nn.initializer import KaimingNormal\nfrom paddle.regularizer import L2Decay\n@@ -55,6 +56,8 @@ class ConvBNLayer(nn.Layer):\nout_channels,\nweight_attr=ParamAttr(regularizer=L2Decay(0.0)),\nbias_attr=ParamAttr(regularizer=L2Decay(0.0)))\n+ if act == \"hard_swish\":\n+ act = 'hardswish'\nself.act = act\ndef forward(self, inputs):\n"
}
] |
Python
|
Apache License 2.0
|
paddlepaddle/paddledetection
|
fix voc dataset collact_batch (#4966)
|
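Note: with the trainer change above, `collate_batch` is forced to `False` on the eval reader whenever `metric: VOC`, which is equivalent to carrying a setting like the sketch below in the config (the batch size here is illustrative, not copied from a shipped config):

```yaml
metric: VOC

EvalReader:
  collate_batch: False   # now set automatically by the trainer for VOC metric
  batch_size: 1
```
|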
499,333 |
21.12.2021 21:21:36
| -28,800 |
576f52cf131fb3ac931ff77f5568859f9895e1ef
|
fix no label training in bs=2
|
[
{
"change_type": "MODIFY",
"old_path": "ppdet/modeling/proposal_generator/target.py",
"new_path": "ppdet/modeling/proposal_generator/target.py",
"diff": "@@ -50,8 +50,8 @@ def rpn_anchor_target(anchors,\nlabels = paddle.scatter(labels, fg_inds, paddle.ones_like(fg_inds))\n# Step3: make output\nif gt_bbox.shape[0] == 0:\n- matched_gt_boxes = paddle.zeros([0, 4])\n- tgt_delta = paddle.zeros([0, 4])\n+ matched_gt_boxes = paddle.zeros([matches.shape[0], 4])\n+ tgt_delta = paddle.zeros([matches.shape[0], 4])\nelse:\nmatched_gt_boxes = paddle.gather(gt_bbox, matches)\ntgt_delta = bbox2delta(anchors, matched_gt_boxes, weights)\n"
}
] |
Python
|
Apache License 2.0
|
paddlepaddle/paddledetection
|
fix no label training in bs=2 (#4977)
|
499,301 |
22.12.2021 11:59:32
| -28,800 |
692d732994660ceba82c75034c802eb1138239cf
|
swin shift window
|
[
{
"change_type": "MODIFY",
"old_path": "ppdet/modeling/backbones/swin_transformer.py",
"new_path": "ppdet/modeling/backbones/swin_transformer.py",
"diff": "@@ -493,8 +493,13 @@ class BasicLayer(nn.Layer):\ncnt = 0\nfor h in h_slices:\nfor w in w_slices:\n+ try:\nimg_mask[:, h, w, :] = cnt\n+ except:\n+ pass\n+\ncnt += 1\n+\nmask_windows = window_partition(\nimg_mask, self.window_size) # nW, window_size, window_size, 1\nmask_windows = mask_windows.reshape(\n"
}
] |
Python
|
Apache License 2.0
|
paddlepaddle/paddledetection
|
swin shift window (#4954)
|
499,392 |
23.12.2021 21:16:44
| -28,800 |
4ab7c7c079b9b4713c39e93e172e9e1475504bd6
|
add ofa picodet demo
|
[
{
"change_type": "ADD",
"old_path": null,
"new_path": "configs/slim/ofa/ofa_picodet_demo.yml",
"diff": "+weights: https://paddledet.bj.bcebos.com/models/pretrained/ESNet_x1_0_pretrained.pdparams\n+slim: OFA\n+OFA:\n+ ofa_config:\n+ task: expand_ratio\n+ expand_ratio: [0.5, 1]\n+\n+ skip_neck: True\n+ skip_head: True\n+\n+ RunConfig:\n+ # Skip the output layer of each block by layer name\n+ skip_layers: ['backbone._conv1._conv','backbone.2_1._conv_linear_1._conv',\n+ 'backbone.2_1._conv_linear_2._conv', 'backbone.2_1._conv_dw_mv1._conv',\n+ 'backbone.2_1._conv_pw_mv1._conv', 'backbone.2_2._conv_linear._conv',\n+ 'backbone.2_3._conv_linear._conv', 'backbone.3_1._conv_linear_1._conv',\n+ 'backbone.3_1._conv_linear_2._conv', 'backbone.3_1._conv_dw_mv1._conv',\n+ 'backbone.3_1._conv_pw_mv1._conv', 'backbone.3_2._conv_linear._conv',\n+ 'backbone.3_3._conv_linear._conv', 'backbone.3_4._conv_linear._conv',\n+ 'backbone.3_5._conv_linear._conv', 'backbone.3_6._conv_linear._conv',\n+ 'backbone.3_7._conv_linear._conv', 'backbone.4_1._conv_linear_1._conv',\n+ 'backbone.4_1._conv_linear_2._conv', 'backbone.4_1._conv_dw_mv1._conv',\n+ 'backbone.4_1._conv_pw_mv1._conv', 'backbone.4_2._conv_linear._conv',\n+ 'backbone.4_3._conv_linear._conv']\n+\n+ # For block-wise search, make layers in each block in the same search sapce\n+ same_search_space: [\n+ ['backbone.2_1._conv_dw_1._conv', 'backbone.2_1._conv_pw_2._conv',\n+ 'backbone.2_1._conv_dw_2._conv', 'backbone.2_1._se.conv1', 'backbone.2_1._se.conv2'],\n+ ['backbone.2_2._conv_pw._conv', 'backbone.2_2._conv_dw._conv',\n+ 'backbone.2_2._se.conv1', 'backbone.2_2._se.conv2'],\n+ ['backbone.2_3._conv_pw._conv', 'backbone.2_3._conv_dw._conv',\n+ 'backbone.2_3._se.conv1', 'backbone.2_3._se.conv2'],\n+ ['backbone.3_1._conv_dw_1._conv', 'backbone.3_1._conv_pw_2._conv',\n+ 'backbone.3_1._conv_dw_2._conv', 'backbone.3_1._se.conv1', 'backbone.3_1._se.conv2'],\n+ ['backbone.3_2._conv_pw._conv', 'backbone.3_2._conv_dw._conv',\n+ 'backbone.3_2._se.conv1', 'backbone.3_2._se.conv2'],\n+ ['backbone.3_3._conv_pw._conv', 'backbone.3_3._conv_dw._conv',\n+ 'backbone.3_3._se.conv1', 'backbone.3_3._se.conv2'],\n+ ['backbone.3_4._conv_pw._conv', 'backbone.3_4._conv_dw._conv',\n+ 'backbone.3_4._se.conv1', 'backbone.3_4._se.conv2'],\n+ ['backbone.3_5._conv_pw._conv', 'backbone.3_5._conv_dw._conv',\n+ 'backbone.3_5._se.conv1', 'backbone.3_5._se.conv2'],\n+ ['backbone.3_6._conv_pw._conv', 'backbone.3_6._conv_dw._conv',\n+ 'backbone.3_6._se.conv1', 'backbone.3_6._se.conv2'],\n+ ['backbone.3_7._conv_pw._conv', 'backbone.3_7._conv_dw._conv',\n+ 'backbone.3_7._se.conv1', 'backbone.3_7._se.conv2'],\n+ ['backbone.4_1._conv_dw_1._conv', 'backbone.4_1._conv_pw_2._conv',\n+ 'backbone.4_1._conv_dw_2._conv', 'backbone.4_1._se.conv1', 'backbone.4_1._se.conv2'],\n+ ['backbone.4_2._conv_pw._conv', 'backbone.4_2._conv_dw._conv',\n+ 'backbone.4_2._se.conv1', 'backbone.4_2._se.conv2'],\n+ ['backbone.4_3._conv_pw._conv', 'backbone.4_3._conv_dw._conv',\n+ 'backbone.4_3._se.conv1', 'backbone.4_3._se.conv2']]\n+\n+ # demo expand ratio\n+ # Generally, for expand ratio, float in (0, 1] is available.\n+ # But please be careful if the model is complicated.\n+ # For picodet, there are many split and concat, the choice of channel number is important.\n+ ofa_layers:\n+ 'backbone.2_1._conv_dw_1._conv':\n+ 'expand_ratio': [0.5, 1]\n+ 'backbone.2_2._conv_pw._conv':\n+ 'expand_ratio': [0.5, 1]\n+ 'backbone.2_3._conv_pw._conv':\n+ 'expand_ratio': [0.5, 1]\n+ 'backbone.3_1._conv_dw_1._conv':\n+ 'expand_ratio': [0.5, 1]\n+ 'backbone.3_2._conv_pw._conv':\n+ 'expand_ratio': [0.5, 1]\n+ 
'backbone.3_3._conv_pw._conv':\n+ 'expand_ratio': [0.5, 1]\n+ 'backbone.3_4._conv_pw._conv':\n+ 'expand_ratio': [0.5, 1]\n+ 'backbone.3_5._conv_pw._conv':\n+ 'expand_ratio': [0.5, 1]\n+ 'backbone.3_6._conv_pw._conv':\n+ 'expand_ratio': [0.5, 1]\n+ 'backbone.3_7._conv_pw._conv':\n+ 'expand_ratio': [0.5, 1]\n+ 'backbone.4_1._conv_dw_1._conv':\n+ 'expand_ratio': [0.5, 1]\n+ 'backbone.4_2._conv_pw._conv':\n+ 'expand_ratio': [0.5, 1]\n+ 'backbone.4_3._conv_pw._conv':\n+ 'expand_ratio': [0.5, 1]\n"
},
{
"change_type": "MODIFY",
"old_path": "ppdet/engine/trainer.py",
"new_path": "ppdet/engine/trainer.py",
"diff": "@@ -95,6 +95,10 @@ class Trainer(object):\nself.is_loaded_weights = True\n#normalize params for deploy\n+ if 'slim' in cfg and cfg['slim_type'] == 'OFA':\n+ self.model.model.load_meanstd(cfg['TestReader'][\n+ 'sample_transforms'])\n+ else:\nself.model.load_meanstd(cfg['TestReader']['sample_transforms'])\nself.use_ema = ('use_ema' in cfg and cfg['use_ema'])\n"
},
{
"change_type": "MODIFY",
"old_path": "ppdet/slim/__init__.py",
"new_path": "ppdet/slim/__init__.py",
"diff": "@@ -21,6 +21,7 @@ from .prune import *\nfrom .quant import *\nfrom .distill import *\nfrom .unstructured_prune import *\n+from .ofa import *\nimport yaml\nfrom ppdet.core.workspace import load_config\n@@ -36,6 +37,14 @@ def build_slim_model(cfg, slim_cfg, mode='train'):\nif slim_load_cfg['slim'] == 'Distill':\nmodel = DistillModel(cfg, slim_cfg)\ncfg['model'] = model\n+ elif slim_load_cfg['slim'] == 'OFA':\n+ load_config(slim_cfg)\n+ model = create(cfg.architecture)\n+ load_pretrain_weight(model, cfg.weights)\n+ slim = create(cfg.slim)\n+ cfg['slim_type'] = cfg.slim\n+ cfg['model'] = slim(model, model.state_dict())\n+ cfg['slim'] = slim\nelif slim_load_cfg['slim'] == 'DistillPrune':\nif mode == 'train':\nmodel = DistillModel(cfg, slim_cfg)\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "ppdet/slim/ofa.py",
"diff": "+from __future__ import absolute_import\n+from __future__ import division\n+from __future__ import print_function\n+\n+import paddle\n+import paddle.nn as nn\n+import paddle.nn.functional as F\n+\n+from ppdet.core.workspace import load_config, merge_config, create\n+from ppdet.utils.checkpoint import load_weight, load_pretrain_weight\n+from ppdet.utils.logger import setup_logger\n+from ppdet.core.workspace import register, serializable\n+\n+from paddle.utils import try_import\n+\n+logger = setup_logger(__name__)\n+\n+\n+@register\n+@serializable\n+class OFA(object):\n+ def __init__(self, ofa_config):\n+ super(OFA, self).__init__()\n+ self.ofa_config = ofa_config\n+\n+ def __call__(self, model, param_state_dict):\n+\n+ paddleslim = try_import('paddleslim')\n+ from paddleslim.nas.ofa import OFA, RunConfig, utils\n+ from paddleslim.nas.ofa.convert_super import Convert, supernet\n+ task = self.ofa_config['task']\n+ expand_ratio = self.ofa_config['expand_ratio']\n+\n+ skip_neck = self.ofa_config['skip_neck']\n+ skip_head = self.ofa_config['skip_head']\n+\n+ run_config = self.ofa_config['RunConfig']\n+ if 'skip_layers' in run_config:\n+ skip_layers = run_config['skip_layers']\n+ else:\n+ skip_layers = []\n+\n+ # supernet config\n+ sp_config = supernet(expand_ratio=expand_ratio)\n+ # convert to supernet\n+ model = Convert(sp_config).convert(model)\n+\n+ skip_names = []\n+ if skip_neck:\n+ skip_names.append('neck.')\n+ if skip_head:\n+ skip_names.append('head.')\n+\n+ for name, sublayer in model.named_sublayers():\n+ for n in skip_names:\n+ if n in name:\n+ skip_layers.append(name)\n+\n+ run_config['skip_layers'] = skip_layers\n+ run_config = RunConfig(**run_config)\n+\n+ # build ofa model\n+ ofa_model = OFA(model, run_config=run_config)\n+\n+ ofa_model.set_epoch(0)\n+ ofa_model.set_task(task)\n+\n+ input_spec = [{\n+ \"image\": paddle.ones(\n+ shape=[1, 3, 640, 640], dtype='float32'),\n+ \"im_shape\": paddle.full(\n+ [1, 2], 640, dtype='float32'),\n+ \"scale_factor\": paddle.ones(\n+ shape=[1, 2], dtype='float32')\n+ }]\n+\n+ ofa_model._clear_search_space(input_spec=input_spec)\n+ ofa_model._build_ss = True\n+ check_ss = ofa_model._sample_config('expand_ratio', phase=None)\n+ # tokenize the search space\n+ ofa_model.tokenize()\n+ # check token map, search cands and search space\n+ logger.info('Token map is {}'.format(ofa_model.token_map))\n+ logger.info('Search candidates is {}'.format(ofa_model.search_cands))\n+ logger.info('The length of search_space is {}, search_space is {}'.\n+ format(len(ofa_model._ofa_layers), ofa_model._ofa_layers))\n+ # set model state dict into ofa model\n+ utils.set_state_dict(ofa_model.model, param_state_dict)\n+ return ofa_model\n"
}
] |
Python
|
Apache License 2.0
|
paddlepaddle/paddledetection
|
add ofa picodet demo (#4923)
|
499,333 |
24.12.2021 16:15:47
| -28,800 |
0c2f474fda9bac93778127dce5a761eb3547f703
|
fix idx in post_process
|
[
{
"change_type": "MODIFY",
"old_path": "ppdet/modeling/post_process.py",
"new_path": "ppdet/modeling/post_process.py",
"diff": "@@ -100,7 +100,6 @@ class BBoxPostProcess(nn.Layer):\nif bbox_num[i] == 0:\nbboxes_i = fake_bboxes\nbbox_num_i = fake_bbox_num\n- id_start += 1\nelse:\nbboxes_i = bboxes[id_start:id_start + bbox_num[i], :]\nbbox_num_i = bbox_num[i]\n"
}
] |
Python
|
Apache License 2.0
|
paddlepaddle/paddledetection
|
fix idx in post_process (#4996)
|
499,298 |
26.12.2021 23:59:15
| -28,800 |
e035261710a8e3cb335144e6c98c7defba2e986c
|
fix truncated image reading
|
[
{
"change_type": "MODIFY",
"old_path": "deploy/pptracking/python/visualize.py",
"new_path": "deploy/pptracking/python/visualize.py",
"diff": "@@ -17,7 +17,8 @@ from __future__ import division\nimport os\nimport cv2\nimport numpy as np\n-from PIL import Image, ImageDraw\n+from PIL import Image, ImageDraw, ImageFile\n+ImageFile.LOAD_TRUNCATED_IMAGES = True\nfrom collections import deque\n"
},
{
"change_type": "MODIFY",
"old_path": "deploy/python/visualize.py",
"new_path": "deploy/python/visualize.py",
"diff": "@@ -18,7 +18,8 @@ from __future__ import division\nimport os\nimport cv2\nimport numpy as np\n-from PIL import Image, ImageDraw\n+from PIL import Image, ImageDraw, ImageFile\n+ImageFile.LOAD_TRUNCATED_IMAGES = True\nimport math\n"
},
{
"change_type": "MODIFY",
"old_path": "ppdet/engine/trainer.py",
"new_path": "ppdet/engine/trainer.py",
"diff": "@@ -23,7 +23,8 @@ import time\nimport numpy as np\nimport typing\n-from PIL import Image, ImageOps\n+from PIL import Image, ImageOps, ImageFile\n+ImageFile.LOAD_TRUNCATED_IMAGES = True\nimport paddle\nimport paddle.distributed as dist\n"
}
] |
Python
|
Apache License 2.0
|
paddlepaddle/paddledetection
|
fix truncated image reading (#4936)
|
499,308 |
27.12.2021 15:07:20
| -28,800 |
ebbe6686b940ef90ebfb9571126f955bb6f6f7ca
|
remove version check for clang-format,test=document_fix
|
[
{
"change_type": "MODIFY",
"old_path": ".travis/codestyle/clang_format.hook",
"new_path": ".travis/codestyle/clang_format.hook",
"diff": "#!/bin/bash\nset -e\n-readonly VERSION=\"3.8\"\n-\n-version=$(clang-format -version)\n-\n-if ! [[ $version == *\"$VERSION\"* ]]; then\n- echo \"clang-format version check failed.\"\n- echo \"a version contains '$VERSION' is needed, but get '$version'\"\n- echo \"you can install the right version, and make an soft-link to '\\$PATH' env\"\n- exit -1\n-fi\n-\nclang-format $@\n"
}
] |
Python
|
Apache License 2.0
|
paddlepaddle/paddledetection
|
remove version check for clang-format,test=document_fix (#5004)
|
499,333 |
27.12.2021 15:20:15
| -28,800 |
f5d8f770c2cf8ad9a91e6c1ae5eeb6bd29524bd2
|
support windows deploy for pptracking
|
[
{
"change_type": "MODIFY",
"old_path": "deploy/pptracking/cpp/src/jde_predictor.cc",
"new_path": "deploy/pptracking/cpp/src/jde_predictor.cc",
"diff": "@@ -32,7 +32,7 @@ void JDEPredictor::LoadModel(const std::string& model_dir,\nconfig.EnableUseGpu(200, this->gpu_id_);\nconfig.SwitchIrOptim(true);\n// use tensorrt\n- if (run_mode != \"fluid\") {\n+ if (run_mode != \"paddle\") {\nauto precision = paddle_infer::Config::Precision::kFloat32;\nif (run_mode == \"trt_fp32\") {\nprecision = paddle_infer::Config::Precision::kFloat32;\n@@ -42,7 +42,8 @@ void JDEPredictor::LoadModel(const std::string& model_dir,\nprecision = paddle_infer::Config::Precision::kInt8;\n} else {\nprintf(\n- \"run_mode should be 'fluid', 'trt_fp32', 'trt_fp16' or 'trt_int8'\");\n+ \"run_mode should be 'paddle', 'trt_fp32', 'trt_fp16' or \"\n+ \"'trt_int8'\");\n}\n// set tensorrt\nconfig.EnableTensorRtEngine(1 << 30,\n"
},
{
"change_type": "MODIFY",
"old_path": "deploy/pptracking/cpp/src/tracker.cc",
"new_path": "deploy/pptracking/cpp/src/tracker.cc",
"diff": "@@ -191,9 +191,9 @@ bool JDETracker::update(const cv::Mat &dets,\ntracks->clear();\nfor (size_t i = 0; i < this->tracked_trajectories.size(); ++i) {\nif (this->tracked_trajectories[i].is_activated) {\n- Track track = {.id = this->tracked_trajectories[i].id,\n- .score = this->tracked_trajectories[i].score,\n- .ltrb = this->tracked_trajectories[i].ltrb};\n+ Track track = {this->tracked_trajectories[i].id,\n+ this->tracked_trajectories[i].score,\n+ this->tracked_trajectories[i].ltrb};\ntracks->push_back(track);\n}\n}\n"
}
] |
Python
|
Apache License 2.0
|
paddlepaddle/paddledetection
|
support windows deploy for pptracking (#4969)
|
499,304 |
27.12.2021 22:53:25
| -28,800 |
bb945bf7b3095842ca8f7c719ac218b146eb4f95
|
update picodet model zoo
|
[
{
"change_type": "MODIFY",
"old_path": "configs/picodet/README.md",
"new_path": "configs/picodet/README.md",
"diff": "@@ -27,9 +27,9 @@ We developed a series of lightweight models, named `PP-PicoDet`. Because of the\n| Model | Input size | mAP<sup>val<br>0.5:0.95 | mAP<sup>val<br>0.5 | Params<br><sup>(M) | FLOPS<br><sup>(G) | Latency<sup><small>[NCNN](#latency)</small><sup><br><sup>(ms) | Latency<sup><small>[Lite](#latency)</small><sup><br><sup>(ms) | Download | Config |\n| :-------- | :--------: | :---------------------: | :----------------: | :----------------: | :---------------: | :-----------------------------: | :-----------------------------: | :----------------------------------------: | :--------------------------------------- |\n| PicoDet-S | 320*320 | 27.1 | 41.4 | 0.99 | 0.73 | 8.13 | **6.65** | [model](https://paddledet.bj.bcebos.com/models/picodet_s_320_coco.pdparams) | [log](https://paddledet.bj.bcebos.com/logs/train_picodet_s_320_coco.log) | [config](https://github.com/PaddlePaddle/PaddleDetection/tree/develop/configs/picodet/picodet_s_320_coco.yml) |\n-| PicoDet-S | 416*416 | 30.6 | 45.5 | 0.99 | 1.24 | 12.37 | **9.82** | [model](https://paddledet.bj.bcebos.com/models/picodet_s_416_coco.pdparams) | [log](https://paddledet.bj.bcebos.com/logs/train_picodet_s_416_coco.log) | [config](https://github.com/PaddlePaddle/PaddleDetection/tree/develop/configs/picodet/picodet_s_416_coco.yml) |\n+| PicoDet-S | 416*416 | 30.7 | 45.8 | 0.99 | 1.24 | 12.37 | **9.82** | [model](https://paddledet.bj.bcebos.com/models/picodet_s_416_coco.pdparams) | [log](https://paddledet.bj.bcebos.com/logs/train_picodet_s_416_coco.log) | [config](https://github.com/PaddlePaddle/PaddleDetection/tree/develop/configs/picodet/picodet_s_416_coco.yml) |\n| PicoDet-M | 320*320 | 30.9 | 45.7 | 2.15 | 1.48 | 11.27 | **9.61** | [model](https://paddledet.bj.bcebos.com/models/picodet_m_320_coco.pdparams) | [log](https://paddledet.bj.bcebos.com/logs/train_picodet_m_320_coco.log) | [config](https://github.com/PaddlePaddle/PaddleDetection/tree/develop/configs/picodet/picodet_m_320_coco.yml) |\n-| PicoDet-M | 416*416 | 34.3 | 49.8 | 2.15 | 2.50 | 17.39 | **15.88** | [model](https://paddledet.bj.bcebos.com/models/picodet_m_416_coco.pdparams) | [log](https://paddledet.bj.bcebos.com/logs/train_picodet_m_416_coco.log) | [config](https://github.com/PaddlePaddle/PaddleDetection/tree/develop/configs/picodet/picodet_m_416_coco.yml) |\n+| PicoDet-M | 416*416 | 34.8 | 50.5 | 2.15 | 2.50 | 17.39 | **15.88** | [model](https://paddledet.bj.bcebos.com/models/picodet_m_416_coco.pdparams) | [log](https://paddledet.bj.bcebos.com/logs/train_picodet_m_416_coco.log) | [config](https://github.com/PaddlePaddle/PaddleDetection/tree/develop/configs/picodet/picodet_m_416_coco.yml) |\n| PicoDet-L | 320*320 | 32.9 | 48.2 | 3.30 | 2.23 | 15.26 | **13.42** | [model](https://paddledet.bj.bcebos.com/models/picodet_l_320_coco.pdparams) | [log](https://paddledet.bj.bcebos.com/logs/train_picodet_l_320_coco.log) | [config](https://github.com/PaddlePaddle/PaddleDetection/tree/develop/configs/picodet/picodet_l_320_coco.yml) |\n| PicoDet-L | 416*416 | 36.6 | 52.5 | 3.30 | 3.76 | 23.36 | **21.85** | [model](https://paddledet.bj.bcebos.com/models/picodet_l_416_coco.pdparams) | [log](https://paddledet.bj.bcebos.com/logs/train_picodet_l_416_coco.log) | [config](https://github.com/PaddlePaddle/PaddleDetection/tree/develop/configs/picodet/picodet_l_416_coco.yml) |\n| PicoDet-L | 640*640 | 40.9 | 57.6 | 3.30 | 8.91 | 54.11 | **50.55** | [model](https://paddledet.bj.bcebos.com/models/picodet_l_640_coco.pdparams) | 
[log](https://paddledet.bj.bcebos.com/logs/train_picodet_l_640_coco.log) | [config](https://github.com/PaddlePaddle/PaddleDetection/tree/develop/configs/picodet/picodet_l_640_coco.yml) |\n@@ -41,6 +41,8 @@ We developed a series of lightweight models, named `PP-PicoDet`. Because of the\n| PicoDet-Shufflenetv2 1x | 416*416 | 30.0 | 44.6 | 1.17 | 1.53 | 15.06 | **10.63** | [model](https://paddledet.bj.bcebos.com/models/picodet_shufflenetv2_1x_416_coco.pdparams) | [log](https://paddledet.bj.bcebos.com/logs/train_picodet_shufflenetv2_1x_416_coco.log) | [config](https://github.com/PaddlePaddle/PaddleDetection/tree/develop/configs/picodet/more_config/picodet_shufflenetv2_1x_416_coco.yml) |\n| PicoDet-MobileNetv3-large 1x | 416*416 | 35.6 | 52.0 | 3.55 | 2.80 | 20.71 | **17.88** | [model](https://paddledet.bj.bcebos.com/models/picodet_mobilenetv3_large_1x_416_coco.pdparams) | [log](https://paddledet.bj.bcebos.com/logs/train_picodet_mobilenetv3_large_1x_416_coco.log) | [config](https://github.com/PaddlePaddle/PaddleDetection/tree/develop/configs/picodet/more_config/picodet_mobilenetv3_large_1x_416_coco.yml) |\n| PicoDet-LCNet 1.5x | 416*416 | 36.3 | 52.2 | 3.10 | 3.85 | 21.29 | **20.8** | [model](https://paddledet.bj.bcebos.com/models/picodet_lcnet_1_5x_416_coco.pdparams) | [log](https://paddledet.bj.bcebos.com/logs/train_picodet_lcnet_1_5x_416_coco.log) | [config](https://github.com/PaddlePaddle/PaddleDetection/tree/develop/configs/picodet/more_config/picodet_lcnet_1_5x_416_coco.yml) |\n+| PicoDet-LCNet 1.5x | 640*640 | 40.6 | 57.4 | 3.10 | - | - | - | [model](https://paddledet.bj.bcebos.com/models/picodet_lcnet_1_5x_640_coco.pdparams) | [log](https://paddledet.bj.bcebos.com/logs/train_picodet_lcnet_1_5x_640_coco.log) | [config](https://github.com/PaddlePaddle/PaddleDetection/tree/develop/configs/picodet/more_config/picodet_lcnet_1_5x_640_coco.yml) |\n+| PicoDet-R18 | 640*640 | 40.7 | 57.2 | 11.10 | - | - | - | [model](https://paddledet.bj.bcebos.com/models/picodet_r18_640_coco.pdparams) | [log](https://paddledet.bj.bcebos.com/logs/train_picodet_r18_640_coco.log) | [config](https://github.com/PaddlePaddle/PaddleDetection/tree/develop/configs/picodet/more_config/picodet_r18_640_coco.yml) |\n<details open>\n<summary><b>Table Notes:</b></summary>\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "configs/picodet/more_config/picodet_lcnet_1_5x_640_coco.yml",
"diff": "+_BASE_: [\n+ '../../datasets/coco_detection.yml',\n+ '../../runtime.yml',\n+ '../_base_/picodet_esnet.yml',\n+ '../_base_/optimizer_300e.yml',\n+ '../_base_/picodet_640_reader.yml',\n+]\n+\n+pretrain_weights: https://paddledet.bj.bcebos.com/models/pretrained/LCNet_x1_5_pretrained.pdparams\n+weights: output/picodet_lcnet_1_5x_640_coco/model_final\n+find_unused_parameters: True\n+use_ema: true\n+cycle_epoch: 40\n+snapshot_epoch: 10\n+\n+PicoDet:\n+ backbone: LCNet\n+ neck: CSPPAN\n+ head: PicoHead\n+\n+LCNet:\n+ scale: 1.5\n+ feature_maps: [3, 4, 5]\n+\n+CSPPAN:\n+ out_channels: 160\n+\n+PicoHead:\n+ conv_feat:\n+ name: PicoFeat\n+ feat_in: 160\n+ feat_out: 160\n+ num_convs: 4\n+ num_fpn_stride: 4\n+ norm_type: bn\n+ share_cls_reg: True\n+ feat_in_chan: 160\n+\n+TrainReader:\n+ batch_size: 24\n+\n+LearningRate:\n+ base_lr: 0.2\n+ schedulers:\n+ - !CosineDecay\n+ max_epochs: 300\n+ - !LinearWarmup\n+ start_factor: 0.1\n+ steps: 300\n"
}
] |
Python
|
Apache License 2.0
|
paddlepaddle/paddledetection
|
update picodet model zoo (#5009)
|
499,395 |
27.12.2021 22:53:38
| -28,800 |
54f2411f2bd8d26b2029709a02ae6e9a884c2f9f
|
modify infer problem of s2anet
|
[
{
"change_type": "MODIFY",
"old_path": "ppdet/modeling/post_process.py",
"new_path": "ppdet/modeling/post_process.py",
"diff": "@@ -252,7 +252,7 @@ class S2ANetBBoxPostProcess(nn.Layer):\ndef __init__(self, num_classes=15, nms_pre=2000, min_bbox_size=0, nms=None):\nsuper(S2ANetBBoxPostProcess, self).__init__()\nself.num_classes = num_classes\n- self.nms_pre = paddle.to_tensor(nms_pre)\n+ self.nms_pre = nms_pre\nself.min_bbox_size = min_bbox_size\nself.nms = nms\nself.origin_shape_list = []\n"
}
] |
Python
|
Apache License 2.0
|
paddlepaddle/paddledetection
|
modify infer problem of s2anet (#5010)
|