diff --git a/PyTorch/contrib/cv/classification/InceptionV3_ID1596_for_PyTorch/main-8p.py b/PyTorch/contrib/cv/classification/InceptionV3_ID1596_for_PyTorch/main-8p.py
index f90cd1ee50af4b39c0030420c4143726316c7312..7613119e091c36596aa93a8decd7b7ef1ffa5907 100644
--- a/PyTorch/contrib/cv/classification/InceptionV3_ID1596_for_PyTorch/main-8p.py
+++ b/PyTorch/contrib/cv/classification/InceptionV3_ID1596_for_PyTorch/main-8p.py
@@ -182,7 +182,7 @@ def main():
         # main_worker process function
         # The child process uses the environment variables of the parent process,
         # we have to set KERNEL_NAME_ID for every proc
-        mp.spawn(main_worker, nprocs=ngpus_per_node, args=(ngpus_per_node, args))
+        main_worker(args.gpu, ngpus_per_node, args)
     else:
         # Simply call main_worker function
diff --git a/PyTorch/contrib/cv/classification/InceptionV3_ID1596_for_PyTorch/test/train_full_8p.sh b/PyTorch/contrib/cv/classification/InceptionV3_ID1596_for_PyTorch/test/train_full_8p.sh
index 1e68866795725b822fa7bbe38c988dc2eb600300..a91bba9d139430d12d50ba7ee0375538e3f548b9 100644
--- a/PyTorch/contrib/cv/classification/InceptionV3_ID1596_for_PyTorch/test/train_full_8p.sh
+++ b/PyTorch/contrib/cv/classification/InceptionV3_ID1596_for_PyTorch/test/train_full_8p.sh
@@ -91,6 +91,7 @@ do
         --world-size=1 \
         --rank=0 \
         --device='npu' \
+        --gpu=${i} \
         --epochs=${train_epochs} \
         --label-smoothing=0.1 \
         --batch-size=${batch_size} > ${test_path_dir}/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log 2>&1 &
diff --git a/PyTorch/contrib/cv/classification/InceptionV3_ID1596_for_PyTorch/test/train_performance_8p.sh b/PyTorch/contrib/cv/classification/InceptionV3_ID1596_for_PyTorch/test/train_performance_8p.sh
index b59f64220449fbf5440c14cd112f01b947d2e451..d0610a17b748e02b5be09c7cb502617ae4e7c939 100644
--- a/PyTorch/contrib/cv/classification/InceptionV3_ID1596_for_PyTorch/test/train_performance_8p.sh
+++ b/PyTorch/contrib/cv/classification/InceptionV3_ID1596_for_PyTorch/test/train_performance_8p.sh
@@ -91,6 +91,7 @@ do
         --world-size=1 \
         --rank=0 \
         --device='npu' \
+        --gpu=${i} \
        --epochs=${train_epochs} \
         --label-smoothing=0.1 \
         --batch-size=${batch_size} > ${test_path_dir}/output/$ASCEND_DEVICE_ID/train_$ASCEND_DEVICE_ID.log 2>&1 &
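The `main-8p.py` hunk above replaces `mp.spawn` with a direct `main_worker` call, and both shell scripts now pass `--gpu=${i}`, so each iteration of the launch loop starts one training process bound to one device. A minimal sketch of that per-process launch pattern, assuming an argument set like the script's (only the `main_worker(args.gpu, ngpus_per_node, args)` call itself comes from the diff):

```python
# Sketch of the launch pattern after this change: the shell loop starts one
# Python process per device and passes the device index via --gpu, so the
# script no longer forks workers with torch.multiprocessing.spawn.
import argparse


def main_worker(gpu, ngpus_per_node, args):
    # The real script selects the NPU/GPU here, then builds the model and trains.
    print(f"worker bound to device {gpu} of {ngpus_per_node}")


def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--gpu', type=int, default=0,
                        help='device index for this process, set by the shell loop')
    args = parser.parse_args()
    ngpus_per_node = 8  # illustrative; the real script derives this from its own setup
    main_worker(args.gpu, ngpus_per_node, args)


if __name__ == '__main__':
    main()
```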
diff --git a/PyTorch/contrib/cv/detection/FCOS/README.md b/PyTorch/contrib/cv/detection/FCOS/README.md
index a58b943634f87935df007bbee3554ac38555064d..bdbd3e957ff88a1e183896d4b49d92666b2df919 100644
--- a/PyTorch/contrib/cv/detection/FCOS/README.md
+++ b/PyTorch/contrib/cv/detection/FCOS/README.md
@@ -1,157 +1,130 @@
-# Warning
-- This README is the one shipped with upstream mmdetection; please follow [README_raw.md](https://gitee.com/ascend/modelzoo/blob/master/contrib/PyTorch/Research/cv/image_object_detection/FCOS/README_raw.md) to train the FCOS model
+# FCOS
+This implements training of FCOS on the COCO dataset, mainly modified from [open-mmlab/mmdetection](https://github.com/open-mmlab/mmdetection).
+## FCOS Detail
+At the time of writing, Ascend PyTorch is still inefficient for contiguous operations.
+Therefore, FCOS is re-implemented by modifying mmdet and mmcv.
-**News**: We released the technical report on [ArXiv](https://arxiv.org/abs/1906.07155).
-Documentation: https://mmdetection.readthedocs.io/
+## Requirements
-## Introduction
+- The run package matching the NPU
+- Python 3.7.5
+- PyTorch (NPU version)
+- apex (NPU version)
-MMDetection is an open source object detection toolbox based on PyTorch. It is
-a part of the OpenMMLab project developed by [Multimedia Laboratory, CUHK](http://mmlab.ie.cuhk.edu.hk/).
+### Document and data preparation
+1. Download and pack the modelzoo\contrib\PyTorch\cv\image_object_detection\Fcos folder
+2. Extract the Fcos archive on the NPU server
+3. Download the COCO dataset
+4. Place the COCO dataset under the Fcos/data directory (a quick layout check is sketched below)
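Step 4 assumes the standard mmdetection COCO layout under `Fcos/data`; a quick check along these lines (the paths are assumptions taken from that convention, not from this repo) can catch misplaced files before training starts:

```python
# Sanity-check the assumed COCO layout under Fcos/data (standard mmdetection
# convention; adjust the paths if your dataset config points elsewhere).
import os

coco_root = "Fcos/data/coco"
expected = [
    "annotations/instances_train2017.json",
    "annotations/instances_val2017.json",
    "train2017",
    "val2017",
]
for rel in expected:
    path = os.path.join(coco_root, rel)
    print(f"{path}: {'ok' if os.path.exists(path) else 'MISSING'}")
```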
-The master branch works with **PyTorch 1.3 to 1.6**.
-The old v1.x branch works with PyTorch 1.1 to 1.4, but v2.0 is strongly recommended for faster speed, higher performance, better design and more friendly usage.
-
-![demo image](resources/coco_test_12510.jpg)
-
-### Major features
-
-- **Modular Design**
-
-  We decompose the detection framework into different components and one can easily construct a customized object detection framework by combining different modules.
-
-- **Support of multiple frameworks out of the box**
-
-  The toolbox directly supports popular and contemporary detection frameworks, *e.g.* Faster RCNN, Mask RCNN, RetinaNet, etc.
-
-- **High efficiency**
-
-  All basic bbox and mask operations run on GPUs. The training speed is faster than or comparable to other codebases, including [Detectron2](https://github.com/facebookresearch/detectron2), [maskrcnn-benchmark](https://github.com/facebookresearch/maskrcnn-benchmark) and [SimpleDet](https://github.com/TuSimple/simpledet).
-
-- **State of the art**
-
-  The toolbox stems from the codebase developed by the *MMDet* team, who won [COCO Detection Challenge](http://cocodataset.org/#detection-leaderboard) in 2018, and we keep pushing it forward.
-
-Apart from MMDetection, we also released a library [mmcv](https://github.com/open-mmlab/mmcv) for computer vision research, which is heavily depended on by this toolbox.
-
-## License
-
-This project is released under the [Apache 2.0 license](LICENSE).
-
-## Changelog
-
-v2.6.0 was released on 1/11/2020.
-Please refer to [changelog.md](docs/changelog.md) for details and release history.
-A comparison between v1.x and v2.0 codebases can be found in [compatibility.md](docs/compatibility.md).
-
-## Benchmark and model zoo
-
-Results and models are available in the [model zoo](docs/model_zoo.md).
+### Download and modify mmcv
+1. Download mmcv, preferably v1.2.7 (the required range is above 1.2.5 and below 1.3.0)
+```
+git clone -b v1.2.7 https://github.com/open-mmlab/mmcv.git
+```
+2. Replace the corresponding files in mmcv with the ones from mmcv_need
+```
+cp -f mmcv_need/_functions.py ../mmcv/mmcv/parallel/
+cp -f mmcv_need/builder.py ../mmcv/mmcv/runner/optimizer/
+cp -f mmcv_need/distributed.py ../mmcv/mmcv/parallel/
+cp -f mmcv_need/data_parallel.py ../mmcv/mmcv/parallel/
+cp -f mmcv_need/dist_utils.py ../mmcv/mmcv/runner/
+cp -f mmcv_need/optimizer.py ../mmcv/mmcv/runner/hooks/
+cp -f mmcv_need/checkpoint.py ../mmcv/mmcv/runner/
+```
+3. Replacing the following three files only makes the log print FPS information; training is unaffected either way
+```
+cp -f mmcv_need/iter_timer.py ../mmcv/mmcv/runner/hooks/
+cp -f mmcv_need/base_runner.py ../mmcv/mmcv/runner/
+cp -f mmcv_need/epoch_based_runner.py ../mmcv/mmcv/runner/
+```
+### Configure the environment
+1. Conda is recommended for environment management
+```
+conda create -n fcos --clone env # clone an environment that already has the dependencies
+conda activate fcos
+```
+2. Build and install mmcv
+```
+cd mmcv
+export MMCV_WITH_OPS=1
+export MAX_JOBS=8
+python3.7 setup.py build_ext
+python3.7 setup.py develop
+pip3 list | grep mmcv # check the version and path
+```
+3. Build and install mmdet
+```
+cd Fcos
+pip3 install -r requirements/build.txt
+python3.7 setup.py develop
+pip3 list | grep mmdet # check the version and path
+```
+4. Modify line 113 of apex to support O1; the reference path is root/archiconda3/envs/fcos/lib/python3.7/site-packages/apex/amp/utils.py
+```
+if cached_x.grad_fn.next_functions[1][0].variable is not x:
+```
+change it to
+```
+if cached_x.grad_fn.next_functions[0][0].variable is not x:
+```
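Because the apex install path varies from one environment to another, a small helper along these lines (an illustration, not part of the repo) can locate the installed `apex/amp/utils.py` and apply the same one-index edit:

```python
# Find the installed apex/amp/utils.py and apply the fix described above:
# next_functions[1][0] -> next_functions[0][0]. Illustrative helper only.
import pathlib

import apex.amp.utils as amp_utils

path = pathlib.Path(amp_utils.__file__)
src = path.read_text()
old = "cached_x.grad_fn.next_functions[1][0].variable is not x"
new = "cached_x.grad_fn.next_functions[0][0].variable is not x"
if old in src:
    path.write_text(src.replace(old, new))
    print(f"patched {path}")
else:
    print(f"nothing to patch in {path}")
```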
+## Train MODEL
-Supported backbones:
-- [x] ResNet
-- [x] ResNeXt
-- [x] VGG
-- [x] HRNet
-- [x] RegNet
-- [x] Res2Net
-- [x] ResNeSt
+### Enter the FCOS folder
+```
+cd FCOS
+```
-Supported methods:
-- [x] [RPN](configs/rpn)
-- [x] [Fast R-CNN](configs/fast_rcnn)
-- [x] [Faster R-CNN](configs/faster_rcnn)
-- [x] [Mask R-CNN](configs/mask_rcnn)
-- [x] [Cascade R-CNN](configs/cascade_rcnn)
-- [x] [Cascade Mask R-CNN](configs/cascade_rcnn)
-- [x] [SSD](configs/ssd)
-- [x] [RetinaNet](configs/retinanet)
-- [x] [GHM](configs/ghm)
-- [x] [Mask Scoring R-CNN](configs/ms_rcnn)
-- [x] [Double-Head R-CNN](configs/double_heads)
-- [x] [Hybrid Task Cascade](configs/htc)
-- [x] [Libra R-CNN](configs/libra_rcnn)
-- [x] [Guided Anchoring](configs/guided_anchoring)
-- [x] [FCOS](configs/fcos)
-- [x] [RepPoints](configs/reppoints)
-- [x] [Foveabox](configs/foveabox)
-- [x] [FreeAnchor](configs/free_anchor)
-- [x] [NAS-FPN](configs/nas_fpn)
-- [x] [ATSS](configs/atss)
-- [x] [FSAF](configs/fsaf)
-- [x] [PAFPN](configs/pafpn)
-- [x] [Dynamic R-CNN](configs/dynamic_rcnn)
-- [x] [PointRend](configs/point_rend)
-- [x] [CARAFE](configs/carafe/README.md)
-- [x] [DCNv2](configs/dcn/README.md)
-- [x] [Group Normalization](configs/gn/README.md)
-- [x] [Weight Standardization](configs/gn+ws/README.md)
-- [x] [OHEM](configs/faster_rcnn/faster_rcnn_r50_fpn_ohem_1x_coco.py)
-- [x] [Soft-NMS](configs/faster_rcnn/faster_rcnn_r50_fpn_soft_nms_1x_coco.py)
-- [x] [Generalized Attention](configs/empirical_attention/README.md)
-- [x] [GCNet](configs/gcnet/README.md)
-- [x] [Mixed Precision (FP16) Training](configs/fp16/README.md)
-- [x] [InstaBoost](configs/instaboost/README.md)
-- [x] [GRoIE](configs/groie/README.md)
-- [x] [DetectoRS](configs/detectors/README.md)
-- [x] [Generalized Focal Loss](configs/gfl/README.md)
-- [x] [CornerNet](configs/cornernet/README.md)
-- [x] [Side-Aware Boundary Localization](configs/sabl/README.md)
-- [x] [YOLOv3](configs/yolo/README.md)
-- [x] [PAA](configs/paa/README.md)
-- [x] [YOLACT](configs/yolact/README.md)
-- [x] [CentripetalNet](configs/centripetalnet/README.md)
-- [x] [VFNet](configs/vfnet/README.md)
+### 1p
+Source the environment variables, make train_1p.sh executable, and run it
+```
+chmod +x ./scripts/train_1p.sh
+bash ./scripts/train_1p.sh
+```
-Some other methods are also supported in [projects using MMDetection](./docs/projects.md).
+### 8p
+Source the environment variables, make train_8p.sh executable, and run it
+```
+chmod +x ./scripts/train_8p.sh
+bash ./scripts/train_8p.sh
+```
-## Installation
+### Eval
+Make eval.sh executable and run it
+```
+chmod +x ./scripts/eval.sh
+bash ./scripts/eval.sh
+```
-Please refer to [get_started.md](docs/get_started.md) for installation.
+### Single-device inference
+1. Run demo.py
+```
+python3.7 demo.py xxx.pth
+```
-## Getting Started
-Please see [get_started.md](docs/get_started.md) for the basic usage of MMDetection.
-We provide [colab tutorial](demo/MMDet_Tutorial.ipynb), and full guidance for quick run [with existing dataset](docs/1_exist_data_model.md) and [with new dataset](docs/2_new_data_model.md) for beginners.
-There are also tutorials for [finetuning models](docs/tutorials/finetune.md), [adding new dataset](docs/tutorials/new_dataset.md), [designing data pipeline](docs/tutorials/data_pipeline.md), [customizing models](docs/tutorials/customize_models.md), [customizing runtime settings](docs/tutorials/customize_runtime.md) and [useful tools](docs/useful_tools.md).
+### Export ONNX
+1. Download mmdetection v2.11.0 and rebuild in that folder
+```
+git clone -b v2.11.0 https://github.com/open-mmlab/mmdetection.git
+cd mmdetection
+python3.7 setup.py develop
+```
+2. Run pthtar2onx.py
+```
+python3.7 pthtar2onx.py
+```
-Please refer to [FAQ](docs/faq.md) for frequently asked questions.
-## Contributing
+## FCOS training result
-We appreciate all contributions to improve MMDetection. Please refer to [CONTRIBUTING.md](.github/CONTRIBUTING.md) for the contributing guideline.
-
-## Acknowledgement
-
-MMDetection is an open source project that is contributed by researchers and engineers from various colleges and companies. We appreciate all the contributors who implement their methods or add new features, as well as users who give valuable feedback.
-We wish that the toolbox and benchmark could serve the growing research community by providing a flexible toolkit to reimplement existing methods and develop their own new detectors.
-
-## Citation
-
-If you use this toolbox or benchmark in your research, please cite this project.
-
-```
-@article{mmdetection,
-  title = {{MMDetection}: Open MMLab Detection Toolbox and Benchmark},
-  author = {Chen, Kai and Wang, Jiaqi and Pang, Jiangmiao and Cao, Yuhang and
-            Xiong, Yu and Li, Xiaoxiao and Sun, Shuyang and Feng, Wansen and
-            Liu, Ziwei and Xu, Jiarui and Zhang, Zheng and Cheng, Dazhi and
-            Zhu, Chenchen and Cheng, Tianheng and Zhao, Qijie and Li, Buyu and
-            Lu, Xin and Zhu, Rui and Wu, Yue and Dai, Jifeng and Wang, Jingdong
-            and Shi, Jianping and Ouyang, Wanli and Loy, Chen Change and Lin, Dahua},
-  journal= {arXiv preprint arXiv:1906.07155},
-  year={2019}
-}
-```
-
-## Contact
-
-This repo is currently maintained by Kai Chen ([@hellock](http://github.com/hellock)), Yuhang Cao ([@yhcao6](https://github.com/yhcao6)), Wenwei Zhang ([@ZwwWayne](https://github.com/ZwwWayne)),
-Jiarui Xu ([@xvjiarui](https://github.com/xvjiarui)). Other core developers include Jiangmiao Pang ([@OceanPang](https://github.com/OceanPang)) and Jiaqi Wang ([@myownskyW7](https://github.com/myownskyW7)).
+| mAP | FPS | Device(s) | Epochs | AMP_Type | Loss_Scale |
+| :------: | :------: | :------: | :------: | :------: | :------: |
+| 12.6 | 19.2 | 1p Gpu | 1 | O1 | dynamic |
+| 36.2 | 102.0 | 8p Gpu | 12 | O1 | dynamic |
+| 16.4 | 6.8 | 1p Npu | 1 | O1 | 32.0 |
+| 36.2 | 19.4 | 8p Npu | 12 | O1 | 32.0 |
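The `pthtar2onx.py` step in the README above converts a trained checkpoint to ONNX. In outline it presumably does something like the following; the model construction, input shape, and opset are assumptions, and the authoritative logic is the repo's own script:

```python
# Outline of a pth.tar -> ONNX conversion in the spirit of pthtar2onx.py
# (illustrative: the real script builds the FCOS model from its mmdet config).
import torch


def export_onnx(model, checkpoint_path, onnx_path="fcos.onnx"):
    ckpt = torch.load(checkpoint_path, map_location="cpu")
    # Checkpoints saved by the training scripts usually nest the weights
    # under 'state_dict'; fall back to the raw object otherwise.
    state = ckpt.get("state_dict", ckpt) if isinstance(ckpt, dict) else ckpt
    model.load_state_dict(state)
    model.eval()
    dummy = torch.randn(1, 3, 800, 1216)  # assumed FCOS-style input size
    torch.onnx.export(model, dummy, onnx_path, opset_version=11)
```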
diff --git a/PyTorch/contrib/cv/detection/FCOS/README_raw.md b/PyTorch/contrib/cv/detection/FCOS/README_raw.md
index 7cdac786faa09865a6b6cb6d6c885c08011c4e08..0b2c85262737aa6cae68a33d50ac113fd5603384 100644
--- a/PyTorch/contrib/cv/detection/FCOS/README_raw.md
+++ b/PyTorch/contrib/cv/detection/FCOS/README_raw.md
@@ -1,130 +1,157 @@
-# FCOS
+# Warning
+- This README is the one shipped with upstream mmdetection; please follow [README_raw.md](https://gitee.com/ascend/modelzoo/blob/master/contrib/PyTorch/Research/cv/image_object_detection/FCOS/README_raw.md) to train the FCOS model
-This implements training of FCOS on the COCO dataset, mainly modified from [open-mmlab/mmdetection](https://github.com/open-mmlab/mmdetection).
-## FCOS Detail
-At the time of writing, Ascend PyTorch is still inefficient for contiguous operations.
-Therefore, FCOS is re-implemented by modifying mmdet and mmcv.
+
+ +
+**News**: We released the technical report on [ArXiv](https://arxiv.org/abs/1906.07155).
-## Requirements
+Documentation: https://mmdetection.readthedocs.io/
-- The run package matching the NPU
-- Python 3.7.5
-- PyTorch (NPU version)
-- apex (NPU version)
+## Introduction
-### Document and data preparation
-1. Download and pack the modelzoo\contrib\PyTorch\cv\image_object_detection\Fcos folder
-2. Extract the Fcos archive on the NPU server
-3. Download the COCO dataset
-4. Place the COCO dataset under the Fcos/data directory
+MMDetection is an open source object detection toolbox based on PyTorch. It is
+a part of the OpenMMLab project developed by [Multimedia Laboratory, CUHK](http://mmlab.ie.cuhk.edu.hk/).
-### Download and modify mmcv
-1. Download mmcv, preferably v1.2.7 (the required range is above 1.2.5 and below 1.3.0)
-```
-git clone -b v1.2.7 https://github.com/open-mmlab/mmcv.git
-```
-2. Replace the corresponding files in mmcv with the ones from mmcv_need
-```
-cp -f mmcv_need/_functions.py ../mmcv/mmcv/parallel/
-cp -f mmcv_need/builder.py ../mmcv/mmcv/runner/optimizer/
-cp -f mmcv_need/distributed.py ../mmcv/mmcv/parallel/
-cp -f mmcv_need/data_parallel.py ../mmcv/mmcv/parallel/
-cp -f mmcv_need/dist_utils.py ../mmcv/mmcv/runner/
-cp -f mmcv_need/optimizer.py ../mmcv/mmcv/runner/hooks/
-cp -f mmcv_need/checkpoint.py ../mmcv/mmcv/runner/
-```
-3. Replacing the following three files only makes the log print FPS information; training is unaffected either way
-```
-cp -f mmcv_need/iter_timer.py ../mmcv/mmcv/runner/hooks/
-cp -f mmcv_need/base_runner.py ../mmcv/mmcv/runner/
-cp -f mmcv_need/epoch_based_runner.py ../mmcv/mmcv/runner/
-```
-### Configure the environment
-1. Conda is recommended for environment management
-```
-conda create -n fcos --clone env # clone an environment that already has the dependencies
-conda activate fcos
-```
-2. Build and install mmcv
-```
-cd mmcv
-export MMCV_WITH_OPS=1
-export MAX_JOBS=8
-python3.7 setup.py build_ext
-python3.7 setup.py develop
-pip3 list | grep mmcv # check the version and path
-```
-3. Build and install mmdet
-```
-cd Fcos
-pip3 install -r requirements/build.txt
-python3.7 setup.py develop
-pip3 list | grep mmdet # check the version and path
-```
-4. Modify line 113 of apex to support O1; the reference path is root/archiconda3/envs/fcos/lib/python3.7/site-packages/apex/amp/utils.py
-```
-if cached_x.grad_fn.next_functions[1][0].variable is not x:
-```
-change it to
-```
-if cached_x.grad_fn.next_functions[0][0].variable is not x:
-```
-## Train MODEL
+The master branch works with **PyTorch 1.3 to 1.6**.
+The old v1.x branch works with PyTorch 1.1 to 1.4, but v2.0 is strongly recommended for faster speed, higher performance, better design and more friendly usage.
-### Enter the FCOS folder
-```
-cd FCOS
-```
+![demo image](resources/coco_test_12510.jpg)
-### 1p
-Source the environment variables, make train_1p.sh executable, and run it
-```
-chmod +x ./scripts/train_1p.sh
-bash ./scripts/train_1p.sh
-```
+### Major features
-### 8p
-Source the environment variables, make train_8p.sh executable, and run it
-```
-chmod +x ./scripts/train_8p.sh
-bash ./scripts/train_8p.sh
-```
+- **Modular Design**
-### Eval
-Make eval.sh executable and run it
-```
-chmod +x ./scripts/eval.sh
-bash ./scripts/eval.sh
-```
+  We decompose the detection framework into different components and one can easily construct a customized object detection framework by combining different modules.
-### Single-device inference
-1. Run demo.py
-```
-python3.7 demo.py xxx.pth
-```
+- **Support of multiple frameworks out of the box**
+
+  The toolbox directly supports popular and contemporary detection frameworks, *e.g.* Faster RCNN, Mask RCNN, RetinaNet, etc.
-### Export ONNX
-1. Download mmdetection v2.11.0 and rebuild in that folder
-```
-git clone -b v2.11.0 https://github.com/open-mmlab/mmdetection.git
-cd mmdetection
-python3.7 setup.py develop
-```
-2. Run pthtar2onx.py
-```
-python3.7 pthtar2onx.py
-```
+- **High efficiency**
+
+  All basic bbox and mask operations run on GPUs. The training speed is faster than or comparable to other codebases, including [Detectron2](https://github.com/facebookresearch/detectron2), [maskrcnn-benchmark](https://github.com/facebookresearch/maskrcnn-benchmark) and [SimpleDet](https://github.com/TuSimple/simpledet).
+
+- **State of the art**
+
+  The toolbox stems from the codebase developed by the *MMDet* team, who won [COCO Detection Challenge](http://cocodataset.org/#detection-leaderboard) in 2018, and we keep pushing it forward.
+
+Apart from MMDetection, we also released a library [mmcv](https://github.com/open-mmlab/mmcv) for computer vision research, which is heavily depended on by this toolbox.
+
+## License
+
+This project is released under the [Apache 2.0 license](LICENSE).
+
+## Changelog
+
+v2.6.0 was released on 1/11/2020.
+Please refer to [changelog.md](docs/changelog.md) for details and release history.
+A comparison between v1.x and v2.0 codebases can be found in [compatibility.md](docs/compatibility.md).
+
+## Benchmark and model zoo
+
+Results and models are available in the [model zoo](docs/model_zoo.md).
+
+Supported backbones:
+- [x] ResNet
+- [x] ResNeXt
+- [x] VGG
+- [x] HRNet
+- [x] RegNet
+- [x] Res2Net
+- [x] ResNeSt
+
+Supported methods:
+- [x] [RPN](configs/rpn)
+- [x] [Fast R-CNN](configs/fast_rcnn)
+- [x] [Faster R-CNN](configs/faster_rcnn)
+- [x] [Mask R-CNN](configs/mask_rcnn)
+- [x] [Cascade R-CNN](configs/cascade_rcnn)
+- [x] [Cascade Mask R-CNN](configs/cascade_rcnn)
+- [x] [SSD](configs/ssd)
+- [x] [RetinaNet](configs/retinanet)
+- [x] [GHM](configs/ghm)
+- [x] [Mask Scoring R-CNN](configs/ms_rcnn)
+- [x] [Double-Head R-CNN](configs/double_heads)
+- [x] [Hybrid Task Cascade](configs/htc)
+- [x] [Libra R-CNN](configs/libra_rcnn)
+- [x] [Guided Anchoring](configs/guided_anchoring)
+- [x] [FCOS](configs/fcos)
+- [x] [RepPoints](configs/reppoints)
+- [x] [Foveabox](configs/foveabox)
+- [x] [FreeAnchor](configs/free_anchor)
+- [x] [NAS-FPN](configs/nas_fpn)
+- [x] [ATSS](configs/atss)
+- [x] [FSAF](configs/fsaf)
+- [x] [PAFPN](configs/pafpn)
+- [x] [Dynamic R-CNN](configs/dynamic_rcnn)
+- [x] [PointRend](configs/point_rend)
+- [x] [CARAFE](configs/carafe/README.md)
+- [x] [DCNv2](configs/dcn/README.md)
+- [x] [Group Normalization](configs/gn/README.md)
+- [x] [Weight Standardization](configs/gn+ws/README.md)
+- [x] [OHEM](configs/faster_rcnn/faster_rcnn_r50_fpn_ohem_1x_coco.py)
+- [x] [Soft-NMS](configs/faster_rcnn/faster_rcnn_r50_fpn_soft_nms_1x_coco.py)
+- [x] [Generalized Attention](configs/empirical_attention/README.md)
+- [x] [GCNet](configs/gcnet/README.md)
+- [x] [Mixed Precision (FP16) Training](configs/fp16/README.md)
+- [x] [InstaBoost](configs/instaboost/README.md)
+- [x] [GRoIE](configs/groie/README.md)
+- [x] [DetectoRS](configs/detectors/README.md)
+- [x] [Generalized Focal Loss](configs/gfl/README.md)
+- [x] [CornerNet](configs/cornernet/README.md)
+- [x] [Side-Aware Boundary Localization](configs/sabl/README.md)
+- [x] [YOLOv3](configs/yolo/README.md)
+- [x] [PAA](configs/paa/README.md)
+- [x] [YOLACT](configs/yolact/README.md)
+- [x] [CentripetalNet](configs/centripetalnet/README.md)
+- [x] [VFNet](configs/vfnet/README.md)
+
+Some other methods are also supported in [projects using MMDetection](./docs/projects.md).
+
+## Installation
+
+Please refer to [get_started.md](docs/get_started.md) for installation.
+
+## Getting Started
+
+Please see [get_started.md](docs/get_started.md) for the basic usage of MMDetection.
+We provide [colab tutorial](demo/MMDet_Tutorial.ipynb), and full guidance for quick run [with existing dataset](docs/1_exist_data_model.md) and [with new dataset](docs/2_new_data_model.md) for beginners.
+There are also tutorials for [finetuning models](docs/tutorials/finetune.md), [adding new dataset](docs/tutorials/new_dataset.md), [designing data pipeline](docs/tutorials/data_pipeline.md), [customizing models](docs/tutorials/customize_models.md), [customizing runtime settings](docs/tutorials/customize_runtime.md) and [useful tools](docs/useful_tools.md).
+
+Please refer to [FAQ](docs/faq.md) for frequently asked questions.
-## FCOS training result
+## Contributing
-| mAP | FPS | Device(s) | Epochs | AMP_Type | Loss_Scale |
-| :------: | :------: | :------: | :------: | :------: | :------: |
-| 12.6 | 19.2 | 1p Gpu | 1 | O1 | dynamic |
-| 36.2 | 102.0 | 8p Gpu | 12 | O1 | dynamic |
-| 16.4 | 6.8 | 1p Npu | 1 | O1 | 32.0 |
-| 36.2 | 19.4 | 8p Npu | 12 | O1 | 32.0 |
+We appreciate all contributions to improve MMDetection. Please refer to [CONTRIBUTING.md](.github/CONTRIBUTING.md) for the contributing guideline.
+
+## Acknowledgement
+
+MMDetection is an open source project that is contributed by researchers and engineers from various colleges and companies. We appreciate all the contributors who implement their methods or add new features, as well as users who give valuable feedback.
+We wish that the toolbox and benchmark could serve the growing research community by providing a flexible toolkit to reimplement existing methods and develop their own new detectors.
+
+## Citation
+
+If you use this toolbox or benchmark in your research, please cite this project.
+
+```
+@article{mmdetection,
+  title = {{MMDetection}: Open MMLab Detection Toolbox and Benchmark},
+  author = {Chen, Kai and Wang, Jiaqi and Pang, Jiangmiao and Cao, Yuhang and
+            Xiong, Yu and Li, Xiaoxiao and Sun, Shuyang and Feng, Wansen and
+            Liu, Ziwei and Xu, Jiarui and Zhang, Zheng and Cheng, Dazhi and
+            Zhu, Chenchen and Cheng, Tianheng and Zhao, Qijie and Li, Buyu and
+            Lu, Xin and Zhu, Rui and Wu, Yue and Dai, Jifeng and Wang, Jingdong
+            and Shi, Jianping and Ouyang, Wanli and Loy, Chen Change and Lin, Dahua},
+  journal= {arXiv preprint arXiv:1906.07155},
+  year={2019}
+}
+```
+
+## Contact
+
+This repo is currently maintained by Kai Chen ([@hellock](http://github.com/hellock)), Yuhang Cao ([@yhcao6](https://github.com/yhcao6)), Wenwei Zhang ([@ZwwWayne](https://github.com/ZwwWayne)),
+Jiarui Xu ([@xvjiarui](https://github.com/xvjiarui)). Other core developers include Jiangmiao Pang ([@OceanPang](https://github.com/OceanPang)) and Jiaqi Wang ([@myownskyW7](https://github.com/myownskyW7)).
diff --git a/PyTorch/contrib/cv/detection/GFocalV2/mmcv_need/epoch_based_runner.py b/PyTorch/contrib/cv/detection/GFocalV2/mmcv_need/epoch_based_runner.py
index 6612df21b3b6199ebbbd70cefb718f8b1c4ececb..0dfba53a5098fe9920136bc146611ee318bd756d 100644
--- a/PyTorch/contrib/cv/detection/GFocalV2/mmcv_need/epoch_based_runner.py
+++ b/PyTorch/contrib/cv/detection/GFocalV2/mmcv_need/epoch_based_runner.py
@@ -17,6 +17,7 @@ import platform
 import shutil
 import time
 import warnings
+import os
 
 import torch
 
@@ -57,6 +58,7 @@ class EpochBasedRunner(BaseRunner):
         self._max_iters = self._max_epochs * len(self.data_loader)
         self.call_hook('before_train_epoch')
         time.sleep(2)  # Prevent possible deadlock during epoch transition
+        PERF_MAX_STEPS = os.environ.get("PERF_MAX_STEPS", None)
         for i, data_batch in enumerate(self.data_loader):
             self._inner_iter = i
             # if i == 500:
@@ -71,8 +73,11 @@ class EpochBasedRunner(BaseRunner):
             self.run_iter(data_batch, train_mode=True)
             self.call_hook('after_train_iter')
             self._iter += 1
+            if PERF_MAX_STEPS and i == int(PERF_MAX_STEPS):
+                break
 
-        self.logger.info('FPS: ' + str(self.samples_per_gpu * self.num_of_gpus / self.iter_timer_hook.time_all * (len(self.data_loader) - 5)))
+        self.logger.info('FPS: ' + str(
+            self.samples_per_gpu * self.num_of_gpus / self.iter_timer_hook.time_all * (len(self.data_loader) - 5)))
         self.call_hook('after_train_epoch')
         self._epoch += 1
diff --git a/PyTorch/contrib/cv/detection/GFocalV2/test/train_full_1p.sh b/PyTorch/contrib/cv/detection/GFocalV2/test/train_full_1p.sh
index 7c3720ebff484a9691721821f4d28dd3c23f7a9f..ce4388407def55135ff8557bbe4fb07529fbd223 100644
--- a/PyTorch/contrib/cv/detection/GFocalV2/test/train_full_1p.sh
+++ b/PyTorch/contrib/cv/detection/GFocalV2/test/train_full_1p.sh
@@ -25,6 +25,9 @@ data_path=""
 
 # Network name, same as the directory name; adjust per model
 Network="GFocal"
 
+# Cap the maximum number of training steps
+export PERF_MAX_STEPS=2000
+
 # Training batch_size; adjust per model
 batch_size=8
 device_id=0
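The runner hunk above reads `PERF_MAX_STEPS` once per epoch and breaks out of the iteration loop when the index reaches the cap, which is how `train_full_1p.sh` bounds a performance run at 2000 steps. A standalone sketch of the same mechanism (the loop body here is a stand-in for `run_iter` and the hooks):

```python
# Standalone illustration of the PERF_MAX_STEPS early exit added above: cap
# the number of iterations per epoch whenever the environment variable is set.
import os


def run_epoch(data_loader, train_step):
    max_steps = os.environ.get("PERF_MAX_STEPS", None)
    for i, batch in enumerate(data_loader):
        train_step(batch)
        if max_steps and i == int(max_steps):
            break  # mirrors the patch: stops after step index int(max_steps)


# Example: PERF_MAX_STEPS=3 runs iterations i = 0..3 and then stops.
os.environ["PERF_MAX_STEPS"] = "3"
run_epoch(range(100), lambda batch: None)
```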