diff --git a/.github/CONTRIBUTING.md b/.github/CONTRIBUTING.md index 112527e..0bb316f 100644 --- a/.github/CONTRIBUTING.md +++ b/.github/CONTRIBUTING.md @@ -12,11 +12,12 @@ All kinds of contributions are welcome, including but not limited to the followi 3. commit your changes 4. create a PR -Note +:::{note} - If you plan to add some new features that involve large changes, it is encouraged to open an issue for discussion first. - If you are the author of some papers and would like to include your method to mmsegmentation, please contact Kai Chen (chenkaidev[at]gmail[dot]com). We will much appreciate your contribution. +::: ## Code style diff --git a/configs/deeplabv3/README.md b/configs/deeplabv3/README.md index 02c2775..06caa33 100644 --- a/configs/deeplabv3/README.md +++ b/configs/deeplabv3/README.md @@ -15,7 +15,9 @@ ## Results and models -Note: `D-8` here corresponding to the output stride 8 setting for DeepLab series. +:::{note} +`D-8` here corresponding to the output stride 8 setting for DeepLab series. +::: ### Cityscapes diff --git a/configs/deeplabv3plus/README.md b/configs/deeplabv3plus/README.md index be46e32..16702fe 100644 --- a/configs/deeplabv3plus/README.md +++ b/configs/deeplabv3plus/README.md @@ -15,9 +15,10 @@ ## Results and models -Note: +:::{note} `D-8`/`D-16` here corresponding to the output stride 8/16 setting for DeepLab series. `MG-124` stands for multi-grid dilation in the last stage of ResNet. 
+::: ### Cityscapes diff --git a/docs/_static/css/readthedocs.css b/docs/_static/css/readthedocs.css new file mode 100644 index 0000000..2e38d08 --- /dev/null +++ b/docs/_static/css/readthedocs.css @@ -0,0 +1,6 @@ +.header-logo { + background-image: url("../images/mmsegmentation.png"); + background-size: 201px 40px; + height: 40px; + width: 201px; +} diff --git a/docs/_static/images/mmsegmentation.png b/docs/_static/images/mmsegmentation.png new file mode 100644 index 0000000..009083a Binary files /dev/null and b/docs/_static/images/mmsegmentation.png differ diff --git a/docs/api.rst b/docs/api.rst index 9c14a67..8285841 100644 --- a/docs/api.rst +++ b/docs/api.rst @@ -1,6 +1,3 @@ -API Reference -============== - mmseg.apis -------------- .. automodule:: mmseg.apis diff --git a/docs/conf.py b/docs/conf.py index aaea424..4353266 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -15,6 +15,8 @@ import os import subprocess import sys +import pytorch_sphinx_theme + sys.path.insert(0, os.path.abspath('..')) # -- Project information ----------------------------------------------------- @@ -40,11 +42,8 @@ release = get_version() # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. extensions = [ - 'sphinx.ext.autodoc', - 'sphinx.ext.napoleon', - 'sphinx.ext.viewcode', - 'recommonmark', - 'sphinx_markdown_tables', + 'sphinx.ext.autodoc', 'sphinx.ext.napoleon', 'sphinx.ext.viewcode', + 'sphinx_markdown_tables', 'sphinx_copybutton', 'myst_parser' ] autodoc_mock_imports = [ @@ -75,12 +74,101 @@ exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store'] # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. 
# -html_theme = 'sphinx_rtd_theme' +# html_theme = 'sphinx_rtd_theme' +html_theme = 'pytorch_sphinx_theme' +html_theme_path = [pytorch_sphinx_theme.get_html_theme_path()] +html_theme_options = { + # 'logo_url': 'https://mmsegmentation.readthedocs.io/en/latest/', + 'menu': [ + { + 'name': + 'Tutorial', + 'url': + 'https://github.com/open-mmlab/mmsegmentation/blob/master/' + 'demo/MMSegmentation_Tutorial.ipynb' + }, + { + 'name': 'GitHub', + 'url': 'https://github.com/open-mmlab/mmsegmentation' + }, + { + 'name': + 'Upstream', + 'children': [ + { + 'name': 'MMCV', + 'url': 'https://github.com/open-mmlab/mmcv', + 'description': 'Foundational library for computer vision' + }, + ] + }, + { + 'name': + 'Projects', + 'children': [ + { + 'name': 'MMAction2', + 'url': 'https://github.com/open-mmlab/mmaction2', + }, + { + 'name': 'MMClassification', + 'url': 'https://github.com/open-mmlab/mmclassification', + }, + { + 'name': 'MMOCR', + 'url': 'https://github.com/open-mmlab/mmocr', + }, + { + 'name': 'MMDetection', + 'url': 'https://github.com/open-mmlab/mmdetection', + }, + { + 'name': 'MMEditing', + 'url': 'https://github.com/open-mmlab/mmediting', + }, + { + 'name': 'MMDetection3D', + 'url': 'https://github.com/open-mmlab/mmdetection3d', + }, + { + 'name': 'MMPose', + 'url': 'https://github.com/open-mmlab/mmpose', + }, + { + 'name': 'MMTracking', + 'url': 'https://github.com/open-mmlab/mmtracking', + }, + { + 'name': 'MMGeneration', + 'url': 'https://github.com/open-mmlab/mmgeneration', + }, + ] + }, + { + 'name': + 'OpenMMLab', + 'children': [ + { + 'name': 'Homepage', + 'url': 'https://openmmlab.com/' + }, + { + 'name': 'GitHub', + 'url': 'https://github.com/open-mmlab/' + }, + ] + }, + ] +} # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". 
html_static_path = ['_static'] +html_css_files = ['css/readthedocs.css'] + +# Enable ::: for myst +myst_enable_extensions = ['colon_fence'] language = 'en' diff --git a/docs/get_started.md b/docs/get_started.md index f7d9bf0..90479c9 100644 --- a/docs/get_started.md +++ b/docs/get_started.md @@ -26,8 +26,10 @@ The compatible MMSegmentation and MMCV versions are as below. Please install the | 0.7.0 | mmcv-full>=1.1.2, <1.2.0 | | 0.6.0 | mmcv-full>=1.1.2, <1.2.0 | -Note: You need to run `pip uninstall mmcv` first if you have mmcv installed. +:::{note} +You need to run `pip uninstall mmcv` first if you have mmcv installed. If mmcv and mmcv-full are both installed, there will be `ModuleNotFoundError`. +::: ## Installation @@ -105,7 +107,7 @@ cd mmsegmentation pip install -e . # or "python setup.py develop" ``` -Note: +:::{note} 1. When training or testing models on Windows, please ensure that all the '\\' in paths are replaced with '/'. Add .replace('\\', '/') to your python code wherever path strings occur. 2. The `version+git_hash` will also be saved in trained models meta, e.g. 0.5.0+c415a2e. @@ -114,6 +116,7 @@ Note: you can install it before installing MMCV. 5. Some dependencies are optional. Simply running `pip install -e .` will only install the minimum runtime requirements. To use optional dependencies like `cityscapessripts` either install them manually with `pip install -r requirements/optional.txt` or specify desired extras when calling `pip` (e.g. `pip install -e .[optional]`). Valid keys for the extras field are: `all`, `tests`, `build`, and `optional`. +::: ### A from-scratch setup script diff --git a/docs/inference.md b/docs/inference.md index 65f1e46..632400d 100644 --- a/docs/inference.md +++ b/docs/inference.md @@ -63,8 +63,10 @@ Assume that you have already downloaded the checkpoints to the directory `checkp 4 --out results.pkl --eval mIoU cityscapes ``` - Note: There is some gap (~0.1%) between cityscapes mIoU and our mIoU.
The reason is that cityscapes average each class with class size by default. + :::{note} + There is some gap (~0.1%) between cityscapes mIoU and our mIoU. The reason is that cityscapes average each class with class size by default. We use the simple version without average for all datasets. +::: 5. Test PSPNet on cityscapes test split with 4 GPUs, and generate the png files to be submit to the official evaluation server. diff --git a/docs/model_zoo.md b/docs/model_zoo.md index 3806495..7babd2e 100644 --- a/docs/model_zoo.md +++ b/docs/model_zoo.md @@ -176,4 +176,6 @@ The training speed is reported as followed, in terms of second per iter (s/iter) | [CASILVision](https://github.com/CSAILVision/semantic-segmentation-pytorch) | 1.15 | N/A | | [vedaseg](https://github.com/Media-Smart/vedaseg) | 0.95 | 1.25 | -Note: The output stride of DeepLabV3+ is 8. +:::{note} +The output stride of DeepLabV3+ is 8. +::: diff --git a/docs/tutorials/customize_datasets.md b/docs/tutorials/customize_datasets.md index 020d513..8ed524c 100644 --- a/docs/tutorials/customize_datasets.md +++ b/docs/tutorials/customize_datasets.md @@ -42,8 +42,10 @@ Only `data/my_dataset/ann_dir/train/xxx{seg_map_suffix}`, `data/my_dataset/ann_dir/train/zzz{seg_map_suffix}` will be loaded. -Note: The annotations are images of shape (H, W), the value pixel should fall in range `[0, num_classes - 1]`. +:::{note} +The annotations are images of shape (H, W), the value pixel should fall in range `[0, num_classes - 1]`. You may use `'P'` mode of [pillow](https://pillow.readthedocs.io/en/stable/handbook/concepts.html#palette) to create your annotation image with color. 
+::: ## Customize datasets by mixing dataset diff --git a/docs/tutorials/customize_runtime.md b/docs/tutorials/customize_runtime.md index dd67ef5..3b9097b 100644 --- a/docs/tutorials/customize_runtime.md +++ b/docs/tutorials/customize_runtime.md @@ -176,12 +176,14 @@ In such case, we can set the workflow as so that 1 epoch for training and 1 epoch for validation will be run iteratively. -**Note**: +:::{note} 1. The parameters of model will not be updated during val epoch. 2. Keyword `total_epochs` in the config only controls the number of training epochs and will not affect the validation workflow. 3. Workflows `[('train', 1), ('val', 1)]` and `[('train', 1)]` will not change the behavior of `EvalHook` because `EvalHook` is called by `after_train_epoch` and validation workflow only affect hooks that are called through `after_val_epoch`. Therefore, the only difference between `[('train', 1), ('val', 1)]` and `[('train', 1)]` is that the runner will calculate losses on validation set after each training epoch. +::: + ## Customize hooks ### Use hooks implemented in MMCV diff --git a/docs/useful_tools.md b/docs/useful_tools.md index b18fd89..28f9a42 100644 --- a/docs/useful_tools.md +++ b/docs/useful_tools.md @@ -19,7 +19,9 @@ Params: 48.98 M ============================== ``` -**Note**: This tool is still experimental and we do not guarantee that the number is correct. You may well use the result for simple comparisons, but double check it before you adopt it in technical reports or papers. +:::{note} +This tool is still experimental and we do not guarantee that the number is correct. You may well use the result for simple comparisons, but double check it before you adopt it in technical reports or papers. +::: (1) FLOPs are related to the input shape while parameters are not. The default input shape is (1, 3, 1280, 800). (2) Some operators are not counted into FLOPs like GN and custom operators. 
@@ -74,7 +76,9 @@ Description of arguments: - `--dynamic-export`: Determines whether to export ONNX model with dynamic input and output shapes. If not specified, it will be set to `False`. - `--cfg-options`:Update config options. -**Note**: This tool is still experimental. Some customized operators are not supported for now. +:::{note} +This tool is still experimental. Some customized operators are not supported for now. +::: ### Evaluate ONNX model @@ -132,7 +136,9 @@ Description of all arguments | deeplabv3 | deeplabv3_r50-d8_769x769_40k_cityscapes.py | cityscapes | mIoU | 78.5 | 78.3 | | | | deeplabv3+ | deeplabv3plus_r50-d8_769x769_40k_cityscapes.py | cityscapes | mIoU | 78.9 | 78.7 | | | -**Note**: TensorRT is only available on configs with `whole mode`. +:::{note} +TensorRT is only available on configs with `whole mode`. +::: ### Convert to TorchScript (experimental) @@ -158,9 +164,13 @@ Description of arguments: - `--show`: Determines whether to print the traced graph of the exported model. If not specified, it will be set to `False`. - `--verify`: Determines whether to verify the correctness of an exported model. If not specified, it will be set to `False`. -**Note**: It's only support PyTorch>=1.8.0 for now. +:::{note} +It's only support PyTorch>=1.8.0 for now. +::: -**Note**: This tool is still experimental. Some customized operators are not supported for now. +:::{note} +This tool is still experimental. Some customized operators are not supported for now. +::: Examples: @@ -211,7 +221,9 @@ Description of all arguments - `--verify` : Verify the outputs of ONNXRuntime and TensorRT. - `--verbose` : Whether to verbose logging messages while creating TensorRT engine. Defaults to False. -**Note**: Only tested on whole mode. +:::{note} +Only tested on whole mode. 
+::: ## Miscellaneous @@ -297,7 +309,9 @@ python tools/mmseg2torchserve.py ${CONFIG_FILE} ${CHECKPOINT_FILE} \ --model-name ${MODEL_NAME} ``` -**Note**: ${MODEL_STORE} needs to be an absolute path to a folder. +:::{note} +${MODEL_STORE} needs to be an absolute path to a folder. +::: ### 2. Build `mmseg-serve` docker image diff --git a/docs_zh-CN/_static/css/readthedocs.css b/docs_zh-CN/_static/css/readthedocs.css new file mode 100644 index 0000000..2e38d08 --- /dev/null +++ b/docs_zh-CN/_static/css/readthedocs.css @@ -0,0 +1,6 @@ +.header-logo { + background-image: url("../images/mmsegmentation.png"); + background-size: 201px 40px; + height: 40px; + width: 201px; +} diff --git a/docs_zh-CN/_static/images/mmsegmentation.png b/docs_zh-CN/_static/images/mmsegmentation.png new file mode 100644 index 0000000..009083a Binary files /dev/null and b/docs_zh-CN/_static/images/mmsegmentation.png differ diff --git a/docs_zh-CN/api.rst b/docs_zh-CN/api.rst index 9c14a67..8285841 100644 --- a/docs_zh-CN/api.rst +++ b/docs_zh-CN/api.rst @@ -1,6 +1,3 @@ -API Reference -============== - mmseg.apis -------------- .. automodule:: mmseg.apis diff --git a/docs_zh-CN/conf.py b/docs_zh-CN/conf.py index ed5eb52..f7f47bf 100644 --- a/docs_zh-CN/conf.py +++ b/docs_zh-CN/conf.py @@ -15,6 +15,8 @@ import os import subprocess import sys +import pytorch_sphinx_theme + sys.path.insert(0, os.path.abspath('..')) # -- Project information ----------------------------------------------------- @@ -40,14 +42,13 @@ release = get_version() # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. 
extensions = [ - 'sphinx.ext.autodoc', - 'sphinx.ext.napoleon', - 'sphinx.ext.viewcode', - 'recommonmark', - 'sphinx_markdown_tables', + 'sphinx.ext.autodoc', 'sphinx.ext.napoleon', 'sphinx.ext.viewcode', + 'sphinx_markdown_tables', 'sphinx_copybutton', 'myst_parser' ] -autodoc_mock_imports = ['matplotlib', 'pycocotools', 'mmseg.version'] +autodoc_mock_imports = [ + 'matplotlib', 'pycocotools', 'mmseg.version', 'mmcv.ops' +] # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] @@ -73,14 +74,103 @@ exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store'] # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. # -html_theme = 'sphinx_rtd_theme' +# html_theme = 'sphinx_rtd_theme' +html_theme = 'pytorch_sphinx_theme' +html_theme_path = [pytorch_sphinx_theme.get_html_theme_path()] +html_theme_options = { + # 'logo_url': 'https://mmsegmentation.readthedocs.io/en/latest/', + 'menu': [ + { + 'name': + 'Tutorial', + 'url': + 'https://github.com/open-mmlab/mmsegmentation/blob/master/' + 'demo/MMSegmentation_Tutorial.ipynb' + }, + { + 'name': 'GitHub', + 'url': 'https://github.com/open-mmlab/mmsegmentation' + }, + { + 'name': + 'Upstream', + 'children': [ + { + 'name': 'MMCV', + 'url': 'https://github.com/open-mmlab/mmcv', + 'description': 'Foundational library for computer vision' + }, + ] + }, + { + 'name': + 'Projects', + 'children': [ + { + 'name': 'MMAction2', + 'url': 'https://github.com/open-mmlab/mmaction2', + }, + { + 'name': 'MMClassification', + 'url': 'https://github.com/open-mmlab/mmclassification', + }, + { + 'name': 'MMOCR', + 'url': 'https://github.com/open-mmlab/mmocr', + }, + { + 'name': 'MMDetection', + 'url': 'https://github.com/open-mmlab/mmdetection', + }, + { + 'name': 'MMEditing', + 'url': 'https://github.com/open-mmlab/mmediting', + }, + { + 'name': 'MMDetection3D', + 'url': 'https://github.com/open-mmlab/mmdetection3d', + }, + { + 'name': 'MMPose', + 
'url': 'https://github.com/open-mmlab/mmpose', + }, + { + 'name': 'MMTracking', + 'url': 'https://github.com/open-mmlab/mmtracking', + }, + { + 'name': 'MMGeneration', + 'url': 'https://github.com/open-mmlab/mmgeneration', + }, + ] + }, + { + 'name': + 'OpenMMLab', + 'children': [ + { + 'name': 'Homepage', + 'url': 'https://openmmlab.com/' + }, + { + 'name': 'GitHub', + 'url': 'https://github.com/open-mmlab/' + }, + ] + }, + ] +} # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['_static'] +html_css_files = ['css/readthedocs.css'] -language = 'zh_CN' +# Enable ::: for my_st +myst_enable_extensions = ['colon_fence'] + +language = 'zh-CN' def builder_inited_handler(app): diff --git a/docs_zh-CN/tutorials/data_pipeline.md b/docs_zh-CN/tutorials/data_pipeline.md index 64d3993..f3dfcd8 100644 --- a/docs_zh-CN/tutorials/data_pipeline.md +++ b/docs_zh-CN/tutorials/data_pipeline.md @@ -9,7 +9,7 @@ 数据的准备流程和数据集是解耦的。通常一个数据集定义了如何处理标注数据(annotations)信息,而一个数据流程定义了准备一个数据字典的所有步骤。一个流程包括了一系列操作,每个操作里都把一个字典作为输入,然后再输出一个新的字典给下一个变换操作。 -这些操作可分为数据加载 (data loading),预处理 (pre-processing),格式变化 (formatting) 和测试时数据增强 (test-time augmentation) 。 +这些操作可分为数据加载 (data loading),预处理 (pre-processing),格式变化 (formatting) 和测试时数据增强 (test-time augmentation)。 下面的例子就是 PSPNet 的一个流程: diff --git a/requirements/docs.txt b/requirements/docs.txt index 866c4d3..2017084 100644 --- a/requirements/docs.txt +++ b/requirements/docs.txt @@ -1,4 +1,6 @@ -recommonmark +docutils==0.16.0 +myst-parser +-e git+https://github.com/gaotongxiao/pytorch_sphinx_theme.git#egg=pytorch_sphinx_theme sphinx==4.0.2 +sphinx_copybutton sphinx_markdown_tables -sphinx_rtd_theme==0.5.2 diff --git a/setup.cfg b/setup.cfg index 6e88c11..75fcedc 100644 --- a/setup.cfg +++ b/setup.cfg @@ -8,6 +8,6 @@ line_length = 79 multi_line_output = 0 
known_standard_library = setuptools known_first_party = mmseg -known_third_party = PIL,cityscapesscripts,cv2,detail,matplotlib,mmcv,numpy,onnxruntime,packaging,prettytable,pytest,scipy,seaborn,torch,ts +known_third_party = PIL,cityscapesscripts,cv2,detail,matplotlib,mmcv,numpy,onnxruntime,packaging,prettytable,pytest,pytorch_sphinx_theme,scipy,seaborn,torch,ts no_lines_before = STDLIB,LOCALFOLDER default_section = THIRDPARTY