[Docs] Fix api docs in readthedocs (#34)
* Add __init__ to complete docs

* Add code block for camera docs

* Modify cameras __init__

* Modify apis __init__.py

* Modify utils __init__.py

* Replace collect package with func

* Fix circular import

* Add scipy in readthedocs req

* Modify readthedocs req

* Add pytorch3d in readthedocs reqs

* Mock pytorch3d and torch_renderer
yl-1993 authored Dec 21, 2021
1 parent 9ec38db commit d6a8f47
Showing 10 changed files with 229 additions and 50 deletions.
1 change: 1 addition & 0 deletions .readthedocs.yml
@@ -7,3 +7,4 @@ python:
install:
- requirements: requirements/docs.txt
- requirements: requirements/readthedocs.txt
- requirements: requirements/runtime.txt
10 changes: 7 additions & 3 deletions docs/conf.py
@@ -20,7 +20,7 @@
# -- Project information -----------------------------------------------------

project = 'MMHuman3D'
copyright = '2018-2021, OpenMMLab'
copyright = '2021, OpenMMLab'
author = 'MMHuman3D Authors'
version_file = '../mmhuman3d/version.py'

@@ -44,7 +44,11 @@ def get_version():
'sphinx_markdown_tables', 'sphinx_copybutton', 'myst_parser'
]

autodoc_mock_imports = ['mmhuman3d.version', 'mmcv.ops']
autodoc_mock_imports = [
'mmhuman3d.version',
'mmhuman3d.core.visualization.renderer.torch3d_renderer', 'mmcv.ops',
'pytorch3d'
]

# Ignore >>> when copying code
copybutton_prompt_text = r'>>> |\.\.\. '
@@ -78,7 +82,7 @@ def get_version():
'menu': [
{
'name': 'GitHub',
'url': 'https://github.com/open-mmlab/mmdetection'
'url': 'https://github.com/open-mmlab/mmhuman3d'
},
{
'name':
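The central Read the Docs fix is the expanded autodoc_mock_imports above: during the docs build Sphinx substitutes mock modules for the listed names, so mmhuman3d imports cleanly even though pytorch3d, the torch3d_renderer package, and the compiled mmcv.ops are absent on the builder. A minimal sketch of that mechanism (not part of this commit; it assumes a recent Sphinx, whose sphinx.ext.autodoc.mock helper backs autodoc_mock_imports):

# Minimal sketch: any import of a mocked name resolves to a stub object,
# so module-level imports in mmhuman3d no longer break the docs build.
from sphinx.ext.autodoc.mock import mock

with mock(['pytorch3d']):
    import pytorch3d                              # a mock module, not the real package
    from pytorch3d.renderer import MeshRenderer  # submodules/attributes are mocked too
    print(type(pytorch3d), type(MeshRenderer))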
17 changes: 12 additions & 5 deletions mmhuman3d/apis/__init__.py
@@ -1,8 +1,15 @@
from .inference import inference_model, init_model
from .test import multi_gpu_test, single_gpu_test
from .train import set_random_seed, train_model
from mmhuman3d.apis import inference, test, train
from mmhuman3d.apis.inference import LoadImage, inference_model, init_model
from mmhuman3d.apis.test import (
collect_results_cpu,
collect_results_gpu,
multi_gpu_test,
single_gpu_test,
)
from mmhuman3d.apis.train import set_random_seed, train_model

__all__ = [
'set_random_seed', 'train_model', 'init_model', 'inference_model',
'multi_gpu_test', 'single_gpu_test'
'LoadImage', 'collect_results_cpu', 'collect_results_gpu', 'inference',
'inference_model', 'init_model', 'multi_gpu_test', 'set_random_seed',
'single_gpu_test', 'test', 'train', 'train_model'
]
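The rewritten __init__ files all follow the same pattern: absolute imports plus an __all__ that lists every public function and every submodule, which lets Sphinx autodoc walk the package without relative-import surprises. A quick check of the new surface (hypothetical session, assuming mmhuman3d is installed):

# Hypothetical check, not part of the commit.
import mmhuman3d.apis as apis

assert 'inference_model' in apis.__all__   # functions are re-exported at package level
assert 'test' in apis.__all__              # submodule names are exported too
print(sorted(apis.__all__))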
15 changes: 9 additions & 6 deletions mmhuman3d/core/cameras/__init__.py
@@ -1,16 +1,19 @@
from .builder import build_cameras
from .camera_parameter import CameraParameter
from .cameras import (
from mmhuman3d.core.cameras import builder, camera_parameter, cameras
from mmhuman3d.core.cameras.builder import CAMERAS, build_cameras
from mmhuman3d.core.cameras.camera_parameter import CameraParameter
from mmhuman3d.core.cameras.cameras import (
FoVOrthographicCameras,
FoVPerspectiveCameras,
NewAttributeCameras,
OrthographicCameras,
PerspectiveCameras,
WeakPerspectiveCameras,
compute_orbit_cameras,
)

__all__ = [
'WeakPerspectiveCameras', 'CameraParameter', 'compute_orbit_cameras',
'FoVOrthographicCameras', 'FoVPerspectiveCameras', 'PerspectiveCameras',
'OrthographicCameras', 'build_cameras'
'CAMERAS', 'CameraParameter', 'FoVOrthographicCameras',
'FoVPerspectiveCameras', 'NewAttributeCameras', 'OrthographicCameras',
'PerspectiveCameras', 'WeakPerspectiveCameras', 'build_cameras', 'builder',
'camera_parameter', 'cameras', 'compute_orbit_cameras'
]
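Besides the camera classes, the builder module now also exports the CAMERAS registry. A hypothetical usage sketch (the registry attribute and the no-argument construction below are assumptions for illustration, not taken from the diff):

# Hypothetical sketch: CAMERAS is an mmcv-style registry, so cameras are
# normally built from a config dict keyed by type name.
from mmhuman3d.core.cameras import CAMERAS, build_cameras

print(list(CAMERAS.module_dict.keys()))                   # registered camera types
cam = build_cameras(dict(type='WeakPerspectiveCameras'))  # assumed default construction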
60 changes: 34 additions & 26 deletions mmhuman3d/core/cameras/cameras.py
@@ -404,18 +404,7 @@ def convert_orig_cam_to_matrix(
**kwargs) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
"""Compute intrinsic camera matrix from orig_cam parameter of smpl.
Args:
orig_cam (torch.Tensor): shape should be (N, 4).
znear (Union[torch.Tensor, float], optional):
near clipping plane of the view frustrum.
Defaults to 0.0.
aspect_ratio (Union[torch.Tensor, float], optional):
aspect ratio of the image pixels. 1.0 indicates square pixels.
Defaults to 1.0.
Returns:
Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
opencv intrinsic matrix: (N, 4, 4)
.. code-block:: python
r > 1::
@@ -440,6 +429,19 @@ def convert_orig_cam_to_matrix(
translation matrix: (N, 3)::
[0, 0, -znear]
Args:
orig_cam (torch.Tensor): shape should be (N, 4).
znear (Union[torch.Tensor, float], optional):
near clipping plane of the view frustrum.
Defaults to 0.0.
aspect_ratio (Union[torch.Tensor, float], optional):
aspect ratio of the image pixels. 1.0 indicates square pixels.
Defaults to 1.0.
Returns:
Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
opencv intrinsic matrix: (N, 4, 4)
"""
znear = kwargs.get('znear', -1.0)
aspect_ratio = kwargs.get('aspect_ratio', 1.0)
@@ -476,6 +478,8 @@ def convert_K_to_orig_cam(
K (torch.Tensor):
opencv orthographics intrinsic matrix: (N, 4, 4)
.. code-block:: python
K = [[sx*r, 0, 0, tx*sx*r],
[0, sy, 0, ty*sy],
[0, 0, 1, 0],
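Both docstrings describe the same weak-perspective layout: orig_cam = (sx, sy, tx, ty) from SMPL/VIBE-style estimators maps to a 4x4 orthographic intrinsic matrix. A numeric sketch of that layout (the sample values, and the final row that is elided in the diff, are assumptions):

import torch

sx, sy, tx, ty = 0.9, 0.9, 0.05, -0.10   # assumed orig_cam values
r = 1.0                                  # assumed aspect ratio

# Layout documented above; the last row is assumed to be [0, 0, 0, 1].
K = torch.tensor([[sx * r, 0.0, 0.0, tx * sx * r],
                  [0.0,    sy,  0.0, ty * sy],
                  [0.0,    0.0, 1.0, 0.0],
                  [0.0,    0.0, 0.0, 1.0]])
print(K)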
@@ -924,13 +928,7 @@ def get_default_projection_matrix(cls, **args) -> torch.Tensor:
"""Class method. Calculate the projective transformation matrix by
default parameters.
Args:
**kwargs: parameters for the projection can be passed in as keyword
arguments to override the default values.
Return:
a `torch.Tensor` which represents a batch of projection matrices K
of shape (N, 4, 4)
.. code-block:: python
fx = focal_length[:,0]
fy = focal_length[:,1]
@@ -941,6 +939,14 @@
[0, fy, 0, py],
[0, 0, 1, 0],
[0, 0, 0, 1],]
Args:
**kwargs: parameters for the projection can be passed in as keyword
arguments to override the default values.
Return:
a `torch.Tensor` which represents a batch of projection matrices K
of shape (N, 4, 4)
"""
batch_size = args.get('batch_size', 1)
device = args.get('device', 'cpu')
@@ -1032,13 +1038,7 @@ def get_default_projection_matrix(cls, **args) -> torch.Tensor:
"""Class method. Calculate the projective transformation matrix by
default parameters.
Args:
**kwargs: parameters for the projection can be passed in as keyword
arguments to override the default values.
Return:
a `torch.Tensor` which represents a batch of projection matrices K
of shape (N, 4, 4)
.. code-block:: python
scale_x = 2 / (max_x - min_x)
scale_y = 2 / (max_y - min_y)
@@ -1051,6 +1051,14 @@
[0, scale_y, 0, -mix_y],
[0, 0, -scale_z, -mid_z],
[0, 0, 0, 1],]
Args:
**kwargs: parameters for the projection can be passed in as keyword
arguments to override the default values.
Return:
a `torch.Tensor` which represents a batch of projection matrices K
of shape (N, 4, 4)
"""
znear = args.get('znear', 1.0)
zfar = args.get('zfar', 100.0)
35 changes: 35 additions & 0 deletions mmhuman3d/core/conventions/__init__.py
@@ -0,0 +1,35 @@
from mmhuman3d.core.conventions import cameras, keypoints_mapping, segmentation
from mmhuman3d.core.conventions.cameras import (
CAMERA_CONVENTIONS,
convert_cameras,
convert_K_3x3_to_4x4,
convert_K_4x4_to_3x3,
convert_ndc_to_screen,
convert_perspective_to_weakperspective,
convert_screen_to_ndc,
convert_weakperspective_to_perspective,
convert_world_view,
enc_camera_convention,
)
from mmhuman3d.core.conventions.keypoints_mapping import (
KEYPOINTS_FACTORY,
compress_converted_kps,
convert_kps,
get_flip_pairs,
get_keypoint_idx,
get_keypoint_idxs_by_part,
get_keypoint_num,
get_mapping,
)
from mmhuman3d.core.conventions.segmentation import body_segmentation

__all__ = [
'CAMERA_CONVENTIONS', 'KEYPOINTS_FACTORY', 'body_segmentation', 'cameras',
'compress_converted_kps', 'convert_K_3x3_to_4x4', 'convert_K_4x4_to_3x3',
'convert_cameras', 'convert_kps', 'convert_ndc_to_screen',
'convert_perspective_to_weakperspective', 'convert_screen_to_ndc',
'convert_weakperspective_to_perspective', 'convert_world_view',
'enc_camera_convention', 'get_flip_pairs', 'get_keypoint_idx',
'get_keypoint_idxs_by_part', 'get_keypoint_num', 'get_mapping',
'keypoints_mapping', 'segmentation'
]
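The new top-level conventions __init__ re-exports the keypoint mapping helpers such as convert_kps. A hypothetical call (the argument names, array shape, and two-value return are assumptions inferred from the exported names, not taken from the diff):

# Hypothetical usage, not part of the commit.
import numpy as np
from mmhuman3d.core.conventions import convert_kps

coco_kps = np.zeros((1, 17, 3))   # (frames, joints, xyz) in the source convention
smpl_kps, mask = convert_kps(coco_kps, src='coco', dst='smpl')
print(smpl_kps.shape, mask.shape)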
17 changes: 11 additions & 6 deletions mmhuman3d/core/conventions/cameras/__init__.py
@@ -1,4 +1,8 @@
from .convert_convention import (
from mmhuman3d.core.conventions.cameras import (
convert_convention,
convert_projection,
)
from mmhuman3d.core.conventions.cameras.convert_convention import (
CAMERA_CONVENTIONS,
convert_cameras,
convert_K_3x3_to_4x4,
@@ -8,14 +12,15 @@
convert_world_view,
enc_camera_convention,
)
from .convert_projection import (
from mmhuman3d.core.conventions.cameras.convert_projection import (
convert_perspective_to_weakperspective,
convert_weakperspective_to_perspective,
)

__all__ = [
'convert_cameras', 'convert_K_3x3_to_4x4', 'convert_K_4x4_to_3x3',
'convert_ndc_to_screen', 'convert_screen_to_ndc', 'convert_world_view',
'CAMERA_CONVENTIONS', 'convert_perspective_to_weakperspective',
'convert_weakperspective_to_perspective', 'enc_camera_convention'
'CAMERA_CONVENTIONS', 'convert_K_3x3_to_4x4', 'convert_K_4x4_to_3x3',
'convert_cameras', 'convert_convention', 'convert_ndc_to_screen',
'convert_perspective_to_weakperspective', 'convert_projection',
'convert_screen_to_ndc', 'convert_weakperspective_to_perspective',
'convert_world_view', 'enc_camera_convention'
]
7 changes: 7 additions & 0 deletions mmhuman3d/core/evaluation/__init__.py
@@ -0,0 +1,7 @@
from mmhuman3d.core.evaluation import mesh_eval, mpjpe
from mmhuman3d.core.evaluation.mesh_eval import compute_similarity_transform
from mmhuman3d.core.evaluation.mpjpe import keypoint_mpjpe

__all__ = [
'compute_similarity_transform', 'keypoint_mpjpe', 'mesh_eval', 'mpjpe'
]
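keypoint_mpjpe implements the standard mean per-joint position error. For reference, a self-contained numpy sketch of the metric itself (not a call into the library, whose exact signature is not shown in this diff):

import numpy as np

def mpjpe_sketch(pred, gt, mask):
    """Mean per-joint position error over visible joints.

    pred, gt: (N, K, 3) 3D keypoints; mask: (N, K) boolean visibility.
    """
    error = np.linalg.norm(pred - gt, axis=-1)   # (N, K) per-joint distances
    return error[mask].mean()

pred = np.random.rand(2, 17, 3)
gt = np.random.rand(2, 17, 3)
mask = np.ones((2, 17), dtype=bool)
print(mpjpe_sketch(pred, gt, mask))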
4 changes: 4 additions & 0 deletions mmhuman3d/data/data_structures/__init__.py
@@ -0,0 +1,4 @@
from mmhuman3d.data.data_structures import human_data
from mmhuman3d.data.data_structures.human_data import HumanData

__all__ = ['HumanData', 'human_data']
113 changes: 109 additions & 4 deletions mmhuman3d/utils/__init__.py
@@ -1,7 +1,112 @@
from .collect_env import collect_env
from .logger import get_root_logger
from mmhuman3d.utils.collect_env import collect_env
from mmhuman3d.utils.demo_utils import (
box2cs,
conver_verts_to_cam_coord,
convert_bbox_to_intrinsic,
convert_crop_cam_to_orig_img,
convert_kp2d_to_bbox,
get_default_hmr_intrinsic,
prepare_frames,
process_mmdet_results,
process_mmtracking_results,
smooth_process,
xywh2xyxy,
xyxy2xywh,
)
from mmhuman3d.utils.dist_utils import DistOptimizerHook, allreduce_grads
from mmhuman3d.utils.ffmpeg_utils import (
array_to_images,
array_to_video,
compress_video,
crop_video,
gif_to_images,
gif_to_video,
images_to_array,
images_to_gif,
images_to_sorted_images,
images_to_video,
pad_for_libx264,
slice_video,
spatial_concat_video,
temporal_concat_video,
vid_info_reader,
video_to_array,
video_to_gif,
video_to_images,
video_writer,
)
from mmhuman3d.utils.geometry import (
batch_rodrigues,
estimate_translation,
estimate_translation_np,
perspective_projection,
quaternion_to_angle_axis,
rotation_matrix_to_angle_axis,
rotation_matrix_to_quaternion,
)
from mmhuman3d.utils.keypoint_utils import get_different_colors, search_limbs
from mmhuman3d.utils.logger import get_root_logger
from mmhuman3d.utils.mesh_utils import (
join_batch_meshes_as_scene,
mesh_to_pointcloud_vc,
save_meshes_as_plys,
)
from mmhuman3d.utils.misc import multi_apply, torch_to_numpy
from mmhuman3d.utils.path_utils import (
Existence,
check_input_path,
check_path_existence,
check_path_suffix,
prepare_output_path,
)
from mmhuman3d.utils.transforms import (
Compose,
aa_to_ee,
aa_to_quat,
aa_to_rot6d,
aa_to_rotmat,
aa_to_sja,
ee_to_aa,
ee_to_quat,
ee_to_rot6d,
ee_to_rotmat,
quat_to_aa,
quat_to_ee,
quat_to_rot6d,
quat_to_rotmat,
rot6d_to_aa,
rot6d_to_ee,
rot6d_to_quat,
rot6d_to_rotmat,
rotmat_to_aa,
rotmat_to_ee,
rotmat_to_quat,
rotmat_to_rot6d,
sja_to_aa,
)

__all__ = [
'get_root_logger',
'collect_env',
'Compose', 'DistOptimizerHook', 'Existence', 'aa_to_ee', 'aa_to_quat',
'aa_to_rot6d', 'aa_to_rotmat', 'aa_to_sja', 'allreduce_grads',
'array_to_images', 'array_to_video', 'batch_rodrigues', 'box2cs',
'check_input_path', 'check_path_existence', 'check_path_suffix',
'collect_env', 'compress_video', 'conver_verts_to_cam_coord',
'convert_bbox_to_intrinsic', 'convert_crop_cam_to_orig_img',
'convert_kp2d_to_bbox', 'crop_video', 'ee_to_aa', 'ee_to_quat',
'ee_to_rot6d', 'ee_to_rotmat', 'estimate_translation',
'estimate_translation_np', 'get_default_hmr_intrinsic',
'get_different_colors', 'get_root_logger', 'gif_to_images', 'gif_to_video',
'images_to_array', 'images_to_gif', 'images_to_sorted_images',
'images_to_video', 'join_batch_meshes_as_scene', 'mesh_to_pointcloud_vc',
'multi_apply', 'pad_for_libx264', 'perspective_projection',
'prepare_frames', 'prepare_output_path', 'process_mmdet_results',
'process_mmtracking_results', 'quat_to_aa', 'quat_to_ee', 'quat_to_rot6d',
'quat_to_rotmat', 'quaternion_to_angle_axis', 'rot6d_to_aa', 'rot6d_to_ee',
'rot6d_to_quat', 'rot6d_to_rotmat', 'rotation_matrix_to_angle_axis',
'rotation_matrix_to_quaternion', 'rotmat_to_aa', 'rotmat_to_ee',
'rotmat_to_quat', 'rotmat_to_rot6d', 'save_meshes_as_plys', 'search_limbs',
'sja_to_aa', 'slice_video', 'smooth_process', 'spatial_concat_video',
'temporal_concat_video', 'torch_to_numpy', 'vid_info_reader',
'video_to_array', 'video_to_gif', 'video_to_images', 'video_writer',
'xywh2xyxy', 'xyxy2xywh'
]
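The largest re-export block is the rotation-format converters (aa_to_rotmat, rotmat_to_quat, and so on). As a reference for what the axis-angle to rotation-matrix direction computes, a self-contained Rodrigues-formula sketch (pure torch, not a call into mmhuman3d.utils.transforms):

import math
import torch

def aa_to_rotmat_sketch(aa: torch.Tensor) -> torch.Tensor:
    """Axis-angle (..., 3) -> rotation matrix (..., 3, 3) via Rodrigues' formula."""
    angle = aa.norm(dim=-1, keepdim=True).clamp(min=1e-8)
    axis = aa / angle
    x, y, z = axis.unbind(-1)
    zero = torch.zeros_like(x)
    # Skew-symmetric cross-product matrix of the rotation axis.
    K = torch.stack([zero, -z, y,
                     z, zero, -x,
                     -y, x, zero], dim=-1).reshape(aa.shape[:-1] + (3, 3))
    eye = torch.eye(3, dtype=aa.dtype, device=aa.device).expand(K.shape)
    angle = angle.unsqueeze(-1)
    return eye + torch.sin(angle) * K + (1 - torch.cos(angle)) * (K @ K)

# 90 degrees about the z-axis -> [[0, -1, 0], [1, 0, 0], [0, 0, 1]].
print(aa_to_rotmat_sketch(torch.tensor([0.0, 0.0, math.pi / 2])))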
