# hrnet_w48_coco_256x192.py

_base_ = [
    '../../../../_base_/default_runtime.py',
    '../../../../_base_/datasets/coco.py'
]
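# Run validation every 10 epochs with the COCO mAP metric and keep the
# checkpoint with the best AP.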
evaluation = dict(interval=10, metric='mAP', save_best='AP')
optimizer = dict(
    type='Adam',
    lr=5e-4,
)
optimizer_config = dict(grad_clip=None)
# learning policy
lr_config = dict(
    policy='step',
    warmup='linear',
    warmup_iters=500,
    warmup_ratio=0.001,
    step=[170, 200])
total_epochs = 210
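# Under the step policy above, the learning rate warms up linearly for the
# first 500 iterations and is then decayed at epochs 170 and 200; training
# runs for 210 epochs in total.

# COCO defines 17 keypoints; all 17 output channels are used both as
# supervision targets and at inference time.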
channel_cfg = dict(
    num_output_channels=17,
    dataset_joints=17,
    dataset_channel=[
        [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16],
    ],
    inference_channel=[
        0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16
    ])
# model settings
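# Top-down pose estimator: an HRNet-W48 backbone (the high-resolution branch
# carries 48 channels throughout), followed by a heatmap head that is a single
# 1x1 convolution (num_deconv_layers=0) predicting one heatmap per keypoint,
# trained with a target-weighted MSE loss. At test time, heatmaps from the
# horizontally flipped image are averaged in (flip_test=True).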
model = dict(
    type='TopDown',
    pretrained='https://download.openmmlab.com/mmpose/'
    'pretrain_models/hrnet_w48-8ef0771d.pth',
    backbone=dict(
        type='HRNet',
        in_channels=3,
        extra=dict(
            stage1=dict(
                num_modules=1,
                num_branches=1,
                block='BOTTLENECK',
                num_blocks=(4, ),
                num_channels=(64, )),
            stage2=dict(
                num_modules=1,
                num_branches=2,
                block='BASIC',
                num_blocks=(4, 4),
                num_channels=(48, 96)),
            stage3=dict(
                num_modules=4,
                num_branches=3,
                block='BASIC',
                num_blocks=(4, 4, 4),
                num_channels=(48, 96, 192)),
            stage4=dict(
                num_modules=3,
                num_branches=4,
                block='BASIC',
                num_blocks=(4, 4, 4, 4),
                num_channels=(48, 96, 192, 384))),
    ),
    keypoint_head=dict(
        type='TopdownHeatmapSimpleHead',
        in_channels=48,
        out_channels=channel_cfg['num_output_channels'],
        num_deconv_layers=0,
        extra=dict(final_conv_kernel=1, ),
        loss_keypoint=dict(type='JointsMSELoss', use_target_weight=True)),
    train_cfg=dict(),
    test_cfg=dict(
        flip_test=True,
        post_process='default',
        shift_heatmap=True,
        modulate_kernel=11))
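# Data settings: 192x256 (width x height) input crops produce 48x64 heatmaps,
# i.e. an output stride of 4. Because use_gt_bbox=False, evaluation uses the
# person detections in bbox_file instead of ground-truth boxes.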
data_cfg = dict(
    image_size=[192, 256],
    heatmap_size=[48, 64],
    num_output_channels=channel_cfg['num_output_channels'],
    num_joints=channel_cfg['dataset_joints'],
    dataset_channel=channel_cfg['dataset_channel'],
    inference_channel=channel_cfg['inference_channel'],
    soft_nms=False,
    nms_thr=1.0,
    oks_thr=0.9,
    vis_thr=0.2,
    use_gt_bbox=False,
    det_bbox_thr=0.0,
    bbox_file='data/coco/person_detection_results/'
    'COCO_val2017_detections_AP_H_56_person.json',
)
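# Training pipeline: pad the person box by a factor of 1.25, randomly shift
# its center, flip with probability 0.5, apply a half-body transform with
# probability 0.3, add random scale (up to +/-50%) and rotation jitter
# (rot_factor=40), normalize with ImageNet statistics, and render Gaussian
# heatmap targets with sigma=2.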
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='TopDownGetBboxCenterScale', padding=1.25),
    dict(type='TopDownRandomShiftBboxCenter', shift_factor=0.16, prob=0.3),
    dict(type='TopDownRandomFlip', flip_prob=0.5),
    dict(
        type='TopDownHalfBodyTransform',
        num_joints_half_body=8,
        prob_half_body=0.3),
    dict(
        type='TopDownGetRandomScaleRotation', rot_factor=40, scale_factor=0.5),
    dict(type='TopDownAffine'),
    dict(type='ToTensor'),
    dict(
        type='NormalizeTensor',
        mean=[0.485, 0.456, 0.406],
        std=[0.229, 0.224, 0.225]),
    dict(type='TopDownGenerateTarget', sigma=2),
    dict(
        type='Collect',
        keys=['img', 'target', 'target_weight'],
        meta_keys=[
            'image_file', 'joints_3d', 'joints_3d_visible', 'center', 'scale',
            'rotation', 'bbox_score', 'flip_pairs'
        ]),
]
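# Validation pipeline: same box padding, affine crop and normalization as
# training, but without any augmentation; only the image is collected since
# no targets are needed.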
val_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='TopDownGetBboxCenterScale', padding=1.25),
    dict(type='TopDownAffine'),
    dict(type='ToTensor'),
    dict(
        type='NormalizeTensor',
        mean=[0.485, 0.456, 0.406],
        std=[0.229, 0.224, 0.225]),
    dict(
        type='Collect',
        keys=['img'],
        meta_keys=[
            'image_file', 'center', 'scale', 'rotation', 'bbox_score',
            'flip_pairs'
        ]),
]
test_pipeline = val_pipeline
data_root = 'data/coco'
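# Dataloaders: 32 samples per GPU with 2 workers; training uses COCO
# train2017 while validation and testing use the val2017 person-keypoint
# annotations. dataset_info is substituted from the COCO base config.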
data = dict(
    samples_per_gpu=32,
    workers_per_gpu=2,
    val_dataloader=dict(samples_per_gpu=32),
    test_dataloader=dict(samples_per_gpu=32),
    train=dict(
        type='TopDownCocoDataset',
        ann_file=f'{data_root}/annotations/person_keypoints_train2017.json',
        img_prefix=f'{data_root}/train2017/',
        data_cfg=data_cfg,
        pipeline=train_pipeline,
        dataset_info={{_base_.dataset_info}}),
    val=dict(
        type='TopDownCocoDataset',
        ann_file=f'{data_root}/annotations/person_keypoints_val2017.json',
        img_prefix=f'{data_root}/val2017/',
        data_cfg=data_cfg,
        pipeline=val_pipeline,
        dataset_info={{_base_.dataset_info}}),
    test=dict(
        type='TopDownCocoDataset',
        ann_file=f'{data_root}/annotations/person_keypoints_val2017.json',
        img_prefix=f'{data_root}/val2017/',
        data_cfg=data_cfg,
        pipeline=test_pipeline,
        dataset_info={{_base_.dataset_info}}),
)
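# ---------------------------------------------------------------------------
# Usage sketch (kept as comments so it is not executed when the config is
# loaded): one way to consume this file with the MMPose 0.x / mmcv 1.x APIs.
# The config path below is an assumption about where the file sits in your
# checkout.
#
#   from mmcv import Config
#   from mmpose.models import build_posenet
#   from mmpose.datasets import build_dataset
#
#   cfg = Config.fromfile(
#       'configs/body/2d_kpt_sview_rgb_img/topdown_heatmap/coco/'
#       'hrnet_w48_coco_256x192.py')
#   model = build_posenet(cfg.model)            # TopDown: HRNet-W48 + heatmap head
#   train_dataset = build_dataset(cfg.data.train)
#
# Training is typically launched from the repository root with:
#   python tools/train.py configs/body/2d_kpt_sview_rgb_img/topdown_heatmap/coco/hrnet_w48_coco_256x192.py
# ---------------------------------------------------------------------------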