@inproceedings{11c0e2739b5840c99b9109c5c66e5c7a,
title = "3D human pose estimation in video for human-computer/robot interaction",
abstract = "3D human pose estimation is widely used in motion capture, human-computer interaction, virtual character driving and other fields. However, current 3D human pose estimation still suffers from depth ambiguity and self-occlusion. This paper proposes a video-based human pose estimation network that lifts 2D poses to 3D using a transformer, an architecture widely used in natural language processing, together with a graph convolutional network (GCN). The transformer captures sequence features, while the graph convolution extracts features among local joints, yielding more accurate 3D pose coordinates. In addition, we apply the proposed 3D pose estimation network to animated character motion generation and robot motion following, and design two human-computer/robot interaction (HCI/HRI) application systems. The proposed 3D human pose estimation network is evaluated on the Human3.6M dataset and outperforms state-of-the-art models. Both HCI/HRI systems work quickly and accurately with the proposed 3D human pose estimation method.",
keywords = "Human Pose Estimation, Human-Computer Interaction, Human-Robot Interaction, Deep Learning",
author = "Rongtian Huo and Qing Gao and Jing Qi and Zhaojie Ju",
year = "2023",
month = oct,
day = "13",
doi = "10.1007/978-981-99-6498-7_16",
language = "English",
isbn = "9789819964970",
series = "Lecture Notes in Computer Science",
publisher = "Springer",
pages = "176–187",
editor = "Yang, {Huayong} and Liu, {Honghai} and Zou, {Jun} and Yin, {Zhouping} and Liu, {Lianqing} and Yang, {Geng} and Ouyang, {Xiaoping} and Wang, {Zhiyong}",
booktitle = "Intelligent Robotics and Applications",
note = "International Conference on Intelligent Robotics and Applications; Conference date: 05-07-2023 through 07-07-2023",
}