Abstract
This paper presents a novel solution for underwater intention recognition that simultaneously detects head motion and throat vibration, enhancing multi-modal human-robot interactions for underwater diving. The system pairs with an underwater supernumerary robotic limb (superlimb), providing propulsion assistance to reduce the diver’s physical load and mental fatigue. An inertial measurement unit monitors head motion, while a throat microphone captures vocal vibrations. Learning algorithms process these signals to accurately interpret the diver’s intentions and map them to the superlimb for posture management. The system features a compact design optimized for diving scenarios and includes a multi-modal, real-time classification algorithm to distinguish various head motions and vocal signals. By collecting and analyzing underwater throat vibration data, the study demonstrates the feasibility of this approach, enabling continuous motion commands for enhanced diving assistance. The results show that the head motion recognition component of the system achieved a high classification accuracy of 94%, and throat vibration classification reached 86% accuracy on land and 89% underwater for various purposes.
Links
BibTeX (Download)
@online{Zhang2024MultiModal,
  title        = {Multi-Modal Intention Recognition Combining Head Motion and Throat Vibration for Underwater Superlimbs},
  author       = {Zhang, Rongzheng and Qiu, Wanghongjie and Qiu, Jianuo and Guo, Yuqin and Dong, Chengxiao and Zhang, Tuo and Yi, Juan and Song, Chaoyang and Asada, Harry and Wan, Fang},
  date         = {2024-09-01},
  journaltitle = {IEEE Transactions on Automation Science and Engineering},
  abstract     = {This paper presents a novel solution for underwater intention recognition that simultaneously detects head motion and throat vibration, enhancing multi-modal human-robot interactions for underwater diving. The system pairs with an underwater supernumerary robotic limb (superlimb), providing propulsion assistance to reduce the diver’s physical load and mental fatigue. An inertial measurement unit monitors head motion, while a throat microphone captures vocal vibrations. Learning algorithms process these signals to accurately interpret the diver’s intentions and map them to the superlimb for posture management. The system features a compact design optimized for diving scenarios and includes a multi-modal, real-time classification algorithm to distinguish various head motions and vocal signals. By collecting and analyzing underwater throat vibration data, the study demonstrates the feasibility of this approach, enabling continuous motion commands for enhanced diving assistance. The results show that the head motion recognition component of the system achieved a high classification accuracy of 94%, and throat vibration classification reached 86% accuracy on land and 89% underwater for various purposes.},
  note         = {Invited Submission to IEEE Transactions on Automation Science and Engineering},
  keywords     = {Co-Author, Under Review},
  pubstate     = {forthcoming},
  tppubtype    = {online},
}