




Working Papers
Fang Wan, Chaoyang Song
Multi-Layered Reasoning from a Single Viewpoint for Learning See-Through Grasping Working paper Forthcoming
Forthcoming.
Abstract | Links | BibTeX | Tags: Authorship - Corresponding, Status - Under Review
@workingpaper{Wan2024SeeThruFinger,
  title     = {Multi-Layered Reasoning from a Single Viewpoint for Learning See-Through Grasping},
  author    = {Fang Wan and Chaoyang Song},
  doi       = {10.48550/arXiv.2312.09822},
  url       = {https://github.com/ancorasir/SeeThruFinger},
  year      = {2026},
  date      = {2026-01-22},
  urldate   = {2026-01-22},
  abstract  = {Sensory substitution enables biological systems to perceive stimuli that are typically perceived by another organ, which is inspirational for physical agents. Multimodal perception of intrinsic and extrinsic interactions is critical in building an intelligent robot that learns. This study presents a Vision-based See-Through Perception (VBSeeThruP) architecture that simultaneously perceives multiple intrinsic and extrinsic modalities from a single visual input, in a markerless manner, all packed into a soft robotic finger using the Soft Polyhedral Network design. It is generally applicable to miniature vision systems placed beneath deformable networks with a see-through design, capturing real-time images of the network's physical interactions induced by contact-based events, overlaid on the visual scene of the external environment, as demonstrated in the ablation study. We present the VBSeeThruP's capability for learning reactive grasping without using external cameras or dedicated force and torque sensors on the fingertips. Using the inpainted scene and the deformation mask, we further demonstrate the multimodal performance of the VBSeeThruP architecture to simultaneously achieve various perceptions, including but not limited to scene inpainting, object detection, depth sensing, scene segmentation, masked deformation tracking, 6D force/torque sensing, and contact event detection, all within a single sensory input from the in-finger vision markerlessly.},
  keywords  = {Authorship - Corresponding, Status - Under Review},
  pubstate  = {forthcoming},
  tppubtype = {workingpaper}
}
Under Review
Guojing Huang, Guangyi Huang, Junwei Lv, Ronghan Xu, Fang Wan, Chaoyang Song
Parametrically Reconfigurable Pseudo-Open-Chain for Overconstrained Robotic Limbs Online Forthcoming
Forthcoming, (Submitted to IEEE Robotics and Automation Letters).
Abstract | BibTeX | Tags: Authorship - Corresponding, Status - Under Review
@online{Huang2026ParametricallyReconfigurable,
  title     = {Parametrically Reconfigurable Pseudo-Open-Chain for Overconstrained Robotic Limbs},
  author    = {Guojing Huang and Guangyi Huang and Junwei Lv and Ronghan Xu and Fang Wan and Chaoyang Song},
  year      = {2026},
  date      = {2026-03-04},
  abstract  = {Contemporary quadrupedal locomotion relies predominantly on serial open-chain or planar closed-chain limb architectures, which often necessitate a compromise between structural stiffness and kinematic versatility. While overconstrained spatial linkages offer unique theoretical advantages regarding stiffness-to-weight ratios and spatial coupling, they have historically lacked the compact form factors required for agile robotics. To address this gap, we propose a parametrically reconfigurable pseudo-open-chain limb derived from the overconstrained Yu \& Baker six-bar linkage. By synthesizing a modular design methodology, we demonstrate the ability to rapidly reconfigure the limb into planar and spherical variants through parametric adjustment. We employ Deep Reinforcement Learning to train a quadruped equipped with these limbs to achieve omnidirectional locomotion on flat ground under arbitrary velocity commands. Comparative empirical analysis reveals that the proposed overconstrained architecture yields superior energy efficiency, registering a mean Cost of Transport of 0.3864 compared to 0.6700 for the planar equivalent, and exhibits enhanced velocity tracking during lateral maneuvers due to inherent spatial coupling. This work establishes a comprehensive pipeline from kinematic synthesis to experimental prototyping and validates the efficacy of high-dimensional overconstrained mechanisms in next-generation robotic limb design.},
  note      = {Submitted to IEEE Robotics and Automation Letters},
  keywords  = {Authorship - Corresponding, Status - Under Review},
  pubstate  = {forthcoming},
  tppubtype = {online}
}
Hao Tian, Ruozhou Wang, Feilong Zhou, Chaoyang Song, Jia Pan, Weihua Ou
Object Retrieval in Cluttered Scenes using Two-Stage Graph Neural Networks Online Forthcoming
Forthcoming, (Submitted to IEEE Robotics and Automation Letters).
Abstract | BibTeX | Tags: Authorship - Co-Author, Status - Under Review
@online{Tian2026ObjectRetrievalb,
  author    = {Hao Tian and Ruozhou Wang and Feilong Zhou and Chaoyang Song and Jia Pan and Weihua Ou},
  title     = {Object Retrieval in Cluttered Scenes using Two-Stage Graph Neural Networks},
  year      = {2026},
  date      = {2026-02-10},
  abstract  = {In this study, we propose a novel object retrieval framework in cluttered environments based on a two-stage Graph Neural Network (GNN) architecture. The proposed method leverages a Traversability Graph (T-graph) to represent the spatial layout of objects and their movable relationships, enabling efficient planning for retrieving a target object obstructed by multiple obstacles. In the first stage, an Edge Prediction Network (EdgeNet) predicts feasible connections between object pairs to construct the T-graph from spatial distributions. In the second stage, a Path Prediction Network (PathNet) estimates the optimal retrieval sequence on the predicted T-graph, indicating the order of object removals required to access the target. To address execution failures caused by collisions or kinematic constraints during manipulation, we integrate the learned models with a heuristic Lifelong Planning A* (LPA*) algorithm, facilitating efficient online replanning. Extensive simulation and physical robot experiments demonstrate that our approach significantly improves retrieval efficiency and success rate compared with existing search-based and learning-based methods, particularly in highly cluttered scenes with numerous objects. The results verify that the proposed two-stage GNN framework provides a scalable and data-driven solution for robotic object retrieval in complex, confined environments.},
  note      = {Submitted to IEEE Robotics and Automation Letters},
  keywords  = {Authorship - Co-Author, Status - Under Review},
  pubstate  = {forthcoming},
  tppubtype = {online}
}
optimal retrieval sequence on the predicted T-graph, indicating the order of object removals required to access the target. To address execution failures caused by collisions or kinematic constraints during manipulation, we integrate the learned models with a heuristic Lifelong Planning A* (LPA*) algorithm, facilitating efficient online replanning. Extensive simulation and physical robot experiments demonstrate that our approach significantly improves retrieval efficiency and success rate compared with existing search-based and learning-based methods, particularly in highly cluttered scenes with numerous objects. The results verify that the proposed two-stage GNN framework provides a scalable and data-driven solution for robotic object retrieval in complex, confined environments.
Tuo Zhang, Wanghongjie Qiu, Fang Wan, Chaoyang Song
The Wearable Dilemma in Life-Critical Hazardous Missions Online Forthcoming
Forthcoming, (Submitted to Wearable Technologies).
Abstract | BibTeX | Tags: Authorship - Corresponding, Status - Under Review
@online{Zhang2026WearableDilemma,
  title     = {The Wearable Dilemma in Life-Critical Hazardous Missions},
  author    = {Tuo Zhang and Wanghongjie Qiu and Fang Wan and Chaoyang Song},
  year      = {2026},
  date      = {2026-01-15},
  abstract  = {Life-critical hazardous missions, such as those in space exploration, deep-sea operations, pandemic response, and nuclear emergencies, depend on the unique cognitive and adaptive capabilities of human operators. However, these individuals face a significant ``wearable dilemma,'' where the protective systems essential for survival invariably compromise their mobility, endurance, and situational awareness, creating a fundamental trade-off between safety and performance. This paper analyzes this dilemma by characterizing life-critical missions and examining mission archetypes to identify the limitations of current wearable protective systems. Through case studies of spacesuits, atmospheric diving suits, hazmat suits, and personal protective equipment, the manuscript systematically evaluates inherent trade-offs such as protection versus mobility, weight versus endurance, complexity versus reliability, and isolation versus situational awareness. To resolve these challenges, the paper posits that emerging wearable robotics, including powered exoskeletons and supernumerary robotic limbs, offer a promising solution to augment human strength and endurance. Building on this, the manuscript introduces the conceptual framework of the SuperSuit, a novel paradigm for a next-generation intelligent wearable system that seamlessly integrates protective gear with robotic technologies. The SuperSuit is envisioned to enhance the operator's physical functions and mission execution capacity through intuitive, user-centric interfaces and shared control strategies, ultimately aiming to transform protective gear into an intelligent extension of the human body that amplifies unique human skills while mitigating operational risks.},
  note      = {Submitted to Wearable Technologies},
  keywords  = {Authorship - Corresponding, Status - Under Review},
  pubstate  = {forthcoming},
  tppubtype = {online}
}

Hao Tian, Ruozhou Wang, Feilong Zhou, Chaoyang Song, Jia Pan, Weihua Ou
Object Retrieval in Cluttered Scenes using Two-Stage Graph Neural Networks Online Forthcoming
Forthcoming, (Submitted to IEEE Robotics and Automation Letters).
Abstract | BibTeX | Tags: Authorship - Co-Author, Status - Under Review
@online{Tian2026ObjectRetrieval,
  author    = {Hao Tian and Ruozhou Wang and Feilong Zhou and Chaoyang Song and Jia Pan and Weihua Ou},
  title     = {Object Retrieval in Cluttered Scenes using Two-Stage Graph Neural Networks},
  year      = {2025},
  date      = {2025-11-24},
  abstract  = {In this study, we propose a novel object retrieval framework in cluttered environments based on a two-stage Graph Neural Network (GNN) architecture. The proposed method leverages a Traversability Graph (T-graph) to represent the spatial layout of objects and their movable relationships, enabling efficient planning for retrieving a target object obstructed by multiple obstacles. In the first stage, an Edge Prediction Network (EdgeNet) predicts feasible connections between object pairs to construct the T-graph from spatial distributions. In the second stage, a Path Prediction Network (PathNet) estimates the optimal retrieval sequence on the predicted T-graph, indicating the order of object removals required to access the target. To address execution failures caused by collisions or kinematic constraints during manipulation, we integrate the learned models with a heuristic Lifelong Planning A* (LPA*) algorithm, facilitating efficient online replanning. Extensive simulation and physical robot experiments demonstrate that our approach significantly improves retrieval efficiency and success rate compared with existing graph-based and search-based methods, particularly in highly cluttered scenes with numerous objects. The results verify that the proposed two-stage GNN framework provides a scalable and data-driven solution for robotic object retrieval in complex, confined environments.},
  note      = {Submitted to IEEE Robotics and Automation Letters},
  keywords  = {Authorship - Co-Author, Status - Under Review},
  pubstate  = {forthcoming},
  tppubtype = {online}
}
cluttered environments based on a two-stage Graph Neural Network (GNN)
architecture. The proposed method leverages a Traversability Graph
(T-graph) to represent the spatial layout of objects and their movable
relationships, enabling efficient planning for retrieving a target
object obstructed by multiple obstacles. In the first stage, an Edge
Prediction Network (EdgeNet) predicts feasible connections between
object pairs to construct the T-graph from spatial distributions. In
the second stage, a Path Prediction Network (PathNet) estimates the
optimal retrieval sequence on the predicted T-graph, indicating the
order of object removals required to access the target. To address
execution failures caused by collisions or kinematic constraints during
manipulation, we integrate the learned models with a heuristic Lifelong
Planning A* (LPA*) algorithm, facilitating efficient online replanning.
Extensive simulation and physical robot experiments demonstrate that
our approach significantly improves retrieval efficiency and success
rate compared with existing graph-based and search-based methods,
particularly in highly cluttered scenes with numerous objects. The
results verify that the proposed two-stage GNN framework provides a
scalable and data-driven solution for robotic object retrieval in
complex, confined environments.
Haoran Sun, Bangchao Huang, Zishang Zhang, Junwei Lv, Guangyi Huang, Jiayi Yin, Shihao Feng, Ronghan Xu, Guojing Huang, Nuofan Qiu, Hua Chen, Wei Zhang, Fang Wan, Jia Pan, Chaoyang Song
Overconstrained Locomotion Online Forthcoming
Forthcoming, (Submitted to the Special Collection for ISRR24 in the International Journal of Robotics Research).
Abstract | Links | BibTeX | Tags: Authorship - Corresponding, Status - Under Review
@online{Sun2025OverconstrainedLocomotion,
  author    = {Haoran Sun and Bangchao Huang and Zishang Zhang and Junwei Lv and Guangyi Huang and Jiayi Yin and Shihao Feng and Ronghan Xu and Guojing Huang and Nuofan Qiu and Hua Chen and Wei Zhang and Fang Wan and Jia Pan and Chaoyang Song},
  title     = {Overconstrained Locomotion},
  url       = {https://github.com/ancorasir/OverconstrainedLocomotion},
  year      = {2025},
  date      = {2025-10-01},
  abstract  = {This paper presents a foundational study on the design, modeling, and control of a novel robotic limb that generates overconstrained locomotion. By utilizing a Bennett linkage for motion generation, the limb enables parametric reconfiguration between reptile- and mammal-inspired morphologies within a single quadrupedal platform. While the family of overconstrained linkages has solid theoretical foundations in spatial kinematics, it remains underexplored in dynamic robotics. This work establishes the morphological and performance advantages of Overconstrained Robotic Limbs (ORLs), which can transform into prevailing planar limbs. We develop a complete kinematic and dynamic model of the ORL and apply Model Predictive Control (MPC) to validate its performance. Hardware experiments on a single-limb prototype validate the core mechanical design and actuation principles. Furthermore, extensive high-fidelity simulations of the complete quadruped confirm the efficacy of our control framework for a range of dynamic locomotion tasks over challenging terrains. Besides the clear advantage in lateral mobility, our comparative analysis reveals the ORL's superior energy efficiency over planar limbs when considering varying foothold distances and speeds. From an evolutionary biology perspective, these findings provide a quantitative insight into the distinctive biomechanical trade-offs in limb design, presenting the first comprehensive analysis of how ORLs outperform planar designs in simulated dynamic locomotion.},
  note      = {Submitted to the Special Collection for ISRR24 in the International Journal of Robotics Research},
  keywords  = {Authorship - Corresponding, Status - Under Review},
  pubstate  = {forthcoming},
  tppubtype = {online}
}
Sicong Liu, Jianhui Liu, Fang Chen, Wenjian Yang, Juan Yi, Yu Zheng, Zheng Wang, Wanchao Chi, Chaoyang Song
A Biomimetic Vertebraic Soft Robotic Tail for High-Speed, High-Force Dynamic Maneuvering Online Forthcoming
Forthcoming, (Submitted to IEEE Transactions on Robotics).
Abstract | Links | BibTeX | Tags: Authorship - Corresponding, Status - Under Review
@online{Liu2025ABiomimetic,
  author    = {Sicong Liu and Jianhui Liu and Fang Chen and Wenjian Yang and Juan Yi and Yu Zheng and Zheng Wang and Wanchao Chi and Chaoyang Song},
  title     = {A Biomimetic Vertebraic Soft Robotic Tail for High-Speed, High-Force Dynamic Maneuvering},
  doi       = {10.48550/arXiv.2509.20219},
  year      = {2025},
  date      = {2025-09-23},
  abstract  = {Robotic tails can enhance the stability and maneuverability of mobile robots, but current designs face a trade-off between the power of rigid systems and the safety of soft ones. Rigid tails generate large inertial effects but pose risks in unstructured environments, while soft tails lack sufficient speed and force. We present a Biomimetic Vertebraic Soft Robotic (BVSR) tail that resolves this challenge through a compliant pneumatic body reinforced by a passively jointed vertebral column inspired by musculoskeletal structures. This hybrid design decouples load-bearing and actuation, enabling high-pressure actuation (up to 6 bar) for superior dynamics while preserving compliance. A dedicated kinematic and dynamic model incorporating vertebral constraints is developed and validated experimentally. The BVSR tail achieves angular velocities above 670°/s and generates inertial forces and torques up to 5.58 N and 1.21 Nm, indicating over 200% improvement compared to non-vertebraic designs. Demonstrations on rapid cart stabilization, obstacle negotiation, high-speed steering, and quadruped integration confirm its versatility and practical utility for agile robotic platforms.},
  note      = {Submitted to IEEE Transactions on Robotics},
  keywords  = {Authorship - Corresponding, Status - Under Review},
  pubstate  = {forthcoming},
  tppubtype = {online}
}
Journal Articles
Sorry, no publications matched your criteria.
Conference Papers
Sorry, no publications matched your criteria.
Conference Workshops & Extended Abstracts
Sorry, no publications matched your criteria.
Doctoral Thesis
Sorry, no publications matched your criteria.


