Journal Articles
Haokun Wang, Xiaobo Liu, Nuofan Qiu, Ning Guo, Fang Wan, Chaoyang Song
DeepClaw 2.0: A Data Collection Platform for Learning Human Manipulation Journal Article
In: Frontiers in Robotics and AI, vol. 9, pp. 787291, 2022.
@article{Wang2022DeepClaw2.0,
title = {DeepClaw 2.0: A Data Collection Platform for Learning Human Manipulation},
author = {Haokun Wang and Xiaobo Liu and Nuofan Qiu and Ning Guo and Fang Wan and Chaoyang Song},
issuetitle = {Section Computational Intelligence in Robotics},
doi = {10.3389/frobt.2022.787291},
year = {2022},
date = {2022-03-15},
urldate = {2022-03-15},
journal = {Frontiers in Robotics and AI},
volume = {9},
pages = {787291},
abstract = {Besides direct interaction, human hands are also skilled at using tools to manipulate objects for typical life and work tasks. This paper proposes DeepClaw 2.0 as a low-cost, open-sourced data collection platform for learning human manipulation. We use an RGB-D camera to visually track the motion and deformation of a pair of soft finger networks on a modified kitchen tong operated by human teachers. These fingers can be easily integrated with robotic grippers to bridge the structural mismatch between humans and robots during learning. The deformation of the soft finger networks, which reveals tactile information in contact-rich manipulation, is captured passively. We collected a comprehensive sample dataset involving five human demonstrators in ten manipulation tasks with five trials per task. We also developed an intuitive interface that converts the raw sensor data into state-action data for imitation learning problems. For learning-by-demonstration problems, we further demonstrated our dataset’s potential by using real robotic hardware to collect joint actuation data, or by using a simulated environment when access to the hardware is limited.},
keywords = {Authorship - Corresponding, JCR Q2, Jour - Front. Robot. AI. (FROBT)},
pubstate = {published},
tppubtype = {article}
}
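
The DeepClaw 2.0 abstract above describes converting raw sensor recordings into state-action data for imitation learning. The sketch below illustrates one plausible form such a conversion could take; the frame fields ("finger_pose", "tong_deformation") and the finite-difference action are illustrative assumptions, not the platform's actual schema or API.

# Minimal sketch: packaging raw per-frame sensor readings into
# (state, action) pairs for imitation learning. Field names such as
# "finger_pose" and "tong_deformation" are hypothetical placeholders,
# not the actual DeepClaw 2.0 schema.
import numpy as np

def frames_to_state_action(frames):
    """Convert a time-ordered list of raw sensor frames into
    (state_t, action_t) pairs, treating the change in tracked finger
    pose between consecutive frames as the demonstrated action."""
    pairs = []
    for prev, curr in zip(frames[:-1], frames[1:]):
        state = np.concatenate([prev["finger_pose"],        # tracked finger pose
                                prev["tong_deformation"]])  # passive tactile proxy
        action = curr["finger_pose"] - prev["finger_pose"]  # finite-difference motion
        pairs.append((state, action))
    return pairs

# Usage with dummy data standing in for one recorded trial:
rng = np.random.default_rng(0)
frames = [{"finger_pose": rng.normal(size=6),
           "tong_deformation": rng.normal(size=4)} for _ in range(10)]
dataset = frames_to_state_action(frames)
print(len(dataset), dataset[0][0].shape, dataset[0][1].shape)  # 9 (10,) (6,)

Pairing consecutive frames this way is the usual convention for behavior cloning: the state is what the demonstrator observed, and the action is the motion that followed.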
Haiyang Jiang, Xudong Han, Yonglin Jing, Ning Guo, Fang Wan, Chaoyang Song
Rigid-Soft Interactive Design of a Lobster-Inspired Finger Surface for Enhanced Grasping Underwater Journal Article
In: Frontiers in Robotics and AI, vol. 8, pp. 787187, 2021.
@article{Jiang2021RigidSoft,
title = {Rigid-Soft Interactive Design of a Lobster-Inspired Finger Surface for Enhanced Grasping Underwater},
author = {Haiyang Jiang and Xudong Han and Yonglin Jing and Ning Guo and Fang Wan and Chaoyang Song},
doi = {10.3389/frobt.2021.787187},
year = {2021},
date = {2021-12-22},
urldate = {2021-12-22},
issuetitle = {Section Soft Robotics},
journal = {Frontiers in Robotics and AI},
volume = {8},
pages = {787187},
abstract = {Bio-inspirations from soft-bodied animals provide a rich design source for soft robots, yet limited literature has explored the potential enhancement from rigid-bodied ones. This paper draws inspiration from the tooth profiles on the rigid claws of the Boston Lobster, aiming at an enhanced soft finger surface for underwater grasping through an iterative design process. Lobsters distinguish themselves from other marine animals with a pair of claws capable of dexterous object manipulation both on land and underwater. We proposed a 3-stage design iteration process that involves raw imitation, design parametric exploration, and bionic parametric exploitation of the original tooth profiles on the claws of the Boston Lobster. Eventually, 7 finger surface designs were generated and fabricated with soft silicone. We validated each design stage through vision-based robotic grasping attempts against selected objects from the Evolved Grasping Analysis Dataset (EGAD). Over 14,000 grasp attempts were accumulated on land (71.4%) and underwater (28.6%), where we selected the optimal design through an on-land experiment and further tested its capability underwater. As a result, we observed up to an 18.2% improvement in grasping success rate from the resultant bionic finger surface design, compared with fingers without the surface, and up to a 10.4% improvement compared with the validation design from the previous literature. Results from this paper are consistent with bio-research dating back to 1911, showing the value of bionics. The results indicate the capability and competence of the optimal bionic finger surface design in an amphibious environment, which can contribute to future research in enhanced underwater grasping using soft robots.},
keywords = {Authorship - Corresponding, JCR Q2, Jour - Front. Robot. AI. (FROBT)},
pubstate = {published},
tppubtype = {article}
}
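
The lobster-finger entry above compares grasping success rates across surface designs. As a back-of-envelope illustration of that comparison, the snippet below computes an improvement from made-up attempt counts; only the metric is mirrored, the counts are hypothetical, and whether the paper's 18.2% figure is absolute or relative is not stated in the abstract.

# Illustrative success-rate comparison from grasp-attempt counts.
# The counts below are invented for demonstration; only the formula
# mirrors the kind of comparison reported in the entry above.
def success_rate(successes, attempts):
    return successes / attempts

baseline = success_rate(560, 1000)  # hypothetical: fingers without the surface
bionic = success_rate(742, 1000)    # hypothetical: optimal bionic surface
print(f"improvement: {(bionic - baseline) * 100:.1f} percentage points")  # 18.2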
Fang Wan, Chaoyang Song
Flange-Based Hand-Eye Calibration Using a 3D Camera with High Resolution, Accuracy, and Frame Rate Journal Article
In: Frontiers in Robotics and AI, vol. 7, pp. 65, 2020.
@article{Wan2020FlangeBased,
title = {Flange-Based Hand-Eye Calibration Using a 3D Camera with High Resolution, Accuracy, and Frame Rate},
author = {Fang Wan and Chaoyang Song},
doi = {10.3389/frobt.2020.00065},
year = {2020},
date = {2020-05-29},
urldate = {2020-05-29},
journal = {Frontiers in Robotics and AI},
volume = {7},
pages = {65},
abstract = {Point cloud data provides three-dimensional (3D) measurement of geometric details in the physical world, which relies heavily on the quality of the machine vision system. In this paper, we explore the potential of a 3D scanner of high quality (15 million points per second), accuracy (up to 0.150 mm), and frame rate (up to 20 FPS) during static and dynamic measurements of the robot flange for direct hand-eye calibration and trajectory error tracking. With the availability of high-quality point cloud data, we can exploit the standardized geometric features on the robot flange for 3D measurement, which are directly accessible for hand-eye calibration problems. We also tested the proposed flange-based calibration method in a dynamic setting, capturing point cloud data at a high frame rate. We found that our proposed method works robustly even in dynamic environments, enabling versatile hand-eye calibration during motion. Furthermore, capturing high-quality point cloud data in real time opens new doors for the use of 3D scanners, capable of detecting sensitive anomalies of refined details even in motion trajectories. Code and sample data for this calibration method are provided on GitHub (https://github.com/ancorasir/flange_handeye_calibration).},
keywords = {Authorship - Corresponding, JCR Q2, Jour - Front. Robot. AI. (FROBT)},
pubstate = {published},
tppubtype = {article}
}
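
The flange-based calibration abstract above relies on recovering a rigid transform from 3D measurements of the flange. As a generic building block, not the paper's exact pipeline (the linked GitHub repository holds that), here is a minimal sketch of the standard Kabsch/SVD registration step that aligns corresponding points between the camera frame and the robot base frame.

# Generic rigid-registration step often used inside hand-eye calibration:
# recover (R, t) so that R @ p_cam + t matches p_base in the least-squares
# sense, via the Kabsch/SVD method. Illustrative only.
import numpy as np

def rigid_transform(points_cam, points_base):
    """Least-squares rigid transform mapping camera-frame points onto
    the corresponding robot-base-frame points."""
    c_cam = points_cam.mean(axis=0)
    c_base = points_base.mean(axis=0)
    H = (points_cam - c_cam).T @ (points_base - c_base)  # cross-covariance
    U, _, Vt = np.linalg.svd(H)
    d = np.sign(np.linalg.det(Vt.T @ U.T))               # guard against reflection
    R = Vt.T @ np.diag([1.0, 1.0, d]) @ U.T
    t = c_base - R @ c_cam
    return R, t

# Usage with synthetic flange points and a known ground-truth transform:
rng = np.random.default_rng(1)
p_cam = rng.normal(size=(8, 3))
angle = np.pi / 6
R_true = np.array([[np.cos(angle), -np.sin(angle), 0.0],
                   [np.sin(angle),  np.cos(angle), 0.0],
                   [0.0, 0.0, 1.0]])
t_true = np.array([0.1, -0.2, 0.5])
p_base = p_cam @ R_true.T + t_true
R_est, t_est = rigid_transform(p_cam, p_base)
print(np.allclose(R_est, R_true), np.allclose(t_est, t_true))  # True True

The sign correction on the last singular direction ensures the result is a proper rotation rather than a reflection, a standard safeguard in this construction.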
Fang Wan, Chaoyang Song
A Neural Network with Logical Reasoning based on Auxiliary Inputs Journal Article
In: Frontiers in Robotics and AI, vol. 5, pp. 86, 2018.
@article{Wan2018ANeural,
title = {A Neural Network with Logical Reasoning based on Auxiliary Inputs},
author = {Fang Wan and Chaoyang Song},
issuetitle = {Section Computational Intelligence in Robotics},
doi = {10.3389/frobt.2018.00086},
year = {2018},
date = {2018-07-30},
urldate = {2018-07-30},
journal = {Frontiers in Robotics and AI},
volume = {5},
pages = {86},
abstract = {This paper describes a neural network design using auxiliary inputs, namely indicators, that act as hints to explain the predicted outcome through logical reasoning, mimicking the human behavior of deductive reasoning. Besides the original network input and output, we add an auxiliary input that reflects the specific logic of the data to formulate a reasoning process for cross-validation. We found that one can design either meaningful or even meaningless indicators as such auxiliary inputs, which then serve as the basis of reasoning to explain the predicted outputs. As a result, one can formulate different reasonings to explain the predicted results by designing different sets of auxiliary inputs, without loss of trustworthiness of the outcome. This is similar to the human explanation process, where one can explain the same observation from different perspectives with reasons. We demonstrate our network concept using the MNIST data with different sets of auxiliary inputs, from which a series of design guidelines are derived. We then validated our results using a set of images taken from a robotic grasping platform. We found that our network enhanced the last 1–2% of the prediction accuracy while eliminating questionable predictions with self-conflicting logic. Our network with auxiliary inputs can be applied to future robotic detection problems such as autonomous object grasping, where logical reasoning can be introduced to optimize robotic learning.},
keywords = {Authorship - Corresponding, JCR Q2, Jour - Front. Robot. AI. (FROBT)},
pubstate = {published},
tppubtype = {article}
}
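
The last entry describes cross-checking a network's main prediction against an auxiliary indicator and rejecting self-conflicting outputs. The sketch below captures that filtering idea for MNIST with digit parity as the indicator; parity is a hypothetical choice here, not necessarily the indicator the paper uses, and the random logits stand in for a trained network's outputs.

# Minimal sketch of the cross-validation idea: a model predicts both the
# main label (an MNIST digit) and an auxiliary indicator (here, parity,
# a hypothetical choice of indicator). Predictions whose digit and
# indicator disagree are flagged as self-conflicting and rejected.
import numpy as np

def filter_self_conflicting(digit_logits, parity_logits):
    """Keep only predictions where the predicted digit's parity matches
    the separately predicted parity indicator."""
    digits = digit_logits.argmax(axis=1)        # predicted class 0-9
    parity_pred = parity_logits.argmax(axis=1)  # 0 = even, 1 = odd
    consistent = (digits % 2) == parity_pred    # logical cross-check
    return digits, consistent

# Usage with random logits standing in for a trained network's outputs:
rng = np.random.default_rng(2)
digits, ok = filter_self_conflicting(rng.normal(size=(5, 10)),
                                     rng.normal(size=(5, 2)))
for d, flag in zip(digits, ok):
    print(d, "accepted" if flag else "rejected: conflicting logic")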