Abstract
Robot learning is widely accepted by academia and industry for its potential to transform autonomous robot control through machine learning. Inspired by widely used soft fingers on grasping, we propose a method of rigid-soft interactive learning, aiming at reducing the time of data collection. In this letter, we classify the interaction categories into Rigid-Rigid, Rigid-Soft, Soft-Rigid according to the interaction surface between grippers and target objects. We find experimental evidence that the interaction types between grippers and target objects play an essential role in the learning methods. We use soft, stuffed toys for training, instead of everyday objects, to reduce the integration complexity and computational burden. Although the stuffed toys are limited in reflecting the physics of finger-object interaction in real-life scenarios, we exploit such rigid-soft interaction by changing the gripper fingers to the soft ones when dealing with rigid, daily-life items such as the Yale-CMU-Berkeley (YCB) objects. With a small data collection of 5K picking attempts in total, our results suggest that such Rigid-Soft and Soft-Rigid interactions are transferable. Moreover, the combination of such interactions shows better performance on the grasping test. We also explore the effect of the grasp type on the learning method by changing the gripper configurations. We achieve the best grasping performance at 97.5% for easy YCB objects and 81.3% for difficult YCB objects while using a precise grasp with a two-soft-finger gripper to collect training data and power grasp with a four-soft-finger gripper to test the grasp policy.
Links
BibTeX (Download)
@article{Yang2020RigidSoft,
  title     = {{Rigid-Soft} Interactive Learning for Robust Grasping},
  author    = {Yang, Linhan and Wan, Fang and Wang, Haokun and Liu, Xiaobo and Liu, Yujia and Pan, Jia and Song, Chaoyang},
  journal   = {IEEE Robotics and Automation Letters},
  volume    = {5},
  number    = {2},
  issue     = {April},
  pages     = {1720--1727},
  year      = {2020},
  date      = {2020-04-01},
  urldate   = {2020-04-01},
  doi       = {10.1109/lra.2020.2969932},
  abstract  = {Robot learning is widely accepted by academia and industry with its potentials to transform autonomous robot control through machine learning. Inspired by widely used soft fingers on grasping, we propose a method of rigid-soft interactive learning, aiming at reducing the time of data collection. In this letter, we classify the interaction categories into Rigid-Rigid, Rigid-Soft, Soft-Rigid according to the interaction surface between grippers and target objects. We find experimental evidence that the interaction types between grippers and target objects play an essential role in the learning methods. We use soft, stuffed toys for training, instead of everyday objects, to reduce the integration complexity and computational burden. Although the stuffed toys are limited in reflecting the physics of finger-object interaction in real-life scenarios, we exploit such rigid-soft interaction by changing the gripper fingers to the soft ones when dealing with rigid, daily-life items such as the Yale-CMU-Berkeley (YCB) objects. With a small data collection of 5K picking attempts in total, our results suggest that such Rigid-Soft and Soft-Rigid interactions are transferable. Moreover, the combination of such interactions shows better performance on the grasping test. We also explore the effect of the grasp type on the learning method by changing the gripper configurations. We achieve the best grasping performance at 97.5\% for easy YCB objects and 81.3\% for difficult YCB objects while using a precise grasp with a two-soft-finger gripper to collect training data and power grasp with a four-soft-finger gripper to test the grasp policy.},
  keywords  = {Corresponding Author, Dual-Track, IEEE Robot. Autom. Lett. (RA-L), JCR Q2},
  pubstate  = {published},
  tppubtype = {article}
}