




Working Papers
Sorry, no publications matched your criteria.
Under Review
Sorry, no publications matched your criteria.
Journal Articles
Yujian Dong, Tianyu Wu, Chaoyang Song
Optimizing Robotic Manipulation with Decision-RWKV: A Recurrent Sequence Modeling Approach for Lifelong Learning Journal Article
In: Journal of Computing and Information Science in Engineering, vol. 25, no. 3, pp. 031004, 2025.
Abstract | Links | BibTeX | Tags: Authorship - Corresponding, JCR Q2, Jour - J. Comput. Inf. Sci. Eng. (JCISE)
@article{Dong2024OptimizingRobotic,
title = {Optimizing Robotic Manipulation with {Decision-RWKV}: A Recurrent Sequence Modeling Approach for Lifelong Learning},
author = {Yujian Dong and Tianyu Wu and Chaoyang Song},
url = {https://doi.org/10.48550/arXiv.2407.16306},
doi = {10.1115/1.4067524},
year = {2025},
date = {2025-01-27},
urldate = {2025-01-27},
journal = {Journal of Computing and Information Science in Engineering},
volume = {25},
number = {3},
pages = {031004},
abstract = {Models based on the Transformer architecture have seen widespread application across fields such as natural language processing (NLP), computer vision, and robotics, with large language models (LLMs) like ChatGPT revolutionizing machine understanding of human language and demonstrating impressive memory and reproduction capabilities. Traditional machine learning algorithms struggle with catastrophic forgetting, which is detrimental to the diverse and generalized abilities required for robotic deployment. This paper investigates the Receptance Weighted Key Value (RWKV) framework, known for its advanced capabilities in efficient and effective sequence modeling, integration with the decision transformer (DT), and experience replay architectures. It focuses on potential performance enhancements in sequence decision-making and lifelong robotic learning tasks. We introduce the Decision-RWKV (DRWKV) model and conduct extensive experiments using the D4RL database within the OpenAI Gym environment and on the D’Claw platform to assess the DRWKV model's performance in single-task tests and lifelong learning scenarios, showcasing its ability to handle multiple subtasks efficiently. The code for all algorithms, training, and image rendering in this study is open-sourced at https://github.com/ancorasir/DecisionRWKV. },
keywords = {Authorship - Corresponding, JCR Q2, Jour - J. Comput. Inf. Sci. Eng. (JCISE)},
pubstate = {published},
tppubtype = {article}
}
Linhan Yang, Bidan Huang, Qingbiao Li, Ya-Yen Tsai, Wang Wei Lee, Chaoyang Song, Jia Pan
TacGNN: Learning Tactile-based In-hand Manipulation with a Blind Robot using Hierarchical Graph Neural Network Journal Article
In: IEEE Robotics and Automation Letters, vol. 8, iss. June, no. 6, pp. 3605-3612, 2023.
Abstract | Links | BibTeX | Tags: Authorship - Co-Author, JCR Q2, Jour - IEEE Robot. Autom. Lett. (RA-L), Special - Dual-Track
@article{Yang2023TacGNN,
title = {{TacGNN}: Learning Tactile-based In-hand Manipulation with a Blind Robot using Hierarchical Graph Neural Network},
author = {Linhan Yang and Bidan Huang and Qingbiao Li and Ya-Yen Tsai and Wang Wei Lee and Chaoyang Song and Jia Pan},
doi = {10.1109/LRA.2023.3264759},
year = {2023},
date = {2023-04-05},
urldate = {2023-04-05},
journal = {IEEE Robotics and Automation Letters},
volume = {8},
number = {6},
issue = {June},
pages = {3605--3612},
abstract = {In this letter, we propose a novel framework for tactile-based dexterous manipulation learning with a blind anthropomorphic robotic hand, i.e. without visual sensing. First, object-related states were extracted from the raw tactile signals by a graph-based perception model - TacGNN. The resulting tactile features were then utilized in the policy learning of an in-hand manipulation task in the second stage. This method was examined by a Baoding ball task - simultaneously manipulating two spheres around each other by 180 degrees in hand. We conducted experiments on object states prediction and in-hand manipulation using a reinforcement learning algorithm (PPO). Results show that TacGNN is effective in predicting object-related states during manipulation by decreasing the RMSE of prediction to 0.096 cm comparing to other methods, such as MLP, CNN, and GCN. Finally, the robot hand could finish an in-hand manipulation task solely relying on the robotic own perception - tactile sensing and proprioception. In addition, our methods are tested on three tasks with different difficulty levels and transferred to the real robot without further training.},
keywords = {Authorship - Co-Author, JCR Q2, Jour - IEEE Robot. Autom. Lett. (RA-L), Special - Dual-Track},
pubstate = {published},
tppubtype = {article}
}
Haokun Wang, Xiaobo Liu, Nuofan Qiu, Ning Guo, Fang Wan, Chaoyang Song
DeepClaw 2.0: A Data Collection Platform for Learning Human Manipulation Journal Article
In: Frontiers in Robotics and AI, vol. 9, pp. 787291, 2022.
Abstract | Links | BibTeX | Tags: Authorship - Corresponding, JCR Q2, Jour - Front. Robot. AI. (FROBT)
@article{Wang2022DeepClaw2.0,
title = {{DeepClaw} 2.0: A Data Collection Platform for Learning Human Manipulation},
author = {Haokun Wang and Xiaobo Liu and Nuofan Qiu and Ning Guo and Fang Wan and Chaoyang Song},
issuetitle = {Section Computational Intelligence in Robotics},
doi = {10.3389/frobt.2022.787291},
year = {2022},
date = {2022-03-15},
urldate = {2022-03-15},
journal = {Frontiers in Robotics and AI},
volume = {9},
pages = {787291},
abstract = {Besides direct interaction, human hands are also skilled at using tools to manipulate objects for typical life and work tasks. This paper proposes DeepClaw 2.0 as a low-cost, open-sourced data collection platform for learning human manipulation. We use an RGB-D camera to visually track the motion and deformation of a pair of soft finger networks on a modified kitchen tong operated by human teachers. These fingers can be easily integrated with robotic grippers to bridge the structural mismatch between humans and robots during learning. The deformation of soft finger networks, which reveals tactile information in contact-rich manipulation, is captured passively. We collected a comprehensive sample dataset involving five human demonstrators in ten manipulation tasks with five trials per task. As a low-cost, open-sourced platform, we also developed an intuitive interface that converts the raw sensor data into state-action data for imitation learning problems. For learning-by-demonstration problems, we further demonstrated our dataset’s potential by using real robotic hardware to collect joint actuation data or using a simulated environment when limited access to the hardware.},
keywords = {Authorship - Corresponding, JCR Q2, Jour - Front. Robot. AI. (FROBT)},
pubstate = {published},
tppubtype = {article}
}
Youcan Yan, Yajing Shen, Chaoyang Song, Jia Pan
Tactile Super-Resolution Model for Soft Magnetic Skin Journal Article
In: IEEE Robotics and Automation Letters, vol. 7, iss. April, no. 2, pp. 2589-2596, 2022.
Abstract | Links | BibTeX | Tags: Authorship - Co-Author, JCR Q2, Jour - IEEE Robot. Autom. Lett. (RA-L), Special - Dual-Track
@article{Yan2022TactileSuper,
title = {Tactile Super-Resolution Model for Soft Magnetic Skin},
author = {Youcan Yan and Yajing Shen and Chaoyang Song and Jia Pan},
doi = {10.1109/LRA.2022.3141449},
year = {2022},
date = {2022-01-10},
urldate = {2022-01-10},
journal = {IEEE Robotics and Automation Letters},
volume = {7},
number = {2},
issue = {April},
pages = {2589--2596},
abstract = {Tactile sensors of high spatial resolution can provide rich contact information in terms of accurate contact location and force magnitude for robots. However, achieving a high spatial resolution normally requires a high density of tactile sensing cells (or taxels), which will inevitably lead to crowded wire connections, more data acquisition time and probably crosstalk between taxels. An alternative approach to improve the spatial resolution without introducing a high density of taxels is employing super-resolution technology. Here, we propose a novel tactile super-resolution method based on a sinusoidally magnetized soft magnetic skin, by which we have achieved a 15-fold improvement of localization accuracy (from 6 mm to 0.4 mm) as well as the ability to measure the force magnitude. Different from the existing super-resolution methods that rely on overlapping signals of neighbouring taxels, our model only relies on the local information from a single 3-axis taxel and thereby can detect multipoint contact applied on neighboring taxels and work properly even when some of the neighbouring taxels near the contact position are damaged (or unavailable). With this property, our method would be robust to damage and could potentially benefit robotic applications that require multipoint contact detection.},
keywords = {Authorship - Co-Author, JCR Q2, Jour - IEEE Robot. Autom. Lett. (RA-L), Special - Dual-Track},
pubstate = {published},
tppubtype = {article}
}
Haiyang Jiang, Xudong Han, Yonglin Jing, Ning Guo, Fang Wan, Chaoyang Song
Rigid-Soft Interactive Design of a Lobster-Inspired Finger Surface for Enhanced Grasping Underwater Journal Article
In: Frontiers in Robotics and AI, vol. 8, pp. 787187, 2021.
Abstract | Links | BibTeX | Tags: Authorship - Corresponding, JCR Q2, Jour - Front. Robot. AI. (FROBT)
@article{Jiang2021RigidSoft,
title = {Rigid-Soft Interactive Design of a Lobster-Inspired Finger Surface for Enhanced Grasping Underwater},
author = {Haiyang Jiang and Xudong Han and Yonglin Jing and Ning Guo and Fang Wan and Chaoyang Song},
doi = {10.3389/frobt.2021.787187},
year = {2021},
date = {2021-12-22},
urldate = {2021-12-22},
issuetitle = {Section Soft Robotics},
journal = {Frontiers in Robotics and AI},
volume = {8},
pages = {787187},
abstract = {Bio-inspirations from soft-bodied animals provide a rich design source for soft robots, yet limited literature explored the potential enhancement from rigid-bodied ones. This paper draws inspiration from the tooth profiles of the rigid claws of the Boston Lobster, aiming at an enhanced soft finger surface for underwater grasping using an iterative design process. The lobsters distinguish themselves from other marine animals with a pair of claws capable of dexterous object manipulation both on land and underwater. We proposed a 3-stage design iteration process that involves raw imitation, design parametric exploration, and bionic parametric exploitation on the original tooth profiles on the claws of the Boston Lobster. Eventually, 7 finger surface designs were generated and fabricated with soft silicone. We validated each design stage through many vision-based robotic grasping attempts against selected objects from the Evolved Grasping Analysis Dataset (EGAD). Over 14,000 grasp attempts were accumulated on land (71.4%) and underwater (28.6%), where we selected the optimal design through an on-land experiment and further tested its capability underwater. As a result, we observed an 18.2% improvement in grasping success rate at most from a resultant bionic finger surface design, compared with those without the surface, and a 10.4% improvement at most compared with the validation design from the previous literature. Results from this paper are relevant and consistent with the bioresearch earlier in 1911, showing the value of bionics. The results indicate the capability and competence of the optimal bionic finger surface design in an amphibious environment, which can contribute to future research in enhanced underwater grasping using soft robots.},
keywords = {Authorship - Corresponding, JCR Q2, Jour - Front. Robot. AI. (FROBT)},
pubstate = {published},
tppubtype = {article}
}
Baiyue Wang, Weijie Guo, Shihao Feng, Hongdong Yi, Fang Wan, Chaoyang Song
Volumetrically Enhanced Soft Actuator with Proprioceptive Sensing Journal Article
In: IEEE Robotics and Automation Letters, vol. 6, iss. July, no. 3, pp. 5284-5291, 2021.
Abstract | Links | BibTeX | Tags: Authorship - Corresponding, JCR Q2, Jour - IEEE Robot. Autom. Lett. (RA-L), Special - Dual-Track
@article{Wang2021VolumetricallyEnhanced,
title = {Volumetrically Enhanced Soft Actuator with Proprioceptive Sensing},
author = {Baiyue Wang and Weijie Guo and Shihao Feng and Hongdong Yi and Fang Wan and Chaoyang Song},
doi = {10.1109/LRA.2021.3072859},
year = {2021},
date = {2021-07-01},
urldate = {2021-07-01},
journal = {IEEE Robotics and Automation Letters},
volume = {6},
number = {3},
issue = {July},
pages = {5284--5291},
abstract = {Soft robots often show a superior power-to-weight ratio using highly compliant, light-weight material, which leverages various bio-inspired body designs to generate desirable deformations for life-like motions. In this letter, given that most material used for soft robots is light-weight in general, we propose a volumetrically enhanced design strategy for soft robots, providing a novel design guideline to govern the form factor of soft robots. We present the design, modeling, and optimization of a volumetrically enhanced soft actuator (VESA) with linear and rotary motions, respectively, achieving superior force and torque output, linear and rotary displacement, and overall extension ratio per unit volume. We further explored VESA's proprioceptive sensing capability by validating the output force and torque through analytical modeling and experimental verification. Our results show that the volumetric metrics hold the potential to be used as a practical design guideline to optimize soft robots’ engineering performance.},
keywords = {Authorship - Corresponding, JCR Q2, Jour - IEEE Robot. Autom. Lett. (RA-L), Special - Dual-Track},
pubstate = {published},
tppubtype = {article}
}
Linhan Yang, Xudong Han, Weijie Guo, Fang Wan, Jia Pan, Chaoyang Song
Learning-based Optoelectronically Innervated Tactile Finger for Rigid-Soft Interactive Grasping Journal Article
In: IEEE Robotics and Automation Letters, vol. 6, iss. April, no. 2, pp. 3817 - 3824, 2021.
Abstract | Links | BibTeX | Tags: Authorship - Corresponding, JCR Q2, Jour - IEEE Robot. Autom. Lett. (RA-L), Special - Dual-Track
@article{Yang2021LearningBased,
title = {Learning-based Optoelectronically Innervated Tactile Finger for Rigid-Soft Interactive Grasping},
author = {Linhan Yang and Xudong Han and Weijie Guo and Fang Wan and Jia Pan and Chaoyang Song},
doi = {10.1109/LRA.2021.3065186},
year = {2021},
date = {2021-04-01},
urldate = {2021-04-01},
journal = {IEEE Robotics and Automation Letters},
volume = {6},
number = {2},
issue = {April},
pages = {3817--3824},
note = {Presented at the IEEE International Conference on Robotics and Automation (ICRA), Xi’an, China},
abstract = {This letter presents a novel design of a soft tactile finger with omni-directional adaptation using multi-channel optical fibers for rigid-soft interactive grasping. Machine learning methods are used to train a model for real-time prediction of force, torque, and contact using the tactile data collected. We further integrated such fingers in a reconfigurable gripper design with three fingers so that the finger arrangement can be actively adjusted in real-time based on the tactile data collected during grasping, achieving the process of rigid-soft interactive grasping. Detailed sensor calibration and experimental results are also included to further validate the proposed design for enhanced grasping robustness. Video: https://www.youtube.com/watch?v=ynCfSA4FQnY.},
keywords = {Authorship - Corresponding, JCR Q2, Jour - IEEE Robot. Autom. Lett. (RA-L), Special - Dual-Track},
pubstate = {published},
tppubtype = {article}
}
Fang Wan, Haokun Wang, Jiyuan Wu, Yujia Liu, Sheng Ge, Chaoyang Song
A Reconfigurable Design for Omni-adaptive Grasp Learning Journal Article
In: IEEE Robotics and Automation Letters, vol. 5, iss. July, no. 3, pp. 4210-4217, 2020.
Abstract | Links | BibTeX | Tags: Authorship - Corresponding, JCR Q2, Jour - IEEE Robot. Autom. Lett. (RA-L), Special - Dual-Track
@article{Wan2020AReconfigurable,
title = {A Reconfigurable Design for Omni-adaptive Grasp Learning},
author = {Fang Wan and Haokun Wang and Jiyuan Wu and Yujia Liu and Sheng Ge and Chaoyang Song},
doi = {10.1109/LRA.2020.2982059},
year = {2020},
date = {2020-07-01},
urldate = {2020-07-01},
journal = {IEEE Robotics and Automation Letters},
volume = {5},
number = {3},
issue = {July},
pages = {4210--4217},
abstract = {The engineering design of robotic grippers presents an ample design space for optimization towards robust grasping. In this letter, we investigate how learning method can be used to support the design reconfiguration of robotic grippers for grasping using a novel soft structure with omni-directional adaptation. We propose a gripper system that is reconfigurable in terms of the number and arrangement of the proposed finger, which generates a large number of possible design configurations. Such design reconfigurations with omni-adaptive fingers enables us to systematically investigate the optimal arrangement of the fingers towards robust grasping. Furthermore, we adopt a learning-based method as the baseline to benchmark the effectiveness of each design configuration. As a result, we found that the 3-finger radial configuration is suitable for space-saving and cost-effectiveness, achieving an average 96% grasp success rate on seen and novel objects selected from the YCB dataset. The 4-finger radial arrangement can be applied to cases that require a higher payload with even distribution. We achieved dimension reduction using the radial gripper design with the removal of z-axis rotation during grasping. We also reported the different outcomes with or without friction enhancement of the soft finger network.},
keywords = {Authorship - Corresponding, JCR Q2, Jour - IEEE Robot. Autom. Lett. (RA-L), Special - Dual-Track},
pubstate = {published},
tppubtype = {article}
}
Fang Wan, Chaoyang Song
Flange-Based Hand-Eye Calibration Using a 3D Camera with High Resolution, Accuracy, and Frame Rate Journal Article
In: Frontiers in Robotics and AI, vol. 7, pp. 65, 2020.
Abstract | Links | BibTeX | Tags: Authorship - Corresponding, JCR Q2, Jour - Front. Robot. AI. (FROBT)
@article{Wan2020FlangeBased,
title = {Flange-Based Hand-Eye Calibration Using a {3D} Camera with High Resolution, Accuracy, and Frame Rate},
author = {Fang Wan and Chaoyang Song},
doi = {10.3389/frobt.2020.00065},
year = {2020},
date = {2020-05-29},
urldate = {2020-05-29},
journal = {Frontiers in Robotics and AI},
volume = {7},
pages = {65},
abstract = {Point cloud data provides three-dimensional (3D) measurement of the geometric details in the physical world, which relies heavily on the quality of the machine vision system. In this paper, we explore the potentials of a 3D scanner of high quality (15 million points per second), accuracy (up to 0.150 mm), and frame rate (up to 20 FPS) during static and dynamic measurements of the robot flange for direct hand-eye calibration and trajectory error tracking. With the availability of high-quality point cloud data, we can exploit the standardized geometric features on the robot flange for 3D measurement, which are directly accessible for hand-eye calibration problems. In the meanwhile, we tested the proposed flange-based calibration methods in a dynamic setting to capture point cloud data in a high frame rate. We found that our proposed method works robustly even in dynamic environments, enabling a versatile hand-eye calibration during motion. Furthermore, capturing high-quality point cloud data in real-time opens new doors for the use of 3D scanners, capable of detecting sensitive anomalies of refined details even in motion trajectories. Codes and sample data of this calibration method is provided at Github (https://github.com/ancorasir/flange_handeye_calibration).},
keywords = {Authorship - Corresponding, JCR Q2, Jour - Front. Robot. AI. (FROBT)},
pubstate = {published},
tppubtype = {article}
}
Linhan Yang, Fang Wan, Haokun Wang, Xiaobo Liu, Yujia Liu, Jia Pan, Chaoyang Song
Rigid-Soft Interactive Learning for Robust Grasping Journal Article
In: IEEE Robotics and Automation Letters, vol. 5, iss. April, no. 2, pp. 1720 - 1727, 2020.
Abstract | Links | BibTeX | Tags: Authorship - Corresponding, JCR Q2, Jour - IEEE Robot. Autom. Lett. (RA-L), Special - Dual-Track
@article{Yang2020RigidSoft,
title = {Rigid-Soft Interactive Learning for Robust Grasping},
author = {Linhan Yang and Fang Wan and Haokun Wang and Xiaobo Liu and Yujia Liu and Jia Pan and Chaoyang Song},
doi = {10.1109/LRA.2020.2969932},
year = {2020},
date = {2020-04-01},
urldate = {2020-04-01},
journal = {IEEE Robotics and Automation Letters},
volume = {5},
number = {2},
issue = {April},
pages = {1720--1727},
note = {Presented at the IEEE International Conference on Robotics and Automation (ICRA), Paris, France},
abstract = {Robot learning is widely accepted by academia and industry with its potentials to transform autonomous robot control through machine learning. Inspired by widely used soft fingers on grasping, we propose a method of rigid-soft interactive learning, aiming at reducing the time of data collection. In this letter, we classify the interaction categories into Rigid-Rigid, Rigid-Soft, SoftRigid according to the interaction surface between grippers and target objects. We find experimental evidence that the interaction types between grippers and target objects play an essential role in the learning methods. We use soft, stuffed toys for training, instead of everyday objects, to reduce the integration complexity and computational burden. Although the stuffed toys are limited in reflecting the physics of finger-object interaction in real-life scenarios, we exploit such rigid-soft interaction by changing the gripper fingers to the soft ones when dealing with rigid, daily-life items such as the Yale-CMU-Berkeley (YCB) objects. With a small data collection of 5 K picking attempts in total, our results suggest that such Rigid-Soft and Soft-Rigid interactions are transferable. Moreover, the combination of such interactions shows better performance on the grasping test. We also explore the effect of the grasp type on the learning method by changing the gripper configurations. We achieve the best grasping performance at 97.5% for easy YCB objects and 81.3% for difficult YCB objects while using a precise grasp with a two-soft-finger gripper to collect training data and power grasp with a four-soft-finger gripper to test the grasp policy.},
keywords = {Authorship - Corresponding, JCR Q2, Jour - IEEE Robot. Autom. Lett. (RA-L), Special - Dual-Track},
pubstate = {published},
tppubtype = {article}
}
Katja Hölttä-Otto, Kevin Otto, Chaoyang Song, Jianxi Luo, Timothy Li, Carolyn C. Seepersad, Warren Seering
The Characteristics of Innovative, Mechanical Products—10 Years Later Journal Article
In: Journal of Mechanical Design, vol. 140, iss. August, no. 8, pp. 084501, 2018.
Abstract | Links | BibTeX | Tags: Authorship - Co-Author, JCR Q2, Jour - J. Mech. Des. (JMD)
@article{HolttaOtto2018TheCharacteristics,
  author    = {Katja Hölttä-Otto and Kevin Otto and Chaoyang Song and Jianxi Luo and Timothy Li and Carolyn C. Seepersad and Warren Seering},
  title     = {The Characteristics of Innovative, Mechanical Products—10 Years Later},
  journal   = {Journal of Mechanical Design},
  volume    = {140},
  number    = {8},
  issue     = {August},
  pages     = {084501},
  year      = {2018},
  date      = {2018-08-01},
  urldate   = {2018-08-01},
  doi       = {10.1115/1.4039851},
  abstract  = {Ten years prior to this paper, innovative mechanical products were analyzed and found to embody multiple innovation characteristics—an average of two more than competing products in the marketplace. At the time, it was not known whether these products would be successful over time and whether the number or type of innovation characteristics would be related with success. In this work, products from the previous study were categorized into well- and under-adopted products. Also, each product was categorized according to the type of firm that launched it: a new venture or an established firm. The innovative products enjoyed a success rate of 77% on average. The success was not dependent on the number or type of innovation characteristics embodied by the product. However, products developed in new ventures embody, on average, one more innovation characteristic and enjoy a slightly higher success rate than those launched by established firms.},
  keywords  = {Authorship - Co-Author, JCR Q2, Jour - J. Mech. Des. (JMD)},
  pubstate  = {published},
  tppubtype = {article}
}
Fang Wan, Chaoyang Song
A Neural Network with Logical Reasoning based on Auxiliary Inputs Journal Article
In: Frontiers in Robotics and AI, vol. 5, pp. 86, 2018.
Abstract | Links | BibTeX | Tags: Authorship - Corresponding, JCR Q2, Jour - Front. Robot. AI. (FROBT)
@article{Wan2018ANeural,
title = {A Neural Network with Logical Reasoning based on Auxiliary Inputs},
author = {Fang Wan and Chaoyang Song},
issuetitle = {Section Computational Intelligence in Robotics},
doi = {10.3389/frobt.2018.00086},
year = {2018},
date = {2018-07-30},
urldate = {2018-07-30},
journal = {Frontiers in Robotics and AI},
volume = {5},
pages = {86},
abstract = {This paper describes a neural network design using auxiliary inputs, namely the indicators, that act as the hints to explain the predicted outcome through logical reasoning, mimicking the human behavior of deductive reasoning. Besides the original network input and output, we add an auxiliary input that reflects the specific logic of the data to formulate a reasoning process for cross-validation. We found that one can design either meaningful indicators, or even meaningless ones, when using such auxiliary inputs, upon which one can use as the basis of reasoning to explain the predicted outputs. As a result, one can formulate different reasonings to explain the predicted results by designing different sets of auxiliary inputs without the loss of trustworthiness of the outcome. This is similar to human explanation process where one can explain the same observation from different perspectives with reasons. We demonstrate our network concept by using the MNIST data with different sets of auxiliary inputs, where a series of design guidelines are concluded. Later, we validated our results by using a set of images taken from a robotic grasping platform. We found that our network enhanced the last 1–2% of the prediction accuracy while eliminating questionable predictions with self-conflicting logics. Future application of our network with auxiliary inputs can be applied to robotic detection problems such as autonomous object grasping, where the logical reasoning can be introduced to optimize robotic learning.},
keywords = {Authorship - Corresponding, JCR Q2, Jour - Front. Robot. AI. (FROBT)},
pubstate = {published},
tppubtype = {article}
}
Yaohui Chen, Fang Wan, Tong Wu, Chaoyang Song
Soft-Rigid Interaction Mechanism towards a Lobster-inspired Hybrid Actuator Journal Article
In: Journal of Micromechanics and Microengineering, vol. 28, iss. December, no. 1, pp. 014007, 2017.
Abstract | Links | BibTeX | Tags: Authorship - Corresponding, JCR Q2, Jour - J. Micromech. Microeng. (JMM)
@article{Chen2017SoftRigid,
  author     = {Yaohui Chen and Fang Wan and Tong Wu and Chaoyang Song},
  title      = {Soft-Rigid Interaction Mechanism towards a Lobster-inspired Hybrid Actuator},
  journal    = {Journal of Micromechanics and Microengineering},
  issuetitle = {Special Issue on Soft Robotics and Smart System Technologies},
  volume     = {28},
  number     = {1},
  issue      = {December},
  pages      = {014007},
  year       = {2017},
  date       = {2017-12-15},
  urldate    = {2017-12-15},
  doi        = {10.1088/1361-6439/aa9e25},
  abstract   = {Soft pneumatic actuators (SPAs) are intrinsically light-weight, compliant and therefore ideal to directly interact with humans and be implemented into wearable robotic devices. However, they also pose new challenges in describing and sensing their continuous deformation. In this paper, we propose a hybrid actuator design with bio-inspirations from the lobsters, which can generate reconfigurable bending movements through the internal soft chamber interacting with the external rigid shells. This design with joint and link structures enables us to exactly track its bending configurations that previously posed a significant challenge to soft robots. Analytic models are developed to illustrate the soft-rigid interaction mechanism with experimental validation. A robotic glove using hybrid actuators to assist grasping is assembled to illustrate their potentials in safe human-robot interactions. Considering all the design merits, our work presents a practical approach to the design of next-generation robots capable of achieving both good accuracy and compliance.},
  keywords   = {Authorship - Corresponding, JCR Q2, Jour - J. Micromech. Microeng. (JMM)},
  pubstate   = {published},
  tppubtype  = {article}
}
Chaoyang Song, Yan Chen, I-Ming Chen
Kinematic Study of the Original and Revised General Line-Symmetric Bricard 6R Linkages Journal Article
In: Journal of Mechanisms and Robotics, vol. 6, iss. August, no. 3, pp. 031002, 2014.
Abstract | Links | BibTeX | Tags: Authorship - First Author, JCR Q2, Jour - J. Mech. Robot. (JMR)
@article{Song2014KinematicStudy,
title = {Kinematic Study of the Original and Revised General Line-Symmetric {Bricard} {6R} Linkages},
author = {Chaoyang Song and Yan Chen and I-Ming Chen},
doi = {10.1115/1.4026339},
year = {2014},
date = {2014-08-01},
urldate = {2014-08-01},
journal = {Journal of Mechanisms and Robotics},
volume = {6},
number = {3},
issue = {August},
pages = {031002},
abstract = {In this paper, the solutions to closure equations of the original general line-symmetric Bricard 6R linkage are derived through matrix method. Two independent linkage closures are found in the original general line-symmetric Bricard 6R linkage, which are line-symmetric in geometry conditions, kinematic variables and spatial configurations. The revised general line-symmetric Bricard 6R linkage differs from the original linkage with negatively equaled offsets on the opposite joints. Further analysis shows that the revised linkage is equivalent to the original linkage with different setups on joint axis directions. As a special case of the general line-symmetric Bricard linkage, the line-symmetric octahedral Bricard linkage also has two forms in the closure equations. Their closure curves are not independent but joined into a full circle. This work offers an in-depth understanding about the kinematics of the general line-symmetric Bricard linkages.},
keywords = {Authorship - First Author, JCR Q2, Jour - J. Mech. Robot. (JMR)},
pubstate = {published},
tppubtype = {article}
}
Conference Papers
Sorry, no publications matched your criteria.
Extended Abstracts
Sorry, no publications matched your criteria.
Doctoral Thesis
Sorry, no publications matched your criteria.