Publications

Cai, Caixia; Liang, Ying Siu; Somani, Nikhil; Wu, Yan: Inferring the Geometric Nullspace of Robot Skills from Human Demonstrations. Proceedings Article. In: 2020 IEEE International Conference on Robotics and Automation (ICRA), pp. 7668-7675, IEEE, Paris, France, 2020, ISSN: 2577-087X.
Wang, Tianying; Zhang, Hao; Toh, Wei Qi; Zhu, Hongyuan; Tan, Cheston; Wu, Yan; Liu, Yong; Jing, Wei: Efficient Robotic Task Generalization Using Deep Model Fusion Reinforcement Learning. Proceedings Article. In: 2019 IEEE International Conference on Robotics and Biomimetics (ROBIO), pp. 148-153, IEEE, Dali, China, 2019, ISBN: 978-1-7281-6321-5.
Wu, Yan; Wang, Ruohan; D'Haro, Luis Fernando; Banchs, Rafael E; Tee, Keng Peng: Multi-Modal Robot Apprenticeship: Imitation Learning Using Linearly Decayed DMP+ in a Human-Robot Dialogue System. Proceedings Article. In: 2018 IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS), pp. 1-7, IEEE, Madrid, Spain, 2018, ISBN: 978-1-5386-8094-0.
Wang, Ruohan; Wu, Yan; Chan, Wei Liang; Tee, Keng Peng: Dynamic Movement Primitives Plus: For enhanced reproduction quality and efficient trajectory modification using truncated kernels and Local Biases. Proceedings Article. In: 2016 IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS), pp. 3765-3771, IEEE, Daejeon, South Korea, 2016, ISBN: 978-1-5090-3762-9.
Wu, Yan; Su, Yanyu; Demiris, Yiannis: A Morphable Template Framework for Robot Learning by Demonstration: Integrating One-shot and Incremental Learning Approaches. Journal Article. In: Robotics and Autonomous Systems, vol. 62, no. 10, pp. 1517-1530, 2014, ISSN: 0921-8890.
Wu, Yan; Demiris, Yiannis: Hierarchical Learning Approach for One-shot Action Imitation in Humanoid Robots. Proceedings Article. In: The 11th International Conference on Control, Automation, Robotics and Vision (ICARCV), pp. 453–458, IEEE, 2010, ISBN: 978-1-4244-7814-9.
Wu, Yan; Demiris, Yiannis: Towards One Shot Learning by Imitation for Humanoid Robots. Proceedings Article. In: 2010 IEEE International Conference on Robotics and Automation (ICRA), pp. 2889–2894, IEEE, 2010, ISBN: 978-1-4244-5038-1.
Wu, Yan; Demiris, Yiannis: Efficient Template-based Path Imitation by Invariant Feature Mapping. Proceedings Article. In: 2009 IEEE International Conference on Robotics and Biomimetics (ROBIO), pp. 913–918, IEEE, 2009, ISBN: 978-1-4244-4774-9.

2020
@inproceedings{cai2020inferring,
title = {Inferring the Geometric Nullspace of Robot Skills from Human Demonstrations},
author = {Caixia Cai and Ying Siu Liang and Nikhil Somani and Yan Wu},
url = {https://ieeexplore.ieee.org/document/9197174
https://yan-wu.com/wp-content/uploads/2020/05/cai2020inferring.pdf},
doi = {10.1109/ICRA40945.2020.9197174},
issn = {2577-087X},
year = {2020},
date = {2020-05-31},
booktitle = {2020 IEEE International Conference on Robotics and Automation (ICRA)},
pages = {7668-7675},
publisher = {IEEE},
address = {Paris, France},
abstract = {In this paper we present a framework to learn skills from human demonstrations in the form of geometric nullspaces, which can be executed using a robot. We collect data of human demonstrations, fit geometric nullspaces to them, and also infer their corresponding geometric constraint models. These geometric constraints provide a powerful mathematical model as well as an intuitive representation of the skill in terms of the involved objects. To execute the skill using a robot, we combine this geometric skill description with the robot's kinematics and other environmental constraints, from which poses can be sampled for the robot's execution. The result of our framework is a system that takes the human demonstrations as input, learns the underlying skill model, and executes the learnt skill with different robots in different dynamic environments. We evaluate our approach on a simulated industrial robot, and execute the final task on the iCub humanoid robot.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
2019
@inproceedings{wang2019efficient,
title = {Efficient Robotic Task Generalization Using Deep Model Fusion Reinforcement Learning},
author = {Tianying Wang and Hao Zhang and Wei Qi Toh and Hongyuan Zhu and Cheston Tan and Yan Wu and Yong Liu and Wei Jing},
url = {https://ieeexplore.ieee.org/document/8961391
https://www.yan-wu.com/wp-content/uploads/2020/05/wang2019efficient.pdf},
doi = {10.1109/ROBIO49542.2019.8961391},
isbn = {978-1-7281-6321-5},
year = {2019},
date = {2019-12-08},
booktitle = {2019 IEEE International Conference on Robotics and Biomimetics (ROBIO)},
pages = {148-153},
publisher = {IEEE},
address = {Dali, China},
abstract = {Learning-based methods have been used to program robotic tasks in recent years. However, extensive training is usually required not only for the initial task learning but also for generalizing the learned model to the same task but in different environments. In this paper, we propose a novel Deep Reinforcement Learning algorithm for efficient task generalization and environment adaptation in the robotic task learning problem. The proposed method is able to efficiently generalize the previously learned task by model fusion to solve the environment adaptation problem. The proposed Deep Model Fusion (DMF) method reuses and combines the previously trained model to improve the learning efficiency and results. In addition, we introduce a Multi-objective Guided Reward (MGR) shaping technique to further improve training efficiency. The proposed method was benchmarked against previous methods in various environments to validate its effectiveness.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
2018
@inproceedings{wu2018multimodal,
title = {Multi-Modal Robot Apprenticeship: Imitation Learning Using Linearly Decayed DMP+ in a Human-Robot Dialogue System},
author = {Yan Wu and Ruohan Wang and Luis Fernando D'Haro and Rafael E Banchs and Keng Peng Tee},
url = {https://ieeexplore.ieee.org/document/8593634
https://www.yan-wu.com/wp-content/uploads/2020/05/wu2018multimodal.pdf},
doi = {10.1109/IROS.2018.8593634},
isbn = {978-1-5386-8094-0},
year = {2018},
date = {2018-10-05},
booktitle = {2018 IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS)},
pages = {1-7},
publisher = {IEEE},
address = {Madrid, Spain},
abstract = {Robot learning by demonstration gives robots the ability to learn tasks which they have not been programmed to do before. The paradigm allows robots to work in a greater range of real-world applications in our daily life. However, this paradigm has traditionally been applied to learn tasks from a single demonstration modality. This restricts the approach from being scaled to learn and execute a series of tasks in a real-life environment. In this paper, we propose a multi-modal learning approach using DMP+ with linear decay integrated in a dialogue system with speech and ontology for the robot to learn seamlessly through natural interaction modalities (like an apprentice) while learning or re-learning is done on the fly to allow partial updates to a learned task to reduce potential user fatigue and operational downtime in teaching. The performance of the new DMP+ with linear decay system is statistically benchmarked against state-of-the-art DMP implementations. A gluing demonstration is also conducted to show how the system provides seamless learning of multiple tasks in a flexible manufacturing set-up.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
2016
@inproceedings{wang2016dynamic,
title = {Dynamic Movement Primitives Plus: For enhanced reproduction quality and efficient trajectory modification using truncated kernels and Local Biases},
author = {Ruohan Wang and Yan Wu and Wei Liang Chan and Keng Peng Tee},
url = {https://ieeexplore.ieee.org/document/7759554
http://www.yan-wu.com/docs/wang2016dynamic.pdf},
doi = {10.1109/IROS.2016.7759554},
isbn = {978-1-5090-3762-9},
year = {2016},
date = {2016-10-14},
booktitle = {2016 IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS)},
pages = {3765-3771},
publisher = {IEEE},
address = {Daejeon, South Korea},
abstract = {Dynamic Movement Primitives (DMPs) are a generic approach for trajectory modeling in an attractor landscape based on differential dynamical systems. DMPs guarantee stability and convergence properties of learned trajectories, and scale well to high dimensional data. In this paper, we propose DMP+, a modified formulation of DMPs which, while preserving the desirable properties of the original, 1) achieves lower mean square error (MSE) with equal number of kernels, and 2) allows learned trajectories to be efficiently modified by updating a subset of kernels. The ability to efficiently modify learned trajectories i) improves reusability of existing primitives, and ii) reduces user fatigue during imitation learning as errors during demonstration may be corrected later without requiring another complete demonstration. In addition, DMP+ may be used with existing DMP techniques for trajectory generalization and thus complements them. We compare the performance of our proposed approach against DMPs in learning trajectories of handwritten characters, and show that DMP+ achieves lower MSE in position deviation. We demonstrate in a second experiment that DMP+ can efficiently update a learned trajectory by updating only a subset of kernels. The update algorithm achieves modeling accuracy comparable to learning the adapted trajectory with the original DMPs.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
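Background note for the DMP-based entries above (wu2018multimodal and wang2016dynamic): their abstracts refer to the Dynamic Movement Primitive formulation that DMP+ modifies. As a minimal sketch, assuming the common discrete DMP formulation from the literature (generic background, not the truncated-kernel/local-bias DMP+ variant introduced in these papers), the transformation system, canonical system, and forcing term are:

% y: position, z: scaled velocity, g: goal, y_0: start position,
% x: canonical phase variable, tau: temporal scaling factor,
% alpha_z, beta_z, alpha_x: gain constants,
% psi_i: Gaussian basis kernels with centres c_i, widths h_i, and weights w_i.
\begin{align}
  \tau \dot{z} &= \alpha_z \bigl( \beta_z (g - y) - z \bigr) + f(x), \\
  \tau \dot{y} &= z, \\
  \tau \dot{x} &= -\alpha_x x, \\
  f(x) &= \frac{\sum_{i=1}^{N} \psi_i(x)\, w_i}{\sum_{i=1}^{N} \psi_i(x)}\, x\,(g - y_0),
  \qquad \psi_i(x) = \exp\!\bigl( -h_i (x - c_i)^2 \bigr).
\end{align}

Learning a demonstrated trajectory then amounts to fitting the weights w_i of the forcing term (typically by locally weighted regression); per the abstracts above, DMP+ introduces truncated kernels and local biases so that a learned trajectory can be modified by updating only a subset of kernels rather than re-learning it in full.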
2014
@article{wu2014morphable,
title = {A Morphable Template Framework for Robot Learning by Demonstration: Integrating One-shot and Incremental Learning Approaches},
author = {Yan Wu and Yanyu Su and Yiannis Demiris},
editor = {Ruediger Dillmann},
url = {http://www.sciencedirect.com/science/article/pii/S0921889014000992
http://www.yan-wu.com/docs/wu2014morphable.pdf},
doi = {10.1016/j.robot.2014.05.010},
issn = {0921-8890},
year = {2014},
date = {2014-05-27},
journal = {Robotics and Autonomous Systems},
volume = {62},
number = {10},
pages = {1517-1530},
abstract = {Robot learning by demonstration is key to bringing robots into daily social environments to interact with and learn from human and other agents. However, teaching a robot to acquire new knowledge is a tedious and repetitive process and often restrictive to a specific setup of the environment. We propose a template-based learning framework for robot learning by demonstration to address both generalisation and adaptability. This novel framework is based upon a one-shot learning model integrated with spectral clustering and an online learning model to learn and adapt actions in similar scenarios. A set of statistical experiments is used to benchmark the framework components and shows that this approach requires no extensive training for generalisation and can adapt to environmental changes flexibly. Two real-world applications of an iCub humanoid robot playing the tic-tac-toe game and soldering a circuit board are used to demonstrate the relative merits of the framework.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
2010
@inproceedings{wu2010hierarchical,
title = {Hierarchical Learning Approach for One-shot Action Imitation in Humanoid Robots},
author = {Yan Wu and Yiannis Demiris},
url = {https://ieeexplore.ieee.org/document/5707349
http://www.yan-wu.com/docs/wu2010icarcv.pdf},
doi = {10.1109/ICARCV.2010.5707349},
isbn = {978-1-4244-7814-9},
year = {2010},
date = {2010-12-10},
booktitle = {The 11th International Conference on Control, Automation, Robotics and Vision (ICARCV)},
pages = {453--458},
publisher = {IEEE},
abstract = {We consider the issue of segmenting an action in the learning phase into a logical set of smaller primitives in order to construct a generative model for imitation learning using a hierarchical approach. Our proposed framework, addressing the “how-to” question in imitation, is based on a one-shot imitation learning algorithm. It incorporates segmentation of a demonstrated template into a series of subactions and takes a hierarchical approach to generate the task action by using a finite state machine in a generative way. Two sets of experiments have been conducted to evaluate the performance of the framework, both statistically and in practice, through playing a tic-tac-toe game. The experiments demonstrate that the proposed framework can effectively improve the performance of the one-shot learning algorithm and reduce the size of primitive space, without compromising the learning quality.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
@inproceedings{wu2010towardsb,
title = {Towards One Shot Learning by Imitation for Humanoid Robots},
author = {Yan Wu and Yiannis Demiris},
url = {https://ieeexplore.ieee.org/document/5509429
http://www.yan-wu.com/docs/wu2010icra.pdf},
doi = {10.1109/ROBOT.2010.5509429},
isbn = {978-1-4244-5038-1},
year = {2010},
date = {2010-05-07},
booktitle = {2010 IEEE International Conference on Robotics and Automation (ICRA)},
pages = {2889--2894},
publisher = {IEEE},
abstract = {Teaching a robot to learn new knowledge is a repetitive and tedious process. In order to accelerate the process, we propose a novel template-based approach for robot arm movement imitation. This algorithm selects a previously observed path demonstrated by a human and generates a path in a novel situation based on pairwise mapping of invariant feature locations present in both the demonstrated and the new scenes using a combination of minimum distortion and minimum energy strategies. This One-Shot Learning algorithm is capable of not only mapping simple point-to-point paths but also adapting to more complex tasks such as those involving forced waypoints. As compared to traditional methodologies, our work requires neither extensive training for generalisation nor expensive run-time computation for accuracy. This algorithm has been statistically validated using cross-validation of grasping experiments as well as tested for practical implementation on the iCub humanoid robot for playing the tic-tac-toe game.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
2009
@inproceedings{wu2009efficient,
title = {Efficient Template-based Path Imitation by Invariant Feature Mapping},
author = {Yan Wu and Yiannis Demiris},
url = {https://ieeexplore.ieee.org/document/5420496
http://www.yan-wu.com/docs/wu2009robio.pdf},
doi = {10.1109/ROBIO.2009.5420496},
isbn = {978-1-4244-4774-9},
year = {2009},
date = {2009-12-23},
booktitle = {2009 IEEE International Conference on Robotics and Biomimetics (ROBIO)},
pages = {913--918},
publisher = {IEEE},
abstract = {We propose a novel approach for robot movement imitation that is suitable for robotic arm movement in tasks such as reaching and grasping. This algorithm selects a previously observed path demonstrated by an agent and generates a path in a novel situation based on pairwise mapping of invariant feature locations present in both the demonstrated and the new scenes using minimum distortion and minimum energy strategies. This One-Shot Learning algorithm is capable of not only mapping simple point-to-point paths but also adapting to more complex tasks such as those involving forced waypoints. As compared to traditional methodologies, our work does not require extensive training for generalisation or expensive run-time computation for accuracy. Cross-validation statistics of grasping experiments show great similarity between the paths produced by human subjects and the proposed algorithm.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}