Publications

Xu, Qianli; Fang, Fen; Gauthier, Nicolas; Liang, Wenyu; Wu, Yan; Li, Liyuan; Lim, Joo Hwee: Towards Efficient Multiview Object Detection with Adaptive Action Prediction. Proceedings Article. In: 2021 IEEE International Conference on Robotics and Automation (ICRA), IEEE, 2021, ISBN: 978-1-7281-9077-8.
Gao, Ruihan; Taunyazov, Tasbolat; Lin, Zhiping; Wu, Yan: Supervised Autoencoder Joint Learning on Heterogeneous Tactile Sensory Data: Improving Material Classification Performance. Proceedings Article. In: 2020 IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS), IEEE, Las Vegas, USA, 2020, ISBN: 978-1-7281-6212-6.
Taunyazov, Tasbolat; Koh, Hui Fang; Wu, Yan; Cai, Caixia; Soh, Harold: Towards Effective Tactile Identification of Textures using a Hybrid Touch Approach. Proceedings Article. In: 2019 International Conference on Robotics and Automation (ICRA), pp. 4269-4275, IEEE, Montreal, Canada, 2019, ISBN: 978-1-5386-6027-0.
Ognibene, Dimitri; Wu, Yan; Lee, Kyuhwa; Demiris, Yiannis: Hierarchies in Embodied Action Perception. Book Chapter. In: Baldassarre, Gianluca; Mirolli, Marco (Ed.): Computational and Robotic Models of the Hierarchical Organisation of Behaviour, pp. 81–98, Springer, 1, 2013, ISBN: 978-3-642-39874-2.
Wu, Yan; Demiris, Yiannis: Learning Dynamical Representations of Tools for Tool-Use Recognition. Proceedings Article. In: 2011 IEEE International Conference on Robotics and Biomimetics (ROBIO), pp. 2664–2669, IEEE, 2011, ISBN: 978-1-4577-2136-6.

2021
@inproceedings{xu2021efficient,
title = {Towards Efficient Multiview Object Detection with Adaptive Action Prediction},
author = {Qianli Xu and Fen Fang and Nicolas Gauthier and Wenyu Liang and Yan Wu and Liyuan Li and Joo Hwee Lim},
url = {https://ieeexplore.ieee.org/document/9561388},
doi = {10.1109/ICRA48506.2021.9561388},
isbn = {978-1-7281-9077-8},
year = {2021},
date = {2021-05-31},
booktitle = {2021 IEEE International Conference on Robotics and Automation (ICRA)},
publisher = {IEEE},
abstract = {Active vision is a desirable perceptual feature for robots. Existing approaches usually make strong assumptions about the task and environment, and are thus less robust and efficient. This study proposes an adaptive view planning approach to boost the efficiency and robustness of active object detection. We formulate the multi-object detection task as an active multiview object detection problem given the initial locations of the objects. Next, we propose a novel adaptive action prediction (A2P) method built on a deep Q-learning network with a dueling architecture. The A2P method is able to perform view planning based on visual information of multiple objects and to adjust action ranges according to the task status. Evaluated on the AVD dataset, A2P leads to a 21.9% increase in detection accuracy in unfamiliar environments, while improving efficiency by 22.7%. On the T-LESS dataset, multi-object detection boosts efficiency by more than 30% while achieving equivalent detection accuracy.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
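The A2P method above is built on a deep Q-learning network with a dueling architecture. As a rough illustration only (the paper's exact network, input features, and action space are not reproduced here), a dueling Q-network head could be sketched in PyTorch as follows; the layer sizes, names, and the 8-action example are assumptions, not the published implementation.

import torch
import torch.nn as nn

class DuelingQNetwork(nn.Module):
    """Minimal dueling Q-network sketch: a shared trunk splits into a
    state-value stream V(s) and an advantage stream A(s, a), recombined as
    Q(s, a) = V(s) + A(s, a) - mean_a A(s, a)."""

    def __init__(self, obs_dim: int, n_actions: int, hidden: int = 128):
        super().__init__()
        self.trunk = nn.Sequential(nn.Linear(obs_dim, hidden), nn.ReLU())
        self.value_stream = nn.Linear(hidden, 1)               # V(s)
        self.advantage_stream = nn.Linear(hidden, n_actions)   # A(s, a)

    def forward(self, obs: torch.Tensor) -> torch.Tensor:
        h = self.trunk(obs)
        value = self.value_stream(h)            # (batch, 1)
        advantage = self.advantage_stream(h)    # (batch, n_actions)
        # Subtracting the mean advantage keeps V and A identifiable.
        return value + advantage - advantage.mean(dim=1, keepdim=True)

# Example: score 8 hypothetical view-planning actions from a 32-dim observation.
if __name__ == "__main__":
    net = DuelingQNetwork(obs_dim=32, n_actions=8)
    q_values = net(torch.randn(4, 32))
    print(q_values.shape)  # torch.Size([4, 8])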
2020
@inproceedings{gao2020supervised,
title = {Supervised Autoencoder Joint Learning on Heterogeneous Tactile Sensory Data: Improving Material Classification Performance},
author = {Ruihan Gao and Tasbolat Taunyazov and Zhiping Lin and Yan Wu},
url = {http://yan-wu.com/wp-content/uploads/2020/08/gao2020supervised.pdf
https://ieeexplore.ieee.org/document/9341111},
doi = {10.1109/IROS45743.2020.9341111},
isbn = {978-1-7281-6212-6},
year = {2020},
date = {2020-10-31},
booktitle = {2020 IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS)},
publisher = {IEEE},
address = {Las Vegas, USA},
abstract = {The sense of touch is an essential sensing modality for a robot to interact with the environment, as it provides rich and multimodal sensory information upon contact. It enriches the perceptual understanding of the environment and closes the loop for action generation. One fundamental area of perception in which touch dominates over other sensing modalities is the understanding of the materials it interacts with, for example, glass versus plastic. However, unlike the senses of vision and audition, which have standardized data formats, the format of tactile data is largely dictated by the sensor manufacturer, which makes large-scale learning on data collected from heterogeneous sensors difficult and limits the usefulness of publicly available tactile datasets. This paper investigates the joint learnability of data collected from two tactile sensors performing a touch sequence on some common materials. We propose a supervised recurrent autoencoder framework that performs a joint material classification task to improve training effectiveness. The framework is implemented and tested on two sets of tactile data collected in sliding motion on 20 material textures using the iCub RoboSkin tactile sensors and the SynTouch BioTac sensor respectively. Our results show that learning efficiency and accuracy improve for both datasets through joint learning compared to independent training on each dataset. This suggests the usefulness of sharing large-scale open tactile datasets collected with different sensors.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
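The framework above jointly trains on data from two heterogeneous tactile sensors with supervision from shared material labels. A minimal PyTorch sketch of that general idea is given below: one recurrent autoencoder per sensor, with a single classification head tied across both latent spaces, trained on reconstruction plus classification losses. The layer types, dimensions, and sensor channel counts are assumptions for illustration, not the published architecture.

import torch
import torch.nn as nn
import torch.nn.functional as F

class SupervisedRecurrentAE(nn.Module):
    """Sketch of one per-sensor branch: a GRU encoder compresses a tactile
    sequence to a latent code, a GRU decoder reconstructs the sequence, and
    a (shared) linear head classifies the material from the latent code."""

    def __init__(self, in_dim: int, latent_dim: int, classifier: nn.Module):
        super().__init__()
        self.encoder = nn.GRU(in_dim, latent_dim, batch_first=True)
        self.decoder = nn.GRU(latent_dim, in_dim, batch_first=True)
        self.classifier = classifier  # shared across sensor branches

    def forward(self, x):                         # x: (batch, time, channels)
        _, h = self.encoder(x)
        z = h.squeeze(0)                          # latent code per sequence
        # Repeat the latent code along time so the decoder rebuilds the sequence.
        z_seq = z.unsqueeze(1).repeat(1, x.size(1), 1)
        recon, _ = self.decoder(z_seq)
        logits = self.classifier(z)
        return recon, logits

# Example with hypothetical channel counts for two different sensors.
if __name__ == "__main__":
    n_classes, latent = 20, 64
    shared_head = nn.Linear(latent, n_classes)
    branch_a = SupervisedRecurrentAE(in_dim=60, latent_dim=latent, classifier=shared_head)
    branch_b = SupervisedRecurrentAE(in_dim=19, latent_dim=latent, classifier=shared_head)

    xa, xb = torch.randn(8, 75, 60), torch.randn(8, 75, 19)
    ya, yb = torch.randint(0, n_classes, (8,)), torch.randint(0, n_classes, (8,))
    (recon_a, logits_a), (recon_b, logits_b) = branch_a(xa), branch_b(xb)
    loss = (F.mse_loss(recon_a, xa) + F.mse_loss(recon_b, xb)
            + F.cross_entropy(logits_a, ya) + F.cross_entropy(logits_b, yb))
    print(loss.item())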
2019
@inproceedings{taunyazov2019towards,
title = {Towards Effective Tactile Identification of Textures using a Hybrid Touch Approach},
author = {Tasbolat Taunyazov and Hui Fang Koh and Yan Wu and Caixia Cai and Harold Soh},
url = {https://ieeexplore.ieee.org/document/8793967
http://www.yan-wu.com/docs/taunyanov2019towards.pdf},
doi = {10.1109/ICRA.2019.8793967},
isbn = {978-1-5386-6027-0},
year = {2019},
date = {2019-05-24},
booktitle = {2019 International Conference on Robotics and Automation (ICRA)},
pages = {4269-4275},
publisher = {IEEE},
address = {Montreal, Canada},
abstract = {The sense of touch is arguably the first human sense to develop. Empowering robots with the sense of touch may augment their understanding of interacted objects and the environment beyond standard sensory modalities (e.g., vision). This paper investigates the effect of hybridizing touch and sliding movements for tactile-based texture classification. We develop three machine-learning methods within a framework to discriminate between surface textures; the first two methods use hand-engineered features, whilst the third leverages convolutional and recurrent neural network layers to learn feature representations from raw data. To compare these methods, we constructed a dataset comprising tactile data from 23 textures gathered using the iCub platform under a loosely constrained setup, i.e., with nonlinear motion. In line with findings from neuroscience, our experiments show that a good initial estimate can be obtained via touch data, which can be further refined via sliding; combining both touch and sliding data results in 98% classification accuracy over unseen test data.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
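The third method in the paper above learns feature representations from raw tactile data with convolutional and recurrent layers. The following is a generic, hedged sketch of such a classifier in PyTorch; the layer sizes, the 12-taxel input, and the sequence length are hypothetical, and only the 23-class output follows the dataset described in the abstract.

import torch
import torch.nn as nn

class ConvRecurrentTextureNet(nn.Module):
    """Generic conv + recurrent classifier for tactile time series:
    1-D convolutions extract local features along time, an LSTM summarizes
    the sliding sequence, and a linear layer predicts the texture class."""

    def __init__(self, n_taxels: int, n_classes: int):
        super().__init__()
        self.conv = nn.Sequential(
            nn.Conv1d(n_taxels, 32, kernel_size=5, padding=2), nn.ReLU(),
            nn.Conv1d(32, 64, kernel_size=5, padding=2), nn.ReLU(),
        )
        self.lstm = nn.LSTM(64, 128, batch_first=True)
        self.head = nn.Linear(128, n_classes)

    def forward(self, x):                        # x: (batch, time, taxels)
        f = self.conv(x.transpose(1, 2))         # (batch, 64, time)
        out, _ = self.lstm(f.transpose(1, 2))    # (batch, time, 128)
        return self.head(out[:, -1])             # classify from last hidden state

# Example with hypothetical dimensions: 12 taxels, 200 time steps, 23 textures.
if __name__ == "__main__":
    net = ConvRecurrentTextureNet(n_taxels=12, n_classes=23)
    logits = net(torch.randn(4, 200, 12))
    print(logits.shape)  # torch.Size([4, 23])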
2013
@inbook{ognibene2013hierarchies,
title = {Hierarchies in Embodied Action Perception},
author = {Dimitri Ognibene and Yan Wu and Kyuhwa Lee and Yiannis Demiris},
editor = {Gianluca Baldassarre and Marco Mirolli},
url = {https://link.springer.com/chapter/10.1007/978-3-642-39875-9_5
http://www.yan-wu.com/docs/ognibene2013hierarchies.pdf},
doi = {10.1007/978-3-642-39875-9_5},
isbn = {978-3-642-39874-2},
year = {2013},
date = {2013-01-01},
booktitle = {Computational and Robotic Models of the Hierarchical Organisation of Behaviour},
pages = {81--98},
publisher = {Springer},
edition = {1},
abstract = {During social interactions, humans are capable of initiating and responding to rich and complex social actions despite having incomplete world knowledge, and physical, perceptual and computational constraints. This capability relies on action perception mechanisms that exploit regularities in observed goal-oriented behaviours to generate robust predictions and reduce the workload of sensing systems. To achieve this essential capability, we argue that the following three factors are fundamental. First, human knowledge is frequently hierarchically structured, both in the perceptual and execution domains. Second, human perception is an active process driven by current task requirements and context; this is particularly important when the perceptual input is complex (e.g. human motion) and the agent has to operate under embodiment constraints. Third, learning is at the heart of action perception mechanisms, underlying the agent’s ability to add new behaviours to its repertoire. Based on these factors, we review multiple instantiations of a hierarchically-organised biologically-inspired framework for embodied action perception, demonstrating its flexibility in addressing the rich computational contexts of action perception and learning in robotic platforms.},
keywords = {},
pubstate = {published},
tppubtype = {inbook}
}
2011
@inproceedings{wu2011learning,
title = {Learning Dynamical Representations of Tools for Tool-Use Recognition},
author = {Yan Wu and Yiannis Demiris},
url = {https://ieeexplore.ieee.org/document/6181707
http://www.yan-wu.com/docs/wu2011robio.pdf},
doi = {10.1109/ROBIO.2011.6181707},
isbn = {978-1-4577-2136-6},
year = {2011},
date = {2011-12-11},
booktitle = {2011 IEEE International Conference on Robotics and Biomimetics (ROBIO)},
pages = {2664--2669},
publisher = {IEEE},
abstract = {We consider the problem of representing and recognising tools, a subset of objects with special functionality and action patterns. Our proposed framework is based on biological evidence of the hierarchical representation of tools in the region of the human cortex that generates action semantics. It addresses the shortfalls of traditional learning models of object representation when applied to tools. To showcase its merits, the framework is implemented as a hybrid model between the Hierarchical Attentive Multiple Models for Execution and Recognition of Actions architecture (HAMMER) and the Hidden Markov Model (HMM) to recognise and describe tools as dynamic patterns at a symbolic level. The implemented model is tested and validated in two sets of experiments, each comprising 50 human demonstrations of using 5 different tools. In the experiment with precise and accurate input data, the cross-validation statistics suggest very robust identification of the learned tools. In the experiment in an unstructured environment, all errors can be explained systematically.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
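The framework above describes tools as dynamic patterns and recognises them at a symbolic level through a hybrid of HAMMER and HMMs. As a simplified sketch of the dynamics-as-HMM idea only (not the published HAMMER-based model), one could fit one Gaussian HMM per tool on demonstration feature sequences with the third-party hmmlearn package and classify a new sequence by maximum log-likelihood; the tool names, feature dimensionality, and state count below are assumptions.

import numpy as np
from hmmlearn.hmm import GaussianHMM  # third-party: pip install hmmlearn

def train_tool_models(demos_per_tool, n_states=4):
    """Fit one Gaussian HMM per tool from demonstration feature sequences.
    demos_per_tool: dict mapping tool name -> list of (T_i, D) arrays."""
    models = {}
    for tool, demos in demos_per_tool.items():
        X = np.concatenate(demos)            # stacked observations
        lengths = [len(d) for d in demos]    # per-demonstration sequence lengths
        hmm = GaussianHMM(n_components=n_states, covariance_type="diag", n_iter=50)
        models[tool] = hmm.fit(X, lengths)
    return models

def recognise(models, sequence):
    """Pick the tool whose HMM assigns the highest log-likelihood to the sequence."""
    return max(models, key=lambda tool: models[tool].score(sequence))

# Example with synthetic 6-D motion features for two hypothetical tools.
if __name__ == "__main__":
    rng = np.random.default_rng(0)
    demos = {
        "hammer": [rng.normal(0.0, 1.0, size=(40, 6)) for _ in range(10)],
        "spoon": [rng.normal(2.0, 1.0, size=(40, 6)) for _ in range(10)],
    }
    models = train_tool_models(demos)
    print(recognise(models, rng.normal(2.0, 1.0, size=(40, 6))))  # likely "spoon"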