@inproceedings{ceec7bf46d3a4288bf6d120047c733f5,
  title     = {Dimensionality Reduction Through Sub-Space Mapping for Nearest Neighbour Algorithms},
  abstract  = {Many learning algorithms make an implicit assumption that all the attributes present in the data are relevant to a learning task. However, several studies have demonstrated that this assumption rarely holds; for many supervised learning algorithms, the inclusion of irrelevant or redundant attributes can result in a degradation in classification accuracy. While a variety of different methods for dimensionality reduction exist, many of these are only appropriate for datasets which contain a small number of attributes (e.g. $<$ 20). This paper presents an alternative approach to dimensionality reduction, and demonstrates how it can be combined with a Nearest Neighbour learning algorithm. We present an empirical evaluation of this approach, and contrast its performance with two related techniques; a Monte-Carlo wrapper and an Information Gain-based filter approach.},
  keywords  = {machine learning, feature selection, nearest-neighbour},
  author    = {Payne, T. R. and Edwards, P.},
  year      = {2000},
  doi       = {10.1007/3-540-45164-1_35},
  language  = {English},
  isbn      = {978-3-540-67602-7},
  volume    = {1810},
  series    = {Lecture Notes in Artificial Intelligence},
  publisher = {Springer-Verlag},
  pages     = {331--343},
  editor    = {L{\'o}pez de Mantaras, Ramon and Plaza, Enric},
  booktitle = {Machine Learning: {ECML} 2000},
}