@inproceedings{a5904005b9414fb09e61779721051b88,
title = "Building Adversarial Defense with Non-invertible Data Transformations",
abstract = "Deep neural networks (DNN) have been recently shown to be susceptible to a particular type of attack possible through the generation of particular synthetic examples referred to as adversarial samples. These samples are constructed by manipulating real examples from the training data distribution in order to “fool” the original neural model, resulting in misclassification of previously correctly classified samples. Addressing this weakness is of utmost importance if DNN is to be applied to critical applications, such as those in cybersecurity. In this paper, we present an analysis of this fundamental flaw lurking in all neural architectures to uncover limitations of previously proposed defense mechanisms. More importantly, we present a unifying framework for protecting deep neural models using a non-invertible data transformation–developing two adversary-resistant DNNs utilizing both linear and nonlinear dimensionality reduction techniques. Empirical results indicate that our framework provides better robustness compared to state-of-art solutions while having negligible degradation in generalization accuracy.",
author = "Wenbo Guo and Dongliang Mu and Ligeng Chen and Jinxuan Gai",
note = "Publisher Copyright: {\textcopyright} 2019, Springer Nature Switzerland AG.; 16th Pacific Rim International Conference on Artificial Intelligence, PRICAI 2019 ; Conference date: 26-08-2019 Through 30-08-2019",
year = "2019",
doi = "10.1007/978-3-030-29894-4_48",
language = "English (US)",
isbn = "9783030298937",
series = "Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)",
publisher = "Springer Verlag",
pages = "593--606",
editor = "Nayak, {Abhaya C.} and Alok Sharma",
booktitle = "PRICAI 2019",
address = "Germany",
}