@inproceedings{a47cbbcfbda34c02bca4a6f7cf34a06d,
  title         = {A {BIC}-Based Mixture Model Defense Against Data Poisoning Attacks on Classifiers},
  abstract      = {Data Poisoning (DP) is an effective attack which degrades a trained classifier's accuracy through covert injection of attack samples into the training set. We propose an unsupervised Bayesian Information Criterion (BIC)-based mixture model defense against DP attacks that: 1) addresses the most challenging embedded DP scenario wherein, if DP is present, the poisoned samples are an a priori unknown subset of the training set, and with no clean validation set available; 2) applies a mixture model to both well-fit potentially multi-modal class distributions and capture poisoned samples within a small subset of the mixture components; 3) jointly identifies poisoned components and samples by minimizing the BIC cost defined over the whole training set. Our experimental results demonstrate the effectiveness of our defense under strong DP attacks, as well as its superiority over other works.},
  author        = {Li, Xi and Miller, David J. and Xiang, Zhen and Kesidis, George},
  note          = {Publisher Copyright: {\textcopyright} 2023 IEEE.; 33rd IEEE International Workshop on Machine Learning for Signal Processing, MLSP 2023 ; Conference date: 17-09-2023 Through 20-09-2023},
  year          = {2023},
  doi           = {10.1109/MLSP55844.2023.10286008},
  language      = {English (US)},
  series        = {IEEE International Workshop on Machine Learning for Signal Processing, {MLSP}},
  publisher     = {IEEE Computer Society},
  editor        = {Comminiello, Danilo and Scarpiniti, Michele},
  booktitle     = {Proceedings of the 2023 {IEEE} 33rd International Workshop on Machine Learning for Signal Processing, {MLSP} 2023},
  address       = {United States},
  internal-note = {address should be the publisher's city, not a country -- verify against the IEEE CS imprint},
}