@inproceedings{ca4e41535bf8494fbef5ab9af436cb7b,
title = "A Weighted Federated Averaging Framework to Reduce the Negative Influence from the Dishonest Users",
abstract = "Federated learning becomes popular for it can train an excellent performance global model without exposing clients{\textquoteright} privacy. However, most FL applications failed to consider there exists fake local trained models returned from attackers or dishonest users. Not only would the fake parameters be harmful to the convergence of the global model but also be wasting of other users{\textquoteright} computational resources. In this paper, we propose a framework to grade the users{\textquoteright} credit score based on the performances of the returned local models on the testing dataset. We also consider historical data using the exponential moving average to give a relatively higher weight for the most recent testing results. The experiments show that our system can efficiently and effectively find out the fake local models and then speed up the convergence of the global model.",
author = "Fengpan Zhao and Yan Huang and Saide Zhu and Venkata Malladi and Yubao Wu",
note = "Publisher Copyright: {\textcopyright} 2021, Springer Nature Switzerland AG.; 13th International Conference on Security, Privacy, and Anonymity in Computation, Communication, and Storage, SpaCCS 2020 ; Conference date: 18-12-2020 Through 20-12-2020",
year = "2021",
doi = "10.1007/978-3-030-68851-6_17",
language = "English (US)",
isbn = "9783030688509",
series = "Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)",
publisher = "Springer Science and Business Media Deutschland GmbH",
pages = "241--250",
editor = "Guojun Wang and Bing Chen and Wei Li and {Di Pietro}, Roberto and Xuefeng Yan and Hao Han",
booktitle = "Security, Privacy, and Anonymity in Computation, Communication, and Storage - 13th International Conference, SpaCCS 2020, Proceedings",
address = "Germany",
}
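The abstract above describes a credit-scoring scheme: each returned local model is evaluated on a testing dataset, the resulting score is smoothed with an exponential moving average so recent results count more, and the aggregation weights follow the credit scores so suspected fake models contribute little. The following is a minimal, hypothetical Python sketch of that idea; the function names (ema_credit, weighted_fedavg), the smoothing factor beta, and the normalization are illustrative assumptions, not the paper's implementation.

# Minimal sketch of EMA-based credit scoring and weighted averaging,
# assumed from the abstract; names and constants are illustrative.
import numpy as np

def ema_credit(prev_credit, test_accuracy, beta=0.5):
    # Exponential moving average of testing performance: recent results
    # receive relatively higher weight than older ones.
    return beta * test_accuracy + (1.0 - beta) * prev_credit

def weighted_fedavg(local_models, credits):
    # Aggregate local parameters, weighting each user by its normalized
    # credit score so low-credit (likely fake) updates barely contribute.
    credits = np.asarray(credits, dtype=float)
    weights = credits / credits.sum()
    return {name: sum(w * m[name] for w, m in zip(weights, local_models))
            for name in local_models[0]}

# Example: three users, the third returning a fake (poorly scoring) model.
models = [{"w": np.array([1.0, 1.0])},
          {"w": np.array([1.1, 0.9])},
          {"w": np.array([9.0, -9.0])}]  # dishonest update
credits = [ema_credit(0.8, acc) for acc in (0.82, 0.79, 0.05)]
print(weighted_fedavg(models, credits))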