2023
Chang, Xinglong; Dost, Katharina; Dobbie, Gillian; Wicker, Jörg
Poison is Not Traceless: Fully-Agnostic Detection of Poisoning Attacks Unpublished Forthcoming
Forthcoming.
@unpublished{Chang2023poison,
title = {Poison is Not Traceless: Fully-Agnostic Detection of Poisoning Attacks},
author = {Xinglong Chang and Katharina Dost and Gillian Dobbie and J\"{o}rg Wicker},
url = {http://arxiv.org/abs/2310.16224},
doi = {10.48550/arXiv.2310.16224},
year = {2023},
date = {2023-10-23},
urldate = {2023-10-23},
abstract = {The performance of machine learning models depends on the quality of the underlying data. Malicious actors can attack the model by poisoning the training data. Current detectors are tied to either specific data types, models, or attacks, and therefore have limited applicability in real-world scenarios. This paper presents a novel fully-agnostic framework, Diva (Detecting InVisible Attacks), that detects attacks solely relying on analyzing the potentially poisoned data set. Diva is based on the idea that poisoning attacks can be detected by comparing the classifier’s accuracy on poisoned and clean data and pre-trains a meta-learner using Complexity Measures to estimate the otherwise unknown accuracy on a hypothetical clean dataset. The framework applies to generic poisoning attacks. For evaluation purposes, in this paper, we test Diva on label-flipping attacks.},
keywords = {adversarial defence, adversarial learning, machine learning, reliable machine learning},
pubstate = {forthcoming},
tppubtype = {unpublished}
}
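
The Diva entry above rests on a simple mechanism: predict, from data Complexity Measures alone, how accurate a classifier would be on a hypothetical clean version of the dataset, and flag poisoning when the accuracy actually achieved on the suspect data falls well below that estimate. The following is a minimal sketch of that idea, assuming a pre-fitted meta-learner regressor and scikit-learn-style estimators; the meta-features, threshold, and function names are illustrative assumptions, not the authors' released implementation.

# Minimal sketch of the Diva idea: compare the accuracy a classifier actually
# achieves on a suspect dataset with the accuracy a pre-trained meta-learner
# predicts for a hypothetical clean version of that dataset.
# Meta-features, threshold, and names are assumptions for illustration only.
import numpy as np
from sklearn.model_selection import cross_val_score

def complexity_measures(X, y):
    # Placeholder meta-features; the paper uses established data Complexity Measures.
    return np.array([
        X.shape[0],                  # number of instances
        X.shape[1],                  # number of features
        len(np.unique(y)),           # number of classes
        np.mean(np.var(X, axis=0)),  # average feature variance
    ])

def diva_flag(X, y, classifier, meta_learner, threshold=0.1):
    """Return True if the dataset looks poisoned."""
    # 1. Accuracy the meta-learner expects on a hypothetical clean dataset,
    #    predicted from the dataset's meta-features alone.
    estimated_clean_acc = meta_learner.predict(
        complexity_measures(X, y).reshape(1, -1))[0]
    # 2. Accuracy actually obtained on the (potentially poisoned) data.
    actual_acc = cross_val_score(classifier, X, y, cv=5).mean()
    # 3. A large gap suggests the training data was poisoned.
    return (estimated_clean_acc - actual_acc) > threshold

In this sketch a positive gap means the suspect data yields noticeably lower accuracy than the meta-learner expects from clean data with similar characteristics, which is the signal Diva exploits.
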
Chang, Luke; Dost, Katharina; Zhai, Kaiqi; Demontis, Ambra; Roli, Fabio; Dobbie, Gillian; Wicker, Jörg
BAARD: Blocking Adversarial Examples by Testing for Applicability, Reliability and Decidability Proceedings Article
In: Kashima, Hisashi; Ide, Tsuyoshi; Peng, Wen-Chih (Ed.): The 27th Pacific-Asia Conference on Knowledge Discovery and Data Mining (PAKDD), pp. 3-14, Springer Nature Switzerland, Cham, 2023, ISBN: 978-3-031-33374-3.
@inproceedings{chang2021baard,
title = {BAARD: Blocking Adversarial Examples by Testing for Applicability, Reliability and Decidability},
author = {Luke Chang and Katharina Dost and Kaiqi Zhai and Ambra Demontis and Fabio Roli and Gillian Dobbie and J\"{o}rg Wicker},
editor = {Hisashi Kashima and Tsuyoshi Ide and Wen-Chih Peng},
url = {https://arxiv.org/abs/2105.00495
https://github.com/wickerlab/baard},
doi = {10.1007/978-3-031-33374-3_1},
isbn = {978-3-031-33374-3},
year = {2023},
date = {2023-05-27},
urldate = {2023-05-27},
booktitle = {The 27th Pacific-Asia Conference on Knowledge Discovery and Data Mining (PAKDD)},
pages = {3-14},
publisher = {Springer Nature Switzerland},
address = {Cham},
abstract = {Adversarial defenses protect machine learning models from adversarial attacks, but are often tailored to one type of model or attack. The lack of information on unknown potential attacks makes detecting adversarial examples challenging. Additionally, attackers do not need to follow the rules made by the defender. To address this problem, we take inspiration from the concept of Applicability Domain in cheminformatics. Cheminformatics models struggle to make accurate predictions because only a limited number of compounds are known and available for training. Applicability Domain defines a domain based on the known compounds and rejects any unknown compound that falls outside the domain. Similarly, adversarial examples start as harmless inputs, but can be manipulated to evade reliable classification by moving outside the domain of the classifier. We are the first to identify the similarity between Applicability Domain and adversarial detection. Instead of focusing on unknown attacks, we focus on what is known, the training data. We propose a simple yet robust triple-stage data-driven framework that checks the input globally and locally, and confirms that it is coherent with the model’s output. This framework can be applied to any classification model and is not limited to specific attacks. We demonstrate these three stages work as one unit, effectively detecting various attacks, even in a white-box scenario.},
keywords = {adversarial defence, adversarial learning, applicability domain, cheminformatics, evasion attacks, machine learning},
pubstate = {published},
tppubtype = {inproceedings}
}
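
The BAARD entry above describes a triple-stage, data-driven check inspired by the Applicability Domain concept: a global applicability test, a local reliability test, and a decidability test that asks whether the model's output is coherent with the training data. Below is a minimal sketch of such a pipeline, assuming access to the clean training set and the model's predicted label for each input; the concrete statistics (per-class bounding box, k-NN distances, neighbour vote) are illustrative stand-ins rather than the exact tests from the paper.

# Minimal sketch of a BAARD-style triple-stage detector. The specific
# statistics used at each stage are assumptions for illustration.
import numpy as np

class TripleStageDetector:
    def __init__(self, X_train, y_train, k=10, quantile=0.95):
        self.X, self.y, self.k, self.q = X_train, y_train, k, quantile
        # Stage 1 (Applicability): per-class feature ranges seen during training.
        self.lo = {c: X_train[y_train == c].min(axis=0) for c in np.unique(y_train)}
        self.hi = {c: X_train[y_train == c].max(axis=0) for c in np.unique(y_train)}

    def _knn(self, x, X):
        # Distances and indices of the k nearest training points to x.
        d = np.linalg.norm(X - x, axis=1)
        idx = np.argsort(d)[: self.k]
        return d[idx], idx

    def is_adversarial(self, x, y_pred):
        # Stage 1: reject inputs outside the global domain of the predicted class.
        if np.any(x < self.lo[y_pred]) or np.any(x > self.hi[y_pred]):
            return True
        # Stage 2 (Reliability): reject inputs unusually far from the
        # predicted class compared to that class's typical spread.
        X_c = self.X[self.y == y_pred]
        d, _ = self._knn(x, X_c)
        typical = np.quantile(np.linalg.norm(X_c - X_c.mean(axis=0), axis=1), self.q)
        if d.mean() > typical:
            return True
        # Stage 3 (Decidability): reject if the nearest training neighbours
        # disagree with the model's output.
        _, idx = self._knn(x, self.X)
        return np.mean(self.y[idx] == y_pred) < 0.5

An input is rejected as soon as any stage fails, so the three checks act as one unit: the cheap global bound first, then the more expensive local tests.
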
Chen, Zeyu; Dost, Katharina; Zhu, Xuan; Chang, Xinglong; Dobbie, Gillian; Wicker, Jörg
Targeted Attacks on Time Series Forecasting Proceedings Article
In: Kashima, Hisashi; Ide, Tsuyoshi; Peng, Wen-Chih (Ed.): The 27th Pacific-Asia Conference on Knowledge Discovery and Data Mining (PAKDD), pp. 314-327, Springer Nature Switzerland, Cham, 2023, ISBN: 978-3-031-33383-5.
@inproceedings{Chen2023targeted,
title = {Targeted Attacks on Time Series Forecasting},
author = {Zeyu Chen and Katharina Dost and Xuan Zhu and Xinglong Chang and Gillian Dobbie and J\"{o}rg Wicker},
editor = {Hisashi Kashima and Tsuyoshi Ide and Wen-Chih Peng},
url = {https://github.com/wickerlab/nvita},
doi = {10.1007/978-3-031-33383-5_25},
isbn = {978-3-031-33383-5},
year = {2023},
date = {2023-05-26},
urldate = {2023-05-26},
booktitle = {The 27th Pacific-Asia Conference on Knowledge Discovery and Data Mining (PAKDD)},
pages = {314-327},
publisher = {Springer Nature Switzerland},
address = {Cham},
abstract = {Time Series Forecasting (TSF) is well established in domains dealing with temporal data to predict future events yielding the basis for strategic decision-making. Previous research indicated that forecasting models are vulnerable to adversarial attacks, that is, maliciously crafted perturbations of the original data with the goal of altering the model’s predictions. However, attackers targeting specific outcomes pose a substantially more severe threat as they could manipulate the model and bend it to their needs. Regardless, there is no systematic approach for targeted adversarial learning in the TSF domain yet. In this paper, we introduce targeted attacks on TSF in a systematic manner. We establish a new experimental design standard regarding attack goals and perturbation control for targeted adversarial learning on TSF. For this purpose, we present a novel indirect sparse black-box evasion attack on TSF, nVita. Additionally, we adapt the popular white-box attacks Fast Gradient Sign Method (FGSM) and Basic Iterative Method (BIM). Our experiments confirm not only that all three methods are effective but also that current state-of-the-art TSF models are indeed susceptible to attacks. These results motivate future research in this area to achieve higher reliability of forecasting models.},
keywords = {adversarial learning, forecasting, machine learning, time series},
pubstate = {published},
tppubtype = {inproceedings}
}
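
The targeted-attack entry above adapts the white-box Fast Gradient Sign Method (FGSM) so that a forecast is pushed toward an attacker-chosen value rather than merely degraded. A minimal sketch of a single targeted FGSM step for a differentiable PyTorch forecaster follows; the model, epsilon, and target are assumptions for illustration, and the paper's own black-box attack (nVita) is not reproduced here.

# Minimal sketch of a targeted FGSM step on a time-series forecaster,
# assuming a differentiable PyTorch model mapping an input window to a forecast.
import torch

def targeted_fgsm(model, x, target, epsilon=0.05):
    """Push the forecast for input window x toward an attacker-chosen target."""
    x_adv = x.clone().detach().requires_grad_(True)
    # Loss measures how far the current forecast is from the attacker's target.
    loss = torch.nn.functional.mse_loss(model(x_adv), target)
    loss.backward()
    # Step against the gradient sign to reduce the distance to the target,
    # with the perturbation size controlled by epsilon.
    return (x_adv - epsilon * x_adv.grad.sign()).detach()

Iterating this step with a small epsilon and clipping the accumulated perturbation to a budget gives a targeted variant of the iterative BIM attack mentioned in the abstract.
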