Adversarial learning aims to pinpoint vulnerabilities in machine learning models that traditional testing methods cannot detect. It has proven effective across a range of applications, typically focusing on a particular model or domain. In image classification, for example, evasion techniques deceive traffic-sign recognition models with minor, carefully crafted alterations to the input images. A complementary line of adversarial learning identifies instances that would disrupt or improve a model’s training if they were included in the training data, as in data-poisoning attacks and their defenses.
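As a concrete illustration of the evasion setting mentioned above, the following is a minimal sketch of a gradient-based image perturbation in the spirit of the Fast Gradient Sign Method. The model, image batch, and label are placeholders, not artifacts of any of the publications listed below.

# Minimal FGSM-style evasion sketch (illustrative only; `model`, `image`,
# and `label` are hypothetical placeholders, assumed to be batched tensors).
import torch
import torch.nn.functional as F

def fgsm_perturb(model, image, label, epsilon=0.03):
    """Return an adversarially perturbed copy of `image`."""
    image = image.clone().detach().requires_grad_(True)
    loss = F.cross_entropy(model(image), label)
    loss.backward()
    # Step in the direction that increases the loss, bounded by epsilon.
    adv = image + epsilon * image.grad.sign()
    return adv.clamp(0.0, 1.0).detach()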
2024
Kim, Jonathan; Urschler, Martin; Riddle, Pat; Wicker, Jörg
Attacking the Loop: Adversarial Attacks on Graph-based Loop Closure Detection Proceedings Article
In: Proceedings of the 19th International Joint Conference on Computer Vision, Imaging and Computer Graphics Theory and Applications, pp. 90-97, 2024.
@inproceedings{kim2024attacking,
title = {Attacking the Loop: Adversarial Attacks on Graph-based Loop Closure Detection},
author = {Jonathan Kim and Martin Urschler and Pat Riddle and J\"{o}rg Wicker},
url = {http://arxiv.org/abs/2312.06991
https://doi.org/10.48550/arxiv.2312.06991},
doi = {10.5220/0012313100003660},
year = {2024},
date = {2024-02-27},
urldate = {2024-02-27},
booktitle = {Proceedings of the 19th International Joint Conference on Computer Vision, Imaging and Computer Graphics Theory and Applications},
volume = {4},
pages = {90-97},
abstract = {With the advancement in robotics, it is becoming increasingly common for large factories and warehouses to incorporate visual SLAM (vSLAM) enabled automated robots that operate closely next to humans. This makes any adversarial attacks on vSLAM components potentially detrimental to humans working alongside them. Loop Closure Detection (LCD) is a crucial component in vSLAM that minimizes the accumulation of drift in mapping, since even a small drift can accumulate into a significant drift over time. Previous work by Kim et al. unified visual features and semantic objects into a single graph structure for finding loop closure candidates. While this provided a performance improvement over visual feature-based LCD, it also created a single point of vulnerability for potential graph-based adversarial attacks. Unlike previously reported visual-patch based attacks, small graph perturbations are far more challenging to detect, making them a more significant threat. In this paper, we present Adversarial-LCD, a novel black-box evasion attack framework that employs an eigencentrality-based perturbation method and an SVM-RBF surrogate model with a Weisfeiler-Lehman feature extractor for attacking graph-based LCD. Our evaluation shows that the attack performance of Adversarial-LCD was superior to that of other machine learning surrogate algorithms, including SVM-linear, SVM-polynomial, and Bayesian classifier, demonstrating the effectiveness of our attack framework. Furthermore, we show that our eigencentrality-based perturbation method outperforms other algorithms, such as Random-walk and Shortest-path, highlighting the efficiency of Adversarial-LCD’s perturbation selection method.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
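The framework in the entry above combines an eigencentrality-based perturbation selector with an SVM-RBF surrogate trained on Weisfeiler-Lehman features. The sketch below is a loose, assumption-laden approximation of those two ingredients using networkx and scikit-learn; it is not the authors' implementation, and the WL hashing, feature dimensions, and training data are illustrative placeholders.

# Rough sketch of the two ingredients described in the abstract above:
# (1) rank candidate edge insertions by the eigenvector centrality of their
#     endpoints, (2) train an SVM-RBF surrogate on simple WL-style features.
import hashlib
import networkx as nx
import numpy as np
from sklearn.svm import SVC

def wl_features(graph, iterations=2, dim=64):
    """Hash Weisfeiler-Lehman label refinements into a fixed-length histogram."""
    labels = {n: str(graph.degree(n)) for n in graph}
    hist = np.zeros(dim)
    for _ in range(iterations):
        new_labels = {}
        for n in graph:
            neigh = sorted(labels[m] for m in graph.neighbors(n))
            new_labels[n] = hashlib.md5((labels[n] + "|" + ",".join(neigh)).encode()).hexdigest()[:8]
        labels = new_labels
        for lab in labels.values():
            hist[int(lab, 16) % dim] += 1
    return hist

def eigencentrality_candidates(graph, k=5):
    """Return the k non-edges whose endpoints have the highest combined centrality."""
    cent = nx.eigenvector_centrality_numpy(graph)
    non_edges = sorted(nx.non_edges(graph), key=lambda e: cent[e[0]] + cent[e[1]], reverse=True)
    return non_edges[:k]

# Surrogate: learns to mimic the black-box LCD decision from WL features.
# `train_graphs` and `lcd_labels` are hypothetical placeholders.
# surrogate = SVC(kernel="rbf", probability=True)
# surrogate.fit(np.stack([wl_features(g) for g in train_graphs]), lcd_labels)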
2023
Chang, Xinglong; Dost, Katharina; Dobbie, Gillian; Wicker, Jörg
Poison is Not Traceless: Fully-Agnostic Detection of Poisoning Attacks Unpublished Forthcoming
Forthcoming.
@unpublished{Chang2023poison,
title = {Poison is Not Traceless: Fully-Agnostic Detection of Poisoning Attacks},
author = {Xinglong Chang and Katharina Dost and Gillian Dobbie and J\"{o}rg Wicker},
url = {http://arxiv.org/abs/2310.16224},
doi = {10.48550/arXiv.2310.16224},
year = {2023},
date = {2023-10-23},
urldate = {2023-10-23},
abstract = {The performance of machine learning models depends on the quality of the underlying data. Malicious actors can attack the model by poisoning the training data. Current detectors are tied to either specific data types, models, or attacks, and therefore have limited applicability in real-world scenarios. This paper presents a novel fully-agnostic framework, Diva (Detecting InVisible Attacks), that detects attacks solely relying on analyzing the potentially poisoned data set. Diva is based on the idea that poisoning attacks can be detected by comparing the classifier’s accuracy on poisoned and clean data and pre-trains a meta-learner using Complexity Measures to estimate the otherwise unknown accuracy on a hypothetical clean dataset. The framework applies to generic poisoning attacks. For evaluation purposes, in this paper, we test Diva on label-flipping attacks.},
keywords = {},
pubstate = {forthcoming},
tppubtype = {unpublished}
}
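Diva, described in the entry above, pre-trains a meta-learner on dataset complexity measures to estimate the accuracy a classifier would reach on a hypothetical clean version of the data, and flags a dataset when the observed accuracy falls well below that estimate. The sketch below illustrates that comparison with toy complexity measures and a generic regressor; all function names, measures, and thresholds are assumptions, not the authors' API.

# Illustrative sketch of Diva's core comparison (assumed, simplified):
# estimate the "clean" accuracy from dataset complexity measures, then flag
# the dataset if the observed cross-validated accuracy is much lower.
import numpy as np
from sklearn.ensemble import RandomForestRegressor
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import cross_val_score

def complexity_measures(X, y):
    """Toy stand-ins for complexity measures (assumes integer class labels)."""
    corr = np.abs(np.corrcoef(X.T, y)[-1, :-1]).max()       # best single-feature correlation
    balance = np.bincount(y).min() / np.bincount(y).max()   # class balance
    return np.array([corr, balance, X.shape[1] / len(y)])

# Offline: fit the meta-learner on (measures, clean accuracy) pairs collected
# from many clean datasets. `meta_X` and `meta_y` are hypothetical placeholders.
# meta_learner = RandomForestRegressor().fit(meta_X, meta_y)

def diva_like_score(meta_learner, X, y):
    estimated_clean_acc = meta_learner.predict([complexity_measures(X, y)])[0]
    observed_acc = cross_val_score(LogisticRegression(max_iter=1000), X, y).mean()
    return estimated_clean_acc - observed_acc   # a large gap suggests poisoning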
Chang, Xinglong; Dobbie, Gillian; Wicker, Jörg
Fast Adversarial Label-Flipping Attack on Tabular Data Unpublished Forthcoming
Forthcoming.
@unpublished{Chang2023fast,
title = {Fast Adversarial Label-Flipping Attack on Tabular Data},
author = {Xinglong Chang and Gillian Dobbie and J\"{o}rg Wicker},
url = {https://arxiv.org/abs/2310.10744},
doi = {10.48550/arXiv.2310.10744},
year = {2023},
date = {2023-10-16},
urldate = {2023-10-16},
abstract = {Machine learning models are increasingly used in fields that require high reliability such as cybersecurity. However, these models remain vulnerable to various attacks, among which the adversarial label-flipping attack poses significant threats. In label-flipping attacks, the adversary maliciously flips a portion of training labels to compromise the machine learning model. This paper raises significant concerns as these attacks can camouflage a highly skewed dataset as an easily solvable classification problem, often misleading machine learning practitioners into lower defenses and miscalculations of potential risks. This concern amplifies in tabular data settings, where identifying true labels requires expertise, allowing malicious label-flipping attacks to easily slip under the radar. To demonstrate this risk is inherited in the adversary's objective, we propose FALFA (Fast Adversarial Label-Flipping Attack), a novel efficient attack for crafting adversarial labels. FALFA is based on transforming the adversary's objective and employs linear programming to reduce computational complexity. Using ten real-world tabular datasets, we demonstrate FALFA's superior attack potential, highlighting the need for robust defenses against such threats.},
keywords = {},
pubstate = {forthcoming},
tppubtype = {unpublished}
}
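FALFA, as described above, transforms the adversary's objective and uses linear programming to select labels to flip efficiently. The sketch below shows only the generic budgeted-selection step as an LP relaxation with scipy; the per-sample "gain" values and the actual objective transformation from the paper are not reproduced here.

# Hedged sketch: choose which training labels to flip under a budget by
# solving an LP relaxation. `gains` (how much flipping each label is expected
# to hurt the victim model) is a placeholder, not FALFA's transformed objective.
import numpy as np
from scipy.optimize import linprog

def select_flips(gains, budget):
    """gains: estimated damage per flipped label; budget: maximum number of flips."""
    n = len(gains)
    # Maximise gains @ z  <=>  minimise -gains @ z, with 0 <= z <= 1 and
    # sum(z) <= budget; for this structure the LP optimum is (near-)integral.
    res = linprog(c=-np.asarray(gains),
                  A_ub=np.ones((1, n)), b_ub=[budget],
                  bounds=[(0, 1)] * n, method="highs")
    return np.flatnonzero(res.x > 0.5)   # indices of labels to flip

# Example: flip at most 3 of 10 labels, preferring the most damaging ones.
# flip_idx = select_flips(np.random.rand(10), budget=3)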
Chang, Luke; Dost, Katharina; Zhai, Kaiqi; Demontis, Ambra; Roli, Fabio; Dobbie, Gillian; Wicker, Jörg
BAARD: Blocking Adversarial Examples by Testing for Applicability, Reliability and Decidability Proceedings Article
In: Kashima, Hisashi; Ide, Tsuyoshi; Peng, Wen-Chih (Ed.): The 27th Pacific-Asia Conference on Knowledge Discovery and Data Mining (PAKDD), pp. 3-14, Springer Nature Switzerland, Cham, 2023, ISBN: 978-3-031-33374-3.
@inproceedings{chang2021baard,
title = {BAARD: Blocking Adversarial Examples by Testing for Applicability, Reliability and Decidability},
author = {Luke Chang and Katharina Dost and Kaiqi Zhai and Ambra Demontis and Fabio Roli and Gillian Dobbie and J\"{o}rg Wicker},
editor = {Hisashi Kashima and Tsuyoshi Ide and Wen-Chih Peng},
url = {https://arxiv.org/abs/2105.00495
https://github.com/wickerlab/baard},
doi = {10.1007/978-3-031-33374-3_1},
isbn = {978-3-031-33374-3},
year = {2023},
date = {2023-05-27},
urldate = {2023-05-27},
booktitle = {The 27th Pacific-Asia Conference on Knowledge Discovery and Data Mining (PAKDD)},
pages = {3-14},
publisher = {Springer Nature Switzerland},
address = {Cham},
abstract = {Adversarial defenses protect machine learning models from adversarial attacks, but are often tailored to one type of model or attack. The lack of information on unknown potential attacks makes detecting adversarial examples challenging. Additionally, attackers do not need to follow the rules made by the defender. To address this problem, we take inspiration from the concept of Applicability Domain in cheminformatics. Cheminformatics models struggle to make accurate predictions because only a limited number of compounds are known and available for training. Applicability Domain defines a domain based on the known compounds and rejects any unknown compound that falls outside the domain. Similarly, adversarial examples start as harmless inputs, but can be manipulated to evade reliable classification by moving outside the domain of the classifier. We are the first to identify the similarity between Applicability Domain and adversarial detection. Instead of focusing on unknown attacks, we focus on what is known, the training data. We propose a simple yet robust triple-stage data-driven framework that checks the input globally and locally, and confirms that they are coherent with the model’s output. This framework can be applied to any classification model and is not limited to specific attacks. We demonstrate these three stages work as one unit, effectively detecting various attacks, even for a white-box scenario.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
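BAARD, per the abstract above, checks an input globally (applicability), locally (reliability), and against the model's output (decidability). The sketch below is a loose interpretation of that three-stage idea using per-class feature bounds and nearest neighbours; the class structure, thresholds, and the omitted third stage are assumptions, not the authors' code.

# Loose sketch of a three-stage check in the spirit of BAARD:
# (1) applicability: is the input inside the per-class feature range seen in
#     training? (2) reliability: is it close to training points of the
#     predicted class? (3) decidability: do its neighbours agree with the
#     model's prediction? Thresholds and details here are illustrative only.
import numpy as np
from sklearn.neighbors import NearestNeighbors

class TripleStageDetector:
    def __init__(self, X_train, y_train, k=10):
        self.classes = np.unique(y_train)
        self.bounds = {c: (X_train[y_train == c].min(0), X_train[y_train == c].max(0))
                       for c in self.classes}
        self.nn = {c: NearestNeighbors(n_neighbors=k).fit(X_train[y_train == c])
                   for c in self.classes}

    def is_suspicious(self, x, predicted_class, dist_threshold=1.0):
        lo, hi = self.bounds[predicted_class]
        if np.any(x < lo) or np.any(x > hi):          # stage 1: applicability
            return True
        dist, _ = self.nn[predicted_class].kneighbors([x])
        if dist.mean() > dist_threshold:              # stage 2: reliability
            return True
        # Stage 3 (decidability) would additionally compare the model's output
        # on the neighbours with its output on x; omitted in this sketch.
        return False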
Chen, Zeyu; Dost, Katharina; Zhu, Xuan; Chang, Xinglong; Dobbie, Gillian; Wicker, Jörg
Targeted Attacks on Time Series Forecasting Proceedings Article
In: Kashima, Hisashi; Ide, Tsuyoshi; Peng, Wen-Chih (Ed.): The 27th Pacific-Asia Conference on Knowledge Discovery and Data Mining (PAKDD), pp. 314-327, Springer Nature Switzerland, Cham, 2023, ISBN: 978-3-031-33383-5.
@inproceedings{Chen2023targeted,
title = {Targeted Attacks on Time Series Forecasting},
author = {Zeyu Chen and Katharina Dost and Xuan Zhu and Xinglong Chang and Gillian Dobbie and J\"{o}rg Wicker},
editor = {Hisashi Kashima and Tsuyoshi Ide and Wen-Chih Peng},
url = {https://github.com/wickerlab/nvita},
doi = {10.1007/978-3-031-33383-5_25},
isbn = {978-3-031-33383-5},
year = {2023},
date = {2023-05-26},
urldate = {2023-05-26},
booktitle = {The 27th Pacific-Asia Conference on Knowledge Discovery and Data Mining (PAKDD)},
pages = {314-327},
publisher = {Springer Nature Switzerland},
address = {Cham},
abstract = {Time Series Forecasting (TSF) is well established in domains dealing with temporal data to predict future events yielding the basis for strategic decision-making. Previous research indicated that forecasting models are vulnerable to adversarial attacks, that is, maliciously crafted perturbations of the original data with the goal of altering the model’s predictions. However, attackers targeting specific outcomes pose a substantially more severe threat as they could manipulate the model and bend it to their needs. Regardless, there is no systematic approach for targeted adversarial learning in the TSF domain yet. In this paper, we introduce targeted attacks on TSF in a systematic manner. We establish a new experimental design standard regarding attack goals and perturbation control for targeted adversarial learning on TSF. For this purpose, we present a novel indirect sparse black-box evasion attack on TSF, nVita. Additionally, we adapt the popular white-box attacks Fast Gradient Sign Method (FGSM) and Basic Iterative Method (BIM). Our experiments confirm not only that all three methods are effective but also that current state-of-the-art TSF models are indeed susceptible to attacks. These results motivate future research in this area to achieve higher reliability of forecasting models.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
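The paper above adapts FGSM and BIM to targeted attacks on forecasting models; nVita itself is a black-box, sparse attack and is not reproduced here. Below is a hedged sketch of a targeted BIM-style loop with PyTorch, where the forecaster, input window, and attacker-chosen target series are placeholders.

# Hedged sketch of a targeted BIM-style attack on a forecasting model:
# iteratively nudge the input window so the forecast moves toward an
# attacker-chosen target series. `forecaster`, `window`, and `target` are
# hypothetical placeholders; this is not the paper's nVita attack.
import torch

def targeted_bim(forecaster, window, target, epsilon=0.1, alpha=0.01, steps=20):
    adv = window.clone().detach()
    for _ in range(steps):
        adv.requires_grad_(True)
        loss = torch.nn.functional.mse_loss(forecaster(adv), target)
        grad, = torch.autograd.grad(loss, adv)
        # Move *against* the gradient to pull the forecast toward the target,
        # then project back into the epsilon-ball around the original window.
        adv = (adv - alpha * grad.sign()).detach()
        adv = torch.clamp(adv, window - epsilon, window + epsilon)
    return adv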
2017
Wicker, Jörg; Kramer, Stefan
The Best Privacy Defense is a Good Privacy Offense: Obfuscating a Search Engine User’s Profile Journal Article
In: Data Mining and Knowledge Discovery, vol. 31, no. 5, pp. 1419-1443, 2017, ISSN: 1573-756X.
@article{wicker2017best,
title = {The Best Privacy Defense is a Good Privacy Offense: Obfuscating a Search Engine User's Profile},
author = {J\"{o}rg Wicker and Stefan Kramer},
editor = {Kurt Driessens and Dragi Kocev and Marko Robnik-\v{S}ikonja and Myra Spiliopoulou},
url = {http://rdcu.be/tL0U},
doi = {10.1007/s10618-017-0524-z},
issn = {1573-756X},
year = {2017},
date = {2017-09-01},
journal = {Data Mining and Knowledge Discovery},
volume = {31},
number = {5},
pages = {1419-1443},
abstract = {User privacy on the internet is an important and unsolved problem. So far, no sufficient and comprehensive solution has been proposed that helps a user to protect his or her privacy while using the internet. Data are collected and assembled by numerous service providers. Solutions so far focused on the side of the service providers to store encrypted or transformed data that can be still used for analysis. This has a major flaw, as it relies on the service providers to do this. The user has no chance of actively protecting his or her privacy. In this work, we suggest a new approach, empowering the user to take advantage of the same tool the other side has, namely data mining to produce data which obfuscates the user’s profile. We apply this approach to search engine queries and use feedback of the search engines in terms of personalized advertisements in an algorithm similar to reinforcement learning to generate new queries potentially confusing the search engine. We evaluated the approach using a real-world data set. While evaluation is hard, we achieve results that indicate that it is possible to influence the user’s profile that the search engine generates. This shows that it is feasible to defend a user’s privacy from a new and more practical perspective.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
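The approach in the entry above generates obfuscating search queries and uses the personalized advertisements returned by the search engine as a feedback signal, in a setup resembling reinforcement learning. Below is a minimal epsilon-greedy bandit sketch of such a feedback loop; the decoy topics and the reward callback are illustrative assumptions, not the paper's algorithm.

# Minimal epsilon-greedy sketch of the feedback loop described above:
# pick a decoy query topic, observe how strongly the returned ads still
# reflect the user's real profile, and reinforce topics that dilute it.
import random

def obfuscate(topics, observe_profile_dilution, rounds=100, eps=0.2):
    value = {t: 0.0 for t in topics}   # running reward estimate per topic
    count = {t: 0 for t in topics}
    for _ in range(rounds):
        if random.random() < eps:
            topic = random.choice(topics)         # explore a new decoy topic
        else:
            topic = max(value, key=value.get)     # exploit the best one so far
        reward = observe_profile_dilution(topic)  # e.g. fewer profile-related ads
        count[topic] += 1
        value[topic] += (reward - value[topic]) / count[topic]
    return value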