2020
Chester, Andrew; Koh, Yun Sing; Wicker, Jörg; Sun, Quan; Lee, Junjae
Balancing Utility and Fairness against Privacy in Medical Data Proceedings Article
In: IEEE Symposium Series on Computational Intelligence (SSCI), pp. 1226-1233, IEEE, 2020.
@inproceedings{chester2020balancing,
title = {Balancing Utility and Fairness against Privacy in Medical Data},
author = {Andrew Chester and Yun Sing Koh and J\"{o}rg Wicker and Quan Sun and Junjae Lee},
url = {https://ieeexplore.ieee.org/abstract/document/9308226},
doi = {10.1109/SSCI47803.2020.9308226},
year = {2020},
date = {2020-12-01},
booktitle = {IEEE Symposium Series on Computational Intelligence (SSCI)},
pages = {1226-1233},
publisher = {IEEE},
abstract = {There are numerous challenges when designing algorithms that interact with sensitive data, such as medical or financial records. One of these challenges is privacy. However, there is a tension between privacy, utility (model accuracy), and fairness. While de-identification techniques, such as generalisation and suppression, have been proposed to enable privacy protection, they come at a cost, specifically to fairness and utility. Recent work on fairness in algorithm design defines fairness as a guarantee of similar outputs for "similar" input data. This notion is discussed in connection with de-identification. This research investigates the trade-off between privacy, fairness, and utility, whereas other work investigates only the trade-off between privacy and the utility of the data or the overall accuracy of the model. In this research, we investigate the effects of two standard de-identification techniques, k-anonymity and differential privacy, on both utility and fairness. We propose two measures to quantify the privacy-utility and privacy-fairness trade-offs. Although other research has provided privacy guarantees with respect to utility, this research focuses on the trade-offs at set de-identification levels and relies on the guarantees provided by the privacy preservation methods. We discuss the effects of de-identification on data with different characteristics, namely class imbalance and outcome imbalance. We evaluated this on synthetic datasets and standard real-world datasets. As a case study, we analysed the Medical Expenditure Panel Survey dataset.},
keywords = {accuracy, computational sustainability, data mining, fairness, imbalance, machine learning, medicine, privacy},
pubstate = {published},
tppubtype = {inproceedings}
}
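As a rough illustration of the de-identification techniques the abstract refers to, the Python sketch below shows a generalisation-based k-anonymity check and a Laplace-mechanism count query of the kind used in differential privacy. All function names, the age-banding scheme, and the toy records are illustrative assumptions, not code from the paper.

# Minimal sketch, not the paper's implementation: generalisation-based
# k-anonymity and a Laplace-mechanism count query (differential privacy).
import random
from collections import Counter

def generalise_age(age, band=10):
    # Coarsen exact ages into bands, e.g. 37 -> "30-39" (illustrative scheme).
    lo = (age // band) * band
    return f"{lo}-{lo + band - 1}"

def is_k_anonymous(records, quasi_identifiers, k):
    # Every combination of quasi-identifier values must occur at least k times.
    groups = Counter(tuple(r[q] for q in quasi_identifiers) for r in records)
    return all(count >= k for count in groups.values())

def laplace_count(true_count, epsilon=1.0, sensitivity=1.0):
    # Laplace noise with scale sensitivity/epsilon, built as the difference of
    # two exponential draws; larger epsilon means less noise and less privacy.
    rate = epsilon / sensitivity
    return true_count + random.expovariate(rate) - random.expovariate(rate)

records = [
    {"age": generalise_age(a), "sex": s, "diagnosis": d}
    for a, s, d in [(34, "F", 1), (37, "F", 0), (52, "M", 1), (58, "M", 0)]
]
print(is_k_anonymous(records, ["age", "sex"], k=2))                      # True for this toy data
print(laplace_count(sum(r["diagnosis"] for r in records), epsilon=0.5))  # noisy count

Utility and fairness effects would then be measured by training a model on the de-identified records and comparing accuracy and outcome rates across groups, which is roughly the trade-off the paper quantifies.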
2017
Wicker, Jörg; Kramer, Stefan
The Best Privacy Defense is a Good Privacy Offense: Obfuscating a Search Engine User’s Profile Journal Article
In: Data Mining and Knowledge Discovery, vol. 31, no. 5, pp. 1419-1443, 2017, ISSN: 1573-756X.
@article{wicker2017best,
title = {The Best Privacy Defense is a Good Privacy Offense: Obfuscating a Search Engine User's Profile},
author = {J\"{o}rg Wicker and Stefan Kramer},
editor = {Kurt Driessens and Dragi Kocev and Marko Robnik-\v{S}ikonja and Myra Spiliopoulou},
url = {http://rdcu.be/tL0U},
doi = {10.1007/s10618-017-0524-z},
issn = {1573-756X},
year = {2017},
date = {2017-09-01},
journal = {Data Mining and Knowledge Discovery},
volume = {31},
number = {5},
pages = {1419-1443},
abstract = {User privacy on the internet is an important and unsolved problem. So far, no sufficient and comprehensive solution has been proposed that helps a user to protect his or her privacy while using the internet. Data are collected and assembled by numerous service providers. Solutions so far have focused on the service-provider side, storing encrypted or transformed data that can still be used for analysis. This has a major flaw, as it relies on the service providers to actually do so. The user has no way of actively protecting his or her privacy. In this work, we suggest a new approach that empowers the user to take advantage of the same tool the other side has, namely data mining, to produce data that obfuscate the user's profile. We apply this approach to search engine queries and use the feedback of the search engine, in the form of personalized advertisements, in an algorithm similar to reinforcement learning to generate new queries that potentially confuse the search engine. We evaluated the approach using a real-world data set. While evaluation is hard, we achieve results indicating that it is possible to influence the profile that the search engine generates of the user. This shows that it is feasible to defend a user's privacy from a new and more practical perspective.},
keywords = {adversarial learning, machine learning, personalized ads, privacy, reinforcement learning, search engines},
pubstate = {published},
tppubtype = {article}
}
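To make the feedback-driven obfuscation idea in the abstract concrete, the sketch below uses an epsilon-greedy bandit as a simple stand-in for the reinforcement-learning-style loop described above. The reward function is a placeholder assumption for the real signal obtained from observing personalised advertisements, and the topic list and all names are hypothetical, not taken from the paper.

# Minimal sketch, not the authors' implementation: epsilon-greedy selection of
# obfuscating query topics, rewarded by how strongly the observed ad profile shifts.
import random

TOPICS = ["gardening", "astronomy", "cooking", "woodworking"]  # hypothetical decoy topics

def ad_profile_shift(topic):
    # Placeholder: in the paper's setting this reward would come from observing
    # how the personalised ads change after issuing queries on the chosen topic.
    return random.random()

def obfuscate(steps=200, epsilon=0.1):
    value = {t: 0.0 for t in TOPICS}   # running reward estimate per topic
    count = {t: 0 for t in TOPICS}
    for _ in range(steps):
        if random.random() < epsilon:
            topic = random.choice(TOPICS)       # explore a random decoy topic
        else:
            topic = max(TOPICS, key=value.get)  # exploit the best topic so far
        reward = ad_profile_shift(topic)        # issue queries, observe ad feedback
        count[topic] += 1
        value[topic] += (reward - value[topic]) / count[topic]  # incremental mean
    return value

print(obfuscate())

Over time the generated queries concentrate on whichever decoy topics most disturb the inferred profile, which mirrors the obfuscation goal described in the abstract.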