@unpublished{graffeuille2024lake, title = {Lake by Lake, Globally: Enhancing Water Quality Remote Sensing with Multi-Task Learning Models}, author = {Olivier Graffeuille and Moritz Lehmann and Matthew Allan and J\"{o}rg Wicker and Yun Sing Koh }, doi = {10.2139/ssrn.4762429}, issn = {1556-5068}, year = {2024}, date = {2024-03-17}, abstract = {The estimation of water quality from satellite remote sensing data in inland and coastal waters is an important yet challenging problem. Recent collaborative efforts have produced large global datasets with sufficient data to train machine learning models with high accuracy. In this work, we investigate global water quality remote sensing models at the granularity of individual water bodies. We introduce Multi-Task Learning (MTL), a machine learning technique that learns a distinct model for each water body in the dataset from few data points by sharing knowledge between models. This approach allows MTL to learn water body differences, leading to more accurate predictions. We train and validate our model on the GLORIA dataset of in situ measured remote sensing reflectance and three water quality indicators: chlorophyll$a$, total suspended solids and coloured dissolved organic matter. MTL outperforms other machine learning models by 8-31% in Root Mean Squared Error (RMSE) and 12-34% in Mean Absolute Percentage Error (MAPE). Training on a smaller dataset of chlorophyll$a$ measurements from New Zealand lakes with simultaneous Sentinel-3 OLCI remote sensing reflectance further demonstrates the effectiveness of our model when applied regionally. Additionally, we investigate the performance of machine learning models at estimating the variation in water quality indicators within individual water bodies. Our results reveal that overall performance metrics overestimate the quality of model fit of models trained on a large number of water bodies due to the large between-water body variability of water quality indicators. 
In our experiments, when estimating TSS or CDOM, all models excluding multi-task learning fail to learn within-water body variability, and fail to outperform a naive baseline approach, suggesting that these models may be of limited usefulness to practitioners monitoring water quality. Overall, our research highlights the importance of considering water body differences in water quality remote sensing research for both model design and evaluation. }, keywords = {inland and coastal waters, machine learning, multi-task learning, remote sensing, water quality}, pubstate = {forthcoming}, tppubtype = {unpublished} } @inproceedings{kim2024attacking, title = {Attacking the Loop: Adversarial Attacks on Graph-based Loop Closure Detection}, author = {Jonathan Kim and Martin Urschler and Pat Riddle and J\"{o}rg Wicker }, url = {http://arxiv.org/abs/2312.06991 https://doi.org/10.48550/arxiv.2312.06991}, doi = {10.5220/0012313100003660}, year = {2024}, date = {2024-02-27}, urldate = {2024-02-27}, booktitle = {Proceedings of the 19th International Joint Conference on Computer Vision, Imaging and Computer Graphics Theory and Applications}, volume = {4}, pages = {90-97}, abstract = {With the advancement in robotics, it is becoming increasingly common for large factories and warehouses to incorporate visual SLAM (vSLAM) enabled automated robots that operate closely next to humans. This makes any adversarial attacks on vSLAM components potentially detrimental to humans working alongside them. Loop Closure Detection (LCD) is a crucial component in vSLAM that minimizes the accumulation of drift in mapping, since even a small drift can accumulate into a significant drift over time. Previous work by Kim et al. , unified visual features and semantic objects into a single graph structure for finding loop closure candidates. 
While this provided a performance improvement over visual feature-based LCD, it also created a single point of vulnerability for potential graph-based adversarial attacks. Unlike previously reported visual-patch based attacks, small graph perturbations are far more challenging to detect, making them a more significant threat. In this paper, we present Adversarial-LCD, a novel black-box evasion attack framework that employs an eigencentrality-based perturbation method and an SVM-RBF surrogate model with a Weisfeiler-Lehman feature extractor for attacking graph-based LCD. Our evaluation shows that the attack performance of Adversarial-LCD was superior to that of other machine learning surrogate algorithms, including SVM-linear, SVM-polynomial, and Bayesian classifier, demonstrating the effectiveness of our attack framework. Furthermore, we show that our eigencentrality-based perturbation method outperforms other algorithms, such as Random-walk and Shortest-path, highlighting the efficiency of Adversarial-LCD’s perturbation selection method.}, keywords = {adversarial defence, adversarial learning, machine learning, SLAM}, pubstate = {published}, tppubtype = {inproceedings} } @article{Long2023adducthunter, title = {AdductHunter: Identifying Protein-Metal Complex Adducts in Mass Spectra}, author = {Derek Long and Liam Eade and Katharina Dost and Samuel M Meier-Menches and David C Goldstone and Matthew P Sullivan and Christian Hartinger and J\"{o}rg Wicker and Katerina Taskova}, url = {https://adducthunter.wickerlab.org https://doi.org/10.21203/rs.3.rs-3322854/v1}, doi = {10.1186/s13321-023-00797-7}, issn = {1758-2946}, year = {2024}, date = {2024-02-06}, urldate = {2023-05-29}, journal = {Journal of Cheminformatics}, volume = {16}, issue = {1}, abstract = {Mass spectrometry (MS) is an analytical technique for molecule identification that can be used for investigating protein-metal complex interactions. 
Once the MS data is collected, the mass spectra are usually interpreted manually to identify the adducts formed as a result of the interactions between proteins and metal-based species. However, with increasing resolution, dataset size, and species complexity, the time required to identify adducts and the error-prone nature of manual assignment have become limiting factors in MS analysis. AdductHunter is a open-source web-based analysis tool that automates the peak identification process using constraint integer optimization to find feasible combinations of protein and fragments, and dynamic time warping to calculate the dissimilarity between the theoretical isotope pattern of a species and its experimental isotope peak distribution. Empirical evaluation on a collection of 22 unique MS datasetsshows fast and accurate identification of protein-metal complex adducts in deconvoluted mass spectra.}, keywords = {cheminformatics, computational sustainability, data mining, dynamic time warping, machine learning, mass spectrometry}, pubstate = {published}, tppubtype = {article} } @unpublished{lyu2023regional, title = {Regional Bias in Monolingual English Language Models}, author = {Jiachen Lyu and Katharina Dost and Yun Sing Koh and J\"{o}rg Wicker}, doi = {10.21203/rs.3.rs-3713494/v1}, year = {2023}, date = {2023-12-06}, urldate = {2023-12-06}, abstract = {In Natural Language Processing (NLP), pre-trained language models (LLMs) are widely employed and refined for various tasks. These models have shown considerable social and geographic biases creating skewed or even unfair representations of certain groups. Research focuses on biases toward L2 (English as a second language) regions but neglects bias within L1 (first language) regions. In this work, we ask if there is regional bias within L1 regions already inherent in pre-trained LLMs and, if so, what the consequences are in terms of downstream model performance. 
We contribute an investigation framework specifically tailored for low-resource regions, offering a method to identify bias without imposing strict requirements for labeled datasets. Our research reveals subtle geographic variations in the word embeddings of BERT, even in cultures traditionally perceived as similar. These nuanced features, once captured, have the potential to significantly impact downstream tasks. Generally, models exhibit comparable performance on datasets that share similarities, and conversely, performance may diverge when datasets differ in their nuanced features embedded within the language. It is crucial to note that estimating model performance solely based on standard benchmark datasets may not necessarily apply to the datasets with distinct features from the benchmark datasets. Our proposed framework plays a pivotal role in identifying and addressing biases detected in word embeddings, particularly evident in low-resource regions such as New Zealand.}, keywords = {bias, large language models, machine learning, nlp, regional bias, reliable machine learning}, pubstate = {forthcoming}, tppubtype = {unpublished} } @unpublished{hua2023systematic, title = {A Systematic Review of Aspect-based Sentiment Analysis: Domains, Methods, and Trends}, author = {Yan Cathy Hua and Paul Denny and J\"{o}rg Wicker and Katerina Taskova}, url = {https://arxiv.org/abs/2311.10777}, doi = {10.48550/arXiv.2311.10777}, year = {2023}, date = {2023-11-17}, urldate = {2023-11-17}, abstract = {Aspect-based Sentiment Analysis (ABSA) is a fine-grained type of sentiment analysis that identifies aspects and their associated opinions from a given text. With the surge of digital opinionated text data, ABSA gained increasing popularity for its ability to mine more detailed and targeted insights. 
Many review papers on ABSA subtasks and solution methodologies exist, however, few focus on trends over time or systemic issues relating to research application domains, datasets, and solution approaches. To fill the gap, this paper presents a Systematic Literature Review (SLR) of ABSA studies with a focus on trends and high-level relationships among these fundamental components. This review is one of the largest SLRs on ABSA, and also, to our knowledge, the first that systematically examines the trends and inter-relations among ABSA research and data distribution across domains and solution paradigms and approaches. Our sample includes 519 primary studies screened from 4191 search results without time constraints via an innovative automatic filtering process. Our quantitative analysis not only identifies trends in nearly two decades of ABSA research development but also unveils a systemic lack of dataset and domain diversity as well as domain mismatch that may hinder the development of future ABSA research. We discuss these findings and their implications and propose suggestions for future research. }, keywords = {aspect-based sentiment analysis, machine learning, nlp, review}, pubstate = {forthcoming}, tppubtype = {unpublished} } @unpublished{dost2023defining, title = {Defining Applicability Domain in Biodegradation Pathway Prediction}, author = {Katharina Dost and Jason Tam and Tim Lorsbach and Sebastian Schmidt and J\"{o}rg Wicker}, doi = {https://doi.org/10.21203/rs.3.rs-3587632/v1}, year = {2023}, date = {2023-11-10}, urldate = {2023-11-10}, abstract = {When developing a new chemical, investigating its long-term influences on the environment is crucial to prevent harm. Unfortunately, these experiments are time-consuming. In silico methods can learn from already obtained data to predict biotransformation pathways, and thereby help focus all development efforts on only the most promising chemicals. 
As all data-based models, these predictors will output pathway predictions for all input compounds in a suitable format, however, these predictions will be faulty unless the model has seen similar compounds during the training process. A common approach to prevent this for other types of models is to define an Applicability Domain for the model that makes predictions only for in-domain compounds and rejects out-of-domain ones. Nonetheless, although exploration of the compound space is particularly interesting in the development of new chemicals, no Applicability Domain method has been tailored to the specific data structure of pathway predictions yet. In this paper, we are the first to define Applicability Domain specialized in biodegradation pathway prediction. Assessing a model’s reliability from different angles, we suggest a three-stage approach that checks for applicability, reliability, and decidability of the model for a queried compound and only allows it to output a prediction if all three stages are passed. Experiments confirm that our proposed technique reliably rejects unsuitable compounds and therefore improves the safety of the biotransformation pathway predictor. }, keywords = {applicability domain, biodegradation, cheminformatics, computational sustainability, enviPath, machine learning, metabolic pathways, reliable machine learning}, pubstate = {forthcoming}, tppubtype = {unpublished} } @unpublished{nokey, title = {Advancements in Biotransformation Pathway Prediction: Enhancements, Datasets, and Novel Functionalities in enviPath}, author = {Jasmin Hafner and Tim Lorsbach and Sebastian Schmidt and Liam Brydon and Katharina Dost and Kunyang Zhang and Kathrin Fenner and J\"{o}rg Wicker}, doi = {10.21203/rs.3.rs-3607847/v1}, year = {2023}, date = {2023-11-03}, urldate = {2023-11-03}, abstract = {enviPath is a widely used database and prediction system for microbial biotransformation pathways of primarily xenobiotic compounds. 
Data and prediction system are freely available both via a web interface and a public REST API. Since its initial release in 2016, we extended the data available in enviPath and improved the performance of the prediction system and usability of the overall system. We now provide three diverse data sets, covering microbial biotransformation in different environments and under different experimental conditions. This also enabled developing a pathway prediction model that is applicable to a more diverse set of chemicals. In the prediction engine, we implemented a new evaluation tailored towards pathway prediction, that returns a more honest and holistic view on the performance. We also implemented a novel applicability domain algorithm, which allows the user to estimate how well the model will perform on their data. Finally, we improved the implementation to speed up the overall system and provide new functionality via a plugin system. Overall, enviPath has developed into a reliable database and prediction system with a unique use case in research in microbial biotransformations. }, keywords = {applicability domain, biodegradation, bioinformatics, cheminformatics, computational sustainability, enviPath, linked data, machine learning, multi-label classification, Process-based modeling}, pubstate = {forthcoming}, tppubtype = {unpublished} } @unpublished{Chang2023poison, title = {Poison is Not Traceless: Fully-Agnostic Detection of Poisoning Attacks }, author = {Xinglong Chang and Katharina Dost and Gillian Dobbie and J\"{o}rg Wicker}, url = {http://arxiv.org/abs/2310.16224}, doi = {10.48550/arXiv.2310.16224}, year = {2023}, date = {2023-10-23}, urldate = {2023-10-23}, abstract = {The performance of machine learning models depends on the quality of the underlying data. Malicious actors can attack the model by poisoning the training data. 
Current detectors are tied to either specific data types, models, or attacks, and therefore have limited applicability in real-world scenarios. This paper presents a novel fully-agnostic framework, Diva (Detecting InVisible Attacks), that detects attacks solely relying on analyzing the potentially poisoned data set. Diva is based on the idea that poisoning attacks can be detected by comparing the classifier’s accuracy on poisoned and clean data and pre-trains a meta-learner using Complexity Measures to estimate the otherwise unknown accuracy on a hypothetical clean dataset. The framework applies to generic poisoning attacks. For evaluation purposes, in this paper, we test Diva on label-flipping attacks.}, keywords = {adversarial defence, adversarial learning, machine learning, reliable machine learning}, pubstate = {forthcoming}, tppubtype = {unpublished} } @unpublished{Chang2023fast, title = {Fast Adversarial Label-Flipping Attack on Tabular Data}, author = {Xinglong Chang and Gillian Dobbie and J\"{o}rg Wicker}, url = {https://arxiv.org/abs/2310.10744}, doi = {10.48550/arXiv.2310.10744}, year = {2023}, date = {2023-10-16}, urldate = {2023-10-16}, abstract = {Machine learning models are increasingly used in fields that require high reliability such as cybersecurity. However, these models remain vulnerable to various attacks, among which the adversarial label-flipping attack poses significant threats. In label-flipping attacks, the adversary maliciously flips a portion of training labels to compromise the machine learning model. This paper raises significant concerns as these attacks can camouflage a highly skewed dataset as an easily solvable classification problem, often misleading machine learning practitioners into lower defenses and miscalculations of potential risks. This concern amplifies in tabular data settings, where identifying true labels requires expertise, allowing malicious label-flipping attacks to easily slip under the radar. 
To demonstrate this risk is inherited in the adversary\'s objective, we propose FALFA (Fast Adversarial Label-Flipping Attack), a novel efficient attack for crafting adversarial labels. FALFA is based on transforming the adversary\'s objective and employs linear programming to reduce computational complexity. Using ten real-world tabular datasets, we demonstrate FALFA\'s superior attack potential, highlighting the need for robust defenses against such threats. }, keywords = {adversarial learning, machine learning, reliable machine learning}, pubstate = {forthcoming}, tppubtype = {unpublished} } @inproceedings{Pullar-Strecker2023memento, title = {Memento: Facilitating Effortless, Efficient, and Reliable ML Experiments}, author = {Zac Pullar-Strecker and Xinglong Chang and Liam Brydon and Ioannis Ziogas and Katharina Dost and J\"{o}rg Wicker}, editor = {Gianmarco De Francisci Morales and Claudia Perlich and Natali Ruchansky and Nicolas Kourtellis and Elena Baralis and Francesco Bonchi }, url = {https://arxiv.org/abs/2304.09175 https://github.com/wickerlab/memento}, doi = {10.1007/978-3-031-43430-3_21}, isbn = {978-3-031-43430-3}, year = {2023}, date = {2023-09-17}, urldate = {2023-09-17}, booktitle = {Machine Learning and Knowledge Discovery in Databases: Applied Data Science and Demo Track}, journal = {Lecture Notes in Computer Science}, pages = {310-314}, publisher = {Springer Nature Switzerland}, address = {Cham}, abstract = { Running complex sets of machine learning experiments is challenging and time-consuming due to the lack of a unified framework. This leaves researchers forced to spend time implementing necessary features such as parallelization, caching, and checkpointing themselves instead of focussing on their project. To simplify the process, in our paper, we introduce Memento, a Python package that is designed to aid researchers and data scientists in the efficient management and execution of computationally intensive experiments. 
Memento has the capacity to streamline any experimental pipeline by providing a straightforward configuration matrix and the ability to concurrently run experiments across multiple threads. Code related to this paper is available at: https://github.com/wickerlab/memento.}, keywords = {experimental pipeline, parallel computing, reliable machine learning}, pubstate = {published}, tppubtype = {inproceedings} } @article{Miller2023denovo, title = {De novo network analysis reveals autism causal genes and developmental links to co-occurring traits}, author = {Catriona J Miller and Evgenija Golovina and J\"{o}rg Wicker and Jessie C Jacobson and Justin M O\'Sullivan}, url = {https://www.medrxiv.org/content/10.1101/2023.04.24.23289060v1}, doi = {10.26508/lsa.202302142}, year = {2023}, date = {2023-08-08}, urldate = {2023-08-08}, journal = {Life Science Alliance}, volume = {6}, number = {10}, abstract = {Autism is a complex neurodevelopmental condition that manifests in various ways. Autism is often accompanied by other conditions, such as attention-deficit/hyperactivity disorder and schizophrenia, which can complicate diagnosis and management. Although research has investigated the role of specific genes in autism, their relationship with co-occurring traits is not fully understood. To address this, we conducted a two-sample Mendelian randomisation analysis and identified four genes located at the 17q21.31 locus that are putatively causal for autism in fetal cortical tissue (LINC02210, LRRC37A4P, RP11-259G18.1, and RP11-798G7.6). LINC02210 was also identified as putatively causal for autism in adult cortical tissue. By integrating data from expression quantitative trait loci, genes and protein interactions, we identified that the 17q21.31 locus contributes to the intersection between autism and other neurological traits in fetal cortical tissue. We also identified a distinct cluster of co-occurring traits, including cognition and worry, linked to the genetic loci at 3p21.1. 
Our findings provide insights into the relationship between autism and co-occurring traits, which could be used to develop predictive models for more accurate diagnosis and better clinical management.}, keywords = {autism, bioinformatics, genomics}, pubstate = {published}, tppubtype = {article} } @inproceedings{chang2021baard, title = {BAARD: Blocking Adversarial Examples by Testing for Applicability, Reliability and Decidability}, author = {Luke Chang and Katharina Dost and Kaiqi Zhai and Ambra Demontis and Fabio Roli and Gillian Dobbie and J\"{o}rg Wicker}, editor = {Hisashi Kashima and Tsuyoshi Ide and Wen-Chih Peng}, url = {https://arxiv.org/abs/2105.00495 https://github.com/wickerlab/baard}, doi = {10.1007/978-3-031-33374-3_1}, issn = {978-3-031-33374-3}, year = {2023}, date = {2023-05-27}, urldate = {2023-05-27}, booktitle = {The 27th Pacific-Asia Conference on Knowledge Discovery and Data Mining (PAKDD)}, journal = {The 27th Pacific-Asia Conference on Knowledge Discovery and Data Mining (PAKDD)}, pages = {3-14}, publisher = {Springer Nature Switzerland}, address = {Cham}, abstract = {Adversarial defenses protect machine learning models from adversarial attacks, but are often tailored to one type of model or attack. The lack of information on unknown potential attacks makes detecting adversarial examples challenging. Additionally, attackers do not need to follow the rules made by the defender. To address this problem, we take inspiration from the concept of Applicability Domain in cheminformatics. Cheminformatics models struggle to make accurate predictions because only a limited number of compounds are known and available for training. Applicability Domain defines a domain based on the known compounds and rejects any unknown compound that falls outside the domain. Similarly, adversarial examples start as harmless inputs, but can be manipulated to evade reliable classification by moving outside the domain of the classifier. 
We are the first to identify the similarity between Applicability Domain and adversarial detection. Instead of focusing on unknown attacks, we focus on what is known, the training data. We propose a simple yet robust triple-stage data-driven framework that checks the input globally and locally, and confirms that they are coherent with the model’s output. This framework can be applied to any classification model and is not limited to specific attacks. We demonstrate these three stages work as one unit, effectively detecting various attacks, even for a white-box scenario.}, keywords = {adversarial defence, adversarial learning, applicability domain, cheminformatics, evasion attacks, machine learning}, pubstate = {published}, tppubtype = {inproceedings} } @inproceedings{Chen2023targeted, title = {Targeted Attacks on Time Series Forecasting}, author = {Zeyu Chen and Katharina Dost and Xuan Zhu and Xinglong Chang and Gillian Dobbie and J\"{o}rg Wicker}, editor = {Hisashi Kashima and Tsuyoshi Ide and Wen-Chih Peng}, url = {https://github.com/wickerlab/nvita}, doi = {10.1007/978-3-031-33383-5_25}, issn = {978-3-031-33383-5}, year = {2023}, date = {2023-05-26}, urldate = {2023-05-25}, booktitle = {The 27th Pacific-Asia Conference on Knowledge Discovery and Data Mining (PAKDD)}, pages = {314-327}, publisher = {Springer Nature Switzerland}, address = {Cham}, abstract = {Abstract. Time Series Forecasting (TSF) is well established in domains dealing with temporal data to predict future events yielding the basis for strategic decision-making. Previous research indicated that forecasting models are vulnerable to adversarial attacks, that is, maliciously crafted perturbations of the original data with the goal of altering the model’s predictions. However, attackers targeting specific outcomes pose a substantially more severe threat as they could manipulate the model and bend it to their needs. 
Regardless, there is no systematic approach for targeted adversarial learning in the TSF domain yet. In this paper, we introduce targeted attacks on TSF in a systematic manner. We establish a new experimental design standard regarding attack goals and perturbation control for targeted adversarial learning on TSF. For this purpose, we present a novel indirect sparse black-box evasion attack on TSF, nVita. Additionally, we adapt the popular white-box attacks Fast Gradient Sign Method (FGSM) and Basic Iterative Method (BIM). Our experiments confirm not only that all three methods are effective but also that current state-of-the-art TSF models are indeed susceptible to attacks. These results motivate future research in this area to achieve higher reliability of forecasting models.}, keywords = {adversarial learning, forecasting, machine learning, time series}, pubstate = {published}, tppubtype = {inproceedings} } @misc{Wicker2023cinema, title = {Cinema Experiments 2013}, author = { J\"{o}rg Wicker and Nicolas Krauter and Bettina Derstorff and Christof St\"{o}nner and Efstratios Bourtsoukidis and Thomas Kl\"{u}pfel and Jonathan Williams and Stefan Kramer}, url = {https://auckland.figshare.com/articles/dataset/Cinema_Experiments_2013/22777364}, doi = {10.17608/k6.auckland.22777364.v3}, year = {2023}, date = {2023-05-23}, keywords = {atmospheric chemistry, cinema data mining, data mining, machine learning, smell of fear, sof}, pubstate = {published}, tppubtype = {misc} } @misc{St\"{o}nner2023cinema, title = {Cinema Experiments 2015}, author = { Christof St\"{o}nner and Achim Edtbauer and Bettina Derstorff and Efstratios Bourtsoukidis and Thomas Kl\"{u}pfel and J\"{o}rg Wicker and Jonathan Williams}, url = {https://auckland.figshare.com/articles/dataset/Cinema_Experiments_2015/22777352}, doi = {10.17608/k6.auckland.22777352.v2}, year = {2023}, date = {2023-05-23}, keywords = {cinema data mining, data mining, machine learning, smell of fear, sof}, pubstate = {published}, 
tppubtype = {misc} } @article{Dost2023Combatting, title = {Combatting over-specialization bias in growing chemical databases}, author = {Katharina Dost and Zac Pullar-Strecker and Liam Brydon and Kunyang Zhang and Jasmin Hafner and Pat Riddle and J\"{o}rg Wicker}, url = {https://jcheminf.biomedcentral.com/articles/10.1186/s13321-023-00716-w }, doi = {10.1186/s13321-023-00716-w}, issn = {1758-2946}, year = {2023}, date = {2023-05-19}, urldate = {2023-05-19}, journal = {Journal of Cheminformatics}, volume = {15}, issue = {1}, pages = {53}, abstract = {Background Predicting in advance the behavior of new chemical compounds can support the design process of new products by directing the research toward the most promising candidates and ruling out others. Such predictive models can be data-driven using Machine Learning or based on researchers’ experience and depend on the collection of past results. In either case: models (or researchers) can only make reliable assumptions about compounds that are similar to what they have seen before. Therefore, consequent usage of these predictive models shapes the dataset and causes a continuous specialization shrinking the applicability domain of all trained models on this dataset in the future, and increasingly harming model-based exploration of the space. Proposed solution In this paper, we propose cancels (CounterActiNg Compound spEciaLization biaS), a technique that helps to break the dataset specialization spiral. Aiming for a smooth distribution of the compounds in the dataset, we identify areas in the space that fall short and suggest additional experiments that help bridge the gap. Thereby, we generally improve the dataset quality in an entirely unsupervised manner and create awareness of potential flaws in the data. cancels does not aim to cover the entire compound space and hence retains a desirable degree of specialization to a specified research domain. 
Results An extensive set of experiments on the use-case of biodegradation pathway prediction not only reveals that the bias spiral can indeed be observed but also that cancels produces meaningful results. Additionally, we demonstrate that mitigating the observed bias is crucial as it cannot only intervene with the continuous specialization process, but also significantly improves a predictor’s performance while reducing the number of required experiments. Overall, we believe that cancels can support researchers in their experimentation process to not only better understand their data and potential flaws, but also to grow the dataset in a sustainable way. All code is available under github.com/KatDost/Cancels.}, keywords = {bias, biodegradation, cheminformatics, computational sustainability, data mining, enviPath, machine learning, metabolic pathways, multi-label classification, reliable machine learning}, pubstate = {published}, tppubtype = {article} } @article{bensemann2023from, title = {From What You See to What We Smell: Linking Human Emotions to Bio-markers in Breath}, author = {Joshua Bensemann and Hasnain Cheena and David Tse Jung Huang and Elizabeth Broadbent and Jonathan Williams and J\"{o}rg Wicker}, url = {https://ieeexplore.ieee.org/document/10123109 https://doi.org/10.17608/k6.auckland.22777364 https://doi.org/10.17608/k6.auckland.22777352 }, doi = {10.1109/TAFFC.2023.3275216}, issn = {1949-3045}, year = {2023}, date = {2023-05-11}, urldate = {2023-05-11}, journal = {IEEE Transactions on Affective Computing}, pages = {1-13}, abstract = {Research has shown that the composition of breath can differ based on the human’s behavioral patterns and mental and physical states immediately before being collected. These breath-collection techniques have also been extended to observe the general processes occurring in groups of humans and can link them to what those groups are collectively experiencing. 
In this research, we applied machine learning techniques to the breath data collected from cinema audiences. These techniques included XGBOOST Regression, Hierarchical Clustering, and Item Basket analyses created using the Apriori algorithm. They were conducted to find associations between the biomarkers in the crowd’s breath and the movie’s audio-visual stimuli and thematic events. This analysis enabled us to directly link what the group was experiencing and their biological response to that experience. We first extracted visual and auditory features from a movie to achieve this. We compared it to the biomarkers in the crowd’s breath using regression and pattern mining techniques. Our results supported the theory that a crowd’s collective experience directly correlates to the biomarkers in the crowd’s breath. Consequently, these findings suggest that visual and auditory experiences have predictable effects on the human body that can be monitored without requiring expensive or invasive neuroimaging techniques.}, keywords = {biomarkers, breath analysis, cheminformatics, cinema data mining, emotional response analysis, machine learning, smell of fear}, pubstate = {published}, tppubtype = {article} } @article{Roeslin2023development, title = {Development of a Seismic Loss Prediction Model for Residential Buildings using Machine Learning \textendash Christchurch, New Zealand}, author = {Samuel Roeslin and Quincy Ma and Pavan Chigullapally and J\"{o}rg Wicker and Liam Wotherspoon}, url = {https://nhess.copernicus.org/articles/23/1207/2023/}, doi = {10.5194/nhess-23-1207-2023}, year = {2023}, date = {2023-03-22}, urldate = {2023-03-22}, journal = {Natural Hazards and Earth System Sciences}, volume = {23}, number = {3}, pages = {1207-1226}, abstract = {This paper presents a new framework for the seismic loss prediction of residential buildings in Christchurch, New Zealand. 
It employs data science techniques, geospatial tools, and machine learning (ML) trained on insurance claims data from the Earthquake Commission (EQC) collected following the 2010\textendash2011 Canterbury Earthquake Sequence (CES). The seismic loss prediction obtained from the ML model is shown to outperform the output from existing risk analysis tools for New Zealand for each of the main earthquakes of the CES. In addition to the prediction capabilities, the ML model delivered useful insights into the most important features contributing to losses during the CES. ML correctly highlighted that liquefaction significantly influenced buildings losses for the 22 February 2011 earthquake. The results are consistent with observations, engineering knowledge, and previous studies, confirming the potential of data science and ML in the analysis of insurance claims data and the development of seismic loss prediction models using empirical loss data.}, keywords = {computational sustainability, earthquakes, machine learning}, pubstate = {published}, tppubtype = {article} } @inproceedings{Kim2022closing, title = {Closing the Loop: Graph Networks to Unify Semantic Objects and Visual Features for Multi-object Scenes}, author = {Jonathan Kim and Martin Urschler and Pat Riddle and J\"{o}rg Wicker}, url = {https://ieeexplore.ieee.org/abstract/document/9981542}, doi = {10.1109/IROS47612.2022.9981542}, isbn = {978-1-6654-7927-1}, year = {2022}, date = {2022-10-20}, booktitle = {2022 IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS 2022)}, pages = {4352-4358}, abstract = {In Simultaneous Localization and Mapping (SLAM), Loop Closure Detection (LCD) is essential to minimize drift when recognizing previously visited places. Visual Bag- of-Words (vBoW) has been an LCD algorithm of choice for many state-of-the-art SLAM systems. 
It uses a set of visual features to provide robust place recognition but fails to perceive the semantics or spatial relationship between feature points. Previous work has mainly focused on addressing these issues by combining vBoW with semantic and spatial information from objects in the scene. However, they are unable to exploit spatial information of local visual features and lack a structure that unifies semantic objects and visual features, therefore limiting the symbiosis between the two components. This paper proposes SymbioLCD2, which creates a unified graph structure to integrate semantic objects and visual features symbiotically. Our novel graph-based LCD system utilizes the unified graph structure by applying a Weisfeiler-Lehman graph kernel with temporal constraints to robustly predict loop closure candidates. Evaluation of the proposed system shows that having a unified graph structure incorporating semantic objects and visual features improves LCD prediction accuracy, illustrating that the proposed graph structure provides a strong symbiosis between these two complementary components. It also outperforms other Machine Learning algorithms - such as SVM, Decision Tree, Random Forest, Neural Network and GNN based Graph Matching Networks. 
Furthermore, it has shown good performance in detecting loop closure candidates earlier than state-of-the-art SLAM systems, demonstrating that extended semantic and spatial awareness from the unified graph structure significantly impacts LCD performance.}, keywords = {machine learning, SLAM}, pubstate = {published}, tppubtype = {inproceedings} } @article{Pullar-Strecker2022hitting, title = {Hitting the Target: Stopping Active Learning at the Cost-Based Optimum}, author = {Zac Pullar-Strecker and Katharina Dost and Eibe Frank and J\"{o}rg Wicker}, editor = {Yu-Feng Li and Prateek Jain}, url = {https://arxiv.org/abs/2110.03802}, doi = {10.1007/s10994-022-06253-1}, issn = {1573-0565}, year = {2022}, date = {2022-10-14}, urldate = {2022-10-14}, journal = {Machine Learning}, abstract = {Active learning allows machine learning models to be trained using fewer labels while retaining similar performance to traditional supervised learning. An active learner selects the most informative data points, requests their labels, and retrains itself. While this approach is promising, it raises the question of how to determine when the model is ‘good enough’ without the additional labels required for traditional evaluation. Previously, different stopping criteria have been proposed aiming to identify the optimal stopping point. Yet, optimality can only be expressed as a domain-dependent trade-off between accuracy and the number of labels, and no criterion is superior in all applications. As a further complication, a comparison of criteria for a particular real-world application would require practitioners to collect additional labelled data they are aiming to avoid by using active learning in the first place. This work enables practitioners to employ active learning by providing actionable recommendations for which stopping criteria are best for a given real-world scenario. 
We contribute the first large-scale comparison of stopping criteria for pool-based active learning, using a cost measure to quantify the accuracy/label trade-off, public implementations of all stopping criteria we evaluate, and an open-source framework for evaluating stopping criteria. Our research enables practitioners to substantially reduce labeling costs by utilizing the stopping criterion which best suits their domain.}, keywords = {active learning, data labelling, machine learning, stopping criteria}, pubstate = {published}, tppubtype = {article} } @inproceedings{Poonawala-Lohani2022geographic, title = {Geographic Ensembles of Observations using Randomised Ensembles of Autoregression Chains: Ensemble methods for spatio-temporal Time Series Forecasting of Influenza-like Illness}, author = {Nooriyan Poonawala-Lohani and Pat Riddle and Mehnaz Adnan and J\"{o}rg Wicker}, doi = {10.1145/3535508.3545562}, isbn = {9781450393867}, year = {2022}, date = {2022-08-07}, pages = {1-7}, publisher = {Association for Computing Machinery}, address = {New York, NY, USA}, abstract = {Influenza is a communicable respiratory illness that can cause serious public health hazards. Flu surveillance in New Zealand tracks case counts from various District health boards (DHBs) in the country to monitor the spread of influenza in different geographic locations. Many factors contribute to the spread of the influenza across a geographic region, and it can be challenging to forecast cases in one region without taking into account case numbers in another region. This paper proposes a novel ensemble method called Geographic Ensembles of Observations using Randomised Ensembles of Autoregression Chains (GEO-Reach). GEO-Reach is an ensemble technique that uses a two layer approach to utilise interdependence of historical case counts between geographic regions in New Zealand. 
This work extends a previously published method by the authors called Randomized Ensembles of Auto-regression chains (Reach). State-of-the-art forecasting models look at studying the spread of the virus. They focus on accurate forecasting of cases for a location using historical case counts for the same location and other data sources based on human behaviour such as movement of people across cities/geographic regions. This new approach is evaluated using Influenza like illness (ILI) case counts in 7 major regions in New Zealand from the years 2015-2019 and compares its performance with other standard methods such as Dante, ARIMA, Autoregression and Random Forests. The results demonstrate that the proposed method performed better than baseline methods when applied to this multi-variate time series forecasting problem.}, keywords = {bioinformatics, computational sustainability, dynamic time warping, forecasting, influenza, machine learning, medicine, time series}, pubstate = {published}, tppubtype = {inproceedings} } @inproceedings{graffeuille2022semi, title = {Semi-Supervised Conditional Density Estimation with Wasserstein Laplacian Regularisation}, author = {Olivier Graffeuille and Yun Sing Koh and J\"{o}rg Wicker and Moritz Lehmann}, url = {https://ojs.aaai.org/index.php/AAAI/article/view/20630}, doi = {10.1609/aaai.v36i6.20630}, year = {2022}, date = {2022-06-28}, urldate = {2022-02-22}, booktitle = {Proceeding of the Thirty-Sixth AAAI Conference on Artificial Intelligence}, volume = {36}, number = {6}, pages = {6746-6754}, abstract = {Conditional Density Estimation (CDE) has wide-reaching applicability to various real-world problems, such as spatial density estimation and environmental modelling. CDE estimates the probability density of a random variable rather than a single value and can thus model uncertainty and inverse problems. 
This task is inherently more complex than regression, and many algorithms suffer from overfitting, particularly when modelled with few labelled data points. For applications where unlabelled data is abundant but labelled data is scarce, we propose Wasserstein Laplacian Regularisation, a semi-supervised learning framework that allows CDE algorithms to leverage these unlabelled data. The framework minimises an objective function which ensures that the learned model is smooth along the manifold of the underlying data, as measured by Wasserstein distance. When applying our framework to Mixture Density Networks, the resulting semi-supervised algorithm can achieve similar performance to a supervised model with up to three times as many labelled data points on baseline datasets. We additionally apply our technique to the problem of remote sensing for chlorophyll-a estimation in inland waters.}, keywords = {classification, computational sustainability, machine learning, semi-supervised learning}, pubstate = {published}, tppubtype = {inproceedings} } @inproceedings{dost2022divide, title = {Divide and Imitate: Multi-Cluster Identification and Mitigation of Selection Bias}, author = {Katharina Dost and Hamish Duncanson and Ioannis Ziogas and Pat Riddle and J\"{o}rg Wicker}, url = {https://link.springer.com/chapter/10.1007/978-3-031-05936-0_12 https://github.com/KatDost/Mimic https://pypi.org/project/imitatebias}, doi = {10.1007/978-3-031-05936-0_12}, isbn = {978-3-031-05935-3}, year = {2022}, date = {2022-05-16}, urldate = {2022-05-16}, booktitle = {26th Pacific-Asia Conference on Knowledge Discovery and Data Mining (PAKDD2022)}, pages = {149-160}, publisher = {Springer-Verlag}, address = {Berlin, Heidelberg}, abstract = {Machine Learning can help overcome human biases in decision making by focusing on purely logical conclusions based on the training data. 
If the training data is biased, however, that bias will be transferred to the model and remains undetected as the performance is validated on a test set drawn from the same biased distribution. Existing strategies for selection bias identification and mitigation generally rely on some sort of knowledge of the bias or the ground-truth. An exception is the Imitate algorithm that assumes no knowledge but comes with a strong limitation: It can only model datasets with one normally distributed cluster per class. In this paper, we introduce a novel algorithm, Mimic, which uses Imitate as a building block but relaxes this limitation. By allowing mixtures of multivariate Gaussians, our technique is able to model multi-cluster datasets and provide solutions for a substantially wider set of problems. Experiments confirm that Mimic not only identifies potential biases in multi-cluster datasets which can be corrected early on but also improves classifier performance.}, keywords = {bias, clustering, machine learning}, pubstate = {published}, tppubtype = {inproceedings} } @misc{Kim2022, title = {SymbioLCD - Datasets}, author = {Jonathan Kim and Martin Urschler and Pat Riddle and J\"{o}rg Wicker}, url = {https://auckland.figshare.com/articles/dataset/SymbioLCD_-_Datasets/14958228}, doi = {10.17608/k6.auckland.14958228.v1}, year = {2022}, date = {2022-01-18}, howpublished = {data set}, keywords = {machine learning, SLAM}, pubstate = {published}, tppubtype = {misc} } @inproceedings{poonawala-lohani2022novel, title = {A Novel Approach for Time Series Forecasting of Influenza-like Illness Using a Regression Chain Method}, author = {Nooriyan Poonawala-Lohani and Pat Riddle and Mehnaz Adnan and J\"{o}rg Wicker}, editor = {Russ Altman and Keith Dunker and Lawrence Hunter and Marylyn Ritchie and Tiffany Murray and Teri Klein}, url = {https://www.worldscientific.com/doi/abs/10.1142/9789811250477_0028 http://psb.stanford.edu/psb-online/proceedings/psb22/poorawala-lohani.pdf}, doi = 
{10.1142/9789811250477_0028}, year = {2022}, date = {2022-01-03}, urldate = {2022-01-03}, booktitle = {Pacific Symposium on Biocomputing}, volume = {27}, pages = {301-312}, abstract = {Influenza is a communicable respiratory illness that can cause serious public health hazards. Due to its huge threat to the community, accurate forecasting of Influenza-like-illness (ILI) can diminish the impact of an influenza season by enabling early public health interventions. Current forecasting models are limited in their performance, particularly when using a longer forecasting window. To support better forecasts over a longer forecasting window, we propose to use additional features such as weather data. Commonly used methods to fore-cast ILI, including statistical methods such as ARIMA, limit prediction performance when using additional data sources that might have complex non-linear associations with ILI incidence. This paper proposes a novel time series forecasting method, Randomized Ensembles of Auto-regression chains (Reach). Reach implements an ensemble of random chains for multi-step time series forecasting. This new approach is evaluated on ILI case counts in Auckland, New Zealand from the years 2015-2018 and compared to other standard methods. 
The results demonstrate that the proposed method performed better than baseline methods when applied to this multi-variate time series forecasting problem.}, keywords = {computational sustainability, forecasting, influenza, machine learning, time series}, pubstate = {published}, tppubtype = {inproceedings} } @inproceedings{kim2021symbiolcd, title = {SymbioLCD: Ensemble-Based Loop Closure Detection using CNN-Extracted Objects and Visual Bag-of-Words}, author = {Jonathan Kim and Martin Urschler and Pat Riddle and J\"{o}rg Wicker}, url = {https://ieeexplore.ieee.org/abstract/document/9636622 http://arxiv.org/abs/2110.11491}, doi = {10.1109/IROS51168.2021.9636622}, year = {2021}, date = {2021-09-27}, urldate = {2021-09-27}, booktitle = {2021 IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS)}, pages = {5425-5425}, abstract = {Loop closure detection is an essential tool of Simultaneous Localization and Mapping (SLAM) to minimize drift in its localization. Many state-of-the-art loop closure detection (LCD) algorithms use visual Bag-of-Words (vBoW), which is robust against partial occlusions in a scene but cannot perceive the semantics or spatial relationships between feature points. CNN object extraction can address those issues, by providing semantic labels and spatial relationships between objects in a scene. Previous work has mainly focused on replacing vBoW with CNN derived features. In this paper we propose SymbioLCD, a novel ensemble-based LCD that utilizes both CNN-extracted objects and vBoW features for LCD candidate prediction. When used in tandem, the added elements of object semantics and spatial-awareness creates a more robust and symbiotic loop closure detection system. 
The proposed SymbioLCD uses scale-invariant spatial and semantic matching, Hausdorff distance with temporal constraints, and a Random Forest that utilizes combined information from both CNN-extracted objects and vBoW features for predicting accurate loop closure candidates. Evaluation of the proposed method shows it outperforms other Machine Learning (ML) algorithms - such as SVM, Decision Tree and Neural Network, and demonstrates that there is a strong symbiosis between CNN-extracted object information and vBoW features which assists accurate LCD candidate prediction. Furthermore, it is able to perceive loop closure candidates earlier than state-of-the-art SLAM algorithms, utilizing added spatial and semantic information from CNN-extracted objects.}, keywords = {machine learning, SLAM}, pubstate = {published}, tppubtype = {inproceedings} } @article{tam2021holisticb, title = {Holistic Evaluation of Biodegradation Pathway Prediction: Assessing Multi-Step Reactions and Intermediate Products}, author = {Jason Tam and Tim Lorsbach and Sebastian Schmidt and J\"{o}rg Wicker}, url = {https://jcheminf.biomedcentral.com/articles/10.1186/s13321-021-00543-x https://chemrxiv.org/articles/preprint/Holistic_Evaluation_of_Biodegradation_Pathway_Prediction_Assessing_Multi-Step_Reactions_and_Intermediate_Products/14315963 https://dx.doi.org/10.26434/chemrxiv.14315963}, doi = {10.1186/s13321-021-00543-x}, year = {2021}, date = {2021-09-03}, urldate = {2021-09-03}, journal = {Journal of Cheminformatics}, volume = {13}, number = {1}, pages = {63}, abstract = {The prediction of metabolism and biotransformation pathways of xenobiotics is a highly desired tool in environmental sciences, drug discovery, and (eco)toxicology. Several systems predict single transformation steps or complete pathways as series of parallel and subsequent steps. Their performance is commonly evaluated on the level of a single transformation step. 
Such an approach cannot account for some specific challenges that are caused by specific properties of biotransformation experiments. That is, missing transformation products in the reference data that occur only in low concentrations, e.g. transient intermediates or higher-generation metabolites. Furthermore, some rule-based prediction systems evaluate the performance only based on the defined set of transformation rules. Therefore, the performance of these models cannot be directly compared. In this paper, we introduce a new evaluation framework that extends the evaluation of biotransformation prediction from single transformations to whole pathways, taking into account multiple generations of metabolites. We introduce a procedure to address transient intermediates and propose a weighted scoring system that acknowledges the uncertainty of higher-generation metabolites. We implemented this framework in enviPath and demonstrate its strict performance metrics on predictions of in vitro biotransformation and degradation of xenobiotics in soil. Our approach is model-agnostic and can be transferred to other prediction systems. 
It is also capable of revealing knowledge gaps in terms of incompletely defined sets of transformation rules.}, keywords = {biodegradation, cheminformatics, computational sustainability, data mining, enviPath, machine learning, metabolic pathways}, pubstate = {published}, tppubtype = {article} } @article{stepisnik2021comprehensive, title = {A comprehensive comparison of molecular feature representations for use in predictive modeling}, author = {Toma\v{z} Stepi\v{s}nik and Bla\v{z} \v{S}krlj and J\"{o}rg Wicker and Dragi Kocev}, url = {http://www.sciencedirect.com/science/article/pii/S001048252030528X}, doi = {10.1016/j.compbiomed.2020.104197}, issn = {0010-4825}, year = {2021}, date = {2021-03-01}, journal = {Computers in Biology and Medicine}, volume = {130}, pages = {104197}, abstract = {Machine learning methods are commonly used for predicting molecular properties to accelerate material and drug design. An important part of this process is deciding how to represent the molecules. Typically, machine learning methods expect examples represented by vectors of values, and many methods for calculating molecular feature representations have been proposed. In this paper, we perform a comprehensive comparison of different molecular features, including traditional methods such as fingerprints and molecular descriptors, and recently proposed learnable representations based on neural networks. Feature representations are evaluated on 11 benchmark datasets, used for predicting properties and measures such as mutagenicity, melting points, activity, solubility, and IC50. Our experiments show that several molecular features work similarly well over all benchmark datasets. The ones that stand out most are Spectrophores, which give significantly worse performance than other features on most datasets. Molecular descriptors from the PaDEL library seem very well suited for predicting physical properties of molecules. 
Despite their simplicity, MACCS fingerprints performed very well overall. The results show that learnable representations achieve competitive performance compared to expert based representations. However, task-specific representations (graph convolutions and Weave methods) rarely offer any benefits, even though they are computationally more demanding. Lastly, combining different molecular feature representations typically does not give a noticeable improvement in performance compared to individual feature representations.}, keywords = {biodegradation, cheminformatics, computational sustainability, data mining, enviPath, machine learning, metabolic pathways, molecular feature representation, toxicity}, pubstate = {published}, tppubtype = {article} } @inproceedings{chester2020balancing, title = {Balancing Utility and Fairness against Privacy in Medical Data}, author = {Andrew Chester and Yun Sing Koh and J\"{o}rg Wicker and Quan Sun and Junjae Lee}, url = {https://ieeexplore.ieee.org/abstract/document/9308226}, doi = {10.1109/SSCI47803.2020.9308226}, year = {2020}, date = {2020-12-01}, booktitle = {IEEE Symposium Series on Computational Intelligence (SSCI)}, pages = {1226-1233}, publisher = {IEEE}, abstract = {There are numerous challenges when designing algorithms that interact with sensitive data, such as, medical or financial records. One of these challenges is privacy. However, there is a tension between privacy, utility (model accuracy), and fairness. While de-identification techniques, such as generalisation and suppression, have been proposed to enable privacy protection, it comes with a cost, specifically to fairness and utility. Recent work on fairness in algorithm design defines fairness as a guarantee of similar outputs for "similar" input data. This notion is discussed in connection to de-identification. This research investigates the trade-off between privacy, fairness, and utility. 
In contrast, other work investigates the trade-off between privacy and utility of the data or accuracy of the model overall. In this research, we investigate the effects of two standard de-identification techniques, k-anonymity and differential privacy, on both utility and fairness. We propose two measures to calculate the trade-off between privacy-utility and privacy-fairness. Although other research has provided guarantees for privacy regarding utility, this research focuses on the trade-offs given set de-identification levels and relies on guarantees provided by the privacy preservation methods. We discuss the effects of de-identification on data of different characteristics, class imbalance and outcome imbalance. We evaluated this is on synthetic datasets and standard real-world datasets. As a case study, we analysed the Medical Expenditure Panel Survey dataset.}, keywords = {accuracy, computational sustainability, data mining, fairness, imbalance, machine learning, medicine, privacy}, pubstate = {published}, tppubtype = {inproceedings} } @inproceedings{dost2020your, title = {Your Best Guess When You Know Nothing: Identification and Mitigation of Selection Bias}, author = {Katharina Dost and Katerina Taskova and Pat Riddle and J\"{o}rg Wicker}, url = {https://ieeexplore.ieee.org/document/9338355 https://github.com/KatDost/Imitate https://pypi.org/project/imitatebias/}, doi = {10.1109/ICDM50108.2020.00115}, issn = {2374-8486}, year = {2020}, date = {2020-11-17}, urldate = {2020-11-17}, booktitle = {2020 IEEE International Conference on Data Mining (ICDM)}, pages = {996-1001}, publisher = {IEEE}, abstract = {Machine Learning typically assumes that training and test set are independently drawn from the same distribution, but this assumption is often violated in practice which creates a bias. Many attempts to identify and mitigate this bias have been proposed, but they usually rely on ground-truth information. 
But what if the researcher is not even aware of the bias? In contrast to prior work, this paper introduces a new method, Imitate, to identify and mitigate Selection Bias in the case that we may not know if (and where) a bias is present, and hence no ground-truth information is available. Imitate investigates the dataset\'s probability density, then adds generated points in order to smooth out the density and have it resemble a Gaussian, the most common density occurring in real-world applications. If the artificial points focus on certain areas and are not widespread, this could indicate a Selection Bias where these areas are underrepresented in the sample. We demonstrate the effectiveness of the proposed method in both, synthetic and real-world datasets. We also point out limitations and future research directions.}, keywords = {bias, data mining, fairness, machine learning}, pubstate = {published}, tppubtype = {inproceedings} } @inproceedings{roeslin2020feature, title = {Feature Engineering for a Seismic Loss Prediction Model using Machine Learning, Christchurch Experience}, author = {Samuel Roeslin and Quincy Ma and Pavan Chigullapally and J\"{o}rg Wicker and Liam Wotherspoon}, url = {https://www.researchgate.net/profile/Samuel_Roeslin/publication/344503593_Feature_Engineering_for_a_Seismic_Loss_Prediction_Model_using_Machine_Learning_Christchurch_Experience/links/5f7d015a92851c14bcb36ed7/Feature-Engineering-for-a-Seismic-Loss-Prediction-Model-using-Machine-Learning-Christchurch-Experience.pdf}, year = {2020}, date = {2020-09-17}, booktitle = {17th World Conference on Earthquake Engineering}, abstract = {The city of Christchurch, New Zealand experienced four major earthquakes (MW \> 5.9) and multiple aftershocks between 4 September 2010 and 23 December 2011. This series of earthquakes, commonly known as the Canterbury Earthquake Sequence (CES), induced over NZ$40 billion in total economic losses. 
Liquefaction alone led to building damage in 51,000 of the 140,000 residential buildings, with around 15,000 houses left unpractical to repair. Widespread damage to residential buildings highlighted the need for improved seismic prediction tools and to better understand factors influencing damage. Fortunately, due to New Zealand unique insurance setting, up to 80% of the losses were insured. Over the entire CES, insurers received more than 650,000 claims. This research project employs multi-disciplinary empirical data gathered during and prior to the CES to develop a seismic loss prediction model for residential buildings in Christchurch using machine learning. The intent is to develop a procedure for developing insights from post-earthquake data that is subjected to continuous updating, to enable identification of critical parameters affecting losses, and to apply such a model to establish priority building stock for risk mitigation measures. The following paper describes the complex data preparation process required for the application of machine learning techniques. 
The paper covers the production of a merged dataset with information from the Earthquake Commission (EQC) claim database, building characteristics from RiskScape, seismic demand interpolated from GeoNet strong motion records, liquefaction occurrence from the New Zealand Geotechnical Database (NZGD) and soil conditions from Land Resource Information Systems (LRIS).}, keywords = {computational sustainability, data mining, earthquakes, machine learning}, pubstate = {published}, tppubtype = {inproceedings} } @article{roeslin2020machine, title = {A machine learning damage prediction model for the 2017 Puebla-Morelos, Mexico, earthquake}, author = {Samuel Roeslin and Quincy Ma and Hugon Ju\'{a}rez-Garcia and Alonso G\'{o}mez-Bernal and J\"{o}rg Wicker and Liam Wotherspoon}, doi = {https://doi.org/10.1177/8755293020936714}, year = {2020}, date = {2020-07-30}, journal = {Earthquake Spectra}, volume = {36}, number = {2}, pages = {314-339}, abstract = {The 2017 Puebla, Mexico, earthquake event led to significant damage in many buildings in Mexico City. In the months following the earthquake, civil engineering students conducted detailed building assessments throughout the city. They collected building damage information and structural characteristics for 340 buildings in the Mexico City urban area, with an emphasis on the Roma and Condesa neighborhoods where they assessed 237 buildings. These neighborhoods are of particular interest due to the availability of seismic records captured by nearby recording stations, and preexisting information from when the neighborhoods were affected by the 1985 Michoac\'{a}n earthquake. This article presents a case study on developing a damage prediction model using machine learning. It details a framework suitable for working with future post-earthquake observation data. Four algorithms able to perform classification tasks were trialed. Random forest, the best performing algorithm, achieves more than 65% prediction accuracy. 
The study of the feature importance for the random forest shows that the building location, seismic demand, and building height are the parameters that influence the model output the most.}, keywords = {computational sustainability, data mining, earthquakes, machine learning}, pubstate = {published}, tppubtype = {article} } @article{Cheng2020NHC, title = {NHC-gold compounds mediate immune suppression through induction of AHR-TGFβ1 signalling in vitro and in scurfy mice}, author = {Xinlai Cheng and Stefanie Haeberle and Iart Luca Shytaj and Rodrigo A. Gama-Brambila and Jannick Theobald and Shahrouz Ghafoory and Jessica W\"{o}lker and Uttara Basu and Claudia Schmidt and Annika Timm and Katerina Ta\v{s}kova and Andrea S. Bauer and J\"{o}rg Hoheisel and Nikolaos Tsopoulidis and Oliver T. Fackler and Andrea Savarino and Miguel A. Andrade-Navarro and Ingo Ott and Marina Lusic and Eva N. Hadaschik and Stefan W\"{o}lfl }, url = {https://www.nature.com/articles/s42003-019-0716-8}, doi = {10.1038/s42003-019-0716-8}, issn = {2399-3642}, year = {2020}, date = {2020-01-03}, urldate = {2020-01-03}, journal = {Communications Biology}, volume = {3}, pages = {2399-3642}, abstract = {Gold compounds have a long history of use as immunosuppressants, but their precise mechanism of action is not completely understood. Using our recently developed liver-on-a-chip platform we now show that gold compounds containing planar N-heterocyclic carbene (NHC) ligands are potent ligands for the aryl hydrocarbon receptor (AHR). Further studies showed that the lead compound (MC3) activates TGFβ1 signaling and suppresses CD4+ T-cell activation in vitro, in human and mouse T cells. Conversely, genetic knockdown or chemical inhibition of AHR activity or of TGFβ1-SMAD-mediated signaling offsets the MC3-mediated immunosuppression. 
In scurfy mice, a mouse model of human immunodysregulation polyendocrinopathy enteropathy X-linked syndrome, MC3 treatment reduced autoimmune phenotypes and extended lifespan from 24 to 58 days. Our findings suggest that the immunosuppressive activity of gold compounds can be improved by introducing planar NHC ligands to activate the AHR-associated immunosuppressive pathway, thus expanding their potential clinical application for autoimmune diseases.}, keywords = {bioinformatics}, pubstate = {published}, tppubtype = {article} } @inproceedings{wicker2019xor, title = {XOR-based Boolean Matrix Decomposition}, author = {J\"{o}rg Wicker and Yan Cathy Hua and Rayner Rebello and Bernhard Pfahringer}, editor = {Jianyong Wang and Kyuseok Shim and Xindong Wu}, url = {https://ieeexplore.ieee.org/document/8970951}, doi = {10.1109/ICDM.2019.00074}, isbn = {978-1-7281-4604-1}, year = {2019}, date = {2019-11-08}, booktitle = {2019 IEEE International Conference on Data Mining (ICDM)}, pages = {638-647}, publisher = {IEEE}, abstract = {Boolean matrix factorization (BMF) is a data summarizing and dimension-reduction technique. Existing BMF methods build on matrix properties defined by Boolean algebra, where the addition operator is the logical inclusive OR and the multiplication operator the logical AND. As a consequence, this leads to the lack of an additive inverse in all Boolean matrix operations, which produces an indelible type of approximation error. Previous research adopted various methods to address such an issue and produced reasonably accurate approximation. However, an exact factorization is rarely found in the literature. In this paper, we introduce a new algorithm named XBMaD (Xor-based Boolean Matrix Decomposition) where the addition operator is defined as the exclusive OR (XOR). This change completely removes the error-mitigation issue of OR-based BMF methods, and allows for an exact error-free factorization. 
An evaluation comparing XBMaD and classic OR-based methods suggested that XBMAD performed equal or in most cases more accurately and faster.}, keywords = {Boolean matrix decomposition, data mining}, pubstate = {published}, tppubtype = {inproceedings} } @article{Jonauskaite2019, title = {A machine learning approach to quantifying the specificity of color-emotion associations and their cultural differences}, author = {Domicele Jonauskaite and J\"{o}rg Wicker and Chrisine Mohr and Nele Dael and Jelena Havelka and Marietta Papadatou-Pastou and Meng Zhang and Daniel Oberfeld}, editor = {Andrew Dunn}, url = {https://royalsocietypublishing.org/doi/10.1098/rsos.190741 https://doi.org/10.1098/rsos.190741}, doi = {10.1098/rsos.190741}, year = {2019}, date = {2019-09-25}, journal = {Royal Society Open Science}, volume = {6}, number = {9}, pages = {190741}, abstract = {The link between colour and emotion and its possible similarity across cultures are questions that have not been fully resolved. Online, 711 participants from China, Germany, Greece and the UK associated 12 colour terms with 20 discrete emotion terms in their native languages. We propose a machine learning approach to quantify (a) the consistency and specificity of colour-emotion associations and (b) the degree to which they are country-specific, on the basis of the accuracy of a statistical classifier in (a) decoding the colour term evaluated on a given trial from the 20 ratings of colour-emotion associations and (b) predicting the country of origin from the 240 individual colour-emotion associations, respectively. The classifier accuracies were significantly above chance level, demonstrating that emotion associations are to some extent colour-specific and that colour-emotion associations are to some extent country-specific. 
A second measure of country-specificity, the in-group advantage of the colour-decoding accuracy, was detectable but relatively small (6.1%), indicating that colour-emotion associations are both universal and culture-specific. Our results show that machine learning is a promising tool when analysing complex datasets from emotion research.}, keywords = {emotion, machine learning, psychology}, pubstate = {published}, tppubtype = {article} } @inproceedings{roeslin2019data, title = {Data integration for the development of a seismic loss prediction model for residential buildings in New Zealand}, author = {Samuel Roeslin and Quincy Ma and J\"{o}rg Wicker and Liam Wotherspoon}, editor = {Peggy Cellier and Kurt Driessens}, url = {https://link.springer.com/chapter/10.1007/978-3-030-43887-6_8}, doi = {10.1007/978-3-030-43887-6_8}, isbn = {978-3-030-43887-6}, year = {2019}, date = {2019-09-19}, booktitle = {Machine Learning and Knowledge Discovery in Databases}, pages = {88-100}, publisher = {Springer International Publishing}, address = {Cham}, abstract = {In 2010--2011, New Zealand experienced the most damaging earthquakes in its history. It led to extensive damage to Christchurch buildings, infrastructure and its surroundings; affecting commercial and residential buildings. The direct economic losses represented 20% of New Zealand's GDP in 2011. Owing to New Zealand's particular insurance structure, the insurance sector contributed to over 80% of losses for a total of more than NZ$31 billion. Amongst this, over NZ$11 billion of the losses arose from residential building claims and were covered either partially or entirely from the NZ government backed Earthquake Commission (EQC) cover insurance scheme. In the process of resolving the claims, EQC collected detailed financial loss data, post-event observations and building characteristics for each of the approximately 434,000 claims lodged following the Canterbury Earthquake sequence (CES). 
Added to this, the active NZ earthquake engineering community treated the event as a large scale outdoor experiment and collected extensive data on the ground shaking levels, soil conditions, and liquefaction occurrence throughout wider Christchurch. This paper discusses the necessary data preparation process preceding the development of a machine learning seismic loss model. The process draws heavily upon using Geographic Information System (GIS) techniques to aggregate relevant information from multiple databases interpolating data between categories and converting data between continuous and categorical forms. Subsequently, the database is processed, and a residential seismic loss prediction model is developed using machine learning. The aim is to develop a `grey-box' model enabling human interpretability of the decision steps.}, keywords = {computational sustainability, earthquakes}, pubstate = {published}, tppubtype = {inproceedings} } @inproceedings{williams2019what, title = {What can we learn from the air chemistry of crowds?}, author = {Jonathan Williams and Christof St\"{o}nner and Achim Edtbauer and Bettina Derstorff and Efstratios Bourtsoukidis and Thomas Kl\"{u}pfel and Nicolas Krauter and J\"{o}rg Wicker and Stefan Kramer}, editor = {Armin Hansel and J\"{u}rgen Dunkl}, url = {https://www.ionicon.com/sites/default/files/uploads/doc/Contributions_8th-PTR-MS-Conference-2019_web.pdf#page=122}, year = {2019}, date = {2019-05-10}, booktitle = {8th International Conference on Proton Transfer Reaction Mass Spectrometry and its Applications}, pages = {121-123}, publisher = {Innsbruck University Press}, address = {Innsbruck}, abstract = {Current PTR-MS technology allows hundreds of volatile trace gases in air to be measured every second at extremely low levels (parts per trillion). These instruments are often used in atmospheric research on planes and ships and even in the Amazon rainforest. 
Recently, we have used this technology to examine air composition changes caused by large groups of people (10,000-30,000) under real world conditions at a football match and in a movie theater. In both cases the trace gas signatures measured in ambient air are shown to reflect crowd behavior. By applying advanced data mining techniques we have shown that groups of people reproducibly respond to certain emotional stimuli (e.g. suspense and comedy) by exhaling specific trace gases. Furthermore, we explore whether this information can be used to determine the age classification of films.}, keywords = {atmospheric chemistry, breath analysis, cheminformatics, cinema data mining, data mining, emotional response analysis, machine learning, movie analysis, smell of fear, sof, time series}, pubstate = {published}, tppubtype = {inproceedings} } @article{Dabiri2019Imidazopyridines, title = {Imidazopyridines as Potent KDM5 Demethylase Inhibitors Promoting Reprogramming Efficiency of Human iPSCs}, author = {Yasamin Dabiri and Rodrigo A. Gama-Brambila and Katerina Taskova and Kristina Herold and Stefanie Reuter and James Adjaye and Jochen Utikal and Ralf Mrowka and Jichang Wang and Miguel A. Andrade-Navarro and Xinlai Cheng}, url = {https://www.sciencedirect.com/science/article/pii/S2589004219300124}, doi = {10.1016/j.isci.2019.01.012}, issn = {2589-0042}, year = {2019}, date = {2019-01-01}, urldate = {2019-01-01}, journal = {iScience}, volume = {12}, pages = {168--181}, abstract = {Summary Pioneering human induced pluripotent stem cell (iPSC)-based pre-clinical studies have raised safety concerns and pinpointed the need for safer and more efficient approaches to generate and maintain patient-specific iPSCs. One approach is searching for compounds that influence pluripotent stem cell reprogramming using functional screens of known drugs. 
Our high-throughput screening of drug-like hits showed that imidazopyridines\textemdashanalogs of zolpidem, a sedative-hypnotic drug\textemdashare able to improve reprogramming efficiency and facilitate reprogramming of resistant human primary fibroblasts. The lead compound (O4I3) showed a remarkable OCT4 induction, which at least in part is due to the inhibition of H3K4 demethylase (KDM5, also known as JARID1). Experiments demonstrated that KDM5A, but not its homolog KDM5B, serves as a reprogramming barrier by interfering with the enrichment of H3K4Me3 at the OCT4 promoter. Thus our results introduce a new class of KDM5 chemical inhibitors and provide further insight into the pluripotency-related properties of KDM5 family members.}, keywords = {Biochemistry, Biological Sciences, Molecular Biology}, pubstate = {published}, tppubtype = {article} } @article{10.1371/journal.pone.0210467, title = {Literature optimized integration of gene expression for organ-specific evaluation of toxicogenomics datasets}, author = {Katerina Ta\v{s}kova and Jean-Fred Fontaine and Ralf Mrowka and Miguel A. Andrade-Navarro}, url = {https://doi.org/10.1371/journal.pone.0210467}, doi = {10.1371/journal.pone.0210467}, year = {2019}, date = {2019-01-01}, urldate = {2019-01-01}, journal = {PLOS ONE}, volume = {14}, number = {1}, pages = {1-21}, publisher = {Public Library of Science}, abstract = {The study of drug toxicity in human organs is complicated by their complex inter-relations and by the obvious difficulty to testing drug effects on biologically relevant material. Animal models and human cell cultures offer alternatives for systematic and large-scale profiling of drug effects on gene expression level, as typically found in the so-called toxicogenomics datasets. 
However, the complexity of these data, which includes variable drug doses, time points, and experimental setups, makes it difficult to choose and integrate the data, and to evaluate the appropriateness of one or another model system to study drug toxicity (of particular drugs) of particular human organs. Here, we define a protocol to integrate drug-wise rankings of gene expression changes in toxicogenomics data, which we apply to the TG-GATEs dataset, to prioritize genes for association to drug toxicity in liver or kidney. Contrast of the results with sets of known human genes associated to drug toxicity in the literature allows to compare different rank aggregation approaches for the task at hand. Collectively, ranks from multiple models point to genes not previously associated to toxicity, notably, the PCNA clamp associated factor (PCLAF), and genes regulated by the master regulator of the antioxidant response NFE2L2, such as NQO1 and SRXN1. In addition, comparing gene ranks from different models allowed us to evaluate striking differences in terms of toxicity-associated genes between human and rat hepatocytes or between rat liver and rat hepatocytes. We interpret these results to point to the different molecular functions associated to organ toxicity that are best described by each model. 
We conclude that the expected production of toxicogenomics panels with larger numbers of drugs and models, in combination with the ongoing increase of the experimental literature in organ toxicity, will lead to increasingly better associations of genes for organism toxicity.}, keywords = {bioinformatics}, pubstate = {published}, tppubtype = {article} } @article{Stonner2018, title = {Proof of concept study: Testing human volatile organic compounds as tools for age classification of films}, author = {Christof St\"{o}nner and Achim Edtbauer and Bettina Derstorff and Efstratios Bourtsoukidis and Thomas Kl\"{u}pfel and J\"{o}rg Wicker and Jonathan Williams}, doi = {10.1371/journal.pone.0203044}, year = {2018}, date = {2018-10-11}, journal = {PLOS One}, volume = {13}, number = {10}, pages = {1-14}, publisher = {Public Library of Science}, abstract = {Humans emit numerous volatile organic compounds (VOCs) through breath and skin. The nature and rate of these emissions are affected by various factors including emotional state. Previous measurements of VOCs and CO2 in a cinema have shown that certain chemicals are reproducibly emitted by audiences reacting to events in a particular film. Using data from films with various age classifications, we have studied the relationship between the emission of multiple VOCs and CO2 and the age classifier (0, 6, 12, and 16) with a view to developing a new chemically based and objective film classification method. We apply a random forest model built with time independent features extracted from the time series of every measured compound, and test predictive capability on subsets of all data. It was found that most compounds were not able to predict all age classifiers reliably, likely reflecting the fact that current classification is based on perceived sensibilities to many factors (e.g. incidences of violence, sex, antisocial behaviour, drug use, and bad language) rather than the visceral biological responses expressed in the data. 
However, promising results were found for isoprene which reliably predicted 0, 6 and 12 age classifiers for a variety of film genres and audience age groups. Therefore, isoprene emission per person might in future be a valuable aid to national classification boards, or even offer an alternative, objective, metric for rating films based on the reactions of large groups of people.}, keywords = {atmospheric chemistry, breath analysis, cheminformatics, cinema data mining, data mining, emotional response analysis, machine learning, movie analysis, smell of fear, sof, time series}, pubstate = {published}, tppubtype = {article} } @conference{stonner2018investigating, title = {Investigating human emissions of volatile organic compounds in a cinema, flux rates, links to scene content, and possible applications}, author = {Christof St\"{o}nner and Achim Edtbauer and Bettina Derstorff and Efstratios Bourtsoukidis and Thomas Kl\"{u}pfel and J\"{o}rg Wicker and Jonathan Williams}, isbn = {978-171382651-4}, year = {2018}, date = {2018-07-22}, booktitle = {15th Conference of the International Society of Indoor Air Quality and Climate, INDOOR AIR 2018}, publisher = {International Society of Indoor Air Quality and Climate}, abstract = {Humans emit numerous volatile organic compounds (VOCs) into the air via skin and breath. These emissions can depend on various factors such as nutrition, sporting activity and also the emotional state. It is shown that the emission rates of the main endogenous breath gases like CO2, acetone and isoprene are generally lower for children than for adults. In contrast, VOCs from exogenous sources strongly vary over the course of day. Interestingly, small scale variances in emission rates were found to occur reproducibly over multiple screenings of the same film. The peaks occurring in the time series of a compound during the screening of the film were induced by the physiological response of the audience to audio-visual stimuli. 
Additionally, the question whether this chemical reaction of the audience can be used for the prediction of age classification of films is addressed.}, keywords = {atmospheric chemistry, cheminformatics, cinema data mining, sof}, pubstate = {published}, tppubtype = {conference} } @article{Theobald2018Liver, title = {Liver-Kidney-on-Chip To Study Toxicity of Drug Metabolites}, author = {Jannick Theobald and Ali Ghanem and Patrick Wallisch and Amin A. Banaeiyan and Miguel A. Andrade-Navarro and Katerina Ta\v{s}kova and Manuela Haltmeier and Andreas Kurtz and Holger Becker and Stefanie Reuter and Ralf Mrowka and Xinlai Cheng and Stefan W\"{o}lfl}, url = {https://doi.org/10.1021/acsbiomaterials.7b00417}, doi = {10.1021/acsbiomaterials.7b00417}, year = {2018}, date = {2018-01-01}, urldate = {2018-01-01}, journal = {ACS Biomaterials Science \& Engineering}, volume = {4}, number = {1}, pages = {78-89}, note = {PMID: 33418680}, keywords = {bioinformatics}, pubstate = {published}, tppubtype = {article} } @article{Mah250431, title = {Evaluating Cell Identity from Transcription Profiles}, author = {Nancy Mah and Katerina Taskova and Khadija El Amrani and Krithika Hariharan and Andreas Kurtz and Miguel A. Andrade-Navarro}, url = {https://www.biorxiv.org/content/early/2018/01/19/250431}, doi = {10.1101/250431}, year = {2018}, date = {2018-01-01}, urldate = {2018-01-01}, journal = {bioRxiv}, publisher = {Cold Spring Harbor Laboratory}, abstract = {Induced pluripotent stem cells (iPS) and direct lineage programming offer promising autologous and patient-specific sources of cells for personalized drug-testing and cell-based therapy. Before these engineered cells can be widely used, it is important to evaluate how well the engineered cell types resemble their intended target cell types. 
We have developed a method to generate CellScore, a cell identity score that can be used to evaluate the success of an engineered cell type in relation to both its initial and desired target cell type, which are used as references. Of 20 cell transitions tested, the most successful transitions were the iPS cells (CellScore \> 0.9), while other transitions (e.g. induced hepatocytes or motor neurons) indicated incomplete transitions (CellScore \< 0.5). In principle, the method can be applied to any engineered cell undergoing a cell transition, where transcription profiles are available for the reference cell types and the engineered cell type.HighlightsA curated standard dataset of transcription profiles from normal cell types was created.CellScore evaluates the cell identity of engineered cell types, using the curated dataset.CellScore considers the initial and desired target cell type.CellScore identifies the most successfully engineered clones for further functional testing.}, keywords = {bioinformatics}, pubstate = {published}, tppubtype = {article} } @article{TASKOVA201857, title = {Evaluation of in vivo and in vitro models of toxicity by comparison of toxicogenomics data with the literature}, author = {Katerina Ta\v{s}kova and Jean-Fred Fontaine and Ralf Mrowka and Miguel A. Andrade-Navarro}, url = {https://www.sciencedirect.com/science/article/pii/S1046202317300543}, doi = {10.1016/j.ymeth.2017.07.010}, issn = {1046-2023}, year = {2018}, date = {2018-01-01}, journal = {Methods}, volume = {132}, pages = {57--65}, abstract = {Toxicity affecting humans is studied by observing the effects of chemical substances in animal organisms (in vivo) or in animal and human cultivated cell lines (in vitro). Toxicogenomics studies collect gene expression profiles and histopathology assessment data for hundreds of drugs and pollutants in standardized experimental designs using different model systems. 
These data are an invaluable source for analyzing genome-wide drug response in biological systems. However, a problem remains that is how to evaluate the suitability of heterogeneous in vitro and in vivo systems to model the many different aspects of human toxicity. We propose here that a given model system (cell type or animal organ) is supported to appropriately describe a particular aspect of human toxicity if the set of compounds associated in the literature with that aspect of toxicity causes a change in expression of genes with a particular function in the tested model system. This approach provides candidate genes to explain the toxicity effect (the differentially expressed genes) and the compounds whose effect could be modeled (the ones producing both the change of expression in the model system and that are associated with the human phenotype in the literature). Here we present an application of this approach using a computational pipeline that integrates compound-induced gene expression profiles (from the Open TG-GATEs database) and biomedical literature annotations (from the PubMed database) to evaluate the suitability of (human and rat) in vitro systems as well as rat in vivo systems to model human toxicity.}, note = {Comparison and Visualization Methods for High-Dimensional Biological Data}, keywords = {Differential expression analysis, Functional enrichment analysis, Literature co-occurrence analysis, model systems, Toxicogenomics data integration}, pubstate = {published}, tppubtype = {article} } @article{wicker2017best, title = {The Best Privacy Defense is a Good Privacy Offense: Obfuscating a Search Engine User's Profile}, author = {J\"{o}rg Wicker and Stefan Kramer}, editor = {Kurt Driessens and Dragi Kocev and Marko Robnik-\v{S}ikonja and Myra Spiliopoulou}, url = {http://rdcu.be/tL0U}, doi = {10.1007/s10618-017-0524-z}, issn = {1573-756X}, year = {2017}, date = {2017-09-01}, journal = {Data Mining and Knowledge Discovery}, volume = {31}, number = 
{5}, pages = {1419-1443}, abstract = {User privacy on the internet is an important and unsolved problem. So far, no sufficient and comprehensive solution has been proposed that helps a user to protect his or her privacy while using the internet. Data are collected and assembled by numerous service providers. Solutions so far focused on the side of the service providers to store encrypted or transformed data that can be still used for analysis. This has a major flaw, as it relies on the service providers to do this. The user has no chance of actively protecting his or her privacy. In this work, we suggest a new approach, empowering the user to take advantage of the same tool the other side has, namely data mining to produce data which obfuscates the user’s profile. We apply this approach to search engine queries and use feedback of the search engines in terms of personalized advertisements in an algorithm similar to reinforcement learning to generate new queries potentially confusing the search engine. We evaluated the approach using a real-world data set. While evaluation is hard, we achieve results that indicate that it is possible to influence the user’s profile that the search engine generates. 
This shows that it is feasible to defend a user’s privacy from a new and more practical perspective.}, keywords = {adversarial learning, machine learning, personalized ads, privacy, reinforcement learning, search engines}, pubstate = {published}, tppubtype = {article} } @article{latino2017eawag, title = {Eawag-Soil in enviPath: a new resource for exploring regulatory pesticide soil biodegradation pathways and half-life data}, author = {Diogo Latino and J\"{o}rg Wicker and Martin G\"{u}tlein and Emanuel Schmid and Stefan Kramer and Kathrin Fenner}, doi = {10.1039/C6EM00697C}, year = {2017}, date = {2017-01-01}, journal = {Environmental Science: Process \& Impact}, publisher = {The Royal Society of Chemistry}, abstract = {Developing models for the prediction of microbial biotransformation pathways and half-lives of trace organic contaminants in different environments requires as training data easily accessible and sufficiently large collections of respective biotransformation data that are annotated with metadata on study conditions. Here, we present the Eawag-Soil package, a public database that has been developed to contain all freely accessible regulatory data on pesticide degradation in laboratory soil simulation studies for pesticides registered in the EU (282 degradation pathways, 1535 reactions, 1619 compounds and 4716 biotransformation half-life values with corresponding metadata on study conditions). We provide a thorough description of this novel data resource, and discuss important features of the pesticide soil degradation data that are relevant for model development. Most notably, the variability of half-life values for individual compounds is large and only about one order of magnitude lower than the entire range of median half-life values spanned by all compounds, demonstrating the need to consider study conditions in the development of more accurate models for biotransformation prediction. 
We further show how the data can be used to find missing rules relevant for predicting soil biotransformation pathways. From this analysis, eight examples of reaction types were presented that should trigger the formulation of new biotransformation rules, e.g., Ar-OH methylation, or the extension of existing rules e.g., hydroxylation in aliphatic rings. The data were also used to exemplarily explore the dependence of half-lives of different amide pesticides on chemical class and experimental parameters. This analysis highlighted the value of considering initial transformation reactions for the development of meaningful quantitative-structure biotransformation relationships (QSBR), which is a novel opportunity offered by the simultaneous encoding of transformation reactions and corresponding half-lives in Eawag-Soil. Overall, Eawag-Soil provides an unprecedentedly rich collection of manually extracted and curated biotransformation data, which should be useful in a great variety of applications.}, keywords = {biodegradation, cheminformatics, computational sustainability, data mining, enviPath, multi-label classification, REST, web services}, pubstate = {published}, tppubtype = {article} } @incollection{wicker2016ahybrid, title = {A Hybrid Machine Learning and Knowledge Based Approach to Limit Combinatorial Explosion in Biodegradation Prediction}, author = {J\"{o}rg Wicker and Kathrin Fenner and Stefan Kramer}, editor = {J\"{o}rg L\"{a}ssig and Kristian Kersting and Katharina Morik}, url = {http://dx.doi.org/10.1007/978-3-319-31858-5_5}, doi = {10.1007/978-3-319-31858-5_5}, isbn = {978-3-319-31858-5}, year = {2016}, date = {2016-04-21}, booktitle = {Computational Sustainability}, pages = {75--97}, publisher = {Springer International Publishing}, address = {Cham}, abstract = {One of the main tasks in chemical industry regarding the sustainability of a product is the prediction of its environmental fate, i.e., its degradation products and pathways. 
Current methods for the prediction of biodegradation products and pathways of organic environmental pollutants either do not take into account domain knowledge or do not provide probability estimates. In this chapter, we propose a hybrid knowledge-based and machine learning-based approach to overcome these limitations in the context of the University of Minnesota Pathway Prediction System (UM-PPS). The proposed solution performs relative reasoning in a machine learning framework, and obtains one probability estimate for each biotransformation rule of the system. Since the application of a rule then depends on a threshold for the probability estimate, the trade-off between recall (sensitivity) and precision (selectivity) can be addressed and leveraged in practice. Results from leave-one-out cross-validation show that a recall and precision of approximately 0.8 can be achieved for a subset of 13 transformation rules. The set of used rules is further extended using multi-label classification, where dependencies among the transformation rules are exploited to improve the predictions. While the results regarding recall and precision vary, the area under the ROC curve can be improved using multi-label classification. Therefore, it is possible to optimize precision without compromising recall. 
Recently, we integrated the presented approach into enviPath, a complete redesign and re-implementation of UM-PPS.}, keywords = {biodegradation, cheminformatics, computational sustainability, enviPath, machine learning, metabolic pathways, multi-label classification}, pubstate = {published}, tppubtype = {incollection} } @inproceedings{wicker2016nonlinear, title = {A Nonlinear Label Compression and Transformation Method for Multi-Label Classification using Autoencoders}, author = {J\"{o}rg Wicker and Andrey Tyukin and Stefan Kramer}, editor = {James Bailey and Latifur Khan and Takashi Washio and Gill Dobbie and Zhexue Joshua Huang and Ruili Wang}, url = {http://dx.doi.org/10.1007/978-3-319-31753-3_27}, doi = {10.1007/978-3-319-31753-3_27}, isbn = {978-3-319-31753-3}, year = {2016}, date = {2016-04-16}, booktitle = {The 20th Pacific Asia Conference on Knowledge Discovery and Data Mining (PAKDD)}, volume = {9651}, pages = {328-340}, publisher = {Springer International Publishing}, address = {Switzerland}, series = {Lecture Notes in Computer Science}, abstract = {Multi-label classification targets the prediction of multiple interdependent and non-exclusive binary target variables. Transformation-based algorithms transform the data set such that regular single-label algorithms can be applied to the problem. A special type of transformation-based classifiers are label compression methods, that compress the labels and then mostly use single label classifiers to predict the compressed labels. So far, there are no compression-based algorithms follow a problem transformation approach and address non-linear dependencies in the labels. In this paper, we propose a new algorithm, called Maniac (Multi-lAbel classificatioN usIng AutoenCoders), which extracts the non-linear dependencies by compressing the labels using autoencoders. We adapt the training process of autoencoders in a way to make them more suitable for a parameter optimization in the context of this algorithm. 
The method is evaluated on eight standard multi-label data sets. Experiments show that despite not producing a good ranking, Maniac generates a particularly good bipartition of the labels into positives and negatives. This is caused by rather strong predictions with either really high or low probability. Additionally, the algorithm seems to perform better given more labels and a higher label cardinality in the data set.}, keywords = {autoencoders, label compression, machine learning, multi-label classification}, pubstate = {published}, tppubtype = {inproceedings} } @article{wicker2016envipath, title = {enviPath - The Environmental Contaminant Biotransformation Pathway Resource}, author = {J\"{o}rg Wicker and Tim Lorsbach and Martin G\"{u}tlein and Emanuel Schmid and Diogo Latino and Stefan Kramer and Kathrin Fenner}, editor = {Michael Galperin}, url = {http://nar.oxfordjournals.org/content/44/D1/D502.abstract}, doi = {10.1093/nar/gkv1229}, year = {2016}, date = {2016-01-01}, journal = {Nucleic Acid Research}, volume = {44}, number = {D1}, pages = {D502-D508}, abstract = {The University of Minnesota Biocatalysis/Biodegradation Database and Pathway Prediction System (UM-BBD/PPS) has been a unique resource covering microbial biotransformation pathways of primarily xenobiotic chemicals for over 15 years. This paper introduces the successor system, enviPath (The Environmental Contaminant Biotransformation Pathway Resource), which is a complete redesign and reimplementation of UM-BBD/PPS. enviPath uses the database from the UM-BBD/PPS as a basis, extends the use of this database, and allows users to include their own data to support multiple use cases. Relative reasoning is supported for the refinement of predictions and to allow its extensions in terms of previously published, but not implemented machine learning models. User access is simplified by providing a REST API that simplifies the inclusion of enviPath into existing workflows. 
An RDF database is used to enable simple integration with other databases. enviPath is publicly available at https://envipath.org with free and open access to its core data.}, keywords = {biodegradation, cheminformatics, computational sustainability, data mining, enviPath, linked data, machine learning, metabolic pathways, multi-label classification}, pubstate = {published}, tppubtype = {article} } @inproceedings{raza2016trading, title = {Trading Off Accuracy for Efficiency by Randomized Greedy Warping}, author = {Atif Raza and J\"{o}rg Wicker and Stefan Kramer}, url = {https://wicker.nz/nwp-acm/authorize.php?id=N10030 http://doi.acm.org/10.1145/2851613.2851651}, doi = {10.1145/2851613.2851651}, isbn = {978-1-4503-3739-7}, year = {2016}, date = {2016-01-01}, booktitle = {Proceedings of the 31st Annual ACM Symposium on Applied Computing}, pages = {883--890}, publisher = {ACM}, address = {New York, NY, USA}, series = {SAC '16}, abstract = {Dynamic Time Warping (DTW) is a widely used distance measure for time series data mining. Its quadratic complexity requires the application of various techniques (e.g. warping constraints, lower-bounds) for deployment in real-time scenarios. In this paper we propose a randomized greedy warping algorithm for finding similarity between time series instances. We show that the proposed algorithm outperforms the simple greedy approach and also provides very good time series similarity approximation consistently, as compared to DTW. 
We show that the Randomized Time Warping (RTW) can be used in place of DTW as a fast similarity approximation technique by trading some classification accuracy for very fast classification.}, keywords = {data mining, dynamic time warping, time series}, pubstate = {published}, tppubtype = {inproceedings} } @article{williams2015element, title = {Cinema audiences reproducibly vary the chemical composition of air during films, by broadcasting scene specific emissions on breath}, author = {Jonathan Williams and Christof St\"{o}nner and J\"{o}rg Wicker and Nicolas Krauter and Bettina Derstorff and Efstratios Bourtsoukidis and Thomas Kl\"{u}pfel and Stefan Kramer}, url = {http://www.nature.com/articles/srep25464}, doi = {10.1038/srep25464}, year = {2016}, date = {2016-01-01}, urldate = {2016-01-01}, journal = {Scientific Reports}, volume = {6}, publisher = {Nature Publishing Group}, abstract = {Human beings continuously emit chemicals into the air by breath and through the skin. In order to determine whether these emissions vary predictably in response to audiovisual stimuli, we have continuously monitored carbon dioxide and over one hundred volatile organic compounds in a cinema. It was found that many airborne chemicals in cinema air varied distinctively and reproducibly with time for a particular film, even in different screenings to different audiences. Application of scene labels and advanced data mining methods revealed that specific film events, namely "suspense" or "comedy" caused audiences to change their emission of specific chemicals. These event-type synchronous, broadcasted human chemosignals open the possibility for objective and non-invasive assessment of a human group response to stimuli by continuous measurement of chemicals in air. 
Such methods can be applied to research fields such as psychology and biology, and be valuable to industries such as film making and advertising.}, keywords = {atmospheric chemistry, causality, cheminformatics, data mining, emotional response analysis, smell of fear, sof, time series}, pubstate = {published}, tppubtype = {article} } @inproceedings{wicker2015cinema, title = {Cinema Data Mining: The Smell of Fear}, author = {J\"{o}rg Wicker and Nicolas Krauter and Bettina Derstorff and Christof St\"{o}nner and Efstratios Bourtsoukidis and Thomas Kl\"{u}pfel and Jonathan Williams and Stefan Kramer}, url = {https://wicker.nz/nwp-acm/authorize.php?id=N10031 http://doi.acm.org/10.1145/2783258.2783404}, doi = {10.1145/2783258.2783404}, isbn = {978-1-4503-3664-2}, year = {2015}, date = {2015-01-01}, booktitle = {Proceedings of the 21st ACM SIGKDD International Conference on Knowledge Discovery and Data Mining}, pages = {1295--1304}, publisher = {ACM}, address = {New York, NY, USA}, organization = {ACM}, series = {KDD '15}, abstract = {While the physiological response of humans to emotional events or stimuli is well-investigated for many modalities (like EEG, skin resistance, ...), surprisingly little is known about the exhalation of so-called Volatile Organic Compounds (VOCs) at quite low concentrations in response to such stimuli. VOCs are molecules of relatively small mass that quickly evaporate or sublimate and can be detected in the air that surrounds us. The paper introduces a new field of application for data mining, where trace gas responses of people reacting on-line to films shown in cinemas (or movie theaters) are related to the semantic content of the films themselves. To do so, we measured the VOCs from a movie theatre over a whole month in intervals of thirty seconds, and annotated the screened films by a controlled vocabulary compiled from multiple sources. 
To gain a better understanding of the data and to reveal unknown relationships, we have built prediction models for so-called forward prediction (the prediction of future VOCs from the past), backward prediction (the prediction of past scene labels from future VOCs) and for some forms of abductive reasoning and Granger causality. Experimental results show that some VOCs and some labels can be predicted with relatively low error, and that hints for causality with low p-values can be detected in the data.}, keywords = {atmospheric chemistry, breath analysis, causality, cheminformatics, cinema data mining, data mining, emotional response analysis, movie analysis, smell of fear, sof, time series}, pubstate = {published}, tppubtype = {inproceedings} } @inproceedings{tyukin2015scavenger, title = {Scavenger - A Framework for the Efficient Evaluation of Dynamic and Modular Algorithms}, author = {Andrey Tyukin and Stefan Kramer and J\"{o}rg Wicker}, editor = {Albert Bifet and Michael May and Bianca Zadrozny and Ricard Gavalda and Dino Pedreschi and Jaime Cardoso and Myra Spiliopoulou}, url = {http://dx.doi.org/10.1007/978-3-319-23461-8_40}, doi = {10.1007/978-3-319-23461-8_40}, isbn = {978-3-319-23460-1}, year = {2015}, date = {2015-01-01}, booktitle = {Machine Learning and Knowledge Discovery in Databases}, volume = {9286}, pages = {325-328}, publisher = {Springer International Publishing}, series = {Lecture Notes in Computer Science}, abstract = {Machine Learning methods and algorithms are often highly modular in the sense that they rely on a large number of subalgorithms that are in principle interchangeable. For example, it is often possible to use various kinds of pre- and post-processing and various base classifiers or regressors as components of the same modular approach. We propose a framework, called Scavenger, that allows evaluating whole families of conceptually similar algorithms efficiently. 
The algorithms are represented as compositions, couplings and products of atomic subalgorithms. This allows partial results to be cached and shared between different instances of a modular algorithm, so that potentially expensive partial results need not be recomputed multiple times. Furthermore, our framework deals with issues of the parallel execution, load balancing, and with the backup of partial results for the case of implementation or runtime errors. Scavenger is licensed under the GPLv3 and can be downloaded freely at https://github.com/jorro/scavenger.}, keywords = {autoencoders, distributed processing, framework, large-scale, Scavenger}, pubstate = {published}, tppubtype = {inproceedings} } @article{https://doi.org/10.1049/iet-epa.2014.0437, title = {Prediction of vacuum cleaner motor brush life: a regression approach}, author = {Bla\v{z} Benedik and Katerina Ta\v{s}kova and Jo\v{z}e Tav\v{c}ar and Jo\v{z}e Duhovnik}, url = {https://ietresearch.onlinelibrary.wiley.com/doi/abs/10.1049/iet-epa.2014.0437}, doi = {10.1049/iet-epa.2014.0437}, year = {2015}, date = {2015-01-01}, urldate = {2015-01-01}, journal = {IET Electric Power Applications}, volume = {9}, number = {9}, pages = {569--577}, abstract = {The main focus of this paper is the empirical modelling of the wear of carbon brushes. Rather than determining the dominant wear mechanisms, an approach towards the prediction of wear under a range of different conditions was used. The models were obtained by multiple regression analysis using lifetime (LT) data contributed by the biggest European manufacturer of vacuum cleaner motors. This included reliability data for 607 different test populations involving 3980 motors. Exploration of the data revealed that wear-out parameters behaved in accordance with the existing field theory, giving additional confidence to the models. 
The numerical appreciation of the wear-out parameters and the resulting conclusions will be beneficial to motor design and reliability engineers. Learned knowledge will be used for faster selection of optimal design and operational motor parameters to meet recent EU regulation 666/2013. Along with the more rapid design of the product, a reduced number of LT tests will result in significant energy savings.}, keywords = {brushes, carbon brush wear modelling, domestic appliances, dominant wear mechanism, electric motors, energy saving, field theory, motor design reliability, multiple regression analysis, vacuum cleaner motor brush life prediction, wear}, pubstate = {published}, tppubtype = {article} } @article{https://doi.org/10.1002/prot.24873, title = {Large oligomeric complex structures can be computationally assembled by efficiently combining docked interfaces}, author = {Matthias Dietzen and Olga V. Kalinina and Katerina Ta\v{s}kova and Benny Kneissl and Anna-Katharina Hildebrandt and Elmar Jaenicke and Heinz Decker and Thomas Lengauer and Andreas Hildebrandt}, url = {https://onlinelibrary.wiley.com/doi/abs/10.1002/prot.24873}, doi = {https://doi.org/10.1002/prot.24873}, year = {2015}, date = {2015-01-01}, journal = {Proteins: Structure, Function, and Bioinformatics}, volume = {83}, number = {10}, pages = {1887-1899}, abstract = {ABSTRACT Macromolecular oligomeric assemblies are involved in many biochemical processes of living organisms. The benefits of such assemblies in crowded cellular environments include increased reaction rates, efficient feedback regulation, cooperativity and protective functions. However, an atom-level structural determination of large assemblies is challenging due to the size of the complex and the difference in binding affinities of the involved proteins. 
In this study, we propose a novel combinatorial greedy algorithm for assembling large oligomeric complexes from information on the approximate position of interaction interfaces of pairs of monomers in the complex. Prior information on complex symmetry is not required but rather the symmetry is inferred during assembly. We implement an efficient geometric score, the transformation match score, that bypasses the model ranking problems of state-of-the-art scoring functions by scoring the similarity between the inferred dimers of the same monomer simultaneously with different binding partners in a (sub)complex with a set of pregenerated docking poses. We compiled a diverse benchmark set of 308 homo and heteromeric complexes containing 6 to 60 monomers. To explore the applicability of the method, we considered 48 sets of parameters and selected those three sets of parameters, for which the algorithm can correctly reconstruct the maximum number, namely 252 complexes (81.8%) in, at least one of the respective three runs. The crossvalidation coverage, that is, the mean fraction of correctly reconstructed benchmark complexes during crossvalidation, was 78.1%, which demonstrates the ability of the presented method to correctly reconstruct topology of a large variety of biological complexes. Proteins 2015; 83:1887\textendash1899. © 2015 The Authors. 
Proteins: Structure, Function, and Bioinformatics Published by Wiley Periodicals, Inc.}, keywords = {3D-MOSAIC, complex match score, macromolecular assembly, protein\textendashprotein interactions, structural modeling, transformation match score}, pubstate = {published}, tppubtype = {article} } @article{vsilc2015data, title = {Data mining-assisted parameter tuning of a search algorithm}, author = {Jurij \v{S}ilc and Katerina Ta\v{s}kova and Peter Koro\v{s}ec}, url = {https://informatica.si/index.php/informatica/article/view/833}, year = {2015}, date = {2015-01-01}, urldate = {2015-01-01}, journal = {Informatica}, volume = {39}, number = {2}, abstract = {The main purpose of this paper is to show how using data-mining technique to tackle the problem of tuning the performance of a meta-heuristic search algorithm with respect to its parameters. The operational behavior of typical meta-heuristic search algorithms is determined by a set of control parameters, which have to be fine-tuned in order to obtain a best performance for a given problem. The principle challenge here is how to provide meaningful settings for an algorithm, obtained as result of better insight in its behavior. In this context, we discuss the idea of learning a model of an algorithm behavior by data mining analysis of parameter tuning results. 
The study was conducted using the Differential Ant-Stigmergy Algorithm as an example meta-heuristic search algorithm.}, keywords = {data mining}, pubstate = {published}, tppubtype = {article} } @inproceedings{tyukin2014bmad, title = {BMaD -- A Boolean Matrix Decomposition Framework}, author = {Andrey Tyukin and Stefan Kramer and J\"{o}rg Wicker}, editor = {Toon Calders and Floriana Esposito and Eyke H\"{u}llermeier and Rosa Meo}, url = {http://dx.doi.org/10.1007/978-3-662-44845-8_40}, doi = {10.1007/978-3-662-44845-8_40}, isbn = {978-3-662-44844-1}, year = {2014}, date = {2014-01-01}, booktitle = {Machine Learning and Knowledge Discovery in Databases}, volume = {8726}, pages = {481--484}, publisher = {Springer Berlin Heidelberg}, series = {Lecture Notes in Computer Science}, abstract = {Boolean matrix decomposition is a method to obtain a compressed representation of a matrix with Boolean entries. We present a modular framework that unifies several Boolean matrix decomposition algorithms, and provide methods to evaluate their performance. The main advantages of the framework are its modular approach and hence the flexible combination of the steps of a Boolean matrix decomposition and the capability of handling missing values. The framework is licensed under the GPLv3 and can be downloaded freely at http://projects.informatik.uni-mainz.de/bmad.}, keywords = {Boolean matrix decomposition, data mining, framework}, pubstate = {published}, tppubtype = {inproceedings} } @phdthesis{wicker2013large, title = {Large Classifier Systems in Bio- and Cheminformatics}, author = {J\"{o}rg Wicker}, url = {http://mediatum.ub.tum.de/node?id=1165858}, year = {2013}, date = {2013-01-01}, school = {Technische Universit\"{a}t M\"{u}nchen}, abstract = {Large classifier systems are machine learning algorithms that use multiple classifiers to improve the prediction of target values in advanced classification tasks. 
Although learning problems in bio- and cheminformatics commonly provide data in schemes suitable for large classifier systems, they are rarely used in these domains. This thesis introduces two new classifiers incorporating systems of classifiers using Boolean matrix decomposition to handle data in a schema that often occurs in bio- and cheminformatics. The first approach, called MLC-BMaD (multi-label classification using Boolean matrix decomposition), uses Boolean matrix decomposition to decompose the labels in a multi-label classification task. The decomposed matrices are a compact representation of the information in the labels (first matrix) and the dependencies among the labels (second matrix). The first matrix is used in a further multi-label classification while the second matrix is used to generate the final matrix from the predicted values of the first matrix. MLC-BMaD was evaluated on six standard multi-label data sets, the experiments showed that MLC-BMaD can perform particularly well on data sets with a high number of labels and a small number of instances and can outperform standard multi-label algorithms. Subsequently, MLC-BMaD is extended to a special case of multi-relational learning, by considering the labels not as simple labels, but instances. The algorithm, called ClassFact (Classification factorization), uses both matrices in a multi-label classification. Each label represents a mapping between two instances. Experiments on three data sets from the domain of bioinformatics show that ClassFact can outperform the baseline method, which merges the relations into one, on hard classification tasks. Furthermore, large classifier systems are used on two cheminformatics data sets, the first one is used to predict the environmental fate of chemicals by predicting biodegradation pathways. The second is a data set from the domain of predictive toxicology. 
In biodegradation pathway prediction, I extend a knowledge-based system and incorporate a machine learning approach to predict a probability for biotransformation products based on the structure- and knowledge-based predictions of products, which are based on transformation rules. The use of multi-label classification improves the performance of the classifiers and extends the number of transformation rules that can be covered. For the prediction of toxic effects of chemicals, I applied large classifier systems to the ToxCast\texttrademark{} data set, which maps toxic effects to chemicals. As the given toxic effects are not easy to predict due to missing information and a skewed class distribution, I introduce a filtering step in the multi-label classification, which finds labels that are usable in multi-label prediction and does not take the others in the prediction into account. Experiments show that this approach can improve upon the baseline method using binary classification, as well as multi-label approaches using no filtering. 
The presented results show that large classifier systems can play a role in future research challenges, especially in bio- and cheminformatics, where data sets frequently consist of more complex structures and data can be rather small in terms of the number of instances compared to other domains.}, keywords = {biodegradation, bioinformatics, cheminformatics, computational sustainability, data mining, enviPath, machine learning, multi-label classification, multi-relational learning, toxicity}, pubstate = {published}, tppubtype = {phdthesis} } @inproceedings{wicker2012multi, title = {Multi-label Classification Using Boolean Matrix Decomposition}, author = {J\"{o}rg Wicker and Bernhard Pfahringer and Stefan Kramer}, url = {https://wicker.nz/nwp-acm/authorize.php?id=N10032 http://doi.acm.org/10.1145/2245276.2245311}, doi = {10.1145/2245276.2245311}, isbn = {978-1-4503-0857-1}, year = {2012}, date = {2012-01-01}, booktitle = {Proceedings of the 27th Annual ACM Symposium on Applied Computing}, pages = {179--186}, publisher = {ACM}, series = {SAC '12}, abstract = {This paper introduces a new multi-label classifier based on Boolean matrix decomposition. Boolean matrix decomposition is used to extract, from the full label matrix, latent labels representing useful Boolean combinations of the original labels. Base level models predict latent labels, which are subsequently transformed into the actual labels by Boolean matrix multiplication with the second matrix from the decomposition. The new method is tested on six publicly available datasets with varying numbers of labels. 
The experimental evaluation shows that the new method works particularly well on datasets with a large number of labels and strong dependencies among them.}, keywords = {associations, Boolean matrix decomposition, machine learning, multi-label classification}, pubstate = {published}, tppubtype = {inproceedings} } @article{CEREPNALKOSKI2012136, title = {The influence of parameter fitting methods on model structure selection in automated modeling of aquatic ecosystems}, author = {Darko \v{C}erepnalkoski and Katerina Ta\v{s}kova and Ljup\v{c}o Todorovski and Nata\v{s}a Atanasova and Sa\v{s}o D\v{z}eroski}, url = {https://www.sciencedirect.com/science/article/pii/S0304380012002724}, doi = {https://doi.org/10.1016/j.ecolmodel.2012.06.001}, issn = {0304-3800}, year = {2012}, date = {2012-01-01}, journal = {Ecological Modelling}, volume = {245}, pages = {136-165}, abstract = {Modeling dynamical systems involves two subtasks: structure identification and parameter estimation. ProBMoT is a tool for automated modeling of dynamical systems that addresses both tasks simultaneously. It takes into account domain knowledge formalized as templates for components of the process-based models: entities and processes. Taking a conceptual model of the system, the library of domain knowledge, and measurements of a particular dynamical system, it identifies both the structure and numerical parameters of the appropriate process-based model. ProBMoT has two main components corresponding to the two subtasks of modeling. The first component is concerned with generating candidate model structures that adhere to the conceptual model specified as input. The second subsystem uses the measured data to find suitable values for the constant parameters of a given model by using parameter estimation methods. ProBMoT uses model error to rank model structures and select the one that fits measured data best. 
In this paper, we investigate the influence of the selection of the parameter estimation methods on the structure identification. We consider one local (derivative-based) and one global (meta-heuristic) parameter estimation method. As opposed to other comparative studies of parameter estimation methods that focus on identifying parameters of a single model structure, we compare the parameter estimation methods in the context of repetitive parameter estimation for a number of candidate model structures. The results confirm the superiority of the global optimization methods over the local ones in the context of structure identification.}, note = {7th European Conference on Ecological Modelling (ECEM)}, keywords = {Aquatic ecosystems, Dynamical systems, Equation discovery, Meta-heuristic optimization, Parameter estimation, Process-based modeling}, pubstate = {published}, tppubtype = {article} } @article{TASHKOVA201236, title = {Parameter estimation in a nonlinear dynamic model of an aquatic ecosystem with meta-heuristic optimization}, author = {Katerina Taskova and Jurij \v{S}ilc and Nata\v{s}a Atanasova and Sa\v{s}o D\v{z}eroski}, url = {https://www.sciencedirect.com/science/article/pii/S0304380011005795}, doi = {https://doi.org/10.1016/j.ecolmodel.2011.11.029}, issn = {0304-3800}, year = {2012}, date = {2012-01-01}, urldate = {2012-01-01}, journal = {Ecological Modelling}, volume = {226}, pages = {36-61}, abstract = {Parameter estimation in dynamic models of ecosystems is essentially an optimization task. Due to the characteristics of ecosystems and typical models thereof, such as non-linearity, high dimensionality, and low quantity and quality of observed data, this optimization task can be very hard for traditional (derivative-based or local) optimization methods. This calls for the use of advanced meta-heuristic approaches, such as evolutionary or swarm-based methods. 
In this paper, we conduct an empirical comparison of four meta-heuristic optimization methods, and one local optimization method as a baseline, on a representative task of parameter estimation in a nonlinear dynamic model of an aquatic ecosystem. The five methods compared are the differential ant-stigmergy algorithm (DASA) and its continuous variant (CDASA), particle swarm optimization (PSO), differential evolution (DE) and algorithm 717 (A717). We use synthetic data, both without and with different levels of noise, as well as real measurements from Lake Bled. We also consider two different simulation approaches: teacher forcing, which makes supervised predictions one (small) time step ahead, and full (multistep) simulation, which makes predictions based on the history predictions for longer time periods. The meta-heuristic global optimization methods for parameter estimation are clearly superior and should be preferred over local optimization methods. While the differences in performance between the different methods within the class of meta-heuristics are not significant across all conditions, differential evolution yields the best results in terms of quality of the reconstructed system dynamics as well as speed of convergence. 
While the use of teacher forcing simulation makes parameter estimation much faster, the use of full simulation produces much better parameter estimates from real measured data.}, keywords = {Aquatic ecosystems, Least-squares estimation, Meta-heuristic optimization, Ordinary differential equations, Parameter estimation}, pubstate = {published}, tppubtype = {article} } @phdthesis{tavskova2012parameter, title = {Parameter Identification in Nonlinear Dynamic Systems with Meta-heuristic Approaches}, author = {Katerina Ta\v{s}kova}, year = {2012}, date = {2012-01-01}, urldate = {2012-01-01}, abstract = {The task of mathematical modeling of dynamic systems from observed system behavior, widely known under the name of system identification, breaks down into two subtasks. The first task, referred to as structure identification, is to specify the model structure, i.e., the functional form of the model. In practice, the model structure is usually given by a human domain expert and reflects prior domain knowledge: this is called knowledge- driven identification (as opposed to data-driven identification, which is based only on data). Structure identification plays an important role in modeling as it defines the choice available for the selection of the “best model”. The second task, referred to as parameter identification, aims to estimate the values of the model parameters that define a best possible fit of the model to the measured data. It assumes that the model structure is known and the observed system behavior is given in the form of measured data. Accurate estimation of the model parameters is important for describing and analyzing the behavior of the modeled system. 
Parameter identification is therefore a crucial step in almost all approaches for reconstructing system dynamics from measured data, including knowledge-driven and data-driven system identification as well as traditional (human) and automated modeling, i.e., the automated discovery of appropriate model structures and model parameter values by equation discovery tools. In this dissertation, we address the task of parameter identification in dynamic mod- els of real-life systems. The models are represented by ordinary differential equations (ODEs), as considered in the fields of systems biology and ecological modeling. The task is approached as a least-squares estimation problem within the frequentist framework. The latter means that the model parameters have fixed unique values and their optimal values are the ones that minimize a quadratic cost function, i.e., the sum of squared errors between the model prediction and the experimentally measured data. Least-squares esti- mation is essentially an optimization task. However, it can turn into a difficult problem for traditional (gradient-based) optimization methods when modeling complex system dy- namics. Therefore, it should be addressed by advanced meta-heuristic approaches, such as evolutionary or swarm intelligence methods. Typically, biological and ecosystem models are nonlinear and have many parameters, the studied systems can often be only partially observed, and their measurements are sparse and imperfect due to noise. All of these constraints can lead to identifiability problems, i.e., the inability to uniquely identify the unknown model parameters, making parameter estimation an even harder optimization task. Furthermore, the implicit def- inition of the cost function requires expensive numerical ODE simulations that have to be performed for every parameter solution investigated during the optimization process. 
As a result, parameter identification is a challenging and computationally expensive step in the process of reconstructing the structure and behavior of biological and ecological systems. This dissertation attempts to improve the quality of reconstructed system dynamics by improving parameter identification. In this context, we perform a thorough empirical evaluation of representative meta-heuristic methods on the task of estimating parameters in two nonlinear ODE models. The considered models describe two practically rele- x Abstract vant and representative real-life systems, i.e., endosome maturation in endocytosis and a food web of Lake Bled. The compared meta-heuristic methods are the differential ant- stigmergy algorithm, the continuous differential ant-stigmergy algorithm, particle swarm optimization, and differential evolution. As a baseline method for the experimental com- parison, we use Algorithm 717, a gradient-based local search method essentially designed for nonlinear least-squares estimation. Different experimental scenarios are considered to investigate the effect of limited observability of the system dynamics, the influence of the ODE simulation method, and the impact of the noise in the data, on the complexity of the parameter identification task, as well as the applicability and performance of different optimization methods in this context. The empirical evaluation shows that the meta-heuristic global optimization methods for parameter identification are clearly superior and should be preferred over local opti- mization methods. While the differences in performance between the different methods within the class of meta-heuristics are not significant across all conditions, differential evolution yields the best results in terms of the quality of the reconstructed system dy- namics as well as the speed of convergence. 
The observability of the system shows a strong influence, where less complete observations make the optimization task much more difficult. The results clearly indicate the importance of choosing a relevant cost function when the modeled systems dynamics is only partially observed. While the use of a simple one-step trapezoidal-based integrator for supervised prediction makes parameter identifi- cation much faster, the use of a multistep variable-coefficient integrator for unsupervised prediction produces much better parameter estimates from real-measured data. Furthermore, we consider the problem of parameter identification within the process of automated modeling of dynamic systems, where a large number of model structures is considered. One major drawback of existing automated modeling approaches is the use of local search methods for parameter identification. In this context, we investigate the influence of parameter identification (in terms of a global and a local optimization method) on the outcome of the automated modeling process, i.e., on what models are selected. We consider eight tasks of automated modeling of phytoplankton dynamics in Lake Bled from single-year data measured in eight different years. The outcome of the experiments empirically demonstrate the benefit of estimating model parameters by global optimization methods for the model (structure) selection process, opening the opportunity to model long term system dynamics. Many challenges still remain concerning the use of optimization methods for parameter identification in dynamic systems, especially in the context of automated modeling by equation discovery methods. Besides the need to extend our study by including additional dynamic systems from different domains, several lines for further improvement of existing automated modeling methods can be followed. 
These include the use of more appropriate and informative cost functions, as well as more robust and faster methods for parameter identification. Finally, explicit integration of the feedback from identifiability analysis within the process of model selection is highly desirable.}, keywords = {}, pubstate = {published}, tppubtype = {phdthesis} } @article{Taskova2011Parameter, title = {Parameter estimation with bio-inspired meta-heuristic optimization: modeling the dynamics of endocytosis}, author = {Katerina Taskova and Peter Koro\v{s}ec and Jurij \v{S}ilc and Sa\v{s}o D\v{z}eroski}, doi = {10.1186/1752-0509-5-159}, year = {2011}, date = {2011-10-11}, journal = {BMC Systems Biology}, volume = {5}, issue = {1}, pages = {159}, keywords = {machine learning, Parameter estimation}, pubstate = {published}, tppubtype = {article} } @article{Tashkova2011distributed, title = {A distributed multilevel ant-colony algorithm for the multi-way graph partitioning}, author = {Katerina Taskova and Peter Koro\v{s}ec and Jurij \v{S}ilc}, url = {https://www.inderscienceonline.com/doi/abs/10.1504/IJBIC.2011.042257}, doi = {10.1504/IJBIC.2011.042257}, year = {2011}, date = {2011-01-01}, urldate = {2011-01-01}, journal = {International Journal of Bio-Inspired Computation}, volume = {3}, number = {5}, pages = {286--296}, abstract = {The graph-partitioning problem arises as a fundamental problem in many important scientific and engineering applications. A variety of optimisation methods are used for solving this problem and among them the meta-heuristics outstand for its efficiency and robustness. Here, we address the performance of the distributed multilevel ant-colony algorithm (DMACA), a meta-heuristic approach for solving the multi-way graph partitioning problem, which is based on the ant-colony optimisation paradigm and is integrated with a multilevel procedure. 
The basic idea of the DMACA consists of parallel, independent runs enhanced with cooperation in the form of a solution exchange among the concurrent searches. The objective of the DMACA is to reduce the overall computation time, while preserving the quality of the solutions obtained by the sequential version. The experimental evaluation on a two-way and four-way partitioning with 1\% and 5\% imbalance confirms that with respect to the sequential version, the DMACA obtains statistically equally good solutions at a 99\% confidence level within a reduced overall computation time.}, keywords = {}, pubstate = {published}, tppubtype = {article} } @article{hardy2010collaborative, title = {Collaborative development of predictive toxicology applications}, author = {Barry Hardy and Nicki Douglas and Christoph Helma and Micha Rautenberg and Nina Jeliazkova and Vedrin Jeliazkov and Ivelina Nikolova and Romualdo Benigni and Olga Tcheremenskaia and Stefan Kramer and Tobias Girschick and Fabian Buchwald and J\"{o}rg Wicker and Andreas Karwath and Martin G\"{u}tlein and Andreas Maunz and Haralambos Sarimveis and Georgia Melagraki and Antreas Afantitis and Pantelis Sopasakis and David Gallagher and Vladimir Poroikov and Dmitry Filimonov and Alexey Zakharov and Alexey Lagunin and Tatyana Gloriozova and Sergey Novikov and Natalia Skvortsova and Dmitry Druzhilovsky and Sunil Chawla and Indira Ghosh and Surajit Ray and Hitesh Patel and Sylvia Escher}, url = {http://www.jcheminf.com/content/2/1/7}, doi = {10.1186/1758-2946-2-7}, issn = {1758-2946}, year = {2010}, date = {2010-01-01}, journal = {Journal of Cheminformatics}, volume = {2}, number = {1}, pages = {7}, abstract = {OpenTox provides an interoperable, standards-based Framework for the support of predictive toxicology data management, algorithms, modelling, validation and reporting. 
It is relevant to satisfying the chemical safety assessment requirements of the REACH legislation as it supports access to experimental data, (Quantitative) Structure-Activity Relationship models, and toxicological information through an integrating platform that adheres to regulatory requirements and OECD validation principles. Initial research defined the essential components of the Framework including the approach to data access, schema and management, use of controlled vocabularies and ontologies, architecture, web service and communications protocols, and selection and integration of algorithms for predictive modelling. OpenTox provides end-user oriented tools to non-computational specialists, risk assessors, and toxicological experts in addition to Application Programming Interfaces (APIs) for developers of new applications. OpenTox actively supports public standards for data representation, interfaces, vocabularies and ontologies, Open Source approaches to core platform components, and community-based collaboration approaches, so as to progress system interoperability goals. The OpenTox Framework includes APIs and services for compounds, datasets, features, algorithms, models, ontologies, tasks, validation, and reporting which may be combined into multiple applications satisfying a variety of different user needs. OpenTox applications are based on a set of distributed, interoperable OpenTox API-compliant REST web services. 
The OpenTox approach to ontology allows for efficient mapping of complementary data coming from different datasets into a unifying structure having a shared terminology and representation. Two initial OpenTox applications are presented as an illustration of the potential impact of OpenTox for high-quality and consistent structure-activity relationship modelling of REACH-relevant endpoints: ToxPredict which predicts and reports on toxicities for endpoints for an input chemical structure, and ToxCreate which builds and validates a predictive toxicity model based on an input toxicology dataset. Because of the extensible nature of the standardised Framework design, barriers of interoperability between applications and content are removed, as the user may combine data, models and validation from multiple sources in a dependable and time-effective way.}, keywords = {cheminformatics, computational sustainability, data mining, machine learning, REST, toxicity}, pubstate = {published}, tppubtype = {article} } @article{wicker2010predicting, title = {Predicting biodegradation products and pathways: a hybrid knowledge- and machine learning-based approach}, author = {J\"{o}rg Wicker and Kathrin Fenner and Lynda Ellis and Larry Wackett and Stefan Kramer}, url = {http://bioinformatics.oxfordjournals.org/content/26/6/814.full}, doi = {10.1093/bioinformatics/btq024}, year = {2010}, date = {2010-01-01}, journal = {Bioinformatics}, volume = {26}, number = {6}, pages = {814--821}, publisher = {Oxford University Press}, abstract = {Motivation: Current methods for the prediction of biodegradation products and pathways of organic environmental pollutants either do not take into account domain knowledge or do not provide probability estimates. In this article, we propose a hybrid knowledge- and machine learning-based approach to overcome these limitations in the context of the University of Minnesota Pathway Prediction System (UM-PPS). 
The proposed solution performs relative reasoning in a machine learning framework, and obtains one probability estimate for each biotransformation rule of the system. As the application of a rule then depends on a threshold for the probability estimate, the trade-off between recall (sensitivity) and precision (selectivity) can be addressed and leveraged in practice. Results: Results from leave-one-out cross-validation show that a recall and precision of $\sim$0.8 can be achieved for a subset of 13 transformation rules. Therefore, it is possible to optimize precision without compromising recall. We are currently integrating the results into an experimental version of the UM-PPS server. Availability: The program is freely available on the web at http://wwwkramer.in.tum.de/research/applications/biodegradation/data. Contact: kramer@in.tum.de}, keywords = {biodegradation, cheminformatics, computational sustainability, enviPath, machine learning, metabolic pathways}, pubstate = {published}, tppubtype = {article} } @incollection{wicker2010sindbad, title = {SINDBAD and SiQL: Overview, Applications and Future Developments}, author = {J\"{o}rg Wicker and Lothar Richter and Stefan Kramer}, editor = {Sa\v{s}o D\v{z}eroski and Bart Goethals and Pan\v{c}e Panov}, url = {http://dx.doi.org/10.1007/978-1-4419-7738-0_12}, doi = {10.1007/978-1-4419-7738-0_12}, isbn = {978-1-4419-7737-3}, year = {2010}, date = {2010-01-01}, booktitle = {Inductive Databases and Constraint-Based Data Mining}, pages = {289--309}, publisher = {Springer New York}, abstract = {The chapter gives an overview of the current state of the Sindbad system and planned extensions. Following an introduction to the system and its query language SiQL, we present application scenarios from the areas of gene expression/regulation and small molecules. 
Next, we describe a web service interface to Sindbad that enables new possibilities for inductive databases (distributing tasks over multiple servers, language and platform independence, \dots). Finally, we discuss future plans for the system, in particular, to make the system more `declarative' by the use of signatures, to integrate the useful concept of mining views into the system, and to support specific pattern domains like graphs and strings.}, keywords = {data mining, inductive databases, machine learning, query languages}, pubstate = {published}, tppubtype = {incollection} } @inproceedings{5586201, title = {The differential Ant-Stigmergy Algorithm for large-scale global optimization}, author = {Peter Koro\v{s}ec and Katerina Ta\v{s}kova and Jurij \v{S}ilc}, doi = {10.1109/CEC.2010.5586201}, year = {2010}, date = {2010-01-01}, urldate = {2010-01-01}, booktitle = {IEEE Congress on Evolutionary Computation}, pages = {1--8}, keywords = {}, pubstate = {published}, tppubtype = {inproceedings} } @inproceedings{taskova2010distributed, title = {A Distributed Multilevel Ant-Colony Approach for Finite Element Mesh Decomposition}, author = {Katerina Ta\v{s}kova and Peter Koro\v{s}ec and Jurij \v{S}ilc}, editor = {Roman Wyrzykowski and Jack Dongarra and Konrad Karczewski and Jerzy Wasniewski}, isbn = {978-3-642-14403-5}, year = {2010}, date = {2010-01-01}, urldate = {2010-01-01}, booktitle = {Parallel Processing and Applied Mathematics}, pages = {398--407}, publisher = {Springer Berlin Heidelberg}, address = {Berlin, Heidelberg}, abstract = {The k-way finite element mesh (FEM) decomposition problem is an NP-complete problem, which consists of finding a decomposition of a FEM into k balanced submeshes such that the number of cut edges is minimized. The multilevel ant-colony algorithm (MACA) is quite new and promising hybrid approach for solving different type of FEM-decomposition problems. 
The MACA is a swarm-based algorithm and therefore inherently suitable for parallel processing on many levels. Motivated by the good performance of the MACA and the possibility to improve its performance (computational cost and/or solution quality), in this paper we discuss the results of parallelizing the MACA on largest scale (on colony level). Explicitly, we present the distributed MACA (DMACA) approach, which is based on the idea of parallel independent runs enhanced with cooperation in form of a solution exchange among the concurrent searches. Experimental evaluation of the DMACA on a larger set of benchmark FEM-decomposition problems shows that the DMACA compared to the MACA can obtain solutions of equal quality in less computational time.}, keywords = {}, pubstate = {published}, tppubtype = {inproceedings} } @inproceedings{wicker2008sindbad, title = {SINDBAD and SiQL: An Inductive Database and Query Language in the Relational Model}, author = {J\"{o}rg Wicker and Lothar Richter and Kristina Kessler and Stefan Kramer}, editor = {Walter Daelemans and Bart Goethals and Katharina Morik}, url = {http://dx.doi.org/10.1007/978-3-540-87481-2_48}, doi = {10.1007/978-3-540-87481-2_48}, isbn = {978-3-540-87480-5}, year = {2008}, date = {2008-01-01}, booktitle = {Machine Learning and Knowledge Discovery in Databases}, volume = {5212}, pages = {690--694}, publisher = {Springer Berlin Heidelberg}, series = {Lecture Notes in Computer Science}, abstract = {In this demonstration, we will present the concepts and an implementation of an inductive database \textendash as proposed by Imielinski and Mannila \textendash in the relational model. The goal is to support all steps of the knowledge discovery process on the basis of queries to a database system. The query language SiQL (structured inductive query language), an SQL extension, offers query primitives for feature selection, discretization, pattern mining, clustering, instance-based learning and rule induction. 
A prototype system processing such queries was implemented as part of the SINDBAD (structured inductive database development) project. To support the analysis of multi-relational data, we incorporated multi-relational distance measures based on set distances and recursive descent. The inclusion of rule-based classification models made it necessary to extend the data model and software architecture significantly. The prototype is applied to three different data sets: gene expression analysis, gene regulation prediction and structure-activity relationships (SARs) of small molecules.}, keywords = {data mining, inductive databases, machine learning, query languages}, pubstate = {published}, tppubtype = {inproceedings} } @inproceedings{richter2008inductive, title = {An Inductive Database and Query Language in the Relational Model}, author = {Lothar Richter and J\"{o}rg Wicker and Kristina Kessler and Stefan Kramer}, url = {https://wicker.nz/nwp-acm/authorize.php?id=N10033 http://doi.acm.org/10.1145/1353343.1353440}, doi = {10.1145/1353343.1353440}, isbn = {978-1-59593-926-5}, year = {2008}, date = {2008-01-01}, booktitle = {Proceedings of the 11th International Conference on Extending Database Technology: Advances in Database Technology}, pages = {740--744}, publisher = {ACM}, series = {EDBT '08}, abstract = {In the demonstration, we will present the concepts and an implementation of an inductive database -- as proposed by Imielinski and Mannila -- in the relational model. The goal is to support all steps of the knowledge discovery process, from pre-processing via data mining to post-processing, on the basis of queries to a database system. The query language SIQL (structured inductive query language), an SQL extension, offers query primitives for feature selection, discretization, pattern mining, clustering, instance-based learning and rule induction. 
A prototype system processing such queries was implemented as part of the SINDBAD (structured inductive database development) project. Key concepts of this system, among others, are the closure of operators and distances between objects. To support the analysis of multi-relational data, we incorporated multi-relational distance measures based on set distances and recursive descent. The inclusion of rule-based classification models made it necessary to extend the data model and the software architecture significantly. The prototype is applied to three different applications: gene expression analysis, gene regulation prediction and structure-activity relationships (SARs) of small molecules.}, keywords = {data mining, inductive databases, machine learning, query languages}, pubstate = {published}, tppubtype = {inproceedings} } @inproceedings{wicker2008sindbadsails, title = {SINDBAD SAILS: A Service Architecture for Inductive Learning Schemes}, author = {J\"{o}rg Wicker and Christoph Brosdau and Lothar Richter and Stefan Kramer}, url = {http://www.ecmlpkdd2008.org/files/pdf/workshops/sokd/2.pdf}, year = {2008}, date = {2008-01-01}, booktitle = {Proceedings of the First Workshop on Third Generation Data Mining: Towards Service-Oriented Knowledge Discovery}, abstract = {The paper presents SINDBAD SAILS (Service Architecture for Inductive Learning Schemes), a Web Service interface to the inductive database SINDBAD. To the best of our knowledge, it is the first time a Web Service interface is provided for an inductive database. The combination of service-oriented architectures and inductive databases is particularly useful, as it enables distributed data mining without the need to install specialized data mining or machine learning software. Moreover, inductive queries can easily be used in almost any kind of programming language. 
The paper discusses the underlying concepts and explains a sample program making use of SINDBAD SAILS.}, keywords = {data mining, inductive databases, machine learning, query languages}, pubstate = {published}, tppubtype = {inproceedings} } @inproceedings{wicker2008machine, title = {Machine Learning and Data Mining Approaches to Biodegradation Pathway Prediction}, author = {J\"{o}rg Wicker and Kathrin Fenner and Lynda Ellis and Larry Wackett and Stefan Kramer}, editor = {Will Bridewell and Toon Calders and Ana Karla Medeiros and Stefan Kramer and Mykola Pechenizkiy and Ljupco Todorovski}, url = {http://www.ecmlpkdd2008.org/files/pdf/workshops/ipm/9.pdf}, year = {2008}, date = {2008-01-01}, booktitle = {Proceedings of the Second International Workshop on the Induction of Process Models at ECML PKDD 2008}, keywords = {biodegradation, cheminformatics, computational sustainability, enviPath, machine learning, metabolic pathways}, pubstate = {published}, tppubtype = {inproceedings} } @inproceedings{kramer2006inductive, title = {Inductive Databases in the Relational Model: The Data as the Bridge}, author = {Stefan Kramer and Volker Aufschild and Andreas Hapfelmeier and Alexander Jarasch and Kristina Kessler and Stefan Reckow and J\"{o}rg Wicker and Lothar Richter}, editor = {Francesco Bonchi and Jean-Fran\c{c}ois Boulicaut}, url = {http://dx.doi.org/10.1007/11733492_8}, doi = {10.1007/11733492_8}, isbn = {978-3-540-33292-3}, year = {2006}, date = {2006-01-01}, booktitle = {Knowledge Discovery in Inductive Databases}, volume = {3933}, pages = {124--138}, publisher = {Springer Berlin Heidelberg}, series = {Lecture Notes in Computer Science}, abstract = {We present a new and comprehensive approach to inductive databases in the relational model. The main contribution is a new inductive query language extending SQL, with the goal of supporting the whole knowledge discovery process, from pre-processing via data mining to post-processing. 
A prototype system supporting the query language was developed in the SINDBAD (structured inductive database development) project. Setting aside models and focusing on distance-based and instance-based methods, closure can easily be achieved. An example scenario from the area of gene expression data analysis demonstrates the power and simplicity of the concept. We hope that this preliminary work will help to bring the fundamental issues, such as the integration of various pattern domains and data mining techniques, to the attention of the inductive database community.}, keywords = {data mining, inductive databases, machine learning, query languages}, pubstate = {published}, tppubtype = {inproceedings} }