2019
Kamp, Michael
Black-Box Parallelization for Machine Learning PhD Thesis
Universitäts- und Landesbibliothek Bonn, 2019.
Abstract | Links | BibTeX | Tags: averaging, black-box, communication-efficient, convex optimization, deep learning, distributed, dynamic averaging, federated, learning theory, machine learning, parallelization, privacy, radon machine
@phdthesis{kamp2019black,
title = {Black-Box Parallelization for Machine Learning},
author = {Michael Kamp},
url = {https://d-nb.info/1200020057/34},
year = {2019},
date = {2019-01-01},
urldate = {2019-01-01},
school = {Universitäts- und Landesbibliothek Bonn},
abstract = {The landscape of machine learning applications is changing rapidly: large centralized datasets are replaced by high-volume, high-velocity data streams generated by a vast number of geographically distributed, loosely connected devices, such as mobile phones, smart sensors, autonomous vehicles or industrial machines. Current learning approaches centralize the data and process it in parallel in a cluster or computing center. This has three major disadvantages: (i) it does not scale well with the number of data-generating devices since their growth exceeds that of computing centers, (ii) the communication costs for centralizing the data are prohibitive in many applications, and (iii) it requires sharing potentially privacy-sensitive data. Pushing computation towards the data-generating devices alleviates these problems and makes it possible to employ their otherwise unused computing power. However, current parallel learning approaches are designed for tightly integrated systems with low latency and high bandwidth, not for loosely connected distributed devices. Therefore, I propose a new paradigm for parallelization that treats the learning algorithm as a black box, training local models on distributed devices and aggregating them into a single strong one. Since this requires only exchanging models instead of actual data, the approach is highly scalable, communication-efficient, and privacy-preserving.
Following this paradigm, this thesis develops black-box parallelizations for two broad classes of learning algorithms. One approach can be applied to incremental learning algorithms, i.e., those that improve a model in iterations. Based on the utility of aggregations, it schedules communication dynamically, adapting it to the hardness of the learning problem. In practice, this leads to a reduction in communication by orders of magnitude. It is analyzed for (i) online learning, in particular in the context of in-stream learning, where it guarantees optimal regret, and for (ii) batch learning based on empirical risk minimization, where optimal convergence can be guaranteed. The other approach is applicable to non-incremental algorithms as well. It uses a novel aggregation method based on the Radon point that achieves provably high model quality with only a single aggregation. This is achieved in polylogarithmic runtime on quasi-polynomially many processors. This relates parallel machine learning to Nick’s class of parallel decision problems and is a step towards answering a fundamental open problem about the abilities and limitations of efficient parallel learning algorithms. An empirical study on real distributed systems confirms the potential of the approaches in realistic application scenarios.},
keywords = {averaging, black-box, communication-efficient, convex optimization, deep learning, distributed, dynamic averaging, federated, learning theory, machine learning, parallelization, privacy, radon machine},
pubstate = {published},
tppubtype = {phdthesis}
}
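The black-box paradigm summarized in the abstract amounts to a simple protocol: each device trains a model locally with an arbitrary learning algorithm, and only the resulting model parameters are exchanged and aggregated. The following is a minimal sketch of that idea, assuming a toy SGD-trained linear model and aggregation by plain parameter averaging; the learner and all names are illustrative, not the implementation from the thesis.

import numpy as np

def train_local(X, y, lr=0.05, epochs=50):
    # The learning algorithm is a black box to the protocol; here it is a
    # least-squares linear model fitted by plain SGD, purely for illustration.
    w = np.zeros(X.shape[1])
    for _ in range(epochs):
        for xi, yi in zip(X, y):
            w -= lr * (xi @ w - yi) * xi
    return w

def aggregate(local_models):
    # Aggregation by parameter averaging; the thesis also studies aggregation
    # by Radon points (see the 2017 entry below).
    return np.mean(local_models, axis=0)

# Toy run: 10 "devices", each holding its own sample of the same linear task.
rng = np.random.default_rng(0)
w_true = np.array([2.0, -1.0, 0.5])
local_models = []
for _ in range(10):
    X = rng.standard_normal((50, 3))
    local_models.append(train_local(X, X @ w_true))
w_global = aggregate(local_models)
print(np.round(w_global, 2))   # close to w_true; only models were exchanged

Since raw data never leaves a device, the scheme inherits the scalability, communication-efficiency, and privacy properties described in the abstract.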

2017
Kamp, Michael; Boley, Mario; Missura, Olana; Gärtner, Thomas
Effective Parallelisation for Machine Learning Proceedings Article
In: Advances in Neural Information Processing Systems, pp. 6480–6491, 2017.
Abstract | Links | BibTeX | Tags: decentralized, distributed, machine learning, parallelization, radon
@inproceedings{kamp2017effective,
title = {Effective Parallelisation for Machine Learning},
author = {Michael Kamp and Mario Boley and Olana Missura and Thomas Gärtner},
url = {http://papers.nips.cc/paper/7226-effective-parallelisation-for-machine-learning.pdf},
year = {2017},
date = {2017-01-01},
urldate = {2017-01-01},
booktitle = {Advances in Neural Information Processing Systems},
pages = {6480--6491},
abstract = {We present a novel parallelisation scheme that simplifies the adaptation of learning algorithms to growing amounts of data as well as growing needs for accurate and confident predictions in critical applications. In contrast to other parallelisation techniques, it can be applied to a broad class of learning algorithms without further mathematical derivations and without writing dedicated code, while at the same time maintaining theoretical performance guarantees. Moreover, our parallelisation scheme is able to reduce the runtime of many learning algorithms to polylogarithmic time on quasi-polynomially many processing units. This is a significant step towards a general answer to an open question on efficient parallelisation of machine learning algorithms in the sense of Nick's Class (NC). The cost of this parallelisation is in the form of a larger sample complexity. Our empirical study confirms the potential of our parallelisation scheme with fixed numbers of processors and instances in realistic application scenarios.},
keywords = {decentralized, distributed, machine learning, parallelization, radon},
pubstate = {published},
tppubtype = {inproceedings}
}
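The aggregation behind this scheme (the Radon machine) repeatedly replaces groups of d+2 models in R^d by their Radon point. Below is a minimal sketch of computing a single Radon point with NumPy, assuming the models are given as parameter vectors; it is an illustrative reconstruction from Radon's theorem, not the authors' reference code.

import numpy as np

def radon_point(points):
    # Return a Radon point of r >= d+2 vectors in R^d.
    P = np.asarray(points, dtype=float)      # shape (r, d)
    r, d = P.shape
    assert r >= d + 2, "need at least d+2 points for a Radon partition"
    # Find a nonzero lam with sum_i lam_i * x_i = 0 and sum_i lam_i = 0:
    # stack coordinates and a row of ones, take a null-space vector via SVD.
    A = np.vstack([P.T, np.ones(r)])         # shape (d+1, r)
    lam = np.linalg.svd(A)[2][-1]            # A @ lam is (numerically) zero
    pos = lam > 0
    # Both sides of the Radon partition yield the same point; use the positive part.
    return P[pos].T @ lam[pos] / lam[pos].sum()

# Toy usage: aggregate d+2 noisy copies of a parameter vector in R^d.
d = 3
models = 1.0 + 0.1 * np.random.default_rng(0).standard_normal((d + 2, d))
print(radon_point(models))                   # close to the all-ones vector

Iterating this aggregation over a tree of processors is what yields the polylogarithmic runtime on quasi-polynomially many processing units mentioned in the abstract.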

2016
Kamp, Michael; Bothe, Sebastian; Boley, Mario; Mock, Michael
Communication-Efficient Distributed Online Learning with Kernels Proceedings Article
In: Frasconi, Paolo; Landwehr, Niels; Manco, Giuseppe; Vreeken, Jilles (Ed.): Machine Learning and Knowledge Discovery in Databases, pp. 805–819, Springer International Publishing, 2016.
Abstract | Links | BibTeX | Tags: communication-efficient, distributed, dynamic averaging, federated learning, kernel methods, parallelization
@inproceedings{kamp2016communication,
title = {Communication-Efficient Distributed Online Learning with Kernels},
author = {Michael Kamp and Sebastian Bothe and Mario Boley and Michael Mock},
editor = {Paolo Frasconi and Niels Landwehr and Giuseppe Manco and Jilles Vreeken},
url = {http://michaelkamp.org/wp-content/uploads/2020/03/Paper467.pdf},
year = {2016},
date = {2016-09-16},
urldate = {2016-09-16},
booktitle = {Machine Learning and Knowledge Discovery in Databases},
pages = {805--819},
publisher = {Springer International Publishing},
abstract = {We propose an efficient distributed online learning protocol for low-latency real-time services. It extends a previously presented protocol to kernelized online learners that represent their models by a support vector expansion. While such learners often achieve higher predictive performance than their linear counterparts, communicating the support vector expansions becomes inefficient for large numbers of support vectors. The proposed extension allows for a larger class of online learning algorithms—including those alleviating the problem above through model compression. In addition, we characterize the quality of the proposed protocol by introducing a novel criterion that requires the communication to be bounded by the loss suffered.},
keywords = {communication-efficient, distributed, dynamic averaging, federated learning, kernel methods, parallelization},
pubstate = {published},
tppubtype = {inproceedings}
}
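The underlying dynamic protocol lets each node compare its current model to the last commonly known reference model and triggers a synchronization (averaging) only when the divergence exceeds a threshold; the extension in this paper applies the same idea to kernelized learners represented by support vector expansions. A simplified sketch for the linear case follows, assuming a squared-norm divergence test with an illustrative threshold delta; names and constants are not from the paper.

import numpy as np

def local_violation(w_local, w_reference, delta):
    # A node reports a violation when its model drifts too far from the last
    # commonly known reference model; otherwise no communication is needed.
    return np.linalg.norm(w_local - w_reference) ** 2 > delta

def synchronize(local_models):
    # On violation, the coordinator averages the reported models and
    # broadcasts the result as the new reference model.
    return np.mean(local_models, axis=0)

# Toy round: 4 nodes, one of which drifts and triggers a synchronization.
rng = np.random.default_rng(0)
w_ref = np.zeros(5)
models = [w_ref + 0.01 * rng.standard_normal(5) for _ in range(4)]
models[2] = models[2] + 0.5                  # simulated drift on node 2
if any(local_violation(w, w_ref, delta=0.1) for w in models):
    w_ref = synchronize(models)
print(w_ref)

Because synchronizations are triggered only by drift, the communication cost adapts to the hardness of the learning problem, which is the behavior the paper's loss-bounded criterion formalizes.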
