% IMPORTANT: The following is UTF-8 encoded.  This means that in the presence
% of non-ASCII characters, it will not work with BibTeX 0.99 or older.
% Instead, you should use an up-to-date BibTeX implementation like “bibtex8” or
% “biber”.

@PHDTHESIS{Roith:626059,
      author       = {Roith, Tim},
      othercontributors = {Slepcev, Dejan and Hoffmann, Franca and Burger, Martin},
      title        = {{C}onsistency, {R}obustness and {S}parsity for {L}earning
                      {A}lgorithms},
      school       = {Friedrich-Alexander-Universität Erlangen-Nürnberg},
      type         = {Dissertation},
      publisher    = {FAU},
      reportid     = {PUBDB-2025-01281},
      pages        = {217},
      year         = {2024},
      note         = {Dissertation, Friedrich-Alexander-Universität
                      Erlangen-Nürnberg, 2024},
      abstract     = {This thesis is concerned with consistency, robustness and
                      sparsity of supervised and semi-supervised learning
                      algorithms. For the latter, we consider the so-called
                      Lipschitz learning task (Nadler, Boaz, Nathan Srebro, and
                      Xueyuan Zhou. 'Statistical analysis of semi-supervised
                      learning: The limit of infinite unlabelled data.' Advances
                      in neural information processing systems 22 (2009)) for
                      which we prove Gamma convergence and convergence rates for
                      discrete solutions to their continuum counterpart in the
                      infinite data limit. In the supervised regime, we deal with
                      input-robustness w.r.t. adversarial attacks and resolution
                      changes. For the multi-resolution setting, we analyze the
                      role of Fourier neural operators (Li, Zongyi, et al.
                      'Fourier neural operator for parametric partial differential
                      equations.' arXiv preprint arXiv:2010.08895 (2020)) and
                      their connection to standard convolutional neural layers.
                      Concerning the computational complexity of neural network
                      training, we propose an algorithm based on Bregman
                      iterations (Osher, Stanley, et al. 'An iterative
                      regularization method for total variation-based image
                      restoration.' Multiscale Modeling \& Simulation 4.2
                      (2005)) that allows for sparse weight matrices throughout
                      the training. We also provide the convergence analysis for
                      the stochastic adaptation of the original Bregman iterations.},
      keywords     = {Machine Learning (Other) / Consistency (Other) / Sparsity
                      (Other) / Robustness (Other) / DDC Classification::5
                      Naturwissenschaften::50 Naturwissenschaften::500
                      Naturwissenschaften und Mathematik (Other)},
      cin          = {FS-CI},
      cid          = {I:(DE-H253)FS-CI-20230420},
      pnm          = {623 - Data Management and Analysis (POF4-623) / PHGS,
                      VH-GS-500 - PIER Helmholtz Graduate School
                      $(2015_IFV-VH-GS-500)$ / DFG project G:(GEPRIS)390685813 -
                      EXC 2047: Hausdorff Center for Mathematics (HCM) (390685813)
                      / NoMADS - Nonlocal Methods for Arbitrary Data Sources
                      (777826)},
      pid          = {G:(DE-HGF)POF4-623 / $G:(DE-HGF)2015_IFV-VH-GS-500$ /
                      G:(GEPRIS)390685813 / G:(EU-Grant)777826},
      experiment   = {EXP:(DE-MLZ)NOSPEC-20140101},
      typ          = {PUB:(DE-HGF)11},
      doi          = {10.25593/OPEN-FAU-522},
      url          = {https://bib-pubdb1.desy.de/record/626059},
}