% IMPORTANT: The following is UTF-8 encoded.  This means that in the presence
% of non-ASCII characters, it will not work with BibTeX 0.99 or older.
% Instead, you should use an up-to-date BibTeX implementation like “bibtex8” or
% “biber”.

@ARTICLE{Weigand:636102,
      author       = {Weigand, Lukas and Roith, Tim and Burger, Martin},
      title        = {{A}dversarial flows: {A} gradient flow characterization of
                      adversarial attacks},
      journal      = {European Journal of Applied Mathematics},
      volume       = {37},
      number       = {1},
      issn         = {0956-7925},
      address      = {Cambridge},
      publisher    = {Cambridge University Press},
      reportid     = {PUBDB-2025-03598, arXiv:2406.05376},
      pages        = {123--179},
      year         = {2025},
      abstract     = {A popular method to perform adversarial attacks on neural
                      networks is the so-called fast gradient sign method and its
                      iterative variant. In this paper, we interpret this method
                      as an explicit Euler discretization of a differential
                      inclusion, where we also show convergence of the
                      discretization to the associated gradient flow. To do so, we
                      consider the concept of $p$-curves of maximal slope in
                      the case $p=\infty$. We prove the existence of
                      $\infty$-curves of maximal slope and derive an
                      alternative characterization via
                      differential inclusions. Furthermore, we also consider
                      Wasserstein gradient flows for potential energies, where we
                      show that curves in the Wasserstein space can be
                      characterized by a representing measure on the space of
                      curves in the underlying Banach space that fulfill the
                      differential inclusion. The application of our theory to the
                      finite-dimensional setting is twofold: On the one hand,
                      we show that a whole class of normalized gradient
                      descent methods (in particular, signed gradient descent)
                      converges, up to subsequences, to the flow as the step
                      size tends to zero. On the other hand, in the
                      distributional setting, we show that the inner
                      optimization task of the adversarial training objective
                      can be characterized via $\infty$-curves of maximal
                      slope on an appropriate optimal transport space.},
      cin          = {FS-CI},
      ddc          = {510},
      cid          = {I:(DE-H253)FS-CI-20230420},
      pnm          = {623 - Data Management and Analysis (POF4-623)},
      pid          = {G:(DE-HGF)POF4-623},
      experiment   = {EXP:(DE-MLZ)NOSPEC-20140101},
      typ          = {PUB:(DE-HGF)16},
      eprint       = {2406.05376},
      howpublished = {arXiv:2406.05376},
      archivePrefix = {arXiv},
      doi          = {10.1017/S0956792525100120},
      url          = {https://bib-pubdb1.desy.de/record/636102},
}
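
% Reader's note on the abstract above: the correspondence it describes between
% the iterative fast gradient sign method and a differential inclusion can be
% sketched as follows. This is a minimal illustration only; the symbols ($E$
% for the attacked loss and $\tau$ for the step size) are our own notation and
% are not taken from the record.
%
%   Iterative FGSM step (an explicit Euler discretization):
%     $x_{k+1} = x_k + \tau \, \mathrm{sign}\bigl(\nabla E(x_k)\bigr)$
%
%   Associated differential inclusion (signed gradient flow):
%     $x'(t) \in \mathrm{Sign}\bigl(\nabla E(x(t))\bigr)$,
%
% where $\mathrm{Sign}$ denotes the set-valued sign function applied
% componentwise (multivalued where a component of the gradient vanishes).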