% IMPORTANT: The following is UTF-8 encoded.  This means that in the presence
% of non-ASCII characters, it will not work with BibTeX 0.99 or older.
% Instead, you should use an up-to-date BibTeX implementation like “bibtex8” or
% “biber”.
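%
% A minimal usage sketch (not part of the record itself): the bibliography
% filename “references.bib” and the document name “main.tex” are assumptions.
% With biblatex and the biber backend, the entry below can be cited like this:
%
%   \documentclass{article}
%   \usepackage[backend=biber]{biblatex}
%   \addbibresource{references.bib}   % assumed name of this .bib file
%   \begin{document}
%   Autonomous accelerator tuning is compared in \cite{Kaiser:585442}.
%   \printbibliography
%   \end{document}
%
% Compile with: pdflatex main && biber main && pdflatex main
% (assuming the document above is saved as main.tex).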

@ARTICLE{Kaiser:585442,
      author       = {Kaiser, Jan and Xu, Chenran and Eichler, Annika and
                      Santamaria Garcia, Andrea and Stein, Oliver and
                      Bruendermann, Erik and Kuropka, Willi and Dinter, Hannes and
                      Mayet, Frank and Vinatier, Thomas and Burkart, Florian and
                      Schlarb, Holger},
      title        = {{R}einforcement learning-trained optimisers and {B}ayesian
                      optimisation for online particle accelerator tuning},
      journal      = {Scientific Reports},
      volume       = {14},
      number       = {1},
      issn         = {2045-2322},
      address      = {London},
      publisher    = {Macmillan Publishers Limited, part of Springer Nature},
      reportid     = {PUBDB-2023-03590},
      pages        = {15733},
      year         = {2024},
      abstract     = {Online tuning of particle accelerators is a complex
                      optimisation problem that continues to require manual
                      intervention by experienced human operators. Autonomous
                      tuning is a rapidly expanding field of research, where
                      learning-based methods like Bayesian optimisation (BO) hold
                      great promise in improving plant performance and reducing
                      tuning times. At the same time, reinforcement learning (RL)
                      is a capable method of learning intelligent controllers, and
                      recent work shows that RL can also be used to train
                      domain-specialised optimisers in so-called reinforcement
                      learning-trained optimisation (RLO). In parallel efforts,
                      both algorithms have found successful adoption in particle
                      accelerator tuning. Here we present a comparative case
                      study, assessing the performance of both algorithms while
                      providing a nuanced analysis of the merits and the practical
                      challenges involved in deploying them to real-world
                      facilities. Our results will help practitioners choose a
                      suitable learning-based tuning algorithm for their tuning
                      tasks, accelerating the adoption of autonomous tuning
                      algorithms, ultimately improving the availability of
                      particle accelerators and pushing their operational limits.},
      cin          = {MSK / MPY1 / KIT},
      ddc          = {600},
      cid          = {I:(DE-H253)MSK-20120731 / I:(DE-H253)MPY1-20170908 /
                      I:(DE-H253)KIT-20130928},
      pnm          = {621 - Accelerator Research and Development (POF4-621) /
                      InternLabs-0011 - HIR3X - Helmholtz International Laboratory
                      on Reliability, Repetition, Results at the most advanced
                      X-ray Sources (2020\_InternLabs-0011) / ZT-I-PF-5-6 -
                      Autonomous Accelerator (AA) (2020\_ZT-I-PF-5-6)},
      pid          = {G:(DE-HGF)POF4-621 / G:(DE-HGF)2020\_InternLabs-0011 /
                      G:(DE-HGF)2020\_ZT-I-PF-5-6},
      experiment   = {EXP:(DE-H253)ARES-20200101},
      typ          = {PUB:(DE-HGF)16},
      pubmed       = {pmid:38977749},
      UT           = {WOS:001271178000014},
      doi          = {10.1038/s41598-024-66263-y},
      url          = {https://bib-pubdb1.desy.de/record/585442},
}