<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE article PUBLIC "-//NLM//DTD Journal Publishing DTD v2.0 20040830//EN" "http://dtd.nlm.nih.gov/publishing/2.0/journalpublishing.dtd">
<article xmlns:xlink="http://www.w3.org/1999/xlink" article-type="research-article" dtd-version="2.0">
  <front>
    <journal-meta>
      <journal-id journal-id-type="publisher-id">JFR</journal-id>
      <journal-id journal-id-type="nlm-ta">JMIR Form Res</journal-id>
      <journal-title>JMIR Formative Research</journal-title>
      <issn pub-type="epub">2561-326X</issn>
      <publisher>
        <publisher-name>JMIR Publications</publisher-name>
        <publisher-loc>Toronto, Canada</publisher-loc>
      </publisher>
    </journal-meta>
    <article-meta>
      <article-id pub-id-type="publisher-id">v10i1e77481</article-id>
      <article-id pub-id-type="pmid">41632969</article-id>
      <article-id pub-id-type="doi">10.2196/77481</article-id>
      <article-categories>
        <subj-group subj-group-type="heading">
          <subject>Original Paper</subject>
        </subj-group>
        <subj-group subj-group-type="article-type">
          <subject>Original Paper</subject>
        </subj-group>
      </article-categories>
      <title-group>
        <article-title>An Explanation User Interface for Artificial Intelligence–Supported Mechanical Ventilation Optimization for Clinicians: User-Centered Design and Formative Usability Study</article-title>
      </title-group>
      <contrib-group>
        <contrib contrib-type="editor">
          <name>
            <surname>Schwartz</surname>
            <given-names>Amy</given-names>
          </name>
        </contrib>
        <contrib contrib-type="editor">
          <name>
            <surname>Balcarras</surname>
            <given-names>Matthew</given-names>
          </name>
        </contrib>
      </contrib-group>
      <contrib-group>
        <contrib contrib-type="reviewer">
          <name>
            <surname>Johansson</surname>
            <given-names>Ulf</given-names>
          </name>
        </contrib>
        <contrib contrib-type="reviewer">
          <name>
            <surname>Jawa</surname>
            <given-names>Vibhu</given-names>
          </name>
        </contrib>
      </contrib-group>
      <contrib-group>
        <contrib id="contrib1" contrib-type="author" corresp="yes">
          <name name-style="western">
            <surname>Jung</surname>
            <given-names>Ian-C</given-names>
          </name>
          <degrees>MSc</degrees>
          <xref rid="aff1" ref-type="aff">1</xref>
          <address>
            <institution>Institute for Medical Informatics and Biometry</institution>
            <institution>Faculty of Medicine and University Hospital Carl Gustav Carus</institution>
            <institution>TUD Dresden University of Technology</institution>
            <addr-line>Postfach 113</addr-line>
            <addr-line>Fetscherstraße 74</addr-line>
            <addr-line>Dresden, 01307</addr-line>
            <country>Germany</country>
            <phone>49 351 458 87 7711</phone>
            <email>ian-christopher.jung@tu-dresden.de</email>
          </address>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0002-7562-2707</ext-link>
        </contrib>
        <contrib id="contrib2" contrib-type="author">
          <name name-style="western">
            <surname>Zerlik</surname>
            <given-names>Maria</given-names>
          </name>
          <degrees>MSc</degrees>
          <xref rid="aff1" ref-type="aff">1</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0002-2199-7474</ext-link>
        </contrib>
        <contrib id="contrib3" contrib-type="author">
          <name name-style="western">
            <surname>Schuler</surname>
            <given-names>Katharina</given-names>
          </name>
          <degrees>MSc</degrees>
          <xref rid="aff1" ref-type="aff">1</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0009-0005-7054-4059</ext-link>
        </contrib>
        <contrib id="contrib4" contrib-type="author">
          <name name-style="western">
            <surname>Sedlmayr</surname>
            <given-names>Martin</given-names>
          </name>
          <degrees>Prof Dr</degrees>
          <xref rid="aff1" ref-type="aff">1</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0002-9888-8460</ext-link>
        </contrib>
        <contrib id="contrib5" contrib-type="author">
          <name name-style="western">
            <surname>Sedlmayr</surname>
            <given-names>Brita</given-names>
          </name>
          <degrees>PhD</degrees>
          <xref rid="aff1" ref-type="aff">1</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0001-6159-7822</ext-link>
        </contrib>
      </contrib-group>
      <aff id="aff1">
        <label>1</label>
        <institution>Institute for Medical Informatics and Biometry</institution>
        <institution>Faculty of Medicine and University Hospital Carl Gustav Carus</institution>
        <institution>TUD Dresden University of Technology</institution>
        <addr-line>Dresden</addr-line>
        <country>Germany</country>
      </aff>
      <author-notes>
        <corresp>Corresponding Author: Ian-C Jung <email>ian-christopher.jung@tu-dresden.de</email></corresp>
      </author-notes>
      <pub-date pub-type="collection">
        <year>2026</year>
      </pub-date>
      <pub-date pub-type="epub">
        <day>3</day>
        <month>2</month>
        <year>2026</year>
      </pub-date>
      <volume>10</volume>
      <elocation-id>e77481</elocation-id>
      <history>
        <date date-type="received">
          <day>15</day>
          <month>5</month>
          <year>2025</year>
        </date>
        <date date-type="rev-request">
          <day>16</day>
          <month>10</month>
          <year>2025</year>
        </date>
        <date date-type="rev-recd">
          <day>25</day>
          <month>11</month>
          <year>2025</year>
        </date>
        <date date-type="accepted">
          <day>25</day>
          <month>11</month>
          <year>2025</year>
        </date>
      </history>
      <copyright-statement>©Ian-C Jung, Maria Zerlik, Katharina Schuler, Martin Sedlmayr, Brita Sedlmayr. Originally published in JMIR Formative Research (https://formative.jmir.org), 03.02.2026.</copyright-statement>
      <copyright-year>2026</copyright-year>
      <license license-type="open-access" xlink:href="https://creativecommons.org/licenses/by/4.0/">
        <p>This is an open-access article distributed under the terms of the Creative Commons Attribution License (https://creativecommons.org/licenses/by/4.0/), which permits unrestricted use, distribution, and reproduction in any medium, provided the original work, first published in JMIR Formative Research, is properly cited. The complete bibliographic information, a link to the original publication on https://formative.jmir.org, as well as this copyright and license information must be included.</p>
      </license>
      <self-uri xlink:href="https://formative.jmir.org/2026/1/e77481" xlink:type="simple"/>
      <abstract>
        <sec sec-type="background">
          <title>Background</title>
          <p>The integration of artificial intelligence (AI) into clinical decision support systems (CDSSs) for mechanical ventilation in intensive care units (ICUs) holds great potential. However, the lack of transparency and explainability hinders the adoption of opaque AI models in clinical practice. Explanation user interfaces (XUIs), incorporating explainable AI algorithms, are considered a key solution to enhance trust and usability. Despite growing research on explainable AI in health care, little is known about how clinicians perceive and interact with such explanation interfaces in high-stakes environments such as the ICU. Addressing this gap is essential to ensure that AI-supported CDSS are not only accurate but also trusted, interpretable, and seamlessly integrated into clinical workflows.</p>
        </sec>
        <sec sec-type="objective">
          <title>Objective</title>
          <p>This study aimed to evaluate the first iteration of the design and evaluation phase of an XUI for an AI-based CDSS intended to optimize mechanical ventilation in the ICU. Specifically, it explores how different user groups—ICU nurses and physicians—perceive and prioritize explanation concepts, providing the empirical foundation for subsequent refinement iterations.</p>
        </sec>
        <sec sec-type="methods">
          <title>Methods</title>
          <p>A midfidelity prototype was developed using the prototyping software Justinmind, based on existing guidelines, scientific literature, and insights from previous user-centered design (UCD) phases. The design process followed ISO (International Organization for Standardization) 9241-210 principles for UCD and combined qualitative and quantitative feedback to identify usability strengths, design challenges, and role-specific explanation needs. The prototype was evaluated formatively through 2 usability walkthroughs (walkthrough 1: 4 resident physicians and walkthrough 2: 4 ICU nurses), which included guided group discussions and Likert-scale assessments of explanation concepts in terms of understandability, suitability, and visual appeal.</p>
        </sec>
        <sec sec-type="results">
          <title>Results</title>
          <p>The XUI was structured into 2 levels: a first level displaying high-level explanations (outlier warning and output certainty) alongside the CDSS output, and a second level offering more detailed explanations (available input, feature importance, and rule-based explanation) for users seeking deeper insight. While both user groups appreciated the first level, physicians found the second level of the XUI useful, whereas ICU nurses found it overly detailed. Thus, the structure was able to address the differing needs for explanations. The layered design helped balance transparency and information overload by providing initially concise explanations and more detailed ones on demand. The evaluation further strengthened evidence for role-dependent explanation needs, suggesting that nurses prefer actionable, concise insights, whereas physicians benefit from more granular transparency information.</p>
        </sec>
        <sec sec-type="conclusions">
          <title>Conclusions</title>
          <p>This study underscores the importance of UCD in designing XUIs for CDSS. It highlights the differing information needs of physicians and ICU nurses, emphasizing the value of involving users early in the development of suitable XUIs. The findings provide practical guidance for designing layered, role-sensitive explanation interfaces in critical care and form the basis for future iterative evaluations and experimental studies assessing their impact on decision-making and clinician trust.</p>
        </sec>
      </abstract>
      <kwd-group>
        <kwd>CDSS</kwd>
        <kwd>clinical decision support system</kwd>
        <kwd>XAI</kwd>
        <kwd>explanation user interface</kwd>
        <kwd>XUI</kwd>
        <kwd>usability</kwd>
        <kwd>formative evaluation</kwd>
        <kwd>intensive care unit</kwd>
        <kwd>ICU</kwd>
        <kwd>explainable artificial intelligence</kwd>
      </kwd-group>
    </article-meta>
  </front>
  <body>
    <sec sec-type="introduction">
      <title>Introduction</title>
      <sec>
        <title>Background</title>
        <p>Artificial intelligence (AI) is expected to benefit the medical domain with improvements in patient care and reduced workload for medical personnel. AI models are increasingly proposed as a knowledge or inference base of clinical decision support systems (CDSSs) to achieve this. These AI-based CDSS are considered well-suited for use in the intensive care unit (ICU) due to the amount of available documented data in the ICU, but the transfer of these systems into clinical practice is still lacking [<xref ref-type="bibr" rid="ref1">1</xref>]. AI models for which it is impossible to directly infer why they generate an output due to their complexity are called black-box models. The black-box nature of these models poses a barrier to adoption in the medical domain, as it makes it difficult for health care providers to understand, trust, or accept these models [<xref ref-type="bibr" rid="ref2">2</xref>-<xref ref-type="bibr" rid="ref4">4</xref>]. According to the EU (European Union) AI Act, AI-based CDSS are classified as high-risk AI systems [<xref ref-type="bibr" rid="ref5">5</xref>]. For high-risk AI systems, the act requires the possibility of human oversight and the provision of appropriate interfaces for this supervision (EU AI Act, Chapter III, Article 13 and 14) [<xref ref-type="bibr" rid="ref6">6</xref>]. Approaches from the field of explainable AI (XAI) are a solution to these barriers [<xref ref-type="bibr" rid="ref7">7</xref>-<xref ref-type="bibr" rid="ref11">11</xref>]. XAI provides additional information to the output of an AI model to make model reasoning more transparent through explanation user interfaces (XUIs). 
In the medical domain, XAI has been shown to influence users’ trust [<xref ref-type="bibr" rid="ref12">12</xref>,<xref ref-type="bibr" rid="ref13">13</xref>], the perceived usefulness [<xref ref-type="bibr" rid="ref14">14</xref>] or value [<xref ref-type="bibr" rid="ref13">13</xref>] of a system, the medical professionals’ confidence in decisions [<xref ref-type="bibr" rid="ref14">14</xref>], and decision-making [<xref ref-type="bibr" rid="ref15">15</xref>]. XAI research focuses on creating, optimizing, and evaluating XAI algorithms. Explanations are often designed to meet the needs of (X)AI developers while neglecting the needs of clinical end users [<xref ref-type="bibr" rid="ref16">16</xref>]. Recently, calls for a user-centered design (UCD) of XUIs have been raised in the medical informatics domain [<xref ref-type="bibr" rid="ref16">16</xref>-<xref ref-type="bibr" rid="ref20">20</xref>]<italic>.</italic></p>
        <p>The objective of this paper was to conduct the initial design and evaluation phases of the UCD process for an XUI of an AI-based CDSS intended for use in the ICU. As a use case, we selected the provision of recommendations for ventilation parameter settings in patients receiving mechanical ventilation in the ICU. Patients may undergo mechanical ventilation using ventilators—machines that assist breathing by delivering air into the lungs when they cannot breathe adequately. Another form of life support is extracorporeal membrane oxygenation, which bypasses the lungs—and sometimes the heart—by circulating the patient’s blood through an external machine that oxygenates the blood and extracts carbon dioxide from it. To enable the continuous optimization of a patient’s ventilation, a CDSS must be able to recommend a range of parameters. In mechanical ventilation, these include, for example, the ventilator mode, which defines the overall strategy and level of mechanical support; tidal volume, which specifies the amount of air delivered per breath; and respiratory rate, which determines the number of breaths per minute.</p>
        <p>The development of such AI-based CDSS is being carried out within the IntelliLung project, a European Union–funded initiative aimed at improving ventilatory care and reducing the workload of clinical staff [<xref ref-type="bibr" rid="ref21">21</xref>]. The CDSS will generate recommendations for both mechanical ventilation and extracorporeal membrane oxygenation settings. The research presented in this paper used the IntelliLung CDSS as a real-world use case to anchor the design in clinical practice. However, the design is independent of any project-specific AI algorithm decisions and instead focuses conceptually on XUI design for such a CDSS. As Mohseni et al [<xref ref-type="bibr" rid="ref22">22</xref>] proposed, this type of research can be conducted before selecting and developing the AI algorithms and corresponding XAI methods.</p>
      </sec>
      <sec>
        <title>State of the Art</title>
        <p>Although much research on XAI focuses on the health care domain [<xref ref-type="bibr" rid="ref23">23</xref>], to the authors’ knowledge, no paper explicitly addresses the UCD of XUIs for AI-based CDSS aimed at optimizing continuous mechanical ventilation in the ICU. Several papers report on the use of AI algorithms and XAI methods from a technical perspective, for example, to predict the ventilation duration for patients with acute respiratory distress syndrome [<xref ref-type="bibr" rid="ref24">24</xref>], to predict the disease progression of patients with acute respiratory distress syndrome [<xref ref-type="bibr" rid="ref25">25</xref>], to predict the ventilator support for patients with COVID-19 [<xref ref-type="bibr" rid="ref26">26</xref>], and to generate ventilator setting recommendations [<xref ref-type="bibr" rid="ref27">27</xref>]. Hughes et al [<xref ref-type="bibr" rid="ref28">28</xref>] report on the context and work process analysis for CDSS supporting the weaning process in the ICU to inform the overall CDSS design for weaning support, but do not focus on optimizing continuous ventilation or designing XUIs for the CDSS.</p>
        <p>A limited number of papers address the UCD of XUIs in the ICU [<xref ref-type="bibr" rid="ref14">14</xref>,<xref ref-type="bibr" rid="ref29">29</xref>-<xref ref-type="bibr" rid="ref31">31</xref>]. A preprint by Clark et al [<xref ref-type="bibr" rid="ref29">29</xref>], published after this study concluded, investigates user requirements for using XAI in CDSS, without focusing on a specific ICU use case. Nagendran et al [<xref ref-type="bibr" rid="ref30">30</xref>] demonstrated the feasibility of eye-tracking analysis to assess physicians’ interactions with an XUI for an AI-based CDSS, although not focused on ventilation, in a simulated ICU setting. Barda et al [<xref ref-type="bibr" rid="ref31">31</xref>] designed and evaluated an XUI for an AI-based CDSS that predicts mortality risk in a pediatric ICU. The acceptance of an XUI for a sepsis treatment CDSS by ICU clinicians was investigated by Sivaraman et al [<xref ref-type="bibr" rid="ref14">14</xref>]. Outside the ICU setting, Schoonderwoerd et al [<xref ref-type="bibr" rid="ref32">32</xref>] investigated pediatricians’ perspectives on different explanation concepts for a diagnostic CDSS.</p>
      </sec>
      <sec>
        <title>Previous Research</title>
        <p>The UCD process was conducted in accordance with DIN (Deutsches Institut für Normung) EN (Europäische Norm) ISO (International Organization for Standardization) 9241-210 [<xref ref-type="bibr" rid="ref33">33</xref>] to design the XUI for an AI-based CDSS that recommends ventilator settings. This paper focuses exclusively on the first iteration of the design and evaluation phases of the UCD process. These phases rely on the results of the context analysis and the specification of user requirements. The results of the first 2 phases of the UCD process are summarized to provide the necessary context.</p>
        <p>During the context analysis, the intended use characteristics of the CDSS were defined as (1) a stand-alone CDSS, which should not replace the ventilation monitor, and (2) a CDSS that would be displayed on a stationary tablet next to the ventilation monitor of an individual patient. The identified main user groups of this CDSS were resident physicians and ICU nurses with the authority to change the patient’s ventilator settings. An initial investigation into user requirements revealed conflicting opinions among future users about the importance of explanations [<xref ref-type="bibr" rid="ref34">34</xref>]. These results led to a follow-up investigation into users’ preferences for explanation, to identify which explanations to include in a first design iteration. Seventeen explanation concepts, ranging from less complex ones such as available input information to more sophisticated ones such as counterfactual explanations, were presented to the users as low-fidelity mockups in a questionnaire. This step was performed before this study to attain a manageable number of explanation concepts for the first design iteration and a formative evaluation. The selection of explanation concepts for this first iteration of the XUI was based solely on user preferences, rather than on commonly used explanation concepts. Users selected the explanation concepts, output certainty, and outlier warning to be displayed alongside the CDSS recommendations. The output certainty concept informs users about the CDSS’s confidence level in its recommendation. The outlier warning alerts users if one or more input parameter values are highly unusual. Three additional explanation concepts were selected but were not designated to appear directly next to the CDSS output: available input, feature importance, and rule-based explanation. The available input explanation concept informs the user which parameter values were available when generating the recommendation. 
The feature importance explanation concept shows users how each parameter influenced the recommendation. The rule-based explanation concept presents simplified rules in text or decision tree format, approximating how the recommendation was calculated. These findings served as the foundation for the XUI design phase.</p>
      </sec>
      <sec>
        <title>Study Scope and Research Question</title>
        <sec>
          <title>Overview</title>
          <p>The underlying research aim of this paper is threefold to (1) design midfidelity mockups of the XUI, focusing on its overall structure and different design variants for the explanation concepts, (2) give users an impression of the XUI without fully designing each explanation concept in detail, and (3) gather early formative feedback from future users to support iterative refinement of the XUI in subsequent design phases. Therefore, this study does not aim to deliver a finalized XUI design, nor does it seek to conduct a comprehensive summative evaluation of the XUI, as this will be the focus of future iterations.</p>
          <p>The following research questions, aligned with the design and evaluation phases, were developed to guide the investigation.</p>
        </sec>
        <sec>
          <title>Design Phase</title>
          <p>The design phase focused on generating design options for the XUI, with the following research question:</p>
          <list list-type="bullet">
            <list-item>
              <p>RQ1: How should the XUI of an AI-based CDSS for mechanical ventilation optimization in the ICU be designed?</p>
            </list-item>
          </list>
        </sec>
        <sec>
          <title>Evaluation Phase</title>
          <p>The evaluation phase focused on collecting early user feedback from clinicians, with the following research questions:</p>
          <list list-type="bullet">
            <list-item>
              <p>RQ2: What are clinicians’ perceptions of the proposed XUI of the AI-based CDSS for mechanical ventilation optimization?</p>
            </list-item>
            <list-item>
              <p>RQ3: What XUI design improvements should be addressed in subsequent design iterations?</p>
            </list-item>
          </list>
        </sec>
      </sec>
    </sec>
    <sec sec-type="methods">
      <title>Methods</title>
      <p><xref rid="figure1" ref-type="fig">Figure 1</xref> provides an overview of the methods used in each XUI development phase.</p>
      <fig id="figure1" position="float">
        <label>Figure 1</label>
        <caption>
          <p>Overview of the methods used during the first iteration of the design and evaluation phases of the explanation user interface (XUI).</p>
        </caption>
        <graphic xlink:href="formative_v10i1e77481_fig1.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
      </fig>
      <sec>
        <title>XUI Design Phase</title>
        <p>The design phase began with the development of a foundational knowledge base for the XUI. Relevant literature, such as norms and design guidelines, was reviewed. For this purpose, the ISO 9241 standard series on human-system interaction and human-centered design was reviewed. This foundation was expanded through a scoping review and the development of design recommendations for the user-centered XUI design for CDSS [<xref ref-type="bibr" rid="ref35">35</xref>]. Searches for research papers related to the topic of XUIs for AI-based CDSS for the ventilation of patients in the ICU found no relevant results before the design of the XUI.</p>
        <p>Midfidelity XUI mockups were developed using the Justinmind [<xref ref-type="bibr" rid="ref36">36</xref>] prototyping software, based on the knowledge foundation. The design of the mockups was intended to direct users’ attention to elements relevant to RQ2 and RQ3 and to prevent users from focusing on aesthetic aspects such as the color of the XUI. Therefore, mockups were designed with the following characteristics: (1) realistic variable names and content to support user familiarity; (2) realistic-looking data, for which synthetic data points were generated for each displayed input variable with a large language model (Claude Sonnet 3.5 [<xref ref-type="bibr" rid="ref37">37</xref>]); (3) minimal use of color, used selectively to highlight system functionality; and (4) simple interactions (eg, navigation between interface levels).</p>
        <p>A main screen for the CDSS was designed to present the XUI within its intended system context and to help users envision the future context of use for the XUI. The basic structure of the XUI and the design variants for the explanation concepts were developed based on the list of selected explanation concepts and their intended level within the XUI, which had been defined during the requirements specification phase of the UCD process. The mockups were designed iteratively: the first author created initial drafts, and members of the author team with usability expertise provided verbal feedback for improvements during multiple group meetings.</p>
        <p>The design of the individual explanation concepts was oriented toward typical design elements used for these explanation concepts in the XAI literature. This decision was made to provide the users with a realistic representation of common explanation concepts and to keep the focus on the goal of this first evaluation iteration. We opted not to apply a more elaborate design strategy to the explanation concepts in this iteration; this will be addressed in later iterations, which typically focus on aesthetics and detailed design.</p>
      </sec>
      <sec>
        <title>XUI Evaluation Phase</title>
        <p>In the evaluation phase, formative feedback was gathered from ICU physicians and nurses, focusing on the XUI’s structure and explanation concepts. A formative evaluation was chosen to collect insights for improving the CDSS and understanding users’ initial impressions of the CDSS. Given resource constraints—such as limited availability of clinical experts and their high workload—a pragmatic approach was taken. This evaluation is reported according to the STARE-HI (Statement on the Reporting of Evaluation studies in Health Informatics) guidelines to ensure consistency and standardization [<xref ref-type="bibr" rid="ref38">38</xref>]. For the STARE-HI reporting checklist, refer to <xref ref-type="supplementary-material" rid="app1">Multimedia Appendix 1</xref> and [<xref ref-type="bibr" rid="ref39">39</xref>].</p>
      </sec>
      <sec>
        <title>Study Design</title>
        <p>As a formative evaluation, 2 group usability walkthroughs were conducted—1 for each main user group (physicians and ICU nurses). A semistructured approach was followed, guided by a predefined protocol. Each walkthrough included (1) a presentation of the XUI, (2) individual reflection time, (3) guided group discussions, and (4) a quantitative assessment (QA) of the explanation concepts.</p>
        <p>The walkthrough groups were separated by user group. This separation accounted for differing levels of project involvement and aimed to minimize hierarchical bias, particularly the risk of physicians influencing ICU nurses during discussions. The walkthroughs were moderated by a researcher with usability expertise, who introduced the mockup, guided the discussion using predefined questions focused on information retrieval, overall interaction, and the structure of the XUI, and asked follow-up questions where necessary. A second researcher took notes to ensure thorough documentation.</p>
      </sec>
      <sec>
        <title>Sample Size and Recruiting Process</title>
        <p>The target was to recruit 4 to 5 participants per user group, as small sample sizes are considered acceptable for formative evaluations in medical informatics [<xref ref-type="bibr" rid="ref40">40</xref>], and sample size requirements can be adjusted based on system-specific characteristics [<xref ref-type="bibr" rid="ref41">41</xref>].</p>
        <p>Participants were recruited using convenience sampling. For the physician group, invitation emails were sent to 5 physicians in the same research group, which was involved in the IntelliLung project. Four physicians accepted the invitation. To increase the chances of successful recruitment of ICU nurses, 1 of the cooperating physicians identified an interested contact person among the nursing staff in 1 of the ICUs at the same hospital. The contact person informally inquired about his or her peers’ interest in participating in the study. After the nurses were recruited, the contact person shared the contact details with the researchers. To be eligible to participate in the study, all participants were required to have at least 3 months of ventilation experience (demonstrated through working experience in the ICU or research activity in the field of mechanical ventilation). No compensation was paid for participation in the group usability walkthrough.</p>
      </sec>
      <sec>
        <title>Walkthrough Process</title>
        <p>The details of the physician walkthrough are described in this section. Any predefined process deviations for the nurse walkthrough are reported in the section “Deviations for the Nurse Group Usability Walkthrough.”</p>
        <p>At the start, participants received in-depth information about their rights and the purpose of the usability walkthrough. Afterward, the moderator presented the CDSS use case and the main screen of the CDSS. The XUI mockup evaluation then began. The 2 XUI levels were evaluated sequentially, reflecting their hierarchical structure.</p>
        <p>The evaluation began with the main CDSS screen, which included XUI level 1, displayed on a monitor. The moderator provided a verbal introduction. Participants were then given time to observe and reflect on XUI level 1 silently. The reflection was followed by a group discussion guided by the question: “Is the presentation of outlier warning and system certainty on the main screen sufficient?”</p>
        <p>The XUI level 2, a subordinate screen providing additional explanation concepts, was evaluated in 2 steps. In the first step, the participants assessed the overall structure of the level. The XUI level 2 was shown, and the participants silently analyzed it. Afterward, the group briefly discussed and then collectively answered the following questions: “In which part of the interface do you find explanations for the recommendation?” and “How do you switch between the explanations?”</p>
        <p>In a second step, the individual explanation concepts in XUI level 2 were assessed using the following steps:</p>
        <list list-type="bullet">
          <list-item>
            <p>The initial version of the explanation concept was shown, and the participants silently observed and analyzed each explanation concept.</p>
          </list-item>
          <list-item>
            <p>The group discussed the explanation concept and then provided a collective answer to the questions (<xref ref-type="table" rid="table1">Table 1</xref>). Whether the group answered the questions correctly was documented.</p>
          </list-item>
          <list-item>
            <p>Alternative design variants of each explanation concept were presented. The group discussed these variants and selected their preferred variant.</p>
          </list-item>
          <list-item>
            <p>Each participant then individually assessed the selected version on Likert scales regarding the following 3 dimensions (ie, QA-1, QA-2, and QA-3) with the following anchors:</p>
          </list-item>
          <list-item>
            <p>QA-1 (ease of understandability): “I find this explanation concept easy to understand.”</p>
          </list-item>
          <list-item>
            <p>QA-2 (suitability): “I find this explanation concept suitable for everyday clinical work.”</p>
          </list-item>
          <list-item>
            <p>QA-3 (appealingness): “I find this explanation concept appealingly visualized.”</p>
          </list-item>
          <list-item>
            <p>Strongly Disagree, Disagree, Neutral, Agree, and Strongly Agree</p>
          </list-item>
          <list-item>
            <p>Additional options: no answer and free-text comments</p>
          </list-item>
        </list>
        <table-wrap position="float" id="table1">
          <label>Table 1</label>
          <caption>
            <p>List of questions and items used to evaluate each explanation concept.</p>
          </caption>
          <table width="1000" cellpadding="5" cellspacing="0" border="1" rules="groups" frame="hsides">
            <col width="220"/>
            <col width="780"/>
            <thead>
              <tr valign="top">
                <td>Explanation concept</td>
                <td>Group discussion questions</td>
              </tr>
            </thead>
            <tbody>
              <tr valign="top">
                <td>Available input (AI)</td>
                <td>
                  <list list-type="bullet">
                    <list-item>
                      <p>AI-1: What information do you get from this view?</p>
                    </list-item>
                    <list-item>
                      <p>AI-2: In which sections were not all values available when the recommendation was generated?</p>
                    </list-item>
                    <list-item>
                      <p>AI-3: How would you get more detailed information on input values?</p>
                    </list-item>
                    <list-item>
                      <p>AI-4: How should the sections be sorted in the “available input” area?</p>
                    </list-item>
                  </list>
                  <list list-type="bullet">
                    <list-item>
                      <p>Version A (static order): (1) in the presented order and (2) in a different order.</p>
                    </list-item>
                  </list>
                  <list list-type="bullet">
                    <list-item>
                      <p>Version B (dynamic order): areas with missing values at the top?</p>
                    </list-item>
                    <list-item>
                      <p>If Version A was selected, would you prefer (1) or (2)?</p>
                    </list-item>
                  </list>
                </td>
              </tr>
              <tr valign="top">
                <td>Feature importance (FI)</td>
                <td>
                  <list list-type="bullet">
                    <list-item>
                      <p>FI-1: What information do you get from this view?</p>
                    </list-item>
                    <list-item>
                      <p>FI-2: Which parameter had an importance of 80% for generating the recommendation?</p>
                    </list-item>
                    <list-item>
                      <p>FI-3: Where would you have to click to display the importance of more parameters?</p>
                    </list-item>
                  </list>
                </td>
              </tr>
              <tr valign="top">
                <td>Rule-based explanation (R)</td>
                <td>
                  <list list-type="bullet">
                    <list-item>
                      <p>R-1: What information do you get from this view?</p>
                    </list-item>
                  </list>
                </td>
              </tr>
            </tbody>
          </table>
        </table-wrap>
        <p>The complete version of the instrument is provided in <xref ref-type="supplementary-material" rid="app2">Multimedia Appendix 2</xref>.</p>
        <p>The group usability walkthrough concluded with a general discussion focusing on the following guiding questions: “Should there be more or fewer explanation concepts presented?” and “Are there any missing explanation concepts?” (The physicians had access to previous presentations of other explanation concepts before the group usability walkthrough).</p>
      </sec>
      <sec>
        <title>Deviations for the Nurse Group Usability Walkthrough</title>
        <p>To accommodate the ICU nurses’ limited previous exposure to the project, the walkthrough session was adapted to include a more comprehensive introduction to CDSS and AI concepts. Insights from the physician group informed minor refinements to the mockups. Elements that had proven consistently irrelevant or confusing across previous sessions were excluded to maintain focus, while still allowing nurses to contribute unique, user-specific feedback.</p>
        <p>Furthermore, in the final group discussion, the nurses were verbally introduced to the possibility of additional explanation concepts (since they had not previously been exposed to other types, such as counterfactual explanations). They were also asked whether the information presented in level 1 of the XUI was sufficient.</p>
      </sec>
      <sec>
        <title>Analysis Approach</title>
        <sec>
          <title>Qualitative Analysis of Group Discussions</title>
          <p>After each group usability walkthrough, the first author combined the notes from both researchers. The second researcher reviewed the merged version, and any disagreements were discussed and resolved jointly. The first author then categorized the notes by XUI level and discussion questions and organized them by content. No predefined coding scheme was used; categories emerged inductively based on the content of the group discussions. Summarizing bullet points were translated into English, as both group usability walkthroughs had been conducted in German, the participants’ native language. The second researcher reviewed the final summary. No verbatim quotes were recorded during documentation of the group usability walkthroughs.</p>
        </sec>
        <sec>
          <title>Quantitative Data Analysis</title>
          <p>The QAs (QA-1, QA-2, and QA-3) of each explanation concept were analyzed purely descriptively due to the limited number of responses per user group. For each explanation concept and user group combination of the QAs (QA-1, QA-2, and QA-3), the number of responses, the number of “No Answer,” the median, and the IQR are reported. In addition, all responses for the items are presented visually as horizontally oriented stacked bar plots.</p>
        </sec>
      </sec>
      <sec>
        <title>Ethical Considerations</title>
        <p>This study was conducted in accordance with ethical research principles. It involved low-risk, formative group usability walkthroughs as a qualitative survey or interview method in which clinician participants were shown interface mockups and asked structured questions about usability, understandability, and suitability. No medical interventions were performed, no patient data were collected, and the study did not constitute epidemiological research. In line with the institutional guidance of the Ethics Committee of TU Dresden for such non-interventional survey or interview studies, no IRB or REB approval was sought [<xref ref-type="bibr" rid="ref42">42</xref>]. All procedures were carried out in accordance with the principles of the Declaration of Helsinki. Informed consent was obtained from all participants after they received detailed information about the study purpose, the walkthrough procedure, data handling, and their rights. Participation was voluntary, and participants could withdraw at any time without consequences. Participants were not compensated for their participation. Privacy and confidentiality were safeguarded by collecting and storing data in pseudonymized form; only non-sensitive, profession-related participant information was collected, no identifying information is reported, and access to raw data was limited to the research team.</p>
      </sec>
    </sec>
    <sec sec-type="results">
      <title>Results</title>
      <sec>
        <title>XUI Design Phase: Structure of the XUI</title>
        <p>The design phase produced a 2-level XUI structure (XUI level 1 and XUI level 2). XUI level 1 is positioned on the main screen of the CDSS, adjacent to the system’s recommendations. <xref rid="figure2" ref-type="fig">Figure 2</xref> shows the mockup for the CDSS main screen, which contains a header with basic patient information (A in <xref rid="figure2" ref-type="fig">Figure 2</xref>), the recommendation (B in <xref rid="figure2" ref-type="fig">Figure 2</xref>), XUI level 1 (C in <xref rid="figure2" ref-type="fig">Figure 2</xref>), and a trend view of important parameters (D in <xref rid="figure2" ref-type="fig">Figure 2</xref>). XUI level 1 includes the explanation concept outlier warning (E in <xref rid="figure2" ref-type="fig">Figure 2</xref>), consisting of an icon with a header (H in <xref rid="figure2" ref-type="fig">Figure 2</xref>) and an information text (I in <xref rid="figure2" ref-type="fig">Figure 2</xref>); the explanation concept certainty information (F in <xref rid="figure2" ref-type="fig">Figure 2</xref>), for which the certainty level is presented as a traffic light (J in <xref rid="figure2" ref-type="fig">Figure 2</xref>), a number, and a category label (K in <xref rid="figure2" ref-type="fig">Figure 2</xref>); and a button (G in <xref rid="figure2" ref-type="fig">Figure 2</xref>) that links to XUI level 2.</p>
        <p><xref rid="figure3" ref-type="fig">Figure 3</xref> shows XUI level 2, which includes a header with basic patient information, the CDSS recommendation, a repetition of the explanation concepts from level 1, and an expanded panel on the right side for displaying explanation concepts in more detail, one explanation concept at a time. The following explanation concepts were used in level 2: available input, feature importance, and rule-based explanation. The users can switch between explanation concepts in the expanded section using tabs.</p>
        <fig id="figure2" position="float">
          <label>Figure 2</label>
          <caption>
            <p>Main screen of the clinical decision support system (CDSS), showing level 1 of the explanation user interface (XUI) in the top-right section. Two versions are shown: the left screen includes the outlier warning, and the right screen does not.</p>
          </caption>
          <graphic xlink:href="formative_v10i1e77481_fig2.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
        </fig>
        <fig id="figure3" position="float">
          <label>Figure 3</label>
          <caption>
            <p>Level 2 of the explanation user interface (XUI) with a description of its content. A: header with basic patient information; B: identification information for the recommendation; C and D: repetition of explainable artificial intelligence (XAI) concepts from the first level; E: repetition of the clinical decision support system (CDSS) recommendation for ventilator setting; F and G: information on when the last recommendation was generated and when the subsequent recommendation will be generated; H: certainty information for each recommended ventilator setting; I: toggle button to show or hide H; J: section for the presentation of explanation concepts, with tabs for selecting specific explanation concepts; K: short sentence introducing the explanation concept; L: space for the specific explanation concepts; M: link to the first level of the XUI. ARDS: acute respiratory distress syndrome; DSS: decision support system; ECMO: extracorporeal membrane oxygenation; FIO2: fraction of inspired oxygen; ICU: intensive care unit; PCV: pressure-controlled ventilation; PEEP: positive end-expiratory pressure; VT: tidal volume; ΔP: driving pressure.</p>
          </caption>
          <graphic xlink:href="formative_v10i1e77481_fig3.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
        </fig>
      </sec>
      <sec>
        <title>Design of Explanation Concepts</title>
        <sec>
          <title>Outlier Warning</title>
          <p>In XUI level 1, the outlier warning explanation is positioned at the top (E in <xref rid="figure2" ref-type="fig">Figure 2</xref>). If an outlier is detected in the input variables, a yellow warning triangle containing an exclamation mark is presented next to the heading “outlier warning.” The heading is followed by text naming the parameter and value that triggered the outlier warning. When an outlier is detected, the background of the outlier warning section is highlighted in yellow. Two options were created for the case that the outlier warning was not triggered: one that hides the outlier warning section entirely, and another that displays the message “No Outlier Warning.” The outlier detection logic was intentionally left undefined at this stage, as the study focused on evaluating conceptual explanation designs within the XUI. The specific detection mechanism will be developed and validated in subsequent iterations.</p>
        </sec>
        <sec>
          <title>Output Certainty</title>
          <p>Below the outlier warning section, the XUI presents the output certainty (F in <xref rid="figure2" ref-type="fig">Figure 2</xref>). On the left side of the output certainty section, a traffic light icon indicates the system’s certainty level. This is followed by a percentage value and a categorical label representing the system’s certainty.</p>
        </sec>
        <sec>
          <title>Available Input</title>
          <p><xref rid="figure4" ref-type="fig">Figure 4</xref> provides 3 design versions of the available input explanation concept. The versions vary in how much detail they present to the user.</p>
          <fig id="figure4" position="float">
            <label>Figure 4</label>
            <caption>
              <p>Three versions of the available input explanation concept. Version A: availability information organized in categories. Version B: availability information extended with the values of the parameters. Version C: availability information and parameter values extended with a time stamp for parameter value collection. CO2: carbon dioxide; DSS: decision support system; ECMO: extracorporeal membrane oxygenation; HCO3: bicarbonate; ICD: International Classification of Diseases; O2: oxygen; pH: potential of hydrogen.</p>
            </caption>
            <graphic xlink:href="formative_v10i1e77481_fig4.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
          </fig>
          <p>The input parameters are organized into categories in Version A (<xref rid="figure4" ref-type="fig">Figure 4</xref>). Each category is displayed as an expandable section with a labeled heading. Within each category, parameter names are displayed in a matrix format, each accompanied by an icon indicating availability (a green checkmark for available parameters; a red cross for unavailable ones). Unavailable parameters were also highlighted in red text. If any parameters in a category (eg, “Vital Parameters”) were missing, the category heading appeared in bold red text, and a red cross icon with explanatory text was added to the top of the category section.</p>
          <p>Version B (<xref rid="figure4" ref-type="fig">Figure 4</xref>) extends Version A by including the values of all available parameters. Each category is presented as a table with 3 columns: parameter name, availability status, and parameter value. Cells containing values that triggered an outlier warning are highlighted in yellow.</p>
          <p>Version C (<xref rid="figure4" ref-type="fig">Figure 4</xref>) further builds on Version B. Each table is expanded with a fourth column displaying the time stamp of when the parameter value was recorded. If the recorded time stamp falls outside a predefined range, the cell is highlighted in yellow, and a yellow clock icon appears next to the row to indicate outdated data.</p>
        </sec>
        <sec>
          <title>Feature Importance</title>
          <p><xref rid="figure5" ref-type="fig">Figure 5</xref> provides 3 versions of the feature importance explanation concept. In the mockup, the explanation concept is labeled as “Parameter Importance” to avoid the necessity of pre-existing knowledge of the term feature in the context of AI for clinicians. This change in the name did not influence the information presented in the explanation concept. The 3 versions differ in the level of detail they provide.</p>
          <fig id="figure5" position="float">
            <label>Figure 5</label>
            <caption>
              <p>Three versions of the “feature importance” explanation concept. Version A: vertical bar plot of the 7 most important features. Version B: vertical bar plot showing the feature importance of all features. Version C: feature importance displayed as a heat map–style history in table format.</p>
            </caption>
            <graphic xlink:href="formative_v10i1e77481_fig5.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
          </fig>
          <p>Version A (<xref rid="figure5" ref-type="fig">Figure 5</xref>) displays the top 7 features in a vertical bar plot, ranked by importance.</p>
          <p>Version B (<xref rid="figure5" ref-type="fig">Figure 5</xref>) uses the same bar plot format but includes all available features. The user can scroll through the bar plot vertically and toggle between Versions A and B.</p>
          <p>Version C (<xref rid="figure5" ref-type="fig">Figure 5</xref>) provides the 7 most important features of the current recommendation in a heat map history, which is designed as a table. The rightmost column contains the feature importance values for the current recommendation. The cell background color corresponds to the value of the feature importance presented in the legend. The preceding columns represent the feature importance for previous recommendations. Clicking on a column updates the recommendation displayed in the left-side panel of the XUI (B in <xref rid="figure3" ref-type="fig">Figure 3</xref>).</p>
        </sec>
        <sec>
          <title>Rule-Based Explanation</title>
          <p>Two versions of the rule-based explanation concept were designed (<xref rid="figure6" ref-type="fig">Figure 6</xref>).</p>
          <fig id="figure6" position="float">
            <label>Figure 6</label>
            <caption>
              <p>Different versions of the rule-based explanation concept. Version A: textual representation as an “if-then” phrase. Version B: graphical representation as a decision tree.</p>
            </caption>
            <graphic xlink:href="formative_v10i1e77481_fig6.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
          </fig>
          <p>Version A (<xref rid="figure6" ref-type="fig">Figure 6</xref>) provides the explanation concept as an “if-then” rule in text format. Version B (<xref rid="figure6" ref-type="fig">Figure 6</xref>) uses a visual decision tree to convey the same logic. The decision tree highlights the path to the recommendation with blue lines.</p>
        </sec>
      </sec>
      <sec>
        <title>XUI Evaluation Phase</title>
        <sec>
          <title>Participants Description</title>
          <p>The group usability walkthroughs were conducted with 8 health care professionals from a German university hospital, including 4 ICU physicians (4 male) and 4 ICU nurses (3 male and 1 female). All physicians participated in the preceding UCD process phases of the IntelliLung CDSS interface. In contrast, the recruited ICU nurses had no previous exposure to the IntelliLung CDSS. The recruited physicians were all anesthesiology residents with clinical and research experience in mechanical ventilation. The ICU nurses had at least 9 years of experience working in the ICU. All nurses answered a question describing their experience selecting ventilator settings for patients as “a lot” and a question regarding their experience with AI-based CDSS as “non” (n=1) and “little” (n=3) on 4-point scales with the following answer options: “non,” “little,” “moderate,” “a lot.” The physicians were not formally asked about their AI knowledge, but some degree of knowledge can be assumed due to their participation as clinical experts in the development of the IntelliLung AI model.</p>
        </sec>
        <sec>
          <title>Evaluation: XUI Level 1</title>
          <p>Both user groups appreciated the content in XUI level 1 and favored a concise presentation format. However, the groups differed in their preferences regarding how the information should be presented. <xref ref-type="table" rid="table2">Table 2</xref> provides a detailed summary of findings.</p>
          <table-wrap position="float" id="table2">
            <label>Table 2</label>
            <caption>
              <p>Insights from the explanation user interface (XUI<sup>a</sup>) level 1 evaluation.</p>
            </caption>
            <table width="1000" cellpadding="5" cellspacing="0" border="1" rules="groups" frame="hsides">
              <col width="140"/>
              <col width="470"/>
              <col width="390"/>
              <thead>
                <tr valign="top">
                  <td>XUI part</td>
                  <td>Physician walkthrough</td>
                  <td>Nurse walkthrough</td>
                </tr>
              </thead>
              <tbody>
                <tr valign="top">
                  <td>Overall assessment (level 1 XUI)</td>
                  <td>
                    <list list-type="bullet">
                      <list-item>
                        <p>Preferred a simple presentation with minimal content.</p>
                      </list-item>
                    </list>
                  </td>
                  <td>
                    <list list-type="bullet">
                      <list-item>
                        <p>Found the information on level 1 important.</p>
                      </list-item>
                    </list>
                  </td>
                </tr>
                <tr valign="top">
                  <td>Outlier warning</td>
                  <td>
                    <list list-type="bullet">
                      <list-item>
                        <p>Terminology improvement: suggested renaming the concept to “Input Warning” to avoid confusion with clinical outliers (ie, abnormal values).</p>
                      </list-item>
                      <list-item>
                        <p>Navigation: clicking the warning should lead users to a section containing similar information to the available input explanation concept (Version C).</p>
                      </list-item>
                      <list-item>
                        <p>In the absence of a warning, the section should contain the text “No Input Warning”; the section should not be empty.</p>
                      </list-item>
                      <list-item>
                        <p>Presentation of the exact values of the parameter triggering the warning is not important.</p>
                      </list-item>
                    </list>
                  </td>
                  <td>
                    <list list-type="bullet">
                      <list-item>
                        <p>Appreciated the explanation concept.</p>
                      </list-item>
                      <list-item>
                        <p>Navigation: clicking on the warning should lead directly to a section where raw values are displayed.</p>
                      </list-item>
                      <list-item>
                        <p>Requested a mechanism to acknowledge the warning and a clear distinction between new and acknowledged warnings.</p>
                      </list-item>
                      <list-item>
                        <p>Presentation of the exact values of the parameter triggering the warning is important.</p>
                      </list-item>
                      <list-item>
                        <p>Interaction: requested the ability to correct erroneous input values manually.</p>
                      </list-item>
                      <list-item>
                        <p>In the absence of a warning, the section should be empty.</p>
                      </list-item>
                    </list>
                  </td>
                </tr>
                <tr valign="top">
                  <td>Available input</td>
                  <td>
                    <list list-type="bullet">
                      <list-item>
                        <p>Structure: The explanation concept should be presented in an individual section of the interface and not at XUI level 2.</p>
                      </list-item>
                    </list>
                  </td>
                  <td>—</td>
                </tr>
              </tbody>
            </table>
            <table-wrap-foot>
              <fn id="table2fn1">
                <p><sup>a</sup>XUI: explanation user interface.</p>
              </fn>
            </table-wrap-foot>
          </table-wrap>
        </sec>
        <sec>
          <title>Evaluation: XUI Level 2</title>
          <sec>
            <title>Qualitative Analysis of Group Discussions</title>
            <p>Overall, for the second level of the XUI, both the physician group and the nurse group had no problem identifying where the explanation concepts were presented and how to switch between them. However, 1 ICU nurse thought clicking on the recommendation would show an explanation text containing a detailed description of that specific recommended parameter.</p>
            <p>The following group discussion results are organized by explanation concept.</p>
          </sec>
          <sec>
            <title>Available Input</title>
            <p>The nurses and physicians answered the questions AI-1, AI-2, and AI-3 correctly. Both groups correctly understood that the explanation concept informs which input parameters were available to the CDSS when it generated its recommendation (AI-1), identified in which sections input parameters were missing (AI-2), and could describe how to access more detailed information about the input values (AI-3). The physicians generally preferred more detail, while nurses favored a more concise presentation, which was expressed in the group discussions, the free-text answers, and the selection of variants.</p>
            <p>For question AI-4, which addressed parameter ordering, the physician group expressed a unanimous preference for the static ordering used in the mockup. To avoid redundancy and reduce cognitive load, this question was omitted from the nurse session—while still ensuring space for user-specific feedback. Detailed findings are provided in <xref ref-type="table" rid="table3">Table 3</xref>.</p>
            <table-wrap position="float" id="table3">
              <label>Table 3</label>
              <caption>
                <p>Summary of usability walkthrough insights for the “available input” explanation concept.</p>
              </caption>
              <table width="1000" cellpadding="5" cellspacing="0" border="1" rules="groups" frame="hsides">
                <col width="150"/>
                <col width="380"/>
                <col width="470"/>
                <thead>
                  <tr valign="top">
                    <td>Question</td>
                    <td>Physician walkthrough</td>
                    <td>Nurse walkthrough</td>
                  </tr>
                </thead>
                <tbody>
                  <tr valign="top">
                    <td>AI<sup>a</sup>-4</td>
                    <td>
                      <list list-type="bullet">
                        <list-item>
                          <p>Preferred the static ordering of categories and parameters as presented in the mockup.</p>
                        </list-item>
                      </list>
                    </td>
                    <td>
                      <list list-type="bullet">
                        <list-item>
                          <p>Not discussed</p>
                        </list-item>
                      </list>
                    </td>
                  </tr>
                  <tr valign="top">
                    <td>Selected<break/>version</td>
                    <td>
                      <list list-type="bullet">
                        <list-item>
                          <p>Version C (most information).</p>
                        </list-item>
                      </list>
                    </td>
                    <td>
                      <list list-type="bullet">
                        <list-item>
                          <p>Version A (most concise)</p>
                        </list-item>
                      </list>
                    </td>
                  </tr>
                  <tr valign="top">
                    <td>Discussed insights</td>
                    <td>
                      <list list-type="bullet">
                        <list-item>
                          <p>Physicians, not the CDSS<sup>b</sup>, should decide whether the values are outdated.</p>
                        </list-item>
                        <list-item>
                          <p>The value of the parameter triggering an “outlier warning” should be highlighted.</p>
                        </list-item>
                      </list>
                    </td>
                    <td>
                      <list list-type="bullet">
                        <list-item>
                          <p>The other versions were seen as containing too much information.</p>
                        </list-item>
                        <list-item>
                          <p>Preferred less information on the screens.</p>
                        </list-item>
                      </list>
                    </td>
                  </tr>
                  <tr valign="top">
                    <td>Free-text answers</td>
                    <td>
                      <list list-type="bullet">
                        <list-item>
                          <p>None.</p>
                        </list-item>
                      </list>
                    </td>
                    <td>
                      <list list-type="bullet">
                        <list-item>
                          <p>“Minimalize not important, not relevant data, or information.” [P5]</p>
                        </list-item>
                        <list-item>
                          <p>“Deviations should be made visible.” [P5]</p>
                        </list-item>
                        <list-item>
                          <p>“More is less.” [P5]</p>
                        </list-item>
                        <list-item>
                          <p>“Too much information at once —&#62; just the current errors —&#62; one should be able to think that only the most recent blood gas analysis results would be considered.” [P6]</p>
                        </list-item>
                        <list-item>
                          <p>“Red for highlighting is okay.” [P6]</p>
                        </list-item>
                        <list-item>
                          <p>“Too much information.” [P7]</p>
                        </list-item>
                        <list-item>
                          <p>“Not too much information! Keep it simple.” [P8]</p>
                        </list-item>
                      </list>
                    </td>
                  </tr>
                </tbody>
              </table>
              <table-wrap-foot>
                <fn id="table3fn1">
                  <p><sup>a</sup>AI-4: item 4 for group discussion of the available input explanation, as provided in <xref ref-type="table" rid="table1">Table 1</xref>.</p>
                </fn>
                <fn id="table3fn2">
                  <p><sup>b</sup>CDSS: clinical decision support system.</p>
                </fn>
              </table-wrap-foot>
            </table-wrap>
          </sec>
          <sec>
            <title>Feature Importance</title>
            <p>The nurse and physician groups correctly answered questions—FI-1, FI-2, and FI-3—corresponding to the feature importance explanation concept. They correctly noted that the explanation concept presented the most influential factors for the recommendation (FI-1), identified which parameter had an importance of 80% (FI-2), and recognized where to click to display more parameters in the explanation (FI-3). Both groups selected Version A as their favorite. The nurses mentioned that the nursing staff would likely not use this explanation concept in clinical practice. Detailed insights for this explanation concept are provided in <xref ref-type="table" rid="table4">Table 4</xref>.</p>
            <table-wrap position="float" id="table4">
              <label>Table 4</label>
              <caption>
                <p>Insights from both group usability walkthroughs for the explanation concept: feature importance.</p>
              </caption>
              <table width="1000" cellpadding="5" cellspacing="0" border="1" rules="groups" frame="hsides">
                <col width="150"/>
                <col width="380"/>
                <col width="470"/>
                <thead>
                  <tr valign="top">
                    <td>Question</td>
                    <td>Physician walkthrough</td>
                    <td>Nurse walkthrough</td>
                  </tr>
                </thead>
                <tbody>
                  <tr valign="top">
                    <td>Selected<break/>version</td>
                    <td>
                      <list list-type="bullet">
                        <list-item>
                          <p>Version A.</p>
                        </list-item>
                      </list>
                    </td>
                    <td>
                      <list list-type="bullet">
                        <list-item>
                          <p>Version A.</p>
                        </list-item>
                      </list>
                    </td>
                  </tr>
                  <tr valign="top">
                    <td>Discussed insights</td>
                    <td>
                      <list list-type="bullet">
                        <list-item>
                          <p>Version C was viewed as beneficial for research, but not appropriate for daily clinical workflows.</p>
                        </list-item>
                        <list-item>
                          <p>X-axis labels: What format would be most intuitive for clinicians?</p>
                        </list-item>
                        <list-item>
                          <p>X-axis labels: the group suggested empty labels; exact values were deemed less relevant than the relative differences between features.</p>
                        </list-item>
                        <list-item>
                          <p>Participants questioned how the explanation would be interpreted or acted upon in clinical practice.</p>
                        </list-item>
                      </list>
                    </td>
                    <td>
                      <list list-type="bullet">
                        <list-item>
                          <p>Version C would not be interesting for the nursing staff.</p>
                        </list-item>
                        <list-item>
                          <p>Suspected feature importance would not be used in daily clinical practice.</p>
                        </list-item>
                      </list>
                    </td>
                  </tr>
                  <tr valign="top">
                    <td>Free-text answers</td>
                    <td>
                      <list list-type="bullet">
                        <list-item>
                          <p>“Importance is not clear. Which consequences should be drawn?” [P3]</p>
                        </list-item>
                      </list>
                    </td>
                    <td>
                      <list list-type="bullet">
                        <list-item>
                          <p>“Only if someone would like to see complete information, not to be shown as default information, only shown on demand.” [P5]</p>
                        </list-item>
                        <list-item>
                          <p>“Not relevant for the daily clinical working process.” [P6]</p>
                        </list-item>
                      </list>
                    </td>
                  </tr>
                </tbody>
              </table>
            </table-wrap>
          </sec>
          <sec>
            <title>Rule-Based</title>
            <p>The physicians unanimously voiced strong concerns about this explanation concept. They feared the concept would lead to an oversimplified perception of the AI model and might mislead nonexperts. They recommended excluding this explanation from the final XUI design. Due to the physicians’ strong objections, the explanation concept was not shown to the nurses. Notes from the discussion are reported in <xref ref-type="table" rid="table5">Table 5</xref>.</p>
            <table-wrap position="float" id="table5">
              <label>Table 5</label>
              <caption>
                <p>Insights from both group usability walkthroughs for the explanation concept: rule-based.</p>
              </caption>
              <table width="1000" cellpadding="5" cellspacing="0" border="1" rules="groups" frame="hsides">
                <col width="150"/>
                <col width="850"/>
                <thead>
                  <tr valign="top">
                    <td>Question</td>
                    <td>Physician walkthrough</td>
                  </tr>
                </thead>
                <tbody>
                  <tr valign="top">
                    <td>R-1</td>
                    <td>
                      <list list-type="bullet">
                        <list-item>
                          <p>Correct answer.</p>
                        </list-item>
                      </list>
                    </td>
                  </tr>
                  <tr valign="top">
                    <td>Selected<break/>version</td>
                    <td>
                      <list list-type="bullet">
                        <list-item>
                          <p>Version A.</p>
                        </list-item>
                      </list>
                    </td>
                  </tr>
                  <tr valign="top">
                    <td>Discussed insights</td>
                    <td>
                      <list list-type="bullet">
                        <list-item>
                          <p>Group consensus: the explanation concept should not be part of the XUI<sup>a</sup> for this specific CDSS<sup>b</sup> for the following reasons: (1) would suggest to the user a too simple model of the system; (2) users with a low level of expertise could learn rules from the system that are not correct or clinically validated, without knowing this; and (3) an explanation that the rules are an approximation of the system would occupy additional resources.</p>
                        </list-item>
                      </list>
                    </td>
                  </tr>
                  <tr valign="top">
                    <td>Free-text answers</td>
                    <td>
                      <list list-type="bullet">
                        <list-item>
                          <p>“Leave [this concept] out.” [P1]</p>
                        </list-item>
                        <list-item>
                          <p>“False security.” [P2]</p>
                        </list-item>
                        <list-item>
                          <p>“Relationships from the menu cannot be established in this way. Possibly conveys false security, false or unproven connections.” [P3]</p>
                        </list-item>
                        <list-item>
                          <p>“Creates a ‘false’ sense of security or being able to understand the model, but only represents it incompletely.” [P4]</p>
                        </list-item>
                      </list>
                    </td>
                  </tr>
                </tbody>
              </table>
              <table-wrap-foot>
                <fn id="table5fn1">
                  <p><sup>a</sup>XUI: explanation user interface.</p>
                </fn>
                <fn id="table5fn2">
                  <p><sup>b</sup>CDSS: clinical decision support system.</p>
                </fn>
              </table-wrap-foot>
            </table-wrap>
          </sec>
          <sec>
            <title>Final Group Discussions of the XUI</title>
            <p>In the final group discussion, both groups agreed that no explanation concepts should be added to the XUI. The nurse group reiterated their preference for using only the first level of the XUI, suggesting that users with greater interest could optionally access Level 2. The nurses emphasized that the interface should minimize screen space usage.</p>
          </sec>
        </sec>
      </sec>
      <sec>
        <title>Quantitative Data Analysis</title>
        <p><xref rid="figure7" ref-type="fig">Figure 7</xref> provides the QA results (QA-1 to QA-3) for the available input and feature importance concepts.</p>
        <fig id="figure7" position="float">
          <label>Figure 7</label>
          <caption>
            <p>Results of quantitative assessments (QA-1 to QA-3) for the explanation concepts: available input and feature importance.</p>
          </caption>
          <graphic xlink:href="formative_v10i1e77481_fig7.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
        </fig>
        <p>As shown in the top half of <xref rid="figure7" ref-type="fig">Figure 7</xref>, the physicians agreed or strongly agreed that the available input concept was understandable, suitable for clinical work, and visually appealing. The nurses mostly disagreed or responded neutrally regarding the concept’s suitability and understandability, and found the visualization aesthetically unappealing. The nurses’ and physicians’ ratings, therefore, differ across all assessed dimensions, with nurses being more critical overall.</p>
        <p>The QA results for the feature importance explanation concept are shown in the bottom half of <xref rid="figure7" ref-type="fig">Figure 7</xref>. Most physicians strongly agreed that the feature importance concept was appealing, suitable, and easy to understand. The nurses mostly agreed with its understandability and appeal, but tended to disagree with its suitability for clinical work. Both user groups, therefore, rate the understandability and visual appeal of the explanation concepts positively. However, the user groups differ in their assessment of the suitability for their clinical work, with a positive assessment by the physicians and a negative assessment by the nurses.</p>
        <p><xref rid="figure8" ref-type="fig">Figure 8</xref> provides the QA results for the rule-based explanation concept. Three physicians strongly agreed or agreed with its appeal and understandability; one chose the “no answer” option. All 4 physicians disagreed or strongly disagreed with its suitability for clinical practice.</p>
        <p>The calculated descriptive statistics for each QA for all 3 explanation concepts on the second level are provided in <xref ref-type="supplementary-material" rid="app3">Multimedia Appendix 3</xref>. The descriptive statistics mirror the results visible in <xref rid="figure7" ref-type="fig">Figures 7</xref> and <xref rid="figure8" ref-type="fig">8</xref>.</p>
        <fig id="figure8" position="float">
          <label>Figure 8</label>
          <caption>
            <p>Quantitative assessment results of the rule-based explanation concept.</p>
          </caption>
          <graphic xlink:href="formative_v10i1e77481_fig8.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
        </fig>
      </sec>
    </sec>
    <sec sec-type="discussion">
      <title>Discussion</title>
      <sec>
        <title>Principal Findings</title>
        <p>This paper provides findings from the first design and formative evaluation phase of an XUI for an AI-based CDSS aimed at optimizing mechanical ventilation in the ICU.</p>
        <p>The design led to an XUI with 2 levels. Level 1 of the XUI, positioned next to the CDSS recommendations on the main screen, includes the output certainty and outlier warning concepts. Level 2 of the XUI provides on-demand access to additional explanation concepts: available input, feature importance, and rule-based explanation. Multiple versions of the explanation concepts on the second level have been designed and evaluated.</p>
        <p>The initial evaluation consisted of 2 group usability walkthroughs with ICU physicians and nurses. Both user groups liked the first level of the XUI. The physicians were satisfied with XUI level 2 but provided valuable feedback for improving it. The nurses were skeptical about whether they would use level 2 of the XUI in clinical practice.</p>
      </sec>
      <sec>
        <title>XUI Design Phase</title>
        <p>For the design phase, the following research question was investigated: RQ1: “How should the XUI of an AI-based CDSS for mechanical ventilation optimization in the ICU be designed?”</p>
        <p>To address this research question, the first focus was on the XUI structure. The 2-level structure of the XUI allowed the provision of easy-to-grasp explanation concepts next to the CDSS recommendation and more complex and more detailed explanation concepts on the second level, on demand. This structure was based on the input of the previous UCD design phases and current design recommendations [<xref ref-type="bibr" rid="ref35">35</xref>]. This design choice aligns with new recommendations published as a preprint after the work reported in this paper was concluded. The preprint recommends multiple levels with increasing detail for XUIs for the ICU [<xref ref-type="bibr" rid="ref29">29</xref>].</p>
        <p>After the structure of the XUI was created, the investigation addressed the presentation of the explanation concepts themselves. The individual visualizations for the explanation concepts are comparable to typical visualizations found for XUIs in the medical domain [<xref ref-type="bibr" rid="ref43">43</xref>,<xref ref-type="bibr" rid="ref44">44</xref>]. The explanation concepts were kept in line with the design of XUIs to provide clinicians with an impression of the possibility of explanations at the current state of the art of research in the field.</p>
        <p>The different versions of the explanation concepts in level 2 of the XUI varied in the level of detail presented to determine users’ preferred level of detail for this CDSS. Based on the literature, this is considered an important design decision [<xref ref-type="bibr" rid="ref29">29</xref>,<xref ref-type="bibr" rid="ref35">35</xref>]. Minor adaptations were made to incorporate design recommendations for the UCD of XUIs; for example, the explanation concept of feature importance was presented to the user under the term “parameter importance,” as the term feature importance was assumed not to be self-explanatory for non-AI experts. In addition, to add some variety to the different presented explanation concepts, the typical bar plot visualization for the feature importance explanation concept was extended by a version inspired by the works of Bienefeld et al [<xref ref-type="bibr" rid="ref45">45</xref>], which created a heat map–style feature importance history in an XUI.</p>
      </sec>
      <sec>
        <title>XUI Evaluation Phase</title>
        <p>During the evaluation phase, the following research question, RQ2, was investigated: “What are clinicians’ perceptions of the proposed XUI of the AI-based CDSS for mechanical ventilation optimization?”</p>
        <p>The users’ first impression of the 2-level structure of the XUI suggests that this structure is the right approach for this CDSS. Both groups were content with level 1 of the XUI, and the physician group was satisfied with level 2 of the XUI, while the nurses were more skeptical of their intentions to use level 2. No concerns were raised by either group about the general structure. The structure allows both user groups to access the information they prefer while preventing information clutter on the main screen of the CDSS.</p>
        <p>The combined results from group discussions, concept selection, QAs, and free-text responses indicate that ICU nurses and physicians had noticeably different preferences regarding the level of detail and complexity in the XUI. The nurses preferred less complex explanation concepts, such as output certainty and outlier warning, explicitly requested a lower level of detail for the explained concepts, and rated the suitability of the explanation concepts on level 2 lower than the physicians. The literature supports this, as it has been reported that nurses wished for clear and concise explanations, outputs, and actionable information when dealing with AI [<xref ref-type="bibr" rid="ref31">31</xref>,<xref ref-type="bibr" rid="ref46">46</xref>]. In contrast, the physicians were satisfied with both XUI levels, which allowed them to consult low-detail and less complex information on level 1 and more detailed and complex explanation concepts on level 2 of the XUI. This implies that the explanation needs and preferences between the nurses and physicians differ, with physicians having a higher need for more complex and detailed explanations than the ICU nurses. The work from Barda et al [<xref ref-type="bibr" rid="ref31">31</xref>] found a similar insight. Current research supports this by showing that different health care provider groups can have different explanation preferences [<xref ref-type="bibr" rid="ref15">15</xref>] or that different stakeholders have different explanation needs [<xref ref-type="bibr" rid="ref47">47</xref>]. The observed differences may reflect role-specific workflows in ventilator care. ICU nurses typically operate in fast-paced bedside routines with frequent monitoring and rapid adjustments, which may favor concise, immediately actionable explanations. 
Physicians, in contrast, often integrate XAI outputs into broader diagnostic reasoning and decision validation, which may increase the perceived value of more detailed, mechanism-oriented explanations provided on level 2.</p>
        <p>Notably, divergence between groups was most consistent for clinical suitability rather than for understandability or visual appeal (<xref rid="figure7" ref-type="fig">Figure 7</xref>). This suggests that acceptance may not only be a matter of whether clinicians can comprehend an explanation and perceive it as visually appealing, but also whether it fits their real-world task context and decision-making responsibilities. Therefore, suitability should be treated as a primary design target in XUI development, alongside clarity and aesthetics.</p>
        <p>These results emphasize the need for a rigorous application of the UCD process when an XUI is developed for different health care providers. In the XUI developed and evaluated in this study, the 2-level solution appears to meet the differing explanation needs and preferences of the user groups. Future iterations should evaluate whether this approach remains sufficient or if separate XUIs are needed. It should be kept in mind that ICU nurses will probably not consult level 2 of the XUI in future iterations. Therefore, presenting potentially necessary and valuable information only on level 2 should be avoided. Clinical role alone may not fully explain the differences; AI literacy and ventilator-care experience likely act as moderators that shape how much explanation depth is perceived as useful versus burdensome. Future work should disentangle professional role effects from training- and experience-related factors, for example, by stratifying participants accordingly.</p>
        <p>Interestingly, both groups were skeptical about their peers’ adaptations of the explanation concepts in the clinical daily routine, which was voiced during the group discussions and in the free-text answers. The nurse group voiced more skepticism than the physician group, and their skepticism increased with the level of detail and complexity of the explanation concepts. The physicians were mainly satisfied with the proposed explanation concepts, apart from the rule-based explanation concept. However, they stressed that the complexity of the explanation concepts should not exceed that used in the XUI. These results explain why both groups had no interest in more complex explanation concepts, such as a counterfactual explanation. This result contrasts with the results of a study by Jung et al [<xref ref-type="bibr" rid="ref48">48</xref>], in which clinicians had a positive perception of counterfactual explanations.</p>
        <p>The physician group positively assessed the explanation concepts on level 2 of the XUI, apart from the suitability of the rule-based explanations. The understandability ratings for the explanation concepts are similar to previous research, showing that physicians can understand the selected concepts [<xref ref-type="bibr" rid="ref32">32</xref>]. The positive assessment of the feature importance explanation concept for the physician group is in line with other research efforts [<xref ref-type="bibr" rid="ref14">14</xref>,<xref ref-type="bibr" rid="ref15">15</xref>]. Due to the overall positive assessment by the physicians of both levels of the XUI and nurses’ positive perception of level 1 of the XUI, it seems that the results of the first design phase were a step in the right direction. However, the evaluation also revealed some improvement opportunities.</p>
        <p>Second, the evaluation phase investigated research question RQ3: “What XUI design improvements should be addressed in subsequent design iterations?”</p>
        <p>Based on the results of the evaluation, all the explanation concepts should be revised, keeping the nurses’ preferences for a low level of detail and complexity for the explanation concept, and their perception of the explanation concept as having too much detail in mind. In the following design iteration, all the explanation concept versions should be provided with a lower level of detail or complexity as a default version. From these default versions, the more interested user should be able to access a more detailed version on demand. This approach has to be balanced so that the more interested users are not burdened while accessing the explanation concepts with the current level of detail and complexity for which they provided positive feedback in the evaluation.</p>
        <p>For the feature importance explanation concept, the evaluation revealed that further research is needed for the design of the x-axis. The x-axis should be redesigned to be understandable for physicians and nurses and to convey the appropriate amount of information without overwhelming them. As raised in the physician group walkthrough, one option could be an empty x-axis; the suitability of such a version should be tested in comparison to other typical variants of the x-axis for this explanation concept. As the question about the actionability of the information provided in the feature importance explanation concept arose in the physician group, it should be investigated whether incorporating information about how to act on the explanation concept would be valuable. For the outlier warning explanation concept, the evaluation revealed different preferences for the case in which no outlier would be detected. It should be investigated how the user groups would react to the version they did not prefer when interacting with a version of the XUI.</p>
        <p>An increased effort should be put into the design of level 1 of the XUI. No alternative versions had been designed for the explanation concepts on level 1 for this initial prototype to reduce complexity. These should be a primary focus for the next design iteration, as both user groups saw them as valuable. While refining these explanation concepts, an emphasis should be placed on concise information visualizations and effortless information retention from both user groups.</p>
      </sec>
      <sec>
        <title>Limitations</title>
        <p>This first iteration of the design and evaluation phase of the UCD process for the XUI of a CDSS has some limitations that influence the generalizability of the results and should be carefully considered when interpreting the results of this study. A pragmatic group usability walkthrough was selected for the evaluation phase due to the limited number of available and suitable participants per user group. Nonetheless, this pragmatic approach proved valuable as it allowed for the receipt of early feedback from subject matter experts on mechanical ventilation with real-world and research experience. Their experience might have biased their assessment of the XUI, as they might not have had to rely on the CDSS in the first place due to their extensive experience. However, their knowledge allowed them to focus on the XUI design and incorporate extensive practical knowledge and expertise in their feedback and assessment of the XUI. The small group size of only 4 participants per group usability walkthrough might be seen as a limitation, but this group size allowed for a lively discussion between participants, and smaller sample sizes are in line with early evaluation iterations [<xref ref-type="bibr" rid="ref40">40</xref>,<xref ref-type="bibr" rid="ref41">41</xref>]. In addition, the group walkthrough allowed for the collection of feedback in a condensed amount of time, which was necessary due to resource restrictions. Nonetheless, these points limit the generalizability to the clinical population that should be addressed in the following design iterations.</p>
        <p>Furthermore, the changes for the nurse walkthrough limit the comparability between the 2 walkthrough groups. The walkthrough had to be adjusted to avoid overwhelming the participants. Despite the minor adjustments, the collected insights allow for a preliminary assessment of the difference between the user groups. No actual data or patient vignettes were used to promote deeper engagement or trigger demand for an explanation from the CDSS, which limits the comparability of the assessment of XUI in real-world situations. The current state of the XUI mockup was sufficient to spark extensive discussions while preventing the participants from focusing on the medical validity of the situation or mockup content. In this initial iteration of the UCD, only subjective assessments of the XUIs were elicited. This may not provide a complete picture, as it has been shown for XUIs in the ICU that subjective assessments might not transfer to the use or influence of the XUI during the medical decision-making process [<xref ref-type="bibr" rid="ref30">30</xref>,<xref ref-type="bibr" rid="ref49">49</xref>]. Nevertheless, the amount of valuable insights generated through subjective assessments of the XUI in this study shows that this type of feedback is helpful during such an early design phase. These limitations should be addressed in future evaluation iterations.</p>
      </sec>
      <sec>
        <title>Future Research</title>
        <p>Based on this research, the XUI mockup should be improved. The improvements should address the identified opportunities for enhancement in the XUI, making it more suitable for both user groups without drastically altering it, as physicians were already satisfied with the current design. A higher focus should be placed on level 1 of the XUI, as this is likely the most used part. In addition, feedback should be sought from the AI and XAI algorithm developer and incorporated into the mockup to enhance the realism of the XUI for the next iteration. An improved and extended XUI mockup would be a suitable basis for a more elaborate evaluation. The enhanced mockup could be accompanied by different vignettes of patients and situations that might trigger the need for an explanation from the user. Future evaluation rounds should have a higher experimental character to avoid the potential shortcomings of subjective assessments of an XUI in the ICU and allow the users to interact with the XUI directly. The proposed evaluation iterations should aim for a higher sample size and a diversified participant pool concerning their ventilation experience, AI literacy, and clinical center to foster generalizability and reflectiveness of the clinician population. To this end, a remote experimental study could be conducted in which the user directly interacts with the CDSS based on presented patient vignettes. During such an evaluation, participants reflecting a broad base of clinical users should be recruited from multiple institutions.</p>
      </sec>
      <sec>
        <title>Conclusion</title>
        <p>To the best of our knowledge, this paper is the first to report on the UCD and formative evaluation of an XUI for an AI-based CDSS aimed at optimizing continuous mechanical ventilation in the ICU. The findings indicate that a 2-level XUI structure can effectively address the distinct explanation needs of ICU clinicians. Nurses’ information needs were largely met through concise, high-level explanations such as outlier warning and output certainty, presented alongside CDSS recommendations, whereas physicians preferred access to more detailed explanations—such as available input and feature importance—on a secondary level. These results provide empirical evidence that explanation needs differ by professional role and underscore the necessity of user-centered, iterative design processes in the development of XAI systems for critical care. Future work should refine these explanation concepts and evaluate their impact on clinical decision-making, user trust, and system adoption.</p>
      </sec>
    </sec>
  </body>
  <back>
    <app-group>
      <supplementary-material id="app1">
        <label>Multimedia Appendix 1</label>
        <p>STARE-HI checklist.</p>
        <media xlink:href="formative_v10i1e77481_app1.pdf" xlink:title="PDF File  (Adobe PDF File), 77 KB"/>
      </supplementary-material>
      <supplementary-material id="app2">
        <label>Multimedia Appendix 2</label>
        <p>Quantitative assessment sheet.</p>
        <media xlink:href="formative_v10i1e77481_app2.pdf" xlink:title="PDF File  (Adobe PDF File), 87 KB"/>
      </supplementary-material>
      <supplementary-material id="app3">
        <label>Multimedia Appendix 3</label>
        <p>Descriptive statistics for quantitative assessments of each explanation per user group.</p>
        <media xlink:href="formative_v10i1e77481_app3.docx" xlink:title="DOCX File , 18 KB"/>
      </supplementary-material>
    </app-group>
    <glossary>
      <title>Abbreviations</title>
      <def-list>
        <def-item>
          <term id="abb1">AI</term>
          <def>
            <p>artificial intelligence</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb2">CDSS</term>
          <def>
            <p>clinical decision support system</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb3">DIN</term>
          <def>
            <p>Deutsches Institut für Normung</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb4">EN</term>
          <def>
            <p>Europäische Norm</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb5">EU</term>
          <def>
            <p>European Union</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb6">FI</term>
          <def>
            <p>feature importance</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb7">ICU</term>
          <def>
            <p>intensive care unit</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb8">ISO</term>
          <def>
            <p>International Organization for Standardization</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb9">QA</term>
          <def>
            <p>quantitative assessment</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb10">STARE-HI</term>
          <def>
            <p>Statement on the Reporting of Evaluation studies in Health Informatics</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb11">UCD</term>
          <def>
            <p>user-centered design</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb12">XAI</term>
          <def>
            <p>explainable artificial intelligence</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb13">XUI</term>
          <def>
            <p>explanation user interface</p>
          </def>
        </def-item>
      </def-list>
    </glossary>
    <ack>
      <p>The authors are grateful to the IntelliLung project team for their productive cooperation and to the participants of the evaluation phase for contributing their time and feedback. The authors declare that generative AI (GAI) tools were used in the research and writing process. According to the GAIDeT taxonomy (2025), tasks delegated to GAI tools under full human supervision included proofreading, editing, and translation. The GAI tools used in this work were ChatGPT-4o (OpenAI) [<xref ref-type="bibr" rid="ref50">50</xref>], ChatGPT-5 (OpenAI), and Grammarly (Grammarly, Inc). Responsibility for the final manuscript remains entirely with the authors. GAI tools were not listed as authors and do not bear responsibility for the content or outcomes of this work. In addition, Claude Sonnet 3.5 (Anthropic) [<xref ref-type="bibr" rid="ref37">37</xref>] was used to generate synthetic data points for the parameters displayed in the mockups, as described in the “Methods” section. The declaration was submitted by ICJ.</p>
    </ack>
    <notes>
      <title>Funding</title>
      <p>The research work was carried out as part of the junior research group “CDS2USE-prospective user-centered design of clinical decision support systems in the context of personalized medicine,” funded by the German Federal Ministry of Research, Technology, and Space (BMFTR; grant number 01ZZ2002).</p>
    </notes>
    <fn-group>
      <fn fn-type="con">
        <p>ICJ contributed to the conceptualization, methodology, and investigation, and was responsible for writing the original draft, reviewing and editing the manuscript, and creating the visualization. MZ and KS contributed to the investigation and assisted with manuscript review and editing. MS and BS contributed to the manuscript review and editing, and were responsible for supervision and funding acquisition.</p>
      </fn>
      <fn fn-type="conflict">
        <p>None declared.</p>
      </fn>
    </fn-group>
    <ref-list>
      <ref id="ref1">
        <label>1</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Barea Mendoza</surname>
              <given-names>JA</given-names>
            </name>
            <name name-style="western">
              <surname>Valiente Fernandez</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Pardo Fernandez</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Gómez Álvarez</surname>
              <given-names>J</given-names>
            </name>
          </person-group>
          <article-title>Current perspectives on the use of artificial intelligence in critical patient safety</article-title>
          <source>Med Intensiva (Engl Ed)</source>
          <year>2025</year>
          <volume>49</volume>
          <issue>3</issue>
          <fpage>154</fpage>
          <lpage>164</lpage>
          <pub-id pub-id-type="doi">10.1016/j.medine.2024.04.002</pub-id>
          <pub-id pub-id-type="medline">38677902</pub-id>
          <pub-id pub-id-type="pii">S2173-5727(24)00080-8</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref2">
        <label>2</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Chen</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Zhang</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Cai</surname>
              <given-names>Z</given-names>
            </name>
            <name name-style="western">
              <surname>Seery</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Gonzalez</surname>
              <given-names>MJ</given-names>
            </name>
            <name name-style="western">
              <surname>Ali</surname>
              <given-names>NM</given-names>
            </name>
            <name name-style="western">
              <surname>Ren</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Qiao</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Xue</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Jiang</surname>
              <given-names>Y</given-names>
            </name>
          </person-group>
          <article-title>Acceptance of clinical artificial intelligence among physicians and medical students: a systematic review with cross-sectional survey</article-title>
          <source>Front Med (Lausanne)</source>
          <year>2022</year>
          <volume>9</volume>
          <fpage>990604</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://europepmc.org/abstract/MED/36117979"/>
          </comment>
          <pub-id pub-id-type="doi">10.3389/fmed.2022.990604</pub-id>
          <pub-id pub-id-type="medline">36117979</pub-id>
          <pub-id pub-id-type="pmcid">PMC9472134</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref3">
        <label>3</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Hua</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Petrina</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Young</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Cho</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Poon</surname>
              <given-names>SK</given-names>
            </name>
          </person-group>
          <article-title>Understanding the factors influencing acceptability of AI in medical imaging domains among healthcare professionals: a scoping review</article-title>
          <source>Artif Intell Med</source>
          <year>2024</year>
          <volume>147</volume>
          <fpage>102698</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://linkinghub.elsevier.com/retrieve/pii/S0933-3657(23)00212-9"/>
          </comment>
          <pub-id pub-id-type="doi">10.1016/j.artmed.2023.102698</pub-id>
          <pub-id pub-id-type="medline">38184343</pub-id>
          <pub-id pub-id-type="pii">S0933-3657(23)00212-9</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref4">
        <label>4</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Asan</surname>
              <given-names>O</given-names>
            </name>
            <name name-style="western">
              <surname>Bayrak</surname>
              <given-names>AE</given-names>
            </name>
            <name name-style="western">
              <surname>Choudhury</surname>
              <given-names>A</given-names>
            </name>
          </person-group>
          <article-title>Artificial intelligence and human trust in healthcare: focus on clinicians</article-title>
          <source>J Med Internet Res</source>
          <year>2020</year>
          <volume>22</volume>
          <issue>6</issue>
          <fpage>e15154</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.jmir.org/2020/6/e15154/"/>
          </comment>
          <pub-id pub-id-type="doi">10.2196/15154</pub-id>
          <pub-id pub-id-type="medline">32558657</pub-id>
          <pub-id pub-id-type="pii">v22i6e15154</pub-id>
          <pub-id pub-id-type="pmcid">PMC7334754</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref5">
        <label>5</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>van Kolfschooten</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>van Oirschot</surname>
              <given-names>J</given-names>
            </name>
          </person-group>
          <article-title>The EU artificial intelligence act (2024): implications for healthcare</article-title>
          <source>Health Policy</source>
          <year>2024</year>
          <volume>149</volume>
          <fpage>105152</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://linkinghub.elsevier.com/retrieve/pii/S0168-8510(24)00162-3"/>
          </comment>
          <pub-id pub-id-type="doi">10.1016/j.healthpol.2024.105152</pub-id>
          <pub-id pub-id-type="medline">39244818</pub-id>
          <pub-id pub-id-type="pii">S0168-8510(24)00162-3</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref6">
        <label>6</label>
        <nlm-citation citation-type="web">
          <article-title>Regulation (EU) 2024/1689 of the European Parliament and of the Council of 13 June 2024 laying down harmonised rules on artificial intelligence (Artificial Intelligence Act)</article-title>
          <source>EUR-Lex</source>
          <year>2024</year>
          <access-date>2025-12-25</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://eur-lex.europa.eu/eli/reg/2024/1689/oj">https://eur-lex.europa.eu/eli/reg/2024/1689/oj</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref7">
        <label>7</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Meskó</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Görög</surname>
              <given-names>M</given-names>
            </name>
          </person-group>
          <article-title>A short guide for medical professionals in the era of artificial intelligence</article-title>
          <source>NPJ Digit Med</source>
          <year>2020</year>
          <volume>3</volume>
          <fpage>126</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://doi.org/10.1038/s41746-020-00333-z"/>
          </comment>
          <pub-id pub-id-type="doi">10.1038/s41746-020-00333-z</pub-id>
          <pub-id pub-id-type="medline">33043150</pub-id>
          <pub-id pub-id-type="pii">10.1038/s41746-020-00333-z</pub-id>
          <pub-id pub-id-type="pmcid">PMC7518439</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref8">
        <label>8</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Rajpurkar</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Chen</surname>
              <given-names>E</given-names>
            </name>
            <name name-style="western">
              <surname>Banerjee</surname>
              <given-names>O</given-names>
            </name>
            <name name-style="western">
              <surname>Topol</surname>
              <given-names>EJ</given-names>
            </name>
          </person-group>
          <article-title>AI in health and medicine</article-title>
          <source>Nat Med</source>
          <year>2022</year>
          <volume>28</volume>
          <issue>1</issue>
          <fpage>31</fpage>
          <lpage>38</lpage>
          <pub-id pub-id-type="doi">10.1038/s41591-021-01614-0</pub-id>
          <pub-id pub-id-type="medline">35058619</pub-id>
          <pub-id pub-id-type="pii">10.1038/s41591-021-01614-0</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref9">
        <label>9</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Amann</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Blasimme</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Vayena</surname>
              <given-names>E</given-names>
            </name>
            <name name-style="western">
              <surname>Frey</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Madai</surname>
              <given-names>VI</given-names>
            </name>
            <collab>Precise4Q consortium</collab>
          </person-group>
          <article-title>Explainability for artificial intelligence in healthcare: a multidisciplinary perspective</article-title>
          <source>BMC Med Inform Decis Mak</source>
          <year>2020</year>
          <volume>20</volume>
          <issue>1</issue>
          <fpage>310</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://bmcmedinformdecismak.biomedcentral.com/articles/10.1186/s12911-020-01332-6"/>
          </comment>
          <pub-id pub-id-type="doi">10.1186/s12911-020-01332-6</pub-id>
          <pub-id pub-id-type="medline">33256715</pub-id>
          <pub-id pub-id-type="pii">10.1186/s12911-020-01332-6</pub-id>
          <pub-id pub-id-type="pmcid">PMC7706019</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref10">
        <label>10</label>
        <nlm-citation citation-type="book">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Mohanty</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Mishra</surname>
              <given-names>S</given-names>
            </name>
          </person-group>
          <person-group person-group-type="editor">
            <name name-style="western">
              <surname>Mishra</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Tripathy</surname>
              <given-names>HK</given-names>
            </name>
            <name name-style="western">
              <surname>Mallick</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Shaalan</surname>
              <given-names>K</given-names>
            </name>
          </person-group>
          <article-title>A comprehensive study of explainable artificial intelligence in healthcare</article-title>
          <source>Augmented Intelligence in Healthcare: A Pragmatic and Integrated Analysis</source>
          <year>2022</year>
          <publisher-loc>Singapore</publisher-loc>
          <publisher-name>Springer</publisher-name>
        </nlm-citation>
      </ref>
      <ref id="ref11">
        <label>11</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Allgaier</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Mulansky</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Draelos</surname>
              <given-names>RL</given-names>
            </name>
            <name name-style="western">
              <surname>Pryss</surname>
              <given-names>R</given-names>
            </name>
          </person-group>
          <article-title>How does the model make predictions? A systematic literature review on the explainability power of machine learning in healthcare</article-title>
          <source>Artif Intell Med</source>
          <year>2023</year>
          <volume>143</volume>
          <fpage>102616</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://linkinghub.elsevier.com/retrieve/pii/S0933-3657(23)00130-6"/>
          </comment>
          <pub-id pub-id-type="doi">10.1016/j.artmed.2023.102616</pub-id>
          <pub-id pub-id-type="medline">37673561</pub-id>
          <pub-id pub-id-type="pii">S0933-3657(23)00130-6</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref12">
        <label>12</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Rosenbacke</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Melhus</surname>
              <given-names>Å</given-names>
            </name>
            <name name-style="western">
              <surname>McKee</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Stuckler</surname>
              <given-names>D</given-names>
            </name>
          </person-group>
          <article-title>How explainable artificial intelligence can increase or decrease clinicians' trust in AI applications in health care: systematic review</article-title>
          <source>JMIR AI</source>
          <year>2024</year>
          <volume>3</volume>
          <fpage>e53207</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://ai.jmir.org/2024//e53207/"/>
          </comment>
          <pub-id pub-id-type="doi">10.2196/53207</pub-id>
          <pub-id pub-id-type="medline">39476365</pub-id>
          <pub-id pub-id-type="pii">v3i1e53207</pub-id>
          <pub-id pub-id-type="pmcid">PMC11561425</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref13">
        <label>13</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Liu</surname>
              <given-names>CF</given-names>
            </name>
            <name name-style="western">
              <surname>Chen</surname>
              <given-names>ZC</given-names>
            </name>
            <name name-style="western">
              <surname>Kuo</surname>
              <given-names>SC</given-names>
            </name>
            <name name-style="western">
              <surname>Lin</surname>
              <given-names>TC</given-names>
            </name>
          </person-group>
          <article-title>Does AI explainability affect physicians' intention to use AI?</article-title>
          <source>Int J Med Inform</source>
          <year>2022</year>
          <volume>168</volume>
          <fpage>104884</fpage>
          <pub-id pub-id-type="doi">10.1016/j.ijmedinf.2022.104884</pub-id>
          <pub-id pub-id-type="medline">36228415</pub-id>
          <pub-id pub-id-type="pii">S1386-5056(22)00198-8</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref14">
        <label>14</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Sivaraman</surname>
              <given-names>V</given-names>
            </name>
            <name name-style="western">
              <surname>Bukowski</surname>
              <given-names>LA</given-names>
            </name>
            <name name-style="western">
              <surname>Levin</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Kahn</surname>
              <given-names>JM</given-names>
            </name>
            <name name-style="western">
              <surname>Perer</surname>
              <given-names>A</given-names>
            </name>
          </person-group>
          <article-title>Ignore, trust, or negotiate: understanding clinician acceptance of AI-based treatment recommendations in health care</article-title>
          <year>2023</year>
          <conf-name>CHI '23: Proceedings of the 2023 CHI Conference on Human Factors in Computing Systems</conf-name>
          <conf-date>April 23-28, 2023</conf-date>
          <conf-loc>Hamburg, Germany</conf-loc>
          <fpage>1</fpage>
          <lpage>18</lpage>
          <pub-id pub-id-type="doi">10.1145/3544548.3581075</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref15">
        <label>15</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Du</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Antoniadi</surname>
              <given-names>AM</given-names>
            </name>
            <name name-style="western">
              <surname>McNestry</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>McAuliffe</surname>
              <given-names>FM</given-names>
            </name>
            <name name-style="western">
              <surname>Mooney</surname>
              <given-names>C</given-names>
            </name>
          </person-group>
          <article-title>The role of XAI in advice-taking from a clinical decision support system: a comparative user study of feature contribution-based and example-based explanations</article-title>
          <source>Appl Sci</source>
          <year>2022</year>
          <month>10</month>
          <day>13</day>
          <volume>12</volume>
          <issue>20</issue>
          <fpage>10323</fpage>
          <pub-id pub-id-type="doi">10.3390/app122010323</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref16">
        <label>16</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Nasarian</surname>
              <given-names>E</given-names>
            </name>
            <name name-style="western">
              <surname>Alizadehsani</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Acharya</surname>
              <given-names>UR</given-names>
            </name>
            <name name-style="western">
              <surname>Tsui</surname>
              <given-names>KL</given-names>
            </name>
          </person-group>
          <article-title>Designing interpretable ML system to enhance trust in healthcare: a systematic review to proposed responsible clinician-AI-collaboration framework</article-title>
          <source>Inf Fusion</source>
          <year>2024</year>
          <month>08</month>
          <volume>108</volume>
          <fpage>102412</fpage>
          <pub-id pub-id-type="doi">10.1016/j.inffus.2024.102412</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref17">
        <label>17</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Lambert</surname>
              <given-names>SI</given-names>
            </name>
            <name name-style="western">
              <surname>Madi</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Sopka</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Lenes</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Stange</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Buszello</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Stephan</surname>
              <given-names>A</given-names>
            </name>
          </person-group>
          <article-title>An integrative review on the acceptance of artificial intelligence among healthcare professionals in hospitals</article-title>
          <source>NPJ Digit Med</source>
          <year>2023</year>
          <volume>6</volume>
          <issue>1</issue>
          <fpage>111</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://doi.org/10.1038/s41746-023-00852-5"/>
          </comment>
          <pub-id pub-id-type="doi">10.1038/s41746-023-00852-5</pub-id>
          <pub-id pub-id-type="medline">37301946</pub-id>
          <pub-id pub-id-type="pii">10.1038/s41746-023-00852-5</pub-id>
          <pub-id pub-id-type="pmcid">PMC10257646</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref18">
        <label>18</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Combi</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Amico</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Bellazzi</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Holzinger</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Moore</surname>
              <given-names>JH</given-names>
            </name>
            <name name-style="western">
              <surname>Zitnik</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Holmes</surname>
              <given-names>JH</given-names>
            </name>
          </person-group>
          <article-title>A manifesto on explainability for artificial intelligence in medicine</article-title>
          <source>Artif Intell Med</source>
          <year>2022</year>
          <volume>133</volume>
          <fpage>102423</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://linkinghub.elsevier.com/retrieve/pii/S0933-3657(22)00175-0"/>
          </comment>
          <pub-id pub-id-type="doi">10.1016/j.artmed.2022.102423</pub-id>
          <pub-id pub-id-type="medline">36328669</pub-id>
          <pub-id pub-id-type="pii">S0933-3657(22)00175-0</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref19">
        <label>19</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Rubinger</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Gazendam</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Ekhtiari</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Bhandari</surname>
              <given-names>M</given-names>
            </name>
          </person-group>
          <article-title>Machine learning and artificial intelligence in research and healthcare</article-title>
          <source>Injury</source>
          <year>2023</year>
          <volume>54 Suppl 3</volume>
          <fpage>S69</fpage>
          <lpage>S73</lpage>
          <pub-id pub-id-type="doi">10.1016/j.injury.2022.01.046</pub-id>
          <pub-id pub-id-type="medline">35135685</pub-id>
          <pub-id pub-id-type="pii">S0020-1383(22)00076-6</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref20">
        <label>20</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Amann</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Vetter</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Blomberg</surname>
              <given-names>SN</given-names>
            </name>
            <name name-style="western">
              <surname>Christensen</surname>
              <given-names>HC</given-names>
            </name>
            <name name-style="western">
              <surname>Coffee</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Gerke</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Gilbert</surname>
              <given-names>TK</given-names>
            </name>
            <name name-style="western">
              <surname>Hagendorff</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Holm</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Livne</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Spezzatti</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Strümke</surname>
              <given-names>I</given-names>
            </name>
            <name name-style="western">
              <surname>Zicari</surname>
              <given-names>RV</given-names>
            </name>
            <name name-style="western">
              <surname>Madai</surname>
              <given-names>VI</given-names>
            </name>
            <collab>Z-Inspection initiative</collab>
          </person-group>
          <article-title>To explain or not to explain?—Artificial intelligence explainability in clinical decision support systems</article-title>
          <source>PLOS Digit Health</source>
          <year>2022</year>
          <volume>1</volume>
          <issue>2</issue>
          <fpage>e0000016</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://europepmc.org/abstract/MED/36812545"/>
          </comment>
          <pub-id pub-id-type="doi">10.1371/journal.pdig.0000016</pub-id>
          <pub-id pub-id-type="medline">36812545</pub-id>
          <pub-id pub-id-type="pii">PDIG-D-21-00082</pub-id>
          <pub-id pub-id-type="pmcid">PMC9931364</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref21">
        <label>21</label>
        <nlm-citation citation-type="web">
          <article-title>Artificial intelligence to help optimize ventilation for intensive care patients</article-title>
          <source>IntelliLung</source>
          <access-date>2024-02-14</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://intellilung-project.eu/">https://intellilung-project.eu/</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref22">
        <label>22</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Mohseni</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Zarei</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Ragan</surname>
              <given-names>ED</given-names>
            </name>
          </person-group>
          <article-title>A multidisciplinary survey and framework for design and evaluation of explainable AI systems</article-title>
          <source>ACM Trans Interact Intell Syst</source>
          <year>2021</year>
          <month>09</month>
          <day>03</day>
          <volume>11</volume>
          <issue>3-4</issue>
          <fpage>1</fpage>
          <lpage>45</lpage>
          <pub-id pub-id-type="doi">10.1145/3387166</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref23">
        <label>23</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Saarela</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Podgorelec</surname>
              <given-names>V</given-names>
            </name>
          </person-group>
          <article-title>Recent applications of explainable AI (XAI): a systematic literature review</article-title>
          <source>Appl Sci</source>
          <year>2024</year>
          <volume>14</volume>
          <issue>19</issue>
          <fpage>8884</fpage>
          <pub-id pub-id-type="doi">10.3390/app14198884</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref24">
        <label>24</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Wang</surname>
              <given-names>Z</given-names>
            </name>
            <name name-style="western">
              <surname>Zhang</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Huang</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Yang</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Cheng</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Wang</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Yin</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Lyu</surname>
              <given-names>J</given-names>
            </name>
          </person-group>
          <article-title>Developing an explainable machine learning model to predict the mechanical ventilation duration of patients with ARDS in intensive care units</article-title>
          <source>Heart Lung</source>
          <year>2023</year>
          <volume>58</volume>
          <fpage>74</fpage>
          <lpage>81</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://europepmc.org/abstract/MED/36423504"/>
          </comment>
          <pub-id pub-id-type="doi">10.1016/j.hrtlng.2022.11.005</pub-id>
          <pub-id pub-id-type="medline">36423504</pub-id>
          <pub-id pub-id-type="pii">S0147-9563(22)00269-2</pub-id>
          <pub-id pub-id-type="pmcid">PMC9678346</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref25">
        <label>25</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Lin</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Yang</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Wang</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Wang</surname>
              <given-names>Z</given-names>
            </name>
          </person-group>
          <article-title>An Explainable AI predictor to improve clinical prognosis for acute respiratory distress syndrome</article-title>
          <year>2023</year>
          <conf-name>Computing in Cardiology (CinC)</conf-name>
          <conf-date>October 1-4, 2023</conf-date>
          <conf-loc>Atlanta, GA</conf-loc>
          <pub-id pub-id-type="doi">10.22489/cinc.2023.003</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref26">
        <label>26</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Aslam</surname>
              <given-names>N</given-names>
            </name>
          </person-group>
          <article-title>Explainable artificial intelligence approach for the early prediction of ventilator support and mortality in COVID-19 patients</article-title>
          <source>Computation</source>
          <year>2022</year>
          <volume>10</volume>
          <issue>3</issue>
          <fpage>36</fpage>
          <pub-id pub-id-type="doi">10.3390/computation10030036</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref27">
        <label>27</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Safaei</surname>
              <given-names>F</given-names>
            </name>
            <name name-style="western">
              <surname>Nenadović</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Liessner</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Raphael</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Jakob</surname>
              <given-names>W</given-names>
            </name>
            <name name-style="western">
              <surname>Jens</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Sahar</surname>
              <given-names>V</given-names>
            </name>
          </person-group>
          <article-title>X-Vent: ICU ventilation with explainable model-based reinforcement learning</article-title>
          <year>2024</year>
          <conf-name>27th European Conference on Artificial Intelligence (ECAI 2024)</conf-name>
          <conf-date>October 19-24, 2024</conf-date>
          <conf-loc>Santiago de Compostela, Spain</conf-loc>
          <fpage>4719</fpage>
          <lpage>4726</lpage>
          <pub-id pub-id-type="doi">10.3233/faia241069</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref28">
        <label>28</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Hughes</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Jia</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Sujan</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Lawton</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Habli</surname>
              <given-names>I</given-names>
            </name>
            <name name-style="western">
              <surname>McDermid</surname>
              <given-names>J</given-names>
            </name>
          </person-group>
          <article-title>Contextual design requirements for decision-support tools involved in weaning patients from mechanical ventilation in intensive care units</article-title>
          <source>Appl Ergon</source>
          <year>2024</year>
          <volume>118</volume>
          <fpage>104275</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://linkinghub.elsevier.com/retrieve/pii/S0003-6870(24)00052-8"/>
          </comment>
          <pub-id pub-id-type="doi">10.1016/j.apergo.2024.104275</pub-id>
          <pub-id pub-id-type="medline">38574594</pub-id>
          <pub-id pub-id-type="pii">S0003-6870(24)00052-8</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref29">
        <label>29</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Clark</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Wragg</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Nielsen</surname>
              <given-names>E</given-names>
            </name>
          </person-group>
          <article-title>Exploring the requirements of clinicians for explainable AI decision support systems in intensive care</article-title>
          <source>arXiv</source>
          <year>2024</year>
          <month>11</month>
          <day>18</day>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="http://arxiv.org/abs/2411.11774"/>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref30">
        <label>30</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Nagendran</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Festor</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Komorowski</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Gordon</surname>
              <given-names>AC</given-names>
            </name>
            <name name-style="western">
              <surname>Faisal</surname>
              <given-names>AA</given-names>
            </name>
          </person-group>
          <article-title>Eye tracking insights into physician behaviour with safe and unsafe explainable AI recommendations</article-title>
          <source>NPJ Digit Med</source>
          <year>2024</year>
          <volume>7</volume>
          <issue>1</issue>
          <fpage>202</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://doi.org/10.1038/s41746-024-01200-x"/>
          </comment>
          <pub-id pub-id-type="doi">10.1038/s41746-024-01200-x</pub-id>
          <pub-id pub-id-type="medline">39095449</pub-id>
          <pub-id pub-id-type="pii">10.1038/s41746-024-01200-x</pub-id>
          <pub-id pub-id-type="pmcid">PMC11297294</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref31">
        <label>31</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Barda</surname>
              <given-names>AJ</given-names>
            </name>
            <name name-style="western">
              <surname>Horvat</surname>
              <given-names>CM</given-names>
            </name>
            <name name-style="western">
              <surname>Hochheiser</surname>
              <given-names>H</given-names>
            </name>
          </person-group>
          <article-title>A qualitative research framework for the design of user-centered displays of explanations for machine learning model predictions in healthcare</article-title>
          <source>BMC Med Inform Decis Mak</source>
          <year>2020</year>
          <volume>20</volume>
          <issue>1</issue>
          <fpage>257</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://bmcmedinformdecismak.biomedcentral.com/articles/10.1186/s12911-020-01276-x"/>
          </comment>
          <pub-id pub-id-type="doi">10.1186/s12911-020-01276-x</pub-id>
          <pub-id pub-id-type="medline">33032582</pub-id>
          <pub-id pub-id-type="pii">10.1186/s12911-020-01276-x</pub-id>
          <pub-id pub-id-type="pmcid">PMC7545557</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref32">
        <label>32</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Schoonderwoerd</surname>
              <given-names>TAJ</given-names>
            </name>
            <name name-style="western">
              <surname>Jorritsma</surname>
              <given-names>W</given-names>
            </name>
            <name name-style="western">
              <surname>Neerincx</surname>
              <given-names>MA</given-names>
            </name>
            <name name-style="western">
              <surname>van den Bosch</surname>
              <given-names>K</given-names>
            </name>
          </person-group>
          <article-title>Human-centered XAI: developing design patterns for explanations of clinical decision support systems</article-title>
          <source>Int J Hum Comput Stud</source>
          <year>2021</year>
          <volume>154</volume>
          <fpage>102684</fpage>
          <pub-id pub-id-type="doi">10.1016/j.ijhcs.2021.102684</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref33">
        <label>33</label>
        <nlm-citation citation-type="web">
          <article-title>Ergonomics of human-system interaction—part 210: human-centred design for interactive systems</article-title>
          <source>International Organization for Standardization (ISO)</source>
          <year>2019</year>
          <access-date>2025-02-05</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.iso.org/standard/77520.html">https://www.iso.org/standard/77520.html</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref34">
        <label>34</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Jung</surname>
              <given-names>IC</given-names>
            </name>
            <name name-style="western">
              <surname>Zerlik</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Schuler</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Sedlmayr</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Sedlmayr</surname>
              <given-names>B</given-names>
            </name>
          </person-group>
          <article-title>User-centered development of explanation user interfaces for AI-based CDSS: lessons learned from early phases</article-title>
          <source>Stud Health Technol Inform</source>
          <year>2024</year>
          <volume>316</volume>
          <fpage>570</fpage>
          <lpage>574</lpage>
          <pub-id pub-id-type="doi">10.3233/SHTI240478</pub-id>
          <pub-id pub-id-type="medline">39176806</pub-id>
          <pub-id pub-id-type="pii">SHTI240478</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref35">
        <label>35</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Jung</surname>
              <given-names>IC</given-names>
            </name>
            <name name-style="western">
              <surname>Schuler</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Zerlik</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Grummt</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Sedlmayr</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Sedlmayr</surname>
              <given-names>B</given-names>
            </name>
          </person-group>
          <article-title>Overview of basic design recommendations for user-centered explanation interfaces for AI-based clinical decision support systems: a scoping review</article-title>
          <source>Digit Health</source>
          <year>2025</year>
          <volume>11</volume>
          <fpage>20552076241308298</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://journals.sagepub.com/doi/10.1177/20552076241308298?url_ver=Z39.88-2003&#38;rfr_id=ori:rid:crossref.org&#38;rfr_dat=cr_pub%20%200pubmed"/>
          </comment>
          <pub-id pub-id-type="doi">10.1177/20552076241308298</pub-id>
          <pub-id pub-id-type="medline">39866885</pub-id>
          <pub-id pub-id-type="pii">10.1177_20552076241308298</pub-id>
          <pub-id pub-id-type="pmcid">PMC11758527</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref36">
        <label>36</label>
        <nlm-citation citation-type="web">
          <article-title>Justinmind version 9.9.0</article-title>
          <source>Justinmind</source>
          <access-date>2025-12-25</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.justinmind.com/release-notes">https://www.justinmind.com/release-notes</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref37">
        <label>37</label>
        <nlm-citation citation-type="web">
          <article-title>Claude 3.5 Sonnet</article-title>
          <source>Anthropic</source>
          <access-date>2025-12-25</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.anthropic.com/news/claude-3-5-sonnet">https://www.anthropic.com/news/claude-3-5-sonnet</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref38">
        <label>38</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Talmon</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Ammenwerth</surname>
              <given-names>E</given-names>
            </name>
            <name name-style="western">
              <surname>Brender</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>de Keizer</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Nykänen</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Rigby</surname>
              <given-names>M</given-names>
            </name>
          </person-group>
          <article-title>STARE-HI—statement on reporting of evaluation studies in health informatics</article-title>
          <source>Int J Med Inform</source>
          <year>2009</year>
          <volume>78</volume>
          <issue>1</issue>
          <fpage>1</fpage>
          <lpage>9</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://doi.org/10.1016/j.ijmedinf.2008.09.002"/>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref39">
        <label>39</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Brender</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Talmon</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>de Keizer</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Nykänen</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Rigby</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Ammenwerth</surname>
              <given-names>E</given-names>
            </name>
          </person-group>
          <article-title>STARE-HI - statement on reporting of evaluation studies in health informatics: explanation and elaboration</article-title>
          <source>Appl Clin Inform</source>
          <year>2013</year>
          <volume>4</volume>
          <issue>3</issue>
          <fpage>331</fpage>
          <lpage>358</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="http://www.thieme-connect.com/DOI/DOI?10.4338/ACI-2013-04-RA-0024"/>
          </comment>
          <pub-id pub-id-type="doi">10.4338/ACI-2013-04-RA-0024</pub-id>
          <pub-id pub-id-type="medline">24155788</pub-id>
          <pub-id pub-id-type="pmcid">PMC3799207</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref40">
        <label>40</label>
        <nlm-citation citation-type="web">
          <article-title>Human factors engineering—design of medical devices</article-title>
          <source>ANSI Webstore</source>
          <year>2009</year>
          <access-date>2025-12-25</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://webstore.ansi.org/standards/aami/ansiaamihe752009">https://webstore.ansi.org/standards/aami/ansiaamihe752009</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref41">
        <label>41</label>
        <nlm-citation citation-type="web">
          <article-title>Applying human factors and usability engineering to medical devices: guidance for industry and food and drug administration staff</article-title>
          <source>Food and Drug Administration (FDA)</source>
          <year>2016</year>
          <access-date>2025-12-25</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.fda.gov/regulatory-information/search-fda-guidance-documents/applying-human-factors-and-usability-engineering-medical-devices">https://www.fda.gov/regulatory-information/search-fda-guidance-documents/applying-human-factors-and-usability-engineering-medical-devices</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref42">
        <label>42</label>
        <nlm-citation citation-type="web">
          <person-group person-group-type="author">
            <collab>Ethics Committee of TU Dresden</collab>
          </person-group>
          <article-title>FAQ: When is an ethics application required?</article-title>
          <source>TU Dresden website</source>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://tu-dresden.de/tu-dresden/organisation/gremien-und-beauftragte/kommissionen/ethikkommission/faq#ck_Wann%20ist%20ein%20Antrag%20erforderlich">https://tu-dresden.de/tu-dresden/organisation/gremien-und-beauftragte/kommissionen/ethikkommission/faq#ck_Wann%20ist%20ein%20Antrag%20erforderlich</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref43">
        <label>43</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Cálem</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Moreira</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Jorge</surname>
              <given-names>J</given-names>
            </name>
          </person-group>
          <article-title>Intelligent systems in healthcare: a systematic survey of explainable user interfaces</article-title>
          <source>Comput Biol Med</source>
          <year>2024</year>
          <volume>180</volume>
          <fpage>108908</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://linkinghub.elsevier.com/retrieve/pii/S0010-4825(24)00993-4"/>
          </comment>
          <pub-id pub-id-type="doi">10.1016/j.compbiomed.2024.108908</pub-id>
          <pub-id pub-id-type="medline">39067152</pub-id>
          <pub-id pub-id-type="pii">S0010-4825(24)00993-4</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref44">
        <label>44</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Subramanian</surname>
              <given-names>HV</given-names>
            </name>
            <name name-style="western">
              <surname>Canfield</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Shank</surname>
              <given-names>DB</given-names>
            </name>
          </person-group>
          <article-title>Designing explainable AI to improve human-AI team performance: a medical stakeholder-driven scoping review</article-title>
          <source>Artif Intell Med</source>
          <year>2024</year>
          <volume>149</volume>
          <fpage>102780</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://linkinghub.elsevier.com/retrieve/pii/S0933-3657(24)00022-8"/>
          </comment>
          <pub-id pub-id-type="doi">10.1016/j.artmed.2024.102780</pub-id>
          <pub-id pub-id-type="medline">38462282</pub-id>
          <pub-id pub-id-type="pii">S0933-3657(24)00022-8</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref45">
        <label>45</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Bienefeld</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Boss</surname>
              <given-names>JM</given-names>
            </name>
            <name name-style="western">
              <surname>Lüthy</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Brodbeck</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Azzati</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Blaser</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Willms</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Keller</surname>
              <given-names>E</given-names>
            </name>
          </person-group>
          <article-title>Solving the explainable AI conundrum by bridging clinicians' needs and developers' goals</article-title>
          <source>NPJ Digit Med</source>
          <year>2023</year>
          <volume>6</volume>
          <issue>1</issue>
          <fpage>94</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://doi.org/10.1038/s41746-023-00837-4"/>
          </comment>
          <pub-id pub-id-type="doi">10.1038/s41746-023-00837-4</pub-id>
          <pub-id pub-id-type="medline">37217779</pub-id>
          <pub-id pub-id-type="pii">10.1038/s41746-023-00837-4</pub-id>
          <pub-id pub-id-type="pmcid">PMC10202353</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref46">
        <label>46</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Hassan</surname>
              <given-names>EA</given-names>
            </name>
            <name name-style="western">
              <surname>El-Ashry</surname>
              <given-names>AM</given-names>
            </name>
          </person-group>
          <article-title>Leading with AI in critical care nursing: challenges, opportunities, and the human factor</article-title>
          <source>BMC Nurs</source>
          <year>2024</year>
          <volume>23</volume>
          <issue>1</issue>
          <fpage>752</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://bmcnurs.biomedcentral.com/articles/10.1186/s12912-024-02363-4"/>
          </comment>
          <pub-id pub-id-type="doi">10.1186/s12912-024-02363-4</pub-id>
          <pub-id pub-id-type="medline">39402609</pub-id>
          <pub-id pub-id-type="pii">10.1186/s12912-024-02363-4</pub-id>
          <pub-id pub-id-type="pmcid">PMC11475860</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref47">
        <label>47</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Kim</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Kim</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Kim</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Song</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Kim</surname>
              <given-names>Y</given-names>
            </name>
          </person-group>
          <article-title>Do stakeholder needs differ? - Designing stakeholder-tailored explainable artificial intelligence (XAI) interfaces</article-title>
          <source>Int J Hum Comput Stud</source>
          <year>2024</year>
          <volume>181</volume>
          <fpage>103160</fpage>
          <pub-id pub-id-type="doi">10.1016/j.ijhcs.2023.103160</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref48">
        <label>48</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Jung</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Kang</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Choi</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>El-Kareh</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Lee</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Kim</surname>
              <given-names>H</given-names>
            </name>
          </person-group>
          <article-title>Evaluating the impact of explainable AI on clinicians' decision-making: a study on ICU length of stay prediction</article-title>
          <source>Int J Med Inform</source>
          <year>2025</year>
          <volume>201</volume>
          <fpage>105943</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://doi.org/10.1016/j.ijmedinf.2025.105943"/>
          </comment>
          <pub-id pub-id-type="doi">10.1016/j.ijmedinf.2025.105943</pub-id>
          <pub-id pub-id-type="medline">40318498</pub-id>
          <pub-id pub-id-type="pii">S1386-5056(25)00160-1</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref49">
        <label>49</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Nagendran</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Festor</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Komorowski</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Gordon</surname>
              <given-names>AC</given-names>
            </name>
            <name name-style="western">
              <surname>Faisal</surname>
              <given-names>AA</given-names>
            </name>
          </person-group>
          <article-title>Quantifying the impact of AI recommendations with explanations on prescription decision making</article-title>
          <source>NPJ Digit Med</source>
          <year>2023</year>
          <volume>6</volume>
          <issue>1</issue>
          <fpage>206</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://doi.org/10.1038/s41746-023-00955-z"/>
          </comment>
          <pub-id pub-id-type="doi">10.1038/s41746-023-00955-z</pub-id>
          <pub-id pub-id-type="medline">37935953</pub-id>
          <pub-id pub-id-type="pii">10.1038/s41746-023-00955-z</pub-id>
          <pub-id pub-id-type="pmcid">PMC10630476</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref50">
        <label>50</label>
        <nlm-citation citation-type="web">
          <article-title>GPT-4o</article-title>
          <source>OpenAI</source>
          <year>2024</year>
          <access-date>2025-12-25</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://openai.com/index/hello-gpt-4o">https://openai.com/index/hello-gpt-4o</ext-link>
          </comment>
        </nlm-citation>
      </ref>
    </ref-list>
  </back>
</article>
