<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE article PUBLIC "-//NLM//DTD Journal Publishing DTD v2.0 20040830//EN" "http://dtd.nlm.nih.gov/publishing/2.0/journalpublishing.dtd">
<article article-type="research-article" dtd-version="2.0" xmlns:xlink="http://www.w3.org/1999/xlink">
  <front>
    <journal-meta>
      <journal-id journal-id-type="publisher-id">JFR</journal-id>
      <journal-id journal-id-type="nlm-ta">JMIR Form Res</journal-id>
      <journal-title>JMIR Formative Research</journal-title>
      <issn pub-type="epub">2561-326X</issn>
      <publisher>
        <publisher-name>JMIR Publications</publisher-name>
        <publisher-loc>Toronto, Canada</publisher-loc>
      </publisher>
    </journal-meta>
    <article-meta>
      <article-id pub-id-type="publisher-id">v8i1e58623</article-id>
      <article-id pub-id-type="pmid">39661979</article-id>
      <article-id pub-id-type="doi">10.2196/58623</article-id>
      <article-categories>
        <subj-group subj-group-type="heading">
          <subject>Original Paper</subject>
        </subj-group>
        <subj-group subj-group-type="article-type">
          <subject>Original Paper</subject>
        </subj-group>
      </article-categories>
      <title-group>
        <article-title>Integrating GPT-Based AI into Virtual Patients to Facilitate Communication Training Among Medical First Responders: Usability Study of Mixed Reality Simulation</article-title>
      </title-group>
      <contrib-group>
        <contrib contrib-type="editor">
          <name>
            <surname>Mavragani</surname>
            <given-names>Amaryllis</given-names>
          </name>
        </contrib>
      </contrib-group>
      <contrib-group>
        <contrib contrib-type="reviewer">
          <name>
            <surname>Chrimes</surname>
            <given-names>Dillon</given-names>
          </name>
        </contrib>
        <contrib contrib-type="reviewer">
          <name>
            <surname>Hudon</surname>
            <given-names>Alexandre</given-names>
          </name>
        </contrib>
      </contrib-group>
      <contrib-group>
        <contrib id="contrib1" contrib-type="author" corresp="yes">
          <name name-style="western">
            <surname>Gutiérrez Maquilón</surname>
            <given-names>Rodrigo</given-names>
          </name>
          <degrees>BEng, MSc</degrees>
          <xref rid="aff1" ref-type="aff">1</xref>
          <address>
            <institution>Center for Technology Experience</institution>
            <institution>AIT - Austrian Institute of Technology</institution>
            <addr-line>Giefinggasse 4</addr-line>
            <addr-line>Vienna, 1210</addr-line>
            <country>Austria</country>
            <phone>43 66478588121</phone>
            <email>rodrigo.gutierrez@ait.ac.at</email>
          </address>
          <xref rid="aff2" ref-type="aff">2</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0002-6736-3418</ext-link>
        </contrib>
        <contrib id="contrib2" contrib-type="author">
          <name name-style="western">
            <surname>Uhl</surname>
            <given-names>Jakob</given-names>
          </name>
          <degrees>MSc</degrees>
          <xref rid="aff1" ref-type="aff">1</xref>
          <xref rid="aff2" ref-type="aff">2</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0002-2178-7987</ext-link>
        </contrib>
        <contrib id="contrib3" contrib-type="author">
          <name name-style="western">
            <surname>Schrom-Feiertag</surname>
            <given-names>Helmut</given-names>
          </name>
          <degrees>DI</degrees>
          <xref rid="aff1" ref-type="aff">1</xref>
          <xref rid="aff2" ref-type="aff">2</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0002-5327-2494</ext-link>
        </contrib>
        <contrib id="contrib4" contrib-type="author">
          <name name-style="western">
            <surname>Tscheligi</surname>
            <given-names>Manfred</given-names>
          </name>
          <degrees>Prof Dr</degrees>
          <xref rid="aff1" ref-type="aff">1</xref>
          <xref rid="aff2" ref-type="aff">2</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0001-6056-7285</ext-link>
        </contrib>
      </contrib-group>
      <aff id="aff1">
        <label>1</label>
        <institution>Center for Technology Experience</institution>
        <institution>AIT - Austrian Institute of Technology</institution>
        <addr-line>Vienna</addr-line>
        <country>Austria</country>
      </aff>
      <aff id="aff2">
        <label>2</label>
        <institution>Department of Artificial Intelligence and Human Interfaces</institution>
        <institution>Paris Lodron University of Salzburg</institution>
        <addr-line>Salzburg</addr-line>
        <country>Austria</country>
      </aff>
      <author-notes>
        <corresp>Corresponding Author: Rodrigo Gutiérrez Maquilón <email>rodrigo.gutierrez@ait.ac.at</email></corresp>
      </author-notes>
      <pub-date pub-type="collection">
        <year>2024</year>
      </pub-date>
      <pub-date pub-type="epub">
        <day>11</day>
        <month>12</month>
        <year>2024</year>
      </pub-date>
      <volume>8</volume>
      <elocation-id>e58623</elocation-id>
      <history>
        <date date-type="received">
          <day>20</day>
          <month>3</month>
          <year>2024</year>
        </date>
        <date date-type="rev-request">
          <day>28</day>
          <month>6</month>
          <year>2024</year>
        </date>
        <date date-type="rev-recd">
          <day>19</day>
          <month>7</month>
          <year>2024</year>
        </date>
        <date date-type="accepted">
          <day>24</day>
          <month>9</month>
          <year>2024</year>
        </date>
      </history>
      <copyright-statement>©Rodrigo Gutiérrez Maquilón, Jakob Uhl, Helmut Schrom-Feiertag, Manfred Tscheligi. Originally published in JMIR Formative Research (https://formative.jmir.org), 11.12.2024.</copyright-statement>
      <copyright-year>2024</copyright-year>
      <license license-type="open-access" xlink:href="https://creativecommons.org/licenses/by/4.0/">
        <p>This is an open-access article distributed under the terms of the Creative Commons Attribution License (https://creativecommons.org/licenses/by/4.0/), which permits unrestricted use, distribution, and reproduction in any medium, provided the original work, first published in JMIR Formative Research, is properly cited. The complete bibliographic information, a link to the original publication on https://formative.jmir.org, as well as this copyright and license information must be included.</p>
      </license>
      <self-uri xlink:href="https://formative.jmir.org/2024/1/e58623" xlink:type="simple"/>
      <abstract>
        <sec sec-type="background">
          <title>Background</title>
          <p>Training in social-verbal interactions is crucial for medical first responders (MFRs) to assess a patient’s condition and perform urgent treatment during emergency medical service administration. Integrating conversational agents (CAs) in virtual patients (VPs), that is, digital simulations, is a cost-effective alternative to resource-intensive human role-playing. There is moderate evidence that CAs improve communication skills more effectively when used with instructional interventions. However, more recent GPT-based artificial intelligence (AI) produces richer, more diverse, and more natural responses than previous CAs and has control of prosodic voice qualities like pitch and duration. These functionalities have the potential to better match the interaction expectations of MFRs regarding habitability.</p>
        </sec>
        <sec sec-type="objective">
          <title>Objective</title>
          <p>We aimed to study how the integration of GPT-based AI in a mixed reality (MR)–VP could support communication training of MFRs.</p>
        </sec>
        <sec sec-type="methods">
          <title>Methods</title>
          <p>We developed an MR simulation of a traffic accident with a VP. ChatGPT (OpenAI) was integrated into the VP and prompted with verified characteristics of accident victims. MFRs (N=24) were instructed on how to interact with the MR scenario. After assessing and treating the VP, the MFRs were administered the Mean Opinion Scale-Expanded, version 2, and the Subjective Assessment of Speech System Interfaces questionnaires to study their perception of the voice quality and the usability of the voice interactions, respectively. Open-ended questions were asked after completing the questionnaires. The observed and logged interactions with the VP, descriptive statistics of the questionnaires, and the output of the open-ended questions are reported.</p>
        </sec>
        <sec sec-type="results">
          <title>Results</title>
          <p>The usability assessment of the VP resulted in moderate positive ratings, especially in habitability (median 4.25, IQR 4-4.81) and likeability (median 4.50, IQR 3.97-5.91). Interactions were negatively affected by the approximately 3-second latency of the responses. MFRs acknowledged the naturalness of determining the physiological states of the VP through verbal communication, for example, with questions such as “Where does it hurt?” However, the question-answer dynamic in the verbal exchange with the VP and the lack of the VP’s ability to start the verbal exchange were noticed. Noteworthy insights highlighted the potential of domain-knowledge prompt engineering to steer the actions of MFRs for effective training.</p>
        </sec>
        <sec sec-type="conclusions">
          <title>Conclusions</title>
          <p>Generative AI in VPs facilitates MFRs’ training but continues to rely on instructions for effective verbal interactions. Therefore, the capabilities of the GPT-VP and a training protocol need to be communicated to trainees. Future interactions should implement triggers based on keyword recognition, the VP pointing to the hurting area, conversational turn-taking techniques, and add the ability for the VP to start a verbal exchange. Furthermore, a local AI server, chunk processing, and lowering the audio resolution of the VP’s voice could ameliorate the delay in response and allay privacy concerns. Prompting could be used in future studies to create a virtual MFR capable of assisting trainees.</p>
        </sec>
      </abstract>
      <kwd-group>
        <kwd>medical first responders</kwd>
        <kwd>verbal communication skills</kwd>
        <kwd>training</kwd>
        <kwd>virtual patient</kwd>
        <kwd>generative artificial intelligence</kwd>
        <kwd>GPT</kwd>
        <kwd>large language models</kwd>
        <kwd>prompt engineering</kwd>
        <kwd>mixed reality</kwd>
      </kwd-group>
    </article-meta>
  </front>
  <body>
    <sec sec-type="introduction">
      <title>Introduction</title>
      <sec>
        <title>Background</title>
        <p>Virtual patients (VPs) are used in clinical scenarios and have features such as clinical information, case progression, and knowledge of diagnosis that are presented on digital displays; they are mainly used in health care services, education, or training to support the development of mental models in clinical reasoning [<xref ref-type="bibr" rid="ref1">1</xref>]. VPs have lately been implemented in virtual reality (VR) and mixed reality (MR) simulations to study the training of medical first responders (MFRs) for the urgent assessment and treatment of victims, for example, in mass casualty incident (MCI) triage [<xref ref-type="bibr" rid="ref2">2</xref>-<xref ref-type="bibr" rid="ref4">4</xref>]. Particularly, a VP in MR was integrated in our previous study with chroma key compositing [<xref ref-type="bibr" rid="ref2">2</xref>]. Chroma keying allows MFRs to interact with a physical manikin that is overlaid in the virtual world with a 3D avatar representing the victim of an accident, that is, the VP (<xref rid="figure1" ref-type="fig">Figure 1</xref>). The tangible VP can then show different injuries, movements, and facial expressions that match speech production, respiration patterns, or pain sounds. These sensory cues are crucial to determine the treatment or triage category that needs to be provided to the victim. For instance, if breathing difficulties are present or if there is an inability to follow simple commands, the patient is assigned a red category for immediate treatment [<xref ref-type="bibr" rid="ref5">5</xref>,<xref ref-type="bibr" rid="ref6">6</xref>]. Moreover, MR allows MFRs to see their own hands, bodies, and medical tools through the MR head-mounted display (HMD) as shown in <xref rid="figure1" ref-type="fig">Figure 1</xref>A, while providing haptic feedback with physical objects as shown in <xref rid="figure1" ref-type="fig">Figure 1</xref>B.</p>
        <fig id="figure1" position="float">
          <label>Figure 1</label>
          <caption>
            <p>(A) The mixed reality view shows a 3D avatar that represents a victim of a traffic accident and a medical first responder’s real hands and tools, that is, an oxygen mask and an AMBU bag to provide treatment in an emergency scenario. (B) The green manikin and sheet on the floor in the real world enable chroma key compositing software to mix physical and virtual content in the mixed reality head-mounted display. A headset with headphones and a microphone provides a 3D sound environment and natural language communication capabilities with the virtual patient.</p>
          </caption>
          <graphic xlink:href="formative_v8i1e58623_fig1.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
        </fig>
        <p>Training in medical emergency assessment and treatment for triage in MR simulations benefits end users and organizations alike because it is highly immersive and efficient when compared to existing real exercises, which sometimes involve hundreds of people, including actors, participants, and organizers [<xref ref-type="bibr" rid="ref3">3</xref>]. Training in digital simulations increases its availability to more MFRs, simplifies the architecture and access to training facilities, and eliminates the need for human role players and props.</p>
        <p>Notably, communication training is a major objective both in digital and in real-world medical emergency simulations. Developing communication skills is an important component of simulations that has shown to positively affect patient outcomes [<xref ref-type="bibr" rid="ref7">7</xref>,<xref ref-type="bibr" rid="ref8">8</xref>]. In real-world exercises, human role players act as injured persons or standardized patients, and MFRs interact with them in small groups in different scenarios. MFRs repeat this procedure several times to build communication skills. Ideally, retraining workshops should take place at least once a year to account for triage accuracy and overall performance drop [<xref ref-type="bibr" rid="ref9">9</xref>].</p>
        <p>Human social interaction is essential for verbal skill development. Nonetheless, real-world simulation and role-playing are costly and require intensive planning of infrastructure and mobilization and coordination of multiple personnel [<xref ref-type="bibr" rid="ref10">10</xref>]. However, training verbal interactions in MR simulations for MFRs also comes with limitations and inherent problems. Social communication in digital simulations, and lately in MR, is trained with virtual agents, for example, avatars or conversational agents (CAs), that represent the victims in the simulations, that is, VPs. However, the verbal (written and spoken) interactions with VPs depend mostly on the state of the art of language technologies and thus have mostly been studied in health care with CAs that use scripted conversational flows or basic machine learning models [<xref ref-type="bibr" rid="ref11">11</xref>].</p>
        <p>In this context, the latest advancements with large language models (LLMs) and generative artificial intelligence (AI) allow low latency generation of sentences based on relatively small amounts of tailored contextual information to provide natural spoken interactions with computer systems for specific use cases, that is, prompting generative voice agents (GVAs) like ChatGPT [<xref ref-type="bibr" rid="ref12">12</xref>]. However, ChatGPT can be computationally expensive and untrustworthy [<xref ref-type="bibr" rid="ref13">13</xref>]. The question is whether we can use the performance and productivity of GVAs as an alternative for human role-players and previous VP technology in MR simulations to train MFRs. First, we must study the usability of GVAs in the context of VP training to find out how MFRs interact and perceive them. Hence, in this paper, we pose the following research questions (RQs): (1) How usable are verbal interactions with a GVA-based VP in MR emergency assessment and treatment training of MFRs? (RQ1) and (2) What is the overall perception of the GVA-VP’s voice quality simulating a victim of an accident? (RQ2)</p>
        <p>Answering these questions required us to integrate ChatGPT’s verbal interaction capabilities in a previously studied MR medical emergency training VP. Following that, it was necessary to collect MFRs’ data about the treatment of patients during emergency treatment in accidents to design and test a prompt that derived accurate responses from the GVA-VP. Finally, we conducted a formative evaluation of the feature. Consequently, this work can guide effective prompt engineering, generative AI interaction design, and optimization of GVAs in MR training for MFRs.</p>
      </sec>
      <sec>
        <title>State-of-the-Art</title>
        <sec>
          <title>Advantages and Disadvantages of CAs</title>
          <p>CAs are used in a variety of health care applications, but conventionally, their implementation ranges from simple, specific domain knowledge and preprogrammed conversation flows to smart, predictive machine learning models that do not reach the spontaneous and somewhat improvised human-level communication abilities [<xref ref-type="bibr" rid="ref11">11</xref>,<xref ref-type="bibr" rid="ref14">14</xref>]. This inherent characteristic of traditional CAs is a limitation to the development of more realistic, natural verbal interactions. Therefore, CAs are mostly delivered through text-based mobile devices apps, web or desktop-based software for command-response, question-answer use cases in medical practice, for example, to support patients in handling information like scheduling appointments or giving test results, and, to a lesser degree, health care professionals with various degrees of accuracy on education and training; triage; diagnosis of respiratory issues; mental health problems, eating disorders, and sexual health problems [<xref ref-type="bibr" rid="ref14">14</xref>,<xref ref-type="bibr" rid="ref15">15</xref>]. However, even with restricted language communication ability, VPs have shown moderately effective results in medical education when screen-based, virtual learning environments (VLEs) or similar included instructional interventions and postactivity human feedback are used [<xref ref-type="bibr" rid="ref16">16</xref>-<xref ref-type="bibr" rid="ref18">18</xref>]. In training involving VLEs, VPs supported the improvement of verbal communication skills of students mostly in history taking and delivery of bad news, procedural skills, and clinical reasoning, and also of usability and satisfaction ratings [<xref ref-type="bibr" rid="ref17">17</xref>]. 
The drawbacks in the usability of CA-based VPs and VLEs in real-world contexts include poor understanding because of limited vocabulary, voice recognition accuracy, error management of word inputs, general repetitive interactions, and lack of variability in conversations [<xref ref-type="bibr" rid="ref18">18</xref>,<xref ref-type="bibr" rid="ref19">19</xref>].</p>
        </sec>
        <sec>
          <title>Advantages and Disadvantages of GVAs</title>
          <p>The recent availability to the public of generative AI architecture integrated with LLMs, such as ChatGPT, provides potential ways to ameliorate the limitations of previous CA-based VPs with more natural verbal communication between humans and computer systems [<xref ref-type="bibr" rid="ref12">12</xref>,<xref ref-type="bibr" rid="ref20">20</xref>]. LLMs are trained on vast amounts of data that humans have generated in digital form throughout the years. These data allow LLMs to perform natural language processing (NLP) tasks, that is, humanlike text generation by predicting the likelihood of a word based on the previous one, context understanding, answering questions, language translation, or sentiment analysis [<xref ref-type="bibr" rid="ref12">12</xref>,<xref ref-type="bibr" rid="ref13">13</xref>,<xref ref-type="bibr" rid="ref20">20</xref>]. GPT, a specific case of LLM, can further extend its functionality beyond NLP tasks. For example, Dall-E is based on GPT-3 and performs image generation tasks with text input, and ChatGPT-4 accepts images as input to generate text, leveraging computer vision, image recognition, and NLP [<xref ref-type="bibr" rid="ref20">20</xref>,<xref ref-type="bibr" rid="ref21">21</xref>].</p>
          <p>Gaming studies and industry quickly understood the potential capabilities of GPT models in their fields. Nonplayer characters (NPCs) powered by GPT models are now capable of more engaging and natural verbal interactions, that is, written and spoken, with players. The history of the interactions is also stored in the memory of GPT models to create dynamic behavior of NPCs based on previous exchanges and contexts [<xref ref-type="bibr" rid="ref22">22</xref>]. Furthermore, the personality of the characters can also be reinforced with modeled voices that match a desired role [<xref ref-type="bibr" rid="ref23">23</xref>]. Hence, enabling NPCs with more natural conversational interactions can also be applied in MR triage training. To the best of our knowledge, there are currently no implementations or studies of GPT in MR training for medical emergency assessment and treatment, for example, triage training for MFRs. Furthermore, OpenAI ChatGPT has public access through an application programming interface (API) that is well documented and allows configuration of the prompt, language, voice quality, and other parameters [<xref ref-type="bibr" rid="ref24">24</xref>].</p>
          <p>However, a negative side of ChatGPT models is that they are computationally expensive, that is, they require billions of parameters to process prompts and generate responses. Consequently, responses take some time to be produced depending on the constraints given in the prompt, the size of the answer, the traffic on the server, and other factors. Some of these elements can be optimized through OpenAI’s API, but the delay is always noticeable. Another drawback of ChatGPT is its proneness to fabricate facts, that is, hallucinations. This issue negatively affects its trustworthiness when factual information is needed. Finally, the use of ChatGPT represents a security and privacy risk because information is sent to remote servers for processing. These are 3 general limitations of ChatGPT, but several others exist [<xref ref-type="bibr" rid="ref12">12</xref>,<xref ref-type="bibr" rid="ref13">13</xref>,<xref ref-type="bibr" rid="ref21">21</xref>].</p>
          <p>There are, nevertheless, approaches to solve the delay produced by ChatGPT’s response processing time, its proneness to fabricate inaccurate responses given its probabilistic nature, and the security risks from remote data processing. For instance, breaking spoken sentences into chunks for transcription, that is, incremental automatic speech recognition (ASR), produces faster results in conversational AI [<xref ref-type="bibr" rid="ref25">25</xref>]. More importantly, looking back into CAs’ preprogrammed conversation flows gives insights into synergizing the capabilities of CAs and GVAs to create voice agents. Incorporating knowledge graphs and ontologies, that is, structured data with labeled meaning for predefined decision trees found in CAs, to work in parallel with a GVA’s model, could accelerate the response time and enable the control of responses’ content when necessary [<xref ref-type="bibr" rid="ref26">26</xref>]. Furthermore, since the release of Meta’s open-source LLM LLaMA, many new alternatives to ChatGPT have emerged. These fully functional GPT models perform similarly to ChatGPT, can run locally on home computers, and can be stripped down to any use case’s knowledge base [<xref ref-type="bibr" rid="ref27">27</xref>].</p>
        </sec>
      </sec>
    </sec>
    <sec sec-type="methods">
      <title>Methods</title>
      <sec>
        <title>VP MR Scenario</title>
        <p>The main audiovisual stimulus consisted of a VP representing a male victim of a traffic accident in a 3D scene made with Unity3D 2022.3.7f1 LTS. The scene consisted of a city’s street intersection where the aftermath of a traffic accident included crashed vehicles, MFRs, and victims (<xref rid="figure2" ref-type="fig">Figure 2</xref>). This scenario was the result of several workshops with MFRs from different emergency medical service (EMS) organizations across Europe as part of the project Med1stMR [<xref ref-type="bibr" rid="ref2">2</xref>]. The 3D scene was presented to the MFRs through the Varjo XR-3 HMD, which used chroma key compositing to display virtual elements on a physical green manikin lying on a green sheet on the floor. MFRs could see an avatar overlaid on the manikin as well as real-world elements if they were not green, along with their own hands, body, and medical tools (<xref rid="figure1" ref-type="fig">Figure 1</xref>). Vive Trackers (version 3.0) were placed on the head, hands, feet, and groin of the manikin and mapped to the corresponding parts of the VP’s avatar. This allowed the MFRs to freely move the manikin and thus the VP. Both the green screen composition and tracking provided an immersive, tangible MR experience. To start the scenario, a bleeding cut on the VP’s left leg indicated priority for treatment. MFRs wrapped and secured a real tourniquet above the upper section of the wound after which the bleeding stopped. MFRs could communicate verbally with the VP at any time. Further visual inspection of the naked torso of the VP’s avatar revealed a hematoma on the right side of the chest. At the same time, the MFRs could lift the left hand of the VP to see the values on a pulse oximeter on the middle finger indicating a normal state of the VP’s heart rate at around 90 bpm and oxygen saturation at approximately 95%. When close to the VP, the MFRs could hear the VP’s groans and sounds of abnormal efforts to breathe. 
Sounds of pain were also automatically reproduced when the MFRs touched the wounded areas. At this point, MFRs would perform auscultation and hear rales with a real stethoscope. The VP’s skin coloration then started to turn blue, and the values of the pulse oximeter changed accordingly, that is, the heart rate started to rise and oxygen saturation dropped. At the same time, the VP started closing its eyes, no longer emitting sounds or responding to verbal interactions. MFRs then provided the VP with manual ventilation using a real resuscitator bag, which caused the VP’s oxygen saturation, heart rate, skin coloration, and verbal responsiveness to become re-established. At this moment, the MFRs were informed that the scenario had ended.</p>
        <fig id="figure2" position="float">
          <label>Figure 2</label>
          <caption>
            <p>The accident scene presented to the medical first responders. (A) The overlaid 3D avatar simulating the victim of an accident, that is, the virtual patient. (B) Additional nonplayer characters that were already being attended to by virtual medical first responders. (C) Highlighted vehicles, buildings, and other objects for visual context.</p>
          </caption>
          <graphic xlink:href="formative_v8i1e58623_fig2.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
        </fig>
      </sec>
      <sec>
        <title>GPT Integration</title>
        <sec>
          <title>Overview</title>
          <p>Verbal interactions were enabled on the injured VP’s avatar with the OpenAI ChatGPT model (GPT-3.5 Turbo), which was used as the central AI response generator. The Unity virtual environment communicated with the cloud-based ChatGPT’s generative AI model with an adapted asset that wrapped ChatGPT’s APIs using C# scripts. This asset exposed ChatGPT’s settings in the Unity Editor using a private API key and allowed us to set ChatGPT’s model, voice, prompt, temperature, etc. <xref rid="figure3" ref-type="fig">Figure 3</xref> shows a diagram summarizing the processing chain of a common ChatGPT implementation.</p>
          <p>The steps summarized in <xref rid="figure3" ref-type="fig">Figure 3</xref> are explained in detail to provide a better understanding of the technical and design aspects of the implementation and potential optimizations in the communication between the MFRs and the VP.</p>
          <fig id="figure3" position="float">
            <label>Figure 3</label>
            <caption>
              <p>(1) The microphone picks up the voice of the medical first responder (MFR) and the automatic speech recognition (ASR) module transcribes it. (2) The text message is then added to the prompted attributes of an accident victim and both are sent to the ChatGPT model. (3) The generative voice model estimates the likelihood of the sequence of words to generate a written answer given its training data. (4) The answer is converted to audio by the text-to-speech engine with the characteristics of the modeled voice and then is output to the MFR.</p>
            </caption>
            <graphic xlink:href="formative_v8i1e58623_fig3.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
          </fig>
          <p>In the first step, the MFR’s voice is initially picked up by a Microsoft LifeChat LX-3000 headset with a noise-canceling, switchable microphone. The microphone is placed in front of the mouth and accurately triggers recording with the first utterances when a set threshold in Unity’s interface is reached. This headset is also lightweight and comfortable to wear. The recordings are sent for processing to OpenAI Whisper, a versatile multilanguage ASR module capable of converting speech to text (STT). Whisper is an accurate STT engine, but sometimes it can output transcriptions that are unrelated to the audio signal, that is, hallucinations. For example, it can pick up the sound of a hand hitting a desk and translate it into Korean characters or phonemes of the English language [<xref ref-type="bibr" rid="ref28">28</xref>,<xref ref-type="bibr" rid="ref29">29</xref>].</p>
          <p>Next, the generated text message is added to the voice agent’s role characteristics that have been previously set in ChatGPT’s prompt. The prompt is a constraining feature of the model that allows the app developer to specify required responses. For example, the emotional state of the voice agent can be specified in the prompt, for example, “Play the role of a sad person.” Therefore, the model will generate text according to the provided emotion. Otherwise, ChatGPT can produce more open and undesired responses. Prompting is an iterative process, and it can be tailored to a high degree. There are currently several guidelines for prompting ChatGPT with the goal of getting more accurate responses [<xref ref-type="bibr" rid="ref12">12</xref>,<xref ref-type="bibr" rid="ref21">21</xref>]. The ChatGPT Prompt section provides details on the prompt used in this study for the VP.</p>
          <p>The 2 pieces of information, that is, the MFR’s message and the prompt, are used by the cloud-based GPT model to generate a text-based response grounded on the likelihood of the sequence of words. ChatGPT has been trained with large amounts of text (LLM) and will respond by matching the criteria given by the user’s message, the prompt, and the temperature setting [<xref ref-type="bibr" rid="ref21">21</xref>]. The model used, GPT-3.5 Turbo, is trained with data up to 2021 [<xref ref-type="bibr" rid="ref13">13</xref>,<xref ref-type="bibr" rid="ref21">21</xref>].</p>
          <p>In the final step, a different module, that is, ElevenLabs, generates audio streams from ChatGPT’s written response. ElevenLabs’ text to speech (TTS) engine is also responsible for the voice quality, for example, the pitch, emphasis, and timing of the artificial voice. This TTS module has several options that can be accessed from its API: male and female voices, stability, and clarity. Nonetheless, ElevenLabs also has several limitations. The voice models it provides were designed for public presentations, and therefore are energetic and upbeat. The voice of a person whispering or crying is currently not possible to recreate artificially with ElevenLabs [<xref ref-type="bibr" rid="ref23">23</xref>]. Refer to the Voice Quality Model subsection for details on the construction of the VP’s voice.</p>
          <p>ChatGPT was prompted with characteristics of a traffic accident victim collected from MFRs’ studies and generated corresponding responses during spoken interactions. The voice quality was modeled to match the role and accompanying abnormal breathing sounds and groans.</p>
        </sec>
        <sec>
          <title>ChatGPT Prompt</title>
          <p>The GPT’s prompt was designed based on previous studies with MFRs and their descriptions of role-playing, triage systems, and victim’s characteristics in MCIs [<xref ref-type="bibr" rid="ref30">30</xref>]. These specifications about triage systems, physiological signals, behavior, and injuries were prompted in ChatGPT. Furthermore, to avoid GPT’s hallucinations, constraints were also prompted to only use the local language (German) and to respond “I don’t understand” to incomprehensible verbal commands or questions. The specific prompt was formulated in the German language, and we present the English translation as follows:</p>
          <disp-quote>
            <p>Your name is Tobi and you are a 28 years old male victim of an accident. A bus hit the corner of a building after avoiding two cars that crashed in the nearby roundabout. You were riding your bicycle with your girlfriend and the bus hit you. You were going to the park to meet some friends. Your girlfriend was riding next to you. You don’t know where your girlfriend is now. You have been lying on the ground for 20 minutes. You are now receiving assistance from medical first responders. You can hardly breathe, and it does not get better. You cannot be cured now. You do not feel good. You cannot feel good. You have to go to the hospital for immediate attention. You have a painful wound in your right ribs. You can hardly move. Your right leg is bleeding. Your vision is blurred. It is not possible for you to stand up or walk. You feel weak, tired, cold, sleepy, and drowsy. You have no allergies. You are not taking any medication. Respond only in German. You only understand German. Respond only by stuttering. Use only one 8-word sentence or less to respond. Stutter: “My ribs hurt” every minute. Never say: “Thank you.” Never answer: “Thank you.” If you don’t understand something, only answer with a stutter: “What?” or “I don’t understand you,” or “What are you saying?” Respond only by stuttering a maximum of an 8-word sentence. You are scared and anxious. Cry every minute: “Arrghh” or “Oh God help.” Respond only as if you were this character.</p>
          </disp-quote>
        </sec>
        <sec>
          <title>Voice Quality Model</title>
          <p>The ElevenLabs AI voice was trained using our own recorded speech samples in English with prosody matching the simulated victim’s condition, that is, a heavy breathing and voice conveying pain. We input 25 uncompressed WAV files of around 55 seconds in duration and high quality (48 kHz sample rate and 32 bits) into ElevenLabs web interface to train the voice synthesizer. A short part of the speech samples containing heavy breathing and groans was looped during the verbal interactions with the VP to fill the gap of ChatGPT’s response delay. However, even though the synthesized voice had a tame quality to it, it did not fully reflect a state of pain and suffering. This was expected given that most current TTS models target public speakers’ use cases for presentations, and thus, their models lack capabilities to whisper or cry. Stuttering instructions were added in the prompt to provide a suffering characteristic to the voice. The English-trained voice produced a foreign accent when the German-language prompt was applied to the ElevenLabs English version 1 model, which is normal in the context of international cities.</p>
        </sec>
      </sec>
      <sec>
        <title>Study Procedure</title>
        <p>Participants consisted of 24 MFRs from different EMS organizations from Austria and Germany. They were recruited from a network of European EMSs as part of the Med1stMR project [<xref ref-type="bibr" rid="ref2">2</xref>]. Consent for audiovisual capturing and participation was obtained from the participants invited for the study. The MFRs were then equipped with the HMD, headphones, and microphone and went through a short visual calibration process. They took a minute to familiarize themselves with the environment while they were introduced to the scenario previously described. The VP’s delay of responses was also explained to the MFRs and then they were instructed to approach and interact with the VP just as in real life using nonscripted, spontaneous natural language communication (<xref rid="figure4" ref-type="fig">Figure 4</xref>). The verbal interactions consisted of the MFRs’ own questions and instructions associated with emergency training. MFRs treated the VP according to the wounds and audiovisual feedback they received. When the MFRs felt ready, they stopped interacting. The MR experience was performed once and lasted from 6 to 10 minutes.</p>
        <p>Afterward, version 2 of the Mean Opinion Scale-Expanded (MOS-X2) questionnaire was used to study the participant’s perception of voice quality. The Subjective Assessment of Speech System Interfaces (SASSI) questionnaire was used to question the MFRs regarding the usability of the voice interactions. Open-ended questions and conversations with the participants were conducted after completing the questionnaires. Notes were made during this process where participants reflected on their experience with the GVA. Each participant took approximately 30 minutes to complete the entire procedure. Audiovisual screen captures were recorded as well. The design of the experiment for the assessment of a GVA’s voice quality and the usability of the GVAs’ interactions was based on the existing evidence of studies using the same and similar methods, for example, the interactive short conversation test and the International Telecommunication Union quality of experience [<xref ref-type="bibr" rid="ref31">31</xref>-<xref ref-type="bibr" rid="ref33">33</xref>].</p>
        <fig id="figure4" position="float">
          <label>Figure 4</label>
          <caption>
            <p>(A) The overlaid 3D avatar simulating the virtual patient (VP), the victim of an accident, with generative voice, breathing sound patterns, and groans. (B) A participating medical first responder talking to the VP using a noise-canceling microphone that helped to focus on the verbal interactions. Responses from the VP and other spatialized sound elements, for example, background conversation, the city environment, and the VP’s groans were reproduced through headphones.</p>
          </caption>
          <graphic xlink:href="formative_v8i1e58623_fig4.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
        </fig>
      </sec>
      <sec>
        <title>Measurements</title>
        <p>Two questionnaires were used to assess the user experience of the trainees: the MOS-X2 [<xref ref-type="bibr" rid="ref34">34</xref>] for investigating the experience of voice characteristics, and the SASSI [<xref ref-type="bibr" rid="ref35">35</xref>] for general voice-related user experience.</p>
        <p>The MOS-X2 consists of 4 Likert items, that is, intelligibility, naturalness, prosody, and social impression. Each item had a scale from 1 to 10 (1=extremely unnatural and 10=perfectly natural).</p>
        <p>The SASSI was used to assess the usability of voice interactions. SASSI measures 6 dimensions of usability, that is, system response accuracy, likeability, cognitive demand, annoyance, habitability, and speed, with 39 Likert items containing scales from 1 to 7, that is, strongly disagree to strongly agree.</p>
      </sec>
      <sec>
        <title>Ethical Considerations</title>
        <p>All the studies within the MED1stMR project followed the ethical guidelines and procedures established at the beginning of the project [<xref ref-type="bibr" rid="ref36">36</xref>]. The studies were approved by the ethics committee of the Faculty of Behavioural and Empirical Cultural Sciences at the University of Heidelberg, Germany (AZ BEU 2023), and managed by the appointed ethical advisor. Informed consent was obtained from all participants involved in all field trial studies conducted within the MED1stMR project, including the study described in this paper. Participants for this evaluation were local MFRs who were invited to participate as trainees via email. Participation of MFRs in this study was voluntary, and the invitation was sent to recruits through the different EMS organization networks and submitted for an ethics approval. They were informed that (1) the study would be recorded, transcribed, and analyzed; (2) their identity would remain confidential; and (3) that participation was voluntary and could be withdrawn at any time.</p>
        <p>In addition, all researchers who were involved in the study were listed. This included the researchers’ names, their roles in the study or project, affiliations, and contact information (eg, email address and phone number). This step was necessary to establish the transparency of the research process as well as reflect who had access to the generated data. This information was also to be provided to the participants so that they could make a fully informed decision on whether they were willing to share their data with the parties involved. All partners in the Med1stMR project provided detailed descriptions of their study before it could be voted on regarding potential ethical concerns. Furthermore, an explanation of how the data are stored (eg, on a hard drive or secured server) and who will be able to access the data (and how) was specified in the consent form signed by recruits. Finally, the collection of potentially identifiable information (eg, video recordings or email addresses) was detailed. Video and audio recordings were scheduled for deletion after 90 days, and partner employees were also obligated to delete locally saved copies of these materials. The Austrian Institute of Technology is hosting a protected SharePoint server on its IT premises, located in Giefinggasse 4, A-1210, Vienna, for project data storage. The data are only saved on the local servers and not transferred to the Microsoft cloud.</p>
      </sec>
    </sec>
    <sec sec-type="results">
      <title>Results</title>
      <p>We performed the study with 24 MFRs (women: n=4, 17% and men: n=20, 83%) aged 19 to 50 (mean 30.61, SD 9.21) years. Central tendency analysis was performed for 2 questionnaires.</p>
      <sec>
        <title>Perception of Voice Quality</title>
        <p>Results of the perception of the voice quality with the MOS-X2 questionnaire indicated a moderate humanlike perception of the GVA’s voice with high dispersion values. Intelligibility was rated with a median value of 4 (IQR 3.75-6), mode 4, and a dispersion with a range of 10. Naturalness was rated with a median value of 4 (IQR 3-7), mode 3, and range R of 9. Prosody was rated with a median value of 4.5 (IQR 3-6), mode 4, and range R of 8. Social impression was rated with a median value of 4.5 (IQR 3.75-6), mode 6, and range R of 9. A graphical representation of these results is shown in <xref rid="figure5" ref-type="fig">Figure 5</xref>.</p>
        <fig id="figure5" position="float">
          <label>Figure 5</label>
          <caption>
            <p>The Mean Opinion Scale-Expanded, version 2 (MOS-X2) rating scale measures the perception of voice quality. Overall, the 4 dimensions, that is, intelligibility, naturalness, prosody, and social impression, showed moderate ratings and high dispersions. The perception of the naturalness of the voice was rated the highest.</p>
          </caption>
          <graphic xlink:href="formative_v8i1e58623_fig5.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
        </fig>
      </sec>
      <sec>
        <title>Usability of Voice Interactions</title>
        <p>Results of the usability of the voice interactions with the SASSI questionnaire indicated that likeability was rated with a median value of 4.50 (IQR 3.97-5.91) and a range of 3.63. Cognitive demand was rated with a median value of 4.80 (IQR 4.20-5.40) and a range of 2.20. Annoyance was rated with a median value of 3.60 (IQR 2.50-4) and a range of 3.60. Habitability was rated with a median value of 4.25 (IQR 4-4.81) and a range of 3.50. Speed was rated with a median value of 4 (IQR 4-4.50) and a range of 1.50. A graphical representation of these results is shown in <xref rid="figure6" ref-type="fig">Figure 6</xref>. The usability measures indicated good overall interaction performance, but there is room for improvement in all 6 dimensions.</p>
        <p>Both questionnaires supported the integration of ChatGPT in VPs for MR emergency assessment and treatment training and will be further discussed in the subsequent section. Similarly, the interviews with the MFRs and the analysis of audiovisual captures yield intriguing insights that merit further discussion.</p>
        <fig id="figure6" position="float">
          <label>Figure 6</label>
          <caption>
            <p>The Subjective Assessment of Speech System Interfaces (SASSI) scale measures the usability of the generative voice agent’s (GVA) interactions. All 5 dimensions show a moderately high usability of the GVA, especially on the likeability scale.</p>
          </caption>
          <graphic xlink:href="formative_v8i1e58623_fig6.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
        </fig>
      </sec>
    </sec>
    <sec sec-type="discussion">
      <title>Discussion</title>
      <sec>
        <title>Principal Findings</title>
        <sec>
          <title>Overview</title>
          <p>The results show mixed experiences of the MFRs with the GVA-VP. On one hand, many participants were impressed and excited about the idea of interacting with a virtual agent, not only because of the novelty but also because it enabled training without the need for role players. On the other hand, several technical limitations still stood in the way of a completely seamless social experience. Accordingly, we divided the Discussion section into 2 parts—verbal interaction and technical insights.</p>
        </sec>
        <sec>
          <title>Verbal Interactions Analysis</title>
          <p>MFRs varied greatly in their communication approaches. In total, 5 (21%) of the 24 MFRs had very few verbal interactions with the VP, but 3 (12%) MFRs detailed every step of their assessment and treatment and were continuously communicating with the VP. Furthermore, 3 (12%) MFRs approached the VP with the question “Can you hear me?” to which the VP answered with variations of “AAhh. Pain. Hospital. Help. Please. Treatment.” or “Oh God help, I cannot breathe.” while exhibiting great breathing efforts. Even though the participating MFRs were informed about the delay of approximately 3 seconds for the GVA’s response, the verbal interactions usually had overlaps. The participants sometimes took a brief pause after their initial verbal interaction and then continued with the second question expecting the GVA to answer quickly. Given that ChatGPT takes 2 to 3 seconds to respond, the participants’ second question thereby overlapped with ChatGPT’s response. During these overlaps or when the microphone picked up noises, for example, tools hitting the microphone, ChatGPT produced sentences like “I don’t understand, please help.” as instructed in the prompt to manage errors of word inputs. This created some confusion among participants while taking turns to speak and was reflected in the results of the SASSI questionnaire. For instance, the participants’ rating was close to 5 (out of 7) in habitability (the match between the language that humans use and the language that the computer accepts) and speed (the VP’s response time), indicating that sometimes the system did not do what the MFRs intended, they were not sure what the system was doing, or they noticed the slow response speed. ChatGPT’s response delay is the most crucial issue to solve, and a potential solution is discussed subsequently in this section.</p>
          <p>In a few cases, the MFRs waited for enough time to get an answer and acted according to it. For example, 4 (17%) of the 24 participants asked the GVA either “What happened?” “Does it hurt? Where does it hurt?” or “Can you breathe well?” and waited for an answer. The GVA accurately responded “Accident with the bus,” “Yes, hurts, right ribs,” and “I cannot breathe” correspondingly. Then, the MFRs continued inspection in the indicated or relevant area. This example shows the need for instructions and continuous monitoring to better manage turn-taking in the verbal interactions between the MFRs and the VP to fully exploit GPT’s current capabilities.</p>
          <p>A word on the perception of the voice produced by the TTS engine is also necessary. ElevenLabs is a powerful and realistic TTS and voice cloning technology, but currently, it only provides voices for presentations like podcasts and advertisements. This will change soon, but we found that training ElevenLabs with a voice conveying suffering, breathing difficulties, and groans worked adequately to resemble a human victim with an injury on the ribs who has difficulties producing speech. This was also the motivation to add stutters. These qualities are reflected in the MOS-X2 ratings regarding voice characteristics, which were rated moderately by the participants (eg, intelligibility and naturalness, both with a median of 4), which show room for improvement. A few MFRs mentioned the American accent of the German-speaking GVA as a factor that affected understanding, but 2 (8%) MFRs stated their positive perception of the accent. This may be a desirable feature since victims can have problems with verbal communication in real-world scenarios and this can be the reason for the higher scores of naturalness and likeability. The multicultural characteristic of international cities was a deciding criterion when choosing a voice with an accent. By contrast, the ElevenLabs Multilingual version 2 model produces natural German speech without an accent and can reflect the characteristics of the training voice samples with more accuracy, that is, it has a more natural voice when conveying pain. Sounds of sirens and NPCs talking in the background also have to be considered in a more in-depth communication training study of the subjective perception of the acoustic environment or soundscape and its potential influence on cognitive performance [<xref ref-type="bibr" rid="ref37">37</xref>].</p>
        </sec>
        <sec>
          <title>Technical Insights</title>
          <p>Furthermore, as described in the Methods section, there are 4 processes involved in the use of ChatGPT for the generation of humanlike voice communication, that is, ASR or STT transcription, message and prompt analysis, textual response generation, and TTS transformation. Each of these processes has inherent difficulties that deserve a deeper analysis, but benchmarking our implementation of the GVA-VP will be part of a separate study. However, during this study, we identified ways of ameliorating the GVA-VP’s implementation process.</p>
          <p>The first challenge relates to the characteristics of the microphone and how this affects the OpenAI’s STT module, that is, Whisper, and thus the overall usability. Noise-canceling headphones or a silent environment guarantee that the GVA’s voice and the sound environment of the MR experience are adequately mixed for an optimal and realistic experience for the user. Moreover, the passive noise-canceling feature of headphones’ earcups prevents sound leakage from the speakers to the microphone. Similarly, a noise-canceling microphone helps to only pick up the voice required to be sent to the STT engine. Here, the threshold value at which the system starts recording speech is very important, because it must be leveled in accordance with the user’s breathing sounds that are also being picked up by the microphone and could confuse Whisper and trigger a response. Beamforming microphones in the body of the headphones could be a good solution. An easy-to-access button to activate or deactivate the microphone is also possible but may interfere with the MFR’s freedom of movement to perform treatment using their tools.</p>
          <p>In the context of extended reality, encompassing mixed, virtual, and augmented reality, the spatialization of sound is a key feature. For instance, in MR triage training, users must get closer to the GVA representing the victim to hear its voice. This mimics real-world phenomena, such as distance attenuation and sound source localization. Such spatialized soundscapes encourage head movements, which can be spontaneous and abrupt. Consequently, it is crucial for the headset to offer a secure yet comfortable fit.</p>
          <p>Furthermore, ChatGPT prompting displayed some common challenges also present in this work: the prompt must always be very specific to avoid inaccurate responses that are generated because of unintended noises or imprecise requests. Humans, in particular MFRs, can behave in unexpected ways and the GVAs must be provided with a fallback mechanism to respond. This can be as simple as responding “I don’t understand” to incomprehensible verbal commands or questions as previously described, but it is possible to react differently to different types of sounds, for example, responding “What?” when mistakenly misunderstanding a given language accent or pronunciation, and responding “Say again” when answering to out-of-context questions or commands. This can add further variability and realism to the interactions.</p>
          <p>Finally, a discussion about the implementation of knowledge graphs and ontologies is important because it has the potential to serve as a robust parallel voice generation mechanism that provides quick, accurate, genuine, and verifiable information. When we abstract from the implementation of GVAs in MR medical emergency training, we can see that the fundamental problem with AI is that we do not know if what it is saying is accurate, because there are no tools developed to understand why it reaches a particular decision, that is, the well-known problem of AI explainability. The common understanding is that AI functionality is years ahead of explainable AI. ChatGPT can produce responses that contain false information. While this was not a concern in our study, it is a major problem for future GVAs and should be attended to. The integration of ontologies in GVAs then adopts a crucial role; however, there is more. The domain of MCI has no general formalization or standards. There are many triage systems to prioritize the victims’ need for resources, but organizational and staff interoperability is lacking.</p>
        </sec>
      </sec>
      <sec>
        <title>Limitations and Future Studies</title>
        <p>While an important and original contribution of this paper is showing the MFRs’ experience, the usability of GPT-based AI in a VP, and its capacity to resemble human communication for MR medical emergency training, the goal to achieve a humanlike verbal interaction still needs a lot of work. GPT verbal interactions are still limited by a noticeable latency, reduced availability of voice prosody, natural conversational turn-taking, and autonomous speech generation strategies. This study was very helpful in identifying these drawbacks of the implementation with MFRs, who have immensely contributed to the road map of realistic GVAs for MCI simulations. Future studies will include testing an improved version of the GVA-VP inside the MR experience.</p>
        <p>Therefore, we plan to implement a local AI server with optimized domain-knowledge LLM for faster response times. It is then necessary to further collect MFRs’ requirements to construct an ontology for an outlook of how an MCI’s knowledge base formalization could work. Once an ontology is in the standardized Web Ontology Language format, it can be visualized with tools like Web-Based Visualization of Ontologies and connected to other systems for scalability to extend the potential of MFR training. Ontologies can support the functionality of GVAs by reducing the response delay and providing accurate answers. For example, a virtual MFR using GPT could be integrated with domain-knowledge standards to assist human MFR trainees.</p>
      </sec>
      <sec>
        <title>Comparison With Prior Work</title>
        <p>To the best of our knowledge, there are no studies of GPT implementation in MR simulations for medical emergency training of MFRs. Baetzner et al [<xref ref-type="bibr" rid="ref3">3</xref>] presented a review of immersive VR (n=3) and MR (n=1) studies and their effectiveness in preparing MFRs for crises, but there is no mention of verbal communication or verbal interactions with VPs. Real Response Blueroom [<xref ref-type="bibr" rid="ref4">4</xref>] is a commercial application of physical manikins integrated with sensors and humanlike features, for example, bleeding, to resemble a victim without a voice within a VR environment. In our previous study, we used a similar setup with a green torso overlaid with a 3D avatar to study tangible interactions in MR and used human role players as voice actors to communicate with the MFR trainees [<xref ref-type="bibr" rid="ref2">2</xref>]. Moreover, there is a wide variety of VP studies dating back over a decade that ranges from pen and paper, screen-based questionnaires and avatars, and more recently, rule-based and basic machine learning CAs in text or voice apps on mobile devices or in smart speakers [<xref ref-type="bibr" rid="ref1">1</xref>,<xref ref-type="bibr" rid="ref14">14</xref>-<xref ref-type="bibr" rid="ref19">19</xref>]. Overall, the VR and MR studies show their cost-efficiency, reproducibility, safety, and immersion benefits, and recent CAs in VP studies have shown moderately effective results in medical education and services of different types but not in medical emergency training for MFRs [<xref ref-type="bibr" rid="ref14">14</xref>-<xref ref-type="bibr" rid="ref18">18</xref>]. 
Similarly, as shown in the Introduction section, state-of-the-art CAs show poor understanding because of limited vocabulary, voice recognition accuracy, error management of word inputs, general repetitive interactions, and lack of variability in conversations [<xref ref-type="bibr" rid="ref18">18</xref>,<xref ref-type="bibr" rid="ref19">19</xref>]. This paper supports some of those previous findings and shows the potential to overcome conventional CAs’ drawbacks with LLM’s rich vocabulary, high voice recognition accuracy of Whisper, good capabilities of error management of word input, and variability in responses with prompt engineering.</p>
      </sec>
      <sec>
        <title>Conclusions</title>
        <p>A GVA based on OpenAI ChatGPT was integrated into the 3D avatar of a VP that represented a victim in an MR simulation for medical emergency assessment and treatment training of MFRs. The perception of the GVA’s voice quality and its usability were studied with MFRs to determine if the artificial voice agent could be effectively used in emergency training and if its voice quality matched that of an accident victim. The results showed that the MFR participants had a moderately high perception of naturalness for the GVA’s voice quality and equal likeability perception for the GVA’s usability. Moreover, voice quality measures of intelligibility, prosody, and social impression appropriateness were moderate, pointing to paths of improvement and deeper analysis, for example, how to extend the TTS system’s ability to cry, whisper, or moan or determining if a victim with high intelligibility is desirable in emergency training. Therefore, it is reasonable to conclude that the GVA was usable in MR medical emergency training and resembled a victim of an accident to a moderate degree. Furthermore, the usability of the GVA was accurate due to its state-of-the-art capabilities; it was engaging and required an appropriate perceived level of cognitive load. Reports of delays in responses and overlapping verbal interactions indicated a need for a faster system and the development of conversational turn-taking strategies. We can then further support previous findings that showed the need for instructional interventions to fully take advantage of the usability of GVAs in medical emergency training of MFRs. This study constitutes a novel contribution to MR and MCI triage training, describing a system that potentially performs better than state-of-the-art CAs, which have limited communication capacity and thus are deterministic and unnatural.</p>
        <p>The use of GVAs to replace role-players in MR training offers numerous advantages. Costs are significantly reduced as there is no longer a need to hire and train human role-players, and at the same time, this increases scalability to simulate a larger number of patients with injuries and thereby helps accommodate multiple participants. Virtual agents ensure consistent performance and enable standardized training experiences. They can be customized to simulate different characters and behaviors, and scenarios can be tailored to specific training objectives. In addition, GVAs provide a safe environment for trainees, enable objective performance evaluation, and support repetition and iteration. These benefits improve the effectiveness, efficiency, and accessibility of MR training programs.</p>
      </sec>
    </sec>
  </body>
  <back>
    <app-group/>
    <glossary>
      <title>Abbreviations</title>
      <def-list>
        <def-item>
          <term id="abb1">AI</term>
          <def>
            <p>artificial intelligence</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb2">API</term>
          <def>
            <p>application programming interface</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb3">ASR</term>
          <def>
            <p>automatic speech recognition</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb4">CA</term>
          <def>
            <p>conversational agent</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb5">EMS</term>
          <def>
            <p>emergency medical service</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb6">GVA</term>
          <def>
            <p>generative voice agent</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb7">HMD</term>
          <def>
            <p>head-mounted display</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb8">LLM</term>
          <def>
            <p>large language model</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb9">MCI</term>
          <def>
            <p>mass casualty incident</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb10">MFR</term>
          <def>
            <p>medical first responder</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb11">MOS-X2</term>
          <def>
            <p>Mean Opinion Scale-Expanded, version 2</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb12">MR</term>
          <def>
            <p>mixed reality</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb13">NLP</term>
          <def>
            <p>natural language processing</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb14">NPC</term>
          <def>
            <p>nonplayer character</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb15">RQ</term>
          <def>
            <p>research question</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb16">SASSI</term>
          <def>
            <p>Subjective Assessment of Speech System Interfaces</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb17">STT</term>
          <def>
            <p>speech to text</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb18">TTS</term>
          <def>
            <p>text to speech</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb19">VLE</term>
          <def>
            <p>virtual learning environment</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb20">VP</term>
          <def>
            <p>virtual patient</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb21">VR</term>
          <def>
            <p>virtual reality</p>
          </def>
        </def-item>
      </def-list>
    </glossary>
    <ack>
      <p>This study was part of the project MED1stMR, which has received funding from the European Union’s Horizon 2020 Research and Innovation Programme (grant 101021775). The content reflects only the MED1stMR consortium’s view. The Research Executive Agency and the European Commission are not liable for any use that may be made of the information contained herein.</p>
    </ack>
    <notes>
      <sec>
        <title>Data Availability</title>
        <p>The datasets generated or analyzed during this study are available from the corresponding author upon reasonable request.</p>
      </sec>
    </notes>
    <fn-group>
      <fn fn-type="conflict">
        <p>None declared.</p>
      </fn>
    </fn-group>
    <ref-list>
      <ref id="ref1">
        <label>1</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Cook</surname>
              <given-names>DA</given-names>
            </name>
            <name name-style="western">
              <surname>Triola</surname>
              <given-names>MM</given-names>
            </name>
          </person-group>
          <article-title>Virtual patients: a critical literature review and proposed next steps</article-title>
          <source>Med Educ</source>
          <year>2009</year>
          <month>04</month>
          <volume>43</volume>
          <issue>4</issue>
          <fpage>303</fpage>
          <lpage>11</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://doi.org/10.1111/j.1365-2923.2008.03286.x"/>
          </comment>
          <pub-id pub-id-type="doi">10.1111/j.1365-2923.2008.03286.x</pub-id>
          <pub-id pub-id-type="medline">19335571</pub-id>
          <pub-id pub-id-type="pii">MED3286</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref2">
        <label>2</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Uhl</surname>
              <given-names>JC</given-names>
            </name>
            <name name-style="western">
              <surname>Gutierrez</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Regal</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Schrom-Feiertag</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Schuster</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Tscheligi</surname>
              <given-names>M</given-names>
            </name>
          </person-group>
          <article-title>Choosing the right reality: a comparative analysis of tangibility in immersive trauma simulations</article-title>
          <source>Proceedings of the 2024 CHI Conference on Human Factors in Computing Systems</source>
          <year>2024</year>
          <conf-name>CHI '24</conf-name>
          <conf-date>May 11-16, 2024</conf-date>
          <conf-loc>Honolulu, HI</conf-loc>
          <fpage>1</fpage>
          <lpage>17</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://doi.org/10.1145/3613904.3641912"/>
          </comment>
          <pub-id pub-id-type="doi">10.1145/3613904.3641912</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref3">
        <label>3</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Baetzner</surname>
              <given-names>AS</given-names>
            </name>
            <name name-style="western">
              <surname>Wespi</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Hill</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Gyllencreutz</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Sauter</surname>
              <given-names>TC</given-names>
            </name>
            <name name-style="western">
              <surname>Saveman</surname>
              <given-names>BI</given-names>
            </name>
            <name name-style="western">
              <surname>Mohr</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Regal</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Wrzus</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Frenkel</surname>
              <given-names>MO</given-names>
            </name>
          </person-group>
          <article-title>Preparing medical first responders for crises: a systematic literature review of disaster training programs and their effectiveness</article-title>
          <source>Scand J Trauma Resusc Emerg Med</source>
          <year>2022</year>
          <month>12</month>
          <day>24</day>
          <volume>30</volume>
          <issue>1</issue>
          <fpage>76</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://sjtrem.biomedcentral.com/articles/10.1186/s13049-022-01056-8"/>
          </comment>
          <pub-id pub-id-type="doi">10.1186/s13049-022-01056-8</pub-id>
          <pub-id pub-id-type="medline">36566227</pub-id>
          <pub-id pub-id-type="pii">10.1186/s13049-022-01056-8</pub-id>
          <pub-id pub-id-type="pmcid">PMC9789518</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref4">
        <label>4</label>
        <nlm-citation citation-type="web">
          <article-title>Real XR projects</article-title>
          <source>Real Response</source>
          <access-date>2023-06-14</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://staging.realresponse.com.au/real-xr-projects/">https://staging.realresponse.com.au/real-xr-projects/</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref5">
        <label>5</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Bazyar</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Farrokhi</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Salari</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Safarpour</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Khankeh</surname>
              <given-names>HR</given-names>
            </name>
          </person-group>
          <article-title>Accuracy of triage systems in disasters and mass casualty incidents: a systematic review</article-title>
          <source>Arch Acad Emerg Med</source>
          <year>2022</year>
          <volume>10</volume>
          <issue>1</issue>
          <fpage>e32</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://europepmc.org/abstract/MED/35573710"/>
          </comment>
          <pub-id pub-id-type="doi">10.22037/aaem.v10i1.1526</pub-id>
          <pub-id pub-id-type="medline">35573710</pub-id>
          <pub-id pub-id-type="pmcid">PMC9078064</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref6">
        <label>6</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Khorram-Manesh</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Nordling</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Carlström</surname>
              <given-names>E</given-names>
            </name>
            <name name-style="western">
              <surname>Goniewicz</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Faccincani</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Burkle</surname>
              <given-names>FM</given-names>
            </name>
          </person-group>
          <article-title>A translational triage research development tool: standardizing prehospital triage decision-making systems in mass casualty incidents</article-title>
          <source>Scand J Trauma Resusc Emerg Med</source>
          <year>2021</year>
          <month>08</month>
          <day>17</day>
          <volume>29</volume>
          <issue>1</issue>
          <fpage>119</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://sjtrem.biomedcentral.com/articles/10.1186/s13049-021-00932-z"/>
          </comment>
          <pub-id pub-id-type="doi">10.1186/s13049-021-00932-z</pub-id>
          <pub-id pub-id-type="medline">34404443</pub-id>
          <pub-id pub-id-type="pii">10.1186/s13049-021-00932-z</pub-id>
          <pub-id pub-id-type="pmcid">PMC8369703</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref7">
        <label>7</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Barghi Shirazi</surname>
              <given-names>F</given-names>
            </name>
            <name name-style="western">
              <surname>Moslehi</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Rasouli</surname>
              <given-names>MR</given-names>
            </name>
            <name name-style="western">
              <surname>Masoumi</surname>
              <given-names>G</given-names>
            </name>
          </person-group>
          <article-title>A systematic literature review identifying the dimensions and components of simulation of the hospital emergency department during emergencies and disasters</article-title>
          <source>Med J Islam Repub Iran</source>
          <year>2022</year>
          <volume>36</volume>
          <fpage>82</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://europepmc.org/abstract/MED/36128272"/>
          </comment>
          <pub-id pub-id-type="doi">10.47176/mjiri.36.82</pub-id>
          <pub-id pub-id-type="medline">36128272</pub-id>
          <pub-id pub-id-type="pmcid">PMC9448461</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref8">
        <label>8</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Alsabri</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Boudi</surname>
              <given-names>Z</given-names>
            </name>
            <name name-style="western">
              <surname>Lauque</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Dias</surname>
              <given-names>RD</given-names>
            </name>
            <name name-style="western">
              <surname>Whelan</surname>
              <given-names>JS</given-names>
            </name>
            <name name-style="western">
              <surname>Östlundh</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Alinier</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Onyeji</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Michel</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Liu</surname>
              <given-names>SW</given-names>
            </name>
            <name name-style="western">
              <surname>Jr Camargo</surname>
              <given-names>CA</given-names>
            </name>
            <name name-style="western">
              <surname>Lindner</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Slagman</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Bates</surname>
              <given-names>DW</given-names>
            </name>
            <name name-style="western">
              <surname>Tazarourte</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Singer</surname>
              <given-names>SJ</given-names>
            </name>
            <name name-style="western">
              <surname>Toussi</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Grossman</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Bellou</surname>
              <given-names>A</given-names>
            </name>
          </person-group>
          <article-title>Impact of teamwork and communication training interventions on safety culture and patient safety in emergency departments: a systematic review</article-title>
          <source>J Patient Saf</source>
          <year>2022</year>
          <month>01</month>
          <day>01</day>
          <volume>18</volume>
          <issue>1</issue>
          <fpage>e351</fpage>
          <lpage>61</lpage>
          <pub-id pub-id-type="doi">10.1097/PTS.0000000000000782</pub-id>
          <pub-id pub-id-type="medline">33890752</pub-id>
          <pub-id pub-id-type="pii">01209203-202201000-00056</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref9">
        <label>9</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Dittmar</surname>
              <given-names>MS</given-names>
            </name>
            <name name-style="western">
              <surname>Wolf</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Bigalke</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Graf</surname>
              <given-names>BM</given-names>
            </name>
            <name name-style="western">
              <surname>Birkholz</surname>
              <given-names>T</given-names>
            </name>
          </person-group>
          <article-title>Primary mass casualty incident triage: evidence for the benefit of yearly brief re-training from a simulation study</article-title>
          <source>Scand J Trauma Resusc Emerg Med</source>
          <year>2018</year>
          <month>04</month>
          <day>27</day>
          <volume>26</volume>
          <issue>1</issue>
          <fpage>35</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://sjtrem.biomedcentral.com/articles/10.1186/s13049-018-0501-6"/>
          </comment>
          <pub-id pub-id-type="doi">10.1186/s13049-018-0501-6</pub-id>
          <pub-id pub-id-type="medline">29703219</pub-id>
          <pub-id pub-id-type="pii">10.1186/s13049-018-0501-6</pub-id>
          <pub-id pub-id-type="pmcid">PMC5923025</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref10">
        <label>10</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Mills</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Dykstra</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Hansen</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Miles</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Rankin</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Hopper</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Brook</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Bartlett</surname>
              <given-names>D</given-names>
            </name>
          </person-group>
          <article-title>Virtual reality triage training can provide comparable simulation efficacy for paramedicine students compared to live simulation-based scenarios</article-title>
          <source>Prehosp Emerg Care</source>
          <year>2020</year>
          <volume>24</volume>
          <issue>4</issue>
          <fpage>525</fpage>
          <lpage>36</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://doi.org/10.1080/10903127.2019.1676345"/>
          </comment>
          <pub-id pub-id-type="doi">10.1080/10903127.2019.1676345</pub-id>
          <pub-id pub-id-type="medline">31580178</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref11">
        <label>11</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Dingler</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Kwasnicka</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Wei</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Gong</surname>
              <given-names>E</given-names>
            </name>
            <name name-style="western">
              <surname>Oldenburg</surname>
              <given-names>B</given-names>
            </name>
          </person-group>
          <article-title>The use and promise of conversational agents in digital health</article-title>
          <source>Yearb Med Inform</source>
          <year>2021</year>
          <month>08</month>
          <volume>30</volume>
          <issue>1</issue>
          <fpage>191</fpage>
          <lpage>9</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="http://www.thieme-connect.com/DOI/DOI?10.1055/s-0041-1726510"/>
          </comment>
          <pub-id pub-id-type="doi">10.1055/s-0041-1726510</pub-id>
          <pub-id pub-id-type="medline">34479391</pub-id>
          <pub-id pub-id-type="pmcid">PMC8416202</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref12">
        <label>12</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Giray</surname>
              <given-names>L</given-names>
            </name>
          </person-group>
          <article-title>Prompt engineering with ChatGPT: a guide for academic writers</article-title>
          <source>Ann Biomed Eng</source>
          <year>2023</year>
          <month>12</month>
          <volume>51</volume>
          <issue>12</issue>
          <fpage>2629</fpage>
          <lpage>33</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://doi.org/10.1007/s10439-023-03272-4"/>
          </comment>
          <pub-id pub-id-type="doi">10.1007/s10439-023-03272-4</pub-id>
          <pub-id pub-id-type="medline">37284994</pub-id>
          <pub-id pub-id-type="pii">10.1007/s10439-023-03272-4</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref13">
        <label>13</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Ray</surname>
              <given-names>PP</given-names>
            </name>
          </person-group>
          <article-title>ChatGPT: a comprehensive review on background, applications, key challenges, bias, ethics, limitations and future scope</article-title>
          <source>Internet Things Cyber Phys Syst</source>
          <year>2023</year>
          <volume>3</volume>
          <fpage>121</fpage>
          <lpage>54</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://doi.org/10.1016/j.iotcps.2023.04.003"/>
          </comment>
          <pub-id pub-id-type="doi">10.1016/j.iotcps.2023.04.003</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref14">
        <label>14</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Tudor Car</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Dhinagaran</surname>
              <given-names>DA</given-names>
            </name>
            <name name-style="western">
              <surname>Kyaw</surname>
              <given-names>BM</given-names>
            </name>
            <name name-style="western">
              <surname>Kowatsch</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Joty</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Theng</surname>
              <given-names>YL</given-names>
            </name>
            <name name-style="western">
              <surname>Atun</surname>
              <given-names>R</given-names>
            </name>
          </person-group>
          <article-title>Conversational agents in health care: scoping review and conceptual analysis</article-title>
          <source>J Med Internet Res</source>
          <year>2020</year>
          <month>08</month>
          <day>07</day>
          <volume>22</volume>
          <issue>8</issue>
          <fpage>e17158</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.jmir.org/2020/8/e17158/"/>
          </comment>
          <pub-id pub-id-type="doi">10.2196/17158</pub-id>
          <pub-id pub-id-type="medline">32763886</pub-id>
          <pub-id pub-id-type="pii">v22i8e17158</pub-id>
          <pub-id pub-id-type="pmcid">PMC7442948</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref15">
        <label>15</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Giansanti</surname>
              <given-names>D</given-names>
            </name>
          </person-group>
          <article-title>The chatbots are invading us: a map point on the evolution, applications, opportunities, and emerging problems in the health domain</article-title>
          <source>Life (Basel)</source>
          <year>2023</year>
          <month>05</month>
          <day>05</day>
          <volume>13</volume>
          <issue>5</issue>
          <fpage>1130</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.mdpi.com/resolver?pii=life13051130"/>
          </comment>
          <pub-id pub-id-type="doi">10.3390/life13051130</pub-id>
          <pub-id pub-id-type="medline">37240775</pub-id>
          <pub-id pub-id-type="pii">life13051130</pub-id>
          <pub-id pub-id-type="pmcid">PMC10222535</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref16">
        <label>16</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Lee</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Kim</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Kim</surname>
              <given-names>KH</given-names>
            </name>
            <name name-style="western">
              <surname>Jung</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Jowsey</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Webster</surname>
              <given-names>CS</given-names>
            </name>
          </person-group>
          <article-title>Effective virtual patient simulators for medical communication training: a systematic review</article-title>
          <source>Med Educ</source>
          <year>2020</year>
          <month>09</month>
          <volume>54</volume>
          <issue>9</issue>
          <fpage>786</fpage>
          <lpage>95</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://doi.org/10.1111/medu.14152"/>
          </comment>
          <pub-id pub-id-type="doi">10.1111/medu.14152</pub-id>
          <pub-id pub-id-type="medline">32162355</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref17">
        <label>17</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Kononowicz</surname>
              <given-names>AA</given-names>
            </name>
            <name name-style="western">
              <surname>Woodham</surname>
              <given-names>LA</given-names>
            </name>
            <name name-style="western">
              <surname>Edelbring</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Stathakarou</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Davies</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Saxena</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Tudor Car</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Carlstedt-Duke</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Car</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Zary</surname>
              <given-names>N</given-names>
            </name>
          </person-group>
          <article-title>Virtual patient simulations in health professions education: systematic review and meta-analysis by the digital health education collaboration</article-title>
          <source>J Med Internet Res</source>
          <year>2019</year>
          <month>07</month>
          <day>02</day>
          <volume>21</volume>
          <issue>7</issue>
          <fpage>e14676</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.jmir.org/2019/7/e14676/"/>
          </comment>
          <pub-id pub-id-type="doi">10.2196/14676</pub-id>
          <pub-id pub-id-type="medline">31267981</pub-id>
          <pub-id pub-id-type="pii">v21i7e14676</pub-id>
          <pub-id pub-id-type="pmcid">PMC6632099</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref18">
        <label>18</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Guetterman</surname>
              <given-names>TC</given-names>
            </name>
            <name name-style="western">
              <surname>Sakakibara</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Baireddy</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Kron</surname>
              <given-names>FW</given-names>
            </name>
            <name name-style="western">
              <surname>Scerbo</surname>
              <given-names>MW</given-names>
            </name>
            <name name-style="western">
              <surname>Cleary</surname>
              <given-names>JF</given-names>
            </name>
            <name name-style="western">
              <surname>Fetters</surname>
              <given-names>MD</given-names>
            </name>
          </person-group>
          <article-title>Medical students' experiences and outcomes using a virtual human simulation to improve communication skills: mixed methods study</article-title>
          <source>J Med Internet Res</source>
          <year>2019</year>
          <month>11</month>
          <day>27</day>
          <volume>21</volume>
          <issue>11</issue>
          <fpage>e15459</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.jmir.org/2019/11/e15459/"/>
          </comment>
          <pub-id pub-id-type="doi">10.2196/15459</pub-id>
          <pub-id pub-id-type="medline">31774400</pub-id>
          <pub-id pub-id-type="pii">v21i11e15459</pub-id>
          <pub-id pub-id-type="pmcid">PMC6906619</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref19">
        <label>19</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Milne-Ives</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>de Cock</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Lim</surname>
              <given-names>E</given-names>
            </name>
            <name name-style="western">
              <surname>Shehadeh</surname>
              <given-names>MH</given-names>
            </name>
            <name name-style="western">
              <surname>de Pennington</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Mole</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Normando</surname>
              <given-names>E</given-names>
            </name>
            <name name-style="western">
              <surname>Meinert</surname>
              <given-names>E</given-names>
            </name>
          </person-group>
          <article-title>The effectiveness of artificial intelligence conversational agents in health care: systematic review</article-title>
          <source>J Med Internet Res</source>
          <year>2020</year>
          <month>10</month>
          <day>22</day>
          <volume>22</volume>
          <issue>10</issue>
          <fpage>e20346</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.jmir.org/2020/10/e20346/"/>
          </comment>
          <pub-id pub-id-type="doi">10.2196/20346</pub-id>
          <pub-id pub-id-type="medline">33090118</pub-id>
          <pub-id pub-id-type="pii">v22i10e20346</pub-id>
          <pub-id pub-id-type="pmcid">PMC7644372</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref20">
        <label>20</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Lund</surname>
              <given-names>BD</given-names>
            </name>
            <name name-style="western">
              <surname>Wang</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Mannuru</surname>
              <given-names>NR</given-names>
            </name>
            <name name-style="western">
              <surname>Nie</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Shimray</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Wang</surname>
              <given-names>Z</given-names>
            </name>
          </person-group>
          <article-title>ChatGPT and a new academic reality: artificial intelligence-written research papers and the ethics of the large language models in scholarly publishing</article-title>
          <source>SSRN Journal</source>
          <comment>Preprint posted online March 31, 2023</comment>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://papers.ssrn.com/sol3/papers.cfm?abstract_id=4389887"/>
          </comment>
          <pub-id pub-id-type="doi">10.2139/ssrn.4389887</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref21">
        <label>21</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <collab>OpenAI</collab>
          </person-group>
          <article-title>GPT-4 technical report</article-title>
          <source>arXiv</source>
          <comment>Preprint posted online March 4, 2024</comment>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://arxiv.org/abs/2303.08774"/>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref22">
        <label>22</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Park</surname>
              <given-names>JS</given-names>
            </name>
            <name name-style="western">
              <surname>O'Brien</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Cai</surname>
              <given-names>CJ</given-names>
            </name>
            <name name-style="western">
              <surname>Morris</surname>
              <given-names>MR</given-names>
            </name>
            <name name-style="western">
              <surname>Liang</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Bernstein</surname>
              <given-names>MS</given-names>
            </name>
          </person-group>
          <article-title>Generative agents: interactive simulacra of human behavior</article-title>
          <source>Proceedings of the 36th Annual ACM Symposium on User Interface Software and Technology</source>
          <year>2023</year>
          <conf-name>UIST '23</conf-name>
          <conf-date>October 29-November 1, 2023</conf-date>
          <conf-loc>San Francisco, CA</conf-loc>
          <fpage>1</fpage>
          <lpage>22</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://doi.org/10.1145/3586183.3606763"/>
          </comment>
          <pub-id pub-id-type="doi">10.1145/3586183.3606763</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref23">
        <label>23</label>
        <nlm-citation citation-type="web">
          <article-title>Text to speech</article-title>
          <source>ElevenLabs</source>
          <access-date>2023-06-15</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://api.elevenlabs.io/docs">https://api.elevenlabs.io/docs</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref24">
        <label>24</label>
        <nlm-citation citation-type="web">
          <article-title>OpenAI API: we’re releasing an API for accessing new AI models developed by OpenAI</article-title>
          <source>OpenAI</source>
          <access-date>2023-06-15</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://openai.com/blog/openai-api">https://openai.com/blog/openai-api</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref25">
        <label>25</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Addlesee</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Yu</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Eshghi</surname>
              <given-names>A</given-names>
            </name>
          </person-group>
          <article-title>A comprehensive evaluation of incremental speech recognition and diarization for conversational AI</article-title>
          <source>Proceedings of the 28th International Conference on Computational Linguistics</source>
          <year>2020</year>
          <conf-name>COLING '20</conf-name>
          <conf-date>December 8-13, 2020</conf-date>
          <conf-loc>Barcelona, Spain</conf-loc>
          <fpage>3492</fpage>
          <lpage>503</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://aclanthology.org/2020.coling-main.312.pdf"/>
          </comment>
          <pub-id pub-id-type="doi">10.18653/v1/2020.coling-main.312</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref26">
        <label>26</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Meloni</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Angioni</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Salatino</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Osborne</surname>
              <given-names>F</given-names>
            </name>
            <name name-style="western">
              <surname>Reforgiato Recupero</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Motta</surname>
              <given-names>E</given-names>
            </name>
          </person-group>
          <article-title>Integrating conversational agents and knowledge graphs within the scholarly domain</article-title>
          <source>IEEE Access</source>
          <year>2023</year>
          <volume>11</volume>
          <fpage>22468</fpage>
          <lpage>89</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://doi.org/10.1109/ACCESS.2023.3253388"/>
          </comment>
          <pub-id pub-id-type="doi">10.1109/access.2023.3253388</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref27">
        <label>27</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Touvron</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Lavril</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Izacard</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Martinet</surname>
              <given-names>X</given-names>
            </name>
            <name name-style="western">
              <surname>Lachaux</surname>
              <given-names>MA</given-names>
            </name>
            <name name-style="western">
              <surname>Lacroix</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Rozière</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Goyal</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Hambro</surname>
              <given-names>E</given-names>
            </name>
            <name name-style="western">
              <surname>Azhar</surname>
              <given-names>F</given-names>
            </name>
            <name name-style="western">
              <surname>Rodriguez</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Joulin</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Grave</surname>
              <given-names>E</given-names>
            </name>
            <name name-style="western">
              <surname>Lample</surname>
              <given-names>G</given-names>
            </name>
          </person-group>
          <article-title>LLaMA: open and efficient foundation language models</article-title>
          <source>arXiv</source>
          <comment>Preprint posted online February 27, 2023</comment>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://arxiv.org/abs/2302.13971"/>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref28">
        <label>28</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Radford</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Kim</surname>
              <given-names>JW</given-names>
            </name>
            <name name-style="western">
              <surname>Xu</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Brockman</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>McLeavey</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Sutskever</surname>
              <given-names>I</given-names>
            </name>
          </person-group>
          <article-title>Robust speech recognition via large-scale weak supervision</article-title>
          <source>Proceedings of the 40th International Conference on Machine Learning</source>
          <year>2023</year>
          <conf-name>ICML '23</conf-name>
          <conf-date>July 23-29, 2023</conf-date>
          <conf-loc>Honolulu, HI</conf-loc>
          <fpage>28492</fpage>
          <lpage>518</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://dl.acm.org/doi/10.5555/3618408.3619590"/>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref29">
        <label>29</label>
        <nlm-citation citation-type="web">
          <article-title>Introducing ChatGPT and Whisper APIs</article-title>
          <source>OpenAI</source>
          <access-date>2023-06-15</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://openai.com/blog/introducing-chatgpt-and-whisper-apis">https://openai.com/blog/introducing-chatgpt-and-whisper-apis</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref30">
        <label>30</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Uhl</surname>
              <given-names>JC</given-names>
            </name>
            <name name-style="western">
              <surname>Schrom-Feiertag</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Regal</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Gallhuber</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Tscheligi</surname>
              <given-names>M</given-names>
            </name>
          </person-group>
          <article-title>Tangible immersive trauma simulation: is mixed reality the next level of medical skills training?</article-title>
          <source>Proceedings of the 2023 CHI Conference on Human Factors in Computing Systems</source>
          <year>2023</year>
          <conf-name>CHI '23</conf-name>
          <conf-date>April 23-28, 2023</conf-date>
          <conf-loc>Hamburg, Germany</conf-loc>
          <fpage>1</fpage>
          <lpage>17</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://doi.org/10.1145/3544548.3581292"/>
          </comment>
          <pub-id pub-id-type="doi">10.1145/3544548.3581292</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref31">
        <label>31</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Niculescu</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>van Dijk</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Nijholt</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>See</surname>
              <given-names>SL</given-names>
            </name>
          </person-group>
          <article-title>The influence of voice pitch on the evaluation of a social robot receptionist</article-title>
          <source>Proceedings of the 2011 International Conference on User Science and Engineering</source>
          <year>2011</year>
          <conf-name>i-USEr '11</conf-name>
          <conf-date>November 29-December 1, 2011</conf-date>
          <conf-loc>Selangor, Malaysia</conf-loc>
          <fpage>18</fpage>
          <lpage>23</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://ieeexplore.ieee.org/document/6150529"/>
          </comment>
          <pub-id pub-id-type="doi">10.1109/iuser.2011.6150529</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref32">
        <label>32</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Michael</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Möller</surname>
              <given-names>S</given-names>
            </name>
          </person-group>
          <article-title>Simulating human-to-human conversations for the prediction of conversational quality</article-title>
          <source>Proceedings of the 2018 Annual Conference of the German Society for Acoustics</source>
          <year>2018</year>
          <conf-name>DAGA '18</conf-name>
          <conf-date>March 19-22, 2018</conf-date>
          <conf-loc>Munich, Germany</conf-loc>
          <fpage>525</fpage>
          <lpage>8</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://pub.dega-akustik.de/DAGA_2018/data/articles/000107.pdf"/>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref33">
        <label>33</label>
        <nlm-citation citation-type="book">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Zacharov</surname>
              <given-names>N</given-names>
            </name>
          </person-group>
          <source>Sensory Evaluation of Sound</source>
          <year>2018</year>
          <publisher-loc>Boca Raton, FL</publisher-loc>
          <publisher-name>CRC Press</publisher-name>
        </nlm-citation>
      </ref>
      <ref id="ref34">
        <label>34</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Polkosky</surname>
              <given-names>MD</given-names>
            </name>
            <name name-style="western">
              <surname>Lewis</surname>
              <given-names>JR</given-names>
            </name>
          </person-group>
          <article-title>Expanding the MOS: development and psychometric evaluation of the MOS-R and MOS-X</article-title>
          <source>Int J Speech Technol</source>
          <year>2003</year>
          <volume>6</volume>
          <fpage>161</fpage>
          <lpage>82</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://link.springer.com/article/10.1023/A:1022390615396"/>
          </comment>
          <pub-id pub-id-type="doi">10.1023/A:1022390615396</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref35">
        <label>35</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Hone</surname>
              <given-names>KS</given-names>
            </name>
            <name name-style="western">
              <surname>Graham</surname>
              <given-names>R</given-names>
            </name>
          </person-group>
          <article-title>Towards a tool for the Subjective Assessment of Speech System Interfaces (SASSI)</article-title>
          <source>Nat Lang Eng</source>
          <year>2000</year>
          <volume>6</volume>
          <issue>3&amp;4</issue>
          <fpage>287</fpage>
          <lpage>303</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://doi.org/10.1017/S1351324900002497"/>
          </comment>
          <pub-id pub-id-type="doi">10.1017/s1351324900002497</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref36">
        <label>36</label>
        <nlm-citation citation-type="web">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Frenke</surname>
              <given-names>MO</given-names>
            </name>
            <name name-style="western">
              <surname>Baetzner</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Wrzus</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Hill</surname>
              <given-names>Y</given-names>
            </name>
          </person-group>
          <article-title>Ethical guidelines and procedures</article-title>
          <source>MED1stMR</source>
          <access-date>2024-04-29</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.med1stmr.eu/wp-content/uploads/2024/03/MED1stMR_D1.2_Ethical-Guidelines-and-Procedures-v1.0.pdf">https://www.med1stmr.eu/wp-content/uploads/2024/03/MED1stMR_D1.2_Ethical-Guidelines-and-Procedures-v1.0.pdf</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref37">
        <label>37</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Gutierrez</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Schultheis</surname>
              <given-names>H</given-names>
            </name>
          </person-group>
          <article-title>Effects of biofeedback-informed soundscapes on well-being and cognition</article-title>
          <source>Proceedings of the 24th International Congress on Acoustics</source>
          <year>2022</year>
          <conf-name>ICA '22</conf-name>
          <conf-date>October 24-28, 2022</conf-date>
          <conf-loc>Gyeongju, Korea</conf-loc>
          <fpage>22</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://publications.ait.ac.at/en/publications/effects-of-biofeedback-informed-soundscapes-on-well-being-and-cog"/>
          </comment>
        </nlm-citation>
      </ref>
    </ref-list>
  </back>
</article>
