<?xml version="1.0" encoding="UTF-8"?><!DOCTYPE article PUBLIC "-//NLM//DTD Journal Publishing DTD v2.0 20040830//EN" "journalpublishing.dtd"><article xmlns:mml="http://www.w3.org/1998/Math/MathML" xmlns:xlink="http://www.w3.org/1999/xlink" dtd-version="2.0" xml:lang="en" article-type="research-article"><front><journal-meta><journal-id journal-id-type="nlm-ta">JMIR Form Res</journal-id><journal-id journal-id-type="publisher-id">formative</journal-id><journal-id journal-id-type="index">27</journal-id><journal-title>JMIR Formative Research</journal-title><abbrev-journal-title>JMIR Form Res</abbrev-journal-title><issn pub-type="epub">2561-326X</issn><publisher><publisher-name>JMIR Publications</publisher-name><publisher-loc>Toronto, Canada</publisher-loc></publisher></journal-meta><article-meta><article-id pub-id-type="publisher-id">v9i1e59328</article-id><article-id pub-id-type="doi">10.2196/59328</article-id><article-categories><subj-group subj-group-type="heading"><subject>Original Paper</subject></subj-group></article-categories><title-group><article-title>Examining Nonverbal Communication in Dyadic Interactions With Virtual Humans Using an Integrated Coding System: Mixed Methods Analysis</article-title></title-group><contrib-group><contrib contrib-type="author" corresp="yes"><name name-style="western"><surname>Perez</surname><given-names>Analay</given-names></name><degrees>PhD</degrees><xref ref-type="aff" rid="aff1"/></contrib><contrib contrib-type="author"><name name-style="western"><surname>Sakakibara</surname><given-names>Rae</given-names></name><degrees>BS</degrees><xref ref-type="aff" rid="aff1"/></contrib><contrib contrib-type="author"><name name-style="western"><surname>Baireddy</surname><given-names>Srikar</given-names></name><degrees>BS</degrees><xref ref-type="aff" rid="aff1"/></contrib><contrib contrib-type="author" deceased="yes"><name name-style="western"><surname>Fetters</surname><given-names>Michael D</given-names></name><degrees>MD, MPH, 
MA</degrees><xref ref-type="aff" rid="aff1"/><xref ref-type="fn" rid="de-fn1">&#x2020;</xref></contrib><contrib contrib-type="author"><name name-style="western"><surname>Guetterman</surname><given-names>Timothy C</given-names></name><degrees>PhD</degrees><xref ref-type="aff" rid="aff1"/></contrib></contrib-group><aff id="aff1"><institution>Department of Family Medicine, University of Michigan</institution><addr-line>1018 Fuller St</addr-line><addr-line>Ann Arbor</addr-line><addr-line>MI</addr-line><country>United States</country></aff><contrib-group><contrib contrib-type="editor"><name name-style="western"><surname>Mavragani</surname><given-names>Amaryllis</given-names></name></contrib></contrib-group><contrib-group><contrib contrib-type="reviewer"><name name-style="western"><surname>Dantas</surname><given-names>Adilmar Coelho</given-names></name></contrib><contrib contrib-type="reviewer"><name name-style="western"><surname>Hartigan</surname><given-names>Danielle</given-names></name></contrib></contrib-group><author-notes><corresp>Correspondence to Analay Perez, PhD, Department of Family Medicine, University of Michigan, 1018 Fuller St, Ann Arbor, MI, 48104, United States, 1 734-998-7124; <email>analaype@med.umich.edu</email></corresp><fn fn-type="other" id="de-fn1"><label>&#x2020;</label><p>deceased</p></fn></author-notes><pub-date pub-type="collection"><year>2025</year></pub-date><pub-date pub-type="epub"><day>7</day><month>8</month><year>2025</year></pub-date><volume>9</volume><elocation-id>e59328</elocation-id><history><date date-type="received"><day>10</day><month>04</month><year>2024</year></date><date date-type="rev-recd"><day>16</day><month>05</month><year>2025</year></date><date date-type="accepted"><day>21</day><month>05</month><year>2025</year></date></history><copyright-statement>&#x00A9; Analay Perez, Rae Sakakibara, Srikar Baireddy, Michael D Fetters, Timothy C Guetterman. 
Originally published in JMIR Formative Research (<ext-link ext-link-type="uri" xlink:href="https://formative.jmir.org">https://formative.jmir.org</ext-link>), 7.8.2025. </copyright-statement><copyright-year>2025</copyright-year><license license-type="open-access" xlink:href="https://creativecommons.org/licenses/by/4.0/"><p>This is an open-access article distributed under the terms of the Creative Commons Attribution License (<ext-link ext-link-type="uri" xlink:href="https://creativecommons.org/licenses/by/4.0/">https://creativecommons.org/licenses/by/4.0/</ext-link>), which permits unrestricted use, distribution, and reproduction in any medium, provided the original work, first published in JMIR Formative Research, is properly cited. The complete bibliographic information, a link to the original publication on <ext-link ext-link-type="uri" xlink:href="https://formative.jmir.org">https://formative.jmir.org</ext-link>, as well as this copyright and license information must be included.</p></license><self-uri xlink:type="simple" xlink:href="https://formative.jmir.org/2025/1/e59328"/><abstract><sec><title>Background</title><p>The patient-physician dyad involves both verbal and nonverbal communication. Traditional methods use quantitative or qualitative coding when analyzing dyadic data of nonverbal communication. Quantitative coding methods can capture the frequency of nonverbal communication, while qualitative coding methods can provide descriptive information on the context and nuance of the nonverbal communication expressed. 
Yet, limited research has examined the integration of quantitative and qualitative coding methods of nonverbal communication between a patient-physician dyad through video recordings.</p></sec><sec><title>Objective</title><p>The objective of this formative study was to demonstrate how nonverbal communication data can be analyzed using a mixed methods analysis approach and propose an integrated coding system using a subset of the original dataset.</p></sec><sec sec-type="methods"><title>Methods</title><p>A secondary analysis was conducted from the intervention study with a sample of 32 pairs of randomly selected video recordings based on first and second interactions after receiving feedback from a virtual human. A 2-minute segment was used to code nonverbal communication, and a codebook was developed, informed by the literature and inductive qualitative approaches. For the mixed methods analysis, we purposefully selected 2 participants from the sample of 32 who demonstrated high frequency in quantitative and qualitative coding of nonverbal behaviors. We developed a joint display to visually represent the integration of quantitative and qualitative coding methods and developed person-level meta-inferences.</p></sec><sec sec-type="results"><title>Results</title><p>This formative study demonstrates an approach to nonverbal communication analysis that mixes qualitative and quantitative methods. The mixed methods results indicated the frequency of participants&#x2019; (n=32) nonverbal behaviors increased after repeated interactions, including eyebrow raise, nodding, and smiling, in addition to the increased average duration of nonverbal behaviors across interactions. Illustrated through an in-depth example of integrated mixed methods coding of 2 participants from the sample, the integration of quantitative and qualitative data provided insights into nonverbal communication. 
Quantitatively, we captured the frequency of nonverbal behaviors while qualitatively expanding on the context for nonverbal behaviors and generating person-level meta-inferences. The joint display informed our integrated coding system for mixed methods analysis of nonverbal communication.</p></sec><sec sec-type="conclusions"><title>Conclusions</title><p>The resultant integrated coding system may be helpful to researchers engaging in nonverbal communication data of dyads by providing a step-by-step method using a mixed methods analysis approach. This approach can help us to advance methods for analyzing nonverbal communication to enhance the patient-physician dyad and education on nonverbal communication. We encourage applying the integrated coding system across several subdisciplines in health sciences research to identify how it can be further expanded and refined.</p></sec><sec sec-type="registered-report"><title>International Registered Report Identifier (IRRID)</title><p>RR2-10.2196/46601</p></sec></abstract><kwd-group><kwd>nonverbal communication</kwd><kwd>nonverbal behaviors</kwd><kwd>mixed methods analysis</kwd><kwd>joint display</kwd><kwd>patient-physician dyad</kwd><kwd>patient-doctor</kwd><kwd>patient-physician</kwd><kwd>dyad</kwd><kwd>dyadic</kwd><kwd>interaction</kwd><kwd>simulation</kwd><kwd>virtual human</kwd><kwd>quantitative</kwd><kwd>qualitative</kwd><kwd>secondary analysis</kwd><kwd>non-verbal</kwd></kwd-group></article-meta></front><body><sec id="s1" sec-type="intro"><title>Introduction</title><sec id="s1-1"><title>Background</title><p>Verbal and nonverbal communication are important characteristics of the patient-physician dyad. 
Verbal communication can include but is not limited to language, dialects, and the tone of voice, whereas nonverbal communication includes eye contact, gaze gesture, facial expressions, movement, stance, body position, and spatial distancing [<xref ref-type="bibr" rid="ref1">1</xref>,<xref ref-type="bibr" rid="ref2">2</xref>]. These behaviors are only a subset of verbal and nonverbal behaviors included in communication. Researchers have also identified more specific nonverbal behaviors including emotions, embodiment, and interpersonal perception [<xref ref-type="bibr" rid="ref3">3</xref>]. In clinical practice, it is vital for physicians to effectively communicate with patients using verbal and nonverbal behaviors to improve health outcomes and the overall patient-physician dyadic encounter [<xref ref-type="bibr" rid="ref4">4</xref>]. Physicians who can appropriately interpret patients&#x2019; nonverbal cues lead to increased patient satisfaction, and patients are more likely to return for follow-up visits than physicians who are less likely to read patients&#x2019; nonverbal cues correctly [<xref ref-type="bibr" rid="ref5">5</xref>]. Appropriate use of nonverbal cues between a patient and a physician can contribute to a positive visit, increase patient self-disclosure, understanding, and recall of information discussed during the visit, and influence a patient&#x2019;s adherence to a physician&#x2019;s recommendations [<xref ref-type="bibr" rid="ref6">6</xref>]. Not only is verbal and nonverbal communication critical to the patient-physician dyad, but nonverbal communication is pivotal for accreditation of residency programs and physician certifications as these assess physicians&#x2019; competency in interpersonal skills, which involve verbal and nonverbal communication [<xref ref-type="bibr" rid="ref7">7</xref>]. 
For example, the Accreditation Council for Graduate Medical Education includes a core competency of interpersonal and communication skills comprising both verbal and nonverbal communication. Although a wealth of literature exists on the benefits of enhancing verbal and nonverbal communication in health sciences research and the types of gestures commonly associated with each type, additional methodological guidance is needed when coding, analyzing, and integrating verbal and nonverbal using a mixed methods approach. Hillen et al [<xref ref-type="bibr" rid="ref8">8</xref>] conducted a systematic review synthesizing quantitative and qualitative coding methods for assessing the interaction between health care professionals and patients and/or caregivers. However, none of the articles included in their review used mixed methods research, that is, the intentional integration of quantitative and qualitative research to better understand the research phenomenon of health care professionals and patient and/or caregiver interactions. Therefore, research is needed to understand the potential of using mixed methods analysis to holistically explore verbal and nonverbal communication.</p></sec><sec id="s1-2"><title>Traditional Methods for Analyzing Nonverbal Communication</title><p>Traditionally, both quantitative and qualitative methods are used to analyze nonverbal communication. Coding is a process of identifying segments of data, such as a brief video snippet, and applying a code as a label for what is occurring. From a quantitative perspective, the most common method to code and analyze facial expressions involves manual coding of specific facial expressions and movements using frequencies [<xref ref-type="bibr" rid="ref9">9</xref>]. 
The Facial Action Coding System (FACS) [<xref ref-type="bibr" rid="ref10">10</xref>,<xref ref-type="bibr" rid="ref11">11</xref>] has been widely used by several researchers and has demonstrated strong psychometric properties [<xref ref-type="bibr" rid="ref12">12</xref>].
Quantitative content analysis can help uncover what common nonverbal behaviors are, particularly when examining the patient-physician dyad, while qualitative content analysis can help reveal why by providing additional verbal and nonverbal details to complement the quantitative coding [<xref ref-type="bibr" rid="ref13">13</xref>].</p><p>A more specific qualitative coding method is live coding [<xref ref-type="bibr" rid="ref14">14</xref>]. Live coding is a method that allows researchers to code both verbal and nonverbal behaviors while listening to and watching video recordings [<xref ref-type="bibr" rid="ref14">14</xref>]. The Rotor Interaction Analysis System (RIAS) uses a variation of live coding by accounting for some nonverbal behaviors qualitatively, though it primarily uses a quantitative coding system [<xref ref-type="bibr" rid="ref15">15</xref>]. Given the advent of digital data such as video, audio, social media, music, photographs, and films in qualitative research, and the advancements of computerized assisted qualitative data analysis software, researchers are using digital software tools to facilitate this process and simultaneously code both video and audio data. As noted by Parameswaran and colleagues [<xref ref-type="bibr" rid="ref14">14</xref>], this allows researchers to &#x201C;gather contextual information from the video clips to better situate the themes when coding.&#x201D;</p><p>Nonetheless, qualitatively coding nonverbal communication fails to provide the magnitude of a particular nonverbal behavior and may be less precise than quantitative coding. Therefore, merging quantitative and qualitative coding may be an ideal solution to further deepen understanding by providing quantifiable and contextual descriptions of nonverbal communication between dyads. 
Yet, limited research has explored the potential benefits of integrating quantitative and qualitative coding methods using a mixed methods analysis approach to examine nonverbal communication.</p></sec><sec id="s1-3"><title>Mixed Methods Analysis of Nonverbal Communication</title><p>Mixed methods research is a methodology that intentionally integrates quantitative and qualitative research methods to obtain a holistic understanding of the research questions and objectives [<xref ref-type="bibr" rid="ref16">16</xref>]. When integrating quantitative and qualitative coding at the methods level, a single video dataset can be analyzed using an inherently mixed methods analysis approach. Onwuegbuzie and Abrams [<xref ref-type="bibr" rid="ref1">1</xref>] proposed several reasons for integrating quantitative and qualitative coding. One reason is to reduce the dimensionality of qualitative nonverbal communication data to subsequently further explore using quantitative analysis, such as exploratory factor analysis. Another approach for integrating quantitative and qualitative coding can be to compare quantitative and qualitative nonverbal data through a joint display to generate new conclusions called meta-inferences. Alternatively, using the same dataset, another approach is to transform quantitative nonverbal data to analyze qualitatively by exploring salient themes or link the qualitative nonverbal data with quantitized nonverbal or verbal communication data to examine its correlation. Quantitizing data refers to transforming qualitative coded data into a quantitative representation, while qualitizing data refers to transforming numerical results into qualitative codes and themes [<xref ref-type="bibr" rid="ref17">17</xref>,<xref ref-type="bibr" rid="ref18">18</xref>]. 
This process can be viewed as a mixed methods coding technique [<xref ref-type="bibr" rid="ref19">19</xref>].</p><p>Onwuegbuzie and Abrams [<xref ref-type="bibr" rid="ref1">1</xref>] have conducted recent research on analyzing nonverbal communication using mixed methods analysis and provided useful examples from empirical research to guide researchers. To demonstrate the applications of mixed methods research for nonverbal communication, researchers [<xref ref-type="bibr" rid="ref1">1</xref>] present several examples of studies, most involving qualitative interviews as a method for coding nonverbal communication, as well as in-person observation coding, and using a combination of digital technologies including video recordings and photographs/field notes, with examples primarily derived from the field of education. Given the role and importance of nonverbal communication in health care and health professions education, it is imperative to explore how researchers can use mixed methods research to integrate quantitative and qualitative coding of nonverbal communication at the methods level. Thus, our aim was to develop and pilot a mixed methods approach to analyze nonverbal communication in health care.</p></sec><sec id="s1-4"><title>Research Aims</title><p>The overarching purpose of this formative study was to develop and pilot an integrated coding system for mixed methods analysis of nonverbal communication and illustrate how researchers can integrate quantitative and qualitative coding methods of nonverbal communication. Particularly, this study used data from an existing video interaction dataset of simulated health care encounters between a physician and a virtual human to explore nonverbal behaviors. For this formative study, we developed an integrated coding system using a subset of data from the original sample. 
The aims were to: (1) understand how nonverbal behaviors could be improved through learner interactions with a virtual human across repeated scenarios and (2) use a novel mixed methods approach to analyze quantitative and qualitative coded data of nonverbal communication to provide greater context for each nonverbal behavior exhibited by participants.</p></sec></sec><sec id="s2" sec-type="methods"><title>Methods</title><sec id="s2-1"><title>Participants</title><p>This analysis relied on secondary data collected as part of a larger trial [<xref ref-type="bibr" rid="ref20">20</xref>]. Participants in this study consisted of second-year medical students recruited from 3 institutions in the United States.</p></sec><sec id="s2-2"><title>Data Sources</title><p>We conducted a secondary analysis from the intervention arm of a randomized controlled trial (n=210 medical students and 840 video recordings of interactions) of a communication simulation using virtual humans. Our sample for developing and piloting a novel mixed methods analysis of nonverbal communication consisted of n=32 pairs of randomly selected video recordings of first and second interactions after receiving feedback from the virtual human program, representing approximately 15% of the entire dataset.</p><p>The duration of interactions was typically between a range of 4 and 8 minutes; however, we used the &#x201C;thin slice&#x201D; method for coding, which involved coding short excerpts of the video recordings. Evidence shows that short observations of 1 to 3 minutes can represent a participant&#x2019;s nonverbal behavior and predict clinical and social-psychological outcomes [<xref ref-type="bibr" rid="ref21">21</xref>]. We chose to code 2-minute segments to account for the pauses in conversation while learners read prompts on the screen. 
We began coding 2 minutes into the videos because of the likelihood of capturing the interaction at a point where the learner is confronted by virtual humans in emotionally charged states after some initial exchanges. The virtual human simulation, MPathic-VR (Medical Cyberworlds), provides automated feedback after the first interaction, and the learner is given another opportunity to practice in their second interaction with the same virtual human(s). Additional information on the simulation and larger study can be found in a protocol paper [<xref ref-type="bibr" rid="ref22">22</xref>].</p></sec><sec id="s2-3"><title>Virtual Human Scenarios and Procedures</title><p>Participants in the intervention arm interacted with the MPathic-VR and participated in two related scenarios. One scenario focused on intercultural communication between a virtual human patient, the virtual human patient&#x2019;s mother, and the learner. The second scenario focused on interprofessional communication with a nurse virtual human and the learner [<xref ref-type="bibr" rid="ref20">20</xref>]. Participants in the intervention arm received personalized feedback on their performance (both verbal and nonverbal) after each scenario and a video recording of their conversations with the virtual human.</p></sec><sec id="s2-4"><title>Quantitative Analysis</title><p>Using the established codebook, we first identified which codes would be treated quantitatively using frequencies. After the 2 analysts coded the nonverbal quantitative behaviors by counting the total number of times each behavior was expressed in the 2-minute video segment for the first and second interactions with a virtual human. We summed the number of expressions for each nonverbal behavior across all videos to produce a total number for interactions one and two. We also summed the number of nonverbal behaviors expressed by the participant across the learner and virtual human talk time. 
<xref ref-type="supplementary-material" rid="app1">Multimedia Appendix 1</xref> provides the codebook describing each code, its definition, and whether the code was considered a quantitative or qualitative code.</p></sec><sec id="s2-5"><title>Qualitative Analysis</title><p>We began our analysis by reviewing the literature on measures and coding schemes for analyzing nonverbal behaviors. Specifically, we included in the codebook behaviors emphasized by the MPathic-VR system (eg, smiling, nodding, and eyebrow raises), in addition to nonverbal behaviors identified as expressions that may indicate compassion as described by the FACS [<xref ref-type="bibr" rid="ref23">23</xref>]. These included eye gaze, head orientation, forward lean, oblique eyebrows, furrowed eyebrows, lower eyelid raise, slight mouth press, and lip corner puller. Coding of these behaviors was guided by the FACS [<xref ref-type="bibr" rid="ref10">10</xref>,<xref ref-type="bibr" rid="ref11">11</xref>]. We used the FACS in this investigation because the action units have a specific anatomical basis, which allowed for reliable application of nonverbal behavior codes across coders.</p><p>Qualitative codes were identified using open coding of the data to generate additional codes. The 2 coders (TCG and RS) also created new codes (not part of FACS) inductively through qualitative open coding. Both were experienced and trained in thematic and content analysis, one at the PhD level and the other, a research assistant with over three years of experience with qualitative data. These qualitative codes (eg, intonation and nodding) were necessary because of their prominence in the data and because the FACS does not capture such behavior. We imported video recordings into MAXQDA 2022 qualitative software for coding [<xref ref-type="bibr" rid="ref24">24</xref>]. 
The 2 individuals conducting the initial open coding reviewed 3 video segments second-by-second to compare and discuss codes through consensus and reach a final codebook.
As demonstrated in <xref ref-type="table" rid="table1">Table 1</xref>, the frequency of nonverbal behaviors emphasized by MPathic-VR increased in the second interactions compared to the first, including eyebrow raise (T<sub>1</sub>=111 instances, T<sub>2</sub>=128 instances; here T<sub>1</sub>=Time 1 or first interaction and T<sub>2</sub>=Time 2 or second interaction), nodding (T<sub>1</sub>=112 instances and T<sub>2</sub>=211 instances), and smiling (T<sub>1</sub>=33 instances and T<sub>2</sub>=38 instances). The frequency of all other nonverbal behaviors also increased in the second interaction compared to the first, except for lower eyelid raises and furrowed eyebrows, which remained consistent across scenarios (73 instances and 53 instances, respectively). <xref ref-type="table" rid="table2">Table 2</xref> shows that the average duration of nonverbal behaviors regarding orientation increased in the second interaction compared to the first, including the average duration of eye gaze toward the virtual human (T<sub>1</sub>=61.3 s and T<sub>2</sub>=76.3 s), forward lean (T<sub>1</sub>=37.5 s and T<sub>2</sub>=38.9 s), and head tilting (T<sub>1</sub>=34.2 s and T<sub>2</sub>=48.9 s).</p><table-wrap id="t1" position="float"><label>Table 1.</label><caption><p>Frequency of nonverbal behaviors in the first and second interactions.</p></caption><table id="table1" frame="hsides" rules="groups"><thead><tr><td align="left" valign="bottom">Nonverbal behaviors</td><td align="left" valign="bottom" colspan="2">Code frequency (instances across n=32 videos), n</td></tr><tr><td align="left" valign="bottom"/><td align="left" valign="top">First interaction</td><td align="left" valign="top">Second interaction</td></tr></thead><tbody><tr><td align="left" valign="top" colspan="3">Behaviors emphasized by MPathic-VR</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Eyebrow raise</td><td align="left" 
valign="top">111</td><td align="left" valign="top">128</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Nodding</td><td align="left" valign="top">112</td><td align="left" valign="top">211</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Smiling</td><td align="left" valign="top">33</td><td align="left" valign="top">38</td></tr><tr><td align="left" valign="top" colspan="3">Other facial expressions</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Furrowed eyebrows</td><td align="left" valign="top">73</td><td align="left" valign="top">73</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Lip corner puller</td><td align="left" valign="top">90</td><td align="left" valign="top">120</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Lower eyelid raise</td><td align="left" valign="top">53</td><td align="left" valign="top">53</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Oblique eyebrows</td><td align="left" valign="top">45</td><td align="left" valign="top">62</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Slight lip press</td><td align="left" valign="top">90</td><td align="left" valign="top">115</td></tr></tbody></table></table-wrap><table-wrap id="t2" position="float"><label>Table 2.</label><caption><p>Duration of nonverbal behaviors in the first and second interactions.</p></caption><table id="table2" frame="hsides" rules="groups"><thead><tr><td align="left" valign="bottom">Orientation</td><td align="left" valign="bottom" 
colspan="2">Average code duration per video (seconds)</td></tr><tr><td align="left" valign="bottom"/><td align="left" valign="bottom">First interaction</td><td align="left" valign="bottom">Second interaction</td></tr></thead><tbody><tr><td align="left" valign="top">Eye gaze toward virtual human</td><td align="char" char="." valign="top">61.3</td><td align="char" char="." valign="top">76.3</td></tr><tr><td align="left" valign="top">Forward lean</td><td align="char" char="." valign="top">37.5</td><td align="char" char="." valign="top">38.9</td></tr><tr><td align="left" valign="top">Head tilting</td><td align="char" char="." valign="top">34.2</td><td align="char" char="." valign="top">48.9</td></tr></tbody></table></table-wrap><p>We also examined whether the frequency across all nonverbal behaviors increased after repeated scenarios while either talking or listening to the virtual human (see <xref ref-type="fig" rid="figure1">Figures 1</xref> and <xref ref-type="fig" rid="figure2">2</xref>). The frequency of nonverbal behaviors across learner and virtual human talk time increased from the first interactions to the second interactions, except for lower eyelid raise and smiling. The frequency of lower eyelid raise decreased between the first and second interaction for learner talk time (T<sub>1</sub>=17 and T<sub>2</sub>=16 instances) and smiling (T<sub>1</sub>=3 and T<sub>2</sub>=2 instances). The frequency of smiling was the only nonverbal behavior that did not increase after repeated scenarios during the virtual human talk time (T<sub>1</sub>=19 instances and T<sub>2</sub>=19). <xref ref-type="fig" rid="figure1">Figure 1</xref> shows the nonverbal behaviors of the first interactions, and <xref ref-type="fig" rid="figure2">Figure 2</xref> displays these behaviors across the second interactions. 
These figures are extracted from the MAXQDA software (VERBI Software) demonstrating a Code Relations Browser with the respective frequencies of each nonverbal behavior between the learner and virtual human talk time.</p><fig position="float" id="figure1"><label>Figure 1.</label><caption><p>First interaction with virtual human: Frequency of simultaneous occurrences of nonverbal behavior codes when the learner or virtual human was talking during video scenario. VH: virtual human.</p></caption><graphic alt-version="no" mimetype="image" position="float" xlink:type="simple" xlink:href="formative_v9i1e59328_fig01.png"/></fig><fig position="float" id="figure2"><label>Figure 2.</label><caption><p>Second interaction with virtual human: Frequency of simultaneous occurrences of nonverbal behavior codes when the learner or virtual human was talking during video scenario. VH: virtual human.</p></caption><graphic alt-version="no" mimetype="image" position="float" xlink:type="simple" xlink:href="formative_v9i1e59328_fig02.png"/></fig></sec><sec id="s3-2"><title>Mixed Methods Analysis of Nonverbal Communication</title><p>To illustrate the application of mixed methods analysis of nonverbal communication, 2 participants were purposefully selected from the sample based on the highest frequency of quantitative and qualitative coded segments of nonverbal behaviors. To analyze the quantitative and qualitative codes and integrate the data, we found concordance between quantitative and qualitative nonverbal communication codes across segments that were close in time. <xref ref-type="table" rid="table3">Table 3</xref> displays the joint display of integrated findings across quantitative and qualitative coded nonverbal communication data. A joint display is a visual display used to facilitate and represent the integration of the quantitative and qualitative phases in mixed methods research [<xref ref-type="bibr" rid="ref25">25</xref>]. 
The joint display presented is categorized by the time segment and its respective quantitative codes, the time segment and its respective qualitative codes, and the person-level meta-inferences. Meta-inferences, or integrated conclusions from the quantitative and qualitative strands, were developed by merging quantitative and qualitative coding inferences to fully understand participants&#x2019; nonverbal communication and further augment the verbal data.</p><p>We generated meta-inferences at the person-level by aggregating quantitative and qualitative findings across all time points for each person to generate a rich description of each participant&#x2019;s overall nonverbal communication (see <xref ref-type="table" rid="table3">Table 3</xref> for person-level meta-inferences). For example, participant 047 for 2:06-2:30 engaged in forward lean while at the same time nodding their head upwards and downwards. When integrating these results within the context of the video scenario, we can conclude the participant was engaged in the conversation and displayed nonverbal behaviors consistent with the verbal data, affirming that they knew what was best for the patient. Collectively, these were integrated to generate a person-level meta-inference representing the nonverbal communication displayed by each participant for the entire time segment.</p><p><xref ref-type="table" rid="table4">Table 4</xref> displays the frequencies for each quantitative nonverbal behavior code across each participant. Compared to other quantitative nonverbal codes, participant ID 047 engaged in forward lean and learner talk time at the highest frequency (four and three instances, respectively). Participant ID 057 displayed eye gaze toward the virtual human at the highest frequency (7 instances), followed by learner talk time and virtual human talk time (3 instances, respectively). 
These results help to further contextualize the qualitative findings from the joint display by providing quantifiable data across nonverbal behaviors expressed by the participants.</p><table-wrap id="t3" position="float"><label>Table 3.</label><caption><p>Joint display of integrated nonverbal coding analysis and person-level meta-inferences.</p></caption><table id="table3" frame="hsides" rules="groups"><thead><tr><td align="left" valign="bottom" colspan="2">Time</td><td align="left" valign="bottom">Quantitative codes</td><td align="left" valign="bottom">Time</td><td align="left" valign="bottom">Qualitative codes</td><td align="left" valign="bottom">Person-level meta-inference</td></tr></thead><tbody><tr><td align="left" valign="top" colspan="6">ID 047</td></tr><tr><td align="left" valign="top" colspan="2"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>2:06-2:30</td><td align="left" valign="top">Forward lean</td><td align="left" valign="top">2:09-2:12</td><td align="left" valign="top">Nodding head up and down</td><td align="left" valign="top" rowspan="6">The participant was engaged in the conversation and displayed nonverbal communication in response to the virtual human regarding steps they took to alleviate tension between the patient and their mother. Participant&#x2019;s nonverbal cues demonstrated that they cared about the outcome of the conversation and explained the actions they took to alleviate tension between the patient and their mother. The participant&#x2019;s gaze was focused on the virtual human, and they adequately responded to all questions posed by the virtual human. 
The participant displayed nonverbal behaviors such as shaking their head while agreeing with the virtual human&#x2019;s response when learning about confidential patient information.</td></tr><tr><td align="left" valign="top" colspan="2"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>2:06-2:35</td><td align="left" valign="top">Forward lean and learner talk time</td><td align="left" valign="top">2:30-2:32</td><td align="left" valign="top">Shakes head side-to-side</td></tr><tr><td align="left" valign="top" colspan="2"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>2:34-2:58</td><td align="left" valign="top">Forward lean and learner talk time</td><td align="left" valign="top">2:54-2:58</td><td align="left" valign="top">Shakes head</td></tr><tr><td align="left" valign="top" colspan="2"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>2:55-3:48</td><td align="left" valign="top">Forward lean</td><td align="left" valign="top">3:31-3:32</td><td align="left" valign="top">Nodding head</td></tr><tr><td align="char" char="." 
valign="top" colspan="2"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>2:57-3:00</td><td align="left" valign="top">Eye gaze toward virtual human</td><td align="left" valign="top"/><td align="left" valign="top"/></tr><tr><td align="left" valign="top" colspan="2"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>3:47-4:15</td><td align="left" valign="top">Learner talk time</td><td align="left" valign="top">3:47-3:48</td><td align="left" valign="top">Inconsistent behavior</td></tr><tr><td align="left" valign="top" colspan="6">ID 057</td></tr><tr><td align="left" valign="top" colspan="2"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>2:03-2:06</td><td align="left" valign="top">Learner talk time and eye gaze toward virtual human</td><td align="left" valign="top">2:05-2:07</td><td align="left" valign="top">Nodding head up and down</td><td align="left" valign="top" rowspan="7">The participant focused on the virtual human&#x2019;s response while nodding their head up and down to affirm patient&#x2019;s autonomy. During this process, the participant nodded their head forward and backward, closed their eyes, and their lips read &#x201C;okay&#x201D; as if to imply that there could be an issue with the information that was disclosed about the patient. The participant nodded their head back and forth while quietly under breath stating, &#x201C;I see,&#x201D; based on confidential patient information disclosed by the virtual human. Then, they nodded their head and affirmed they did not understand the situation, implying the participant was admitting to making a mistake. The participant was focused on the response from the virtual human and nodded their head in a manner that demonstrated they understood the situation and closed their eyes as if to sympathize with the patient. 
As the virtual human continued to share information, the participant nodded their head back and forth in agreement. The participant frowned and pressed their lips to the corner of their mouth to show signs of sympathy and disbelief. The participant nodded their head forward and backward to show that they understood what was happening based on the information shared with them about the patient by the virtual human. The participant&#x2019;s nonverbal behaviors aligned with their agreement to the virtual human&#x2019;s comment on the importance of working as a team in health care with the patient and doctors.</td></tr><tr><td align="left" valign="top" colspan="2"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>2:24-2:43</td><td align="left" valign="top">Virtual human talk time and eye gaze toward virtual human</td><td align="left" valign="top">2:43-2:44</td><td align="left" valign="top">Nodding head forward and backward</td></tr><tr><td align="left" valign="top" colspan="2"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>2:45-2:55</td><td align="left" valign="top">Learner talk time and eye gaze toward virtual human</td><td align="left" valign="top">2:46-2:47</td><td align="left" valign="top">Nodding head side-to-side</td></tr><tr><td align="left" valign="top" colspan="2"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>2:52-3:22</td><td align="left" valign="top">Virtual human talk time and eye gaze toward virtual human</td><td align="left" valign="top">3:01-3:23</td><td align="left" valign="top">Nodding head, nodding head front to back, frowns, and presses lips to the corner of mouth</td></tr><tr><td align="left" valign="top" colspan="2"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>3:24-3:46</td><td align="left" valign="top">Virtual human talk time and eye gaze toward virtual human</td><td align="left" 
valign="top">3:24- 3:29</td><td align="left" valign="top">Nodding head forward and backward</td></tr><tr><td align="left" valign="top" colspan="2"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>3:47-3:51</td><td align="left" valign="top">Eye gaze toward virtual human and learner talk time</td><td align="left" valign="top">3:45-3:48</td><td align="left" valign="top">Nodding head forward and backward and inconsistent behavior</td></tr><tr><td align="left" valign="top" colspan="2"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>3:57-3:59</td><td align="left" valign="top">Eye gaze toward virtual human</td><td align="left" valign="top">3:57-3:58</td><td align="left" valign="top">Nodding head forward and backward</td></tr></tbody></table></table-wrap><table-wrap id="t4" position="float"><label>Table 4.</label><caption><p>Frequencies of quantitative nonverbal codes expressed by participants.</p></caption><table id="table4" frame="hsides" rules="groups"><thead><tr><td align="left" valign="bottom">Quantitative code</td><td align="left" valign="bottom" colspan="2">Frequency, n</td></tr><tr><td align="left" valign="top"/><td align="left" valign="top">Participant ID 047</td><td align="left" valign="top">Participant ID 057</td></tr></thead><tbody><tr><td align="left" valign="top">Forward lean</td><td align="char" char="." valign="top">4</td><td align="char" char="." valign="top">0</td></tr><tr><td align="left" valign="top">Learner talk time</td><td align="char" char="." valign="top">3</td><td align="char" char="." valign="top">3</td></tr><tr><td align="left" valign="top">Eye gaze toward virtual human</td><td align="char" char="." valign="top">1</td><td align="char" char="." valign="top">7</td></tr><tr><td align="left" valign="top">Virtual human talk time</td><td align="char" char="." valign="top">0</td><td align="char" char="." 
valign="top">3</td></tr></tbody></table></table-wrap></sec></sec><sec id="s4" sec-type="discussion"><title>Discussion</title><sec id="s4-1"><title>Principal Findings</title><p>The findings from this study demonstrate that the frequency of nonverbal behaviors increased after repeated scenarios between the first and second interactions. Specifically, all nonverbal behaviors emphasized by the MPathic-VR, including eyebrow raise, nodding, and smiling, increased from the first to second interactions. The average duration (in seconds) of nonverbal behaviors, including eye gaze toward virtual human, forward lean, and head tilting, also increased from the first to second interactions. Overall, these results provide some evidence of the validity of our coding approach, given the behaviors expected to increase did increase based on the nonverbal coding.</p><p>These results are particularly promising regarding patient-physician interactions. Physicians who display empathic nonverbal behaviors increase patients&#x2019; perceptions of physician empathy, warmth, and competence [<xref ref-type="bibr" rid="ref26">26</xref>]. Furthermore, nonverbal behaviors such as patient-centered gaze and body orientation positively affect a patient&#x2019;s perceived level of physician empathy [<xref ref-type="bibr" rid="ref27">27</xref>]. Therefore, these findings hold clinical implications for training effective nonverbal communication skills among medical students and professionals in clinical practice. Furthermore, these results are supported in the literature demonstrating that students who interacted with MPathic-VR and received feedback from a simulated scenario improved their communication scores, particularly nonverbal communication [<xref ref-type="bibr" rid="ref20">20</xref>]. 
The findings from this study used the same virtual human, the MPathic-VR, and reported the benefits of using virtual humans as it allowed for a standardized method for comparison across participants [<xref ref-type="bibr" rid="ref20">20</xref>]. Further research is needed to ensure the findings from a virtual human are replicated in real-world settings.</p><p>Through this formative study, we highlight the role of using video recordings and mixed methods analysis to obtain a more comprehensive understanding of how learners improve their nonverbal communication through virtual human video interactions. Using video rather than photos has been shown to be a more ecologically robust method for analyzing nonverbal communication [<xref ref-type="bibr" rid="ref26">26</xref>]. From a mixed methods analysis perspective, this study sheds light on a novel approach by coding quantitative and qualitative nonverbal data to generate a more holistic understanding with descriptions and context for each behavior. By analyzing and integrating quantitative and qualitative coding of nonverbal behaviors, we generated meta-inferences that illuminated more nuance of each nonverbal behavior displayed by 2 participants from the sample. As such, one of the contributions of this study was the development of a step-by-step approach elucidating an integrated coding system for mixed methods analysis of nonverbal communication and strategies to guide researchers in analyzing nonverbal communication data. Below we provide a step-by-step approach for an integrated coding system:</p><p>First, determine the duration of the coded segment: The first step is to identify the duration of the video that will be coded. As noted, we used the thin slice method, but if there is a rationale for a longer duration, it is important to justify the length of time that will be coded. 
It is also critical to identify whether coding will occur toward the beginning, middle, or end of the video, explicitly outlining the start and end times for each clip.</p><p>Second, developing a codebook of nonverbal behaviors: Afterwards, it is important to identify the quantitative and qualitative nonverbal codes that will be incorporated into the codebook. These codes may be informed by literature, theory, or the study. Quantitative codes involve using frequencies or counts to document nonverbal behaviors such as eye gaze, head tilt, and smile. Qualitative codes can provide further details of each behavior. For example, if a qualitative code is based on positive or negative intonation, researchers may expand on this code by describing whether participants appeared uninterested during the interaction, the context of the discussion, and the point at which the participant starts to show signs of disinterest. When developing a codebook, it is encouraged to collaborate with all members of the research team who will be engaging in the coding and data analysis phase to ensure consensus on the codebook, determine distinctions between quantitative and qualitative nonverbal codes, and ensure each code is operationalized to increase reliability among coders.</p><p>Third, the coding process: Determine how many individuals from the research team will be involved in the coding process, how many video clips each individual on the research team will code, the number of times individuals will meet to discuss codes, and strategies to ensure the validity and reliability of the data throughout the coding process. 
The team should also decide whether software such as MAXQDA, NVivo (QSR International), or any other qualitative software programs will be used to code video data or whether other alternatives, such as Microsoft Excel, will be used to organize coded information.</p><p>Fourth, integrating quantitative and qualitative coded data of nonverbal behaviors: Once all videos have been coded quantitatively and qualitatively, researchers will be tasked with determining the most appropriate ways to integrate the data. Given that nonverbal communication data can be analyzed quantitatively and qualitatively, we encourage researchers to use mixed methods analysis approaches [<xref ref-type="bibr" rid="ref1">1</xref>]. When using mixed methods analysis to analyze nonverbal data, consider the purpose of conducting a mixed methods analysis. These can include but are not limited to reducing data, using a joint display, data transformation, correlating data, merging verbal with nonverbal data to generate new codes or variables, comparing data, confirming data, or using findings based on the qualitative coded data of nonverbal behavior to inform the quantitative analysis [<xref ref-type="bibr" rid="ref1">1</xref>]. Determining the reason for conducting a mixed methods analysis of nonverbal data will depend upon the research questions and the study&#x2019;s objectives. Thus, researchers are encouraged to revisit their research questions and aims frequently to determine the most adequate approach for the mixed methods analysis of nonverbal data.</p><p>Fifth, presenting a visual display and developing meta-inferences: To visually depict the integration of quantitative and qualitative coding of nonverbal data and findings, a joint display can be used to represent this information and aid in generating meta-inferences. A joint display can be used to integrate qualitative and quantitative data, methods, or results using a visual representation [<xref ref-type="bibr" rid="ref25">25</xref>]. 
To identify meta-inferences, a researcher can match verbal and nonverbal domains to get a better understanding of the phenomenon. For example, if integrating the quantitative and qualitative data using matching, a researcher may code the type of verbal interactions and the context of verbal responses with the corresponding nonverbal behaviors to identify the meta-inferences. When developing meta-inferences, the context of the interaction will be particularly helpful. Analysts familiar with the coding scheme should work together to develop these meta-inferences. As a method of member check, researchers may share the developed meta-inferences with participants to ensure the validity of the findings. This will allow for participants to provide feedback, revisions, and additions to the proposed meta-inferences. We encourage researchers to use creativity to explore the best strategies to visually represent the data and findings, reinforcing thoughtful consideration of the generated meta-inferences.</p></sec><sec id="s4-2"><title>Limitations</title><p>This study has several limitations. First, the nonverbal analysis is based on a small, formative probabilistic sample of only 32 individuals. Results are not likely generalizable, as the goal was to develop methods of nonverbal analysis. A second limitation of this study is that data in the joint display are only displayed for 2 participants. However, we purposefully selected these participants to demonstrate how data can be integrated and analyzed using mixed methods research. Future research can incorporate more participant examples in a joint display to further highlight nuances of integration and analysis of quantitative and qualitative coding of nonverbal behaviors. Furthermore, we present and code several nonverbal behaviors, but we also recognize that this is not an exhaustive list of all nonverbal behaviors present during a patient-physician interaction. 
In addition, although we provided a codebook and defined each nonverbal behavior, there may be issues of coder drift. However, we aimed to capture relevant nonverbal behaviors in healthcare that appear related to empathic communication and explained these behaviors in the codebook to ensure behaviors were coded consistently. Yet, linking specific behaviors to emotions or underlying intent is tenuous and beyond the scope of this formative study. We encourage researchers to expand on these nonverbal codes by examining the literature in their respective fields to determine additional codes that can be incorporated.</p></sec><sec id="s4-3"><title>Conclusions</title><p>This study aimed to develop and pilot a mixed methods coding system to analyze verbal and nonverbal behaviors using video data to obtain a more comprehensive understanding of nonverbal behaviors. Although nonverbal communication may contradict verbal communication, being cognizant of nonverbal behaviors can help physicians better express their nonverbal communication responses [<xref ref-type="bibr" rid="ref4">4</xref>]. As a result, enhancing nonverbal communication skills in health settings can help to build trust, foster healing, and improve health outcomes [<xref ref-type="bibr" rid="ref4">4</xref>]. Furthermore, we demonstrated how using a mixed methods analysis approach to code nonverbal communication can be particularly beneficial when analyzing video data of patient-physician communication to expand on both verbal and nonverbal behaviors exhibited by each individual. Using mixed methods analysis, we developed a novel approach to analyzing nonverbal communication that builds on existing literature by providing a formative demonstration and proposing an integrated coding system. We aim for this integrated coding system to shed light for researchers analyzing nonverbal communication data across various subdisciplines within health sciences research. 
We encourage researchers to investigate creative paths for analyzing nonverbal communication data using video recordings through a mixed methods analysis approach that advances discussions on enhancing the patient-physician dyad across multiple clinical settings.</p></sec></sec></body><back><notes><sec><title>Data Availability</title><p>The datasets generated or analyzed during this study are not publicly available due to the protection of participants, given that the data are in video format.</p></sec></notes><fn-group><fn fn-type="conflict"><p>None declared.</p></fn></fn-group><glossary><title>Abbreviations</title><def-list><def-item><term id="abb1">FACS</term><def><p>Facial Action Coding System</p></def></def-item><def-item><term id="abb2">RIAS</term><def><p>Roter Interaction Analysis System</p></def></def-item></def-list></glossary><ref-list><title>References</title><ref id="ref1"><label>1</label><nlm-citation citation-type="book"><person-group person-group-type="author"><name name-style="western"><surname>Onwuegbuzie</surname><given-names>AJ</given-names> </name><name name-style="western"><surname>Abrams</surname><given-names>SS</given-names> </name></person-group><article-title>Nonverbal communication analysis as mixed analysis</article-title><source>The Routledge Reviewer&#x2019;s Guide to Mixed Methods Analysis</source><year>2021</year><publisher-name>Routledge</publisher-name><pub-id pub-id-type="doi">10.4324/9780203729434</pub-id></nlm-citation></ref><ref id="ref2"><label>2</label><nlm-citation citation-type="book"><person-group person-group-type="author"><name name-style="western"><surname>Purnell</surname><given-names>L</given-names> </name></person-group><person-group person-group-type="editor"><name name-style="western"><surname>Marty</surname><given-names>DM</given-names> </name><name name-style="western"><surname>Pacquiao</surname><given-names>D</given-names> </name><name name-style="western"><surname>Purnell</surname><given-names>L</given-names> 
</name></person-group><article-title>Cross cultural communication: verbal and non-verbal communication, interpretation and translation</article-title><source>Global Applications of Culturally Competent Health Care: Guidelines for Practice</source><year>2018</year><publisher-name>Springer International Publishing</publisher-name><fpage>132</fpage><lpage>142</lpage><pub-id pub-id-type="doi">10.1007/978-3-319-69332-3_14</pub-id></nlm-citation></ref><ref id="ref3"><label>3</label><nlm-citation citation-type="book"><person-group person-group-type="editor"><name name-style="western"><surname>Hall</surname><given-names>JA</given-names> </name><name name-style="western"><surname>Knapp</surname><given-names>ML</given-names> </name></person-group><source>Nonverbal Communication</source><year>2013</year><publisher-name>De Gruyter, Inc</publisher-name><pub-id pub-id-type="doi">10.1515/9783110238150</pub-id></nlm-citation></ref><ref id="ref4"><label>4</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Dalonges</surname><given-names>DA</given-names> </name><name name-style="western"><surname>Fried</surname><given-names>JL</given-names> </name></person-group><article-title>Creating immediacy using verbal and nonverbal methods</article-title><source>J Dent Hyg</source><year>2016</year><month>08</month><volume>90</volume><issue>4</issue><fpage>221</fpage><lpage>225</lpage><pub-id pub-id-type="medline">27551142</pub-id></nlm-citation></ref><ref id="ref5"><label>5</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Mast</surname><given-names>MS</given-names> </name></person-group><article-title>On the importance of nonverbal communication in the physician-patient interaction</article-title><source>Patient Educ Couns</source><year>2007</year><month>08</month><volume>67</volume><issue>3</issue><fpage>315</fpage><lpage>318</lpage><pub-id 
pub-id-type="doi">10.1016/j.pec.2007.03.005</pub-id><pub-id pub-id-type="medline">17478072</pub-id></nlm-citation></ref><ref id="ref6"><label>6</label><nlm-citation citation-type="book"><person-group person-group-type="author"><name name-style="western"><surname>Robinson</surname><given-names>JD</given-names> </name></person-group><article-title>Nonverbal communication and physician-patient interaction: review and new directions</article-title><source>The Sage Handbook of Nonverbal Communication</source><year>2006</year><publisher-name>Sage Publications, Inc</publisher-name><fpage>437</fpage><lpage>459</lpage><pub-id pub-id-type="doi">10.4135/9781412976152.n23</pub-id></nlm-citation></ref><ref id="ref7"><label>7</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Duffy</surname><given-names>FD</given-names> </name><name name-style="western"><surname>Gordon</surname><given-names>GH</given-names> </name><name name-style="western"><surname>Whelan</surname><given-names>G</given-names> </name><etal/></person-group><article-title>Assessing competence in communication and interpersonal skills: the Kalamazoo II report</article-title><source>Acad Med</source><year>2004</year><month>06</month><volume>79</volume><issue>6</issue><fpage>495</fpage><lpage>507</lpage><pub-id pub-id-type="doi">10.1097/00001888-200406000-00002</pub-id><pub-id pub-id-type="medline">15165967</pub-id></nlm-citation></ref><ref id="ref8"><label>8</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Hillen</surname><given-names>MA</given-names> </name><name name-style="western"><surname>Mitchell</surname><given-names>KAR</given-names> </name><name name-style="western"><surname>Schouten</surname><given-names>BC</given-names> </name><name name-style="western"><surname>Cyrus</surname><given-names>JW</given-names> </name><name 
name-style="western"><surname>Brown</surname><given-names>RF</given-names> </name><name name-style="western"><surname>Pieterse</surname><given-names>AH</given-names> </name></person-group><article-title>Systematic review of observational coding systems to assess patient-healthcare professional interactions</article-title><source>Patient Educ Couns</source><year>2025</year><month>06</month><volume>135</volume><fpage>108718</fpage><pub-id pub-id-type="doi">10.1016/j.pec.2025.108718</pub-id><pub-id pub-id-type="medline">40037145</pub-id></nlm-citation></ref><ref id="ref9"><label>9</label><nlm-citation citation-type="book"><person-group person-group-type="author"><name name-style="western"><surname>Harrigan</surname><given-names>JA</given-names> </name></person-group><article-title>Methodology: coding and studying nonverbal behavior</article-title><source>Nonverbal Communication Handbooks of Communication Science</source><year>2013</year><publisher-name>De Gruyter Mouton</publisher-name><fpage>35</fpage><lpage>68</lpage><pub-id pub-id-type="doi">10.1515/9783110238150.35</pub-id></nlm-citation></ref><ref id="ref10"><label>10</label><nlm-citation citation-type="book"><person-group person-group-type="author"><name name-style="western"><surname>Ekman</surname><given-names>P</given-names> </name><name name-style="western"><surname>Friesen</surname><given-names>WV</given-names> </name></person-group><source>Facial Action Coding System</source><year>1978</year><publisher-name>Consulting Psychologists Press</publisher-name><pub-id pub-id-type="doi">10.1037/t27734-000</pub-id></nlm-citation></ref><ref id="ref11"><label>11</label><nlm-citation citation-type="book"><person-group person-group-type="editor"><name name-style="western"><surname>Ekman</surname><given-names>P</given-names> </name><name name-style="western"><surname>Rosenberg</surname><given-names>EL</given-names> </name></person-group><source>What the Face Reveals: Basic and Applied Studies of Spontaneous Expression 
Using the Facial Action Coding System (FACS)</source><year>1997</year><publisher-name>Oxford University Press</publisher-name><pub-id pub-id-type="doi">10.1093/oso/9780195104462.001.0001</pub-id></nlm-citation></ref><ref id="ref12"><label>12</label><nlm-citation citation-type="book"><person-group person-group-type="author"><name name-style="western"><surname>Cohn</surname><given-names>JF</given-names> </name><name name-style="western"><surname>Ambadar</surname><given-names>Z</given-names> </name><name name-style="western"><surname>Ekman</surname><given-names>P</given-names> </name></person-group><article-title>Observer-based measurement of facial expression with the facial action coding system</article-title><source>Handbook of Emotion Elicitation and Assessment</source><year>2007</year><publisher-name>Oxford University Press</publisher-name><fpage>203</fpage><lpage>221</lpage><pub-id pub-id-type="doi">10.1093/oso/9780195169157.003.0014</pub-id></nlm-citation></ref><ref id="ref13"><label>13</label><nlm-citation citation-type="book"><person-group person-group-type="author"><name name-style="western"><surname>Pennington</surname><given-names>DR</given-names> </name></person-group><article-title>Coding of non-text data</article-title><source>The SAGE Handbook of Social Media Research Methods</source><year>2017</year><fpage>232</fpage><lpage>251</lpage><pub-id pub-id-type="doi">10.4135/9781473983847.n15</pub-id></nlm-citation></ref><ref id="ref14"><label>14</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Parameswaran</surname><given-names>UD</given-names> </name><name name-style="western"><surname>Ozawa-Kirk</surname><given-names>JL</given-names> </name><name name-style="western"><surname>Latendresse</surname><given-names>G</given-names> </name></person-group><article-title>To live (code) or to not: a new method for coding in qualitative research</article-title><source>Qual Soc 
Work</source><year>2020</year><month>07</month><volume>19</volume><issue>4</issue><fpage>630</fpage><lpage>644</lpage><pub-id pub-id-type="doi">10.1177/1473325019840394</pub-id></nlm-citation></ref><ref id="ref15"><label>15</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Roter</surname><given-names>D</given-names> </name><name name-style="western"><surname>Larson</surname><given-names>S</given-names> </name></person-group><article-title>The Roter Interaction Analysis System (RIAS): utility and flexibility for analysis of medical interactions</article-title><source>Patient Educ Couns</source><year>2002</year><month>04</month><volume>46</volume><issue>4</issue><fpage>243</fpage><lpage>251</lpage><pub-id pub-id-type="doi">10.1016/s0738-3991(02)00012-5</pub-id><pub-id pub-id-type="medline">11932123</pub-id></nlm-citation></ref><ref id="ref16"><label>16</label><nlm-citation citation-type="book"><person-group person-group-type="author"><name name-style="western"><surname>Creswell</surname><given-names>JW</given-names> </name><name name-style="western"><surname>Plano Clark</surname><given-names>VL</given-names> </name></person-group><source>Designing and Conducting Mixed Methods Research</source><year>2018</year><edition>3</edition><publisher-name>Sage</publisher-name><pub-id pub-id-type="other">1483344371</pub-id></nlm-citation></ref><ref id="ref17"><label>17</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Sandelowski</surname><given-names>M</given-names> </name><name name-style="western"><surname>Voils</surname><given-names>CI</given-names> </name><name name-style="western"><surname>Knafl</surname><given-names>G</given-names> </name></person-group><article-title>On quantitizing</article-title><source>J Mix Methods 
Res</source><year>2009</year><month>07</month><day>1</day><volume>3</volume><issue>3</issue><fpage>208</fpage><lpage>222</lpage><pub-id pub-id-type="doi">10.1177/1558689809334210</pub-id><pub-id pub-id-type="medline">19865603</pub-id></nlm-citation></ref><ref id="ref18"><label>18</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Onwuegbuzie</surname><given-names>AJ</given-names> </name><name name-style="western"><surname>Leech</surname><given-names>NL</given-names> </name></person-group><article-title>On qualitizing</article-title><source>Int J Multiple Res Approaches</source><year>2019</year><month>08</month><day>31</day><volume>11</volume><issue>2</issue><fpage>98</fpage><lpage>131</lpage><pub-id pub-id-type="doi">10.29034/ijmra.v11n2editorial2</pub-id></nlm-citation></ref><ref id="ref19"><label>19</label><nlm-citation citation-type="book"><person-group person-group-type="author"><name name-style="western"><surname>Salda&#x00F1;a</surname><given-names>J</given-names> </name></person-group><source>The Coding Manual for Qualitative Researchers</source><year>2021</year><edition>4</edition><publisher-name>SAGE</publisher-name><pub-id pub-id-type="other">1529731747</pub-id></nlm-citation></ref><ref id="ref20"><label>20</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Kron</surname><given-names>FW</given-names> </name><name name-style="western"><surname>Fetters</surname><given-names>MD</given-names> </name><name name-style="western"><surname>Scerbo</surname><given-names>MW</given-names> </name><etal/></person-group><article-title>Using a computer simulation for teaching communication skills: a blinded multisite mixed methods randomized controlled trial</article-title><source>Patient Educ Couns</source><year>2017</year><month>04</month><volume>100</volume><issue>4</issue><fpage>748</fpage><lpage>759</lpage><pub-id 
pub-id-type="doi">10.1016/j.pec.2016.10.024</pub-id><pub-id pub-id-type="medline">27939846</pub-id></nlm-citation></ref><ref id="ref21"><label>21</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Ambady</surname><given-names>N</given-names> </name><name name-style="western"><surname>Rosenthal</surname><given-names>R</given-names> </name></person-group><article-title>Thin slices of expressive behavior as predictors of interpersonal consequences: a meta-analysis</article-title><source>Psychol Bull</source><year>1992</year><volume>111</volume><issue>2</issue><fpage>256</fpage><lpage>274</lpage><pub-id pub-id-type="doi">10.1037/0033-2909.111.2.256</pub-id></nlm-citation></ref><ref id="ref22"><label>22</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Perez</surname><given-names>A</given-names> </name><name name-style="western"><surname>Fetters</surname><given-names>MD</given-names> </name><name name-style="western"><surname>Creswell</surname><given-names>JW</given-names> </name><etal/></person-group><article-title>Enhancing nonverbal communication through virtual human technology: protocol for a mixed methods study</article-title><source>JMIR Res Protoc</source><year>2023</year><month>06</month><day>6</day><volume>12</volume><issue>1</issue><fpage>e46601</fpage><pub-id pub-id-type="doi">10.2196/46601</pub-id><pub-id pub-id-type="medline">37279041</pub-id></nlm-citation></ref><ref id="ref23"><label>23</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Goetz</surname><given-names>JL</given-names> </name><name name-style="western"><surname>Keltner</surname><given-names>D</given-names> </name><name name-style="western"><surname>Simon-Thomas</surname><given-names>E</given-names> </name></person-group><article-title>Compassion: an evolutionary analysis and 
empirical review</article-title><source>Psychol Bull</source><year>2010</year><month>05</month><volume>136</volume><issue>3</issue><fpage>351</fpage><lpage>374</lpage><pub-id pub-id-type="doi">10.1037/a0018807</pub-id><pub-id pub-id-type="medline">20438142</pub-id></nlm-citation></ref><ref id="ref24"><label>24</label><nlm-citation citation-type="web"><article-title>VERBI software</article-title><source>MAXQDA 2022</source><year>2021</year><access-date>2025-06-27</access-date><comment><ext-link ext-link-type="uri" xlink:href="https://www.maxqda.com/">https://www.maxqda.com/</ext-link></comment></nlm-citation></ref><ref id="ref25"><label>25</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Guetterman</surname><given-names>TC</given-names> </name><name name-style="western"><surname>F&#x00E0;bregues</surname><given-names>S</given-names> </name><name name-style="western"><surname>Sakakibara</surname><given-names>R</given-names> </name></person-group><article-title>Visuals in joint displays to represent integration in mixed methods research: a methodological review</article-title><source>Methods Psychol</source><year>2021</year><month>12</month><volume>5</volume><fpage>100080</fpage><pub-id pub-id-type="doi">10.1016/j.metip.2021.100080</pub-id></nlm-citation></ref><ref id="ref26"><label>26</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Kraft-Todd</surname><given-names>GT</given-names> </name><name name-style="western"><surname>Reinero</surname><given-names>DA</given-names> </name><name name-style="western"><surname>Kelley</surname><given-names>JM</given-names> </name><name name-style="western"><surname>Heberlein</surname><given-names>AS</given-names> </name><name name-style="western"><surname>Baer</surname><given-names>L</given-names> </name><name name-style="western"><surname>Riess</surname><given-names>H</given-names> 
</name></person-group><article-title>Empathic nonverbal behavior increases ratings of both warmth and competence in a medical context</article-title><source>PLoS ONE</source><year>2017</year><volume>12</volume><issue>5</issue><fpage>e0177758</fpage><pub-id pub-id-type="doi">10.1371/journal.pone.0177758</pub-id><pub-id pub-id-type="medline">28505180</pub-id></nlm-citation></ref><ref id="ref27"><label>27</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Brugel</surname><given-names>S</given-names> </name><name name-style="western"><surname>Postma-Nilsenov&#x00E1;</surname><given-names>M</given-names> </name><name name-style="western"><surname>Tates</surname><given-names>K</given-names> </name></person-group><article-title>The link between perception of clinical empathy and nonverbal behavior: the effect of a doctor&#x2019;s gaze and body orientation</article-title><source>Patient Educ Couns</source><year>2015</year><month>10</month><volume>98</volume><issue>10</issue><fpage>1260</fpage><lpage>1265</lpage><pub-id pub-id-type="doi">10.1016/j.pec.2015.08.007</pub-id><pub-id pub-id-type="medline">26320820</pub-id></nlm-citation></ref></ref-list><app-group><supplementary-material id="app1"><label>Multimedia Appendix 1</label><p>Codebook excerpt derived from Facial Action Coding System (FACS) and inductive qualitative coding.</p><media xlink:href="formative_v9i1e59328_app1.docx" xlink:title="DOCX File, 18 KB"/></supplementary-material></app-group></back></article>