<?xml version="1.0" encoding="UTF-8"?><!DOCTYPE article PUBLIC "-//NLM//DTD Journal Publishing DTD v2.0 20040830//EN" "journalpublishing.dtd"><article xmlns:mml="http://www.w3.org/1998/Math/MathML" xmlns:xlink="http://www.w3.org/1999/xlink" dtd-version="2.0" xml:lang="en" article-type="research-article"><front><journal-meta><journal-id journal-id-type="nlm-ta">JMIR Form Res</journal-id><journal-id journal-id-type="publisher-id">formative</journal-id><journal-id journal-id-type="index">27</journal-id><journal-title>JMIR Formative Research</journal-title><abbrev-journal-title>JMIR Form Res</abbrev-journal-title><issn pub-type="epub">2561-326X</issn><publisher><publisher-name>JMIR Publications</publisher-name><publisher-loc>Toronto, Canada</publisher-loc></publisher></journal-meta><article-meta><article-id pub-id-type="publisher-id">v10i1e87054</article-id><article-id pub-id-type="doi">10.2196/87054</article-id><article-categories><subj-group subj-group-type="heading"><subject>Original Paper</subject></subj-group></article-categories><title-group><article-title>A Conversational Platform (Okaya) for Multimodal Digital Biomarkers of Fatigue, Cognition, and Mental Health: Feasibility Observational Study</article-title></title-group><contrib-group><contrib contrib-type="author" corresp="yes"><name name-style="western"><surname>So</surname><given-names>Matthew</given-names></name><degrees>BS, MS</degrees><xref ref-type="aff" rid="aff1">1</xref></contrib><contrib contrib-type="author"><name name-style="western"><surname>Sobolev</surname><given-names>Michael</given-names></name><degrees>PhD</degrees><xref ref-type="aff" rid="aff2">2</xref></contrib><contrib contrib-type="author"><name name-style="western"><surname>Menvielle</surname><given-names>Gregory</given-names></name><degrees>AA, BA, MA</degrees><xref ref-type="aff" rid="aff1">1</xref></contrib></contrib-group><aff id="aff1"><institution>Okaya</institution><addr-line>540 Mohawk 
Drive</addr-line><addr-line>Boulder</addr-line><addr-line>CO</addr-line><country>United States</country></aff><aff id="aff2"><institution>Cornell Tech</institution><addr-line>New York</addr-line><addr-line>NY</addr-line><country>United States</country></aff><contrib-group><contrib contrib-type="editor"><name name-style="western"><surname>Schwartz</surname><given-names>Amy</given-names></name></contrib><contrib contrib-type="editor"><name name-style="western"><surname>Balcarras</surname><given-names>Matthew</given-names></name></contrib></contrib-group><contrib-group><contrib contrib-type="reviewer"><name name-style="western"><surname>Rodriguez-Conde</surname><given-names>Ivan</given-names></name></contrib></contrib-group><author-notes><corresp>Correspondence to Matthew So, BS, MS, Okaya, 540 Mohawk Drive, Boulder, CO, 80303, United States, 1 9259841028; <email>matthew@okaya.me</email></corresp></author-notes><pub-date pub-type="collection"><year>2026</year></pub-date><pub-date pub-type="epub"><day>1</day><month>4</month><year>2026</year></pub-date><volume>10</volume><elocation-id>e87054</elocation-id><history><date date-type="received"><day>03</day><month>11</month><year>2025</year></date><date date-type="rev-recd"><day>27</day><month>02</month><year>2026</year></date><date date-type="accepted"><day>27</day><month>02</month><year>2026</year></date></history><copyright-statement>&#x00A9; Matthew So, Michael Sobolev, Gregory Menvielle. Originally published in JMIR Formative Research (<ext-link ext-link-type="uri" xlink:href="https://formative.jmir.org">https://formative.jmir.org</ext-link>), 1.4.2026. 
</copyright-statement><copyright-year>2026</copyright-year><license license-type="open-access" xlink:href="https://creativecommons.org/licenses/by/4.0/"><p>This is an open-access article distributed under the terms of the Creative Commons Attribution License (<ext-link ext-link-type="uri" xlink:href="https://creativecommons.org/licenses/by/4.0/">https://creativecommons.org/licenses/by/4.0/</ext-link>), which permits unrestricted use, distribution, and reproduction in any medium, provided the original work, first published in JMIR Formative Research, is properly cited. The complete bibliographic information, a link to the original publication on <ext-link ext-link-type="uri" xlink:href="https://formative.jmir.org">https://formative.jmir.org</ext-link>, as well as this copyright and license information must be included.</p></license><self-uri xlink:type="simple" xlink:href="https://formative.jmir.org/2026/1/e87054"/><abstract><sec><title>Background</title><p>Collection of multimodal data (video, audio, and text) can yield digital biomarkers relevant to mental health, fatigue, and cognition. However, the feasibility and signal characteristics in operational populations remain underexplored.</p></sec><sec><title>Objective</title><p>The objectives of this study were to (1) extract an evidence-based library of vision, speech, and language features; (2) assess the feasibility of a fully remote conversational platform (Okaya) for collecting analyzable multimodal data; and (3) conduct preliminary signal checks for depression, fatigue, and cognition.</p></sec><sec sec-type="methods"><title>Methods</title><p>Participants were recruited from the US Air Force and US Space Force. All participants completed the Okaya check-in, which included a voice conversation with a large language model. A total of 66 visual, acoustic, and text features were extracted from each interaction between the participant and the large language model. 
For validation purposes, the study also collected measures of depression (Patient Health Questionnaire&#x2013;9), fatigue (Cancer Fatigue Scale), and cognition (trail making test). We evaluated the feasibility of the platform and correlation between the extracted features and the validated assessments.</p></sec><sec sec-type="results"><title>Results</title><p>A total of 8 unique participants contributed 62 sessions over a period from March 6, 2025, to August 6, 2025. The platform was deemed feasible as 6 of the 8 participants opted to complete more than one session, and the 3 participants who provided feedback reported high overall experience and usability. From the data perspective, preliminary correlations produced significant results for multiple potential digital biomarkers, including (1) pitch (<italic>P</italic>=.047), volume SD (<italic>P</italic>=.04), volume slope (<italic>P</italic>=.04), automated readability index complexity (<italic>P</italic>=.047), Flesch-Kincaid complexity (<italic>P</italic>=.04), and Gunning Fog complexity (<italic>P</italic>=.04) for depression; (2) pitch (<italic>P</italic>=.009), volume SD (<italic>P</italic>=.007), volume slope (<italic>P</italic>=.02), average F2 formant frequency (<italic>P</italic>=.03), Gunning Fog complexity (<italic>P</italic>=.049), and eyelid droop (<italic>P</italic>=.047) for fatigue; and (3) shimmer (<italic>P</italic>=.03) for cognition. We also observed how features varied over time among participants with multiple sessions.</p></sec><sec sec-type="conclusions"><title>Conclusions</title><p>The conversational and artificial intelligence&#x2013;enabled platform was feasible among an operational sample as a method to collect multimodal data correlated with depression, fatigue, and cognition. 
These results align with those for previously discovered digital biomarkers of mental health, fatigue, and cognition and inform the development of personalized models for each user while detecting anomalies in a remote monitoring setting.</p></sec></abstract><kwd-group><kwd>digital biomarkers</kwd><kwd>multimodal sensing</kwd><kwd>computer vision</kwd><kwd>speech acoustics</kwd><kwd>natural language processing</kwd><kwd>artificial intelligence</kwd><kwd>AI</kwd></kwd-group></article-meta></front><body><sec id="s1" sec-type="intro"><title>Introduction</title><p>Mental health, fatigue, and cognitive functioning are critical to overall well-being and operational performance, yet their assessment often relies on intermittent self-report or clinician-administered instruments. Such methods, while clinically validated, can be burdensome, subjective, and limited in their ability to capture dynamic changes over time [<xref ref-type="bibr" rid="ref1">1</xref>-<xref ref-type="bibr" rid="ref9">9</xref>]. Mobile health technology provides an alternative solution due to the ability of apps and sensors to collect high-fidelity and high-frequency data pertaining to activity, behavior, symptoms, and cognition [<xref ref-type="bibr" rid="ref10">10</xref>-<xref ref-type="bibr" rid="ref12">12</xref>]. With increasing demands for continuous and remote monitoring&#x2014;especially in occupational and high-performance settings&#x2014;digital biomarkers have emerged as a promising avenue for objective and scalable assessment of psychological and cognitive states [<xref ref-type="bibr" rid="ref13">13</xref>-<xref ref-type="bibr" rid="ref16">16</xref>].</p><p>Recent advances in computer vision, speech acoustics, and natural language processing have enabled the extraction of behavioral and physiological signals from everyday digital interactions. 
Multimodal sensing that integrates video, audio, and language data provides an opportunity to quantify indicators of affect, fatigue, and cognitive load with greater ecological validity than traditional laboratory tests [<xref ref-type="bibr" rid="ref11">11</xref>,<xref ref-type="bibr" rid="ref17">17</xref>]. Prior research has demonstrated associations between acoustic features (eg, pitch variation [<xref ref-type="bibr" rid="ref18">18</xref>-<xref ref-type="bibr" rid="ref23">23</xref>], jitter [<xref ref-type="bibr" rid="ref18">18</xref>,<xref ref-type="bibr" rid="ref19">19</xref>,<xref ref-type="bibr" rid="ref24">24</xref>-<xref ref-type="bibr" rid="ref26">26</xref>], and spectral harmonicity [<xref ref-type="bibr" rid="ref25">25</xref>-<xref ref-type="bibr" rid="ref28">28</xref>]), facial expression dynamics (eg, eye gaze [<xref ref-type="bibr" rid="ref29">29</xref>-<xref ref-type="bibr" rid="ref32">32</xref>] and microexpressions [<xref ref-type="bibr" rid="ref33">33</xref>]), and linguistic markers (eg, sentiment [<xref ref-type="bibr" rid="ref28">28</xref>,<xref ref-type="bibr" rid="ref34">34</xref>,<xref ref-type="bibr" rid="ref35">35</xref>], complexity [<xref ref-type="bibr" rid="ref28">28</xref>,<xref ref-type="bibr" rid="ref36">36</xref>], and emotional valence [<xref ref-type="bibr" rid="ref28">28</xref>,<xref ref-type="bibr" rid="ref35">35</xref>,<xref ref-type="bibr" rid="ref37">37</xref>-<xref ref-type="bibr" rid="ref41">41</xref>]) with symptoms of depression, stress, and cognitive decline. However, most existing work has been conducted in tightly controlled environments, and less is known about the feasibility of collecting such data in naturalistic or operational contexts [<xref ref-type="bibr" rid="ref42">42</xref>].</p><p>Conversational platforms, especially those powered by large language models, introduce new opportunities for naturalistic data capture [<xref ref-type="bibr" rid="ref43">43</xref>-<xref ref-type="bibr" rid="ref46">46</xref>]. 
By engaging users through dialogue, these systems can simultaneously elicit and record multimodal signals&#x2014;speech, facial behavior, and language&#x2014;while maintaining a familiar and low-burden interaction format [<xref ref-type="bibr" rid="ref47">47</xref>-<xref ref-type="bibr" rid="ref52">52</xref>]. Such interfaces align with the increasing integration of conversational artificial intelligence (AI) into health, wellness, and performance domains, offering a pathway toward longitudinal, user-centered monitoring of mental and cognitive health. Nevertheless, empirical evaluation of these systems&#x2019; feasibility and usability remains limited, particularly in real-world populations such as operational and occupational groups [<xref ref-type="bibr" rid="ref53">53</xref>,<xref ref-type="bibr" rid="ref54">54</xref>]. These groups can also be more susceptible to experiencing mental and cognitive health challenges due to their job functions [<xref ref-type="bibr" rid="ref55">55</xref>,<xref ref-type="bibr" rid="ref56">56</xref>].</p><p>The objective of this study was to evaluate the feasibility of a fully remote conversational platform, Okaya, for collecting analyzable multimodal data (video, audio, and text) in an operational population from the US Air Force (USAF) and Space Force. Specifically, we sought to determine whether participants would engage with the platform across multiple sessions and whether the system could reliably extract a comprehensive set of visual, acoustic, and linguistic features suitable for future digital biomarker modeling of depression, fatigue, and cognition. 
Finally, we present a case study for the design of a risk score based on a model of extracted digital biomarkers to illustrate the future potential of the platform.</p></sec><sec id="s2" sec-type="methods"><title>Methods</title><sec id="s2-1"><title>Study Population</title><p>This study was part of an existing Small Business Innovation Research Direct to Phase II contract with the USAF, and a pool of potential participants from the 71st Special Operations Squadron, provided by the USAF, was introduced to the Okaya platform as part of a pilot program. Participation was entirely optional. A total of 8 unique users participated in the study, contributing 62 sessions (median 2.5, range 1-38, IQR 1.5-7.5). Participants provided data during the period from March 6, 2025, to August 6, 2025.</p></sec><sec id="s2-2"><title>Ethical Considerations</title><p>Ethics approval for this study was obtained from Sterling Institutional Review Board (approval 12300-GMenvielle), and all participants signed an informed consent statement that specified anonymized access to research records, potential risks, and voluntary participation. No data were collected except for email addresses, which were required to access the Okaya platform. No demographic or sample composition information was collected due to requirements from the USAF. Participants received no compensation for taking part.</p></sec><sec id="s2-3"><title>Procedure and Data Collection</title><sec id="s2-3-1"><title>Overview</title><p>Completion of Okaya check-ins [<xref ref-type="bibr" rid="ref57">57</xref>] was entirely voluntary. Data collection took place on the Okaya website, and users were allowed to select the device they preferred to access the website with, such as smartphones, tablets, or laptops. The browser used to access the website was also free to select by the user. 
Once users had successfully acknowledged the consent statement and registered for an Okaya account, they could complete a workflow as described in <xref ref-type="fig" rid="figure1">Figure 1</xref>. Participants were asked to complete 3 assessments: the Patient Health Questionnaire&#x2013;9 (PHQ-9) [<xref ref-type="bibr" rid="ref58">58</xref>], Cancer Fatigue Scale (CFS) [<xref ref-type="bibr" rid="ref59">59</xref>], and trail making test (TMT) [<xref ref-type="bibr" rid="ref60">60</xref>].</p><fig position="float" id="figure1"><label>Figure 1.</label><caption><p>Illustration of the Okaya platform and data collection. The 6 steps for a successful sample collection include log-in, device checks, Patient Health Questionnaire&#x2013;9 (PHQ-9), Cancer Fatigue Scale (CFS), trail making test (TMT), and Okaya check-in.</p></caption><graphic alt-version="no" mimetype="image" position="float" xlink:type="simple" xlink:href="formative_v10i1e87054_fig01.png"/></fig><p>Upon successful completion of all 3 assessments, users were presented with a text and voice prompt from a large language model (using OpenAI&#x2019;s GPT-4o-mini model with a custom system prompt, referred to as &#x201C;Sanora&#x201D;), shown in <xref ref-type="fig" rid="figure2">Figure 2</xref>. Users were first required to meet the device and browser requirements, grant permission to access the microphone and camera, and meet an upload speed requirement of 2 Mb/s. Video was recorded continuously, and users could choose between webcam video, blurred video, a landmark representation of their face, an outline of their face, or a set of axes denoting the location of their face, designed to accommodate users with differing levels of comfort with being recorded. Although video was shown on the interface, raw video was not sent to our servers; instead, a set of facial landmarks was extracted via Google MediaPipe. 
These landmarks were automatically extracted using a proprietary machine learning model, and 478 landmarks were output, each with an x, y, and z coordinate, as well as 52 &#x201C;blendshapes,&#x201D; each representing various facial expressions. The check-in interface, as shown in <xref ref-type="fig" rid="figure2">Figure 2</xref>, included the text of Sanora&#x2019;s prompt at the top; the representation of the user&#x2019;s webcam video at the center; and a control button, either &#x201C;Record,&#x201D; &#x201C;Stop,&#x201D; or &#x201C;Continue,&#x201D; at the bottom of the screen.</p><fig position="float" id="figure2"><label>Figure 2.</label><caption><p>Screenshots of the Okaya check-in. Permission was obtained from the user depicted.</p></caption><graphic alt-version="no" mimetype="image" position="float" xlink:type="simple" xlink:href="formative_v10i1e87054_fig02.png"/></fig><p>After passing device checks and clicking &#x201C;I&#x2019;m ready to check-in,&#x201D; Sanora would generate a prompt for the user to respond to asking about their general mental well-being, emotions, or experiences they had had recently. Upon clicking &#x201C;Record&#x201D; to record a response to Sanora, the user&#x2019;s microphone started recording, and an automatic transcription service from Amazon Transcribe started transcribing their speech until the user clicked &#x201C;Stop&#x201D; and stopped recording. Both the audio recording and transcribed text were sent back to the Okaya servers. Sanora then used the transcribed text to generate a response. This process continued for 4 turns (5 messages from Sanora and 4 responses from the participant). Upon conversation completion, the user clicked &#x201C;Continue&#x201D; to submit the session for analysis, after which a completion screen would indicate that the results were being processed. 
They would then be redirected to the research dashboard.</p><p>All back-end processing took place on Amazon Web Services infrastructure, with communication between the website (front end) and back end occurring via Representational State Transfer application programming interface calls. Data were also stored on Amazon Web Services infrastructure.</p></sec><sec id="s2-3-2"><title>Feature Extraction</title><p>From the landmarks, audio, and transcribed text, we extracted 66 features based on the literature. A full list and descriptions of each implemented feature can be found in <xref ref-type="supplementary-material" rid="app1">Multimedia Appendix 1</xref>, whereas summaries are shown in <xref ref-type="table" rid="table1">Tables 1</xref><xref ref-type="table" rid="table2"/>-<xref ref-type="table" rid="table3">3</xref>.</p><table-wrap id="t1" position="float"><label>Table 1.</label><caption><p>Mapping features for depression.</p></caption><table id="table1" frame="hsides" rules="groups"><thead><tr><td align="left" valign="bottom">Feature</td><td align="left" valign="bottom">Theoretical reasoning</td><td align="left" valign="bottom">References</td><td align="left" valign="bottom">Implemented features</td></tr></thead><tbody><tr><td align="left" valign="top" colspan="4">Visual features</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Flat affect</td><td align="left" valign="top">A predominant lack of facial expressiveness can be observed in some individuals with depression. 
This might manifest as a &#x201C;blank&#x201D; or &#x201C;emotionless&#x201D; appearance even in situations that would typically provoke an emotional response.</td><td align="left" valign="top">[<xref ref-type="bibr" rid="ref33">33</xref>]</td><td align="left" valign="top">Affect_measure</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Facial expression</td><td align="left" valign="top">&#x201C;Sad&#x201D; facial expressions, such as less frequent smiling, have been associated with the severity of depression.</td><td align="left" valign="top">[<xref ref-type="bibr" rid="ref31">31</xref>,<xref ref-type="bibr" rid="ref40">40</xref>,<xref ref-type="bibr" rid="ref61">61</xref>-<xref ref-type="bibr" rid="ref63">63</xref>]</td><td align="left" valign="top">Mouth_curvature and eyebrow_droop</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Reduced eye contact</td><td align="left" valign="top">People with depression might avoid making eye contact, which can be a sign of feelings of worthlessness or guilt.</td><td align="left" valign="top">[<xref ref-type="bibr" rid="ref29">29</xref>-<xref ref-type="bibr" rid="ref32">32</xref>]</td><td align="left" valign="top">Gaze_down_dist, gaze_x_dist, and gaze_y_dist</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Slow movements</td><td align="left" valign="top">Psychomotor retardation can manifest as slowed facial movements or reactions in those with depression.</td><td align="left" valign="top">[<xref ref-type="bibr" rid="ref61">61</xref>]</td><td align="left" valign="top">Movement_speech_measure</td></tr><tr><td align="left" valign="top" colspan="4">Audio features</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Monotone 
speech</td><td align="left" valign="top">Individuals with depression might exhibit a lack of variability in pitch, leading to speech that sounds monotonous.</td><td align="left" valign="top">[<xref ref-type="bibr" rid="ref18">18</xref>-<xref ref-type="bibr" rid="ref22">22</xref>,<xref ref-type="bibr" rid="ref27">27</xref>]</td><td align="left" valign="top">Pitch and pitch_std</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Reduced speech volume</td><td align="left" valign="top">Speaking more softly or with less projection than usual can be indicative of depression.</td><td align="left" valign="top">[<xref ref-type="bibr" rid="ref18">18</xref>,<xref ref-type="bibr" rid="ref22">22</xref>,<xref ref-type="bibr" rid="ref64">64</xref>-<xref ref-type="bibr" rid="ref67">67</xref>]</td><td align="left" valign="top">Vol, vol_std, and ealvi</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Reduced vocal prosody</td><td align="left" valign="top">A decrease in the rhythmic and melodic aspect of speech can be indicative of depression.</td><td align="left" valign="top">[<xref ref-type="bibr" rid="ref18">18</xref>-<xref ref-type="bibr" rid="ref20">20</xref>,<xref ref-type="bibr" rid="ref24">24</xref>,<xref ref-type="bibr" rid="ref25">25</xref>,<xref ref-type="bibr" rid="ref28">28</xref>,<xref ref-type="bibr" rid="ref64">64</xref>,<xref ref-type="bibr" rid="ref68">68</xref>]</td><td align="left" valign="top">Jitter, shimmer, timbre, and formant</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Long pauses</td><td align="left" valign="top">Depression might result in increased pauses between words or sentences, reflecting hesitancy, slowed thinking, or difficulty in organizing thoughts.</td><td align="left" valign="top">[<xref ref-type="bibr" 
rid="ref18">18</xref>,<xref ref-type="bibr" rid="ref21">21</xref>,<xref ref-type="bibr" rid="ref68">68</xref>,<xref ref-type="bibr" rid="ref69">69</xref>]</td><td align="left" valign="top">Audio_pauses</td></tr><tr><td align="left" valign="top" colspan="4">Transcript features</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Long pauses</td><td align="left" valign="top">Depression might result in increased pauses between words or sentences, reflecting hesitancy, slowed thinking, or difficulty in organizing thoughts.</td><td align="left" valign="top">[<xref ref-type="bibr" rid="ref18">18</xref>,<xref ref-type="bibr" rid="ref21">21</xref>,<xref ref-type="bibr" rid="ref68">68</xref>,<xref ref-type="bibr" rid="ref69">69</xref>]</td><td align="left" valign="top">Transcript_pauses</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Slow speech rate</td><td align="left" valign="top">People with depression might speak more slowly, potentially reflecting slowed cognitive processing.</td><td align="left" valign="top">[<xref ref-type="bibr" rid="ref22">22</xref>,<xref ref-type="bibr" rid="ref23">23</xref>,<xref ref-type="bibr" rid="ref68">68</xref>-<xref ref-type="bibr" rid="ref71">71</xref>]</td><td align="left" valign="top">Words_per_s</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Reduced responsiveness</td><td align="left" valign="top">A person with depression might be less verbally responsive in conversations, potentially taking longer to reply or offering shorter answers.</td><td align="left" valign="top">[<xref ref-type="bibr" rid="ref21">21</xref>,<xref ref-type="bibr" rid="ref22">22</xref>,<xref ref-type="bibr" rid="ref69">69</xref>,<xref ref-type="bibr" rid="ref70">70</xref>]</td><td align="left" valign="top">Response_latency and 
transcript_len</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Decreased complexity and length of speech</td><td align="left" valign="top">Speech in individuals with depression might be less complex in terms of vocabulary and sentence structure, and individuals might be less talkative overall.</td><td align="left" valign="top">[<xref ref-type="bibr" rid="ref28">28</xref>,<xref ref-type="bibr" rid="ref34">34</xref>-<xref ref-type="bibr" rid="ref37">37</xref>,<xref ref-type="bibr" rid="ref39">39</xref>,<xref ref-type="bibr" rid="ref40">40</xref>,<xref ref-type="bibr" rid="ref67">67</xref>]</td><td align="left" valign="top">Complexity, emotion_keyword_prop, and sentiment_score</td></tr></tbody></table></table-wrap><table-wrap id="t2" position="float"><label>Table 2.</label><caption><p>Mapping features for fatigue.</p></caption><table id="table2" frame="hsides" rules="groups"><thead><tr><td align="left" valign="bottom">Feature</td><td align="left" valign="bottom">Theoretical reasoning</td><td align="left" valign="bottom">References</td><td align="left" valign="bottom">Implemented features</td></tr></thead><tbody><tr><td align="left" valign="top" colspan="4">Visual features</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Droopy eyelids</td><td align="left" valign="top">The muscles around the eyes may begin to sag due to tiredness, causing the eyelids to droop.</td><td align="left" valign="top">[<xref ref-type="bibr" rid="ref72">72</xref>]</td><td align="left" valign="top">Eyelid_droop</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Decreased blink rate</td><td align="left" valign="top">Fatigue can lead to a reduced rate of blinking.</td><td align="left" valign="top">[<xref ref-type="bibr" rid="ref73">73</xref>-<xref ref-type="bibr" 
rid="ref77">77</xref>]</td><td align="left" valign="top">Blinks_per_s and blink_len</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Yawning</td><td align="left" valign="top">While yawning is a natural behavior, frequent yawning can be an overt sign of fatigue or drowsiness.</td><td align="left" valign="top">[<xref ref-type="bibr" rid="ref76">76</xref>,<xref ref-type="bibr" rid="ref78">78</xref>]</td><td align="left" valign="top">Yawns_per_s and yawn_len</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Reduced expressiveness</td><td align="left" valign="top">Fatigue might cause an individual to have fewer facial movements or expressions.</td><td align="left" valign="top">[<xref ref-type="bibr" rid="ref72">72</xref>]</td><td align="left" valign="top">Mouth_curvature</td></tr><tr><td align="left" valign="top" colspan="4">Audio features</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Decreased volume</td><td align="left" valign="top">A tired individual might speak more softly or with less energy than when they are well rested.</td><td align="left" valign="top">[<xref ref-type="bibr" rid="ref8">8</xref>,<xref ref-type="bibr" rid="ref79">79</xref>]</td><td align="left" valign="top">Vol and vol_std</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Decreased pitch variability</td><td align="left" valign="top">A fatigued voice might sound more monotonous, with less variation in pitch.</td><td align="left" valign="top">[<xref ref-type="bibr" rid="ref23">23</xref>,<xref ref-type="bibr" rid="ref25">25</xref>]</td><td align="left" valign="top">Pitch and pitch_std</td></tr><tr><td align="left" valign="top"><named-content 
content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Flatter voice profile</td><td align="left" valign="top">Decreased variation and width of the voice&#x2019;s spectral profile can be indicative of fatigue.</td><td align="left" valign="top">[<xref ref-type="bibr" rid="ref25">25</xref>,<xref ref-type="bibr" rid="ref79">79</xref>]</td><td align="left" valign="top">Timbre and formant</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Increased pauses</td><td align="left" valign="top">There might be more frequent and longer pauses between words or sentences, reflecting slowed cognitive processing or the need to gather thoughts.</td><td align="left" valign="top">[<xref ref-type="bibr" rid="ref23">23</xref>,<xref ref-type="bibr" rid="ref79">79</xref>,<xref ref-type="bibr" rid="ref80">80</xref>]</td><td align="left" valign="top">Audio_pauses</td></tr><tr><td align="left" valign="top" colspan="4">Transcript features</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Increased pauses</td><td align="left" valign="top">There might be more frequent and longer pauses between words or sentences, reflecting slowed cognitive processing or the need to gather thoughts.</td><td align="left" valign="top">[<xref ref-type="bibr" rid="ref23">23</xref>,<xref ref-type="bibr" rid="ref79">79</xref>,<xref ref-type="bibr" rid="ref80">80</xref>]</td><td align="left" valign="top">Transcript_pauses</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Slow speech rate</td><td align="left" valign="top">The overall rate of speech might decrease when a person is tired.</td><td align="left" valign="top">[<xref ref-type="bibr" rid="ref23">23</xref>,<xref ref-type="bibr" rid="ref80">80</xref>]</td><td align="left" valign="top">Words_per_s</td></tr><tr><td 
align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Shortened responses</td><td align="left" valign="top">Fatigued individuals might offer shorter answers or engage less in conversation.</td><td align="left" valign="top">[<xref ref-type="bibr" rid="ref23">23</xref>]</td><td align="left" valign="top">Transcript_len</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Impaired memory recall</td><td align="left" valign="top">Fatigued individuals might struggle to remember certain words, names, or details, leading to more frequent use of filler words such as &#x201C;um&#x201D; or &#x201C;uh.&#x201D;</td><td align="left" valign="top">[<xref ref-type="bibr" rid="ref80">80</xref>]</td><td align="left" valign="top">Hesitations_per_s</td></tr></tbody></table></table-wrap><table-wrap id="t3" position="float"><label>Table 3.</label><caption><p>Mapping features for attention and cognition.</p></caption><table id="table3" frame="hsides" rules="groups"><thead><tr><td align="left" valign="bottom">Feature</td><td align="left" valign="bottom">Theoretical reasoning</td><td align="left" valign="bottom">References</td><td align="left" valign="bottom">Implemented features</td></tr></thead><tbody><tr><td align="left" valign="top" colspan="4">Visual features</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Decreased blink rate</td><td align="left" valign="top">Blinking might increase when in periods of high attention demand during cognitive tasks.</td><td align="left" valign="top">[<xref ref-type="bibr" rid="ref81">81</xref>]</td><td align="left" valign="top">Blinks_per_s</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Eye movements</td><td align="left" valign="top">Eye movements outside the line of 
focus might be reduced when in a period of visual attention.</td><td align="left" valign="top">[<xref ref-type="bibr" rid="ref82">82</xref>]</td><td align="left" valign="top">Eye_movements_per_s</td></tr><tr><td align="left" valign="top" colspan="4">Audio features</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Volume</td><td align="left" valign="top">Speech volume can indicate differences in arousal and fatigue.</td><td align="left" valign="top">[<xref ref-type="bibr" rid="ref26">26</xref>]</td><td align="left" valign="top">Vol, vol_std, vol_range, and volume_slope</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Pitch and pitch variability</td><td align="left" valign="top">Pitch might be less varied during periods of low engagement or emotional expressivity.</td><td align="left" valign="top">[<xref ref-type="bibr" rid="ref26">26</xref>,<xref ref-type="bibr" rid="ref83">83</xref>]</td><td align="left" valign="top">Pitch and pitch_std</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Voice timbre</td><td align="left" valign="top">Voice might be breathier or more strained when lacking attention.</td><td align="left" valign="top">[<xref ref-type="bibr" rid="ref26">26</xref>]</td><td align="left" valign="top">Jitter, shimmer, and timbre</td></tr><tr><td align="left" valign="top" colspan="4">Transcript features</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Reduced responsiveness</td><td align="left" valign="top">Response latency has been shown to enable the detection of the Stroop effect (delay in reaction time).</td><td align="left" valign="top">[<xref ref-type="bibr" rid="ref84">84</xref>]</td><td align="left" 
valign="top">Response_latency</td></tr></tbody></table></table-wrap><p>The webcam capture rate varied depending on a number of factors, including the user&#x2019;s device and browser. As this capture rate was not controllable by the Okaya platform, the analysis had to handle landmark frames captured at nonregular times. Generally, the capture rate was 30 to 60 frames per second, but frames were usually not captured at a consistent rate. All sessions were interpolated to a capture rate of 30 frames per second, where each time step was a linear interpolation between the 2 neighboring frames. Features were extracted in Python (Python Software Foundation) using the libraries <italic>SciPy</italic> and <italic>NumPy</italic>.</p><p>Similarly, audio capture rate varied per user, so all audio recordings were resampled to a sampling frequency of 22,050 Hz. Features were extracted in Python using the libraries <italic>Librosa</italic>, <italic>NumPy</italic>, <italic>SciPy</italic>, <italic>Parselmouth</italic>, and <italic>Statsmodels</italic>.</p><p>Text features were extracted in Python using the libraries <italic>NumPy</italic>, <italic>Textacy</italic>, and <italic>Natural Language Toolkit</italic> (Team NLTK). The exact implementation of each feature extractor is omitted as the details of the Okaya platform are proprietary.</p><p>Several temporal aggregations were used to calculate feature values: averaged per second, per frame, per response, or per detected event. The exact aggregation and unit used for each feature are listed in <xref ref-type="supplementary-material" rid="app1">Multimedia Appendix 1</xref>.</p></sec></sec></sec><sec id="s3" sec-type="results"><title>Results</title><sec id="s3-1"><title>Engagement and Usability Results</title><p>Three participants provided qualitative and survey feedback following completion of their Okaya sessions. Full survey responses can be found in <xref ref-type="supplementary-material" rid="app2">Multimedia Appendix 2</xref>. 
Overall, preliminary user satisfaction with the conversational platform was given a rating of between 4 and 5 out of 5 on overall experience and usability among all participants who provided feedback (3/3). These participants described the platform as intuitive and easy to navigate and reported feeling adequately supported during setup and use. One participant highlighted the responsiveness of the support team and appreciated having direct access to human assistance when needed.</p><p>Motivations for engagement included curiosity about the technology and the opportunity for structured self-reflection during check-ins. One participant noted that the conversations prompted personal insight and self-awareness, describing the process as a valuable moment to &#x201C;intentionally reflect&#x201D; on life aspects discussed during the session. Common barriers to more frequent engagement included time constraints, insufficient or poorly timed reminders, and occasional concerns about privacy when speaking in shared environments. Notably, all participants who completed the survey expressed overall confidence in data privacy and platform security, each giving this aspect a rating of 5 out of 5.</p><p>Suggestions for improvement focused on enhancing convenience and personalization, including text-based reminders with direct log-in links, greater mobile accessibility, and shorter baseline tasks prior to the conversational check-in. Survey participants also expressed interest in receiving personalized summaries or progress visualizations of their responses. All the participants who provided feedback indicated interest in participating in future sessions using the Okaya platform, reinforcing the platform&#x2019;s acceptability and potential for longitudinal engagement within operational populations.</p></sec><sec id="s3-2"><title>Correlation Analysis</title><sec id="s3-2-1"><title>Overview</title><p>Pearson correlations and their associated significance are listed below. 
Although every participant was included in the dataset and most completed multiple sessions, we only took into account the earliest sample for each participant for the correlation analysis. The earliest sample was used to illustrate the use of an initial sample as a baseline, upon which personalized models can be built, illustrated in the case study below. We also observed low intraparticipant variability for each clinical measure, which hindered the application of more sophisticated statistical models at this stage. For each feature, outliers (defined as any measurement more than 2 SDs from the mean) were removed. The most significant features are plotted against the associated measures in <xref ref-type="fig" rid="figure3">Figure 3</xref>.</p><fig position="float" id="figure3"><label>Figure 3.</label><caption><p>Most significant features for each measure and their associated correlation. CFS: Cancer Fatigue Scale; PHQ-9: Patient Health Questionnaire&#x2013;9; TMT: trail making test.</p></caption><graphic alt-version="no" mimetype="image" position="float" xlink:type="simple" xlink:href="formative_v10i1e87054_fig03.png"/></fig></sec><sec id="s3-2-2"><title>Correlations for Depression</title><p>Exploratory correlation analyses revealed several significant associations between multimodal features and depressive symptom severity as measured using the PHQ-9. Acoustic and linguistic indicators showed the strongest relationships. In this small sample specifically, pitch (<italic>r</italic>=0.71; <italic>P</italic>=.047) and volume slope (<italic>r</italic>=0.78; <italic>P</italic>=.04) were positively correlated with PHQ-9 scores. This suggests preliminary patterns and warrants further exploration into predictive modeling to validate that greater variability and upward trajectories in vocal intensity may correspond to higher self-reported depression levels. 
In contrast, volume SD was negatively correlated (<italic>r</italic>=&#x2212;0.73; <italic>P</italic>=.04), encouraging a possible exploration into how participants with greater depressive symptoms may exhibit reduced dynamic range in speech.</p><p>Textual complexity metrics&#x2014;including the automated readability index (<italic>r</italic>=&#x2212;0.71; <italic>P</italic>=.047), Flesch-Kincaid complexity (<italic>r</italic>=&#x2212;0.74; <italic>P</italic>=.04), and Gunning Fog index (<italic>r</italic>=&#x2212;0.72; <italic>P</italic>=.04)&#x2014;also demonstrated negative correlations with PHQ-9 scores; thus, the individuals in our preliminary dataset reporting higher depressive symptoms tended to produce shorter or linguistically simpler utterances during conversational interactions. Due to the low sample size, it is impossible to say whether there are any actual meaningful correlations; however, it does warrant further exploration with larger sample sizes and more resilient analyses. Due to our limited sample size, individual correlations are also not expected to be reliable or generalizable at this stage. Together, these relationships support the feasibility of deriving speech- and language-based digital biomarkers of depression from naturalistic conversational data.</p></sec><sec id="s3-2-3"><title>Correlations for Fatigue</title><p>Analysis of correlations between multimodal features and fatigue as measured using the CFS identified several significant acoustic, visual, and linguistic markers. Among acoustic features, pitch (<italic>r</italic>=0.84; <italic>P</italic>=.009), volume slope (<italic>r</italic>=0.84; <italic>P</italic>=.02), and average F2 formant frequency (<italic>r</italic>=0.76; <italic>P</italic>=.03) were positively correlated with fatigue severity, whereas volume SD (<italic>r</italic>=&#x2212;0.85; <italic>P</italic>=.007) showed a strong negative association. 
This warrants further investigation into how individuals reporting higher fatigue might exhibit flatter, less dynamically modulated vocal patterns, consistent with decreased speech energy and prosodic variation observed in fatigue-related speech studies.</p><p>Additionally, the linguistic Gunning Fog index was inversely correlated with CFS scores (<italic>r</italic>=&#x2212;0.71; <italic>P</italic>=.049), indicating that higher fatigue levels were associated with simpler or less complex speech production. One visual feature, eyelid droop (<italic>r</italic>=0.71; <italic>P</italic>=.047), also demonstrated a significant positive association with fatigue, consistent with prior literature linking ocular and facial muscle changes to tiredness. Together, these findings highlight the sensitivity of both speech acoustics and facial metrics to self-reported fatigue, supporting their potential use as digital biomarkers for real-time fatigue monitoring. However, due to the limited sample size and analysis, individual associations should not be interpreted as predictive or applicable to general populations at this stage.</p></sec><sec id="s3-2-4"><title>Correlations for Attention and Cognition</title><p>Exploratory analyses examining associations between multimodal features and cognitive performance as measured using the TMT identified a single significant relationship. The acoustic feature shimmer, which captures cycle-to-cycle variability in vocal amplitude, was positively correlated with TMT scores (<italic>r</italic>=0.74; <italic>P</italic>=.03). This association points to greater irregularity in voice amplitude when participants had longer task completion times. Further research into how lower cognitive efficiency might potentially relate to fatigue-related motor or attentional variability in speech production may be worthwhile. 
Although preliminary, this finding aligns with prior literature linking changes in vocal stability to fluctuations in attention and executive functioning. These results indicate that fine-grained acoustic features derived from conversational speech may offer a feasible, low-burden proxy for cognitive performance monitoring in remote or operational settings. Once again, although the exact associations found should not be treated as reliable or generalizable at this stage, this does suggest a potential future study with larger samples and analyses.</p></sec></sec><sec id="s3-3"><title>Descriptive Summary of Significant Associations</title><p>To complement the quantitative analyses, we conducted a descriptive review of the distribution of significant multimodal features across clinical domains. As shown in <xref ref-type="table" rid="table4">Table 4</xref>, depression-related features were primarily linguistic and acoustic in nature, with no significant visual markers identified. Fatigue exhibited the broadest feature coverage, encompassing 1 visual variable, 4 acoustic variables, and 1 linguistic variable. When examining fatigue subscales, distinct modality patterns emerged: the physical and affective fatigue components were each associated with multiple acoustic features, whereas the cognitive subscale was linked to both visual and acoustic indicators. In contrast, cognitive performance as measured using the TMT was primarily associated with a single acoustic feature, reflecting a narrower signal profile. It is important to note that feature set size also had an effect; for example, fatigue measures displayed a clear feature representation across audio features, but audio features also made up a larger proportion of the total features compared to visual and text features. 
There is no clear evidence of any specific feature modality showing broader significance or sensitivity.</p><table-wrap id="t4" position="float"><label>Table 4.</label><caption><p>Count of significant features for each subscale of the clinical measures and feature type out of the total number of features tested.</p></caption><table id="table4" frame="hsides" rules="groups"><thead><tr><td align="left" valign="bottom">Clinical measure and subscale</td><td align="left" valign="bottom" colspan="3">Feature categories, n (%)</td></tr><tr><td align="left" valign="bottom"/><td align="left" valign="bottom">Visual features (n=13)</td><td align="left" valign="bottom">Audio features (n=28)</td><td align="left" valign="bottom">Text features (n=25)</td></tr></thead><tbody><tr><td align="left" valign="top">PHQ-9<sup><xref ref-type="table-fn" rid="table4fn1">a</xref></sup></td><td align="left" valign="top">0 (0.0)</td><td align="left" valign="top">3 (10.7)</td><td align="left" valign="top">3 (12.0)</td></tr><tr><td align="left" valign="top">CFS<sup><xref ref-type="table-fn" rid="table4fn2">b</xref></sup></td><td align="left" valign="top">1 (7.7)</td><td align="left" valign="top">4 (14.3)</td><td align="left" valign="top">1 (4.0)</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Physical</td><td align="left" valign="top">1 (7.7)</td><td align="left" valign="top">4 (14.3)</td><td align="left" valign="top">3 (12.0)</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Affective</td><td align="left" valign="top">0 (0.0)</td><td align="left" valign="top">5 (17.9)</td><td align="left" valign="top">0 (0.0)</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Cognitive</td><td align="left" valign="top">1 (7.7)</td><td align="left" valign="top">5 (17.9)</td><td 
align="left" valign="top">0 (0.0)</td></tr><tr><td align="left" valign="top">TMT<sup><xref ref-type="table-fn" rid="table4fn3">c</xref></sup></td><td align="left" valign="top">0 (0.0)</td><td align="left" valign="top">1 (3.6)</td><td align="left" valign="top">0 (0.0)</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Part A</td><td align="left" valign="top">0 (0.0)</td><td align="left" valign="top">0 (0.0)</td><td align="left" valign="top">1 (4.0)</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Part B</td><td align="left" valign="top">0 (0.0)</td><td align="left" valign="top">1 (3.6)</td><td align="left" valign="top">0 (0.0)</td></tr></tbody></table><table-wrap-foot><fn id="table4fn1"><p><sup>a</sup>PHQ-9: Patient Health Questionnaire&#x2013;9.</p></fn><fn id="table4fn2"><p><sup>b</sup>CFS: Cancer Fatigue Scale.</p></fn><fn id="table4fn3"><p><sup>c</sup>TMT: trail making test.</p></fn></table-wrap-foot></table-wrap></sec><sec id="s3-4"><title>Risk Score Case Study</title><p>To explore the feasibility of individualized monitoring, we conducted case studies on participants who completed multiple Okaya sessions. For each individual, a composite &#x201C;risk score&#x201D; was calculated by standardizing the significant multimodal features (<italic>z</italic>-scoring within participants) and summing the deviations from the mean, with directionality determined by the feature&#x2019;s correlation with the clinical measure. 
This approach provided a demonstration of an interpretable, participant-specific index of deviation from baseline functioning across time; however, due to the low sample size and unstable correlations, it serves purely as a demonstration here, not as proof of actual meaningful capability.</p><p>An example use case could employ a longitudinal visualization of these risk scores to reveal dynamic fluctuations in both depression- and fatigue-related indexes across sessions. For example, in participants with higher variability, increases in risk score can correspond to periods of increased self-reported symptom severity, whereas lower or stable values can align with more consistent well-being reports. Although exploratory, these case studies illustrate how multimodal conversational data can support within-person anomaly detection and temporal tracking of psychological and physiological states. This example provides a demonstration of how individualized, feature-based composite scores may offer a viable framework for early identification of changes in mood, fatigue, or cognitive function in remote monitoring contexts.</p><p>Data on depression and fatigue for the 2 participants with the most sessions are shown in <xref ref-type="fig" rid="figure4">Figures 4</xref> and <xref ref-type="fig" rid="figure5">5</xref>, respectively (TMT only exhibited 1 significant feature and, as such, a composite score was not meaningful).</p><fig position="float" id="figure4"><label>Figure 4.</label><caption><p>Risk score case study for depression.</p></caption><graphic alt-version="no" mimetype="image" position="float" xlink:type="simple" xlink:href="formative_v10i1e87054_fig04.png"/></fig><fig position="float" id="figure5"><label>Figure 5.</label><caption><p>Risk score case study for fatigue.</p></caption><graphic alt-version="no" mimetype="image" position="float" xlink:type="simple" xlink:href="formative_v10i1e87054_fig05.png"/></fig></sec></sec><sec id="s4" 
sec-type="discussion"><title>Discussion</title><sec id="s4-1"><title>Principal Findings</title><p>We found several multimodal features that correlated with depression, fatigue, and cognition, some of which extended beyond patterns reported in prior literature. As expected, depression was characterized by reduced variability in vocal intensity and lower linguistic complexity, consistent with psychomotor slowing and affective flattening. Fatigue demonstrated a broader multimodal signature involving both acoustic and visual domains, including reduced volume variability, higher pitch, and greater eyelid droop, reflecting decreased energy and arousal. Notably, volume slope, a feature we did not find in the literature, was significant for both depression and fatigue, suggesting that individuals with depression or fatigue might have noticeable patterns in how speech volume progresses over time, especially during utterances provided on the Okaya platform. Additionally, complexity, a feature previously linked to depressive speech, was found to correlate with fatigue, indicating that depression and fatigue may share some similar effects on word choice and diversity [<xref ref-type="bibr" rid="ref28">28</xref>,<xref ref-type="bibr" rid="ref36">36</xref>]. Together, these findings highlight the potential for future study of established digital biomarkers in the Okaya platform and the emergence of cross-domain signals that may bridge affective, cognitive, and fatigue-related processes.</p><p>The findings of this feasibility study indicate that the Okaya conversational platform is both usable and acceptable for collecting multimodal data in an operational setting. Participants demonstrated relatively sustained engagement, with most completing multiple sessions, suggesting that the platform&#x2019;s conversational format and remote accessibility supported ongoing participation. 
Usability feedback further underscored this positive experience&#x2014;participants rated the system highly in terms of ease of use, clarity of instructions, and overall satisfaction. Qualitative feedback revealed that users found the interaction intuitive and meaningful, often describing the check-in as an opportunity for reflection rather than a burdensome task. Importantly, participants expressed high confidence in data privacy and security, which is critical for user trust in AI-enabled health technologies [<xref ref-type="bibr" rid="ref85">85</xref>-<xref ref-type="bibr" rid="ref87">87</xref>]. At the same time, feedback highlighted opportunities for improvement, including more flexible scheduling options, reminders, and greater mobile accessibility. Together, these findings suggest that Okaya&#x2019;s design successfully balanced data collection rigor with user comfort and autonomy, supporting its feasibility for repeated deployment in diverse and high-performance populations. These results provide an encouraging foundation for the platform&#x2019;s next phase of validation and refinement, emphasizing scalability, personalization, and long-term adherence.</p><p>The findings from the case study align closely with the long-term vision for the Okaya platform. The observed fluctuations in individualized risk scores and their correspondence with self-reported symptom changes demonstrate the platform&#x2019;s potential for longitudinal, within-person monitoring&#x2014;a central principle behind the Okaya Index and the simplified risk score we presented. Rather than relying on evidence from population-based studies, the Okaya platform emphasizes personalized baselines and interprets deviations relative to each individual&#x2019;s unique behavioral and physiological profile. This individualized approach reflects the platform&#x2019;s goal of enabling early identification of shifts in emotional, cognitive, or fatigue states. 
As the platform evolves, integration of these multimodal risk scores into the Okaya Index will enable continuous refinement of trajectories across time and populations, ultimately supporting adaptive, AI-driven insights that can inform both clinical decision-making and support for operational populations ranging from pilots to firefighters. This trajectory aligns with the goal of advancing proactive mental health and performance optimization in real-world settings.</p><p>From a clinical and research perspective, these findings illustrate the promise of conversational, multimodal sensing as a foundation for precision mental health and digital phenotyping. By capturing subtle changes in voice, facial behavior, and language over time, systems such as Okaya can complement traditional self-report and clinician-administered tools, providing a richer, continuous view of mental and cognitive functioning. Such individualized, data-driven assessments have potential applications across health care, occupational performance, and behavioral health monitoring, particularly in cases in which real-time insight and early intervention are critical. Importantly, the Okaya platform&#x2019;s emphasis on <italic>nondiagnostic</italic> and <italic>privacy-preserving</italic> analytics aligns with emerging standards for responsible AI in health technology [<xref ref-type="bibr" rid="ref13">13</xref>,<xref ref-type="bibr" rid="ref88">88</xref>,<xref ref-type="bibr" rid="ref89">89</xref>]. As digital biomarkers move closer to clinical integration, continued validation across diverse populations and conditions will be essential to ensure generalizability, interpretability, and equity [<xref ref-type="bibr" rid="ref85">85</xref>,<xref ref-type="bibr" rid="ref86">86</xref>,<xref ref-type="bibr" rid="ref90">90</xref>]. 
Ultimately, this study of the Okaya platform represents a step toward an ethically grounded, adaptive ecosystem for mental health monitoring&#x2014;one that transforms multimodal data into actionable insights while safeguarding user trust and autonomy.</p><p>This study has several limitations that should be considered when interpreting the findings. First, the sample size was small and limited due to our focus on operational populations, which are harder to recruit, constraining generalizability and the ability to model interindividual variability. Thus, the correlations we report are not meant to be statistically resilient or generalizable at this stage. Therefore, these findings preclude reliable statistical inference of robust estimates of the correlations in these data and, rather, suggest future avenues for discovery. Second, although validated instruments such as the PHQ-9, CFS, and TMT were used for comparison, these measures may not fully capture the temporal dynamics or subtle within-person fluctuations that multimodal digital biomarkers are designed to detect. Traditional self-report and performance-based assessments tend to be episodic and relatively stable, which may underestimate moment-to-moment variability in affect, fatigue, and cognition. Future studies should incorporate higher-resolution clinical measures&#x2014;such as ecological momentary assessment or brief daily self-reports&#x2014;that provide richer temporal data and enable stronger coupling between behavioral features and self-reported states [<xref ref-type="bibr" rid="ref1">1</xref>-<xref ref-type="bibr" rid="ref3">3</xref>,<xref ref-type="bibr" rid="ref87">87</xref>,<xref ref-type="bibr" rid="ref91">91</xref>]. Integrating these temporally aligned measures would allow for more precise modeling of intraindividual change and enhance the ecological validity of the derived digital biomarkers. 
Finally, while the Okaya platform demonstrated feasibility and acceptability in this study, further validation with larger, more diverse samples and extended longitudinal follow-up is necessary for predictive modeling and design of personalized remote monitoring systems.</p></sec><sec id="s4-2"><title>Conclusions</title><p>The conversational and AI-enabled platform (Okaya) was feasible among an operational sample from the US Air and Space Force as a method for collecting multimodal data correlated with depression, fatigue, and cognition. Future work will examine larger samples with repeated measures to assess the test-retest reliability and predictive validity of multimodal digital biomarkers. Ultimately, the goal of the platform is the development of personalized models for each user while detecting anomalies in a remote monitoring setting.</p></sec></sec></body><back><ack><p>The authors would like to thank Sara Ghandehari, MD, MCSO, FACHE, as well as Nick Uram, PsyD, for their invaluable contributions toward the development of the Okaya platform. The authors are also grateful to Will Jackson for his assistance in literature review. The authors declare the use of generative artificial intelligence (GenAI) in the research and writing process. According to the Generative Artificial Intelligence Delegation Taxonomy (2025), the following tasks were delegated to GenAI tools under full human supervision: literature search and systematization, code generation, code optimization, process automation, creation of algorithms for data analysis, data analysis, and visualization. The GenAI tool used was GPT-4o via the GitHub Copilot platform. Although the code design and functions were entirely defined by the authors, GitHub Copilot was used to generate autocomplete suggestions as the code was being written. The generated code was used for correlation calculations, risk score case study calculation, and generation of all shown figures. 
Responsibility for the final manuscript lies entirely with the authors. GenAI tools are not listed as authors and do not bear responsibility for the final outcomes.</p></ack><notes><sec><title>Funding</title><p>This study was funded by SmartTec Inc to fulfill deliverables under a Direct to Phase II award from AFWERX (Department of the Air Force; research and innovation).</p></sec><sec><title>Data Availability</title><p>The datasets generated and analyzed during this study are not publicly available due to their use in the development of a proprietary platform as well as confidentiality requirements from the US Department of Defense. Public release of the data could enable reverse engineering of the platform or unauthorized commercial use. However, deidentified data may be available from the corresponding author on reasonable request.</p></sec></notes><fn-group><fn fn-type="con"><p>Conceptualization: M So (lead), GM (equal), M Sobolev (equal)</p><p>Data curation: M So (lead), GM (equal)</p><p>Formal analysis: M So (lead), M Sobolev (equal)</p><p>Funding acquisition: GM</p><p>Investigation: M So (lead), M Sobolev (equal)</p><p>Methodology: M Sobolev (lead), M So (equal), GM (supporting)</p><p>Resources: M So (lead), GM (supporting)</p><p>Software: M So</p><p>Validation: M So (lead), M Sobolev (supporting)</p><p>Visualization: M So</p><p>Writing&#x2014;original draft: M Sobolev (lead), M So (supporting)</p><p>Writing&#x2014;review and editing: M So (lead), M Sobolev (equal), GM (supporting)</p></fn><fn fn-type="conflict"><p>M So and GM are employed by and hold financial interest in SmartTec Inc, who develops the Okaya platform, and could benefit from research outcomes. 
M Sobolev receives consulting fees from SmartTec Inc.</p></fn></fn-group><glossary><title>Abbreviations</title><def-list><def-item><term id="abb1">AI</term><def><p>artificial intelligence</p></def></def-item><def-item><term id="abb2">CFS</term><def><p>Cancer Fatigue Scale</p></def></def-item><def-item><term id="abb3">PHQ-9</term><def><p>Patient Health Questionnaire&#x2013;9</p></def></def-item><def-item><term id="abb4">TMT</term><def><p>trail making test</p></def></def-item><def-item><term id="abb5">USAF</term><def><p>US Air Force</p></def></def-item></def-list></glossary><ref-list><title>References</title><ref id="ref1"><label>1</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Sliwinski</surname><given-names>MJ</given-names> </name><name name-style="western"><surname>Mogle</surname><given-names>JA</given-names> </name><name name-style="western"><surname>Hyun</surname><given-names>J</given-names> </name><name name-style="western"><surname>Munoz</surname><given-names>E</given-names> </name><name name-style="western"><surname>Smyth</surname><given-names>JM</given-names> </name><name name-style="western"><surname>Lipton</surname><given-names>RB</given-names> </name></person-group><article-title>Reliability and validity of ambulatory cognitive assessments</article-title><source>Assessment</source><year>2018</year><month>01</month><volume>25</volume><issue>1</issue><fpage>14</fpage><lpage>30</lpage><pub-id pub-id-type="doi">10.1177/1073191116643164</pub-id><pub-id pub-id-type="medline">27084835</pub-id></nlm-citation></ref><ref id="ref2"><label>2</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Katz</surname><given-names>MJ</given-names> </name><name name-style="western"><surname>Wang</surname><given-names>C</given-names> </name><name name-style="western"><surname>Nester</surname><given-names>CO</given-names> 
</name><etal/></person-group><article-title>T&#x2010;MoCA: a valid phone screen for cognitive impairment in diverse community samples</article-title><source>Alzheimers Dement (Amst)</source><year>2021</year><month>02</month><volume>13</volume><issue>1</issue><fpage>e12144</fpage><pub-id pub-id-type="doi">10.1002/dad2.12144</pub-id><pub-id pub-id-type="medline">33598528</pub-id></nlm-citation></ref><ref id="ref3"><label>3</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Singh</surname><given-names>S</given-names> </name><name name-style="western"><surname>Strong</surname><given-names>R</given-names> </name><name name-style="western"><surname>Xu</surname><given-names>I</given-names> </name><etal/></person-group><article-title>Ecological momentary assessment of cognition in clinical and community samples: reliability and validity study</article-title><source>J Med Internet Res</source><year>2023</year><month>06</month><day>2</day><volume>25</volume><fpage>e45028</fpage><pub-id pub-id-type="doi">10.2196/45028</pub-id><pub-id pub-id-type="medline">37266996</pub-id></nlm-citation></ref><ref id="ref4"><label>4</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>McHugh</surname><given-names>RK</given-names> </name><name name-style="western"><surname>Behar</surname><given-names>E</given-names> </name></person-group><article-title>Readability of self-report measures of depression and anxiety</article-title><source>J Consult Clin Psychol</source><year>2009</year><month>12</month><volume>77</volume><issue>6</issue><fpage>1100</fpage><lpage>1112</lpage><pub-id pub-id-type="doi">10.1037/a0017124</pub-id><pub-id pub-id-type="medline">19968386</pub-id></nlm-citation></ref><ref id="ref5"><label>5</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name 
name-style="western"><surname>McHugh</surname><given-names>RK</given-names> </name><name name-style="western"><surname>Behar</surname><given-names>E</given-names> </name></person-group><article-title>Reply to &#x201C;Further issues in determining the readability of self-report items: comment on McHugh and Behar (2009)&#x201D;</article-title><source>J Consult Clin Psychol</source><year>2012</year><month>12</month><volume>80</volume><issue>6</issue><fpage>1121</fpage><lpage>1122</lpage><pub-id pub-id-type="doi">10.1037/a0030455</pub-id><pub-id pub-id-type="medline">23205507</pub-id></nlm-citation></ref><ref id="ref6"><label>6</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Schinka</surname><given-names>JA</given-names> </name></person-group><article-title>Further issues in determining the readability of self-report items: comment on McHugh and Behar (2009)</article-title><source>J Consult Clin Psychol</source><year>2012</year><volume>80</volume><issue>5</issue><fpage>952</fpage><lpage>955</lpage><pub-id pub-id-type="doi">10.1037/a0029928</pub-id><pub-id pub-id-type="medline">22924333</pub-id></nlm-citation></ref><ref id="ref7"><label>7</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Levis</surname><given-names>B</given-names> </name><name name-style="western"><surname>Benedetti</surname><given-names>A</given-names> </name><name name-style="western"><surname>Ioannidis</surname><given-names>JP</given-names> </name><etal/></person-group><article-title>Patient Health Questionnaire-9 scores do not accurately estimate depression prevalence: individual participant data meta-analysis</article-title><source>J Clin Epidemiol</source><year>2020</year><month>06</month><volume>122</volume><fpage>115</fpage><lpage>128.e1</lpage><pub-id pub-id-type="doi">10.1016/j.jclinepi.2020.02.002</pub-id><pub-id 
pub-id-type="medline">32105798</pub-id></nlm-citation></ref><ref id="ref8"><label>8</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Ma</surname><given-names>S</given-names> </name><name name-style="western"><surname>Kang</surname><given-names>L</given-names> </name><name name-style="western"><surname>Guo</surname><given-names>X</given-names> </name><etal/></person-group><article-title>Discrepancies between self-rated depression and observed depression severity: the effects of personality and dysfunctional attitudes</article-title><source>Gen Hosp Psychiatry</source><year>2021</year><volume>70</volume><fpage>25</fpage><lpage>30</lpage><pub-id pub-id-type="doi">10.1016/j.genhosppsych.2020.11.016</pub-id><pub-id pub-id-type="medline">33689981</pub-id></nlm-citation></ref><ref id="ref9"><label>9</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Inoue</surname><given-names>T</given-names> </name><name name-style="western"><surname>Tanaka</surname><given-names>T</given-names> </name><name name-style="western"><surname>Nakagawa</surname><given-names>S</given-names> </name><etal/></person-group><article-title>Utility and limitations of PHQ-9 in a clinic specializing in psychiatric care</article-title><source>BMC Psychiatry</source><year>2012</year><month>07</month><day>3</day><volume>12</volume><fpage>73</fpage><pub-id pub-id-type="doi">10.1186/1471-244X-12-73</pub-id><pub-id pub-id-type="medline">22759625</pub-id></nlm-citation></ref><ref id="ref10"><label>10</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Sim</surname><given-names>I</given-names> </name></person-group><article-title>Mobile devices and health</article-title><source>N Engl J 
Med</source><year>2019</year><month>09</month><day>5</day><volume>381</volume><issue>10</issue><fpage>956</fpage><lpage>968</lpage><pub-id pub-id-type="doi">10.1056/NEJMra1806949</pub-id><pub-id pub-id-type="medline">31483966</pub-id></nlm-citation></ref><ref id="ref11"><label>11</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Insel</surname><given-names>TR</given-names> </name></person-group><article-title>Digital phenotyping: technology for a new science of behavior</article-title><source>JAMA</source><year>2017</year><month>10</month><day>3</day><volume>318</volume><issue>13</issue><fpage>1215</fpage><lpage>1216</lpage><pub-id pub-id-type="doi">10.1001/jama.2017.11295</pub-id><pub-id pub-id-type="medline">28973224</pub-id></nlm-citation></ref><ref id="ref12"><label>12</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Mohr</surname><given-names>DC</given-names> </name><name name-style="western"><surname>Zhang</surname><given-names>M</given-names> </name><name name-style="western"><surname>Schueller</surname><given-names>SM</given-names> </name></person-group><article-title>Personal sensing: understanding mental health using ubiquitous sensors and machine learning</article-title><source>Annu Rev Clin Psychol</source><year>2017</year><month>05</month><day>8</day><volume>13</volume><fpage>23</fpage><lpage>47</lpage><pub-id pub-id-type="doi">10.1146/annurev-clinpsy-032816-044949</pub-id><pub-id pub-id-type="medline">28375728</pub-id></nlm-citation></ref><ref id="ref13"><label>13</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Coravos</surname><given-names>A</given-names> </name><name name-style="western"><surname>Khozin</surname><given-names>S</given-names> </name><name name-style="western"><surname>Mandl</surname><given-names>KD</given-names> 
</name></person-group><article-title>Developing and adopting safe and effective digital biomarkers to improve patient outcomes</article-title><source>NPJ Digit Med</source><year>2019</year><volume>2</volume><issue>1</issue><fpage>14</fpage><pub-id pub-id-type="doi">10.1038/s41746-019-0090-4</pub-id><pub-id pub-id-type="medline">30868107</pub-id></nlm-citation></ref><ref id="ref14"><label>14</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Dagum</surname><given-names>P</given-names> </name></person-group><article-title>Digital biomarkers of cognitive function</article-title><source>NPJ Digit Med</source><year>2018</year><volume>1</volume><fpage>10</fpage><pub-id pub-id-type="doi">10.1038/s41746-018-0018-4</pub-id><pub-id pub-id-type="medline">31304295</pub-id></nlm-citation></ref><ref id="ref15"><label>15</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Jacobson</surname><given-names>NC</given-names> </name><name name-style="western"><surname>Weingarden</surname><given-names>H</given-names> </name><name name-style="western"><surname>Wilhelm</surname><given-names>S</given-names> </name></person-group><article-title>Digital biomarkers of mood disorders and symptom change</article-title><source>NPJ Digit Med</source><year>2019</year><volume>2</volume><fpage>3</fpage><pub-id pub-id-type="doi">10.1038/s41746-019-0078-0</pub-id><pub-id pub-id-type="medline">31304353</pub-id></nlm-citation></ref><ref id="ref16"><label>16</label><nlm-citation citation-type="confproc"><person-group person-group-type="author"><name name-style="western"><surname>Sobolev</surname><given-names>M</given-names> </name><name name-style="western"><surname>Gullapalli</surname><given-names>BT</given-names> </name><name name-style="western"><surname>Rahman</surname><given-names>T</given-names> </name></person-group><article-title>Advancing the science of 
digital biomarkers</article-title><conf-name>DigiBiom &#x2019;21: Proceedings of the 2021 Workshop on Future of Digital Biomarkers</conf-name><conf-date>Jun 25, 2021</conf-date><pub-id pub-id-type="doi">10.1145/3469266.3473711</pub-id></nlm-citation></ref><ref id="ref17"><label>17</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Garcia-Ceja</surname><given-names>E</given-names> </name><name name-style="western"><surname>Riegler</surname><given-names>M</given-names> </name><name name-style="western"><surname>Nordgreen</surname><given-names>T</given-names> </name><name name-style="western"><surname>Jakobsen</surname><given-names>P</given-names> </name><name name-style="western"><surname>Oedegaard</surname><given-names>KJ</given-names> </name><name name-style="western"><surname>T&#x00F8;rresen</surname><given-names>J</given-names> </name></person-group><article-title>Mental health monitoring with multimodal sensing and machine learning: a survey</article-title><source>Pervasive Mob Comput</source><year>2018</year><month>12</month><volume>51</volume><fpage>1</fpage><lpage>26</lpage><pub-id pub-id-type="doi">10.1016/j.pmcj.2018.09.003</pub-id></nlm-citation></ref><ref id="ref18"><label>18</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>K&#x00F6;nig</surname><given-names>A</given-names> </name><name name-style="western"><surname>Tr&#x00F6;ger</surname><given-names>J</given-names> </name><name name-style="western"><surname>Mallick</surname><given-names>E</given-names> </name><etal/></person-group><article-title>Detecting subtle signs of depression with automated speech analysis in a non-clinical sample</article-title><source>BMC Psychiatry</source><year>2022</year><month>12</month><day>27</day><volume>22</volume><issue>1</issue><fpage>830</fpage><pub-id pub-id-type="doi">10.1186/s12888-022-04475-0</pub-id><pub-id 
pub-id-type="medline">36575442</pub-id></nlm-citation></ref><ref id="ref19"><label>19</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Silva</surname><given-names>WJ</given-names> </name><name name-style="western"><surname>Lopes</surname><given-names>L</given-names> </name><name name-style="western"><surname>Galdino</surname><given-names>MK</given-names> </name><name name-style="western"><surname>Almeida</surname><given-names>AA</given-names> </name></person-group><article-title>Voice acoustic parameters as predictors of depression</article-title><source>J Voice</source><year>2024</year><month>01</month><volume>38</volume><issue>1</issue><fpage>77</fpage><lpage>85</lpage><pub-id pub-id-type="doi">10.1016/j.jvoice.2021.06.018</pub-id><pub-id pub-id-type="medline">34353686</pub-id></nlm-citation></ref><ref id="ref20"><label>20</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Shin</surname><given-names>D</given-names> </name><name name-style="western"><surname>Cho</surname><given-names>WI</given-names> </name><name name-style="western"><surname>Park</surname><given-names>CHK</given-names> </name><etal/></person-group><article-title>Detection of minor and major depression through voice as a biomarker using machine learning</article-title><source>J Clin Med</source><year>2021</year><month>07</month><day>8</day><volume>10</volume><issue>14</issue><fpage>3046</fpage><pub-id pub-id-type="doi">10.3390/jcm10143046</pub-id><pub-id pub-id-type="medline">34300212</pub-id></nlm-citation></ref><ref id="ref21"><label>21</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Alpert</surname><given-names>M</given-names> </name><name name-style="western"><surname>Pouget</surname><given-names>ER</given-names> </name><name 
name-style="western"><surname>Silva</surname><given-names>RR</given-names> </name></person-group><article-title>Reflections of depression in acoustic measures of the patient&#x2019;s speech</article-title><source>J Affect Disord</source><year>2001</year><month>09</month><volume>66</volume><issue>1</issue><fpage>59</fpage><lpage>69</lpage><pub-id pub-id-type="doi">10.1016/s0165-0327(00)00335-9</pub-id><pub-id pub-id-type="medline">11532533</pub-id></nlm-citation></ref><ref id="ref22"><label>22</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Cohen</surname><given-names>AS</given-names> </name><name name-style="western"><surname>Rodriguez</surname><given-names>Z</given-names> </name><name name-style="western"><surname>Opler</surname><given-names>M</given-names> </name><etal/></person-group><article-title>Evaluating speech latencies during structured psychiatric interviews as an automated objective measure of psychomotor slowing</article-title><source>Psychiatry Res</source><year>2024</year><month>10</month><volume>340</volume><fpage>116104</fpage><pub-id pub-id-type="doi">10.1016/j.psychres.2024.116104</pub-id><pub-id pub-id-type="medline">39137558</pub-id></nlm-citation></ref><ref id="ref23"><label>23</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Vogel</surname><given-names>AP</given-names> </name><name name-style="western"><surname>Fletcher</surname><given-names>J</given-names> </name><name name-style="western"><surname>Maruff</surname><given-names>P</given-names> </name></person-group><article-title>Acoustic analysis of the effects of sustained wakefulness on speech</article-title><source>J Acoust Soc Am</source><year>2010</year><month>12</month><volume>128</volume><issue>6</issue><fpage>3747</fpage><lpage>3756</lpage><pub-id pub-id-type="doi">10.1121/1.3506349</pub-id><pub-id 
pub-id-type="medline">21218906</pub-id></nlm-citation></ref><ref id="ref24"><label>24</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Ozdas</surname><given-names>A</given-names> </name><name name-style="western"><surname>Shiavi</surname><given-names>RG</given-names> </name><name name-style="western"><surname>Silverman</surname><given-names>SE</given-names> </name><name name-style="western"><surname>Silverman</surname><given-names>MK</given-names> </name><name name-style="western"><surname>Wilkes</surname><given-names>DM</given-names> </name></person-group><article-title>Investigation of vocal jitter and glottal flow spectrum as possible cues for depression and near-term suicidal risk</article-title><source>IEEE Trans Biomed Eng</source><year>2004</year><month>09</month><volume>51</volume><issue>9</issue><fpage>1530</fpage><lpage>1540</lpage><pub-id pub-id-type="doi">10.1109/TBME.2004.827544</pub-id><pub-id pub-id-type="medline">15376501</pub-id></nlm-citation></ref><ref id="ref25"><label>25</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Gao</surname><given-names>X</given-names> </name><name name-style="western"><surname>Ma</surname><given-names>K</given-names> </name><name name-style="western"><surname>Yang</surname><given-names>H</given-names> </name><etal/></person-group><article-title>A rapid, non-invasive method for fatigue detection based on voice information</article-title><source>Front Cell Dev Biol</source><year>2022</year><volume>10</volume><fpage>994001</fpage><pub-id pub-id-type="doi">10.3389/fcell.2022.994001</pub-id><pub-id pub-id-type="medline">36176279</pub-id></nlm-citation></ref><ref id="ref26"><label>26</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Hamdan</surname><given-names>AL</given-names> </name><name 
name-style="western"><surname>Deeb</surname><given-names>R</given-names> </name><name name-style="western"><surname>Sibai</surname><given-names>A</given-names> </name><name name-style="western"><surname>Rameh</surname><given-names>C</given-names> </name><name name-style="western"><surname>Rifai</surname><given-names>H</given-names> </name><name name-style="western"><surname>Fayyad</surname><given-names>J</given-names> </name></person-group><article-title>Vocal characteristics in children with attention deficit hyperactivity disorder</article-title><source>J Voice</source><year>2009</year><month>03</month><volume>23</volume><issue>2</issue><fpage>190</fpage><lpage>194</lpage><pub-id pub-id-type="doi">10.1016/j.jvoice.2007.09.004</pub-id><pub-id pub-id-type="medline">18082369</pub-id></nlm-citation></ref><ref id="ref27"><label>27</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Zhao</surname><given-names>Q</given-names> </name><name name-style="western"><surname>Fan</surname><given-names>HZ</given-names> </name><name name-style="western"><surname>Li</surname><given-names>YL</given-names> </name><etal/></person-group><article-title>Vocal acoustic features as potential biomarkers for identifying/diagnosing depression: a cross-sectional study</article-title><source>Front Psychiatry</source><year>2022</year><volume>13</volume><fpage>815678</fpage><pub-id pub-id-type="doi">10.3389/fpsyt.2022.815678</pub-id><pub-id pub-id-type="medline">35573349</pub-id></nlm-citation></ref><ref id="ref28"><label>28</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Arevian</surname><given-names>AC</given-names> </name><name name-style="western"><surname>Bone</surname><given-names>D</given-names> </name><name name-style="western"><surname>Malandrakis</surname><given-names>N</given-names> </name><etal/></person-group><article-title>Clinical state 
tracking in serious mental illness through computational analysis of speech</article-title><source>PLoS One</source><year>2020</year><volume>15</volume><issue>1</issue><fpage>e0225695</fpage><pub-id pub-id-type="doi">10.1371/journal.pone.0225695</pub-id><pub-id pub-id-type="medline">31940347</pub-id></nlm-citation></ref><ref id="ref29"><label>29</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Hinchliffe</surname><given-names>MK</given-names> </name><name name-style="western"><surname>Lancashire</surname><given-names>MH</given-names> </name><name name-style="western"><surname>Roberts</surname><given-names>FJ</given-names> </name></person-group><article-title>Eye-contact and depression: a preliminary report</article-title><source>Br J Psychiatry</source><year>1970</year><month>11</month><volume>117</volume><issue>540</issue><fpage>571</fpage><lpage>572</lpage><pub-id pub-id-type="doi">10.1192/bjp.117.540.571</pub-id><pub-id pub-id-type="medline">5480708</pub-id></nlm-citation></ref><ref id="ref30"><label>30</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Suslow</surname><given-names>T</given-names> </name><name name-style="western"><surname>Hoepfel</surname><given-names>D</given-names> </name><name name-style="western"><surname>Kersting</surname><given-names>A</given-names> </name><name name-style="western"><surname>Bodenschatz</surname><given-names>CM</given-names> </name></person-group><article-title>Depressive symptoms and visual attention to others&#x2019; eyes in healthy individuals</article-title><source>BMC Psychiatry</source><year>2024</year><month>03</month><day>6</day><volume>24</volume><issue>1</issue><fpage>184</fpage><pub-id pub-id-type="doi">10.1186/s12888-024-05633-2</pub-id><pub-id pub-id-type="medline">38448877</pub-id></nlm-citation></ref><ref id="ref31"><label>31</label><nlm-citation 
citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Fiquer</surname><given-names>JT</given-names> </name><name name-style="western"><surname>Moreno</surname><given-names>RA</given-names> </name><name name-style="western"><surname>Brunoni</surname><given-names>AR</given-names> </name><name name-style="western"><surname>Barros</surname><given-names>VB</given-names> </name><name name-style="western"><surname>Fernandes</surname><given-names>F</given-names> </name><name name-style="western"><surname>Gorenstein</surname><given-names>C</given-names> </name></person-group><article-title>What is the nonverbal communication of depression? Assessing expressive differences between depressive patients and healthy volunteers during clinical interviews</article-title><source>J Affect Disord</source><year>2018</year><month>10</month><day>1</day><volume>238</volume><fpage>636</fpage><lpage>644</lpage><pub-id pub-id-type="doi">10.1016/j.jad.2018.05.071</pub-id><pub-id pub-id-type="medline">29957481</pub-id></nlm-citation></ref><ref id="ref32"><label>32</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Waxer</surname><given-names>P</given-names> </name></person-group><article-title>Nonverbal cues for depression</article-title><source>J Abnorm Psychol</source><year>1974</year><month>06</month><volume>83</volume><issue>3</issue><fpage>319</fpage><lpage>322</lpage><pub-id pub-id-type="doi">10.1037/h0036706</pub-id><pub-id pub-id-type="medline">4844922</pub-id></nlm-citation></ref><ref id="ref33"><label>33</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Buyukdura</surname><given-names>JS</given-names> </name><name name-style="western"><surname>McClintock</surname><given-names>SM</given-names> </name><name name-style="western"><surname>Croarkin</surname><given-names>PE</given-names> 
</name></person-group><article-title>Psychomotor retardation in depression: biological underpinnings, measurement, and treatment</article-title><source>Prog Neuropsychopharmacol Biol Psychiatry</source><year>2011</year><month>03</month><day>30</day><volume>35</volume><issue>2</issue><fpage>395</fpage><lpage>409</lpage><pub-id pub-id-type="doi">10.1016/j.pnpbp.2010.10.019</pub-id><pub-id pub-id-type="medline">21044654</pub-id></nlm-citation></ref><ref id="ref34"><label>34</label><nlm-citation citation-type="confproc"><person-group person-group-type="author"><name name-style="western"><surname>Had&#x017E;i&#x0107;</surname><given-names>B</given-names> </name><name name-style="western"><surname>Ohse</surname><given-names>J</given-names> </name><name name-style="western"><surname>Alkostantini</surname><given-names>ME</given-names> </name><etal/></person-group><article-title>Emotional dynamics in semi-clinical settings: speech emotion recognition in depression-related interviews</article-title><conf-name>11th International Conference on Information and Communication Technologies for Ageing Well and e-Health</conf-name><conf-date>Apr 6-8, 2025</conf-date><pub-id pub-id-type="doi">10.5220/0013415700003938</pub-id></nlm-citation></ref><ref id="ref35"><label>35</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Rottenberg</surname><given-names>J</given-names> </name><name name-style="western"><surname>Gross</surname><given-names>JJ</given-names> </name><name name-style="western"><surname>Gotlib</surname><given-names>IH</given-names> </name></person-group><article-title>Emotion context insensitivity in major depressive disorder</article-title><source>J Abnorm Psychol</source><year>2005</year><month>11</month><volume>114</volume><issue>4</issue><fpage>627</fpage><lpage>639</lpage><pub-id pub-id-type="doi">10.1037/0021-843X.114.4.627</pub-id><pub-id 
pub-id-type="medline">16351385</pub-id></nlm-citation></ref><ref id="ref36"><label>36</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Trifu</surname><given-names>RN</given-names> </name><name name-style="western"><surname>Neme&#x0219;</surname><given-names>B</given-names> </name><name name-style="western"><surname>Herta</surname><given-names>DC</given-names> </name><name name-style="western"><surname>Bodea-Hategan</surname><given-names>C</given-names> </name><name name-style="western"><surname>Tala&#x0219;</surname><given-names>DA</given-names> </name><name name-style="western"><surname>Coman</surname><given-names>H</given-names> </name></person-group><article-title>Linguistic markers for major depressive disorder: a cross-sectional study using an automated procedure</article-title><source>Front Psychol</source><year>2024</year><volume>15</volume><fpage>1355734</fpage><pub-id pub-id-type="doi">10.3389/fpsyg.2024.1355734</pub-id><pub-id pub-id-type="medline">38510303</pub-id></nlm-citation></ref><ref id="ref37"><label>37</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Hartnagel</surname><given-names>LM</given-names> </name><name name-style="western"><surname>Ebner-Priemer</surname><given-names>UW</given-names> </name><name name-style="western"><surname>Foo</surname><given-names>JC</given-names> </name><etal/></person-group><article-title>Linguistic style as a digital marker for depression severity: an ambulatory assessment pilot study in patients with depressive disorder undergoing sleep deprivation therapy</article-title><source>Acta Psychiatr Scand</source><year>2025</year><month>03</month><volume>151</volume><issue>3</issue><fpage>348</fpage><lpage>357</lpage><pub-id pub-id-type="doi">10.1111/acps.13726</pub-id><pub-id pub-id-type="medline">38987940</pub-id></nlm-citation></ref><ref 
id="ref38"><label>38</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Han</surname><given-names>J</given-names> </name><name name-style="western"><surname>Feng</surname><given-names>Y</given-names> </name><name name-style="western"><surname>Li</surname><given-names>N</given-names> </name><etal/></person-group><article-title>Correlation between word frequency and 17 items of Hamilton scale in major depressive disorder</article-title><source>Front Psychiatry</source><year>2022</year><volume>13</volume><fpage>902873</fpage><pub-id pub-id-type="doi">10.3389/fpsyt.2022.902873</pub-id><pub-id pub-id-type="medline">35592381</pub-id></nlm-citation></ref><ref id="ref39"><label>39</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Shinohara</surname><given-names>S</given-names> </name><name name-style="western"><surname>Nakamura</surname><given-names>M</given-names> </name><name name-style="western"><surname>Omiya</surname><given-names>Y</given-names> </name><etal/></person-group><article-title>Depressive mood assessment method based on emotion level derived from voice: comparison of voice features of individuals with major depressive disorders and healthy controls</article-title><source>Int J Environ Res Public Health</source><year>2021</year><month>05</month><day>19</day><volume>18</volume><issue>10</issue><fpage>5435</fpage><pub-id pub-id-type="doi">10.3390/ijerph18105435</pub-id><pub-id pub-id-type="medline">34069609</pub-id></nlm-citation></ref><ref id="ref40"><label>40</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Gehricke</surname><given-names>JG</given-names> </name><name name-style="western"><surname>Shapiro</surname><given-names>D</given-names> </name></person-group><article-title>Reduced facial expression and social context in major depression: 
discrepancies between facial muscle activity and self-reported emotion</article-title><source>Psychiatry Res</source><year>2000</year><month>08</month><day>21</day><volume>95</volume><issue>2</issue><fpage>157</fpage><lpage>167</lpage><pub-id pub-id-type="doi">10.1016/s0165-1781(00)00168-2</pub-id><pub-id pub-id-type="medline">10963801</pub-id></nlm-citation></ref><ref id="ref41"><label>41</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Cariola</surname><given-names>LA</given-names> </name><name name-style="western"><surname>Sheeber</surname><given-names>LB</given-names> </name><name name-style="western"><surname>Allen</surname><given-names>N</given-names> </name><etal/></person-group><article-title>Language use in depressed and non-depressed mothers and their adolescent offspring</article-title><source>J Affect Disord</source><year>2024</year><month>12</month><day>1</day><volume>366</volume><fpage>290</fpage><lpage>299</lpage><pub-id pub-id-type="doi">10.1016/j.jad.2024.08.131</pub-id><pub-id pub-id-type="medline">39187178</pub-id></nlm-citation></ref><ref id="ref42"><label>42</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Kouba</surname><given-names>P</given-names> </name><name name-style="western"><surname>&#x0160;motek</surname><given-names>M</given-names> </name><name name-style="western"><surname>Tich&#x00FD;</surname><given-names>T</given-names> </name><name name-style="western"><surname>Kop&#x0159;ivov&#x00E1;</surname><given-names>J</given-names> </name></person-group><article-title>Detection of air traffic controllers&#x2019; fatigue using voice analysis - an EEG validation study</article-title><source>Int J Ind Ergon</source><year>2023</year><month>05</month><volume>95</volume><fpage>103442</fpage><pub-id pub-id-type="doi">10.1016/j.ergon.2023.103442</pub-id></nlm-citation></ref><ref 
id="ref43"><label>43</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Torous</surname><given-names>J</given-names> </name><name name-style="western"><surname>Topol</surname><given-names>EJ</given-names> </name></person-group><article-title>Assessing generative artificial intelligence for mental health</article-title><source>Lancet</source><year>2025</year><volume>406</volume><issue>10504</issue><pub-id pub-id-type="doi">10.1016/S0140-6736(25)01237-1</pub-id></nlm-citation></ref><ref id="ref44"><label>44</label><nlm-citation citation-type="confproc"><person-group person-group-type="author"><name name-style="western"><surname>Cruciani</surname><given-names>F</given-names> </name><name name-style="western"><surname>Fritsch</surname><given-names>SG</given-names> </name><name name-style="western"><surname>Cleland</surname><given-names>I</given-names> </name><name name-style="western"><surname>Rey</surname><given-names>VF</given-names> </name><name name-style="western"><surname>Nugent</surname><given-names>C</given-names> </name><name name-style="western"><surname>Lukowicz</surname><given-names>P</given-names> </name></person-group><article-title>Few-shot human activity recognition using lightweight language models</article-title><conf-name>2025 International Conference on Activity and Behavior Computing</conf-name><conf-date>Apr 21-25, 2025</conf-date><pub-id pub-id-type="doi">10.1109/ABC64332.2025.11118559</pub-id></nlm-citation></ref><ref id="ref45"><label>45</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Bedi</surname><given-names>S</given-names> </name><name name-style="western"><surname>Liu</surname><given-names>Y</given-names> </name><name name-style="western"><surname>Orr-Ewing</surname><given-names>L</given-names> </name><etal/></person-group><article-title>Testing and evaluation of health care applications of large 
language models: a systematic review</article-title><source>JAMA</source><year>2025</year><month>01</month><day>28</day><volume>333</volume><issue>4</issue><fpage>319</fpage><lpage>328</lpage><pub-id pub-id-type="doi">10.1001/jama.2024.21700</pub-id><pub-id pub-id-type="medline">39405325</pub-id></nlm-citation></ref><ref id="ref46"><label>46</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Li</surname><given-names>H</given-names> </name><name name-style="western"><surname>Zhang</surname><given-names>R</given-names> </name><name name-style="western"><surname>Lee</surname><given-names>YC</given-names> </name><name name-style="western"><surname>Kraut</surname><given-names>RE</given-names> </name><name name-style="western"><surname>Mohr</surname><given-names>DC</given-names> </name></person-group><article-title>Systematic review and meta-analysis of AI-based conversational agents for promoting mental health and well-being</article-title><source>NPJ Digit Med</source><year>2023</year><month>12</month><day>19</day><volume>6</volume><issue>1</issue><fpage>236</fpage><pub-id pub-id-type="doi">10.1038/s41746-023-00979-5</pub-id><pub-id pub-id-type="medline">38114588</pub-id></nlm-citation></ref><ref id="ref47"><label>47</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Laranjo</surname><given-names>L</given-names> </name><name name-style="western"><surname>Dunn</surname><given-names>AG</given-names> </name><name name-style="western"><surname>Tong</surname><given-names>HL</given-names> </name><etal/></person-group><article-title>Conversational agents in healthcare: a systematic review</article-title><source>J Am Med Inform Assoc</source><year>2018</year><month>09</month><day>1</day><volume>25</volume><issue>9</issue><fpage>1248</fpage><lpage>1258</lpage><pub-id pub-id-type="doi">10.1093/jamia/ocy072</pub-id><pub-id 
pub-id-type="medline">30010941</pub-id></nlm-citation></ref><ref id="ref48"><label>48</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Rizzo</surname><given-names>A</given-names> </name><name name-style="western"><surname>Lucas</surname><given-names>G</given-names> </name><name name-style="western"><surname>Gratch</surname><given-names>J</given-names> </name><etal/></person-group><article-title>Automatic behavior analysis during a clinical interview with a virtual human</article-title><source>Stud Health Technol Inform</source><year>2016</year><volume>220</volume><fpage>316</fpage><lpage>322</lpage><pub-id pub-id-type="medline">27046598</pub-id></nlm-citation></ref><ref id="ref49"><label>49</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Cohen</surname><given-names>J</given-names> </name><name name-style="western"><surname>Richter</surname><given-names>V</given-names> </name><name name-style="western"><surname>Neumann</surname><given-names>M</given-names> </name><etal/></person-group><article-title>A multimodal dialog approach to mental state characterization in clinically depressed, anxious, and suicidal populations</article-title><source>Front Psychol</source><year>2023</year><volume>14</volume><fpage>1135469</fpage><pub-id pub-id-type="doi">10.3389/fpsyg.2023.1135469</pub-id><pub-id pub-id-type="medline">37767217</pub-id></nlm-citation></ref><ref id="ref50"><label>50</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Jiang</surname><given-names>Z</given-names> </name><name name-style="western"><surname>Seyedi</surname><given-names>S</given-names> </name><name name-style="western"><surname>Griner</surname><given-names>E</given-names> </name><etal/></person-group><article-title>Multimodal mental health digital biomarker analysis from remote 
interviews using facial, vocal, linguistic, and cardiovascular patterns</article-title><source>IEEE J Biomed Health Inform</source><year>2024</year><month>03</month><volume>28</volume><issue>3</issue><fpage>1680</fpage><lpage>1691</lpage><pub-id pub-id-type="doi">10.1109/JBHI.2024.3352075</pub-id><pub-id pub-id-type="medline">38198249</pub-id></nlm-citation></ref><ref id="ref51"><label>51</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Milintsevich</surname><given-names>K</given-names> </name><name name-style="western"><surname>Sirts</surname><given-names>K</given-names> </name><name name-style="western"><surname>Dias</surname><given-names>G</given-names> </name></person-group><article-title>Towards automatic text-based estimation of depression through symptom prediction</article-title><source>Brain Inform</source><year>2023</year><month>02</month><day>13</day><volume>10</volume><issue>1</issue><fpage>4</fpage><pub-id pub-id-type="doi">10.1186/s40708-023-00185-9</pub-id><pub-id pub-id-type="medline">36780049</pub-id></nlm-citation></ref><ref id="ref52"><label>52</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>He</surname><given-names>Y</given-names> </name><name name-style="western"><surname>Yang</surname><given-names>L</given-names> </name><name name-style="western"><surname>Qian</surname><given-names>C</given-names> </name><etal/></person-group><article-title>Conversational agent interventions for mental health problems: systematic review and meta-analysis of randomized controlled trials</article-title><source>J Med Internet Res</source><year>2023</year><month>04</month><day>28</day><volume>25</volume><fpage>e43862</fpage><pub-id pub-id-type="doi">10.2196/43862</pub-id><pub-id pub-id-type="medline">37115595</pub-id></nlm-citation></ref><ref id="ref53"><label>53</label><nlm-citation 
citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>&#x0106;osi&#x0107;</surname><given-names>K</given-names> </name><name name-style="western"><surname>Popovi&#x0107;</surname><given-names>S</given-names> </name><name name-style="western"><surname>Wiederhold</surname><given-names>BK</given-names> </name></person-group><article-title>Enhancing aviation safety through AI-driven mental health management for pilots and air traffic controllers</article-title><source>Cyberpsychol Behav Soc Netw</source><year>2024</year><month>08</month><volume>27</volume><issue>8</issue><fpage>588</fpage><lpage>598</lpage><pub-id pub-id-type="doi">10.1089/cyber.2023.0737</pub-id><pub-id pub-id-type="medline">38916063</pub-id></nlm-citation></ref><ref id="ref54"><label>54</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Wiederhold</surname><given-names>BK</given-names> </name></person-group><article-title>From cyberspace to outer space: how technology will preserve mental health in the final frontier</article-title><source>Cyberpsychol Behav Soc Netw</source><year>2025</year><month>01</month><volume>28</volume><issue>1</issue><fpage>1</fpage><lpage>3</lpage><pub-id pub-id-type="doi">10.1089/cyber.2024.0609</pub-id><pub-id pub-id-type="medline">39823387</pub-id></nlm-citation></ref><ref id="ref55"><label>55</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Gatti</surname><given-names>M</given-names> </name><name name-style="western"><surname>Palumbo</surname><given-names>R</given-names> </name><name name-style="western"><surname>Di Domenico</surname><given-names>A</given-names> </name><name name-style="western"><surname>Mammarella</surname><given-names>N</given-names> </name></person-group><article-title>Affective health and countermeasures in long-duration space 
exploration</article-title><source>Heliyon</source><year>2022</year><month>05</month><day>13</day><volume>8</volume><issue>5</issue><fpage>e09414</fpage><pub-id pub-id-type="doi">10.1016/j.heliyon.2022.e09414</pub-id><pub-id pub-id-type="medline">35607498</pub-id></nlm-citation></ref><ref id="ref56"><label>56</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Yin</surname><given-names>Y</given-names> </name><name name-style="western"><surname>Liu</surname><given-names>J</given-names> </name><name name-style="western"><surname>Fan</surname><given-names>Q</given-names> </name><etal/></person-group><article-title>Long-term spaceflight composite stress induces depression and cognitive impairment in astronauts-insights from neuroplasticity</article-title><source>Transl Psychiatry</source><year>2023</year><month>11</month><day>8</day><volume>13</volume><issue>1</issue><fpage>342</fpage><pub-id pub-id-type="doi">10.1038/s41398-023-02638-5</pub-id><pub-id pub-id-type="medline">37938258</pub-id></nlm-citation></ref><ref id="ref57"><label>57</label><nlm-citation citation-type="other"><person-group person-group-type="author"><name name-style="western"><surname>So</surname><given-names>M</given-names> </name><name name-style="western"><surname>Sobolev</surname><given-names>M</given-names> </name><name name-style="western"><surname>Menvielle</surname><given-names>G</given-names> </name></person-group><article-title>Sanora: a conversational AI agent for multimodal digital biomarkers of mental health</article-title><source>TechRxiv</source><access-date>2026-03-22</access-date><comment>Preprint posted online on Feb 10, 2026</comment><comment><ext-link ext-link-type="uri" 
xlink:href="https://www.techrxiv.org/users/1027431/articles/1387167-sanora-a-conversational-ai-agent-for-multimodal-digital-biomarkers-of-mental-health">https://www.techrxiv.org/users/1027431/articles/1387167-sanora-a-conversational-ai-agent-for-multimodal-digital-biomarkers-of-mental-health</ext-link></comment></nlm-citation></ref><ref id="ref58"><label>58</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Kroenke</surname><given-names>K</given-names> </name><name name-style="western"><surname>Spitzer</surname><given-names>RL</given-names> </name><name name-style="western"><surname>Williams</surname><given-names>JB</given-names> </name></person-group><article-title>The PHQ-9: validity of a brief depression severity measure</article-title><source>J Gen Intern Med</source><year>2001</year><month>09</month><volume>16</volume><issue>9</issue><fpage>606</fpage><lpage>613</lpage><pub-id pub-id-type="doi">10.1046/j.1525-1497.2001.016009606.x</pub-id><pub-id pub-id-type="medline">11556941</pub-id></nlm-citation></ref><ref id="ref59"><label>59</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Okuyama</surname><given-names>T</given-names> </name><name name-style="western"><surname>Akechi</surname><given-names>T</given-names> </name><name name-style="western"><surname>Kugaya</surname><given-names>A</given-names> </name><etal/></person-group><article-title>Development and validation of the Cancer Fatigue Scale</article-title><source>J Pain Symptom Manage</source><year>2000</year><month>01</month><volume>19</volume><issue>1</issue><fpage>5</fpage><lpage>14</lpage><pub-id pub-id-type="doi">10.1016/S0885-3924(99)00138-4</pub-id><pub-id pub-id-type="medline">10687321</pub-id></nlm-citation></ref><ref id="ref60"><label>60</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name 
name-style="western"><surname>Dahmen</surname><given-names>J</given-names> </name><name name-style="western"><surname>Cook</surname><given-names>D</given-names> </name><name name-style="western"><surname>Fellows</surname><given-names>R</given-names> </name><name name-style="western"><surname>Schmitter-Edgecombe</surname><given-names>M</given-names> </name></person-group><article-title>An analysis of a digital variant of the Trail Making Test using machine learning techniques</article-title><source>Technol Health Care</source><year>2017</year><volume>25</volume><issue>2</issue><fpage>251</fpage><lpage>264</lpage><pub-id pub-id-type="doi">10.3233/THC-161274</pub-id><pub-id pub-id-type="medline">27886019</pub-id></nlm-citation></ref><ref id="ref61"><label>61</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Girard</surname><given-names>JM</given-names> </name><name name-style="western"><surname>Cohn</surname><given-names>JF</given-names> </name><name name-style="western"><surname>Mahoor</surname><given-names>MH</given-names> </name><name name-style="western"><surname>Mavadati</surname><given-names>SM</given-names> </name><name name-style="western"><surname>Hammal</surname><given-names>Z</given-names> </name><name name-style="western"><surname>Rosenwald</surname><given-names>DP</given-names> </name></person-group><article-title>Nonverbal social withdrawal in depression: evidence from manual and automatic analysis</article-title><source>Image Vis Comput</source><year>2014</year><month>10</month><volume>32</volume><issue>10</issue><fpage>641</fpage><lpage>647</lpage><pub-id pub-id-type="doi">10.1016/j.imavis.2013.12.007</pub-id><pub-id pub-id-type="medline">25378765</pub-id></nlm-citation></ref><ref id="ref62"><label>62</label><nlm-citation citation-type="confproc"><person-group person-group-type="author"><name name-style="western"><surname>Scherer</surname><given-names>S</given-names> </name><name 
name-style="western"><surname>Stratou</surname><given-names>G</given-names> </name><name name-style="western"><surname>Mahmoud</surname><given-names>M</given-names> </name><etal/></person-group><article-title>Automatic behavior descriptors for psychological disorder analysis</article-title><conf-name>10th IEEE International Conference and Workshops on Automatic Face and Gesture Recognition</conf-name><conf-date>Apr 22-26, 2013</conf-date><pub-id pub-id-type="doi">10.1109/FG.2013.6553789</pub-id></nlm-citation></ref><ref id="ref63"><label>63</label><nlm-citation citation-type="confproc"><person-group person-group-type="author"><name name-style="western"><surname>Girard</surname><given-names>JM</given-names> </name><name name-style="western"><surname>Cohn</surname><given-names>JF</given-names> </name><name name-style="western"><surname>Mahoor</surname><given-names>MH</given-names> </name><name name-style="western"><surname>Mavadati</surname><given-names>S</given-names> </name><name name-style="western"><surname>Rosenwald</surname><given-names>DP</given-names> </name></person-group><article-title>Social risk and depression: evidence from manual and automatic facial expression analysis</article-title><conf-name>International Conference on Automatic Face and Gesture Recognition 2013</conf-name><conf-date>Apr 22-26, 2013</conf-date><pub-id pub-id-type="doi">10.1109/FG.2013.6553748</pub-id></nlm-citation></ref><ref id="ref64"><label>64</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Menne</surname><given-names>F</given-names> </name><name name-style="western"><surname>D&#x00F6;rr</surname><given-names>F</given-names> </name><name name-style="western"><surname>Schr&#x00E4;der</surname><given-names>J</given-names> </name><etal/></person-group><article-title>The voice of depression: speech features as biomarkers for major depressive disorder</article-title><source>BMC 
Psychiatry</source><year>2024</year><month>11</month><day>12</day><volume>24</volume><issue>1</issue><fpage>794</fpage><pub-id pub-id-type="doi">10.1186/s12888-024-06253-6</pub-id><pub-id pub-id-type="medline">39533239</pub-id></nlm-citation></ref><ref id="ref65"><label>65</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Wang</surname><given-names>Y</given-names> </name><name name-style="western"><surname>Liang</surname><given-names>L</given-names> </name><name name-style="western"><surname>Zhang</surname><given-names>Z</given-names> </name><etal/></person-group><article-title>Fast and accurate assessment of depression based on voice acoustic features: a cross-sectional and longitudinal study</article-title><source>Front Psychiatry</source><year>2023</year><volume>14</volume><fpage>1195276</fpage><pub-id pub-id-type="doi">10.3389/fpsyt.2023.1195276</pub-id><pub-id pub-id-type="medline">37415683</pub-id></nlm-citation></ref><ref id="ref66"><label>66</label><nlm-citation citation-type="confproc"><person-group person-group-type="author"><name name-style="western"><surname>Cummins</surname><given-names>N</given-names> </name><name name-style="western"><surname>Sethu</surname><given-names>V</given-names> </name><name name-style="western"><surname>Epps</surname><given-names>J</given-names> </name><name name-style="western"><surname>Krajewski</surname><given-names>J</given-names> </name></person-group><article-title>Probabilistic acoustic volume analysis for speech affected by depression</article-title><conf-name>Interspeech 2014</conf-name><conf-date>Sep 14-18, 2014</conf-date><pub-id pub-id-type="doi">10.21437/Interspeech.2014-311</pub-id></nlm-citation></ref><ref id="ref67"><label>67</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Shinohara</surname><given-names>S</given-names> </name><name 
name-style="western"><surname>Toda</surname><given-names>H</given-names> </name><name name-style="western"><surname>Nakamura</surname><given-names>M</given-names> </name><etal/></person-group><article-title>Evaluation of emotional arousal level and depression severity using voice-derived sound pressure change acceleration</article-title><source>Sci Rep</source><year>2021</year><month>06</month><day>30</day><volume>11</volume><issue>1</issue><fpage>13615</fpage><pub-id pub-id-type="doi">10.1038/s41598-021-92982-7</pub-id><pub-id pub-id-type="medline">34193915</pub-id></nlm-citation></ref><ref id="ref68"><label>68</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Mundt</surname><given-names>JC</given-names> </name><name name-style="western"><surname>Vogel</surname><given-names>AP</given-names> </name><name name-style="western"><surname>Feltner</surname><given-names>DE</given-names> </name><name name-style="western"><surname>Lenderking</surname><given-names>WR</given-names> </name></person-group><article-title>Vocal acoustic biomarkers of depression severity and treatment response</article-title><source>Biol Psychiatry</source><year>2012</year><month>10</month><day>1</day><volume>72</volume><issue>7</issue><fpage>580</fpage><lpage>587</lpage><pub-id pub-id-type="doi">10.1016/j.biopsych.2012.03.015</pub-id><pub-id pub-id-type="medline">22541039</pub-id></nlm-citation></ref><ref id="ref69"><label>69</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Mundt</surname><given-names>JC</given-names> </name><name name-style="western"><surname>Snyder</surname><given-names>PJ</given-names> </name><name name-style="western"><surname>Cannizzaro</surname><given-names>MS</given-names> </name><name name-style="western"><surname>Chappie</surname><given-names>K</given-names> </name><name 
name-style="western"><surname>Geralts</surname><given-names>DS</given-names> </name></person-group><article-title>Voice acoustic measures of depression severity and treatment response collected via interactive voice response (IVR) technology</article-title><source>J Neurolinguistics</source><year>2007</year><month>01</month><volume>20</volume><issue>1</issue><fpage>50</fpage><lpage>64</lpage><pub-id pub-id-type="doi">10.1016/j.jneuroling.2006.04.001</pub-id><pub-id pub-id-type="medline">21253440</pub-id></nlm-citation></ref><ref id="ref70"><label>70</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Yamamoto</surname><given-names>M</given-names> </name><name name-style="western"><surname>Takamiya</surname><given-names>A</given-names> </name><name name-style="western"><surname>Sawada</surname><given-names>K</given-names> </name><etal/></person-group><article-title>Using speech recognition technology to investigate the association between timing-related speech features and depression severity</article-title><source>PLoS One</source><year>2020</year><volume>15</volume><issue>9</issue><fpage>e0238726</fpage><pub-id pub-id-type="doi">10.1371/journal.pone.0238726</pub-id><pub-id pub-id-type="medline">32915846</pub-id></nlm-citation></ref><ref id="ref71"><label>71</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Cannizzaro</surname><given-names>M</given-names> </name><name name-style="western"><surname>Harel</surname><given-names>B</given-names> </name><name name-style="western"><surname>Reilly</surname><given-names>N</given-names> </name><name name-style="western"><surname>Chappell</surname><given-names>P</given-names> </name><name name-style="western"><surname>Snyder</surname><given-names>PJ</given-names> </name></person-group><article-title>Voice acoustical measurement of the severity of major 
depression</article-title><source>Brain Cogn</source><year>2004</year><month>10</month><volume>56</volume><issue>1</issue><fpage>30</fpage><lpage>35</lpage><pub-id pub-id-type="doi">10.1016/j.bandc.2004.05.003</pub-id><pub-id pub-id-type="medline">15380873</pub-id></nlm-citation></ref><ref id="ref72"><label>72</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Sundelin</surname><given-names>T</given-names> </name><name name-style="western"><surname>Lekander</surname><given-names>M</given-names> </name><name name-style="western"><surname>Kecklund</surname><given-names>G</given-names> </name><name name-style="western"><surname>Van Someren</surname><given-names>EJ</given-names> </name><name name-style="western"><surname>Olsson</surname><given-names>A</given-names> </name><name name-style="western"><surname>Axelsson</surname><given-names>J</given-names> </name></person-group><article-title>Cues of fatigue: effects of sleep deprivation on facial appearance</article-title><source>Sleep</source><year>2013</year><month>09</month><day>1</day><volume>36</volume><issue>9</issue><fpage>1355</fpage><lpage>1360</lpage><pub-id pub-id-type="doi">10.5665/sleep.2964</pub-id><pub-id pub-id-type="medline">23997369</pub-id></nlm-citation></ref><ref id="ref73"><label>73</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Zargari Marandi</surname><given-names>R</given-names> </name><name name-style="western"><surname>Madeleine</surname><given-names>P</given-names> </name><name name-style="western"><surname>Omland</surname><given-names>&#x00D8;</given-names> </name><name name-style="western"><surname>Vuillerme</surname><given-names>N</given-names> </name><name name-style="western"><surname>Samani</surname><given-names>A</given-names> </name></person-group><article-title>Eye movement characteristics reflected fatigue development in both young and 
elderly individuals</article-title><source>Sci Rep</source><year>2018</year><month>09</month><day>3</day><volume>8</volume><issue>1</issue><fpage>13148</fpage><pub-id pub-id-type="doi">10.1038/s41598-018-31577-1</pub-id><pub-id pub-id-type="medline">30177693</pub-id></nlm-citation></ref><ref id="ref74"><label>74</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Stern</surname><given-names>JA</given-names> </name><name name-style="western"><surname>Boyer</surname><given-names>D</given-names> </name><name name-style="western"><surname>Schroeder</surname><given-names>D</given-names> </name></person-group><article-title>Blink rate: a possible measure of fatigue</article-title><source>Hum Factors</source><year>1994</year><month>06</month><volume>36</volume><issue>2</issue><fpage>285</fpage><lpage>297</lpage><pub-id pub-id-type="doi">10.1177/001872089403600209</pub-id><pub-id pub-id-type="medline">8070793</pub-id></nlm-citation></ref><ref id="ref75"><label>75</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Yin</surname><given-names>Z</given-names> </name><name name-style="western"><surname>Liu</surname><given-names>B</given-names> </name><name name-style="western"><surname>Hao</surname><given-names>D</given-names> </name><name name-style="western"><surname>Yang</surname><given-names>L</given-names> </name><name name-style="western"><surname>Feng</surname><given-names>Y</given-names> </name></person-group><article-title>Evaluation of VDT-induced visual fatigue by automatic detection of blink features</article-title><source>Sensors (Basel)</source><year>2022</year><month>01</month><day>25</day><volume>22</volume><issue>3</issue><fpage>916</fpage><pub-id pub-id-type="doi">10.3390/s22030916</pub-id><pub-id pub-id-type="medline">35161662</pub-id></nlm-citation></ref><ref id="ref76"><label>76</label><nlm-citation 
citation-type="confproc"><person-group person-group-type="author"><name name-style="western"><surname>Hasan</surname><given-names>F</given-names> </name><name name-style="western"><surname>Kashevnik</surname><given-names>A</given-names> </name></person-group><article-title>State-of-the-art analysis of modern drowsiness detection algorithms based on computer vision</article-title><conf-name>29th Conference of Open Innovations Association (FRUCT)</conf-name><conf-date>May 12-14, 2021</conf-date><pub-id pub-id-type="doi">10.23919/FRUCT52173.2021.9435480</pub-id></nlm-citation></ref><ref id="ref77"><label>77</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Yamada</surname><given-names>Y</given-names> </name><name name-style="western"><surname>Kobayashi</surname><given-names>M</given-names> </name></person-group><article-title>Detecting mental fatigue from eye-tracking data gathered while watching video: evaluation in younger and older adults</article-title><source>Artif Intell Med</source><year>2018</year><month>09</month><volume>91</volume><fpage>39</fpage><lpage>48</lpage><pub-id pub-id-type="doi">10.1016/j.artmed.2018.06.005</pub-id><pub-id pub-id-type="medline">30026049</pub-id></nlm-citation></ref><ref id="ref78"><label>78</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Guggisberg</surname><given-names>AG</given-names> </name><name name-style="western"><surname>Mathis</surname><given-names>J</given-names> </name><name name-style="western"><surname>Schnider</surname><given-names>A</given-names> </name><name name-style="western"><surname>Hess</surname><given-names>CW</given-names> </name></person-group><article-title>Why do we yawn?</article-title><source>Neurosci Biobehav Rev</source><year>2010</year><month>07</month><volume>34</volume><issue>8</issue><fpage>1267</fpage><lpage>1276</lpage><pub-id 
pub-id-type="doi">10.1016/j.neubiorev.2010.03.008</pub-id><pub-id pub-id-type="medline">20382180</pub-id></nlm-citation></ref><ref id="ref79"><label>79</label><nlm-citation citation-type="confproc"><person-group person-group-type="author"><name name-style="western"><surname>Krajewski</surname><given-names>J</given-names> </name><name name-style="western"><surname>Trutschel</surname><given-names>U</given-names> </name><name name-style="western"><surname>Golz</surname><given-names>M</given-names> </name><name name-style="western"><surname>Sommer</surname><given-names>D</given-names> </name><name name-style="western"><surname>Edwards</surname><given-names>D</given-names> </name></person-group><article-title>Estimating fatigue from predetermined speech samples transmitted by operator communication systems</article-title><conf-name>Fifth International Driving Symposium on Human Factors in Driver Assessment, Training and Vehicle Design</conf-name><conf-date>Jun 22-25, 2009</conf-date><pub-id pub-id-type="doi">10.17077/drivingassessment.1359</pub-id></nlm-citation></ref><ref id="ref80"><label>80</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>de Vasconcelos</surname><given-names>CA</given-names> </name><name name-style="western"><surname>Vieira</surname><given-names>MN</given-names> </name><name name-style="western"><surname>Kecklund</surname><given-names>G</given-names> </name><name name-style="western"><surname>Yehia</surname><given-names>HC</given-names> </name></person-group><article-title>Speech analysis for fatigue and sleepiness detection of a pilot</article-title><source>Aerosp Med Hum Perform</source><year>2019</year><month>04</month><day>1</day><volume>90</volume><issue>4</issue><fpage>415</fpage><lpage>418</lpage><pub-id pub-id-type="doi">10.3357/AMHP.5134.2019</pub-id><pub-id pub-id-type="medline">30922431</pub-id></nlm-citation></ref><ref id="ref81"><label>81</label><nlm-citation 
citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Magliacano</surname><given-names>A</given-names> </name><name name-style="western"><surname>Fiorenza</surname><given-names>S</given-names> </name><name name-style="western"><surname>Estraneo</surname><given-names>A</given-names> </name><name name-style="western"><surname>Trojano</surname><given-names>L</given-names> </name></person-group><article-title>Eye blink rate increases as a function of cognitive load during an auditory oddball paradigm</article-title><source>Neurosci Lett</source><year>2020</year><month>09</month><day>25</day><volume>736</volume><fpage>135293</fpage><pub-id pub-id-type="doi">10.1016/j.neulet.2020.135293</pub-id><pub-id pub-id-type="medline">32771601</pub-id></nlm-citation></ref><ref id="ref82"><label>82</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Fischer</surname><given-names>B</given-names> </name><name name-style="western"><surname>Breitmeyer</surname><given-names>B</given-names> </name></person-group><article-title>Mechanisms of visual attention revealed by saccadic eye movements</article-title><source>Neuropsychologia</source><year>1987</year><volume>25</volume><issue>1A</issue><fpage>73</fpage><lpage>83</lpage><pub-id pub-id-type="doi">10.1016/0028-3932(87)90044-3</pub-id><pub-id pub-id-type="medline">3574652</pub-id></nlm-citation></ref><ref id="ref83"><label>83</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Bloch</surname><given-names>Y</given-names> </name><name name-style="western"><surname>Aviram</surname><given-names>S</given-names> </name><name name-style="western"><surname>Neeman</surname><given-names>R</given-names> </name><etal/></person-group><article-title>Methylphenidate mediated change in prosody is specific to the performance of a cognitive task in female adult ADHD 
patients</article-title><source>World J Biol Psychiatry</source><year>2015</year><volume>16</volume><issue>8</issue><fpage>635</fpage><lpage>639</lpage><pub-id pub-id-type="doi">10.3109/15622975.2015.1036115</pub-id><pub-id pub-id-type="medline">25945954</pub-id></nlm-citation></ref><ref id="ref84"><label>84</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Holmlund</surname><given-names>TB</given-names> </name><name name-style="western"><surname>Cohen</surname><given-names>AS</given-names> </name><name name-style="western"><surname>Cheng</surname><given-names>J</given-names> </name><etal/></person-group><article-title>Using automated speech processing for repeated measurements in a clinical setting of the behavioral variability in the stroop task</article-title><source>Brain Sci</source><year>2023</year><month>03</month><day>4</day><volume>13</volume><issue>3</issue><fpage>442</fpage><pub-id pub-id-type="doi">10.3390/brainsci13030442</pub-id><pub-id pub-id-type="medline">36979252</pub-id></nlm-citation></ref><ref id="ref85"><label>85</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Haltaufderheide</surname><given-names>J</given-names> </name><name name-style="western"><surname>Ranisch</surname><given-names>R</given-names> </name></person-group><article-title>The ethics of ChatGPT in medicine and healthcare: a systematic review on Large Language Models (LLMs)</article-title><source>NPJ Digit Med</source><year>2024</year><month>07</month><day>8</day><volume>7</volume><issue>1</issue><fpage>183</fpage><pub-id pub-id-type="doi">10.1038/s41746-024-01157-x</pub-id><pub-id pub-id-type="medline">38977771</pub-id></nlm-citation></ref><ref id="ref86"><label>86</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Tartaglia</surname><given-names>J</given-names> 
</name><name name-style="western"><surname>Jaghab</surname><given-names>B</given-names> </name><name name-style="western"><surname>Ismail</surname><given-names>M</given-names> </name><etal/></person-group><article-title>Assessing health technology literacy and attitudes of patients in an urban outpatient psychiatry clinic: cross-sectional survey study</article-title><source>JMIR Ment Health</source><year>2024</year><month>12</month><day>30</day><volume>11</volume><fpage>e63034</fpage><pub-id pub-id-type="doi">10.2196/63034</pub-id><pub-id pub-id-type="medline">39753220</pub-id></nlm-citation></ref><ref id="ref87"><label>87</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Sobolev</surname><given-names>M</given-names> </name><name name-style="western"><surname>Vitale</surname><given-names>R</given-names> </name><name name-style="western"><surname>Wen</surname><given-names>H</given-names> </name><etal/></person-group><article-title>The Digital Marshmallow Test (DMT) diagnostic and monitoring mobile health app for impulsive behavior: development and validation study</article-title><source>JMIR Mhealth Uhealth</source><year>2021</year><month>01</month><day>22</day><volume>9</volume><issue>1</issue><fpage>e25018</fpage><pub-id pub-id-type="doi">10.2196/25018</pub-id><pub-id pub-id-type="medline">33480854</pub-id></nlm-citation></ref><ref id="ref88"><label>88</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Radanliev</surname><given-names>P</given-names> </name><name name-style="western"><surname>Santos</surname><given-names>O</given-names> </name><name name-style="western"><surname>Brandon-Jones</surname><given-names>A</given-names> </name><name name-style="western"><surname>Joinson</surname><given-names>A</given-names> </name></person-group><article-title>Ethics and responsible AI deployment</article-title><source>Front Artif 
Intell</source><year>2024</year><volume>7</volume><fpage>1377011</fpage><pub-id pub-id-type="doi">10.3389/frai.2024.1377011</pub-id><pub-id pub-id-type="medline">38601110</pub-id></nlm-citation></ref><ref id="ref89"><label>89</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Trocin</surname><given-names>C</given-names> </name><name name-style="western"><surname>Mikalef</surname><given-names>P</given-names> </name><name name-style="western"><surname>Papamitsiou</surname><given-names>Z</given-names> </name><name name-style="western"><surname>Conboy</surname><given-names>K</given-names> </name></person-group><article-title>Responsible AI for digital health: a synthesis and a research agenda</article-title><source>Inf Syst Front</source><year>2023</year><month>12</month><volume>25</volume><issue>6</issue><fpage>2139</fpage><lpage>2157</lpage><pub-id pub-id-type="doi">10.1007/s10796-021-10146-4</pub-id></nlm-citation></ref><ref id="ref90"><label>90</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Chaturvedi</surname><given-names>RR</given-names> </name><name name-style="western"><surname>Angrisani</surname><given-names>M</given-names> </name><name name-style="western"><surname>Troxel</surname><given-names>WM</given-names> </name><etal/></person-group><article-title>American Life in Realtime: benchmark, publicly available person-generated health data for equity in precision health</article-title><source>PNAS Nexus</source><year>2025</year><month>10</month><volume>4</volume><issue>10</issue><fpage>pgaf295</fpage><pub-id pub-id-type="doi">10.1093/pnasnexus/pgaf295</pub-id><pub-id pub-id-type="medline">41064239</pub-id></nlm-citation></ref><ref id="ref91"><label>91</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Wen</surname><given-names>H</given-names> 
</name><name name-style="western"><surname>Sobolev</surname><given-names>M</given-names> </name><name name-style="western"><surname>Vitale</surname><given-names>R</given-names> </name><etal/></person-group><article-title>mPulse mobile sensing model for passive detection of impulsive behavior: exploratory prediction study</article-title><source>JMIR Ment Health</source><year>2021</year><month>01</month><day>27</day><volume>8</volume><issue>1</issue><fpage>e25019</fpage><pub-id pub-id-type="doi">10.2196/25019</pub-id><pub-id pub-id-type="medline">33502330</pub-id></nlm-citation></ref></ref-list><app-group><supplementary-material id="app1"><label>Multimedia Appendix 1</label><p>Feature list.</p><media xlink:href="formative_v10i1e87054_app1.docx" xlink:title="DOCX File, 12 KB"/></supplementary-material><supplementary-material id="app2"><label>Multimedia Appendix 2</label><p>Survey responses.</p><media xlink:href="formative_v10i1e87054_app2.docx" xlink:title="DOCX File, 16 KB"/></supplementary-material></app-group></back></article>