<?xml version="1.0" encoding="UTF-8"?><!DOCTYPE article PUBLIC "-//NLM//DTD Journal Publishing DTD v2.0 20040830//EN" "journalpublishing.dtd"><article xmlns:mml="http://www.w3.org/1998/Math/MathML" xmlns:xlink="http://www.w3.org/1999/xlink" dtd-version="2.0" xml:lang="en" article-type="research-article"><front><journal-meta><journal-id journal-id-type="nlm-ta">JMIR Form Res</journal-id><journal-id journal-id-type="publisher-id">formative</journal-id><journal-id journal-id-type="index">27</journal-id><journal-title>JMIR Formative Research</journal-title><abbrev-journal-title>JMIR Form Res</abbrev-journal-title><issn pub-type="epub">2561-326X</issn><publisher><publisher-name>JMIR Publications</publisher-name><publisher-loc>Toronto, Canada</publisher-loc></publisher></journal-meta><article-meta><article-id pub-id-type="publisher-id">v10i1e83630</article-id><article-id pub-id-type="doi">10.2196/83630</article-id><article-categories><subj-group subj-group-type="heading"><subject>Original Paper</subject></subj-group></article-categories><title-group><article-title>Using AI Chatbot to Assist Students&#x2019; Behavior Management for Obesity Prevention in Middle Schools: Feasibility Study</article-title></title-group><contrib-group><contrib contrib-type="author"><name name-style="western"><surname>Tan</surname><given-names>Qiaoyin</given-names></name><degrees>MS</degrees><xref ref-type="aff" rid="aff1">1</xref></contrib><contrib contrib-type="author"><name name-style="western"><surname>Nie</surname><given-names>Yuxin</given-names></name><degrees>MS</degrees><xref ref-type="aff" rid="aff1">1</xref></contrib><contrib contrib-type="author"><name name-style="western"><surname>Son</surname><given-names>Paul</given-names></name><degrees>MS</degrees><xref ref-type="aff" rid="aff1">1</xref></contrib><contrib contrib-type="author"><name name-style="western"><surname>Qian</surname><given-names>Yizhou</given-names></name><degrees>PhD</degrees><xref ref-type="aff" 
rid="aff1">1</xref></contrib><contrib contrib-type="author"><name name-style="western"><surname>Staiano</surname><given-names>Amanda E</given-names></name><degrees>PhD</degrees><xref ref-type="aff" rid="aff2">2</xref></contrib><contrib contrib-type="author"><name name-style="western"><surname>Wang</surname><given-names>Fahui</given-names></name><degrees>PhD</degrees><xref ref-type="aff" rid="aff3">3</xref></contrib><contrib contrib-type="author"><name name-style="western"><surname>Rosenkranz</surname><given-names>Richard R</given-names></name><degrees>PhD</degrees><xref ref-type="aff" rid="aff4">4</xref></contrib><contrib contrib-type="author" corresp="yes"><name name-style="western"><surname>Chen</surname><given-names>Senlin</given-names></name><degrees>PhD</degrees><xref ref-type="aff" rid="aff1">1</xref></contrib></contrib-group><aff id="aff1"><institution>School of Kinesiology, Louisiana State University</institution><addr-line>2124 HPL Field House</addr-line><addr-line>Baton Rouge</addr-line><addr-line>LA</addr-line><country>United States</country></aff><aff id="aff2"><institution>Pennington Biomedical Research Center</institution><addr-line>Baton Rouge</addr-line><addr-line>LA</addr-line><country>United States</country></aff><aff id="aff3"><institution>Department of Geography &#x0026; Anthropology, Louisiana State University</institution><addr-line>Baton Rouge</addr-line><addr-line>LA</addr-line><country>United States</country></aff><aff id="aff4"><institution>Department of Kinesiology &#x0026; Nutrition Sciences, University of Nevada</institution><addr-line>Las Vegas</addr-line><addr-line>NV</addr-line><country>United States</country></aff><contrib-group><contrib contrib-type="editor"><name name-style="western"><surname>Schwartz</surname><given-names>Amy</given-names></name></contrib><contrib contrib-type="editor"><name name-style="western"><surname>Balcarras</surname><given-names>Matthew</given-names></name></contrib></contrib-group><contrib-group><contrib 
contrib-type="reviewer"><name name-style="western"><surname>Devine</surname><given-names>Katie</given-names></name></contrib><contrib contrib-type="reviewer"><name name-style="western"><surname>Khan</surname><given-names>Sohail Imran</given-names></name></contrib></contrib-group><author-notes><corresp>Correspondence to Senlin Chen, PhD, School of Kinesiology, Louisiana State University, 2124 HPL Field House, Baton Rouge, LA, 70803, United States, 1 (225) 5787995, 1 (225) 5783680; <email>senlinchen@lsu.edu</email></corresp></author-notes><pub-date pub-type="collection"><year>2026</year></pub-date><pub-date pub-type="epub"><day>7</day><month>1</month><year>2026</year></pub-date><volume>10</volume><elocation-id>e83630</elocation-id><history><date date-type="received"><day>05</day><month>09</month><year>2025</year></date><date date-type="accepted"><day>02</day><month>12</month><year>2025</year></date></history><copyright-statement>&#x00A9; Qiaoyin Tan, Yuxin Nie, Paul Son, Yizhou Qian, Amanda E Staiano, Fahui Wang, Richard R Rosenkranz, Senlin Chen. Originally published in JMIR Formative Research (<ext-link ext-link-type="uri" xlink:href="https://formative.jmir.org">https://formative.jmir.org</ext-link>), 7.1.2026. </copyright-statement><copyright-year>2026</copyright-year><license license-type="open-access" xlink:href="https://creativecommons.org/licenses/by/4.0/"><p>This is an open-access article distributed under the terms of the Creative Commons Attribution License (<ext-link ext-link-type="uri" xlink:href="https://creativecommons.org/licenses/by/4.0/">https://creativecommons.org/licenses/by/4.0/</ext-link>), which permits unrestricted use, distribution, and reproduction in any medium, provided the original work, first published in JMIR Formative Research, is properly cited. 
The complete bibliographic information, a link to the original publication on <ext-link ext-link-type="uri" xlink:href="https://formative.jmir.org">https://formative.jmir.org</ext-link>, as well as this copyright and license information must be included.</p></license><self-uri xlink:type="simple" xlink:href="https://formative.jmir.org/2026/1/e83630"/><abstract><sec><title>Background</title><p>Adolescent obesity remains a pressing public health challenge, particularly among socioeconomically disadvantaged populations. Artificial intelligence (AI) holds the promise for supporting students in managing daily health behaviors, but few existing studies used AI-based interventions in naturalistic settings such as schools.</p></sec><sec><title>Objective</title><p>This study evaluated the feasibility and preliminary impact of ProudMe Tech (Louisiana State University), an AI-assisted web app designed to help students manage 4 obesity-related behaviors: physical activity, screen time, diet, and sleep.</p></sec><sec sec-type="methods"><title>Methods</title><p>The 8-week, 1-arm pilot intervention study recruited 172 participants from 5 middle schools in Louisiana and used the ProudMe Tech to set behavior goals, track behaviors, record reflections, and receive AI-generated feedback. Both engagement (primary focus) and behavior impacts (secondary focus) were examined.</p></sec><sec sec-type="results"><title>Results</title><p>Engagement metrics indicated varying levels of usage, averaging 8.9 (SD 7.6) behavior entries and 30.0 (SD 28.3) reflections per student, and receiving 33.5 (SD 29.7) AI feedback messages. Overall, participants recorded 6164 valid daily goals, of which 3934 (63.8%) were achieved. Natural language processing of the reflections and AI feedback messages revealed an overall neutral to positive sentiment. 
Pre- to postcomparisons showed (1) a significant reduction in screen time from 4.3 (SD 2.6) to 3.4 (SD 2.5) hours per day (21.6% decrease; <italic>t</italic><sub>164</sub>=6.18, <italic>P</italic>&#x003C;.001), (2) a small but significant decrease in fruit and vegetable intake from 5.7 (SD 3.8) to 5.2 (SD 3.5) servings per day (8.9% decrease; <italic>t</italic><sub>169</sub>=2.27, <italic>P</italic>=.46), and (3) no significant changes in physical activity and sleep.</p></sec><sec sec-type="conclusions"><title>Conclusions</title><p>These findings suggest that ProudMe Tech is a feasible AI chatbot that can engage adolescents in health behavior management, but more adaptation is needed to effectively elicit improvements in health behaviors and lower the obesity risk in middle school students.</p></sec></abstract><kwd-group><kwd>adolescence health</kwd><kwd>behavior intervention</kwd><kwd>large language models</kwd><kwd>school-based</kwd><kwd>mobile health</kwd></kwd-group></article-meta></front><body><sec id="s1" sec-type="intro"><title>Introduction</title><p>The prevalence of adolescent obesity is high and demands innovative interventions. 2024 national surveillance data show that more than 20% of American youth aged 10 to 14 years have obesity, and the prevalence is even higher among racial and ethnic minorities and those from socioeconomically disadvantaged households [<xref ref-type="bibr" rid="ref1">1</xref>]. Early adolescence marks a key developmental stage for the formation of healthy behavioral patterns, as youth have increased autonomy for physical activities, screen time, diet, and sleep&#x2014;4 influential health behaviors related to obesity [<xref ref-type="bibr" rid="ref2">2</xref>,<xref ref-type="bibr" rid="ref3">3</xref>]. If not addressed, unhealthy behavior habits and obesity may track into adulthood [<xref ref-type="bibr" rid="ref4">4</xref>]. 
Furthermore, existing behavior interventions for adolescent obesity prevention have shown limited effectiveness due to reasons such as low participation, poor personalization, and inadequate scalability [<xref ref-type="bibr" rid="ref5">5</xref>]. Researchers have increasingly turned to novel technologies to promote behavior changes to prevent obesity. Digital tools such as mobile apps, wearable trackers, virtual or augmented reality, and gamifications have shown promising feasibility and effect in promoting physical activity, healthy diet, and sleep hygiene [<xref ref-type="bibr" rid="ref6">6</xref>]. These technological innovations have permeated schools and homes and are naturally drawn to adolescents. Some have become important educational technologies in schools [<xref ref-type="bibr" rid="ref7">7</xref>], but most have limited capacity to support continuous self-adjustment or personalized learning at a low cost.</p><p>Large language model-powered artificial intelligence (AI) chatbots represent an emerging and scalable technological intervention for delivering cost-effective and personalized behavioral counseling for adolescents. AI chatbots can simulate natural conversations, respond to user input in real time, and customize messages according to the user&#x2019;s preferences, goals, and progress [<xref ref-type="bibr" rid="ref8">8</xref>,<xref ref-type="bibr" rid="ref9">9</xref>]. Early research results of adult health apps showed that compared with traditional manual guidance, AI chatbots enhanced participation and enthusiasm and improved health at a low cost [<xref ref-type="bibr" rid="ref10">10</xref>,<xref ref-type="bibr" rid="ref11">11</xref>]. However, little research used AI-based technology for health intervention in the younger populations (eg, for adolescent obesity prevention), especially in naturalistic settings such as schools. 
Rarely has prior research investigated the feasibility and impact of an AI chatbot in supporting adolescents&#x2019; management of health behaviors.</p><p>To fill the gap, this study aimed to evaluate ProudMe Tech<italic>,</italic> an AI chatbot that helps users (1) set specific, measurable, attainable, realistic, and timely (SMART) goals for physical activity, screen time, diet, and sleep. ProudMe Tech is part of a multicomponent intervention called ProudMe (<underline>Pr</underline>eventing <underline>O</underline>besity <underline>U</underline>sing <underline>D</underline>igital-assisted <underline>M</underline>ovement and <underline>E</underline>ating), which was adapted from the evidence-based SWITCH-MS intervention [<xref ref-type="bibr" rid="ref12">12</xref>,<xref ref-type="bibr" rid="ref13">13</xref>]. The ProudMe intervention has 4 components, including (1) a 12-lesson health and physical education curriculum, (2) the simplified Smarter Lunchrooms Movement strategies, (3) professional development for teachers and staff, and (4) the ProudMe Tech. The design of ProudMe Tech is grounded in the Self-Determination Theory [<xref ref-type="bibr" rid="ref14">14</xref>] and the Self-Regulation Theory [<xref ref-type="bibr" rid="ref15">15</xref>] with the goal to foster users&#x2019; perceived competence, self-efficacy, and intrinsic motivation. When engaging with the ProudMe Tech, users receive informational and emotional social support from the AI as a virtual coach to improve their goal-setting, self-monitoring, and self-reflection skills to manage their health-related behaviors [<xref ref-type="bibr" rid="ref16">16</xref>]. ProudMe Tech implementation was unsuccessful in the first round of ProudMe intervention in 2024 for unexpected reasons in reality (eg, web link blocked by the district IT, challenges with user registration, and lack of teacher buy-in) [<xref ref-type="bibr" rid="ref13">13</xref>], but it had undergone significant refinement and beta testing. 
The purpose of this study was to field-test the integration of the refined ProudMe Tech into schools by evaluating its user engagement and impact on users&#x2019; health behaviors. The research team hypothesized that adolescents would actively engage with the ProudMe Tech platform and demonstrate favorable short-term changes in targeted health behaviors.</p></sec><sec id="s2" sec-type="methods"><title>Methods</title><sec id="s2-1"><title>Setting and Participants</title><p>ProudMe Tech, as a component of the larger ProudMe intervention, was implemented in 5 public middle schools recruited within one school district from the Baton Rouge metropolitan area, Louisiana. These 5 schools primarily consisted of socioeconomically disadvantaged student populations, with a mean free and reduced-priced meal percentage of 50.4%. The ProudMe Tech intervention involved students in 6th grade, with the exception of a few 7th-grade classes. Because the ProudMe intervention was approved as an educational program, the participating teachers, at their discretion, were given authority to decide whether they would involve all students or just a selection of students. Of the 670 students taught by the participating teachers, 487 (72.7%) created a ProudMe Tech account. However, only 172 (35.3%) of those who created accounts returned parent and guardian consent forms and participated in the study. In addition, 3 students transferred to other schools during the study period, thus their partial data were removed from analysis.</p><p>ProudMe Tech was implemented in the spring semester of 2025. During the 8-week intervention period, participants accessed the ProudMe website via school-assigned laptop computers during their health and physical education classes and were expected to use the technology for at least 3 days per week. 
Their health and physical education teachers received standardized training at the beginning of the semester on how to use ProudMe Tech, and these teachers subsequently provided instruction for their students to create user accounts and engage with the AI chatbot. The process required limited teacher facilitation upon the initial instruction. In addition, the users were encouraged to use the technology by logging into the website beyond class time both in and out of school.</p><sec id="s2-1-1"><title>ProudMe Tech Website Overview&#x2003;</title><p>ProudMe Tech is a web-delivered platform designed to help middle school students manage 4 health behaviors, including physical activity, screen time, diet, and sleep. Each user is expected to create a user account, set up daily goals, report their behaviors, type in a written reflection on progress toward each goal, and receive personalized feedback from the AI-powered chatbot (GPT-4). These user data subsequently generate behavior charts (line chart and progress bar chart) which offer intuitive, color&#x2010;coded visualizations of user progress relative to their goals. Users can also access daily reports which provide a calendar&#x2010;based summary of each day&#x2019;s logged activities and reflections. Below provides an outline of the app interface and computer science implementation.</p><sec id="s2-1-1-1"><title>App Interface</title><p>As shown in <xref ref-type="fig" rid="figure1">Figure 1</xref>, on ProudMe Tech, users were instructed to create their own specific, measurable, attainable, realistic, and timely (SMART) goals and consider these principles when reflecting on goal attainment. In the first week of ProudMe Tech engagement, trained teachers taught the users of SMART goal setting and provided a demo on how to use the website app. 
For physical activity, users can select their activity type (eg, walking, running, basketball, and football) from a dropdown list, activity intensity (ie, strenuous, moderate, and mild), and activity volume (in hours and minutes). Screen time is categorized as academic (eg, online class and homework) and nonacademic (eg, gaming and social media) screen time. Users are prompted to select the type and amount for both goals and their actual behaviors. For diet, users tap &#x201C;Add Fruit/Vegetable,&#x201D; choose specific items (eg, apple and spinach) from the menu, and enter the number of servings consumed. For items not listed by default, users can manually add them. For sleep, users report their bedtime and wakeup time for both goals and actual behaviors (eg, &#x201C;10:30&#x202F;PM-6:45&#x202F;AM&#x201D;), and the system instantly computes sleep duration for users to view results prior to reflection. AI feedback was generated using a rule-based engine that matches logged behaviors to goals and delivers one of several tone-specific messages (eg, praise and suggestion).</p><fig position="float" id="figure1"><label>Figure 1.</label><caption><p>ProudMe journal.</p></caption><graphic alt-version="no" mimetype="image" position="float" xlink:type="simple" xlink:href="formative_v10i1e83630_fig01.png"/></fig></sec><sec id="s2-1-1-2"><title>Computer Science Implementation</title><p>The ProudMe Tech web app was developed using a full-stack architecture: React.js (front-end), Node.js and Express.js (back-end), and MongoDB for secure data storage. It was hosted via Render Cloud Platform and deployed using an agile development approach. The app has built-in timestamps and dropdown menus for selecting activity types, entering durations, and categorizing screen time. Back-end logs automatically track every goal, actual behavior, reflection, and AI-generated feedback message. 
The app has incorporated automated routines that calculate counts and timestamps directly from those logs. In addition, the platform consists of a rule-based AI chatbot module that generates and delivers feedback prompts. The research team built a simple feedback service in Node.js and Express that receives each goal or reflection a user submits. The service pulls that user&#x2019;s latest data from MongoDB and then uses our OpenAI API (Application Programming Interface) key (kept in an environment variable) with the OpenAI package to call GPT4 for context-aware feedback. Key model parameters were specified to ensure reproducibility: temperature=0.9, max_tokens=75, and response_format = &#x201C;json/text.&#x201D; The system runs each response through OpenAI&#x2019;s Moderation API to filter out any inappropriate content, then saves the full conversation plus a 0&#x2010;2 keyword score in our database. Finally, the system pushes real-time feedback to users via <italic>Socket.io</italic> while implementing request throttling through the bottleneck library to ensure optimal performance, security, and compliant API usage.</p></sec></sec></sec><sec id="s2-2"><title>Instrumentation</title><sec id="s2-2-1"><title>Behavior Goal, Actual Behavior, Behavior and Goal Ratio, and Behavioral Impact</title><p>Behavior goals are defined as the specific behavioral targets each student sets for themselves. Participants were prompted to enter a numeric value into the ProudMe Tech app&#x2019;s &#x201C;Goal&#x201D; field (eg, &#x201C;30min Tennis&#x201D; to indicate 30 minutes of playing tennis) for each of the 4 targeted behaviors: physical activity, screen time, diet, and sleep. No default goals were provided, and students set their goals independently without preset guidance from the system. These user-set goals served as the criteria against which actual behavior was compared. Actual behavior captures what the users self-reported for each of the 4 behaviors. 
Similar to behavior goals, participants were prompted to self-report the type and amount of the health behaviors on the same day (eg, &#x201C;45min Tennis&#x201D; for 45 minutes of playing tennis). To quantify goal attainment, the research team calculated the behavior and goal ratio variable for each behavior entry recorded in the database (ratio=1 means that the goal is met; ratio &#x003E;1 means that the goal is exceeded; ratio &#x003C;1 means that the goal is not achieved). Finally and importantly, the research team extracted a subset of complete baseline and end point behavior entries to determine the pre-to-post behavioral impact of the ProudMe Tech.</p></sec><sec id="s2-2-2"><title>User Reflection</title><p>On the app, for each behavior entry, participants were prompted to write a brief reflection on their goal attainment and how they felt about their progress. Although the raw reflections are qualitative text, the research team derived 3 quantitative measures from them: total number of reflection entries from baseline to end point, length of each entry in characters, and the timestamp of submission. These reflection data were extracted from the ProudMe Tech database without transformation. In addition, sentiment of user reflection was obtained through a sentiment analysis [<xref ref-type="bibr" rid="ref17">17</xref>] of the users&#x2019; reflections upon their daily goals and behaviors (after viewing the feedback received from the AI chatbot). Sentiment analysis enabled us to characterize the user reflections.</p></sec><sec id="s2-2-3"><title>Feedback</title><p>Feedback denotes the AI-generated feedback messages pushed to users in response to their behavior entries. 
The system prompts that the research team designed explicitly instructed the AI chatbot to generate tone-specific feedback according to users&#x2019; goal achievement percentages for each behavior (eg, praise for meeting goals, educational tips for partial completion, and constructive suggestions for low completion). This design ensures consistent and reproducible feedback generation across users, while also eliciting meaningful and useful feedback for the users&#x2019; behavior management. Users&#x2019; engagement data were recorded automatically in the system, which was processed in real time and then later classified by mood through sentiment analysis. Feedback was quantified using the total number and length of messages delivered to each user.</p></sec></sec><sec id="s2-3"><title>Data Collection</title><p>Prior to data collection, the researchers informed the participants of the research purposes, procedures, benefits, and harms of the project, as well as their rights to participate in, decline, or withdraw from the study. Trained researchers handed out the consent and assent forms to the students, who were instructed to bring home the forms and seek parental consent. The same consent and assent forms were also shared virtually to parents and students by the teachers through the Google Classroom platform and email. During the 8-week school intervention, all data were automatically saved on the ProudMe Tech database instantaneously upon user entries. The participants were initially instructed to create accounts and then engage with the technology. They recorded their behavior goals, actual behaviors, reflections, and called for AI chatbot feedback. Data were extracted from the back-end database for processing and analysis.</p></sec><sec id="s2-4"><title>Data Reduction</title><p>Based on descriptive scatterplots and pre-post summaries, the authors removed extreme observations as outliers that could distort visual scales. 
For each domain, the 1st and 99th quantiles of the behavior and goal ratio were computed on the pooled baseline-plus-end point data. Physical activity and screen time limited to 0&#x2010;12 hours, fruits and vegetables limited to 0&#x2010;20 servings, and sleep limited to 3&#x2010;12 hours. Observations outside this range were tagged as outliers and excluded. By design, the trimming retained &#x2248;98% of the records. Screen time goals were evaluated using reverse logic, where goals were considered achieved when actual screen use was less than or equal to the preset limit. Missing or zero goals were automatically excluded from the denominator to ensure that only meaningful goal-behavior pairs contributed to the analysis.</p></sec><sec id="s2-5"><title>Data Analyses</title><p>The authors first conducted descriptive analyses of behavior entries (mean, median, SD, maximum, minimum, frequencies, <italic>n</italic>, percentage, etc), including behavior goals, actual behaviors, behavior and goal ratio, users&#x2019; reflections, and feedback. The authors next analyzed the number and percentages of behavior goals met based on the goal and behavior ratio data and then visualized the goal and behavior ratio results by behavior using scatterplots on <italic>ggplot2</italic>. 
To determine the preliminary behavioral impact of the ProudMe Tech, the authors calculated the changes in behaviors from baseline to end point using absolute (&#x0394;) and relative (&#x0394;%) change scores:</p><disp-formula id="E1"><mml:math id="eqn1"><mml:mstyle displaystyle="true" scriptlevel="0"><mml:mrow><mml:mstyle displaystyle="true" scriptlevel="0"><mml:mi mathvariant="normal">&#x0394;</mml:mi><mml:mo>=</mml:mo><mml:msub><mml:mi>M</mml:mi><mml:mrow><mml:mtext>end point</mml:mtext></mml:mrow></mml:msub><mml:mo>&#x2212;</mml:mo><mml:msub><mml:mi>M</mml:mi><mml:mrow><mml:mtext>baseline</mml:mtext></mml:mrow></mml:msub></mml:mstyle></mml:mrow></mml:mstyle></mml:math></disp-formula><disp-formula id="E2"><mml:math id="eqn2"><mml:mstyle displaystyle="true" scriptlevel="0"><mml:mrow><mml:mstyle displaystyle="true" scriptlevel="0"><mml:mi mathvariant="normal">&#x0394;</mml:mi><mml:mi mathvariant="normal">%</mml:mi><mml:mo>=</mml:mo><mml:mn>100</mml:mn><mml:mo>&#x00D7;</mml:mo><mml:mi mathvariant="normal">&#x0394;</mml:mi><mml:mrow><mml:mo>/</mml:mo></mml:mrow><mml:msub><mml:mi>M</mml:mi><mml:mrow><mml:mtext>baseline</mml:mtext></mml:mrow></mml:msub></mml:mstyle></mml:mrow></mml:mstyle></mml:math></disp-formula><p>To evaluate user engagement with the ProudMe Tech platform, 2 distinct sets of user-generated text data were analyzed using natural language processing: user reflection and feedback. Each dataset underwent tailored analytic procedures to reflect its unique linguistic and functional characteristics. All the reflection and feedback data in the ProudMe Tech were analyzed using the <italic>sentimentr</italic> package (v2.9.0) [<xref ref-type="bibr" rid="ref17">17</xref>] in R (R Core Team). This lexicon-based tool performs sentence-level sentiment scoring by identifying affective vocabulary, negations (eg, &#x201C;not&#x201D;), and intensifiers (eg, &#x201C;very&#x201D;) within the context of grammatical structure. 
Each sentence received a sentiment score ranging from &#x2212;1 to +1, where higher values indicated a more positive sentiment.
All collected data were deidentified prior to analysis and securely stored on encrypted, password-protected servers accessible only to the research team. Participants received a gift card as compensation for their participation.</p></sec></sec><sec id="s3" sec-type="results"><title>Results</title><p>Across the intervention, the authors captured 6164 goal-behavior pairings (1541 per domain), 5192 reflections, and 5804 AI chatbot feedback messages. A total of 172 participants recorded an average of 8.9 entries (mean 8.9, SD 7.6), with a median of 6 entries. About 53% (92 users) met or exceeded the median. Of the 6164 valid daily goals, 3934 (63.8%) were achieved based on their self-report. <xref ref-type="table" rid="table1">Table 1</xref> presents the demographic characteristics of the participants, most of whom were 12 years old (129/172, 75%) in 6th grade (162/172, 94.7%) with slightly more girls (96/172, 55.8%) than boys (76/172, 44.2%).</p><table-wrap id="t1" position="float"><label>Table 1.</label><caption><p>Sample distribution of age, sex, and grade level.</p></caption><table id="table1" frame="hsides" rules="groups"><thead><tr><td align="left" valign="bottom">Characteristics</td><td align="left" valign="bottom">Participants (N=172)</td></tr></thead><tbody><tr><td align="left" valign="top" colspan="2">Age (years), n (%)</td></tr><tr><td align="char" char="." valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>11</td><td align="char" char="." valign="top">22 (12.8)</td></tr><tr><td align="char" char="." valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>12</td><td align="char" char="." valign="top">129 (75.0)</td></tr><tr><td align="char" char="." valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>13</td><td align="char" char="." valign="top">18 (10.5)</td></tr><tr><td align="char" char="." 
valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>14</td><td align="char" char="." valign="top">2 (1.2)</td></tr><tr><td align="char" char="." valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>15</td><td align="char" char="." valign="top">1 (0.6)</td></tr><tr><td align="left" valign="top" colspan="2">Sex, n (%)</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Female</td><td align="char" char="." valign="top">96 (55.8)</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Male</td><td align="char" char="." valign="top">76 (44.2)</td></tr><tr><td align="left" valign="top" colspan="2">Grade level, n (%)</td></tr><tr><td align="char" char="." valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>6th</td><td align="char" char="." valign="top">162 (94.2)</td></tr><tr><td align="char" char="." valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>7th</td><td align="char" char="." valign="top">10 (5.8)</td></tr></tbody></table></table-wrap><p><xref ref-type="table" rid="table2">Table 2</xref> summarizes the behavior and goal ratios across domains, showing that users on average slightly exceeded their targets. Variability was highest for screen time and physical activity and lowest for diet. <xref ref-type="fig" rid="figure2">Figure 2</xref> presents the trimmed scatterplot distributions of the behavior and goal ratios for each behavior domain. Each dot represents a record of a user. The horizontal axis represents participant identifiers and the vertical axis represents the ratio of the user&#x2019;s actual completion of the behavior to the set goal. 
The purple reference line indicates the behavior and goal ratio being 1 (goal is met but not exceeded). For physical activity, most of the observations were distributed around 1 (indicating most goals were met), some of them more than 2, indicating that some users reported abnormally high physical activity relative to their goal. For screen time (reverse), most observations were below 1, indicating that the actual screen time of users was generally lower than or close to the target value, and the goal completion was high. For diet, the observations were concentrated between 1 and 3, indicating that many users exceeded their set goals in diet (eg, eating more fruits and vegetables than their self-selected goals). For sleep, the dataset was concentrated and close to 1, indicating that the sleep behavior of most users was consistent with their goals. A correlation analysis indicated no statistically significant association between engagement frequency and self-reported goal completion rate (<italic>r</italic>=0.12, 95% CI &#x2013;0.03, 0.26; <italic>P</italic>=.12).</p><table-wrap id="t2" position="float"><label>Table 2.</label><caption><p>Descriptive results of the behavior and goal ratio.</p></caption><table id="table2" frame="hsides" rules="groups"><thead><tr><td align="left" valign="bottom">Behaviors</td><td align="left" valign="bottom">n</td><td align="left" valign="bottom">Ratio, mean (SD)</td><td align="left" valign="bottom">Min-Max</td></tr></thead><tbody><tr><td align="left" valign="top">Physical activity</td><td align="char" char="." valign="top">1489</td><td align="left" valign="top">1.3 (1.2)</td><td align="left" valign="top">0-40</td></tr><tr><td align="left" valign="top">Screen time</td><td align="char" char="." valign="top">1447</td><td align="left" valign="top">1.3 (2.0)</td><td align="left" valign="top">0-60</td></tr><tr><td align="left" valign="top">Diet</td><td align="char" char="." 
valign="top">1471</td><td align="left" valign="top">1.1 (0.7)</td><td align="left" valign="top">0-10</td></tr><tr><td align="left" valign="top">Sleep</td><td align="char" char="." valign="top">1258</td><td align="left" valign="top">1.0 (0.2)</td><td align="left" valign="top">0-2.36</td></tr></tbody></table></table-wrap><fig position="float" id="figure2"><label>Figure 2.</label><caption><p>Behavior and goal ratios in 4 behaviors.</p></caption><graphic alt-version="no" mimetype="image" position="float" xlink:type="simple" xlink:href="formative_v10i1e83630_fig02.png"/></fig><p>Participants posted an average of 30 reflective notes (SD 28.3, range 1&#x2010;127; overall total of 5192), averaging roughly 10 words per entry (mean 9.9, SD 9.2). A density plot, <xref ref-type="fig" rid="figure3">Figure 3</xref>, was used to examine the distribution of average reflection word counts per user, here as a proxy for engagement. As shown in <xref ref-type="fig" rid="figure3">Figure 3</xref>, length ranged from completely blank submissions to a detailed reflection of 195 words, indicating that while most reflections were very brief, a small number of participants were considerably more elaborate and drove the wide variation in word counts (range 0&#x2010;195).</p><fig position="float" id="figure3"><label>Figure 3.</label><caption><p>Reflection word count density.</p></caption><graphic alt-version="no" mimetype="image" position="float" xlink:type="simple" xlink:href="formative_v10i1e83630_fig03.png"/></fig><p><xref ref-type="fig" rid="figure4">Figure 4</xref> plots each user&#x2019;s average reflection sentiment score (<italic>x</italic> axis) against their total number of reflection entries (<italic>y</italic> axis). 
Each red dot represents a user&#x2019;s combined data: most sentiment scores cluster around zero (neutral tone), ranging from approximately &#x2013;0.25 (more negative reflections) to +0.50 (more positive reflections), while total reflections span from 0 up to over 120 entries. The purple regression line, with its wide gray 95% confidence band, lies nearly flat, indicating a statistically nonsignificant association between how positively or negatively users wrote and how often they reflected.</p><fig position="float" id="figure4"><label>Figure 4.</label><caption><p>Average reflection sentiment versus total reflection.</p></caption><graphic alt-version="no" mimetype="image" position="float" xlink:type="simple" xlink:href="formative_v10i1e83630_fig04.png"/></fig><p><xref ref-type="table" rid="table3">Table 3</xref> presents a total of 5804 AI-generated feedback messages categorized into 4 tone types. &#x201C;Positive reinforcement&#x201D; dominated the chatbot output, accounting for 3493 messages (60.2%). This was followed by &#x201C;Educational prompts&#x201D; totaling 1595 (27.5%). 
Only 675 &#x201C;constructive suggestions&#x201D; (11.6%) and 41 &#x201C;other&#x201D; remarks (0.7%) were observed.</p><table-wrap id="t3" position="float"><label>Table 3.</label><caption><p>Total frequency of feedback tone categories.</p></caption><table id="table3" frame="hsides" rules="groups"><thead><tr><td align="left" valign="bottom">Feedback category</td><td align="left" valign="bottom">Frequency (n)</td><td align="left" valign="bottom">Percentage (%)</td></tr></thead><tbody><tr><td align="left" valign="top">Positive reinforcement</td><td align="left" valign="top">3493</td><td align="left" valign="top">60.2</td></tr><tr><td align="left" valign="top">Educational prompt</td><td align="left" valign="top">1595</td><td align="left" valign="top">27.5</td></tr><tr><td align="left" valign="top">Constructive suggestion</td><td align="left" valign="top">675</td><td align="left" valign="top">11.6</td></tr><tr><td align="left" valign="top">Other</td><td align="left" valign="top">41</td><td align="left" valign="top">0.7</td></tr><tr><td align="left" valign="top">Total</td><td align="left" valign="top">5804</td><td align="left" valign="top">100.0</td></tr></tbody></table></table-wrap><p><xref ref-type="fig" rid="figure5">Figure 5</xref> depicts the distribution of feedback per user. &#x201C;Positive reinforcement&#x201D; was the most frequent category averaging 20.2 messages (SD 19.5) per user. &#x201C;Educational prompts&#x201D; was the second most frequent feedback type (mean 9.2, SD 8.8) followed by &#x201C;constructive suggestions&#x201D; (mean 3.9, SD 4.5) and &#x201C;Other&#x201D; (mean 0.2, SD 0.5). 
Overall, the AI chatbot feedback skewed toward affirmation rather than instruction or corrective messaging.</p><fig position="float" id="figure5"><label>Figure 5.</label><caption><p>Distribution of feedback types per student.</p></caption><graphic alt-version="no" mimetype="image" position="float" xlink:type="simple" xlink:href="formative_v10i1e83630_fig05.png"/></fig><p><xref ref-type="fig" rid="figure6">Figure 6</xref> illustrates the relationship between the average sentiment scores of AI feedback and the total number of feedback received per user. Overall, most sentiment scores were above zero, indicating a generally neutral to mildly positive tone across the feedback messages. The association between the quantity of AI-generated feedback received and the average sentiment score of that feedback was statistically nonsignificant.</p><fig position="float" id="figure6"><label>Figure 6.</label><caption><p>Scatterplot of average feedback sentiment versus total feedback.</p></caption><graphic alt-version="no" mimetype="image" position="float" xlink:type="simple" xlink:href="formative_v10i1e83630_fig06.png"/></fig><p>In <xref ref-type="table" rid="table4">Table 4</xref>, results indicated that screen time decreased significantly from 4.3 (SD 2.6) hours at baseline to 3.4 (SD 2.5) hours at end point (<italic>t</italic><sub>164</sub>=6.18, <italic>P</italic>&#x003C;.001), reflecting a 21.6% reduction. Fruit and vegetable intake also declined significantly, from 5.7 (SD 3.8) to 5.2 (SD 3.5) servings (<italic>t</italic><sub>169</sub>=2.27, <italic>P</italic>=.02), an 8.9% decrease. 
In contrast, physical activity and sleep did not significantly change (<italic>P</italic>=.20 and <italic>P</italic>=.46, respectively).</p><table-wrap id="t4" position="float"><label>Table 4.</label><caption><p>Paired <italic>t</italic> test results of the health behaviors by time (baseline and end point).</p></caption><table id="table4" frame="hsides" rules="groups"><thead><tr><td align="left" valign="bottom">Behavior</td><td align="left" valign="bottom">N</td><td align="left" valign="bottom">Baseline, mean (SD)</td><td align="left" valign="bottom">End point, mean (SD)</td><td align="left" valign="bottom">&#x0394; (absolute)</td><td align="left" valign="bottom">&#x0394; %</td><td align="left" valign="bottom"><italic>t</italic> test (df)</td><td align="left" valign="bottom"><italic>P</italic> value</td></tr></thead><tbody><tr><td align="left" valign="top">Physical activity (hours)</td><td align="left" valign="top">169</td><td align="left" valign="top">2.5 (2.3)</td><td align="left" valign="top">2.4 (2.3)</td><td align="left" valign="top">&#x2013;0.12</td><td align="left" valign="top">&#x2013;4.7</td><td align="left" valign="top">1.29 (168)</td><td align="left" valign="top">.20</td></tr><tr><td align="left" valign="top">Screen time (hours)</td><td align="left" valign="top">165</td><td align="left" valign="top">4.3 (2.6)</td><td align="left" valign="top">3.4 (2.5)</td><td align="left" valign="top">&#x2013;0.93</td><td align="left" valign="top">&#x2013;21.6</td><td align="left" valign="top">6.18 (164)</td><td align="left" valign="top">&#x003C;.001</td></tr><tr><td align="left" valign="top">Fruit and vegetables (servings)</td><td align="left" valign="top">169</td><td align="left" valign="top">5.7 (3.8)</td><td align="left" valign="top">5.2 (3.5)</td><td align="left" valign="top">&#x2013;0.50</td><td align="left" valign="top">&#x2013;8.9</td><td align="left" valign="top">2.27 (168)</td><td align="left" valign="top">.02</td></tr><tr><td align="left" valign="top">Sleep (hours)</td><td align="left" 
valign="top">157</td><td align="left" valign="top">7.6 (1.5)</td><td align="left" valign="top">7.7 (1.5)</td><td align="left" valign="top">+0.06</td><td align="left" valign="top">+0.8</td><td align="left" valign="top">&#x2013;0.74 (156)</td><td align="left" valign="top">.46</td></tr></tbody></table></table-wrap></sec><sec id="s4" sec-type="discussion"><title>Discussion</title><sec id="s4-1"><title>Principal Findings</title><p>The participants demonstrated sustained engagement with ProudMe Tech over the 8-week deployment, though the engagement metrics varied considerably between individuals. Results showed acceptable feasibility of the ProudMe Tech, showing an average of 8.9 behavioral entries, 30 reflections, 33.5 AI-generated chatbot feedback messages per participant, and 63.8% of the daily goals achieved. Compared to existing adolescent AI chatbots or digital health interventions (around 50% usage) [<xref ref-type="bibr" rid="ref9">9</xref>,<xref ref-type="bibr" rid="ref10">10</xref>], the usage and engagement metrics reported in this study were deemed acceptable. The users demonstrated a wide spectrum of engagement, with some users engaging with the platform daily, producing frequent and detailed reflections that prompted substantive feedback, while others participated sporadically, resulting in fewer and briefer interactions. Such variations in both quantity and quality of the users&#x2019; behavior entries, reflections, and AI-generated feedback appeared to be characteristic of adolescent engagement patterns as shown in similar prior digital health interventions, where individual motivation, access, and contextual factors play decisive roles [<xref ref-type="bibr" rid="ref20">20</xref>]. It is noteworthy that engagement frequency was not associated with self-reported goal completion rate in this feasibility study. 
This suggests that higher engagement alone may not necessarily lead to greater goal attainment, possibly due to the brief intervention duration and variability in user motivation. Future studies with larger samples and longer follow-up periods should further explore this relationship and the underlying reasons. Notably, our data records indicated that 72.7% (487) of the students from the participating schools created accounts and engaged with the ProudMe Tech platform, but only 35.3% (172) of them were participants (with signed parental consent and assent). This limited our data analysis to a subsample, which might have constrained the feasibility and impact of the ProudMe Tech interaction. The relatively low enrollment suggests the difficulty of obtaining parental consent for studies that occur at underresourced schools. Future research should address this barrier to attain a higher participation rate to improve feasibility and success.</p><p>Beyond the user engagement metrics, the nature and quality of engagement with ProudMe Tech are meaningful to mention. The participants achieved 63.8% of their goals, eliciting positive reinforcement and educational prompts from the AI chatbot (87.7%). Meeting personal goals and receiving meaningful feedback might have fostered the users&#x2019; perceived competence, self-efficacy, and functional social support through this AI chatbot platform [<xref ref-type="bibr" rid="ref14">14</xref>,<xref ref-type="bibr" rid="ref15">15</xref>,<xref ref-type="bibr" rid="ref21">21</xref>,<xref ref-type="bibr" rid="ref22">22</xref>]. The combination of regular goal achievement and supportive feedback may have created a reinforcing loop where users have felt competent upon meeting goals and receiving affirmation, encouragement, and constructive suggestions from the chatbot. 
Previous studies have shown that such feedback structure, in conjunction with encouragement with actionable suggestions, can significantly increase behavior adoption and adherence in youth populations [<xref ref-type="bibr" rid="ref8">8</xref>,<xref ref-type="bibr" rid="ref9">9</xref>,<xref ref-type="bibr" rid="ref23">23</xref>]. It is important to note that these feedback patterns reflect the programmed structure of the ProudMe chatbot rather than dynamic learning throughout the trial; the observed reinforcement and prompts were generated from a predefined rule-based system responding to user inputs.</p><p>Furthermore, our pre-post comparisons revealed measurable changes in 2 of the 4 health behaviors, underscoring the preliminary impact of the ProudMe Tech engagement on these behaviors. Of the 4 behaviors, screen time decreased by 0.9 hours per day (from 4.3 to 3.4 hours) and fruit and vegetable intake showed a significant decrease (8.9%), but the endpoint average score still exceeded the recommended 5 or more servings per day (ie, 5.18 servings). These changes align with prior evidence that digitally mediated self-monitoring, coupled with timely feedback, can quickly influence screen-related behaviors [<xref ref-type="bibr" rid="ref24">24</xref>]. The unexpected decrease in fruit and vegetable intake suggests possible ceiling effect, as these participants in the present study on average reported over 5 servings per day. Intervention to promote consumption of fruits and vegetables is important, as only 18% of children between 5 and 15 years of age meet this recommendation [<xref ref-type="bibr" rid="ref25">25</xref>]. It is also important to note that our baseline data were collected in early February and end point data in mid-May. Seasonal changes in daylight, weather, and the proximity to end-of-semester exams may have influenced students&#x2019; daily routines related to screen time and dietary intake (even sleep) behaviors independent of the intervention. 
These significant behavior changes in screen time suggest that these children might have been responsive to immediate environmental cues (eg, device reminders and goal prompts) and were able to make adjustments (eg, reduced recreational device use) [<xref ref-type="bibr" rid="ref26">26</xref>].</p><p>In contrast, physical activity and sleep did not show changes in light of the ProudMe Tech engagement. These results highlight the greater difficulty of modifying these 2 behaviors as the more ingrained, effort-dependent, chronic health behaviors within a short timeframe. One possible explanation is the lack of family engagement in this feasibility study. While adolescents may express interest in being more physically active, many of the daily choices around physical activity opportunities are heavily impacted by the home environment and parental influence. Without consistent parental support, for example, providing transportation to sports and recreational activities or reminding children to stick with sleep routines, children&#x2019;s capacity to make and sustain meaningful changes in these behaviors would be constrained [<xref ref-type="bibr" rid="ref27">27</xref>]. Shifting physical activity and sleep patterns often demand overcoming environmental (eg, access to facilities and food environments) and individual barriers (eg, time constraints and motivation) [<xref ref-type="bibr" rid="ref28">28</xref>,<xref ref-type="bibr" rid="ref29">29</xref>] and not all of these barriers were addressed in the ProudMe Tech app.</p><p>Despite the feasibility and impact of the ProudMe Tech intervention, this study has several limitations. First, the relatively small sample size per school and low parental consent rate might have limited the generalizability of the results. However, recruiting a large sample in an intervention study like this, from underresourced schools in particular, is difficult. Our research team used various strategies to recruit and enroll a large sample. 
Obtaining signed parental consent without coercion from low socioeconomic families (despite incentives) is a significant practical challenge. That said, it is typical for early-stage digital health feasibility trials conducted in real-world school settings to have smaller sample sizes than fully powered randomized controlled trials. Second, our behavioral assessment as well as students&#x2019; goal setting was self-reported in nature, which is susceptible to recall inaccuracies and social desirability bias [<xref ref-type="bibr" rid="ref30">30</xref>,<xref ref-type="bibr" rid="ref31">31</xref>]. The young participants (~12 years old) in this study might have experienced difficulty in setting SMART goals, tracking their health behaviors, and engaging in in-depth self-reflections. Future studies should integrate objective measures (eg, accelerometers and Fitbit trackers) to more accurately capture health behaviors in this age group [<xref ref-type="bibr" rid="ref30">30</xref>,<xref ref-type="bibr" rid="ref32">32</xref>]. Nevertheless, the ultimate purpose of engaging these adolescent users on the ProudMe Tech platform was to educate them about behavior management and provide them with a technology-assisted tool to practice these behavior management skills. Third, the study lacked a control group and was limited to an 8-week implementation period, restricting our ability to draw strong causal inferences about the long-term effectiveness of the intervention [<xref ref-type="bibr" rid="ref23">23</xref>]. Although having a control group with a longer intervention period to compare the behavior impact of the ProudMe Tech would be preferable, this feasibility study was designed to primarily evaluate engagement, usability, and short-term behavioral change trends rather than efficacy. Future research with emphasis on efficacy or effectiveness of the intervention should adopt the randomized controlled trial designs [<xref ref-type="bibr" rid="ref23">23</xref>]. 
Fourth, our AI chatbots primarily delivered affirmation and educational prompts and offered relatively few constructive suggestions. Although affirmation and informational prompts can increase motivation and raise awareness, the limited presence of constructive suggestions may have reduced opportunities for adolescents to receive behavior-corrective feedback or actionable strategies [<xref ref-type="bibr" rid="ref18">18</xref>]. Therefore, refining the chatbot algorithm to balance educational information with more frequent constructive suggestions in the future could provide adolescents with not only motivational support but also practical guidance to strengthen self-regulation and long-term health outcomes [<xref ref-type="bibr" rid="ref23">23</xref>]. Finally, the ProudMe Tech should use strategies to educate and engage parents, because parental support plays a critical role in shaping adolescents&#x2019; dietary and physical activity behaviors and reinforcing behavior changes outside of school.</p><p>In summary, our findings suggest that our AI-assisted ProudMe Tech website app can be feasibly integrated into the middle school curriculum to engage adolescents in daily behavior management and elicit meaningful behavior shifts for screen time behaviors, but further adaptations are needed to improve physical activity and sleep behaviors. 
ProudMe Tech provides an example for low-cost, school-based, AI-assisted health behavior interventions.</p></sec><sec id="s4-2"><title>Conclusions</title><p>These findings suggest that ProudMe Tech is a feasible AI chatbot that can engage adolescents in health behavior management, but more adaptation is needed to effectively elicit improvements in health behaviors and lower the obesity risk in middle school students.</p></sec></sec></body><back><ack><p>We would like to thank the schools (ie, the district and schools&#x2019; administrators), cafeteria managers and crews, teachers, students, and families who participated in the ProudMe Tech project. We acknowledge the following former or current undergraduate and graduate students who contributed to various aspects of the project: Cali Gange, Calia Skinner, Alexandra K LeCompte, Trinity Fegan, Hallie Rivet, Kayla Drummond, Eduardo Howell, Sydney Young, Rachel Hunter, Virginia Bordelon, Katherine Christie, and Victoria Mire assisting with data collection; Bruce Quach, Sam Ashenafi, Sumit Dhungana, Will C. Canup, Hudson Liu, Ashish Kumar, Xin Li, David C. Shepherd for their contributions to technology development; Dino A. Pellisier, Madeline Yi, and Madison E. Lewis for designing art assets for the project; Dr. Stacy Imagbe, Dr. Weidong Li, Jared Smith, and Jordan Mathews for being involved in the intervention development. We further acknowledge Dr. Xin Li and Dr. Beibei Guo for collaborating in grants application. We used the general artificial intelligence (GenAI) tool ChatGPT (OpenAI) to assist with minor language editing (grammar and phrasing). 
All AI-assisted text was reviewed and revised by the authors to ensure accuracy and appropriateness.</p></ack><notes><sec><title>Funding</title><p>This research was funded by the Eunice Kennedy Shriver National Institute of Child Health and Human Development of the National Institutes of Health (1R15HD108765-01A1), the Louisiana Board of Regents Research Competitiveness Subprogram [LEQSF(2022&#x2010;24)-RD-A-03], and the Helen &#x201C;Bessie&#x201D; Silverberg Pliner Professorship.</p></sec><sec><title>Data Availability</title><p>The datasets generated and analyzed during this study are not publicly available due to confidentiality concerns and ethical restrictions. However, data may be made available from the corresponding author on reasonable request and after approval by the Institutional Review Board.</p></sec></notes><fn-group><fn fn-type="con"><p>Conceptualization: SC</p><p>Resources: RRR, AES, SC</p><p>Investigation: YN (equal), PS (equal), QT (equal), SC (equal)</p><p>Data curation: QT (lead), YQ (supporting), YN (supporting), PS (supporting), SC (supporting)</p><p>Formal analysis: QT (lead), SC (supporting)</p><p>Writing &#x2013; original draft: QT (equal), SC (equal)</p><p>Writing &#x2013; review &#x0026; editing: QT (equal), SC (equal), YN (supporting), PS (supporting), YQ (supporting), AES (supporting), FW (supporting)</p><p>Supervision: SC</p><p>Project administration: SC</p></fn><fn fn-type="conflict"><p>None declared.</p></fn></fn-group><glossary><title>Abbreviations</title><def-list><def-item><term id="abb1">AI</term><def><p>artificial intelligence</p></def></def-item><def-item><term id="abb2">API</term><def><p>Application Programming Interface</p></def></def-item><def-item><term id="abb3">SMART</term><def><p>specific, measurable, attainable, realistic, and timely</p></def></def-item></def-list></glossary><ref-list><title>References</title><ref id="ref1"><label>1</label><nlm-citation citation-type="web"><article-title>Childhood obesity 
facts</article-title><source>Centers for Disease Control and Prevention</source><year>2024</year><month>04</month><day>2</day><access-date>2025-07-25</access-date><comment><ext-link ext-link-type="uri" xlink:href="https://www.cdc.gov/obesity/childhood-obesity-facts/childhood-obesity-facts.html">https://www.cdc.gov/obesity/childhood-obesity-facts/childhood-obesity-facts.html</ext-link></comment></nlm-citation></ref><ref id="ref2"><label>2</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Tonkin</surname><given-names>RS</given-names> </name></person-group><article-title>Early intervention with an adolescent twist</article-title><source>Paediatr Child Health</source><year>2001</year><month>09</month><volume>6</volume><issue>7</issue><fpage>424</fpage><lpage>428</lpage><pub-id pub-id-type="doi">10.1093/pch/6.7.424</pub-id><pub-id pub-id-type="medline">20107549</pub-id></nlm-citation></ref><ref id="ref3"><label>3</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Spring</surname><given-names>B</given-names> </name><name name-style="western"><surname>Moller</surname><given-names>AC</given-names> </name><name name-style="western"><surname>Coons</surname><given-names>MJ</given-names> </name></person-group><article-title>Multiple health behaviours: overview and implications</article-title><source>J Public Health (Oxf)</source><year>2012</year><month>03</month><volume>34 Suppl 1</volume><issue>Suppl 1</issue><fpage>i3</fpage><lpage>10</lpage><pub-id pub-id-type="doi">10.1093/pubmed/fdr111</pub-id><pub-id pub-id-type="medline">22363028</pub-id></nlm-citation></ref><ref id="ref4"><label>4</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Colizzi</surname><given-names>M</given-names> </name><name 
name-style="western"><surname>Lasalvia</surname><given-names>A</given-names> </name><name name-style="western"><surname>Ruggeri</surname><given-names>M</given-names> </name></person-group><article-title>Prevention and early intervention in youth mental health: is it time for a multidisciplinary and trans-diagnostic model for care?</article-title><source>Int J Ment Health Syst</source><year>2020</year><volume>14</volume><fpage>23</fpage><pub-id pub-id-type="doi">10.1186/s13033-020-00356-9</pub-id><pub-id pub-id-type="medline">32226481</pub-id></nlm-citation></ref><ref id="ref5"><label>5</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Welch</surname><given-names>V</given-names> </name><name name-style="western"><surname>Wy</surname><given-names>TJ</given-names> </name><name name-style="western"><surname>Ligezka</surname><given-names>A</given-names> </name><etal/></person-group><article-title>Use of mobile and wearable artificial intelligence in child and adolescent psychiatry: scoping review</article-title><source>J Med Internet Res</source><year>2022</year><month>03</month><day>14</day><volume>24</volume><issue>3</issue><fpage>e33560</fpage><pub-id pub-id-type="doi">10.2196/33560</pub-id><pub-id pub-id-type="medline">35285812</pub-id></nlm-citation></ref><ref id="ref6"><label>6</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Canzone</surname><given-names>A</given-names> </name><name name-style="western"><surname>Belmonte</surname><given-names>G</given-names> </name><name name-style="western"><surname>Patti</surname><given-names>A</given-names> </name><etal/></person-group><article-title>The multiple uses of artificial intelligence in exercise programs: a narrative review</article-title><source>Front Public Health</source><year>2025</year><volume>13</volume><fpage>1510801</fpage><pub-id 
pub-id-type="doi">10.3389/fpubh.2025.1510801</pub-id><pub-id pub-id-type="medline">39957989</pub-id></nlm-citation></ref><ref id="ref7"><label>7</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Lam</surname><given-names>C</given-names> </name><name name-style="western"><surname>Milne-Ives</surname><given-names>M</given-names> </name><name name-style="western"><surname>Harrington</surname><given-names>R</given-names> </name><etal/></person-group><article-title>Internet of things-enabled technologies as an intervention for childhood obesity: a systematic review</article-title><source>PLOS Digit Health</source><year>2022</year><month>04</month><volume>1</volume><issue>4</issue><fpage>e0000024</fpage><pub-id pub-id-type="doi">10.1371/journal.pdig.0000024</pub-id><pub-id pub-id-type="medline">36812526</pub-id></nlm-citation></ref><ref id="ref8"><label>8</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Ta</surname><given-names>V</given-names> </name><name name-style="western"><surname>Griffith</surname><given-names>C</given-names> </name><name name-style="western"><surname>Boatfield</surname><given-names>C</given-names> </name><etal/></person-group><article-title>User experiences of social support from companion chatbots in everyday contexts: thematic analysis</article-title><source>J Med Internet Res</source><year>2020</year><month>03</month><day>6</day><volume>22</volume><issue>3</issue><fpage>e16235</fpage><pub-id pub-id-type="doi">10.2196/16235</pub-id><pub-id pub-id-type="medline">32141837</pub-id></nlm-citation></ref><ref id="ref9"><label>9</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Han</surname><given-names>R</given-names> </name><name name-style="western"><surname>Todd</surname><given-names>A</given-names> </name><name 
name-style="western"><surname>Wardak</surname><given-names>S</given-names> </name><name name-style="western"><surname>Partridge</surname><given-names>SR</given-names> </name><name name-style="western"><surname>Raeside</surname><given-names>R</given-names> </name></person-group><article-title>Feasibility and acceptability of chatbots for nutrition and physical activity health promotion among adolescents: systematic scoping review with adolescent consultation</article-title><source>JMIR Hum Factors</source><year>2023</year><month>05</month><day>5</day><volume>10</volume><fpage>e43227</fpage><pub-id pub-id-type="doi">10.2196/43227</pub-id><pub-id pub-id-type="medline">37145858</pub-id></nlm-citation></ref><ref id="ref10"><label>10</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>To</surname><given-names>QG</given-names> </name><name name-style="western"><surname>Green</surname><given-names>C</given-names> </name><name name-style="western"><surname>Vandelanotte</surname><given-names>C</given-names> </name></person-group><article-title>Feasibility, usability, and effectiveness of a machine learning-based physical activity chatbot: quasi-experimental study</article-title><source>JMIR Mhealth Uhealth</source><year>2021</year><month>11</month><day>26</day><volume>9</volume><issue>11</issue><fpage>e28577</fpage><pub-id pub-id-type="doi">10.2196/28577</pub-id><pub-id pub-id-type="medline">34842552</pub-id></nlm-citation></ref><ref id="ref11"><label>11</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Hassoon</surname><given-names>A</given-names> </name><name name-style="western"><surname>Baig</surname><given-names>Y</given-names> </name><name name-style="western"><surname>Naiman</surname><given-names>DQ</given-names> </name><etal/></person-group><article-title>Randomized trial of two artificial intelligence coaching interventions to 
increase physical activity in cancer survivors</article-title><source>NPJ Digit Med</source><year>2021</year><month>12</month><day>9</day><volume>4</volume><issue>1</issue><fpage>168</fpage><pub-id pub-id-type="doi">10.1038/s41746-021-00539-9</pub-id><pub-id pub-id-type="medline">34887491</pub-id></nlm-citation></ref><ref id="ref12"><label>12</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Chen</surname><given-names>S</given-names> </name><name name-style="western"><surname>Rosenkranz</surname><given-names>RR</given-names> </name><name name-style="western"><surname>McLoughlin</surname><given-names>GM</given-names> </name><etal/></person-group><article-title>Evaluating the implementation and effectiveness of the SWITCH-MS: an ecological, multi-component adolescent obesity prevention intervention</article-title><source>Int J Environ Res Public Health</source><year>2020</year><month>07</month><day>27</day><volume>17</volume><issue>15</issue><fpage>5401</fpage><pub-id pub-id-type="doi">10.3390/ijerph17155401</pub-id><pub-id pub-id-type="medline">32727086</pub-id></nlm-citation></ref><ref id="ref13"><label>13</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Nie</surname><given-names>Y</given-names> </name><name name-style="western"><surname>Tan</surname><given-names>Q</given-names> </name><name name-style="western"><surname>Son</surname><given-names>P</given-names> </name><etal/></person-group><article-title>Testing an adapted obesity prevention intervention in under resourced schools: a pilot clustered randomized controlled trial</article-title><source>Sci Rep</source><year>2025</year><volume>15</volume><issue>1</issue><pub-id pub-id-type="doi">10.1038/s41598-025-16754-3</pub-id></nlm-citation></ref><ref id="ref14"><label>14</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name 
name-style="western"><surname>Ryan</surname><given-names>RM</given-names> </name><name name-style="western"><surname>Deci</surname><given-names>EL</given-names> </name></person-group><article-title>Self-determination theory and the facilitation of intrinsic motivation, social development, and well-being</article-title><source>Am Psychol</source><year>2000</year><month>01</month><volume>55</volume><issue>1</issue><fpage>68</fpage><lpage>78</lpage><pub-id pub-id-type="doi">10.1037//0003-066x.55.1.68</pub-id><pub-id pub-id-type="medline">11392867</pub-id></nlm-citation></ref><ref id="ref15"><label>15</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Zimmerman</surname><given-names>BJ</given-names> </name></person-group><article-title>Academic studing and the development of personal skill: a self-regulatory perspective</article-title><source>Educ Psychol</source><year>1998</year><month>03</month><volume>33</volume><issue>2-3</issue><fpage>73</fpage><lpage>86</lpage><pub-id pub-id-type="doi">10.1080/00461520.1998.9653292</pub-id></nlm-citation></ref><ref id="ref16"><label>16</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Fuglestad</surname><given-names>PT</given-names> </name><name name-style="western"><surname>Snyder</surname><given-names>M</given-names> </name></person-group><article-title>Status and the motivational foundations of self&#x2010;monitoring</article-title><source>Soc Personal Psychol Compass</source><year>2010</year><month>11</month><volume>4</volume><issue>11</issue><fpage>1031</fpage><lpage>1041</lpage><comment><ext-link ext-link-type="uri" xlink:href="https://compass.onlinelibrary.wiley.com/toc/17519004/4/11">https://compass.onlinelibrary.wiley.com/toc/17519004/4/11</ext-link></comment><pub-id pub-id-type="doi">10.1111/j.1751-9004.2010.00311.x</pub-id></nlm-citation></ref><ref 
id="ref17"><label>17</label><nlm-citation citation-type="web"><person-group person-group-type="author"><name name-style="western"><surname>Rinker</surname><given-names>T</given-names> </name></person-group><article-title>Sentimentr: calculate text polarity sentiment</article-title><source>GitHub</source><year>2021</year><access-date>2025-07-25</access-date><comment><ext-link ext-link-type="uri" xlink:href="https://github.com/trinker/sentimentr">https://github.com/trinker/sentimentr</ext-link></comment></nlm-citation></ref><ref id="ref18"><label>18</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Gore</surname><given-names>R</given-names> </name><name name-style="western"><surname>Safaee</surname><given-names>MM</given-names> </name><name name-style="western"><surname>Lynch</surname><given-names>CJ</given-names> </name><name name-style="western"><surname>Ames</surname><given-names>CP</given-names> </name></person-group><article-title>A spine-specific lexicon for the sentiment analysis of interviews with adult spinal deformity patients correlates with SF-36, SRS-22, and ODI scores: a pilot study of 25 patients</article-title><source>Information</source><year>2025</year><volume>16</volume><issue>2</issue><fpage>90</fpage><pub-id pub-id-type="doi">10.3390/info16020090</pub-id></nlm-citation></ref><ref id="ref19"><label>19</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Villanueva-Miranda</surname><given-names>I</given-names> </name><name name-style="western"><surname>Xie</surname><given-names>Y</given-names> </name><name name-style="western"><surname>Xiao</surname><given-names>G</given-names> </name></person-group><article-title>Sentiment analysis in public health: a systematic review of the current state, challenges, and future directions</article-title><source>Front Public 
Health</source><year>2025</year><volume>13</volume><fpage>1609749</fpage><pub-id pub-id-type="doi">10.3389/fpubh.2025.1609749</pub-id><pub-id pub-id-type="medline">40620557</pub-id></nlm-citation></ref><ref id="ref20"><label>20</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Nahum-Shani</surname><given-names>I</given-names> </name><name name-style="western"><surname>Shaw</surname><given-names>SD</given-names> </name><name name-style="western"><surname>Carpenter</surname><given-names>SM</given-names> </name><name name-style="western"><surname>Murphy</surname><given-names>SA</given-names> </name><name name-style="western"><surname>Yoon</surname><given-names>C</given-names> </name></person-group><article-title>Engagement in digital interventions</article-title><source>Am Psychol</source><year>2022</year><month>10</month><volume>77</volume><issue>7</issue><fpage>836</fpage><lpage>852</lpage><pub-id pub-id-type="doi">10.1037/amp0000983</pub-id><pub-id pub-id-type="medline">35298199</pub-id></nlm-citation></ref><ref id="ref21"><label>21</label><nlm-citation citation-type="book"><person-group person-group-type="author"><name name-style="western"><surname>Fuglestad</surname><given-names>PT</given-names> </name><name name-style="western"><surname>Snyder</surname><given-names>M</given-names> </name></person-group><article-title>Self-monitoring</article-title><source>Handbook of Individual Differences in Social Behavior</source><year>2009</year><publisher-name>Guilford Press</publisher-name><fpage>574</fpage><lpage>591</lpage><pub-id pub-id-type="other">9781606230719</pub-id></nlm-citation></ref><ref id="ref22"><label>22</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Keller</surname><given-names>MV</given-names> </name><name name-style="western"><surname>Dresel</surname><given-names>M</given-names> </name><name 
name-style="western"><surname>Daumiller</surname><given-names>M</given-names> </name></person-group><article-title>Do achievement goals and self-efficacy matter for feedback use?</article-title><source>Learn Instr</source><year>2024</year><month>10</month><volume>93</volume><fpage>101948</fpage><pub-id pub-id-type="doi">10.1016/j.learninstruc.2024.101948</pub-id></nlm-citation></ref><ref id="ref23"><label>23</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Aggarwal</surname><given-names>A</given-names> </name><name name-style="western"><surname>Tam</surname><given-names>CC</given-names> </name><name name-style="western"><surname>Wu</surname><given-names>D</given-names> </name><name name-style="western"><surname>Li</surname><given-names>X</given-names> </name><name name-style="western"><surname>Qiao</surname><given-names>S</given-names> </name></person-group><article-title>Artificial intelligence-based chatbots for promoting health behavioral changes: systematic review</article-title><source>J Med Internet Res</source><year>2023</year><month>02</month><day>24</day><volume>25</volume><fpage>e40789</fpage><pub-id pub-id-type="doi">10.2196/40789</pub-id><pub-id pub-id-type="medline">36826990</pub-id></nlm-citation></ref><ref id="ref24"><label>24</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Jones</surname><given-names>A</given-names> </name><name name-style="western"><surname>Armstrong</surname><given-names>B</given-names> </name><name name-style="western"><surname>Weaver</surname><given-names>RG</given-names> </name><name name-style="western"><surname>Parker</surname><given-names>H</given-names> </name><name name-style="western"><surname>von Klinggraeff</surname><given-names>L</given-names> </name><name name-style="western"><surname>Beets</surname><given-names>MW</given-names> 
</name></person-group><article-title>Identifying effective intervention strategies to reduce children&#x2019;s screen time: a systematic review and meta-analysis</article-title><source>Int J Behav Nutr Phys Act</source><year>2021</year><month>09</month><day>16</day><volume>18</volume><issue>1</issue><fpage>126</fpage><pub-id pub-id-type="doi">10.1186/s12966-021-01189-6</pub-id><pub-id pub-id-type="medline">34530867</pub-id></nlm-citation></ref><ref id="ref25"><label>25</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Scheelbeek</surname><given-names>P</given-names> </name><name name-style="western"><surname>Green</surname><given-names>R</given-names> </name><name name-style="western"><surname>Papier</surname><given-names>K</given-names> </name><etal/></person-group><article-title>Health impacts and environmental footprints of diets that meet the Eatwell Guide recommendations: analyses of multiple UK studies</article-title><source>BMJ Open</source><year>2020</year><month>08</month><day>26</day><volume>10</volume><issue>8</issue><fpage>e037554</fpage><pub-id pub-id-type="doi">10.1136/bmjopen-2020-037554</pub-id><pub-id pub-id-type="medline">32847945</pub-id></nlm-citation></ref><ref id="ref26"><label>26</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Schrempft</surname><given-names>S</given-names> </name><name name-style="western"><surname>Baysson</surname><given-names>H</given-names> </name><name name-style="western"><surname>Chessa</surname><given-names>A</given-names> </name><etal/></person-group><article-title>Associations between bedtime media use and sleep outcomes in an adult population-based cohort</article-title><source>Sleep Med</source><year>2024</year><month>09</month><volume>121</volume><fpage>226</fpage><lpage>235</lpage><pub-id pub-id-type="doi">10.1016/j.sleep.2024.06.029</pub-id><pub-id 
pub-id-type="medline">39004013</pub-id></nlm-citation></ref><ref id="ref27"><label>27</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Hosokawa</surname><given-names>R</given-names> </name><name name-style="western"><surname>Fujimoto</surname><given-names>M</given-names> </name><name name-style="western"><surname>Katsura</surname><given-names>T</given-names> </name></person-group><article-title>Parental support for physical activity and children&#x2019;s physical activities: a cross-sectional study</article-title><source>BMC Sports Sci Med Rehabil</source><year>2023</year><month>07</month><day>25</day><volume>15</volume><issue>1</issue><fpage>90</fpage><pub-id pub-id-type="doi">10.1186/s13102-023-00700-9</pub-id><pub-id pub-id-type="medline">37491297</pub-id></nlm-citation></ref><ref id="ref28"><label>28</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Seguin</surname><given-names>R</given-names> </name><name name-style="western"><surname>Connor</surname><given-names>L</given-names> </name><name name-style="western"><surname>Nelson</surname><given-names>M</given-names> </name><name name-style="western"><surname>LaCroix</surname><given-names>A</given-names> </name><name name-style="western"><surname>Eldridge</surname><given-names>G</given-names> </name></person-group><article-title>Understanding barriers and facilitators to healthy eating and active living in rural communities</article-title><source>J Nutr Metab</source><year>2014</year><volume>2014</volume><fpage>146502</fpage><pub-id pub-id-type="doi">10.1155/2014/146502</pub-id><pub-id pub-id-type="medline">25574386</pub-id></nlm-citation></ref><ref id="ref29"><label>29</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Deslippe</surname><given-names>AL</given-names> </name><name 
name-style="western"><surname>Soanes</surname><given-names>A</given-names> </name><name name-style="western"><surname>Bouchaud</surname><given-names>CC</given-names> </name><etal/></person-group><article-title>Barriers and facilitators to diet, physical activity and lifestyle behavior intervention adherence: a qualitative systematic review of the literature</article-title><source>Int J Behav Nutr Phys Act</source><year>2023</year><month>02</month><day>14</day><volume>20</volume><issue>1</issue><fpage>14</fpage><pub-id pub-id-type="doi">10.1186/s12966-023-01424-2</pub-id><pub-id pub-id-type="medline">36782207</pub-id></nlm-citation></ref><ref id="ref30"><label>30</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>de Moraes Ferrari</surname><given-names>GL</given-names> </name><name name-style="western"><surname>Kovalskys</surname><given-names>I</given-names> </name><name name-style="western"><surname>Fisberg</surname><given-names>M</given-names> </name><etal/></person-group><article-title>Comparison of self-report versus accelerometer - measured physical activity and sedentary behaviors and their association with body composition in Latin American countries</article-title><source>PLoS One</source><year>2020</year><volume>15</volume><issue>4</issue><fpage>e0232420</fpage><pub-id pub-id-type="doi">10.1371/journal.pone.0232420</pub-id><pub-id pub-id-type="medline">32343753</pub-id></nlm-citation></ref><ref id="ref31"><label>31</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Klos</surname><given-names>MC</given-names> </name><name name-style="western"><surname>Escoredo</surname><given-names>M</given-names> </name><name name-style="western"><surname>Joerin</surname><given-names>A</given-names> </name><name name-style="western"><surname>Lemos</surname><given-names>VN</given-names> </name><name 
name-style="western"><surname>Rauws</surname><given-names>M</given-names> </name><name name-style="western"><surname>Bunge</surname><given-names>EL</given-names> </name></person-group><article-title>Artificial intelligence-based chatbot for anxiety and depression in university students: pilot randomized controlled trial</article-title><source>JMIR Form Res</source><year>2021</year><month>08</month><day>12</day><volume>5</volume><issue>8</issue><fpage>e20678</fpage><pub-id pub-id-type="doi">10.2196/20678</pub-id><pub-id pub-id-type="medline">34092548</pub-id></nlm-citation></ref><ref id="ref32"><label>32</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Singh</surname><given-names>B</given-names> </name><name name-style="western"><surname>Olds</surname><given-names>T</given-names> </name><name name-style="western"><surname>Brinsley</surname><given-names>J</given-names> </name><etal/></person-group><article-title>Systematic review and meta-analysis of the effectiveness of chatbots on lifestyle behaviours</article-title><source>NPJ Digit Med</source><year>2023</year><month>06</month><day>23</day><volume>6</volume><issue>1</issue><fpage>118</fpage><pub-id pub-id-type="doi">10.1038/s41746-023-00856-1</pub-id><pub-id pub-id-type="medline">37353578</pub-id></nlm-citation></ref></ref-list></back></article>