<?xml version="1.0" encoding="UTF-8"?><!DOCTYPE article PUBLIC "-//NLM//DTD Journal Publishing DTD v2.0 20040830//EN" "journalpublishing.dtd"><article xmlns:mml="http://www.w3.org/1998/Math/MathML" xmlns:xlink="http://www.w3.org/1999/xlink" dtd-version="2.0" xml:lang="en" article-type="research-article"><front><journal-meta><journal-id journal-id-type="nlm-ta">JMIR Form Res</journal-id><journal-id journal-id-type="publisher-id">formative</journal-id><journal-id journal-id-type="index">27</journal-id><journal-title>JMIR Formative Research</journal-title><abbrev-journal-title>JMIR Form Res</abbrev-journal-title><issn pub-type="epub">2561-326X</issn><publisher><publisher-name>JMIR Publications</publisher-name><publisher-loc>Toronto, Canada</publisher-loc></publisher></journal-meta><article-meta><article-id pub-id-type="publisher-id">v10i1e69877</article-id><article-id pub-id-type="doi">10.2196/69877</article-id><article-categories><subj-group subj-group-type="heading"><subject>Original Paper</subject></subj-group></article-categories><title-group><article-title>Adaptive Feeding Robot With Multisensor Feedback and Predictive Control Using Autoregressive Integrated Moving Average&#x2013;Feed-Forward Neural Network: Simulation Study</article-title></title-group><contrib-group><contrib contrib-type="author" corresp="yes" equal-contrib="yes"><name name-style="western"><surname>Sadeghi-Esfahlani</surname><given-names>Shabnam</given-names></name><degrees>BSc, PhD</degrees><xref ref-type="aff" rid="aff1"/><xref ref-type="fn" rid="equal-contrib1">*</xref></contrib><contrib contrib-type="author" equal-contrib="yes"><name name-style="western"><surname>Mohaghegh</surname><given-names>Vahaj</given-names></name><degrees>BEng, MSc, PhD</degrees><xref ref-type="aff" rid="aff1"/><xref ref-type="fn" rid="equal-contrib1">*</xref></contrib><contrib contrib-type="author" equal-contrib="yes"><name 
name-style="western"><surname>Sanaei</surname><given-names>Alireza</given-names></name><degrees>BEng, MPhil, PhD</degrees><xref ref-type="aff" rid="aff1"/><xref ref-type="fn" rid="equal-contrib1">*</xref></contrib><contrib contrib-type="author" equal-contrib="yes"><name name-style="western"><surname>Bilal</surname><given-names>Zainib</given-names></name><degrees>BEng, MSc</degrees><xref ref-type="aff" rid="aff1"/><xref ref-type="fn" rid="equal-contrib1">*</xref></contrib><contrib contrib-type="author" equal-contrib="yes"><name name-style="western"><surname>Arthur</surname><given-names>Nathon</given-names></name><xref ref-type="aff" rid="aff1"/><xref ref-type="fn" rid="equal-contrib1">*</xref></contrib><contrib contrib-type="author" equal-contrib="yes"><name name-style="western"><surname>Shirvani</surname><given-names>Hassan</given-names></name><degrees>BEng, MSc, PhD</degrees><xref ref-type="aff" rid="aff1"/><xref ref-type="fn" rid="equal-contrib1">*</xref></contrib></contrib-group><aff id="aff1"><institution>Faculty of Science &#x0026; Engineering, Anglia Ruskin University</institution><addr-line>Bishop Hall Lane</addr-line><addr-line>Chelmsford</addr-line><country>United Kingdom</country></aff><contrib-group><contrib contrib-type="editor"><name name-style="western"><surname>Sarvestan</surname><given-names>Javad</given-names></name></contrib></contrib-group><contrib-group><contrib contrib-type="reviewer"><name name-style="western"><surname>Khalil</surname><given-names>Ahmed</given-names></name></contrib><contrib contrib-type="reviewer"><name name-style="western"><surname>Schearer</surname><given-names>Eric M</given-names></name></contrib></contrib-group><author-notes><corresp>Correspondence to Shabnam Sadeghi-Esfahlani, BSc, PhD, Faculty of Science &#x0026; Engineering, Anglia Ruskin University, Bishop Hall Lane, Chelmsford, CM1 1SQ, United Kingdom, 44 07944281517; <email>shabnam.sadeghi-esfahlani@aru.ac.uk</email></corresp><fn fn-type="equal" 
id="equal-contrib1"><label>*</label><p>all authors contributed equally</p></fn></author-notes><pub-date pub-type="collection"><year>2026</year></pub-date><pub-date pub-type="epub"><day>27</day><month>1</month><year>2026</year></pub-date><volume>10</volume><elocation-id>e69877</elocation-id><history><date date-type="received"><day>16</day><month>01</month><year>2025</year></date><date date-type="rev-recd"><day>01</day><month>12</month><year>2025</year></date><date date-type="accepted"><day>02</day><month>12</month><year>2025</year></date></history><copyright-statement>&#x00A9; Shabnam Sadeghi-Esfahlani, Vahaj Mohaghegh, Alireza Sanaei, Zainib Bilal, Nathon Arthur, Hassan Shirvani. Originally published in JMIR Formative Research (<ext-link ext-link-type="uri" xlink:href="https://formative.jmir.org">https://formative.jmir.org</ext-link>), 27.1.2026. </copyright-statement><copyright-year>2026</copyright-year><license license-type="open-access" xlink:href="https://creativecommons.org/licenses/by/4.0/"><p>This is an open-access article distributed under the terms of the Creative Commons Attribution License (<ext-link ext-link-type="uri" xlink:href="https://creativecommons.org/licenses/by/4.0/">https://creativecommons.org/licenses/by/4.0/</ext-link>), which permits unrestricted use, distribution, and reproduction in any medium, provided the original work, first published in JMIR Formative Research, is properly cited. The complete bibliographic information, a link to the original publication on <ext-link ext-link-type="uri" xlink:href="https://formative.jmir.org">https://formative.jmir.org</ext-link>, as well as this copyright and license information must be included.</p></license><self-uri xlink:type="simple" xlink:href="https://formative.jmir.org/2026/1/e69877"/><abstract><sec><title>Background</title><p>Eating is a primary daily activity crucial for maintaining independence and quality of life. 
Individuals with neuromuscular impairments often struggle with eating due to limitations in current assistive devices, which are predominantly passive and lack adaptive capabilities.</p></sec><sec><title>Objective</title><p>This study aims to introduce an adaptive feeding robot that integrates time series decomposition, autoregressive integrated moving average (ARIMA), and feed-forward neural networks (FFNN). The goal is to enhance feeding precision, efficiency, and personalization, thereby promoting autonomy for individuals with motor impairments.</p></sec><sec sec-type="methods"><title>Methods</title><p>The proposed feeding robot combines information from sensors and actuators to collect real-time data, that is, facial landmarks, mouth status (open or closed), fork-to-mouth and plate distances, as well as the force and angle required for food handling based on the food type. ARIMA and FFNN algorithms analyze data to predict user behavior and adjust feeding actions dynamically. A strain gauge sensor ensures precise force regulation, an ultrasonic sensor optimizes positioning, and facial recognition algorithms verify safety by monitoring mouth conditions and plate contents.</p></sec><sec sec-type="results"><title>Results</title><p>The combined ARIMA+FFNN model achieved a mean squared error (MSE) of 0.008 and an <italic>R</italic><sup>2</sup> of 94%, significantly outperforming the standalone ARIMA (MSE=0.015; <italic>R</italic><sup>2</sup>=85%) and FFNN (MSE=0.012; <italic>R</italic><sup>2</sup>=88%). Feeding success rate improved from 75% to 90% over 150 iterations (<italic>P</italic>&#x003C;.001), and response time decreased by 28% (from 3.6 s to 2.2 s). ANOVA revealed significant differences in success rates across scenarios (<italic>F</italic><sub>3,146</sub>=12.34; <italic>P</italic>=.002), with scenario 1 outperforming scenario 3 (<italic>P</italic>=.030) and scenario 4 (<italic>P</italic>=.010). 
Object detection showed high accuracy (face detection precision=97%, recall=96%, 95% CI 94%-99%). Force application matched expected ranges with minimal deviation (24 [1] <italic>N</italic> for apples; 7 [0.5] <italic>N</italic> for strawberries).</p></sec><sec sec-type="conclusions"><title>Conclusions</title><p>Combining predictive algorithms and adaptive learning mechanisms enables the feeding robot to demonstrate substantial improvements in precision, responsiveness, and personalization. These advancements underline its potential to revolutionize assistive technology in rehabilitation, delivering safe and highly personalized feeding assistance to individuals with motor impairments, thereby enhancing their independence.</p></sec></abstract><kwd-group><kwd>assistive technology</kwd><kwd>ARIMA</kwd><kwd>autoregressive integrated moving average</kwd><kwd>FFNN</kwd><kwd>feed-forward neural network</kwd><kwd>feeding robotics</kwd><kwd>forecasting</kwd><kwd>motor impairment</kwd><kwd>time series analysis</kwd><kwd>personalized assistance</kwd></kwd-group></article-meta></front><body><sec id="s1" sec-type="intro"><title>Introduction</title><p>Eating unaided remains impossible for millions of people with upper-limb impairments, turning every meal into a reminder of lost autonomy. Demographic change will magnify this challenge. By 2050, 1 in 6 individuals will be aged over 65 years, up from 1 in 11 in 2024 [<xref ref-type="bibr" rid="ref1">1</xref>]. Aging correlates with disability, and about 1 billion people already live with an impairment, a figure set to rise as populations gray [<xref ref-type="bibr" rid="ref2">2</xref>]. The result is a rapidly growing cohort that needs daily feeding assistance, further straining care networks that are already under pressure [<xref ref-type="bibr" rid="ref1">1</xref>,<xref ref-type="bibr" rid="ref3">3</xref>]. Assistive devices are critical, especially in low-resource settings where professional carers are limited. 
In the United Kingdom, for example, many adults aged over 75 years live alone and receive only brief daily visits, leading to malnutrition and declining health [<xref ref-type="bibr" rid="ref4">4</xref>,<xref ref-type="bibr" rid="ref5">5</xref>].</p><p>Robotic feeding assistants offer a promising solution, demonstrating consistent patience, adaptability, and precision. By tailoring actions to user preferences, they can provide a personalized and empowering dining experience [<xref ref-type="bibr" rid="ref6">6</xref>,<xref ref-type="bibr" rid="ref7">7</xref>].</p><p>Several commercial and noncommercial assistive feeding robots have already been developed to support individuals with upper limb impairments [<xref ref-type="bibr" rid="ref8">8</xref>-<xref ref-type="bibr" rid="ref11">11</xref>]. Examples include My Spoon (SECOM Co, Ltd) [<xref ref-type="bibr" rid="ref12">12</xref>], SnackBot (Carnegie Mellon University Robotics Institute) [<xref ref-type="bibr" rid="ref13">13</xref>], the Assistive Robotic Manipulator (Exact Dynamics Ltd) [<xref ref-type="bibr" rid="ref14">14</xref>], Obi Robot (DESiN LLC) [<xref ref-type="bibr" rid="ref15">15</xref>], iEat (Assistive Innovations) [<xref ref-type="bibr" rid="ref16">16</xref>], and Bestic (Camanio Care AB) [<xref ref-type="bibr" rid="ref17">17</xref>]. <xref ref-type="table" rid="table1">Table 1</xref> outlines the key advantages, limitations, and available quantitative evidence for 7 commercial or research-based assistive feeding systems. While some platforms offer user-friendly features such as switch-based control or teach modes, most lack real-time adaptation, user-state sensing, and robust bite-delivery evaluation. The table highlights the current gaps in autonomy, sensor integration, and evidence-based validation across devices. These are predominantly passive, relying on fixed routines with limited adaptability [<xref ref-type="bibr" rid="ref18">18</xref>]. 
Therefore, they struggle with delayed user responses, varied food types, and environmental challenges such as poor lighting or plate movement.</p><table-wrap id="t1" position="float"><label>Table 1.</label><caption><p>Comparative analysis of existing assistive feeding robots.</p></caption><table id="table1" frame="hsides" rules="groups"><thead><tr><td align="left" valign="middle">Robot name and autonomy</td><td align="left" valign="middle">Key advantages</td><td align="left" valign="middle">Key disadvantages</td><td align="left" valign="middle">Quantitative evidence</td></tr></thead><tbody><tr><td align="left" valign="middle">My Spoon [<xref ref-type="bibr" rid="ref12">12</xref>]</td><td align="left" valign="middle"><list list-type="bullet"><list-item><p>Offers automatic, semiautomatic, and manual modes (5-DOF<sup><xref ref-type="table-fn" rid="table1fn1">a</xref></sup> arm)</p></list-item></list></td><td align="left" valign="middle"><list list-type="bullet"><list-item><p>Requires joystick input</p></list-item><list-item><p>Uses fixed mouth-position routines&#x2014;no real-time adaptation</p></list-item></list></td><td align="left" valign="middle"><list list-type="bullet"><list-item><p>None</p></list-item></list></td></tr><tr><td align="left" valign="middle">SnackBot [<xref ref-type="bibr" rid="ref13">13</xref>]</td><td align="left" valign="middle"><list list-type="bullet"><list-item><p>Mobile platform delivers snacks in human spaces (human-robot interaction)</p></list-item></list></td><td align="left" valign="middle"><list list-type="bullet"><list-item><p>Not designed for bite-by-bite feeding assistance</p></list-item><list-item><p>Lacks user-state feedback</p></list-item></list></td><td align="left" valign="middle"><list list-type="bullet"><list-item><p>The platform was never evaluated for bite delivery or self-feeding</p></list-item></list></td></tr><tr><td align="left" valign="middle">ARM<sup><xref ref-type="table-fn" rid="table1fn2">b</xref></sup> (iARM, Exact 
Dynamics; JACO arm, Kinova) [<xref ref-type="bibr" rid="ref14">14</xref>]</td><td align="left" valign="middle"><list list-type="bullet"><list-item><p>Versatile arm mounts on wheelchairs. Performs multiple ADLs<sup><xref ref-type="table-fn" rid="table1fn3">c</xref></sup> (eating, drinking, and manipulation)</p></list-item></list></td><td align="left" valign="middle"><list list-type="bullet"><list-item><p>No autonomous feeding intelligence</p></list-item><list-item><p>Lacks specialized sensors and adaptive control</p></list-item></list></td><td align="left" valign="middle"><list list-type="bullet"><list-item><p>79% of 31 users completed all 16 movements</p></list-item><list-item><p>93% completed a 6-task subset</p></list-item><list-item><p>Bite success not documented</p></list-item></list></td></tr><tr><td align="left" valign="middle">Obi Robot [<xref ref-type="bibr" rid="ref15">15</xref>]</td><td align="left" valign="middle"><list list-type="bullet"><list-item><p>Lightweight, compact tabletop feeder</p></list-item><list-item><p>Simple switch interface, teach mode, 4-bowl food choice</p></list-item></list></td><td align="left" valign="middle"><list list-type="bullet"><list-item><p>Must be manually taught spoon-mouth position</p></list-item><list-item><p>Cannot sense user readiness</p></list-item></list></td><td align="left" valign="middle"><list list-type="bullet"><list-item><p>Speech-interface pilot (n=11) showed positive usability</p></list-item><list-item><p>No bite delivery data reported</p></list-item></list></td></tr><tr><td align="left" valign="middle">Bestic [<xref ref-type="bibr" rid="ref17">17</xref>]</td><td align="left" valign="middle"><list list-type="bullet"><list-item><p>4-DOF arm with rotating bowl (one-button semiautonomous control)</p></list-item></list></td><td align="left" valign="middle"><list list-type="bullet"><list-item><p>Uses fixed preset motions</p></list-item><list-item><p>No real-time adjustment or user-state 
feedback</p></list-item></list></td><td align="left" valign="middle"><list list-type="bullet"><list-item><p>Focuses on design philosophy and user perceptions.</p></list-item></list></td></tr><tr><td align="left" valign="middle">Meal Buddy [<xref ref-type="bibr" rid="ref19">19</xref>]</td><td align="left" valign="middle"><list list-type="bullet"><list-item><p>Bowl-edge scraper removes excess food (3-DOF)</p></list-item></list></td><td align="left" valign="middle"><list list-type="bullet"><list-item><p>Executes preset feeding sequences only</p></list-item><list-item><p>Lacks force and vision feedback</p></list-item></list></td><td align="left" valign="middle"><list list-type="bullet"><list-item><p>Pilot (n=3 able-bodied): mouth detection accuracy of 73%, 67%, and 52%.</p></list-item><list-item><p>No bite delivery recorded</p></list-item></list></td></tr><tr><td align="left" valign="middle">Mealtime Partner [<xref ref-type="bibr" rid="ref20">20</xref>]</td><td align="left" valign="middle"><list list-type="bullet"><list-item><p>Rotating plate and mechanical spoon lift (one-button bite delivery)</p></list-item></list></td><td align="left" valign="middle"><list list-type="bullet"><list-item><p>Static delivery pattern</p></list-item><list-item><p>No sensing of user state or environment</p></list-item></list></td><td align="left" valign="middle"><list list-type="bullet"><list-item><p>Historical evaluations found high abandonment</p></list-item><list-item><p>Slower than human assistance</p></list-item></list></td></tr></tbody></table><table-wrap-foot><fn id="table1fn1"><p><sup>a</sup>DOF: degree of freedom.</p></fn><fn id="table1fn2"><p><sup>b</sup>ARM: Assistive Robotic Manipulator.</p></fn><fn id="table1fn3"><p><sup>c</sup>ADL: activities of daily living.</p></fn></table-wrap-foot></table-wrap><p>Various researchers have aimed to improve adaptability and intelligence through sensing and control strategies. 
Predictive models such as hidden Markov models have been used to estimate bite timing based on social cues and food characteristics, achieving timing errors of 1.57 seconds [<xref ref-type="bibr" rid="ref7">7</xref>]. However, these systems often operate in open-loop and cannot adjust if the user hesitates. Similarly, support vector machines and convolutional neural networks (CNNs) [<xref ref-type="bibr" rid="ref21">21</xref>,<xref ref-type="bibr" rid="ref22">22</xref>] have been employed to locate bites on a plate, but they also lack real-time feedback.</p><p>More advanced systems have integrated vision and control frameworks. For instance, Mashrur et al [<xref ref-type="bibr" rid="ref23">23</xref>] used a Personal Robot 2 (PR2; Willow Garage) equipped with faster region-based CNN and red green blue&#x2013;depth (RGB-D) cameras, achieving 93% precision and an 82.8% food delivery success rate. A study by Park et al [<xref ref-type="bibr" rid="ref8">8</xref>] deployed PR2 with a GUI interface and model predictive controller, enhancing arm safety and anomaly detection, though food handling precision remained a challenge. Hybrid controllers, such as an adaptive neuro-fuzzy inference system&#x2013;proportional integral derivative controller [<xref ref-type="bibr" rid="ref24">24</xref>] and fuzzy logic-based inverse kinematics (IKs) [<xref ref-type="bibr" rid="ref25">25</xref>], have improved control smoothness and efficiency, but they lack real-time mouth-state feedback.</p><p>Artificial neural networks have also been applied to solve IKs [<xref ref-type="bibr" rid="ref26">26</xref>], and low-cost platforms, such as 3D-printed arms with facial recognition [<xref ref-type="bibr" rid="ref27">27</xref>], have broadened accessibility. However, these systems continue to rely on scripted motions. 
Studies conducted by Serrezuela et al [<xref ref-type="bibr" rid="ref28">28</xref>], Mystkowski et al [<xref ref-type="bibr" rid="ref29">29</xref>], and Gilca [<xref ref-type="bibr" rid="ref30">30</xref>] demonstrate promise in trajectory tracking and facial landmark detection; however, sensor fusion and robustness under environmental variation remain limited. Simulation tools [<xref ref-type="bibr" rid="ref31">31</xref>] and digital twins [<xref ref-type="bibr" rid="ref32">32</xref>] have explored virtual training and control evaluation, but fall short of real-world responsiveness. Together, these limitations highlight the need for a fully adaptive, closed-loop feeding system that anticipates user intent, verifies safe delivery, and dynamically adjusts in real-time. To meet this need, we present a novel autonomous feeding robot that integrates time-series forecasting, machine learning, and multisensor feedback to deliver personalized, safe, and efficient feeding assistance. Our system combines autoregressive integrated moving average (ARIMA) models (to capture linear trends) with a feed-forward neural network (FFNN; to learn nonlinear behaviors), enabling real-time predictions of mouth readiness. Data from vision-based mouth detection, ultrasonic distance sensing, and strain-gauge force feedback closes the loop, allowing the robot to respond to delayed reactions, variable food textures, lighting changes, and plate movement. 
This study advances assistive feeding robotics through five key contributions:</p><list list-type="order"><list-item><p>Multimodal sensing for safe, precise delivery: integrating computer vision, ultrasonic, and force sensors ensures accurate and gentle food placement.</p></list-item><list-item><p>Hybrid predictive control: fusing ARIMA and neural networks enables real-time prediction of user behavior.</p></list-item><list-item><p>Closed-loop adaptation: a unified control loop compensates for user delays and environmental variability.</p></list-item><list-item><p>Demonstrated performance gains: our system improved feeding success by 15% and reduced response time by 28% over baseline models.</p></list-item><list-item><p>Toward truly adaptive feeding: this study lays the foundation for a learning-driven platform that supports independent eating for individuals with motor impairments.</p></list-item></list></sec><sec id="s2" sec-type="methods"><title>Methods</title><sec id="s2-1"><title>Robotic Arm Hardware and Control System</title><p>The assistive feeding robot in this study is built around a 4-degree-of-freedom open manipulator, powered by 4 XM Series servo motors. It is equipped with a Raspberry Pi Camera Module 2 for vision, an HC-SR04 ultrasonic sensor for distance measurement, and a load cell to monitor force. The HX711 amplifier reads signals from the load cell and transmits them to an Arduino Nano, which then forwards the data to a Jetson Nano microcontroller (<xref ref-type="fig" rid="figure1">Figure 1</xref>).</p><fig position="float" id="figure1"><label>Figure 1.</label><caption><p>System architecture of the adaptive feeding robot. 
ARIMA: autoregressive integrated moving average; CNN: convolutional neural network; FFNN: feed-forward neural network; ROS: Robot Operating System.</p></caption><graphic alt-version="no" mimetype="image" position="float" xlink:type="simple" xlink:href="formative_v10i1e69877_fig01.png"/></fig><p>To enable efficient communication between sensors and the control logic, a lightweight WebSocket server is hosted on the Jetson Nano, a compact artificial intelligence (AI)-enabled edge computing device with a built-in GPU. The server allows continuous, 2-way data exchange between the camera, ultrasonic sensor, and load cell, and the decision-making module that directs the robotic arm. WebSockets ensure low-latency updates, allowing the system to adjust movements dynamically in response to user behavior. For simulation and real-time testing, a digital twin of the robot was developed in Unity using its networking application programming interfaces (APIs). The Jetson Nano uses the <italic>WebSockets</italic> library of Python (Python Software Foundation) to run the server, while Unity serves as the client via the Native WebSocket library, maintaining synchronized sensor and actuator states between the virtual and physical environments. The Jetson Nano also runs the Robot Operating System (ROS), which manages the robot&#x2019;s control architecture. NVIDIA TensorRT was used to accelerate the inference of neural networks, while the <italic>PyTorch</italic> library was used to train object detection and classification models.</p></sec><sec id="s2-2"><title>Feeding Simulation Scenarios in Unity</title><p>Four scenarios are simulated in Unity to test the control algorithms for the assistive feeding robot.</p><sec id="s2-2-1"><title>Scenario 1: Standard Operation</title><p>The robot operates under normal conditions without external disturbances or unexpected user behavior to measure the success rate of food delivery, timing, and force accuracy. 
The parameters are (1) feeder distance to plate and the mouth (approximately 10 cm; near), (2) distance to mouth (medium), (3) lighting (standard indoor lighting), (4) user behavior (opens mouth as the feeder gets within 15 cm), and (5) environmental stability (no movement of the table or plate).</p><boxed-text id="box1"><title>Input and time-series decomposition.</title><p><bold>Parameters</bold></p><list list-type="bullet"><list-item><p>Degree of freedom (DOF): for the robotic arm (default=4)</p></list-item><list-item><p>y<sub>t&#x2212;k:t</sub>: past states of the robotic arm in time window t&#x2212;k to t</p></list-item><list-item><p>strain<sub>t&#x2212;k:t</sub>: strain gauge data for the same time window</p></list-item><list-item><p>sonar<sub>t&#x2212;k:t</sub>: sonar sensor data for the same time window</p></list-item><list-item><p>camera<sub>t&#x2212;k:t</sub>: camera data (eg, user mouth open or closed)</p></list-item><list-item><p>u<sub>t</sub>: optional user input or manual adjustments</p></list-item></list><p><bold>Step 1: initialization</bold></p><list list-type="bullet"><list-item><p>Set the DOFs for the robotic arm (DOF=4).</p></list-item><list-item><p>Initialize the feed-forward neural network model with multiple layers to extract temporal features.</p></list-item><list-item><p>Define static parameters (such as arm configuration or user preferences)</p></list-item></list><p><bold>Step 2: input</bold></p><list list-type="bullet"><list-item><p>Collect past states of the arm: y<sub>t&#x2212;k:t</sub> = {y<sub>t&#x2212;k</sub>, . . . 
, y<sub>t</sub>}</p></list-item><list-item><p>Collect strain gauge data: strain<sub>t&#x2212;k:t</sub></p></list-item><list-item><p>Collect sonar data: sonar<sub>t&#x2212;k:t</sub></p></list-item><list-item><p>Collect camera data: camera<sub>t&#x2212;k:t</sub></p></list-item><list-item><p>(Optional) receive user input u<sub>t</sub> for manual adjustments or feedback</p></list-item></list><p><bold>Step 3: time-series decomposition</bold></p><list list-type="bullet"><list-item><p>Decompose past arm states, strain gauge, and sonar data into trend, seasonality, and residuals: trend, seasonality, residual = decompose(y<sub>t&#x2212;k:t</sub>,strain<sub>t&#x2212;k:t</sub>, sonar<sub>t&#x2212;k:t</sub>)</p></list-item></list><p><bold>Step 4: autoregressive integrated moving average (ARIMA) model</bold></p><list list-type="bullet"><list-item><p>Fit an ARIMA model to the residuals of the decomposed data: ARIMA_model &#x2190; ARIMA(Residual)</p></list-item><list-item><p>Use the ARIMA model to predict the next state for the residual component</p></list-item></list></boxed-text></sec><sec id="s2-2-2"><title>Scenario 2: Delayed User Response</title><p>The user delays opening their mouth after the robot delivers the food, testing the system&#x2019;s ability to adapt to user behavior. The parameters are (1) distance to plate (near), (2) distance to mouth (near), (3) lighting (standard indoor lighting), (4) user behavior (opens mouth, approximately 1-2 s late), and (5) environmental stability (no movement of the table or plate).</p></sec><sec id="s2-2-3"><title>Scenario 3: Low-Light Conditions</title><p>The system operates in low-light conditions, testing the robustness of the object detection algorithm. Assess object detection accuracy, success rate, and timing under reduced visibility. 
The parameters are (1) distance to plate (medium), (2) distance to mouth (medium), (3) lighting (dim indoor lighting; approximately 50 lux), (4) user behavior (opens mouth as expected), and (5) environmental stability (no movement of the table or plate).</p></sec><sec id="s2-2-4"><title>Scenario 4: Dynamic Environment</title><p>The table or plate moves slightly during the robot&#x2019;s operation, simulating a dynamic environment, and testing the system&#x2019;s ability to adapt to real-time changes in plate position using the sensor&#x2019;s feedback and ensure successful food delivery. The parameters are (1) distance to plate (medium), (2) distance to mouth (far), (3) lighting (standard indoor lighting), (4) user behavior (opens mouth as expected), and (5) environmental stability (plate shifts, 2-3 cm randomly).</p></sec><sec id="s2-2-5"><title>Object Detection and Classification</title><p>The robot&#x2019;s vision system relies on state-of-the-art object detection techniques, leveraging large datasets and advanced frameworks. ImageNet, a large-scale image database organized according to the WordNet hierarchy, was used for visual object recognition. The system is trained to detect key objects, such as tables, plates, faces, mouths, and various fruit types (including apples and strawberries). The ImageNet Large Scale Visual Recognition Challenge (ILSVRC) dataset was combined with CNNs for further training.</p><p>Since existing datasets were not sufficient for the purpose (specific tasks), transfer learning was applied. A pretrained model was fine-tuned using a custom dataset manually labeled by the research team. The custom dataset was trained on the Jetson Nano, with models preloaded with 1500 objects automatically downloaded during the build process. The system compares real-time captured images with a prebuilt database of reference images, taken under diverse lighting conditions and orientations. 
This ensures robust and accurate detection of objects and states, even in dynamic environments.</p></sec></sec><sec id="s2-3"><title>Predictive Modeling Using ARIMA and FFNN</title><p>Data from sensors generates time-series interactions, serving as inputs for ARIMA (autoregressive integrated moving average) and FFNN predictions. These predictions guide the robotic arm in achieving precise movements and actions. Time-series decomposition is applied to past states and sensor data to isolate trends and residuals, with the ARIMA model predicting the residual component that captures linear relationships. The FFNN extracts features from the time-series data, learning nonlinear relationships and temporal patterns.</p></sec><sec id="s2-4"><title>Fusion of Time Series Decomposition With ARIMA and FFNN</title><p>To integrate ARIMA into the time-series decomposition framework, the residual component <italic>R</italic><sub><italic>t</italic></sub> was modeled with ARIMA. FFNN was used to capture nonlinear dependencies and refine predictions. The mean squared error (MSE) and <italic>R</italic>&#x00B2; metrics were calculated to evaluate the model&#x2019;s accuracy, yielding a combined model MSE of 0.008 and an <italic>R</italic><sup>2</sup> of 94%.</p><p>We first decompose the time series into trend (<italic>T<sub>t</sub></italic>), seasonality (<italic>S<sub>t</sub></italic>), and residual (<italic>R<sub>t</sub></italic>), and then apply ARIMA to model the residuals. 
This is enhanced by introducing the FFNN to learn deeper, nonlinear relationships from the interaction data.</p><disp-formula id="E1"><label>(1)</label><mml:math id="eqn1"><mml:mstyle displaystyle="true" scriptlevel="0"><mml:mrow><mml:mstyle displaystyle="true" scriptlevel="0"><mml:msub><mml:mi>Y</mml:mi><mml:mi>t</mml:mi></mml:msub><mml:mo>=</mml:mo><mml:msub><mml:mi>T</mml:mi><mml:mi>t</mml:mi></mml:msub><mml:mo>+</mml:mo><mml:msub><mml:mi>S</mml:mi><mml:mi>t</mml:mi></mml:msub><mml:mo>+</mml:mo><mml:msub><mml:mi>R</mml:mi><mml:mi>t</mml:mi></mml:msub></mml:mstyle></mml:mrow></mml:mstyle></mml:math></disp-formula><p>where <italic>Y<sub>t</sub></italic> is the observed interaction data at time <italic>t</italic> (eg, feeding times and user preferences), <italic>T<sub>t</sub></italic> is the trend component (long-term changes in user interaction), <italic>S<sub>t</sub></italic> is the seasonal component (cyclical patterns in user behavior), and <italic>R<sub>t</sub></italic> is the residual component (remaining patterns or noise after removing trend and seasonality).</p><p>The residuals <italic>R</italic><sub><italic>t</italic></sub> are modeled using the ARIMA model:</p><disp-formula id="E2"><label>(2)</label><mml:math id="eqn2"><mml:mstyle displaystyle="true" scriptlevel="0"><mml:mrow><mml:mstyle displaystyle="true" scriptlevel="0"><mml:mtable columnalign="right left right left right left right left right left right left" rowspacing="3pt" columnspacing="0em 2em 0em 2em 0em 2em 0em 2em 0em 2em 0em" 
displaystyle="true"><mml:mtr><mml:mtd><mml:msub><mml:mi>R</mml:mi><mml:mi>t</mml:mi></mml:msub><mml:mo>=</mml:mo><mml:mrow/></mml:mtd><mml:mtd><mml:msub><mml:mi>&#x03C6;</mml:mi><mml:mn>1</mml:mn></mml:msub><mml:msub><mml:mi>R</mml:mi><mml:mrow><mml:mi>t</mml:mi><mml:mo>&#x2212;</mml:mo><mml:mn>1</mml:mn></mml:mrow></mml:msub><mml:mo>+</mml:mo><mml:msub><mml:mi>&#x03C6;</mml:mi><mml:mn>2</mml:mn></mml:msub><mml:msub><mml:mi>R</mml:mi><mml:mrow><mml:mi>t</mml:mi><mml:mo>&#x2212;</mml:mo><mml:mn>2</mml:mn></mml:mrow></mml:msub><mml:mo>+</mml:mo><mml:mo>&#x22EF;</mml:mo><mml:mo>+</mml:mo><mml:msub><mml:mi>&#x03C6;</mml:mi><mml:mi>p</mml:mi></mml:msub><mml:msub><mml:mi>R</mml:mi><mml:mrow><mml:mi>t</mml:mi><mml:mo>&#x2212;</mml:mo><mml:mi>p</mml:mi></mml:mrow></mml:msub></mml:mtd></mml:mtr><mml:mtr><mml:mtd/><mml:mtd><mml:mi/><mml:mo>+</mml:mo><mml:msub><mml:mi>&#x03D1;</mml:mi><mml:mn>1</mml:mn></mml:msub><mml:msub><mml:mi>&#x03B5;</mml:mi><mml:mrow><mml:mi>t</mml:mi><mml:mo>&#x2212;</mml:mo><mml:mn>1</mml:mn></mml:mrow></mml:msub><mml:mo>+</mml:mo><mml:msub><mml:mi>&#x03D1;</mml:mi><mml:mn>2</mml:mn></mml:msub><mml:msub><mml:mi>&#x03B5;</mml:mi><mml:mrow><mml:mi>t</mml:mi><mml:mo>&#x2212;</mml:mo><mml:mn>2</mml:mn></mml:mrow></mml:msub><mml:mo>+</mml:mo><mml:mo>&#x22EF;</mml:mo><mml:mo>+</mml:mo><mml:msub><mml:mi>&#x03D1;</mml:mi><mml:mi>q</mml:mi></mml:msub><mml:msub><mml:mi>&#x03B5;</mml:mi><mml:mrow><mml:mi>t</mml:mi><mml:mo>&#x2212;</mml:mo><mml:mi>q</mml:mi></mml:mrow></mml:msub><mml:mo>+</mml:mo><mml:msub><mml:mi>&#x03B5;</mml:mi><mml:mi>t</mml:mi></mml:msub></mml:mtd></mml:mtr></mml:mtable></mml:mstyle></mml:mrow></mml:mstyle></mml:math></disp-formula><p>where <italic>&#x03C6;</italic><sub>1</sub><italic>, . . . , &#x03C6;<sub>p</sub></italic> are the parameters for the autoregressive part, which models the dependency of the residuals on their past values; <italic>&#x03B8;</italic><sub>1</sub><italic>, . . . 
, &#x03B8;<sub>q</sub></italic> are the parameters for the moving average part, which models the dependency of the residuals on past forecast errors (shocks or noise); <italic>&#x03B5;<sub>t</sub></italic> is the white noise (random error term at time <italic>t</italic>); <italic>p</italic> is the order of the autoregressive part; <italic>q</italic> is the order of the moving average part; and <italic>d</italic> is the degree of differencing to make the series stationary (integrated part of ARIMA).</p><p>The residuals <italic>R<sub>t</sub></italic> can also be processed using an FFNN to further refine the predictions by capturing non-linear and complex patterns:</p><disp-formula id="E3"><label>(3)</label><mml:math id="eqn3"><mml:mstyle displaystyle="true" scriptlevel="0"><mml:mrow><mml:mstyle displaystyle="true" scriptlevel="0"><mml:msubsup><mml:mi>R</mml:mi><mml:mi>t</mml:mi><mml:mrow><mml:mtext>FFNN</mml:mtext></mml:mrow></mml:msubsup><mml:mo>=</mml:mo><mml:mtext>FFNN</mml:mtext><mml:mo stretchy="false">(</mml:mo><mml:msub><mml:mi>R</mml:mi><mml:mrow><mml:mi>t</mml:mi><mml:mo>&#x2212;</mml:mo><mml:mn>1</mml:mn></mml:mrow></mml:msub><mml:mo>,</mml:mo><mml:msub><mml:mi>R</mml:mi><mml:mrow><mml:mi>t</mml:mi><mml:mo>&#x2212;</mml:mo><mml:mn>2</mml:mn></mml:mrow></mml:msub><mml:mo>,</mml:mo><mml:mo>&#x2026;</mml:mo><mml:mo>,</mml:mo><mml:msub><mml:mi>R</mml:mi><mml:mrow><mml:mi>t</mml:mi><mml:mo>&#x2212;</mml:mo><mml:mi>k</mml:mi></mml:mrow></mml:msub><mml:mo stretchy="false">)</mml:mo></mml:mstyle></mml:mrow></mml:mstyle></mml:math></disp-formula><p>where FFNN(<italic>R<sub>t&#x2212;</sub></italic><sub>1</sub><italic>, . . . 
, R<sub>t&#x2212;k</sub></italic>) represents the application of the FFNN model on the sequence of past residuals, <italic>R<sub>t&#x2212;k</sub></italic> to <italic>R<sub>t&#x2212;</sub></italic><sub>1</sub> for feature extraction and nonlinear modeling, <italic>R</italic><sup>FFNN</sup> is the refined residual prediction after applying the FFNN model, and <italic>k</italic> is the size of the window of past residuals used as input to the FFNN.</p><p>The final combined model is as follows:</p><disp-formula id="E4"><label>(4)</label><mml:math id="eqn4"><mml:mstyle displaystyle="true" scriptlevel="0"><mml:mrow><mml:mstyle displaystyle="true" scriptlevel="0"><mml:mtable columnalign="right left right left right left right left right left right left" rowspacing="3pt" columnspacing="0em 2em 0em 2em 0em 2em 0em 2em 0em 2em 0em" displaystyle="true"><mml:mtr><mml:mtd><mml:msub><mml:mi>Y</mml:mi><mml:mi>t</mml:mi></mml:msub><mml:mo>=</mml:mo><mml:mrow/></mml:mtd><mml:mtd><mml:msub><mml:mi>T</mml:mi><mml:mi>t</mml:mi></mml:msub><mml:mo>+</mml:mo><mml:msub><mml:mi>S</mml:mi><mml:mi>t</mml:mi></mml:msub><mml:mo>+</mml:mo><mml:mrow><mml:mo>(</mml:mo><mml:mrow><mml:msub><mml:mi>&#x03C6;</mml:mi><mml:mn>1</mml:mn></mml:msub><mml:msub><mml:mi>R</mml:mi><mml:mrow><mml:mi>t</mml:mi><mml:mo>&#x2212;</mml:mo><mml:mn>1</mml:mn></mml:mrow></mml:msub><mml:mo>+</mml:mo><mml:mo>&#x22EF;</mml:mo><mml:mo>+</mml:mo><mml:msub><mml:mi>&#x03C6;</mml:mi><mml:mi>p</mml:mi></mml:msub><mml:msub><mml:mi>R</mml:mi><mml:mrow><mml:mi>t</mml:mi><mml:mo>&#x2212;</mml:mo><mml:mi>p</mml:mi></mml:mrow></mml:msub></mml:mrow><mml:mo>)</mml:mo></mml:mrow></mml:mtd></mml:mtr><mml:mtr><mml:mtd/><mml:mtd><mml:mi/><mml:mo>+</mml:mo><mml:mrow><mml:mo>(</mml:mo><mml:mrow><mml:msub><mml:mi>&#x03D1;</mml:mi><mml:mn>1</mml:mn></mml:msub><mml:msub><mml:mi>&#x03B5;</mml:mi><mml:mrow><mml:mi>t</mml:mi><mml:mo>&#x2212;</mml:mo><mml:mn>1</mml:mn></mml:mrow></mml:msub><mml:mo>+</mml:mo><mml:mo>&#x22EF;</mml:mo><mml:mo>+</mml:m
o><mml:msub><mml:mi>&#x03D1;</mml:mi><mml:mi>q</mml:mi></mml:msub><mml:msub><mml:mi>&#x03B5;</mml:mi><mml:mrow><mml:mi>t</mml:mi><mml:mo>&#x2212;</mml:mo><mml:mi>q</mml:mi></mml:mrow></mml:msub><mml:mo>+</mml:mo><mml:msub><mml:mi>&#x03B5;</mml:mi><mml:mi>t</mml:mi></mml:msub></mml:mrow><mml:mo>)</mml:mo></mml:mrow><mml:mo>+</mml:mo><mml:msubsup><mml:mi>R</mml:mi><mml:mi>t</mml:mi><mml:mrow><mml:mtext>FFNN</mml:mtext></mml:mrow></mml:msubsup></mml:mtd></mml:mtr></mml:mtable></mml:mstyle></mml:mrow></mml:mstyle></mml:math></disp-formula><p>In this framework, the ARIMA model captures the remaining structure in the residuals <italic>R<sub>t</sub></italic> after removing trend and seasonality, and the FFNN further refines this by learning nonlinear dependencies.</p></sec><sec id="s2-5"><title>Adaptive Learning and Performance Optimization</title><sec id="s2-5-1"><title>Simulation Setup and Evaluation Procedure</title><p>Unity simulations were executed over 150 iterations to assess the system&#x2019;s learning curve. At each iteration, the robot&#x2019;s internal model is updated based on the newly generated data. Approximately 1000 feeding simulations were conducted in Unity&#x2019;s game engine to fine-tune these movements. The Unity environment allowed precise control over variables, enabling reproducible testing of different scenarios. MSE and <italic>R</italic><sup>2</sup> statistical metrics were computed to evaluate model performance. ANOVA was used to compare success rates and timing accuracy across scenarios.</p></sec><sec id="s2-5-2"><title>Fuzzy Logic System</title><p>A fuzzy logic system was integrated to control the feeder fork&#x2019;s actions based on the distance (ultrasonic data), fruit type (camera), force (load cell), and angle (stepper motor). 
The rules are as follows: (1) rule 1: if the distance to the plate is near and the distance to the mouth is medium or far, then apply normal stabbing force based on the fruit type; (2) rule 2: if the distance to the plate is medium and the distance to the mouth is near, then reduce the force to avoid overshooting; and (3) rule 3: if the distance to the plate is far, then do not proceed with the stabbing action (safety rule). If the fruit type is apple and the distance to the plate is near, then high force (20&#x2010;30<italic>N</italic>) is applied with a steep angle (approximately 60&#x00B0;) for better penetration. If the fruit type is strawberry and the distance to the plate is near, then a low force (5&#x2010;10<italic>N</italic>) with a shallow angle (approximately 45&#x00B0;) is needed to prevent damage.</p><p>A fuzzy logic system regulated the force and angle adjustments based on food type and distance. First, if the distance to the plate is near, the distance to the mouth is medium or far, and the fruit type is apple, then high force (20&#x2010;30<italic>N</italic>) is applied with a steep angle (60&#x00B0;). Second, if the distance to the plate is near, the distance to the mouth is near, and the fruit type is strawberry, then a low force (5&#x2010;10<italic>N</italic>) is applied with a shallow angle (45&#x00B0;).</p><p>The operational block diagram of the feeding robot is shown in <xref ref-type="fig" rid="figure1">Figure 1</xref>. It illustrates the integration between the hardware components (represented by orange dashed boxes) and the software modules (represented by green dashed boxes). The top hardware block includes the Jetson Nano, OpenCR, and Arduino Nano, interfaced with the Pi Camera Module V2.1, HX711 load cell, HC-SR04 ultrasonic sensor, and XM430-W350R servo motors. 
These components collect real-time sensory data to inform feeding decisions. The software stack comprises TensorRT, <italic>PyTorch</italic>, and a CNN trained on ImageNet, which supports object detection and feature extraction. The control loop is triggered via a hardware start button, followed by time-series initialization and standby mode. Two predictive models, FFNN and ARIMA, work in tandem to drive the feeding logic. The FFNN handles real-time object and face detection tasks (left: food pipeline; right: user monitoring), while ARIMA governs motion planning for food collection and patient feeding. This closed-loop architecture allows the robot to adapt to user behavior dynamically, based on synchronized input from all sensory sources.</p><p>The robot operates in 2 primary sequences: scooping and forking, represented by the green blocks, and feeding, represented by the orange-colored blocks. Sequence planning is illustrated using blue arrows for &#x201C;valid detection&#x201D; and orange-colored arrows for &#x201C;error detection&#x201D; at each step in the process. The block diagram also highlights the implementation of the FFNN in the detection steps and the ARIMA model in the positioning steps. The robotic arm is equipped with joint encoders and a force sensor to generate torque commands that achieve the desired end-effector positions and orientations. <xref ref-type="fig" rid="figure2">Figure 2</xref> illustrates the hardware setup and schematic architecture of the feeding robot, along with its dimensions. <xref ref-type="fig" rid="figure2">Figure 2A</xref> is the 2D schematic of the robotic arm, <xref ref-type="fig" rid="figure2">Figure 2B</xref> is the top view, and <xref ref-type="fig" rid="figure2">Figure 2C</xref> is the 3D image with a corner view. A control system based on IK equations to execute movements was required for forking food. It includes the detailed mechanical drawings of the robot arm&#x2019;s structure and joint limits. 
The labeled schematic highlights each degree of freedom of the arm&#x2019;s mechanical range and physical configuration.</p><fig position="float" id="figure2"><label>Figure 2.</label><caption><p>Design and configuration of the assistive robotic feeding arm. (A) 2D schematic of the robot arm showing joint labels, rotational limits, and key dimensions, including the base Y joint, lower and upper Z joints, and the feeder joint. (B) Top-down orthographic view of the robotic arm layout for spatial footprint analysis. (C) 3D perspective rendering of the full robotic assembly illustrating joint orientation and end-effector positioning for feeding tasks.</p></caption><graphic alt-version="no" mimetype="image" position="float" xlink:type="simple" xlink:href="formative_v10i1e69877_fig02.png"/></fig><p>The controller system was implemented on the physical robot using an open-source ROS. The system&#x2019;s physical components were fabricated using 3D printing and computer numerical control machining. The camera provided a direct line of sight for monitoring the food on the plate and the user&#x2019;s mouth.</p></sec></sec><sec id="s2-6"><title>Ethical Considerations</title><p>This study was conducted in accordance with ethical guidelines and received approval from the Anglia Ruskin University Research Ethics Committee (approval: ETH2425-0342). Participant information and consent forms were obtained before their involvement in the study. This study exclusively used simulated environments and datasets. The decision to avoid live participants was made to prioritize safety and ensure reproducibility during the robot&#x2019;s development phase. Future work may incorporate user trials following ethical approval from the National Institute for Health and Care Research.</p></sec></sec><sec id="s3" sec-type="results"><title>Results</title><p>After powering on the robot, an initialization subroutine begins, using stored positioning memory, ARIMA optimizations, and FFNN refinements. 
At each step during the robot&#x2019;s operation, sensory data were used as training data for the FFNN and ARIMA models to enhance the patient experience further. The trained neural network was then used to initialize subsequent robot startup sequences. <xref ref-type="fig" rid="figure3">Figure 3A</xref> shows the front view of the primary camera in Unity, and <xref ref-type="fig" rid="figure3">Figure 3B</xref> highlights the facial landmarks and distances between points on the face to identify whether the mouth is open or closed. <xref ref-type="fig" rid="figure3">Figure 3C</xref> presents 2 perspectives: one from the primary camera and the other from the feeder camera, illustrating the integration of the object detection algorithm.</p><fig position="float" id="figure3"><label>Figure 3.</label><caption><p>Simulated robotic feeding scenario in Unity. (A) The assistive robot detects the user&#x2019;s head, mouth, and food position in 3D space using bounding boxes and landmark features. (B) Real-time mouth state detection, achieved through facial landmarks, distinguishes between open and closed mouth states to determine feeding readiness. (C) Task environment visualization: left, a full scene of robot-user interaction showing &#x201C;Mouth: Closed&#x201D; status; center, top-down view of the plate with multiple food items; right, bite delivery scene with mouth proximity detection using bounding box alignment.</p></caption><graphic alt-version="no" mimetype="image" position="float" xlink:type="simple" xlink:href="formative_v10i1e69877_fig03.png"/></fig><p>The combined ARIMA and FFNN models predict the next system state to optimize portion size and timing while also handling sensor-based adjustments. The strain gauge adjusts pressure for stabbing or scooping food, the sonar sensor adjusts position and timing based on user proximity, and the camera ensures the robotic arm only operates when the user&#x2019;s mouth is open. 
This real-time camera feedback enhances safety and efficiency by adjusting feeding dynamics based on sensor inputs, optimizing the overall feeding process. Recurring use of the robot by the same patient can improve response time, reduce delays in sequence execution, and smooth the robot&#x2019;s kinematics.</p><p>The performance of the feeding robot was evaluated across 4 simulated scenarios, with a focus on success rates, timing accuracy, and force application. The combined ARIMA and FFNN models demonstrated significant improvements in feeding accuracy and personalization. Object detection, adaptive learning, and statistical validations were conducted to illustrate system robustness.</p><p><xref ref-type="table" rid="table2">Table 2</xref> summarizes the robot&#x2019;s performance metrics. Scenario 1 achieved the highest success rate (95%) and the lowest response time (1.5 s), while scenario 4 demonstrated adaptability to dynamic environments with a success rate of 80%. ANOVA revealed significant differences in success rates across scenarios (<italic>F</italic><sub>3,146</sub>=12.34; <italic>P</italic>=.002), with scenario 1 (mean success rate of 95%, SD approximately 3.2%) outperforming scenario 3 (mean success rate of 85%, SD approximately 4.1%; <italic>P</italic>=.030) and scenario 4 (mean success rate of 80%, SD approximately 5.0%; <italic>P</italic>=.010).</p><table-wrap id="t2" position="float"><label>Table 2.</label><caption><p>Average performance metrics across scenarios</p></caption><table id="table2" frame="hsides" rules="groups"><thead><tr><td align="left" valign="top">Metric</td><td align="left" valign="top">Scenario 1, mean (SD)</td><td align="left" valign="top">Scenario 2, mean (SD)</td><td align="left" valign="top">Scenario 3, mean (SD)</td><td align="left" valign="top">Scenario 4, mean (SD)</td></tr></thead><tbody><tr><td align="left" valign="top">Success rate (%)</td><td align="left" valign="top">95 (approximately 3.2)</td><td align="left" 
valign="top">90 (approximately 3.8)</td><td align="left" valign="top">85 (approximately 4.1)</td><td align="left" valign="top">80 (approximately 5.0)</td></tr><tr><td align="left" valign="top">Timing accuracy (s)</td><td align="left" valign="top">1.5 (approximately 0.21)</td><td align="left" valign="top">2.0 (approximately 0.32)</td><td align="left" valign="top">2.5 (approximately 0.41)</td><td align="left" valign="top">2.8 (approximately 0.48)</td></tr><tr><td align="left" valign="top">Force application (N)</td><td align="left" valign="top">25 (2)</td><td align="left" valign="top">20 (4)</td><td align="left" valign="top">18 (4)</td><td align="left" valign="top">22 (5)</td></tr></tbody></table></table-wrap><p>The four scenarios simulated in Unity to test the control algorithms for the assistive feeding robot are as follows:</p><list list-type="order"><list-item><p>Standard operation (scenario 1): the feeding robot demonstrated optimal performance, achieving the highest success rate of 95%. The average response time was 1.5 (SD 0.21) seconds, and the force applied during the feeding process was exact, remaining within 2 N of the desired value. This scenario establishes the robot&#x2019;s baseline capabilities under normal conditions.</p></list-item><list-item><p>Delayed user response (scenario 2): the robot showcased adaptability by adjusting its actions when the user delayed opening their mouth by 1 to 2 seconds. While the success rate decreased to 90% due to delayed feedback, the response time increased to 2 seconds compared to the standard operation. These results highlight the robot&#x2019;s ability to manage user-specific behavior variations effectively.</p></list-item><list-item><p>Low lighting conditions (scenario 3): the object detection system experienced a slight reduction in precision and recall under dim lighting conditions (approximately 50 lux). 
Consequently, the success rate dropped to 85%, and the response time increased to 2.5 seconds, reflecting the challenges posed by reduced visibility. Despite these limitations, the system maintained a reasonable level of performance, demonstrating robustness.</p></list-item><list-item><p>Dynamic environment (scenario 4): the robot successfully adapted to plate movements of 2 to 3 cm, showcasing its real-time recalibration capabilities. However, this scenario had the lowest success rate at 80% and the highest response time of 2.8 seconds due to increased task complexity. These results emphasize the robot&#x2019;s ability to handle dynamic and unpredictable conditions.</p></list-item></list><p><xref ref-type="table" rid="table3">Table 3</xref> presents the object detection metrics of the system, including precision, recall, and confidence, across key tasks. For example, the plate detection precision was 95%, while the face detection precision reached 97%. These metrics ensure accurate feeding operations, enhancing user safety and satisfaction.</p><table-wrap id="t3" position="float"><label>Table 3.</label><caption><p>Object detection metrics</p></caption><table id="table3" frame="hsides" rules="groups"><thead><tr><td align="left" valign="top">Object</td><td align="left" valign="top">Precision (%)</td><td align="left" valign="top">Recall (%)</td><td align="left" valign="top">Confidence (%)</td></tr></thead><tbody><tr><td align="left" valign="top">Plate</td><td align="left" valign="top">95</td><td align="left" valign="top">93</td><td align="left" valign="top">90</td></tr><tr><td align="left" valign="top">Face</td><td align="left" valign="top">97</td><td align="left" valign="top">96</td><td align="left" valign="top">92</td></tr><tr><td align="left" valign="top">Open mouth</td><td align="left" valign="top">92</td><td align="left" valign="top">91</td><td align="left" valign="top">89</td></tr></tbody></table></table-wrap><p>The combined ARIMA and FFNN model significantly 
outperformed standalone ARIMA and FFNN models. It achieved the lowest MSE (0.008) compared with 0.015 and 0.012, respectively, and the highest <italic>R</italic><sup>2</sup> (94%), relative to 85% and 88%, respectively. These results indicate the superior capability of the hybrid model to capture user interaction dynamics.</p><p>A fuzzy logic system was used to regulate the force and angle for food forking. Inputs included plate distance (ultrasonic), fruit type (camera classification), and resistance (load cell). Rules were defined for different fruit types:</p><list list-type="bullet"><list-item><p>Apple: high force (20-30 N), steep angle (approximately 60&#x00B0;). During operation, the system measured an average applied force of 24 N (SD 1 N) and an angle of 59&#x00B0; (SD 1&#x00B0;), aligning closely with expectations.</p></list-item><list-item><p>Strawberry: low force (5&#x2013;10 N), shallow angle (approximately 45&#x00B0;). The measured force was 7 N (SD 0.5 N), with an angle of 44&#x00B0; (SD 1&#x00B0;), ensuring minimal fruit damage and controlled delivery.</p></list-item></list><p>Force and angle adjustments were verified by comparing expected versus measured values recorded by sensors during the operation.</p><p><xref ref-type="fig" rid="figure4">Figure 4</xref> illustrates the robotic arm in a real-world setting while performing food targeting, picking, and delivery tasks. In <xref ref-type="fig" rid="figure4">Figure 4A-B</xref>, the apple is successfully forked and picked up, showcasing the effectiveness of the stabbing mechanism. Finally, <xref ref-type="fig" rid="figure4">Figure 4C</xref> shows the robotic arm delivering the food to a designated mouth position. This demonstrates the robot&#x2019;s operational workflow, covering critical steps of targeting, stabbing, and delivering food to a target. 
The experiment under scenario one yielded an average accuracy rate of 87% over 50 iterations, emphasizing the system&#x2019;s reliability and consistency.</p><fig position="float" id="figure4"><label>Figure 4.</label><caption><p>Operational sequence of the assistive feeding robot during a food delivery task. (A) The robotic arm localizes and targets a food item (apple slice) for acquisition. (B) The robot successfully pierces and lifts the apple slice using a fork-like end-effector. (C) The arm moves the food toward the user&#x2019;s mouth position, demonstrating autonomous bite delivery.</p></caption><graphic alt-version="no" mimetype="image" position="float" xlink:type="simple" xlink:href="formative_v10i1e69877_fig04.png"/></fig><p><xref ref-type="fig" rid="figure5">Figure 5</xref> illustrates the iterative improvements in success rate and response time over 150 iterations in the fourth scenario of the simulated environment. Key parameters analyzed included stabbing angles, applied forces, and success rates for apples, demonstrating the robot&#x2019;s adaptability to varying conditions. It showed a steady improvement in success rate, starting at approximately 40% in initial iterations and stabilizing at 90% after 90 iterations. This progression underscores the effectiveness of the adaptive learning approach in optimizing performance. <xref ref-type="fig" rid="figure5">Figure 5</xref> also illustrates the corresponding reduction in response time, which starts at 3.6 seconds and improves to 2.2 seconds (<italic>P</italic>&#x003C;.001), highlighting enhanced efficiency.</p><fig position="float" id="figure5"><label>Figure 5.</label><caption><p>Performance improvements across iterations. The red line represents the success rate over 150 iterations. 
The blue line represents the reduction in response time over iterations.</p></caption><graphic alt-version="no" mimetype="image" position="float" xlink:type="simple" xlink:href="formative_v10i1e69877_fig05.png"/></fig><p>The kinematic response and stability of the robot have also been enhanced, as demonstrated in <xref ref-type="fig" rid="figure6">Figure 6</xref>. <xref ref-type="fig" rid="figure6">Figure 6A</xref> illustrates the angular movement of each joint during a specific motion sequence. In contrast, <xref ref-type="fig" rid="figure6">Figure 6B</xref> depicts the 3D spatial positioning of the actuator during the execution of the same movement set. Both graphs compare the optimized system&#x2019;s performance to the original untrained control system output. Notable improvements in the robot&#x2019;s pathfinding and control system output are evident in both graphs. In <xref ref-type="fig" rid="figure6">Figure 6A</xref>, there is an apparent reduction in the overshooting and undershooting of servo adjustments needed to achieve the desired joint angles, minimizing errors and enhancing precision. These refinements have significantly reduced the jerkiness in the robot&#x2019;s arm movements and diminished vibrations in the final actuator. Consequently, food delivery to the user is smoother and more controlled, which underscores the improved kinematic performance and stability of the robot.</p><fig position="float" id="figure6"><label>Figure 6.</label><caption><p>(A) Joint angle trajectories during movement execution show reduced overshooting and undershooting compared to the untrained control system. (B) The actuator position in 3D space during the same movement, with an optimized path and control system output. 
The red line represents the success rate over 150 iterations, while the blue line represents the reduction in response time over iterations.</p></caption><graphic alt-version="no" mimetype="image" position="float" xlink:type="simple" xlink:href="formative_v10i1e69877_fig06.png"/></fig><p><xref ref-type="fig" rid="figure7">Figure 7</xref> illustrates the facial recognition and mouth detection neural networks integrated into the robot. Using advanced image recognition techniques, specifically a VGG 16 convolutional network (proposed by the Visual Geometry Group at the University of Oxford [<xref ref-type="bibr" rid="ref33">33</xref>]), the robot detects and highlights the users&#x2019; facial features. It determines whether the user&#x2019;s mouth is open before approaching the user. The neural network initially separates the user&#x2019;s face from the background and then identifies the control volume box where the user&#x2019;s mouth is located. Depending on the facial characteristics of a user, the neural network constantly updates the status of the user between mouth open and closed. This is a key feature of the robot that enables it to perform the feeding task reliably, providing a layer of system control within the feeding sequence. Whenever the user has a closed mouth, the feeding sequence is halted, waiting for either an override command or the mouth open status to continue the feeding sequence.</p><fig position="float" id="figure7"><label>Figure 7.</label><caption><p>Mouth state detection using neural network image segmentation. (A) The system detects an open mouth using a region-of-interest (ROI) around the lips, highlighted in orange. (B) The system detects a closed mouth with a reduced feature response in the ROI, shown with a darker overlay. 
This classification supports the robot&#x2019;s decision-making process for safe and timely food delivery.</p></caption><graphic alt-version="no" mimetype="image" position="float" xlink:type="simple" xlink:href="formative_v10i1e69877_fig07.png"/></fig></sec><sec id="s4" sec-type="discussion"><title>Discussion</title><sec id="s4-1"><title>Principal Findings</title><p>In this study, we demonstrated that a hybrid control architecture, combining time-series forecasting (ARIMA), nonlinear modeling (FFNN), and multisensor feedback, can significantly enhance the performance of an assistive feeding robot in both simulation and physical hardware. Our approach yielded a combined MSE of 0.008 and an <italic>R</italic><sup>2</sup> of 94%, which improved feeding success rates from 75% to 90% and reduced response times by 28% (from 3.6 s to 2.2 s), outperforming both ARIMA-only and FFNN-only controllers. These results underscore the value of fusing statistical and machine-learning techniques. ARIMA captures linear trends in a user&#x2019;s eating rhythm, while the FFNN models nonlinear variations, together enabling proactive adjustment of timing and portion size. The closed-loop integration of vision-based mouth readiness detection, ultrasonic ranging, and strain-gauge force sensing further enables the system to adapt to delayed user responses, diverse food textures, low-lighting conditions, and plate movements, which are conditions under which prior passive or semiautonomous feeders struggle. Our fuzzy logic&#x2013;driven force and angle controller also proved effective at tailoring grip strength to different foods (eg, apples vs strawberries), minimizing spills and enhancing safety. Compared to legacy systems like My Spoon and Obi Robot, which rely on fixed trajectories or manual &#x201C;teach&#x201D; modes, our robot autonomously adjusts its behavior in real-time, reducing caregiver intervention and improving user autonomy. 
Despite the promising results, several limitations remain and will guide future research. While the Unity simulation enabled rapid iteration and control benchmarking, it cannot fully replicate the unpredictability of real-world settings, including table height variability, user posture changes, and lighting variation.</p><p>This study focused on only 2 food types, which limits generalizability to more complex diets involving varied textures, consistencies, and utensil requirements (eg, soups or mixed meals). Although the system is designed to operate autonomously, setup and supervision may still be required. User-friendly interfaces, minimal daily calibration, and rapid onboarding for caregivers will be key to adoption. We are exploring guided setup workflows and voice-driven overrides to reduce learning curves. Furthermore, no real-user trials have been conducted, meaning aspects such as user comfort, adaptability, and long-term acceptance remain unexplored. To address these limitations, we are transitioning our control system to a physical robotic arm integrated with the proposed sensor suite. Initial bench-top experiments will assess trajectory accuracy, timing responsiveness, and force safety across a broader range of feeding conditions. Following this, a pilot study involving 3 to 5 individuals with upper-limb impairments is planned to evaluate comfort, performance, and real-world usability. By addressing deployment challenges, including safety, maintenance, training, and personalization, we aim to ensure that future iterations are not only technically robust but also clinically viable and user-friendly. These evaluations will inform iterative improvements and alignment with clinical standards. 
Ultimately, our goal is to deliver a safe, autonomous feeding solution that promotes independence, comfort, and dignity for individuals with motor impairments across diverse care environments.</p></sec><sec id="s4-2"><title>Limitations and Conclusions</title><p>In this work, we used simulated environments and datasets to validate an adaptive feeding robot that significantly advances the state of assistive dining technology. By integrating time-series forecasting (ARIMA), nonlinear modeling (FFNN), and multisensor feedback (vision, ultrasonic ranging, and strain-gauge force sensing), our system anticipates each user&#x2019;s unique feeding pace. It dynamically adjusts both timing and force for safe, precise spoon-to-mouth delivery. In over 1000 Unity simulations and on physical hardware, the hybrid controller achieved an MSE of 0.008 (<italic>R</italic><sup>2</sup>=94%), increased feeding success from 75% to 90%, and reduced response times by 28% (3.6 s to 2.2 s; <italic>P</italic>&#x003C;.001), outperforming ARIMA-only and FFNN-only baselines. Despite these promising results, our study has several limitations. First, performance was primarily evaluated in simulation; real-world variability, such as unstructured environments, diverse plate geometries, and spontaneous user movements, remains to be tested in live trials. Second, we focused on 2 food types (apple and strawberry); future work should encompass a broader range of textures, shapes, and portion sizes. Finally, user acceptance, comfort, and long-term usability were not assessed; ethical considerations surrounding autonomy and trust in human-robot interaction warrant further investigation. Looking ahead, we plan to conduct clinical pilot studies to evaluate real-world efficacy and user satisfaction, extend our sensor suite to include depth cameras and tactile arrays, and explore advanced forecasting methods (eg, recurrent neural networks and transformers) for even finer-grained personalization. 
By addressing these challenges, we aim to transition from a laboratory prototype to a robust, user-centered assistive solution, empowering individuals with motor impairments to dine independently and with dignity.</p></sec></sec></body><back><ack><p>The authors give special thanks to the technical and administrative staff at Anglia Ruskin University for their assistance throughout the project.</p></ack><notes><sec><title>Funding</title><p>This research was supported financially by the School of Engineering and the Built Environment at Anglia Ruskin University. The authors express their gratitude for the institutional resources, guidance, and funding that made this study possible.</p></sec></notes><fn-group><fn fn-type="conflict"><p>None declared.</p></fn></fn-group><glossary><title>Abbreviations</title><def-list><def-item><term id="abb1">ARIMA</term><def><p>autoregressive integrated moving average</p></def></def-item><def-item><term id="abb2">CNN</term><def><p>convolutional neural network</p></def></def-item><def-item><term id="abb3">FFNN</term><def><p>feed-forward neural network</p></def></def-item><def-item><term id="abb4">IK</term><def><p>inverse kinematic</p></def></def-item><def-item><term id="abb5">ILSVRC</term><def><p>ImageNet Large Scale Visual Recognition Challenge</p></def></def-item><def-item><term id="abb6">MSE</term><def><p>mean squared error</p></def></def-item><def-item><term id="abb7">PR2</term><def><p>Personal Robot 2</p></def></def-item><def-item><term id="abb8">RGB-D</term><def><p>red green blue&#x2013;depth</p></def></def-item></def-list></glossary><ref-list><title>References</title><ref id="ref1"><label>1</label><nlm-citation citation-type="book"><source>World Report on Ageing and Health</source><year>2015</year><access-date>2026-01-16</access-date><publisher-name>World Health Organization</publisher-name><comment><ext-link ext-link-type="uri" 
xlink:href="https://www.who.int/publications/i/item/9789241565042">https://www.who.int/publications/i/item/9789241565042</ext-link></comment><pub-id pub-id-type="other">9789241565042</pub-id></nlm-citation></ref><ref id="ref2"><label>2</label><nlm-citation citation-type="web"><article-title>World population ageing</article-title><source>United Nations Department of Economic and Social Affairs</source><year>2015</year><comment><ext-link ext-link-type="uri" xlink:href="https://www.un.org/en/development/desa/population/publications/pdf/ageing/WPA2015_Report.pdf">https://www.un.org/en/development/desa/population/publications/pdf/ageing/WPA2015_Report.pdf</ext-link></comment></nlm-citation></ref><ref id="ref3"><label>3</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Beard</surname><given-names>JR</given-names> </name><name name-style="western"><surname>Bloom</surname><given-names>DE</given-names> </name></person-group><article-title>Towards a comprehensive public health response to population ageing</article-title><source>Lancet</source><year>2015</year><month>02</month><day>14</day><volume>385</volume><issue>9968</issue><fpage>658</fpage><lpage>661</lpage><pub-id pub-id-type="doi">10.1016/S0140-6736(14)61461-6</pub-id><pub-id pub-id-type="medline">25468151</pub-id></nlm-citation></ref><ref id="ref4"><label>4</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Abdi</surname><given-names>J</given-names> </name><name name-style="western"><surname>Al-Hindawi</surname><given-names>A</given-names> </name><name name-style="western"><surname>Ng</surname><given-names>T</given-names> </name><name name-style="western"><surname>Vizcaychipi</surname><given-names>MP</given-names> </name></person-group><article-title>Scoping review on the use of socially assistive robot technology in elderly care</article-title><source>BMJ 
Open</source><year>2018</year><month>02</month><day>12</day><volume>8</volume><issue>2</issue><fpage>e018815</fpage><pub-id pub-id-type="doi">10.1136/bmjopen-2017-018815</pub-id><pub-id pub-id-type="medline">29440212</pub-id></nlm-citation></ref><ref id="ref5"><label>5</label><nlm-citation citation-type="web"><article-title>Loneliness in older people</article-title><source>National Health Service</source><year>2022</year><access-date>2026-01-02</access-date><comment><ext-link ext-link-type="uri" xlink:href="https://www.nhs.uk/mental-health/feelings-symptoms-behaviours/feelings-and-symptoms/loneliness-in-older-people/">https://www.nhs.uk/mental-health/feelings-symptoms-behaviours/feelings-and-symptoms/loneliness-in-older-people/</ext-link></comment></nlm-citation></ref><ref id="ref6"><label>6</label><nlm-citation citation-type="confproc"><person-group person-group-type="author"><name name-style="western"><surname>Torres</surname><given-names>E</given-names> </name><name name-style="western"><surname>Pintaric</surname><given-names>T</given-names> </name><name name-style="western"><surname>Penzes</surname><given-names>A</given-names> </name><name name-style="western"><surname>Schmalstieg</surname><given-names>D</given-names> </name></person-group><article-title>Self-feeding robots for the elderly and disabled</article-title><conf-name>In Proceedings of the 1st ACM Conference on Assistive Technologies</conf-name></nlm-citation></ref><ref id="ref7"><label>7</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Beetz</surname><given-names>M</given-names> </name><name name-style="western"><surname>Jain</surname><given-names>D</given-names> </name><name name-style="western"><surname>M&#x00F6;senlechner</surname><given-names>L</given-names> </name><name name-style="western"><surname>Tenorth</surname><given-names>M</given-names> </name><name 
name-style="western"><surname>Mosenlechner</surname><given-names>L</given-names> </name></person-group><article-title>Towards performing everyday manipulation activities</article-title><source>Robot Auton Syst</source><year>2010</year><month>09</month><volume>58</volume><issue>9</issue><fpage>1085</fpage><lpage>1095</lpage><pub-id pub-id-type="doi">10.1016/j.robot.2010.05.007</pub-id></nlm-citation></ref><ref id="ref8"><label>8</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Park</surname><given-names>D</given-names> </name><name name-style="western"><surname>Hoshi</surname><given-names>Y</given-names> </name><name name-style="western"><surname>Mahajan</surname><given-names>HP</given-names> </name><etal/></person-group><article-title>Active robot-assisted feeding with a general-purpose mobile manipulator: design, evaluation, and lessons learned</article-title><source>Rob Auton Syst</source><year>2020</year><month>02</month><volume>124</volume><fpage>103344</fpage><pub-id pub-id-type="doi">10.1016/j.robot.2019.103344</pub-id></nlm-citation></ref><ref id="ref9"><label>9</label><nlm-citation citation-type="book"><person-group person-group-type="author"><name name-style="western"><surname>Song</surname><given-names>WK</given-names> </name><name name-style="western"><surname>Kim</surname><given-names>J</given-names> </name></person-group><article-title>Novel assistive robot for self-feeding</article-title><source>Robotic Systems-Applications, Control and Programming</source><year>2012</year><publisher-name>InTech</publisher-name><pub-id pub-id-type="doi">10.5772/25756</pub-id></nlm-citation></ref><ref id="ref10"><label>10</label><nlm-citation citation-type="confproc"><person-group person-group-type="author"><name name-style="western"><surname>Won-Kyung</surname><given-names>S</given-names> </name><name name-style="western"><surname>Won&#x2011;Jin</surname><given-names>S</given-names> </name><name 
name-style="western"><surname>Yale</surname><given-names>K</given-names> </name><etal/></person-group><article-title>Usability test of KNRC self-feeding robot</article-title><conf-name>2013 IEEE 13th International Conference on Rehabilitation Robotics (ICORR 2013)</conf-name><conf-date>Jun 24-26, 2013</conf-date><pub-id pub-id-type="doi">10.1109/ICORR.2013.6650501</pub-id></nlm-citation></ref><ref id="ref11"><label>11</label><nlm-citation citation-type="web"><article-title>Robot assisted feeding</article-title><source>Personal Robotics Lab</source><access-date>2026-01-02</access-date><comment><ext-link ext-link-type="uri" xlink:href="https://robotfeeding.io">https://robotfeeding.io</ext-link></comment></nlm-citation></ref><ref id="ref12"><label>12</label><nlm-citation citation-type="web"><article-title>My spoon assistive robot</article-title><source>SECOM</source><year>2007</year><access-date>2026-01-02</access-date><comment><ext-link ext-link-type="uri" xlink:href="https://www.secom.co.jp/products/my-spoon">https://www.secom.co.jp/products/my-spoon</ext-link></comment></nlm-citation></ref><ref id="ref13"><label>13</label><nlm-citation citation-type="confproc"><person-group person-group-type="author"><name name-style="western"><surname>Lee</surname><given-names>MK</given-names> </name><name name-style="western"><surname>Forlizzi</surname><given-names>J</given-names> </name><name name-style="western"><surname>Kiesler</surname><given-names>S</given-names> </name><name name-style="western"><surname>Rybski</surname><given-names>P</given-names> </name><name name-style="western"><surname>Antanitis</surname><given-names>J</given-names> </name><name name-style="western"><surname>Savetsila</surname><given-names>S</given-names> </name></person-group><article-title>Personalization in HRI: a longitudinal field experiment</article-title><year>2012</year><conf-name>Proceedings of the Seventh Annual ACM/IEEE International Conference on Human-Robot 
Interaction</conf-name><conf-date>Mar 5-8, 2012</conf-date><pub-id pub-id-type="doi">10.1145/2157689.2157804</pub-id></nlm-citation></ref><ref id="ref14"><label>14</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>King</surname><given-names>CH</given-names> </name><name name-style="western"><surname>Chen</surname><given-names>TL</given-names> </name><name name-style="western"><surname>Fan</surname><given-names>Z</given-names> </name><name name-style="western"><surname>Glass</surname><given-names>JD</given-names> </name><name name-style="western"><surname>Kemp</surname><given-names>CC</given-names> </name></person-group><article-title>Dusty: an assistive mobile manipulator that retrieves dropped objects for people with motor impairments</article-title><source>Disabil Rehabil Assist Technol</source><year>2012</year><month>03</month><volume>7</volume><issue>2</issue><fpage>168</fpage><lpage>179</lpage><pub-id pub-id-type="doi">10.3109/17483107.2011.615374</pub-id><pub-id pub-id-type="medline">22013888</pub-id></nlm-citation></ref><ref id="ref15"><label>15</label><nlm-citation citation-type="web"><article-title>Introducing obi 3</article-title><source>Meet Obi</source><year>2022</year><access-date>2026-01-02</access-date><comment><ext-link ext-link-type="uri" xlink:href="https://meetobi.com/">https://meetobi.com/</ext-link></comment></nlm-citation></ref><ref id="ref16"><label>16</label><nlm-citation citation-type="web"><article-title>IEat feeding robot</article-title><source>Avant Innovations</source><year>2016</year><access-date>2026-01-02</access-date><comment><ext-link ext-link-type="uri" xlink:href="https://www.avantinnovations.com.au/products/ieat-feeding-robot">https://www.avantinnovations.com.au/products/ieat-feeding-robot</ext-link></comment></nlm-citation></ref><ref id="ref17"><label>17</label><nlm-citation citation-type="web"><person-group person-group-type="author"><name 
name-style="western"><surname>M&#x00FC;ller</surname><given-names>K</given-names> </name></person-group><article-title>Bestic, the eating assistance device</article-title><source>MedicalExpo e-Magazine</source><year>2022</year><access-date>2026-01-02</access-date><comment><ext-link ext-link-type="uri" xlink:href="https://trends.medicalexpo.com/medicalexpo-e-magazine/project-118905-423917.html">https://trends.medicalexpo.com/medicalexpo-e-magazine/project-118905-423917.html</ext-link></comment></nlm-citation></ref><ref id="ref18"><label>18</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Yang</surname><given-names>G</given-names> </name><name name-style="western"><surname>Pang</surname><given-names>Z</given-names> </name><name name-style="western"><surname>Jamal Deen</surname><given-names>M</given-names> </name><etal/></person-group><article-title>Homecare robotic systems for Healthcare 4.0: visions and enabling technologies</article-title><source>IEEE J Biomed Health Inform</source><year>2020</year><month>09</month><volume>24</volume><issue>9</issue><fpage>2535</fpage><lpage>2549</lpage><pub-id pub-id-type="doi">10.1109/JBHI.2020.2990529</pub-id><pub-id pub-id-type="medline">32340971</pub-id></nlm-citation></ref><ref id="ref19"><label>19</label><nlm-citation citation-type="web"><article-title>Meal buddy</article-title><source>Evika</source><year>2024</year><access-date>2026-01-02</access-date><comment><ext-link ext-link-type="uri" xlink:href="https://evika.io/product/meal-buddy/?utm_source=chatgpt.com#details">https://evika.io/product/meal-buddy/?utm_source=chatgpt.com#details</ext-link></comment></nlm-citation></ref><ref id="ref20"><label>20</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Chen</surname><given-names>R</given-names> </name><name name-style="western"><surname>Kim</surname><given-names>TK</given-names> 
</name><name name-style="western"><surname>Jung</surname><given-names>BJ</given-names> </name><name name-style="western"><surname>Hwang</surname><given-names>JH</given-names> </name><name name-style="western"><surname>Ko</surname><given-names>SY</given-names> </name></person-group><article-title>Simple and effective strategies to pick up foods for a meal assistant robot integrated with an integrated spoon and chopstick mechanism</article-title><source>Intel Serv Robotics</source><year>2025</year><month>03</month><volume>18</volume><issue>2</issue><fpage>325</fpage><lpage>338</lpage><pub-id pub-id-type="doi">10.1007/s11370-025-00585-2</pub-id></nlm-citation></ref><ref id="ref21"><label>21</label><nlm-citation citation-type="confproc"><person-group person-group-type="author"><name name-style="western"><surname>Park</surname><given-names>D</given-names> </name><name name-style="western"><surname>Kim</surname><given-names>H</given-names> </name><name name-style="western"><surname>Hoshi</surname><given-names>Y</given-names> </name><name name-style="western"><surname>Erickson</surname><given-names>Z</given-names> </name><name name-style="western"><surname>Kapusta</surname><given-names>A</given-names> </name><name name-style="western"><surname>Kemp</surname><given-names>CC</given-names> </name></person-group><article-title>A multimodal execution monitor with anomaly classification for robot-assisted feeding</article-title><conf-name>2017 IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS)</conf-name><conf-date>Sep 24-28, 2017</conf-date><pub-id pub-id-type="doi">10.1109/IROS.2017.8206437</pub-id></nlm-citation></ref><ref id="ref22"><label>22</label><nlm-citation citation-type="confproc"><person-group person-group-type="author"><name name-style="western"><surname>Gordon</surname><given-names>EK</given-names> </name><name name-style="western"><surname>Meng</surname><given-names>X</given-names> </name><name 
name-style="western"><surname>Bhattacharjee</surname><given-names>T</given-names> </name><name name-style="western"><surname>Barnes</surname><given-names>M</given-names> </name><name name-style="western"><surname>Srinivasa</surname><given-names>SS</given-names> </name></person-group><article-title>Adaptive robot-assisted feeding: an online learning framework for acquiring previously unseen food items</article-title><conf-name>2020 IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS)</conf-name><conf-date>Oct 24, 2020 to Jan 24, 2021</conf-date><comment><ext-link ext-link-type="uri" xlink:href="https://ieeexplore.ieee.org/xpl/mostRecentIssue.jsp?punumber=9340668">https://ieeexplore.ieee.org/xpl/mostRecentIssue.jsp?punumber=9340668</ext-link></comment><pub-id pub-id-type="doi">10.1109/IROS45743.2020.9341359</pub-id></nlm-citation></ref><ref id="ref23"><label>23</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Mashrur</surname><given-names>T</given-names> </name><name name-style="western"><surname>Ghulam</surname><given-names>Z</given-names> </name><name name-style="western"><surname>French</surname><given-names>G</given-names> </name><name name-style="western"><surname>Abdullah</surname><given-names>HA</given-names> </name></person-group><article-title>Assistive feeding robot for upper limb impairment&#x2014;testing and validation</article-title><source>Int J Adv Robot Syst</source><year>2023</year><month>07</month><day>1</day><volume>20</volume><issue>4</issue><pub-id pub-id-type="doi">10.1177/17298806231183571</pub-id></nlm-citation></ref><ref id="ref24"><label>24</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Barhaghtalab</surname><given-names>MH</given-names> </name><name name-style="western"><surname>Sepestanaki</surname><given-names>MA</given-names> </name><name 
name-style="western"><surname>Mobayen</surname><given-names>S</given-names> </name><name name-style="western"><surname>Jalilvand</surname><given-names>A</given-names> </name><name name-style="western"><surname>Fekih</surname><given-names>A</given-names> </name><name name-style="western"><surname>Meigoli</surname><given-names>V</given-names> </name></person-group><article-title>Design of an adaptive fuzzy-neural inference system&#x2013;based control approach for robotic manipulators</article-title><source>Appl Soft Comput</source><year>2023</year><month>12</month><volume>149</volume><fpage>110970</fpage><pub-id pub-id-type="doi">10.1016/j.asoc.2023.110970</pub-id></nlm-citation></ref><ref id="ref25"><label>25</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Crengani&#x0219;</surname><given-names>M</given-names> </name><name name-style="western"><surname>Tera</surname><given-names>M</given-names> </name><name name-style="western"><surname>Biri&#x0219;</surname><given-names>C</given-names> </name><name name-style="western"><surname>G&#x00EE;rjob</surname><given-names>C</given-names> </name></person-group><article-title>Dynamic analysis of a 7 DOF robot using fuzzy logic for inverse kinematics problem</article-title><source>Procedia Comput Sci</source><year>2019</year><volume>162</volume><fpage>298</fpage><lpage>306</lpage><pub-id pub-id-type="doi">10.1016/j.procs.2019.11.288</pub-id></nlm-citation></ref><ref id="ref26"><label>26</label><nlm-citation citation-type="confproc"><person-group person-group-type="author"><name name-style="western"><surname>Esmat</surname><given-names>M</given-names> </name><name name-style="western"><surname>Abdel-Nasser</surname><given-names>M</given-names> </name><name name-style="western"><surname>Saif</surname><given-names>AWA</given-names> </name></person-group><article-title>Solving inverse kinematics for 3R manipulator using artificial neural 
networks</article-title><conf-name>2023 20th International Multi-Conference on Systems, Signals &#x0026; Devices (SSD)</conf-name><conf-date>Feb 20-23, 2023</conf-date><pub-id pub-id-type="doi">10.1109/SSD58187.2023.10411151</pub-id></nlm-citation></ref><ref id="ref27"><label>27</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Parikh</surname><given-names>P</given-names> </name><name name-style="western"><surname>Sharma</surname><given-names>A</given-names> </name><name name-style="western"><surname>Trivedi</surname><given-names>R</given-names> </name><name name-style="western"><surname>Roy</surname><given-names>D</given-names> </name><name name-style="western"><surname>Joshi</surname><given-names>K</given-names> </name></person-group><article-title>Performance evaluation of an indigenously-designed high performance dynamic feeding robotic structure using advanced additive manufacturing technology, machine learning and robot kinematics</article-title><source>Int J Interact Des Manuf</source><year>2025</year><month>02</month><volume>19</volume><issue>2</issue><fpage>909</fpage><lpage>937</lpage><pub-id pub-id-type="doi">10.1007/s12008-023-01513-3</pub-id></nlm-citation></ref><ref id="ref28"><label>28</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Serrezuela</surname><given-names>R</given-names> </name><name name-style="western"><surname>Ch&#x00E1;varro Chavarro</surname><given-names>AF</given-names> </name><name name-style="western"><surname>Tovar Cardozo</surname><given-names>MA</given-names> </name><etal/></person-group><article-title>Kinematic modelling of a robotic arm manipulator using Matlab</article-title><source>ARPN J Eng Appl Sci</source><year>2017</year><access-date>2025-01-02</access-date><volume>12</volume><issue>7</issue><comment><ext-link ext-link-type="uri" 
xlink:href="https://www.arpnjournals.org/jeas/research_papers/rp_2017/jeas_0417_5860.pdf">https://www.arpnjournals.org/jeas/research_papers/rp_2017/jeas_0417_5860.pdf</ext-link></comment></nlm-citation></ref><ref id="ref29"><label>29</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Mystkowski</surname><given-names>A</given-names> </name><name name-style="western"><surname>Wolniakowski</surname><given-names>A</given-names> </name><name name-style="western"><surname>Kadri</surname><given-names>N</given-names> </name><name name-style="western"><surname>Sewiolo</surname><given-names>M</given-names> </name><name name-style="western"><surname>Scalera</surname><given-names>L</given-names> </name></person-group><article-title>Neural network learning algorithms for high-precision position control and drift attenuation in robotic manipulators</article-title><source>Appl Sci</source><year>2023</year><volume>13</volume><issue>19</issue><fpage>10854</fpage><pub-id pub-id-type="doi">10.3390/app131910854</pub-id></nlm-citation></ref><ref id="ref30"><label>30</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Gilca</surname><given-names>G</given-names> </name></person-group><article-title>Kinematic modeling of 6 DOF robotic arm with Matlab</article-title><source>Fiability Durability</source><year>2020</year><access-date>2026-01-02</access-date><issue>1</issue><comment><ext-link ext-link-type="uri" xlink:href="https://www.utgjiu.ro/rev_mec/mecanica/pdf/2020-01/28_Gheorghe%20G%C3%8ELC%C4%82%20-%20KINEMATICS%20ANALYSIS%20AND%20MODELING%20IN%20MATLAB%20OF%20THE%206%20DOF%20ROBOTIC%20ARM.pdf">https://www.utgjiu.ro/rev_mec/mecanica/pdf/2020-01/28_Gheorghe%20G%C3%8ELC%C4%82%20-%20KINEMATICS%20ANALYSIS%20AND%20MODELING%20IN%20MATLAB%20OF%20THE%206%20DOF%20ROBOTIC%20ARM.pdf</ext-link></comment></nlm-citation></ref><ref 
id="ref31"><label>31</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Shimabukuro</surname><given-names>Y</given-names> </name><name name-style="western"><surname>Gushi</surname><given-names>S</given-names> </name><name name-style="western"><surname>Matayoshi</surname><given-names>J</given-names> </name><name name-style="western"><surname>Higa</surname><given-names>H</given-names> </name></person-group><article-title>Self&#x2010;feeding assistive 7&#x2010;DOF robotic arm simulator using solving method of inverse kinematics</article-title><source>Electr Eng Jpn</source><year>2021</year><month>06</month><volume>214</volume><issue>2</issue><pub-id pub-id-type="doi">10.1002/eej.23309</pub-id></nlm-citation></ref><ref id="ref32"><label>32</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Makris</surname><given-names>S</given-names> </name><name name-style="western"><surname>Michalos</surname><given-names>G</given-names> </name><name name-style="western"><surname>Chryssolouris</surname><given-names>G</given-names> </name></person-group><article-title>Virtual commissioning of an assembly cell with cooperating robots</article-title><source>Adv Decis Sci</source><year>2012</year><month>09</month><day>30</day><volume>2012</volume><fpage>1</fpage><lpage>11</lpage><pub-id pub-id-type="doi">10.1155/2012/428060</pub-id></nlm-citation></ref><ref id="ref33"><label>33</label><nlm-citation citation-type="other"><person-group person-group-type="author"><name name-style="western"><surname>Simonyan</surname><given-names>K</given-names> </name><name name-style="western"><surname>Zisserman</surname><given-names>A</given-names> </name></person-group><article-title>Very deep convolutional networks for large-scale image recognition</article-title><comment>Preprint posted online on  Dec 23, 2014</comment><pub-id 
pub-id-type="doi">10.48550/arXiv.1409.1556</pub-id></nlm-citation></ref></ref-list></back></article>